diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 23106888..105be092 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -7,15 +7,13 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - os: [macos-latest, ubuntu-latest] + os: [macos-13, ubuntu-latest] steps: - - name: Set up Go 1.x - uses: actions/setup-go@v2 + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 with: - go-version: ^1.17 - id: go - - name: Check out code into the Go module directory - uses: actions/checkout@v2 + go-version-file: go.mod + check-latest: true - name: Compile run: make install test: @@ -23,15 +21,17 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - os: [macos-latest, ubuntu-latest] + os: [macos-13, ubuntu-latest] steps: - - name: Set up Go 1.x - uses: actions/setup-go@v2 + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 with: - go-version: ^1.17 - id: go - - name: Check out code into the Go module directory - uses: actions/checkout@v2 + go-version-file: go.mod + check-latest: true + - name: Install certutil on macos + if: ${{ matrix.os == 'macos-13' }} + run: | + brew install nss - name: Test run: make test vet: @@ -39,12 +39,13 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - os: [macos-latest, ubuntu-latest] + os: [macos-13, ubuntu-latest] steps: - - name: Check out code into the Go module directory - uses: actions/checkout@v2 - - name: golangci-lint - uses: golangci/golangci-lint-action@v2 + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version-file: go.mod + check-latest: true + - uses: golangci/golangci-lint-action@v3 with: - version: v1.35 args: --timeout 180s diff --git a/Makefile b/Makefile index 7a7c181e..be12fd07 100644 --- a/Makefile +++ b/Makefile @@ -10,8 +10,8 @@ build: .PHONY: vet vet: - docker run --rm -v $$(pwd):/app -w /app golangci/golangci-lint:latest golangci-lint run -v + docker run --rm -v $(CURDIR):/app -w /app golangci/golangci-lint:latest golangci-lint run -v .PHONY: test test: - go test ./... -timeout=180s -coverprofile=c.out -covermode=atomic -count=1 -race -v + go test ./... 
-timeout=5m -coverprofile=c.out -covermode=atomic -count=1 -race -v diff --git a/app.go b/app.go index 217b43c1..43d5e4ea 100644 --- a/app.go +++ b/app.go @@ -2,9 +2,9 @@ package candy import ( "fmt" - "io/ioutil" "net" "net/url" + "os" "path/filepath" "strconv" "strings" @@ -29,7 +29,7 @@ type AppService struct { } func (f *AppService) FindApps() ([]App, error) { - files, err := ioutil.ReadDir(f.cfg.HostRoot) + files, err := os.ReadDir(f.cfg.HostRoot) if err != nil { return nil, err } @@ -41,7 +41,7 @@ func (f *AppService) FindApps() ([]App, error) { continue } - b, err := ioutil.ReadFile(filepath.Join(f.cfg.HostRoot, file.Name())) + b, err := os.ReadFile(filepath.Join(f.cfg.HostRoot, file.Name())) if err != nil { return nil, err } diff --git a/app_test.go b/app_test.go index ecbc120a..1e52da38 100644 --- a/app_test.go +++ b/app_test.go @@ -1,7 +1,7 @@ package candy import ( - "io/ioutil" + "os" "path/filepath" "testing" @@ -104,7 +104,7 @@ func Test_AppService_FindApps(t *testing.T) { dir := t.TempDir() for k, v := range cc.Hosts { - if err := ioutil.WriteFile(filepath.Join(dir, k), []byte(v), 0o0644); err != nil { + if err := os.WriteFile(filepath.Join(dir, k), []byte(v), 0o0644); err != nil { t.Fatalf("error writing test hosts: %s", err) } } diff --git a/caddy/server.go b/caddy/server.go index feb3cbef..11950b79 100644 --- a/caddy/server.go +++ b/caddy/server.go @@ -6,7 +6,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "net" "net/http" "strconv" @@ -54,6 +53,29 @@ type caddyServer struct { caddyCfgMutex sync.Mutex } +func (c *caddyServer) waitForServer(ctx context.Context) error { + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + + t := time.NewTicker(1 * time.Second) + defer t.Stop() + + for { + select { + case <-ctx.Done(): + return ctx.Err() + case <-t.C: + c.cfg.Logger.Info("waiting for Caddy server", zap.Any("cfg", c.cfg)) + err := c.apiRequest(ctx, http.MethodGet, "/config/", nil) + if err == nil { + return nil + } else { + c.cfg.Logger.Debug("error waiting for Caddy server", zap.Error(err)) + } + } + } +} + func (c *caddyServer) Run(ctx context.Context) error { c.cfg.Logger.Info("starting Caddy server", zap.Any("cfg", c.cfg)) defer c.cfg.Logger.Info("shutting down Caddy server") @@ -64,6 +86,10 @@ func (c *caddyServer) Run(ctx context.Context) error { return err } + if err := c.waitForServer(ctx); err != nil { + return err + } + <-ctx.Done() if err := c.stopServer(); err != nil { @@ -139,7 +165,6 @@ func (c *caddyServer) buildConfig(apps []candy.App) *caddy.Config { ), Listen: []string{c.cfg.HTTPAddr}, AutoHTTPS: &caddyhttp.AutoHTTPSConfig{Disabled: true}, - AllowH2C: true, } httpsServer := &caddyhttp.Server{ @@ -155,8 +180,7 @@ func (c *caddyServer) buildConfig(apps []candy.App) *caddy.Config { }, apps, ), - Listen: []string{c.cfg.HTTPSAddr}, - AllowH2C: true, + Listen: []string{c.cfg.HTTPSAddr}, } // Best efforts of parsing corresponding port from addr @@ -180,7 +204,7 @@ func (c *caddyServer) buildConfig(apps []candy.App) *caddy.Config { Automation: &caddytls.AutomationConfig{ Policies: []*caddytls.AutomationPolicy{ { - Subjects: appHosts(apps), + SubjectsRaw: appHosts(apps), IssuersRaw: []json.RawMessage{ caddyconfig.JSONModuleObject(caddytls.InternalIssuer{}, "module", "internal", nil), }, @@ -275,7 +299,7 @@ func (c *caddyServer) apiRequest(ctx context.Context, method, uri string, v inte // if it didn't work, let the user know if resp.StatusCode >= 400 { - respBody, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1024*10)) + respBody, err 
:= io.ReadAll(io.LimitReader(resp.Body, 1024*10)) if err != nil { return fmt.Errorf("HTTP %d: reading error message: %v", resp.StatusCode, err) } diff --git a/cmd/candy/cmd/launch_darwin.go b/cmd/candy/cmd/launch_darwin.go index 562a41c3..0bce13ea 100644 --- a/cmd/candy/cmd/launch_darwin.go +++ b/cmd/candy/cmd/launch_darwin.go @@ -1,4 +1,4 @@ -// +build darwin +//go:build darwin package cmd diff --git a/cmd/candy/cmd/setup_darwin.go b/cmd/candy/cmd/setup_darwin.go index 4bd295e5..73699373 100644 --- a/cmd/candy/cmd/setup_darwin.go +++ b/cmd/candy/cmd/setup_darwin.go @@ -1,11 +1,10 @@ -// +build darwin +//go:build darwin package cmd import ( "errors" "fmt" - "io/ioutil" "net" "os" "path/filepath" @@ -75,7 +74,7 @@ func runSetupRunE(c *cobra.Command, args []string) error { file := filepath.Join(resolverDir, "candy-"+domain) content := fmt.Sprintf(resolverTmpl, domain, host, port) - b, err := ioutil.ReadFile(file) + b, err := os.ReadFile(file) if err == nil { if string(b) == content { logger.Info("resolver configuration file unchanged", zap.String("file", file)) @@ -84,7 +83,7 @@ func runSetupRunE(c *cobra.Command, args []string) error { } logger.Info("writing resolver configuration file", zap.String("file", file)) - if err := ioutil.WriteFile(file, []byte(content), 0o644); err != nil { + if err := os.WriteFile(file, []byte(content), 0o644); err != nil { return err } } diff --git a/cmd/candy/cmd/setup_linux.go b/cmd/candy/cmd/setup_linux.go index 1e50045a..fce7ec09 100644 --- a/cmd/candy/cmd/setup_linux.go +++ b/cmd/candy/cmd/setup_linux.go @@ -1,11 +1,10 @@ -// +build linux +//go:build linux package cmd import ( "errors" "fmt" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -69,7 +68,7 @@ func runSetupRunE(c *cobra.Command, args []string) error { logger = candy.Log() ) - b, err := ioutil.ReadFile(file) + b, err := os.ReadFile(file) if err == nil { if string(b) == content { logger.Info("network name resolution file unchanged", zap.String("file", file)) @@ -78,7 +77,7 @@ func runSetupRunE(c *cobra.Command, args []string) error { } logger.Info("writing network name resolution file", zap.String("file", file)) - if err := ioutil.WriteFile(file, []byte(content), 0o644); err != nil { + if err := os.WriteFile(file, []byte(content), 0o644); err != nil { return err } diff --git a/dns/server.go b/dns/server.go index fd1bee21..97c11f24 100644 --- a/dns/server.go +++ b/dns/server.go @@ -4,7 +4,6 @@ import ( "context" "fmt" "net" - "sync" "time" "github.com/miekg/dns" @@ -41,36 +40,26 @@ func (d *dnsServer) Run(ctx context.Context) error { var g run.Group { - var wg sync.WaitGroup - wg.Add(1) udp := &dns.Server{ Handler: mux, Addr: d.cfg.Addr, Net: "udp", } g.Add(func() error { - wg.Done() return udp.ListenAndServe() }, func(err error) { - // Wait for udp server before shutting it down - wg.Wait() _ = udp.ShutdownContext(ctx) }) } { - var wg sync.WaitGroup - wg.Add(1) tcp := &dns.Server{ Handler: mux, Addr: d.cfg.Addr, Net: "tcp", } g.Add(func() error { - wg.Done() return tcp.ListenAndServe() }, func(err error) { - // Wait for tcp server before shutting it down - wg.Wait() _ = tcp.ShutdownContext(ctx) }) } diff --git a/go.mod b/go.mod index 2328eaa0..587b8279 100644 --- a/go.mod +++ b/go.mod @@ -1,116 +1,125 @@ module github.com/owenthereal/candy -go 1.17 +go 1.21 require ( - github.com/caddyserver/caddy/v2 v2.4.5 - github.com/fsnotify/fsnotify v1.5.1 - github.com/google/go-cmp v0.5.6 - github.com/miekg/dns v1.1.43 + github.com/caddyserver/caddy/v2 v2.7.4 + github.com/fsnotify/fsnotify v1.6.0 + 
github.com/google/go-cmp v0.5.9 + github.com/miekg/dns v1.1.55 github.com/oklog/run v1.1.1-0.20200508094559-c7096881717e - github.com/spf13/cobra v1.2.1 + github.com/spf13/cobra v1.7.0 github.com/spf13/pflag v1.0.5 github.com/spf13/viper v1.9.0 - go.uber.org/zap v1.19.1 + go.uber.org/zap v1.25.0 inet.af/tcpproxy v0.0.0-20210824174053-2e577fef49e2 ) require ( - cloud.google.com/go/kms v1.0.0 // indirect + filippo.io/edwards25519 v1.0.0 // indirect github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 // indirect github.com/Masterminds/goutils v1.1.1 // indirect - github.com/Masterminds/semver/v3 v3.1.1 // indirect - github.com/Masterminds/sprig/v3 v3.2.2 // indirect - github.com/antlr/antlr4 v0.0.0-20200503195918-621b933c7a7f // indirect + github.com/Masterminds/semver/v3 v3.2.0 // indirect + github.com/Masterminds/sprig/v3 v3.2.3 // indirect + github.com/Microsoft/go-winio v0.6.0 // indirect + github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect + github.com/aryann/difflib v0.0.0-20210328193216-ff5ff6dc229b // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/caddyserver/certmagic v0.14.5 // indirect + github.com/caddyserver/certmagic v0.19.2 // indirect github.com/cespare/xxhash v1.1.0 // indirect - github.com/cespare/xxhash/v2 v2.1.1 // indirect - github.com/cheekybits/genny v1.0.0 // indirect - github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e // indirect - github.com/cpuguy83/go-md2man/v2 v2.0.0 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/chzyer/readline v1.5.1 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect github.com/dgraph-io/badger v1.6.2 // indirect github.com/dgraph-io/badger/v2 v2.2007.4 // indirect - github.com/dgraph-io/ristretto v0.0.4-0.20200906165740-41ebdbffecfd // indirect + github.com/dgraph-io/ristretto v0.1.0 // indirect github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 // indirect - github.com/dustin/go-humanize v1.0.1-0.20200219035652-afde56e7acac // indirect + github.com/dustin/go-humanize v1.0.1 // indirect github.com/go-kit/kit v0.10.0 // indirect - github.com/go-logfmt/logfmt v0.5.0 // indirect - github.com/go-sql-driver/mysql v1.6.0 // indirect - github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect - github.com/golang/protobuf v1.5.2 // indirect - github.com/golang/snappy v0.0.3 // indirect - github.com/google/cel-go v0.7.3 // indirect + github.com/go-logfmt/logfmt v0.5.1 // indirect + github.com/go-sql-driver/mysql v1.7.0 // indirect + github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect + github.com/golang/glog v1.1.0 // indirect + github.com/golang/mock v1.6.0 // indirect + github.com/golang/protobuf v1.5.3 // indirect + github.com/golang/snappy v0.0.4 // indirect + github.com/google/cel-go v0.15.1 // indirect + github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect github.com/google/uuid v1.3.0 // indirect github.com/hashicorp/hcl v1.0.0 // indirect - github.com/huandu/xstrings v1.3.1 // indirect - github.com/imdario/mergo v0.3.11 // indirect - github.com/inconshreveable/mousetrap v1.0.0 // indirect - github.com/juju/ansiterm v0.0.0-20180109212912-720a0952cc2a // indirect - github.com/klauspost/compress v1.13.4 // indirect - github.com/klauspost/cpuid/v2 v2.0.9 // indirect + github.com/huandu/xstrings v1.3.3 // indirect + github.com/imdario/mergo v0.3.12 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/jackc/chunkreader/v2 
v2.0.1 // indirect + github.com/jackc/pgconn v1.14.0 // indirect + github.com/jackc/pgio v1.0.0 // indirect + github.com/jackc/pgpassfile v1.0.0 // indirect + github.com/jackc/pgproto3/v2 v2.3.2 // indirect + github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect + github.com/jackc/pgtype v1.14.0 // indirect + github.com/jackc/pgx/v4 v4.18.0 // indirect + github.com/klauspost/compress v1.16.7 // indirect + github.com/klauspost/cpuid/v2 v2.2.5 // indirect github.com/libdns/libdns v0.2.1 // indirect - github.com/lucas-clemente/quic-go v0.23.0 // indirect - github.com/lunixbochs/vtclean v1.0.0 // indirect github.com/magiconair/properties v1.8.5 // indirect - github.com/manifoldco/promptui v0.8.0 // indirect - github.com/marten-seemann/qpack v0.2.1 // indirect - github.com/marten-seemann/qtls-go1-16 v0.1.4 // indirect - github.com/marten-seemann/qtls-go1-17 v0.1.0 // indirect + github.com/manifoldco/promptui v0.9.0 // indirect + github.com/mastercactapus/proxyprotocol v0.0.4 // indirect github.com/mattn/go-colorable v0.1.8 // indirect - github.com/mattn/go-isatty v0.0.13 // indirect + github.com/mattn/go-isatty v0.0.16 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect - github.com/mholt/acmez v1.0.0 // indirect - github.com/micromdm/scep/v2 v2.0.0 // indirect - github.com/mitchellh/copystructure v1.0.0 // indirect - github.com/mitchellh/mapstructure v1.4.2 // indirect - github.com/mitchellh/reflectwalk v1.0.1 // indirect - github.com/nxadm/tail v1.4.8 // indirect - github.com/onsi/ginkgo v1.16.4 // indirect + github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect + github.com/mholt/acmez v1.2.0 // indirect + github.com/micromdm/scep/v2 v2.1.0 // indirect + github.com/mitchellh/copystructure v1.2.0 // indirect + github.com/mitchellh/go-ps v1.0.0 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/onsi/ginkgo/v2 v2.9.5 // indirect github.com/pelletier/go-toml v1.9.4 // indirect github.com/pkg/errors v0.9.1 // indirect - github.com/prometheus/client_golang v1.11.0 // indirect - github.com/prometheus/client_model v0.2.0 // indirect - github.com/prometheus/common v0.26.0 // indirect - github.com/prometheus/procfs v0.6.0 // indirect - github.com/rs/xid v1.2.1 // indirect - github.com/russross/blackfriday/v2 v2.0.1 // indirect - github.com/samfoo/ansi v0.0.0-20160124022901-b6bd2ded7189 // indirect + github.com/prometheus/client_golang v1.14.0 // indirect + github.com/prometheus/client_model v0.3.0 // indirect + github.com/prometheus/common v0.37.0 // indirect + github.com/prometheus/procfs v0.8.0 // indirect + github.com/quic-go/qpack v0.4.0 // indirect + github.com/quic-go/qtls-go1-20 v0.3.1 // indirect + github.com/quic-go/quic-go v0.37.5 // indirect + github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/shopspring/decimal v1.2.0 // indirect github.com/shurcooL/sanitized_anchor_name v1.0.0 // indirect - github.com/sirupsen/logrus v1.7.0 // indirect - github.com/smallstep/certificates v0.16.4 // indirect - github.com/smallstep/cli v0.16.1 // indirect - github.com/smallstep/nosql v0.3.8 // indirect - github.com/smallstep/truststore v0.9.6 // indirect + github.com/slackhq/nebula v1.6.1 // indirect + github.com/smallstep/certificates v0.24.3-rc.5 // indirect + github.com/smallstep/nosql v0.6.0 // indirect + github.com/smallstep/truststore v0.12.1 // indirect github.com/spf13/afero v1.6.0 // indirect github.com/spf13/cast v1.4.1 // indirect 
github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/stoewer/go-strcase v1.2.0 // indirect github.com/subosito/gotenv v1.2.0 // indirect - github.com/urfave/cli v1.22.5 // indirect - go.etcd.io/bbolt v1.3.6 // indirect - go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1 // indirect - go.step.sm/cli-utils v0.4.1 // indirect - go.step.sm/crypto v0.9.0 // indirect - go.step.sm/linkedca v0.0.0-20210611183751-27424aae8d25 // indirect - go.uber.org/atomic v1.7.0 // indirect - go.uber.org/multierr v1.6.0 // indirect - golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 // indirect - golang.org/x/mod v0.4.2 // indirect - golang.org/x/net v0.0.0-20210614182718-04defd469f4e // indirect - golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365 // indirect - golang.org/x/term v0.0.0-20210503060354-a79de5458b56 // indirect - golang.org/x/text v0.3.6 // indirect - golang.org/x/tools v0.1.5 // indirect - golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect - google.golang.org/api v0.57.0 // indirect - google.golang.org/genproto v0.0.0-20210921142501-181ce0d877f6 // indirect - google.golang.org/grpc v1.40.0 // indirect - google.golang.org/protobuf v1.27.1 // indirect + github.com/tailscale/tscert v0.0.0-20230509043813-4e9cb4f2b4ad // indirect + github.com/urfave/cli v1.22.14 // indirect + github.com/zeebo/blake3 v0.2.3 // indirect + go.etcd.io/bbolt v1.3.7 // indirect + go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 // indirect + go.step.sm/cli-utils v0.8.0 // indirect + go.step.sm/crypto v0.33.0 // indirect + go.step.sm/linkedca v0.20.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + golang.org/x/crypto v0.12.0 // indirect + golang.org/x/exp v0.0.0-20230310171629-522b1b587ee0 // indirect + golang.org/x/mod v0.11.0 // indirect + golang.org/x/net v0.14.0 // indirect + golang.org/x/sys v0.11.0 // indirect + golang.org/x/term v0.11.0 // indirect + golang.org/x/text v0.12.0 // indirect + golang.org/x/tools v0.10.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20230706204954-ccb25ca9f130 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 // indirect + google.golang.org/grpc v1.56.2 // indirect + google.golang.org/protobuf v1.31.0 // indirect gopkg.in/ini.v1 v1.63.2 // indirect - gopkg.in/square/go-jose.v2 v2.5.1 // indirect - gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect + gopkg.in/square/go-jose.v2 v2.6.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect - howett.net/plist v0.0.0-20181124034731-591f970eefbb // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + howett.net/plist v1.0.0 // indirect ) diff --git a/go.sum b/go.sum index db6c9da2..5753e41b 100644 --- a/go.sum +++ b/go.sum @@ -1,14 +1,8 @@ -bazil.org/fuse v0.0.0-20180421153158-65cc252bf669/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= -bitbucket.org/creachadair/shell v0.0.6/go.mod h1:8Qqi/cYk7vPnsOePHroKXDJYmb5x7ENhtiFtfZq8K+M= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.39.0/go.mod h1:rVLT6fkc8chs9sfPtFc1SBH6em7n+ZoXaG+87tDISts= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go 
v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= @@ -21,7 +15,6 @@ cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOY cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= @@ -30,246 +23,158 @@ cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSU cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= -cloud.google.com/go v0.94.1 h1:DwuSvDZ1pTYGbXo8yOJevCTr3BoBlE+OVkHAKiYQUXc= -cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= +cloud.google.com/go v0.110.4 h1:1JYyxKMN9hd5dR2MYTPWkGUgcoxVVhg0LKNKEo0qvmk= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/compute v1.20.1 h1:6aKEtlUiwEpJzM001l0yFkpXmUVXaN8W+fbkb2AZNbg= +cloud.google.com/go/compute v1.20.1/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= +cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= cloud.google.com/go/firestore v1.6.0/go.mod h1:afJwI0vaXwAG54kI7A//lP/lSPDkQORQuMkv56TxEPU= -cloud.google.com/go/kms v1.0.0 h1:YkIeqPXqTAlwXk3Z2/WG0d6h1tqJQjU354WftjEoP9E= -cloud.google.com/go/kms v1.0.0/go.mod h1:nhUehi+w7zht2XrUfvTRNpxrfayBHqP4lu2NSywui/0= +cloud.google.com/go/iam v1.1.0 h1:67gSqaPukx7O8WLLHMa0PNs3EBGd2eE4d+psbO/CO94= +cloud.google.com/go/iam v1.1.0/go.mod h1:nxdHjaKfCr7fNYx/HJMM8LgiMugmveWlkatear5gVyk= +cloud.google.com/go/kms v1.15.0 h1:xYl5WEaSekKYN5gGRyhjvZKM22GVBBCzegGNVPy+aIs= +cloud.google.com/go/kms v1.15.0/go.mod h1:c9J991h5DTl+kg7gi3MYomh12YEENGrf48ee/N/2CDM= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= 
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/spanner v1.17.0/go.mod h1:+17t2ixFwRG4lWRwE+5kipDR9Ef07Jkmc8z0IbMDKUs= -cloud.google.com/go/spanner v1.18.0/go.mod h1:LvAjUXPeJRGNuGpikMULjhLj/t9cRvdc+fxRoLiugXA= -cloud.google.com/go/spanner v1.20.0/go.mod h1:ajR/W06cMHQu7nqQ4irRGplPNoWgejGJlEhlB8xBTKk= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -code.gitea.io/sdk/gitea v0.11.3/go.mod h1:z3uwDV/b9Ls47NGukYM9XhnHtqPh/J+t40lsUrR6JDY= -contrib.go.opencensus.io/exporter/aws v0.0.0-20181029163544-2befc13012d0/go.mod h1:uu1P0UCM/6RbsMrgPa98ll8ZcHM858i/AD06a9aLRCA= -contrib.go.opencensus.io/exporter/ocagent v0.5.0/go.mod h1:ImxhfLRpxoYiSq891pBrLVhN+qmP8BTVvdH2YLs7Gl0= -contrib.go.opencensus.io/exporter/stackdriver v0.12.1/go.mod h1:iwB6wGarfphGGe/e5CWqyUk/cLzKnWsOKPVW3no6OTw= -contrib.go.opencensus.io/exporter/stackdriver v0.13.5/go.mod h1:aXENhDJ1Y4lIg4EUaVTwzvYETVNZk10Pu26tevFKLUc= -contrib.go.opencensus.io/exporter/stackdriver v0.13.7/go.mod h1:huNtlWx75MwO7qMs0KrMxPZXzNNWebav1Sq/pm02JdQ= -contrib.go.opencensus.io/integrations/ocsql v0.1.4/go.mod h1:8DsSdjz3F+APR+0z0WkU1aRorQCFfRxvqjUUPMbF3fE= -contrib.go.opencensus.io/resource v0.1.1/go.mod h1:F361eGI91LCmW1I/Saf+rX0+OFcigGlFvXwEGEnkRLA= -dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU= -dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4= -dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= -git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= +filippo.io/edwards25519 v1.0.0 h1:0wAIcmJUqRdI8IJ/3eGi5/HwXZWPujYXXlkrQogz0Ek= +filippo.io/edwards25519 v1.0.0/go.mod h1:N1IkdkCkiLB6tki+MYJoSx2JTY9NUlxZE7eHn5EwJns= github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 h1:cTp8I5+VIoKjsnZuH8vjyaysT/ses3EvZeaV/1UkF2M= github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= -github.com/Azure/azure-amqp-common-go/v2 v2.1.0/go.mod h1:R8rea+gJRuJR6QxTir/XuEd+YuKoUiazDC/N96FiDEU= -github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4= -github.com/Azure/azure-sdk-for-go v29.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go v30.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-service-bus-go v0.9.1/go.mod h1:yzBx6/BUGfjfeqbRZny9AQIbIe3AcV9WZbAdpkoXOa0= -github.com/Azure/azure-storage-blob-go 
v0.8.0/go.mod h1:lPI3aLPpuLTeUwh1sViKXFxwl2B6teiRqI0deQUvsw0= -github.com/Azure/go-autorest v12.0.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= -github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= -github.com/GoogleCloudPlatform/cloudsql-proxy v0.0.0-20191009163259-e802c2cb94ae/go.mod h1:mjwGPas4yKduTyubHvD1Atl9r1rUq8DfVy+gkVvZ+oo= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= -github.com/Masterminds/glide v0.13.2/go.mod h1:STyF5vcenH/rUqTEv+/hBXlSTo7KYwg2oc2f4tzPWic= -github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= -github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= -github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= -github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= -github.com/Masterminds/semver/v3 v3.0.3/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= -github.com/Masterminds/semver/v3 v3.1.0/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= -github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc= github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= -github.com/Masterminds/sprig v2.15.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= -github.com/Masterminds/sprig v2.22.0+incompatible h1:z4yfnGrZ7netVz+0EDJ0Wi+5VZCSYp4Z0m2dk6cEM60= -github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= -github.com/Masterminds/sprig/v3 v3.1.0/go.mod h1:ONGMf7UfYGAbMXCZmQLy8x3lCDIPrEZE/rU8pmrbihA= -github.com/Masterminds/sprig/v3 v3.2.2 h1:17jRggJu518dr3QaafizSXOjKYp94wKfABxUmyxvxX8= -github.com/Masterminds/sprig/v3 v3.2.2/go.mod h1:UoaO7Yp8KlPnJIYWTFkMaqPUYKTfGFPhxNuwnnxkKlk= -github.com/Masterminds/vcs v1.13.0/go.mod h1:N09YCmOQr6RLxC6UNHzuVwAdodYbbnycGHSmwVJjcKA= -github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= +github.com/Masterminds/semver/v3 v3.2.0 h1:3MEsd0SM6jqZojhjLWWeBY+Kcjy9i6MQAeY7YgDP83g= +github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= +github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA= +github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM= +github.com/Microsoft/go-winio v0.6.0 h1:slsWYD/zyx7lCXoZVlvQrj0hPTM1HI4+v1sIda2yDvg= +github.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2BO69KH/soAE= github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/toxiproxy 
v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= -github.com/ThalesIgnite/crypto11 v1.2.4/go.mod h1:ILDKtnCKiQ7zRoNxcp36Y1ZR8LBPmR2E23+wTQe/MlE= -github.com/ThomasRooney/gexpect v0.0.0-20161231170123-5482f0350944/go.mod h1:sPML5WwI6oxLRLPuuqbtoOKhtmpVDCYtwsps+I+vjIY= github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= -github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs= -github.com/alecthomas/assert v0.0.0-20170929043011-405dbfeb8e38/go.mod h1:r7bzyVFMNntcxPZXK3/+KdruV1H5KSlyVY0gc+NgInI= -github.com/alecthomas/chroma v0.7.2-0.20200305040604-4f3623dce67a/go.mod h1:fv5SzZPFJbwp2NXJWpFIX7DZS4HgV1K4ew4Pc2OZD9s= -github.com/alecthomas/chroma v0.9.2/go.mod h1:eMuEnpA18XbG/WhOWtCzJHS7WqEtDAI+HxdwoW0nVSk= -github.com/alecthomas/colour v0.0.0-20160524082231-60882d9e2721/go.mod h1:QO9JBoKquHd+jz9nshCh40fOfO+JzsoXy8qTHF68zU0= -github.com/alecthomas/kingpin v2.2.6+incompatible/go.mod h1:59OFYbFVLKQKq+mqrL6Rw5bR0c3ACQaawgXx0QYndlE= -github.com/alecthomas/kong v0.2.1-0.20190708041108-0548c6b1afae/go.mod h1:+inYUSluD+p4L8KdviBSgzcqEjUQOfC5fQDRFuc36lI= -github.com/alecthomas/kong v0.2.4/go.mod h1:kQOmtJgV+Lb4aj+I2LEn40cbtawdWJ9Y8QLq+lElKxE= -github.com/alecthomas/repr v0.0.0-20180818092828-117648cd9897/go.mod h1:xTS7Pm1pD1mvyM075QCDSRqH6qRLXylzS24ZTpRiSzQ= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/antlr/antlr4 v0.0.0-20200503195918-621b933c7a7f h1:0cEys61Sr2hUBEXfNV8eyQP01oZuBgoMeHunebPirK8= -github.com/antlr/antlr4 v0.0.0-20200503195918-621b933c7a7f/go.mod h1:T7PbCXFs94rrTttyxjbyT5+/1V8T2TYDejxUfHJjw1Y= -github.com/aokoli/goutils v1.0.1/go.mod h1:SijmP0QR8LtwsmDs8Yii5Z/S4trXFGFC2oO5g9DP+DQ= -github.com/apache/beam v2.28.0+incompatible/go.mod h1:/8NX3Qi8vGstDLLaeaU7+lzVEu/ACaQhYjeefzQ0y1o= -github.com/apache/beam v2.30.0+incompatible/go.mod h1:/8NX3Qi8vGstDLLaeaU7+lzVEu/ACaQhYjeefzQ0y1o= +github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df h1:7RFfzj4SSt6nnvCPbCqijJi1nWCd+TqAT3bYCStRC18= +github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/apex/log v1.1.4/go.mod h1:AlpoD9aScyQfJDVHmLMEcx4oU6LqzkWp4Mg9GdAcEvQ= -github.com/apex/logs v0.0.4/go.mod h1:XzxuLZ5myVHDy9SAmYpamKKRNApGj54PfYLcFrXqDwo= -github.com/aphistic/golf v0.0.0-20180712155816-02c07f170c5a/go.mod h1:3NqKYiepwy8kCu4PNA+aP7WUV72eXWJeP9/r3/K9aLE= 
-github.com/aphistic/sweet v0.2.0/go.mod h1:fWDlIh/isSE9n6EPsRmC0det+whmX6dJid3stzu0Xys= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-proxyproto v0.0.0-20210323213023-7e956b284f0a/go.mod h1:QmP9hvJ91BbJmGVGSbutW19IC0Q9phDCLGaomwTJbgU= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= +github.com/aryann/difflib v0.0.0-20210328193216-ff5ff6dc229b h1:uUXgbcPDK3KpW29o4iy7GtuappbWT0l5NaMo9H9pJDw= github.com/aryann/difflib v0.0.0-20210328193216-ff5ff6dc229b/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= -github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= -github.com/aws/aws-sdk-go v1.15.27/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= -github.com/aws/aws-sdk-go v1.19.18/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.19.45/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.20.6/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.23.20/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.25.11/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.30.29/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= -github.com/aws/aws-sdk-go v1.37.0 h1:GzFnhOIsrGyQ69s7VgqtrG2BG8v7X7vwB3Xpbd/DBBk= -github.com/aws/aws-sdk-go v1.37.0/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= +github.com/aws/aws-sdk-go v1.44.307 h1:2R0/EPgpZcFSUwZhYImq/srjaOrOfLv5MNRzrFyAM38= +github.com/aws/aws-sdk-go v1.44.307/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= -github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59/go.mod h1:q/89r3U2H7sSsE2t6Kca0lfwTK8JdoNGS/yzM/4iH5I= -github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= -github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= -github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= +github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= 
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= -github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= -github.com/blakesmith/ar v0.0.0-20190502131153-809d4375e1fb/go.mod h1:PkYb9DJNAwrSvRx5DYA+gUcOIgTGVMNkfSCbZM8cWpI= github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= -github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= -github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= -github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= -github.com/caarlos0/ctrlc v1.0.0/go.mod h1:CdXpj4rmq0q/1Eb44M9zi2nKB0QraNKuRGYGrrHhcQw= -github.com/caddyserver/caddy/v2 v2.4.5 h1:P1mRs6V2cMcagSPn+NWpD+OEYUYLIf6ecOa48cFGeUg= -github.com/caddyserver/caddy/v2 v2.4.5/go.mod h1:YhfZAAh3jWSbG6rEEOM49FwxmcbLY2fZQVlo59Sc/80= -github.com/caddyserver/certmagic v0.14.5 h1:y4HcFzLLBMsTv8sSlAPj5K55mvntX8e8ExcmB/lhO6w= -github.com/caddyserver/certmagic v0.14.5/go.mod h1:/0VQ5og2Jxa5yBQ8eT80wWS7fi/DgNy1uXeXRUJ1Wj0= -github.com/campoy/unique v0.0.0-20180121183637-88950e537e7e/go.mod h1:9IOqJGCPMSc6E5ydlp5NIonxObaeu/Iub/X03EKPVYo= +github.com/caddyserver/caddy/v2 v2.7.4 h1:J8nisjdOxnYHXlorUKXY75Gr6iBfudfoGhrJ8t7/flI= +github.com/caddyserver/caddy/v2 v2.7.4/go.mod h1:/OH2g/56QCSCajEWsFa8kjwacziG/YFxeWgKacnK6KE= +github.com/caddyserver/certmagic v0.19.2 h1:HZd1AKLx4592MalEGQS39DKs2ZOAJCEM/xYPMQ2/ui0= +github.com/caddyserver/certmagic v0.19.2/go.mod h1:fsL01NomQ6N+kE2j37ZCnig2MFosG+MIO4ztnmG/zz8= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= -github.com/cavaliercoder/go-cpio v0.0.0-20180626203310-925f9528c45e/go.mod h1:oDpT4efm8tSYHXV5tHSdRvBet/b/QzxZ+XyyPehvm3A= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= -github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= -github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cheekybits/genny v1.0.0 h1:uGGa4nei+j20rOSeDeP5Of12XVm7TGUd4dJA9RDitfE= -github.com/cheekybits/genny v1.0.0/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= -github.com/chzyer/logex v1.1.10 h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= 
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e h1:fY5BOSpyZCqRo5OhCuC+XN+r/bBCmeuuJtjz+bCNIf8= +github.com/chzyer/logex v1.2.1 h1:XHDu3E6q+gdHgsdTPH6ImJMIp436vR6MPtH8gP05QzM= +github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8= +github.com/chzyer/readline v1.5.1 h1:upd/6fQk4src78LMRzh5vItIt361/o4uq553V8B5sGI= +github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/chzyer/test v1.0.0 h1:p3BQDXSxOhOG0P9z6/hGnII4LGiEPOYBhs8asl/fC04= +github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8= github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= +github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= -github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo= -github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA= -github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= -github.com/codegangsta/cli v1.20.0/go.mod h1:/qJNoX69yVSKu5o4jLyXAENLRyk1uhi7zkbQ3slBdOA= -github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd/v22 v22.1.0/go.mod 
h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/corpix/uarand v0.1.1/go.mod h1:SFKZvkcRoLqVRFZ4u25xPmp6m9ktANfbpXZ7SJ0/FNU= -github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM= -github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= +github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/danwakefield/fnmatch v0.0.0-20160403171240-cbb64ac3d964/go.mod h1:Xd9hchkHSWYkEqJwUGisez3G1QY8Ryz0sdWrLPMGjLk= -github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/devigned/tab v0.1.1/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY= github.com/dgraph-io/badger v1.6.2 h1:mNw0qs90GVgGGWylh0umH5iag1j6n/PeJtNvL6KY/x8= github.com/dgraph-io/badger v1.6.2/go.mod h1:JW2yswe3V058sS0kZ2h/AXeDSqFjxnZcRrVH//y2UQE= -github.com/dgraph-io/badger/v2 v2.0.1-rc1.0.20201003150343-5d1bab4fc658/go.mod h1:2uGEvGm+JSDLd5UAaKIFSbXDcYyeH0fWJP4N2HMMYMI= github.com/dgraph-io/badger/v2 v2.2007.4 h1:TRWBQg8UrlUhaFdco01nO2uXwzKS7zd+HVdwV/GHc4o= github.com/dgraph-io/badger/v2 v2.2007.4/go.mod h1:vSw/ax2qojzbN6eXHIx6KPKtCSHJN/Uz0X0VPruTIhk= github.com/dgraph-io/ristretto v0.0.2/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= -github.com/dgraph-io/ristretto v0.0.4-0.20200906165740-41ebdbffecfd h1:KoJOtZf+6wpQaDTuOWGuo61GxcPBIfhwRxRTaTWGCTc= -github.com/dgraph-io/ristretto v0.0.4-0.20200906165740-41ebdbffecfd/go.mod h1:YylP9MpCYGVZQrly/j/diqcdUetCRRePeBB0c2VGXsA= +github.com/dgraph-io/ristretto v0.1.0 h1:Jv3CGQHp9OjuMBSne1485aDpUkTKEcUqF+jm/LuerPI= +github.com/dgraph-io/ristretto v0.1.0/go.mod h1:fux0lOrBhrVCJd3lcTHsIJhq1T2rokOu6v9Vcb3Q9ug= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y= 
github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= -github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= -github.com/dlclark/regexp2 v1.1.6/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= -github.com/dlclark/regexp2 v1.2.0/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= -github.com/dlclark/regexp2 v1.4.0/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/dustin/go-humanize v1.0.1-0.20200219035652-afde56e7acac h1:opbrjaN/L8gg6Xh5D04Tem+8xVcz6ajZlGCs49mQgyg= -github.com/dustin/go-humanize v1.0.1-0.20200219035652-afde56e7acac/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= -github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -279,75 +184,57 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.m github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v0.3.0-java/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/etcd-io/gofail v0.0.0-20190801230047-ad7f989257ca/go.mod h1:49H/RkXP8pKaZy4h0d+NW16rSLhyVBt4o6VLJbmOqDE= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= -github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= -github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/fortytw2/leaktest v1.2.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= -github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= -github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= github.com/franela/goblin 
[go.sum hunks elided: several hundred checksum entries added and removed in lockstep with the go.mod dependency changes above; the original one-entry-per-line diff structure was lost in extraction. Notable version bumps visible in the hunks: fsnotify/fsnotify v1.5.1 → v1.6.0, go-sql-driver/mysql v1.6.0 → v1.7.0, golang/protobuf v1.5.2 → v1.5.3, miekg/dns v1.1.43 → v1.1.55, mholt/acmez v1.0.0 → v1.2.0, smallstep/certificates v0.16.4 → v0.24.3-rc.5, smallstep/truststore v0.9.6 → v0.12.1, and spf13/cobra v1.2.1 → v1.7.0. The lucas-clemente/quic-go and marten-seemann/qtls entries are replaced by quic-go/quic-go v0.37.5, quic-go/qtls-go1-20 v0.3.1, and quic-go/qpack v0.4.0. New entry blocks appear for the jackc/pg* packages (pgconn, pgio, pgproto3, pgtype, pgx/v4 v4.18.0, puddle), google/go-tpm, google/go-tpm-tools, google/go-tspi, and slackhq/nebula. Entries for dropped dependencies (fullstorydev/grpcurl, gliderlabs/ssh, go-chi/chi, goreleaser, the shurcooL packages, and others) are removed.]
h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= -github.com/thales-e-security/pool v0.0.2/go.mod h1:qtpMm2+thHtqhLzTwgDBj/OuNnMpupY8mv0Phz0gjhU= -github.com/tj/assert v0.0.0-20171129193455-018094318fb0/go.mod h1:mZ9/Rh9oLWpLLDRpvE+3b7gP/C2YyLFYxNmcLnPTMe0= -github.com/tj/go-elastic v0.0.0-20171221160941-36157cbbebc2/go.mod h1:WjeM0Oo1eNAjXGDx2yma7uG2XoyRZTq1uv3M/o7imD0= -github.com/tj/go-kinesis v0.0.0-20171128231115-08b17f58cb1b/go.mod h1:/yhzCV0xPfx6jb1bBgRFjl5lytqVqZXEaeqWP8lTEao= -github.com/tj/go-spin v1.1.0/go.mod h1:Mg1mzmePZm4dva8Qz60H2lHwmJ2loum4VIrLgVnKwh4= +github.com/tailscale/tscert v0.0.0-20230509043813-4e9cb4f2b4ad h1:JEOo9j4RzDPBJFTU9YZ/QPkLtfV8+6PbZFFOSUx5VP4= +github.com/tailscale/tscert v0.0.0-20230509043813-4e9cb4f2b4ad/go.mod h1:kNGUQ3VESx3VZwRwA9MSCUegIl6+saPL8Noq82ozCaU= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tmc/grpc-websocket-proxy v0.0.0-20200427203606-3cfed13b9966/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce/go.mod h1:o8v6yHRoik09Xen7gje4m9ERNah1d1PPsVq1VEx9vE4= -github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/ulikunitz/xz v0.5.6/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= -github.com/ulikunitz/xz v0.5.7/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/urfave/cli v1.22.4/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/urfave/cli v1.22.5 
h1:lNq9sAHXK2qfdI8W+GRItjCEkI+2oR4d+MEHy1CKXoU= -github.com/urfave/cli v1.22.5/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= -github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= -github.com/weppos/publicsuffix-go v0.4.0/go.mod h1:z3LCPQ38eedDQSwmsSRW4Y7t2L8Ln16JPQ02lHAdn5k= -github.com/xanzy/go-gitlab v0.31.0/go.mod h1:sPLojNBn68fMUWSxIJtdVVIP8uSBYqesTfDUseX11Ug= -github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4= -github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos= +github.com/urfave/cli v1.22.14 h1:ebbhrRiGK2i4naQJr+1Xj92HXZCrK7MsyTS/ob3HnAk= +github.com/urfave/cli v1.22.14/go.mod h1:X0eDS6pD6Exaclxm99NJ3FiCDRED7vIHpx2mDOHLvkA= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -964,45 +748,24 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.3.6/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark-highlighting v0.0.0-20210516132338-9216f9c5aa01/go.mod h1:TwKQPa5XkCCRC2GRZ5wtfNUTQ2+9/i19mGRijFeJ4BE= -github.com/zmap/rc2 v0.0.0-20131011165748-24b9757f5521/go.mod h1:3YZ9o3WnatTIZhuOtot4IcUfzoKVjUHqu6WALIyI0nE= -github.com/zmap/rc2 v0.0.0-20190804163417-abaa70531248/go.mod h1:3YZ9o3WnatTIZhuOtot4IcUfzoKVjUHqu6WALIyI0nE= -github.com/zmap/zcertificate v0.0.0-20180516150559-0e3d58b1bac4/go.mod h1:5iU54tB79AMBcySS0R2XIyZBAVmeHranShAFELYx7is= -github.com/zmap/zcertificate v0.0.0-20190521191901-30e388164f71/go.mod h1:gIZi1KPgkZNUQzPZXsZrNnUnxy05nTc0+tmlqvIkhRw= -github.com/zmap/zcrypto v0.0.0-20190329181646-dff83107394d/go.mod h1:ix3q2kpLy0ibAuFXlr7qOhPKwFRRSjuynGuTR8EUPCk= -github.com/zmap/zlint v0.0.0-20190516161541-9047d02cf65a/go.mod h1:xwLbce0UzBXp44sIAL1cii+hoK8j4AxRKlymZA2AIcY= -go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/zeebo/assert v1.1.0 h1:hU1L1vLTHsnO8x8c9KAR5GmM5QscxHg5RNU5z5qbUWY= +github.com/zeebo/assert v1.1.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= +github.com/zeebo/blake3 v0.2.3 h1:TFoLXsjeXqRNFxSbk35Dk4YtszE/MQQGK10BH4ptoTg= +github.com/zeebo/blake3 v0.2.3/go.mod h1:mjJjZpnsyIVtVgTOSpJ9vmRE4wgDeyt2HU3qXvvKCaQ= +github.com/zeebo/pcg v1.0.1 h1:lyqfGeWiv4ahac6ttHs+I5hwtH/+1mrhlCtVNQM2kHo= +github.com/zeebo/pcg v1.0.1/go.mod h1:09F0S9iiKrwn9rlI5yjLkmrug154/YRW6KnnXVDM/l4= +github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= -go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= 
-go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= +go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ= +go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= -go.etcd.io/etcd/api/v3 v3.5.0-alpha.0/go.mod h1:mPcW6aZJukV6Aa81LSKpBjQXTWlXB5r74ymPoSWa3Sw= go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= -go.etcd.io/etcd/client/v2 v2.305.0-alpha.0/go.mod h1:kdV+xzCJ3luEBSIeQyB/OEKkWKd8Zkux4sbDeANrosU= go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= -go.etcd.io/etcd/client/v3 v3.5.0-alpha.0/go.mod h1:wKt7jgDgf/OfKiYmCq5WFGxOFAkVMLxiiXgLDFhECr8= -go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0= -go.etcd.io/etcd/etcdctl/v3 v3.5.0-alpha.0/go.mod h1:YPwSaBciV5G6Gpt435AasAG3ROetZsKNUzibRa/++oo= -go.etcd.io/etcd/etcdctl/v3 v3.5.0/go.mod h1:vGTfKdsh87RI7kA2JHFBEGxjQEYx+pi299wqEOdi34M= -go.etcd.io/etcd/etcdutl/v3 v3.5.0/go.mod h1:o98rKMCibbFAG8QS9KmvlYDGDShmmIbmRE8vSofzYNg= -go.etcd.io/etcd/pkg/v3 v3.5.0-alpha.0/go.mod h1:tV31atvwzcybuqejDoY3oaNRTtlD2l/Ot78Pc9w7DMY= -go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE= -go.etcd.io/etcd/raft/v3 v3.5.0-alpha.0/go.mod h1:FAwse6Zlm5v4tEWZaTjmNhe17Int4Oxbu7+2r0DiD3w= -go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc= -go.etcd.io/etcd/server/v3 v3.5.0-alpha.0/go.mod h1:tsKetYpt980ZTpzl/gb+UOJj9RkIyCb1u4wjzMg90BQ= -go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4= -go.etcd.io/etcd/tests/v3 v3.5.0-alpha.0/go.mod h1:HnrHxjyCuZ8YDt8PYVyQQ5d1ZQfzJVEtQWllr5Vp/30= -go.etcd.io/etcd/tests/v3 v3.5.0/go.mod h1:f+mtZ1bE1YPvgKdOJV2BKy4JQW0nAFnQehgOE7+WyJE= -go.etcd.io/etcd/v3 v3.5.0-alpha.0/go.mod h1:JZ79d3LV6NUfPjUxXrpiFAYcjhT+06qqw+i28snx8To= -go.etcd.io/etcd/v3 v3.5.0/go.mod h1:FldM0/VzcxYWLvWx1sdA7ghKw7C3L2DvUTzGrcEtsC4= -go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1 h1:A/5uWzF44DlIgdm/PQFwfMkW0JX+cIcQi/SwLAmZP5M= -go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= -go.opencensus.io v0.15.0/go.mod h1:UffZAU+4sDEINUGP/B7UfBBkq4fqLu9zXAX7ke6CHW0= -go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= +go.mozilla.org/pkcs7 v0.0.0-20210730143726-725912489c62/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= +go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 h1:CCriYyAfq1Br1aIYettdHZTy8mBTIPo7We18TuO/bak= +go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= @@ -1011,79 +774,57 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opencensus.io v0.22.6/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= 
-go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E= -go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= -go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM= -go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= -go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= -go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= -go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE= -go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE= -go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.step.sm/cli-utils v0.4.1 h1:QztRUhGYjOPM1I2Nmi7V6XejQyVtcESmo+sbegxvX7Q= -go.step.sm/cli-utils v0.4.1/go.mod h1:hWYVOSlw8W9Pd+BwIbs/aftVVMRms3EG7Q2qLRwc0WA= -go.step.sm/crypto v0.9.0 h1:q2AllTSnVj4NRtyEPkGW2ohArLmbGbe6ZAL/VIOKDzA= -go.step.sm/crypto v0.9.0/go.mod h1:+CYG05Mek1YDqi5WK0ERc6cOpKly2i/a5aZmU1sfGj0= -go.step.sm/linkedca v0.0.0-20210611183751-27424aae8d25 h1:ncJqviWswJT19IdnfOYQGKG1zL7IDy4lAJz1PuM3fgw= -go.step.sm/linkedca v0.0.0-20210611183751-27424aae8d25/go.mod h1:5uTRjozEGSPAZal9xJqlaD38cvJcLe3o1VAFVjqcORo= +go.step.sm/cli-utils v0.8.0 h1:b/Tc1/m3YuQq+u3ghTFP7Dz5zUekZj6GUmd5pCvkEXQ= +go.step.sm/cli-utils v0.8.0/go.mod h1:S77aISrC0pKuflqiDfxxJlUbiXcAanyJ4POOnzFSxD4= +go.step.sm/crypto v0.33.0 h1:fP8awo6YkZ0/rrLhzbHYA3U8g24VnWEebZRnGwUobRo= +go.step.sm/crypto v0.33.0/go.mod h1:rMETKeIA1ZsLBiKT6phQ2IIeBH3GL+XqimeobcqUw1g= +go.step.sm/linkedca v0.20.0 h1:bH41rvyDm3nSSJ5xgGsKUZOpzJcq5x2zacMIeqtq9oI= +go.step.sm/linkedca v0.20.0/go.mod h1:eybHw6ZTpuFmkUQnTBRWM2SPIGaP0VbYeo1bupfPT70= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= -go.uber.org/goleak v1.1.11-0.20210813005559-691160354723 h1:sHOAIxRGBp443oHZIPB+HsUGaksVCXVQENPxwTfQdH4= -go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= +go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr 
v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= -go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= -go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= -go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= -go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= -go.uber.org/zap v1.19.1 h1:ue41HOKd1vGURxrmeKIgELGb3jPW9DMUDGtsinblHwI= -go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= -go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= -gocloud.dev v0.19.0/go.mod h1:SmKwiR8YwIMMJvQBKLsC3fHNyMwXLw3PMDO+VVteJMI= -golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= -golang.org/x/crypto v0.0.0-20180501155221-613d6eafa307/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +go.uber.org/zap v1.25.0 h1:4Hvk6GtkucQ790dqmj7l1eEnRdKm3k3ZUrUMS2d5+5c= +go.uber.org/zap v1.25.0/go.mod h1:JIAUzQIH94IC4fOJQm7gMmBJP5k7wQfdcnYdPoEXJYk= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= -golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto 
v0.0.0-20191117063200-497ca9f6d64f/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= +golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 h1:HWj/xjIHfjYU5nVXpTM0s39J9CbLn7Cc5a7IC5rwsMQ= +golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= +golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= +golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk= +golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1094,10 +835,10 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= +golang.org/x/exp v0.0.0-20230310171629-522b1b587ee0 h1:LGJsf5LRplCck6jUCH3dBL2dmycNruWNF5xugkSlfXw= +golang.org/x/exp v0.0.0-20230310171629-522b1b587ee0/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod 
h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -1120,39 +861,32 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.11.0 h1:bUO06HqtnRcc/7l71XBe4WcqTZ+3AH1J59zWDDwLKgU= +golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20170726083632-f5079bd7f6f7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181108082009-03003ca0c849/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190301231341-16b79f2e4e95/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191119073136-fc4aabc6c914/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -1160,11 +894,9 @@ golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= @@ -1172,92 +904,81 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod 
h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210614182718-04defd469f4e h1:XpT3nA5TvE525Ne3hInMh6+GETgn27Zfm9dxsThnX2Q= -golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14= +golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210126194326-f9ce19ea3013/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210413134643-5e61552d6c78/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210427180440-81ed05c6b58c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f h1:Qmd2pbz05z7z6lm0DrgQVVPuBm92jqujBKMHMOlOQEw= golang.org/x/oauth2 
v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8= +golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20170728174421-0f826bdd13b5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181128092732-4ed8d59d0b35/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys 
v0.0.0-20190221075227-b4e8571b14e0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190620070143-6f217b454f45/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191119060738-e882bf8e40c2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1272,38 +993,27 @@ golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200413165638-669c56c373c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200828194041-157a740278f4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201009025420-dfb3f7c4e634/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210412220455-f1c623a9e750/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210503080704-8803ae5d1324/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1313,11 +1023,24 @@ golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365 h1:6wSTsvPddg9gc/mVEEyk9oOAoxn+bT4Z9q1zx+4RwA4= -golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210503060354-a79de5458b56 h1:b8jxX3zqjpqb2LklXPzKSGJhzyxCOZSz8ncv8Nv+y7w= -golang.org/x/term v0.0.0-20210503060354-a79de5458b56/go.mod h1:tfny5GFUkzUvx4ps4ajbZsCe5lw1metzhBm9T3x7oIY= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.11.0 h1:F9tnn/DA/Im8nCwm+fX+1/eBwi4qFjRT++MhtVC4ZX0= +golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1325,45 +1048,41 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text 
v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc= +golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190422233926-fe54fb35175b/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190729092621-ff9f1409240a/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191010075000-0337d82405ff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191118222007-07fc4c7f2b98/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1381,49 +1100,40 @@ golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200426102838-f3a5411a4c3b/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200522201501-cb1345f3a375/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200717024301-6ddee64345a6/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201014170642-d1624618ad65/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= 
-golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.5 h1:ouewzE6p+/VEB31YYnTbEJdi8pFqKp4P4n85vwo3DHA= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.10.0 h1:tvDr/iQoUqNdohiYm0LmmKcBk+q86lb9EprIUFhHHGg= +golang.org/x/tools v0.10.0/go.mod h1:UJwyiVBsOA2uwvK/e5OY3GTpDUJriEd+/YlqAwLPmyM= +golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= -google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= -google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.5.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.6.0/go.mod h1:btoxGiFvQNVUZQ8W08zLtrVS08CNpINPEfxXxgJL1Q4= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.10.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= @@ -1438,47 +1148,32 @@ google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSr google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= google.golang.org/api 
v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.37.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= -google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8= -google.golang.org/api v0.45.0/go.mod h1:ISLIJCedJolbZvDfAk+Ctuq5hf+aJ33WgtUsfyFoLXA= -google.golang.org/api v0.46.0/go.mod h1:ceL4oozhkAiTID8XMmJBsIxID/9wMXJVVFXPg4ylg3I= google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= -google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= -google.golang.org/api v0.57.0 h1:4t9zuDlHLcIx0ZEhmXEeFVCRsiOgpgn2QOH9N0MNjPI= -google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/api v0.132.0 h1:8t2/+qZ26kAOGSmOiHwVycqVaDg7q3JDILrNi/Z6rvc= +google.golang.org/api v0.132.0/go.mod h1:AeTBC6GpJnJSRJjktDcPX0QwtS8pGYZOV6MSuSCusw0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.2/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20181107211654-5fc9ac540362/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= -google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto 
v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190508193815-b515fa19cec8/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= -google.golang.org/genproto v0.0.0-20190620144150-6af8c5fc6601/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= @@ -1495,7 +1190,6 @@ google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= @@ -1506,22 +1200,15 @@ google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201102152239-715cce707fb0/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210126160654-44e461bb6506/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod 
h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210331142528-b7513248f0ba/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210413151531-c14fb6ef47c3/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= -google.golang.org/genproto v0.0.0-20210427215850-f767ed18ee4d/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= -google.golang.org/genproto v0.0.0-20210429181445-86c259c2b4ab/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= @@ -1534,13 +1221,12 @@ google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKr google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210921142501-181ce0d877f6 h1:2ncG/LajxmrclaZH+ppVi02rQxz4eXYJzGHdFN4Y9UA= -google.golang.org/genproto v0.0.0-20210921142501-181ce0d877f6/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= -google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= -google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= +google.golang.org/genproto v0.0.0-20230706204954-ccb25ca9f130 h1:Au6te5hbKUV8pIYWHqOUZ1pva5qK/rwbIhoXEUB9Lu8= +google.golang.org/genproto v0.0.0-20230706204954-ccb25ca9f130/go.mod h1:O9kGHb51iE/nOGvQaDUuadVYqovW56s5emA88lQnj6Y= +google.golang.org/genproto/googleapis/api v0.0.0-20230706204954-ccb25ca9f130 h1:XVeBY8d/FaK4848myy41HBqnDwvxeV3zMZhwN1TvAMU= +google.golang.org/genproto/googleapis/api v0.0.0-20230706204954-ccb25ca9f130/go.mod h1:mPBs5jNgx2GuQGvFwUvVKqtn6HsUw9nP64BedgvqEsQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 h1:bVf09lpb+OJbByTj913DRJioFFAjf/ZGxEz7MajTp2U= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= @@ -1559,7 +1245,6 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod 
h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.32.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= @@ -1571,8 +1256,9 @@ google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.40.0 h1:AGJ0Ih4mHjSeibYkFGh1dD9KJ/eOtZ93I6hoHhukQ5Q= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.56.2 h1:fVRFRnXvU+x6C4IlHZewvJOVHoOv1TUuQyoRsYnB4bI= +google.golang.org/grpc v1.56.2/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -1584,52 +1270,45 @@ google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.25.1-0.20200805231151-a709e31e5d12/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= -gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= -gopkg.in/inf.v0 v0.9.1/go.mod 
h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= gopkg.in/ini.v1 v1.63.2 h1:tGK/CyBg7SMzb60vP1M03vNZ3VDu3wGQJwn7Sxi9r3c= gopkg.in/ini.v1 v1.63.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= -gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= +gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w= -gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/src-d/go-billy.v4 v4.3.2/go.mod h1:nDjArDMp+XMs1aFAESLRjfGSgfvoYN0hDfzEk0GjC98= -gopkg.in/src-d/go-git-fixtures.v3 v3.5.0/go.mod h1:dLBcvytrw/TYZsNTWCnkNF2DSIlzWYqTe3rJR56Ac7g= -gopkg.in/src-d/go-git.v4 v4.13.1/go.mod h1:nx5NYcxdKxq5fpltdHnPa2Exj4Sx0EclMWZQbYDu2z8= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI= +gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= +gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0/go.mod h1:WDnlLJ4WF5VGsH/HVa3CI79GS0ol3YnhVnKP89i0kNg= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools 
v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
 honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
@@ -1638,17 +1317,12 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
 honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
 honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
 honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-howett.net/plist v0.0.0-20181124034731-591f970eefbb h1:jhnBjNi9UFpfpl8YZhA9CrOqpnJdvzuiHsl/dnxl11M=
-howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0=
+howett.net/plist v1.0.0 h1:7CrbWYbPPO/PyNy38b2EB/+gYbjCe2DXBxgtOOZbSQM=
+howett.net/plist v1.0.0/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g=
 inet.af/tcpproxy v0.0.0-20210824174053-2e577fef49e2 h1:5dsmluHaw3te6yeluBq4oe2VcZq3tljF8l661Chwzwc=
 inet.af/tcpproxy v0.0.0-20210824174053-2e577fef49e2/go.mod h1:Tojt5kmHpDIR2jMojxzZK2w2ZR7OILODmUo2gaSwjrk=
-pack.ag/amqp v0.11.2/go.mod h1:4/cbmt4EJXSKlG6LCfWHoqmN0uFdy5i/+YFz+fTfhV4=
 rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
 rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
 rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
 sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
-sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
-software.sslmate.com/src/go-pkcs12 v0.0.0-20201103104416-57fc603b7f52/go.mod h1:/xvNRWUqm0+/ZMiF4EX00vrSCMsE4/NHb+Pt3freEeQ=
 sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU=
-sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck=
-sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0=
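The block above is pure go.sum churn from the dependency upgrades. Each module version normally contributes two entries: an `h1:` digest of the module's extracted file tree and a `/go.mod h1:` digest covering only its go.mod file. The `h1` scheme is dirhash's Hash1: a base64-encoded SHA-256 over a sorted listing of per-file SHA-256 checksums. A minimal sketch of recomputing such a digest, assuming `golang.org/x/mod` is available; the zip path is illustrative, not a file from this repo:

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/mod/sumdb/dirhash"
)

func main() {
	// A go.sum "h1:" line is dirhash.Hash1 over a module zip's contents:
	// sha256 of a sorted "checksum  path" manifest, base64-encoded.
	h, err := dirhash.HashZip("golang.org_x_text_v0.12.0.zip", dirhash.Hash1)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(h) // e.g. "h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc="
}
```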
diff --git a/server/server_test.go b/server/server_test.go
index 2bcdf769..1e12b6fc 100644
--- a/server/server_test.go
+++ b/server/server_test.go
@@ -3,7 +3,7 @@ package server
 import (
 	"context"
 	"fmt"
-	"io/ioutil"
+	"io"
 	"net"
 	"net/http"
 	"os"
@@ -14,6 +14,8 @@ import (
 	"time"
 
 	"github.com/google/go-cmp/cmp"
+	"github.com/owenthereal/candy"
+	"go.uber.org/zap"
 )
 
 func Test_Server(t *testing.T) {
@@ -26,7 +28,7 @@ func Test_Server(t *testing.T) {
 		tlds = []string{"go-test"}
 	)
 
-	if err := ioutil.WriteFile(filepath.Join(hostRoot, "app"), []byte(adminAddr), 0o644); err != nil {
+	if err := os.WriteFile(filepath.Join(hostRoot, "app"), []byte(adminAddr), 0o644); err != nil {
 		t.Fatal(err)
 	}
 
@@ -39,8 +41,17 @@ func Test_Server(t *testing.T) {
 		DnsAddr:   dnsAddr,
 	})
 	errch := make(chan error)
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
 	go func() {
-		errch <- svr.Run(context.Background())
+		err := svr.Run(ctx)
+		if err != nil {
+			candy.Log().Error("error running server", zap.Error(err))
+		}
+
+		errch <- err
 	}()
 
 	t.Run("http addr", func(t *testing.T) {
@@ -50,7 +61,7 @@ func Test_Server(t *testing.T) {
 				return err
 			}
 
-			b, err := ioutil.ReadAll(resp.Body)
+			b, err := io.ReadAll(resp.Body)
 			if err != nil {
 				return err
 			}
@@ -72,7 +83,7 @@ func Test_Server(t *testing.T) {
 				return err
 			}
 
-			b, err := ioutil.ReadAll(resp.Body)
+			b, err := io.ReadAll(resp.Body)
 			if err != nil {
 				return err
 			}
@@ -94,7 +105,7 @@ func Test_Server(t *testing.T) {
 				return nil
 			}
 
-			b, err := ioutil.ReadAll(resp.Body)
+			b, err := io.ReadAll(resp.Body)
 			if err != nil {
 				return nil
 			}
@@ -129,33 +140,33 @@ func Test_Server(t *testing.T) {
 	})
 
 	t.Run("add new domain", func(t *testing.T) {
-		if err := ioutil.WriteFile(filepath.Join(hostRoot, "app2"), []byte(adminAddr), 0o644); err != nil {
+		if err := os.WriteFile(filepath.Join(hostRoot, "app2"), []byte(adminAddr), 0o644); err != nil {
 			t.Fatal(err)
 		}
 
-		r := &net.Resolver{
-			PreferGo: true,
-			Dial: func(ctx context.Context, network, address string) (net.Conn, error) {
-				return net.Dial("udp", dnsAddr)
-			},
-		}
+		waitUntil(t, 3, func() error {
+			r := &net.Resolver{
+				PreferGo: true,
+				Dial: func(ctx context.Context, network, address string) (net.Conn, error) {
+					return net.Dial("udp", dnsAddr)
+				},
+			}
 
-		ips, err := r.LookupHost(context.Background(), "app2.go-test")
-		if err != nil {
-			t.Fatal(err)
-		}
+			ips, err := r.LookupHost(context.Background(), "app2.go-test")
+			if err != nil {
+				t.Fatal(err)
+			}
 
-		if diff := cmp.Diff([]string{"127.0.0.1"}, ips); diff != "" {
-			t.Fatalf("Unexpected IPs (-want +got): %s", diff)
-		}
+			if diff := cmp.Diff([]string{"127.0.0.1"}, ips); diff != "" {
+				t.Fatalf("Unexpected IPs (-want +got): %s", diff)
+			}
 
-		waitUntil(t, 3, func() error {
 			resp, err := http.Get(fmt.Sprintf("http://%s/config/apps/tls/automation/policies/0/subjects", adminAddr))
 			if err != nil {
 				return nil
 			}
 
-			b, err := ioutil.ReadAll(resp.Body)
+			b, err := io.ReadAll(resp.Body)
 			if err != nil {
 				return nil
 			}
@@ -214,12 +225,12 @@ func Test_Server_Shutdown(t *testing.T) {
 			Config: Config{
 				HostRoot:  hostRoot,
 				Domain:    tlds,
-				HttpAddr:  "invalid-addr",
+				HttpAddr:  "",
 				HttpsAddr: randomAddr(t),
-				AdminAddr: "", // TODO: running into caddy race issue with `go test -race` when replacing admin server. Disabling admin server for this and report upstream.
+				AdminAddr: randomAddr(t),
 				DnsAddr:   randomAddr(t),
 			},
-			WantErrMsg: "address invalid-addr: missing port in address",
+			WantErrMsg: "loading new config: loading http app module: http: invalid configuration: invalid listener address '': missing port in address",
 		},
 		{
 			Name: "invalid admin addr",
@@ -231,31 +242,38 @@ func Test_Server_Shutdown(t *testing.T) {
 				AdminAddr: "invalid-addr",
 				DnsAddr:   randomAddr(t),
 			},
-			WantErrMsg: "address invalid-addr: missing port in address",
+			WantErrMsg: "loading new config: starting caddy administration endpoint: listen tcp: lookup invalid-addr",
 		},
 		{
 			Name: "invalid host root",
 			Config: Config{
-				HostRoot:  "invalid-host-root",
+				HostRoot:  "/tmp/invalid-host-root",
 				Domain:    tlds,
 				HttpAddr:  randomAddr(t),
 				HttpsAddr: randomAddr(t),
 				AdminAddr: randomAddr(t),
 				DnsAddr:   randomAddr(t),
 			},
-			WantErrMsg: "invalid-host-root: no such file or directory",
+			WantErrMsg: "no such file or directory",
 		},
 	}
 
 	for _, c := range cases {
 		c := c
 		t.Run(c.Name, func(t *testing.T) {
-			//t.Parallel()
-
 			errch := make(chan error)
 			srv := New(c.Config)
+
+			ctx, cancel := context.WithCancel(context.Background())
+			defer cancel()
+
 			go func() {
-				errch <- srv.Run(context.Background())
+				err := srv.Run(ctx)
+				if err != nil {
+					candy.Log().Error("error running server", zap.Error(err))
+				}
+
+				errch <- err
 			}()
 
 			select {
@@ -275,7 +293,7 @@ func randomAddr(t *testing.T) string {
 }
 
 func randomPort(t *testing.T) string {
-	listener, err := net.Listen("tcp", ":0")
+	listener, err := net.Listen("tcp", "127.0.0.1:0")
 	if err != nil {
 		t.Fatal(err)
 	}
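Two details of this test rewrite deserve a note. First, each server now runs under a cancellable context, so a failing test tears its instance down instead of leaking the goroutine. Second, the DNS and admin-API assertions moved inside a retry helper, since Caddy applies a new config asynchronously and an immediate lookup can race the reload. A rough sketch of the shape of that pattern; the real `waitUntil` lives elsewhere in server_test.go, and its body here is an assumption:

```go
package server

import (
	"context"
	"net"
	"testing"
	"time"
)

// waitUntil retries f up to max times with a short pause between attempts,
// failing the test if f never succeeds. (Assumed shape of the test helper.)
func waitUntil(t *testing.T, max int, f func() error) {
	t.Helper()

	var err error
	for i := 0; i < max; i++ {
		if err = f(); err == nil {
			return
		}
		time.Sleep(time.Second)
	}
	t.Fatalf("condition not met after %d attempts: %v", max, err)
}

// lookupThroughDNS mirrors the resolver the test builds: PreferGo forces
// Go's built-in resolver so the custom Dial is honored, and Dial pins every
// query to candy's own DNS listener instead of the system resolver.
func lookupThroughDNS(ctx context.Context, dnsAddr, host string) ([]string, error) {
	r := &net.Resolver{
		PreferGo: true,
		Dial: func(ctx context.Context, network, address string) (net.Conn, error) {
			return net.Dial("udp", dnsAddr)
		},
	}
	return r.LookupHost(ctx, host)
}
```

Relatedly, `randomPort` now listens on `127.0.0.1:0` instead of `:0`: binding loopback still hands back a free port but avoids listening on all interfaces, which likely sidesteps macOS firewall prompts on the CI runners.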
diff --git a/vendor/github.com/AndreasBriese/bbloom/.travis.yml b/vendor/github.com/AndreasBriese/bbloom/.travis.yml
deleted file mode 100644
index 4f2ee4d9..00000000
--- a/vendor/github.com/AndreasBriese/bbloom/.travis.yml
+++ /dev/null
@@ -1 +0,0 @@
-language: go
diff --git a/vendor/github.com/AndreasBriese/bbloom/LICENSE b/vendor/github.com/AndreasBriese/bbloom/LICENSE
deleted file mode 100644
index 4b20050e..00000000
--- a/vendor/github.com/AndreasBriese/bbloom/LICENSE
+++ /dev/null
@@ -1,35 +0,0 @@
-bbloom.go
-
-// The MIT License (MIT)
-// Copyright (c) 2014 Andreas Briese, eduToolbox@Bri-C GmbH, Sarstedt
-
-// Permission is hereby granted, free of charge, to any person obtaining a copy of
-// this software and associated documentation files (the "Software"), to deal in
-// the Software without restriction, including without limitation the rights to
-// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-// the Software, and to permit persons to whom the Software is furnished to do so,
-// subject to the following conditions:
-
-// The above copyright notice and this permission notice shall be included in all
-// copies or substantial portions of the Software.
-
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-siphash.go
-
-// https://github.com/dchest/siphash
-//
-// Written in 2012 by Dmitry Chestnykh.
-//
-// To the extent possible under law, the author have dedicated all copyright
-// and related and neighboring rights to this software to the public domain
-// worldwide. This software is distributed without any warranty.
-// http://creativecommons.org/publicdomain/zero/1.0/
-//
-// Package siphash implements SipHash-2-4, a fast short-input PRF
-// created by Jean-Philippe Aumasson and Daniel J. Bernstein.
diff --git a/vendor/github.com/AndreasBriese/bbloom/README.md b/vendor/github.com/AndreasBriese/bbloom/README.md
deleted file mode 100644
index d7413c33..00000000
--- a/vendor/github.com/AndreasBriese/bbloom/README.md
+++ /dev/null
@@ -1,131 +0,0 @@
-## bbloom: a bitset Bloom filter for go/golang
-===
-
-[![Build Status](https://travis-ci.org/AndreasBriese/bbloom.png?branch=master)](http://travis-ci.org/AndreasBriese/bbloom)
-
-package implements a fast bloom filter with real 'bitset' and JSONMarshal/JSONUnmarshal to store/reload the Bloom filter.
-
-NOTE: the package uses unsafe.Pointer to set and read the bits from the bitset. If you're uncomfortable with using the unsafe package, please consider using my bloom filter package at github.com/AndreasBriese/bloom
-
-===
-
-changelog 11/2015: new thread safe methods AddTS(), HasTS(), AddIfNotHasTS() following a suggestion from Srdjan Marinovic (github @a-little-srdjan), who used this to code a bloomfilter cache.
-
-This bloom filter was developed to strengthen a website-log database and was tested and optimized for this log-entry mask: "2014/%02i/%02i %02i:%02i:%02i /info.html".
-Nonetheless bbloom should work with any other form of entries.
-
-~~Hash function is a modified Berkeley DB sdbm hash (to optimize for smaller strings). sdbm http://www.cse.yorku.ca/~oz/hash.html~~
-
-Found sipHash (SipHash-2-4, a fast short-input PRF created by Jean-Philippe Aumasson and Daniel J. Bernstein.) to be about as fast. sipHash had been ported by Dimtry Chestnyk to Go (github.com/dchest/siphash )
-
-Minimum hashset size is: 512 ([4]uint64; will be set automatically).
-
-###install
-
-```sh
-go get github.com/AndreasBriese/bbloom
-```
-
-###test
-+ change to folder ../bbloom
-+ create wordlist in file "words.txt" (you might use `python permut.py`)
-+ run 'go test -bench=.' within the folder
-
-```go
-go test -bench=.
-```
-
-~~If you've installed the GOCONVEY TDD-framework http://goconvey.co/ you can run the tests automatically.~~
-
-using go's testing framework now (have in mind that the op timing is related to 65536 operations of Add, Has, AddIfNotHas respectively)
-
-### usage
-
-after installation add
-
-```go
-import (
-	...
-	"github.com/AndreasBriese/bbloom"
-	...
-	)
-```
-
-at your header.
In the program use - -```go -// create a bloom filter for 65536 items and 1 % wrong-positive ratio -bf := bbloom.New(float64(1<<16), float64(0.01)) - -// or -// create a bloom filter with 650000 for 65536 items and 7 locs per hash explicitly -// bf = bbloom.New(float64(650000), float64(7)) -// or -bf = bbloom.New(650000.0, 7.0) - -// add one item -bf.Add([]byte("butter")) - -// Number of elements added is exposed now -// Note: ElemNum will not be included in JSON export (for compatability to older version) -nOfElementsInFilter := bf.ElemNum - -// check if item is in the filter -isIn := bf.Has([]byte("butter")) // should be true -isNotIn := bf.Has([]byte("Butter")) // should be false - -// 'add only if item is new' to the bloomfilter -added := bf.AddIfNotHas([]byte("butter")) // should be false because 'butter' is already in the set -added = bf.AddIfNotHas([]byte("buTTer")) // should be true because 'buTTer' is new - -// thread safe versions for concurrent use: AddTS, HasTS, AddIfNotHasTS -// add one item -bf.AddTS([]byte("peanutbutter")) -// check if item is in the filter -isIn = bf.HasTS([]byte("peanutbutter")) // should be true -isNotIn = bf.HasTS([]byte("peanutButter")) // should be false -// 'add only if item is new' to the bloomfilter -added = bf.AddIfNotHasTS([]byte("butter")) // should be false because 'peanutbutter' is already in the set -added = bf.AddIfNotHasTS([]byte("peanutbuTTer")) // should be true because 'penutbuTTer' is new - -// convert to JSON ([]byte) -Json := bf.JSONMarshal() - -// bloomfilters Mutex is exposed for external un-/locking -// i.e. mutex lock while doing JSON conversion -bf.Mtx.Lock() -Json = bf.JSONMarshal() -bf.Mtx.Unlock() - -// restore a bloom filter from storage -bfNew := bbloom.JSONUnmarshal(Json) - -isInNew := bfNew.Has([]byte("butter")) // should be true -isNotInNew := bfNew.Has([]byte("Butter")) // should be false - -``` - -to work with the bloom filter. - -### why 'fast'? - -It's about 3 times faster than William Fitzgeralds bitset bloom filter https://github.com/willf/bloom . And it is about so fast as my []bool set variant for Boom filters (see https://github.com/AndreasBriese/bloom ) but having a 8times smaller memory footprint: - - - Bloom filter (filter size 524288, 7 hashlocs) - github.com/AndreasBriese/bbloom 'Add' 65536 items (10 repetitions): 6595800 ns (100 ns/op) - github.com/AndreasBriese/bbloom 'Has' 65536 items (10 repetitions): 5986600 ns (91 ns/op) - github.com/AndreasBriese/bloom 'Add' 65536 items (10 repetitions): 6304684 ns (96 ns/op) - github.com/AndreasBriese/bloom 'Has' 65536 items (10 repetitions): 6568663 ns (100 ns/op) - - github.com/willf/bloom 'Add' 65536 items (10 repetitions): 24367224 ns (371 ns/op) - github.com/willf/bloom 'Test' 65536 items (10 repetitions): 21881142 ns (333 ns/op) - github.com/dataence/bloom/standard 'Add' 65536 items (10 repetitions): 23041644 ns (351 ns/op) - github.com/dataence/bloom/standard 'Check' 65536 items (10 repetitions): 19153133 ns (292 ns/op) - github.com/cabello/bloom 'Add' 65536 items (10 repetitions): 131921507 ns (2012 ns/op) - github.com/cabello/bloom 'Contains' 65536 items (10 repetitions): 131108962 ns (2000 ns/op) - -(on MBPro15 OSX10.8.5 i7 4Core 2.4Ghz) - - -With 32bit bloom filters (bloom32) using modified sdbm, bloom32 does hashing with only 2 bit shifts, one xor and one substraction per byte. smdb is about as fast as fnv64a but gives less collisions with the dataset (see mask above). 
bloom.New(float64(10 * 1<<16),float64(7)) populated with 1<<16 random items from the dataset (see above) and tested against the rest results in less than 0.05% collisions. diff --git a/vendor/github.com/AndreasBriese/bbloom/bbloom.go b/vendor/github.com/AndreasBriese/bbloom/bbloom.go deleted file mode 100644 index c36948fc..00000000 --- a/vendor/github.com/AndreasBriese/bbloom/bbloom.go +++ /dev/null @@ -1,284 +0,0 @@ -// The MIT License (MIT) -// Copyright (c) 2014 Andreas Briese, eduToolbox@Bri-C GmbH, Sarstedt - -// Permission is hereby granted, free of charge, to any person obtaining a copy of -// this software and associated documentation files (the "Software"), to deal in -// the Software without restriction, including without limitation the rights to -// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software is furnished to do so, -// subject to the following conditions: - -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. - -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -// 2019/08/25 code revision to reduce unsafe use -// Parts are adopted from the fork at ipfs/bbloom after performance rev by -// Steve Allen (https://github.com/Stebalien) -// (see https://github.com/ipfs/bbloom/blob/master/bbloom.go) -// -> func Has -// -> func set -// -> func add - -package bbloom - -import ( - "bytes" - "encoding/json" - "log" - "math" - "sync" - "unsafe" -) - -// helper -// not needed anymore by Set -// var mask = []uint8{1, 2, 4, 8, 16, 32, 64, 128} - -func getSize(ui64 uint64) (size uint64, exponent uint64) { - if ui64 < uint64(512) { - ui64 = uint64(512) - } - size = uint64(1) - for size < ui64 { - size <<= 1 - exponent++ - } - return size, exponent -} - -func calcSizeByWrongPositives(numEntries, wrongs float64) (uint64, uint64) { - size := -1 * numEntries * math.Log(wrongs) / math.Pow(float64(0.69314718056), 2) - locs := math.Ceil(float64(0.69314718056) * size / numEntries) - return uint64(size), uint64(locs) -} - -// New -// returns a new bloomfilter -func New(params ...float64) (bloomfilter Bloom) { - var entries, locs uint64 - if len(params) == 2 { - if params[1] < 1 { - entries, locs = calcSizeByWrongPositives(params[0], params[1]) - } else { - entries, locs = uint64(params[0]), uint64(params[1]) - } - } else { - log.Fatal("usage: New(float64(number_of_entries), float64(number_of_hashlocations)) i.e. New(float64(1000), float64(3)) or New(float64(number_of_entries), float64(number_of_hashlocations)) i.e. 
New(float64(1000), float64(0.03))") - } - size, exponent := getSize(uint64(entries)) - bloomfilter = Bloom{ - Mtx: &sync.Mutex{}, - sizeExp: exponent, - size: size - 1, - setLocs: locs, - shift: 64 - exponent, - } - bloomfilter.Size(size) - return bloomfilter -} - -// NewWithBoolset -// takes a []byte slice and number of locs per entry -// returns the bloomfilter with a bitset populated according to the input []byte -func NewWithBoolset(bs *[]byte, locs uint64) (bloomfilter Bloom) { - bloomfilter = New(float64(len(*bs)<<3), float64(locs)) - for i, b := range *bs { - *(*uint8)(unsafe.Pointer(uintptr(unsafe.Pointer(&bloomfilter.bitset[0])) + uintptr(i))) = b - } - return bloomfilter -} - -// bloomJSONImExport -// Im/Export structure used by JSONMarshal / JSONUnmarshal -type bloomJSONImExport struct { - FilterSet []byte - SetLocs uint64 -} - -// JSONUnmarshal -// takes JSON-Object (type bloomJSONImExport) as []bytes -// returns Bloom object -func JSONUnmarshal(dbData []byte) Bloom { - bloomImEx := bloomJSONImExport{} - json.Unmarshal(dbData, &bloomImEx) - buf := bytes.NewBuffer(bloomImEx.FilterSet) - bs := buf.Bytes() - bf := NewWithBoolset(&bs, bloomImEx.SetLocs) - return bf -} - -// -// Bloom filter -type Bloom struct { - Mtx *sync.Mutex - ElemNum uint64 - bitset []uint64 - sizeExp uint64 - size uint64 - setLocs uint64 - shift uint64 -} - -// <--- http://www.cse.yorku.ca/~oz/hash.html -// modified Berkeley DB Hash (32bit) -// hash is casted to l, h = 16bit fragments -// func (bl Bloom) absdbm(b *[]byte) (l, h uint64) { -// hash := uint64(len(*b)) -// for _, c := range *b { -// hash = uint64(c) + (hash << 6) + (hash << bl.sizeExp) - hash -// } -// h = hash >> bl.shift -// l = hash << bl.shift >> bl.shift -// return l, h -// } - -// Update: found sipHash of Jean-Philippe Aumasson & Daniel J. 
Bernstein to be even faster than absdbm() -// https://131002.net/siphash/ -// siphash was implemented for Go by Dmitry Chestnykh https://github.com/dchest/siphash - -// Add -// set the bit(s) for entry; Adds an entry to the Bloom filter -func (bl *Bloom) Add(entry []byte) { - l, h := bl.sipHash(entry) - for i := uint64(0); i < bl.setLocs; i++ { - bl.set((h + i*l) & bl.size) - bl.ElemNum++ - } -} - -// AddTS -// Thread safe: Mutex.Lock the bloomfilter for the time of processing the entry -func (bl *Bloom) AddTS(entry []byte) { - bl.Mtx.Lock() - defer bl.Mtx.Unlock() - bl.Add(entry) -} - -// Has -// check if bit(s) for entry is/are set -// returns true if the entry was added to the Bloom Filter -func (bl Bloom) Has(entry []byte) bool { - l, h := bl.sipHash(entry) - res := true - for i := uint64(0); i < bl.setLocs; i++ { - res = res && bl.isSet((h+i*l)&bl.size) - // https://github.com/ipfs/bbloom/commit/84e8303a9bfb37b2658b85982921d15bbb0fecff - // // Branching here (early escape) is not worth it - // // This is my conclusion from benchmarks - // // (prevents loop unrolling) - // switch bl.IsSet((h + i*l) & bl.size) { - // case false: - // return false - // } - } - return res -} - -// HasTS -// Thread safe: Mutex.Lock the bloomfilter for the time of processing the entry -func (bl *Bloom) HasTS(entry []byte) bool { - bl.Mtx.Lock() - defer bl.Mtx.Unlock() - return bl.Has(entry) -} - -// AddIfNotHas -// Only Add entry if it's not present in the bloomfilter -// returns true if entry was added -// returns false if entry was allready registered in the bloomfilter -func (bl Bloom) AddIfNotHas(entry []byte) (added bool) { - if bl.Has(entry) { - return added - } - bl.Add(entry) - return true -} - -// AddIfNotHasTS -// Tread safe: Only Add entry if it's not present in the bloomfilter -// returns true if entry was added -// returns false if entry was allready registered in the bloomfilter -func (bl *Bloom) AddIfNotHasTS(entry []byte) (added bool) { - bl.Mtx.Lock() - defer bl.Mtx.Unlock() - return bl.AddIfNotHas(entry) -} - -// Size -// make Bloom filter with as bitset of size sz -func (bl *Bloom) Size(sz uint64) { - bl.bitset = make([]uint64, sz>>6) -} - -// Clear -// resets the Bloom filter -func (bl *Bloom) Clear() { - bs := bl.bitset - for i := range bs { - bs[i] = 0 - } -} - -// Set -// set the bit[idx] of bitsit -func (bl *Bloom) set(idx uint64) { - // ommit unsafe - // *(*uint8)(unsafe.Pointer(uintptr(unsafe.Pointer(&bl.bitset[idx>>6])) + uintptr((idx%64)>>3))) |= mask[idx%8] - bl.bitset[idx>>6] |= 1 << (idx % 64) -} - -// IsSet -// check if bit[idx] of bitset is set -// returns true/false -func (bl *Bloom) isSet(idx uint64) bool { - // ommit unsafe - // return (((*(*uint8)(unsafe.Pointer(uintptr(unsafe.Pointer(&bl.bitset[idx>>6])) + uintptr((idx%64)>>3)))) >> (idx % 8)) & 1) == 1 - return bl.bitset[idx>>6]&(1<<(idx%64)) != 0 -} - -// JSONMarshal -// returns JSON-object (type bloomJSONImExport) as []byte -func (bl Bloom) JSONMarshal() []byte { - bloomImEx := bloomJSONImExport{} - bloomImEx.SetLocs = uint64(bl.setLocs) - bloomImEx.FilterSet = make([]byte, len(bl.bitset)<<3) - for i := range bloomImEx.FilterSet { - bloomImEx.FilterSet[i] = *(*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(&bl.bitset[0])) + uintptr(i))) - } - data, err := json.Marshal(bloomImEx) - if err != nil { - log.Fatal("json.Marshal failed: ", err) - } - return data -} - -// // alternative hashFn -// func (bl Bloom) fnv64a(b *[]byte) (l, h uint64) { -// h64 := fnv.New64a() -// h64.Write(*b) -// hash := h64.Sum64() -// h = hash >> 
32 -// l = hash << 32 >> 32 -// return l, h -// } -// -// // <-- http://partow.net/programming/hashfunctions/index.html -// // citation: An algorithm proposed by Donald E. Knuth in The Art Of Computer Programming Volume 3, -// // under the topic of sorting and search chapter 6.4. -// // modified to fit with boolset-length -// func (bl Bloom) DEKHash(b *[]byte) (l, h uint64) { -// hash := uint64(len(*b)) -// for _, c := range *b { -// hash = ((hash << 5) ^ (hash >> bl.shift)) ^ uint64(c) -// } -// h = hash >> bl.shift -// l = hash << bl.sizeExp >> bl.sizeExp -// return l, h -// } diff --git a/vendor/github.com/AndreasBriese/bbloom/sipHash.go b/vendor/github.com/AndreasBriese/bbloom/sipHash.go deleted file mode 100644 index a91d8199..00000000 --- a/vendor/github.com/AndreasBriese/bbloom/sipHash.go +++ /dev/null @@ -1,225 +0,0 @@ -// Written in 2012 by Dmitry Chestnykh. -// -// To the extent possible under law, the author have dedicated all copyright -// and related and neighboring rights to this software to the public domain -// worldwide. This software is distributed without any warranty. -// http://creativecommons.org/publicdomain/zero/1.0/ -// -// Package siphash implements SipHash-2-4, a fast short-input PRF -// created by Jean-Philippe Aumasson and Daniel J. Bernstein. - -package bbloom - -// Hash returns the 64-bit SipHash-2-4 of the given byte slice with two 64-bit -// parts of 128-bit key: k0 and k1. -func (bl Bloom) sipHash(p []byte) (l, h uint64) { - // Initialization. - v0 := uint64(8317987320269560794) // k0 ^ 0x736f6d6570736575 - v1 := uint64(7237128889637516672) // k1 ^ 0x646f72616e646f6d - v2 := uint64(7816392314733513934) // k0 ^ 0x6c7967656e657261 - v3 := uint64(8387220255325274014) // k1 ^ 0x7465646279746573 - t := uint64(len(p)) << 56 - - // Compression. - for len(p) >= 8 { - - m := uint64(p[0]) | uint64(p[1])<<8 | uint64(p[2])<<16 | uint64(p[3])<<24 | - uint64(p[4])<<32 | uint64(p[5])<<40 | uint64(p[6])<<48 | uint64(p[7])<<56 - - v3 ^= m - - // Round 1. - v0 += v1 - v1 = v1<<13 | v1>>51 - v1 ^= v0 - v0 = v0<<32 | v0>>32 - - v2 += v3 - v3 = v3<<16 | v3>>48 - v3 ^= v2 - - v0 += v3 - v3 = v3<<21 | v3>>43 - v3 ^= v0 - - v2 += v1 - v1 = v1<<17 | v1>>47 - v1 ^= v2 - v2 = v2<<32 | v2>>32 - - // Round 2. - v0 += v1 - v1 = v1<<13 | v1>>51 - v1 ^= v0 - v0 = v0<<32 | v0>>32 - - v2 += v3 - v3 = v3<<16 | v3>>48 - v3 ^= v2 - - v0 += v3 - v3 = v3<<21 | v3>>43 - v3 ^= v0 - - v2 += v1 - v1 = v1<<17 | v1>>47 - v1 ^= v2 - v2 = v2<<32 | v2>>32 - - v0 ^= m - p = p[8:] - } - - // Compress last block. - switch len(p) { - case 7: - t |= uint64(p[6]) << 48 - fallthrough - case 6: - t |= uint64(p[5]) << 40 - fallthrough - case 5: - t |= uint64(p[4]) << 32 - fallthrough - case 4: - t |= uint64(p[3]) << 24 - fallthrough - case 3: - t |= uint64(p[2]) << 16 - fallthrough - case 2: - t |= uint64(p[1]) << 8 - fallthrough - case 1: - t |= uint64(p[0]) - } - - v3 ^= t - - // Round 1. - v0 += v1 - v1 = v1<<13 | v1>>51 - v1 ^= v0 - v0 = v0<<32 | v0>>32 - - v2 += v3 - v3 = v3<<16 | v3>>48 - v3 ^= v2 - - v0 += v3 - v3 = v3<<21 | v3>>43 - v3 ^= v0 - - v2 += v1 - v1 = v1<<17 | v1>>47 - v1 ^= v2 - v2 = v2<<32 | v2>>32 - - // Round 2. - v0 += v1 - v1 = v1<<13 | v1>>51 - v1 ^= v0 - v0 = v0<<32 | v0>>32 - - v2 += v3 - v3 = v3<<16 | v3>>48 - v3 ^= v2 - - v0 += v3 - v3 = v3<<21 | v3>>43 - v3 ^= v0 - - v2 += v1 - v1 = v1<<17 | v1>>47 - v1 ^= v2 - v2 = v2<<32 | v2>>32 - - v0 ^= t - - // Finalization. - v2 ^= 0xff - - // Round 1. 
- v0 += v1 - v1 = v1<<13 | v1>>51 - v1 ^= v0 - v0 = v0<<32 | v0>>32 - - v2 += v3 - v3 = v3<<16 | v3>>48 - v3 ^= v2 - - v0 += v3 - v3 = v3<<21 | v3>>43 - v3 ^= v0 - - v2 += v1 - v1 = v1<<17 | v1>>47 - v1 ^= v2 - v2 = v2<<32 | v2>>32 - - // Round 2. - v0 += v1 - v1 = v1<<13 | v1>>51 - v1 ^= v0 - v0 = v0<<32 | v0>>32 - - v2 += v3 - v3 = v3<<16 | v3>>48 - v3 ^= v2 - - v0 += v3 - v3 = v3<<21 | v3>>43 - v3 ^= v0 - - v2 += v1 - v1 = v1<<17 | v1>>47 - v1 ^= v2 - v2 = v2<<32 | v2>>32 - - // Round 3. - v0 += v1 - v1 = v1<<13 | v1>>51 - v1 ^= v0 - v0 = v0<<32 | v0>>32 - - v2 += v3 - v3 = v3<<16 | v3>>48 - v3 ^= v2 - - v0 += v3 - v3 = v3<<21 | v3>>43 - v3 ^= v0 - - v2 += v1 - v1 = v1<<17 | v1>>47 - v1 ^= v2 - v2 = v2<<32 | v2>>32 - - // Round 4. - v0 += v1 - v1 = v1<<13 | v1>>51 - v1 ^= v0 - v0 = v0<<32 | v0>>32 - - v2 += v3 - v3 = v3<<16 | v3>>48 - v3 ^= v2 - - v0 += v3 - v3 = v3<<21 | v3>>43 - v3 ^= v0 - - v2 += v1 - v1 = v1<<17 | v1>>47 - v1 ^= v2 - v2 = v2<<32 | v2>>32 - - // return v0 ^ v1 ^ v2 ^ v3 - - hash := v0 ^ v1 ^ v2 ^ v3 - h = hash >> bl.shift - l = hash << bl.shift >> bl.shift - return l, h - -} diff --git a/vendor/github.com/AndreasBriese/bbloom/words.txt b/vendor/github.com/AndreasBriese/bbloom/words.txt deleted file mode 100644 index ad86a31a..00000000 --- a/vendor/github.com/AndreasBriese/bbloom/words.txt +++ /dev/null @@ -1,140 +0,0 @@ -2014/01/01 00:00:00 /info.html -2014/01/01 00:00:00 /info.html -2014/01/01 00:00:01 /info.html -2014/01/01 00:00:02 /info.html -2014/01/01 00:00:03 /info.html -2014/01/01 00:00:04 /info.html -2014/01/01 00:00:05 /info.html -2014/01/01 00:00:06 /info.html -2014/01/01 00:00:07 /info.html -2014/01/01 00:00:08 /info.html -2014/01/01 00:00:09 /info.html -2014/01/01 00:00:10 /info.html -2014/01/01 00:00:11 /info.html -2014/01/01 00:00:12 /info.html -2014/01/01 00:00:13 /info.html -2014/01/01 00:00:14 /info.html -2014/01/01 00:00:15 /info.html -2014/01/01 00:00:16 /info.html -2014/01/01 00:00:17 /info.html -2014/01/01 00:00:18 /info.html -2014/01/01 00:00:19 /info.html -2014/01/01 00:00:20 /info.html -2014/01/01 00:00:21 /info.html -2014/01/01 00:00:22 /info.html -2014/01/01 00:00:23 /info.html -2014/01/01 00:00:24 /info.html -2014/01/01 00:00:25 /info.html -2014/01/01 00:00:26 /info.html -2014/01/01 00:00:27 /info.html -2014/01/01 00:00:28 /info.html -2014/01/01 00:00:29 /info.html -2014/01/01 00:00:30 /info.html -2014/01/01 00:00:31 /info.html -2014/01/01 00:00:32 /info.html -2014/01/01 00:00:33 /info.html -2014/01/01 00:00:34 /info.html -2014/01/01 00:00:35 /info.html -2014/01/01 00:00:36 /info.html -2014/01/01 00:00:37 /info.html -2014/01/01 00:00:38 /info.html -2014/01/01 00:00:39 /info.html -2014/01/01 00:00:40 /info.html -2014/01/01 00:00:41 /info.html -2014/01/01 00:00:42 /info.html -2014/01/01 00:00:43 /info.html -2014/01/01 00:00:44 /info.html -2014/01/01 00:00:45 /info.html -2014/01/01 00:00:46 /info.html -2014/01/01 00:00:47 /info.html -2014/01/01 00:00:48 /info.html -2014/01/01 00:00:49 /info.html -2014/01/01 00:00:50 /info.html -2014/01/01 00:00:51 /info.html -2014/01/01 00:00:52 /info.html -2014/01/01 00:00:53 /info.html -2014/01/01 00:00:54 /info.html -2014/01/01 00:00:55 /info.html -2014/01/01 00:00:56 /info.html -2014/01/01 00:00:57 /info.html -2014/01/01 00:00:58 /info.html -2014/01/01 00:00:59 /info.html -2014/01/01 00:01:00 /info.html -2014/01/01 00:01:01 /info.html -2014/01/01 00:01:02 /info.html -2014/01/01 00:01:03 /info.html -2014/01/01 00:01:04 /info.html -2014/01/01 00:01:05 /info.html -2014/01/01 00:01:06 /info.html -2014/01/01 
00:01:07 /info.html -2014/01/01 00:01:08 /info.html -2014/01/01 00:01:09 /info.html -2014/01/01 00:01:10 /info.html -2014/01/01 00:01:11 /info.html -2014/01/01 00:01:12 /info.html -2014/01/01 00:01:13 /info.html -2014/01/01 00:01:14 /info.html -2014/01/01 00:01:15 /info.html -2014/01/01 00:01:16 /info.html -2014/01/01 00:01:17 /info.html -2014/01/01 00:01:18 /info.html -2014/01/01 00:01:19 /info.html -2014/01/01 00:01:20 /info.html -2014/01/01 00:01:21 /info.html -2014/01/01 00:01:22 /info.html -2014/01/01 00:01:23 /info.html -2014/01/01 00:01:24 /info.html -2014/01/01 00:01:25 /info.html -2014/01/01 00:01:26 /info.html -2014/01/01 00:01:27 /info.html -2014/01/01 00:01:28 /info.html -2014/01/01 00:01:29 /info.html -2014/01/01 00:01:30 /info.html -2014/01/01 00:01:31 /info.html -2014/01/01 00:01:32 /info.html -2014/01/01 00:01:33 /info.html -2014/01/01 00:01:34 /info.html -2014/01/01 00:01:35 /info.html -2014/01/01 00:01:36 /info.html -2014/01/01 00:01:37 /info.html -2014/01/01 00:01:38 /info.html -2014/01/01 00:01:39 /info.html -2014/01/01 00:01:40 /info.html -2014/01/01 00:01:41 /info.html -2014/01/01 00:01:42 /info.html -2014/01/01 00:01:43 /info.html -2014/01/01 00:01:44 /info.html -2014/01/01 00:01:45 /info.html -2014/01/01 00:01:46 /info.html -2014/01/01 00:01:47 /info.html -2014/01/01 00:01:48 /info.html -2014/01/01 00:01:49 /info.html -2014/01/01 00:01:50 /info.html -2014/01/01 00:01:51 /info.html -2014/01/01 00:01:52 /info.html -2014/01/01 00:01:53 /info.html -2014/01/01 00:01:54 /info.html -2014/01/01 00:01:55 /info.html -2014/01/01 00:01:56 /info.html -2014/01/01 00:01:57 /info.html -2014/01/01 00:01:58 /info.html -2014/01/01 00:01:59 /info.html -2014/01/01 00:02:00 /info.html -2014/01/01 00:02:01 /info.html -2014/01/01 00:02:02 /info.html -2014/01/01 00:02:03 /info.html -2014/01/01 00:02:04 /info.html -2014/01/01 00:02:05 /info.html -2014/01/01 00:02:06 /info.html -2014/01/01 00:02:07 /info.html -2014/01/01 00:02:08 /info.html -2014/01/01 00:02:09 /info.html -2014/01/01 00:02:10 /info.html -2014/01/01 00:02:11 /info.html -2014/01/01 00:02:12 /info.html -2014/01/01 00:02:13 /info.html -2014/01/01 00:02:14 /info.html -2014/01/01 00:02:15 /info.html -2014/01/01 00:02:16 /info.html -2014/01/01 00:02:17 /info.html -2014/01/01 00:02:18 /info.html diff --git a/vendor/github.com/Masterminds/goutils/.travis.yml b/vendor/github.com/Masterminds/goutils/.travis.yml deleted file mode 100644 index 4025e01e..00000000 --- a/vendor/github.com/Masterminds/goutils/.travis.yml +++ /dev/null @@ -1,18 +0,0 @@ -language: go - -go: - - 1.6 - - 1.7 - - 1.8 - - tip - -script: - - go test -v - -notifications: - webhooks: - urls: - - https://webhooks.gitter.im/e/06e3328629952dabe3e0 - on_success: change # options: [always|never|change] default: always - on_failure: always # options: [always|never|change] default: always - on_start: never # options: [always|never|change] default: always diff --git a/vendor/github.com/Masterminds/goutils/CHANGELOG.md b/vendor/github.com/Masterminds/goutils/CHANGELOG.md deleted file mode 100644 index d700ec47..00000000 --- a/vendor/github.com/Masterminds/goutils/CHANGELOG.md +++ /dev/null @@ -1,8 +0,0 @@ -# 1.0.1 (2017-05-31) - -## Fixed -- #21: Fix generation of alphanumeric strings (thanks @dbarranco) - -# 1.0.0 (2014-04-30) - -- Initial release. 
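The Masterminds/goutils removal (like the bbloom one above) is vendor pruning: these modules dropped out of the dependency graph, so regenerating the vendor tree deletes their files wholesale, licenses and test fixtures included. goutils supplies random and alphanumeric string helpers; purely for illustration (nothing in candy is known to call this), a stdlib-only stand-in for that kind of helper is small:

```go
package main

import (
	"crypto/rand"
	"fmt"
	"log"
	"math/big"
)

const alphanum = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"

// randomAlphaNumeric is an illustrative stand-in for a goutils-style helper;
// crypto/rand.Int gives an unbiased index into the alphabet.
func randomAlphaNumeric(n int) (string, error) {
	b := make([]byte, n)
	for i := range b {
		idx, err := rand.Int(rand.Reader, big.NewInt(int64(len(alphanum))))
		if err != nil {
			return "", err
		}
		b[i] = alphanum[idx.Int64()]
	}
	return string(b), nil
}

func main() {
	s, err := randomAlphaNumeric(16)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(s)
}
```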
diff --git a/vendor/github.com/Masterminds/goutils/LICENSE.txt b/vendor/github.com/Masterminds/goutils/LICENSE.txt deleted file mode 100644 index d6456956..00000000 --- a/vendor/github.com/Masterminds/goutils/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. 
Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/Masterminds/goutils/README.md b/vendor/github.com/Masterminds/goutils/README.md deleted file mode 100644 index 163ffe72..00000000 --- a/vendor/github.com/Masterminds/goutils/README.md +++ /dev/null @@ -1,70 +0,0 @@ -GoUtils -=========== -[![Stability: Maintenance](https://masterminds.github.io/stability/maintenance.svg)](https://masterminds.github.io/stability/maintenance.html) -[![GoDoc](https://godoc.org/github.com/Masterminds/goutils?status.png)](https://godoc.org/github.com/Masterminds/goutils) [![Build Status](https://travis-ci.org/Masterminds/goutils.svg?branch=master)](https://travis-ci.org/Masterminds/goutils) [![Build status](https://ci.appveyor.com/api/projects/status/sc2b1ew0m7f0aiju?svg=true)](https://ci.appveyor.com/project/mattfarina/goutils) - - -GoUtils provides users with utility functions to manipulate strings in various ways. It is a Go implementation of some -string manipulation libraries of Java Apache Commons. GoUtils includes the following Java Apache Commons classes: -* WordUtils -* RandomStringUtils -* StringUtils (partial implementation) - -## Installation -If you have Go set up on your system, from the GOPATH directory within the command line/terminal, enter this: - - go get github.com/Masterminds/goutils - -If you do not have Go set up on your system, please follow the [Go installation directions from the documentation](http://golang.org/doc/install), and then follow the instructions above to install GoUtils. - - -## Documentation -GoUtils doc is available here: [![GoDoc](https://godoc.org/github.com/Masterminds/goutils?status.png)](https://godoc.org/github.com/Masterminds/goutils) - - -## Usage -The code snippets below show examples of how to use GoUtils. Some functions return errors while others do not. The first instance below, which does not return an error, is the `Initials` function (located within the `wordutils.go` file). - - package main - - import ( - "fmt" - "github.com/Masterminds/goutils" - ) - - func main() { - - // EXAMPLE 1: A goutils function which returns no errors - fmt.Println (goutils.Initials("John Doe Foo")) // Prints out "JDF" - - } -Some functions return errors mainly due to illegal arguments used as parameters. The code example below illustrates how to deal with a function that returns an error. In this instance, the function is the `Random` function (located within the `randomstringutils.go` file). - - package main - - import ( - "fmt" - "github.com/Masterminds/goutils" - ) - - func main() { - - // EXAMPLE 2: A goutils function which returns an error - rand1, err1 := goutils.Random (-1, 0, 0, true, true) - - if err1 != nil { - fmt.Println(err1) // Prints out error message because -1 was entered as the first parameter in goutils.Random(...) - } else { - fmt.Println(rand1) - } - - } - -## License -GoUtils is licensed under the Apache License, Version 2.0. Please check the LICENSE.txt file or visit http://www.apache.org/licenses/LICENSE-2.0 for a copy of the license.
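The same error-handling pattern applies to the higher-level `Random*` wrappers documented later in this diff. A minimal sketch, assuming only the `RandomAlphaNumeric(count int) (string, error)` signature shown in `randomstringutils.go` below; the count and sample output are illustrative:

```go
package main

import (
	"fmt"

	"github.com/Masterminds/goutils"
)

func main() {
	// RandomAlphaNumeric draws from letters and digits; a negative count
	// would be an illegal argument and is reported via the error return.
	s, err := goutils.RandomAlphaNumeric(16)
	if err != nil {
		fmt.Println("invalid length:", err)
		return
	}
	fmt.Println(s) // e.g. "q3VbX0dTk2LmZ8aY" (output varies per call)
}
```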
- -## Issue Reporting -Make suggestions or report issues using the Git issue tracker: https://github.com/Masterminds/goutils/issues - -## Website -* [GoUtils webpage](http://Masterminds.github.io/goutils/) diff --git a/vendor/github.com/Masterminds/goutils/appveyor.yml b/vendor/github.com/Masterminds/goutils/appveyor.yml deleted file mode 100644 index 657564a8..00000000 --- a/vendor/github.com/Masterminds/goutils/appveyor.yml +++ /dev/null @@ -1,21 +0,0 @@ -version: build-{build}.{branch} - -clone_folder: C:\gopath\src\github.com\Masterminds\goutils -shallow_clone: true - -environment: - GOPATH: C:\gopath - -platform: - - x64 - -build: off - -install: - - go version - - go env - -test_script: - - go test -v - -deploy: off diff --git a/vendor/github.com/Masterminds/goutils/cryptorandomstringutils.go b/vendor/github.com/Masterminds/goutils/cryptorandomstringutils.go deleted file mode 100644 index 8dbd9248..00000000 --- a/vendor/github.com/Masterminds/goutils/cryptorandomstringutils.go +++ /dev/null @@ -1,230 +0,0 @@ -/* -Copyright 2014 Alexander Okoli - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package goutils - -import ( - "crypto/rand" - "fmt" - "math" - "math/big" - "unicode" -) - -/* -CryptoRandomNonAlphaNumeric creates a random string whose length is the number of characters specified. -Characters will be chosen from the set of all characters (ASCII/Unicode values between 0 and 2,147,483,647 (math.MaxInt32)). - -Parameter: - count - the length of random string to create - -Returns: - string - the random string - error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...) -*/ -func CryptoRandomNonAlphaNumeric(count int) (string, error) { - return CryptoRandomAlphaNumericCustom(count, false, false) -} - -/* -CryptoRandomAscii creates a random string whose length is the number of characters specified. -Characters will be chosen from the set of characters whose ASCII value is between 32 and 126 (inclusive). - -Parameter: - count - the length of random string to create - -Returns: - string - the random string - error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...) -*/ -func CryptoRandomAscii(count int) (string, error) { - return CryptoRandom(count, 32, 127, false, false) -} - -/* -CryptoRandomNumeric creates a random string whose length is the number of characters specified. -Characters will be chosen from the set of numeric characters. - -Parameter: - count - the length of random string to create - -Returns: - string - the random string - error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...) -*/ -func CryptoRandomNumeric(count int) (string, error) { - return CryptoRandom(count, 0, 0, false, true) -} - -/* -CryptoRandomAlphabetic creates a random string whose length is the number of characters specified. -Characters will be chosen from the set of alphabetic characters.
- -Parameter: - count - the length of random string to create - -Returns: - string - the random string - error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...) -*/ -func CryptoRandomAlphabetic(count int) (string, error) { - return CryptoRandom(count, 0, 0, true, false) -} - -/* -CryptoRandomAlphaNumeric creates a random string whose length is the number of characters specified. -Characters will be chosen from the set of alpha-numeric characters. - -Parameter: - count - the length of random string to create - -Returns: - string - the random string - error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...) -*/ -func CryptoRandomAlphaNumeric(count int) (string, error) { - return CryptoRandom(count, 0, 0, true, true) -} - -/* -CryptoRandomAlphaNumericCustom creates a random string whose length is the number of characters specified. -Characters will be chosen from the set of alpha-numeric characters as indicated by the arguments. - -Parameters: - count - the length of random string to create - letters - if true, generated string may include alphabetic characters - numbers - if true, generated string may include numeric characters - -Returns: - string - the random string - error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...) -*/ -func CryptoRandomAlphaNumericCustom(count int, letters bool, numbers bool) (string, error) { - return CryptoRandom(count, 0, 0, letters, numbers) -} - -/* -CryptoRandom creates a random string based on a variety of options, using golang's crypto/rand source of randomness. -If the parameters start and end are both 0, start and end are set to ' ' and 'z', and the ASCII printable characters will be used, -unless letters and numbers are both false, in which case, start and end are set to 0 and math.MaxInt32, respectively. -If chars is not nil, characters stored in chars that are between start and end are chosen. - -Parameters: - count - the length of random string to create - start - the position in set of chars (ASCII/Unicode int) to start at - end - the position in set of chars (ASCII/Unicode int) to end before - letters - if true, generated string may include alphabetic characters - numbers - if true, generated string may include numeric characters - chars - the set of chars to choose randoms from. If nil, then it will use the set of all chars.
- -Returns: - string - the random string - error - an error stemming from invalid parameters: if count < 0; or the provided chars array is empty; or end <= start; or end > len(chars) -*/ -func CryptoRandom(count int, start int, end int, letters bool, numbers bool, chars ...rune) (string, error) { - if count == 0 { - return "", nil - } else if count < 0 { - err := fmt.Errorf("randomstringutils illegal argument: Requested random string length %v is less than 0.", count) // equiv to err := errors.New("...") - return "", err - } - if chars != nil && len(chars) == 0 { - err := fmt.Errorf("randomstringutils illegal argument: The chars array must not be empty") - return "", err - } - - if start == 0 && end == 0 { - if chars != nil { - end = len(chars) - } else { - if !letters && !numbers { - end = math.MaxInt32 - } else { - end = 'z' + 1 - start = ' ' - } - } - } else { - if end <= start { - err := fmt.Errorf("randomstringutils illegal argument: Parameter end (%v) must be greater than start (%v)", end, start) - return "", err - } - - if chars != nil && end > len(chars) { - err := fmt.Errorf("randomstringutils illegal argument: Parameter end (%v) cannot be greater than len(chars) (%v)", end, len(chars)) - return "", err - } - } - - buffer := make([]rune, count) - gap := end - start - - // high-surrogates range, (\uD800-\uDBFF) = 55296 - 56319 - // low-surrogates range, (\uDC00-\uDFFF) = 56320 - 57343 - - for count != 0 { - count-- - var ch rune - if chars == nil { - ch = rune(getCryptoRandomInt(gap) + int64(start)) - } else { - ch = chars[getCryptoRandomInt(gap)+int64(start)] - } - - if letters && unicode.IsLetter(ch) || numbers && unicode.IsDigit(ch) || !letters && !numbers { - if ch >= 56320 && ch <= 57343 { // low surrogate range - if count == 0 { - count++ - } else { - // Insert low surrogate - buffer[count] = ch - count-- - // Insert high surrogate - buffer[count] = rune(55296 + getCryptoRandomInt(128)) - } - } else if ch >= 55296 && ch <= 56191 { // High surrogates range (Partial) - if count == 0 { - count++ - } else { - // Insert low surrogate - buffer[count] = rune(56320 + getCryptoRandomInt(128)) - count-- - // Insert high surrogate - buffer[count] = ch - } - } else if ch >= 56192 && ch <= 56319 { - // private high surrogate, skip it - count++ - } else { - // not one of the surrogates* - buffer[count] = ch - } - } else { - count++ - } - } - return string(buffer), nil -} - -func getCryptoRandomInt(count int) int64 { - nBig, err := rand.Int(rand.Reader, big.NewInt(int64(count))) - if err != nil { - panic(err) - } - return nBig.Int64() -} diff --git a/vendor/github.com/Masterminds/goutils/randomstringutils.go b/vendor/github.com/Masterminds/goutils/randomstringutils.go deleted file mode 100644 index 27267023..00000000 --- a/vendor/github.com/Masterminds/goutils/randomstringutils.go +++ /dev/null @@ -1,248 +0,0 @@ -/* -Copyright 2014 Alexander Okoli - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package goutils - -import ( - "fmt" - "math" - "math/rand" - "time" - "unicode" -) - -// RANDOM provides the time-based seed used to generate random numbers -var RANDOM = rand.New(rand.NewSource(time.Now().UnixNano())) - -/* -RandomNonAlphaNumeric creates a random string whose length is the number of characters specified. -Characters will be chosen from the set of all characters (ASCII/Unicode values between 0 and 2,147,483,647 (math.MaxInt32)). - -Parameter: - count - the length of random string to create - -Returns: - string - the random string - error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) -*/ -func RandomNonAlphaNumeric(count int) (string, error) { - return RandomAlphaNumericCustom(count, false, false) -} - -/* -RandomAscii creates a random string whose length is the number of characters specified. -Characters will be chosen from the set of characters whose ASCII value is between 32 and 126 (inclusive). - -Parameter: - count - the length of random string to create - -Returns: - string - the random string - error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) -*/ -func RandomAscii(count int) (string, error) { - return Random(count, 32, 127, false, false) -} - -/* -RandomNumeric creates a random string whose length is the number of characters specified. -Characters will be chosen from the set of numeric characters. - -Parameter: - count - the length of random string to create - -Returns: - string - the random string - error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) -*/ -func RandomNumeric(count int) (string, error) { - return Random(count, 0, 0, false, true) -} - -/* -RandomAlphabetic creates a random string whose length is the number of characters specified. -Characters will be chosen from the set of alphabetic characters. - -Parameters: - count - the length of random string to create - -Returns: - string - the random string - error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) -*/ -func RandomAlphabetic(count int) (string, error) { - return Random(count, 0, 0, true, false) -} - -/* -RandomAlphaNumeric creates a random string whose length is the number of characters specified. -Characters will be chosen from the set of alpha-numeric characters. - -Parameter: - count - the length of random string to create - -Returns: - string - the random string - error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) -*/ -func RandomAlphaNumeric(count int) (string, error) { - return Random(count, 0, 0, true, true) -} - -/* -RandomAlphaNumericCustom creates a random string whose length is the number of characters specified. -Characters will be chosen from the set of alpha-numeric characters as indicated by the arguments. - -Parameters: - count - the length of random string to create - letters - if true, generated string may include alphabetic characters - numbers - if true, generated string may include numeric characters - -Returns: - string - the random string - error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) -*/ -func RandomAlphaNumericCustom(count int, letters bool, numbers bool) (string, error) { - return Random(count, 0, 0, letters, numbers) -} - -/* -Random creates a random string based on a variety of options, using a default source of randomness.
-This method has exactly the same semantics as RandomSeed(int, int, int, bool, bool, []char, *rand.Rand), but -instead of using an externally supplied source of randomness, it uses the internal *rand.Rand instance. - -Parameters: - count - the length of random string to create - start - the position in set of chars (ASCII/Unicode int) to start at - end - the position in set of chars (ASCII/Unicode int) to end before - letters - if true, generated string may include alphabetic characters - numbers - if true, generated string may include numeric characters - chars - the set of chars to choose randoms from. If nil, then it will use the set of all chars. - -Returns: - string - the random string - error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) -*/ -func Random(count int, start int, end int, letters bool, numbers bool, chars ...rune) (string, error) { - return RandomSeed(count, start, end, letters, numbers, chars, RANDOM) -} - -/* -RandomSeed creates a random string based on a variety of options, using a supplied source of randomness. -If the parameters start and end are both 0, start and end are set to ' ' and 'z', and the ASCII printable characters will be used, -unless letters and numbers are both false, in which case, start and end are set to 0 and math.MaxInt32, respectively. -If chars is not nil, characters stored in chars that are between start and end are chosen. -This method accepts a user-supplied *rand.Rand instance to use as a source of randomness. By seeding a single *rand.Rand instance -with a fixed seed and using it for each call, the same random sequence of strings can be generated repeatedly and predictably. - -Parameters: - count - the length of random string to create - start - the position in set of chars (ASCII/Unicode decimals) to start at - end - the position in set of chars (ASCII/Unicode decimals) to end before - letters - if true, generated string may include alphabetic characters - numbers - if true, generated string may include numeric characters - chars - the set of chars to choose randoms from. If nil, then it will use the set of all chars. - random - a source of randomness.
- -Returns: - string - the random string - error - an error stemming from invalid parameters: if count < 0; or the provided chars array is empty; or end <= start; or end > len(chars) -*/ -func RandomSeed(count int, start int, end int, letters bool, numbers bool, chars []rune, random *rand.Rand) (string, error) { - - if count == 0 { - return "", nil - } else if count < 0 { - err := fmt.Errorf("randomstringutils illegal argument: Requested random string length %v is less than 0.", count) // equiv to err := errors.New("...") - return "", err - } - if chars != nil && len(chars) == 0 { - err := fmt.Errorf("randomstringutils illegal argument: The chars array must not be empty") - return "", err - } - - if start == 0 && end == 0 { - if chars != nil { - end = len(chars) - } else { - if !letters && !numbers { - end = math.MaxInt32 - } else { - end = 'z' + 1 - start = ' ' - } - } - } else { - if end <= start { - err := fmt.Errorf("randomstringutils illegal argument: Parameter end (%v) must be greater than start (%v)", end, start) - return "", err - } - - if chars != nil && end > len(chars) { - err := fmt.Errorf("randomstringutils illegal argument: Parameter end (%v) cannot be greater than len(chars) (%v)", end, len(chars)) - return "", err - } - } - - buffer := make([]rune, count) - gap := end - start - - // high-surrogates range, (\uD800-\uDBFF) = 55296 - 56319 - // low-surrogates range, (\uDC00-\uDFFF) = 56320 - 57343 - - for count != 0 { - count-- - var ch rune - if chars == nil { - ch = rune(random.Intn(gap) + start) - } else { - ch = chars[random.Intn(gap)+start] - } - - if letters && unicode.IsLetter(ch) || numbers && unicode.IsDigit(ch) || !letters && !numbers { - if ch >= 56320 && ch <= 57343 { // low surrogate range - if count == 0 { - count++ - } else { - // Insert low surrogate - buffer[count] = ch - count-- - // Insert high surrogate - buffer[count] = rune(55296 + random.Intn(128)) - } - } else if ch >= 55296 && ch <= 56191 { // High surrogates range (Partial) - if count == 0 { - count++ - } else { - // Insert low surrogate - buffer[count] = rune(56320 + random.Intn(128)) - count-- - // Insert high surrogate - buffer[count] = ch - } - } else if ch >= 56192 && ch <= 56319 { - // private high surrogate, skip it - count++ - } else { - // not one of the surrogates* - buffer[count] = ch - } - } else { - count++ - } - } - return string(buffer), nil -} diff --git a/vendor/github.com/Masterminds/goutils/stringutils.go b/vendor/github.com/Masterminds/goutils/stringutils.go deleted file mode 100644 index 741bb530..00000000 --- a/vendor/github.com/Masterminds/goutils/stringutils.go +++ /dev/null @@ -1,240 +0,0 @@ -/* -Copyright 2014 Alexander Okoli - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package goutils - -import ( - "bytes" - "fmt" - "strings" - "unicode" -) - -// Typically returned by functions where a searched item cannot be found -const INDEX_NOT_FOUND = -1 - -/* -Abbreviate abbreviates a string using ellipses. This will turn the string "Now is the time for all good men" into "Now is the time for..." 
- -Specifically, the algorithm is as follows: - - - If str is less than maxWidth characters long, return it. - - Else abbreviate it to (str[0:maxWidth - 3] + "..."). - - If maxWidth is less than 4, return an illegal argument error. - - In no case will it return a string of length greater than maxWidth. - -Parameters: - str - the string to check - maxWidth - maximum length of result string, must be at least 4 - -Returns: - string - abbreviated string - error - if the width is too small -*/ -func Abbreviate(str string, maxWidth int) (string, error) { - return AbbreviateFull(str, 0, maxWidth) -} - -/* -AbbreviateFull abbreviates a string using ellipses. This will turn the string "Now is the time for all good men" into "...is the time for..." -This function works like Abbreviate(string, int), but allows you to specify a "left edge" offset. Note that this left edge is not -necessarily going to be the leftmost character in the result, or the first character following the ellipses, but it will appear -somewhere in the result. -In no case will it return a string of length greater than maxWidth. - -Parameters: - str - the string to check - offset - left edge of source string - maxWidth - maximum length of result string, must be at least 4 - -Returns: - string - abbreviated string - error - if the width is too small -*/ -func AbbreviateFull(str string, offset int, maxWidth int) (string, error) { - if str == "" { - return "", nil - } - if maxWidth < 4 { - err := fmt.Errorf("stringutils illegal argument: Minimum abbreviation width is 4") - return "", err - } - if len(str) <= maxWidth { - return str, nil - } - if offset > len(str) { - offset = len(str) - } - if len(str)-offset < (maxWidth - 3) { // 15 - 5 < 10 - 3 = 10 < 7 - offset = len(str) - (maxWidth - 3) - } - abrevMarker := "..." - if offset <= 4 { - return str[0:maxWidth-3] + abrevMarker, nil // str.substring(0, maxWidth - 3) + abrevMarker; - } - if maxWidth < 7 { - err := fmt.Errorf("stringutils illegal argument: Minimum abbreviation width with offset is 7") - return "", err - } - if (offset + maxWidth - 3) < len(str) { // 5 + (10-3) < 15 = 12 < 15 - abrevStr, _ := Abbreviate(str[offset:len(str)], (maxWidth - 3)) - return abrevMarker + abrevStr, nil // abrevMarker + abbreviate(str.substring(offset), maxWidth - 3); - } - return abrevMarker + str[(len(str)-(maxWidth-3)):len(str)], nil // abrevMarker + str.substring(str.length() - (maxWidth - 3)); -} - -/* -DeleteWhiteSpace deletes all whitespaces from a string as defined by unicode.IsSpace(rune). -It returns the string without whitespaces. - -Parameter: - str - the string to delete whitespace from, may be nil - -Returns: - the string without whitespaces -*/ -func DeleteWhiteSpace(str string) string { - if str == "" { - return str - } - sz := len(str) - var chs bytes.Buffer - count := 0 - for i := 0; i < sz; i++ { - ch := rune(str[i]) - if !unicode.IsSpace(ch) { - chs.WriteRune(ch) - count++ - } - } - if count == sz { - return str - } - return chs.String() -} - -/* -IndexOfDifference compares two strings, and returns the index at which the strings begin to differ. 
- -Parameters: - str1 - the first string - str2 - the second string - -Returns: - the index where str1 and str2 begin to differ; -1 if they are equal -*/ -func IndexOfDifference(str1 string, str2 string) int { - if str1 == str2 { - return INDEX_NOT_FOUND - } - if IsEmpty(str1) || IsEmpty(str2) { - return 0 - } - var i int - for i = 0; i < len(str1) && i < len(str2); i++ { - if rune(str1[i]) != rune(str2[i]) { - break - } - } - if i < len(str2) || i < len(str1) { - return i - } - return INDEX_NOT_FOUND -} - -/* -IsBlank checks if a string is whitespace or empty (""). Observe the following behavior: - - goutils.IsBlank("") = true - goutils.IsBlank(" ") = true - goutils.IsBlank("bob") = false - goutils.IsBlank(" bob ") = false - -Parameter: - str - the string to check - -Returns: - true - if the string is whitespace or empty ("") -*/ -func IsBlank(str string) bool { - strLen := len(str) - if str == "" || strLen == 0 { - return true - } - for i := 0; i < strLen; i++ { - if unicode.IsSpace(rune(str[i])) == false { - return false - } - } - return true -} - -/* -IndexOf returns the index of the first instance of sub in str, with the search beginning from the -index start point specified. -1 is returned if sub is not present in str. - -An empty string ("") will return -1 (INDEX_NOT_FOUND). A negative start position is treated as zero. -A start position greater than the string length returns -1. - -Parameters: - str - the string to check - sub - the substring to find - start - the start position; negative treated as zero - -Returns: - the first index where the sub string was found (always >= start) -*/ -func IndexOf(str string, sub string, start int) int { - - if start < 0 { - start = 0 - } - - if len(str) < start { - return INDEX_NOT_FOUND - } - - if IsEmpty(str) || IsEmpty(sub) { - return INDEX_NOT_FOUND - } - - partialIndex := strings.Index(str[start:len(str)], sub) - if partialIndex == -1 { - return INDEX_NOT_FOUND - } - return partialIndex + start -} - -// IsEmpty checks if a string is empty (""). Returns true if empty, and false otherwise. -func IsEmpty(str string) bool { - return len(str) == 0 -} - -// Returns either the passed in string, or if the string is empty, the value of defaultStr. -func DefaultString(str string, defaultStr string) string { - if IsEmpty(str) { - return defaultStr - } - return str -} - -// Returns either the passed in string, or if the string is whitespace, empty (""), the value of defaultStr. -func DefaultIfBlank(str string, defaultStr string) string { - if IsBlank(str) { - return defaultStr - } - return str -} diff --git a/vendor/github.com/Masterminds/goutils/wordutils.go b/vendor/github.com/Masterminds/goutils/wordutils.go deleted file mode 100644 index 034cad8e..00000000 --- a/vendor/github.com/Masterminds/goutils/wordutils.go +++ /dev/null @@ -1,357 +0,0 @@ -/* -Copyright 2014 Alexander Okoli - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Package goutils provides utility functions to manipulate strings in various ways. 
-The code snippets below show examples of how to use goutils. Some functions return -errors while others do not, so usage would vary as a result. - -Example: - - package main - - import ( - "fmt" - "github.com/aokoli/goutils" - ) - - func main() { - - // EXAMPLE 1: A goutils function which returns no errors - fmt.Println (goutils.Initials("John Doe Foo")) // Prints out "JDF" - - - - // EXAMPLE 2: A goutils function which returns an error - rand1, err1 := goutils.Random (-1, 0, 0, true, true) - - if err1 != nil { - fmt.Println(err1) // Prints out error message because -1 was entered as the first parameter in goutils.Random(...) - } else { - fmt.Println(rand1) - } - } -*/ -package goutils - -import ( - "bytes" - "strings" - "unicode" -) - -// VERSION indicates the current version of goutils -const VERSION = "1.0.0" - -/* -Wrap wraps a single line of text, identifying words by ' '. -New lines will be separated by '\n'. Very long words, such as URLs will not be wrapped. -Leading spaces on a new line are stripped. Trailing spaces are not stripped. - -Parameters: - str - the string to be word wrapped - wrapLength - the column (a column can fit only one character) to wrap the words at, less than 1 is treated as 1 - -Returns: - a line with newlines inserted -*/ -func Wrap(str string, wrapLength int) string { - return WrapCustom(str, wrapLength, "", false) -} - -/* -WrapCustom wraps a single line of text, identifying words by ' '. -Leading spaces on a new line are stripped. Trailing spaces are not stripped. - -Parameters: - str - the string to be word wrapped - wrapLength - the column number (a column can fit only one character) to wrap the words at, less than 1 is treated as 1 - newLineStr - the string to insert for a new line, "" uses '\n' - wrapLongWords - true if long words (such as URLs) should be wrapped - -Returns: - a line with newlines inserted -*/ -func WrapCustom(str string, wrapLength int, newLineStr string, wrapLongWords bool) string { - - if str == "" { - return "" - } - if newLineStr == "" { - newLineStr = "\n" // TODO Assumes "\n" is separator. Explore SystemUtils.LINE_SEPARATOR from Apache Commons - } - if wrapLength < 1 { - wrapLength = 1 - } - - inputLineLength := len(str) - offset := 0 - - var wrappedLine bytes.Buffer - - for inputLineLength-offset > wrapLength { - - if rune(str[offset]) == ' ' { - offset++ - continue - } - - end := wrapLength + offset + 1 - spaceToWrapAt := strings.LastIndex(str[offset:end], " ") + offset - - if spaceToWrapAt >= offset { - // normal word (not longer than wrapLength) - wrappedLine.WriteString(str[offset:spaceToWrapAt]) - wrappedLine.WriteString(newLineStr) - offset = spaceToWrapAt + 1 - - } else { - // long word or URL - if wrapLongWords { - end := wrapLength + offset - // long words are wrapped one line at a time - wrappedLine.WriteString(str[offset:end]) - wrappedLine.WriteString(newLineStr) - offset += wrapLength - } else { - // long words aren't wrapped, just extended beyond limit - end := wrapLength + offset - index := strings.IndexRune(str[end:len(str)], ' ') - if index == -1 { - wrappedLine.WriteString(str[offset:len(str)]) - offset = inputLineLength - } else { - spaceToWrapAt = index + end - wrappedLine.WriteString(str[offset:spaceToWrapAt]) - wrappedLine.WriteString(newLineStr) - offset = spaceToWrapAt + 1 - } - } - } - } - - wrappedLine.WriteString(str[offset:len(str)]) - - return wrappedLine.String() - -} - -/* -Capitalize capitalizes all the delimiter separated words in a string. Only the first letter of each word is changed.
-To convert the rest of each word to lowercase at the same time, use CapitalizeFully(str string, delimiters ...rune). -The delimiters represent a set of characters understood to separate words. The first string character -and the first non-delimiter character after a delimiter will be capitalized. A "" input string returns "". -Capitalization uses the Unicode title case, normally equivalent to upper case. - -Parameters: - str - the string to capitalize - delimiters - set of characters to determine capitalization, exclusion of this parameter means whitespace would be delimiter - -Returns: - capitalized string -*/ -func Capitalize(str string, delimiters ...rune) string { - - var delimLen int - - if delimiters == nil { - delimLen = -1 - } else { - delimLen = len(delimiters) - } - - if str == "" || delimLen == 0 { - return str - } - - buffer := []rune(str) - capitalizeNext := true - for i := 0; i < len(buffer); i++ { - ch := buffer[i] - if isDelimiter(ch, delimiters...) { - capitalizeNext = true - } else if capitalizeNext { - buffer[i] = unicode.ToTitle(ch) - capitalizeNext = false - } - } - return string(buffer) - -} - -/* -CapitalizeFully converts all the delimiter separated words in a string into capitalized words, that is each word is made up of a -titlecase character and then a series of lowercase characters. The delimiters represent a set of characters understood -to separate words. The first string character and the first non-delimiter character after a delimiter will be capitalized. -Capitalization uses the Unicode title case, normally equivalent to upper case. - -Parameters: - str - the string to capitalize fully - delimiters - set of characters to determine capitalization, exclusion of this parameter means whitespace would be delimiter - -Returns: - capitalized string -*/ -func CapitalizeFully(str string, delimiters ...rune) string { - - var delimLen int - - if delimiters == nil { - delimLen = -1 - } else { - delimLen = len(delimiters) - } - - if str == "" || delimLen == 0 { - return str - } - str = strings.ToLower(str) - return Capitalize(str, delimiters...) -} - -/* -Uncapitalize uncapitalizes all the whitespace separated words in a string. Only the first letter of each word is changed. -The delimiters represent a set of characters understood to separate words. The first string character and the first non-delimiter -character after a delimiter will be uncapitalized. Whitespace is defined by unicode.IsSpace(char). - -Parameters: - str - the string to uncapitalize fully - delimiters - set of characters to determine capitalization, exclusion of this parameter means whitespace would be delimiter - -Returns: - uncapitalized string -*/ -func Uncapitalize(str string, delimiters ...rune) string { - - var delimLen int - - if delimiters == nil { - delimLen = -1 - } else { - delimLen = len(delimiters) - } - - if str == "" || delimLen == 0 { - return str - } - - buffer := []rune(str) - uncapitalizeNext := true // TODO Always makes capitalize/un apply to first char. - for i := 0; i < len(buffer); i++ { - ch := buffer[i] - if isDelimiter(ch, delimiters...) { - uncapitalizeNext = true - } else if uncapitalizeNext { - buffer[i] = unicode.ToLower(ch) - uncapitalizeNext = false - } - } - return string(buffer) -} - -/* -SwapCase swaps the case of a string using a word based algorithm.
- -Conversion algorithm: - - Upper case character converts to Lower case - Title case character converts to Lower case - Lower case character after Whitespace or at start converts to Title case - Other Lower case character converts to Upper case - Whitespace is defined by unicode.IsSpace(char). - -Parameters: - str - the string to swap case - -Returns: - the changed string -*/ -func SwapCase(str string) string { - if str == "" { - return str - } - buffer := []rune(str) - - whitespace := true - - for i := 0; i < len(buffer); i++ { - ch := buffer[i] - if unicode.IsUpper(ch) { - buffer[i] = unicode.ToLower(ch) - whitespace = false - } else if unicode.IsTitle(ch) { - buffer[i] = unicode.ToLower(ch) - whitespace = false - } else if unicode.IsLower(ch) { - if whitespace { - buffer[i] = unicode.ToTitle(ch) - whitespace = false - } else { - buffer[i] = unicode.ToUpper(ch) - } - } else { - whitespace = unicode.IsSpace(ch) - } - } - return string(buffer) -} - -/* -Initials extracts the initial letters from each word in the string. The first letter of the string and all first -letters after the defined delimiters are returned as a new string. Their case is not changed. If the delimiters -parameter is excluded, then Whitespace is used. Whitespace is defined by unicode.IsSpace(char). An empty delimiter array returns an empty string. - -Parameters: - str - the string to get initials from - delimiters - set of characters to determine words, exclusion of this parameter means whitespace would be delimiter -Returns: - string of initial letters -*/ -func Initials(str string, delimiters ...rune) string { - if str == "" { - return str - } - if delimiters != nil && len(delimiters) == 0 { - return "" - } - strLen := len(str) - var buf bytes.Buffer - lastWasGap := true - for i := 0; i < strLen; i++ { - ch := rune(str[i]) - - if isDelimiter(ch, delimiters...)
{ - lastWasGap = true - } else if lastWasGap { - buf.WriteRune(ch) - lastWasGap = false - } - } - return buf.String() -} - -// private function (lower case func name) -func isDelimiter(ch rune, delimiters ...rune) bool { - if delimiters == nil { - return unicode.IsSpace(ch) - } - for _, delimiter := range delimiters { - if ch == delimiter { - return true - } - } - return false -} diff --git a/vendor/github.com/Masterminds/semver/v3/.gitignore b/vendor/github.com/Masterminds/semver/v3/.gitignore deleted file mode 100644 index 6b061e61..00000000 --- a/vendor/github.com/Masterminds/semver/v3/.gitignore +++ /dev/null @@ -1 +0,0 @@ -_fuzz/ \ No newline at end of file diff --git a/vendor/github.com/Masterminds/semver/v3/.golangci.yml b/vendor/github.com/Masterminds/semver/v3/.golangci.yml deleted file mode 100644 index fdbdf144..00000000 --- a/vendor/github.com/Masterminds/semver/v3/.golangci.yml +++ /dev/null @@ -1,26 +0,0 @@ -run: - deadline: 2m - -linters: - disable-all: true - enable: - - deadcode - - dupl - - errcheck - - gofmt - - goimports - - golint - - gosimple - - govet - - ineffassign - - misspell - - nakedret - - structcheck - - unused - - varcheck - -linters-settings: - gofmt: - simplify: true - dupl: - threshold: 400 diff --git a/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md b/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md deleted file mode 100644 index 1f90c38d..00000000 --- a/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md +++ /dev/null @@ -1,194 +0,0 @@ -# Changelog - -## 3.1.1 (2020-11-23) - -### Fixed - -- #158: Fixed issue with generated regex operation order that could cause problems - -## 3.1.0 (2020-04-15) - -### Added - -- #131: Add support for serializing/deserializing SQL (thanks @ryancurrah) - -### Changed - -- #148: More accurate validation messages on constraints - -## 3.0.3 (2019-12-13) - -### Fixed - -- #141: Fixed issue with <= comparison - -## 3.0.2 (2019-11-14) - -### Fixed - -- #134: Fixed broken constraint checking with ^0.0 (thanks @krmichelos) - -## 3.0.1 (2019-09-13) - -### Fixed - -- #125: Fixes issue with module path for v3 - -## 3.0.0 (2019-09-12) - -This is a major release of the semver package which includes API changes. The Go -API is compatible with ^1. The Go API was not changed because many people are using -`go get` without Go modules for their applications and API breaking changes cause -errors which we have or would need to support. - -The changes in this release are the handling based on the data passed into the -functions. These are described in the added and changed sections below. - -### Added - -- StrictNewVersion function. This is similar to NewVersion but will return an - error if the version passed in is not a strict semantic version. For example, - 1.2.3 would pass but v1.2.3 or 1.2 would fail because they are not strictly - speaking semantic versions. This function is faster, performs fewer operations, - and uses fewer allocations than NewVersion. -- Fuzzing has been performed on NewVersion, StrictNewVersion, and NewConstraint. - The Makefile contains the operations used. For more information you can start - on Wikipedia at https://en.wikipedia.org/wiki/Fuzzing -- Now using Go modules - -### Changed - -- NewVersion has proper prerelease and metadata validation with error messages - to signal an issue with either of them -- ^ now operates using a similar set of rules to npm/js and Rust/Cargo. If the - version is >=1 the ^ ranges work the same as v1. For major versions of 0 the - rules have changed.
The minor version is treated as the stable version unless - a patch is specified and then it is equivalent to =. One difference from npm/js - is that prereleases there are only to a specific version (e.g. 1.2.3). - Prereleases here look over multiple versions and follow semantic version - ordering rules. This pattern now follows along with the expected and requested - handling of this package by numerous users. - -## 1.5.0 (2019-09-11) - -### Added - -- #103: Add basic fuzzing for `NewVersion()` (thanks @jesse-c) - -### Changed - -- #82: Clarify wildcard meaning in range constraints and update tests for it (thanks @greysteil) -- #83: Clarify caret operator range for pre-1.0.0 dependencies (thanks @greysteil) -- #72: Adding docs comment pointing to vert for a cli -- #71: Update the docs on pre-release comparator handling -- #89: Test with new go versions (thanks @thedevsaddam) -- #87: Added $ to ValidPrerelease for better validation (thanks @jeremycarroll) - -### Fixed - -- #78: Fix unchecked error in example code (thanks @ravron) -- #70: Fix the handling of pre-releases and the 0.0.0 release edge case -- #97: Fixed copyright file for proper display on GitHub -- #107: Fix handling prerelease when sorting alphanum and num -- #109: Fixed where Validate sometimes returns wrong message on error - -## 1.4.2 (2018-04-10) - -### Changed - -- #72: Updated the docs to point to vert for a console application -- #71: Update the docs on pre-release comparator handling - -### Fixed - -- #70: Fix the handling of pre-releases and the 0.0.0 release edge case - -## 1.4.1 (2018-04-02) - -### Fixed - -- Fixed #64: Fix pre-release precedence issue (thanks @uudashr) - -## 1.4.0 (2017-10-04) - -### Changed - -- #61: Update NewVersion to parse ints with a 64bit int size (thanks @zknill) - -## 1.3.1 (2017-07-10) - -### Fixed - -- Fixed #57: number comparisons in prerelease sometimes inaccurate - -## 1.3.0 (2017-05-02) - -### Added - -- #45: Added json (un)marshaling support (thanks @mh-cbon) -- Stability marker. See https://masterminds.github.io/stability/ - -### Fixed - -- #51: Fix handling of single digit tilde constraint (thanks @dgodd) - -### Changed - -- #55: The godoc icon moved from png to svg - -## 1.2.3 (2017-04-03) - -### Fixed - -- #46: Fixed 0.x.x and 0.0.x in constraints being treated as * - -## Release 1.2.2 (2016-12-13) - -### Fixed - -- #34: Fixed issue where hyphen range was not working with pre-release parsing. - -## Release 1.2.1 (2016-11-28) - -### Fixed - -- #24: Fixed edge case issue where constraint "> 0" does not handle "0.0.1-alpha" - properly. - -## Release 1.2.0 (2016-11-04) - -### Added - -- #20: Added MustParse function for versions (thanks @adamreese) - #15: Added increment methods on versions (thanks @mh-cbon) - -### Fixed - -- Issue #21: Per the SemVer spec (section 9) a pre-release is unstable and - might not satisfy the intended compatibility. The change here ignores pre-releases - on constraint checks (e.g., ~ or ^) when a pre-release is not part of the - constraint. For example, `^1.2.3` will ignore pre-releases while - `^1.2.3-alpha` will include them. - -## Release 1.1.1 (2016-06-30) - -### Changed - -- Issue #9: Speed up version comparison performance (thanks @sdboyer) -- Issue #8: Added benchmarks (thanks @sdboyer) -- Updated Go Report Card URL to new location -- Updated Readme to add code snippet formatting (thanks @mh-cbon) -- Updating tagging to v[SemVer] structure for compatibility with other tools.
- -## Release 1.1.0 (2016-03-11) - -- Issue #2: Implemented validation to provide reasons a version failed a - constraint. - -## Release 1.0.1 (2015-12-31) - -- Fixed #1: * constraint failing on valid versions. - -## Release 1.0.0 (2015-10-20) - -- Initial release diff --git a/vendor/github.com/Masterminds/semver/v3/LICENSE.txt b/vendor/github.com/Masterminds/semver/v3/LICENSE.txt deleted file mode 100644 index 9ff7da9c..00000000 --- a/vendor/github.com/Masterminds/semver/v3/LICENSE.txt +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (C) 2014-2019, Matt Butcher and Matt Farina - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/Masterminds/semver/v3/Makefile b/vendor/github.com/Masterminds/semver/v3/Makefile deleted file mode 100644 index eac19178..00000000 --- a/vendor/github.com/Masterminds/semver/v3/Makefile +++ /dev/null @@ -1,37 +0,0 @@ -GOPATH=$(shell go env GOPATH) -GOLANGCI_LINT=$(GOPATH)/bin/golangci-lint -GOFUZZBUILD = $(GOPATH)/bin/go-fuzz-build -GOFUZZ = $(GOPATH)/bin/go-fuzz - -.PHONY: lint -lint: $(GOLANGCI_LINT) - @echo "==> Linting codebase" - @$(GOLANGCI_LINT) run - -.PHONY: test -test: - @echo "==> Running tests" - GO111MODULE=on go test -v - -.PHONY: test-cover -test-cover: - @echo "==> Running Tests with coverage" - GO111MODULE=on go test -cover . - -.PHONY: fuzz -fuzz: $(GOFUZZBUILD) $(GOFUZZ) - @echo "==> Fuzz testing" - $(GOFUZZBUILD) - $(GOFUZZ) -workdir=_fuzz - -$(GOLANGCI_LINT): - # Install golangci-lint. The configuration for it is in the .golangci.yml - # file in the root of the repository - echo ${GOPATH} - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(GOPATH)/bin v1.17.1 - -$(GOFUZZBUILD): - cd / && go get -u github.com/dvyukov/go-fuzz/go-fuzz-build - -$(GOFUZZ): - cd / && go get -u github.com/dvyukov/go-fuzz/go-fuzz github.com/dvyukov/go-fuzz/go-fuzz-dep \ No newline at end of file diff --git a/vendor/github.com/Masterminds/semver/v3/README.md b/vendor/github.com/Masterminds/semver/v3/README.md deleted file mode 100644 index d8f54dcb..00000000 --- a/vendor/github.com/Masterminds/semver/v3/README.md +++ /dev/null @@ -1,244 +0,0 @@ -# SemVer - -The `semver` package provides the ability to work with [Semantic Versions](http://semver.org) in Go.
Specifically it provides the ability to: - -* Parse semantic versions -* Sort semantic versions -* Check if a semantic version fits within a set of constraints -* Optionally work with a `v` prefix - -[![Stability: -Active](https://masterminds.github.io/stability/active.svg)](https://masterminds.github.io/stability/active.html) -[![](https://github.com/Masterminds/semver/workflows/Tests/badge.svg)](https://github.com/Masterminds/semver/actions) -[![GoDoc](https://img.shields.io/static/v1?label=godoc&message=reference&color=blue)](https://pkg.go.dev/github.com/Masterminds/semver/v3) -[![Go Report Card](https://goreportcard.com/badge/github.com/Masterminds/semver)](https://goreportcard.com/report/github.com/Masterminds/semver) - -If you are looking for a command line tool for version comparisons please see -[vert](https://github.com/Masterminds/vert) which uses this library. - -## Package Versions - -There are three major versions of the `semver` package. - -* 3.x.x is the new stable and active version. This version is focused on constraint - compatibility for range handling in other tools from other languages. It has - a similar API to the v1 releases. The development of this version is on the master - branch. The documentation for this version is below. -* 2.x was developed primarily for [dep](https://github.com/golang/dep). There are - no tagged releases and the development was performed by [@sdboyer](https://github.com/sdboyer). - There are API breaking changes from v1. This version lives on the [2.x branch](https://github.com/Masterminds/semver/tree/2.x). -* 1.x.x is the most widely used version with numerous tagged releases. This is the - previous stable and is still maintained for bug fixes. The development, to fix - bugs, occurs on the release-1 branch. You can read the documentation [here](https://github.com/Masterminds/semver/blob/release-1/README.md). - -## Parsing Semantic Versions - -There are two functions that can parse semantic versions. The `StrictNewVersion` -function only parses valid version 2 semantic versions as outlined in the -specification. The `NewVersion` function attempts to coerce a version into a -semantic version and parse it. For example, if there is a leading v or a version -listed without all 3 parts (e.g. `v1.2`) it will attempt to coerce it into a valid -semantic version (e.g., 1.2.0). In both cases a `Version` object is returned -that can be sorted, compared, and used in constraints. - -When parsing a version an error is returned if there is an issue parsing the -version. For example, - - v, err := semver.NewVersion("1.2.3-beta.1+build345") - -The version object has methods to get the parts of the version, compare it to -other versions, convert the version back into a string, and get the original -string. Getting the original string is useful if the semantic version was coerced -into a valid form. - -## Sorting Semantic Versions - -A set of versions can be sorted using the `sort` package from the standard library. -For example, - -```go -raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2",} -vs := make([]*semver.Version, len(raw)) -for i, r := range raw { - v, err := semver.NewVersion(r) - if err != nil { - t.Errorf("Error parsing version: %s", err) - } - - vs[i] = v -} - -sort.Sort(semver.Collection(vs)) -``` - -## Checking Version Constraints - -There are two methods for comparing versions. One uses comparison methods on -`Version` instances and the other uses `Constraints`.
-differences to note between these two methods of comparison.
-
-1. When two versions are compared using functions such as `Compare`, `LessThan`,
-   and others it will follow the specification and always include prereleases
-   within the comparison. It will provide an answer that is valid with the
-   comparison section of the spec at https://semver.org/#spec-item-11
-2. When constraint checking is used for checks or validation it will follow a
-   different set of rules that are common for ranges with tools like npm/js
-   and Rust/Cargo. This includes considering prereleases to be invalid if the
-   range does not include one. If you want to have it include pre-releases a
-   simple solution is to include `-0` in your range.
-3. Constraint ranges can have some complex rules including the shorthand use of
-   ~ and ^. For more details on those see the options below.
-
-There are differences between the two methods of checking versions because the
-comparison methods on `Version` follow the specification while comparison ranges
-are not part of the specification. Different packages and tools have taken it
-upon themselves to come up with range rules. This has resulted in differences.
-For example, npm/js and Cargo/Rust follow similar patterns while PHP has a
-different pattern for ^. The comparison features in this package follow the
-npm/js and Cargo/Rust lead because applications using it have followed similar
-patterns with their versions.
-
-Checking a version against version constraints is one of the most featureful
-parts of the package.
-
-```go
-c, err := semver.NewConstraint(">= 1.2.3")
-if err != nil {
-    // Handle constraint not being parsable.
-}
-
-v, err := semver.NewVersion("1.3")
-if err != nil {
-    // Handle version not being parsable.
-}
-// Check if the version meets the constraints. The variable a will be true.
-a := c.Check(v)
-```
-
-### Basic Comparisons
-
-There are two elements to the comparisons. First, a comparison string is a list
-of space or comma separated AND comparisons. These are then separated by || (OR)
-comparisons. For example, `">= 1.2 < 3.0.0 || >= 4.2.3"` is looking for a
-comparison that's greater than or equal to 1.2 and less than 3.0.0 or is
-greater than or equal to 4.2.3.
-
-The basic comparisons are:
-
-* `=`: equal (aliased to no operator)
-* `!=`: not equal
-* `>`: greater than
-* `<`: less than
-* `>=`: greater than or equal to
-* `<=`: less than or equal to
-
-### Working With Prerelease Versions
-
-Pre-releases, for those not familiar with them, are used for software releases
-prior to stable or generally available releases. Examples of prereleases include
-development, alpha, beta, and release candidate releases. A prerelease may be
-a version such as `1.2.3-beta.1` while the stable release would be `1.2.3`. In the
-order of precedence, prereleases come before their associated releases. In this
-example `1.2.3-beta.1 < 1.2.3`.
-
-According to the Semantic Version specification prereleases may not be
-API compliant with their release counterpart. It says,
-
-> A pre-release version indicates that the version is unstable and might not satisfy the intended compatibility requirements as denoted by its associated normal version.
-
-SemVer comparisons using constraints without a prerelease comparator will skip
-prerelease versions. For example, `>=1.2.3` will skip prereleases when looking
-at a list of releases while `>=1.2.3-0` will evaluate and find prereleases.
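For illustration, here is a minimal, self-contained sketch of the prerelease behavior described above (the import path matches this vendored module; the version strings are made up):

```go
package main

import (
	"fmt"

	"github.com/Masterminds/semver/v3"
)

func main() {
	v := semver.MustParse("1.2.4-beta.1")

	// A constraint with no prerelease comparator skips prerelease versions.
	release, _ := semver.NewConstraint(">=1.2.3")
	// Adding -0 to the range opts in to evaluating prereleases.
	withPre, _ := semver.NewConstraint(">=1.2.3-0")

	fmt.Println(release.Check(v)) // false
	fmt.Println(withPre.Check(v)) // true
}
```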
-
-The reason for the `0` as a pre-release version in the example comparison is
-because pre-releases can only contain ASCII alphanumerics and hyphens (along with
-`.` separators), per the spec. Sorting happens in ASCII sort order, again per the
-spec. The lowest character is a `0` in ASCII sort order
-(see an [ASCII Table](http://www.asciitable.com/)).
-
-Understanding ASCII sort ordering is important because A-Z comes before a-z. That
-means `>=1.2.3-BETA` will return `1.2.3-alpha`. What you might expect from case
-sensitivity doesn't apply here. This is due to ASCII sort ordering which is what
-the spec specifies.
-
-### Hyphen Range Comparisons
-
-There are multiple methods to handle ranges and the first is hyphen ranges.
-These look like:
-
-* `1.2 - 1.4.5` which is equivalent to `>= 1.2 <= 1.4.5`
-* `2.3.4 - 4.5` which is equivalent to `>= 2.3.4 <= 4.5`
-
-### Wildcards In Comparisons
-
-The `x`, `X`, and `*` characters can be used as a wildcard character. This works
-for all comparison operators. When used on the `=` operator it falls
-back to the patch level comparison (see tilde below). For example,
-
-* `1.2.x` is equivalent to `>= 1.2.0, < 1.3.0`
-* `>= 1.2.x` is equivalent to `>= 1.2.0`
-* `<= 2.x` is equivalent to `< 3`
-* `*` is equivalent to `>= 0.0.0`
-
-### Tilde Range Comparisons (Patch)
-
-The tilde (`~`) comparison operator is for patch level ranges when a minor
-version is specified and major level changes when the minor number is missing.
-For example,
-
-* `~1.2.3` is equivalent to `>= 1.2.3, < 1.3.0`
-* `~1` is equivalent to `>= 1, < 2`
-* `~2.3` is equivalent to `>= 2.3, < 2.4`
-* `~1.2.x` is equivalent to `>= 1.2.0, < 1.3.0`
-* `~1.x` is equivalent to `>= 1, < 2`
-
-### Caret Range Comparisons (Major)
-
-The caret (`^`) comparison operator is for major level changes once a stable
-(1.0.0) release has occurred. Prior to a 1.0.0 release the minor version acts
-as the API stability level. This is useful for comparisons of API versions, as a
-major change is API breaking. For example,
-
-* `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0`
-* `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0`
-* `^2.3` is equivalent to `>= 2.3, < 3`
-* `^2.x` is equivalent to `>= 2.0.0, < 3`
-* `^0.2.3` is equivalent to `>=0.2.3 <0.3.0`
-* `^0.2` is equivalent to `>=0.2.0 <0.3.0`
-* `^0.0.3` is equivalent to `>=0.0.3 <0.0.4`
-* `^0.0` is equivalent to `>=0.0.0 <0.1.0`
-* `^0` is equivalent to `>=0.0.0 <1.0.0`
-
-## Validation
-
-In addition to testing a version against a constraint, a version can be validated
-against a constraint. When validation fails a slice of errors containing why a
-version didn't meet the constraint is returned. For example,
-
-```go
-c, err := semver.NewConstraint("<= 1.2.3, >= 1.4")
-if err != nil {
-    // Handle constraint not being parseable.
-}
-
-v, err := semver.NewVersion("1.3")
-if err != nil {
-    // Handle version not being parseable.
-}
-
-// Validate a version against a constraint.
-a, msgs := c.Validate(v)
-// a is false
-for _, m := range msgs {
-    fmt.Println(m)
-
-    // Loops over the errors which would read
-    // "1.3 is greater than 1.2.3"
-    // "1.3 is less than 1.4"
-}
-```
-
-## Contribute
-
-If you find an issue or want to contribute please file an [issue](https://github.com/Masterminds/semver/issues)
-or [create a pull request](https://github.com/Masterminds/semver/pulls).
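To make the tilde and caret ranges above concrete, a short sketch (same assumptions as the previous example; constraint and version strings are illustrative):

```go
package main

import (
	"fmt"

	"github.com/Masterminds/semver/v3"
)

func main() {
	tilde, _ := semver.NewConstraint("~1.2.3") // >= 1.2.3, < 1.3.0
	caret, _ := semver.NewConstraint("^1.2.3") // >= 1.2.3, < 2.0.0

	fmt.Println(tilde.Check(semver.MustParse("1.2.9"))) // true
	fmt.Println(tilde.Check(semver.MustParse("1.3.0"))) // false: patch range exceeded
	fmt.Println(caret.Check(semver.MustParse("1.9.0"))) // true
	fmt.Println(caret.Check(semver.MustParse("2.0.0"))) // false: major version changed
}
```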
diff --git a/vendor/github.com/Masterminds/semver/v3/collection.go b/vendor/github.com/Masterminds/semver/v3/collection.go deleted file mode 100644 index a7823589..00000000 --- a/vendor/github.com/Masterminds/semver/v3/collection.go +++ /dev/null @@ -1,24 +0,0 @@ -package semver - -// Collection is a collection of Version instances and implements the sort -// interface. See the sort package for more details. -// https://golang.org/pkg/sort/ -type Collection []*Version - -// Len returns the length of a collection. The number of Version instances -// on the slice. -func (c Collection) Len() int { - return len(c) -} - -// Less is needed for the sort interface to compare two Version objects on the -// slice. If checks if one is less than the other. -func (c Collection) Less(i, j int) bool { - return c[i].LessThan(c[j]) -} - -// Swap is needed for the sort interface to replace the Version objects -// at two different positions in the slice. -func (c Collection) Swap(i, j int) { - c[i], c[j] = c[j], c[i] -} diff --git a/vendor/github.com/Masterminds/semver/v3/constraints.go b/vendor/github.com/Masterminds/semver/v3/constraints.go deleted file mode 100644 index 547613f0..00000000 --- a/vendor/github.com/Masterminds/semver/v3/constraints.go +++ /dev/null @@ -1,568 +0,0 @@ -package semver - -import ( - "bytes" - "errors" - "fmt" - "regexp" - "strings" -) - -// Constraints is one or more constraint that a semantic version can be -// checked against. -type Constraints struct { - constraints [][]*constraint -} - -// NewConstraint returns a Constraints instance that a Version instance can -// be checked against. If there is a parse error it will be returned. -func NewConstraint(c string) (*Constraints, error) { - - // Rewrite - ranges into a comparison operation. - c = rewriteRange(c) - - ors := strings.Split(c, "||") - or := make([][]*constraint, len(ors)) - for k, v := range ors { - - // TODO: Find a way to validate and fetch all the constraints in a simpler form - - // Validate the segment - if !validConstraintRegex.MatchString(v) { - return nil, fmt.Errorf("improper constraint: %s", v) - } - - cs := findConstraintRegex.FindAllString(v, -1) - if cs == nil { - cs = append(cs, v) - } - result := make([]*constraint, len(cs)) - for i, s := range cs { - pc, err := parseConstraint(s) - if err != nil { - return nil, err - } - - result[i] = pc - } - or[k] = result - } - - o := &Constraints{constraints: or} - return o, nil -} - -// Check tests if a version satisfies the constraints. -func (cs Constraints) Check(v *Version) bool { - // TODO(mattfarina): For v4 of this library consolidate the Check and Validate - // functions as the underlying functions make that possible now. - // loop over the ORs and check the inner ANDs - for _, o := range cs.constraints { - joy := true - for _, c := range o { - if check, _ := c.check(v); !check { - joy = false - break - } - } - - if joy { - return true - } - } - - return false -} - -// Validate checks if a version satisfies a constraint. If not a slice of -// reasons for the failure are returned in addition to a bool. -func (cs Constraints) Validate(v *Version) (bool, []error) { - // loop over the ORs and check the inner ANDs - var e []error - - // Capture the prerelease message only once. When it happens the first time - // this var is marked - var prerelesase bool - for _, o := range cs.constraints { - joy := true - for _, c := range o { - // Before running the check handle the case there the version is - // a prerelease and the check is not searching for prereleases. 
- if c.con.pre == "" && v.pre != "" { - if !prerelesase { - em := fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) - e = append(e, em) - prerelesase = true - } - joy = false - - } else { - - if _, err := c.check(v); err != nil { - e = append(e, err) - joy = false - } - } - } - - if joy { - return true, []error{} - } - } - - return false, e -} - -func (cs Constraints) String() string { - buf := make([]string, len(cs.constraints)) - var tmp bytes.Buffer - - for k, v := range cs.constraints { - tmp.Reset() - vlen := len(v) - for kk, c := range v { - tmp.WriteString(c.string()) - - // Space separate the AND conditions - if vlen > 1 && kk < vlen-1 { - tmp.WriteString(" ") - } - } - buf[k] = tmp.String() - } - - return strings.Join(buf, " || ") -} - -var constraintOps map[string]cfunc -var constraintRegex *regexp.Regexp -var constraintRangeRegex *regexp.Regexp - -// Used to find individual constraints within a multi-constraint string -var findConstraintRegex *regexp.Regexp - -// Used to validate an segment of ANDs is valid -var validConstraintRegex *regexp.Regexp - -const cvRegex string = `v?([0-9|x|X|\*]+)(\.[0-9|x|X|\*]+)?(\.[0-9|x|X|\*]+)?` + - `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + - `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` - -func init() { - constraintOps = map[string]cfunc{ - "": constraintTildeOrEqual, - "=": constraintTildeOrEqual, - "!=": constraintNotEqual, - ">": constraintGreaterThan, - "<": constraintLessThan, - ">=": constraintGreaterThanEqual, - "=>": constraintGreaterThanEqual, - "<=": constraintLessThanEqual, - "=<": constraintLessThanEqual, - "~": constraintTilde, - "~>": constraintTilde, - "^": constraintCaret, - } - - ops := `=||!=|>|<|>=|=>|<=|=<|~|~>|\^` - - constraintRegex = regexp.MustCompile(fmt.Sprintf( - `^\s*(%s)\s*(%s)\s*$`, - ops, - cvRegex)) - - constraintRangeRegex = regexp.MustCompile(fmt.Sprintf( - `\s*(%s)\s+-\s+(%s)\s*`, - cvRegex, cvRegex)) - - findConstraintRegex = regexp.MustCompile(fmt.Sprintf( - `(%s)\s*(%s)`, - ops, - cvRegex)) - - validConstraintRegex = regexp.MustCompile(fmt.Sprintf( - `^(\s*(%s)\s*(%s)\s*\,?)+$`, - ops, - cvRegex)) -} - -// An individual constraint -type constraint struct { - // The version used in the constraint check. For example, if a constraint - // is '<= 2.0.0' the con a version instance representing 2.0.0. 
- con *Version - - // The original parsed version (e.g., 4.x from != 4.x) - orig string - - // The original operator for the constraint - origfunc string - - // When an x is used as part of the version (e.g., 1.x) - minorDirty bool - dirty bool - patchDirty bool -} - -// Check if a version meets the constraint -func (c *constraint) check(v *Version) (bool, error) { - return constraintOps[c.origfunc](v, c) -} - -// String prints an individual constraint into a string -func (c *constraint) string() string { - return c.origfunc + c.orig -} - -type cfunc func(v *Version, c *constraint) (bool, error) - -func parseConstraint(c string) (*constraint, error) { - if len(c) > 0 { - m := constraintRegex.FindStringSubmatch(c) - if m == nil { - return nil, fmt.Errorf("improper constraint: %s", c) - } - - cs := &constraint{ - orig: m[2], - origfunc: m[1], - } - - ver := m[2] - minorDirty := false - patchDirty := false - dirty := false - if isX(m[3]) || m[3] == "" { - ver = "0.0.0" - dirty = true - } else if isX(strings.TrimPrefix(m[4], ".")) || m[4] == "" { - minorDirty = true - dirty = true - ver = fmt.Sprintf("%s.0.0%s", m[3], m[6]) - } else if isX(strings.TrimPrefix(m[5], ".")) || m[5] == "" { - dirty = true - patchDirty = true - ver = fmt.Sprintf("%s%s.0%s", m[3], m[4], m[6]) - } - - con, err := NewVersion(ver) - if err != nil { - - // The constraintRegex should catch any regex parsing errors. So, - // we should never get here. - return nil, errors.New("constraint Parser Error") - } - - cs.con = con - cs.minorDirty = minorDirty - cs.patchDirty = patchDirty - cs.dirty = dirty - - return cs, nil - } - - // The rest is the special case where an empty string was passed in which - // is equivalent to * or >=0.0.0 - con, err := StrictNewVersion("0.0.0") - if err != nil { - - // The constraintRegex should catch any regex parsing errors. So, - // we should never get here. - return nil, errors.New("constraint Parser Error") - } - - cs := &constraint{ - con: con, - orig: c, - origfunc: "", - minorDirty: false, - patchDirty: false, - dirty: true, - } - return cs, nil -} - -// Constraint functions -func constraintNotEqual(v *Version, c *constraint) (bool, error) { - if c.dirty { - - // If there is a pre-release on the version but the constraint isn't looking - // for them assume that pre-releases are not compatible. See issue 21 for - // more details. - if v.Prerelease() != "" && c.con.Prerelease() == "" { - return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) - } - - if c.con.Major() != v.Major() { - return true, nil - } - if c.con.Minor() != v.Minor() && !c.minorDirty { - return true, nil - } else if c.minorDirty { - return false, fmt.Errorf("%s is equal to %s", v, c.orig) - } else if c.con.Patch() != v.Patch() && !c.patchDirty { - return true, nil - } else if c.patchDirty { - // Need to handle prereleases if present - if v.Prerelease() != "" || c.con.Prerelease() != "" { - eq := comparePrerelease(v.Prerelease(), c.con.Prerelease()) != 0 - if eq { - return true, nil - } - return false, fmt.Errorf("%s is equal to %s", v, c.orig) - } - return false, fmt.Errorf("%s is equal to %s", v, c.orig) - } - } - - eq := v.Equal(c.con) - if eq { - return false, fmt.Errorf("%s is equal to %s", v, c.orig) - } - - return true, nil -} - -func constraintGreaterThan(v *Version, c *constraint) (bool, error) { - - // If there is a pre-release on the version but the constraint isn't looking - // for them assume that pre-releases are not compatible. 
See issue 21 for
-	// more details.
-	if v.Prerelease() != "" && c.con.Prerelease() == "" {
-		return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
-	}
-
-	var eq bool
-
-	if !c.dirty {
-		eq = v.Compare(c.con) == 1
-		if eq {
-			return true, nil
-		}
-		return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig)
-	}
-
-	if v.Major() > c.con.Major() {
-		return true, nil
-	} else if v.Major() < c.con.Major() {
-		return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig)
-	} else if c.minorDirty {
-		// This is a range case such as >11. When the version is something like
-		// 11.1.0 it is not > 11. For that we would need 12 or higher.
-		return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig)
-	} else if c.patchDirty {
-		// This is for ranges such as >11.1. A version of 11.1.1 is not greater,
-		// while one such as 11.2.1 is.
-		eq = v.Minor() > c.con.Minor()
-		if eq {
-			return true, nil
-		}
-		return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig)
-	}
-
-	// If we have gotten here we are not comparing pre-releases and can use the
-	// Compare function to accomplish that.
-	eq = v.Compare(c.con) == 1
-	if eq {
-		return true, nil
-	}
-	return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig)
-}
-
-func constraintLessThan(v *Version, c *constraint) (bool, error) {
-	// If there is a pre-release on the version but the constraint isn't looking
-	// for them assume that pre-releases are not compatible. See issue 21 for
-	// more details.
-	if v.Prerelease() != "" && c.con.Prerelease() == "" {
-		return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
-	}
-
-	eq := v.Compare(c.con) < 0
-	if eq {
-		return true, nil
-	}
-	return false, fmt.Errorf("%s is greater than or equal to %s", v, c.orig)
-}
-
-func constraintGreaterThanEqual(v *Version, c *constraint) (bool, error) {
-
-	// If there is a pre-release on the version but the constraint isn't looking
-	// for them assume that pre-releases are not compatible. See issue 21 for
-	// more details.
-	if v.Prerelease() != "" && c.con.Prerelease() == "" {
-		return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
-	}
-
-	eq := v.Compare(c.con) >= 0
-	if eq {
-		return true, nil
-	}
-	return false, fmt.Errorf("%s is less than %s", v, c.orig)
-}
-
-func constraintLessThanEqual(v *Version, c *constraint) (bool, error) {
-	// If there is a pre-release on the version but the constraint isn't looking
-	// for them assume that pre-releases are not compatible. See issue 21 for
-	// more details.
- if v.Prerelease() != "" && c.con.Prerelease() == "" { - return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) - } - - var eq bool - - if !c.dirty { - eq = v.Compare(c.con) <= 0 - if eq { - return true, nil - } - return false, fmt.Errorf("%s is greater than %s", v, c.orig) - } - - if v.Major() > c.con.Major() { - return false, fmt.Errorf("%s is greater than %s", v, c.orig) - } else if v.Major() == c.con.Major() && v.Minor() > c.con.Minor() && !c.minorDirty { - return false, fmt.Errorf("%s is greater than %s", v, c.orig) - } - - return true, nil -} - -// ~*, ~>* --> >= 0.0.0 (any) -// ~2, ~2.x, ~2.x.x, ~>2, ~>2.x ~>2.x.x --> >=2.0.0, <3.0.0 -// ~2.0, ~2.0.x, ~>2.0, ~>2.0.x --> >=2.0.0, <2.1.0 -// ~1.2, ~1.2.x, ~>1.2, ~>1.2.x --> >=1.2.0, <1.3.0 -// ~1.2.3, ~>1.2.3 --> >=1.2.3, <1.3.0 -// ~1.2.0, ~>1.2.0 --> >=1.2.0, <1.3.0 -func constraintTilde(v *Version, c *constraint) (bool, error) { - // If there is a pre-release on the version but the constraint isn't looking - // for them assume that pre-releases are not compatible. See issue 21 for - // more details. - if v.Prerelease() != "" && c.con.Prerelease() == "" { - return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) - } - - if v.LessThan(c.con) { - return false, fmt.Errorf("%s is less than %s", v, c.orig) - } - - // ~0.0.0 is a special case where all constraints are accepted. It's - // equivalent to >= 0.0.0. - if c.con.Major() == 0 && c.con.Minor() == 0 && c.con.Patch() == 0 && - !c.minorDirty && !c.patchDirty { - return true, nil - } - - if v.Major() != c.con.Major() { - return false, fmt.Errorf("%s does not have same major version as %s", v, c.orig) - } - - if v.Minor() != c.con.Minor() && !c.minorDirty { - return false, fmt.Errorf("%s does not have same major and minor version as %s", v, c.orig) - } - - return true, nil -} - -// When there is a .x (dirty) status it automatically opts in to ~. Otherwise -// it's a straight = -func constraintTildeOrEqual(v *Version, c *constraint) (bool, error) { - // If there is a pre-release on the version but the constraint isn't looking - // for them assume that pre-releases are not compatible. See issue 21 for - // more details. - if v.Prerelease() != "" && c.con.Prerelease() == "" { - return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) - } - - if c.dirty { - return constraintTilde(v, c) - } - - eq := v.Equal(c.con) - if eq { - return true, nil - } - - return false, fmt.Errorf("%s is not equal to %s", v, c.orig) -} - -// ^* --> (any) -// ^1.2.3 --> >=1.2.3 <2.0.0 -// ^1.2 --> >=1.2.0 <2.0.0 -// ^1 --> >=1.0.0 <2.0.0 -// ^0.2.3 --> >=0.2.3 <0.3.0 -// ^0.2 --> >=0.2.0 <0.3.0 -// ^0.0.3 --> >=0.0.3 <0.0.4 -// ^0.0 --> >=0.0.0 <0.1.0 -// ^0 --> >=0.0.0 <1.0.0 -func constraintCaret(v *Version, c *constraint) (bool, error) { - // If there is a pre-release on the version but the constraint isn't looking - // for them assume that pre-releases are not compatible. See issue 21 for - // more details. 
-	if v.Prerelease() != "" && c.con.Prerelease() == "" {
-		return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
-	}
-
-	// This less than handles prereleases
-	if v.LessThan(c.con) {
-		return false, fmt.Errorf("%s is less than %s", v, c.orig)
-	}
-
-	var eq bool
-
-	// ^ when the major > 0 is >=x.y.z < x+1
-	if c.con.Major() > 0 || c.minorDirty {
-
-		// ^ has to be within a major range for > 0. Everything less than was
-		// filtered out with the LessThan call above. This filters out those
-		// that are greater but not within the same major range.
-		eq = v.Major() == c.con.Major()
-		if eq {
-			return true, nil
-		}
-		return false, fmt.Errorf("%s does not have same major version as %s", v, c.orig)
-	}
-
-	// ^ when the major is 0 and minor > 0 is >=0.y.z < 0.y+1
-	if c.con.Major() == 0 && v.Major() > 0 {
-		return false, fmt.Errorf("%s does not have same major version as %s", v, c.orig)
-	}
-	// If the con Minor is > 0 it is not dirty
-	if c.con.Minor() > 0 || c.patchDirty {
-		eq = v.Minor() == c.con.Minor()
-		if eq {
-			return true, nil
-		}
-		return false, fmt.Errorf("%s does not have same minor version as %s. Expected minor versions to match when constraint major version is 0", v, c.orig)
-	}
-
-	// At this point the major is 0 and the minor is 0 and not dirty. The patch
-	// is not dirty so we need to check if they are equal. If they are not equal
-	// the constraint fails.
-	eq = c.con.Patch() == v.Patch()
-	if eq {
-		return true, nil
-	}
-	return false, fmt.Errorf("%s does not equal %s. Expect version and constraint to equal when major and minor versions are 0", v, c.orig)
-}
-
-func isX(x string) bool {
-	switch x {
-	case "x", "*", "X":
-		return true
-	default:
-		return false
-	}
-}
-
-func rewriteRange(i string) string {
-	m := constraintRangeRegex.FindAllStringSubmatch(i, -1)
-	if m == nil {
-		return i
-	}
-	o := i
-	for _, v := range m {
-		t := fmt.Sprintf(">= %s, <= %s", v[1], v[11])
-		o = strings.Replace(o, v[0], t, 1)
-	}
-
-	return o
-}
diff --git a/vendor/github.com/Masterminds/semver/v3/doc.go b/vendor/github.com/Masterminds/semver/v3/doc.go
deleted file mode 100644
index 391aa46b..00000000
--- a/vendor/github.com/Masterminds/semver/v3/doc.go
+++ /dev/null
@@ -1,184 +0,0 @@
-/*
-Package semver provides the ability to work with Semantic Versions (http://semver.org) in Go.
-
-Specifically it provides the ability to:
-
-  * Parse semantic versions
-  * Sort semantic versions
-  * Check if a semantic version fits within a set of constraints
-  * Optionally work with a `v` prefix
-
-Parsing Semantic Versions
-
-There are two functions that can parse semantic versions. The `StrictNewVersion`
-function only parses valid version 2 semantic versions as outlined in the
-specification. The `NewVersion` function attempts to coerce a version into a
-semantic version and parse it. For example, if there is a leading v or a version
-listed without all 3 parts (e.g. 1.2) it will attempt to coerce it into a valid
-semantic version (e.g., 1.2.0). In both cases a `Version` object is returned
-that can be sorted, compared, and used in constraints.
-
-When parsing a version an error is returned if there is an issue parsing the
-version. For example,
-
-    v, err := semver.NewVersion("1.2.3-beta.1+b345")
-
-The version object has methods to get the parts of the version, compare it to
-other versions, convert the version back into a string, and get the original
-string. For more details please see the documentation
-at https://godoc.org/github.com/Masterminds/semver.
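As a hedged sketch of the accessor methods mentioned above (the version string is illustrative):

```go
package main

import (
	"fmt"
	"log"

	"github.com/Masterminds/semver/v3"
)

func main() {
	v, err := semver.NewVersion("v1.2.3-beta.1+b345")
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(v.Major(), v.Minor(), v.Patch()) // 1 2 3
	fmt.Println(v.Prerelease())                  // beta.1
	fmt.Println(v.Metadata())                    // b345
	fmt.Println(v.Original())                    // v1.2.3-beta.1+b345
	fmt.Println(v.String())                      // 1.2.3-beta.1+b345 (leading v dropped)
}
```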
-
-Sorting Semantic Versions
-
-A set of versions can be sorted using the `sort` package from the standard library.
-For example,
-
-    raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2",}
-    vs := make([]*semver.Version, len(raw))
-    for i, r := range raw {
-        v, err := semver.NewVersion(r)
-        if err != nil {
-            t.Errorf("Error parsing version: %s", err)
-        }
-
-        vs[i] = v
-    }
-
-    sort.Sort(semver.Collection(vs))
-
-Checking Version Constraints and Comparing Versions
-
-There are two methods for comparing versions. One uses comparison methods on
-`Version` instances and the other uses Constraints. There are some important
-differences to note between these two methods of comparison.
-
-1. When two versions are compared using functions such as `Compare`, `LessThan`,
-   and others it will follow the specification and always include prereleases
-   within the comparison. It will provide an answer valid with the comparison
-   spec section at https://semver.org/#spec-item-11
-2. When constraint checking is used for checks or validation it will follow a
-   different set of rules that are common for ranges with tools like npm/js
-   and Rust/Cargo. This includes considering prereleases to be invalid if the
-   range does not include one. If you want to have it include pre-releases a
-   simple solution is to include `-0` in your range.
-3. Constraint ranges can have some complex rules including the shorthand use of
-   ~ and ^. For more details on those see the options below.
-
-There are differences between the two methods of checking versions because the
-comparison methods on `Version` follow the specification while comparison ranges
-are not part of the specification. Different packages and tools have taken it
-upon themselves to come up with range rules. This has resulted in differences.
-For example, npm/js and Cargo/Rust follow similar patterns while PHP has a
-different pattern for ^. The comparison features in this package follow the
-npm/js and Cargo/Rust lead because applications using it have followed similar
-patterns with their versions.
-
-Checking a version against version constraints is one of the most featureful
-parts of the package.
-
-    c, err := semver.NewConstraint(">= 1.2.3")
-    if err != nil {
-        // Handle constraint not being parsable.
-    }
-
-    v, err := semver.NewVersion("1.3")
-    if err != nil {
-        // Handle version not being parsable.
-    }
-    // Check if the version meets the constraints. The variable a will be true.
-    a := c.Check(v)
-
-Basic Comparisons
-
-There are two elements to the comparisons. First, a comparison string is a list
-of comma or space separated AND comparisons. These are then separated by || (OR)
-comparisons. For example, `">= 1.2 < 3.0.0 || >= 4.2.3"` is looking for a
-comparison that's greater than or equal to 1.2 and less than 3.0.0 or is
-greater than or equal to 4.2.3. This can also be written as
-`">= 1.2, < 3.0.0 || >= 4.2.3"`
-
-The basic comparisons are:
-
-  * `=`: equal (aliased to no operator)
-  * `!=`: not equal
-  * `>`: greater than
-  * `<`: less than
-  * `>=`: greater than or equal to
-  * `<=`: less than or equal to
-
-Hyphen Range Comparisons
-
-There are multiple methods to handle ranges and the first is hyphen ranges.
-These look like:
-
-  * `1.2 - 1.4.5` which is equivalent to `>= 1.2, <= 1.4.5`
-  * `2.3.4 - 4.5` which is equivalent to `>= 2.3.4 <= 4.5`
-
-Wildcards In Comparisons
-
-The `x`, `X`, and `*` characters can be used as a wildcard character. This works
-for all comparison operators. When used on the `=` operator it falls
-back to the tilde operation. For example,
-
-  * `1.2.x` is equivalent to `>= 1.2.0 < 1.3.0`
-  * `>= 1.2.x` is equivalent to `>= 1.2.0`
-  * `<= 2.x` is equivalent to `< 3`
-  * `*` is equivalent to `>= 0.0.0`
-
-Tilde Range Comparisons (Patch)
-
-The tilde (`~`) comparison operator is for patch level ranges when a minor
-version is specified and major level changes when the minor number is missing.
-For example,
-
-  * `~1.2.3` is equivalent to `>= 1.2.3 < 1.3.0`
-  * `~1` is equivalent to `>= 1, < 2`
-  * `~2.3` is equivalent to `>= 2.3 < 2.4`
-  * `~1.2.x` is equivalent to `>= 1.2.0 < 1.3.0`
-  * `~1.x` is equivalent to `>= 1 < 2`
-
-Caret Range Comparisons (Major)
-
-The caret (`^`) comparison operator is for major level changes once a stable
-(1.0.0) release has occurred. Prior to a 1.0.0 release the minor version acts
-as the API stability level. This is useful for comparisons of API versions, as a
-major change is API breaking. For example,
-
-  * `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0`
-  * `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0`
-  * `^2.3` is equivalent to `>= 2.3, < 3`
-  * `^2.x` is equivalent to `>= 2.0.0, < 3`
-  * `^0.2.3` is equivalent to `>=0.2.3 <0.3.0`
-  * `^0.2` is equivalent to `>=0.2.0 <0.3.0`
-  * `^0.0.3` is equivalent to `>=0.0.3 <0.0.4`
-  * `^0.0` is equivalent to `>=0.0.0 <0.1.0`
-  * `^0` is equivalent to `>=0.0.0 <1.0.0`
-
-Validation
-
-In addition to testing a version against a constraint, a version can be validated
-against a constraint. When validation fails a slice of errors containing why a
-version didn't meet the constraint is returned. For example,
-
-    c, err := semver.NewConstraint("<= 1.2.3, >= 1.4")
-    if err != nil {
-        // Handle constraint not being parseable.
-    }
-
-    v, err := semver.NewVersion("1.3")
-    if err != nil {
-        // Handle version not being parseable.
-    }
-
-    // Validate a version against a constraint.
-    a, msgs := c.Validate(v)
-    // a is false
-    for _, m := range msgs {
-        fmt.Println(m)
-
-        // Loops over the errors which would read
-        // "1.3 is greater than 1.2.3"
-        // "1.3 is less than 1.4"
-    }
-*/
-package semver
diff --git a/vendor/github.com/Masterminds/semver/v3/fuzz.go b/vendor/github.com/Masterminds/semver/v3/fuzz.go
deleted file mode 100644
index a242ad70..00000000
--- a/vendor/github.com/Masterminds/semver/v3/fuzz.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// +build gofuzz
-
-package semver
-
-func Fuzz(data []byte) int {
-	d := string(data)
-
-	// Test NewVersion
-	_, _ = NewVersion(d)
-
-	// Test StrictNewVersion
-	_, _ = StrictNewVersion(d)
-
-	// Test NewConstraint
-	_, _ = NewConstraint(d)
-
-	// The return value should be 0 normally, 1 if the priority in future tests
-	// should be increased, and -1 if future tests should skip passing in that
-	// data. We do not have a reason to change priority so 0 is always returned.
-	// There are example tests that do this.
-	return 0
-}
diff --git a/vendor/github.com/Masterminds/semver/v3/version.go b/vendor/github.com/Masterminds/semver/v3/version.go
deleted file mode 100644
index d6b9cda3..00000000
--- a/vendor/github.com/Masterminds/semver/v3/version.go
+++ /dev/null
@@ -1,606 +0,0 @@
-package semver
-
-import (
-	"bytes"
-	"database/sql/driver"
-	"encoding/json"
-	"errors"
-	"fmt"
-	"regexp"
-	"strconv"
-	"strings"
-)
-
-// The compiled version of the regex created at init() is cached here so it
-// only needs to be created once.
-var versionRegex *regexp.Regexp
-
-var (
-	// ErrInvalidSemVer is returned when a version is found to be invalid during
-	// parsing.
-	ErrInvalidSemVer = errors.New("Invalid Semantic Version")
-
-	// ErrEmptyString is returned when an empty string is passed in for parsing.
-	ErrEmptyString = errors.New("Version string empty")
-
-	// ErrInvalidCharacters is returned when invalid characters are found as
-	// part of a version
-	ErrInvalidCharacters = errors.New("Invalid characters in version")
-
-	// ErrSegmentStartsZero is returned when a version segment starts with 0.
-	// This is invalid in SemVer.
-	ErrSegmentStartsZero = errors.New("Version segment starts with 0")
-
-	// ErrInvalidMetadata is returned when the metadata is an invalid format
-	ErrInvalidMetadata = errors.New("Invalid Metadata string")
-
-	// ErrInvalidPrerelease is returned when the pre-release is an invalid format
-	ErrInvalidPrerelease = errors.New("Invalid Prerelease string")
-)
-
-// semVerRegex is the regular expression used to parse a semantic version.
-const semVerRegex string = `v?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?` +
-	`(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` +
-	`(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?`
-
-// Version represents a single semantic version.
-type Version struct {
-	major, minor, patch uint64
-	pre                 string
-	metadata            string
-	original            string
-}
-
-func init() {
-	versionRegex = regexp.MustCompile("^" + semVerRegex + "$")
-}
-
-const num string = "0123456789"
-const allowed string = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-" + num
-
-// StrictNewVersion parses a given version and returns an instance of Version or
-// an error if unable to parse the version. Only parses valid semantic versions.
-// Performs checking that can find errors within the version.
-// If you want to coerce a version, such as 1 or 1.2, and parse it as the 1.x
-// releases of semver did, use the NewVersion() function.
-func StrictNewVersion(v string) (*Version, error) {
-	// Parsing here does not use RegEx in order to increase performance and reduce
-	// allocations.
-
-	if len(v) == 0 {
-		return nil, ErrEmptyString
-	}
-
-	// Split the parts into [0]major, [1]minor, and [2]patch,prerelease,build
-	parts := strings.SplitN(v, ".", 3)
-	if len(parts) != 3 {
-		return nil, ErrInvalidSemVer
-	}
-
-	sv := &Version{
-		original: v,
-	}
-
-	// check for prerelease or build metadata
-	var extra []string
-	if strings.ContainsAny(parts[2], "-+") {
-		// Start with the build metadata first as it needs to be on the right
-		extra = strings.SplitN(parts[2], "+", 2)
-		if len(extra) > 1 {
-			// build metadata found
-			sv.metadata = extra[1]
-			parts[2] = extra[0]
-		}
-
-		extra = strings.SplitN(parts[2], "-", 2)
-		if len(extra) > 1 {
-			// prerelease found
-			sv.pre = extra[1]
-			parts[2] = extra[0]
-		}
-	}
-
-	// Validate the number segments are valid. This includes only having positive
-	// numbers and no leading 0's.
-	for _, p := range parts {
-		if !containsOnly(p, num) {
-			return nil, ErrInvalidCharacters
-		}
-
-		if len(p) > 1 && p[0] == '0' {
-			return nil, ErrSegmentStartsZero
-		}
-	}
-
-	// Extract the major, minor, and patch elements onto the returned Version
-	var err error
-	sv.major, err = strconv.ParseUint(parts[0], 10, 64)
-	if err != nil {
-		return nil, err
-	}
-
-	sv.minor, err = strconv.ParseUint(parts[1], 10, 64)
-	if err != nil {
-		return nil, err
-	}
-
-	sv.patch, err = strconv.ParseUint(parts[2], 10, 64)
-	if err != nil {
-		return nil, err
-	}
-
-	// No prerelease or build metadata found so returning now as a fastpath.
- if sv.pre == "" && sv.metadata == "" { - return sv, nil - } - - if sv.pre != "" { - if err = validatePrerelease(sv.pre); err != nil { - return nil, err - } - } - - if sv.metadata != "" { - if err = validateMetadata(sv.metadata); err != nil { - return nil, err - } - } - - return sv, nil -} - -// NewVersion parses a given version and returns an instance of Version or -// an error if unable to parse the version. If the version is SemVer-ish it -// attempts to convert it to SemVer. If you want to validate it was a strict -// semantic version at parse time see StrictNewVersion(). -func NewVersion(v string) (*Version, error) { - m := versionRegex.FindStringSubmatch(v) - if m == nil { - return nil, ErrInvalidSemVer - } - - sv := &Version{ - metadata: m[8], - pre: m[5], - original: v, - } - - var err error - sv.major, err = strconv.ParseUint(m[1], 10, 64) - if err != nil { - return nil, fmt.Errorf("Error parsing version segment: %s", err) - } - - if m[2] != "" { - sv.minor, err = strconv.ParseUint(strings.TrimPrefix(m[2], "."), 10, 64) - if err != nil { - return nil, fmt.Errorf("Error parsing version segment: %s", err) - } - } else { - sv.minor = 0 - } - - if m[3] != "" { - sv.patch, err = strconv.ParseUint(strings.TrimPrefix(m[3], "."), 10, 64) - if err != nil { - return nil, fmt.Errorf("Error parsing version segment: %s", err) - } - } else { - sv.patch = 0 - } - - // Perform some basic due diligence on the extra parts to ensure they are - // valid. - - if sv.pre != "" { - if err = validatePrerelease(sv.pre); err != nil { - return nil, err - } - } - - if sv.metadata != "" { - if err = validateMetadata(sv.metadata); err != nil { - return nil, err - } - } - - return sv, nil -} - -// MustParse parses a given version and panics on error. -func MustParse(v string) *Version { - sv, err := NewVersion(v) - if err != nil { - panic(err) - } - return sv -} - -// String converts a Version object to a string. -// Note, if the original version contained a leading v this version will not. -// See the Original() method to retrieve the original value. Semantic Versions -// don't contain a leading v per the spec. Instead it's optional on -// implementation. -func (v Version) String() string { - var buf bytes.Buffer - - fmt.Fprintf(&buf, "%d.%d.%d", v.major, v.minor, v.patch) - if v.pre != "" { - fmt.Fprintf(&buf, "-%s", v.pre) - } - if v.metadata != "" { - fmt.Fprintf(&buf, "+%s", v.metadata) - } - - return buf.String() -} - -// Original returns the original value passed in to be parsed. -func (v *Version) Original() string { - return v.original -} - -// Major returns the major version. -func (v Version) Major() uint64 { - return v.major -} - -// Minor returns the minor version. -func (v Version) Minor() uint64 { - return v.minor -} - -// Patch returns the patch version. -func (v Version) Patch() uint64 { - return v.patch -} - -// Prerelease returns the pre-release version. -func (v Version) Prerelease() string { - return v.pre -} - -// Metadata returns the metadata on the version. -func (v Version) Metadata() string { - return v.metadata -} - -// originalVPrefix returns the original 'v' prefix if any. -func (v Version) originalVPrefix() string { - - // Note, only lowercase v is supported as a prefix by the parser. - if v.original != "" && v.original[:1] == "v" { - return v.original[:1] - } - return "" -} - -// IncPatch produces the next patch version. -// If the current version does not have prerelease/metadata information, -// it unsets metadata and prerelease values, increments patch number. 
-// If the current version has any of prerelease or metadata information, -// it unsets both values and keeps current patch value -func (v Version) IncPatch() Version { - vNext := v - // according to http://semver.org/#spec-item-9 - // Pre-release versions have a lower precedence than the associated normal version. - // according to http://semver.org/#spec-item-10 - // Build metadata SHOULD be ignored when determining version precedence. - if v.pre != "" { - vNext.metadata = "" - vNext.pre = "" - } else { - vNext.metadata = "" - vNext.pre = "" - vNext.patch = v.patch + 1 - } - vNext.original = v.originalVPrefix() + "" + vNext.String() - return vNext -} - -// IncMinor produces the next minor version. -// Sets patch to 0. -// Increments minor number. -// Unsets metadata. -// Unsets prerelease status. -func (v Version) IncMinor() Version { - vNext := v - vNext.metadata = "" - vNext.pre = "" - vNext.patch = 0 - vNext.minor = v.minor + 1 - vNext.original = v.originalVPrefix() + "" + vNext.String() - return vNext -} - -// IncMajor produces the next major version. -// Sets patch to 0. -// Sets minor to 0. -// Increments major number. -// Unsets metadata. -// Unsets prerelease status. -func (v Version) IncMajor() Version { - vNext := v - vNext.metadata = "" - vNext.pre = "" - vNext.patch = 0 - vNext.minor = 0 - vNext.major = v.major + 1 - vNext.original = v.originalVPrefix() + "" + vNext.String() - return vNext -} - -// SetPrerelease defines the prerelease value. -// Value must not include the required 'hyphen' prefix. -func (v Version) SetPrerelease(prerelease string) (Version, error) { - vNext := v - if len(prerelease) > 0 { - if err := validatePrerelease(prerelease); err != nil { - return vNext, err - } - } - vNext.pre = prerelease - vNext.original = v.originalVPrefix() + "" + vNext.String() - return vNext, nil -} - -// SetMetadata defines metadata value. -// Value must not include the required 'plus' prefix. -func (v Version) SetMetadata(metadata string) (Version, error) { - vNext := v - if len(metadata) > 0 { - if err := validateMetadata(metadata); err != nil { - return vNext, err - } - } - vNext.metadata = metadata - vNext.original = v.originalVPrefix() + "" + vNext.String() - return vNext, nil -} - -// LessThan tests if one version is less than another one. -func (v *Version) LessThan(o *Version) bool { - return v.Compare(o) < 0 -} - -// GreaterThan tests if one version is greater than another one. -func (v *Version) GreaterThan(o *Version) bool { - return v.Compare(o) > 0 -} - -// Equal tests if two versions are equal to each other. -// Note, versions can be equal with different metadata since metadata -// is not considered part of the comparable version. -func (v *Version) Equal(o *Version) bool { - return v.Compare(o) == 0 -} - -// Compare compares this version to another one. It returns -1, 0, or 1 if -// the version smaller, equal, or larger than the other version. -// -// Versions are compared by X.Y.Z. Build metadata is ignored. Prerelease is -// lower than the version without a prerelease. Compare always takes into account -// prereleases. If you want to work with ranges using typical range syntaxes that -// skip prereleases if the range is not looking for them use constraints. -func (v *Version) Compare(o *Version) int { - // Compare the major, minor, and patch version for differences. If a - // difference is found return the comparison. 
- if d := compareSegment(v.Major(), o.Major()); d != 0 { - return d - } - if d := compareSegment(v.Minor(), o.Minor()); d != 0 { - return d - } - if d := compareSegment(v.Patch(), o.Patch()); d != 0 { - return d - } - - // At this point the major, minor, and patch versions are the same. - ps := v.pre - po := o.Prerelease() - - if ps == "" && po == "" { - return 0 - } - if ps == "" { - return 1 - } - if po == "" { - return -1 - } - - return comparePrerelease(ps, po) -} - -// UnmarshalJSON implements JSON.Unmarshaler interface. -func (v *Version) UnmarshalJSON(b []byte) error { - var s string - if err := json.Unmarshal(b, &s); err != nil { - return err - } - temp, err := NewVersion(s) - if err != nil { - return err - } - v.major = temp.major - v.minor = temp.minor - v.patch = temp.patch - v.pre = temp.pre - v.metadata = temp.metadata - v.original = temp.original - return nil -} - -// MarshalJSON implements JSON.Marshaler interface. -func (v Version) MarshalJSON() ([]byte, error) { - return json.Marshal(v.String()) -} - -// Scan implements the SQL.Scanner interface. -func (v *Version) Scan(value interface{}) error { - var s string - s, _ = value.(string) - temp, err := NewVersion(s) - if err != nil { - return err - } - v.major = temp.major - v.minor = temp.minor - v.patch = temp.patch - v.pre = temp.pre - v.metadata = temp.metadata - v.original = temp.original - return nil -} - -// Value implements the Driver.Valuer interface. -func (v Version) Value() (driver.Value, error) { - return v.String(), nil -} - -func compareSegment(v, o uint64) int { - if v < o { - return -1 - } - if v > o { - return 1 - } - - return 0 -} - -func comparePrerelease(v, o string) int { - - // split the prelease versions by their part. The separator, per the spec, - // is a . - sparts := strings.Split(v, ".") - oparts := strings.Split(o, ".") - - // Find the longer length of the parts to know how many loop iterations to - // go through. - slen := len(sparts) - olen := len(oparts) - - l := slen - if olen > slen { - l = olen - } - - // Iterate over each part of the prereleases to compare the differences. - for i := 0; i < l; i++ { - // Since the lentgh of the parts can be different we need to create - // a placeholder. This is to avoid out of bounds issues. - stemp := "" - if i < slen { - stemp = sparts[i] - } - - otemp := "" - if i < olen { - otemp = oparts[i] - } - - d := comparePrePart(stemp, otemp) - if d != 0 { - return d - } - } - - // Reaching here means two versions are of equal value but have different - // metadata (the part following a +). They are not identical in string form - // but the version comparison finds them to be equal. - return 0 -} - -func comparePrePart(s, o string) int { - // Fastpath if they are equal - if s == o { - return 0 - } - - // When s or o are empty we can use the other in an attempt to determine - // the response. - if s == "" { - if o != "" { - return -1 - } - return 1 - } - - if o == "" { - if s != "" { - return 1 - } - return -1 - } - - // When comparing strings "99" is greater than "103". To handle - // cases like this we need to detect numbers and compare them. According - // to the semver spec, numbers are always positive. If there is a - at the - // start like -99 this is to be evaluated as an alphanum. numbers always - // have precedence over alphanum. Parsing as Uints because negative numbers - // are ignored. 
- - oi, n1 := strconv.ParseUint(o, 10, 64) - si, n2 := strconv.ParseUint(s, 10, 64) - - // The case where both are strings compare the strings - if n1 != nil && n2 != nil { - if s > o { - return 1 - } - return -1 - } else if n1 != nil { - // o is a string and s is a number - return -1 - } else if n2 != nil { - // s is a string and o is a number - return 1 - } - // Both are numbers - if si > oi { - return 1 - } - return -1 - -} - -// Like strings.ContainsAny but does an only instead of any. -func containsOnly(s string, comp string) bool { - return strings.IndexFunc(s, func(r rune) bool { - return !strings.ContainsRune(comp, r) - }) == -1 -} - -// From the spec, "Identifiers MUST comprise only -// ASCII alphanumerics and hyphen [0-9A-Za-z-]. Identifiers MUST NOT be empty. -// Numeric identifiers MUST NOT include leading zeroes.". These segments can -// be dot separated. -func validatePrerelease(p string) error { - eparts := strings.Split(p, ".") - for _, p := range eparts { - if containsOnly(p, num) { - if len(p) > 1 && p[0] == '0' { - return ErrSegmentStartsZero - } - } else if !containsOnly(p, allowed) { - return ErrInvalidPrerelease - } - } - - return nil -} - -// From the spec, "Build metadata MAY be denoted by -// appending a plus sign and a series of dot separated identifiers immediately -// following the patch or pre-release version. Identifiers MUST comprise only -// ASCII alphanumerics and hyphen [0-9A-Za-z-]. Identifiers MUST NOT be empty." -func validateMetadata(m string) error { - eparts := strings.Split(m, ".") - for _, p := range eparts { - if !containsOnly(p, allowed) { - return ErrInvalidMetadata - } - } - return nil -} diff --git a/vendor/github.com/Masterminds/sprig/v3/.gitignore b/vendor/github.com/Masterminds/sprig/v3/.gitignore deleted file mode 100644 index 5e3002f8..00000000 --- a/vendor/github.com/Masterminds/sprig/v3/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -vendor/ -/.glide diff --git a/vendor/github.com/Masterminds/sprig/v3/CHANGELOG.md b/vendor/github.com/Masterminds/sprig/v3/CHANGELOG.md deleted file mode 100644 index fcdd4e88..00000000 --- a/vendor/github.com/Masterminds/sprig/v3/CHANGELOG.md +++ /dev/null @@ -1,370 +0,0 @@ -# Changelog - -## Release 3.2.1 (2021-02-04) - -### Changed - -- Upgraded `Masterminds/goutils` to `v1.1.1`. see the [Security Advisory](https://github.com/Masterminds/goutils/security/advisories/GHSA-xg2h-wx96-xgxr) - -## Release 3.2.0 (2020-12-14) - -### Added - -- #211: Added randInt function (thanks @kochurovro) -- #223: Added fromJson and mustFromJson functions (thanks @mholt) -- #242: Added a bcrypt function (thanks @robbiet480) -- #253: Added randBytes function (thanks @MikaelSmith) -- #254: Added dig function for dicts (thanks @nyarly) -- #257: Added regexQuoteMeta for quoting regex metadata (thanks @rheaton) -- #261: Added filepath functions osBase, osDir, osExt, osClean, osIsAbs (thanks @zugl) -- #268: Added and and all functions for testing conditions (thanks @phuslu) -- #181: Added float64 arithmetic addf, add1f, subf, divf, mulf, maxf, and minf - (thanks @andrewmostello) -- #265: Added chunk function to split array into smaller arrays (thanks @karelbilek) -- #270: Extend certificate functions to handle non-RSA keys + add support for - ed25519 keys (thanks @misberner) - -### Changed - -- Removed testing and support for Go 1.12. 
ed25519 support requires Go 1.13 or newer -- Using semver 3.1.1 and mergo 0.3.11 - -### Fixed - -- #249: Fix htmlDateInZone example (thanks @spawnia) - -NOTE: The dependency github.com/imdario/mergo reverted the breaking change in -0.3.9 via 0.3.10 release. - -## Release 3.1.0 (2020-04-16) - -NOTE: The dependency github.com/imdario/mergo made a behavior change in 0.3.9 -that impacts sprig functionality. Do not use sprig with a version newer than 0.3.8. - -### Added - -- #225: Added support for generating htpasswd hash (thanks @rustycl0ck) -- #224: Added duration filter (thanks @frebib) -- #205: Added `seq` function (thanks @thadc23) - -### Changed - -- #203: Unlambda functions with correct signature (thanks @muesli) -- #236: Updated the license formatting for GitHub display purposes -- #238: Updated package dependency versions. Note, mergo not updated to 0.3.9 - as it causes a breaking change for sprig. That issue is tracked at - https://github.com/imdario/mergo/issues/139 - -### Fixed - -- #229: Fix `seq` example in docs (thanks @kalmant) - -## Release 3.0.2 (2019-12-13) - -### Fixed - -- #220: Updating to semver v3.0.3 to fix issue with <= ranges -- #218: fix typo elyptical->elliptic in ecdsa key description (thanks @laverya) - -## Release 3.0.1 (2019-12-08) - -### Fixed - -- #212: Updated semver fixing broken constraint checking with ^0.0 - -## Release 3.0.0 (2019-10-02) - -### Added - -- #187: Added durationRound function (thanks @yjp20) -- #189: Added numerous template functions that return errors rather than panic (thanks @nrvnrvn) -- #193: Added toRawJson support (thanks @Dean-Coakley) -- #197: Added get support to dicts (thanks @Dean-Coakley) - -### Changed - -- #186: Moving dependency management to Go modules -- #186: Updated semver to v3. This has changes in the way ^ is handled -- #194: Updated documentation on merging and how it copies. 
Added example using deepCopy -- #196: trunc now supports negative values (thanks @Dean-Coakley) - -## Release 2.22.0 (2019-10-02) - -### Added - -- #173: Added getHostByName function to resolve dns names to ips (thanks @fcgravalos) -- #195: Added deepCopy function for use with dicts - -### Changed - -- Updated merge and mergeOverwrite documentation to explain copying and how to - use deepCopy with it - -## Release 2.21.0 (2019-09-18) - -### Added - -- #122: Added encryptAES/decryptAES functions (thanks @n0madic) -- #128: Added toDecimal support (thanks @Dean-Coakley) -- #169: Added list contcat (thanks @astorath) -- #174: Added deepEqual function (thanks @bonifaido) -- #170: Added url parse and join functions (thanks @astorath) - -### Changed - -- #171: Updated glide config for Google UUID to v1 and to add ranges to semver and testify - -### Fixed - -- #172: Fix semver wildcard example (thanks @piepmatz) -- #175: Fix dateInZone doc example (thanks @s3than) - -## Release 2.20.0 (2019-06-18) - -### Added - -- #164: Adding function to get unix epoch for a time (@mattfarina) -- #166: Adding tests for date_in_zone (@mattfarina) - -### Changed - -- #144: Fix function comments based on best practices from Effective Go (@CodeLingoTeam) -- #150: Handles pointer type for time.Time in "htmlDate" (@mapreal19) -- #161, #157, #160, #153, #158, #156, #155, #159, #152 documentation updates (@badeadan) - -### Fixed - -## Release 2.19.0 (2019-03-02) - -IMPORTANT: This release reverts a change from 2.18.0 - -In the previous release (2.18), we prematurely merged a partial change to the crypto functions that led to creating two sets of crypto functions (I blame @technosophos -- since that's me). This release rolls back that change, and does what was originally intended: It alters the existing crypto functions to use secure random. - -We debated whether this classifies as a change worthy of major revision, but given the proximity to the last release, we have decided that treating 2.18 as a faulty release is the correct course of action. We apologize for any inconvenience. - -### Changed - -- Fix substr panic 35fb796 (Alexey igrychev) -- Remove extra period 1eb7729 (Matthew Lorimor) -- Make random string functions use crypto by default 6ceff26 (Matthew Lorimor) -- README edits/fixes/suggestions 08fe136 (Lauri Apple) - - -## Release 2.18.0 (2019-02-12) - -### Added - -- Added mergeOverwrite function -- cryptographic functions that use secure random (see fe1de12) - -### Changed - -- Improve documentation of regexMatch function, resolves #139 90b89ce (Jan Tagscherer) -- Handle has for nil list 9c10885 (Daniel Cohen) -- Document behaviour of mergeOverwrite fe0dbe9 (Lukas Rieder) -- doc: adds missing documentation. 4b871e6 (Fernandez Ludovic) -- Replace outdated goutils imports 01893d2 (Matthew Lorimor) -- Surface crypto secure random strings from goutils fe1de12 (Matthew Lorimor) -- Handle untyped nil values as paramters to string functions 2b2ec8f (Morten Torkildsen) - -### Fixed - -- Fix dict merge issue and provide mergeOverwrite .dst .src1 to overwrite from src -> dst 4c59c12 (Lukas Rieder) -- Fix substr var names and comments d581f80 (Dean Coakley) -- Fix substr documentation 2737203 (Dean Coakley) - -## Release 2.17.1 (2019-01-03) - -### Fixed - -The 2.17.0 release did not have a version pinned for xstrings, which caused compilation failures when xstrings < 1.2 was used. This adds the correct version string to glide.yaml. 
- -## Release 2.17.0 (2019-01-03) - -### Added - -- adds alder32sum function and test 6908fc2 (marshallford) -- Added kebabcase function ca331a1 (Ilyes512) - -### Changed - -- Update goutils to 1.1.0 4e1125d (Matt Butcher) - -### Fixed - -- Fix 'has' documentation e3f2a85 (dean-coakley) -- docs(dict): fix typo in pick example dc424f9 (Dustin Specker) -- fixes spelling errors... not sure how that happened 4cf188a (marshallford) - -## Release 2.16.0 (2018-08-13) - -### Added - -- add splitn function fccb0b0 (Helgi Þorbjörnsson) -- Add slice func df28ca7 (gongdo) -- Generate serial number a3bdffd (Cody Coons) -- Extract values of dict with values function df39312 (Lawrence Jones) - -### Changed - -- Modify panic message for list.slice ae38335 (gongdo) -- Minor improvement in code quality - Removed an unreachable piece of code at defaults.go#L26:6 - Resolve formatting issues. 5834241 (Abhishek Kashyap) -- Remove duplicated documentation 1d97af1 (Matthew Fisher) -- Test on go 1.11 49df809 (Helgi Þormar Þorbjörnsson) - -### Fixed - -- Fix file permissions c5f40b5 (gongdo) -- Fix example for buildCustomCert 7779e0d (Tin Lam) - -## Release 2.15.0 (2018-04-02) - -### Added - -- #68 and #69: Add json helpers to docs (thanks @arunvelsriram) -- #66: Add ternary function (thanks @binoculars) -- #67: Allow keys function to take multiple dicts (thanks @binoculars) -- #89: Added sha1sum to crypto function (thanks @benkeil) -- #81: Allow customizing Root CA that used by genSignedCert (thanks @chenzhiwei) -- #92: Add travis testing for go 1.10 -- #93: Adding appveyor config for windows testing - -### Changed - -- #90: Updating to more recent dependencies -- #73: replace satori/go.uuid with google/uuid (thanks @petterw) - -### Fixed - -- #76: Fixed documentation typos (thanks @Thiht) -- Fixed rounding issue on the `ago` function. Note, the removes support for Go 1.8 and older - -## Release 2.14.1 (2017-12-01) - -### Fixed - -- #60: Fix typo in function name documentation (thanks @neil-ca-moore) -- #61: Removing line with {{ due to blocking github pages genertion -- #64: Update the list functions to handle int, string, and other slices for compatibility - -## Release 2.14.0 (2017-10-06) - -This new version of Sprig adds a set of functions for generating and working with SSL certificates. 
-
-- `genCA` generates an SSL Certificate Authority
-- `genSelfSignedCert` generates an SSL self-signed certificate
-- `genSignedCert` generates an SSL certificate and key based on a given CA
-
-## Release 2.13.0 (2017-09-18)
-
-This release adds new functions, including:
-
-- `regexMatch`, `regexFindAll`, `regexFind`, `regexReplaceAll`, `regexReplaceAllLiteral`, and `regexSplit` to work with regular expressions
-- `floor`, `ceil`, and `round` math functions
-- `toDate` converts a string to a date
-- `nindent` is just like `indent` but also prepends a new line
-- `ago` returns the time elapsed since `time.Now`
-
-### Added
-
-- #40: Added basic regex functionality (thanks @alanquillin)
-- #41: Added ceil, floor, and round functions (thanks @alanquillin)
-- #48: Added toDate function (thanks @andreynering)
-- #50: Added nindent function (thanks @binoculars)
-- #46: Added ago function (thanks @slayer)
-
-### Changed
-
-- #51: Updated godocs to include new string functions (thanks @curtisallen)
-- #49: Added ability to merge multiple dicts (thanks @binoculars)
-
-## Release 2.12.0 (2017-05-17)
-
-- `snakecase`, `camelcase`, and `shuffle` are three new string functions
-- `fail` allows you to bail out of a template render when conditions are not met
-
-## Release 2.11.0 (2017-05-02)
-
-- Added `toJson` and `toPrettyJson`
-- Added `merge`
-- Refactored documentation
-
-## Release 2.10.0 (2017-03-15)
-
-- Added `semver` and `semverCompare` for Semantic Versions
-- `list` replaces `tuple`
-- Fixed issue with `join`
-- Added `first`, `last`, `initial`, `rest`, `prepend`, `append`, `toString`, `toStrings`, `sortAlpha`, `reverse`, `coalesce`, `pluck`, `pick`, `compact`, `keys`, `omit`, `uniq`, `has`, `without`
-
-## Release 2.9.0 (2017-02-23)
-
-- Added `splitList` to split a list
-- Added crypto functions of `genPrivateKey` and `derivePassword`
-
-## Release 2.8.0 (2016-12-21)
-
-- Added access to several path functions (`base`, `dir`, `clean`, `ext`, and `abs`)
-- Added functions for _mutating_ dictionaries (`set`, `unset`, `hasKey`)
-
-## Release 2.7.0 (2016-12-01)
-
-- Added `sha256sum` to generate a hash of an input
-- Added functions to convert a numeric or string to `int`, `int64`, `float64`
-
-## Release 2.6.0 (2016-10-03)
-
-- Added a `uuidv4` template function for generating UUIDs inside of a template.
-
-## Release 2.5.0 (2016-08-19)
-
-- New `trimSuffix`, `trimPrefix`, `hasSuffix`, and `hasPrefix` functions
-- New aliases have been added for a few functions that didn't follow the naming conventions (`trimAll` and `abbrevBoth`)
-- `trimall` and `abbrevboth` (notice the case) are deprecated and will be removed in 3.0.0
-
-## Release 2.4.0 (2016-08-16)
-
-- Adds two functions: `until` and `untilStep`
-
-## Release 2.3.0 (2016-06-21)
-
-- cat: Concatenate strings with whitespace separators.
-- replace: Replace parts of a string: `replace " " "-" "Me First"` renders "Me-First"
-- plural: Format plurals: `len "foo" | plural "one foo" "many foos"` renders "many foos"
-- indent: Indent blocks of text in a way that is sensitive to "\n" characters.
-
-## Release 2.2.0 (2016-04-21)
-
-- Added a `genPrivateKey` function (Thanks @bacongobbler)
-
-## Release 2.1.0 (2016-03-30)
-
-- `default` now prints the default value when it does not receive a value down the pipeline. It is much safer now to do `{{.Foo | default "bar"}}`.
-- Added accessors for "hermetic" functions. These return only functions that, when given the same input, produce the same output.
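The certificate helpers listed for 2.14.0 are defined further down in this patch (see the deleted crypto.go): `genCA` takes a common name and validity in days, and `genSignedCert` takes a common name, IP SANs, DNS SANs, validity, and the CA object, whose `.Cert` and `.Key` fields hold PEM strings. A hedged sketch of chaining them, with a host program of my own invention:

```go
package main

import (
	"os"
	"text/template"

	"github.com/Masterminds/sprig/v3"
)

func main() {
	// Generate a throwaway CA, then a leaf certificate signed by it.
	const tpl = `{{ $ca := genCA "example-ca" 365 }}` +
		`{{ $leaf := genSignedCert "example.test" (list "127.0.0.1") (list "example.test") 30 $ca }}` +
		`{{ $leaf.Cert }}`

	t := template.Must(template.New("certs").Funcs(sprig.TxtFuncMap()).Parse(tpl))
	if err := t.Execute(os.Stdout, nil); err != nil {
		panic(err)
	}
	// Prints a PEM-encoded certificate signed by the generated CA.
}
```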
-
-## Release 2.0.0 (2016-03-29)
-
-Because we switched from `int` to `int64` as the return value for all integer math functions, the library's major version number has been incremented.
-
-- `min` complements `max` (formerly `biggest`)
-- `empty` indicates that a value is the empty value for its type
-- `tuple` creates a tuple inside of a template: `{{$t := tuple "a" "b" "c"}}`
-- `dict` creates a dictionary inside of a template `{{$d := dict "key1" "val1" "key2" "val2"}}`
-- Date formatters have been added for HTML dates (as used in `date` input fields)
-- Integer math functions can convert from a number of types, including `string` (via `strconv.ParseInt`).
-
-## Release 1.2.0 (2016-02-01)
-
-- Added quote and squote
-- Added b32enc and b32dec
-- add now takes varargs
-- biggest now takes varargs
-
-## Release 1.1.0 (2015-12-29)
-
-- #4: Added contains function: strings.Contains, but with the arguments
-  switched to simplify common pipelines. (thanks krancour)
-- Added Travis-CI testing support
-
-## Release 1.0.0 (2015-12-23)
-
-- Initial release
diff --git a/vendor/github.com/Masterminds/sprig/v3/LICENSE.txt b/vendor/github.com/Masterminds/sprig/v3/LICENSE.txt
deleted file mode 100644
index f311b1ea..00000000
--- a/vendor/github.com/Masterminds/sprig/v3/LICENSE.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-Copyright (C) 2013-2020 Masterminds
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
diff --git a/vendor/github.com/Masterminds/sprig/v3/Makefile b/vendor/github.com/Masterminds/sprig/v3/Makefile
deleted file mode 100644
index 78d409cd..00000000
--- a/vendor/github.com/Masterminds/sprig/v3/Makefile
+++ /dev/null
@@ -1,9 +0,0 @@
-.PHONY: test
-test:
-	@echo "==> Running tests"
-	GO111MODULE=on go test -v
-
-.PHONY: test-cover
-test-cover:
-	@echo "==> Running Tests with coverage"
-	GO111MODULE=on go test -cover .
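Two conventions from these early releases recur throughout the deleted sources below: pipeline-friendly argument order (the 1.1.0 note on `contains` taking the substring first) and `default` substituting a fallback for an empty pipeline value. A small illustration, again with a host program that is my own rather than part of the patch:

```go
package main

import (
	"os"
	"text/template"

	"github.com/Masterminds/sprig/v3"
)

func main() {
	// contains takes the substring first so it pipes naturally;
	// default fills in a fallback when .Name is missing or empty.
	const tpl = `{{ .Name | default "anonymous" }} / {{ "Me First" | contains "First" }}`

	t := template.Must(template.New("pipes").Funcs(sprig.TxtFuncMap()).Parse(tpl))
	if err := t.Execute(os.Stdout, map[string]interface{}{}); err != nil {
		panic(err)
	}
	// Expected output: anonymous / true
}
```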
diff --git a/vendor/github.com/Masterminds/sprig/v3/README.md b/vendor/github.com/Masterminds/sprig/v3/README.md
deleted file mode 100644
index c37ba01c..00000000
--- a/vendor/github.com/Masterminds/sprig/v3/README.md
+++ /dev/null
@@ -1,101 +0,0 @@
-# Sprig: Template functions for Go templates
-
-[![GoDoc](https://img.shields.io/static/v1?label=godoc&message=reference&color=blue)](https://pkg.go.dev/github.com/Masterminds/sprig/v3)
-[![Go Report Card](https://goreportcard.com/badge/github.com/Masterminds/sprig)](https://goreportcard.com/report/github.com/Masterminds/sprig)
-[![Stability: Sustained](https://masterminds.github.io/stability/sustained.svg)](https://masterminds.github.io/stability/sustained.html)
-[![](https://github.com/Masterminds/sprig/workflows/Tests/badge.svg)](https://github.com/Masterminds/sprig/actions)
-
-The Go language comes with a [built-in template
-language](http://golang.org/pkg/text/template/), but not
-very many template functions. Sprig is a library that provides more than 100 commonly
-used template functions.
-
-It is inspired by the template functions found in
-[Twig](http://twig.sensiolabs.org/documentation) and in various
-JavaScript libraries, such as [underscore.js](http://underscorejs.org/).
-
-## IMPORTANT NOTES
-
-Sprig leverages [mergo](https://github.com/imdario/mergo) to handle merges. In
-its v0.3.9 release there was a behavior change that impacts merging template
-functions in sprig. It is currently recommended to use v0.3.8 of that package.
-Using v0.3.9 will cause sprig tests to fail. The issue in mergo is tracked at
-https://github.com/imdario/mergo/issues/139.
-
-## Package Versions
-
-There are two active major versions of the `sprig` package.
-
-* v3 is the currently stable release series on the `master` branch. The Go API should
-  remain compatible with v2, the previous stable version. A behavior change behind
-  some functions is the reason for the new major version.
-* v2 is the previous stable release series. It has been more than three years since
-  the initial release of v2. You can read the documentation and see the code
-  on the [release-2](https://github.com/Masterminds/sprig/tree/release-2) branch.
-  Bug fixes to this major version will continue for some time.
-
-## Usage
-
-**Template developers**: Please use Sprig's [function documentation](http://masterminds.github.io/sprig/) for
-detailed instructions and code snippets for the >100 template functions available.
-
-**Go developers**: If you'd like to include Sprig as a library in your program,
-our API documentation is available [at GoDoc.org](http://godoc.org/github.com/Masterminds/sprig).
-
-For standard usage, read on.
-
-### Load the Sprig library
-
-To load the Sprig `FuncMap`:
-
-```go
-import (
-  "github.com/Masterminds/sprig"
-  "html/template"
-)
-
-// This example illustrates that the FuncMap *must* be set before the
-// templates themselves are loaded.
-tpl := template.Must(
-  template.New("base").Funcs(sprig.FuncMap()).ParseGlob("*.html"),
-)
-```
-
-### Calling the functions inside of templates
-
-By convention, all functions are lowercase. This seems to follow the Go
-idiom for template functions (as opposed to template methods, which are
-TitleCase). For example, this:
-
-```
-{{ "hello!" | upper | repeat 5 }}
-```
-
-produces this:
-
-```
-HELLO!HELLO!HELLO!HELLO!HELLO!
-```
-
-## Principles Driving Our Function Selection
-
-We followed these principles to decide which functions to add and how to implement them:
-
-- Use template functions to build layout. The following
-  types of operations are within the domain of template functions:
-  - Formatting
-  - Layout
-  - Simple type conversions
-  - Utilities that assist in handling common formatting and layout needs (e.g. arithmetic)
-- Template functions should not return errors unless there is no way to print
-  a sensible value. For example, converting a string to an integer should not
-  produce an error if conversion fails. Instead, it should display a default
-  value.
-- Simple math is necessary for grid layouts, pagers, and so on. Complex math
-  (anything other than arithmetic) should be done outside of templates.
-- Template functions only deal with the data passed into them. They never retrieve
-  data from a source.
-- Finally, do not override core Go template functions.
diff --git a/vendor/github.com/Masterminds/sprig/v3/crypto.go b/vendor/github.com/Masterminds/sprig/v3/crypto.go
deleted file mode 100644
index 13a5cd55..00000000
--- a/vendor/github.com/Masterminds/sprig/v3/crypto.go
+++ /dev/null
@@ -1,653 +0,0 @@
-package sprig
-
-import (
-	"bytes"
-	"crypto"
-	"crypto/aes"
-	"crypto/cipher"
-	"crypto/dsa"
-	"crypto/ecdsa"
-	"crypto/ed25519"
-	"crypto/elliptic"
-	"crypto/hmac"
-	"crypto/rand"
-	"crypto/rsa"
-	"crypto/sha1"
-	"crypto/sha256"
-	"crypto/x509"
-	"crypto/x509/pkix"
-	"encoding/asn1"
-	"encoding/base64"
-	"encoding/binary"
-	"encoding/hex"
-	"encoding/pem"
-	"errors"
-	"fmt"
-	"hash/adler32"
-	"io"
-	"math/big"
-	"net"
-	"time"
-
-	"strings"
-
-	"github.com/google/uuid"
-	bcrypt_lib "golang.org/x/crypto/bcrypt"
-	"golang.org/x/crypto/scrypt"
-)
-
-func sha256sum(input string) string {
-	hash := sha256.Sum256([]byte(input))
-	return hex.EncodeToString(hash[:])
-}
-
-func sha1sum(input string) string {
-	hash := sha1.Sum([]byte(input))
-	return hex.EncodeToString(hash[:])
-}
-
-func adler32sum(input string) string {
-	hash := adler32.Checksum([]byte(input))
-	return fmt.Sprintf("%d", hash)
-}
-
-func bcrypt(input string) string {
-	hash, err := bcrypt_lib.GenerateFromPassword([]byte(input), bcrypt_lib.DefaultCost)
-	if err != nil {
-		return fmt.Sprintf("failed to encrypt string with bcrypt: %s", err)
-	}
-
-	return string(hash)
-}
-
-func htpasswd(username string, password string) string {
-	if strings.Contains(username, ":") {
-		return fmt.Sprintf("invalid username: %s", username)
-	}
-	return fmt.Sprintf("%s:%s", username, bcrypt(password))
-}
-
-func randBytes(count int) (string, error) {
-	buf := make([]byte, count)
-	if _, err := rand.Read(buf); err != nil {
-		return "", err
-	}
-	return base64.StdEncoding.EncodeToString(buf), nil
-}
-
-// uuidv4 provides a safe and secure UUID v4 implementation
-func uuidv4() string {
-	return uuid.New().String()
-}
-
-var masterPasswordSeed = "com.lyndir.masterpassword"
-
-var passwordTypeTemplates = map[string][][]byte{
-	"maximum": {[]byte("anoxxxxxxxxxxxxxxxxx"), []byte("axxxxxxxxxxxxxxxxxno")},
-	"long": {[]byte("CvcvnoCvcvCvcv"), []byte("CvcvCvcvnoCvcv"), []byte("CvcvCvcvCvcvno"), []byte("CvccnoCvcvCvcv"), []byte("CvccCvcvnoCvcv"),
-		[]byte("CvccCvcvCvcvno"), []byte("CvcvnoCvccCvcv"), []byte("CvcvCvccnoCvcv"), []byte("CvcvCvccCvcvno"), []byte("CvcvnoCvcvCvcc"),
-		[]byte("CvcvCvcvnoCvcc"), []byte("CvcvCvcvCvccno"), []byte("CvccnoCvccCvcv"), []byte("CvccCvccnoCvcv"), []byte("CvccCvccCvcvno"),
-		[]byte("CvcvnoCvccCvcc"), []byte("CvcvCvccnoCvcc"), []byte("CvcvCvccCvccno"), []byte("CvccnoCvcvCvcc"), []byte("CvccCvcvnoCvcc"),
-		[]byte("CvccCvcvCvccno")},
-	"medium": {[]byte("CvcnoCvc"), []byte("CvcCvcno")},
-	"short":  {[]byte("Cvcn")},
-
"basic": {[]byte("aaanaaan"), []byte("aannaaan"), []byte("aaannaaa")}, - "pin": {[]byte("nnnn")}, -} - -var templateCharacters = map[byte]string{ - 'V': "AEIOU", - 'C': "BCDFGHJKLMNPQRSTVWXYZ", - 'v': "aeiou", - 'c': "bcdfghjklmnpqrstvwxyz", - 'A': "AEIOUBCDFGHJKLMNPQRSTVWXYZ", - 'a': "AEIOUaeiouBCDFGHJKLMNPQRSTVWXYZbcdfghjklmnpqrstvwxyz", - 'n': "0123456789", - 'o': "@&%?,=[]_:-+*$#!'^~;()/.", - 'x': "AEIOUaeiouBCDFGHJKLMNPQRSTVWXYZbcdfghjklmnpqrstvwxyz0123456789!@#$%^&*()", -} - -func derivePassword(counter uint32, passwordType, password, user, site string) string { - var templates = passwordTypeTemplates[passwordType] - if templates == nil { - return fmt.Sprintf("cannot find password template %s", passwordType) - } - - var buffer bytes.Buffer - buffer.WriteString(masterPasswordSeed) - binary.Write(&buffer, binary.BigEndian, uint32(len(user))) - buffer.WriteString(user) - - salt := buffer.Bytes() - key, err := scrypt.Key([]byte(password), salt, 32768, 8, 2, 64) - if err != nil { - return fmt.Sprintf("failed to derive password: %s", err) - } - - buffer.Truncate(len(masterPasswordSeed)) - binary.Write(&buffer, binary.BigEndian, uint32(len(site))) - buffer.WriteString(site) - binary.Write(&buffer, binary.BigEndian, counter) - - var hmacv = hmac.New(sha256.New, key) - hmacv.Write(buffer.Bytes()) - var seed = hmacv.Sum(nil) - var temp = templates[int(seed[0])%len(templates)] - - buffer.Truncate(0) - for i, element := range temp { - passChars := templateCharacters[element] - passChar := passChars[int(seed[i+1])%len(passChars)] - buffer.WriteByte(passChar) - } - - return buffer.String() -} - -func generatePrivateKey(typ string) string { - var priv interface{} - var err error - switch typ { - case "", "rsa": - // good enough for government work - priv, err = rsa.GenerateKey(rand.Reader, 4096) - case "dsa": - key := new(dsa.PrivateKey) - // again, good enough for government work - if err = dsa.GenerateParameters(&key.Parameters, rand.Reader, dsa.L2048N256); err != nil { - return fmt.Sprintf("failed to generate dsa params: %s", err) - } - err = dsa.GenerateKey(key, rand.Reader) - priv = key - case "ecdsa": - // again, good enough for government work - priv, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - case "ed25519": - _, priv, err = ed25519.GenerateKey(rand.Reader) - default: - return "Unknown type " + typ - } - if err != nil { - return fmt.Sprintf("failed to generate private key: %s", err) - } - - return string(pem.EncodeToMemory(pemBlockForKey(priv))) -} - -// DSAKeyFormat stores the format for DSA keys. 
-// Used by pemBlockForKey -type DSAKeyFormat struct { - Version int - P, Q, G, Y, X *big.Int -} - -func pemBlockForKey(priv interface{}) *pem.Block { - switch k := priv.(type) { - case *rsa.PrivateKey: - return &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(k)} - case *dsa.PrivateKey: - val := DSAKeyFormat{ - P: k.P, Q: k.Q, G: k.G, - Y: k.Y, X: k.X, - } - bytes, _ := asn1.Marshal(val) - return &pem.Block{Type: "DSA PRIVATE KEY", Bytes: bytes} - case *ecdsa.PrivateKey: - b, _ := x509.MarshalECPrivateKey(k) - return &pem.Block{Type: "EC PRIVATE KEY", Bytes: b} - default: - // attempt PKCS#8 format for all other keys - b, err := x509.MarshalPKCS8PrivateKey(k) - if err != nil { - return nil - } - return &pem.Block{Type: "PRIVATE KEY", Bytes: b} - } -} - -func parsePrivateKeyPEM(pemBlock string) (crypto.PrivateKey, error) { - block, _ := pem.Decode([]byte(pemBlock)) - if block == nil { - return nil, errors.New("no PEM data in input") - } - - if block.Type == "PRIVATE KEY" { - priv, err := x509.ParsePKCS8PrivateKey(block.Bytes) - if err != nil { - return nil, fmt.Errorf("decoding PEM as PKCS#8: %s", err) - } - return priv, nil - } else if !strings.HasSuffix(block.Type, " PRIVATE KEY") { - return nil, fmt.Errorf("no private key data in PEM block of type %s", block.Type) - } - - switch block.Type[:len(block.Type)-12] { // strip " PRIVATE KEY" - case "RSA": - priv, err := x509.ParsePKCS1PrivateKey(block.Bytes) - if err != nil { - return nil, fmt.Errorf("parsing RSA private key from PEM: %s", err) - } - return priv, nil - case "EC": - priv, err := x509.ParseECPrivateKey(block.Bytes) - if err != nil { - return nil, fmt.Errorf("parsing EC private key from PEM: %s", err) - } - return priv, nil - case "DSA": - var k DSAKeyFormat - _, err := asn1.Unmarshal(block.Bytes, &k) - if err != nil { - return nil, fmt.Errorf("parsing DSA private key from PEM: %s", err) - } - priv := &dsa.PrivateKey{ - PublicKey: dsa.PublicKey{ - Parameters: dsa.Parameters{ - P: k.P, Q: k.Q, G: k.G, - }, - Y: k.Y, - }, - X: k.X, - } - return priv, nil - default: - return nil, fmt.Errorf("invalid private key type %s", block.Type) - } -} - -func getPublicKey(priv crypto.PrivateKey) (crypto.PublicKey, error) { - switch k := priv.(type) { - case interface{ Public() crypto.PublicKey }: - return k.Public(), nil - case *dsa.PrivateKey: - return &k.PublicKey, nil - default: - return nil, fmt.Errorf("unable to get public key for type %T", priv) - } -} - -type certificate struct { - Cert string - Key string -} - -func buildCustomCertificate(b64cert string, b64key string) (certificate, error) { - crt := certificate{} - - cert, err := base64.StdEncoding.DecodeString(b64cert) - if err != nil { - return crt, errors.New("unable to decode base64 certificate") - } - - key, err := base64.StdEncoding.DecodeString(b64key) - if err != nil { - return crt, errors.New("unable to decode base64 private key") - } - - decodedCert, _ := pem.Decode(cert) - if decodedCert == nil { - return crt, errors.New("unable to decode certificate") - } - _, err = x509.ParseCertificate(decodedCert.Bytes) - if err != nil { - return crt, fmt.Errorf( - "error parsing certificate: decodedCert.Bytes: %s", - err, - ) - } - - _, err = parsePrivateKeyPEM(string(key)) - if err != nil { - return crt, fmt.Errorf( - "error parsing private key: %s", - err, - ) - } - - crt.Cert = string(cert) - crt.Key = string(key) - - return crt, nil -} - -func generateCertificateAuthority( - cn string, - daysValid int, -) (certificate, error) { - priv, err := 
rsa.GenerateKey(rand.Reader, 2048) - if err != nil { - return certificate{}, fmt.Errorf("error generating rsa key: %s", err) - } - - return generateCertificateAuthorityWithKeyInternal(cn, daysValid, priv) -} - -func generateCertificateAuthorityWithPEMKey( - cn string, - daysValid int, - privPEM string, -) (certificate, error) { - priv, err := parsePrivateKeyPEM(privPEM) - if err != nil { - return certificate{}, fmt.Errorf("parsing private key: %s", err) - } - return generateCertificateAuthorityWithKeyInternal(cn, daysValid, priv) -} - -func generateCertificateAuthorityWithKeyInternal( - cn string, - daysValid int, - priv crypto.PrivateKey, -) (certificate, error) { - ca := certificate{} - - template, err := getBaseCertTemplate(cn, nil, nil, daysValid) - if err != nil { - return ca, err - } - // Override KeyUsage and IsCA - template.KeyUsage = x509.KeyUsageKeyEncipherment | - x509.KeyUsageDigitalSignature | - x509.KeyUsageCertSign - template.IsCA = true - - ca.Cert, ca.Key, err = getCertAndKey(template, priv, template, priv) - - return ca, err -} - -func generateSelfSignedCertificate( - cn string, - ips []interface{}, - alternateDNS []interface{}, - daysValid int, -) (certificate, error) { - priv, err := rsa.GenerateKey(rand.Reader, 2048) - if err != nil { - return certificate{}, fmt.Errorf("error generating rsa key: %s", err) - } - return generateSelfSignedCertificateWithKeyInternal(cn, ips, alternateDNS, daysValid, priv) -} - -func generateSelfSignedCertificateWithPEMKey( - cn string, - ips []interface{}, - alternateDNS []interface{}, - daysValid int, - privPEM string, -) (certificate, error) { - priv, err := parsePrivateKeyPEM(privPEM) - if err != nil { - return certificate{}, fmt.Errorf("parsing private key: %s", err) - } - return generateSelfSignedCertificateWithKeyInternal(cn, ips, alternateDNS, daysValid, priv) -} - -func generateSelfSignedCertificateWithKeyInternal( - cn string, - ips []interface{}, - alternateDNS []interface{}, - daysValid int, - priv crypto.PrivateKey, -) (certificate, error) { - cert := certificate{} - - template, err := getBaseCertTemplate(cn, ips, alternateDNS, daysValid) - if err != nil { - return cert, err - } - - cert.Cert, cert.Key, err = getCertAndKey(template, priv, template, priv) - - return cert, err -} - -func generateSignedCertificate( - cn string, - ips []interface{}, - alternateDNS []interface{}, - daysValid int, - ca certificate, -) (certificate, error) { - priv, err := rsa.GenerateKey(rand.Reader, 2048) - if err != nil { - return certificate{}, fmt.Errorf("error generating rsa key: %s", err) - } - return generateSignedCertificateWithKeyInternal(cn, ips, alternateDNS, daysValid, ca, priv) -} - -func generateSignedCertificateWithPEMKey( - cn string, - ips []interface{}, - alternateDNS []interface{}, - daysValid int, - ca certificate, - privPEM string, -) (certificate, error) { - priv, err := parsePrivateKeyPEM(privPEM) - if err != nil { - return certificate{}, fmt.Errorf("parsing private key: %s", err) - } - return generateSignedCertificateWithKeyInternal(cn, ips, alternateDNS, daysValid, ca, priv) -} - -func generateSignedCertificateWithKeyInternal( - cn string, - ips []interface{}, - alternateDNS []interface{}, - daysValid int, - ca certificate, - priv crypto.PrivateKey, -) (certificate, error) { - cert := certificate{} - - decodedSignerCert, _ := pem.Decode([]byte(ca.Cert)) - if decodedSignerCert == nil { - return cert, errors.New("unable to decode certificate") - } - signerCert, err := x509.ParseCertificate(decodedSignerCert.Bytes) - if err != 
nil { - return cert, fmt.Errorf( - "error parsing certificate: decodedSignerCert.Bytes: %s", - err, - ) - } - signerKey, err := parsePrivateKeyPEM(ca.Key) - if err != nil { - return cert, fmt.Errorf( - "error parsing private key: %s", - err, - ) - } - - template, err := getBaseCertTemplate(cn, ips, alternateDNS, daysValid) - if err != nil { - return cert, err - } - - cert.Cert, cert.Key, err = getCertAndKey( - template, - priv, - signerCert, - signerKey, - ) - - return cert, err -} - -func getCertAndKey( - template *x509.Certificate, - signeeKey crypto.PrivateKey, - parent *x509.Certificate, - signingKey crypto.PrivateKey, -) (string, string, error) { - signeePubKey, err := getPublicKey(signeeKey) - if err != nil { - return "", "", fmt.Errorf("error retrieving public key from signee key: %s", err) - } - derBytes, err := x509.CreateCertificate( - rand.Reader, - template, - parent, - signeePubKey, - signingKey, - ) - if err != nil { - return "", "", fmt.Errorf("error creating certificate: %s", err) - } - - certBuffer := bytes.Buffer{} - if err := pem.Encode( - &certBuffer, - &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}, - ); err != nil { - return "", "", fmt.Errorf("error pem-encoding certificate: %s", err) - } - - keyBuffer := bytes.Buffer{} - if err := pem.Encode( - &keyBuffer, - pemBlockForKey(signeeKey), - ); err != nil { - return "", "", fmt.Errorf("error pem-encoding key: %s", err) - } - - return certBuffer.String(), keyBuffer.String(), nil -} - -func getBaseCertTemplate( - cn string, - ips []interface{}, - alternateDNS []interface{}, - daysValid int, -) (*x509.Certificate, error) { - ipAddresses, err := getNetIPs(ips) - if err != nil { - return nil, err - } - dnsNames, err := getAlternateDNSStrs(alternateDNS) - if err != nil { - return nil, err - } - serialNumberUpperBound := new(big.Int).Lsh(big.NewInt(1), 128) - serialNumber, err := rand.Int(rand.Reader, serialNumberUpperBound) - if err != nil { - return nil, err - } - return &x509.Certificate{ - SerialNumber: serialNumber, - Subject: pkix.Name{ - CommonName: cn, - }, - IPAddresses: ipAddresses, - DNSNames: dnsNames, - NotBefore: time.Now(), - NotAfter: time.Now().Add(time.Hour * 24 * time.Duration(daysValid)), - KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, - ExtKeyUsage: []x509.ExtKeyUsage{ - x509.ExtKeyUsageServerAuth, - x509.ExtKeyUsageClientAuth, - }, - BasicConstraintsValid: true, - }, nil -} - -func getNetIPs(ips []interface{}) ([]net.IP, error) { - if ips == nil { - return []net.IP{}, nil - } - var ipStr string - var ok bool - var netIP net.IP - netIPs := make([]net.IP, len(ips)) - for i, ip := range ips { - ipStr, ok = ip.(string) - if !ok { - return nil, fmt.Errorf("error parsing ip: %v is not a string", ip) - } - netIP = net.ParseIP(ipStr) - if netIP == nil { - return nil, fmt.Errorf("error parsing ip: %s", ipStr) - } - netIPs[i] = netIP - } - return netIPs, nil -} - -func getAlternateDNSStrs(alternateDNS []interface{}) ([]string, error) { - if alternateDNS == nil { - return []string{}, nil - } - var dnsStr string - var ok bool - alternateDNSStrs := make([]string, len(alternateDNS)) - for i, dns := range alternateDNS { - dnsStr, ok = dns.(string) - if !ok { - return nil, fmt.Errorf( - "error processing alternate dns name: %v is not a string", - dns, - ) - } - alternateDNSStrs[i] = dnsStr - } - return alternateDNSStrs, nil -} - -func encryptAES(password string, plaintext string) (string, error) { - if plaintext == "" { - return "", nil - } - - key := make([]byte, 32) - copy(key, 
[]byte(password)) - block, err := aes.NewCipher(key) - if err != nil { - return "", err - } - - content := []byte(plaintext) - blockSize := block.BlockSize() - padding := blockSize - len(content)%blockSize - padtext := bytes.Repeat([]byte{byte(padding)}, padding) - content = append(content, padtext...) - - ciphertext := make([]byte, aes.BlockSize+len(content)) - - iv := ciphertext[:aes.BlockSize] - if _, err := io.ReadFull(rand.Reader, iv); err != nil { - return "", err - } - - mode := cipher.NewCBCEncrypter(block, iv) - mode.CryptBlocks(ciphertext[aes.BlockSize:], content) - - return base64.StdEncoding.EncodeToString(ciphertext), nil -} - -func decryptAES(password string, crypt64 string) (string, error) { - if crypt64 == "" { - return "", nil - } - - key := make([]byte, 32) - copy(key, []byte(password)) - - crypt, err := base64.StdEncoding.DecodeString(crypt64) - if err != nil { - return "", err - } - - block, err := aes.NewCipher(key) - if err != nil { - return "", err - } - - iv := crypt[:aes.BlockSize] - crypt = crypt[aes.BlockSize:] - decrypted := make([]byte, len(crypt)) - mode := cipher.NewCBCDecrypter(block, iv) - mode.CryptBlocks(decrypted, crypt) - - return string(decrypted[:len(decrypted)-int(decrypted[len(decrypted)-1])]), nil -} diff --git a/vendor/github.com/Masterminds/sprig/v3/date.go b/vendor/github.com/Masterminds/sprig/v3/date.go deleted file mode 100644 index ed022dda..00000000 --- a/vendor/github.com/Masterminds/sprig/v3/date.go +++ /dev/null @@ -1,152 +0,0 @@ -package sprig - -import ( - "strconv" - "time" -) - -// Given a format and a date, format the date string. -// -// Date can be a `time.Time` or an `int, int32, int64`. -// In the later case, it is treated as seconds since UNIX -// epoch. -func date(fmt string, date interface{}) string { - return dateInZone(fmt, date, "Local") -} - -func htmlDate(date interface{}) string { - return dateInZone("2006-01-02", date, "Local") -} - -func htmlDateInZone(date interface{}, zone string) string { - return dateInZone("2006-01-02", date, zone) -} - -func dateInZone(fmt string, date interface{}, zone string) string { - var t time.Time - switch date := date.(type) { - default: - t = time.Now() - case time.Time: - t = date - case *time.Time: - t = *date - case int64: - t = time.Unix(date, 0) - case int: - t = time.Unix(int64(date), 0) - case int32: - t = time.Unix(int64(date), 0) - } - - loc, err := time.LoadLocation(zone) - if err != nil { - loc, _ = time.LoadLocation("UTC") - } - - return t.In(loc).Format(fmt) -} - -func dateModify(fmt string, date time.Time) time.Time { - d, err := time.ParseDuration(fmt) - if err != nil { - return date - } - return date.Add(d) -} - -func mustDateModify(fmt string, date time.Time) (time.Time, error) { - d, err := time.ParseDuration(fmt) - if err != nil { - return time.Time{}, err - } - return date.Add(d), nil -} - -func dateAgo(date interface{}) string { - var t time.Time - - switch date := date.(type) { - default: - t = time.Now() - case time.Time: - t = date - case int64: - t = time.Unix(date, 0) - case int: - t = time.Unix(int64(date), 0) - } - // Drop resolution to seconds - duration := time.Since(t).Round(time.Second) - return duration.String() -} - -func duration(sec interface{}) string { - var n int64 - switch value := sec.(type) { - default: - n = 0 - case string: - n, _ = strconv.ParseInt(value, 10, 64) - case int64: - n = value - } - return (time.Duration(n) * time.Second).String() -} - -func durationRound(duration interface{}) string { - var d time.Duration - switch duration := 
duration.(type) { - default: - d = 0 - case string: - d, _ = time.ParseDuration(duration) - case int64: - d = time.Duration(duration) - case time.Time: - d = time.Since(duration) - } - - u := uint64(d) - neg := d < 0 - if neg { - u = -u - } - - var ( - year = uint64(time.Hour) * 24 * 365 - month = uint64(time.Hour) * 24 * 30 - day = uint64(time.Hour) * 24 - hour = uint64(time.Hour) - minute = uint64(time.Minute) - second = uint64(time.Second) - ) - switch { - case u > year: - return strconv.FormatUint(u/year, 10) + "y" - case u > month: - return strconv.FormatUint(u/month, 10) + "mo" - case u > day: - return strconv.FormatUint(u/day, 10) + "d" - case u > hour: - return strconv.FormatUint(u/hour, 10) + "h" - case u > minute: - return strconv.FormatUint(u/minute, 10) + "m" - case u > second: - return strconv.FormatUint(u/second, 10) + "s" - } - return "0s" -} - -func toDate(fmt, str string) time.Time { - t, _ := time.ParseInLocation(fmt, str, time.Local) - return t -} - -func mustToDate(fmt, str string) (time.Time, error) { - return time.ParseInLocation(fmt, str, time.Local) -} - -func unixEpoch(date time.Time) string { - return strconv.FormatInt(date.Unix(), 10) -} diff --git a/vendor/github.com/Masterminds/sprig/v3/defaults.go b/vendor/github.com/Masterminds/sprig/v3/defaults.go deleted file mode 100644 index b9f97966..00000000 --- a/vendor/github.com/Masterminds/sprig/v3/defaults.go +++ /dev/null @@ -1,163 +0,0 @@ -package sprig - -import ( - "bytes" - "encoding/json" - "math/rand" - "reflect" - "strings" - "time" -) - -func init() { - rand.Seed(time.Now().UnixNano()) -} - -// dfault checks whether `given` is set, and returns default if not set. -// -// This returns `d` if `given` appears not to be set, and `given` otherwise. -// -// For numeric types 0 is unset. -// For strings, maps, arrays, and slices, len() = 0 is considered unset. -// For bool, false is unset. -// Structs are never considered unset. -// -// For everything else, including pointers, a nil value is unset. -func dfault(d interface{}, given ...interface{}) interface{} { - - if empty(given) || empty(given[0]) { - return d - } - return given[0] -} - -// empty returns true if the given value has the zero value for its type. -func empty(given interface{}) bool { - g := reflect.ValueOf(given) - if !g.IsValid() { - return true - } - - // Basically adapted from text/template.isTrue - switch g.Kind() { - default: - return g.IsNil() - case reflect.Array, reflect.Slice, reflect.Map, reflect.String: - return g.Len() == 0 - case reflect.Bool: - return !g.Bool() - case reflect.Complex64, reflect.Complex128: - return g.Complex() == 0 - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return g.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return g.Uint() == 0 - case reflect.Float32, reflect.Float64: - return g.Float() == 0 - case reflect.Struct: - return false - } -} - -// coalesce returns the first non-empty value. -func coalesce(v ...interface{}) interface{} { - for _, val := range v { - if !empty(val) { - return val - } - } - return nil -} - -// all returns true if empty(x) is false for all values x in the list. -// If the list is empty, return true. -func all(v ...interface{}) bool { - for _, val := range v { - if empty(val) { - return false - } - } - return true -} - -// any returns true if empty(x) is false for any x in the list. -// If the list is empty, return false. 
-func any(v ...interface{}) bool { - for _, val := range v { - if !empty(val) { - return true - } - } - return false -} - -// fromJson decodes JSON into a structured value, ignoring errors. -func fromJson(v string) interface{} { - output, _ := mustFromJson(v) - return output -} - -// mustFromJson decodes JSON into a structured value, returning errors. -func mustFromJson(v string) (interface{}, error) { - var output interface{} - err := json.Unmarshal([]byte(v), &output) - return output, err -} - -// toJson encodes an item into a JSON string -func toJson(v interface{}) string { - output, _ := json.Marshal(v) - return string(output) -} - -func mustToJson(v interface{}) (string, error) { - output, err := json.Marshal(v) - if err != nil { - return "", err - } - return string(output), nil -} - -// toPrettyJson encodes an item into a pretty (indented) JSON string -func toPrettyJson(v interface{}) string { - output, _ := json.MarshalIndent(v, "", " ") - return string(output) -} - -func mustToPrettyJson(v interface{}) (string, error) { - output, err := json.MarshalIndent(v, "", " ") - if err != nil { - return "", err - } - return string(output), nil -} - -// toRawJson encodes an item into a JSON string with no escaping of HTML characters. -func toRawJson(v interface{}) string { - output, err := mustToRawJson(v) - if err != nil { - panic(err) - } - return string(output) -} - -// mustToRawJson encodes an item into a JSON string with no escaping of HTML characters. -func mustToRawJson(v interface{}) (string, error) { - buf := new(bytes.Buffer) - enc := json.NewEncoder(buf) - enc.SetEscapeHTML(false) - err := enc.Encode(&v) - if err != nil { - return "", err - } - return strings.TrimSuffix(buf.String(), "\n"), nil -} - -// ternary returns the first value if the last value is true, otherwise returns the second value. 
-func ternary(vt interface{}, vf interface{}, v bool) interface{} { - if v { - return vt - } - - return vf -} diff --git a/vendor/github.com/Masterminds/sprig/v3/dict.go b/vendor/github.com/Masterminds/sprig/v3/dict.go deleted file mode 100644 index ade88969..00000000 --- a/vendor/github.com/Masterminds/sprig/v3/dict.go +++ /dev/null @@ -1,174 +0,0 @@ -package sprig - -import ( - "github.com/imdario/mergo" - "github.com/mitchellh/copystructure" -) - -func get(d map[string]interface{}, key string) interface{} { - if val, ok := d[key]; ok { - return val - } - return "" -} - -func set(d map[string]interface{}, key string, value interface{}) map[string]interface{} { - d[key] = value - return d -} - -func unset(d map[string]interface{}, key string) map[string]interface{} { - delete(d, key) - return d -} - -func hasKey(d map[string]interface{}, key string) bool { - _, ok := d[key] - return ok -} - -func pluck(key string, d ...map[string]interface{}) []interface{} { - res := []interface{}{} - for _, dict := range d { - if val, ok := dict[key]; ok { - res = append(res, val) - } - } - return res -} - -func keys(dicts ...map[string]interface{}) []string { - k := []string{} - for _, dict := range dicts { - for key := range dict { - k = append(k, key) - } - } - return k -} - -func pick(dict map[string]interface{}, keys ...string) map[string]interface{} { - res := map[string]interface{}{} - for _, k := range keys { - if v, ok := dict[k]; ok { - res[k] = v - } - } - return res -} - -func omit(dict map[string]interface{}, keys ...string) map[string]interface{} { - res := map[string]interface{}{} - - omit := make(map[string]bool, len(keys)) - for _, k := range keys { - omit[k] = true - } - - for k, v := range dict { - if _, ok := omit[k]; !ok { - res[k] = v - } - } - return res -} - -func dict(v ...interface{}) map[string]interface{} { - dict := map[string]interface{}{} - lenv := len(v) - for i := 0; i < lenv; i += 2 { - key := strval(v[i]) - if i+1 >= lenv { - dict[key] = "" - continue - } - dict[key] = v[i+1] - } - return dict -} - -func merge(dst map[string]interface{}, srcs ...map[string]interface{}) interface{} { - for _, src := range srcs { - if err := mergo.Merge(&dst, src); err != nil { - // Swallow errors inside of a template. - return "" - } - } - return dst -} - -func mustMerge(dst map[string]interface{}, srcs ...map[string]interface{}) (interface{}, error) { - for _, src := range srcs { - if err := mergo.Merge(&dst, src); err != nil { - return nil, err - } - } - return dst, nil -} - -func mergeOverwrite(dst map[string]interface{}, srcs ...map[string]interface{}) interface{} { - for _, src := range srcs { - if err := mergo.MergeWithOverwrite(&dst, src); err != nil { - // Swallow errors inside of a template. 
- return "" - } - } - return dst -} - -func mustMergeOverwrite(dst map[string]interface{}, srcs ...map[string]interface{}) (interface{}, error) { - for _, src := range srcs { - if err := mergo.MergeWithOverwrite(&dst, src); err != nil { - return nil, err - } - } - return dst, nil -} - -func values(dict map[string]interface{}) []interface{} { - values := []interface{}{} - for _, value := range dict { - values = append(values, value) - } - - return values -} - -func deepCopy(i interface{}) interface{} { - c, err := mustDeepCopy(i) - if err != nil { - panic("deepCopy error: " + err.Error()) - } - - return c -} - -func mustDeepCopy(i interface{}) (interface{}, error) { - return copystructure.Copy(i) -} - -func dig(ps ...interface{}) (interface{}, error) { - if len(ps) < 3 { - panic("dig needs at least three arguments") - } - dict := ps[len(ps)-1].(map[string]interface{}) - def := ps[len(ps)-2] - ks := make([]string, len(ps)-2) - for i := 0; i < len(ks); i++ { - ks[i] = ps[i].(string) - } - - return digFromDict(dict, def, ks) -} - -func digFromDict(dict map[string]interface{}, d interface{}, ks []string) (interface{}, error) { - k, ns := ks[0], ks[1:len(ks)] - step, has := dict[k] - if !has { - return d, nil - } - if len(ns) == 0 { - return step, nil - } - return digFromDict(step.(map[string]interface{}), d, ns) -} diff --git a/vendor/github.com/Masterminds/sprig/v3/doc.go b/vendor/github.com/Masterminds/sprig/v3/doc.go deleted file mode 100644 index aabb9d44..00000000 --- a/vendor/github.com/Masterminds/sprig/v3/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Package sprig provides template functions for Go. - -This package contains a number of utility functions for working with data -inside of Go `html/template` and `text/template` files. - -To add these functions, use the `template.Funcs()` method: - - t := templates.New("foo").Funcs(sprig.FuncMap()) - -Note that you should add the function map before you parse any template files. - - In several cases, Sprig reverses the order of arguments from the way they - appear in the standard library. This is to make it easier to pipe - arguments into functions. - -See http://masterminds.github.io/sprig/ for more detailed documentation on each of the available functions. -*/ -package sprig diff --git a/vendor/github.com/Masterminds/sprig/v3/functions.go b/vendor/github.com/Masterminds/sprig/v3/functions.go deleted file mode 100644 index 57fcec1d..00000000 --- a/vendor/github.com/Masterminds/sprig/v3/functions.go +++ /dev/null @@ -1,382 +0,0 @@ -package sprig - -import ( - "errors" - "html/template" - "math/rand" - "os" - "path" - "path/filepath" - "reflect" - "strconv" - "strings" - ttemplate "text/template" - "time" - - util "github.com/Masterminds/goutils" - "github.com/huandu/xstrings" - "github.com/shopspring/decimal" -) - -// FuncMap produces the function map. -// -// Use this to pass the functions into the template engine: -// -// tpl := template.New("foo").Funcs(sprig.FuncMap())) -// -func FuncMap() template.FuncMap { - return HtmlFuncMap() -} - -// HermeticTxtFuncMap returns a 'text/template'.FuncMap with only repeatable functions. -func HermeticTxtFuncMap() ttemplate.FuncMap { - r := TxtFuncMap() - for _, name := range nonhermeticFunctions { - delete(r, name) - } - return r -} - -// HermeticHtmlFuncMap returns an 'html/template'.Funcmap with only repeatable functions. 
-func HermeticHtmlFuncMap() template.FuncMap { - r := HtmlFuncMap() - for _, name := range nonhermeticFunctions { - delete(r, name) - } - return r -} - -// TxtFuncMap returns a 'text/template'.FuncMap -func TxtFuncMap() ttemplate.FuncMap { - return ttemplate.FuncMap(GenericFuncMap()) -} - -// HtmlFuncMap returns an 'html/template'.Funcmap -func HtmlFuncMap() template.FuncMap { - return template.FuncMap(GenericFuncMap()) -} - -// GenericFuncMap returns a copy of the basic function map as a map[string]interface{}. -func GenericFuncMap() map[string]interface{} { - gfm := make(map[string]interface{}, len(genericMap)) - for k, v := range genericMap { - gfm[k] = v - } - return gfm -} - -// These functions are not guaranteed to evaluate to the same result for given input, because they -// refer to the environment or global state. -var nonhermeticFunctions = []string{ - // Date functions - "date", - "date_in_zone", - "date_modify", - "now", - "htmlDate", - "htmlDateInZone", - "dateInZone", - "dateModify", - - // Strings - "randAlphaNum", - "randAlpha", - "randAscii", - "randNumeric", - "randBytes", - "uuidv4", - - // OS - "env", - "expandenv", - - // Network - "getHostByName", -} - -var genericMap = map[string]interface{}{ - "hello": func() string { return "Hello!" }, - - // Date functions - "ago": dateAgo, - "date": date, - "date_in_zone": dateInZone, - "date_modify": dateModify, - "dateInZone": dateInZone, - "dateModify": dateModify, - "duration": duration, - "durationRound": durationRound, - "htmlDate": htmlDate, - "htmlDateInZone": htmlDateInZone, - "must_date_modify": mustDateModify, - "mustDateModify": mustDateModify, - "mustToDate": mustToDate, - "now": time.Now, - "toDate": toDate, - "unixEpoch": unixEpoch, - - // Strings - "abbrev": abbrev, - "abbrevboth": abbrevboth, - "trunc": trunc, - "trim": strings.TrimSpace, - "upper": strings.ToUpper, - "lower": strings.ToLower, - "title": strings.Title, - "untitle": untitle, - "substr": substring, - // Switch order so that "foo" | repeat 5 - "repeat": func(count int, str string) string { return strings.Repeat(str, count) }, - // Deprecated: Use trimAll. 
- "trimall": func(a, b string) string { return strings.Trim(b, a) }, - // Switch order so that "$foo" | trimall "$" - "trimAll": func(a, b string) string { return strings.Trim(b, a) }, - "trimSuffix": func(a, b string) string { return strings.TrimSuffix(b, a) }, - "trimPrefix": func(a, b string) string { return strings.TrimPrefix(b, a) }, - "nospace": util.DeleteWhiteSpace, - "initials": initials, - "randAlphaNum": randAlphaNumeric, - "randAlpha": randAlpha, - "randAscii": randAscii, - "randNumeric": randNumeric, - "swapcase": util.SwapCase, - "shuffle": xstrings.Shuffle, - "snakecase": xstrings.ToSnakeCase, - "camelcase": xstrings.ToCamelCase, - "kebabcase": xstrings.ToKebabCase, - "wrap": func(l int, s string) string { return util.Wrap(s, l) }, - "wrapWith": func(l int, sep, str string) string { return util.WrapCustom(str, l, sep, true) }, - // Switch order so that "foobar" | contains "foo" - "contains": func(substr string, str string) bool { return strings.Contains(str, substr) }, - "hasPrefix": func(substr string, str string) bool { return strings.HasPrefix(str, substr) }, - "hasSuffix": func(substr string, str string) bool { return strings.HasSuffix(str, substr) }, - "quote": quote, - "squote": squote, - "cat": cat, - "indent": indent, - "nindent": nindent, - "replace": replace, - "plural": plural, - "sha1sum": sha1sum, - "sha256sum": sha256sum, - "adler32sum": adler32sum, - "toString": strval, - - // Wrap Atoi to stop errors. - "atoi": func(a string) int { i, _ := strconv.Atoi(a); return i }, - "int64": toInt64, - "int": toInt, - "float64": toFloat64, - "seq": seq, - "toDecimal": toDecimal, - - //"gt": func(a, b int) bool {return a > b}, - //"gte": func(a, b int) bool {return a >= b}, - //"lt": func(a, b int) bool {return a < b}, - //"lte": func(a, b int) bool {return a <= b}, - - // split "/" foo/bar returns map[int]string{0: foo, 1: bar} - "split": split, - "splitList": func(sep, orig string) []string { return strings.Split(orig, sep) }, - // splitn "/" foo/bar/fuu returns map[int]string{0: foo, 1: bar/fuu} - "splitn": splitn, - "toStrings": strslice, - - "until": until, - "untilStep": untilStep, - - // VERY basic arithmetic. 
- "add1": func(i interface{}) int64 { return toInt64(i) + 1 }, - "add": func(i ...interface{}) int64 { - var a int64 = 0 - for _, b := range i { - a += toInt64(b) - } - return a - }, - "sub": func(a, b interface{}) int64 { return toInt64(a) - toInt64(b) }, - "div": func(a, b interface{}) int64 { return toInt64(a) / toInt64(b) }, - "mod": func(a, b interface{}) int64 { return toInt64(a) % toInt64(b) }, - "mul": func(a interface{}, v ...interface{}) int64 { - val := toInt64(a) - for _, b := range v { - val = val * toInt64(b) - } - return val - }, - "randInt": func(min, max int) int { return rand.Intn(max-min) + min }, - "add1f": func(i interface{}) float64 { - return execDecimalOp(i, []interface{}{1}, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Add(d2) }) - }, - "addf": func(i ...interface{}) float64 { - a := interface{}(float64(0)) - return execDecimalOp(a, i, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Add(d2) }) - }, - "subf": func(a interface{}, v ...interface{}) float64 { - return execDecimalOp(a, v, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Sub(d2) }) - }, - "divf": func(a interface{}, v ...interface{}) float64 { - return execDecimalOp(a, v, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Div(d2) }) - }, - "mulf": func(a interface{}, v ...interface{}) float64 { - return execDecimalOp(a, v, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Mul(d2) }) - }, - "biggest": max, - "max": max, - "min": min, - "maxf": maxf, - "minf": minf, - "ceil": ceil, - "floor": floor, - "round": round, - - // string slices. Note that we reverse the order b/c that's better - // for template processing. - "join": join, - "sortAlpha": sortAlpha, - - // Defaults - "default": dfault, - "empty": empty, - "coalesce": coalesce, - "all": all, - "any": any, - "compact": compact, - "mustCompact": mustCompact, - "fromJson": fromJson, - "toJson": toJson, - "toPrettyJson": toPrettyJson, - "toRawJson": toRawJson, - "mustFromJson": mustFromJson, - "mustToJson": mustToJson, - "mustToPrettyJson": mustToPrettyJson, - "mustToRawJson": mustToRawJson, - "ternary": ternary, - "deepCopy": deepCopy, - "mustDeepCopy": mustDeepCopy, - - // Reflection - "typeOf": typeOf, - "typeIs": typeIs, - "typeIsLike": typeIsLike, - "kindOf": kindOf, - "kindIs": kindIs, - "deepEqual": reflect.DeepEqual, - - // OS: - "env": os.Getenv, - "expandenv": os.ExpandEnv, - - // Network: - "getHostByName": getHostByName, - - // Paths: - "base": path.Base, - "dir": path.Dir, - "clean": path.Clean, - "ext": path.Ext, - "isAbs": path.IsAbs, - - // Filepaths: - "osBase": filepath.Base, - "osClean": filepath.Clean, - "osDir": filepath.Dir, - "osExt": filepath.Ext, - "osIsAbs": filepath.IsAbs, - - // Encoding: - "b64enc": base64encode, - "b64dec": base64decode, - "b32enc": base32encode, - "b32dec": base32decode, - - // Data Structures: - "tuple": list, // FIXME: with the addition of append/prepend these are no longer immutable. 
- "list": list, - "dict": dict, - "get": get, - "set": set, - "unset": unset, - "hasKey": hasKey, - "pluck": pluck, - "keys": keys, - "pick": pick, - "omit": omit, - "merge": merge, - "mergeOverwrite": mergeOverwrite, - "mustMerge": mustMerge, - "mustMergeOverwrite": mustMergeOverwrite, - "values": values, - - "append": push, "push": push, - "mustAppend": mustPush, "mustPush": mustPush, - "prepend": prepend, - "mustPrepend": mustPrepend, - "first": first, - "mustFirst": mustFirst, - "rest": rest, - "mustRest": mustRest, - "last": last, - "mustLast": mustLast, - "initial": initial, - "mustInitial": mustInitial, - "reverse": reverse, - "mustReverse": mustReverse, - "uniq": uniq, - "mustUniq": mustUniq, - "without": without, - "mustWithout": mustWithout, - "has": has, - "mustHas": mustHas, - "slice": slice, - "mustSlice": mustSlice, - "concat": concat, - "dig": dig, - "chunk": chunk, - "mustChunk": mustChunk, - - // Crypto: - "bcrypt": bcrypt, - "htpasswd": htpasswd, - "genPrivateKey": generatePrivateKey, - "derivePassword": derivePassword, - "buildCustomCert": buildCustomCertificate, - "genCA": generateCertificateAuthority, - "genCAWithKey": generateCertificateAuthorityWithPEMKey, - "genSelfSignedCert": generateSelfSignedCertificate, - "genSelfSignedCertWithKey": generateSelfSignedCertificateWithPEMKey, - "genSignedCert": generateSignedCertificate, - "genSignedCertWithKey": generateSignedCertificateWithPEMKey, - "encryptAES": encryptAES, - "decryptAES": decryptAES, - "randBytes": randBytes, - - // UUIDs: - "uuidv4": uuidv4, - - // SemVer: - "semver": semver, - "semverCompare": semverCompare, - - // Flow Control: - "fail": func(msg string) (string, error) { return "", errors.New(msg) }, - - // Regex - "regexMatch": regexMatch, - "mustRegexMatch": mustRegexMatch, - "regexFindAll": regexFindAll, - "mustRegexFindAll": mustRegexFindAll, - "regexFind": regexFind, - "mustRegexFind": mustRegexFind, - "regexReplaceAll": regexReplaceAll, - "mustRegexReplaceAll": mustRegexReplaceAll, - "regexReplaceAllLiteral": regexReplaceAllLiteral, - "mustRegexReplaceAllLiteral": mustRegexReplaceAllLiteral, - "regexSplit": regexSplit, - "mustRegexSplit": mustRegexSplit, - "regexQuoteMeta": regexQuoteMeta, - - // URLs: - "urlParse": urlParse, - "urlJoin": urlJoin, -} diff --git a/vendor/github.com/Masterminds/sprig/v3/list.go b/vendor/github.com/Masterminds/sprig/v3/list.go deleted file mode 100644 index ca0fbb78..00000000 --- a/vendor/github.com/Masterminds/sprig/v3/list.go +++ /dev/null @@ -1,464 +0,0 @@ -package sprig - -import ( - "fmt" - "math" - "reflect" - "sort" -) - -// Reflection is used in these functions so that slices and arrays of strings, -// ints, and other types not implementing []interface{} can be worked with. -// For example, this is useful if you need to work on the output of regexs. 
- -func list(v ...interface{}) []interface{} { - return v -} - -func push(list interface{}, v interface{}) []interface{} { - l, err := mustPush(list, v) - if err != nil { - panic(err) - } - - return l -} - -func mustPush(list interface{}, v interface{}) ([]interface{}, error) { - tp := reflect.TypeOf(list).Kind() - switch tp { - case reflect.Slice, reflect.Array: - l2 := reflect.ValueOf(list) - - l := l2.Len() - nl := make([]interface{}, l) - for i := 0; i < l; i++ { - nl[i] = l2.Index(i).Interface() - } - - return append(nl, v), nil - - default: - return nil, fmt.Errorf("Cannot push on type %s", tp) - } -} - -func prepend(list interface{}, v interface{}) []interface{} { - l, err := mustPrepend(list, v) - if err != nil { - panic(err) - } - - return l -} - -func mustPrepend(list interface{}, v interface{}) ([]interface{}, error) { - //return append([]interface{}{v}, list...) - - tp := reflect.TypeOf(list).Kind() - switch tp { - case reflect.Slice, reflect.Array: - l2 := reflect.ValueOf(list) - - l := l2.Len() - nl := make([]interface{}, l) - for i := 0; i < l; i++ { - nl[i] = l2.Index(i).Interface() - } - - return append([]interface{}{v}, nl...), nil - - default: - return nil, fmt.Errorf("Cannot prepend on type %s", tp) - } -} - -func chunk(size int, list interface{}) [][]interface{} { - l, err := mustChunk(size, list) - if err != nil { - panic(err) - } - - return l -} - -func mustChunk(size int, list interface{}) ([][]interface{}, error) { - tp := reflect.TypeOf(list).Kind() - switch tp { - case reflect.Slice, reflect.Array: - l2 := reflect.ValueOf(list) - - l := l2.Len() - - cs := int(math.Floor(float64(l-1)/float64(size)) + 1) - nl := make([][]interface{}, cs) - - for i := 0; i < cs; i++ { - clen := size - if i == cs-1 { - clen = int(math.Floor(math.Mod(float64(l), float64(size)))) - if clen == 0 { - clen = size - } - } - - nl[i] = make([]interface{}, clen) - - for j := 0; j < clen; j++ { - ix := i*size + j - nl[i][j] = l2.Index(ix).Interface() - } - } - - return nl, nil - - default: - return nil, fmt.Errorf("Cannot chunk type %s", tp) - } -} - -func last(list interface{}) interface{} { - l, err := mustLast(list) - if err != nil { - panic(err) - } - - return l -} - -func mustLast(list interface{}) (interface{}, error) { - tp := reflect.TypeOf(list).Kind() - switch tp { - case reflect.Slice, reflect.Array: - l2 := reflect.ValueOf(list) - - l := l2.Len() - if l == 0 { - return nil, nil - } - - return l2.Index(l - 1).Interface(), nil - default: - return nil, fmt.Errorf("Cannot find last on type %s", tp) - } -} - -func first(list interface{}) interface{} { - l, err := mustFirst(list) - if err != nil { - panic(err) - } - - return l -} - -func mustFirst(list interface{}) (interface{}, error) { - tp := reflect.TypeOf(list).Kind() - switch tp { - case reflect.Slice, reflect.Array: - l2 := reflect.ValueOf(list) - - l := l2.Len() - if l == 0 { - return nil, nil - } - - return l2.Index(0).Interface(), nil - default: - return nil, fmt.Errorf("Cannot find first on type %s", tp) - } -} - -func rest(list interface{}) []interface{} { - l, err := mustRest(list) - if err != nil { - panic(err) - } - - return l -} - -func mustRest(list interface{}) ([]interface{}, error) { - tp := reflect.TypeOf(list).Kind() - switch tp { - case reflect.Slice, reflect.Array: - l2 := reflect.ValueOf(list) - - l := l2.Len() - if l == 0 { - return nil, nil - } - - nl := make([]interface{}, l-1) - for i := 1; i < l; i++ { - nl[i-1] = l2.Index(i).Interface() - } - - return nl, nil - default: - return nil, fmt.Errorf("Cannot find 
rest on type %s", tp) - } -} - -func initial(list interface{}) []interface{} { - l, err := mustInitial(list) - if err != nil { - panic(err) - } - - return l -} - -func mustInitial(list interface{}) ([]interface{}, error) { - tp := reflect.TypeOf(list).Kind() - switch tp { - case reflect.Slice, reflect.Array: - l2 := reflect.ValueOf(list) - - l := l2.Len() - if l == 0 { - return nil, nil - } - - nl := make([]interface{}, l-1) - for i := 0; i < l-1; i++ { - nl[i] = l2.Index(i).Interface() - } - - return nl, nil - default: - return nil, fmt.Errorf("Cannot find initial on type %s", tp) - } -} - -func sortAlpha(list interface{}) []string { - k := reflect.Indirect(reflect.ValueOf(list)).Kind() - switch k { - case reflect.Slice, reflect.Array: - a := strslice(list) - s := sort.StringSlice(a) - s.Sort() - return s - } - return []string{strval(list)} -} - -func reverse(v interface{}) []interface{} { - l, err := mustReverse(v) - if err != nil { - panic(err) - } - - return l -} - -func mustReverse(v interface{}) ([]interface{}, error) { - tp := reflect.TypeOf(v).Kind() - switch tp { - case reflect.Slice, reflect.Array: - l2 := reflect.ValueOf(v) - - l := l2.Len() - // We do not sort in place because the incoming array should not be altered. - nl := make([]interface{}, l) - for i := 0; i < l; i++ { - nl[l-i-1] = l2.Index(i).Interface() - } - - return nl, nil - default: - return nil, fmt.Errorf("Cannot find reverse on type %s", tp) - } -} - -func compact(list interface{}) []interface{} { - l, err := mustCompact(list) - if err != nil { - panic(err) - } - - return l -} - -func mustCompact(list interface{}) ([]interface{}, error) { - tp := reflect.TypeOf(list).Kind() - switch tp { - case reflect.Slice, reflect.Array: - l2 := reflect.ValueOf(list) - - l := l2.Len() - nl := []interface{}{} - var item interface{} - for i := 0; i < l; i++ { - item = l2.Index(i).Interface() - if !empty(item) { - nl = append(nl, item) - } - } - - return nl, nil - default: - return nil, fmt.Errorf("Cannot compact on type %s", tp) - } -} - -func uniq(list interface{}) []interface{} { - l, err := mustUniq(list) - if err != nil { - panic(err) - } - - return l -} - -func mustUniq(list interface{}) ([]interface{}, error) { - tp := reflect.TypeOf(list).Kind() - switch tp { - case reflect.Slice, reflect.Array: - l2 := reflect.ValueOf(list) - - l := l2.Len() - dest := []interface{}{} - var item interface{} - for i := 0; i < l; i++ { - item = l2.Index(i).Interface() - if !inList(dest, item) { - dest = append(dest, item) - } - } - - return dest, nil - default: - return nil, fmt.Errorf("Cannot find uniq on type %s", tp) - } -} - -func inList(haystack []interface{}, needle interface{}) bool { - for _, h := range haystack { - if reflect.DeepEqual(needle, h) { - return true - } - } - return false -} - -func without(list interface{}, omit ...interface{}) []interface{} { - l, err := mustWithout(list, omit...) 
- if err != nil { - panic(err) - } - - return l -} - -func mustWithout(list interface{}, omit ...interface{}) ([]interface{}, error) { - tp := reflect.TypeOf(list).Kind() - switch tp { - case reflect.Slice, reflect.Array: - l2 := reflect.ValueOf(list) - - l := l2.Len() - res := []interface{}{} - var item interface{} - for i := 0; i < l; i++ { - item = l2.Index(i).Interface() - if !inList(omit, item) { - res = append(res, item) - } - } - - return res, nil - default: - return nil, fmt.Errorf("Cannot find without on type %s", tp) - } -} - -func has(needle interface{}, haystack interface{}) bool { - l, err := mustHas(needle, haystack) - if err != nil { - panic(err) - } - - return l -} - -func mustHas(needle interface{}, haystack interface{}) (bool, error) { - if haystack == nil { - return false, nil - } - tp := reflect.TypeOf(haystack).Kind() - switch tp { - case reflect.Slice, reflect.Array: - l2 := reflect.ValueOf(haystack) - var item interface{} - l := l2.Len() - for i := 0; i < l; i++ { - item = l2.Index(i).Interface() - if reflect.DeepEqual(needle, item) { - return true, nil - } - } - - return false, nil - default: - return false, fmt.Errorf("Cannot find has on type %s", tp) - } -} - -// $list := [1, 2, 3, 4, 5] -// slice $list -> list[0:5] = list[:] -// slice $list 0 3 -> list[0:3] = list[:3] -// slice $list 3 5 -> list[3:5] -// slice $list 3 -> list[3:5] = list[3:] -func slice(list interface{}, indices ...interface{}) interface{} { - l, err := mustSlice(list, indices...) - if err != nil { - panic(err) - } - - return l -} - -func mustSlice(list interface{}, indices ...interface{}) (interface{}, error) { - tp := reflect.TypeOf(list).Kind() - switch tp { - case reflect.Slice, reflect.Array: - l2 := reflect.ValueOf(list) - - l := l2.Len() - if l == 0 { - return nil, nil - } - - var start, end int - if len(indices) > 0 { - start = toInt(indices[0]) - } - if len(indices) < 2 { - end = l - } else { - end = toInt(indices[1]) - } - - return l2.Slice(start, end).Interface(), nil - default: - return nil, fmt.Errorf("list should be type of slice or array but %s", tp) - } -} - -func concat(lists ...interface{}) interface{} { - var res []interface{} - for _, list := range lists { - tp := reflect.TypeOf(list).Kind() - switch tp { - case reflect.Slice, reflect.Array: - l2 := reflect.ValueOf(list) - for i := 0; i < l2.Len(); i++ { - res = append(res, l2.Index(i).Interface()) - } - default: - panic(fmt.Sprintf("Cannot concat type %s as list", tp)) - } - } - return res -} diff --git a/vendor/github.com/Masterminds/sprig/v3/network.go b/vendor/github.com/Masterminds/sprig/v3/network.go deleted file mode 100644 index 108d78a9..00000000 --- a/vendor/github.com/Masterminds/sprig/v3/network.go +++ /dev/null @@ -1,12 +0,0 @@ -package sprig - -import ( - "math/rand" - "net" -) - -func getHostByName(name string) string { - addrs, _ := net.LookupHost(name) - //TODO: add error handing when release v3 comes out - return addrs[rand.Intn(len(addrs))] -} diff --git a/vendor/github.com/Masterminds/sprig/v3/numeric.go b/vendor/github.com/Masterminds/sprig/v3/numeric.go deleted file mode 100644 index f68e4182..00000000 --- a/vendor/github.com/Masterminds/sprig/v3/numeric.go +++ /dev/null @@ -1,186 +0,0 @@ -package sprig - -import ( - "fmt" - "math" - "strconv" - "strings" - - "github.com/spf13/cast" - "github.com/shopspring/decimal" -) - -// toFloat64 converts 64-bit floats -func toFloat64(v interface{}) float64 { - return cast.ToFloat64(v) -} - -func toInt(v interface{}) int { - return cast.ToInt(v) -} - -// toInt64 
converts integer types to 64-bit integers -func toInt64(v interface{}) int64 { - return cast.ToInt64(v) -} - -func max(a interface{}, i ...interface{}) int64 { - aa := toInt64(a) - for _, b := range i { - bb := toInt64(b) - if bb > aa { - aa = bb - } - } - return aa -} - -func maxf(a interface{}, i ...interface{}) float64 { - aa := toFloat64(a) - for _, b := range i { - bb := toFloat64(b) - aa = math.Max(aa, bb) - } - return aa -} - -func min(a interface{}, i ...interface{}) int64 { - aa := toInt64(a) - for _, b := range i { - bb := toInt64(b) - if bb < aa { - aa = bb - } - } - return aa -} - -func minf(a interface{}, i ...interface{}) float64 { - aa := toFloat64(a) - for _, b := range i { - bb := toFloat64(b) - aa = math.Min(aa, bb) - } - return aa -} - -func until(count int) []int { - step := 1 - if count < 0 { - step = -1 - } - return untilStep(0, count, step) -} - -func untilStep(start, stop, step int) []int { - v := []int{} - - if stop < start { - if step >= 0 { - return v - } - for i := start; i > stop; i += step { - v = append(v, i) - } - return v - } - - if step <= 0 { - return v - } - for i := start; i < stop; i += step { - v = append(v, i) - } - return v -} - -func floor(a interface{}) float64 { - aa := toFloat64(a) - return math.Floor(aa) -} - -func ceil(a interface{}) float64 { - aa := toFloat64(a) - return math.Ceil(aa) -} - -func round(a interface{}, p int, rOpt ...float64) float64 { - roundOn := .5 - if len(rOpt) > 0 { - roundOn = rOpt[0] - } - val := toFloat64(a) - places := toFloat64(p) - - var round float64 - pow := math.Pow(10, places) - digit := pow * val - _, div := math.Modf(digit) - if div >= roundOn { - round = math.Ceil(digit) - } else { - round = math.Floor(digit) - } - return round / pow -} - -// converts unix octal to decimal -func toDecimal(v interface{}) int64 { - result, err := strconv.ParseInt(fmt.Sprint(v), 8, 64) - if err != nil { - return 0 - } - return result -} - -func seq(params ...int) string { - increment := 1 - switch len(params) { - case 0: - return "" - case 1: - start := 1 - end := params[0] - if end < start { - increment = -1 - } - return intArrayToString(untilStep(start, end+increment, increment), " ") - case 3: - start := params[0] - end := params[2] - step := params[1] - if end < start { - increment = -1 - if step > 0 { - return "" - } - } - return intArrayToString(untilStep(start, end+increment, step), " ") - case 2: - start := params[0] - end := params[1] - step := 1 - if end < start { - step = -1 - } - return intArrayToString(untilStep(start, end+step, step), " ") - default: - return "" - } -} - -func intArrayToString(slice []int, delimeter string) string { - return strings.Trim(strings.Join(strings.Fields(fmt.Sprint(slice)), delimeter), "[]") -} - -// performs a float and subsequent decimal.Decimal conversion on inputs, -// and iterates through a and b executing the mathmetical operation f -func execDecimalOp(a interface{}, b []interface{}, f func(d1, d2 decimal.Decimal) decimal.Decimal) float64 { - prt := decimal.NewFromFloat(toFloat64(a)) - for _, x := range b { - dx := decimal.NewFromFloat(toFloat64(x)) - prt = f(prt, dx) - } - rslt, _ := prt.Float64() - return rslt -} diff --git a/vendor/github.com/Masterminds/sprig/v3/reflect.go b/vendor/github.com/Masterminds/sprig/v3/reflect.go deleted file mode 100644 index 8a65c132..00000000 --- a/vendor/github.com/Masterminds/sprig/v3/reflect.go +++ /dev/null @@ -1,28 +0,0 @@ -package sprig - -import ( - "fmt" - "reflect" -) - -// typeIs returns true if the src is the type named in target. 
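Before the reflect helpers' definitions continue, an aside on the numeric helpers removed just above, whose behavior is not obvious from their signatures. A minimal sketch, as hypothetical calls against the unexported functions:

    round(123.5555, 3)      // 123.556 (rounds half up by default)
    round(123.5555, 3, .6)  // 123.555 (the optional arg raises the threshold to .6)
    untilStep(0, 10, 3)     // [0 3 6 9] (half-open range, like Python's range)
    until(-3)               // [0 -1 -2] (counts down for negative bounds)
    seq(3)                  // "1 2 3" (one argument counts from 1, GNU-seq style)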
-func typeIs(target string, src interface{}) bool { - return target == typeOf(src) -} - -func typeIsLike(target string, src interface{}) bool { - t := typeOf(src) - return target == t || "*"+target == t -} - -func typeOf(src interface{}) string { - return fmt.Sprintf("%T", src) -} - -func kindIs(target string, src interface{}) bool { - return target == kindOf(src) -} - -func kindOf(src interface{}) string { - return reflect.ValueOf(src).Kind().String() -} diff --git a/vendor/github.com/Masterminds/sprig/v3/regex.go b/vendor/github.com/Masterminds/sprig/v3/regex.go deleted file mode 100644 index fab55101..00000000 --- a/vendor/github.com/Masterminds/sprig/v3/regex.go +++ /dev/null @@ -1,83 +0,0 @@ -package sprig - -import ( - "regexp" -) - -func regexMatch(regex string, s string) bool { - match, _ := regexp.MatchString(regex, s) - return match -} - -func mustRegexMatch(regex string, s string) (bool, error) { - return regexp.MatchString(regex, s) -} - -func regexFindAll(regex string, s string, n int) []string { - r := regexp.MustCompile(regex) - return r.FindAllString(s, n) -} - -func mustRegexFindAll(regex string, s string, n int) ([]string, error) { - r, err := regexp.Compile(regex) - if err != nil { - return []string{}, err - } - return r.FindAllString(s, n), nil -} - -func regexFind(regex string, s string) string { - r := regexp.MustCompile(regex) - return r.FindString(s) -} - -func mustRegexFind(regex string, s string) (string, error) { - r, err := regexp.Compile(regex) - if err != nil { - return "", err - } - return r.FindString(s), nil -} - -func regexReplaceAll(regex string, s string, repl string) string { - r := regexp.MustCompile(regex) - return r.ReplaceAllString(s, repl) -} - -func mustRegexReplaceAll(regex string, s string, repl string) (string, error) { - r, err := regexp.Compile(regex) - if err != nil { - return "", err - } - return r.ReplaceAllString(s, repl), nil -} - -func regexReplaceAllLiteral(regex string, s string, repl string) string { - r := regexp.MustCompile(regex) - return r.ReplaceAllLiteralString(s, repl) -} - -func mustRegexReplaceAllLiteral(regex string, s string, repl string) (string, error) { - r, err := regexp.Compile(regex) - if err != nil { - return "", err - } - return r.ReplaceAllLiteralString(s, repl), nil -} - -func regexSplit(regex string, s string, n int) []string { - r := regexp.MustCompile(regex) - return r.Split(s, n) -} - -func mustRegexSplit(regex string, s string, n int) ([]string, error) { - r, err := regexp.Compile(regex) - if err != nil { - return []string{}, err - } - return r.Split(s, n), nil -} - -func regexQuoteMeta(s string) string { - return regexp.QuoteMeta(s) -} diff --git a/vendor/github.com/Masterminds/sprig/v3/semver.go b/vendor/github.com/Masterminds/sprig/v3/semver.go deleted file mode 100644 index 3fbe08aa..00000000 --- a/vendor/github.com/Masterminds/sprig/v3/semver.go +++ /dev/null @@ -1,23 +0,0 @@ -package sprig - -import ( - sv2 "github.com/Masterminds/semver/v3" -) - -func semverCompare(constraint, version string) (bool, error) { - c, err := sv2.NewConstraint(constraint) - if err != nil { - return false, err - } - - v, err := sv2.NewVersion(version) - if err != nil { - return false, err - } - - return c.Check(v), nil -} - -func semver(version string) (*sv2.Version, error) { - return sv2.NewVersion(version) -} diff --git a/vendor/github.com/Masterminds/sprig/v3/strings.go b/vendor/github.com/Masterminds/sprig/v3/strings.go deleted file mode 100644 index e0ae628c..00000000 --- 
a/vendor/github.com/Masterminds/sprig/v3/strings.go +++ /dev/null @@ -1,236 +0,0 @@ -package sprig - -import ( - "encoding/base32" - "encoding/base64" - "fmt" - "reflect" - "strconv" - "strings" - - util "github.com/Masterminds/goutils" -) - -func base64encode(v string) string { - return base64.StdEncoding.EncodeToString([]byte(v)) -} - -func base64decode(v string) string { - data, err := base64.StdEncoding.DecodeString(v) - if err != nil { - return err.Error() - } - return string(data) -} - -func base32encode(v string) string { - return base32.StdEncoding.EncodeToString([]byte(v)) -} - -func base32decode(v string) string { - data, err := base32.StdEncoding.DecodeString(v) - if err != nil { - return err.Error() - } - return string(data) -} - -func abbrev(width int, s string) string { - if width < 4 { - return s - } - r, _ := util.Abbreviate(s, width) - return r -} - -func abbrevboth(left, right int, s string) string { - if right < 4 || left > 0 && right < 7 { - return s - } - r, _ := util.AbbreviateFull(s, left, right) - return r -} -func initials(s string) string { - // Wrap this just to eliminate the var args, which templates don't do well. - return util.Initials(s) -} - -func randAlphaNumeric(count int) string { - // It is not possible, it appears, to actually generate an error here. - r, _ := util.CryptoRandomAlphaNumeric(count) - return r -} - -func randAlpha(count int) string { - r, _ := util.CryptoRandomAlphabetic(count) - return r -} - -func randAscii(count int) string { - r, _ := util.CryptoRandomAscii(count) - return r -} - -func randNumeric(count int) string { - r, _ := util.CryptoRandomNumeric(count) - return r -} - -func untitle(str string) string { - return util.Uncapitalize(str) -} - -func quote(str ...interface{}) string { - out := make([]string, 0, len(str)) - for _, s := range str { - if s != nil { - out = append(out, fmt.Sprintf("%q", strval(s))) - } - } - return strings.Join(out, " ") -} - -func squote(str ...interface{}) string { - out := make([]string, 0, len(str)) - for _, s := range str { - if s != nil { - out = append(out, fmt.Sprintf("'%v'", s)) - } - } - return strings.Join(out, " ") -} - -func cat(v ...interface{}) string { - v = removeNilElements(v) - r := strings.TrimSpace(strings.Repeat("%v ", len(v))) - return fmt.Sprintf(r, v...) 
-} - -func indent(spaces int, v string) string { - pad := strings.Repeat(" ", spaces) - return pad + strings.Replace(v, "\n", "\n"+pad, -1) -} - -func nindent(spaces int, v string) string { - return "\n" + indent(spaces, v) -} - -func replace(old, new, src string) string { - return strings.Replace(src, old, new, -1) -} - -func plural(one, many string, count int) string { - if count == 1 { - return one - } - return many -} - -func strslice(v interface{}) []string { - switch v := v.(type) { - case []string: - return v - case []interface{}: - b := make([]string, 0, len(v)) - for _, s := range v { - if s != nil { - b = append(b, strval(s)) - } - } - return b - default: - val := reflect.ValueOf(v) - switch val.Kind() { - case reflect.Array, reflect.Slice: - l := val.Len() - b := make([]string, 0, l) - for i := 0; i < l; i++ { - value := val.Index(i).Interface() - if value != nil { - b = append(b, strval(value)) - } - } - return b - default: - if v == nil { - return []string{} - } - - return []string{strval(v)} - } - } -} - -func removeNilElements(v []interface{}) []interface{} { - newSlice := make([]interface{}, 0, len(v)) - for _, i := range v { - if i != nil { - newSlice = append(newSlice, i) - } - } - return newSlice -} - -func strval(v interface{}) string { - switch v := v.(type) { - case string: - return v - case []byte: - return string(v) - case error: - return v.Error() - case fmt.Stringer: - return v.String() - default: - return fmt.Sprintf("%v", v) - } -} - -func trunc(c int, s string) string { - if c < 0 && len(s)+c > 0 { - return s[len(s)+c:] - } - if c >= 0 && len(s) > c { - return s[:c] - } - return s -} - -func join(sep string, v interface{}) string { - return strings.Join(strslice(v), sep) -} - -func split(sep, orig string) map[string]string { - parts := strings.Split(orig, sep) - res := make(map[string]string, len(parts)) - for i, v := range parts { - res["_"+strconv.Itoa(i)] = v - } - return res -} - -func splitn(sep string, n int, orig string) map[string]string { - parts := strings.SplitN(orig, sep, n) - res := make(map[string]string, len(parts)) - for i, v := range parts { - res["_"+strconv.Itoa(i)] = v - } - return res -} - -// substring creates a substring of the given string. -// -// If start is < 0, this calls string[:end]. -// -// If start is >= 0 and end < 0 or end bigger than s length, this calls string[start:] -// -// Otherwise, this calls string[start, end]. 
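Concretely, the clamping rules spelled out above work out as follows; hypothetical values, using the function defined below:

    substring(0, 5, "hello world")   // "hello"
    substring(-1, 5, "hello world")  // "hello" (negative start falls back to s[:end])
    substring(6, -1, "hello world")  // "world" (negative end falls back to s[start:])
    substring(6, 99, "hello world")  // "world" (end past len(s) falls back to s[start:])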
-func substring(start, end int, s string) string { - if start < 0 { - return s[:end] - } - if end < 0 || end > len(s) { - return s[start:] - } - return s[start:end] -} diff --git a/vendor/github.com/Masterminds/sprig/v3/url.go b/vendor/github.com/Masterminds/sprig/v3/url.go deleted file mode 100644 index b8e120e1..00000000 --- a/vendor/github.com/Masterminds/sprig/v3/url.go +++ /dev/null @@ -1,66 +0,0 @@ -package sprig - -import ( - "fmt" - "net/url" - "reflect" -) - -func dictGetOrEmpty(dict map[string]interface{}, key string) string { - value, ok := dict[key] - if !ok { - return "" - } - tp := reflect.TypeOf(value).Kind() - if tp != reflect.String { - panic(fmt.Sprintf("unable to parse %s key, must be of type string, but %s found", key, tp.String())) - } - return reflect.ValueOf(value).String() -} - -// parses given URL to return dict object -func urlParse(v string) map[string]interface{} { - dict := map[string]interface{}{} - parsedURL, err := url.Parse(v) - if err != nil { - panic(fmt.Sprintf("unable to parse url: %s", err)) - } - dict["scheme"] = parsedURL.Scheme - dict["host"] = parsedURL.Host - dict["hostname"] = parsedURL.Hostname() - dict["path"] = parsedURL.Path - dict["query"] = parsedURL.RawQuery - dict["opaque"] = parsedURL.Opaque - dict["fragment"] = parsedURL.Fragment - if parsedURL.User != nil { - dict["userinfo"] = parsedURL.User.String() - } else { - dict["userinfo"] = "" - } - - return dict -} - -// join given dict to URL string -func urlJoin(d map[string]interface{}) string { - resURL := url.URL{ - Scheme: dictGetOrEmpty(d, "scheme"), - Host: dictGetOrEmpty(d, "host"), - Path: dictGetOrEmpty(d, "path"), - RawQuery: dictGetOrEmpty(d, "query"), - Opaque: dictGetOrEmpty(d, "opaque"), - Fragment: dictGetOrEmpty(d, "fragment"), - } - userinfo := dictGetOrEmpty(d, "userinfo") - var user *url.Userinfo - if userinfo != "" { - tempURL, err := url.Parse(fmt.Sprintf("proto://%s@host", userinfo)) - if err != nil { - panic(fmt.Sprintf("unable to parse userinfo in dict: %s", err)) - } - user = tempURL.User - } - - resURL.User = user - return resURL.String() -} diff --git a/vendor/github.com/antlr/antlr4/LICENSE.txt b/vendor/github.com/antlr/antlr4/LICENSE.txt deleted file mode 100644 index 2042d1bd..00000000 --- a/vendor/github.com/antlr/antlr4/LICENSE.txt +++ /dev/null @@ -1,52 +0,0 @@ -[The "BSD 3-clause license"] -Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions -are met: - - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - 3. Neither the name of the copyright holder nor the names of its contributors - may be used to endorse or promote products derived from this software - without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR -IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES -OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
-IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, -INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT -NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF -THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -===== - -MIT License for codepointat.js from https://git.io/codepointat -MIT License for fromcodepoint.js from https://git.io/vDW1m - -Copyright Mathias Bynens - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn.go deleted file mode 100644 index 1592212e..00000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn.go +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -var ATNInvalidAltNumber int - -type ATN struct { - // DecisionToState is the decision points for all rules, subrules, optional - // blocks, ()+, ()*, etc. Used to build DFA predictors for them. - DecisionToState []DecisionState - - // grammarType is the ATN type and is used for deserializing ATNs from strings. - grammarType int - - // lexerActions is referenced by action transitions in the ATN for lexer ATNs. - lexerActions []LexerAction - - // maxTokenType is the maximum value for any symbol recognized by a transition in the ATN. - maxTokenType int - - modeNameToStartState map[string]*TokensStartState - - modeToStartState []*TokensStartState - - // ruleToStartState maps from rule index to starting state number. - ruleToStartState []*RuleStartState - - // ruleToStopState maps from rule index to stop state number. - ruleToStopState []*RuleStopState - - // ruleToTokenType maps the rule index to the resulting token type for lexer - // ATNs. For parser ATNs, it maps the rule index to the generated bypass token - // type if ATNDeserializationOptions.isGenerateRuleBypassTransitions was - // specified, and otherwise is nil. 
- ruleToTokenType []int - - states []ATNState -} - -func NewATN(grammarType int, maxTokenType int) *ATN { - return &ATN{ - grammarType: grammarType, - maxTokenType: maxTokenType, - modeNameToStartState: make(map[string]*TokensStartState), - } -} - -// NextTokensInContext computes the set of valid tokens that can occur starting -// in state s. If ctx is nil, the set of tokens will not include what can follow -// the rule surrounding s. In other words, the set will be restricted to tokens -// reachable staying within the rule of s. -func (a *ATN) NextTokensInContext(s ATNState, ctx RuleContext) *IntervalSet { - return NewLL1Analyzer(a).Look(s, nil, ctx) -} - -// NextTokensNoContext computes the set of valid tokens that can occur starting -// in s and staying in same rule. Token.EPSILON is in set if we reach end of -// rule. -func (a *ATN) NextTokensNoContext(s ATNState) *IntervalSet { - if s.GetNextTokenWithinRule() != nil { - return s.GetNextTokenWithinRule() - } - - s.SetNextTokenWithinRule(a.NextTokensInContext(s, nil)) - s.GetNextTokenWithinRule().readOnly = true - - return s.GetNextTokenWithinRule() -} - -func (a *ATN) NextTokens(s ATNState, ctx RuleContext) *IntervalSet { - if ctx == nil { - return a.NextTokensNoContext(s) - } - - return a.NextTokensInContext(s, ctx) -} - -func (a *ATN) addState(state ATNState) { - if state != nil { - state.SetATN(a) - state.SetStateNumber(len(a.states)) - } - - a.states = append(a.states, state) -} - -func (a *ATN) removeState(state ATNState) { - a.states[state.GetStateNumber()] = nil // Just free the memory; don't shift states in the slice -} - -func (a *ATN) defineDecisionState(s DecisionState) int { - a.DecisionToState = append(a.DecisionToState, s) - s.setDecision(len(a.DecisionToState) - 1) - - return s.getDecision() -} - -func (a *ATN) getDecisionState(decision int) DecisionState { - if len(a.DecisionToState) == 0 { - return nil - } - - return a.DecisionToState[decision] -} - -// getExpectedTokens computes the set of input symbols which could follow ATN -// state number stateNumber in the specified full parse context ctx and returns -// the set of potentially valid input symbols which could follow the specified -// state in the specified context. This method considers the complete parser -// context, but does not evaluate semantic predicates (i.e. all predicates -// encountered during the calculation are assumed true). If a path in the ATN -// exists from the starting state to the RuleStopState of the outermost context -// without Matching any symbols, Token.EOF is added to the returned set. -// -// A nil ctx defaults to ParserRuleContext.EMPTY. -// -// It panics if the ATN does not contain state stateNumber. 
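The climb described above is easier to follow on a toy model than in the runtime itself. The sketch below is a simplification with made-up types and the literal markers "EPS" and "EOF" standing in for TokenEpsilon and TokenEOF; it is not the ANTLR API:

    // ctxFrame stands in for the invoking-rule chain: each frame carries
    // the follow set of the rule invocation one level up.
    type ctxFrame struct {
        follow map[string]bool
        parent *ctxFrame
    }

    // expectedTokens merges follow sets while epsilon is still reachable:
    // epsilon at a level means the rule can end there, so the caller's
    // follow set is added; epsilon surviving past the root becomes EOF.
    func expectedTokens(following map[string]bool, ctx *ctxFrame) map[string]bool {
        out := make(map[string]bool, len(following))
        for t := range following {
            out[t] = true
        }
        for ctx != nil && out["EPS"] {
            delete(out, "EPS")
            for t := range ctx.follow {
                out[t] = true
            }
            ctx = ctx.parent
        }
        if out["EPS"] { // ran out of contexts while still able to finish
            delete(out, "EPS")
            out["EOF"] = true
        }
        return out
    }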
-func (a *ATN) getExpectedTokens(stateNumber int, ctx RuleContext) *IntervalSet { - if stateNumber < 0 || stateNumber >= len(a.states) { - panic("Invalid state number.") - } - - s := a.states[stateNumber] - following := a.NextTokens(s, nil) - - if !following.contains(TokenEpsilon) { - return following - } - - expected := NewIntervalSet() - - expected.addSet(following) - expected.removeOne(TokenEpsilon) - - for ctx != nil && ctx.GetInvokingState() >= 0 && following.contains(TokenEpsilon) { - invokingState := a.states[ctx.GetInvokingState()] - rt := invokingState.GetTransitions()[0] - - following = a.NextTokens(rt.(*RuleTransition).followState, nil) - expected.addSet(following) - expected.removeOne(TokenEpsilon) - ctx = ctx.GetParent().(RuleContext) - } - - if following.contains(TokenEpsilon) { - expected.addOne(TokenEOF) - } - - return expected -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config.go deleted file mode 100644 index 0535d524..00000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config.go +++ /dev/null @@ -1,295 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" -) - -type comparable interface { - equals(other interface{}) bool -} - -// ATNConfig is a tuple: (ATN state, predicted alt, syntactic, semantic -// context). The syntactic context is a graph-structured stack node whose -// path(s) to the root is the rule invocation(s) chain used to arrive at the -// state. The semantic context is the tree of semantic predicates encountered -// before reaching an ATN state. -type ATNConfig interface { - comparable - - hash() int - - GetState() ATNState - GetAlt() int - GetSemanticContext() SemanticContext - - GetContext() PredictionContext - SetContext(PredictionContext) - - GetReachesIntoOuterContext() int - SetReachesIntoOuterContext(int) - - String() string - - getPrecedenceFilterSuppressed() bool - setPrecedenceFilterSuppressed(bool) -} - -type BaseATNConfig struct { - precedenceFilterSuppressed bool - state ATNState - alt int - context PredictionContext - semanticContext SemanticContext - reachesIntoOuterContext int -} - -func NewBaseATNConfig7(old *BaseATNConfig) *BaseATNConfig { // TODO: Dup - return &BaseATNConfig{ - state: old.state, - alt: old.alt, - context: old.context, - semanticContext: old.semanticContext, - reachesIntoOuterContext: old.reachesIntoOuterContext, - } -} - -func NewBaseATNConfig6(state ATNState, alt int, context PredictionContext) *BaseATNConfig { - return NewBaseATNConfig5(state, alt, context, SemanticContextNone) -} - -func NewBaseATNConfig5(state ATNState, alt int, context PredictionContext, semanticContext SemanticContext) *BaseATNConfig { - if semanticContext == nil { - panic("semanticContext cannot be nil") // TODO: Necessary? 
- } - - return &BaseATNConfig{state: state, alt: alt, context: context, semanticContext: semanticContext} -} - -func NewBaseATNConfig4(c ATNConfig, state ATNState) *BaseATNConfig { - return NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext()) -} - -func NewBaseATNConfig3(c ATNConfig, state ATNState, semanticContext SemanticContext) *BaseATNConfig { - return NewBaseATNConfig(c, state, c.GetContext(), semanticContext) -} - -func NewBaseATNConfig2(c ATNConfig, semanticContext SemanticContext) *BaseATNConfig { - return NewBaseATNConfig(c, c.GetState(), c.GetContext(), semanticContext) -} - -func NewBaseATNConfig1(c ATNConfig, state ATNState, context PredictionContext) *BaseATNConfig { - return NewBaseATNConfig(c, state, context, c.GetSemanticContext()) -} - -func NewBaseATNConfig(c ATNConfig, state ATNState, context PredictionContext, semanticContext SemanticContext) *BaseATNConfig { - if semanticContext == nil { - panic("semanticContext cannot be nil") - } - - return &BaseATNConfig{ - state: state, - alt: c.GetAlt(), - context: context, - semanticContext: semanticContext, - reachesIntoOuterContext: c.GetReachesIntoOuterContext(), - precedenceFilterSuppressed: c.getPrecedenceFilterSuppressed(), - } -} - -func (b *BaseATNConfig) getPrecedenceFilterSuppressed() bool { - return b.precedenceFilterSuppressed -} - -func (b *BaseATNConfig) setPrecedenceFilterSuppressed(v bool) { - b.precedenceFilterSuppressed = v -} - -func (b *BaseATNConfig) GetState() ATNState { - return b.state -} - -func (b *BaseATNConfig) GetAlt() int { - return b.alt -} - -func (b *BaseATNConfig) SetContext(v PredictionContext) { - b.context = v -} -func (b *BaseATNConfig) GetContext() PredictionContext { - return b.context -} - -func (b *BaseATNConfig) GetSemanticContext() SemanticContext { - return b.semanticContext -} - -func (b *BaseATNConfig) GetReachesIntoOuterContext() int { - return b.reachesIntoOuterContext -} - -func (b *BaseATNConfig) SetReachesIntoOuterContext(v int) { - b.reachesIntoOuterContext = v -} - -// An ATN configuration is equal to another if both have the same state, they -// predict the same alternative, and syntactic/semantic contexts are the same. 
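One note on the contract before the method itself, summarized here in shorthand (not the runtime API):

    // Two configs are equal iff they agree on state number, alt, semantic
    // context, the precedence-filter flag, and (nil-safely) the prediction
    // context. hash() further down folds every one of those fields except
    // the precedence-filter flag; that still honors the hash/equals
    // contract (equal configs hash identically), but configs differing
    // only in that flag collide in hash-based sets.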
-func (b *BaseATNConfig) equals(o interface{}) bool { - if b == o { - return true - } - - var other, ok = o.(*BaseATNConfig) - - if !ok { - return false - } - - var equal bool - - if b.context == nil { - equal = other.context == nil - } else { - equal = b.context.equals(other.context) - } - - var ( - nums = b.state.GetStateNumber() == other.state.GetStateNumber() - alts = b.alt == other.alt - cons = b.semanticContext.equals(other.semanticContext) - sups = b.precedenceFilterSuppressed == other.precedenceFilterSuppressed - ) - - return nums && alts && cons && sups && equal -} - -func (b *BaseATNConfig) hash() int { - var c int - if b.context != nil { - c = b.context.hash() - } - - h := murmurInit(7) - h = murmurUpdate(h, b.state.GetStateNumber()) - h = murmurUpdate(h, b.alt) - h = murmurUpdate(h, c) - h = murmurUpdate(h, b.semanticContext.hash()) - return murmurFinish(h, 4) -} - -func (b *BaseATNConfig) String() string { - var s1, s2, s3 string - - if b.context != nil { - s1 = ",[" + fmt.Sprint(b.context) + "]" - } - - if b.semanticContext != SemanticContextNone { - s2 = "," + fmt.Sprint(b.semanticContext) - } - - if b.reachesIntoOuterContext > 0 { - s3 = ",up=" + fmt.Sprint(b.reachesIntoOuterContext) - } - - return fmt.Sprintf("(%v,%v%v%v%v)", b.state, b.alt, s1, s2, s3) -} - -type LexerATNConfig struct { - *BaseATNConfig - lexerActionExecutor *LexerActionExecutor - passedThroughNonGreedyDecision bool -} - -func NewLexerATNConfig6(state ATNState, alt int, context PredictionContext) *LexerATNConfig { - return &LexerATNConfig{BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone)} -} - -func NewLexerATNConfig5(state ATNState, alt int, context PredictionContext, lexerActionExecutor *LexerActionExecutor) *LexerATNConfig { - return &LexerATNConfig{ - BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone), - lexerActionExecutor: lexerActionExecutor, - } -} - -func NewLexerATNConfig4(c *LexerATNConfig, state ATNState) *LexerATNConfig { - return &LexerATNConfig{ - BaseATNConfig: NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext()), - lexerActionExecutor: c.lexerActionExecutor, - passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state), - } -} - -func NewLexerATNConfig3(c *LexerATNConfig, state ATNState, lexerActionExecutor *LexerActionExecutor) *LexerATNConfig { - return &LexerATNConfig{ - BaseATNConfig: NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext()), - lexerActionExecutor: lexerActionExecutor, - passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state), - } -} - -func NewLexerATNConfig2(c *LexerATNConfig, state ATNState, context PredictionContext) *LexerATNConfig { - return &LexerATNConfig{ - BaseATNConfig: NewBaseATNConfig(c, state, context, c.GetSemanticContext()), - lexerActionExecutor: c.lexerActionExecutor, - passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state), - } -} - -func NewLexerATNConfig1(state ATNState, alt int, context PredictionContext) *LexerATNConfig { - return &LexerATNConfig{BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone)} -} - -func (l *LexerATNConfig) hash() int { - var f int - if l.passedThroughNonGreedyDecision { - f = 1 - } else { - f = 0 - } - h := murmurInit(7) - h = murmurUpdate(h, l.state.hash()) - h = murmurUpdate(h, l.alt) - h = murmurUpdate(h, l.context.hash()) - h = murmurUpdate(h, l.semanticContext.hash()) - h = murmurUpdate(h, f) - h = murmurUpdate(h, l.lexerActionExecutor.hash()) - h = murmurFinish(h, 6) - return h -} - -func (l 
*LexerATNConfig) equals(other interface{}) bool { - var othert, ok = other.(*LexerATNConfig) - - if l == other { - return true - } else if !ok { - return false - } else if l.passedThroughNonGreedyDecision != othert.passedThroughNonGreedyDecision { - return false - } - - var b bool - - if l.lexerActionExecutor != nil { - b = !l.lexerActionExecutor.equals(othert.lexerActionExecutor) - } else { - b = othert.lexerActionExecutor != nil - } - - if b { - return false - } - - return l.BaseATNConfig.equals(othert.BaseATNConfig) -} - - -func checkNonGreedyDecision(source *LexerATNConfig, target ATNState) bool { - var ds, ok = target.(DecisionState) - - return source.passedThroughNonGreedyDecision || (ok && ds.getNonGreedy()) -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config_set.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config_set.go deleted file mode 100644 index d9f74755..00000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config_set.go +++ /dev/null @@ -1,387 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import "fmt" - -type ATNConfigSet interface { - hash() int - Add(ATNConfig, *DoubleDict) bool - AddAll([]ATNConfig) bool - - GetStates() *Set - GetPredicates() []SemanticContext - GetItems() []ATNConfig - - OptimizeConfigs(interpreter *BaseATNSimulator) - - Equals(other interface{}) bool - - Length() int - IsEmpty() bool - Contains(ATNConfig) bool - ContainsFast(ATNConfig) bool - Clear() - String() string - - HasSemanticContext() bool - SetHasSemanticContext(v bool) - - ReadOnly() bool - SetReadOnly(bool) - - GetConflictingAlts() *BitSet - SetConflictingAlts(*BitSet) - - FullContext() bool - - GetUniqueAlt() int - SetUniqueAlt(int) - - GetDipsIntoOuterContext() bool - SetDipsIntoOuterContext(bool) -} - -// BaseATNConfigSet is a specialized set of ATNConfig that tracks information -// about its elements and can combine similar configurations using a -// graph-structured stack. -type BaseATNConfigSet struct { - cachedHash int - - // configLookup is used to determine whether two BaseATNConfigSets are equal. We - // need all configurations with the same (s, i, _, semctx) to be equal. A key - // effectively doubles the number of objects associated with ATNConfigs. All - // keys are hashed by (s, i, _, pi), not including the context. Wiped out when - // read-only because a set becomes a DFA state. - configLookup *Set - - // configs is the added elements. - configs []ATNConfig - - // TODO: These fields make me pretty uncomfortable, but it is nice to pack up - // info together because it saves recomputation. Can we track conflicts as they - // are added to save scanning configs later? - conflictingAlts *BitSet - - // dipsIntoOuterContext is used by parsers and lexers. In a lexer, it indicates - // we hit a pred while computing a closure operation. Do not make a DFA state - // from the BaseATNConfigSet in this case. TODO: How is this used by parsers? - dipsIntoOuterContext bool - - // fullCtx is whether it is part of a full context LL prediction. Used to - // determine how to merge $. It is a wildcard with SLL, but not for an LL - // context merge. - fullCtx bool - - // Used in parser and lexer. In lexer, it indicates we hit a pred - // while computing a closure operation. Don't make a DFA state from a. - hasSemanticContext bool - - // readOnly is whether it is read-only. 
Do not - // allow any code to manipulate the set if true because DFA states will point at - // sets and those must not change. It not protect other fields; conflictingAlts - // in particular, which is assigned after readOnly. - readOnly bool - - // TODO: These fields make me pretty uncomfortable, but it is nice to pack up - // info together because it saves recomputation. Can we track conflicts as they - // are added to save scanning configs later? - uniqueAlt int -} - -func NewBaseATNConfigSet(fullCtx bool) *BaseATNConfigSet { - return &BaseATNConfigSet{ - cachedHash: -1, - configLookup: NewSet(nil, equalATNConfigs), - fullCtx: fullCtx, - } -} - -// Add merges contexts with existing configs for (s, i, pi, _), where s is the -// ATNConfig.state, i is the ATNConfig.alt, and pi is the -// ATNConfig.semanticContext. We use (s,i,pi) as the key. Updates -// dipsIntoOuterContext and hasSemanticContext when necessary. -func (b *BaseATNConfigSet) Add(config ATNConfig, mergeCache *DoubleDict) bool { - if b.readOnly { - panic("set is read-only") - } - - if config.GetSemanticContext() != SemanticContextNone { - b.hasSemanticContext = true - } - - if config.GetReachesIntoOuterContext() > 0 { - b.dipsIntoOuterContext = true - } - - existing := b.configLookup.add(config).(ATNConfig) - - if existing == config { - b.cachedHash = -1 - b.configs = append(b.configs, config) // Track order here - - return true - } - - // Merge a previous (s, i, pi, _) with it and save the result - rootIsWildcard := !b.fullCtx - merged := merge(existing.GetContext(), config.GetContext(), rootIsWildcard, mergeCache) - - // No need to check for existing.context because config.context is in the cache, - // since the only way to create new graphs is the "call rule" and here. We cache - // at both places. 
- existing.SetReachesIntoOuterContext(intMax(existing.GetReachesIntoOuterContext(), config.GetReachesIntoOuterContext())) - - // Preserve the precedence filter suppression during the merge - if config.getPrecedenceFilterSuppressed() { - existing.setPrecedenceFilterSuppressed(true) - } - - // Replace the context because there is no need to do alt mapping - existing.SetContext(merged) - - return true -} - -func (b *BaseATNConfigSet) GetStates() *Set { - states := NewSet(nil, nil) - - for i := 0; i < len(b.configs); i++ { - states.add(b.configs[i].GetState()) - } - - return states -} - -func (b *BaseATNConfigSet) HasSemanticContext() bool { - return b.hasSemanticContext -} - -func (b *BaseATNConfigSet) SetHasSemanticContext(v bool) { - b.hasSemanticContext = v -} - -func (b *BaseATNConfigSet) GetPredicates() []SemanticContext { - preds := make([]SemanticContext, 0) - - for i := 0; i < len(b.configs); i++ { - c := b.configs[i].GetSemanticContext() - - if c != SemanticContextNone { - preds = append(preds, c) - } - } - - return preds -} - -func (b *BaseATNConfigSet) GetItems() []ATNConfig { - return b.configs -} - -func (b *BaseATNConfigSet) OptimizeConfigs(interpreter *BaseATNSimulator) { - if b.readOnly { - panic("set is read-only") - } - - if b.configLookup.length() == 0 { - return - } - - for i := 0; i < len(b.configs); i++ { - config := b.configs[i] - - config.SetContext(interpreter.getCachedContext(config.GetContext())) - } -} - -func (b *BaseATNConfigSet) AddAll(coll []ATNConfig) bool { - for i := 0; i < len(coll); i++ { - b.Add(coll[i], nil) - } - - return false -} - -func (b *BaseATNConfigSet) Equals(other interface{}) bool { - if b == other { - return true - } else if _, ok := other.(*BaseATNConfigSet); !ok { - return false - } - - other2 := other.(*BaseATNConfigSet) - - return b.configs != nil && - // TODO: b.configs.equals(other2.configs) && // TODO: Is b necessary? 
- b.fullCtx == other2.fullCtx && - b.uniqueAlt == other2.uniqueAlt && - b.conflictingAlts == other2.conflictingAlts && - b.hasSemanticContext == other2.hasSemanticContext && - b.dipsIntoOuterContext == other2.dipsIntoOuterContext -} - -func (b *BaseATNConfigSet) hash() int { - if b.readOnly { - if b.cachedHash == -1 { - b.cachedHash = b.hashCodeConfigs() - } - - return b.cachedHash - } - - return b.hashCodeConfigs() -} - -func (b *BaseATNConfigSet) hashCodeConfigs() int { - h := murmurInit(1) - for _, c := range b.configs { - if c != nil { - h = murmurUpdate(h, c.hash()) - } - } - return murmurFinish(h, len(b.configs)) -} - -func (b *BaseATNConfigSet) Length() int { - return len(b.configs) -} - -func (b *BaseATNConfigSet) IsEmpty() bool { - return len(b.configs) == 0 -} - -func (b *BaseATNConfigSet) Contains(item ATNConfig) bool { - if b.configLookup == nil { - panic("not implemented for read-only sets") - } - - return b.configLookup.contains(item) -} - -func (b *BaseATNConfigSet) ContainsFast(item ATNConfig) bool { - if b.configLookup == nil { - panic("not implemented for read-only sets") - } - - return b.configLookup.contains(item) // TODO: containsFast is not implemented for Set -} - -func (b *BaseATNConfigSet) Clear() { - if b.readOnly { - panic("set is read-only") - } - - b.configs = make([]ATNConfig, 0) - b.cachedHash = -1 - b.configLookup = NewSet(nil, equalATNConfigs) -} - -func (b *BaseATNConfigSet) FullContext() bool { - return b.fullCtx -} - -func (b *BaseATNConfigSet) GetDipsIntoOuterContext() bool { - return b.dipsIntoOuterContext -} - -func (b *BaseATNConfigSet) SetDipsIntoOuterContext(v bool) { - b.dipsIntoOuterContext = v -} - -func (b *BaseATNConfigSet) GetUniqueAlt() int { - return b.uniqueAlt -} - -func (b *BaseATNConfigSet) SetUniqueAlt(v int) { - b.uniqueAlt = v -} - -func (b *BaseATNConfigSet) GetConflictingAlts() *BitSet { - return b.conflictingAlts -} - -func (b *BaseATNConfigSet) SetConflictingAlts(v *BitSet) { - b.conflictingAlts = v -} - -func (b *BaseATNConfigSet) ReadOnly() bool { - return b.readOnly -} - -func (b *BaseATNConfigSet) SetReadOnly(readOnly bool) { - b.readOnly = readOnly - - if readOnly { - b.configLookup = nil // Read only, so no need for the lookup cache - } -} - -func (b *BaseATNConfigSet) String() string { - s := "[" - - for i, c := range b.configs { - s += c.String() - - if i != len(b.configs)-1 { - s += ", " - } - } - - s += "]" - - if b.hasSemanticContext { - s += ",hasSemanticContext=" + fmt.Sprint(b.hasSemanticContext) - } - - if b.uniqueAlt != ATNInvalidAltNumber { - s += ",uniqueAlt=" + fmt.Sprint(b.uniqueAlt) - } - - if b.conflictingAlts != nil { - s += ",conflictingAlts=" + b.conflictingAlts.String() - } - - if b.dipsIntoOuterContext { - s += ",dipsIntoOuterContext" - } - - return s -} - -type OrderedATNConfigSet struct { - *BaseATNConfigSet -} - -func NewOrderedATNConfigSet() *OrderedATNConfigSet { - b := NewBaseATNConfigSet(false) - - b.configLookup = NewSet(nil, nil) - - return &OrderedATNConfigSet{BaseATNConfigSet: b} -} - -func equalATNConfigs(a, b interface{}) bool { - if a == nil || b == nil { - return false - } - - if a == b { - return true - } - - var ai, ok = a.(ATNConfig) - var bi, ok1 = b.(ATNConfig) - - if !ok || !ok1 { - return false - } - - nums := ai.GetState().GetStateNumber() == bi.GetState().GetStateNumber() - alts := ai.GetAlt() == bi.GetAlt() - cons := ai.GetSemanticContext().equals(bi.GetSemanticContext()) - - return nums && alts && cons -} diff --git 
a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserialization_options.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserialization_options.go deleted file mode 100644 index 18b89efa..00000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserialization_options.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -var ATNDeserializationOptionsdefaultOptions = &ATNDeserializationOptions{true, false, false} - -type ATNDeserializationOptions struct { - readOnly bool - verifyATN bool - generateRuleBypassTransitions bool -} - -func NewATNDeserializationOptions(CopyFrom *ATNDeserializationOptions) *ATNDeserializationOptions { - o := new(ATNDeserializationOptions) - - if CopyFrom != nil { - o.readOnly = CopyFrom.readOnly - o.verifyATN = CopyFrom.verifyATN - o.generateRuleBypassTransitions = CopyFrom.generateRuleBypassTransitions - } - - return o -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserializer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserializer.go deleted file mode 100644 index 884d39cf..00000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserializer.go +++ /dev/null @@ -1,828 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "encoding/hex" - "fmt" - "strconv" - "strings" - "unicode/utf16" -) - -// This is the earliest supported serialized UUID. -// stick to serialized version for now, we don't need a UUID instance -var BaseSerializedUUID = "AADB8D7E-AEEF-4415-AD2B-8204D6CF042E" -var AddedUnicodeSMP = "59627784-3BE5-417A-B9EB-8131A7286089" - -// This list contains all of the currently supported UUIDs, ordered by when -// the feature first appeared in this branch. -var SupportedUUIDs = []string{BaseSerializedUUID, AddedUnicodeSMP} - -var SerializedVersion = 3 - -// This is the current serialized UUID. -var SerializedUUID = AddedUnicodeSMP - -type LoopEndStateIntPair struct { - item0 *LoopEndState - item1 int -} - -type BlockStartStateIntPair struct { - item0 BlockStartState - item1 int -} - -type ATNDeserializer struct { - deserializationOptions *ATNDeserializationOptions - data []rune - pos int - uuid string -} - -func NewATNDeserializer(options *ATNDeserializationOptions) *ATNDeserializer { - if options == nil { - options = ATNDeserializationOptionsdefaultOptions - } - - return &ATNDeserializer{deserializationOptions: options} -} - -func stringInSlice(a string, list []string) int { - for i, b := range list { - if b == a { - return i - } - } - - return -1 -} - -// isFeatureSupported determines if a particular serialized representation of an -// ATN supports a particular feature, identified by the UUID used for -// serializing the ATN at the time the feature was first introduced. Feature is -// the UUID marking the first time the feature was supported in the serialized -// ATN. ActualUuid is the UUID of the actual serialized ATN which is currently -// being deserialized. It returns true if actualUuid represents a serialized ATN -// at or after the feature identified by feature was introduced, and otherwise -// false. 
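The ordering rule described above reduces to an index comparison in SupportedUUIDs; with the two UUIDs declared earlier in this file, for some deserializer d (hypothetical receiver):

    d.isFeatureSupported(AddedUnicodeSMP, BaseSerializedUUID)  // false: the ATN predates SMP support
    d.isFeatureSupported(AddedUnicodeSMP, AddedUnicodeSMP)     // true
    d.isFeatureSupported(BaseSerializedUUID, AddedUnicodeSMP)  // true: newer ATNs keep older features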
-func (a *ATNDeserializer) isFeatureSupported(feature, actualUUID string) bool { - idx1 := stringInSlice(feature, SupportedUUIDs) - - if idx1 < 0 { - return false - } - - idx2 := stringInSlice(actualUUID, SupportedUUIDs) - - return idx2 >= idx1 -} - -func (a *ATNDeserializer) DeserializeFromUInt16(data []uint16) *ATN { - a.reset(utf16.Decode(data)) - a.checkVersion() - a.checkUUID() - - atn := a.readATN() - - a.readStates(atn) - a.readRules(atn) - a.readModes(atn) - - sets := make([]*IntervalSet, 0) - - // First, deserialize sets with 16-bit arguments <= U+FFFF. - sets = a.readSets(atn, sets, a.readInt) - // Next, if the ATN was serialized with the Unicode SMP feature, - // deserialize sets with 32-bit arguments <= U+10FFFF. - if (a.isFeatureSupported(AddedUnicodeSMP, a.uuid)) { - sets = a.readSets(atn, sets, a.readInt32) - } - - a.readEdges(atn, sets) - a.readDecisions(atn) - a.readLexerActions(atn) - a.markPrecedenceDecisions(atn) - a.verifyATN(atn) - - if a.deserializationOptions.generateRuleBypassTransitions && atn.grammarType == ATNTypeParser { - a.generateRuleBypassTransitions(atn) - // Re-verify after modification - a.verifyATN(atn) - } - - return atn - -} - -func (a *ATNDeserializer) reset(data []rune) { - temp := make([]rune, len(data)) - - for i, c := range data { - // Don't adjust the first value since that's the version number - if i == 0 { - temp[i] = c - } else if c > 1 { - temp[i] = c - 2 - } else { - temp[i] = c + 65533 - } - } - - a.data = temp - a.pos = 0 -} - -func (a *ATNDeserializer) checkVersion() { - version := a.readInt() - - if version != SerializedVersion { - panic("Could not deserialize ATN with version " + strconv.Itoa(version) + " (expected " + strconv.Itoa(SerializedVersion) + ").") - } -} - -func (a *ATNDeserializer) checkUUID() { - uuid := a.readUUID() - - if stringInSlice(uuid, SupportedUUIDs) < 0 { - panic("Could not deserialize ATN with UUID: " + uuid + " (expected " + SerializedUUID + " or a legacy UUID).") - } - - a.uuid = uuid -} - -func (a *ATNDeserializer) readATN() *ATN { - grammarType := a.readInt() - maxTokenType := a.readInt() - - return NewATN(grammarType, maxTokenType) -} - -func (a *ATNDeserializer) readStates(atn *ATN) { - loopBackStateNumbers := make([]LoopEndStateIntPair, 0) - endStateNumbers := make([]BlockStartStateIntPair, 0) - - nstates := a.readInt() - - for i := 0; i < nstates; i++ { - stype := a.readInt() - - // Ignore bad types of states - if stype == ATNStateInvalidType { - atn.addState(nil) - - continue - } - - ruleIndex := a.readInt() - - if ruleIndex == 0xFFFF { - ruleIndex = -1 - } - - s := a.stateFactory(stype, ruleIndex) - - if stype == ATNStateLoopEnd { - loopBackStateNumber := a.readInt() - - loopBackStateNumbers = append(loopBackStateNumbers, LoopEndStateIntPair{s.(*LoopEndState), loopBackStateNumber}) - } else if s2, ok := s.(BlockStartState); ok { - endStateNumber := a.readInt() - - endStateNumbers = append(endStateNumbers, BlockStartStateIntPair{s2, endStateNumber}) - } - - atn.addState(s) - } - - // Delay the assignment of loop back and end states until we know all the state - // instances have been initialized - for j := 0; j < len(loopBackStateNumbers); j++ { - pair := loopBackStateNumbers[j] - - pair.item0.loopBackState = atn.states[pair.item1] - } - - for j := 0; j < len(endStateNumbers); j++ { - pair := endStateNumbers[j] - - pair.item0.setEndState(atn.states[pair.item1].(*BlockEndState)) - } - - numNonGreedyStates := a.readInt() - - for j := 0; j < numNonGreedyStates; j++ { - stateNumber := a.readInt() - - 
atn.states[stateNumber].(DecisionState).setNonGreedy(true) - } - - numPrecedenceStates := a.readInt() - - for j := 0; j < numPrecedenceStates; j++ { - stateNumber := a.readInt() - - atn.states[stateNumber].(*RuleStartState).isPrecedenceRule = true - } -} - -func (a *ATNDeserializer) readRules(atn *ATN) { - nrules := a.readInt() - - if atn.grammarType == ATNTypeLexer { - atn.ruleToTokenType = make([]int, nrules) // TODO: initIntArray(nrules, 0) - } - - atn.ruleToStartState = make([]*RuleStartState, nrules) // TODO: initIntArray(nrules, 0) - - for i := 0; i < nrules; i++ { - s := a.readInt() - startState := atn.states[s].(*RuleStartState) - - atn.ruleToStartState[i] = startState - - if atn.grammarType == ATNTypeLexer { - tokenType := a.readInt() - - if tokenType == 0xFFFF { - tokenType = TokenEOF - } - - atn.ruleToTokenType[i] = tokenType - } - } - - atn.ruleToStopState = make([]*RuleStopState, nrules) //initIntArray(nrules, 0) - - for i := 0; i < len(atn.states); i++ { - state := atn.states[i] - - if s2, ok := state.(*RuleStopState); ok { - atn.ruleToStopState[s2.ruleIndex] = s2 - atn.ruleToStartState[s2.ruleIndex].stopState = s2 - } - } -} - -func (a *ATNDeserializer) readModes(atn *ATN) { - nmodes := a.readInt() - - for i := 0; i < nmodes; i++ { - s := a.readInt() - - atn.modeToStartState = append(atn.modeToStartState, atn.states[s].(*TokensStartState)) - } -} - -func (a *ATNDeserializer) readSets(atn *ATN, sets []*IntervalSet, readUnicode func() int) []*IntervalSet { - m := a.readInt() - - for i := 0; i < m; i++ { - iset := NewIntervalSet() - - sets = append(sets, iset) - - n := a.readInt() - containsEOF := a.readInt() - - if containsEOF != 0 { - iset.addOne(-1) - } - - for j := 0; j < n; j++ { - i1 := readUnicode() - i2 := readUnicode() - - iset.addRange(i1, i2) - } - } - - return sets -} - -func (a *ATNDeserializer) readEdges(atn *ATN, sets []*IntervalSet) { - nedges := a.readInt() - - for i := 0; i < nedges; i++ { - var ( - src = a.readInt() - trg = a.readInt() - ttype = a.readInt() - arg1 = a.readInt() - arg2 = a.readInt() - arg3 = a.readInt() - trans = a.edgeFactory(atn, ttype, src, trg, arg1, arg2, arg3, sets) - srcState = atn.states[src] - ) - - srcState.AddTransition(trans, -1) - } - - // Edges for rule stop states can be derived, so they are not serialized - for i := 0; i < len(atn.states); i++ { - state := atn.states[i] - - for j := 0; j < len(state.GetTransitions()); j++ { - var t, ok = state.GetTransitions()[j].(*RuleTransition) - - if !ok { - continue - } - - outermostPrecedenceReturn := -1 - - if atn.ruleToStartState[t.getTarget().GetRuleIndex()].isPrecedenceRule { - if t.precedence == 0 { - outermostPrecedenceReturn = t.getTarget().GetRuleIndex() - } - } - - trans := NewEpsilonTransition(t.followState, outermostPrecedenceReturn) - - atn.ruleToStopState[t.getTarget().GetRuleIndex()].AddTransition(trans, -1) - } - } - - for i := 0; i < len(atn.states); i++ { - state := atn.states[i] - - if s2, ok := state.(*BaseBlockStartState); ok { - // We need to know the end state to set its start state - if s2.endState == nil { - panic("IllegalState") - } - - // Block end states can only be associated to a single block start state - if s2.endState.startState != nil { - panic("IllegalState") - } - - s2.endState.startState = state - } - - if s2, ok := state.(*PlusLoopbackState); ok { - for j := 0; j < len(s2.GetTransitions()); j++ { - target := s2.GetTransitions()[j].getTarget() - - if t2, ok := target.(*PlusBlockStartState); ok { - t2.loopBackState = state - } - } - } else if s2, ok := 
state.(*StarLoopbackState); ok { - for j := 0; j < len(s2.GetTransitions()); j++ { - target := s2.GetTransitions()[j].getTarget() - - if t2, ok := target.(*StarLoopEntryState); ok { - t2.loopBackState = state - } - } - } - } -} - -func (a *ATNDeserializer) readDecisions(atn *ATN) { - ndecisions := a.readInt() - - for i := 0; i < ndecisions; i++ { - s := a.readInt() - decState := atn.states[s].(DecisionState) - - atn.DecisionToState = append(atn.DecisionToState, decState) - decState.setDecision(i) - } -} - -func (a *ATNDeserializer) readLexerActions(atn *ATN) { - if atn.grammarType == ATNTypeLexer { - count := a.readInt() - - atn.lexerActions = make([]LexerAction, count) // initIntArray(count, nil) - - for i := 0; i < count; i++ { - actionType := a.readInt() - data1 := a.readInt() - - if data1 == 0xFFFF { - data1 = -1 - } - - data2 := a.readInt() - - if data2 == 0xFFFF { - data2 = -1 - } - - lexerAction := a.lexerActionFactory(actionType, data1, data2) - - atn.lexerActions[i] = lexerAction - } - } -} - -func (a *ATNDeserializer) generateRuleBypassTransitions(atn *ATN) { - count := len(atn.ruleToStartState) - - for i := 0; i < count; i++ { - atn.ruleToTokenType[i] = atn.maxTokenType + i + 1 - } - - for i := 0; i < count; i++ { - a.generateRuleBypassTransition(atn, i) - } -} - -func (a *ATNDeserializer) generateRuleBypassTransition(atn *ATN, idx int) { - bypassStart := NewBasicBlockStartState() - - bypassStart.ruleIndex = idx - atn.addState(bypassStart) - - bypassStop := NewBlockEndState() - - bypassStop.ruleIndex = idx - atn.addState(bypassStop) - - bypassStart.endState = bypassStop - - atn.defineDecisionState(bypassStart.BaseDecisionState) - - bypassStop.startState = bypassStart - - var excludeTransition Transition - var endState ATNState - - if atn.ruleToStartState[idx].isPrecedenceRule { - // Wrap from the beginning of the rule to the StarLoopEntryState - endState = nil - - for i := 0; i < len(atn.states); i++ { - state := atn.states[i] - - if a.stateIsEndStateFor(state, idx) != nil { - endState = state - excludeTransition = state.(*StarLoopEntryState).loopBackState.GetTransitions()[0] - - break - } - } - - if excludeTransition == nil { - panic("Couldn't identify final state of the precedence rule prefix section.") - } - } else { - endState = atn.ruleToStopState[idx] - } - - // All non-excluded transitions that currently target end state need to target - // blockEnd instead - for i := 0; i < len(atn.states); i++ { - state := atn.states[i] - - for j := 0; j < len(state.GetTransitions()); j++ { - transition := state.GetTransitions()[j] - - if transition == excludeTransition { - continue - } - - if transition.getTarget() == endState { - transition.setTarget(bypassStop) - } - } - } - - // All transitions leaving the rule start state need to leave blockStart instead - ruleToStartState := atn.ruleToStartState[idx] - count := len(ruleToStartState.GetTransitions()) - - for count > 0 { - bypassStart.AddTransition(ruleToStartState.GetTransitions()[count-1], -1) - ruleToStartState.SetTransitions([]Transition{ruleToStartState.GetTransitions()[len(ruleToStartState.GetTransitions())-1]}) - } - - // Link the new states - atn.ruleToStartState[idx].AddTransition(NewEpsilonTransition(bypassStart, -1), -1) - bypassStop.AddTransition(NewEpsilonTransition(endState, -1), -1) - - MatchState := NewBasicState() - - atn.addState(MatchState) - MatchState.AddTransition(NewAtomTransition(bypassStop, atn.ruleToTokenType[idx]), -1) - bypassStart.AddTransition(NewEpsilonTransition(MatchState, -1), -1) -} - -func (a 
*ATNDeserializer) stateIsEndStateFor(state ATNState, idx int) ATNState { - if state.GetRuleIndex() != idx { - return nil - } - - if _, ok := state.(*StarLoopEntryState); !ok { - return nil - } - - maybeLoopEndState := state.GetTransitions()[len(state.GetTransitions())-1].getTarget() - - if _, ok := maybeLoopEndState.(*LoopEndState); !ok { - return nil - } - - var _, ok = maybeLoopEndState.GetTransitions()[0].getTarget().(*RuleStopState) - - if maybeLoopEndState.(*LoopEndState).epsilonOnlyTransitions && ok { - return state - } - - return nil -} - -// markPrecedenceDecisions analyzes the StarLoopEntryState states in the -// specified ATN to set the StarLoopEntryState.precedenceRuleDecision field to -// the correct value. -func (a *ATNDeserializer) markPrecedenceDecisions(atn *ATN) { - for _, state := range atn.states { - if _, ok := state.(*StarLoopEntryState); !ok { - continue - } - - // We analyze the ATN to determine if a ATN decision state is the - // decision for the closure block that determines whether a - // precedence rule should continue or complete. - if atn.ruleToStartState[state.GetRuleIndex()].isPrecedenceRule { - maybeLoopEndState := state.GetTransitions()[len(state.GetTransitions())-1].getTarget() - - if s3, ok := maybeLoopEndState.(*LoopEndState); ok { - var _, ok2 = maybeLoopEndState.GetTransitions()[0].getTarget().(*RuleStopState) - - if s3.epsilonOnlyTransitions && ok2 { - state.(*StarLoopEntryState).precedenceRuleDecision = true - } - } - } - } -} - -func (a *ATNDeserializer) verifyATN(atn *ATN) { - if !a.deserializationOptions.verifyATN { - return - } - - // Verify assumptions - for i := 0; i < len(atn.states); i++ { - state := atn.states[i] - - if state == nil { - continue - } - - a.checkCondition(state.GetEpsilonOnlyTransitions() || len(state.GetTransitions()) <= 1, "") - - switch s2 := state.(type) { - case *PlusBlockStartState: - a.checkCondition(s2.loopBackState != nil, "") - - case *StarLoopEntryState: - a.checkCondition(s2.loopBackState != nil, "") - a.checkCondition(len(s2.GetTransitions()) == 2, "") - - switch s2 := state.(type) { - case *StarBlockStartState: - var _, ok2 = s2.GetTransitions()[1].getTarget().(*LoopEndState) - - a.checkCondition(ok2, "") - a.checkCondition(!s2.nonGreedy, "") - - case *LoopEndState: - var s3, ok2 = s2.GetTransitions()[1].getTarget().(*StarBlockStartState) - - a.checkCondition(ok2, "") - a.checkCondition(s3.nonGreedy, "") - - default: - panic("IllegalState") - } - - case *StarLoopbackState: - a.checkCondition(len(state.GetTransitions()) == 1, "") - - var _, ok2 = state.GetTransitions()[0].getTarget().(*StarLoopEntryState) - - a.checkCondition(ok2, "") - - case *LoopEndState: - a.checkCondition(s2.loopBackState != nil, "") - - case *RuleStartState: - a.checkCondition(s2.stopState != nil, "") - - case *BaseBlockStartState: - a.checkCondition(s2.endState != nil, "") - - case *BlockEndState: - a.checkCondition(s2.startState != nil, "") - - case DecisionState: - a.checkCondition(len(s2.GetTransitions()) <= 1 || s2.getDecision() >= 0, "") - - default: - var _, ok = s2.(*RuleStopState) - - a.checkCondition(len(s2.GetTransitions()) <= 1 || ok, "") - } - } -} - -func (a *ATNDeserializer) checkCondition(condition bool, message string) { - if !condition { - if message == "" { - message = "IllegalState" - } - - panic(message) - } -} - -func (a *ATNDeserializer) readInt() int { - v := a.data[a.pos] - - a.pos++ - - return int(v) -} - -func (a *ATNDeserializer) readInt32() int { - var low = a.readInt() - var high = a.readInt() - return low | 
(high << 16) -} - -//TODO -//func (a *ATNDeserializer) readLong() int64 { -// panic("Not implemented") -// var low = a.readInt32() -// var high = a.readInt32() -// return (low & 0x00000000FFFFFFFF) | (high << int32) -//} - -func createByteToHex() []string { - bth := make([]string, 256) - - for i := 0; i < 256; i++ { - bth[i] = strings.ToUpper(hex.EncodeToString([]byte{byte(i)})) - } - - return bth -} - -var byteToHex = createByteToHex() - -func (a *ATNDeserializer) readUUID() string { - bb := make([]int, 16) - - for i := 7; i >= 0; i-- { - integer := a.readInt() - - bb[(2*i)+1] = integer & 0xFF - bb[2*i] = (integer >> 8) & 0xFF - } - - return byteToHex[bb[0]] + byteToHex[bb[1]] + - byteToHex[bb[2]] + byteToHex[bb[3]] + "-" + - byteToHex[bb[4]] + byteToHex[bb[5]] + "-" + - byteToHex[bb[6]] + byteToHex[bb[7]] + "-" + - byteToHex[bb[8]] + byteToHex[bb[9]] + "-" + - byteToHex[bb[10]] + byteToHex[bb[11]] + - byteToHex[bb[12]] + byteToHex[bb[13]] + - byteToHex[bb[14]] + byteToHex[bb[15]] -} - -func (a *ATNDeserializer) edgeFactory(atn *ATN, typeIndex, src, trg, arg1, arg2, arg3 int, sets []*IntervalSet) Transition { - target := atn.states[trg] - - switch typeIndex { - case TransitionEPSILON: - return NewEpsilonTransition(target, -1) - - case TransitionRANGE: - if arg3 != 0 { - return NewRangeTransition(target, TokenEOF, arg2) - } - - return NewRangeTransition(target, arg1, arg2) - - case TransitionRULE: - return NewRuleTransition(atn.states[arg1], arg2, arg3, target) - - case TransitionPREDICATE: - return NewPredicateTransition(target, arg1, arg2, arg3 != 0) - - case TransitionPRECEDENCE: - return NewPrecedencePredicateTransition(target, arg1) - - case TransitionATOM: - if arg3 != 0 { - return NewAtomTransition(target, TokenEOF) - } - - return NewAtomTransition(target, arg1) - - case TransitionACTION: - return NewActionTransition(target, arg1, arg2, arg3 != 0) - - case TransitionSET: - return NewSetTransition(target, sets[arg1]) - - case TransitionNOTSET: - return NewNotSetTransition(target, sets[arg1]) - - case TransitionWILDCARD: - return NewWildcardTransition(target) - } - - panic("The specified transition type is not valid.") -} - -func (a *ATNDeserializer) stateFactory(typeIndex, ruleIndex int) ATNState { - var s ATNState - - switch typeIndex { - case ATNStateInvalidType: - return nil - - case ATNStateBasic: - s = NewBasicState() - - case ATNStateRuleStart: - s = NewRuleStartState() - - case ATNStateBlockStart: - s = NewBasicBlockStartState() - - case ATNStatePlusBlockStart: - s = NewPlusBlockStartState() - - case ATNStateStarBlockStart: - s = NewStarBlockStartState() - - case ATNStateTokenStart: - s = NewTokensStartState() - - case ATNStateRuleStop: - s = NewRuleStopState() - - case ATNStateBlockEnd: - s = NewBlockEndState() - - case ATNStateStarLoopBack: - s = NewStarLoopbackState() - - case ATNStateStarLoopEntry: - s = NewStarLoopEntryState() - - case ATNStatePlusLoopBack: - s = NewPlusLoopbackState() - - case ATNStateLoopEnd: - s = NewLoopEndState() - - default: - panic(fmt.Sprintf("state type %d is invalid", typeIndex)) - } - - s.SetRuleIndex(ruleIndex) - - return s -} - -func (a *ATNDeserializer) lexerActionFactory(typeIndex, data1, data2 int) LexerAction { - switch typeIndex { - case LexerActionTypeChannel: - return NewLexerChannelAction(data1) - - case LexerActionTypeCustom: - return NewLexerCustomAction(data1, data2) - - case LexerActionTypeMode: - return NewLexerModeAction(data1) - - case LexerActionTypeMore: - return LexerMoreActionINSTANCE - - case LexerActionTypePopMode: - 
return LexerPopModeActionINSTANCE - - case LexerActionTypePushMode: - return NewLexerPushModeAction(data1) - - case LexerActionTypeSkip: - return LexerSkipActionINSTANCE - - case LexerActionTypeType: - return NewLexerTypeAction(data1) - - default: - panic(fmt.Sprintf("lexer action %d is invalid", typeIndex)) - } -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_simulator.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_simulator.go deleted file mode 100644 index d5454d6d..00000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_simulator.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -var ATNSimulatorError = NewDFAState(0x7FFFFFFF, NewBaseATNConfigSet(false)) - -type IATNSimulator interface { - SharedContextCache() *PredictionContextCache - ATN() *ATN - DecisionToDFA() []*DFA -} - -type BaseATNSimulator struct { - atn *ATN - sharedContextCache *PredictionContextCache - decisionToDFA []*DFA -} - -func NewBaseATNSimulator(atn *ATN, sharedContextCache *PredictionContextCache) *BaseATNSimulator { - b := new(BaseATNSimulator) - - b.atn = atn - b.sharedContextCache = sharedContextCache - - return b -} - -func (b *BaseATNSimulator) getCachedContext(context PredictionContext) PredictionContext { - if b.sharedContextCache == nil { - return context - } - - visited := make(map[PredictionContext]PredictionContext) - - return getCachedBasePredictionContext(context, b.sharedContextCache, visited) -} - -func (b *BaseATNSimulator) SharedContextCache() *PredictionContextCache { - return b.sharedContextCache -} - -func (b *BaseATNSimulator) ATN() *ATN { - return b.atn -} - -func (b *BaseATNSimulator) DecisionToDFA() []*DFA { - return b.decisionToDFA -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_state.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_state.go deleted file mode 100644 index 563d5db3..00000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_state.go +++ /dev/null @@ -1,386 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import "strconv" - -// Constants for serialization. -const ( - ATNStateInvalidType = 0 - ATNStateBasic = 1 - ATNStateRuleStart = 2 - ATNStateBlockStart = 3 - ATNStatePlusBlockStart = 4 - ATNStateStarBlockStart = 5 - ATNStateTokenStart = 6 - ATNStateRuleStop = 7 - ATNStateBlockEnd = 8 - ATNStateStarLoopBack = 9 - ATNStateStarLoopEntry = 10 - ATNStatePlusLoopBack = 11 - ATNStateLoopEnd = 12 - - ATNStateInvalidStateNumber = -1 -) - -var ATNStateInitialNumTransitions = 4 - -type ATNState interface { - GetEpsilonOnlyTransitions() bool - - GetRuleIndex() int - SetRuleIndex(int) - - GetNextTokenWithinRule() *IntervalSet - SetNextTokenWithinRule(*IntervalSet) - - GetATN() *ATN - SetATN(*ATN) - - GetStateType() int - - GetStateNumber() int - SetStateNumber(int) - - GetTransitions() []Transition - SetTransitions([]Transition) - AddTransition(Transition, int) - - String() string - hash() int -} - -type BaseATNState struct { - // NextTokenWithinRule caches lookahead during parsing. Not used during construction. - NextTokenWithinRule *IntervalSet - - // atn is the current ATN. 
- atn *ATN - - epsilonOnlyTransitions bool - - // ruleIndex tracks the Rule index because there are no Rule objects at runtime. - ruleIndex int - - stateNumber int - - stateType int - - // Track the transitions emanating from this ATN state. - transitions []Transition -} - -func NewBaseATNState() *BaseATNState { - return &BaseATNState{stateNumber: ATNStateInvalidStateNumber, stateType: ATNStateInvalidType} -} - -func (as *BaseATNState) GetRuleIndex() int { - return as.ruleIndex -} - -func (as *BaseATNState) SetRuleIndex(v int) { - as.ruleIndex = v -} -func (as *BaseATNState) GetEpsilonOnlyTransitions() bool { - return as.epsilonOnlyTransitions -} - -func (as *BaseATNState) GetATN() *ATN { - return as.atn -} - -func (as *BaseATNState) SetATN(atn *ATN) { - as.atn = atn -} - -func (as *BaseATNState) GetTransitions() []Transition { - return as.transitions -} - -func (as *BaseATNState) SetTransitions(t []Transition) { - as.transitions = t -} - -func (as *BaseATNState) GetStateType() int { - return as.stateType -} - -func (as *BaseATNState) GetStateNumber() int { - return as.stateNumber -} - -func (as *BaseATNState) SetStateNumber(stateNumber int) { - as.stateNumber = stateNumber -} - -func (as *BaseATNState) GetNextTokenWithinRule() *IntervalSet { - return as.NextTokenWithinRule -} - -func (as *BaseATNState) SetNextTokenWithinRule(v *IntervalSet) { - as.NextTokenWithinRule = v -} - -func (as *BaseATNState) hash() int { - return as.stateNumber -} - -func (as *BaseATNState) String() string { - return strconv.Itoa(as.stateNumber) -} - -func (as *BaseATNState) equals(other interface{}) bool { - if ot, ok := other.(ATNState); ok { - return as.stateNumber == ot.GetStateNumber() - } - - return false -} - -func (as *BaseATNState) isNonGreedyExitState() bool { - return false -} - -func (as *BaseATNState) AddTransition(trans Transition, index int) { - if len(as.transitions) == 0 { - as.epsilonOnlyTransitions = trans.getIsEpsilon() - } else if as.epsilonOnlyTransitions != trans.getIsEpsilon() { - as.epsilonOnlyTransitions = false - } - - if index == -1 { - as.transitions = append(as.transitions, trans) - } else { - as.transitions = append(as.transitions[:index], append([]Transition{trans}, as.transitions[index:]...)...) - // TODO: as.transitions.splice(index, 1, trans) - } -} - -type BasicState struct { - *BaseATNState -} - -func NewBasicState() *BasicState { - b := NewBaseATNState() - - b.stateType = ATNStateBasic - - return &BasicState{BaseATNState: b} -} - -type DecisionState interface { - ATNState - - getDecision() int - setDecision(int) - - getNonGreedy() bool - setNonGreedy(bool) -} - -type BaseDecisionState struct { - *BaseATNState - decision int - nonGreedy bool -} - -func NewBaseDecisionState() *BaseDecisionState { - return &BaseDecisionState{BaseATNState: NewBaseATNState(), decision: -1} -} - -func (s *BaseDecisionState) getDecision() int { - return s.decision -} - -func (s *BaseDecisionState) setDecision(b int) { - s.decision = b -} - -func (s *BaseDecisionState) getNonGreedy() bool { - return s.nonGreedy -} - -func (s *BaseDecisionState) setNonGreedy(b bool) { - s.nonGreedy = b -} - -type BlockStartState interface { - DecisionState - - getEndState() *BlockEndState - setEndState(*BlockEndState) -} - -// BaseBlockStartState is the start of a regular (...) block. 
-type BaseBlockStartState struct { - *BaseDecisionState - endState *BlockEndState -} - -func NewBlockStartState() *BaseBlockStartState { - return &BaseBlockStartState{BaseDecisionState: NewBaseDecisionState()} -} - -func (s *BaseBlockStartState) getEndState() *BlockEndState { - return s.endState -} - -func (s *BaseBlockStartState) setEndState(b *BlockEndState) { - s.endState = b -} - -type BasicBlockStartState struct { - *BaseBlockStartState -} - -func NewBasicBlockStartState() *BasicBlockStartState { - b := NewBlockStartState() - - b.stateType = ATNStateBlockStart - - return &BasicBlockStartState{BaseBlockStartState: b} -} - -// BlockEndState is a terminal node of a simple (a|b|c) block. -type BlockEndState struct { - *BaseATNState - startState ATNState -} - -func NewBlockEndState() *BlockEndState { - b := NewBaseATNState() - - b.stateType = ATNStateBlockEnd - - return &BlockEndState{BaseATNState: b} -} - -// RuleStopState is the last node in the ATN for a rule, unless that rule is the -// start symbol. In that case, there is one transition to EOF. Later, we might -// encode references to all calls to this rule to compute FOLLOW sets for error -// handling. -type RuleStopState struct { - *BaseATNState -} - -func NewRuleStopState() *RuleStopState { - b := NewBaseATNState() - - b.stateType = ATNStateRuleStop - - return &RuleStopState{BaseATNState: b} -} - -type RuleStartState struct { - *BaseATNState - stopState ATNState - isPrecedenceRule bool -} - -func NewRuleStartState() *RuleStartState { - b := NewBaseATNState() - - b.stateType = ATNStateRuleStart - - return &RuleStartState{BaseATNState: b} -} - -// PlusLoopbackState is a decision state for A+ and (A|B)+. It has two -// transitions: one to the loop back to start of the block, and one to exit. -type PlusLoopbackState struct { - *BaseDecisionState -} - -func NewPlusLoopbackState() *PlusLoopbackState { - b := NewBaseDecisionState() - - b.stateType = ATNStatePlusLoopBack - - return &PlusLoopbackState{BaseDecisionState: b} -} - -// PlusBlockStartState is the start of a (A|B|...)+ loop. Technically it is a -// decision state; we don't use it for code generation. Somebody might need it, -// it is included for completeness. In reality, PlusLoopbackState is the real -// decision-making node for A+. -type PlusBlockStartState struct { - *BaseBlockStartState - loopBackState ATNState -} - -func NewPlusBlockStartState() *PlusBlockStartState { - b := NewBlockStartState() - - b.stateType = ATNStatePlusBlockStart - - return &PlusBlockStartState{BaseBlockStartState: b} -} - -// StarBlockStartState is the block that begins a closure loop. -type StarBlockStartState struct { - *BaseBlockStartState -} - -func NewStarBlockStartState() *StarBlockStartState { - b := NewBlockStartState() - - b.stateType = ATNStateStarBlockStart - - return &StarBlockStartState{BaseBlockStartState: b} -} - -type StarLoopbackState struct { - *BaseATNState -} - -func NewStarLoopbackState() *StarLoopbackState { - b := NewBaseATNState() - - b.stateType = ATNStateStarLoopBack - - return &StarLoopbackState{BaseATNState: b} -} - -type StarLoopEntryState struct { - *BaseDecisionState - loopBackState ATNState - precedenceRuleDecision bool -} - -func NewStarLoopEntryState() *StarLoopEntryState { - b := NewBaseDecisionState() - - b.stateType = ATNStateStarLoopEntry - - // False precedenceRuleDecision indicates whether s state can benefit from a precedence DFA during SLL decision making. - return &StarLoopEntryState{BaseDecisionState: b} -} - -// LoopEndState marks the end of a * or + loop. 
-type LoopEndState struct { - *BaseATNState - loopBackState ATNState -} - -func NewLoopEndState() *LoopEndState { - b := NewBaseATNState() - - b.stateType = ATNStateLoopEnd - - return &LoopEndState{BaseATNState: b} -} - -// TokensStartState is the Tokens rule start state linking to each lexer rule start state. -type TokensStartState struct { - *BaseDecisionState -} - -func NewTokensStartState() *TokensStartState { - b := NewBaseDecisionState() - - b.stateType = ATNStateTokenStart - - return &TokensStartState{BaseDecisionState: b} -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_type.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_type.go deleted file mode 100644 index a7b48976..00000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_type.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -// Represent the type of recognizer an ATN applies to. -const ( - ATNTypeLexer = 0 - ATNTypeParser = 1 -) diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/char_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/char_stream.go deleted file mode 100644 index 70c1207f..00000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/char_stream.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -type CharStream interface { - IntStream - GetText(int, int) string - GetTextFromTokens(start, end Token) string - GetTextFromInterval(*Interval) string -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_factory.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_factory.go deleted file mode 100644 index 330ff8f3..00000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_factory.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -// TokenFactory creates CommonToken objects. -type TokenFactory interface { - Create(source *TokenSourceCharStreamPair, ttype int, text string, channel, start, stop, line, column int) Token -} - -// CommonTokenFactory is the default TokenFactory implementation. -type CommonTokenFactory struct { - // copyText indicates whether CommonToken.setText should be called after - // constructing tokens to explicitly set the text. This is useful for cases - // where the input stream might not be able to provide arbitrary substrings of - // text from the input after the lexer creates a token (e.g. the - // implementation of CharStream.GetText in UnbufferedCharStream panics an - // UnsupportedOperationException). Explicitly setting the token text allows - // Token.GetText to be called at any time regardless of the input stream - // implementation. - // - // The default value is false to avoid the performance and memory overhead of - // copying text for every token unless explicitly requested. - copyText bool -} - -func NewCommonTokenFactory(copyText bool) *CommonTokenFactory { - return &CommonTokenFactory{copyText: copyText} -} - -// CommonTokenFactoryDEFAULT is the default CommonTokenFactory. 
It does not -// explicitly copy token text when constructing tokens. -var CommonTokenFactoryDEFAULT = NewCommonTokenFactory(false) - -func (c *CommonTokenFactory) Create(source *TokenSourceCharStreamPair, ttype int, text string, channel, start, stop, line, column int) Token { - t := NewCommonToken(source, ttype, channel, start, stop) - - t.line = line - t.column = column - - if text != "" { - t.SetText(text) - } else if c.copyText && source.charStream != nil { - t.SetText(source.charStream.GetTextFromInterval(NewInterval(start, stop))) - } - - return t -} - -func (c *CommonTokenFactory) createThin(ttype int, text string) Token { - t := NewCommonToken(nil, ttype, TokenDefaultChannel, -1, -1) - t.SetText(text) - - return t -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_stream.go deleted file mode 100644 index c90e9b89..00000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_stream.go +++ /dev/null @@ -1,447 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "strconv" -) - -// CommonTokenStream is an implementation of TokenStream that loads tokens from -// a TokenSource on-demand and places the tokens in a buffer to provide access -// to any previous token by index. This token stream ignores the value of -// Token.getChannel. If your parser requires the token stream to filter tokens to -// only those on a particular channel, such as Token.DEFAULT_CHANNEL or -// Token.HIDDEN_CHANNEL, use a filtering token stream such as CommonTokenStream. -type CommonTokenStream struct { - channel int - - // fetchedEOF indicates whether the Token.EOF token has been fetched from - // tokenSource and added to tokens. This field improves performance for the - // following cases: - // - // consume: The lookahead check in consume to prevent consuming the EOF symbol is - // optimized by checking the values of fetchedEOF and p instead of calling LA. - // - // fetch: The check to prevent adding multiple EOF symbols into tokens is - // trivial with this field. - fetchedEOF bool - - // index indexes into tokens of the current token (next token to consume). - // tokens[p] should be LT(1). It is set to -1 when the stream is first - // constructed or when SetTokenSource is called, indicating that the first token - // has not yet been fetched from the token source. For additional information, - // see the documentation of IntStream for a description of initializing methods. - index int - - // tokenSource is the TokenSource from which tokens for this stream are - // fetched. - tokenSource TokenSource - - // tokens is all tokens fetched from the token source. The list is considered a - // complete view of the input once fetchedEOF is set to true.
- tokens []Token -} - -func NewCommonTokenStream(lexer Lexer, channel int) *CommonTokenStream { - return &CommonTokenStream{ - channel: channel, - index: -1, - tokenSource: lexer, - tokens: make([]Token, 0), - } -} - -func (c *CommonTokenStream) GetAllTokens() []Token { - return c.tokens -} - -func (c *CommonTokenStream) Mark() int { - return 0 -} - -func (c *CommonTokenStream) Release(marker int) {} - -func (c *CommonTokenStream) reset() { - c.Seek(0) -} - -func (c *CommonTokenStream) Seek(index int) { - c.lazyInit() - c.index = c.adjustSeekIndex(index) -} - -func (c *CommonTokenStream) Get(index int) Token { - c.lazyInit() - - return c.tokens[index] -} - -func (c *CommonTokenStream) Consume() { - SkipEOFCheck := false - - if c.index >= 0 { - if c.fetchedEOF { - // The last token in tokens is EOF. Skip the check if p indexes any fetched. - // token except the last. - SkipEOFCheck = c.index < len(c.tokens)-1 - } else { - // No EOF token in tokens. Skip the check if p indexes a fetched token. - SkipEOFCheck = c.index < len(c.tokens) - } - } else { - // Not yet initialized - SkipEOFCheck = false - } - - if !SkipEOFCheck && c.LA(1) == TokenEOF { - panic("cannot consume EOF") - } - - if c.Sync(c.index + 1) { - c.index = c.adjustSeekIndex(c.index + 1) - } -} - -// Sync makes sure index i in tokens has a token and returns true if a token is -// located at index i and otherwise false. -func (c *CommonTokenStream) Sync(i int) bool { - n := i - len(c.tokens) + 1 // TODO: How many more elements do we need? - - if n > 0 { - fetched := c.fetch(n) - return fetched >= n - } - - return true -} - -// fetch adds n elements to buffer and returns the actual number of elements -// added to the buffer. -func (c *CommonTokenStream) fetch(n int) int { - if c.fetchedEOF { - return 0 - } - - for i := 0; i < n; i++ { - t := c.tokenSource.NextToken() - - t.SetTokenIndex(len(c.tokens)) - c.tokens = append(c.tokens, t) - - if t.GetTokenType() == TokenEOF { - c.fetchedEOF = true - - return i + 1 - } - } - - return n -} - -// GetTokens gets all tokens from start to stop inclusive. -func (c *CommonTokenStream) GetTokens(start int, stop int, types *IntervalSet) []Token { - if start < 0 || stop < 0 { - return nil - } - - c.lazyInit() - - subset := make([]Token, 0) - - if stop >= len(c.tokens) { - stop = len(c.tokens) - 1 - } - - for i := start; i < stop; i++ { - t := c.tokens[i] - - if t.GetTokenType() == TokenEOF { - break - } - - if types == nil || types.contains(t.GetTokenType()) { - subset = append(subset, t) - } - } - - return subset -} - -func (c *CommonTokenStream) LA(i int) int { - return c.LT(i).GetTokenType() -} - -func (c *CommonTokenStream) lazyInit() { - if c.index == -1 { - c.setup() - } -} - -func (c *CommonTokenStream) setup() { - c.Sync(0) - c.index = c.adjustSeekIndex(0) -} - -func (c *CommonTokenStream) GetTokenSource() TokenSource { - return c.tokenSource -} - -// SetTokenSource resets the c token stream by setting its token source. -func (c *CommonTokenStream) SetTokenSource(tokenSource TokenSource) { - c.tokenSource = tokenSource - c.tokens = make([]Token, 0) - c.index = -1 -} - -// NextTokenOnChannel returns the index of the next token on channel given a -// starting index. Returns i if tokens[i] is on channel. Returns -1 if there are -// no tokens on channel between i and EOF. 
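For reference, the lazy-buffering contract deleted here (Sync/fetch pull tokens on demand, and EOF is fetched exactly once) was typically driven as in the following sketch. NewMyLexer is a hypothetical generated lexer and not part of this diff; the antlr identifiers are the API from this file:

	input := antlr.NewInputStream("a + b")
	lexer := NewMyLexer(input) // hypothetical generated lexer
	stream := antlr.NewCommonTokenStream(lexer, antlr.TokenDefaultChannel)
	stream.Fill() // drains the source in batches of 1000 until fetchedEOF is set
	for _, tok := range stream.GetAllTokens() {
		fmt.Println(tok.GetText())
	}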
-func (c *CommonTokenStream) NextTokenOnChannel(i, channel int) int { - c.Sync(i) - - if i >= len(c.tokens) { - return -1 - } - - token := c.tokens[i] - - for token.GetChannel() != c.channel { - if token.GetTokenType() == TokenEOF { - return -1 - } - - i++ - c.Sync(i) - token = c.tokens[i] - } - - return i -} - -// previousTokenOnChannel returns the index of the previous token on channel -// given a starting index. Returns i if tokens[i] is on channel. Returns -1 if -// there are no tokens on channel between i and 0. -func (c *CommonTokenStream) previousTokenOnChannel(i, channel int) int { - for i >= 0 && c.tokens[i].GetChannel() != channel { - i-- - } - - return i -} - -// GetHiddenTokensToRight collects all tokens on a specified channel to the -// right of the current token up until we see a token on DEFAULT_TOKEN_CHANNEL -// or EOF. If channel is -1, it finds any non-default channel token. -func (c *CommonTokenStream) GetHiddenTokensToRight(tokenIndex, channel int) []Token { - c.lazyInit() - - if tokenIndex < 0 || tokenIndex >= len(c.tokens) { - panic(strconv.Itoa(tokenIndex) + " not in 0.." + strconv.Itoa(len(c.tokens)-1)) - } - - nextOnChannel := c.NextTokenOnChannel(tokenIndex+1, LexerDefaultTokenChannel) - from := tokenIndex + 1 - - // If no onchannel to the right, then nextOnChannel == -1, so set to to last token - var to int - - if nextOnChannel == -1 { - to = len(c.tokens) - 1 - } else { - to = nextOnChannel - } - - return c.filterForChannel(from, to, channel) -} - -// GetHiddenTokensToLeft collects all tokens on channel to the left of the -// current token until we see a token on DEFAULT_TOKEN_CHANNEL. If channel is -// -1, it finds any non default channel token. -func (c *CommonTokenStream) GetHiddenTokensToLeft(tokenIndex, channel int) []Token { - c.lazyInit() - - if tokenIndex < 0 || tokenIndex >= len(c.tokens) { - panic(strconv.Itoa(tokenIndex) + " not in 0.." 
+ strconv.Itoa(len(c.tokens)-1)) - } - - prevOnChannel := c.previousTokenOnChannel(tokenIndex-1, LexerDefaultTokenChannel) - - if prevOnChannel == tokenIndex-1 { - return nil - } - - // If there are none on channel to the left and prevOnChannel == -1 then from = 0 - from := prevOnChannel + 1 - to := tokenIndex - 1 - - return c.filterForChannel(from, to, channel) -} - -func (c *CommonTokenStream) filterForChannel(left, right, channel int) []Token { - hidden := make([]Token, 0) - - for i := left; i < right+1; i++ { - t := c.tokens[i] - - if channel == -1 { - if t.GetChannel() != LexerDefaultTokenChannel { - hidden = append(hidden, t) - } - } else if t.GetChannel() == channel { - hidden = append(hidden, t) - } - } - - if len(hidden) == 0 { - return nil - } - - return hidden -} - -func (c *CommonTokenStream) GetSourceName() string { - return c.tokenSource.GetSourceName() -} - -func (c *CommonTokenStream) Size() int { - return len(c.tokens) -} - -func (c *CommonTokenStream) Index() int { - return c.index -} - -func (c *CommonTokenStream) GetAllText() string { - return c.GetTextFromInterval(nil) -} - -func (c *CommonTokenStream) GetTextFromTokens(start, end Token) string { - if start == nil || end == nil { - return "" - } - - return c.GetTextFromInterval(NewInterval(start.GetTokenIndex(), end.GetTokenIndex())) -} - -func (c *CommonTokenStream) GetTextFromRuleContext(interval RuleContext) string { - return c.GetTextFromInterval(interval.GetSourceInterval()) -} - -func (c *CommonTokenStream) GetTextFromInterval(interval *Interval) string { - c.lazyInit() - c.Fill() - - if interval == nil { - interval = NewInterval(0, len(c.tokens)-1) - } - - start := interval.Start - stop := interval.Stop - - if start < 0 || stop < 0 { - return "" - } - - if stop >= len(c.tokens) { - stop = len(c.tokens) - 1 - } - - s := "" - - for i := start; i < stop+1; i++ { - t := c.tokens[i] - - if t.GetTokenType() == TokenEOF { - break - } - - s += t.GetText() - } - - return s -} - -// Fill gets all tokens from the lexer until EOF. -func (c *CommonTokenStream) Fill() { - c.lazyInit() - - for c.fetch(1000) == 1000 { - continue - } -} - -func (c *CommonTokenStream) adjustSeekIndex(i int) int { - return c.NextTokenOnChannel(i, c.channel) -} - -func (c *CommonTokenStream) LB(k int) Token { - if k == 0 || c.index-k < 0 { - return nil - } - - i := c.index - n := 1 - - // Find k good tokens looking backward - for n <= k { - // Skip off-channel tokens - i = c.previousTokenOnChannel(i-1, c.channel) - n++ - } - - if i < 0 { - return nil - } - - return c.tokens[i] -} - -func (c *CommonTokenStream) LT(k int) Token { - c.lazyInit() - - if k == 0 { - return nil - } - - if k < 0 { - return c.LB(-k) - } - - i := c.index - n := 1 // We know tokens[n] is valid - - // Find k good tokens - for n < k { - // Skip off-channel tokens, but make sure to not look past EOF - if c.Sync(i + 1) { - i = c.NextTokenOnChannel(i+1, c.channel) - } - - n++ - } - - return c.tokens[i] -} - -// getNumberOfOnChannelTokens counts EOF once. 
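The channel-walking helpers above (NextTokenOnChannel, previousTokenOnChannel, filterForChannel) are what recover off-channel tokens such as comments after a parse. A sketch, assuming a grammar that routes comments to a hidden channel and a valid token index ti:

	// -1 selects any non-default channel, per filterForChannel above.
	if hidden := stream.GetHiddenTokensToLeft(ti, -1); hidden != nil {
		for _, h := range hidden {
			fmt.Printf("hidden before %d: %q\n", ti, h.GetText())
		}
	}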
-func (c *CommonTokenStream) getNumberOfOnChannelTokens() int { - var n int - - c.Fill() - - for i := 0; i < len(c.tokens); i++ { - t := c.tokens[i] - - if t.GetChannel() == c.channel { - n++ - } - - if t.GetTokenType() == TokenEOF { - break - } - } - - return n -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa.go deleted file mode 100644 index d6079aa2..00000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa.go +++ /dev/null @@ -1,171 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "sort" - "sync" -) - -type DFA struct { - // atnStartState is the ATN state in which this was created - atnStartState DecisionState - - decision int - - // states is all the DFA states. Use Map to get the old state back; Set can only - // indicate whether it is there. - states map[int]*DFAState - statesMu sync.RWMutex - - s0 *DFAState - s0Mu sync.RWMutex - - // precedenceDfa is the backing field for isPrecedenceDfa and setPrecedenceDfa. - // True if the DFA is for a precedence decision and false otherwise. - precedenceDfa bool -} - -func NewDFA(atnStartState DecisionState, decision int) *DFA { - return &DFA{ - atnStartState: atnStartState, - decision: decision, - states: make(map[int]*DFAState), - } -} - -// getPrecedenceStartState gets the start state for the current precedence and -// returns the start state corresponding to the specified precedence if a start -// state exists for the specified precedence and nil otherwise. d must be a -// precedence DFA. See also isPrecedenceDfa. -func (d *DFA) getPrecedenceStartState(precedence int) *DFAState { - if !d.precedenceDfa { - panic("only precedence DFAs may contain a precedence start state") - } - - d.s0Mu.RLock() - defer d.s0Mu.RUnlock() - - // s0.edges is never nil for a precedence DFA - if precedence < 0 || precedence >= len(d.s0.edges) { - return nil - } - - return d.s0.edges[precedence] -} - -// setPrecedenceStartState sets the start state for the current precedence. d -// must be a precedence DFA. See also isPrecedenceDfa. -func (d *DFA) setPrecedenceStartState(precedence int, startState *DFAState) { - if !d.precedenceDfa { - panic("only precedence DFAs may contain a precedence start state") - } - - if precedence < 0 { - return - } - - d.s0Mu.Lock() - defer d.s0Mu.Unlock() - - // Synchronization on s0 here is ok. When the DFA is turned into a - // precedence DFA, s0 will be initialized once and not updated again. s0.edges - // is never nil for a precedence DFA. - if precedence >= len(d.s0.edges) { - d.s0.edges = append(d.s0.edges, make([]*DFAState, precedence+1-len(d.s0.edges))...) - } - - d.s0.edges[precedence] = startState -} - -// setPrecedenceDfa sets whether d is a precedence DFA. If precedenceDfa differs -// from the current DFA configuration, then d.states is cleared, the initial -// state s0 is set to a new DFAState with an empty outgoing DFAState.edges to -// store the start states for individual precedence values if precedenceDfa is -// true or nil otherwise, and d.precedenceDfa is updated. 
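The precedence-DFA bookkeeping in this file keys start states by precedence level through s0.edges. A sketch of that invariant using the unexported methods, so it is only meaningful inside package antlr; dec and st are hypothetical values:

	d := NewDFA(dec, 0)              // dec is some DecisionState
	d.setPrecedenceDfa(true)         // installs a synthetic s0 with an empty edges slice
	d.setPrecedenceStartState(3, st) // grows s0.edges to length 4
	_ = d.getPrecedenceStartState(3) // == st
	_ = d.getPrecedenceStartState(9) // nil: out of range is a miss, not a panic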
-func (d *DFA) setPrecedenceDfa(precedenceDfa bool) { - if d.precedenceDfa != precedenceDfa { - d.states = make(map[int]*DFAState) - - if precedenceDfa { - precedenceState := NewDFAState(-1, NewBaseATNConfigSet(false)) - - precedenceState.edges = make([]*DFAState, 0) - precedenceState.isAcceptState = false - precedenceState.requiresFullContext = false - d.s0 = precedenceState - } else { - d.s0 = nil - } - - d.precedenceDfa = precedenceDfa - } -} - -func (d *DFA) getS0() *DFAState { - d.s0Mu.RLock() - defer d.s0Mu.RUnlock() - return d.s0 -} - -func (d *DFA) setS0(s *DFAState) { - d.s0Mu.Lock() - defer d.s0Mu.Unlock() - d.s0 = s -} - -func (d *DFA) getState(hash int) (*DFAState, bool) { - d.statesMu.RLock() - defer d.statesMu.RUnlock() - s, ok := d.states[hash] - return s, ok -} - -func (d *DFA) setState(hash int, state *DFAState) { - d.statesMu.Lock() - defer d.statesMu.Unlock() - d.states[hash] = state -} - -func (d *DFA) numStates() int { - d.statesMu.RLock() - defer d.statesMu.RUnlock() - return len(d.states) -} - -type dfaStateList []*DFAState - -func (d dfaStateList) Len() int { return len(d) } -func (d dfaStateList) Less(i, j int) bool { return d[i].stateNumber < d[j].stateNumber } -func (d dfaStateList) Swap(i, j int) { d[i], d[j] = d[j], d[i] } - -// sortedStates returns the states in d sorted by their state number. -func (d *DFA) sortedStates() []*DFAState { - vs := make([]*DFAState, 0, len(d.states)) - - for _, v := range d.states { - vs = append(vs, v) - } - - sort.Sort(dfaStateList(vs)) - - return vs -} - -func (d *DFA) String(literalNames []string, symbolicNames []string) string { - if d.s0 == nil { - return "" - } - - return NewDFASerializer(d, literalNames, symbolicNames).String() -} - -func (d *DFA) ToLexerString() string { - if d.s0 == nil { - return "" - } - - return NewLexerDFASerializer(d).String() -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_serializer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_serializer.go deleted file mode 100644 index 4c0f6902..00000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_serializer.go +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" - "strconv" -) - -// DFASerializer is a DFA walker that knows how to dump them to serialized -// strings. 
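DFA.String above delegates to this serializer; every DFA edge becomes one line of the dump. A usage sketch, where literalNames and symbolicNames would come from a generated recognizer and are assumptions here:

	fmt.Println(dfa.String(literalNames, symbolicNames))
	// Typical output, one line per edge; ':sN=>alt' marks an accept state
	// predicting alternative alt:
	//   s0-ID->s1
	//   s1-'+'->:s2=>1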
-type DFASerializer struct { - dfa *DFA - literalNames []string - symbolicNames []string -} - -func NewDFASerializer(dfa *DFA, literalNames, symbolicNames []string) *DFASerializer { - if literalNames == nil { - literalNames = make([]string, 0) - } - - if symbolicNames == nil { - symbolicNames = make([]string, 0) - } - - return &DFASerializer{ - dfa: dfa, - literalNames: literalNames, - symbolicNames: symbolicNames, - } -} - -func (d *DFASerializer) String() string { - if d.dfa.s0 == nil { - return "" - } - - buf := "" - states := d.dfa.sortedStates() - - for _, s := range states { - if s.edges != nil { - n := len(s.edges) - - for j := 0; j < n; j++ { - t := s.edges[j] - - if t != nil && t.stateNumber != 0x7FFFFFFF { - buf += d.GetStateString(s) - buf += "-" - buf += d.getEdgeLabel(j) - buf += "->" - buf += d.GetStateString(t) - buf += "\n" - } - } - } - } - - if len(buf) == 0 { - return "" - } - - return buf -} - -func (d *DFASerializer) getEdgeLabel(i int) string { - if i == 0 { - return "EOF" - } else if d.literalNames != nil && i-1 < len(d.literalNames) { - return d.literalNames[i-1] - } else if d.symbolicNames != nil && i-1 < len(d.symbolicNames) { - return d.symbolicNames[i-1] - } - - return strconv.Itoa(i - 1) -} - -func (d *DFASerializer) GetStateString(s *DFAState) string { - var a, b string - - if s.isAcceptState { - a = ":" - } - - if s.requiresFullContext { - b = "^" - } - - baseStateStr := a + "s" + strconv.Itoa(s.stateNumber) + b - - if s.isAcceptState { - if s.predicates != nil { - return baseStateStr + "=>" + fmt.Sprint(s.predicates) - } - - return baseStateStr + "=>" + fmt.Sprint(s.prediction) - } - - return baseStateStr -} - -type LexerDFASerializer struct { - *DFASerializer -} - -func NewLexerDFASerializer(dfa *DFA) *LexerDFASerializer { - return &LexerDFASerializer{DFASerializer: NewDFASerializer(dfa, nil, nil)} -} - -func (l *LexerDFASerializer) getEdgeLabel(i int) string { - return "'" + string(i) + "'" -} - -func (l *LexerDFASerializer) String() string { - if l.dfa.s0 == nil { - return "" - } - - buf := "" - states := l.dfa.sortedStates() - - for i := 0; i < len(states); i++ { - s := states[i] - - if s.edges != nil { - n := len(s.edges) - - for j := 0; j < n; j++ { - t := s.edges[j] - - if t != nil && t.stateNumber != 0x7FFFFFFF { - buf += l.GetStateString(s) - buf += "-" - buf += l.getEdgeLabel(j) - buf += "->" - buf += l.GetStateString(t) - buf += "\n" - } - } - } - } - - if len(buf) == 0 { - return "" - } - - return buf -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_state.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_state.go deleted file mode 100644 index 38e918ad..00000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_state.go +++ /dev/null @@ -1,166 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" -) - -// PredPrediction maps a predicate to a predicted alternative. -type PredPrediction struct { - alt int - pred SemanticContext -} - -func NewPredPrediction(pred SemanticContext, alt int) *PredPrediction { - return &PredPrediction{alt: alt, pred: pred} -} - -func (p *PredPrediction) String() string { - return "(" + fmt.Sprint(p.pred) + ", " + fmt.Sprint(p.alt) + ")" -} - -// DFAState represents a set of possible ATN configurations. As Aho, Sethi, -// Ullman p. 
117 says: "The DFA uses its state to keep track of all possible -// states the ATN can be in after reading each input symbol. That is to say, -// after reading input a1a2..an, the DFA is in a state that represents the -// subset T of the states of the ATN that are reachable from the ATN's start -// state along some path labeled a1a2..an." In conventional NFA-to-DFA -// conversion, therefore, the subset T would be a bitset representing the set of -// states the ATN could be in. We need to track the alt predicted by each state -// as well, however. More importantly, we need to maintain a stack of states, -// tracking the closure operations as they jump from rule to rule, emulating -// rule invocations (method calls). I have to add a stack to simulate the proper -// lookahead sequences for the underlying LL grammar from which the ATN was -// derived. -// -// I use a set of ATNConfig objects, not simple states. An ATNConfig is both a -// state (ala normal conversion) and a RuleContext describing the chain of rules -// (if any) followed to arrive at that state. -// -// A DFAState may have multiple references to a particular state, but with -// different ATN contexts (with same or different alts) meaning that state was -// reached via a different set of rule invocations. -type DFAState struct { - stateNumber int - configs ATNConfigSet - - // edges elements point to the target of the symbol. Shift up by 1 so (-1) - // Token.EOF maps to the first element. - edges []*DFAState - - isAcceptState bool - - // prediction is the ttype we match or alt we predict if the state is accept. - // Set to ATN.INVALID_ALT_NUMBER when predicates != nil or - // requiresFullContext. - prediction int - - lexerActionExecutor *LexerActionExecutor - - // requiresFullContext indicates it was created during an SLL prediction that - // discovered a conflict between the configurations in the state. Future - // ParserATNSimulator.execATN invocations immediately jump doing - // full context prediction if true. - requiresFullContext bool - - // predicates is the predicates associated with the ATN configurations of the - // DFA state during SLL parsing. When we have predicates, requiresFullContext - // is false, since full context prediction evaluates predicates on-the-fly. If - // d is - // not nil, then prediction is ATN.INVALID_ALT_NUMBER. - // - // We only use these for non-requiresFullContext but conflicting states. That - // means we know from the context (it's $ or we don't dip into outer context) - // that it's an ambiguity not a conflict. - // - // This list is computed by - // ParserATNSimulator.predicateDFAState. - predicates []*PredPrediction -} - -func NewDFAState(stateNumber int, configs ATNConfigSet) *DFAState { - if configs == nil { - configs = NewBaseATNConfigSet(false) - } - - return &DFAState{configs: configs, stateNumber: stateNumber} -} - -// GetAltSet gets the set of all alts mentioned by all ATN configurations in d. -func (d *DFAState) GetAltSet() *Set { - alts := NewSet(nil, nil) - - if d.configs != nil { - for _, c := range d.configs.GetItems() { - alts.add(c.GetAlt()) - } - } - - if alts.length() == 0 { - return nil - } - - return alts -} - -func (d *DFAState) setPrediction(v int) { - d.prediction = v -} - -// equals returns whether d equals other. Two DFAStates are equal if their ATN -// configuration sets are the same. This method is used to see if a state -// already exists. 
-// -// Because the number of alternatives and number of ATN configurations are -// finite, there is a finite number of DFA states that can be processed. This is -// necessary to show that the algorithm terminates. -// -// Cannot test the DFA state numbers here because in -// ParserATNSimulator.addDFAState we need to know if any other state exists that -// has this exact set of ATN configurations. The stateNumber is irrelevant. -func (d *DFAState) equals(other interface{}) bool { - if d == other { - return true - } else if _, ok := other.(*DFAState); !ok { - return false - } - - return d.configs.Equals(other.(*DFAState).configs) -} - -func (d *DFAState) String() string { - var s string - if d.isAcceptState { - if d.predicates != nil { - s = "=>" + fmt.Sprint(d.predicates) - } else { - s = "=>" + fmt.Sprint(d.prediction) - } - } - - return fmt.Sprintf("%d:%s%s", d.stateNumber, fmt.Sprint(d.configs), s) -} - -func (d *DFAState) hash() int { - h := murmurInit(11) - - c := 1 - if d.isAcceptState { - if d.predicates != nil { - for _, p := range d.predicates { - h = murmurUpdate(h, p.alt) - h = murmurUpdate(h, p.pred.hash()) - c += 2 - } - } else { - h = murmurUpdate(h, d.prediction) - c += 1 - } - } - - h = murmurUpdate(h, d.configs.hash()) - return murmurFinish(h, c) -} \ No newline at end of file diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/diagnostic_error_listener.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/diagnostic_error_listener.go deleted file mode 100644 index 1fec43d9..00000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/diagnostic_error_listener.go +++ /dev/null @@ -1,111 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "strconv" -) - -// -// This implementation of {@link ANTLRErrorListener} can be used to identify -// certain potential correctness and performance problems in grammars. "reports" -// are made by calling {@link Parser//NotifyErrorListeners} with the appropriate -// message. -// -// - -type DiagnosticErrorListener struct { - *DefaultErrorListener - - exactOnly bool -} - -func NewDiagnosticErrorListener(exactOnly bool) *DiagnosticErrorListener { - - n := new(DiagnosticErrorListener) - - // whether all ambiguities or only exact ambiguities are Reported.
- n.exactOnly = exactOnly - return n -} - -func (d *DiagnosticErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) { - if d.exactOnly && !exact { - return - } - msg := "reportAmbiguity d=" + - d.getDecisionDescription(recognizer, dfa) + - ": ambigAlts=" + - d.getConflictingAlts(ambigAlts, configs).String() + - ", input='" + - recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'" - recognizer.NotifyErrorListeners(msg, nil, nil) -} - -func (d *DiagnosticErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) { - - msg := "reportAttemptingFullContext d=" + - d.getDecisionDescription(recognizer, dfa) + - ", input='" + - recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'" - recognizer.NotifyErrorListeners(msg, nil, nil) -} - -func (d *DiagnosticErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) { - msg := "reportContextSensitivity d=" + - d.getDecisionDescription(recognizer, dfa) + - ", input='" + - recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'" - recognizer.NotifyErrorListeners(msg, nil, nil) -} - -func (d *DiagnosticErrorListener) getDecisionDescription(recognizer Parser, dfa *DFA) string { - decision := dfa.decision - ruleIndex := dfa.atnStartState.GetRuleIndex() - - ruleNames := recognizer.GetRuleNames() - if ruleIndex < 0 || ruleIndex >= len(ruleNames) { - return strconv.Itoa(decision) - } - ruleName := ruleNames[ruleIndex] - if ruleName == "" { - return strconv.Itoa(decision) - } - return strconv.Itoa(decision) + " (" + ruleName + ")" -} - -// -// Computes the set of conflicting or ambiguous alternatives from a -// configuration set, if that information was not already provided by the -// parser. -// -// @param ReportedAlts The set of conflicting or ambiguous alternatives, as -// Reported by the parser. -// @param configs The conflicting or ambiguous configuration set. -// @return Returns {@code ReportedAlts} if it is not {@code nil}, otherwise -// returns the set of alternatives represented in {@code configs}. -// -func (d *DiagnosticErrorListener) getConflictingAlts(ReportedAlts *BitSet, set ATNConfigSet) *BitSet { - if ReportedAlts != nil { - return ReportedAlts - } - result := NewBitSet() - for _, c := range set.GetItems() { - result.add(c.GetAlt()) - } - - return result -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_listener.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_listener.go deleted file mode 100644 index 028e1a9d..00000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_listener.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" - "os" - "strconv" -) - -// Provides an empty default implementation of {@link ANTLRErrorListener}. The -// default implementation of each method does nothing, but can be overridden as -// necessary. 
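That override pattern looks like the following sketch: embed *DefaultErrorListener so the untouched callbacks stay no-ops, then attach the listener through the recognizer. The generated-parser names are hypothetical; only the antlr identifiers come from this file:

	type countingListener struct {
		*antlr.DefaultErrorListener
		errs int
	}

	func (l *countingListener) SyntaxError(recognizer antlr.Recognizer, offendingSymbol interface{}, line, column int, msg string, e antlr.RecognitionException) {
		l.errs++
		fmt.Printf("line %d:%d %s\n", line, column, msg)
	}

	// p := NewMyParser(stream) // hypothetical generated parser
	// p.RemoveErrorListeners()
	// p.AddErrorListener(&countingListener{DefaultErrorListener: antlr.NewDefaultErrorListener()})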
- -type ErrorListener interface { - SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) - ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) - ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) - ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) -} - -type DefaultErrorListener struct { -} - -func NewDefaultErrorListener() *DefaultErrorListener { - return new(DefaultErrorListener) -} - -func (d *DefaultErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) { -} - -func (d *DefaultErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) { -} - -func (d *DefaultErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) { -} - -func (d *DefaultErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) { -} - -type ConsoleErrorListener struct { - *DefaultErrorListener -} - -func NewConsoleErrorListener() *ConsoleErrorListener { - return new(ConsoleErrorListener) -} - -// -// Provides a default instance of {@link ConsoleErrorListener}. -// -var ConsoleErrorListenerINSTANCE = NewConsoleErrorListener() - -// -// {@inheritDoc} -// -//
-// This implementation prints messages to {@link System//err} containing the -// values of {@code line}, {@code charPositionInLine}, and {@code msg} using -// the following format: -// -// line line:charPositionInLine msg -//
-// -func (c *ConsoleErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) { - fmt.Fprintln(os.Stderr, "line "+strconv.Itoa(line)+":"+strconv.Itoa(column)+" "+msg) -} - -type ProxyErrorListener struct { - *DefaultErrorListener - delegates []ErrorListener -} - -func NewProxyErrorListener(delegates []ErrorListener) *ProxyErrorListener { - if delegates == nil { - panic("delegates is not provided") - } - l := new(ProxyErrorListener) - l.delegates = delegates - return l -} - -func (p *ProxyErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) { - for _, d := range p.delegates { - d.SyntaxError(recognizer, offendingSymbol, line, column, msg, e) - } -} - -func (p *ProxyErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) { - for _, d := range p.delegates { - d.ReportAmbiguity(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs) - } -} - -func (p *ProxyErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) { - for _, d := range p.delegates { - d.ReportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs) - } -} - -func (p *ProxyErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) { - for _, d := range p.delegates { - d.ReportContextSensitivity(recognizer, dfa, startIndex, stopIndex, prediction, configs) - } -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_strategy.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_strategy.go deleted file mode 100644 index 977a6e45..00000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_strategy.go +++ /dev/null @@ -1,758 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" - "reflect" - "strconv" - "strings" -) - -type ErrorStrategy interface { - reset(Parser) - RecoverInline(Parser) Token - Recover(Parser, RecognitionException) - Sync(Parser) - inErrorRecoveryMode(Parser) bool - ReportError(Parser, RecognitionException) - ReportMatch(Parser) -} - -// This is the default implementation of {@link ANTLRErrorStrategy} used for -// error Reporting and recovery in ANTLR parsers. -// -type DefaultErrorStrategy struct { - errorRecoveryMode bool - lastErrorIndex int - lastErrorStates *IntervalSet -} - -var _ ErrorStrategy = &DefaultErrorStrategy{} - -func NewDefaultErrorStrategy() *DefaultErrorStrategy { - - d := new(DefaultErrorStrategy) - - // Indicates whether the error strategy is currently "recovering from an - // error". This is used to suppress Reporting multiple error messages while - // attempting to recover from a detected syntax error. - // - // @see //inErrorRecoveryMode - // - d.errorRecoveryMode = false - - // The index into the input stream where the last error occurred. - // This is used to prevent infinite loops where an error is found - // but no token is consumed during recovery...another error is found, - // ad nauseum. This is a failsafe mechanism to guarantee that at least - // one token/tree node is consumed for two errors. 
- // - d.lastErrorIndex = -1 - d.lastErrorStates = nil - return d -} -// The default implementation simply calls {@link //endErrorCondition} to -// ensure that the handler is not in error recovery mode.
-func (d *DefaultErrorStrategy) reset(recognizer Parser) { - d.endErrorCondition(recognizer) -} - -// -// This method is called to enter error recovery mode when a recognition -// exception is Reported. -// -// @param recognizer the parser instance -// -func (d *DefaultErrorStrategy) beginErrorCondition(recognizer Parser) { - d.errorRecoveryMode = true -} - -func (d *DefaultErrorStrategy) inErrorRecoveryMode(recognizer Parser) bool { - return d.errorRecoveryMode -} - -// -// This method is called to leave error recovery mode after recovering from -// a recognition exception. -// -// @param recognizer -// -func (d *DefaultErrorStrategy) endErrorCondition(recognizer Parser) { - d.errorRecoveryMode = false - d.lastErrorStates = nil - d.lastErrorIndex = -1 -} - -// -// {@inheritDoc} -// -//
<p>The default implementation simply calls {@link //endErrorCondition}.</p>
-// -func (d *DefaultErrorStrategy) ReportMatch(recognizer Parser) { - d.endErrorCondition(recognizer) -} - -// -// {@inheritDoc} -// -//
<p>The default implementation returns immediately if the handler is already
-// in error recovery mode. Otherwise, it calls {@link //beginErrorCondition}
-// and dispatches the Reporting task based on the runtime type of {@code e}
-// according to the following table.</p>
-// -// -// -func (d *DefaultErrorStrategy) ReportError(recognizer Parser, e RecognitionException) { - // if we've already Reported an error and have not Matched a token - // yet successfully, don't Report any errors. - if d.inErrorRecoveryMode(recognizer) { - return // don't Report spurious errors - } - d.beginErrorCondition(recognizer) - - switch t := e.(type) { - default: - fmt.Println("unknown recognition error type: " + reflect.TypeOf(e).Name()) - // fmt.Println(e.stack) - recognizer.NotifyErrorListeners(e.GetMessage(), e.GetOffendingToken(), e) - case *NoViableAltException: - d.ReportNoViableAlternative(recognizer, t) - case *InputMisMatchException: - d.ReportInputMisMatch(recognizer, t) - case *FailedPredicateException: - d.ReportFailedPredicate(recognizer, t) - } -} - -// {@inheritDoc} -// -//
<p>The default implementation reSynchronizes the parser by consuming tokens
-// until we find one in the reSynchronization set--loosely the set of tokens
-// that can follow the current rule.</p>
-// -func (d *DefaultErrorStrategy) Recover(recognizer Parser, e RecognitionException) { - - if d.lastErrorIndex == recognizer.GetInputStream().Index() && - d.lastErrorStates != nil && d.lastErrorStates.contains(recognizer.GetState()) { - // uh oh, another error at same token index and previously-Visited - // state in ATN must be a case where LT(1) is in the recovery - // token set so nothing got consumed. Consume a single token - // at least to prevent an infinite loop d is a failsafe. - recognizer.Consume() - } - d.lastErrorIndex = recognizer.GetInputStream().Index() - if d.lastErrorStates == nil { - d.lastErrorStates = NewIntervalSet() - } - d.lastErrorStates.addOne(recognizer.GetState()) - followSet := d.getErrorRecoverySet(recognizer) - d.consumeUntil(recognizer, followSet) -} - -// The default implementation of {@link ANTLRErrorStrategy//Sync} makes sure -// that the current lookahead symbol is consistent with what were expecting -// at d point in the ATN. You can call d anytime but ANTLR only -// generates code to check before subrules/loops and each iteration. -// -//
<p>Implements Jim Idle's magic Sync mechanism in closures and optional
-// subrules. E.g.,</p>
-//
-// <pre>
-// a : Sync ( stuff Sync )*
-// Sync : {consume to what can follow Sync}
-// </pre>
-// -// At the start of a sub rule upon error, {@link //Sync} performs single -// token deletion, if possible. If it can't do that, it bails on the current -// rule and uses the default error recovery, which consumes until the -// reSynchronization set of the current rule. -// -//
<p>If the sub rule is optional ({@code (...)?}, {@code (...)*}, or block
-// with an empty alternative), then the expected set includes what follows
-// the subrule.</p>
-// -//
<p>During loop iteration, it consumes until it sees a token that can start a
-// sub rule or what follows loop. Yes, that is pretty aggressive. We opt to
-// stay in the loop as long as possible.</p>
-// -//
<p>ORIGINS</p>
-// -//
<p>Previous versions of ANTLR did a poor job of their recovery within loops.
-// A single mismatch token or missing token would force the parser to bail
-// out of the entire rules surrounding the loop. So, for rule</p>
-//
-// <pre>
-// classfunc : 'class' ID '{' member* '}'
-// </pre>
-// -// input with an extra token between members would force the parser to -// consume until it found the next class definition rather than the next -// member definition of the current class. -// -//
<p>This functionality cost a little bit of effort because the parser has to
-// compare token set at the start of the loop and at each iteration. If for
-// some reason speed is suffering for you, you can turn off d
-// functionality by simply overriding d method as a blank { }.</p>
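The opt-out described just above can be a one-liner. A minimal sketch, assuming the usual Go embed-and-override pattern (the type and constructor names here are hypothetical, not part of this runtime):

    // noSyncStrategy disables Sync wholesale, trading recovery quality
    // inside loops for parse speed, as the comment above suggests.
    type noSyncStrategy struct {
        *DefaultErrorStrategy
    }

    func newNoSyncStrategy() *noSyncStrategy {
        return &noSyncStrategy{DefaultErrorStrategy: NewDefaultErrorStrategy()}
    }

    // Sync is deliberately left blank.
    func (s *noSyncStrategy) Sync(recognizer Parser) {}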
-// -func (d *DefaultErrorStrategy) Sync(recognizer Parser) { - // If already recovering, don't try to Sync - if d.inErrorRecoveryMode(recognizer) { - return - } - - s := recognizer.GetInterpreter().atn.states[recognizer.GetState()] - la := recognizer.GetTokenStream().LA(1) - - // try cheaper subset first might get lucky. seems to shave a wee bit off - nextTokens := recognizer.GetATN().NextTokens(s, nil) - if nextTokens.contains(TokenEpsilon) || nextTokens.contains(la) { - return - } - - switch s.GetStateType() { - case ATNStateBlockStart, ATNStateStarBlockStart, ATNStatePlusBlockStart, ATNStateStarLoopEntry: - // Report error and recover if possible - if d.SingleTokenDeletion(recognizer) != nil { - return - } - panic(NewInputMisMatchException(recognizer)) - case ATNStatePlusLoopBack, ATNStateStarLoopBack: - d.ReportUnwantedToken(recognizer) - expecting := NewIntervalSet() - expecting.addSet(recognizer.GetExpectedTokens()) - whatFollowsLoopIterationOrRule := expecting.addSet(d.getErrorRecoverySet(recognizer)) - d.consumeUntil(recognizer, whatFollowsLoopIterationOrRule) - default: - // do nothing if we can't identify the exact kind of ATN state - } -} - -// This is called by {@link //ReportError} when the exception is a -// {@link NoViableAltException}. -// -// @see //ReportError -// -// @param recognizer the parser instance -// @param e the recognition exception -// -func (d *DefaultErrorStrategy) ReportNoViableAlternative(recognizer Parser, e *NoViableAltException) { - tokens := recognizer.GetTokenStream() - var input string - if tokens != nil { - if e.startToken.GetTokenType() == TokenEOF { - input = "<EOF>" - } else { - input = tokens.GetTextFromTokens(e.startToken, e.offendingToken) - } - } else { - input = "<unknown input>" - } - msg := "no viable alternative at input " + d.escapeWSAndQuote(input) - recognizer.NotifyErrorListeners(msg, e.offendingToken, e) -} - -// -// This is called by {@link //ReportError} when the exception is an -// {@link InputMisMatchException}. -// -// @see //ReportError -// -// @param recognizer the parser instance -// @param e the recognition exception -// -func (this *DefaultErrorStrategy) ReportInputMisMatch(recognizer Parser, e *InputMisMatchException) { - msg := "mismatched input " + this.GetTokenErrorDisplay(e.offendingToken) + - " expecting " + e.getExpectedTokens().StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false) - recognizer.NotifyErrorListeners(msg, e.offendingToken, e) -} - -// -// This is called by {@link //ReportError} when the exception is a -// {@link FailedPredicateException}. -// -// @see //ReportError -// -// @param recognizer the parser instance -// @param e the recognition exception -// -func (d *DefaultErrorStrategy) ReportFailedPredicate(recognizer Parser, e *FailedPredicateException) { - ruleName := recognizer.GetRuleNames()[recognizer.GetParserRuleContext().GetRuleIndex()] - msg := "rule " + ruleName + " " + e.message - recognizer.NotifyErrorListeners(msg, e.offendingToken, e) -} - -// This method is called to Report a syntax error which requires the removal -// of a token from the input stream. At the time d method is called, the -// erroneous symbol is current {@code LT(1)} symbol and has not yet been -// removed from the input stream. When d method returns, -// {@code recognizer} is in error recovery mode. -// -//
<p>This method is called when {@link //singleTokenDeletion} identifies
-// single-token deletion as a viable recovery strategy for a mismatched
-// input error.</p>
-// -//
<p>The default implementation simply returns if the handler is already in
-// error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to
-// enter error recovery mode, followed by calling
-// {@link Parser//NotifyErrorListeners}.</p>
-// -// @param recognizer the parser instance -// -func (d *DefaultErrorStrategy) ReportUnwantedToken(recognizer Parser) { - if d.inErrorRecoveryMode(recognizer) { - return - } - d.beginErrorCondition(recognizer) - t := recognizer.GetCurrentToken() - tokenName := d.GetTokenErrorDisplay(t) - expecting := d.GetExpectedTokens(recognizer) - msg := "extraneous input " + tokenName + " expecting " + - expecting.StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false) - recognizer.NotifyErrorListeners(msg, t, nil) -} - -// This method is called to Report a syntax error which requires the -// insertion of a missing token into the input stream. At the time d -// method is called, the missing token has not yet been inserted. When d -// method returns, {@code recognizer} is in error recovery mode. -// -//
<p>This method is called when {@link //singleTokenInsertion} identifies
-// single-token insertion as a viable recovery strategy for a mismatched
-// input error.</p>
-// -//
<p>The default implementation simply returns if the handler is already in
-// error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to
-// enter error recovery mode, followed by calling
-// {@link Parser//NotifyErrorListeners}.</p>
-// -// @param recognizer the parser instance -// -func (d *DefaultErrorStrategy) ReportMissingToken(recognizer Parser) { - if d.inErrorRecoveryMode(recognizer) { - return - } - d.beginErrorCondition(recognizer) - t := recognizer.GetCurrentToken() - expecting := d.GetExpectedTokens(recognizer) - msg := "missing " + expecting.StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false) + - " at " + d.GetTokenErrorDisplay(t) - recognizer.NotifyErrorListeners(msg, t, nil) -} - -//
<p>The default implementation attempts to recover from the mismatched input
-// by using single token insertion and deletion as described below. If the
-// recovery attempt fails, d method panics an
-// {@link InputMisMatchException}.</p>
-// -//
<p>EXTRA TOKEN (single token deletion)</p>
-// -//
<p>{@code LA(1)} is not what we are looking for. If {@code LA(2)} has the
-// right token, however, then assume {@code LA(1)} is some extra spurious
-// token and delete it. Then consume and return the next token (which was
-// the {@code LA(2)} token) as the successful result of the Match operation.</p>
-// -//
<p>This recovery strategy is implemented by {@link
-// //singleTokenDeletion}.</p>
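The core of that deletion test is tiny. A sketch that mirrors the SingleTokenDeletion body further down, assuming it sits in this package (contains is unexported, and the helper name is made up for illustration):

    // looksLikeExtraToken reports whether dropping LA(1) would let the
    // parse continue: the token after it must be in the expected set.
    func looksLikeExtraToken(recognizer Parser, expecting *IntervalSet) bool {
        return expecting.contains(recognizer.GetTokenStream().LA(2))
    }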
-// -//
<p>MISSING TOKEN (single token insertion)</p>
-// -//
<p>If current token (at {@code LA(1)}) is consistent with what could come
-// after the expected {@code LA(1)} token, then assume the token is missing
-// and use the parser's {@link TokenFactory} to create it on the fly. The
-// "insertion" is performed by returning the created token as the successful
-// result of the Match operation.</p>
-// -//
<p>This recovery strategy is implemented by {@link
-// //singleTokenInsertion}.</p>
-// -//
<p>EXAMPLE</p>
-// -//
<p>For example, Input {@code i=(3} is clearly missing the {@code ')'}. When
-// the parser returns from the nested call to {@code expr}, it will have
-// call chain:</p>
-//
-// <pre>
-// stat &rarr expr &rarr atom
-// </pre>
-// -// and it will be trying to Match the {@code ')'} at d point in the
-// derivation:
-//
-// <pre>
-// => ID '=' '(' INT ')' ('+' atom)* '<EOF>'
-// ^
-// </pre>
-// -// The attempt to Match {@code ')'} will fail when it sees {@code '<EOF>'} and -// call {@link //recoverInline}. To recover, it sees that {@code LA(1)=='<EOF>'} -// is in the set of tokens that can follow the {@code ')'} token reference -// in rule {@code atom}. It can assume that you forgot the {@code ')'}. -// -func (d *DefaultErrorStrategy) RecoverInline(recognizer Parser) Token { - // SINGLE TOKEN DELETION - MatchedSymbol := d.SingleTokenDeletion(recognizer) - if MatchedSymbol != nil { - // we have deleted the extra token. - // now, move past ttype token as if all were ok - recognizer.Consume() - return MatchedSymbol - } - // SINGLE TOKEN INSERTION - if d.SingleTokenInsertion(recognizer) { - return d.GetMissingSymbol(recognizer) - } - // even that didn't work must panic the exception - panic(NewInputMisMatchException(recognizer)) -} - -// -// This method implements the single-token insertion inline error recovery -// strategy. It is called by {@link //recoverInline} if the single-token -// deletion strategy fails to recover from the mismatched input. If this -// method returns {@code true}, {@code recognizer} will be in error recovery -// mode. -// -//
<p>This method determines whether or not single-token insertion is viable by
-// checking if the {@code LA(1)} input symbol could be successfully Matched
-// if it were instead the {@code LA(2)} symbol. If d method returns
-// {@code true}, the caller is responsible for creating and inserting a
-// token with the correct type to produce d behavior.</p>
-// -// @param recognizer the parser instance -// @return {@code true} if single-token insertion is a viable recovery -// strategy for the current mismatched input, otherwise {@code false} -// -func (d *DefaultErrorStrategy) SingleTokenInsertion(recognizer Parser) bool { - currentSymbolType := recognizer.GetTokenStream().LA(1) - // if current token is consistent with what could come after current - // ATN state, then we know we're missing a token error recovery - // is free to conjure up and insert the missing token - atn := recognizer.GetInterpreter().atn - currentState := atn.states[recognizer.GetState()] - next := currentState.GetTransitions()[0].getTarget() - expectingAtLL2 := atn.NextTokens(next, recognizer.GetParserRuleContext()) - if expectingAtLL2.contains(currentSymbolType) { - d.ReportMissingToken(recognizer) - return true - } - - return false -} - -// This method implements the single-token deletion inline error recovery -// strategy. It is called by {@link //recoverInline} to attempt to recover -// from mismatched input. If this method returns nil, the parser and error -// handler state will not have changed. If this method returns non-nil, -// {@code recognizer} will not be in error recovery mode since the -// returned token was a successful Match. -// -//
<p>If the single-token deletion is successful, d method calls
-// {@link //ReportUnwantedToken} to Report the error, followed by
-// {@link Parser//consume} to actually "delete" the extraneous token. Then,
-// before returning {@link //ReportMatch} is called to signal a successful
-// Match.</p>
-// -// @param recognizer the parser instance -// @return the successfully Matched {@link Token} instance if single-token -// deletion successfully recovers from the mismatched input, otherwise -// {@code nil} -// -func (d *DefaultErrorStrategy) SingleTokenDeletion(recognizer Parser) Token { - NextTokenType := recognizer.GetTokenStream().LA(2) - expecting := d.GetExpectedTokens(recognizer) - if expecting.contains(NextTokenType) { - d.ReportUnwantedToken(recognizer) - // print("recoverFromMisMatchedToken deleting " \ - // + str(recognizer.GetTokenStream().LT(1)) \ - // + " since " + str(recognizer.GetTokenStream().LT(2)) \ - // + " is what we want", file=sys.stderr) - recognizer.Consume() // simply delete extra token - // we want to return the token we're actually Matching - MatchedSymbol := recognizer.GetCurrentToken() - d.ReportMatch(recognizer) // we know current token is correct - return MatchedSymbol - } - - return nil -} - -// Conjure up a missing token during error recovery. -// -// The recognizer attempts to recover from single missing -// symbols. But, actions might refer to that missing symbol. -// For example, x=ID {f($x)}. The action clearly assumes -// that there has been an identifier Matched previously and that -// $x points at that token. If that token is missing, but -// the next token in the stream is what we want we assume that -// d token is missing and we keep going. Because we -// have to return some token to replace the missing token, -// we have to conjure one up. This method gives the user control -// over the tokens returned for missing tokens. Mostly, -// you will want to create something special for identifier -// tokens. For literals such as '{' and ',', the default -// action in the parser or tree parser works. It simply creates -// a CommonToken of the appropriate type. The text will be the token. -// If you change what tokens must be created by the lexer, -// override d method to create the appropriate tokens. -// -func (d *DefaultErrorStrategy) GetMissingSymbol(recognizer Parser) Token { - currentSymbol := recognizer.GetCurrentToken() - expecting := d.GetExpectedTokens(recognizer) - expectedTokenType := expecting.first() - var tokenText string - - if expectedTokenType == TokenEOF { - tokenText = "<missing EOF>" - } else { - ln := recognizer.GetLiteralNames() - if expectedTokenType > 0 && expectedTokenType < len(ln) { - tokenText = "<missing " + ln[expectedTokenType] + ">" - } else { - tokenText = "<missing undefined>" // TODO matches the JS impl - } - } - current := currentSymbol - lookback := recognizer.GetTokenStream().LT(-1) - if current.GetTokenType() == TokenEOF && lookback != nil { - current = lookback - } - - tf := recognizer.GetTokenFactory() - - return tf.Create(current.GetSource(), expectedTokenType, tokenText, TokenDefaultChannel, -1, -1, current.GetLine(), current.GetColumn()) -} - -func (d *DefaultErrorStrategy) GetExpectedTokens(recognizer Parser) *IntervalSet { - return recognizer.GetExpectedTokens() -} - -// How should a token be displayed in an error message? The default -// is to display just the text, but during development you might -// want to have a lot of information spit out. Override in that case -// to use t.String() (which, for CommonToken, dumps everything about -// the token). This is better than forcing you to override a method in -// your token objects because you don't have to go modify your lexer -// so that it creates a NewJava type.
-// -func (d *DefaultErrorStrategy) GetTokenErrorDisplay(t Token) string { - if t == nil { - return "<no token>" - } - s := t.GetText() - if s == "" { - if t.GetTokenType() == TokenEOF { - s = "<EOF>" - } else { - s = "<" + strconv.Itoa(t.GetTokenType()) + ">" - } - } - return d.escapeWSAndQuote(s) -} - -func (d *DefaultErrorStrategy) escapeWSAndQuote(s string) string { - s = strings.Replace(s, "\t", "\\t", -1) - s = strings.Replace(s, "\n", "\\n", -1) - s = strings.Replace(s, "\r", "\\r", -1) - return "'" + s + "'" -} - -// Compute the error recovery set for the current rule. During -// rule invocation, the parser pushes the set of tokens that can -// follow that rule reference on the stack d amounts to -// computing FIRST of what follows the rule reference in the -// enclosing rule. See LinearApproximator.FIRST(). -// This local follow set only includes tokens -// from within the rule i.e., the FIRST computation done by -// ANTLR stops at the end of a rule. -// -// EXAMPLE -// -// When you find a "no viable alt exception", the input is not -// consistent with any of the alternatives for rule r. The best -// thing to do is to consume tokens until you see something that -// can legally follow a call to r//or* any rule that called r. -// You don't want the exact set of viable next tokens because the -// input might just be missing a token--you might consume the -// rest of the input looking for one of the missing tokens. -// -// Consider grammar: -// -// a : '[' b ']' -// | '(' b ')' -// -// b : c '^' INT -// c : ID -// | INT -// -// -// At each rule invocation, the set of tokens that could follow -// that rule is pushed on a stack. Here are the various -// context-sensitive follow sets: -// -// FOLLOW(b1_in_a) = FIRST(']') = ']' -// FOLLOW(b2_in_a) = FIRST(')') = ')' -// FOLLOW(c_in_b) = FIRST('^') = '^' -// -// Upon erroneous input "[]", the call chain is -// -// a -> b -> c -// -// and, hence, the follow context stack is: -// -// depth follow set start of rule execution -// 0 a (from main()) -// 1 ']' b -// 2 '^' c -// -// Notice that ')' is not included, because b would have to have -// been called from a different context in rule a for ')' to be -// included. -// -// For error recovery, we cannot consider FOLLOW(c) -// (context-sensitive or otherwise). We need the combined set of -// all context-sensitive FOLLOW sets--the set of all tokens that -// could follow any reference in the call chain. We need to -// reSync to one of those tokens. Note that FOLLOW(c)='^' and if -// we reSync'd to that token, we'd consume until EOF. We need to -// Sync to context-sensitive FOLLOWs for a, b, and c: {']','^'}. -// In this case, for input "[]", LA(1) is ']' and in the set, so we would -// not consume anything. After printing an error, rule c would -// return normally. Rule b would not find the required '^' though. -// At this point, it gets a mismatched token error and panics an -// exception (since LA(1) is not in the viable following token -// set). The rule exception handler tries to recover, but finds -// the same recovery set and doesn't consume anything. Rule b -// exits normally returning to rule a. Now it finds the ']' (and -// with the successful Match exits errorRecovery mode). -// -// So, you can see that the parser walks up the call chain looking -// for the token that was a member of the recovery set. -// -// Errors are not generated in errorRecovery mode.
-// -// ANTLR's error recovery mechanism is based upon original ideas: -// -// "Algorithms + Data Structures = Programs" by Niklaus Wirth -// -// and -// -// "A note on error recovery in recursive descent parsers": -// http://portal.acm.org/citation.cfm?id=947902.947905 -// -// Later, Josef Grosch had some good ideas: -// -// "Efficient and Comfortable Error Recovery in Recursive Descent -// Parsers": -// ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip -// -// Like Grosch I implement context-sensitive FOLLOW sets that are combined -// at run-time upon error to avoid overhead during parsing. -// -func (d *DefaultErrorStrategy) getErrorRecoverySet(recognizer Parser) *IntervalSet { - atn := recognizer.GetInterpreter().atn - ctx := recognizer.GetParserRuleContext() - recoverSet := NewIntervalSet() - for ctx != nil && ctx.GetInvokingState() >= 0 { - // compute what follows who invoked us - invokingState := atn.states[ctx.GetInvokingState()] - rt := invokingState.GetTransitions()[0] - follow := atn.NextTokens(rt.(*RuleTransition).followState, nil) - recoverSet.addSet(follow) - ctx = ctx.GetParent().(ParserRuleContext) - } - recoverSet.removeOne(TokenEpsilon) - return recoverSet -} - -// Consume tokens until one Matches the given token set.// -func (d *DefaultErrorStrategy) consumeUntil(recognizer Parser, set *IntervalSet) { - ttype := recognizer.GetTokenStream().LA(1) - for ttype != TokenEOF && !set.contains(ttype) { - recognizer.Consume() - ttype = recognizer.GetTokenStream().LA(1) - } -} - -// -// This implementation of {@link ANTLRErrorStrategy} responds to syntax errors -// by immediately canceling the parse operation with a -// {@link ParseCancellationException}. The implementation ensures that the -// {@link ParserRuleContext//exception} field is set for all parse tree nodes -// that were not completed prior to encountering the error. -// -//
-// <p>
-// This error strategy is useful in the following scenarios.</p>
-//
-// <ul>
-// <li>Two-stage parsing: This error strategy allows the first
-// stage of two-stage parsing to immediately terminate if an error is
-// encountered, and immediately fall back to the second stage. In addition to
-// avoiding wasted work by attempting to recover from errors here, the empty
-// implementation of {@link BailErrorStrategy//Sync} improves the performance of
-// the first stage.</li>
-// <li>Silent validation: When syntax errors are not being
-// Reported or logged, and the parse result is simply ignored if errors occur,
-// the {@link BailErrorStrategy} avoids wasting work on recovering from errors
-// when the result will be ignored either way.</li>
-// </ul>
-//
-// <p>
-// {@code myparser.setErrorHandler(NewBailErrorStrategy())}</p>
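A sketch of the two-stage pattern from the first bullet above. The parser constructor and entry rule (newMyParser, Start) are hypothetical stand-ins for whatever the grammar generates; SetErrorHandler is the Go-side spelling used by BaseParser:

    func parseTwoStage(input string) (tree ParserRuleContext) {
        p := newMyParser(input) // hypothetical generated-parser constructor
        p.SetErrorHandler(NewBailErrorStrategy())
        defer func() {
            if recover() != nil {
                // Stage two: re-parse with full error recovery.
                p = newMyParser(input)
                p.SetErrorHandler(NewDefaultErrorStrategy())
                tree = p.Start() // hypothetical entry rule
            }
        }()
        return p.Start()
    }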
-// -// @see Parser//setErrorHandler(ANTLRErrorStrategy) - -type BailErrorStrategy struct { - *DefaultErrorStrategy -} - -var _ ErrorStrategy = &BailErrorStrategy{} - -func NewBailErrorStrategy() *BailErrorStrategy { - - b := new(BailErrorStrategy) - - b.DefaultErrorStrategy = NewDefaultErrorStrategy() - - return b -} - -// Instead of recovering from exception {@code e}, re-panic it wrapped -// in a {@link ParseCancellationException} so it is not caught by the -// rule func catches. Use {@link Exception//getCause()} to get the -// original {@link RecognitionException}. -// -func (b *BailErrorStrategy) Recover(recognizer Parser, e RecognitionException) { - context := recognizer.GetParserRuleContext() - for context != nil { - context.SetException(e) - context = context.GetParent().(ParserRuleContext) - } - panic(NewParseCancellationException()) // TODO we don't emit e properly -} - -// Make sure we don't attempt to recover inline if the parser -// successfully recovers, it won't panic an exception. -// -func (b *BailErrorStrategy) RecoverInline(recognizer Parser) Token { - b.Recover(recognizer, NewInputMisMatchException(recognizer)) - - return nil -} - -// Make sure we don't attempt to recover from problems in subrules.// -func (b *BailErrorStrategy) Sync(recognizer Parser) { - // pass -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/errors.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/errors.go deleted file mode 100644 index 2ef74926..00000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/errors.go +++ /dev/null @@ -1,241 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -// The root of the ANTLR exception hierarchy. In general, ANTLR tracks just -// 3 kinds of errors: prediction errors, failed predicate errors, and -// mismatched input errors. In each case, the parser knows where it is -// in the input, where it is in the ATN, the rule invocation stack, -// and what kind of problem occurred. - -type RecognitionException interface { - GetOffendingToken() Token - GetMessage() string - GetInputStream() IntStream -} - -type BaseRecognitionException struct { - message string - recognizer Recognizer - offendingToken Token - offendingState int - ctx RuleContext - input IntStream -} - -func NewBaseRecognitionException(message string, recognizer Recognizer, input IntStream, ctx RuleContext) *BaseRecognitionException { - - // todo - // Error.call(this) - // - // if (!!Error.captureStackTrace) { - // Error.captureStackTrace(this, RecognitionException) - // } else { - // stack := NewError().stack - // } - // TODO may be able to use - "runtime" func Stack(buf []byte, all bool) int - - t := new(BaseRecognitionException) - - t.message = message - t.recognizer = recognizer - t.input = input - t.ctx = ctx - // The current {@link Token} when an error occurred. Since not all streams - // support accessing symbols by index, we have to track the {@link Token} - // instance itself. - t.offendingToken = nil - // Get the ATN state number the parser was in at the time the error - // occurred. For {@link NoViableAltException} and - // {@link LexerNoViableAltException} exceptions, this is the - // {@link DecisionState} number. For others, it is the state whose outgoing - // edge we couldn't Match. 
- t.offendingState = -1 - if t.recognizer != nil { - t.offendingState = t.recognizer.GetState() - } - - return t -} - -func (b *BaseRecognitionException) GetMessage() string { - return b.message -} - -func (b *BaseRecognitionException) GetOffendingToken() Token { - return b.offendingToken -} - -func (b *BaseRecognitionException) GetInputStream() IntStream { - return b.input -} - -//
<p>If the state number is not known, b method returns -1.</p>
- -// -// Gets the set of input symbols which could potentially follow the -// previously Matched symbol at the time b exception was panicn. -// -//
<p>If the set of expected tokens is not known and could not be computed,
-// b method returns {@code nil}.</p>
-// -// @return The set of token types that could potentially follow the current -// state in the ATN, or {@code nil} if the information is not available. -// / -func (b *BaseRecognitionException) getExpectedTokens() *IntervalSet { - if b.recognizer != nil { - return b.recognizer.GetATN().getExpectedTokens(b.offendingState, b.ctx) - } - - return nil -} - -func (b *BaseRecognitionException) String() string { - return b.message -} - -type LexerNoViableAltException struct { - *BaseRecognitionException - - startIndex int - deadEndConfigs ATNConfigSet -} - -func NewLexerNoViableAltException(lexer Lexer, input CharStream, startIndex int, deadEndConfigs ATNConfigSet) *LexerNoViableAltException { - - l := new(LexerNoViableAltException) - - l.BaseRecognitionException = NewBaseRecognitionException("", lexer, input, nil) - - l.startIndex = startIndex - l.deadEndConfigs = deadEndConfigs - - return l -} - -func (l *LexerNoViableAltException) String() string { - symbol := "" - if l.startIndex >= 0 && l.startIndex < l.input.Size() { - symbol = l.input.(CharStream).GetTextFromInterval(NewInterval(l.startIndex, l.startIndex)) - } - return "LexerNoViableAltException" + symbol -} - -type NoViableAltException struct { - *BaseRecognitionException - - startToken Token - offendingToken Token - ctx ParserRuleContext - deadEndConfigs ATNConfigSet -} - -// Indicates that the parser could not decide which of two or more paths -// to take based upon the remaining input. It tracks the starting token -// of the offending input and also knows where the parser was -// in the various paths when the error. Reported by ReportNoViableAlternative() -// -func NewNoViableAltException(recognizer Parser, input TokenStream, startToken Token, offendingToken Token, deadEndConfigs ATNConfigSet, ctx ParserRuleContext) *NoViableAltException { - - if ctx == nil { - ctx = recognizer.GetParserRuleContext() - } - - if offendingToken == nil { - offendingToken = recognizer.GetCurrentToken() - } - - if startToken == nil { - startToken = recognizer.GetCurrentToken() - } - - if input == nil { - input = recognizer.GetInputStream().(TokenStream) - } - - n := new(NoViableAltException) - n.BaseRecognitionException = NewBaseRecognitionException("", recognizer, input, ctx) - - // Which configurations did we try at input.Index() that couldn't Match - // input.LT(1)?// - n.deadEndConfigs = deadEndConfigs - // The token object at the start index the input stream might - // not be buffering tokens so get a reference to it. (At the - // time the error occurred, of course the stream needs to keep a - // buffer all of the tokens but later we might not have access to those.) - n.startToken = startToken - n.offendingToken = offendingToken - - return n -} - -type InputMisMatchException struct { - *BaseRecognitionException -} - -// This signifies any kind of mismatched input exceptions such as -// when the current input does not Match the expected token. -// -func NewInputMisMatchException(recognizer Parser) *InputMisMatchException { - - i := new(InputMisMatchException) - i.BaseRecognitionException = NewBaseRecognitionException("", recognizer, recognizer.GetInputStream(), recognizer.GetParserRuleContext()) - - i.offendingToken = recognizer.GetCurrentToken() - - return i - -} - -// A semantic predicate failed during validation. Validation of predicates -// occurs when normally parsing the alternative just like Matching a token. -// Disambiguating predicate evaluation occurs when we test a predicate during -// prediction. 
- -type FailedPredicateException struct { - *BaseRecognitionException - - ruleIndex int - predicateIndex int - predicate string -} - -func NewFailedPredicateException(recognizer Parser, predicate string, message string) *FailedPredicateException { - - f := new(FailedPredicateException) - - f.BaseRecognitionException = NewBaseRecognitionException(f.formatMessage(predicate, message), recognizer, recognizer.GetInputStream(), recognizer.GetParserRuleContext()) - - s := recognizer.GetInterpreter().atn.states[recognizer.GetState()] - trans := s.GetTransitions()[0] - if trans2, ok := trans.(*PredicateTransition); ok { - f.ruleIndex = trans2.ruleIndex - f.predicateIndex = trans2.predIndex - } else { - f.ruleIndex = 0 - f.predicateIndex = 0 - } - f.predicate = predicate - f.offendingToken = recognizer.GetCurrentToken() - - return f -} - -func (f *FailedPredicateException) formatMessage(predicate, message string) string { - if message != "" { - return message - } - - return "failed predicate: {" + predicate + "}?" -} - -type ParseCancellationException struct { -} - -func NewParseCancellationException() *ParseCancellationException { - // Error.call(this) - // Error.captureStackTrace(this, ParseCancellationException) - return new(ParseCancellationException) -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/file_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/file_stream.go deleted file mode 100644 index 842170c0..00000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/file_stream.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "bytes" - "io" - "os" -) - -// This is an InputStream that is loaded from a file all at once -// when you construct the object. - -type FileStream struct { - *InputStream - - filename string -} - -func NewFileStream(fileName string) (*FileStream, error) { - - buf := bytes.NewBuffer(nil) - - f, err := os.Open(fileName) - if err != nil { - return nil, err - } - defer f.Close() - _, err = io.Copy(buf, f) - if err != nil { - return nil, err - } - - fs := new(FileStream) - - fs.filename = fileName - s := string(buf.Bytes()) - - fs.InputStream = NewInputStream(s) - - return fs, nil - -} - -func (f *FileStream) GetSourceName() string { - return f.filename -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/input_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/input_stream.go deleted file mode 100644 index 5ff270f5..00000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/input_stream.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. 
- -package antlr - -type InputStream struct { - name string - index int - data []rune - size int -} - -func NewInputStream(data string) *InputStream { - - is := new(InputStream) - - is.name = "<empty>" - is.index = 0 - is.data = []rune(data) - is.size = len(is.data) // number of runes - - return is -} - -func (is *InputStream) reset() { - is.index = 0 -} - -func (is *InputStream) Consume() { - if is.index >= is.size { - // assert is.LA(1) == TokenEOF - panic("cannot consume EOF") - } - is.index++ -} - -func (is *InputStream) LA(offset int) int { - - if offset == 0 { - return 0 // nil - } - if offset < 0 { - offset++ // e.g., translate LA(-1) to use offset=0 - } - pos := is.index + offset - 1 - - if pos < 0 || pos >= is.size { // invalid - return TokenEOF - } - - return int(is.data[pos]) -} - -func (is *InputStream) LT(offset int) int { - return is.LA(offset) -} - -func (is *InputStream) Index() int { - return is.index -} - -func (is *InputStream) Size() int { - return is.size -} - -// mark/release do nothing we have entire buffer -func (is *InputStream) Mark() int { - return -1 -} - -func (is *InputStream) Release(marker int) { -} - -func (is *InputStream) Seek(index int) { - if index <= is.index { - is.index = index // just jump don't update stream state (line,...) - return - } - // seek forward - is.index = intMin(index, is.size) -} - -func (is *InputStream) GetText(start int, stop int) string { - if stop >= is.size { - stop = is.size - 1 - } - if start >= is.size { - return "" - } - - return string(is.data[start : stop+1]) -} - -func (is *InputStream) GetTextFromTokens(start, stop Token) string { - if start != nil && stop != nil { - return is.GetTextFromInterval(NewInterval(start.GetTokenIndex(), stop.GetTokenIndex())) - } - - return "" -} - -func (is *InputStream) GetTextFromInterval(i *Interval) string { - return is.GetText(i.Start, i.Stop) -} - -func (*InputStream) GetSourceName() string { - return "Obtained from string" -} - -func (is *InputStream) String() string { - return string(is.data) -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/int_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/int_stream.go deleted file mode 100644 index 438e0ea6..00000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/int_stream.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -type IntStream interface { - Consume() - LA(int) int - Mark() int - Release(marker int) - Index() int - Seek(index int) - Size() int - GetSourceName() string -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/interval_set.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/interval_set.go deleted file mode 100644 index 510d9091..00000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/interval_set.go +++ /dev/null @@ -1,296 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "strconv" - "strings" -) - -type Interval struct { - Start int - Stop int -} - -/* stop is not included!
*/ -func NewInterval(start, stop int) *Interval { - i := new(Interval) - - i.Start = start - i.Stop = stop - return i -} - -func (i *Interval) Contains(item int) bool { - return item >= i.Start && item < i.Stop -} - -func (i *Interval) String() string { - if i.Start == i.Stop-1 { - return strconv.Itoa(i.Start) - } - - return strconv.Itoa(i.Start) + ".." + strconv.Itoa(i.Stop-1) -} - -func (i *Interval) length() int { - return i.Stop - i.Start -} - -type IntervalSet struct { - intervals []*Interval - readOnly bool -} - -func NewIntervalSet() *IntervalSet { - - i := new(IntervalSet) - - i.intervals = nil - i.readOnly = false - - return i -} - -func (i *IntervalSet) first() int { - if len(i.intervals) == 0 { - return TokenInvalidType - } - - return i.intervals[0].Start -} - -func (i *IntervalSet) addOne(v int) { - i.addInterval(NewInterval(v, v+1)) -} - -func (i *IntervalSet) addRange(l, h int) { - i.addInterval(NewInterval(l, h+1)) -} - -func (i *IntervalSet) addInterval(v *Interval) { - if i.intervals == nil { - i.intervals = make([]*Interval, 0) - i.intervals = append(i.intervals, v) - } else { - // find insert pos - for k, interval := range i.intervals { - // distinct range -> insert - if v.Stop < interval.Start { - i.intervals = append(i.intervals[0:k], append([]*Interval{v}, i.intervals[k:]...)...) - return - } else if v.Stop == interval.Start { - i.intervals[k].Start = v.Start - return - } else if v.Start <= interval.Stop { - i.intervals[k] = NewInterval(intMin(interval.Start, v.Start), intMax(interval.Stop, v.Stop)) - - // if not applying to end, merge potential overlaps - if k < len(i.intervals)-1 { - l := i.intervals[k] - r := i.intervals[k+1] - // if r contained in l - if l.Stop >= r.Stop { - i.intervals = append(i.intervals[0:k+1], i.intervals[k+2:]...) - } else if l.Stop >= r.Start { // partial overlap - i.intervals[k] = NewInterval(l.Start, r.Stop) - i.intervals = append(i.intervals[0:k+1], i.intervals[k+2:]...) - } - } - return - } - } - // greater than any exiting - i.intervals = append(i.intervals, v) - } -} - -func (i *IntervalSet) addSet(other *IntervalSet) *IntervalSet { - if other.intervals != nil { - for k := 0; k < len(other.intervals); k++ { - i2 := other.intervals[k] - i.addInterval(NewInterval(i2.Start, i2.Stop)) - } - } - return i -} - -func (i *IntervalSet) complement(start int, stop int) *IntervalSet { - result := NewIntervalSet() - result.addInterval(NewInterval(start, stop+1)) - for j := 0; j < len(i.intervals); j++ { - result.removeRange(i.intervals[j]) - } - return result -} - -func (i *IntervalSet) contains(item int) bool { - if i.intervals == nil { - return false - } - for k := 0; k < len(i.intervals); k++ { - if i.intervals[k].Contains(item) { - return true - } - } - return false -} - -func (i *IntervalSet) length() int { - len := 0 - - for _, v := range i.intervals { - len += v.length() - } - - return len -} - -func (i *IntervalSet) removeRange(v *Interval) { - if v.Start == v.Stop-1 { - i.removeOne(v.Start) - } else if i.intervals != nil { - k := 0 - for n := 0; n < len(i.intervals); n++ { - ni := i.intervals[k] - // intervals are ordered - if v.Stop <= ni.Start { - return - } else if v.Start > ni.Start && v.Stop < ni.Stop { - i.intervals[k] = NewInterval(ni.Start, v.Start) - x := NewInterval(v.Stop, ni.Stop) - // i.intervals.splice(k, 0, x) - i.intervals = append(i.intervals[0:k], append([]*Interval{x}, i.intervals[k:]...)...) 
- return - } else if v.Start <= ni.Start && v.Stop >= ni.Stop { - // i.intervals.splice(k, 1) - i.intervals = append(i.intervals[0:k], i.intervals[k+1:]...) - k = k - 1 // need another pass - } else if v.Start < ni.Stop { - i.intervals[k] = NewInterval(ni.Start, v.Start) - } else if v.Stop < ni.Stop { - i.intervals[k] = NewInterval(v.Stop, ni.Stop) - } - k++ - } - } -} - -func (i *IntervalSet) removeOne(v int) { - if i.intervals != nil { - for k := 0; k < len(i.intervals); k++ { - ki := i.intervals[k] - // intervals i ordered - if v < ki.Start { - return - } else if v == ki.Start && v == ki.Stop-1 { - // i.intervals.splice(k, 1) - i.intervals = append(i.intervals[0:k], i.intervals[k+1:]...) - return - } else if v == ki.Start { - i.intervals[k] = NewInterval(ki.Start+1, ki.Stop) - return - } else if v == ki.Stop-1 { - i.intervals[k] = NewInterval(ki.Start, ki.Stop-1) - return - } else if v < ki.Stop-1 { - x := NewInterval(ki.Start, v) - ki.Start = v + 1 - // i.intervals.splice(k, 0, x) - i.intervals = append(i.intervals[0:k], append([]*Interval{x}, i.intervals[k:]...)...) - return - } - } - } -} - -func (i *IntervalSet) String() string { - return i.StringVerbose(nil, nil, false) -} - -func (i *IntervalSet) StringVerbose(literalNames []string, symbolicNames []string, elemsAreChar bool) string { - - if i.intervals == nil { - return "{}" - } else if literalNames != nil || symbolicNames != nil { - return i.toTokenString(literalNames, symbolicNames) - } else if elemsAreChar { - return i.toCharString() - } - - return i.toIndexString() -} - -func (i *IntervalSet) toCharString() string { - names := make([]string, len(i.intervals)) - - for j := 0; j < len(i.intervals); j++ { - v := i.intervals[j] - if v.Stop == v.Start+1 { - if v.Start == TokenEOF { - names = append(names, "<EOF>") - } else { - names = append(names, ("'" + string(v.Start) + "'")) - } - } else { - names = append(names, "'"+string(v.Start)+"'..'"+string(v.Stop-1)+"'") - } - } - if len(names) > 1 { - return "{" + strings.Join(names, ", ") + "}" - } - - return names[0] -} - -func (i *IntervalSet) toIndexString() string { - - names := make([]string, 0) - for j := 0; j < len(i.intervals); j++ { - v := i.intervals[j] - if v.Stop == v.Start+1 { - if v.Start == TokenEOF { - names = append(names, "<EOF>") - } else { - names = append(names, strconv.Itoa(v.Start)) - } - } else { - names = append(names, strconv.Itoa(v.Start)+".."+strconv.Itoa(v.Stop-1)) - } - } - if len(names) > 1 { - return "{" + strings.Join(names, ", ") + "}" - } - - return names[0] -} - -func (i *IntervalSet) toTokenString(literalNames []string, symbolicNames []string) string { - names := make([]string, 0) - for _, v := range i.intervals { - for j := v.Start; j < v.Stop; j++ { - names = append(names, i.elementName(literalNames, symbolicNames, j)) - } - } - if len(names) > 1 { - return "{" + strings.Join(names, ", ") + "}" - } - - return names[0] -} - -func (i *IntervalSet) elementName(literalNames []string, symbolicNames []string, a int) string { - if a == TokenEOF { - return "<EOF>" - } else if a == TokenEpsilon { - return "<EPSILON>" - } else { - if a < len(literalNames) && literalNames[a] != "" { - return literalNames[a] - } - - return symbolicNames[a] - } -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer.go deleted file mode 100644 index b04f0457..00000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer.go +/dev/null @@ -1,418 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" - "strconv" -) - -// A lexer is recognizer that draws input symbols from a character stream. -// lexer grammars result in a subclass of this object. A Lexer object -// uses simplified Match() and error recovery mechanisms in the interest -// of speed. -/// - -type Lexer interface { - TokenSource - Recognizer - - Emit() Token - - SetChannel(int) - PushMode(int) - PopMode() int - SetType(int) - SetMode(int) -} - -type BaseLexer struct { - *BaseRecognizer - - Interpreter ILexerATNSimulator - TokenStartCharIndex int - TokenStartLine int - TokenStartColumn int - ActionType int - Virt Lexer // The most derived lexer implementation. Allows virtual method calls. - - input CharStream - factory TokenFactory - tokenFactorySourcePair *TokenSourceCharStreamPair - token Token - hitEOF bool - channel int - thetype int - modeStack IntStack - mode int - text string -} - -func NewBaseLexer(input CharStream) *BaseLexer { - - lexer := new(BaseLexer) - - lexer.BaseRecognizer = NewBaseRecognizer() - - lexer.input = input - lexer.factory = CommonTokenFactoryDEFAULT - lexer.tokenFactorySourcePair = &TokenSourceCharStreamPair{lexer, input} - - lexer.Virt = lexer - - lexer.Interpreter = nil // child classes must populate it - - // The goal of all lexer rules/methods is to create a token object. - // l is an instance variable as multiple rules may collaborate to - // create a single token. NextToken will return l object after - // Matching lexer rule(s). If you subclass to allow multiple token - // emissions, then set l to the last token to be Matched or - // something nonnil so that the auto token emit mechanism will not - // emit another token. - lexer.token = nil - - // What character index in the stream did the current token start at? - // Needed, for example, to get the text for current token. Set at - // the start of NextToken. - lexer.TokenStartCharIndex = -1 - - // The line on which the first character of the token resides/// - lexer.TokenStartLine = -1 - - // The character position of first character within the line/// - lexer.TokenStartColumn = -1 - - // Once we see EOF on char stream, next token will be EOF. - // If you have DONE : EOF then you see DONE EOF. - lexer.hitEOF = false - - // The channel number for the current token/// - lexer.channel = TokenDefaultChannel - - // The token type for the current token/// - lexer.thetype = TokenInvalidType - - lexer.modeStack = make([]int, 0) - lexer.mode = LexerDefaultMode - - // You can set the text for the current token to override what is in - // the input char buffer. Use setText() or can set l instance var. 
- // / - lexer.text = "" - - return lexer -} - -const ( - LexerDefaultMode = 0 - LexerMore = -2 - LexerSkip = -3 -) - -const ( - LexerDefaultTokenChannel = TokenDefaultChannel - LexerHidden = TokenHiddenChannel - LexerMinCharValue = 0x0000 - LexerMaxCharValue = 0x10FFFF -) - -func (b *BaseLexer) reset() { - // wack Lexer state variables - if b.input != nil { - b.input.Seek(0) // rewind the input - } - b.token = nil - b.thetype = TokenInvalidType - b.channel = TokenDefaultChannel - b.TokenStartCharIndex = -1 - b.TokenStartColumn = -1 - b.TokenStartLine = -1 - b.text = "" - - b.hitEOF = false - b.mode = LexerDefaultMode - b.modeStack = make([]int, 0) - - b.Interpreter.reset() -} - -func (b *BaseLexer) GetInterpreter() ILexerATNSimulator { - return b.Interpreter -} - -func (b *BaseLexer) GetInputStream() CharStream { - return b.input -} - -func (b *BaseLexer) GetSourceName() string { - return b.GrammarFileName -} - -func (b *BaseLexer) SetChannel(v int) { - b.channel = v -} - -func (b *BaseLexer) GetTokenFactory() TokenFactory { - return b.factory -} - -func (b *BaseLexer) setTokenFactory(f TokenFactory) { - b.factory = f -} - -func (b *BaseLexer) safeMatch() (ret int) { - defer func() { - if e := recover(); e != nil { - if re, ok := e.(RecognitionException); ok { - b.notifyListeners(re) // Report error - b.Recover(re) - ret = LexerSkip // default - } - } - }() - - return b.Interpreter.Match(b.input, b.mode) -} - -// Return a token from l source i.e., Match a token on the char stream. -func (b *BaseLexer) NextToken() Token { - if b.input == nil { - panic("NextToken requires a non-nil input stream.") - } - - tokenStartMarker := b.input.Mark() - - // previously in finally block - defer func() { - // make sure we release marker after Match or - // unbuffered char stream will keep buffering - b.input.Release(tokenStartMarker) - }() - - for { - if b.hitEOF { - b.EmitEOF() - return b.token - } - b.token = nil - b.channel = TokenDefaultChannel - b.TokenStartCharIndex = b.input.Index() - b.TokenStartColumn = b.Interpreter.GetCharPositionInLine() - b.TokenStartLine = b.Interpreter.GetLine() - b.text = "" - continueOuter := false - for { - b.thetype = TokenInvalidType - ttype := LexerSkip - - ttype = b.safeMatch() - - if b.input.LA(1) == TokenEOF { - b.hitEOF = true - } - if b.thetype == TokenInvalidType { - b.thetype = ttype - } - if b.thetype == LexerSkip { - continueOuter = true - break - } - if b.thetype != LexerMore { - break - } - } - - if continueOuter { - continue - } - if b.token == nil { - b.Virt.Emit() - } - return b.token - } - - return nil -} - -// Instruct the lexer to Skip creating a token for current lexer rule -// and look for another token. NextToken() knows to keep looking when -// a lexer rule finishes with token set to SKIPTOKEN. Recall that -// if token==nil at end of any token rule, it creates one for you -// and emits it. 
-// / -func (b *BaseLexer) Skip() { - b.thetype = LexerSkip -} - -func (b *BaseLexer) More() { - b.thetype = LexerMore -} - -func (b *BaseLexer) SetMode(m int) { - b.mode = m -} - -func (b *BaseLexer) PushMode(m int) { - if LexerATNSimulatorDebug { - fmt.Println("pushMode " + strconv.Itoa(m)) - } - b.modeStack.Push(b.mode) - b.mode = m -} - -func (b *BaseLexer) PopMode() int { - if len(b.modeStack) == 0 { - panic("Empty Stack") - } - if LexerATNSimulatorDebug { - fmt.Println("popMode back to " + fmt.Sprint(b.modeStack[0:len(b.modeStack)-1])) - } - i, _ := b.modeStack.Pop() - b.mode = i - return b.mode -} - -func (b *BaseLexer) inputStream() CharStream { - return b.input -} - -// SetInputStream resets the lexer input stream and associated lexer state. -func (b *BaseLexer) SetInputStream(input CharStream) { - b.input = nil - b.tokenFactorySourcePair = &TokenSourceCharStreamPair{b, b.input} - b.reset() - b.input = input - b.tokenFactorySourcePair = &TokenSourceCharStreamPair{b, b.input} -} - -func (b *BaseLexer) GetTokenSourceCharStreamPair() *TokenSourceCharStreamPair { - return b.tokenFactorySourcePair -} - -// By default does not support multiple emits per NextToken invocation -// for efficiency reasons. Subclass and override l method, NextToken, -// and GetToken (to push tokens into a list and pull from that list -// rather than a single variable as l implementation does). -// / -func (b *BaseLexer) EmitToken(token Token) { - b.token = token -} - -// The standard method called to automatically emit a token at the -// outermost lexical rule. The token object should point into the -// char buffer start..stop. If there is a text override in 'text', -// use that to set the token's text. Override l method to emit -// custom Token objects or provide a Newfactory. -// / -func (b *BaseLexer) Emit() Token { - t := b.factory.Create(b.tokenFactorySourcePair, b.thetype, b.text, b.channel, b.TokenStartCharIndex, b.GetCharIndex()-1, b.TokenStartLine, b.TokenStartColumn) - b.EmitToken(t) - return t -} - -func (b *BaseLexer) EmitEOF() Token { - cpos := b.GetCharPositionInLine() - lpos := b.GetLine() - eof := b.factory.Create(b.tokenFactorySourcePair, TokenEOF, "", TokenDefaultChannel, b.input.Index(), b.input.Index()-1, lpos, cpos) - b.EmitToken(eof) - return eof -} - -func (b *BaseLexer) GetCharPositionInLine() int { - return b.Interpreter.GetCharPositionInLine() -} - -func (b *BaseLexer) GetLine() int { - return b.Interpreter.GetLine() -} - -func (b *BaseLexer) GetType() int { - return b.thetype -} - -func (b *BaseLexer) SetType(t int) { - b.thetype = t -} - -// What is the index of the current character of lookahead?/// -func (b *BaseLexer) GetCharIndex() int { - return b.input.Index() -} - -// Return the text Matched so far for the current token or any text override. -//Set the complete text of l token it wipes any previous changes to the text. -func (b *BaseLexer) GetText() string { - if b.text != "" { - return b.text - } - - return b.Interpreter.GetText(b.input) -} - -func (b *BaseLexer) SetText(text string) { - b.text = text -} - -func (b *BaseLexer) GetATN() *ATN { - return b.Interpreter.ATN() -} - -// Return a list of all Token objects in input char stream. -// Forces load of all tokens. Does not include EOF token. 
-// / -func (b *BaseLexer) GetAllTokens() []Token { - vl := b.Virt - tokens := make([]Token, 0) - t := vl.NextToken() - for t.GetTokenType() != TokenEOF { - tokens = append(tokens, t) - t = vl.NextToken() - } - return tokens -} - -func (b *BaseLexer) notifyListeners(e RecognitionException) { - start := b.TokenStartCharIndex - stop := b.input.Index() - text := b.input.GetTextFromInterval(NewInterval(start, stop)) - msg := "token recognition error at: '" + text + "'" - listener := b.GetErrorListenerDispatch() - listener.SyntaxError(b, nil, b.TokenStartLine, b.TokenStartColumn, msg, e) -} - -func (b *BaseLexer) getErrorDisplayForChar(c rune) string { - if c == TokenEOF { - return "<EOF>" - } else if c == '\n' { - return "\\n" - } else if c == '\t' { - return "\\t" - } else if c == '\r' { - return "\\r" - } else { - return string(c) - } -} - -func (b *BaseLexer) getCharErrorDisplay(c rune) string { - return "'" + b.getErrorDisplayForChar(c) + "'" -} - -// Lexers can normally Match any char in it's vocabulary after Matching -// a token, so do the easy thing and just kill a character and hope -// it all works out. You can instead use the rule invocation stack -// to do sophisticated error recovery if you are in a fragment rule. -// / -func (b *BaseLexer) Recover(re RecognitionException) { - if b.input.LA(1) != TokenEOF { - if _, ok := re.(*LexerNoViableAltException); ok { - // Skip a char and try again - b.Interpreter.Consume(b.input) - } else { - // TODO: Do we lose character or line position information? - b.input.Consume() - } - } -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action.go deleted file mode 100644 index 20df84f9..00000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action.go +++ /dev/null @@ -1,431 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import "strconv" - -const ( - LexerActionTypeChannel = 0 //The type of a {@link LexerChannelAction} action. - LexerActionTypeCustom = 1 //The type of a {@link LexerCustomAction} action. - LexerActionTypeMode = 2 //The type of a {@link LexerModeAction} action. - LexerActionTypeMore = 3 //The type of a {@link LexerMoreAction} action. - LexerActionTypePopMode = 4 //The type of a {@link LexerPopModeAction} action. - LexerActionTypePushMode = 5 //The type of a {@link LexerPushModeAction} action. - LexerActionTypeSkip = 6 //The type of a {@link LexerSkipAction} action. - LexerActionTypeType = 7 //The type of a {@link LexerTypeAction} action.
-) - -type LexerAction interface { - getActionType() int - getIsPositionDependent() bool - execute(lexer Lexer) - hash() int - equals(other LexerAction) bool -} - -type BaseLexerAction struct { - actionType int - isPositionDependent bool -} - -func NewBaseLexerAction(action int) *BaseLexerAction { - la := new(BaseLexerAction) - - la.actionType = action - la.isPositionDependent = false - - return la -} - -func (b *BaseLexerAction) execute(lexer Lexer) { - panic("Not implemented") -} - -func (b *BaseLexerAction) getActionType() int { - return b.actionType -} - -func (b *BaseLexerAction) getIsPositionDependent() bool { - return b.isPositionDependent -} - -func (b *BaseLexerAction) hash() int { - return b.actionType -} - -func (b *BaseLexerAction) equals(other LexerAction) bool { - return b == other -} - -// -// Implements the {@code Skip} lexer action by calling {@link Lexer//Skip}. -// -//

-// The {@code Skip} command does not have any parameters, so this action is
-// implemented as a singleton instance exposed by {@link //INSTANCE}.
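Aside: since a parameterless command like {@code Skip} carries no per-use state, the runtime can hand out one shared value; a minimal, self-contained sketch of that singleton pattern (toy names, not the ANTLR API):

```go
package main

import "fmt"

// lexerAction mirrors the interface shape above: a command the lexer
// executes after matching a token.
type lexerAction interface {
	execute(l *toyLexer)
}

// toyLexer stands in for the real Lexer; it only records what happened.
type toyLexer struct{ skipped bool }

// skipAction has no fields, so every use can share one value.
type skipAction struct{}

func (skipAction) execute(l *toyLexer) { l.skipped = true }

// The package-level singleton, analogous to LexerSkipActionINSTANCE.
var skipInstance lexerAction = skipAction{}

func main() {
	lx := &toyLexer{}
	skipInstance.execute(lx)
	fmt.Println("skipped:", lx.skipped) // skipped: true
}
```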

-type LexerSkipAction struct { - *BaseLexerAction -} - -func NewLexerSkipAction() *LexerSkipAction { - la := new(LexerSkipAction) - la.BaseLexerAction = NewBaseLexerAction(LexerActionTypeSkip) - return la -} - -// Provides a singleton instance of l parameterless lexer action. -var LexerSkipActionINSTANCE = NewLexerSkipAction() - -func (l *LexerSkipAction) execute(lexer Lexer) { - lexer.Skip() -} - -func (l *LexerSkipAction) String() string { - return "skip" -} - -// Implements the {@code type} lexer action by calling {@link Lexer//setType} -// with the assigned type. -type LexerTypeAction struct { - *BaseLexerAction - - thetype int -} - -func NewLexerTypeAction(thetype int) *LexerTypeAction { - l := new(LexerTypeAction) - l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeType) - l.thetype = thetype - return l -} - -func (l *LexerTypeAction) execute(lexer Lexer) { - lexer.SetType(l.thetype) -} - -func (l *LexerTypeAction) hash() int { - h := murmurInit(0) - h = murmurUpdate(h, l.actionType) - h = murmurUpdate(h, l.thetype) - return murmurFinish(h, 2) -} - -func (l *LexerTypeAction) equals(other LexerAction) bool { - if l == other { - return true - } else if _, ok := other.(*LexerTypeAction); !ok { - return false - } else { - return l.thetype == other.(*LexerTypeAction).thetype - } -} - -func (l *LexerTypeAction) String() string { - return "actionType(" + strconv.Itoa(l.thetype) + ")" -} - -// Implements the {@code pushMode} lexer action by calling -// {@link Lexer//pushMode} with the assigned mode. -type LexerPushModeAction struct { - *BaseLexerAction - - mode int -} - -func NewLexerPushModeAction(mode int) *LexerPushModeAction { - - l := new(LexerPushModeAction) - l.BaseLexerAction = NewBaseLexerAction(LexerActionTypePushMode) - - l.mode = mode - return l -} - -//

-// This action is implemented by calling {@link Lexer//pushMode} with the
-// value provided by {@link //getMode}.

-func (l *LexerPushModeAction) execute(lexer Lexer) { - lexer.PushMode(l.mode) -} - -func (l *LexerPushModeAction) hash() int { - h := murmurInit(0) - h = murmurUpdate(h, l.actionType) - h = murmurUpdate(h, l.mode) - return murmurFinish(h, 2) -} - -func (l *LexerPushModeAction) equals(other LexerAction) bool { - if l == other { - return true - } else if _, ok := other.(*LexerPushModeAction); !ok { - return false - } else { - return l.mode == other.(*LexerPushModeAction).mode - } -} - -func (l *LexerPushModeAction) String() string { - return "pushMode(" + strconv.Itoa(l.mode) + ")" -} - -// Implements the {@code popMode} lexer action by calling {@link Lexer//popMode}. -// -//

-// The {@code popMode} command does not have any parameters, so this action is
-// implemented as a singleton instance exposed by {@link //INSTANCE}.

-type LexerPopModeAction struct { - *BaseLexerAction -} - -func NewLexerPopModeAction() *LexerPopModeAction { - - l := new(LexerPopModeAction) - - l.BaseLexerAction = NewBaseLexerAction(LexerActionTypePopMode) - - return l -} - -var LexerPopModeActionINSTANCE = NewLexerPopModeAction() - -//

-// This action is implemented by calling {@link Lexer//popMode}.

-func (l *LexerPopModeAction) execute(lexer Lexer) { - lexer.PopMode() -} - -func (l *LexerPopModeAction) String() string { - return "popMode" -} - -// Implements the {@code more} lexer action by calling {@link Lexer//more}. -// -//

-// The {@code more} command does not have any parameters, so this action is
-// implemented as a singleton instance exposed by {@link //INSTANCE}.

- -type LexerMoreAction struct { - *BaseLexerAction -} - -func NewLexerMoreAction() *LexerMoreAction { - l := new(LexerMoreAction) - l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeMore) - - return l -} - -var LexerMoreActionINSTANCE = NewLexerMoreAction() - -//

-// This action is implemented by calling {@link Lexer//more}.

-func (l *LexerMoreAction) execute(lexer Lexer) { - lexer.More() -} - -func (l *LexerMoreAction) String() string { - return "more" -} - -// Implements the {@code mode} lexer action by calling {@link Lexer//mode} with -// the assigned mode. -type LexerModeAction struct { - *BaseLexerAction - - mode int -} - -func NewLexerModeAction(mode int) *LexerModeAction { - l := new(LexerModeAction) - l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeMode) - l.mode = mode - return l -} - -//

-// This action is implemented by calling {@link Lexer//mode} with the
-// value provided by {@link //getMode}.

-func (l *LexerModeAction) execute(lexer Lexer) { - lexer.SetMode(l.mode) -} - -func (l *LexerModeAction) hash() int { - h := murmurInit(0) - h = murmurUpdate(h, l.actionType) - h = murmurUpdate(h, l.mode) - return murmurFinish(h, 2) -} - -func (l *LexerModeAction) equals(other LexerAction) bool { - if l == other { - return true - } else if _, ok := other.(*LexerModeAction); !ok { - return false - } else { - return l.mode == other.(*LexerModeAction).mode - } -} - -func (l *LexerModeAction) String() string { - return "mode(" + strconv.Itoa(l.mode) + ")" -} - -// Executes a custom lexer action by calling {@link Recognizer//action} with the -// rule and action indexes assigned to the custom action. The implementation of -// a custom action is added to the generated code for the lexer in an override -// of {@link Recognizer//action} when the grammar is compiled. -// -//

-// This class may represent embedded actions created with the {...}
-// syntax in ANTLR 4, as well as actions created for lexer commands where the
-// command argument could not be evaluated when the grammar was compiled.
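Aside: the comment above describes dispatch by (ruleIndex, actionIndex); a rough self-contained model of that dispatch, with invented names standing in for the generated code:

```go
package main

import "fmt"

// actionKey identifies one embedded {...} action by the rule it appears
// in and its position within that rule, as described above.
type actionKey struct{ ruleIndex, actionIndex int }

// toyRecognizer plays the role of the generated lexer: the code generator
// would emit the bodies registered in actions.
type toyRecognizer struct {
	actions map[actionKey]func()
}

// Action is the hook a LexerCustomAction-style command would call.
func (r *toyRecognizer) Action(ruleIndex, actionIndex int) {
	if fn, ok := r.actions[actionKey{ruleIndex, actionIndex}]; ok {
		fn()
	}
}

func main() {
	r := &toyRecognizer{actions: map[actionKey]func(){
		{ruleIndex: 3, actionIndex: 0}: func() { fmt.Println("embedded action for rule 3 ran") },
	}}
	r.Action(3, 0) // what execute(lexer) ultimately triggers
}
```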

- -// Constructs a custom lexer action with the specified rule and action -// indexes. -// -// @param ruleIndex The rule index to use for calls to -// {@link Recognizer//action}. -// @param actionIndex The action index to use for calls to -// {@link Recognizer//action}. - -type LexerCustomAction struct { - *BaseLexerAction - ruleIndex, actionIndex int -} - -func NewLexerCustomAction(ruleIndex, actionIndex int) *LexerCustomAction { - l := new(LexerCustomAction) - l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeCustom) - l.ruleIndex = ruleIndex - l.actionIndex = actionIndex - l.isPositionDependent = true - return l -} - -//

-// Custom actions are implemented by calling {@link Lexer//action} with the
-// appropriate rule and action indexes.

-func (l *LexerCustomAction) execute(lexer Lexer) { - lexer.Action(nil, l.ruleIndex, l.actionIndex) -} - -func (l *LexerCustomAction) hash() int { - h := murmurInit(0) - h = murmurUpdate(h, l.actionType) - h = murmurUpdate(h, l.ruleIndex) - h = murmurUpdate(h, l.actionIndex) - return murmurFinish(h, 3) -} - -func (l *LexerCustomAction) equals(other LexerAction) bool { - if l == other { - return true - } else if _, ok := other.(*LexerCustomAction); !ok { - return false - } else { - return l.ruleIndex == other.(*LexerCustomAction).ruleIndex && l.actionIndex == other.(*LexerCustomAction).actionIndex - } -} - -// Implements the {@code channel} lexer action by calling -// {@link Lexer//setChannel} with the assigned channel. -// Constructs a New{@code channel} action with the specified channel value. -// @param channel The channel value to pass to {@link Lexer//setChannel}. -type LexerChannelAction struct { - *BaseLexerAction - - channel int -} - -func NewLexerChannelAction(channel int) *LexerChannelAction { - l := new(LexerChannelAction) - l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeChannel) - l.channel = channel - return l -} - -//

-// This action is implemented by calling {@link Lexer//setChannel} with the
-// value provided by {@link //getChannel}.

-func (l *LexerChannelAction) execute(lexer Lexer) { - lexer.SetChannel(l.channel) -} - -func (l *LexerChannelAction) hash() int { - h := murmurInit(0) - h = murmurUpdate(h, l.actionType) - h = murmurUpdate(h, l.channel) - return murmurFinish(h, 2) -} - -func (l *LexerChannelAction) equals(other LexerAction) bool { - if l == other { - return true - } else if _, ok := other.(*LexerChannelAction); !ok { - return false - } else { - return l.channel == other.(*LexerChannelAction).channel - } -} - -func (l *LexerChannelAction) String() string { - return "channel(" + strconv.Itoa(l.channel) + ")" -} - -// This implementation of {@link LexerAction} is used for tracking input offsets -// for position-dependent actions within a {@link LexerActionExecutor}. -// -//

-// This action is not serialized as part of the ATN, and is only required for
-// position-dependent lexer actions which appear at a location other than the
-// end of a rule. For more information about DFA optimizations employed for
-// lexer actions, see {@link LexerActionExecutor//append} and
-// {@link LexerActionExecutor//fixOffsetBeforeMatch}.

- -// Constructs a Newindexed custom action by associating a character offset -// with a {@link LexerAction}. -// -//

-// Note: This class is only required for lexer actions for which
-// {@link LexerAction//isPositionDependent} returns {@code true}.

-// -// @param offset The offset into the input {@link CharStream}, relative to -// the token start index, at which the specified lexer action should be -// executed. -// @param action The lexer action to execute at a particular offset in the -// input {@link CharStream}. -type LexerIndexedCustomAction struct { - *BaseLexerAction - - offset int - lexerAction LexerAction - isPositionDependent bool -} - -func NewLexerIndexedCustomAction(offset int, lexerAction LexerAction) *LexerIndexedCustomAction { - - l := new(LexerIndexedCustomAction) - l.BaseLexerAction = NewBaseLexerAction(lexerAction.getActionType()) - - l.offset = offset - l.lexerAction = lexerAction - l.isPositionDependent = true - - return l -} - -//

-// This method calls {@link //execute} on the result of {@link //getAction}
-// using the provided {@code lexer}.

-func (l *LexerIndexedCustomAction) execute(lexer Lexer) { - // assume the input stream position was properly set by the calling code - l.lexerAction.execute(lexer) -} - -func (l *LexerIndexedCustomAction) hash() int { - h := murmurInit(0) - h = murmurUpdate(h, l.actionType) - h = murmurUpdate(h, l.offset) - h = murmurUpdate(h, l.lexerAction.hash()) - return murmurFinish(h, 3) -} - -func (l *LexerIndexedCustomAction) equals(other LexerAction) bool { - if l == other { - return true - } else if _, ok := other.(*LexerIndexedCustomAction); !ok { - return false - } else { - return l.offset == other.(*LexerIndexedCustomAction).offset && l.lexerAction == other.(*LexerIndexedCustomAction).lexerAction - } -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action_executor.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action_executor.go deleted file mode 100644 index 80b949a1..00000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action_executor.go +++ /dev/null @@ -1,170 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -// Represents an executor for a sequence of lexer actions which traversed during -// the Matching operation of a lexer rule (token). -// -//

-// The executor tracks position information for position-dependent lexer actions
-// efficiently, ensuring that actions appearing only at the end of the rule do
-// not cause bloating of the {@link DFA} created for the lexer.
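Aside: the executor described here is essentially an ordered action list with a hash precomputed at construction; a stripped-down sketch under that reading (toy hash, not the murmur implementation used in this file):

```go
package main

import "fmt"

type action interface {
	execute()
	hash() int
}

type printAction struct{ msg string }

func (p printAction) execute()  { fmt.Println(p.msg) }
func (p printAction) hash() int { return len(p.msg) } // toy hash

// executor mirrors LexerActionExecutor: the hash of the whole sequence is
// computed once, up front, rather than on every lookup.
type executor struct {
	actions    []action
	cachedHash int
}

func newExecutor(actions []action) *executor {
	h := 57 // arbitrary seed, echoing the murmurInit(57) seed below
	for _, a := range actions {
		h = h*31 + a.hash()
	}
	return &executor{actions: actions, cachedHash: h}
}

func (e *executor) execute() {
	for _, a := range e.actions { // actions run in the order they were traversed
		a.execute()
	}
}

func main() {
	e := newExecutor([]action{printAction{"first"}, printAction{"second"}})
	e.execute()
	fmt.Println("cached hash:", e.cachedHash)
}
```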

- -type LexerActionExecutor struct { - lexerActions []LexerAction - cachedHash int -} - -func NewLexerActionExecutor(lexerActions []LexerAction) *LexerActionExecutor { - - if lexerActions == nil { - lexerActions = make([]LexerAction, 0) - } - - l := new(LexerActionExecutor) - - l.lexerActions = lexerActions - - // Caches the result of {@link //hashCode} since the hash code is an element - // of the performance-critical {@link LexerATNConfig//hashCode} operation. - l.cachedHash = murmurInit(57) - for _, a := range lexerActions { - l.cachedHash = murmurUpdate(l.cachedHash, a.hash()) - } - - return l -} - -// Creates a {@link LexerActionExecutor} which executes the actions for -// the input {@code lexerActionExecutor} followed by a specified -// {@code lexerAction}. -// -// @param lexerActionExecutor The executor for actions already traversed by -// the lexer while Matching a token within a particular -// {@link LexerATNConfig}. If this is {@code nil}, the method behaves as -// though it were an empty executor. -// @param lexerAction The lexer action to execute after the actions -// specified in {@code lexerActionExecutor}. -// -// @return A {@link LexerActionExecutor} for executing the combine actions -// of {@code lexerActionExecutor} and {@code lexerAction}. -func LexerActionExecutorappend(lexerActionExecutor *LexerActionExecutor, lexerAction LexerAction) *LexerActionExecutor { - if lexerActionExecutor == nil { - return NewLexerActionExecutor([]LexerAction{lexerAction}) - } - - return NewLexerActionExecutor(append(lexerActionExecutor.lexerActions, lexerAction)) -} - -// Creates a {@link LexerActionExecutor} which encodes the current offset -// for position-dependent lexer actions. -// -//

-// Normally, when the executor encounters lexer actions where
-// {@link LexerAction//isPositionDependent} returns {@code true}, it calls
-// {@link IntStream//seek} on the input {@link CharStream} to set the input
-// position to the end of the current token. This behavior provides
-// for efficient DFA representation of lexer actions which appear at the end
-// of a lexer rule, even when the lexer rule Matches a variable number of
-// characters.
-//
-// Prior to traversing a Match transition in the ATN, the current offset
-// from the token start index is assigned to all position-dependent lexer
-// actions which have not already been assigned a fixed offset. By storing
-// the offsets relative to the token start index, the DFA representation of
-// lexer actions which appear in the middle of tokens remains efficient due
-// to sharing among tokens of the same length, regardless of their absolute
-// position in the input stream.
-//
-// If the current executor already has offsets assigned to all
-// position-dependent lexer actions, the method returns {@code this}.

-// -// @param offset The current offset to assign to all position-dependent -// lexer actions which do not already have offsets assigned. -// -// @return A {@link LexerActionExecutor} which stores input stream offsets -// for all position-dependent lexer actions. -// / -func (l *LexerActionExecutor) fixOffsetBeforeMatch(offset int) *LexerActionExecutor { - var updatedLexerActions []LexerAction - for i := 0; i < len(l.lexerActions); i++ { - _, ok := l.lexerActions[i].(*LexerIndexedCustomAction) - if l.lexerActions[i].getIsPositionDependent() && !ok { - if updatedLexerActions == nil { - updatedLexerActions = make([]LexerAction, 0) - - for _, a := range l.lexerActions { - updatedLexerActions = append(updatedLexerActions, a) - } - } - - updatedLexerActions[i] = NewLexerIndexedCustomAction(offset, l.lexerActions[i]) - } - } - if updatedLexerActions == nil { - return l - } - - return NewLexerActionExecutor(updatedLexerActions) -} - -// Execute the actions encapsulated by l executor within the context of a -// particular {@link Lexer}. -// -//

-// This method calls {@link IntStream//seek} to set the position of the
-// {@code input} {@link CharStream} prior to calling
-// {@link LexerAction//execute} on a position-dependent action. Before the
-// method returns, the input position will be restored to the same position
-// it was in when the method was invoked.
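Aside: the seek/execute/restore contract described above, reduced to a self-contained sketch (toy types, not the runtime's CharStream):

```go
package main

import "fmt"

// toyStream models just enough of CharStream: a current index that
// position-dependent actions care about.
type toyStream struct{ index int }

func (s *toyStream) seek(i int) { s.index = i }

// indexedAction pairs an action with the offset (from the token start)
// at which it must observe the stream, as described above.
type indexedAction struct {
	offset int
	run    func(pos int)
}

// execute seeks for each indexed action, then restores the stream to the
// position it had on entry, mirroring the contract in the comment above.
func execute(s *toyStream, startIndex int, actions []indexedAction) {
	stop := s.index
	defer s.seek(stop) // restore no matter what the actions did

	for _, a := range actions {
		s.seek(startIndex + a.offset)
		a.run(s.index)
	}
}

func main() {
	s := &toyStream{index: 10} // one past the end of the current token
	execute(s, 4, []indexedAction{
		{offset: 2, run: func(pos int) { fmt.Println("action saw position", pos) }},
	})
	fmt.Println("restored position:", s.index) // 10 again
}
```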

-// -// @param lexer The lexer instance. -// @param input The input stream which is the source for the current token. -// When l method is called, the current {@link IntStream//index} for -// {@code input} should be the start of the following token, i.e. 1 -// character past the end of the current token. -// @param startIndex The token start index. This value may be passed to -// {@link IntStream//seek} to set the {@code input} position to the beginning -// of the token. -// / -func (l *LexerActionExecutor) execute(lexer Lexer, input CharStream, startIndex int) { - requiresSeek := false - stopIndex := input.Index() - - defer func() { - if requiresSeek { - input.Seek(stopIndex) - } - }() - - for i := 0; i < len(l.lexerActions); i++ { - lexerAction := l.lexerActions[i] - if la, ok := lexerAction.(*LexerIndexedCustomAction); ok { - offset := la.offset - input.Seek(startIndex + offset) - lexerAction = la.lexerAction - requiresSeek = (startIndex + offset) != stopIndex - } else if lexerAction.getIsPositionDependent() { - input.Seek(stopIndex) - requiresSeek = false - } - lexerAction.execute(lexer) - } -} - -func (l *LexerActionExecutor) hash() int { - if l == nil { - return 61 - } - return l.cachedHash -} - -func (l *LexerActionExecutor) equals(other interface{}) bool { - if l == other { - return true - } else if _, ok := other.(*LexerActionExecutor); !ok { - return false - } else { - return l.cachedHash == other.(*LexerActionExecutor).cachedHash && - &l.lexerActions == &other.(*LexerActionExecutor).lexerActions - } -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_atn_simulator.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_atn_simulator.go deleted file mode 100644 index 131364f7..00000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_atn_simulator.go +++ /dev/null @@ -1,658 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" - "strconv" -) - -var ( - LexerATNSimulatorDebug = false - LexerATNSimulatorDFADebug = false - - LexerATNSimulatorMinDFAEdge = 0 - LexerATNSimulatorMaxDFAEdge = 127 // forces unicode to stay in ATN - - LexerATNSimulatorMatchCalls = 0 -) - -type ILexerATNSimulator interface { - IATNSimulator - - reset() - Match(input CharStream, mode int) int - GetCharPositionInLine() int - GetLine() int - GetText(input CharStream) string - Consume(input CharStream) -} - -type LexerATNSimulator struct { - *BaseATNSimulator - - recog Lexer - predictionMode int - mergeCache DoubleDict - startIndex int - Line int - CharPositionInLine int - mode int - prevAccept *SimState - MatchCalls int -} - -func NewLexerATNSimulator(recog Lexer, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *LexerATNSimulator { - l := new(LexerATNSimulator) - - l.BaseATNSimulator = NewBaseATNSimulator(atn, sharedContextCache) - - l.decisionToDFA = decisionToDFA - l.recog = recog - // The current token's starting index into the character stream. - // Shared across DFA to ATN simulation in case the ATN fails and the - // DFA did not have a previous accept state. In l case, we use the - // ATN-generated exception object. 
- l.startIndex = -1 - // line number 1..n within the input/// - l.Line = 1 - // The index of the character relative to the beginning of the line - // 0..n-1/// - l.CharPositionInLine = 0 - l.mode = LexerDefaultMode - // Used during DFA/ATN exec to record the most recent accept configuration - // info - l.prevAccept = NewSimState() - // done - return l -} - -func (l *LexerATNSimulator) copyState(simulator *LexerATNSimulator) { - l.CharPositionInLine = simulator.CharPositionInLine - l.Line = simulator.Line - l.mode = simulator.mode - l.startIndex = simulator.startIndex -} - -func (l *LexerATNSimulator) Match(input CharStream, mode int) int { - l.MatchCalls++ - l.mode = mode - mark := input.Mark() - - defer func() { - input.Release(mark) - }() - - l.startIndex = input.Index() - l.prevAccept.reset() - - dfa := l.decisionToDFA[mode] - - if dfa.s0 == nil { - return l.MatchATN(input) - } - - return l.execATN(input, dfa.s0) -} - -func (l *LexerATNSimulator) reset() { - l.prevAccept.reset() - l.startIndex = -1 - l.Line = 1 - l.CharPositionInLine = 0 - l.mode = LexerDefaultMode -} - -func (l *LexerATNSimulator) MatchATN(input CharStream) int { - startState := l.atn.modeToStartState[l.mode] - - if LexerATNSimulatorDebug { - fmt.Println("MatchATN mode " + strconv.Itoa(l.mode) + " start: " + startState.String()) - } - oldMode := l.mode - s0Closure := l.computeStartState(input, startState) - suppressEdge := s0Closure.hasSemanticContext - s0Closure.hasSemanticContext = false - - next := l.addDFAState(s0Closure) - - if !suppressEdge { - l.decisionToDFA[l.mode].setS0(next) - } - - predict := l.execATN(input, next) - - if LexerATNSimulatorDebug { - fmt.Println("DFA after MatchATN: " + l.decisionToDFA[oldMode].ToLexerString()) - } - return predict -} - -func (l *LexerATNSimulator) execATN(input CharStream, ds0 *DFAState) int { - - if LexerATNSimulatorDebug { - fmt.Println("start state closure=" + ds0.configs.String()) - } - if ds0.isAcceptState { - // allow zero-length tokens - l.captureSimState(l.prevAccept, input, ds0) - } - t := input.LA(1) - s := ds0 // s is current/from DFA state - - for { // while more work - if LexerATNSimulatorDebug { - fmt.Println("execATN loop starting closure: " + s.configs.String()) - } - - // As we move src->trg, src->trg, we keep track of the previous trg to - // avoid looking up the DFA state again, which is expensive. - // If the previous target was already part of the DFA, we might - // be able to avoid doing a reach operation upon t. If s!=nil, - // it means that semantic predicates didn't prevent us from - // creating a DFA state. Once we know s!=nil, we check to see if - // the DFA state has an edge already for t. If so, we can just reuse - // it's configuration set there's no point in re-computing it. - // This is kind of like doing DFA simulation within the ATN - // simulation because DFA simulation is really just a way to avoid - // computing reach/closure sets. Technically, once we know that - // we have a previously added DFA state, we could jump over to - // the DFA simulator. But, that would mean popping back and forth - // a lot and making things more complicated algorithmically. - // This optimization makes a lot of sense for loops within DFA. - // A character will take us back to an existing DFA state - // that already has lots of edges out of it. e.g., .* in comments. 
- target := l.getExistingTargetState(s, t) - if target == nil { - target = l.computeTargetState(input, s, t) - // print("Computed:" + str(target)) - } - if target == ATNSimulatorError { - break - } - // If l is a consumable input element, make sure to consume before - // capturing the accept state so the input index, line, and char - // position accurately reflect the state of the interpreter at the - // end of the token. - if t != TokenEOF { - l.Consume(input) - } - if target.isAcceptState { - l.captureSimState(l.prevAccept, input, target) - if t == TokenEOF { - break - } - } - t = input.LA(1) - s = target // flip current DFA target becomes Newsrc/from state - } - - return l.failOrAccept(l.prevAccept, input, s.configs, t) -} - -// Get an existing target state for an edge in the DFA. If the target state -// for the edge has not yet been computed or is otherwise not available, -// l method returns {@code nil}. -// -// @param s The current DFA state -// @param t The next input symbol -// @return The existing target DFA state for the given input symbol -// {@code t}, or {@code nil} if the target state for l edge is not -// already cached -func (l *LexerATNSimulator) getExistingTargetState(s *DFAState, t int) *DFAState { - if s.edges == nil || t < LexerATNSimulatorMinDFAEdge || t > LexerATNSimulatorMaxDFAEdge { - return nil - } - - target := s.edges[t-LexerATNSimulatorMinDFAEdge] - if LexerATNSimulatorDebug && target != nil { - fmt.Println("reuse state " + strconv.Itoa(s.stateNumber) + " edge to " + strconv.Itoa(target.stateNumber)) - } - return target -} - -// Compute a target state for an edge in the DFA, and attempt to add the -// computed state and corresponding edge to the DFA. -// -// @param input The input stream -// @param s The current DFA state -// @param t The next input symbol -// -// @return The computed target DFA state for the given input symbol -// {@code t}. If {@code t} does not lead to a valid DFA state, l method -// returns {@link //ERROR}. -func (l *LexerATNSimulator) computeTargetState(input CharStream, s *DFAState, t int) *DFAState { - reach := NewOrderedATNConfigSet() - - // if we don't find an existing DFA state - // Fill reach starting from closure, following t transitions - l.getReachableConfigSet(input, s.configs, reach.BaseATNConfigSet, t) - - if len(reach.configs) == 0 { // we got nowhere on t from s - if !reach.hasSemanticContext { - // we got nowhere on t, don't panic out l knowledge it'd - // cause a failover from DFA later. - l.addDFAEdge(s, t, ATNSimulatorError, nil) - } - // stop when we can't Match any more char - return ATNSimulatorError - } - // Add an edge from s to target DFA found/created for reach - return l.addDFAEdge(s, t, nil, reach.BaseATNConfigSet) -} - -func (l *LexerATNSimulator) failOrAccept(prevAccept *SimState, input CharStream, reach ATNConfigSet, t int) int { - if l.prevAccept.dfaState != nil { - lexerActionExecutor := prevAccept.dfaState.lexerActionExecutor - l.accept(input, lexerActionExecutor, l.startIndex, prevAccept.index, prevAccept.line, prevAccept.column) - return prevAccept.dfaState.prediction - } - - // if no accept and EOF is first char, return EOF - if t == TokenEOF && input.Index() == l.startIndex { - return TokenEOF - } - - panic(NewLexerNoViableAltException(l.recog, input, l.startIndex, reach)) -} - -// Given a starting configuration set, figure out all ATN configurations -// we can reach upon input {@code t}. Parameter {@code reach} is a return -// parameter. 
-func (l *LexerATNSimulator) getReachableConfigSet(input CharStream, closure ATNConfigSet, reach ATNConfigSet, t int) { - // l is used to Skip processing for configs which have a lower priority - // than a config that already reached an accept state for the same rule - SkipAlt := ATNInvalidAltNumber - - for _, cfg := range closure.GetItems() { - currentAltReachedAcceptState := (cfg.GetAlt() == SkipAlt) - if currentAltReachedAcceptState && cfg.(*LexerATNConfig).passedThroughNonGreedyDecision { - continue - } - - if LexerATNSimulatorDebug { - - fmt.Printf("testing %s at %s\n", l.GetTokenName(t), cfg.String()) // l.recog, true)) - } - - for _, trans := range cfg.GetState().GetTransitions() { - target := l.getReachableTarget(trans, t) - if target != nil { - lexerActionExecutor := cfg.(*LexerATNConfig).lexerActionExecutor - if lexerActionExecutor != nil { - lexerActionExecutor = lexerActionExecutor.fixOffsetBeforeMatch(input.Index() - l.startIndex) - } - treatEOFAsEpsilon := (t == TokenEOF) - config := NewLexerATNConfig3(cfg.(*LexerATNConfig), target, lexerActionExecutor) - if l.closure(input, config, reach, - currentAltReachedAcceptState, true, treatEOFAsEpsilon) { - // any remaining configs for l alt have a lower priority - // than the one that just reached an accept state. - SkipAlt = cfg.GetAlt() - } - } - } - } -} - -func (l *LexerATNSimulator) accept(input CharStream, lexerActionExecutor *LexerActionExecutor, startIndex, index, line, charPos int) { - if LexerATNSimulatorDebug { - fmt.Printf("ACTION %s\n", lexerActionExecutor) - } - // seek to after last char in token - input.Seek(index) - l.Line = line - l.CharPositionInLine = charPos - if lexerActionExecutor != nil && l.recog != nil { - lexerActionExecutor.execute(l.recog, input, startIndex) - } -} - -func (l *LexerATNSimulator) getReachableTarget(trans Transition, t int) ATNState { - if trans.Matches(t, 0, LexerMaxCharValue) { - return trans.getTarget() - } - - return nil -} - -func (l *LexerATNSimulator) computeStartState(input CharStream, p ATNState) *OrderedATNConfigSet { - configs := NewOrderedATNConfigSet() - for i := 0; i < len(p.GetTransitions()); i++ { - target := p.GetTransitions()[i].getTarget() - cfg := NewLexerATNConfig6(target, i+1, BasePredictionContextEMPTY) - l.closure(input, cfg, configs, false, false, false) - } - - return configs -} - -// Since the alternatives within any lexer decision are ordered by -// preference, l method stops pursuing the closure as soon as an accept -// state is reached. After the first accept state is reached by depth-first -// search from {@code config}, all other (potentially reachable) states for -// l rule would have a lower priority. -// -// @return {@code true} if an accept state is reached, otherwise -// {@code false}. 
-func (l *LexerATNSimulator) closure(input CharStream, config *LexerATNConfig, configs ATNConfigSet, - currentAltReachedAcceptState, speculative, treatEOFAsEpsilon bool) bool { - - if LexerATNSimulatorDebug { - fmt.Println("closure(" + config.String() + ")") // config.String(l.recog, true) + ")") - } - - _, ok := config.state.(*RuleStopState) - if ok { - - if LexerATNSimulatorDebug { - if l.recog != nil { - fmt.Printf("closure at %s rule stop %s\n", l.recog.GetRuleNames()[config.state.GetRuleIndex()], config) - } else { - fmt.Printf("closure at rule stop %s\n", config) - } - } - - if config.context == nil || config.context.hasEmptyPath() { - if config.context == nil || config.context.isEmpty() { - configs.Add(config, nil) - return true - } - - configs.Add(NewLexerATNConfig2(config, config.state, BasePredictionContextEMPTY), nil) - currentAltReachedAcceptState = true - } - if config.context != nil && !config.context.isEmpty() { - for i := 0; i < config.context.length(); i++ { - if config.context.getReturnState(i) != BasePredictionContextEmptyReturnState { - newContext := config.context.GetParent(i) // "pop" return state - returnState := l.atn.states[config.context.getReturnState(i)] - cfg := NewLexerATNConfig2(config, returnState, newContext) - currentAltReachedAcceptState = l.closure(input, cfg, configs, currentAltReachedAcceptState, speculative, treatEOFAsEpsilon) - } - } - } - return currentAltReachedAcceptState - } - // optimization - if !config.state.GetEpsilonOnlyTransitions() { - if !currentAltReachedAcceptState || !config.passedThroughNonGreedyDecision { - configs.Add(config, nil) - } - } - for j := 0; j < len(config.state.GetTransitions()); j++ { - trans := config.state.GetTransitions()[j] - cfg := l.getEpsilonTarget(input, config, trans, configs, speculative, treatEOFAsEpsilon) - if cfg != nil { - currentAltReachedAcceptState = l.closure(input, cfg, configs, - currentAltReachedAcceptState, speculative, treatEOFAsEpsilon) - } - } - return currentAltReachedAcceptState -} - -// side-effect: can alter configs.hasSemanticContext -func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *LexerATNConfig, trans Transition, - configs ATNConfigSet, speculative, treatEOFAsEpsilon bool) *LexerATNConfig { - - var cfg *LexerATNConfig - - if trans.getSerializationType() == TransitionRULE { - - rt := trans.(*RuleTransition) - newContext := SingletonBasePredictionContextCreate(config.context, rt.followState.GetStateNumber()) - cfg = NewLexerATNConfig2(config, trans.getTarget(), newContext) - - } else if trans.getSerializationType() == TransitionPRECEDENCE { - panic("Precedence predicates are not supported in lexers.") - } else if trans.getSerializationType() == TransitionPREDICATE { - // Track traversing semantic predicates. If we traverse, - // we cannot add a DFA state for l "reach" computation - // because the DFA would not test the predicate again in the - // future. Rather than creating collections of semantic predicates - // like v3 and testing them on prediction, v4 will test them on the - // fly all the time using the ATN not the DFA. This is slower but - // semantically it's not used that often. One of the key elements to - // l predicate mechanism is not adding DFA states that see - // predicates immediately afterwards in the ATN. For example, - - // a : ID {p1}? | ID {p2}? - - // should create the start state for rule 'a' (to save start state - // competition), but should not create target of ID state. 
The - // collection of ATN states the following ID references includes - // states reached by traversing predicates. Since l is when we - // test them, we cannot cash the DFA state target of ID. - - pt := trans.(*PredicateTransition) - - if LexerATNSimulatorDebug { - fmt.Println("EVAL rule " + strconv.Itoa(trans.(*PredicateTransition).ruleIndex) + ":" + strconv.Itoa(pt.predIndex)) - } - configs.SetHasSemanticContext(true) - if l.evaluatePredicate(input, pt.ruleIndex, pt.predIndex, speculative) { - cfg = NewLexerATNConfig4(config, trans.getTarget()) - } - } else if trans.getSerializationType() == TransitionACTION { - if config.context == nil || config.context.hasEmptyPath() { - // execute actions anywhere in the start rule for a token. - // - // TODO: if the entry rule is invoked recursively, some - // actions may be executed during the recursive call. The - // problem can appear when hasEmptyPath() is true but - // isEmpty() is false. In l case, the config needs to be - // split into two contexts - one with just the empty path - // and another with everything but the empty path. - // Unfortunately, the current algorithm does not allow - // getEpsilonTarget to return two configurations, so - // additional modifications are needed before we can support - // the split operation. - lexerActionExecutor := LexerActionExecutorappend(config.lexerActionExecutor, l.atn.lexerActions[trans.(*ActionTransition).actionIndex]) - cfg = NewLexerATNConfig3(config, trans.getTarget(), lexerActionExecutor) - } else { - // ignore actions in referenced rules - cfg = NewLexerATNConfig4(config, trans.getTarget()) - } - } else if trans.getSerializationType() == TransitionEPSILON { - cfg = NewLexerATNConfig4(config, trans.getTarget()) - } else if trans.getSerializationType() == TransitionATOM || - trans.getSerializationType() == TransitionRANGE || - trans.getSerializationType() == TransitionSET { - if treatEOFAsEpsilon { - if trans.Matches(TokenEOF, 0, LexerMaxCharValue) { - cfg = NewLexerATNConfig4(config, trans.getTarget()) - } - } - } - return cfg -} - -// Evaluate a predicate specified in the lexer. -// -//

-// If {@code speculative} is {@code true}, this method was called before
-// {@link //consume} for the Matched character. This method should call
-// {@link //consume} before evaluating the predicate to ensure position
-// sensitive values, including {@link Lexer//GetText}, {@link Lexer//GetLine},
-// and {@link Lexer//getcolumn}, properly reflect the current
-// lexer state. This method should restore {@code input} and the simulator
-// to the original state before returning (i.e. undo the actions made by the
-// call to {@link //consume}).
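Aside: the save, consume, evaluate, restore sequence this paragraph specifies can be modeled in a few lines (toy state, not the simulator's):

```go
package main

import "fmt"

// toyInput models the pieces of lexer state the paragraph says must be
// saved and restored around a speculative predicate check.
type toyInput struct {
	index, line, col int
}

func (in *toyInput) consume() { in.index++; in.col++ }

// evalSpeculative consumes the matched character so the predicate sees
// position-sensitive values (line/column/text) as of *after* the match,
// then undoes everything before returning, per the contract above.
func evalSpeculative(in *toyInput, pred func(*toyInput) bool) bool {
	saved := *in
	defer func() { *in = saved }() // restore input and simulator state

	in.consume()
	return pred(in)
}

func main() {
	in := &toyInput{index: 5, line: 1, col: 5}
	ok := evalSpeculative(in, func(in *toyInput) bool { return in.col > 5 })
	fmt.Println(ok, in.index, in.col) // true 5 5 — state was restored
}
```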

-// -// @param input The input stream. -// @param ruleIndex The rule containing the predicate. -// @param predIndex The index of the predicate within the rule. -// @param speculative {@code true} if the current index in {@code input} is -// one character before the predicate's location. -// -// @return {@code true} if the specified predicate evaluates to -// {@code true}. -// / -func (l *LexerATNSimulator) evaluatePredicate(input CharStream, ruleIndex, predIndex int, speculative bool) bool { - // assume true if no recognizer was provided - if l.recog == nil { - return true - } - if !speculative { - return l.recog.Sempred(nil, ruleIndex, predIndex) - } - savedcolumn := l.CharPositionInLine - savedLine := l.Line - index := input.Index() - marker := input.Mark() - - defer func() { - l.CharPositionInLine = savedcolumn - l.Line = savedLine - input.Seek(index) - input.Release(marker) - }() - - l.Consume(input) - return l.recog.Sempred(nil, ruleIndex, predIndex) -} - -func (l *LexerATNSimulator) captureSimState(settings *SimState, input CharStream, dfaState *DFAState) { - settings.index = input.Index() - settings.line = l.Line - settings.column = l.CharPositionInLine - settings.dfaState = dfaState -} - -func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfgs ATNConfigSet) *DFAState { - if to == nil && cfgs != nil { - // leading to l call, ATNConfigSet.hasSemanticContext is used as a - // marker indicating dynamic predicate evaluation makes l edge - // dependent on the specific input sequence, so the static edge in the - // DFA should be omitted. The target DFAState is still created since - // execATN has the ability to reSynchronize with the DFA state cache - // following the predicate evaluation step. - // - // TJP notes: next time through the DFA, we see a pred again and eval. - // If that gets us to a previously created (but dangling) DFA - // state, we can continue in pure DFA mode from there. - // / - suppressEdge := cfgs.HasSemanticContext() - cfgs.SetHasSemanticContext(false) - - to = l.addDFAState(cfgs) - - if suppressEdge { - return to - } - } - // add the edge - if tk < LexerATNSimulatorMinDFAEdge || tk > LexerATNSimulatorMaxDFAEdge { - // Only track edges within the DFA bounds - return to - } - if LexerATNSimulatorDebug { - fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + strconv.Itoa(tk)) - } - if from.edges == nil { - // make room for tokens 1..n and -1 masquerading as index 0 - from.edges = make([]*DFAState, LexerATNSimulatorMaxDFAEdge-LexerATNSimulatorMinDFAEdge+1) - } - from.edges[tk-LexerATNSimulatorMinDFAEdge] = to // connect - - return to -} - -// Add a NewDFA state if there isn't one with l set of -// configurations already. This method also detects the first -// configuration containing an ATN rule stop state. Later, when -// traversing the DFA, we will know which rule to accept. 
-func (l *LexerATNSimulator) addDFAState(configs ATNConfigSet) *DFAState { - - proposed := NewDFAState(-1, configs) - var firstConfigWithRuleStopState ATNConfig - - for _, cfg := range configs.GetItems() { - - _, ok := cfg.GetState().(*RuleStopState) - - if ok { - firstConfigWithRuleStopState = cfg - break - } - } - if firstConfigWithRuleStopState != nil { - proposed.isAcceptState = true - proposed.lexerActionExecutor = firstConfigWithRuleStopState.(*LexerATNConfig).lexerActionExecutor - proposed.setPrediction(l.atn.ruleToTokenType[firstConfigWithRuleStopState.GetState().GetRuleIndex()]) - } - hash := proposed.hash() - dfa := l.decisionToDFA[l.mode] - existing, ok := dfa.getState(hash) - if ok { - return existing - } - newState := proposed - newState.stateNumber = dfa.numStates() - configs.SetReadOnly(true) - newState.configs = configs - dfa.setState(hash, newState) - return newState -} - -func (l *LexerATNSimulator) getDFA(mode int) *DFA { - return l.decisionToDFA[mode] -} - -// Get the text Matched so far for the current token. -func (l *LexerATNSimulator) GetText(input CharStream) string { - // index is first lookahead char, don't include. - return input.GetTextFromInterval(NewInterval(l.startIndex, input.Index()-1)) -} - -func (l *LexerATNSimulator) Consume(input CharStream) { - curChar := input.LA(1) - if curChar == int('\n') { - l.Line++ - l.CharPositionInLine = 0 - } else { - l.CharPositionInLine++ - } - input.Consume() -} - -func (l *LexerATNSimulator) GetCharPositionInLine() int { - return l.CharPositionInLine -} - -func (l *LexerATNSimulator) GetLine() int { - return l.Line -} - -func (l *LexerATNSimulator) GetTokenName(tt int) string { - if tt == -1 { - return "EOF" - } - - return "'" + string(tt) + "'" -} - -func resetSimState(sim *SimState) { - sim.index = -1 - sim.line = 0 - sim.column = -1 - sim.dfaState = nil -} - -type SimState struct { - index int - line int - column int - dfaState *DFAState -} - -func NewSimState() *SimState { - s := new(SimState) - resetSimState(s) - return s -} - -func (s *SimState) reset() { - resetSimState(s) -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/ll1_analyzer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/ll1_analyzer.go deleted file mode 100644 index f5afd09b..00000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/ll1_analyzer.go +++ /dev/null @@ -1,215 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -type LL1Analyzer struct { - atn *ATN -} - -func NewLL1Analyzer(atn *ATN) *LL1Analyzer { - la := new(LL1Analyzer) - la.atn = atn - return la -} - -//* Special value added to the lookahead sets to indicate that we hit -// a predicate during analysis if {@code seeThruPreds==false}. -/// -const ( - LL1AnalyzerHitPred = TokenInvalidType -) - -//* -// Calculates the SLL(1) expected lookahead set for each outgoing transition -// of an {@link ATNState}. The returned array has one element for each -// outgoing transition in {@code s}. If the closure from transition -// i leads to a semantic predicate before Matching a symbol, the -// element at index i of the result will be {@code nil}. -// -// @param s the ATN state -// @return the expected symbols for each outgoing transition of {@code s}. 
-func (la *LL1Analyzer) getDecisionLookahead(s ATNState) []*IntervalSet { - if s == nil { - return nil - } - count := len(s.GetTransitions()) - look := make([]*IntervalSet, count) - for alt := 0; alt < count; alt++ { - look[alt] = NewIntervalSet() - lookBusy := NewSet(nil, nil) - seeThruPreds := false // fail to get lookahead upon pred - la.look1(s.GetTransitions()[alt].getTarget(), nil, BasePredictionContextEMPTY, look[alt], lookBusy, NewBitSet(), seeThruPreds, false) - // Wipe out lookahead for la alternative if we found nothing - // or we had a predicate when we !seeThruPreds - if look[alt].length() == 0 || look[alt].contains(LL1AnalyzerHitPred) { - look[alt] = nil - } - } - return look -} - -//* -// Compute set of tokens that can follow {@code s} in the ATN in the -// specified {@code ctx}. -// -//

-// If {@code ctx} is {@code nil} and the end of the rule containing
-// {@code s} is reached, {@link Token//EPSILON} is added to the result set.
-// If {@code ctx} is not {@code nil} and the end of the outermost rule is
-// reached, {@link Token//EOF} is added to the result set.

-// -// @param s the ATN state -// @param stopState the ATN state to stop at. This can be a -// {@link BlockEndState} to detect epsilon paths through a closure. -// @param ctx the complete parser context, or {@code nil} if the context -// should be ignored -// -// @return The set of tokens that can follow {@code s} in the ATN in the -// specified {@code ctx}. -/// -func (la *LL1Analyzer) Look(s, stopState ATNState, ctx RuleContext) *IntervalSet { - r := NewIntervalSet() - seeThruPreds := true // ignore preds get all lookahead - var lookContext PredictionContext - if ctx != nil { - lookContext = predictionContextFromRuleContext(s.GetATN(), ctx) - } - la.look1(s, stopState, lookContext, r, NewSet(nil, nil), NewBitSet(), seeThruPreds, true) - return r -} - -//* -// Compute set of tokens that can follow {@code s} in the ATN in the -// specified {@code ctx}. -// -//

-// If {@code ctx} is {@code nil} and {@code stopState} or the end of the
-// rule containing {@code s} is reached, {@link Token//EPSILON} is added to
-// the result set. If {@code ctx} is not {@code nil} and {@code addEOF} is
-// {@code true} and {@code stopState} or the end of the outermost rule is
-// reached, {@link Token//EOF} is added to the result set.

-// -// @param s the ATN state. -// @param stopState the ATN state to stop at. This can be a -// {@link BlockEndState} to detect epsilon paths through a closure. -// @param ctx The outer context, or {@code nil} if the outer context should -// not be used. -// @param look The result lookahead set. -// @param lookBusy A set used for preventing epsilon closures in the ATN -// from causing a stack overflow. Outside code should pass -// {@code NewSet} for la argument. -// @param calledRuleStack A set used for preventing left recursion in the -// ATN from causing a stack overflow. Outside code should pass -// {@code NewBitSet()} for la argument. -// @param seeThruPreds {@code true} to true semantic predicates as -// implicitly {@code true} and "see through them", otherwise {@code false} -// to treat semantic predicates as opaque and add {@link //HitPred} to the -// result if one is encountered. -// @param addEOF Add {@link Token//EOF} to the result if the end of the -// outermost context is reached. This parameter has no effect if {@code ctx} -// is {@code nil}. - -func (la *LL1Analyzer) look2(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *Set, calledRuleStack *BitSet, seeThruPreds, addEOF bool, i int) { - - returnState := la.atn.states[ctx.getReturnState(i)] - - removed := calledRuleStack.contains(returnState.GetRuleIndex()) - - defer func() { - if removed { - calledRuleStack.add(returnState.GetRuleIndex()) - } - }() - - calledRuleStack.remove(returnState.GetRuleIndex()) - la.look1(returnState, stopState, ctx.GetParent(i), look, lookBusy, calledRuleStack, seeThruPreds, addEOF) - -} - -func (la *LL1Analyzer) look1(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *Set, calledRuleStack *BitSet, seeThruPreds, addEOF bool) { - - c := NewBaseATNConfig6(s, 0, ctx) - - if lookBusy.contains(c) { - return - } - - lookBusy.add(c) - - if s == stopState { - if ctx == nil { - look.addOne(TokenEpsilon) - return - } else if ctx.isEmpty() && addEOF { - look.addOne(TokenEOF) - return - } - } - - _, ok := s.(*RuleStopState) - - if ok { - if ctx == nil { - look.addOne(TokenEpsilon) - return - } else if ctx.isEmpty() && addEOF { - look.addOne(TokenEOF) - return - } - - if ctx != BasePredictionContextEMPTY { - // run thru all possible stack tops in ctx - for i := 0; i < ctx.length(); i++ { - returnState := la.atn.states[ctx.getReturnState(i)] - la.look2(returnState, stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF, i) - } - return - } - } - - n := len(s.GetTransitions()) - - for i := 0; i < n; i++ { - t := s.GetTransitions()[i] - - if t1, ok := t.(*RuleTransition); ok { - if calledRuleStack.contains(t1.getTarget().GetRuleIndex()) { - continue - } - - newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber()) - la.look3(stopState, newContext, look, lookBusy, calledRuleStack, seeThruPreds, addEOF, t1) - } else if t2, ok := t.(AbstractPredicateTransition); ok { - if seeThruPreds { - la.look1(t2.getTarget(), stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF) - } else { - look.addOne(LL1AnalyzerHitPred) - } - } else if t.getIsEpsilon() { - la.look1(t.getTarget(), stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF) - } else if _, ok := t.(*WildcardTransition); ok { - look.addRange(TokenMinUserTokenType, la.atn.maxTokenType) - } else { - set := t.getLabel() - if set != nil { - if _, ok := t.(*NotSetTransition); ok { - set = set.complement(TokenMinUserTokenType, la.atn.maxTokenType) 
- } - look.addSet(set) - } - } - } -} - -func (la *LL1Analyzer) look3(stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *Set, calledRuleStack *BitSet, seeThruPreds, addEOF bool, t1 *RuleTransition) { - - newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber()) - - defer func() { - calledRuleStack.remove(t1.getTarget().GetRuleIndex()) - }() - - calledRuleStack.add(t1.getTarget().GetRuleIndex()) - la.look1(t1.getTarget(), stopState, newContext, look, lookBusy, calledRuleStack, seeThruPreds, addEOF) - -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser.go deleted file mode 100644 index fb60258e..00000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser.go +++ /dev/null @@ -1,718 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" - "strconv" -) - -type Parser interface { - Recognizer - - GetInterpreter() *ParserATNSimulator - - GetTokenStream() TokenStream - GetTokenFactory() TokenFactory - GetParserRuleContext() ParserRuleContext - SetParserRuleContext(ParserRuleContext) - Consume() Token - GetParseListeners() []ParseTreeListener - - GetErrorHandler() ErrorStrategy - SetErrorHandler(ErrorStrategy) - GetInputStream() IntStream - GetCurrentToken() Token - GetExpectedTokens() *IntervalSet - NotifyErrorListeners(string, Token, RecognitionException) - IsExpectedToken(int) bool - GetPrecedence() int - GetRuleInvocationStack(ParserRuleContext) []string -} - -type BaseParser struct { - *BaseRecognizer - - Interpreter *ParserATNSimulator - BuildParseTrees bool - - input TokenStream - errHandler ErrorStrategy - precedenceStack IntStack - ctx ParserRuleContext - - tracer *TraceListener - parseListeners []ParseTreeListener - _SyntaxErrors int -} - -// p.is all the parsing support code essentially most of it is error -// recovery stuff.// -func NewBaseParser(input TokenStream) *BaseParser { - - p := new(BaseParser) - - p.BaseRecognizer = NewBaseRecognizer() - - // The input stream. - p.input = nil - // The error handling strategy for the parser. The default value is a new - // instance of {@link DefaultErrorStrategy}. - p.errHandler = NewDefaultErrorStrategy() - p.precedenceStack = make([]int, 0) - p.precedenceStack.Push(0) - // The {@link ParserRuleContext} object for the currently executing rule. - // p.is always non-nil during the parsing process. - p.ctx = nil - // Specifies whether or not the parser should construct a parse tree during - // the parsing process. The default value is {@code true}. - p.BuildParseTrees = true - // When {@link //setTrace}{@code (true)} is called, a reference to the - // {@link TraceListener} is stored here so it can be easily removed in a - // later call to {@link //setTrace}{@code (false)}. The listener itself is - // implemented as a parser listener so p.field is not directly used by - // other parser methods. - p.tracer = nil - // The list of {@link ParseTreeListener} listeners registered to receive - // events during the parse. - p.parseListeners = nil - // The number of syntax errors Reported during parsing. p.value is - // incremented each time {@link //NotifyErrorListeners} is called. 
- p._SyntaxErrors = 0 - p.SetInputStream(input) - - return p -} - -// p.field maps from the serialized ATN string to the deserialized {@link -// ATN} with -// bypass alternatives. -// -// @see ATNDeserializationOptions//isGenerateRuleBypassTransitions() -// -var bypassAltsAtnCache = make(map[string]int) - -// reset the parser's state// -func (p *BaseParser) reset() { - if p.input != nil { - p.input.Seek(0) - } - p.errHandler.reset(p) - p.ctx = nil - p._SyntaxErrors = 0 - p.SetTrace(nil) - p.precedenceStack = make([]int, 0) - p.precedenceStack.Push(0) - if p.Interpreter != nil { - p.Interpreter.reset() - } -} - -func (p *BaseParser) GetErrorHandler() ErrorStrategy { - return p.errHandler -} - -func (p *BaseParser) SetErrorHandler(e ErrorStrategy) { - p.errHandler = e -} - -// Match current input symbol against {@code ttype}. If the symbol type -// Matches, {@link ANTLRErrorStrategy//ReportMatch} and {@link //consume} are -// called to complete the Match process. -// -//

-// If the symbol type does not Match,
-// {@link ANTLRErrorStrategy//recoverInline} is called on the current error
-// strategy to attempt recovery. If {@link //getBuildParseTree} is
-// {@code true} and the token index of the symbol returned by
-// {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to
-// the parse tree by calling {@link ParserRuleContext//addErrorNode}.
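Aside: a minimal model of the match-or-recover flow described here, with the error strategy reduced to fabricating a single token (index -1 marking a conjured-up token, as the comment describes):

```go
package main

import "fmt"

type token struct {
	ttype int
	index int // -1 marks a token conjured up during recovery
}

type toyParser struct {
	tokens []token
	pos    int
	errs   []token // stands in for addErrorNode
}

func (p *toyParser) current() token { return p.tokens[p.pos] }

// match consumes the current token if it has the expected type; otherwise
// it fabricates one (index -1) and records it as an error node, echoing
// the recoverInline contract described above.
func (p *toyParser) match(ttype int) token {
	t := p.current()
	if t.ttype == ttype {
		p.pos++
		return t
	}
	recovered := token{ttype: ttype, index: -1}
	p.errs = append(p.errs, recovered)
	return recovered
}

func main() {
	p := &toyParser{tokens: []token{{ttype: 1, index: 0}, {ttype: 9, index: 1}}}
	fmt.Println(p.match(1)) // matches normally
	fmt.Println(p.match(2)) // mismatched: fabricated token, error node recorded
	fmt.Println("error nodes:", len(p.errs))
}
```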

-// -// @param ttype the token type to Match -// @return the Matched symbol -// @panics RecognitionException if the current input symbol did not Match -// {@code ttype} and the error strategy could not recover from the -// mismatched symbol - -func (p *BaseParser) Match(ttype int) Token { - - t := p.GetCurrentToken() - - if t.GetTokenType() == ttype { - p.errHandler.ReportMatch(p) - p.Consume() - } else { - t = p.errHandler.RecoverInline(p) - if p.BuildParseTrees && t.GetTokenIndex() == -1 { - // we must have conjured up a Newtoken during single token - // insertion - // if it's not the current symbol - p.ctx.AddErrorNode(t) - } - } - - return t -} - -// Match current input symbol as a wildcard. If the symbol type Matches -// (i.e. has a value greater than 0), {@link ANTLRErrorStrategy//ReportMatch} -// and {@link //consume} are called to complete the Match process. -// -//

-// If the symbol type does not Match,
-// {@link ANTLRErrorStrategy//recoverInline} is called on the current error
-// strategy to attempt recovery. If {@link //getBuildParseTree} is
-// {@code true} and the token index of the symbol returned by
-// {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to
-// the parse tree by calling {@link ParserRuleContext//addErrorNode}.

-// -// @return the Matched symbol -// @panics RecognitionException if the current input symbol did not Match -// a wildcard and the error strategy could not recover from the mismatched -// symbol - -func (p *BaseParser) MatchWildcard() Token { - t := p.GetCurrentToken() - if t.GetTokenType() > 0 { - p.errHandler.ReportMatch(p) - p.Consume() - } else { - t = p.errHandler.RecoverInline(p) - if p.BuildParseTrees && t.GetTokenIndex() == -1 { - // we must have conjured up a Newtoken during single token - // insertion - // if it's not the current symbol - p.ctx.AddErrorNode(t) - } - } - return t -} - -func (p *BaseParser) GetParserRuleContext() ParserRuleContext { - return p.ctx -} - -func (p *BaseParser) SetParserRuleContext(v ParserRuleContext) { - p.ctx = v -} - -func (p *BaseParser) GetParseListeners() []ParseTreeListener { - if p.parseListeners == nil { - return make([]ParseTreeListener, 0) - } - return p.parseListeners -} - -// Registers {@code listener} to receive events during the parsing process. -// -//

-// To support output-preserving grammar transformations (including but not
-// limited to left-recursion removal, automated left-factoring, and
-// optimized code generation), calls to listener methods during the parse
-// may differ substantially from calls made by
-// {@link ParseTreeWalker//DEFAULT} used after the parse is complete. In
-// particular, rule entry and exit events may occur in a different order
-// during the parse than after the parse. In addition, calls to certain
-// rule entry methods may be omitted.
-//
-// With the following specific exceptions, calls to listener events are
-// deterministic, i.e. for identical input the calls to listener
-// methods will be the same.
-//
-//   - Alterations to the grammar used to generate code may change the
-//     behavior of the listener calls.
-//   - Alterations to the command line options passed to ANTLR 4 when
-//     generating the parser may change the behavior of the listener calls.
-//   - Changing the version of the ANTLR Tool used to generate the parser
-//     may change the behavior of the listener calls.
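Aside: a toy model of the listener notification order that underlies these caveats: entry callbacks fire in registration order and exit callbacks in reverse, matching the reverse-order walk in TriggerExitRuleEvent further down:

```go
package main

import "fmt"

// listener is a cut-down ParseTreeListener: just enter/exit hooks.
type listener interface {
	enter(rule string)
	exit(rule string)
}

type named struct{ id string }

func (n named) enter(rule string) { fmt.Println(n.id, "enter", rule) }
func (n named) exit(rule string)  { fmt.Println(n.id, "exit", rule) }

func main() {
	listeners := []listener{named{"A"}, named{"B"}}

	// Entry events fire in registration order...
	for _, l := range listeners {
		l.enter("expr")
	}
	// ...and exit events in reverse, as TriggerExitRuleEvent does below.
	for i := len(listeners) - 1; i >= 0; i-- {
		listeners[i].exit("expr")
	}
}
```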
-// -// @param listener the listener to add -// -// @panics nilPointerException if {@code} listener is {@code nil} -// -func (p *BaseParser) AddParseListener(listener ParseTreeListener) { - if listener == nil { - panic("listener") - } - if p.parseListeners == nil { - p.parseListeners = make([]ParseTreeListener, 0) - } - p.parseListeners = append(p.parseListeners, listener) -} - -// -// Remove {@code listener} from the list of parse listeners. -// -//

-// If {@code listener} is {@code nil} or has not been added as a parse
-// listener, this method does nothing.

-// @param listener the listener to remove -// -func (p *BaseParser) RemoveParseListener(listener ParseTreeListener) { - - if p.parseListeners != nil { - - idx := -1 - for i, v := range p.parseListeners { - if v == listener { - idx = i - break - } - } - - if idx == -1 { - return - } - - // remove the listener from the slice - p.parseListeners = append(p.parseListeners[0:idx], p.parseListeners[idx+1:]...) - - if len(p.parseListeners) == 0 { - p.parseListeners = nil - } - } -} - -// Remove all parse listeners. -func (p *BaseParser) removeParseListeners() { - p.parseListeners = nil -} - -// Notify any parse listeners of an enter rule event. -func (p *BaseParser) TriggerEnterRuleEvent() { - if p.parseListeners != nil { - ctx := p.ctx - for _, listener := range p.parseListeners { - listener.EnterEveryRule(ctx) - ctx.EnterRule(listener) - } - } -} - -// -// Notify any parse listeners of an exit rule event. -// -// @see //addParseListener -// -func (p *BaseParser) TriggerExitRuleEvent() { - if p.parseListeners != nil { - // reverse order walk of listeners - ctx := p.ctx - l := len(p.parseListeners) - 1 - - for i := range p.parseListeners { - listener := p.parseListeners[l-i] - ctx.ExitRule(listener) - listener.ExitEveryRule(ctx) - } - } -} - -func (p *BaseParser) GetInterpreter() *ParserATNSimulator { - return p.Interpreter -} - -func (p *BaseParser) GetATN() *ATN { - return p.Interpreter.atn -} - -func (p *BaseParser) GetTokenFactory() TokenFactory { - return p.input.GetTokenSource().GetTokenFactory() -} - -// Tell our token source and error strategy about a Newway to create tokens.// -func (p *BaseParser) setTokenFactory(factory TokenFactory) { - p.input.GetTokenSource().setTokenFactory(factory) -} - -// The ATN with bypass alternatives is expensive to create so we create it -// lazily. -// -// @panics UnsupportedOperationException if the current parser does not -// implement the {@link //getSerializedATN()} method. -// -func (p *BaseParser) GetATNWithBypassAlts() { - - // TODO - panic("Not implemented!") - - // serializedAtn := p.getSerializedATN() - // if (serializedAtn == nil) { - // panic("The current parser does not support an ATN with bypass alternatives.") - // } - // result := p.bypassAltsAtnCache[serializedAtn] - // if (result == nil) { - // deserializationOptions := NewATNDeserializationOptions(nil) - // deserializationOptions.generateRuleBypassTransitions = true - // result = NewATNDeserializer(deserializationOptions).deserialize(serializedAtn) - // p.bypassAltsAtnCache[serializedAtn] = result - // } - // return result -} - -// The preferred method of getting a tree pattern. For example, here's a -// sample use: -// -//
-// ParseTree t = parser.expr()
-// ParseTreePattern p = parser.compileParseTreePattern("<ID>+0",
-// MyParser.RULE_expr)
-// ParseTreeMatch m = p.Match(t)
-// String id = m.Get("ID")
-// 
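One detail worth keeping from the deleted TriggerExitRuleEvent above: exit events walk the listener list in reverse, so the most recently added listener is notified first on rule exit. A tiny runnable sketch of that index arithmetic, with plain strings standing in for listeners:

package main

import "fmt"

func main() {
	listeners := []string{"first", "second", "third"}
	// Reverse-order walk as in TriggerExitRuleEvent: l-i counts down
	// from the last element without mutating the slice.
	l := len(listeners) - 1
	for i := range listeners {
		fmt.Println("exit ->", listeners[l-i]) // third, second, first
	}
}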
- -func (p *BaseParser) compileParseTreePattern(pattern, patternRuleIndex, lexer Lexer) { - - panic("NewParseTreePatternMatcher not implemented!") - // - // if (lexer == nil) { - // if (p.GetTokenStream() != nil) { - // tokenSource := p.GetTokenStream().GetTokenSource() - // if _, ok := tokenSource.(ILexer); ok { - // lexer = tokenSource - // } - // } - // } - // if (lexer == nil) { - // panic("Parser can't discover a lexer to use") - // } - - // m := NewParseTreePatternMatcher(lexer, p) - // return m.compile(pattern, patternRuleIndex) -} - -func (p *BaseParser) GetInputStream() IntStream { - return p.GetTokenStream() -} - -func (p *BaseParser) SetInputStream(input TokenStream) { - p.SetTokenStream(input) -} - -func (p *BaseParser) GetTokenStream() TokenStream { - return p.input -} - -// Set the token stream and reset the parser.// -func (p *BaseParser) SetTokenStream(input TokenStream) { - p.input = nil - p.reset() - p.input = input -} - -// Match needs to return the current input symbol, which gets put -// into the label for the associated token ref e.g., x=ID. -// -func (p *BaseParser) GetCurrentToken() Token { - return p.input.LT(1) -} - -func (p *BaseParser) NotifyErrorListeners(msg string, offendingToken Token, err RecognitionException) { - if offendingToken == nil { - offendingToken = p.GetCurrentToken() - } - p._SyntaxErrors++ - line := offendingToken.GetLine() - column := offendingToken.GetColumn() - listener := p.GetErrorListenerDispatch() - listener.SyntaxError(p, offendingToken, line, column, msg, err) -} - -func (p *BaseParser) Consume() Token { - o := p.GetCurrentToken() - if o.GetTokenType() != TokenEOF { - p.GetInputStream().Consume() - } - hasListener := p.parseListeners != nil && len(p.parseListeners) > 0 - if p.BuildParseTrees || hasListener { - if p.errHandler.inErrorRecoveryMode(p) { - node := p.ctx.AddErrorNode(o) - if p.parseListeners != nil { - for _, l := range p.parseListeners { - l.VisitErrorNode(node) - } - } - - } else { - node := p.ctx.AddTokenNode(o) - if p.parseListeners != nil { - for _, l := range p.parseListeners { - l.VisitTerminal(node) - } - } - } - // node.invokingState = p.state - } - - return o -} - -func (p *BaseParser) addContextToParseTree() { - // add current context to parent if we have a parent - if p.ctx.GetParent() != nil { - p.ctx.GetParent().(ParserRuleContext).AddChild(p.ctx) - } -} - -func (p *BaseParser) EnterRule(localctx ParserRuleContext, state, ruleIndex int) { - p.SetState(state) - p.ctx = localctx - p.ctx.SetStart(p.input.LT(1)) - if p.BuildParseTrees { - p.addContextToParseTree() - } - if p.parseListeners != nil { - p.TriggerEnterRuleEvent() - } -} - -func (p *BaseParser) ExitRule() { - p.ctx.SetStop(p.input.LT(-1)) - // trigger event on ctx, before it reverts to parent - if p.parseListeners != nil { - p.TriggerExitRuleEvent() - } - p.SetState(p.ctx.GetInvokingState()) - if p.ctx.GetParent() != nil { - p.ctx = p.ctx.GetParent().(ParserRuleContext) - } else { - p.ctx = nil - } -} - -func (p *BaseParser) EnterOuterAlt(localctx ParserRuleContext, altNum int) { - localctx.SetAltNumber(altNum) - // if we have Newlocalctx, make sure we replace existing ctx - // that is previous child of parse tree - if p.BuildParseTrees && p.ctx != localctx { - if p.ctx.GetParent() != nil { - p.ctx.GetParent().(ParserRuleContext).RemoveLastChild() - p.ctx.GetParent().(ParserRuleContext).AddChild(localctx) - } - } - p.ctx = localctx -} - -// Get the precedence level for the top-most precedence rule. 
-// -// @return The precedence level for the top-most precedence rule, or -1 if -// the parser context is not nested within a precedence rule. - -func (p *BaseParser) GetPrecedence() int { - if len(p.precedenceStack) == 0 { - return -1 - } - - return p.precedenceStack[len(p.precedenceStack)-1] -} - -func (p *BaseParser) EnterRecursionRule(localctx ParserRuleContext, state, ruleIndex, precedence int) { - p.SetState(state) - p.precedenceStack.Push(precedence) - p.ctx = localctx - p.ctx.SetStart(p.input.LT(1)) - if p.parseListeners != nil { - p.TriggerEnterRuleEvent() // simulates rule entry for - // left-recursive rules - } -} - -// -// Like {@link //EnterRule} but for recursive rules. - -func (p *BaseParser) PushNewRecursionContext(localctx ParserRuleContext, state, ruleIndex int) { - previous := p.ctx - previous.SetParent(localctx) - previous.SetInvokingState(state) - previous.SetStop(p.input.LT(-1)) - - p.ctx = localctx - p.ctx.SetStart(previous.GetStart()) - if p.BuildParseTrees { - p.ctx.AddChild(previous) - } - if p.parseListeners != nil { - p.TriggerEnterRuleEvent() // simulates rule entry for - // left-recursive rules - } -} - -func (p *BaseParser) UnrollRecursionContexts(parentCtx ParserRuleContext) { - p.precedenceStack.Pop() - p.ctx.SetStop(p.input.LT(-1)) - retCtx := p.ctx // save current ctx (return value) - // unroll so ctx is as it was before call to recursive method - if p.parseListeners != nil { - for p.ctx != parentCtx { - p.TriggerExitRuleEvent() - p.ctx = p.ctx.GetParent().(ParserRuleContext) - } - } else { - p.ctx = parentCtx - } - // hook into tree - retCtx.SetParent(parentCtx) - if p.BuildParseTrees && parentCtx != nil { - // add return ctx into invoking rule's tree - parentCtx.AddChild(retCtx) - } -} - -func (p *BaseParser) GetInvokingContext(ruleIndex int) ParserRuleContext { - ctx := p.ctx - for ctx != nil { - if ctx.GetRuleIndex() == ruleIndex { - return ctx - } - ctx = ctx.GetParent().(ParserRuleContext) - } - return nil -} - -func (p *BaseParser) Precpred(localctx RuleContext, precedence int) bool { - return precedence >= p.precedenceStack[len(p.precedenceStack)-1] -} - -func (p *BaseParser) inContext(context ParserRuleContext) bool { - // TODO: useful in parser? - return false -} - -// -// Checks whether or not {@code symbol} can follow the current state in the -// ATN. The behavior of p.method is equivalent to the following, but is -// implemented such that the complete context-sensitive follow set does not -// need to be explicitly constructed. -// -//
-// return getExpectedTokens().contains(symbol)
-// 
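The one-liner above is the contract; the deleted IsExpectedToken below implements it without materializing the complete follow set, chasing epsilon up through invoking contexts instead. A toy model of that walk, assuming hypothetical simplified types (no EOF handling, unlike the real method):

package main

import "fmt"

const epsilon = -2 // stand-in for TokenEpsilon

type ctx struct {
	follow []int // follow set contributed by the invoking rule
	parent *ctx
}

func contains(set []int, sym int) bool {
	for _, s := range set {
		if s == sym {
			return true
		}
	}
	return false
}

// isExpected keeps popping invoking contexts while the local follow set
// still contains epsilon (i.e. the current rule may end here).
func isExpected(following []int, c *ctx, symbol int) bool {
	if contains(following, symbol) {
		return true
	}
	if !contains(following, epsilon) {
		return false
	}
	for c != nil && contains(following, epsilon) {
		following = c.follow
		if contains(following, symbol) {
			return true
		}
		c = c.parent
	}
	return false
}

func main() {
	outer := &ctx{follow: []int{9}}
	inner := &ctx{follow: []int{7, epsilon}, parent: outer}
	fmt.Println(isExpected([]int{epsilon}, inner, 9)) // true: found two frames up
}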
-// -// @param symbol the symbol type to check -// @return {@code true} if {@code symbol} can follow the current state in -// the ATN, otherwise {@code false}. - -func (p *BaseParser) IsExpectedToken(symbol int) bool { - atn := p.Interpreter.atn - ctx := p.ctx - s := atn.states[p.state] - following := atn.NextTokens(s, nil) - if following.contains(symbol) { - return true - } - if !following.contains(TokenEpsilon) { - return false - } - for ctx != nil && ctx.GetInvokingState() >= 0 && following.contains(TokenEpsilon) { - invokingState := atn.states[ctx.GetInvokingState()] - rt := invokingState.GetTransitions()[0] - following = atn.NextTokens(rt.(*RuleTransition).followState, nil) - if following.contains(symbol) { - return true - } - ctx = ctx.GetParent().(ParserRuleContext) - } - if following.contains(TokenEpsilon) && symbol == TokenEOF { - return true - } - - return false -} - -// Computes the set of input symbols which could follow the current parser -// state and context, as given by {@link //GetState} and {@link //GetContext}, -// respectively. -// -// @see ATN//getExpectedTokens(int, RuleContext) -// -func (p *BaseParser) GetExpectedTokens() *IntervalSet { - return p.Interpreter.atn.getExpectedTokens(p.state, p.ctx) -} - -func (p *BaseParser) GetExpectedTokensWithinCurrentRule() *IntervalSet { - atn := p.Interpreter.atn - s := atn.states[p.state] - return atn.NextTokens(s, nil) -} - -// Get a rule's index (i.e., {@code RULE_ruleName} field) or -1 if not found.// -func (p *BaseParser) GetRuleIndex(ruleName string) int { - var ruleIndex, ok = p.GetRuleIndexMap()[ruleName] - if ok { - return ruleIndex - } - - return -1 -} - -// Return List<String> of the rule names in your parser instance -// leading up to a call to the current rule. You could override if -// you want more details such as the file/line info of where -// in the ATN a rule is invoked. -// -// this very useful for error messages. - -func (p *BaseParser) GetRuleInvocationStack(c ParserRuleContext) []string { - if c == nil { - c = p.ctx - } - stack := make([]string, 0) - for c != nil { - // compute what follows who invoked us - ruleIndex := c.GetRuleIndex() - if ruleIndex < 0 { - stack = append(stack, "n/a") - } else { - stack = append(stack, p.GetRuleNames()[ruleIndex]) - } - - vp := c.GetParent() - - if vp == nil { - break - } - - c = vp.(ParserRuleContext) - } - return stack -} - -// For debugging and other purposes.// -func (p *BaseParser) GetDFAStrings() string { - return fmt.Sprint(p.Interpreter.decisionToDFA) -} - -// For debugging and other purposes.// -func (p *BaseParser) DumpDFA() { - seenOne := false - for _, dfa := range p.Interpreter.decisionToDFA { - if dfa.numStates() > 0 { - if seenOne { - fmt.Println() - } - fmt.Println("Decision " + strconv.Itoa(dfa.decision) + ":") - fmt.Print(dfa.String(p.LiteralNames, p.SymbolicNames)) - seenOne = true - } - } -} - -func (p *BaseParser) GetSourceName() string { - return p.GrammarFileName -} - -// During a parse is sometimes useful to listen in on the rule entry and exit -// events as well as token Matches. p.is for quick and dirty debugging. 
-// -func (p *BaseParser) SetTrace(trace *TraceListener) { - if trace == nil { - p.RemoveParseListener(p.tracer) - p.tracer = nil - } else { - if p.tracer != nil { - p.RemoveParseListener(p.tracer) - } - p.tracer = NewTraceListener(p) - p.AddParseListener(p.tracer) - } -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_atn_simulator.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_atn_simulator.go deleted file mode 100644 index 128b9a96..00000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_atn_simulator.go +++ /dev/null @@ -1,1473 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" - "strconv" - "strings" -) - -var ( - ParserATNSimulatorDebug = false - ParserATNSimulatorListATNDecisions = false - ParserATNSimulatorDFADebug = false - ParserATNSimulatorRetryDebug = false -) - -type ParserATNSimulator struct { - *BaseATNSimulator - - parser Parser - predictionMode int - input TokenStream - startIndex int - dfa *DFA - mergeCache *DoubleDict - outerContext ParserRuleContext -} - -func NewParserATNSimulator(parser Parser, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *ParserATNSimulator { - - p := new(ParserATNSimulator) - - p.BaseATNSimulator = NewBaseATNSimulator(atn, sharedContextCache) - - p.parser = parser - p.decisionToDFA = decisionToDFA - // SLL, LL, or LL + exact ambig detection?// - p.predictionMode = PredictionModeLL - // LAME globals to avoid parameters!!!!! I need these down deep in predTransition - p.input = nil - p.startIndex = 0 - p.outerContext = nil - p.dfa = nil - // Each prediction operation uses a cache for merge of prediction contexts. - // Don't keep around as it wastes huge amounts of memory. DoubleKeyMap - // isn't Synchronized but we're ok since two threads shouldn't reuse same - // parser/atnsim object because it can only handle one input at a time. - // This maps graphs a and b to merged result c. (a,b)&rarrc. We can avoid - // the merge if we ever see a and b again. Note that (b,a)&rarrc should - // also be examined during cache lookup. - // - p.mergeCache = nil - - return p -} - -func (p *ParserATNSimulator) GetPredictionMode() int { - return p.predictionMode -} - -func (p *ParserATNSimulator) SetPredictionMode(v int) { - p.predictionMode = v -} - -func (p *ParserATNSimulator) reset() { -} - -func (p *ParserATNSimulator) AdaptivePredict(input TokenStream, decision int, outerContext ParserRuleContext) int { - if ParserATNSimulatorDebug || ParserATNSimulatorListATNDecisions { - fmt.Println("AdaptivePredict decision " + strconv.Itoa(decision) + - " exec LA(1)==" + p.getLookaheadName(input) + - " line " + strconv.Itoa(input.LT(1).GetLine()) + ":" + - strconv.Itoa(input.LT(1).GetColumn())) - } - - p.input = input - p.startIndex = input.Index() - p.outerContext = outerContext - - dfa := p.decisionToDFA[decision] - p.dfa = dfa - m := input.Mark() - index := input.Index() - - defer func() { - p.dfa = nil - p.mergeCache = nil // wack cache after each prediction - input.Seek(index) - input.Release(m) - }() - - // Now we are certain to have a specific decision's DFA - // But, do we still need an initial state? - var s0 *DFAState - if dfa.precedenceDfa { - // the start state for a precedence DFA depends on the current - // parser precedence, and is provided by a DFA method. 
- s0 = dfa.getPrecedenceStartState(p.parser.GetPrecedence()) - } else { - // the start state for a "regular" DFA is just s0 - s0 = dfa.s0 - } - - if s0 == nil { - if outerContext == nil { - outerContext = RuleContextEmpty - } - if ParserATNSimulatorDebug || ParserATNSimulatorListATNDecisions { - fmt.Println("predictATN decision " + strconv.Itoa(dfa.decision) + - " exec LA(1)==" + p.getLookaheadName(input) + - ", outerContext=" + outerContext.String(p.parser.GetRuleNames(), nil)) - } - // If p is not a precedence DFA, we check the ATN start state - // to determine if p ATN start state is the decision for the - // closure block that determines whether a precedence rule - // should continue or complete. - - t2 := dfa.atnStartState - t, ok := t2.(*StarLoopEntryState) - if !dfa.precedenceDfa && ok { - if t.precedenceRuleDecision { - dfa.setPrecedenceDfa(true) - } - } - fullCtx := false - s0Closure := p.computeStartState(dfa.atnStartState, RuleContextEmpty, fullCtx) - - if dfa.precedenceDfa { - // If p is a precedence DFA, we use applyPrecedenceFilter - // to convert the computed start state to a precedence start - // state. We then use DFA.setPrecedenceStartState to set the - // appropriate start state for the precedence level rather - // than simply setting DFA.s0. - // - s0Closure = p.applyPrecedenceFilter(s0Closure) - s0 = p.addDFAState(dfa, NewDFAState(-1, s0Closure)) - dfa.setPrecedenceStartState(p.parser.GetPrecedence(), s0) - } else { - s0 = p.addDFAState(dfa, NewDFAState(-1, s0Closure)) - dfa.s0 = s0 - } - } - alt := p.execATN(dfa, s0, input, index, outerContext) - if ParserATNSimulatorDebug { - fmt.Println("DFA after predictATN: " + dfa.String(p.parser.GetLiteralNames(), nil)) - } - return alt - -} - -// Performs ATN simulation to compute a predicted alternative based -// upon the remaining input, but also updates the DFA cache to avoid -// having to traverse the ATN again for the same input sequence. - -// There are some key conditions we're looking for after computing a new -// set of ATN configs (proposed DFA state): -// if the set is empty, there is no viable alternative for current symbol -// does the state uniquely predict an alternative? -// does the state have a conflict that would prevent us from -// putting it on the work list? - -// We also have some key operations to do: -// add an edge from previous DFA state to potentially NewDFA state, D, -// upon current symbol but only if adding to work list, which means in all -// cases except no viable alternative (and possibly non-greedy decisions?) 
-// collecting predicates and adding semantic context to DFA accept states -// adding rule context to context-sensitive DFA accept states -// consuming an input symbol -// Reporting a conflict -// Reporting an ambiguity -// Reporting a context sensitivity -// Reporting insufficient predicates - -// cover these cases: -// dead end -// single alt -// single alt + preds -// conflict -// conflict + preds -// -func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream, startIndex int, outerContext ParserRuleContext) int { - - if ParserATNSimulatorDebug || ParserATNSimulatorListATNDecisions { - fmt.Println("execATN decision " + strconv.Itoa(dfa.decision) + - " exec LA(1)==" + p.getLookaheadName(input) + - " line " + strconv.Itoa(input.LT(1).GetLine()) + ":" + strconv.Itoa(input.LT(1).GetColumn())) - } - - previousD := s0 - - if ParserATNSimulatorDebug { - fmt.Println("s0 = " + s0.String()) - } - t := input.LA(1) - for { // for more work - D := p.getExistingTargetState(previousD, t) - if D == nil { - D = p.computeTargetState(dfa, previousD, t) - } - if D == ATNSimulatorError { - // if any configs in previous dipped into outer context, that - // means that input up to t actually finished entry rule - // at least for SLL decision. Full LL doesn't dip into outer - // so don't need special case. - // We will get an error no matter what so delay until after - // decision better error message. Also, no reachable target - // ATN states in SLL implies LL will also get nowhere. - // If conflict in states that dip out, choose min since we - // will get error no matter what. - e := p.noViableAlt(input, outerContext, previousD.configs, startIndex) - input.Seek(startIndex) - alt := p.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previousD.configs, outerContext) - if alt != ATNInvalidAltNumber { - return alt - } - - panic(e) - } - if D.requiresFullContext && p.predictionMode != PredictionModeSLL { - // IF PREDS, MIGHT RESOLVE TO SINGLE ALT => SLL (or syntax error) - conflictingAlts := D.configs.GetConflictingAlts() - if D.predicates != nil { - if ParserATNSimulatorDebug { - fmt.Println("DFA state has preds in DFA sim LL failover") - } - conflictIndex := input.Index() - if conflictIndex != startIndex { - input.Seek(startIndex) - } - conflictingAlts = p.evalSemanticContext(D.predicates, outerContext, true) - if conflictingAlts.length() == 1 { - if ParserATNSimulatorDebug { - fmt.Println("Full LL avoided") - } - return conflictingAlts.minValue() - } - if conflictIndex != startIndex { - // restore the index so Reporting the fallback to full - // context occurs with the index at the correct spot - input.Seek(conflictIndex) - } - } - if ParserATNSimulatorDFADebug { - fmt.Println("ctx sensitive state " + outerContext.String(nil, nil) + " in " + D.String()) - } - fullCtx := true - s0Closure := p.computeStartState(dfa.atnStartState, outerContext, fullCtx) - p.ReportAttemptingFullContext(dfa, conflictingAlts, D.configs, startIndex, input.Index()) - alt := p.execATNWithFullContext(dfa, D, s0Closure, input, startIndex, outerContext) - return alt - } - if D.isAcceptState { - if D.predicates == nil { - return D.prediction - } - stopIndex := input.Index() - input.Seek(startIndex) - alts := p.evalSemanticContext(D.predicates, outerContext, true) - if alts.length() == 0 { - panic(p.noViableAlt(input, outerContext, D.configs, startIndex)) - } else if alts.length() == 1 { - return alts.minValue() - } else { - // Report ambiguity after predicate evaluation to make sure the correct set of ambig alts 
is Reported. - p.ReportAmbiguity(dfa, D, startIndex, stopIndex, false, alts, D.configs) - return alts.minValue() - } - } - previousD = D - - if t != TokenEOF { - input.Consume() - t = input.LA(1) - } - } - - panic("Should not have reached p state") -} - -// Get an existing target state for an edge in the DFA. If the target state -// for the edge has not yet been computed or is otherwise not available, -// p method returns {@code nil}. -// -// @param previousD The current DFA state -// @param t The next input symbol -// @return The existing target DFA state for the given input symbol -// {@code t}, or {@code nil} if the target state for p edge is not -// already cached - -func (p *ParserATNSimulator) getExistingTargetState(previousD *DFAState, t int) *DFAState { - edges := previousD.edges - if edges == nil { - return nil - } - - return edges[t+1] -} - -// Compute a target state for an edge in the DFA, and attempt to add the -// computed state and corresponding edge to the DFA. -// -// @param dfa The DFA -// @param previousD The current DFA state -// @param t The next input symbol -// -// @return The computed target DFA state for the given input symbol -// {@code t}. If {@code t} does not lead to a valid DFA state, p method -// returns {@link //ERROR}. - -func (p *ParserATNSimulator) computeTargetState(dfa *DFA, previousD *DFAState, t int) *DFAState { - reach := p.computeReachSet(previousD.configs, t, false) - - if reach == nil { - p.addDFAEdge(dfa, previousD, t, ATNSimulatorError) - return ATNSimulatorError - } - // create Newtarget state we'll add to DFA after it's complete - D := NewDFAState(-1, reach) - - predictedAlt := p.getUniqueAlt(reach) - - if ParserATNSimulatorDebug { - altSubSets := PredictionModegetConflictingAltSubsets(reach) - fmt.Println("SLL altSubSets=" + fmt.Sprint(altSubSets) + - ", previous=" + previousD.configs.String() + - ", configs=" + reach.String() + - ", predict=" + strconv.Itoa(predictedAlt) + - ", allSubsetsConflict=" + - fmt.Sprint(PredictionModeallSubsetsConflict(altSubSets)) + - ", conflictingAlts=" + p.getConflictingAlts(reach).String()) - } - if predictedAlt != ATNInvalidAltNumber { - // NO CONFLICT, UNIQUELY PREDICTED ALT - D.isAcceptState = true - D.configs.SetUniqueAlt(predictedAlt) - D.setPrediction(predictedAlt) - } else if PredictionModehasSLLConflictTerminatingPrediction(p.predictionMode, reach) { - // MORE THAN ONE VIABLE ALTERNATIVE - D.configs.SetConflictingAlts(p.getConflictingAlts(reach)) - D.requiresFullContext = true - // in SLL-only mode, we will stop at p state and return the minimum alt - D.isAcceptState = true - D.setPrediction(D.configs.GetConflictingAlts().minValue()) - } - if D.isAcceptState && D.configs.HasSemanticContext() { - p.predicateDFAState(D, p.atn.getDecisionState(dfa.decision)) - if D.predicates != nil { - D.setPrediction(ATNInvalidAltNumber) - } - } - // all adds to dfa are done after we've created full D state - D = p.addDFAEdge(dfa, previousD, t, D) - return D -} - -func (p *ParserATNSimulator) predicateDFAState(dfaState *DFAState, decisionState DecisionState) { - // We need to test all predicates, even in DFA states that - // uniquely predict alternative. 
- nalts := len(decisionState.GetTransitions()) - // Update DFA so reach becomes accept state with (predicate,alt) - // pairs if preds found for conflicting alts - altsToCollectPredsFrom := p.getConflictingAltsOrUniqueAlt(dfaState.configs) - altToPred := p.getPredsForAmbigAlts(altsToCollectPredsFrom, dfaState.configs, nalts) - if altToPred != nil { - dfaState.predicates = p.getPredicatePredictions(altsToCollectPredsFrom, altToPred) - dfaState.setPrediction(ATNInvalidAltNumber) // make sure we use preds - } else { - // There are preds in configs but they might go away - // when OR'd together like {p}? || NONE == NONE. If neither - // alt has preds, resolve to min alt - dfaState.setPrediction(altsToCollectPredsFrom.minValue()) - } -} - -// comes back with reach.uniqueAlt set to a valid alt -func (p *ParserATNSimulator) execATNWithFullContext(dfa *DFA, D *DFAState, s0 ATNConfigSet, input TokenStream, startIndex int, outerContext ParserRuleContext) int { - - if ParserATNSimulatorDebug || ParserATNSimulatorListATNDecisions { - fmt.Println("execATNWithFullContext " + s0.String()) - } - - fullCtx := true - foundExactAmbig := false - var reach ATNConfigSet - previous := s0 - input.Seek(startIndex) - t := input.LA(1) - predictedAlt := -1 - - for { // for more work - reach = p.computeReachSet(previous, t, fullCtx) - if reach == nil { - // if any configs in previous dipped into outer context, that - // means that input up to t actually finished entry rule - // at least for LL decision. Full LL doesn't dip into outer - // so don't need special case. - // We will get an error no matter what so delay until after - // decision better error message. Also, no reachable target - // ATN states in SLL implies LL will also get nowhere. - // If conflict in states that dip out, choose min since we - // will get error no matter what. - e := p.noViableAlt(input, outerContext, previous, startIndex) - input.Seek(startIndex) - alt := p.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previous, outerContext) - if alt != ATNInvalidAltNumber { - return alt - } - - panic(e) - } - altSubSets := PredictionModegetConflictingAltSubsets(reach) - if ParserATNSimulatorDebug { - fmt.Println("LL altSubSets=" + fmt.Sprint(altSubSets) + ", predict=" + - strconv.Itoa(PredictionModegetUniqueAlt(altSubSets)) + ", resolvesToJustOneViableAlt=" + - fmt.Sprint(PredictionModeresolvesToJustOneViableAlt(altSubSets))) - } - reach.SetUniqueAlt(p.getUniqueAlt(reach)) - // unique prediction? - if reach.GetUniqueAlt() != ATNInvalidAltNumber { - predictedAlt = reach.GetUniqueAlt() - break - } else if p.predictionMode != PredictionModeLLExactAmbigDetection { - predictedAlt = PredictionModeresolvesToJustOneViableAlt(altSubSets) - if predictedAlt != ATNInvalidAltNumber { - break - } - } else { - // In exact ambiguity mode, we never try to terminate early. - // Just keeps scarfing until we know what the conflict is - if PredictionModeallSubsetsConflict(altSubSets) && PredictionModeallSubsetsEqual(altSubSets) { - foundExactAmbig = true - predictedAlt = PredictionModegetSingleViableAlt(altSubSets) - break - } - // else there are multiple non-conflicting subsets or - // we're not sure what the ambiguity is yet. - // So, keep going. - } - previous = reach - if t != TokenEOF { - input.Consume() - t = input.LA(1) - } - } - // If the configuration set uniquely predicts an alternative, - // without conflict, then we know that it's a full LL decision - // not SLL. 
- if reach.GetUniqueAlt() != ATNInvalidAltNumber { - p.ReportContextSensitivity(dfa, predictedAlt, reach, startIndex, input.Index()) - return predictedAlt - } - // We do not check predicates here because we have checked them - // on-the-fly when doing full context prediction. - - // - // In non-exact ambiguity detection mode, we might actually be able to - // detect an exact ambiguity, but I'm not going to spend the cycles - // needed to check. We only emit ambiguity warnings in exact ambiguity - // mode. - // - // For example, we might know that we have conflicting configurations. - // But, that does not mean that there is no way forward without a - // conflict. It's possible to have nonconflicting alt subsets as in: - - // altSubSets=[{1, 2}, {1, 2}, {1}, {1, 2}] - - // from - // - // [(17,1,[5 $]), (13,1,[5 10 $]), (21,1,[5 10 $]), (11,1,[$]), - // (13,2,[5 10 $]), (21,2,[5 10 $]), (11,2,[$])] - // - // In p case, (17,1,[5 $]) indicates there is some next sequence that - // would resolve p without conflict to alternative 1. Any other viable - // next sequence, however, is associated with a conflict. We stop - // looking for input because no amount of further lookahead will alter - // the fact that we should predict alternative 1. We just can't say for - // sure that there is an ambiguity without looking further. - - p.ReportAmbiguity(dfa, D, startIndex, input.Index(), foundExactAmbig, nil, reach) - - return predictedAlt -} - -func (p *ParserATNSimulator) computeReachSet(closure ATNConfigSet, t int, fullCtx bool) ATNConfigSet { - if ParserATNSimulatorDebug { - fmt.Println("in computeReachSet, starting closure: " + closure.String()) - } - if p.mergeCache == nil { - p.mergeCache = NewDoubleDict() - } - intermediate := NewBaseATNConfigSet(fullCtx) - - // Configurations already in a rule stop state indicate reaching the end - // of the decision rule (local context) or end of the start rule (full - // context). Once reached, these configurations are never updated by a - // closure operation, so they are handled separately for the performance - // advantage of having a smaller intermediate set when calling closure. - // - // For full-context reach operations, separate handling is required to - // ensure that the alternative Matching the longest overall sequence is - // chosen when multiple such configurations can Match the input. - - var SkippedStopStates []*BaseATNConfig - - // First figure out where we can reach on input t - for _, c := range closure.GetItems() { - if ParserATNSimulatorDebug { - fmt.Println("testing " + p.GetTokenName(t) + " at " + c.String()) - } - - _, ok := c.GetState().(*RuleStopState) - - if ok { - if fullCtx || t == TokenEOF { - if SkippedStopStates == nil { - SkippedStopStates = make([]*BaseATNConfig, 0) - } - SkippedStopStates = append(SkippedStopStates, c.(*BaseATNConfig)) - if ParserATNSimulatorDebug { - fmt.Println("added " + c.String() + " to SkippedStopStates") - } - } - continue - } - - for j := 0; j < len(c.GetState().GetTransitions()); j++ { - trans := c.GetState().GetTransitions()[j] - target := p.getReachableTarget(trans, t) - if target != nil { - cfg := NewBaseATNConfig4(c, target) - intermediate.Add(cfg, p.mergeCache) - if ParserATNSimulatorDebug { - fmt.Println("added " + cfg.String() + " to intermediate") - } - } - } - } - // Now figure out where the reach operation can take us... 
- var reach ATNConfigSet - - // This block optimizes the reach operation for intermediate sets which - // trivially indicate a termination state for the overall - // AdaptivePredict operation. - // - // The conditions assume that intermediate - // contains all configurations relevant to the reach set, but p - // condition is not true when one or more configurations have been - // withheld in SkippedStopStates, or when the current symbol is EOF. - // - if SkippedStopStates == nil && t != TokenEOF { - if len(intermediate.configs) == 1 { - // Don't pursue the closure if there is just one state. - // It can only have one alternative just add to result - // Also don't pursue the closure if there is unique alternative - // among the configurations. - reach = intermediate - } else if p.getUniqueAlt(intermediate) != ATNInvalidAltNumber { - // Also don't pursue the closure if there is unique alternative - // among the configurations. - reach = intermediate - } - } - // If the reach set could not be trivially determined, perform a closure - // operation on the intermediate set to compute its initial value. - // - if reach == nil { - reach = NewBaseATNConfigSet(fullCtx) - closureBusy := NewSet(nil, nil) - treatEOFAsEpsilon := t == TokenEOF - for k := 0; k < len(intermediate.configs); k++ { - p.closure(intermediate.configs[k], reach, closureBusy, false, fullCtx, treatEOFAsEpsilon) - } - } - if t == TokenEOF { - // After consuming EOF no additional input is possible, so we are - // only interested in configurations which reached the end of the - // decision rule (local context) or end of the start rule (full - // context). Update reach to contain only these configurations. This - // handles both explicit EOF transitions in the grammar and implicit - // EOF transitions following the end of the decision or start rule. - // - // When reach==intermediate, no closure operation was performed. In - // p case, removeAllConfigsNotInRuleStopState needs to check for - // reachable rule stop states as well as configurations already in - // a rule stop state. - // - // This is handled before the configurations in SkippedStopStates, - // because any configurations potentially added from that list are - // already guaranteed to meet p condition whether or not it's - // required. - // - reach = p.removeAllConfigsNotInRuleStopState(reach, reach == intermediate) - } - // If SkippedStopStates!=nil, then it contains at least one - // configuration. For full-context reach operations, these - // configurations reached the end of the start rule, in which case we - // only add them back to reach if no configuration during the current - // closure operation reached such a state. This ensures AdaptivePredict - // chooses an alternative Matching the longest overall sequence when - // multiple alternatives are viable. - // - if SkippedStopStates != nil && ((!fullCtx) || (!PredictionModehasConfigInRuleStopState(reach))) { - for l := 0; l < len(SkippedStopStates); l++ { - reach.Add(SkippedStopStates[l], p.mergeCache) - } - } - if len(reach.GetItems()) == 0 { - return nil - } - - return reach -} - -// -// Return a configuration set containing only the configurations from -// {@code configs} which are in a {@link RuleStopState}. If all -// configurations in {@code configs} are already in a rule stop state, p -// method simply returns {@code configs}. -// -//

-// When {@code lookToEndOfRule} is true, this method uses
-// {@link ATN//NextTokens} for each configuration in {@code configs} which is
-// not already in a rule stop state to see if a rule stop state is reachable
-// from the configuration via epsilon-only transitions.
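A compact sketch of the shape just described, with flat hypothetical types: return the input unchanged when every configuration already sits in a rule stop state, otherwise build a filtered copy (the lookToEndOfRule epsilon probe is elided here):

package main

import "fmt"

type config struct {
	state      int
	atRuleStop bool
}

func onlyRuleStop(configs []config) []config {
	all := true
	for _, c := range configs {
		if !c.atRuleStop {
			all = false
			break
		}
	}
	if all {
		return configs // nothing to filter; the set is returned unchanged
	}
	result := make([]config, 0, len(configs))
	for _, c := range configs {
		if c.atRuleStop {
			result = append(result, c)
		}
	}
	return result
}

func main() {
	in := []config{{1, true}, {2, false}, {3, true}}
	fmt.Println(onlyRuleStop(in)) // [{1 true} {3 true}]
}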

-// -// @param configs the configuration set to update -// @param lookToEndOfRule when true, p method checks for rule stop states -// reachable by epsilon-only transitions from each configuration in -// {@code configs}. -// -// @return {@code configs} if all configurations in {@code configs} are in a -// rule stop state, otherwise return a Newconfiguration set containing only -// the configurations from {@code configs} which are in a rule stop state -// -func (p *ParserATNSimulator) removeAllConfigsNotInRuleStopState(configs ATNConfigSet, lookToEndOfRule bool) ATNConfigSet { - if PredictionModeallConfigsInRuleStopStates(configs) { - return configs - } - result := NewBaseATNConfigSet(configs.FullContext()) - for _, config := range configs.GetItems() { - - _, ok := config.GetState().(*RuleStopState) - - if ok { - result.Add(config, p.mergeCache) - continue - } - if lookToEndOfRule && config.GetState().GetEpsilonOnlyTransitions() { - NextTokens := p.atn.NextTokens(config.GetState(), nil) - if NextTokens.contains(TokenEpsilon) { - endOfRuleState := p.atn.ruleToStopState[config.GetState().GetRuleIndex()] - result.Add(NewBaseATNConfig4(config, endOfRuleState), p.mergeCache) - } - } - } - return result -} - -func (p *ParserATNSimulator) computeStartState(a ATNState, ctx RuleContext, fullCtx bool) ATNConfigSet { - // always at least the implicit call to start rule - initialContext := predictionContextFromRuleContext(p.atn, ctx) - configs := NewBaseATNConfigSet(fullCtx) - for i := 0; i < len(a.GetTransitions()); i++ { - target := a.GetTransitions()[i].getTarget() - c := NewBaseATNConfig6(target, i+1, initialContext) - closureBusy := NewSet(nil, nil) - p.closure(c, configs, closureBusy, true, fullCtx, false) - } - return configs -} - -// -// This method transforms the start state computed by -// {@link //computeStartState} to the special start state used by a -// precedence DFA for a particular precedence value. The transformation -// process applies the following changes to the start state's configuration -// set. -// -//
-// 1. Evaluate the precedence predicates for each configuration using
-//    {@link SemanticContext//evalPrecedence}.
-// 2. Remove all configurations which predict an alternative greater than
-//    1, for which another configuration that predicts alternative 1 is in the
-//    same ATN state with the same prediction context. This transformation is
-//    valid for the following reasons:
-//    - The closure block cannot contain any epsilon transitions which bypass
-//      the body of the closure, so all states reachable via alternative 1 are
-//      part of the precedence alternatives of the transformed left-recursive
-//      rule.
-//    - The "primary" portion of a left recursive rule cannot contain an
-//      epsilon transition, so the only way an alternative other than 1 can exist
-//      in a state that is also reachable via alternative 1 is by nesting calls
-//      to the left-recursive rule, with the outer calls not being at the
-//      preferred precedence level.
-//
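The two steps above translate into a two-pass scan, visible in the applyPrecedenceFilter body further down. A standalone sketch with hypothetical flat types (the real code also re-evaluates semantic contexts and respects getPrecedenceFilterSuppressed, both omitted here):

package main

import "fmt"

type cfg struct {
	state int
	alt   int
	ctx   string // stand-in for the prediction context
}

func applyPrecedenceFilter(configs []cfg) []cfg {
	statesFromAlt1 := make(map[int]string)
	out := make([]cfg, 0, len(configs))
	for _, c := range configs { // pass 1: record and keep alt-1 configs
		if c.alt != 1 {
			continue
		}
		statesFromAlt1[c.state] = c.ctx
		out = append(out, c)
	}
	for _, c := range configs { // pass 2: alt > 1
		if c.alt == 1 {
			continue
		}
		if ctx, ok := statesFromAlt1[c.state]; ok && ctx == c.ctx {
			continue // eliminated: alt 1 reaches the same state with the same context
		}
		out = append(out, c)
	}
	return out
}

func main() {
	in := []cfg{{10, 1, "[$]"}, {10, 2, "[$]"}, {12, 2, "[5 $]"}}
	fmt.Println(applyPrecedenceFilter(in)) // the alt-2 config at state 10 is dropped
}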

-// The prediction context must be considered by this filter to address
-// situations like the following.
-//

-//
-// grammar TA
-// prog: statement* EOF
-// statement: letterA | statement letterA 'b'
-// letterA: 'a'
-//

-// In the above grammar, the ATN state immediately before the token
-// reference {@code 'a'} in {@code letterA} is reachable from the left edge
-// of both the primary and closure blocks of the left-recursive rule
-// {@code statement}. The prediction context associated with each of these
-// configurations distinguishes between them, and prevents the alternative
-// which stepped out to {@code prog} (and then back in to {@code statement})
-// from being eliminated by the filter.
-//

-// -// @param configs The configuration set computed by -// {@link //computeStartState} as the start state for the DFA. -// @return The transformed configuration set representing the start state -// for a precedence DFA at a particular precedence level (determined by -// calling {@link Parser//getPrecedence}). -// -func (p *ParserATNSimulator) applyPrecedenceFilter(configs ATNConfigSet) ATNConfigSet { - - statesFromAlt1 := make(map[int]PredictionContext) - configSet := NewBaseATNConfigSet(configs.FullContext()) - - for _, config := range configs.GetItems() { - // handle alt 1 first - if config.GetAlt() != 1 { - continue - } - updatedContext := config.GetSemanticContext().evalPrecedence(p.parser, p.outerContext) - if updatedContext == nil { - // the configuration was eliminated - continue - } - statesFromAlt1[config.GetState().GetStateNumber()] = config.GetContext() - if updatedContext != config.GetSemanticContext() { - configSet.Add(NewBaseATNConfig2(config, updatedContext), p.mergeCache) - } else { - configSet.Add(config, p.mergeCache) - } - } - for _, config := range configs.GetItems() { - - if config.GetAlt() == 1 { - // already handled - continue - } - // In the future, p elimination step could be updated to also - // filter the prediction context for alternatives predicting alt>1 - // (basically a graph subtraction algorithm). - if !config.getPrecedenceFilterSuppressed() { - context := statesFromAlt1[config.GetState().GetStateNumber()] - if context != nil && context.equals(config.GetContext()) { - // eliminated - continue - } - } - configSet.Add(config, p.mergeCache) - } - return configSet -} - -func (p *ParserATNSimulator) getReachableTarget(trans Transition, ttype int) ATNState { - if trans.Matches(ttype, 0, p.atn.maxTokenType) { - return trans.getTarget() - } - - return nil -} - -func (p *ParserATNSimulator) getPredsForAmbigAlts(ambigAlts *BitSet, configs ATNConfigSet, nalts int) []SemanticContext { - - altToPred := make([]SemanticContext, nalts+1) - for _, c := range configs.GetItems() { - if ambigAlts.contains(c.GetAlt()) { - altToPred[c.GetAlt()] = SemanticContextorContext(altToPred[c.GetAlt()], c.GetSemanticContext()) - } - } - nPredAlts := 0 - for i := 1; i < nalts+1; i++ { - pred := altToPred[i] - if pred == nil { - altToPred[i] = SemanticContextNone - } else if pred != SemanticContextNone { - nPredAlts++ - } - } - // nonambig alts are nil in altToPred - if nPredAlts == 0 { - altToPred = nil - } - if ParserATNSimulatorDebug { - fmt.Println("getPredsForAmbigAlts result " + fmt.Sprint(altToPred)) - } - return altToPred -} - -func (p *ParserATNSimulator) getPredicatePredictions(ambigAlts *BitSet, altToPred []SemanticContext) []*PredPrediction { - pairs := make([]*PredPrediction, 0) - containsPredicate := false - for i := 1; i < len(altToPred); i++ { - pred := altToPred[i] - // unpredicated is indicated by SemanticContextNONE - if ambigAlts != nil && ambigAlts.contains(i) { - pairs = append(pairs, NewPredPrediction(pred, i)) - } - if pred != SemanticContextNone { - containsPredicate = true - } - } - if !containsPredicate { - return nil - } - return pairs -} - -// -// This method is used to improve the localization of error messages by -// choosing an alternative rather than panicing a -// {@link NoViableAltException} in particular prediction scenarios where the -// {@link //ERROR} state was reached during ATN simulation. -// -//

-// The default implementation of this method uses the following
-// algorithm to identify an ATN configuration which successfully parsed the
-// decision entry rule. Choosing such an alternative ensures that the
-// {@link ParserRuleContext} returned by the calling rule will be complete
-// and valid, and the syntax error will be Reported later at a more
-// localized location.

-// -//
-//   - If a syntactically valid path or paths reach the end of the decision
-//     rule and they are semantically valid if predicated, return the min
-//     associated alt.
-//   - Else, if a semantically invalid but syntactically valid path or paths
-//     exist, return the minimum associated alt.
-//   - Otherwise, return {@link ATN//INVALID_ALT_NUMBER}.
-//
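Read as code, the ladder above is two minimum scans with a fallback. A minimal sketch, assuming hypothetical helpers and plain int alt lists in place of the runtime's config sets and BitSet (the runtime's ATNInvalidAltNumber plays the role of invalidAlt):

package main

import "fmt"

const invalidAlt = 0

func minAlt(alts []int) int {
	if len(alts) == 0 {
		return invalidAlt
	}
	m := alts[0]
	for _, a := range alts[1:] {
		if a < m {
			m = a
		}
	}
	return m
}

func pickAlt(semValid, semInvalid []int) int {
	// prefer a semantically valid path that finished the decision rule
	if alt := minAlt(semValid); alt != invalidAlt {
		return alt
	}
	// then a syntactically valid path whose predicate failed
	return minAlt(semInvalid) // may still be invalidAlt if nothing finished
}

func main() {
	fmt.Println(pickAlt(nil, []int{3, 2})) // 2: syntactically viable, preds failed
}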

-// In some scenarios, the algorithm described above could predict an
-// alternative which will result in a {@link FailedPredicateException} in
-// the parser. Specifically, this could occur if the only configuration
-// capable of successfully parsing to the end of the decision rule is
-// blocked by a semantic predicate. By choosing this alternative within
-// {@link //AdaptivePredict} instead of panicking with a
-// {@link NoViableAltException}, the resulting
-// {@link FailedPredicateException} in the parser will identify the specific
-// predicate which is preventing the parser from successfully parsing the
-// decision rule, which helps developers identify and correct logic errors
-// in semantic predicates.
-//

-// -// @param configs The ATN configurations which were valid immediately before -// the {@link //ERROR} state was reached -// @param outerContext The is the \gamma_0 initial parser context from the paper -// or the parser stack at the instant before prediction commences. -// -// @return The value to return from {@link //AdaptivePredict}, or -// {@link ATN//INVALID_ALT_NUMBER} if a suitable alternative was not -// identified and {@link //AdaptivePredict} should Report an error instead. -// -func (p *ParserATNSimulator) getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(configs ATNConfigSet, outerContext ParserRuleContext) int { - cfgs := p.splitAccordingToSemanticValidity(configs, outerContext) - semValidConfigs := cfgs[0] - semInvalidConfigs := cfgs[1] - alt := p.GetAltThatFinishedDecisionEntryRule(semValidConfigs) - if alt != ATNInvalidAltNumber { // semantically/syntactically viable path exists - return alt - } - // Is there a syntactically valid path with a failed pred? - if len(semInvalidConfigs.GetItems()) > 0 { - alt = p.GetAltThatFinishedDecisionEntryRule(semInvalidConfigs) - if alt != ATNInvalidAltNumber { // syntactically viable path exists - return alt - } - } - return ATNInvalidAltNumber -} - -func (p *ParserATNSimulator) GetAltThatFinishedDecisionEntryRule(configs ATNConfigSet) int { - alts := NewIntervalSet() - - for _, c := range configs.GetItems() { - _, ok := c.GetState().(*RuleStopState) - - if c.GetReachesIntoOuterContext() > 0 || (ok && c.GetContext().hasEmptyPath()) { - alts.addOne(c.GetAlt()) - } - } - if alts.length() == 0 { - return ATNInvalidAltNumber - } - - return alts.first() -} - -// Walk the list of configurations and split them according to -// those that have preds evaluating to true/false. If no pred, assume -// true pred and include in succeeded set. Returns Pair of sets. -// -// Create a NewSet so as not to alter the incoming parameter. -// -// Assumption: the input stream has been restored to the starting point -// prediction, which is where predicates need to evaluate. - -type ATNConfigSetPair struct { - item0, item1 ATNConfigSet -} - -func (p *ParserATNSimulator) splitAccordingToSemanticValidity(configs ATNConfigSet, outerContext ParserRuleContext) []ATNConfigSet { - succeeded := NewBaseATNConfigSet(configs.FullContext()) - failed := NewBaseATNConfigSet(configs.FullContext()) - - for _, c := range configs.GetItems() { - if c.GetSemanticContext() != SemanticContextNone { - predicateEvaluationResult := c.GetSemanticContext().evaluate(p.parser, outerContext) - if predicateEvaluationResult { - succeeded.Add(c, nil) - } else { - failed.Add(c, nil) - } - } else { - succeeded.Add(c, nil) - } - } - return []ATNConfigSet{succeeded, failed} -} - -// Look through a list of predicate/alt pairs, returning alts for the -// pairs that win. A {@code NONE} predicate indicates an alt containing an -// unpredicated config which behaves as "always true." If !complete -// then we stop at the first predicate that evaluates to true. This -// includes pairs with nil predicates. 
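The early-exit behavior described just above for evalSemanticContext (NONE predicates always predict their alt; when !complete the scan stops at the first winner) is easy to see in isolation. A runnable sketch, with a nil func standing in for SemanticContextNone and a plain int slice for the BitSet:

package main

import "fmt"

type predPrediction struct {
	pred func() bool // nil plays the role of SemanticContextNone
	alt  int
}

func evalPairs(pairs []predPrediction, complete bool) []int {
	var predictions []int
	for _, pair := range pairs {
		if pair.pred == nil {
			predictions = append(predictions, pair.alt)
			if !complete {
				break // unpredicated alt wins immediately
			}
			continue
		}
		if pair.pred() {
			predictions = append(predictions, pair.alt)
			if !complete {
				break // first successful predicate stops the scan
			}
		}
	}
	return predictions
}

func main() {
	pairs := []predPrediction{
		{pred: func() bool { return false }, alt: 1},
		{pred: func() bool { return true }, alt: 2},
		{pred: nil, alt: 3},
	}
	fmt.Println(evalPairs(pairs, false)) // [2]
	fmt.Println(evalPairs(pairs, true))  // [2 3]
}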
-// -func (p *ParserATNSimulator) evalSemanticContext(predPredictions []*PredPrediction, outerContext ParserRuleContext, complete bool) *BitSet { - predictions := NewBitSet() - for i := 0; i < len(predPredictions); i++ { - pair := predPredictions[i] - if pair.pred == SemanticContextNone { - predictions.add(pair.alt) - if !complete { - break - } - continue - } - - predicateEvaluationResult := pair.pred.evaluate(p.parser, outerContext) - if ParserATNSimulatorDebug || ParserATNSimulatorDFADebug { - fmt.Println("eval pred " + pair.String() + "=" + fmt.Sprint(predicateEvaluationResult)) - } - if predicateEvaluationResult { - if ParserATNSimulatorDebug || ParserATNSimulatorDFADebug { - fmt.Println("PREDICT " + fmt.Sprint(pair.alt)) - } - predictions.add(pair.alt) - if !complete { - break - } - } - } - return predictions -} - -func (p *ParserATNSimulator) closure(config ATNConfig, configs ATNConfigSet, closureBusy *Set, collectPredicates, fullCtx, treatEOFAsEpsilon bool) { - initialDepth := 0 - p.closureCheckingStopState(config, configs, closureBusy, collectPredicates, - fullCtx, initialDepth, treatEOFAsEpsilon) -} - -func (p *ParserATNSimulator) closureCheckingStopState(config ATNConfig, configs ATNConfigSet, closureBusy *Set, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) { - - if ParserATNSimulatorDebug { - fmt.Println("closure(" + config.String() + ")") - fmt.Println("configs(" + configs.String() + ")") - if config.GetReachesIntoOuterContext() > 50 { - panic("problem") - } - } - - _, ok := config.GetState().(*RuleStopState) - if ok { - // We hit rule end. If we have context info, use it - // run thru all possible stack tops in ctx - if !config.GetContext().isEmpty() { - for i := 0; i < config.GetContext().length(); i++ { - if config.GetContext().getReturnState(i) == BasePredictionContextEmptyReturnState { - if fullCtx { - configs.Add(NewBaseATNConfig1(config, config.GetState(), BasePredictionContextEMPTY), p.mergeCache) - continue - } else { - // we have no context info, just chase follow links (if greedy) - if ParserATNSimulatorDebug { - fmt.Println("FALLING off rule " + p.getRuleName(config.GetState().GetRuleIndex())) - } - p.closureWork(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon) - } - continue - } - returnState := p.atn.states[config.GetContext().getReturnState(i)] - newContext := config.GetContext().GetParent(i) // "pop" return state - - c := NewBaseATNConfig5(returnState, config.GetAlt(), newContext, config.GetSemanticContext()) - // While we have context to pop back from, we may have - // gotten that context AFTER having falling off a rule. - // Make sure we track that we are now out of context. 
- c.SetReachesIntoOuterContext(config.GetReachesIntoOuterContext()) - p.closureCheckingStopState(c, configs, closureBusy, collectPredicates, fullCtx, depth-1, treatEOFAsEpsilon) - } - return - } else if fullCtx { - // reached end of start rule - configs.Add(config, p.mergeCache) - return - } else { - // else if we have no context info, just chase follow links (if greedy) - if ParserATNSimulatorDebug { - fmt.Println("FALLING off rule " + p.getRuleName(config.GetState().GetRuleIndex())) - } - } - } - p.closureWork(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon) -} - -// Do the actual work of walking epsilon edges// -func (p *ParserATNSimulator) closureWork(config ATNConfig, configs ATNConfigSet, closureBusy *Set, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) { - state := config.GetState() - // optimization - if !state.GetEpsilonOnlyTransitions() { - configs.Add(config, p.mergeCache) - // make sure to not return here, because EOF transitions can act as - // both epsilon transitions and non-epsilon transitions. - } - for i := 0; i < len(state.GetTransitions()); i++ { - t := state.GetTransitions()[i] - _, ok := t.(*ActionTransition) - continueCollecting := collectPredicates && !ok - c := p.getEpsilonTarget(config, t, continueCollecting, depth == 0, fullCtx, treatEOFAsEpsilon) - if ci, ok := c.(*BaseATNConfig); ok && ci != nil { - if !t.getIsEpsilon() && closureBusy.add(c) != c { - // avoid infinite recursion for EOF* and EOF+ - continue - } - newDepth := depth - - if _, ok := config.GetState().(*RuleStopState); ok { - - // target fell off end of rule mark resulting c as having dipped into outer context - // We can't get here if incoming config was rule stop and we had context - // track how far we dip into outer context. Might - // come in handy and we avoid evaluating context dependent - // preds if p is > 0. - - if closureBusy.add(c) != c { - // avoid infinite recursion for right-recursive rules - continue - } - - if p.dfa != nil && p.dfa.precedenceDfa { - if t.(*EpsilonTransition).outermostPrecedenceReturn == p.dfa.atnStartState.GetRuleIndex() { - c.setPrecedenceFilterSuppressed(true) - } - } - - c.SetReachesIntoOuterContext(c.GetReachesIntoOuterContext() + 1) - configs.SetDipsIntoOuterContext(true) // TODO: can remove? 
only care when we add to set per middle of p method - newDepth-- - if ParserATNSimulatorDebug { - fmt.Println("dips into outer ctx: " + c.String()) - } - } else if _, ok := t.(*RuleTransition); ok { - // latch when newDepth goes negative - once we step out of the entry context we can't return - if newDepth >= 0 { - newDepth++ - } - } - p.closureCheckingStopState(c, configs, closureBusy, continueCollecting, fullCtx, newDepth, treatEOFAsEpsilon) - } - } -} - -func (p *ParserATNSimulator) getRuleName(index int) string { - if p.parser != nil && index >= 0 { - return p.parser.GetRuleNames()[index] - } - - return "" -} - -func (p *ParserATNSimulator) getEpsilonTarget(config ATNConfig, t Transition, collectPredicates, inContext, fullCtx, treatEOFAsEpsilon bool) ATNConfig { - - switch t.getSerializationType() { - case TransitionRULE: - return p.ruleTransition(config, t.(*RuleTransition)) - case TransitionPRECEDENCE: - return p.precedenceTransition(config, t.(*PrecedencePredicateTransition), collectPredicates, inContext, fullCtx) - case TransitionPREDICATE: - return p.predTransition(config, t.(*PredicateTransition), collectPredicates, inContext, fullCtx) - case TransitionACTION: - return p.actionTransition(config, t.(*ActionTransition)) - case TransitionEPSILON: - return NewBaseATNConfig4(config, t.getTarget()) - case TransitionATOM: - // EOF transitions act like epsilon transitions after the first EOF - // transition is traversed - if treatEOFAsEpsilon { - if t.Matches(TokenEOF, 0, 1) { - return NewBaseATNConfig4(config, t.getTarget()) - } - } - return nil - case TransitionRANGE: - // EOF transitions act like epsilon transitions after the first EOF - // transition is traversed - if treatEOFAsEpsilon { - if t.Matches(TokenEOF, 0, 1) { - return NewBaseATNConfig4(config, t.getTarget()) - } - } - return nil - case TransitionSET: - // EOF transitions act like epsilon transitions after the first EOF - // transition is traversed - if treatEOFAsEpsilon { - if t.Matches(TokenEOF, 0, 1) { - return NewBaseATNConfig4(config, t.getTarget()) - } - } - return nil - default: - return nil - } -} - -func (p *ParserATNSimulator) actionTransition(config ATNConfig, t *ActionTransition) *BaseATNConfig { - if ParserATNSimulatorDebug { - fmt.Println("ACTION edge " + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.actionIndex)) - } - return NewBaseATNConfig4(config, t.getTarget()) -} - -func (p *ParserATNSimulator) precedenceTransition(config ATNConfig, - pt *PrecedencePredicateTransition, collectPredicates, inContext, fullCtx bool) *BaseATNConfig { - - if ParserATNSimulatorDebug { - fmt.Println("PRED (collectPredicates=" + fmt.Sprint(collectPredicates) + ") " + - strconv.Itoa(pt.precedence) + ">=_p, ctx dependent=true") - if p.parser != nil { - fmt.Println("context surrounding pred is " + fmt.Sprint(p.parser.GetRuleInvocationStack(nil))) - } - } - var c *BaseATNConfig - if collectPredicates && inContext { - if fullCtx { - // In full context mode, we can evaluate predicates on-the-fly - // during closure, which dramatically reduces the size of - // the config sets. It also obviates the need to test predicates - // later during conflict resolution. 
- currentPosition := p.input.Index() - p.input.Seek(p.startIndex) - predSucceeds := pt.getPredicate().evaluate(p.parser, p.outerContext) - p.input.Seek(currentPosition) - if predSucceeds { - c = NewBaseATNConfig4(config, pt.getTarget()) // no pred context - } - } else { - newSemCtx := SemanticContextandContext(config.GetSemanticContext(), pt.getPredicate()) - c = NewBaseATNConfig3(config, pt.getTarget(), newSemCtx) - } - } else { - c = NewBaseATNConfig4(config, pt.getTarget()) - } - if ParserATNSimulatorDebug { - fmt.Println("config from pred transition=" + c.String()) - } - return c -} - -func (p *ParserATNSimulator) predTransition(config ATNConfig, pt *PredicateTransition, collectPredicates, inContext, fullCtx bool) *BaseATNConfig { - - if ParserATNSimulatorDebug { - fmt.Println("PRED (collectPredicates=" + fmt.Sprint(collectPredicates) + ") " + strconv.Itoa(pt.ruleIndex) + - ":" + strconv.Itoa(pt.predIndex) + ", ctx dependent=" + fmt.Sprint(pt.isCtxDependent)) - if p.parser != nil { - fmt.Println("context surrounding pred is " + fmt.Sprint(p.parser.GetRuleInvocationStack(nil))) - } - } - var c *BaseATNConfig - if collectPredicates && ((pt.isCtxDependent && inContext) || !pt.isCtxDependent) { - if fullCtx { - // In full context mode, we can evaluate predicates on-the-fly - // during closure, which dramatically reduces the size of - // the config sets. It also obviates the need to test predicates - // later during conflict resolution. - currentPosition := p.input.Index() - p.input.Seek(p.startIndex) - predSucceeds := pt.getPredicate().evaluate(p.parser, p.outerContext) - p.input.Seek(currentPosition) - if predSucceeds { - c = NewBaseATNConfig4(config, pt.getTarget()) // no pred context - } - } else { - newSemCtx := SemanticContextandContext(config.GetSemanticContext(), pt.getPredicate()) - c = NewBaseATNConfig3(config, pt.getTarget(), newSemCtx) - } - } else { - c = NewBaseATNConfig4(config, pt.getTarget()) - } - if ParserATNSimulatorDebug { - fmt.Println("config from pred transition=" + c.String()) - } - return c -} - -func (p *ParserATNSimulator) ruleTransition(config ATNConfig, t *RuleTransition) *BaseATNConfig { - if ParserATNSimulatorDebug { - fmt.Println("CALL rule " + p.getRuleName(t.getTarget().GetRuleIndex()) + ", ctx=" + config.GetContext().String()) - } - returnState := t.followState - newContext := SingletonBasePredictionContextCreate(config.GetContext(), returnState.GetStateNumber()) - return NewBaseATNConfig1(config, t.getTarget(), newContext) -} - -func (p *ParserATNSimulator) getConflictingAlts(configs ATNConfigSet) *BitSet { - altsets := PredictionModegetConflictingAltSubsets(configs) - return PredictionModeGetAlts(altsets) -} - -// Sam pointed out a problem with the previous definition, v3, of -// ambiguous states. If we have another state associated with conflicting -// alternatives, we should keep going. For example, the following grammar -// -// s : (ID | ID ID?) '' -// -// When the ATN simulation reaches the state before '', it has a DFA -// state that looks like: [12|1|[], 6|2|[], 12|2|[]]. Naturally -// 12|1|[] and 12|2|[] conflict, but we cannot stop processing p node -// because alternative to has another way to continue, via [6|2|[]]. -// The key is that we have a single state that has config's only associated -// with a single alternative, 2, and crucially the state transitions -// among the configurations are all non-epsilon transitions. That means -// we don't consider any conflicts that include alternative 2. 
So, we -// ignore the conflict between alts 1 and 2. We ignore a set of -// conflicting alts when there is an intersection with an alternative -// associated with a single alt state in the state&rarrconfig-list map. -// -// It's also the case that we might have two conflicting configurations but -// also a 3rd nonconflicting configuration for a different alternative: -// [1|1|[], 1|2|[], 8|3|[]]. This can come about from grammar: -// -// a : A | A | A B -// -// After Matching input A, we reach the stop state for rule A, state 1. -// State 8 is the state right before B. Clearly alternatives 1 and 2 -// conflict and no amount of further lookahead will separate the two. -// However, alternative 3 will be able to continue and so we do not -// stop working on p state. In the previous example, we're concerned -// with states associated with the conflicting alternatives. Here alt -// 3 is not associated with the conflicting configs, but since we can continue -// looking for input reasonably, I don't declare the state done. We -// ignore a set of conflicting alts when we have an alternative -// that we still need to pursue. -// - -func (p *ParserATNSimulator) getConflictingAltsOrUniqueAlt(configs ATNConfigSet) *BitSet { - var conflictingAlts *BitSet - if configs.GetUniqueAlt() != ATNInvalidAltNumber { - conflictingAlts = NewBitSet() - conflictingAlts.add(configs.GetUniqueAlt()) - } else { - conflictingAlts = configs.GetConflictingAlts() - } - return conflictingAlts -} - -func (p *ParserATNSimulator) GetTokenName(t int) string { - if t == TokenEOF { - return "EOF" - } - - if p.parser != nil && p.parser.GetLiteralNames() != nil { - if t >= len(p.parser.GetLiteralNames()) { - fmt.Println(strconv.Itoa(t) + " ttype out of range: " + strings.Join(p.parser.GetLiteralNames(), ",")) - // fmt.Println(p.parser.GetInputStream().(TokenStream).GetAllText()) // p seems incorrect - } else { - return p.parser.GetLiteralNames()[t] + "<" + strconv.Itoa(t) + ">" - } - } - - return strconv.Itoa(t) -} - -func (p *ParserATNSimulator) getLookaheadName(input TokenStream) string { - return p.GetTokenName(input.LA(1)) -} - -// Used for debugging in AdaptivePredict around execATN but I cut -// it out for clarity now that alg. works well. We can leave p -// "dead" code for a bit. -// -func (p *ParserATNSimulator) dumpDeadEndConfigs(nvae *NoViableAltException) { - - panic("Not implemented") - - // fmt.Println("dead end configs: ") - // var decs = nvae.deadEndConfigs - // - // for i:=0; i0) { - // var t = c.state.GetTransitions()[0] - // if t2, ok := t.(*AtomTransition); ok { - // trans = "Atom "+ p.GetTokenName(t2.label) - // } else if t3, ok := t.(SetTransition); ok { - // _, ok := t.(*NotSetTransition) - // - // var s string - // if (ok){ - // s = "~" - // } - // - // trans = s + "Set " + t3.set - // } - // } - // fmt.Errorf(c.String(p.parser, true) + ":" + trans) - // } -} - -func (p *ParserATNSimulator) noViableAlt(input TokenStream, outerContext ParserRuleContext, configs ATNConfigSet, startIndex int) *NoViableAltException { - return NewNoViableAltException(p.parser, input, input.Get(startIndex), input.LT(1), configs, outerContext) -} - -func (p *ParserATNSimulator) getUniqueAlt(configs ATNConfigSet) int { - alt := ATNInvalidAltNumber - for _, c := range configs.GetItems() { - if alt == ATNInvalidAltNumber { - alt = c.GetAlt() // found first alt - } else if c.GetAlt() != alt { - return ATNInvalidAltNumber - } - } - return alt -} - -// -// Add an edge to the DFA, if possible. 
This method calls
-// {@link //addDFAState} to ensure the {@code to} state is present in the
-// DFA. If {@code from} is {@code nil}, or if {@code t} is outside the
-// range of edges that can be represented in the DFA tables, this method
-// returns without adding the edge to the DFA.
-//
-// <p>If {@code to} is {@code nil}, this method returns {@code nil}.
-// Otherwise, this method returns the {@link DFAState} returned by calling
-// {@link //addDFAState} for the {@code to} state.</p>
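The {@code t+1} offset in the edge table (visible in the function below) exists so that {@code t == -1}, the EOF symbol, lands in slot 0. A minimal sketch of the matching lookup side, using this file's types; the helper itself is hypothetical, not part of the runtime:

// Hypothetical helper: shows a lookup honoring the same t+1 indexing
// convention that addDFAEdge uses when it connects states.
func edgeTarget(from *DFAState, t, maxTokenType int) *DFAState {
	if from == nil || from.edges == nil || t < -1 || t > maxTokenType {
		return nil // outside the representable range, no cached edge
	}
	return from.edges[t+1] // slot 0 holds the EOF (-1) edge
}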
-//
-// @param dfa The DFA
-// @param from The source state for the edge
-// @param t The input symbol
-// @param to The target state for the edge
-//
-// @return If {@code to} is {@code nil}, this method returns {@code nil},
-// otherwise this method returns the result of calling {@link //addDFAState}
-// on {@code to}
-//
-func (p *ParserATNSimulator) addDFAEdge(dfa *DFA, from *DFAState, t int, to *DFAState) *DFAState {
-	if ParserATNSimulatorDebug {
-		fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + p.GetTokenName(t))
-	}
-	if to == nil {
-		return nil
-	}
-	to = p.addDFAState(dfa, to) // used existing if possible not incoming
-	if from == nil || t < -1 || t > p.atn.maxTokenType {
-		return to
-	}
-	if from.edges == nil {
-		from.edges = make([]*DFAState, p.atn.maxTokenType+1+1)
-	}
-	from.edges[t+1] = to // connect
-
-	if ParserATNSimulatorDebug {
-		var names []string
-		if p.parser != nil {
-			names = p.parser.GetLiteralNames()
-		}
-
-		fmt.Println("DFA=\n" + dfa.String(names, nil))
-	}
-	return to
-}
-
-// Add state {@code D} to the DFA if it is not already present, and return
-// the actual instance stored in the DFA. If a state equivalent to {@code D}
-// is already in the DFA, the existing state is returned. Otherwise this
-// method returns {@code D} after adding it to the DFA.
-//
-// <p>If {@code D} is {@link //ERROR}, this method returns {@link //ERROR} and
-// does not change the DFA.</p>
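The interning behavior described above is plain hash-consing. A minimal sketch of the idea with simplified, hypothetical types (not the runtime's actual structures):

type state struct{ key string } // stand-in for a DFA state and its hash

type table struct{ states map[string]*state }

// intern returns the equivalent existing state when present; otherwise it
// stores s and returns it, mirroring the add-or-return contract above.
func (t *table) intern(s *state) *state {
	if existing, ok := t.states[s.key]; ok {
		return existing
	}
	t.states[s.key] = s
	return s
}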
-// -// @param dfa The dfa -// @param D The DFA state to add -// @return The state stored in the DFA. This will be either the existing -// state if {@code D} is already in the DFA, or {@code D} itself if the -// state was not already present. -// -func (p *ParserATNSimulator) addDFAState(dfa *DFA, d *DFAState) *DFAState { - if d == ATNSimulatorError { - return d - } - hash := d.hash() - existing, ok := dfa.getState(hash) - if ok { - return existing - } - d.stateNumber = dfa.numStates() - if !d.configs.ReadOnly() { - d.configs.OptimizeConfigs(p.BaseATNSimulator) - d.configs.SetReadOnly(true) - } - dfa.setState(hash, d) - if ParserATNSimulatorDebug { - fmt.Println("adding NewDFA state: " + d.String()) - } - return d -} - -func (p *ParserATNSimulator) ReportAttemptingFullContext(dfa *DFA, conflictingAlts *BitSet, configs ATNConfigSet, startIndex, stopIndex int) { - if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug { - interval := NewInterval(startIndex, stopIndex+1) - fmt.Println("ReportAttemptingFullContext decision=" + strconv.Itoa(dfa.decision) + ":" + configs.String() + - ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval)) - } - if p.parser != nil { - p.parser.GetErrorListenerDispatch().ReportAttemptingFullContext(p.parser, dfa, startIndex, stopIndex, conflictingAlts, configs) - } -} - -func (p *ParserATNSimulator) ReportContextSensitivity(dfa *DFA, prediction int, configs ATNConfigSet, startIndex, stopIndex int) { - if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug { - interval := NewInterval(startIndex, stopIndex+1) - fmt.Println("ReportContextSensitivity decision=" + strconv.Itoa(dfa.decision) + ":" + configs.String() + - ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval)) - } - if p.parser != nil { - p.parser.GetErrorListenerDispatch().ReportContextSensitivity(p.parser, dfa, startIndex, stopIndex, prediction, configs) - } -} - -// If context sensitive parsing, we know it's ambiguity not conflict// -func (p *ParserATNSimulator) ReportAmbiguity(dfa *DFA, D *DFAState, startIndex, stopIndex int, - exact bool, ambigAlts *BitSet, configs ATNConfigSet) { - if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug { - interval := NewInterval(startIndex, stopIndex+1) - fmt.Println("ReportAmbiguity " + ambigAlts.String() + ":" + configs.String() + - ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval)) - } - if p.parser != nil { - p.parser.GetErrorListenerDispatch().ReportAmbiguity(p.parser, dfa, startIndex, stopIndex, exact, ambigAlts, configs) - } -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_rule_context.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_rule_context.go deleted file mode 100644 index 49cd10c5..00000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_rule_context.go +++ /dev/null @@ -1,362 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. 
- -package antlr - -import ( - "reflect" - "strconv" -) - -type ParserRuleContext interface { - RuleContext - - SetException(RecognitionException) - - AddTokenNode(token Token) *TerminalNodeImpl - AddErrorNode(badToken Token) *ErrorNodeImpl - - EnterRule(listener ParseTreeListener) - ExitRule(listener ParseTreeListener) - - SetStart(Token) - GetStart() Token - - SetStop(Token) - GetStop() Token - - AddChild(child RuleContext) RuleContext - RemoveLastChild() -} - -type BaseParserRuleContext struct { - *BaseRuleContext - - start, stop Token - exception RecognitionException - children []Tree -} - -func NewBaseParserRuleContext(parent ParserRuleContext, invokingStateNumber int) *BaseParserRuleContext { - prc := new(BaseParserRuleContext) - - prc.BaseRuleContext = NewBaseRuleContext(parent, invokingStateNumber) - - prc.RuleIndex = -1 - // * If we are debugging or building a parse tree for a Visitor, - // we need to track all of the tokens and rule invocations associated - // with prc rule's context. This is empty for parsing w/o tree constr. - // operation because we don't the need to track the details about - // how we parse prc rule. - // / - prc.children = nil - prc.start = nil - prc.stop = nil - // The exception that forced prc rule to return. If the rule successfully - // completed, prc is {@code nil}. - prc.exception = nil - - return prc -} - -func (prc *BaseParserRuleContext) SetException(e RecognitionException) { - prc.exception = e -} - -func (prc *BaseParserRuleContext) GetChildren() []Tree { - return prc.children -} - -func (prc *BaseParserRuleContext) CopyFrom(ctx *BaseParserRuleContext) { - // from RuleContext - prc.parentCtx = ctx.parentCtx - prc.invokingState = ctx.invokingState - prc.children = nil - prc.start = ctx.start - prc.stop = ctx.stop -} - -func (prc *BaseParserRuleContext) GetText() string { - if prc.GetChildCount() == 0 { - return "" - } - - var s string - for _, child := range prc.children { - s += child.(ParseTree).GetText() - } - - return s -} - -// Double dispatch methods for listeners -func (prc *BaseParserRuleContext) EnterRule(listener ParseTreeListener) { -} - -func (prc *BaseParserRuleContext) ExitRule(listener ParseTreeListener) { -} - -// * Does not set parent link other add methods do that/// -func (prc *BaseParserRuleContext) addTerminalNodeChild(child TerminalNode) TerminalNode { - if prc.children == nil { - prc.children = make([]Tree, 0) - } - if child == nil { - panic("Child may not be null") - } - prc.children = append(prc.children, child) - return child -} - -func (prc *BaseParserRuleContext) AddChild(child RuleContext) RuleContext { - if prc.children == nil { - prc.children = make([]Tree, 0) - } - if child == nil { - panic("Child may not be null") - } - prc.children = append(prc.children, child) - return child -} - -// * Used by EnterOuterAlt to toss out a RuleContext previously added as -// we entered a rule. If we have // label, we will need to remove -// generic ruleContext object. 
-// / -func (prc *BaseParserRuleContext) RemoveLastChild() { - if prc.children != nil && len(prc.children) > 0 { - prc.children = prc.children[0 : len(prc.children)-1] - } -} - -func (prc *BaseParserRuleContext) AddTokenNode(token Token) *TerminalNodeImpl { - - node := NewTerminalNodeImpl(token) - prc.addTerminalNodeChild(node) - node.parentCtx = prc - return node - -} - -func (prc *BaseParserRuleContext) AddErrorNode(badToken Token) *ErrorNodeImpl { - node := NewErrorNodeImpl(badToken) - prc.addTerminalNodeChild(node) - node.parentCtx = prc - return node -} - -func (prc *BaseParserRuleContext) GetChild(i int) Tree { - if prc.children != nil && len(prc.children) >= i { - return prc.children[i] - } - - return nil -} - -func (prc *BaseParserRuleContext) GetChildOfType(i int, childType reflect.Type) RuleContext { - if childType == nil { - return prc.GetChild(i).(RuleContext) - } - - for j := 0; j < len(prc.children); j++ { - child := prc.children[j] - if reflect.TypeOf(child) == childType { - if i == 0 { - return child.(RuleContext) - } - - i-- - } - } - - return nil -} - -func (prc *BaseParserRuleContext) ToStringTree(ruleNames []string, recog Recognizer) string { - return TreesStringTree(prc, ruleNames, recog) -} - -func (prc *BaseParserRuleContext) GetRuleContext() RuleContext { - return prc -} - -func (prc *BaseParserRuleContext) Accept(visitor ParseTreeVisitor) interface{} { - return visitor.VisitChildren(prc) -} - -func (prc *BaseParserRuleContext) SetStart(t Token) { - prc.start = t -} - -func (prc *BaseParserRuleContext) GetStart() Token { - return prc.start -} - -func (prc *BaseParserRuleContext) SetStop(t Token) { - prc.stop = t -} - -func (prc *BaseParserRuleContext) GetStop() Token { - return prc.stop -} - -func (prc *BaseParserRuleContext) GetToken(ttype int, i int) TerminalNode { - - for j := 0; j < len(prc.children); j++ { - child := prc.children[j] - if c2, ok := child.(TerminalNode); ok { - if c2.GetSymbol().GetTokenType() == ttype { - if i == 0 { - return c2 - } - - i-- - } - } - } - return nil -} - -func (prc *BaseParserRuleContext) GetTokens(ttype int) []TerminalNode { - if prc.children == nil { - return make([]TerminalNode, 0) - } - - tokens := make([]TerminalNode, 0) - - for j := 0; j < len(prc.children); j++ { - child := prc.children[j] - if tchild, ok := child.(TerminalNode); ok { - if tchild.GetSymbol().GetTokenType() == ttype { - tokens = append(tokens, tchild) - } - } - } - - return tokens -} - -func (prc *BaseParserRuleContext) GetPayload() interface{} { - return prc -} - -func (prc *BaseParserRuleContext) getChild(ctxType reflect.Type, i int) RuleContext { - if prc.children == nil || i < 0 || i >= len(prc.children) { - return nil - } - - j := -1 // what element have we found with ctxType? 
- for _, o := range prc.children { - - childType := reflect.TypeOf(o) - - if childType.Implements(ctxType) { - j++ - if j == i { - return o.(RuleContext) - } - } - } - return nil -} - -// Go lacks generics, so it's not possible for us to return the child with the correct type, but we do -// check for convertibility - -func (prc *BaseParserRuleContext) GetTypedRuleContext(ctxType reflect.Type, i int) RuleContext { - return prc.getChild(ctxType, i) -} - -func (prc *BaseParserRuleContext) GetTypedRuleContexts(ctxType reflect.Type) []RuleContext { - if prc.children == nil { - return make([]RuleContext, 0) - } - - contexts := make([]RuleContext, 0) - - for _, child := range prc.children { - childType := reflect.TypeOf(child) - - if childType.ConvertibleTo(ctxType) { - contexts = append(contexts, child.(RuleContext)) - } - } - return contexts -} - -func (prc *BaseParserRuleContext) GetChildCount() int { - if prc.children == nil { - return 0 - } - - return len(prc.children) -} - -func (prc *BaseParserRuleContext) GetSourceInterval() *Interval { - if prc.start == nil || prc.stop == nil { - return TreeInvalidInterval - } - - return NewInterval(prc.start.GetTokenIndex(), prc.stop.GetTokenIndex()) -} - -//need to manage circular dependencies, so export now - -// Print out a whole tree, not just a node, in LISP format -// (root child1 .. childN). Print just a node if b is a leaf. -// - -func (prc *BaseParserRuleContext) String(ruleNames []string, stop RuleContext) string { - - var p ParserRuleContext = prc - s := "[" - for p != nil && p != stop { - if ruleNames == nil { - if !p.IsEmpty() { - s += strconv.Itoa(p.GetInvokingState()) - } - } else { - ri := p.GetRuleIndex() - var ruleName string - if ri >= 0 && ri < len(ruleNames) { - ruleName = ruleNames[ri] - } else { - ruleName = strconv.Itoa(ri) - } - s += ruleName - } - if p.GetParent() != nil && (ruleNames != nil || !p.GetParent().(ParserRuleContext).IsEmpty()) { - s += " " - } - pi := p.GetParent() - if pi != nil { - p = pi.(ParserRuleContext) - } else { - p = nil - } - } - s += "]" - return s -} - -var RuleContextEmpty = NewBaseParserRuleContext(nil, -1) - -type InterpreterRuleContext interface { - ParserRuleContext -} - -type BaseInterpreterRuleContext struct { - *BaseParserRuleContext -} - -func NewBaseInterpreterRuleContext(parent BaseInterpreterRuleContext, invokingStateNumber, ruleIndex int) *BaseInterpreterRuleContext { - - prc := new(BaseInterpreterRuleContext) - - prc.BaseParserRuleContext = NewBaseParserRuleContext(parent, invokingStateNumber) - - prc.RuleIndex = ruleIndex - - return prc -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_context.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_context.go deleted file mode 100644 index 99acb333..00000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_context.go +++ /dev/null @@ -1,756 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "strconv" -) - -// Represents {@code $} in local context prediction, which means wildcard. -// {@code//+x =//}. -// / -const ( - BasePredictionContextEmptyReturnState = 0x7FFFFFFF -) - -// Represents {@code $} in an array in full context mode, when {@code $} -// doesn't mean wildcard: {@code $ + x = [$,x]}. Here, -// {@code $} = {@link //EmptyReturnState}. 
-// / - -var ( - BasePredictionContextglobalNodeCount = 1 - BasePredictionContextid = BasePredictionContextglobalNodeCount -) - -type PredictionContext interface { - hash() int - GetParent(int) PredictionContext - getReturnState(int) int - equals(PredictionContext) bool - length() int - isEmpty() bool - hasEmptyPath() bool - String() string -} - -type BasePredictionContext struct { - cachedHash int -} - -func NewBasePredictionContext(cachedHash int) *BasePredictionContext { - pc := new(BasePredictionContext) - pc.cachedHash = cachedHash - - return pc -} - -func (b *BasePredictionContext) isEmpty() bool { - return false -} - -func calculateHash(parent PredictionContext, returnState int) int { - h := murmurInit(1) - h = murmurUpdate(h, parent.hash()) - h = murmurUpdate(h, returnState) - return murmurFinish(h, 2) -} - -func calculateEmptyHash() int { - h := murmurInit(1) - return murmurFinish(h, 0) -} - -// Used to cache {@link BasePredictionContext} objects. Its used for the shared -// context cash associated with contexts in DFA states. This cache -// can be used for both lexers and parsers. - -type PredictionContextCache struct { - cache map[PredictionContext]PredictionContext -} - -func NewPredictionContextCache() *PredictionContextCache { - t := new(PredictionContextCache) - t.cache = make(map[PredictionContext]PredictionContext) - return t -} - -// Add a context to the cache and return it. If the context already exists, -// return that one instead and do not add a Newcontext to the cache. -// Protect shared cache from unsafe thread access. -// -func (p *PredictionContextCache) add(ctx PredictionContext) PredictionContext { - if ctx == BasePredictionContextEMPTY { - return BasePredictionContextEMPTY - } - existing := p.cache[ctx] - if existing != nil { - return existing - } - p.cache[ctx] = ctx - return ctx -} - -func (p *PredictionContextCache) Get(ctx PredictionContext) PredictionContext { - return p.cache[ctx] -} - -func (p *PredictionContextCache) length() int { - return len(p.cache) -} - -type SingletonPredictionContext interface { - PredictionContext -} - -type BaseSingletonPredictionContext struct { - *BasePredictionContext - - parentCtx PredictionContext - returnState int -} - -func NewBaseSingletonPredictionContext(parent PredictionContext, returnState int) *BaseSingletonPredictionContext { - - s := new(BaseSingletonPredictionContext) - s.BasePredictionContext = NewBasePredictionContext(37) - - if parent != nil { - s.cachedHash = calculateHash(parent, returnState) - } else { - s.cachedHash = calculateEmptyHash() - } - - s.parentCtx = parent - s.returnState = returnState - - return s -} - -func SingletonBasePredictionContextCreate(parent PredictionContext, returnState int) PredictionContext { - if returnState == BasePredictionContextEmptyReturnState && parent == nil { - // someone can pass in the bits of an array ctx that mean $ - return BasePredictionContextEMPTY - } - - return NewBaseSingletonPredictionContext(parent, returnState) -} - -func (b *BaseSingletonPredictionContext) length() int { - return 1 -} - -func (b *BaseSingletonPredictionContext) GetParent(index int) PredictionContext { - return b.parentCtx -} - -func (b *BaseSingletonPredictionContext) getReturnState(index int) int { - return b.returnState -} - -func (b *BaseSingletonPredictionContext) hasEmptyPath() bool { - return b.returnState == BasePredictionContextEmptyReturnState -} - -func (b *BaseSingletonPredictionContext) equals(other PredictionContext) bool { - if b == other { - return true - } else if _, ok := 
other.(*BaseSingletonPredictionContext); !ok { - return false - } else if b.hash() != other.hash() { - return false // can't be same if hash is different - } - - otherP := other.(*BaseSingletonPredictionContext) - - if b.returnState != other.getReturnState(0) { - return false - } else if b.parentCtx == nil { - return otherP.parentCtx == nil - } - - return b.parentCtx.equals(otherP.parentCtx) -} - -func (b *BaseSingletonPredictionContext) hash() int { - h := murmurInit(1) - - if b.parentCtx == nil { - return murmurFinish(h, 0) - } - - h = murmurUpdate(h, b.parentCtx.hash()) - h = murmurUpdate(h, b.returnState) - return murmurFinish(h, 2) -} - -func (b *BaseSingletonPredictionContext) String() string { - var up string - - if b.parentCtx == nil { - up = "" - } else { - up = b.parentCtx.String() - } - - if len(up) == 0 { - if b.returnState == BasePredictionContextEmptyReturnState { - return "$" - } - - return strconv.Itoa(b.returnState) - } - - return strconv.Itoa(b.returnState) + " " + up -} - -var BasePredictionContextEMPTY = NewEmptyPredictionContext() - -type EmptyPredictionContext struct { - *BaseSingletonPredictionContext -} - -func NewEmptyPredictionContext() *EmptyPredictionContext { - - p := new(EmptyPredictionContext) - - p.BaseSingletonPredictionContext = NewBaseSingletonPredictionContext(nil, BasePredictionContextEmptyReturnState) - - return p -} - -func (e *EmptyPredictionContext) isEmpty() bool { - return true -} - -func (e *EmptyPredictionContext) GetParent(index int) PredictionContext { - return nil -} - -func (e *EmptyPredictionContext) getReturnState(index int) int { - return e.returnState -} - -func (e *EmptyPredictionContext) equals(other PredictionContext) bool { - return e == other -} - -func (e *EmptyPredictionContext) String() string { - return "$" -} - -type ArrayPredictionContext struct { - *BasePredictionContext - - parents []PredictionContext - returnStates []int -} - -func NewArrayPredictionContext(parents []PredictionContext, returnStates []int) *ArrayPredictionContext { - // Parent can be nil only if full ctx mode and we make an array - // from {@link //EMPTY} and non-empty. We merge {@link //EMPTY} by using - // nil parent and - // returnState == {@link //EmptyReturnState}. 
- - c := new(ArrayPredictionContext) - c.BasePredictionContext = NewBasePredictionContext(37) - - for i := range parents { - c.cachedHash += calculateHash(parents[i], returnStates[i]) - } - - c.parents = parents - c.returnStates = returnStates - - return c -} - -func (a *ArrayPredictionContext) GetReturnStates() []int { - return a.returnStates -} - -func (a *ArrayPredictionContext) hasEmptyPath() bool { - return a.getReturnState(a.length()-1) == BasePredictionContextEmptyReturnState -} - -func (a *ArrayPredictionContext) isEmpty() bool { - // since EmptyReturnState can only appear in the last position, we - // don't need to verify that size==1 - return a.returnStates[0] == BasePredictionContextEmptyReturnState -} - -func (a *ArrayPredictionContext) length() int { - return len(a.returnStates) -} - -func (a *ArrayPredictionContext) GetParent(index int) PredictionContext { - return a.parents[index] -} - -func (a *ArrayPredictionContext) getReturnState(index int) int { - return a.returnStates[index] -} - -func (a *ArrayPredictionContext) equals(other PredictionContext) bool { - if _, ok := other.(*ArrayPredictionContext); !ok { - return false - } else if a.cachedHash != other.hash() { - return false // can't be same if hash is different - } else { - otherP := other.(*ArrayPredictionContext) - return &a.returnStates == &otherP.returnStates && &a.parents == &otherP.parents - } -} - -func (a *ArrayPredictionContext) hash() int { - h := murmurInit(1) - - for _, p := range a.parents { - h = murmurUpdate(h, p.hash()) - } - - for _, r := range a.returnStates { - h = murmurUpdate(h, r) - } - - return murmurFinish(h, 2 * len(a.parents)) -} - -func (a *ArrayPredictionContext) String() string { - if a.isEmpty() { - return "[]" - } - - s := "[" - for i := 0; i < len(a.returnStates); i++ { - if i > 0 { - s = s + ", " - } - if a.returnStates[i] == BasePredictionContextEmptyReturnState { - s = s + "$" - continue - } - s = s + strconv.Itoa(a.returnStates[i]) - if a.parents[i] != nil { - s = s + " " + a.parents[i].String() - } else { - s = s + "nil" - } - } - - return s + "]" -} - -// Convert a {@link RuleContext} tree to a {@link BasePredictionContext} graph. -// Return {@link //EMPTY} if {@code outerContext} is empty or nil. -// / -func predictionContextFromRuleContext(a *ATN, outerContext RuleContext) PredictionContext { - if outerContext == nil { - outerContext = RuleContextEmpty - } - // if we are in RuleContext of start rule, s, then BasePredictionContext - // is EMPTY. Nobody called us. 
(if we are empty, return empty) - if outerContext.GetParent() == nil || outerContext == RuleContextEmpty { - return BasePredictionContextEMPTY - } - // If we have a parent, convert it to a BasePredictionContext graph - parent := predictionContextFromRuleContext(a, outerContext.GetParent().(RuleContext)) - state := a.states[outerContext.GetInvokingState()] - transition := state.GetTransitions()[0] - - return SingletonBasePredictionContextCreate(parent, transition.(*RuleTransition).followState.GetStateNumber()) -} - -func merge(a, b PredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext { - // share same graph if both same - if a == b { - return a - } - - ac, ok1 := a.(*BaseSingletonPredictionContext) - bc, ok2 := b.(*BaseSingletonPredictionContext) - - if ok1 && ok2 { - return mergeSingletons(ac, bc, rootIsWildcard, mergeCache) - } - // At least one of a or b is array - // If one is $ and rootIsWildcard, return $ as// wildcard - if rootIsWildcard { - if _, ok := a.(*EmptyPredictionContext); ok { - return a - } - if _, ok := b.(*EmptyPredictionContext); ok { - return b - } - } - // convert singleton so both are arrays to normalize - if _, ok := a.(*BaseSingletonPredictionContext); ok { - a = NewArrayPredictionContext([]PredictionContext{a.GetParent(0)}, []int{a.getReturnState(0)}) - } - if _, ok := b.(*BaseSingletonPredictionContext); ok { - b = NewArrayPredictionContext([]PredictionContext{b.GetParent(0)}, []int{b.getReturnState(0)}) - } - return mergeArrays(a.(*ArrayPredictionContext), b.(*ArrayPredictionContext), rootIsWildcard, mergeCache) -} - -// -// Merge two {@link SingletonBasePredictionContext} instances. -// -//
<p>Stack tops equal, parents merge is same return left graph.</p>
-//
-// <p>Same stack top, parents differ merge parents giving array node, then
-// remainders of those graphs. A new root node is created to point to the
-// merged parents.</p>
-//
-// <p>Different stack tops pointing to same parent. Make array node for the
-// root where both elements in the root point to the same (original)
-// parent.</p>
-//
-// <p>Different stack tops pointing to different parents. Make array node for
-// the root where each element points to the corresponding original
-// parent.</p>
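As a rough, package-internal illustration of the "different stack tops, same parent" case above, assuming the constructors and merge behave as defined later in this file:

func exampleSingletonMerge() PredictionContext {
	parent := BasePredictionContextEMPTY
	a := SingletonBasePredictionContextCreate(parent, 9)
	b := SingletonBasePredictionContextCreate(parent, 7)
	// Local-context merge with no cache: the differing stack tops 9 and 7
	// should come back as a single ArrayPredictionContext with sorted
	// payloads [7, 9] sharing the one parent.
	return merge(a, b, true, nil)
}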
-// -// @param a the first {@link SingletonBasePredictionContext} -// @param b the second {@link SingletonBasePredictionContext} -// @param rootIsWildcard {@code true} if this is a local-context merge, -// otherwise false to indicate a full-context merge -// @param mergeCache -// / -func mergeSingletons(a, b *BaseSingletonPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext { - if mergeCache != nil { - previous := mergeCache.Get(a.hash(), b.hash()) - if previous != nil { - return previous.(PredictionContext) - } - previous = mergeCache.Get(b.hash(), a.hash()) - if previous != nil { - return previous.(PredictionContext) - } - } - - rootMerge := mergeRoot(a, b, rootIsWildcard) - if rootMerge != nil { - if mergeCache != nil { - mergeCache.set(a.hash(), b.hash(), rootMerge) - } - return rootMerge - } - if a.returnState == b.returnState { - parent := merge(a.parentCtx, b.parentCtx, rootIsWildcard, mergeCache) - // if parent is same as existing a or b parent or reduced to a parent, - // return it - if parent == a.parentCtx { - return a // ax + bx = ax, if a=b - } - if parent == b.parentCtx { - return b // ax + bx = bx, if a=b - } - // else: ax + ay = a'[x,y] - // merge parents x and y, giving array node with x,y then remainders - // of those graphs. dup a, a' points at merged array - // Newjoined parent so create Newsingleton pointing to it, a' - spc := SingletonBasePredictionContextCreate(parent, a.returnState) - if mergeCache != nil { - mergeCache.set(a.hash(), b.hash(), spc) - } - return spc - } - // a != b payloads differ - // see if we can collapse parents due to $+x parents if local ctx - var singleParent PredictionContext - if a == b || (a.parentCtx != nil && a.parentCtx == b.parentCtx) { // ax + - // bx = - // [a,b]x - singleParent = a.parentCtx - } - if singleParent != nil { // parents are same - // sort payloads and use same parent - payloads := []int{a.returnState, b.returnState} - if a.returnState > b.returnState { - payloads[0] = b.returnState - payloads[1] = a.returnState - } - parents := []PredictionContext{singleParent, singleParent} - apc := NewArrayPredictionContext(parents, payloads) - if mergeCache != nil { - mergeCache.set(a.hash(), b.hash(), apc) - } - return apc - } - // parents differ and can't merge them. Just pack together - // into array can't merge. - // ax + by = [ax,by] - payloads := []int{a.returnState, b.returnState} - parents := []PredictionContext{a.parentCtx, b.parentCtx} - if a.returnState > b.returnState { // sort by payload - payloads[0] = b.returnState - payloads[1] = a.returnState - parents = []PredictionContext{b.parentCtx, a.parentCtx} - } - apc := NewArrayPredictionContext(parents, payloads) - if mergeCache != nil { - mergeCache.set(a.hash(), b.hash(), apc) - } - return apc -} - -// -// Handle case where at least one of {@code a} or {@code b} is -// {@link //EMPTY}. In the following diagrams, the symbol {@code $} is used -// to represent {@link //EMPTY}. -// -//
<p>Local-Context Merges</p>
-//
-// <p>These local-context merge operations are used when {@code rootIsWildcard}
-// is true.</p>
-//
-// <p>{@link //EMPTY} is superset of any graph return {@link //EMPTY}.</p>
-//
-// <p>{@link //EMPTY} and anything is {@code //EMPTY}, so merged parent is
-// {@code //EMPTY} return left graph.</p>
-//
-// <p>Special case of last merge if local context.</p>
-//
-// <p>Full-Context Merges</p>
-//
-// <p>These full-context merge operations are used when {@code rootIsWildcard}
-// is false.</p>
-//
-// <p>Must keep all contexts; {@link //EMPTY} in array is a special value (and
-// nil parent).</p>
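A small illustration of the full-context rule {@code $ + x = [$,x]} described above, assuming mergeRoot as defined below:

func exampleMergeRoot() PredictionContext {
	x := SingletonBasePredictionContextCreate(BasePredictionContextEMPTY, 12)
	// Full-context merge of $ with x: yields an array context whose return
	// states are [12, BasePredictionContextEmptyReturnState], with a nil
	// parent slot standing in for $.
	return mergeRoot(BasePredictionContextEMPTY, x, false)
}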
-// -// @param a the first {@link SingletonBasePredictionContext} -// @param b the second {@link SingletonBasePredictionContext} -// @param rootIsWildcard {@code true} if this is a local-context merge, -// otherwise false to indicate a full-context merge -// / -func mergeRoot(a, b SingletonPredictionContext, rootIsWildcard bool) PredictionContext { - if rootIsWildcard { - if a == BasePredictionContextEMPTY { - return BasePredictionContextEMPTY // // + b =// - } - if b == BasePredictionContextEMPTY { - return BasePredictionContextEMPTY // a +// =// - } - } else { - if a == BasePredictionContextEMPTY && b == BasePredictionContextEMPTY { - return BasePredictionContextEMPTY // $ + $ = $ - } else if a == BasePredictionContextEMPTY { // $ + x = [$,x] - payloads := []int{b.getReturnState(-1), BasePredictionContextEmptyReturnState} - parents := []PredictionContext{b.GetParent(-1), nil} - return NewArrayPredictionContext(parents, payloads) - } else if b == BasePredictionContextEMPTY { // x + $ = [$,x] ($ is always first if present) - payloads := []int{a.getReturnState(-1), BasePredictionContextEmptyReturnState} - parents := []PredictionContext{a.GetParent(-1), nil} - return NewArrayPredictionContext(parents, payloads) - } - } - return nil -} - -// -// Merge two {@link ArrayBasePredictionContext} instances. -// -//
<p>Different tops, different parents.</p>
-//
-// <p>Shared top, same parents.</p>
-//
-// <p>Shared top, different parents.</p>
-//
-// <p>Shared top, all shared parents.</p>
-//
-// <p>Equal tops, merge parents and reduce top to
-// {@link SingletonBasePredictionContext}.</p>
-// / -func mergeArrays(a, b *ArrayPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext { - if mergeCache != nil { - previous := mergeCache.Get(a.hash(), b.hash()) - if previous != nil { - return previous.(PredictionContext) - } - previous = mergeCache.Get(b.hash(), a.hash()) - if previous != nil { - return previous.(PredictionContext) - } - } - // merge sorted payloads a + b => M - i := 0 // walks a - j := 0 // walks b - k := 0 // walks target M array - - mergedReturnStates := make([]int, len(a.returnStates)+len(b.returnStates)) - mergedParents := make([]PredictionContext, len(a.returnStates)+len(b.returnStates)) - // walk and merge to yield mergedParents, mergedReturnStates - for i < len(a.returnStates) && j < len(b.returnStates) { - aParent := a.parents[i] - bParent := b.parents[j] - if a.returnStates[i] == b.returnStates[j] { - // same payload (stack tops are equal), must yield merged singleton - payload := a.returnStates[i] - // $+$ = $ - bothDollars := payload == BasePredictionContextEmptyReturnState && aParent == nil && bParent == nil - axAX := (aParent != nil && bParent != nil && aParent == bParent) // ax+ax - // -> - // ax - if bothDollars || axAX { - mergedParents[k] = aParent // choose left - mergedReturnStates[k] = payload - } else { // ax+ay -> a'[x,y] - mergedParent := merge(aParent, bParent, rootIsWildcard, mergeCache) - mergedParents[k] = mergedParent - mergedReturnStates[k] = payload - } - i++ // hop over left one as usual - j++ // but also Skip one in right side since we merge - } else if a.returnStates[i] < b.returnStates[j] { // copy a[i] to M - mergedParents[k] = aParent - mergedReturnStates[k] = a.returnStates[i] - i++ - } else { // b > a, copy b[j] to M - mergedParents[k] = bParent - mergedReturnStates[k] = b.returnStates[j] - j++ - } - k++ - } - // copy over any payloads remaining in either array - if i < len(a.returnStates) { - for p := i; p < len(a.returnStates); p++ { - mergedParents[k] = a.parents[p] - mergedReturnStates[k] = a.returnStates[p] - k++ - } - } else { - for p := j; p < len(b.returnStates); p++ { - mergedParents[k] = b.parents[p] - mergedReturnStates[k] = b.returnStates[p] - k++ - } - } - // trim merged if we combined a few that had same stack tops - if k < len(mergedParents) { // write index < last position trim - if k == 1 { // for just one merged element, return singleton top - pc := SingletonBasePredictionContextCreate(mergedParents[0], mergedReturnStates[0]) - if mergeCache != nil { - mergeCache.set(a.hash(), b.hash(), pc) - } - return pc - } - mergedParents = mergedParents[0:k] - mergedReturnStates = mergedReturnStates[0:k] - } - - M := NewArrayPredictionContext(mergedParents, mergedReturnStates) - - // if we created same array as a or b, return that instead - // TODO: track whether this is possible above during merge sort for speed - if M == a { - if mergeCache != nil { - mergeCache.set(a.hash(), b.hash(), a) - } - return a - } - if M == b { - if mergeCache != nil { - mergeCache.set(a.hash(), b.hash(), b) - } - return b - } - combineCommonParents(mergedParents) - - if mergeCache != nil { - mergeCache.set(a.hash(), b.hash(), M) - } - return M -} - -// -// Make pass over all M {@code parents} merge any {@code equals()} -// ones. 
-// / -func combineCommonParents(parents []PredictionContext) { - uniqueParents := make(map[PredictionContext]PredictionContext) - - for p := 0; p < len(parents); p++ { - parent := parents[p] - if uniqueParents[parent] == nil { - uniqueParents[parent] = parent - } - } - for q := 0; q < len(parents); q++ { - parents[q] = uniqueParents[parents[q]] - } -} - -func getCachedBasePredictionContext(context PredictionContext, contextCache *PredictionContextCache, visited map[PredictionContext]PredictionContext) PredictionContext { - - if context.isEmpty() { - return context - } - existing := visited[context] - if existing != nil { - return existing - } - existing = contextCache.Get(context) - if existing != nil { - visited[context] = existing - return existing - } - changed := false - parents := make([]PredictionContext, context.length()) - for i := 0; i < len(parents); i++ { - parent := getCachedBasePredictionContext(context.GetParent(i), contextCache, visited) - if changed || parent != context.GetParent(i) { - if !changed { - parents = make([]PredictionContext, context.length()) - for j := 0; j < context.length(); j++ { - parents[j] = context.GetParent(j) - } - changed = true - } - parents[i] = parent - } - } - if !changed { - contextCache.add(context) - visited[context] = context - return context - } - var updated PredictionContext - if len(parents) == 0 { - updated = BasePredictionContextEMPTY - } else if len(parents) == 1 { - updated = SingletonBasePredictionContextCreate(parents[0], context.getReturnState(0)) - } else { - updated = NewArrayPredictionContext(parents, context.(*ArrayPredictionContext).GetReturnStates()) - } - contextCache.add(updated) - visited[updated] = updated - visited[context] = updated - - return updated -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_mode.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_mode.go deleted file mode 100644 index 15718f91..00000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_mode.go +++ /dev/null @@ -1,553 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -// This enumeration defines the prediction modes available in ANTLR 4 along with -// utility methods for analyzing configuration sets for conflicts and/or -// ambiguities. - -const ( - // - // The SLL(*) prediction mode. This prediction mode ignores the current - // parser context when making predictions. This is the fastest prediction - // mode, and provides correct results for many grammars. This prediction - // mode is more powerful than the prediction mode provided by ANTLR 3, but - // may result in syntax errors for grammar and input combinations which are - // not SLL. - // - //
<p>
	// When using this prediction mode, the parser will either return a correct
	// parse tree (i.e. the same parse tree that would be returned with the
	// {@link //LL} prediction mode), or it will Report a syntax error. If a
	// syntax error is encountered when using the {@link //SLL} prediction mode,
	// it may be due to either an actual syntax error in the input or indicate
	// that the particular combination of grammar and input requires the more
	// powerful {@link //LL} prediction abilities to complete successfully.</p>
-	//
-	// <p>
-	// This prediction mode does not provide any guarantees for prediction
-	// behavior for syntactically-incorrect inputs.</p>
-	//
-	PredictionModeSLL = 0
-	//
-	// The LL(*) prediction mode. This prediction mode allows the current parser
-	// context to be used for resolving SLL conflicts that occur during
-	// prediction. This is the fastest prediction mode that guarantees correct
-	// parse results for all combinations of grammars with syntactically correct
-	// inputs.
-	//
-	// <p>
-	// When using this prediction mode, the parser will make correct decisions
-	// for all syntactically-correct grammar and input combinations. However, in
-	// cases where the grammar is truly ambiguous this prediction mode might not
-	// Report a precise answer for exactly which alternatives are
-	// ambiguous.</p>
-	//
-	// <p>
-	// This prediction mode does not provide any guarantees for prediction
-	// behavior for syntactically-incorrect inputs.</p>
-	//
-	PredictionModeLL = 1
-	//
-	// The LL(*) prediction mode with exact ambiguity detection. In addition to
-	// the correctness guarantees provided by the {@link //LL} prediction mode,
-	// this prediction mode instructs the prediction algorithm to determine the
-	// complete and exact set of ambiguous alternatives for every ambiguous
-	// decision encountered while parsing.
-	//
-	// <p>
-	// This prediction mode may be used for diagnosing ambiguities during
-	// grammar development. Due to the performance overhead of calculating sets
-	// of ambiguous alternatives, this prediction mode should be avoided when
-	// the exact results are not necessary.</p>
-	//
-	// <p>
-	// This prediction mode does not provide any guarantees for prediction
-	// behavior for syntactically-incorrect inputs.</p>
- // - PredictionModeLLExactAmbigDetection = 2 -) - -// -// Computes the SLL prediction termination condition. -// -//
<p>
-// This method computes the SLL prediction termination condition for both of
-// the following cases.</p>
-//
-// <ul>
-// <li>The usual SLL+LL fallback upon SLL conflict</li>
-// <li>Pure SLL without LL fallback</li>
-// </ul>
-//
-// <p>COMBINED SLL+LL PARSING</p>
-//
-// <p>When LL-fallback is enabled upon SLL conflict, correct predictions are
-// ensured regardless of how the termination condition is computed by this
-// method. Due to the substantially higher cost of LL prediction, the
-// prediction should only fall back to LL when the additional lookahead
-// cannot lead to a unique SLL prediction.</p>
-//
-// <p>Assuming combined SLL+LL parsing, an SLL configuration set with only
-// conflicting subsets should fall back to full LL, even if the
-// configuration sets don't resolve to the same alternative (e.g.
-// {@code {1,2}} and {@code {3,4}}). If there is at least one non-conflicting
-// configuration, SLL could continue with the hopes that more lookahead will
-// resolve via one of those non-conflicting configurations.</p>
-//
-// <p>Here's the prediction termination rule then: SLL (for SLL+LL parsing)
-// stops when it sees only conflicting configuration subsets. In contrast,
-// full LL keeps going when there is uncertainty.</p>
-//
-// <p>HEURISTIC</p>
-//
-// <p>As a heuristic, we stop prediction when we see any conflicting subset
-// unless we see a state that only has one alternative associated with it.
-// The single-alt-state thing lets prediction continue upon rules like
-// (otherwise, it would admit defeat too soon):</p>
-//
-// <p>{@code [12|1|[], 6|2|[], 12|2|[]]. s : (ID | ID ID?) ';' }</p>
-//
-// <p>When the ATN simulation reaches the state before {@code ';'}, it has a
-// DFA state that looks like: {@code [12|1|[], 6|2|[], 12|2|[]]}. Naturally
-// {@code 12|1|[]} and {@code 12|2|[]} conflict, but we cannot stop
-// processing this node because alternative two has another way to continue,
-// via {@code [6|2|[]]}.</p>
-//
-// <p>It also lets us continue for this rule:</p>
-//
-// <p>{@code [1|1|[], 1|2|[], 8|3|[]] a : A | A | A B }</p>
-//
-// <p>After Matching input A, we reach the stop state for rule A, state 1.
-// State 8 is the state right before B. Clearly alternatives 1 and 2
-// conflict and no amount of further lookahead will separate the two.
-// However, alternative 3 will be able to continue and so we do not stop
-// working on this state. In the previous example, we're concerned with
-// states associated with the conflicting alternatives. Here alt 3 is not
-// associated with the conflicting configs, but since we can continue
-// looking for input reasonably, don't declare the state done.</p>
-//
-// <p>PURE SLL PARSING</p>
-//
-// <p>To handle pure SLL parsing, all we have to do is make sure that we
-// combine stack contexts for configurations that differ only by semantic
-// predicate. From there, we can do the usual SLL termination heuristic.</p>
-//
-// <p>PREDICATES IN SLL+LL PARSING</p>
-//
-// <p>SLL decisions don't evaluate predicates until after they reach DFA stop
-// states because they need to create the DFA cache that works in all
-// semantic situations. In contrast, full LL evaluates predicates collected
-// during start state computation so it can ignore predicates thereafter.
-// This means that SLL termination detection can totally ignore semantic
-// predicates.</p>
-//
-// <p>Implementation-wise, {@link ATNConfigSet} combines stack contexts but not
-// semantic predicate contexts so we might see two configurations like the
-// following.</p>
-//
-// <p>{@code (s, 1, x, {}), (s, 1, x', {p})}</p>
-//
-// <p>Before testing these configurations against others, we have to merge
-// {@code x} and {@code x'} (without modifying the existing configurations).
-// For example, we test {@code (x+x')==x''} when looking for conflicts in
-// the following configurations.</p>
-//
-// <p>{@code (s, 1, x, {}), (s, 1, x', {p}), (s, 2, x'', {})}</p>
-//
-// <p>If the configuration set has predicates (as indicated by
-// {@link ATNConfigSet//hasSemanticContext}), this algorithm makes a copy of
-// the configurations to strip out all of the predicates so that a standard
-// {@link ATNConfigSet} will merge everything ignoring predicates.</p>
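For reference, an application normally opts in to pure SLL through the parser's interpreter. A sketch of the usual generated-parser setup; MyLexer, MyParser, and MyStartRule are hypothetical generated names:

import "github.com/antlr/antlr4/runtime/Go/antlr"

func parseWithSLL(src string) {
	input := antlr.NewInputStream(src)
	lexer := NewMyLexer(input) // hypothetical generated lexer
	tokens := antlr.NewCommonTokenStream(lexer, antlr.TokenDefaultChannel)
	parser := NewMyParser(tokens) // hypothetical generated parser
	// Trade the default two-stage SLL+LL strategy for pure SLL.
	parser.GetInterpreter().SetPredictionMode(antlr.PredictionModeSLL)
	parser.MyStartRule() // hypothetical entry rule
}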
-// -func PredictionModehasSLLConflictTerminatingPrediction(mode int, configs ATNConfigSet) bool { - // Configs in rule stop states indicate reaching the end of the decision - // rule (local context) or end of start rule (full context). If all - // configs meet this condition, then none of the configurations is able - // to Match additional input so we terminate prediction. - // - if PredictionModeallConfigsInRuleStopStates(configs) { - return true - } - // pure SLL mode parsing - if mode == PredictionModeSLL { - // Don't bother with combining configs from different semantic - // contexts if we can fail over to full LL costs more time - // since we'll often fail over anyway. - if configs.HasSemanticContext() { - // dup configs, tossing out semantic predicates - dup := NewBaseATNConfigSet(false) - for _, c := range configs.GetItems() { - - // NewBaseATNConfig({semanticContext:}, c) - c = NewBaseATNConfig2(c, SemanticContextNone) - dup.Add(c, nil) - } - configs = dup - } - // now we have combined contexts for configs with dissimilar preds - } - // pure SLL or combined SLL+LL mode parsing - altsets := PredictionModegetConflictingAltSubsets(configs) - return PredictionModehasConflictingAltSet(altsets) && !PredictionModehasStateAssociatedWithOneAlt(configs) -} - -// Checks if any configuration in {@code configs} is in a -// {@link RuleStopState}. Configurations meeting this condition have reached -// the end of the decision rule (local context) or end of start rule (full -// context). -// -// @param configs the configuration set to test -// @return {@code true} if any configuration in {@code configs} is in a -// {@link RuleStopState}, otherwise {@code false} -func PredictionModehasConfigInRuleStopState(configs ATNConfigSet) bool { - for _, c := range configs.GetItems() { - if _, ok := c.GetState().(*RuleStopState); ok { - return true - } - } - return false -} - -// Checks if all configurations in {@code configs} are in a -// {@link RuleStopState}. Configurations meeting this condition have reached -// the end of the decision rule (local context) or end of start rule (full -// context). -// -// @param configs the configuration set to test -// @return {@code true} if all configurations in {@code configs} are in a -// {@link RuleStopState}, otherwise {@code false} -func PredictionModeallConfigsInRuleStopStates(configs ATNConfigSet) bool { - - for _, c := range configs.GetItems() { - if _, ok := c.GetState().(*RuleStopState); !ok { - return false - } - } - return true -} - -// -// Full LL prediction termination. -// -//
<p>Can we stop looking ahead during ATN simulation or is there some
-// uncertainty as to which alternative we will ultimately pick, after
-// consuming more input? Even if there are partial conflicts, we might know
-// that everything is going to resolve to the same minimum alternative. That
-// means we can stop since no more lookahead will change that fact. On the
-// other hand, there might be multiple conflicts that resolve to different
-// minimums. That means we need more look ahead to decide which of those
-// alternatives we should predict.</p>
-//
-// <p>The basic idea is to split the set of configurations {@code C}, into
-// conflicting subsets {@code (s, _, ctx, _)} and singleton subsets with
-// non-conflicting configurations. Two configurations conflict if they have
-// identical {@link ATNConfig//state} and {@link ATNConfig//context} values
-// but different {@link ATNConfig//alt} value, e.g. {@code (s, i, ctx, _)}
-// and {@code (s, j, ctx, _)} for {@code i!=j}.</p>
-//
-// <p>Reduce these configuration subsets to the set of possible alternatives.
-// You can compute the alternative subsets in one pass as follows:</p>
-//
-// <p>{@code A_s,ctx = {i | (s, i, ctx, _)}} for each configuration in
-// {@code C} holding {@code s} and {@code ctx} fixed.</p>
-//
-// <p>Or in pseudo-code, for each configuration {@code c} in {@code C}:</p>
-//
-// <pre>
-// map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not
-// alt and not pred
-// </pre>
-//
-// <p>The values in {@code map} are the set of {@code A_s,ctx} sets.</p>
-//
-// <p>If {@code |A_s,ctx|=1} then there is no conflict associated with
-// {@code s} and {@code ctx}.</p>
-//
-// <p>Reduce the subsets to singletons by choosing a minimum of each subset. If
-// the union of these alternative subsets is a singleton, then no amount of
-// more lookahead will help us. We will always pick that alternative. If,
-// however, there is more than one alternative, then we are uncertain which
-// alternative to predict and must continue looking for resolution. We may
-// or may not discover an ambiguity in the future, even if there are no
-// conflicting subsets this round.</p>
-//
-// <p>The biggest sin is to terminate early because it means we've made a
-// decision but were uncertain as to the eventual outcome. We haven't used
-// enough lookahead. On the other hand, announcing a conflict too late is no
-// big deal; you will still have the conflict. It's just inefficient. It
-// might even look until the end of file.</p>
-//
-// <p>No special consideration for semantic predicates is required because
-// predicates are evaluated on-the-fly for full LL prediction, ensuring that
-// no configuration contains a semantic context during the termination
-// check.</p>
-//
-// <p>CONFLICTING CONFIGS</p>
-//
-// <p>Two configurations {@code (s, i, x)} and {@code (s, j, x')}, conflict
-// when {@code i!=j} but {@code x=x'}. Because we merge all
-// {@code (s, i, _)} configurations together, that means that there are at
-// most {@code n} configurations associated with state {@code s} for
-// {@code n} possible alternatives in the decision. The merged stacks
-// complicate the comparison of configuration contexts {@code x} and
-// {@code x'}. Sam checks to see if one is a subset of the other by calling
-// merge and checking to see if the merged result is either {@code x} or
-// {@code x'}. If the {@code x} associated with lowest alternative {@code i}
-// is the superset, then {@code i} is the only possible prediction since the
-// others resolve to {@code min(i)} as well. However, if {@code x} is
-// associated with {@code j>i} then at least one stack configuration for
-// {@code j} is not in conflict with alternative {@code i}. The algorithm
-// should keep going, looking for more lookahead due to the uncertainty.</p>
-//
-// <p>For simplicity, I'm doing an equality check between {@code x} and
-// {@code x'} that lets the algorithm continue to consume lookahead longer
-// than necessary. The reason I like the equality is of course the
-// simplicity, but also because that is the test you need to detect the
-// alternatives that are actually in conflict.</p>
-//
-// <p>CONTINUE/STOP RULE</p>
-//
-// <p>Continue if union of resolved alternative sets from non-conflicting and
-// conflicting alternative subsets has more than one alternative. We are
-// uncertain about which alternative to predict.</p>
-//
-// <p>The complete set of alternatives, {@code [i for (_,i,_)]}, tells us which
-// alternatives are still in the running for the amount of input we've
-// consumed at this point. The conflicting sets let us strip away
-// configurations that won't lead to more states because we resolve
-// conflicts to the configuration with a minimum alternate for the
-// conflicting set.</p>
-//
-// <p>CASES</p>
-//
-// <ul>
-//
-// <li>no conflicts and more than 1 alternative in set => continue</li>
-//
-// <li>{@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s, 3, z)},
-// {@code (s', 1, y)}, {@code (s', 2, y)} yields non-conflicting set
-// {@code {3}} U conflicting sets {@code min({1,2})} U {@code min({1,2})} =
-// {@code {1,3}} => continue
-// </li>
-//
-// <li>{@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 1, y)},
-// {@code (s', 2, y)}, {@code (s'', 1, z)} yields non-conflicting set
-// {@code {1}} U conflicting sets {@code min({1,2})} U {@code min({1,2})} =
-// {@code {1}} => stop and predict 1</li>
-//
-// <li>{@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 1, y)},
-// {@code (s', 2, y)} yields conflicting, reduced sets {@code {1}} U
-// {@code {1}} = {@code {1}} => stop and predict 1, can announce
-// ambiguity {@code {1,2}}</li>
-//
-// <li>{@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 2, y)},
-// {@code (s', 3, y)} yields conflicting, reduced sets {@code {1}} U
-// {@code {2}} = {@code {1,2}} => continue</li>
-//
-// <li>{@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 3, y)},
-// {@code (s', 4, y)} yields conflicting, reduced sets {@code {1}} U
-// {@code {3}} = {@code {1,3}} => continue</li>
-//
-// </ul>
-//
-// <p>EXACT AMBIGUITY DETECTION</p>
-//
-// <p>If all states Report the same conflicting set of alternatives, then we
-// know we have the exact ambiguity set.</p>
-//
-// <p>|A_i|>1 and
-// A_i = A_j for all i, j.</p>
-//
-// <p>In other words, we continue examining lookahead until all {@code A_i}
-// have more than one alternative and all {@code A_i} are the same. If
-// {@code A={{1,2}, {1,3}}}, then regular LL prediction would terminate
-// because the resolved set is {@code {1}}. To determine what the real
-// ambiguity is, we have to know whether the ambiguity is between one and
-// two or one and three so we keep going. We can only stop prediction when
-// we need exact ambiguity detection when the sets look like
-// {@code A={{1,2}}} or {@code {{1,2},{1,2}}}, etc...</p>
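A toy check of the continue/stop rule above, using this file's BitSet helpers: the conflicting subsets {1,2} and {1,3} both resolve to minimum alt 1, so plain LL prediction can stop (exact ambiguity detection would keep looking, as noted above).

func exampleResolvedAlts() int {
	s1 := NewBitSet()
	s1.add(1)
	s1.add(2)
	s2 := NewBitSet()
	s2.add(1)
	s2.add(3)
	// Both subsets share minimum alternative 1, so this returns 1 rather
	// than ATNInvalidAltNumber and lookahead can stop.
	return PredictionModegetSingleViableAlt([]*BitSet{s1, s2})
}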
-// -func PredictionModeresolvesToJustOneViableAlt(altsets []*BitSet) int { - return PredictionModegetSingleViableAlt(altsets) -} - -// -// Determines if every alternative subset in {@code altsets} contains more -// than one alternative. -// -// @param altsets a collection of alternative subsets -// @return {@code true} if every {@link BitSet} in {@code altsets} has -// {@link BitSet//cardinality cardinality} > 1, otherwise {@code false} -// -func PredictionModeallSubsetsConflict(altsets []*BitSet) bool { - return !PredictionModehasNonConflictingAltSet(altsets) -} - -// -// Determines if any single alternative subset in {@code altsets} contains -// exactly one alternative. -// -// @param altsets a collection of alternative subsets -// @return {@code true} if {@code altsets} contains a {@link BitSet} with -// {@link BitSet//cardinality cardinality} 1, otherwise {@code false} -// -func PredictionModehasNonConflictingAltSet(altsets []*BitSet) bool { - for i := 0; i < len(altsets); i++ { - alts := altsets[i] - if alts.length() == 1 { - return true - } - } - return false -} - -// -// Determines if any single alternative subset in {@code altsets} contains -// more than one alternative. -// -// @param altsets a collection of alternative subsets -// @return {@code true} if {@code altsets} contains a {@link BitSet} with -// {@link BitSet//cardinality cardinality} > 1, otherwise {@code false} -// -func PredictionModehasConflictingAltSet(altsets []*BitSet) bool { - for i := 0; i < len(altsets); i++ { - alts := altsets[i] - if alts.length() > 1 { - return true - } - } - return false -} - -// -// Determines if every alternative subset in {@code altsets} is equivalent. -// -// @param altsets a collection of alternative subsets -// @return {@code true} if every member of {@code altsets} is equal to the -// others, otherwise {@code false} -// -func PredictionModeallSubsetsEqual(altsets []*BitSet) bool { - var first *BitSet - - for i := 0; i < len(altsets); i++ { - alts := altsets[i] - if first == nil { - first = alts - } else if alts != first { - return false - } - } - - return true -} - -// -// Returns the unique alternative predicted by all alternative subsets in -// {@code altsets}. If no such alternative exists, this method returns -// {@link ATN//INVALID_ALT_NUMBER}. -// -// @param altsets a collection of alternative subsets -// -func PredictionModegetUniqueAlt(altsets []*BitSet) int { - all := PredictionModeGetAlts(altsets) - if all.length() == 1 { - return all.minValue() - } - - return ATNInvalidAltNumber -} - -// Gets the complete set of represented alternatives for a collection of -// alternative subsets. This method returns the union of each {@link BitSet} -// in {@code altsets}. -// -// @param altsets a collection of alternative subsets -// @return the set of represented alternatives in {@code altsets} -// -func PredictionModeGetAlts(altsets []*BitSet) *BitSet { - all := NewBitSet() - for _, alts := range altsets { - all.or(alts) - } - return all -} - -// -// This func gets the conflicting alt subsets from a configuration set. -// For each configuration {@code c} in {@code configs}: -// -//
-// map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not
-// alt and not pred
-// </pre>
-// -func PredictionModegetConflictingAltSubsets(configs ATNConfigSet) []*BitSet { - configToAlts := make(map[int]*BitSet) - - for _, c := range configs.GetItems() { - key := 31 * c.GetState().GetStateNumber() + c.GetContext().hash() - - alts, ok := configToAlts[key] - if !ok { - alts = NewBitSet() - configToAlts[key] = alts - } - alts.add(c.GetAlt()) - } - - values := make([]*BitSet, 0, 10) - for _, v := range configToAlts { - values = append(values, v) - } - return values -} - -// -// Get a map from state to alt subset from a configuration set. For each -// configuration {@code c} in {@code configs}: -// -//
-// map[c.{@link ATNConfig//state state}] U= c.{@link ATNConfig//alt alt}
-// </pre>
-// -func PredictionModeGetStateToAltMap(configs ATNConfigSet) *AltDict { - m := NewAltDict() - - for _, c := range configs.GetItems() { - alts := m.Get(c.GetState().String()) - if alts == nil { - alts = NewBitSet() - m.put(c.GetState().String(), alts) - } - alts.(*BitSet).add(c.GetAlt()) - } - return m -} - -func PredictionModehasStateAssociatedWithOneAlt(configs ATNConfigSet) bool { - values := PredictionModeGetStateToAltMap(configs).values() - for i := 0; i < len(values); i++ { - if values[i].(*BitSet).length() == 1 { - return true - } - } - return false -} - -func PredictionModegetSingleViableAlt(altsets []*BitSet) int { - result := ATNInvalidAltNumber - - for i := 0; i < len(altsets); i++ { - alts := altsets[i] - minAlt := alts.minValue() - if result == ATNInvalidAltNumber { - result = minAlt - } else if result != minAlt { // more than 1 viable alt - return ATNInvalidAltNumber - } - } - return result -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/recognizer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/recognizer.go deleted file mode 100644 index d114800f..00000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/recognizer.go +++ /dev/null @@ -1,217 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" - "strings" - - "strconv" -) - -type Recognizer interface { - GetLiteralNames() []string - GetSymbolicNames() []string - GetRuleNames() []string - - Sempred(RuleContext, int, int) bool - Precpred(RuleContext, int) bool - - GetState() int - SetState(int) - Action(RuleContext, int, int) - AddErrorListener(ErrorListener) - RemoveErrorListeners() - GetATN() *ATN - GetErrorListenerDispatch() ErrorListener -} - -type BaseRecognizer struct { - listeners []ErrorListener - state int - - RuleNames []string - LiteralNames []string - SymbolicNames []string - GrammarFileName string -} - -func NewBaseRecognizer() *BaseRecognizer { - rec := new(BaseRecognizer) - rec.listeners = []ErrorListener{ConsoleErrorListenerINSTANCE} - rec.state = -1 - return rec -} - -var tokenTypeMapCache = make(map[string]int) -var ruleIndexMapCache = make(map[string]int) - -func (b *BaseRecognizer) checkVersion(toolVersion string) { - runtimeVersion := "4.8" - if runtimeVersion != toolVersion { - fmt.Println("ANTLR runtime and generated code versions disagree: " + runtimeVersion + "!=" + toolVersion) - } -} - -func (b *BaseRecognizer) Action(context RuleContext, ruleIndex, actionIndex int) { - panic("action not implemented on Recognizer!") -} - -func (b *BaseRecognizer) AddErrorListener(listener ErrorListener) { - b.listeners = append(b.listeners, listener) -} - -func (b *BaseRecognizer) RemoveErrorListeners() { - b.listeners = make([]ErrorListener, 0) -} - -func (b *BaseRecognizer) GetRuleNames() []string { - return b.RuleNames -} - -func (b *BaseRecognizer) GetTokenNames() []string { - return b.LiteralNames -} - -func (b *BaseRecognizer) GetSymbolicNames() []string { - return b.SymbolicNames -} - -func (b *BaseRecognizer) GetLiteralNames() []string { - return b.LiteralNames -} - -func (b *BaseRecognizer) GetState() int { - return b.state -} - -func (b *BaseRecognizer) SetState(v int) { - b.state = v -} - -//func (b *Recognizer) GetTokenTypeMap() { -// var tokenNames = b.GetTokenNames() -// if (tokenNames==nil) { -// panic("The current recognizer does not provide a list of token names.") -// } -// var result = 
tokenTypeMapCache[tokenNames] -// if(result==nil) { -// result = tokenNames.reduce(function(o, k, i) { o[k] = i }) -// result.EOF = TokenEOF -// tokenTypeMapCache[tokenNames] = result -// } -// return result -//} - -// Get a map from rule names to rule indexes. -// -// Used for XPath and tree pattern compilation. -//
-// -func (b *BaseRecognizer) GetRuleIndexMap() map[string]int { - - panic("Method not defined!") - // var ruleNames = b.GetRuleNames() - // if (ruleNames==nil) { - // panic("The current recognizer does not provide a list of rule names.") - // } - // - // var result = ruleIndexMapCache[ruleNames] - // if(result==nil) { - // result = ruleNames.reduce(function(o, k, i) { o[k] = i }) - // ruleIndexMapCache[ruleNames] = result - // } - // return result -} - -func (b *BaseRecognizer) GetTokenType(tokenName string) int { - panic("Method not defined!") - // var ttype = b.GetTokenTypeMap()[tokenName] - // if (ttype !=nil) { - // return ttype - // } else { - // return TokenInvalidType - // } -} - -//func (b *Recognizer) GetTokenTypeMap() map[string]int { -// Vocabulary vocabulary = getVocabulary() -// -// Synchronized (tokenTypeMapCache) { -// Map result = tokenTypeMapCache.Get(vocabulary) -// if (result == null) { -// result = new HashMap() -// for (int i = 0; i < GetATN().maxTokenType; i++) { -// String literalName = vocabulary.getLiteralName(i) -// if (literalName != null) { -// result.put(literalName, i) -// } -// -// String symbolicName = vocabulary.GetSymbolicName(i) -// if (symbolicName != null) { -// result.put(symbolicName, i) -// } -// } -// -// result.put("EOF", Token.EOF) -// result = Collections.unmodifiableMap(result) -// tokenTypeMapCache.put(vocabulary, result) -// } -// -// return result -// } -//} - -// What is the error header, normally line/character position information?// -func (b *BaseRecognizer) GetErrorHeader(e RecognitionException) string { - line := e.GetOffendingToken().GetLine() - column := e.GetOffendingToken().GetColumn() - return "line " + strconv.Itoa(line) + ":" + strconv.Itoa(column) -} - -// How should a token be displayed in an error message? The default -// is to display just the text, but during development you might -// want to have a lot of information spit out. Override in that case -// to use t.String() (which, for CommonToken, dumps everything about -// the token). This is better than forcing you to override a method in -// your token objects because you don't have to go modify your lexer -// so that it creates a NewJava type. -// -// @deprecated This method is not called by the ANTLR 4 Runtime. Specific -// implementations of {@link ANTLRErrorStrategy} may provide a similar -// feature when necessary. For example, see -// {@link DefaultErrorStrategy//GetTokenErrorDisplay}. 
-// -func (b *BaseRecognizer) GetTokenErrorDisplay(t Token) string { - if t == nil { - return "" - } - s := t.GetText() - if s == "" { - if t.GetTokenType() == TokenEOF { - s = "" - } else { - s = "<" + strconv.Itoa(t.GetTokenType()) + ">" - } - } - s = strings.Replace(s, "\t", "\\t", -1) - s = strings.Replace(s, "\n", "\\n", -1) - s = strings.Replace(s, "\r", "\\r", -1) - - return "'" + s + "'" -} - -func (b *BaseRecognizer) GetErrorListenerDispatch() ErrorListener { - return NewProxyErrorListener(b.listeners) -} - -// subclass needs to override these if there are sempreds or actions -// that the ATN interp needs to execute -func (b *BaseRecognizer) Sempred(localctx RuleContext, ruleIndex int, actionIndex int) bool { - return true -} - -func (b *BaseRecognizer) Precpred(localctx RuleContext, precedence int) bool { - return true -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/rule_context.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/rule_context.go deleted file mode 100644 index 600cf8c0..00000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/rule_context.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -// A rule context is a record of a single rule invocation. It knows -// which context invoked it, if any. If there is no parent context, then -// naturally the invoking state is not valid. The parent link -// provides a chain upwards from the current rule invocation to the root -// of the invocation tree, forming a stack. We actually carry no -// information about the rule associated with b context (except -// when parsing). We keep only the state number of the invoking state from -// the ATN submachine that invoked b. Contrast b with the s -// pointer inside ParserRuleContext that tracks the current state -// being "executed" for the current rule. -// -// The parent contexts are useful for computing lookahead sets and -// getting error information. -// -// These objects are used during parsing and prediction. -// For the special case of parsers, we use the subclass -// ParserRuleContext. -// -// @see ParserRuleContext -// - -type RuleContext interface { - RuleNode - - GetInvokingState() int - SetInvokingState(int) - - GetRuleIndex() int - IsEmpty() bool - - GetAltNumber() int - SetAltNumber(altNumber int) - - String([]string, RuleContext) string -} - -type BaseRuleContext struct { - parentCtx RuleContext - invokingState int - RuleIndex int -} - -func NewBaseRuleContext(parent RuleContext, invokingState int) *BaseRuleContext { - - rn := new(BaseRuleContext) - - // What context invoked b rule? - rn.parentCtx = parent - - // What state invoked the rule associated with b context? - // The "return address" is the followState of invokingState - // If parent is nil, b should be -1. 
- if parent == nil { - rn.invokingState = -1 - } else { - rn.invokingState = invokingState - } - - return rn -} - -func (b *BaseRuleContext) GetBaseRuleContext() *BaseRuleContext { - return b -} - -func (b *BaseRuleContext) SetParent(v Tree) { - if v == nil { - b.parentCtx = nil - } else { - b.parentCtx = v.(RuleContext) - } -} - -func (b *BaseRuleContext) GetInvokingState() int { - return b.invokingState -} - -func (b *BaseRuleContext) SetInvokingState(t int) { - b.invokingState = t -} - -func (b *BaseRuleContext) GetRuleIndex() int { - return b.RuleIndex -} - -func (b *BaseRuleContext) GetAltNumber() int { - return ATNInvalidAltNumber -} - -func (b *BaseRuleContext) SetAltNumber(altNumber int) {} - -// A context is empty if there is no invoking state meaning nobody call -// current context. -func (b *BaseRuleContext) IsEmpty() bool { - return b.invokingState == -1 -} - -// Return the combined text of all child nodes. This method only considers -// tokens which have been added to the parse tree. -//

-// Since tokens on hidden channels (e.g. whitespace or comments) are not -// added to the parse trees, they will not appear in the output of b -// method. -// - -func (b *BaseRuleContext) GetParent() Tree { - return b.parentCtx -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/semantic_context.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/semantic_context.go deleted file mode 100644 index 49205a16..00000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/semantic_context.go +++ /dev/null @@ -1,455 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" - "strconv" -) - -// A tree structure used to record the semantic context in which -// an ATN configuration is valid. It's either a single predicate, -// a conjunction {@code p1&&p2}, or a sum of products {@code p1||p2}. -// -//

I have scoped the {@link AND}, {@link OR}, and {@link Predicate} subclasses of -// {@link SemanticContext} within the scope of this outer class.

-// - -type SemanticContext interface { - comparable - - evaluate(parser Recognizer, outerContext RuleContext) bool - evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext - - hash() int - String() string -} - -func SemanticContextandContext(a, b SemanticContext) SemanticContext { - if a == nil || a == SemanticContextNone { - return b - } - if b == nil || b == SemanticContextNone { - return a - } - result := NewAND(a, b) - if len(result.opnds) == 1 { - return result.opnds[0] - } - - return result -} - -func SemanticContextorContext(a, b SemanticContext) SemanticContext { - if a == nil { - return b - } - if b == nil { - return a - } - if a == SemanticContextNone || b == SemanticContextNone { - return SemanticContextNone - } - result := NewOR(a, b) - if len(result.opnds) == 1 { - return result.opnds[0] - } - - return result -} - -type Predicate struct { - ruleIndex int - predIndex int - isCtxDependent bool -} - -func NewPredicate(ruleIndex, predIndex int, isCtxDependent bool) *Predicate { - p := new(Predicate) - - p.ruleIndex = ruleIndex - p.predIndex = predIndex - p.isCtxDependent = isCtxDependent // e.g., $i ref in pred - return p -} - -//The default {@link SemanticContext}, which is semantically equivalent to -//a predicate of the form {@code {true}?}. - -var SemanticContextNone SemanticContext = NewPredicate(-1, -1, false) - -func (p *Predicate) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext { - return p -} - -func (p *Predicate) evaluate(parser Recognizer, outerContext RuleContext) bool { - - var localctx RuleContext - - if p.isCtxDependent { - localctx = outerContext - } - - return parser.Sempred(localctx, p.ruleIndex, p.predIndex) -} - -func (p *Predicate) equals(other interface{}) bool { - if p == other { - return true - } else if _, ok := other.(*Predicate); !ok { - return false - } else { - return p.ruleIndex == other.(*Predicate).ruleIndex && - p.predIndex == other.(*Predicate).predIndex && - p.isCtxDependent == other.(*Predicate).isCtxDependent - } -} - -func (p *Predicate) hash() int { - return p.ruleIndex*43 + p.predIndex*47 -} - -func (p *Predicate) String() string { - return "{" + strconv.Itoa(p.ruleIndex) + ":" + strconv.Itoa(p.predIndex) + "}?" -} - -type PrecedencePredicate struct { - precedence int -} - -func NewPrecedencePredicate(precedence int) *PrecedencePredicate { - - p := new(PrecedencePredicate) - p.precedence = precedence - - return p -} - -func (p *PrecedencePredicate) evaluate(parser Recognizer, outerContext RuleContext) bool { - return parser.Precpred(outerContext, p.precedence) -} - -func (p *PrecedencePredicate) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext { - if parser.Precpred(outerContext, p.precedence) { - return SemanticContextNone - } - - return nil -} - -func (p *PrecedencePredicate) compareTo(other *PrecedencePredicate) int { - return p.precedence - other.precedence -} - -func (p *PrecedencePredicate) equals(other interface{}) bool { - if p == other { - return true - } else if _, ok := other.(*PrecedencePredicate); !ok { - return false - } else { - return p.precedence == other.(*PrecedencePredicate).precedence - } -} - -func (p *PrecedencePredicate) hash() int { - return p.precedence * 51 -} - -func (p *PrecedencePredicate) String() string { - return "{" + strconv.Itoa(p.precedence) + ">=prec}?" 
-} - -func PrecedencePredicatefilterPrecedencePredicates(set *Set) []*PrecedencePredicate { - result := make([]*PrecedencePredicate, 0) - - for _, v := range set.values() { - if c2, ok := v.(*PrecedencePredicate); ok { - result = append(result, c2) - } - } - - return result -} - -// A semantic context which is true whenever none of the contained contexts -// is false.` - -type AND struct { - opnds []SemanticContext -} - -func NewAND(a, b SemanticContext) *AND { - - operands := NewSet(nil, nil) - if aa, ok := a.(*AND); ok { - for _, o := range aa.opnds { - operands.add(o) - } - } else { - operands.add(a) - } - - if ba, ok := b.(*AND); ok { - for _, o := range ba.opnds { - operands.add(o) - } - } else { - operands.add(b) - } - precedencePredicates := PrecedencePredicatefilterPrecedencePredicates(operands) - if len(precedencePredicates) > 0 { - // interested in the transition with the lowest precedence - var reduced *PrecedencePredicate - - for _, p := range precedencePredicates { - if reduced == nil || p.precedence < reduced.precedence { - reduced = p - } - } - - operands.add(reduced) - } - - vs := operands.values() - opnds := make([]SemanticContext, len(vs)) - for i, v := range vs { - opnds[i] = v.(SemanticContext) - } - - and := new(AND) - and.opnds = opnds - - return and -} - -func (a *AND) equals(other interface{}) bool { - if a == other { - return true - } else if _, ok := other.(*AND); !ok { - return false - } else { - for i, v := range other.(*AND).opnds { - if !a.opnds[i].equals(v) { - return false - } - } - return true - } -} - -// -// {@inheritDoc} -// -//

-// The evaluation of predicates by a context is short-circuiting, but -// unordered.

-// -func (a *AND) evaluate(parser Recognizer, outerContext RuleContext) bool { - for i := 0; i < len(a.opnds); i++ { - if !a.opnds[i].evaluate(parser, outerContext) { - return false - } - } - return true -} - -func (a *AND) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext { - differs := false - operands := make([]SemanticContext, 0) - - for i := 0; i < len(a.opnds); i++ { - context := a.opnds[i] - evaluated := context.evalPrecedence(parser, outerContext) - differs = differs || (evaluated != context) - if evaluated == nil { - // The AND context is false if any element is false - return nil - } else if evaluated != SemanticContextNone { - // Reduce the result by Skipping true elements - operands = append(operands, evaluated) - } - } - if !differs { - return a - } - - if len(operands) == 0 { - // all elements were true, so the AND context is true - return SemanticContextNone - } - - var result SemanticContext - - for _, o := range operands { - if result == nil { - result = o - } else { - result = SemanticContextandContext(result, o) - } - } - - return result -} - -func (a *AND) hash() int { - h := murmurInit(37) // Init with a value different from OR - for _, op := range a.opnds { - h = murmurUpdate(h, op.hash()) - } - return murmurFinish(h, len(a.opnds)) -} - -func (a *OR) hash() int { - h := murmurInit(41) // Init with a value different from AND - for _, op := range a.opnds { - h = murmurUpdate(h, op.hash()) - } - return murmurFinish(h, len(a.opnds)) -} - -func (a *AND) String() string { - s := "" - - for _, o := range a.opnds { - s += "&& " + fmt.Sprint(o) - } - - if len(s) > 3 { - return s[0:3] - } - - return s -} - -// -// A semantic context which is true whenever at least one of the contained -// contexts is true. -// - -type OR struct { - opnds []SemanticContext -} - -func NewOR(a, b SemanticContext) *OR { - - operands := NewSet(nil, nil) - if aa, ok := a.(*OR); ok { - for _, o := range aa.opnds { - operands.add(o) - } - } else { - operands.add(a) - } - - if ba, ok := b.(*OR); ok { - for _, o := range ba.opnds { - operands.add(o) - } - } else { - operands.add(b) - } - precedencePredicates := PrecedencePredicatefilterPrecedencePredicates(operands) - if len(precedencePredicates) > 0 { - // interested in the transition with the lowest precedence - var reduced *PrecedencePredicate - - for _, p := range precedencePredicates { - if reduced == nil || p.precedence > reduced.precedence { - reduced = p - } - } - - operands.add(reduced) - } - - vs := operands.values() - - opnds := make([]SemanticContext, len(vs)) - for i, v := range vs { - opnds[i] = v.(SemanticContext) - } - - o := new(OR) - o.opnds = opnds - - return o -} - -func (o *OR) equals(other interface{}) bool { - if o == other { - return true - } else if _, ok := other.(*OR); !ok { - return false - } else { - for i, v := range other.(*OR).opnds { - if !o.opnds[i].equals(v) { - return false - } - } - return true - } -} - -//

-// The evaluation of predicates by o context is short-circuiting, but -// unordered.

-// -func (o *OR) evaluate(parser Recognizer, outerContext RuleContext) bool { - for i := 0; i < len(o.opnds); i++ { - if o.opnds[i].evaluate(parser, outerContext) { - return true - } - } - return false -} - -func (o *OR) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext { - differs := false - operands := make([]SemanticContext, 0) - for i := 0; i < len(o.opnds); i++ { - context := o.opnds[i] - evaluated := context.evalPrecedence(parser, outerContext) - differs = differs || (evaluated != context) - if evaluated == SemanticContextNone { - // The OR context is true if any element is true - return SemanticContextNone - } else if evaluated != nil { - // Reduce the result by Skipping false elements - operands = append(operands, evaluated) - } - } - if !differs { - return o - } - if len(operands) == 0 { - // all elements were false, so the OR context is false - return nil - } - var result SemanticContext - - for _, o := range operands { - if result == nil { - result = o - } else { - result = SemanticContextorContext(result, o) - } - } - - return result -} - -func (o *OR) String() string { - s := "" - - for _, o := range o.opnds { - s += "|| " + fmt.Sprint(o) - } - - if len(s) > 3 { - return s[0:3] - } - - return s -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token.go deleted file mode 100644 index 2d8e9909..00000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token.go +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "strconv" - "strings" -) - -type TokenSourceCharStreamPair struct { - tokenSource TokenSource - charStream CharStream -} - -// A token has properties: text, type, line, character position in the line -// (so we can ignore tabs), token channel, index, and source from which -// we obtained this token. - -type Token interface { - GetSource() *TokenSourceCharStreamPair - GetTokenType() int - GetChannel() int - GetStart() int - GetStop() int - GetLine() int - GetColumn() int - - GetText() string - SetText(s string) - - GetTokenIndex() int - SetTokenIndex(v int) - - GetTokenSource() TokenSource - GetInputStream() CharStream -} - -type BaseToken struct { - source *TokenSourceCharStreamPair - tokenType int // token type of the token - channel int // The parser ignores everything not on DEFAULT_CHANNEL - start int // optional return -1 if not implemented. - stop int // optional return -1 if not implemented. - tokenIndex int // from 0..n-1 of the token object in the input stream - line int // line=1..n of the 1st character - column int // beginning of the line at which it occurs, 0..n-1 - text string // text of the token. - readOnly bool -} - -const ( - TokenInvalidType = 0 - - // During lookahead operations, this "token" signifies we hit rule end ATN state - // and did not follow it despite needing to. - TokenEpsilon = -2 - - TokenMinUserTokenType = 1 - - TokenEOF = -1 - - // All tokens go to the parser (unless Skip() is called in that rule) - // on a particular "channel". The parser tunes to a particular channel - // so that whitespace etc... can go to the parser on a "hidden" channel. - - TokenDefaultChannel = 0 - - // Anything on different channel than DEFAULT_CHANNEL is not parsed - // by parser. 
- - TokenHiddenChannel = 1 -) - -func (b *BaseToken) GetChannel() int { - return b.channel -} - -func (b *BaseToken) GetStart() int { - return b.start -} - -func (b *BaseToken) GetStop() int { - return b.stop -} - -func (b *BaseToken) GetLine() int { - return b.line -} - -func (b *BaseToken) GetColumn() int { - return b.column -} - -func (b *BaseToken) GetTokenType() int { - return b.tokenType -} - -func (b *BaseToken) GetSource() *TokenSourceCharStreamPair { - return b.source -} - -func (b *BaseToken) GetTokenIndex() int { - return b.tokenIndex -} - -func (b *BaseToken) SetTokenIndex(v int) { - b.tokenIndex = v -} - -func (b *BaseToken) GetTokenSource() TokenSource { - return b.source.tokenSource -} - -func (b *BaseToken) GetInputStream() CharStream { - return b.source.charStream -} - -type CommonToken struct { - *BaseToken -} - -func NewCommonToken(source *TokenSourceCharStreamPair, tokenType, channel, start, stop int) *CommonToken { - - t := new(CommonToken) - - t.BaseToken = new(BaseToken) - - t.source = source - t.tokenType = tokenType - t.channel = channel - t.start = start - t.stop = stop - t.tokenIndex = -1 - if t.source.tokenSource != nil { - t.line = source.tokenSource.GetLine() - t.column = source.tokenSource.GetCharPositionInLine() - } else { - t.column = -1 - } - return t -} - -// An empty {@link Pair} which is used as the default value of -// {@link //source} for tokens that do not have a source. - -//CommonToken.EMPTY_SOURCE = [ nil, nil ] - -// Constructs a New{@link CommonToken} as a copy of another {@link Token}. -// -//

-// If {@code oldToken} is also a {@link CommonToken} instance, the newly -// constructed token will share a reference to the {@link //text} field and -// the {@link Pair} stored in {@link //source}. Otherwise, {@link //text} will -// be assigned the result of calling {@link //GetText}, and {@link //source} -// will be constructed from the result of {@link Token//GetTokenSource} and -// {@link Token//GetInputStream}.

-// -// @param oldToken The token to copy. -// -func (c *CommonToken) clone() *CommonToken { - t := NewCommonToken(c.source, c.tokenType, c.channel, c.start, c.stop) - t.tokenIndex = c.GetTokenIndex() - t.line = c.GetLine() - t.column = c.GetColumn() - t.text = c.GetText() - return t -} - -func (c *CommonToken) GetText() string { - if c.text != "" { - return c.text - } - input := c.GetInputStream() - if input == nil { - return "" - } - n := input.Size() - if c.start < n && c.stop < n { - return input.GetTextFromInterval(NewInterval(c.start, c.stop)) - } - return "" -} - -func (c *CommonToken) SetText(text string) { - c.text = text -} - -func (c *CommonToken) String() string { - txt := c.GetText() - if txt != "" { - txt = strings.Replace(txt, "\n", "\\n", -1) - txt = strings.Replace(txt, "\r", "\\r", -1) - txt = strings.Replace(txt, "\t", "\\t", -1) - } else { - txt = "" - } - - var ch string - if c.channel > 0 { - ch = ",channel=" + strconv.Itoa(c.channel) - } else { - ch = "" - } - - return "[@" + strconv.Itoa(c.tokenIndex) + "," + strconv.Itoa(c.start) + ":" + strconv.Itoa(c.stop) + "='" + - txt + "',<" + strconv.Itoa(c.tokenType) + ">" + - ch + "," + strconv.Itoa(c.line) + ":" + strconv.Itoa(c.column) + "]" -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_source.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_source.go deleted file mode 100644 index e023978f..00000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_source.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -type TokenSource interface { - NextToken() Token - Skip() - More() - GetLine() int - GetCharPositionInLine() int - GetInputStream() CharStream - GetSourceName() string - setTokenFactory(factory TokenFactory) - GetTokenFactory() TokenFactory -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_stream.go deleted file mode 100644 index df92c814..00000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_stream.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -type TokenStream interface { - IntStream - - LT(k int) Token - - Get(index int) Token - GetTokenSource() TokenSource - SetTokenSource(TokenSource) - - GetAllText() string - GetTextFromInterval(*Interval) string - GetTextFromRuleContext(RuleContext) string - GetTextFromTokens(Token, Token) string -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/tokenstream_rewriter.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/tokenstream_rewriter.go deleted file mode 100644 index 96a03f02..00000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/tokenstream_rewriter.go +++ /dev/null @@ -1,649 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. -package antlr - -import ( -"bytes" -"fmt" -) - - -// -// Useful for rewriting out a buffered input token stream after doing some -// augmentation or other manipulations on it. - -//

-// You can insert stuff, replace, and delete chunks. Note that the operations -// are done lazily--only if you convert the buffer to a {@link String} with -// {@link TokenStream#getText()}. This is very efficient because you are not -// moving data around all the time. As the buffer of tokens is converted to -// strings, the {@link #getText()} method(s) scan the input token stream and -// check to see if there is an operation at the current index. If so, the -// operation is done and then normal {@link String} rendering continues on the -// buffer. This is like having multiple Turing machine instruction streams -// (programs) operating on a single input tape. :)

-//

- -// This rewriter makes no modifications to the token stream. It does not ask the -// stream to fill itself up nor does it advance the input cursor. The token -// stream {@link TokenStream#index()} will return the same value before and -// after any {@link #getText()} call.

- -//

-// The rewriter only works on tokens that you have in the buffer and ignores the -// current input cursor. If you are buffering tokens on-demand, calling -// {@link #getText()} halfway through the input will only do rewrites for those -// tokens in the first half of the file.

- -//

-// Since the operations are done lazily at {@link #getText}-time, operations do -// not screw up the token index values. That is, an insert operation at token -// index {@code i} does not change the index values for tokens -// {@code i}+1..n-1.

- -//

-// Because operations never actually alter the buffer, you may always get the -// original token stream back without undoing anything. Since the instructions -// are queued up, you can easily simulate transactions and roll back any changes -// if there is an error just by removing instructions. For example,

- -//
-// CharStream input = new ANTLRFileStream("input");
-// TLexer lex = new TLexer(input);
-// CommonTokenStream tokens = new CommonTokenStream(lex);
-// T parser = new T(tokens);
-// TokenStreamRewriter rewriter = new TokenStreamRewriter(tokens);
-// parser.startRule();
-// 
- -//

-// Then in the rules, you can execute (assuming rewriter is visible):

- -//
-// Token t,u;
-// ...
-// rewriter.insertAfter(t, "text to put after t");}
-// rewriter.insertAfter(u, "text after u");}
-// System.out.println(rewriter.getText());
-// 
- -//

-// You can also have multiple "instruction streams" and get multiple rewrites -// from a single pass over the input. Just name the instruction streams and use -// that name again when printing the buffer. This could be useful for generating -// a C file and also its header file--all from the same buffer:

- -//
-// rewriter.insertAfter("pass1", t, "text to put after t");}
-// rewriter.insertAfter("pass2", u, "text after u");}
-// System.out.println(rewriter.getText("pass1"));
-// System.out.println(rewriter.getText("pass2"));
-// 
- -//

-// If you don't use named rewrite streams, a "default" stream is used as the -// first example shows.

- - - -const( - Default_Program_Name = "default" - Program_Init_Size = 100 - Min_Token_Index = 0 -) - -// Define the rewrite operation hierarchy - -type RewriteOperation interface { - // Execute the rewrite operation by possibly adding to the buffer. - // Return the index of the next token to operate on. - Execute(buffer *bytes.Buffer) int - String() string - GetInstructionIndex() int - GetIndex() int - GetText() string - GetOpName() string - GetTokens() TokenStream - SetInstructionIndex(val int) - SetIndex(int) - SetText(string) - SetOpName(string) - SetTokens(TokenStream) -} - -type BaseRewriteOperation struct { - //Current index of rewrites list - instruction_index int - //Token buffer index - index int - //Substitution text - text string - //Actual operation name - op_name string - //Pointer to token steam - tokens TokenStream -} - -func (op *BaseRewriteOperation)GetInstructionIndex() int{ - return op.instruction_index -} - -func (op *BaseRewriteOperation)GetIndex() int{ - return op.index -} - -func (op *BaseRewriteOperation)GetText() string{ - return op.text -} - -func (op *BaseRewriteOperation)GetOpName() string{ - return op.op_name -} - -func (op *BaseRewriteOperation)GetTokens() TokenStream{ - return op.tokens -} - -func (op *BaseRewriteOperation)SetInstructionIndex(val int){ - op.instruction_index = val -} - -func (op *BaseRewriteOperation)SetIndex(val int) { - op.index = val -} - -func (op *BaseRewriteOperation)SetText(val string){ - op.text = val -} - -func (op *BaseRewriteOperation)SetOpName(val string){ - op.op_name = val -} - -func (op *BaseRewriteOperation)SetTokens(val TokenStream) { - op.tokens = val -} - - -func (op *BaseRewriteOperation) Execute(buffer *bytes.Buffer) int{ - return op.index -} - -func (op *BaseRewriteOperation) String() string { - return fmt.Sprintf("<%s@%d:\"%s\">", - op.op_name, - op.tokens.Get(op.GetIndex()), - op.text, - ) - -} - - -type InsertBeforeOp struct { - BaseRewriteOperation -} - -func NewInsertBeforeOp(index int, text string, stream TokenStream) *InsertBeforeOp{ - return &InsertBeforeOp{BaseRewriteOperation:BaseRewriteOperation{ - index:index, - text:text, - op_name:"InsertBeforeOp", - tokens:stream, - }} -} - -func (op *InsertBeforeOp) Execute(buffer *bytes.Buffer) int{ - buffer.WriteString(op.text) - if op.tokens.Get(op.index).GetTokenType() != TokenEOF{ - buffer.WriteString(op.tokens.Get(op.index).GetText()) - } - return op.index+1 -} - -func (op *InsertBeforeOp) String() string { - return op.BaseRewriteOperation.String() -} - -// Distinguish between insert after/before to do the "insert afters" -// first and then the "insert befores" at same index. Implementation -// of "insert after" is "insert before index+1". - -type InsertAfterOp struct { - BaseRewriteOperation -} - -func NewInsertAfterOp(index int, text string, stream TokenStream) *InsertAfterOp{ - return &InsertAfterOp{BaseRewriteOperation:BaseRewriteOperation{ - index:index+1, - text:text, - tokens:stream, - }} -} - -func (op *InsertAfterOp) Execute(buffer *bytes.Buffer) int { - buffer.WriteString(op.text) - if op.tokens.Get(op.index).GetTokenType() != TokenEOF{ - buffer.WriteString(op.tokens.Get(op.index).GetText()) - } - return op.index+1 -} - -func (op *InsertAfterOp) String() string { - return op.BaseRewriteOperation.String() -} - -// I'm going to try replacing range from x..y with (y-x)+1 ReplaceOp -// instructions. 
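A usage note on the design described above: deletion is just replacement with empty text, so both calls below queue the same kind of ReplaceOp (sketch, reusing the rewriter from the previous example):

rewriter.DeleteDefault(3, 5)      // queues a ReplaceOp covering tokens 3..5 with text ""
rewriter.ReplaceDefault(3, 5, "") // equivalent: Delete delegates to Replace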
-type ReplaceOp struct{ - BaseRewriteOperation - LastIndex int -} - -func NewReplaceOp(from, to int, text string, stream TokenStream)*ReplaceOp { - return &ReplaceOp{ - BaseRewriteOperation:BaseRewriteOperation{ - index:from, - text:text, - op_name:"ReplaceOp", - tokens:stream, - }, - LastIndex:to, - } -} - -func (op *ReplaceOp)Execute(buffer *bytes.Buffer) int{ - if op.text != ""{ - buffer.WriteString(op.text) - } - return op.LastIndex +1 -} - -func (op *ReplaceOp) String() string { - if op.text == "" { - return fmt.Sprintf("", - op.tokens.Get(op.index), op.tokens.Get(op.LastIndex)) - } - return fmt.Sprintf("", - op.tokens.Get(op.index), op.tokens.Get(op.LastIndex), op.text) -} - - -type TokenStreamRewriter struct { - //Our source stream - tokens TokenStream - // You may have multiple, named streams of rewrite operations. - // I'm calling these things "programs." - // Maps String (name) → rewrite (List) - programs map[string][]RewriteOperation - last_rewrite_token_indexes map[string]int -} - -func NewTokenStreamRewriter(tokens TokenStream) *TokenStreamRewriter{ - return &TokenStreamRewriter{ - tokens: tokens, - programs: map[string][]RewriteOperation{ - Default_Program_Name:make([]RewriteOperation,0, Program_Init_Size), - }, - last_rewrite_token_indexes: map[string]int{}, - } -} - -func (tsr *TokenStreamRewriter) GetTokenStream() TokenStream{ - return tsr.tokens -} - -// Rollback the instruction stream for a program so that -// the indicated instruction (via instructionIndex) is no -// longer in the stream. UNTESTED! -func (tsr *TokenStreamRewriter) Rollback(program_name string, instruction_index int){ - is, ok := tsr.programs[program_name] - if ok{ - tsr.programs[program_name] = is[Min_Token_Index:instruction_index] - } -} - -func (tsr *TokenStreamRewriter) RollbackDefault(instruction_index int){ - tsr.Rollback(Default_Program_Name, instruction_index) -} -//Reset the program so that no instructions exist -func (tsr *TokenStreamRewriter) DeleteProgram(program_name string){ - tsr.Rollback(program_name, Min_Token_Index) //TODO: double test on that cause lower bound is not included -} - -func (tsr *TokenStreamRewriter) DeleteProgramDefault(){ - tsr.DeleteProgram(Default_Program_Name) -} - -func (tsr *TokenStreamRewriter) InsertAfter(program_name string, index int, text string){ - // to insert after, just insert before next index (even if past end) - var op RewriteOperation = NewInsertAfterOp(index, text, tsr.tokens) - rewrites := tsr.GetProgram(program_name) - op.SetInstructionIndex(len(rewrites)) - tsr.AddToProgram(program_name, op) -} - -func (tsr *TokenStreamRewriter) InsertAfterDefault(index int, text string){ - tsr.InsertAfter(Default_Program_Name, index, text) -} - -func (tsr *TokenStreamRewriter) InsertAfterToken(program_name string, token Token, text string){ - tsr.InsertAfter(program_name, token.GetTokenIndex(), text) -} - -func (tsr* TokenStreamRewriter) InsertBefore(program_name string, index int, text string){ - var op RewriteOperation = NewInsertBeforeOp(index, text, tsr.tokens) - rewrites := tsr.GetProgram(program_name) - op.SetInstructionIndex(len(rewrites)) - tsr.AddToProgram(program_name, op) -} - -func (tsr *TokenStreamRewriter) InsertBeforeDefault(index int, text string){ - tsr.InsertBefore(Default_Program_Name, index, text) -} - -func (tsr *TokenStreamRewriter) InsertBeforeToken(program_name string,token Token, text string){ - tsr.InsertBefore(program_name, token.GetTokenIndex(), text) -} - -func (tsr *TokenStreamRewriter) Replace(program_name string, from, to int, text 
string){ - if from > to || from < 0 || to < 0 || to >= tsr.tokens.Size(){ - panic(fmt.Sprintf("replace: range invalid: %d..%d(size=%d)", - from, to, tsr.tokens.Size())) - } - var op RewriteOperation = NewReplaceOp(from, to, text, tsr.tokens) - rewrites := tsr.GetProgram(program_name) - op.SetInstructionIndex(len(rewrites)) - tsr.AddToProgram(program_name, op) -} - -func (tsr *TokenStreamRewriter)ReplaceDefault(from, to int, text string) { - tsr.Replace(Default_Program_Name, from, to, text) -} - -func (tsr *TokenStreamRewriter)ReplaceDefaultPos(index int, text string){ - tsr.ReplaceDefault(index, index, text) -} - -func (tsr *TokenStreamRewriter)ReplaceToken(program_name string, from, to Token, text string){ - tsr.Replace(program_name, from.GetTokenIndex(), to.GetTokenIndex(), text) -} - -func (tsr *TokenStreamRewriter)ReplaceTokenDefault(from, to Token, text string){ - tsr.ReplaceToken(Default_Program_Name, from, to, text) -} - -func (tsr *TokenStreamRewriter)ReplaceTokenDefaultPos(index Token, text string){ - tsr.ReplaceTokenDefault(index, index, text) -} - -func (tsr *TokenStreamRewriter)Delete(program_name string, from, to int){ - tsr.Replace(program_name, from, to, "" ) -} - -func (tsr *TokenStreamRewriter)DeleteDefault(from, to int){ - tsr.Delete(Default_Program_Name, from, to) -} - -func (tsr *TokenStreamRewriter)DeleteDefaultPos(index int){ - tsr.DeleteDefault(index,index) -} - -func (tsr *TokenStreamRewriter)DeleteToken(program_name string, from, to Token) { - tsr.ReplaceToken(program_name, from, to, "") -} - -func (tsr *TokenStreamRewriter)DeleteTokenDefault(from,to Token){ - tsr.DeleteToken(Default_Program_Name, from, to) -} - -func (tsr *TokenStreamRewriter)GetLastRewriteTokenIndex(program_name string)int { - i, ok := tsr.last_rewrite_token_indexes[program_name] - if !ok{ - return -1 - } - return i -} - -func (tsr *TokenStreamRewriter)GetLastRewriteTokenIndexDefault()int{ - return tsr.GetLastRewriteTokenIndex(Default_Program_Name) -} - -func (tsr *TokenStreamRewriter)SetLastRewriteTokenIndex(program_name string, i int){ - tsr.last_rewrite_token_indexes[program_name] = i -} - -func (tsr *TokenStreamRewriter)InitializeProgram(name string)[]RewriteOperation{ - is := make([]RewriteOperation, 0, Program_Init_Size) - tsr.programs[name] = is - return is -} - -func (tsr *TokenStreamRewriter)AddToProgram(name string, op RewriteOperation){ - is := tsr.GetProgram(name) - is = append(is, op) - tsr.programs[name] = is -} - -func (tsr *TokenStreamRewriter)GetProgram(name string) []RewriteOperation { - is, ok := tsr.programs[name] - if !ok{ - is = tsr.InitializeProgram(name) - } - return is -} -// Return the text from the original tokens altered per the -// instructions given to this rewriter. -func (tsr *TokenStreamRewriter)GetTextDefault() string{ - return tsr.GetText( - Default_Program_Name, - NewInterval(0, tsr.tokens.Size()-1)) -} -// Return the text from the original tokens altered per the -// instructions given to this rewriter. 
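A sketch of rendering only part of the rewritten stream with the function below (the program name and interval bounds are illustrative):

// Tokens 0..9 of the "default" program; GetText clamps out-of-range
// bounds, and tokens without queued operations pass through verbatim.
text := rewriter.GetText(antlr.Default_Program_Name, antlr.NewInterval(0, 9))
_ = text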
-func (tsr *TokenStreamRewriter)GetText(program_name string, interval *Interval) string { - rewrites := tsr.programs[program_name] - start := interval.Start - stop := interval.Stop - // ensure start/end are in range - stop = min(stop, tsr.tokens.Size()-1) - start = max(start,0) - if rewrites == nil || len(rewrites) == 0{ - return tsr.tokens.GetTextFromInterval(interval) // no instructions to execute - } - buf := bytes.Buffer{} - // First, optimize instruction stream - indexToOp := reduceToSingleOperationPerIndex(rewrites) - // Walk buffer, executing instructions and emitting tokens - for i:=start; i<=stop && i= tsr.tokens.Size()-1 {buf.WriteString(op.GetText())} - } - } - return buf.String() -} - -// We need to combine operations and report invalid operations (like -// overlapping replaces that are not completed nested). Inserts to -// same index need to be combined etc... Here are the cases: -// -// I.i.u I.j.v leave alone, nonoverlapping -// I.i.u I.i.v combine: Iivu -// -// R.i-j.u R.x-y.v | i-j in x-y delete first R -// R.i-j.u R.i-j.v delete first R -// R.i-j.u R.x-y.v | x-y in i-j ERROR -// R.i-j.u R.x-y.v | boundaries overlap ERROR -// -// Delete special case of replace (text==null): -// D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right) -// -// I.i.u R.x-y.v | i in (x+1)-y delete I (since insert before -// we're not deleting i) -// I.i.u R.x-y.v | i not in (x+1)-y leave alone, nonoverlapping -// R.x-y.v I.i.u | i in x-y ERROR -// R.x-y.v I.x.u R.x-y.uv (combine, delete I) -// R.x-y.v I.i.u | i not in x-y leave alone, nonoverlapping -// -// I.i.u = insert u before op @ index i -// R.x-y.u = replace x-y indexed tokens with u -// -// First we need to examine replaces. For any replace op: -// -// 1. wipe out any insertions before op within that range. -// 2. Drop any replace op before that is contained completely within -// that range. -// 3. Throw exception upon boundary overlap with any previous replace. -// -// Then we can deal with inserts: -// -// 1. for any inserts to same index, combine even if not adjacent. -// 2. for any prior replace with same left boundary, combine this -// insert with replace and delete this replace. -// 3. throw exception if index in same range as previous replace -// -// Don't actually delete; make op null in list. Easier to walk list. -// Later we can throw as we add to index → op map. -// -// Note that I.2 R.2-2 will wipe out I.2 even though, technically, the -// inserted stuff would be before the replace range. But, if you -// add tokens in front of a method body '{' and then delete the method -// body, I think the stuff before the '{' you added should disappear too. -// -// Return a map from token index to operation. -// -func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]RewriteOperation{ - // WALK REPLACES - for i:=0; i < len(rewrites); i++{ - op := rewrites[i] - if op == nil{continue} - rop, ok := op.(*ReplaceOp) - if !ok{continue} - // Wipe prior inserts within range - for j:=0; j rop.index && iop.index <=rop.LastIndex{ - // delete insert as it's a no-op. - rewrites[iop.instruction_index] = nil - } - } - } - // Drop any prior replaces contained within - for j:=0; j=rop.index && prevop.LastIndex <= rop.LastIndex{ - // delete replace as it's a no-op. 
- rewrites[prevop.instruction_index] = nil - continue - } - // throw exception unless disjoint or identical - disjoint := prevop.LastIndex < rop.index || prevop.index > rop.LastIndex - // Delete special case of replace (text==null): - // D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right) - if prevop.text == "" && rop.text == "" && !disjoint{ - rewrites[prevop.instruction_index] = nil - rop.index = min(prevop.index, rop.index) - rop.LastIndex = max(prevop.LastIndex, rop.LastIndex) - println("new rop" + rop.String()) //TODO: remove console write, taken from Java version - }else if !disjoint{ - panic("replace op boundaries of " + rop.String() + " overlap with previous " + prevop.String()) - } - } - } - } - // WALK INSERTS - for i:=0; i < len(rewrites); i++ { - op := rewrites[i] - if op == nil{continue} - //hack to replicate inheritance in composition - _, iok := rewrites[i].(*InsertBeforeOp) - _, aok := rewrites[i].(*InsertAfterOp) - if !iok && !aok{continue} - iop := rewrites[i] - // combine current insert with prior if any at same index - // deviating a bit from TokenStreamRewriter.java - hard to incorporate inheritance logic - for j:=0; j= rop.index && iop.GetIndex() <= rop.LastIndex{ - panic("insert op "+iop.String()+" within boundaries of previous "+rop.String()) - } - } - } - } - m := map[int]RewriteOperation{} - for i:=0; i < len(rewrites); i++{ - op := rewrites[i] - if op == nil {continue} - if _, ok := m[op.GetIndex()]; ok{ - panic("should only be one op per index") - } - m[op.GetIndex()] = op - } - return m -} - - -/* - Quick fixing Go lack of overloads - */ - -func max(a,b int)int{ - if a>b{ - return a - }else { - return b - } -} -func min(a,b int)int{ - if aThis is a one way link. It emanates from a state (usually via a list of -// transitions) and has a target state.

-// -//

Since we never have to change the ATN transitions once we construct it, -// the states. We'll use the term Edge for the DFA to distinguish them from -// ATN transitions.

- -type Transition interface { - getTarget() ATNState - setTarget(ATNState) - getIsEpsilon() bool - getLabel() *IntervalSet - getSerializationType() int - Matches(int, int, int) bool -} - -type BaseTransition struct { - target ATNState - isEpsilon bool - label int - intervalSet *IntervalSet - serializationType int -} - -func NewBaseTransition(target ATNState) *BaseTransition { - - if target == nil { - panic("target cannot be nil.") - } - - t := new(BaseTransition) - - t.target = target - // Are we epsilon, action, sempred? - t.isEpsilon = false - t.intervalSet = nil - - return t -} - -func (t *BaseTransition) getTarget() ATNState { - return t.target -} - -func (t *BaseTransition) setTarget(s ATNState) { - t.target = s -} - -func (t *BaseTransition) getIsEpsilon() bool { - return t.isEpsilon -} - -func (t *BaseTransition) getLabel() *IntervalSet { - return t.intervalSet -} - -func (t *BaseTransition) getSerializationType() int { - return t.serializationType -} - -func (t *BaseTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - panic("Not implemented") -} - -const ( - TransitionEPSILON = 1 - TransitionRANGE = 2 - TransitionRULE = 3 - TransitionPREDICATE = 4 // e.g., {isType(input.LT(1))}? - TransitionATOM = 5 - TransitionACTION = 6 - TransitionSET = 7 // ~(A|B) or ~atom, wildcard, which convert to next 2 - TransitionNOTSET = 8 - TransitionWILDCARD = 9 - TransitionPRECEDENCE = 10 -) - -var TransitionserializationNames = []string{ - "INVALID", - "EPSILON", - "RANGE", - "RULE", - "PREDICATE", - "ATOM", - "ACTION", - "SET", - "NOT_SET", - "WILDCARD", - "PRECEDENCE", -} - -//var TransitionserializationTypes struct { -// EpsilonTransition int -// RangeTransition int -// RuleTransition int -// PredicateTransition int -// AtomTransition int -// ActionTransition int -// SetTransition int -// NotSetTransition int -// WildcardTransition int -// PrecedencePredicateTransition int -//}{ -// TransitionEPSILON, -// TransitionRANGE, -// TransitionRULE, -// TransitionPREDICATE, -// TransitionATOM, -// TransitionACTION, -// TransitionSET, -// TransitionNOTSET, -// TransitionWILDCARD, -// TransitionPRECEDENCE -//} - -// TODO: make all transitions sets? no, should remove set edges -type AtomTransition struct { - *BaseTransition -} - -func NewAtomTransition(target ATNState, intervalSet int) *AtomTransition { - - t := new(AtomTransition) - t.BaseTransition = NewBaseTransition(target) - - t.label = intervalSet // The token type or character value or, signifies special intervalSet. 
- t.intervalSet = t.makeLabel() - t.serializationType = TransitionATOM - - return t -} - -func (t *AtomTransition) makeLabel() *IntervalSet { - s := NewIntervalSet() - s.addOne(t.label) - return s -} - -func (t *AtomTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return t.label == symbol -} - -func (t *AtomTransition) String() string { - return strconv.Itoa(t.label) -} - -type RuleTransition struct { - *BaseTransition - - followState ATNState - ruleIndex, precedence int -} - -func NewRuleTransition(ruleStart ATNState, ruleIndex, precedence int, followState ATNState) *RuleTransition { - - t := new(RuleTransition) - t.BaseTransition = NewBaseTransition(ruleStart) - - t.ruleIndex = ruleIndex - t.precedence = precedence - t.followState = followState - t.serializationType = TransitionRULE - t.isEpsilon = true - - return t -} - -func (t *RuleTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return false -} - -type EpsilonTransition struct { - *BaseTransition - - outermostPrecedenceReturn int -} - -func NewEpsilonTransition(target ATNState, outermostPrecedenceReturn int) *EpsilonTransition { - - t := new(EpsilonTransition) - t.BaseTransition = NewBaseTransition(target) - - t.serializationType = TransitionEPSILON - t.isEpsilon = true - t.outermostPrecedenceReturn = outermostPrecedenceReturn - return t -} - -func (t *EpsilonTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return false -} - -func (t *EpsilonTransition) String() string { - return "epsilon" -} - -type RangeTransition struct { - *BaseTransition - - start, stop int -} - -func NewRangeTransition(target ATNState, start, stop int) *RangeTransition { - - t := new(RangeTransition) - t.BaseTransition = NewBaseTransition(target) - - t.serializationType = TransitionRANGE - t.start = start - t.stop = stop - t.intervalSet = t.makeLabel() - return t -} - -func (t *RangeTransition) makeLabel() *IntervalSet { - s := NewIntervalSet() - s.addRange(t.start, t.stop) - return s -} - -func (t *RangeTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return symbol >= t.start && symbol <= t.stop -} - -func (t *RangeTransition) String() string { - return "'" + string(t.start) + "'..'" + string(t.stop) + "'" -} - -type AbstractPredicateTransition interface { - Transition - IAbstractPredicateTransitionFoo() -} - -type BaseAbstractPredicateTransition struct { - *BaseTransition -} - -func NewBasePredicateTransition(target ATNState) *BaseAbstractPredicateTransition { - - t := new(BaseAbstractPredicateTransition) - t.BaseTransition = NewBaseTransition(target) - - return t -} - -func (a *BaseAbstractPredicateTransition) IAbstractPredicateTransitionFoo() {} - -type PredicateTransition struct { - *BaseAbstractPredicateTransition - - isCtxDependent bool - ruleIndex, predIndex int -} - -func NewPredicateTransition(target ATNState, ruleIndex, predIndex int, isCtxDependent bool) *PredicateTransition { - - t := new(PredicateTransition) - t.BaseAbstractPredicateTransition = NewBasePredicateTransition(target) - - t.serializationType = TransitionPREDICATE - t.ruleIndex = ruleIndex - t.predIndex = predIndex - t.isCtxDependent = isCtxDependent // e.g., $i ref in pred - t.isEpsilon = true - return t -} - -func (t *PredicateTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return false -} - -func (t *PredicateTransition) getPredicate() *Predicate { - return NewPredicate(t.ruleIndex, t.predIndex, t.isCtxDependent) -} - -func (t *PredicateTransition) String() string 
{ - return "pred_" + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.predIndex) -} - -type ActionTransition struct { - *BaseTransition - - isCtxDependent bool - ruleIndex, actionIndex, predIndex int -} - -func NewActionTransition(target ATNState, ruleIndex, actionIndex int, isCtxDependent bool) *ActionTransition { - - t := new(ActionTransition) - t.BaseTransition = NewBaseTransition(target) - - t.serializationType = TransitionACTION - t.ruleIndex = ruleIndex - t.actionIndex = actionIndex - t.isCtxDependent = isCtxDependent // e.g., $i ref in pred - t.isEpsilon = true - return t -} - -func (t *ActionTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return false -} - -func (t *ActionTransition) String() string { - return "action_" + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.actionIndex) -} - -type SetTransition struct { - *BaseTransition -} - -func NewSetTransition(target ATNState, set *IntervalSet) *SetTransition { - - t := new(SetTransition) - t.BaseTransition = NewBaseTransition(target) - - t.serializationType = TransitionSET - if set != nil { - t.intervalSet = set - } else { - t.intervalSet = NewIntervalSet() - t.intervalSet.addOne(TokenInvalidType) - } - - return t -} - -func (t *SetTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return t.intervalSet.contains(symbol) -} - -func (t *SetTransition) String() string { - return t.intervalSet.String() -} - -type NotSetTransition struct { - *SetTransition -} - -func NewNotSetTransition(target ATNState, set *IntervalSet) *NotSetTransition { - - t := new(NotSetTransition) - - t.SetTransition = NewSetTransition(target, set) - - t.serializationType = TransitionNOTSET - - return t -} - -func (t *NotSetTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return symbol >= minVocabSymbol && symbol <= maxVocabSymbol && !t.intervalSet.contains(symbol) -} - -func (t *NotSetTransition) String() string { - return "~" + t.intervalSet.String() -} - -type WildcardTransition struct { - *BaseTransition -} - -func NewWildcardTransition(target ATNState) *WildcardTransition { - - t := new(WildcardTransition) - t.BaseTransition = NewBaseTransition(target) - - t.serializationType = TransitionWILDCARD - return t -} - -func (t *WildcardTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return symbol >= minVocabSymbol && symbol <= maxVocabSymbol -} - -func (t *WildcardTransition) String() string { - return "." 
-} - -type PrecedencePredicateTransition struct { - *BaseAbstractPredicateTransition - - precedence int -} - -func NewPrecedencePredicateTransition(target ATNState, precedence int) *PrecedencePredicateTransition { - - t := new(PrecedencePredicateTransition) - t.BaseAbstractPredicateTransition = NewBasePredicateTransition(target) - - t.serializationType = TransitionPRECEDENCE - t.precedence = precedence - t.isEpsilon = true - - return t -} - -func (t *PrecedencePredicateTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return false -} - -func (t *PrecedencePredicateTransition) getPredicate() *PrecedencePredicate { - return NewPrecedencePredicate(t.precedence) -} - -func (t *PrecedencePredicateTransition) String() string { - return fmt.Sprint(t.precedence) + " >= _p" -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/tree.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/tree.go deleted file mode 100644 index bdeb6d78..00000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/tree.go +++ /dev/null @@ -1,256 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -// The basic notion of a tree has a parent, a payload, and a list of children. -// It is the most abstract interface for all the trees used by ANTLR. -/// - -var TreeInvalidInterval = NewInterval(-1, -2) - -type Tree interface { - GetParent() Tree - SetParent(Tree) - GetPayload() interface{} - GetChild(i int) Tree - GetChildCount() int - GetChildren() []Tree -} - -type SyntaxTree interface { - Tree - - GetSourceInterval() *Interval -} - -type ParseTree interface { - SyntaxTree - - Accept(Visitor ParseTreeVisitor) interface{} - GetText() string - - ToStringTree([]string, Recognizer) string -} - -type RuleNode interface { - ParseTree - - GetRuleContext() RuleContext - GetBaseRuleContext() *BaseRuleContext -} - -type TerminalNode interface { - ParseTree - - GetSymbol() Token -} - -type ErrorNode interface { - TerminalNode - - errorNode() -} - -type ParseTreeVisitor interface { - Visit(tree ParseTree) interface{} - VisitChildren(node RuleNode) interface{} - VisitTerminal(node TerminalNode) interface{} - VisitErrorNode(node ErrorNode) interface{} -} - -type BaseParseTreeVisitor struct{} - -var _ ParseTreeVisitor = &BaseParseTreeVisitor{} - -func (v *BaseParseTreeVisitor) Visit(tree ParseTree) interface{} { return nil } -func (v *BaseParseTreeVisitor) VisitChildren(node RuleNode) interface{} { return nil } -func (v *BaseParseTreeVisitor) VisitTerminal(node TerminalNode) interface{} { return nil } -func (v *BaseParseTreeVisitor) VisitErrorNode(node ErrorNode) interface{} { return nil } - -// TODO -//func (this ParseTreeVisitor) Visit(ctx) { -// if (Utils.isArray(ctx)) { -// self := this -// return ctx.map(function(child) { return VisitAtom(self, child)}) -// } else { -// return VisitAtom(this, ctx) -// } -//} -// -//func VisitAtom(Visitor, ctx) { -// if (ctx.parser == nil) { //is terminal -// return -// } -// -// name := ctx.parser.ruleNames[ctx.ruleIndex] -// funcName := "Visit" + Utils.titleCase(name) -// -// return Visitor[funcName](ctx) -//} - -type ParseTreeListener interface { - VisitTerminal(node TerminalNode) - VisitErrorNode(node ErrorNode) - EnterEveryRule(ctx ParserRuleContext) - ExitEveryRule(ctx ParserRuleContext) -} - -type BaseParseTreeListener struct{} - -var _ ParseTreeListener = &BaseParseTreeListener{} - -func (l 
*BaseParseTreeListener) VisitTerminal(node TerminalNode) {} -func (l *BaseParseTreeListener) VisitErrorNode(node ErrorNode) {} -func (l *BaseParseTreeListener) EnterEveryRule(ctx ParserRuleContext) {} -func (l *BaseParseTreeListener) ExitEveryRule(ctx ParserRuleContext) {} - -type TerminalNodeImpl struct { - parentCtx RuleContext - - symbol Token -} - -var _ TerminalNode = &TerminalNodeImpl{} - -func NewTerminalNodeImpl(symbol Token) *TerminalNodeImpl { - tn := new(TerminalNodeImpl) - - tn.parentCtx = nil - tn.symbol = symbol - - return tn -} - -func (t *TerminalNodeImpl) GetChild(i int) Tree { - return nil -} - -func (t *TerminalNodeImpl) GetChildren() []Tree { - return nil -} - -func (t *TerminalNodeImpl) SetChildren(tree []Tree) { - panic("Cannot set children on terminal node") -} - -func (t *TerminalNodeImpl) GetSymbol() Token { - return t.symbol -} - -func (t *TerminalNodeImpl) GetParent() Tree { - return t.parentCtx -} - -func (t *TerminalNodeImpl) SetParent(tree Tree) { - t.parentCtx = tree.(RuleContext) -} - -func (t *TerminalNodeImpl) GetPayload() interface{} { - return t.symbol -} - -func (t *TerminalNodeImpl) GetSourceInterval() *Interval { - if t.symbol == nil { - return TreeInvalidInterval - } - tokenIndex := t.symbol.GetTokenIndex() - return NewInterval(tokenIndex, tokenIndex) -} - -func (t *TerminalNodeImpl) GetChildCount() int { - return 0 -} - -func (t *TerminalNodeImpl) Accept(v ParseTreeVisitor) interface{} { - return v.VisitTerminal(t) -} - -func (t *TerminalNodeImpl) GetText() string { - return t.symbol.GetText() -} - -func (t *TerminalNodeImpl) String() string { - if t.symbol.GetTokenType() == TokenEOF { - return "" - } - - return t.symbol.GetText() -} - -func (t *TerminalNodeImpl) ToStringTree(s []string, r Recognizer) string { - return t.String() -} - -// Represents a token that was consumed during reSynchronization -// rather than during a valid Match operation. For example, -// we will create this kind of a node during single token insertion -// and deletion as well as during "consume until error recovery set" -// upon no viable alternative exceptions. - -type ErrorNodeImpl struct { - *TerminalNodeImpl -} - -var _ ErrorNode = &ErrorNodeImpl{} - -func NewErrorNodeImpl(token Token) *ErrorNodeImpl { - en := new(ErrorNodeImpl) - en.TerminalNodeImpl = NewTerminalNodeImpl(token) - return en -} - -func (e *ErrorNodeImpl) errorNode() {} - -func (e *ErrorNodeImpl) Accept(v ParseTreeVisitor) interface{} { - return v.VisitErrorNode(e) -} - -type ParseTreeWalker struct { -} - -func NewParseTreeWalker() *ParseTreeWalker { - return new(ParseTreeWalker) -} - -// Performs a walk on the given parse tree starting at the root and going down recursively -// with depth-first search. On each node, EnterRule is called before -// recursively walking down into child nodes, then -// ExitRule is called after the recursive call to wind up. 
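A sketch of driving the walker with a custom listener (MyListener is hypothetical; it embeds the no-op base listener defined above and overrides only what it needs):

// assumes: import ("fmt"; "github.com/antlr/antlr4/runtime/Go/antlr")
type MyListener struct {
	antlr.BaseParseTreeListener // inherits no-op hooks
}

func (l *MyListener) EnterEveryRule(ctx antlr.ParserRuleContext) {
	fmt.Println("enter rule", ctx.GetRuleIndex())
}

// Walk visits depth-first: leaves fire VisitTerminal/VisitErrorNode, and
// every rule node is bracketed by EnterEveryRule/ExitEveryRule.
antlr.ParseTreeWalkerDefault.Walk(&MyListener{}, tree) // tree: a parse tree obtained elsewhere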
-func (p *ParseTreeWalker) Walk(listener ParseTreeListener, t Tree) { - switch tt := t.(type) { - case ErrorNode: - listener.VisitErrorNode(tt) - case TerminalNode: - listener.VisitTerminal(tt) - default: - p.EnterRule(listener, t.(RuleNode)) - for i := 0; i < t.GetChildCount(); i++ { - child := t.GetChild(i) - p.Walk(listener, child) - } - p.ExitRule(listener, t.(RuleNode)) - } -} - -// -// Enters a grammar rule by first triggering the generic event {@link ParseTreeListener//EnterEveryRule} -// then by triggering the event specific to the given parse tree node -// -func (p *ParseTreeWalker) EnterRule(listener ParseTreeListener, r RuleNode) { - ctx := r.GetRuleContext().(ParserRuleContext) - listener.EnterEveryRule(ctx) - ctx.EnterRule(listener) -} - -// Exits a grammar rule by first triggering the event specific to the given parse tree node -// then by triggering the generic event {@link ParseTreeListener//ExitEveryRule} -// -func (p *ParseTreeWalker) ExitRule(listener ParseTreeListener, r RuleNode) { - ctx := r.GetRuleContext().(ParserRuleContext) - ctx.ExitRule(listener) - listener.ExitEveryRule(ctx) -} - -var ParseTreeWalkerDefault = NewParseTreeWalker() diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/trees.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/trees.go deleted file mode 100644 index 80144eca..00000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/trees.go +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import "fmt" - -/** A set of utility routines useful for all kinds of ANTLR trees. */ - -// Print out a whole tree in LISP form. {@link //getNodeText} is used on the -// node payloads to get the text for the nodes. Detect -// parse trees and extract data appropriately. -func TreesStringTree(tree Tree, ruleNames []string, recog Recognizer) string { - - if recog != nil { - ruleNames = recog.GetRuleNames() - } - - s := TreesGetNodeText(tree, ruleNames, nil) - - s = EscapeWhitespace(s, false) - c := tree.GetChildCount() - if c == 0 { - return s - } - res := "(" + s + " " - if c > 0 { - s = TreesStringTree(tree.GetChild(0), ruleNames, nil) - res += s - } - for i := 1; i < c; i++ { - s = TreesStringTree(tree.GetChild(i), ruleNames, nil) - res += (" " + s) - } - res += ")" - return res -} - -func TreesGetNodeText(t Tree, ruleNames []string, recog Parser) string { - if recog != nil { - ruleNames = recog.GetRuleNames() - } - - if ruleNames != nil { - switch t2 := t.(type) { - case RuleNode: - t3 := t2.GetRuleContext() - altNumber := t3.GetAltNumber() - - if altNumber != ATNInvalidAltNumber { - return fmt.Sprintf("%s:%d", ruleNames[t3.GetRuleIndex()], altNumber) - } - return ruleNames[t3.GetRuleIndex()] - case ErrorNode: - return fmt.Sprint(t2) - case TerminalNode: - if t2.GetSymbol() != nil { - return t2.GetSymbol().GetText() - } - } - } - - // no recog for rule names - payload := t.GetPayload() - if p2, ok := payload.(Token); ok { - return p2.GetText() - } - - return fmt.Sprint(t.GetPayload()) -} - -// Return ordered list of all children of this node -func TreesGetChildren(t Tree) []Tree { - list := make([]Tree, 0) - for i := 0; i < t.GetChildCount(); i++ { - list = append(list, t.GetChild(i)) - } - return list -} - -// Return a list of all ancestors of this node. The first node of -// list is the root and the last is the parent of this node. 
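For readers tracing the deleted tree.go above: `ParseTreeWalker` drives a depth-first traversal in which `EnterEveryRule` fires before a rule node's children are visited and `ExitEveryRule` fires after them, while terminals and error nodes go straight to `VisitTerminal`/`VisitErrorNode`. A minimal sketch of how a consumer typically plugs into that API (illustrative only; `countingListener` is a hypothetical listener, not code from this repository, and it assumes a parse tree obtained from some ANTLR-generated parser):

```go
package example

import "github.com/antlr/antlr4/runtime/Go/antlr"

// countingListener embeds BaseParseTreeListener so it only has to
// override the callbacks it cares about; the embedded base supplies
// no-op implementations of the rest of the ParseTreeListener interface.
type countingListener struct {
	*antlr.BaseParseTreeListener
	rules, tokens int
}

func (l *countingListener) EnterEveryRule(ctx antlr.ParserRuleContext) { l.rules++ }
func (l *countingListener) VisitTerminal(node antlr.TerminalNode)     { l.tokens++ }

// countNodes walks the tree depth-first with the shared default walker,
// returning how many rule contexts and terminal nodes were visited.
func countNodes(tree antlr.ParseTree) (rules, tokens int) {
	l := &countingListener{BaseParseTreeListener: &antlr.BaseParseTreeListener{}}
	antlr.ParseTreeWalkerDefault.Walk(l, tree)
	return l.rules, l.tokens
}
```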
-// -func TreesgetAncestors(t Tree) []Tree { - ancestors := make([]Tree, 0) - t = t.GetParent() - for t != nil { - f := []Tree{t} - ancestors = append(f, ancestors...) - t = t.GetParent() - } - return ancestors -} - -func TreesFindAllTokenNodes(t ParseTree, ttype int) []ParseTree { - return TreesfindAllNodes(t, ttype, true) -} - -func TreesfindAllRuleNodes(t ParseTree, ruleIndex int) []ParseTree { - return TreesfindAllNodes(t, ruleIndex, false) -} - -func TreesfindAllNodes(t ParseTree, index int, findTokens bool) []ParseTree { - nodes := make([]ParseTree, 0) - treesFindAllNodes(t, index, findTokens, &nodes) - return nodes -} - -func treesFindAllNodes(t ParseTree, index int, findTokens bool, nodes *[]ParseTree) { - // check this node (the root) first - - t2, ok := t.(TerminalNode) - t3, ok2 := t.(ParserRuleContext) - - if findTokens && ok { - if t2.GetSymbol().GetTokenType() == index { - *nodes = append(*nodes, t2) - } - } else if !findTokens && ok2 { - if t3.GetRuleIndex() == index { - *nodes = append(*nodes, t3) - } - } - // check children - for i := 0; i < t.GetChildCount(); i++ { - treesFindAllNodes(t.GetChild(i).(ParseTree), index, findTokens, nodes) - } -} - -func TreesDescendants(t ParseTree) []ParseTree { - nodes := []ParseTree{t} - for i := 0; i < t.GetChildCount(); i++ { - nodes = append(nodes, TreesDescendants(t.GetChild(i).(ParseTree))...) - } - return nodes -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/utils.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/utils.go deleted file mode 100644 index bba2ffae..00000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/utils.go +++ /dev/null @@ -1,417 +0,0 @@ -// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. 
- -package antlr - -import ( - "bytes" - "errors" - "fmt" - "sort" - "strconv" - "strings" -) - -func intMin(a, b int) int { - if a < b { - return a - } - return b -} - -func intMax(a, b int) int { - if a > b { - return a - } - return b -} - -// A simple integer stack - -type IntStack []int - -var ErrEmptyStack = errors.New("Stack is empty") - -func (s *IntStack) Pop() (int, error) { - l := len(*s) - 1 - if l < 0 { - return 0, ErrEmptyStack - } - v := (*s)[l] - *s = (*s)[0:l] - return v, nil -} - -func (s *IntStack) Push(e int) { - *s = append(*s, e) -} - -type Set struct { - data map[int][]interface{} - hashcodeFunction func(interface{}) int - equalsFunction func(interface{}, interface{}) bool -} - -func NewSet( - hashcodeFunction func(interface{}) int, - equalsFunction func(interface{}, interface{}) bool) *Set { - - s := new(Set) - - s.data = make(map[int][]interface{}) - - if hashcodeFunction != nil { - s.hashcodeFunction = hashcodeFunction - } else { - s.hashcodeFunction = standardHashFunction - } - - if equalsFunction == nil { - s.equalsFunction = standardEqualsFunction - } else { - s.equalsFunction = equalsFunction - } - - return s -} - -func standardEqualsFunction(a interface{}, b interface{}) bool { - - ac, oka := a.(comparable) - bc, okb := b.(comparable) - - if !oka || !okb { - panic("Not Comparable") - } - - return ac.equals(bc) -} - -func standardHashFunction(a interface{}) int { - if h, ok := a.(hasher); ok { - return h.hash() - } - - panic("Not Hasher") -} - -type hasher interface { - hash() int -} - -func (s *Set) length() int { - return len(s.data) -} - -func (s *Set) add(value interface{}) interface{} { - - key := s.hashcodeFunction(value) - - values := s.data[key] - - if s.data[key] != nil { - for i := 0; i < len(values); i++ { - if s.equalsFunction(value, values[i]) { - return values[i] - } - } - - s.data[key] = append(s.data[key], value) - return value - } - - v := make([]interface{}, 1, 10) - v[0] = value - s.data[key] = v - - return value -} - -func (s *Set) contains(value interface{}) bool { - - key := s.hashcodeFunction(value) - - values := s.data[key] - - if s.data[key] != nil { - for i := 0; i < len(values); i++ { - if s.equalsFunction(value, values[i]) { - return true - } - } - } - return false -} - -func (s *Set) values() []interface{} { - var l []interface{} - - for _, v := range s.data { - l = append(l, v...) 
- } - - return l -} - -func (s *Set) String() string { - r := "" - - for _, av := range s.data { - for _, v := range av { - r += fmt.Sprint(v) - } - } - - return r -} - -type BitSet struct { - data map[int]bool -} - -func NewBitSet() *BitSet { - b := new(BitSet) - b.data = make(map[int]bool) - return b -} - -func (b *BitSet) add(value int) { - b.data[value] = true -} - -func (b *BitSet) clear(index int) { - delete(b.data, index) -} - -func (b *BitSet) or(set *BitSet) { - for k := range set.data { - b.add(k) - } -} - -func (b *BitSet) remove(value int) { - delete(b.data, value) -} - -func (b *BitSet) contains(value int) bool { - return b.data[value] -} - -func (b *BitSet) values() []int { - ks := make([]int, len(b.data)) - i := 0 - for k := range b.data { - ks[i] = k - i++ - } - sort.Ints(ks) - return ks -} - -func (b *BitSet) minValue() int { - min := 2147483647 - - for k := range b.data { - if k < min { - min = k - } - } - - return min -} - -func (b *BitSet) equals(other interface{}) bool { - otherBitSet, ok := other.(*BitSet) - if !ok { - return false - } - - if len(b.data) != len(otherBitSet.data) { - return false - } - - for k, v := range b.data { - if otherBitSet.data[k] != v { - return false - } - } - - return true -} - -func (b *BitSet) length() int { - return len(b.data) -} - -func (b *BitSet) String() string { - vals := b.values() - valsS := make([]string, len(vals)) - - for i, val := range vals { - valsS[i] = strconv.Itoa(val) - } - return "{" + strings.Join(valsS, ", ") + "}" -} - -type AltDict struct { - data map[string]interface{} -} - -func NewAltDict() *AltDict { - d := new(AltDict) - d.data = make(map[string]interface{}) - return d -} - -func (a *AltDict) Get(key string) interface{} { - key = "k-" + key - return a.data[key] -} - -func (a *AltDict) put(key string, value interface{}) { - key = "k-" + key - a.data[key] = value -} - -func (a *AltDict) values() []interface{} { - vs := make([]interface{}, len(a.data)) - i := 0 - for _, v := range a.data { - vs[i] = v - i++ - } - return vs -} - -type DoubleDict struct { - data map[int]map[int]interface{} -} - -func NewDoubleDict() *DoubleDict { - dd := new(DoubleDict) - dd.data = make(map[int]map[int]interface{}) - return dd -} - -func (d *DoubleDict) Get(a, b int) interface{} { - data := d.data[a] - - if data == nil { - return nil - } - - return data[b] -} - -func (d *DoubleDict) set(a, b int, o interface{}) { - data := d.data[a] - - if data == nil { - data = make(map[int]interface{}) - d.data[a] = data - } - - data[b] = o -} - -func EscapeWhitespace(s string, escapeSpaces bool) string { - - s = strings.Replace(s, "\t", "\\t", -1) - s = strings.Replace(s, "\n", "\\n", -1) - s = strings.Replace(s, "\r", "\\r", -1) - if escapeSpaces { - s = strings.Replace(s, " ", "\u00B7", -1) - } - return s -} - -func TerminalNodeToStringArray(sa []TerminalNode) []string { - st := make([]string, len(sa)) - - for i, s := range sa { - st[i] = fmt.Sprintf("%v", s) - } - - return st -} - -func PrintArrayJavaStyle(sa []string) string { - var buffer bytes.Buffer - - buffer.WriteString("[") - - for i, s := range sa { - buffer.WriteString(s) - if i != len(sa)-1 { - buffer.WriteString(", ") - } - } - - buffer.WriteString("]") - - return buffer.String() -} - -// The following routines were lifted from bits.rotate* available in Go 1.9. - -const uintSize = 32 << (^uint(0) >> 32 & 1) // 32 or 64 - -// rotateLeft returns the value of x rotated left by (k mod UintSize) bits. -// To rotate x right by k bits, call RotateLeft(x, -k). 
-func rotateLeft(x uint, k int) uint {
-	if uintSize == 32 {
-		return uint(rotateLeft32(uint32(x), k))
-	}
-	return uint(rotateLeft64(uint64(x), k))
-}
-
-// rotateLeft32 returns the value of x rotated left by (k mod 32) bits.
-func rotateLeft32(x uint32, k int) uint32 {
-	const n = 32
-	s := uint(k) & (n - 1)
-	return x<<s | x>>(n-s)
-}
-
-// rotateLeft64 returns the value of x rotated left by (k mod 64) bits.
-func rotateLeft64(x uint64, k int) uint64 {
-	const n = 64
-	s := uint(k) & (n - 1)
-	return x<<s | x>>(n-s)
-}
-
-
-// murmur hash
-const (
-	c1_32 uint = 0xCC9E2D51
-	c2_32 uint = 0x1B873593
-	n1_32 uint = 0xE6546B64
-)
-
-func murmurInit(seed int) int {
-	return seed
-}
-
-func murmurUpdate(h1 int, k1 int) int {
-	var k1u uint
-	k1u = uint(k1) * c1_32
-	k1u = rotateLeft(k1u, 15)
-	k1u *= c2_32
-
-	var h1u = uint(h1) ^ k1u
-	k1u = rotateLeft(k1u, 13)
-	h1u = h1u*5 + 0xe6546b64
-	return int(h1u)
-}
-
-func murmurFinish(h1 int, numberOfWords int) int {
-	var h1u uint = uint(h1)
-	h1u ^= uint(numberOfWords * 4)
-	h1u ^= h1u >> 16
-	h1u *= uint(0x85ebca6b)
-	h1u ^= h1u >> 13
-	h1u *= 0xc2b2ae35
-	h1u ^= h1u >> 16
-
-	return int(h1u)
-}
diff --git a/vendor/github.com/beorn7/perks/LICENSE b/vendor/github.com/beorn7/perks/LICENSE
deleted file mode 100644
index 339177be..00000000
--- a/vendor/github.com/beorn7/perks/LICENSE
+++ /dev/null
@@ -1,20 +0,0 @@
-Copyright (C) 2013 Blake Mizerany
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
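The rotate and murmur helpers deleted above follow the standard murmur3 init/update/finish pattern: seed the hash, mix in one word at a time, then finalize with the word count. For comparison, a minimal sketch of the canonical 32-bit mix using `math/bits.RotateLeft32`, the stdlib routine the "lifted from bits.rotate*" comment refers to. One divergence worth noting: the deleted `murmurUpdate` rotates `k1u` at the third step and discards the result, where canonical murmur3 rotates `h1`; the sketch below shows the canonical form, so its outputs will differ from the deleted code's.

```go
package example

import "math/bits"

// murmurUpdate32 is the canonical 32-bit murmur3 block mix,
// using the same constants as the deleted c1_32/c2_32/n1_32.
func murmurUpdate32(h1, k1 uint32) uint32 {
	k1 *= 0xCC9E2D51 // c1_32
	k1 = bits.RotateLeft32(k1, 15)
	k1 *= 0x1B873593 // c2_32
	h1 ^= k1
	h1 = bits.RotateLeft32(h1, 13) // canonical: rotate h1, not k1
	return h1*5 + 0xE6546B64       // n1_32
}

// murmurFinish32 is the standard avalanche finalizer, matching the
// deleted murmurFinish step for step.
func murmurFinish32(h1 uint32, numberOfWords int) uint32 {
	h1 ^= uint32(numberOfWords * 4)
	h1 ^= h1 >> 16
	h1 *= 0x85EBCA6B
	h1 ^= h1 >> 13
	h1 *= 0xC2B2AE35
	h1 ^= h1 >> 16
	return h1
}

// hashWords shows the init/update/finish call pattern the runtime's
// hash() methods follow.
func hashWords(seed uint32, words []uint32) uint32 {
	h := seed // murmurInit(seed) simply returns the seed
	for _, w := range words {
		h = murmurUpdate32(h, w)
	}
	return murmurFinish32(h, len(words))
}
```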
diff --git a/vendor/github.com/beorn7/perks/quantile/exampledata.txt b/vendor/github.com/beorn7/perks/quantile/exampledata.txt deleted file mode 100644 index 1602287d..00000000 --- a/vendor/github.com/beorn7/perks/quantile/exampledata.txt +++ /dev/null @@ -1,2388 +0,0 @@ -8 -5 -26 -12 -5 -235 -13 -6 -28 -30 -3 -3 -3 -3 -5 -2 -33 -7 -2 -4 -7 -12 -14 -5 -8 -3 -10 -4 -5 -3 -6 -6 -209 -20 -3 -10 -14 -3 -4 -6 -8 -5 -11 -7 -3 -2 -3 -3 -212 -5 -222 -4 -10 -10 -5 -6 -3 -8 -3 -10 -254 -220 -2 -3 -5 -24 -5 -4 -222 -7 -3 -3 -223 -8 -15 -12 -14 -14 -3 -2 -2 -3 -13 -3 -11 -4 -4 -6 -5 -7 -13 -5 -3 -5 -2 -5 -3 -5 -2 -7 -15 -17 -14 -3 -6 -6 -3 -17 -5 -4 -7 -6 -4 -4 -8 -6 -8 -3 -9 -3 -6 -3 -4 -5 -3 -3 -660 -4 -6 -10 -3 -6 -3 -2 -5 -13 -2 -4 -4 -10 -4 -8 -4 -3 -7 -9 -9 -3 -10 -37 -3 -13 -4 -12 -3 -6 -10 -8 -5 -21 -2 -3 -8 -3 -2 -3 -3 -4 -12 -2 -4 -8 -8 -4 -3 -2 -20 -1 -6 -32 -2 -11 -6 -18 -3 -8 -11 -3 -212 -3 -4 -2 -6 -7 -12 -11 -3 -2 -16 -10 -6 -4 -6 -3 -2 -7 -3 -2 -2 -2 -2 -5 -6 -4 -3 -10 -3 -4 -6 -5 -3 -4 -4 -5 -6 -4 -3 -4 -4 -5 -7 -5 -5 -3 -2 -7 -2 -4 -12 -4 -5 -6 -2 -4 -4 -8 -4 -15 -13 -7 -16 -5 -3 -23 -5 -5 -7 -3 -2 -9 -8 -7 -5 -8 -11 -4 -10 -76 -4 -47 -4 -3 -2 -7 -4 -2 -3 -37 -10 -4 -2 -20 -5 -4 -4 -10 -10 -4 -3 -7 -23 -240 -7 -13 -5 -5 -3 -3 -2 -5 -4 -2 -8 -7 -19 -2 -23 -8 -7 -2 -5 -3 -8 -3 -8 -13 -5 -5 -5 -2 -3 -23 -4 -9 -8 -4 -3 -3 -5 -220 -2 -3 -4 -6 -14 -3 -53 -6 -2 -5 -18 -6 -3 -219 -6 -5 -2 -5 -3 -6 -5 -15 -4 -3 -17 -3 -2 -4 -7 -2 -3 -3 -4 -4 -3 -2 -664 -6 -3 -23 -5 -5 -16 -5 -8 -2 -4 -2 -24 -12 -3 -2 -3 -5 -8 -3 -5 -4 -3 -14 -3 -5 -8 -2 -3 -7 -9 -4 -2 -3 -6 -8 -4 -3 -4 -6 -5 -3 -3 -6 -3 -19 -4 -4 -6 -3 -6 -3 -5 -22 -5 -4 -4 -3 -8 -11 -4 -9 -7 -6 -13 -4 -4 -4 -6 -17 -9 -3 -3 -3 -4 -3 -221 -5 -11 -3 -4 -2 -12 -6 -3 -5 -7 -5 -7 -4 -9 -7 -14 -37 -19 -217 -16 -3 -5 -2 -2 -7 -19 -7 -6 -7 -4 -24 -5 -11 -4 -7 -7 -9 -13 -3 -4 -3 -6 -28 -4 -4 -5 -5 -2 -5 -6 -4 -4 -6 -10 -5 -4 -3 -2 -3 -3 -6 -5 -5 -4 -3 -2 -3 -7 -4 -6 -18 -16 -8 -16 -4 -5 -8 -6 -9 -13 -1545 -6 -215 -6 -5 -6 -3 -45 -31 -5 -2 -2 -4 -3 -3 -2 -5 -4 -3 -5 -7 -7 -4 -5 -8 -5 -4 -749 -2 -31 -9 -11 -2 -11 -5 -4 -4 -7 -9 -11 -4 -5 -4 -7 -3 -4 -6 -2 -15 -3 -4 -3 -4 -3 -5 -2 -13 -5 -5 -3 -3 -23 -4 -4 -5 -7 -4 -13 -2 -4 -3 -4 -2 -6 -2 -7 -3 -5 -5 -3 -29 -5 -4 -4 -3 -10 -2 -3 -79 -16 -6 -6 -7 -7 -3 -5 -5 -7 -4 -3 -7 -9 -5 -6 -5 -9 -6 -3 -6 -4 -17 -2 -10 -9 -3 -6 -2 -3 -21 -22 -5 -11 -4 -2 -17 -2 -224 -2 -14 -3 -4 -4 -2 -4 -4 -4 -4 -5 -3 -4 -4 -10 -2 -6 -3 -3 -5 -7 -2 -7 -5 -6 -3 -218 -2 -2 -5 -2 -6 -3 -5 -222 -14 -6 -33 -3 -2 -5 -3 -3 -3 -9 -5 -3 -3 -2 -7 -4 -3 -4 -3 -5 -6 -5 -26 -4 -13 -9 -7 -3 -221 -3 -3 -4 -4 -4 -4 -2 -18 -5 -3 -7 -9 -6 -8 -3 -10 -3 -11 -9 -5 -4 -17 -5 -5 -6 -6 -3 -2 -4 -12 -17 -6 -7 -218 -4 -2 -4 -10 -3 -5 -15 -3 -9 -4 -3 -3 -6 -29 -3 -3 -4 -5 -5 -3 -8 -5 -6 -6 -7 -5 -3 -5 -3 -29 -2 -31 -5 -15 -24 -16 -5 -207 -4 -3 -3 -2 -15 -4 -4 -13 -5 -5 -4 -6 -10 -2 -7 -8 -4 -6 -20 -5 -3 -4 -3 -12 -12 -5 -17 -7 -3 -3 -3 -6 -10 -3 -5 -25 -80 -4 -9 -3 -2 -11 -3 -3 -2 -3 -8 -7 -5 -5 -19 -5 -3 -3 -12 -11 -2 -6 -5 -5 -5 -3 -3 -3 -4 -209 -14 -3 -2 -5 -19 -4 -4 -3 -4 -14 -5 -6 -4 -13 -9 -7 -4 -7 -10 -2 -9 -5 -7 -2 -8 -4 -6 -5 -5 -222 -8 -7 -12 -5 -216 -3 -4 -4 -6 -3 -14 -8 -7 -13 -4 -3 -3 -3 -3 -17 -5 -4 -3 -33 -6 -6 -33 -7 -5 -3 -8 -7 -5 -2 -9 -4 -2 -233 -24 -7 -4 -8 -10 -3 -4 -15 -2 -16 -3 -3 -13 -12 -7 -5 -4 -207 -4 -2 -4 -27 -15 -2 -5 -2 -25 -6 -5 -5 -6 -13 -6 -18 -6 -4 -12 -225 -10 -7 -5 -2 -2 -11 -4 -14 -21 -8 -10 -3 -5 -4 -232 -2 -5 -5 -3 -7 -17 -11 -6 -6 -23 -4 -6 -3 -5 -4 -2 -17 -3 -6 -5 -8 -3 -2 -2 -14 -9 -4 -4 -2 -5 -5 -3 -7 -6 -12 -6 -10 
-3 -6 -2 -2 -19 -5 -4 -4 -9 -2 -4 -13 -3 -5 -6 -3 -6 -5 -4 -9 -6 -3 -5 -7 -3 -6 -6 -4 -3 -10 -6 -3 -221 -3 -5 -3 -6 -4 -8 -5 -3 -6 -4 -4 -2 -54 -5 -6 -11 -3 -3 -4 -4 -4 -3 -7 -3 -11 -11 -7 -10 -6 -13 -223 -213 -15 -231 -7 -3 -7 -228 -2 -3 -4 -4 -5 -6 -7 -4 -13 -3 -4 -5 -3 -6 -4 -6 -7 -2 -4 -3 -4 -3 -3 -6 -3 -7 -3 -5 -18 -5 -6 -8 -10 -3 -3 -3 -2 -4 -2 -4 -4 -5 -6 -6 -4 -10 -13 -3 -12 -5 -12 -16 -8 -4 -19 -11 -2 -4 -5 -6 -8 -5 -6 -4 -18 -10 -4 -2 -216 -6 -6 -6 -2 -4 -12 -8 -3 -11 -5 -6 -14 -5 -3 -13 -4 -5 -4 -5 -3 -28 -6 -3 -7 -219 -3 -9 -7 -3 -10 -6 -3 -4 -19 -5 -7 -11 -6 -15 -19 -4 -13 -11 -3 -7 -5 -10 -2 -8 -11 -2 -6 -4 -6 -24 -6 -3 -3 -3 -3 -6 -18 -4 -11 -4 -2 -5 -10 -8 -3 -9 -5 -3 -4 -5 -6 -2 -5 -7 -4 -4 -14 -6 -4 -4 -5 -5 -7 -2 -4 -3 -7 -3 -3 -6 -4 -5 -4 -4 -4 -3 -3 -3 -3 -8 -14 -2 -3 -5 -3 -2 -4 -5 -3 -7 -3 -3 -18 -3 -4 -4 -5 -7 -3 -3 -3 -13 -5 -4 -8 -211 -5 -5 -3 -5 -2 -5 -4 -2 -655 -6 -3 -5 -11 -2 -5 -3 -12 -9 -15 -11 -5 -12 -217 -2 -6 -17 -3 -3 -207 -5 -5 -4 -5 -9 -3 -2 -8 -5 -4 -3 -2 -5 -12 -4 -14 -5 -4 -2 -13 -5 -8 -4 -225 -4 -3 -4 -5 -4 -3 -3 -6 -23 -9 -2 -6 -7 -233 -4 -4 -6 -18 -3 -4 -6 -3 -4 -4 -2 -3 -7 -4 -13 -227 -4 -3 -5 -4 -2 -12 -9 -17 -3 -7 -14 -6 -4 -5 -21 -4 -8 -9 -2 -9 -25 -16 -3 -6 -4 -7 -8 -5 -2 -3 -5 -4 -3 -3 -5 -3 -3 -3 -2 -3 -19 -2 -4 -3 -4 -2 -3 -4 -4 -2 -4 -3 -3 -3 -2 -6 -3 -17 -5 -6 -4 -3 -13 -5 -3 -3 -3 -4 -9 -4 -2 -14 -12 -4 -5 -24 -4 -3 -37 -12 -11 -21 -3 -4 -3 -13 -4 -2 -3 -15 -4 -11 -4 -4 -3 -8 -3 -4 -4 -12 -8 -5 -3 -3 -4 -2 -220 -3 -5 -223 -3 -3 -3 -10 -3 -15 -4 -241 -9 -7 -3 -6 -6 -23 -4 -13 -7 -3 -4 -7 -4 -9 -3 -3 -4 -10 -5 -5 -1 -5 -24 -2 -4 -5 -5 -6 -14 -3 -8 -2 -3 -5 -13 -13 -3 -5 -2 -3 -15 -3 -4 -2 -10 -4 -4 -4 -5 -5 -3 -5 -3 -4 -7 -4 -27 -3 -6 -4 -15 -3 -5 -6 -6 -5 -4 -8 -3 -9 -2 -6 -3 -4 -3 -7 -4 -18 -3 -11 -3 -3 -8 -9 -7 -24 -3 -219 -7 -10 -4 -5 -9 -12 -2 -5 -4 -4 -4 -3 -3 -19 -5 -8 -16 -8 -6 -22 -3 -23 -3 -242 -9 -4 -3 -3 -5 -7 -3 -3 -5 -8 -3 -7 -5 -14 -8 -10 -3 -4 -3 -7 -4 -6 -7 -4 -10 -4 -3 -11 -3 -7 -10 -3 -13 -6 -8 -12 -10 -5 -7 -9 -3 -4 -7 -7 -10 -8 -30 -9 -19 -4 -3 -19 -15 -4 -13 -3 -215 -223 -4 -7 -4 -8 -17 -16 -3 -7 -6 -5 -5 -4 -12 -3 -7 -4 -4 -13 -4 -5 -2 -5 -6 -5 -6 -6 -7 -10 -18 -23 -9 -3 -3 -6 -5 -2 -4 -2 -7 -3 -3 -2 -5 -5 -14 -10 -224 -6 -3 -4 -3 -7 -5 -9 -3 -6 -4 -2 -5 -11 -4 -3 -3 -2 -8 -4 -7 -4 -10 -7 -3 -3 -18 -18 -17 -3 -3 -3 -4 -5 -3 -3 -4 -12 -7 -3 -11 -13 -5 -4 -7 -13 -5 -4 -11 -3 -12 -3 -6 -4 -4 -21 -4 -6 -9 -5 -3 -10 -8 -4 -6 -4 -4 -6 -5 -4 -8 -6 -4 -6 -4 -4 -5 -9 -6 -3 -4 -2 -9 -3 -18 -2 -4 -3 -13 -3 -6 -6 -8 -7 -9 -3 -2 -16 -3 -4 -6 -3 -2 -33 -22 -14 -4 -9 -12 -4 -5 -6 -3 -23 -9 -4 -3 -5 -5 -3 -4 -5 -3 -5 -3 -10 -4 -5 -5 -8 -4 -4 -6 -8 -5 -4 -3 -4 -6 -3 -3 -3 -5 -9 -12 -6 -5 -9 -3 -5 -3 -2 -2 -2 -18 -3 -2 -21 -2 -5 -4 -6 -4 -5 -10 -3 -9 -3 -2 -10 -7 -3 -6 -6 -4 -4 -8 -12 -7 -3 -7 -3 -3 -9 -3 -4 -5 -4 -4 -5 -5 -10 -15 -4 -4 -14 -6 -227 -3 -14 -5 -216 -22 -5 -4 -2 -2 -6 -3 -4 -2 -9 -9 -4 -3 -28 -13 -11 -4 -5 -3 -3 -2 -3 -3 -5 -3 -4 -3 -5 -23 -26 -3 -4 -5 -6 -4 -6 -3 -5 -5 -3 -4 -3 -2 -2 -2 -7 -14 -3 -6 -7 -17 -2 -2 -15 -14 -16 -4 -6 -7 -13 -6 -4 -5 -6 -16 -3 -3 -28 -3 -6 -15 -3 -9 -2 -4 -6 -3 -3 -22 -4 -12 -6 -7 -2 -5 -4 -10 -3 -16 -6 -9 -2 -5 -12 -7 -5 -5 -5 -5 -2 -11 -9 -17 -4 -3 -11 -7 -3 -5 -15 -4 -3 -4 -211 -8 -7 -5 -4 -7 -6 -7 -6 -3 -6 -5 -6 -5 -3 -4 -4 -26 -4 -6 -10 -4 -4 -3 -2 -3 -3 -4 -5 -9 -3 -9 -4 -4 -5 -5 -8 -2 -4 -2 -3 -8 -4 -11 -19 -5 -8 -6 -3 -5 -6 -12 -3 -2 -4 -16 -12 -3 -4 -4 -8 -6 -5 -6 -6 -219 -8 -222 -6 -16 -3 -13 -19 -5 -4 -3 -11 -6 -10 -4 -7 -7 -12 -5 -3 -3 -5 -6 -10 -3 -8 -2 -5 -4 -7 -2 -4 -4 -2 -12 
-9 -6 -4 -2 -40 -2 -4 -10 -4 -223 -4 -2 -20 -6 -7 -24 -5 -4 -5 -2 -20 -16 -6 -5 -13 -2 -3 -3 -19 -3 -2 -4 -5 -6 -7 -11 -12 -5 -6 -7 -7 -3 -5 -3 -5 -3 -14 -3 -4 -4 -2 -11 -1 -7 -3 -9 -6 -11 -12 -5 -8 -6 -221 -4 -2 -12 -4 -3 -15 -4 -5 -226 -7 -218 -7 -5 -4 -5 -18 -4 -5 -9 -4 -4 -2 -9 -18 -18 -9 -5 -6 -6 -3 -3 -7 -3 -5 -4 -4 -4 -12 -3 -6 -31 -5 -4 -7 -3 -6 -5 -6 -5 -11 -2 -2 -11 -11 -6 -7 -5 -8 -7 -10 -5 -23 -7 -4 -3 -5 -34 -2 -5 -23 -7 -3 -6 -8 -4 -4 -4 -2 -5 -3 -8 -5 -4 -8 -25 -2 -3 -17 -8 -3 -4 -8 -7 -3 -15 -6 -5 -7 -21 -9 -5 -6 -6 -5 -3 -2 -3 -10 -3 -6 -3 -14 -7 -4 -4 -8 -7 -8 -2 -6 -12 -4 -213 -6 -5 -21 -8 -2 -5 -23 -3 -11 -2 -3 -6 -25 -2 -3 -6 -7 -6 -6 -4 -4 -6 -3 -17 -9 -7 -6 -4 -3 -10 -7 -2 -3 -3 -3 -11 -8 -3 -7 -6 -4 -14 -36 -3 -4 -3 -3 -22 -13 -21 -4 -2 -7 -4 -4 -17 -15 -3 -7 -11 -2 -4 -7 -6 -209 -6 -3 -2 -2 -24 -4 -9 -4 -3 -3 -3 -29 -2 -2 -4 -3 -3 -5 -4 -6 -3 -3 -2 -4 diff --git a/vendor/github.com/beorn7/perks/quantile/stream.go b/vendor/github.com/beorn7/perks/quantile/stream.go deleted file mode 100644 index d7d14f8e..00000000 --- a/vendor/github.com/beorn7/perks/quantile/stream.go +++ /dev/null @@ -1,316 +0,0 @@ -// Package quantile computes approximate quantiles over an unbounded data -// stream within low memory and CPU bounds. -// -// A small amount of accuracy is traded to achieve the above properties. -// -// Multiple streams can be merged before calling Query to generate a single set -// of results. This is meaningful when the streams represent the same type of -// data. See Merge and Samples. -// -// For more detailed information about the algorithm used, see: -// -// Effective Computation of Biased Quantiles over Data Streams -// -// http://www.cs.rutgers.edu/~muthu/bquant.pdf -package quantile - -import ( - "math" - "sort" -) - -// Sample holds an observed value and meta information for compression. JSON -// tags have been added for convenience. -type Sample struct { - Value float64 `json:",string"` - Width float64 `json:",string"` - Delta float64 `json:",string"` -} - -// Samples represents a slice of samples. It implements sort.Interface. -type Samples []Sample - -func (a Samples) Len() int { return len(a) } -func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value } -func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - -type invariant func(s *stream, r float64) float64 - -// NewLowBiased returns an initialized Stream for low-biased quantiles -// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but -// error guarantees can still be given even for the lower ranks of the data -// distribution. -// -// The provided epsilon is a relative error, i.e. the true quantile of a value -// returned by a query is guaranteed to be within (1±Epsilon)*Quantile. -// -// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error -// properties. -func NewLowBiased(epsilon float64) *Stream { - ƒ := func(s *stream, r float64) float64 { - return 2 * epsilon * r - } - return newStream(ƒ) -} - -// NewHighBiased returns an initialized Stream for high-biased quantiles -// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but -// error guarantees can still be given even for the higher ranks of the data -// distribution. -// -// The provided epsilon is a relative error, i.e. the true quantile of a value -// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile). -// -// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error -// properties. 
-func NewHighBiased(epsilon float64) *Stream { - ƒ := func(s *stream, r float64) float64 { - return 2 * epsilon * (s.n - r) - } - return newStream(ƒ) -} - -// NewTargeted returns an initialized Stream concerned with a particular set of -// quantile values that are supplied a priori. Knowing these a priori reduces -// space and computation time. The targets map maps the desired quantiles to -// their absolute errors, i.e. the true quantile of a value returned by a query -// is guaranteed to be within (Quantile±Epsilon). -// -// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties. -func NewTargeted(targetMap map[float64]float64) *Stream { - // Convert map to slice to avoid slow iterations on a map. - // ƒ is called on the hot path, so converting the map to a slice - // beforehand results in significant CPU savings. - targets := targetMapToSlice(targetMap) - - ƒ := func(s *stream, r float64) float64 { - var m = math.MaxFloat64 - var f float64 - for _, t := range targets { - if t.quantile*s.n <= r { - f = (2 * t.epsilon * r) / t.quantile - } else { - f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile) - } - if f < m { - m = f - } - } - return m - } - return newStream(ƒ) -} - -type target struct { - quantile float64 - epsilon float64 -} - -func targetMapToSlice(targetMap map[float64]float64) []target { - targets := make([]target, 0, len(targetMap)) - - for quantile, epsilon := range targetMap { - t := target{ - quantile: quantile, - epsilon: epsilon, - } - targets = append(targets, t) - } - - return targets -} - -// Stream computes quantiles for a stream of float64s. It is not thread-safe by -// design. Take care when using across multiple goroutines. -type Stream struct { - *stream - b Samples - sorted bool -} - -func newStream(ƒ invariant) *Stream { - x := &stream{ƒ: ƒ} - return &Stream{x, make(Samples, 0, 500), true} -} - -// Insert inserts v into the stream. -func (s *Stream) Insert(v float64) { - s.insert(Sample{Value: v, Width: 1}) -} - -func (s *Stream) insert(sample Sample) { - s.b = append(s.b, sample) - s.sorted = false - if len(s.b) == cap(s.b) { - s.flush() - } -} - -// Query returns the computed qth percentiles value. If s was created with -// NewTargeted, and q is not in the set of quantiles provided a priori, Query -// will return an unspecified result. -func (s *Stream) Query(q float64) float64 { - if !s.flushed() { - // Fast path when there hasn't been enough data for a flush; - // this also yields better accuracy for small sets of data. - l := len(s.b) - if l == 0 { - return 0 - } - i := int(math.Ceil(float64(l) * q)) - if i > 0 { - i -= 1 - } - s.maybeSort() - return s.b[i].Value - } - s.flush() - return s.stream.query(q) -} - -// Merge merges samples into the underlying streams samples. This is handy when -// merging multiple streams from separate threads, database shards, etc. -// -// ATTENTION: This method is broken and does not yield correct results. The -// underlying algorithm is not capable of merging streams correctly. -func (s *Stream) Merge(samples Samples) { - sort.Sort(samples) - s.stream.merge(samples) -} - -// Reset reinitializes and clears the list reusing the samples buffer memory. -func (s *Stream) Reset() { - s.stream.reset() - s.b = s.b[:0] -} - -// Samples returns stream samples held by s. -func (s *Stream) Samples() Samples { - if !s.flushed() { - return s.b - } - s.flush() - return s.stream.samples() -} - -// Count returns the total number of samples observed in the stream -// since initialization. 
-func (s *Stream) Count() int { - return len(s.b) + s.stream.count() -} - -func (s *Stream) flush() { - s.maybeSort() - s.stream.merge(s.b) - s.b = s.b[:0] -} - -func (s *Stream) maybeSort() { - if !s.sorted { - s.sorted = true - sort.Sort(s.b) - } -} - -func (s *Stream) flushed() bool { - return len(s.stream.l) > 0 -} - -type stream struct { - n float64 - l []Sample - ƒ invariant -} - -func (s *stream) reset() { - s.l = s.l[:0] - s.n = 0 -} - -func (s *stream) insert(v float64) { - s.merge(Samples{{v, 1, 0}}) -} - -func (s *stream) merge(samples Samples) { - // TODO(beorn7): This tries to merge not only individual samples, but - // whole summaries. The paper doesn't mention merging summaries at - // all. Unittests show that the merging is inaccurate. Find out how to - // do merges properly. - var r float64 - i := 0 - for _, sample := range samples { - for ; i < len(s.l); i++ { - c := s.l[i] - if c.Value > sample.Value { - // Insert at position i. - s.l = append(s.l, Sample{}) - copy(s.l[i+1:], s.l[i:]) - s.l[i] = Sample{ - sample.Value, - sample.Width, - math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1), - // TODO(beorn7): How to calculate delta correctly? - } - i++ - goto inserted - } - r += c.Width - } - s.l = append(s.l, Sample{sample.Value, sample.Width, 0}) - i++ - inserted: - s.n += sample.Width - r += sample.Width - } - s.compress() -} - -func (s *stream) count() int { - return int(s.n) -} - -func (s *stream) query(q float64) float64 { - t := math.Ceil(q * s.n) - t += math.Ceil(s.ƒ(s, t) / 2) - p := s.l[0] - var r float64 - for _, c := range s.l[1:] { - r += p.Width - if r+c.Width+c.Delta > t { - return p.Value - } - p = c - } - return p.Value -} - -func (s *stream) compress() { - if len(s.l) < 2 { - return - } - x := s.l[len(s.l)-1] - xi := len(s.l) - 1 - r := s.n - 1 - x.Width - - for i := len(s.l) - 2; i >= 0; i-- { - c := s.l[i] - if c.Width+x.Width+x.Delta <= s.ƒ(s, r) { - x.Width += c.Width - s.l[xi] = x - // Remove element at i. 
- copy(s.l[i:], s.l[i+1:]) - s.l = s.l[:len(s.l)-1] - xi -= 1 - } else { - x = c - xi = i - } - r -= c.Width - } -} - -func (s *stream) samples() Samples { - samples := make(Samples, len(s.l)) - copy(samples, s.l) - return samples -} diff --git a/vendor/github.com/caddyserver/caddy/v2/.gitignore b/vendor/github.com/caddyserver/caddy/v2/.gitignore deleted file mode 100644 index 5322a53f..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/.gitignore +++ /dev/null @@ -1,27 +0,0 @@ -_gitignore/ -*.log -Caddyfile -!caddyfile/ - -# artifacts from pprof tooling -*.prof -*.test - -# build artifacts and helpers -cmd/caddy/caddy -cmd/caddy/caddy.exe - -# mac specific -.DS_Store - -# go modules -vendor - -# goreleaser artifacts -dist -caddy-build -caddy-dist - -# IDE files -.idea/ -.vscode/ diff --git a/vendor/github.com/caddyserver/caddy/v2/.golangci.yml b/vendor/github.com/caddyserver/caddy/v2/.golangci.yml deleted file mode 100644 index 2c6acca5..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/.golangci.yml +++ /dev/null @@ -1,98 +0,0 @@ -linters-settings: - errcheck: - ignore: fmt:.*,io/ioutil:^Read.*,go.uber.org/zap/zapcore:^Add.* - ignoretests: true - -linters: - disable-all: true - enable: - - bodyclose - - deadcode - - errcheck - - gofmt - - goimports - - gosec - - gosimple - - govet - - ineffassign - - misspell - - prealloc - - staticcheck - - structcheck - - typecheck - - unconvert - - unused - - varcheck - # these are implicitly disabled: - # - asciicheck - # - depguard - # - dogsled - # - dupl - # - exhaustive - # - exportloopref - # - funlen - # - gci - # - gochecknoglobals - # - gochecknoinits - # - gocognit - # - goconst - # - gocritic - # - gocyclo - # - godot - # - godox - # - goerr113 - # - gofumpt - # - goheader - # - golint - # - gomnd - # - gomodguard - # - goprintffuncname - # - interfacer - # - lll - # - maligned - # - nakedret - # - nestif - # - nlreturn - # - noctx - # - nolintlint - # - rowserrcheck - # - scopelint - # - sqlclosecheck - # - stylecheck - # - testpackage - # - unparam - # - whitespace - # - wsl - -run: - # default concurrency is a available CPU number. - # concurrency: 4 # explicitly omit this value to fully utilize available resources. - deadline: 5m - issues-exit-code: 1 - tests: false - -# output configuration options -output: - format: 'colored-line-number' - print-issued-lines: true - print-linter-name: true - -issues: - exclude-rules: - # we aren't calling unknown URL - - text: "G107" # G107: Url provided to HTTP request as taint input - linters: - - gosec - # as a web server that's expected to handle any template, this is totally in the hands of the user. - - text: "G203" # G203: Use of unescaped data in HTML templates - linters: - - gosec - # we're shelling out to known commands, not relying on user-defined input. - - text: "G204" # G204: Audit use of command execution - linters: - - gosec - # the choice of weakrand is deliberate, hence the named import "weakrand" - - path: modules/caddyhttp/reverseproxy/selectionpolicies.go - text: "G404" # G404: Insecure random number source (rand) - linters: - - gosec diff --git a/vendor/github.com/caddyserver/caddy/v2/.goreleaser.yml b/vendor/github.com/caddyserver/caddy/v2/.goreleaser.yml deleted file mode 100644 index 22074316..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/.goreleaser.yml +++ /dev/null @@ -1,129 +0,0 @@ -before: - hooks: - # The build is done in this particular way to build Caddy in a designated directory named in .gitignore. 
- # This is so we can run goreleaser on tag without Git complaining of being dirty. The main.go in cmd/caddy directory - # cannot be built within that directory due to changes necessary for the build causing Git to be dirty, which - # subsequently causes gorleaser to refuse running. - - mkdir -p caddy-build - - cp cmd/caddy/main.go caddy-build/main.go - - cp ./go.mod caddy-build/go.mod - - sed -i.bkp 's|github.com/caddyserver/caddy/v2|caddy|g' ./caddy-build/go.mod - # GoReleaser doesn't seem to offer {{.Tag}} at this stage, so we have to embed it into the env - # so we run: TAG=$(git describe --abbrev=0) goreleaser release --rm-dist --skip-publish --skip-validate - - go mod edit -require=github.com/caddyserver/caddy/v2@{{.Env.TAG}} ./caddy-build/go.mod - # as of Go 1.16, `go` commands no longer automatically change go.{mod,sum}. We now have to explicitly - # run `go mod tidy`. The `/bin/sh -c '...'` is because goreleaser can't find cd in PATH without shell invocation. - - /bin/sh -c 'cd ./caddy-build && go mod tidy' - - git clone --depth 1 https://github.com/caddyserver/dist caddy-dist - - go mod download - -builds: -- env: - - CGO_ENABLED=0 - - GO111MODULE=on - main: main.go - dir: ./caddy-build - binary: caddy - goos: - - darwin - - linux - - windows - - freebsd - goarch: - - amd64 - - arm - - arm64 - - s390x - - ppc64le - goarm: - - 5 - - 6 - - 7 - ignore: - - goos: darwin - goarch: arm - - goos: darwin - goarch: ppc64le - - goos: darwin - goarch: s390x - - goos: windows - goarch: ppc64le - - goos: windows - goarch: s390x - - goos: freebsd - goarch: ppc64le - - goos: freebsd - goarch: s390x - - goos: freebsd - goarch: arm - goarm: 5 - flags: - - -trimpath - ldflags: - - -s -w - -archives: - - format_overrides: - - goos: windows - format: zip - replacements: - darwin: mac -checksum: - algorithm: sha512 - -nfpms: - - id: default - package_name: caddy - - vendor: Light Code Labs - homepage: https://caddyserver.com - maintainer: Matthew Holt - description: | - Caddy - Powerful, enterprise-ready, open source web server with automatic HTTPS written in Go - license: Apache 2.0 - - formats: - - deb - # - rpm - - bindir: /usr/bin - contents: - - src: ./caddy-dist/init/caddy.service - dst: /lib/systemd/system/caddy.service - - - src: ./caddy-dist/init/caddy-api.service - dst: /lib/systemd/system/caddy-api.service - - - src: ./caddy-dist/welcome/index.html - dst: /usr/share/caddy/index.html - - - src: ./caddy-dist/scripts/completions/bash-completion - dst: /etc/bash_completion.d/caddy - - - src: ./caddy-dist/config/Caddyfile - dst: /etc/caddy/Caddyfile - type: config - - scripts: - postinstall: ./caddy-dist/scripts/postinstall.sh - preremove: ./caddy-dist/scripts/preremove.sh - postremove: ./caddy-dist/scripts/postremove.sh - - -release: - github: - owner: caddyserver - name: caddy - draft: true - prerelease: auto - -changelog: - sort: asc - filters: - exclude: - - '^chore:' - - '^ci:' - - '^docs?:' - - '^readme:' - - '^tests?:' - - '^\w+\s+' # a hack to remove commit messages without colons thus don't correspond to a package diff --git a/vendor/github.com/caddyserver/caddy/v2/AUTHORS b/vendor/github.com/caddyserver/caddy/v2/AUTHORS deleted file mode 100644 index 3635dd88..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/AUTHORS +++ /dev/null @@ -1,10 +0,0 @@ -# This is the official list of Caddy Authors for copyright purposes. -# Authors may be either individual people or legal entities. -# -# Not all individual contributors are authors. 
For the full list of -# contributors, refer to the project's page on GitHub or the repo's -# commit history. - -Matthew Holt -Light Code Labs -Ardan Labs diff --git a/vendor/github.com/caddyserver/caddy/v2/LICENSE b/vendor/github.com/caddyserver/caddy/v2/LICENSE deleted file mode 100644 index d6456956..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. 
We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/caddyserver/caddy/v2/README.md b/vendor/github.com/caddyserver/caddy/v2/README.md deleted file mode 100644 index 5f00e323..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/README.md +++ /dev/null @@ -1,186 +0,0 @@ -

-<!-- Banner HTML stripped during extraction. Recoverable content: the Caddy logo,
-     an "a project of ZeroSSL" byline, and badge links to @caddyserver on Twitter,
-     the Caddy Forum, Caddy on Sourcegraph, and Cloudsmith. -->
-
-Every site on HTTPS
-
-Caddy is an extensible server platform that uses TLS by default.
-
-Releases · Documentation · Get Help

- - - -### Menu - -- [Features](#features) -- [Install](#install) -- [Build from source](#build-from-source) - - [For development](#for-development) - - [With version information and/or plugins](#with-version-information-andor-plugins) -- [Quick start](#quick-start) -- [Overview](#overview) -- [Full documentation](#full-documentation) -- [Getting help](#getting-help) -- [About](#about) - -

-<!-- "Powered by CertMagic" badge (HTML stripped) -->

- - -## [Features](https://caddyserver.com/v2) - -- **Easy configuration** with the [Caddyfile](https://caddyserver.com/docs/caddyfile) -- **Powerful configuration** with its [native JSON config](https://caddyserver.com/docs/json/) -- **Dynamic configuration** with the [JSON API](https://caddyserver.com/docs/api) -- [**Config adapters**](https://caddyserver.com/docs/config-adapters) if you don't like JSON -- **Automatic HTTPS** by default - - [ZeroSSL](https://zerossl.com) and [Let's Encrypt](https://letsencrypt.org) for public names - - Fully-managed local CA for internal names & IPs - - Can coordinate with other Caddy instances in a cluster - - Multi-issuer fallback -- **Stays up when other servers go down** due to TLS/OCSP/certificate-related issues -- **Production-ready** after serving trillions of requests and managing millions of TLS certificates -- **Scales to tens of thousands of sites** ... and probably more -- **HTTP/1.1, HTTP/2, and experimental HTTP/3** support -- **Highly extensible** [modular architecture](https://caddyserver.com/docs/architecture) lets Caddy do anything without bloat -- **Runs anywhere** with **no external dependencies** (not even libc) -- Written in Go, a language with higher **memory safety guarantees** than other servers -- Actually **fun to use** -- So, so much more to [discover](https://caddyserver.com/v2) - -## Install - -The simplest, cross-platform way is to download from [GitHub Releases](https://github.com/caddyserver/caddy/releases) and place the executable file in your PATH. - -For other install options, see https://caddyserver.com/docs/install. - -## Build from source - -Requirements: - -- [Go 1.16 or newer](https://golang.org/dl/) - -### For development - -_**Note:** These steps [will not embed proper version information](https://github.com/golang/go/issues/29228). For that, please follow the instructions in the next section._ - -```bash -$ git clone "https://github.com/caddyserver/caddy.git" -$ cd caddy/cmd/caddy/ -$ go build -``` - -When you run Caddy, it may try to bind to low ports unless otherwise specified in your config. If your OS requires elevated privileges for this, you will need to give your new binary permission to do so. On Linux, this can be done easily with: `sudo setcap cap_net_bind_service=+ep ./caddy` - -If you prefer to use `go run` which only creates temporary binaries, you can still do this with the included `setcap.sh` like so: - -```bash -$ go run -exec ./setcap.sh main.go -``` - -If you don't want to type your password for `setcap`, use `sudo visudo` to edit your sudoers file and allow your user account to run that command without a password, for example: - -``` -username ALL=(ALL:ALL) NOPASSWD: /usr/sbin/setcap -``` - -replacing `username` with your actual username. Please be careful and only do this if you know what you are doing! We are only qualified to document how to use Caddy, not Go tooling or your computer, and we are providing these instructions for convenience only; please learn how to use your own computer at your own risk and make any needful adjustments. - -### With version information and/or plugins - -Using [our builder tool, `xcaddy`](https://github.com/caddyserver/xcaddy)... - -``` -$ xcaddy build -``` - -...the following steps are automated: - -1. Create a new folder: `mkdir caddy` -2. Change into it: `cd caddy` -3. Copy [Caddy's main.go](https://github.com/caddyserver/caddy/blob/master/cmd/caddy/main.go) into the empty folder. Add imports for any custom plugins you want to add. -4. 
Initialize a Go module: `go mod init caddy` -5. (Optional) Pin Caddy version: `go get github.com/caddyserver/caddy/v2@version` replacing `version` with a git tag, commit, or branch name. -6. (Optional) Add plugins by adding their import: `_ "import/path/here"` -7. Compile: `go build` - - - - -## Quick start - -The [Caddy website](https://caddyserver.com/docs/) has documentation that includes tutorials, quick-start guides, reference, and more. - -**We recommend that all users -- regardless of experience level -- do our [Getting Started](https://caddyserver.com/docs/getting-started) guide to become familiar with using Caddy.** - -If you've only got a minute, [the website has several quick-start tutorials](https://caddyserver.com/docs/quick-starts) to choose from! However, after finishing a quick-start tutorial, please read more documentation to understand how the software works. 🙂 - - - - -## Overview - -Caddy is most often used as an HTTPS server, but it is suitable for any long-running Go program. First and foremost, it is a platform to run Go applications. Caddy "apps" are just Go programs that are implemented as Caddy modules. Two apps -- `tls` and `http` -- ship standard with Caddy. - -Caddy apps instantly benefit from [automated documentation](https://caddyserver.com/docs/json/), graceful on-line [config changes via API](https://caddyserver.com/docs/api), and unification with other Caddy apps. - -Although [JSON](https://caddyserver.com/docs/json/) is Caddy's native config language, Caddy can accept input from [config adapters](https://caddyserver.com/docs/config-adapters) which can essentially convert any config format of your choice into JSON: Caddyfile, JSON 5, YAML, TOML, NGINX config, and more. - -The primary way to configure Caddy is through [its API](https://caddyserver.com/docs/api), but if you prefer config files, the [command-line interface](https://caddyserver.com/docs/command-line) supports those too. - -Caddy exposes an unprecedented level of control compared to any web server in existence. In Caddy, you are usually setting the actual values of the initialized types in memory that power everything from your HTTP handlers and TLS handshakes to your storage medium. Caddy is also ridiculously extensible, with a powerful plugin system that makes vast improvements over other web servers. - -To wield the power of this design, you need to know how the config document is structured. Please see [our documentation site](https://caddyserver.com/docs/) for details about [Caddy's config structure](https://caddyserver.com/docs/json/). - -Nearly all of Caddy's configuration is contained in a single config document, rather than being scattered across CLI flags and env variables and a configuration file as with other web servers. This makes managing your server config more straightforward and reduces hidden variables/factors. - - -## Full documentation - -Our website has complete documentation: - -**https://caddyserver.com/docs/** - -The docs are also open source. You can contribute to them here: https://github.com/caddyserver/website - - - -## Getting help - -- We **strongly recommend** that all professionals or companies using Caddy get a support contract through [Ardan Labs](https://www.ardanlabs.com/my/contact-us?dd=caddy) before help is needed. - -- A [sponsorship](https://github.com/sponsors/mholt) goes a long way! If Caddy is benefitting your company, please consider a sponsorship! 
This not only helps fund full-time work to ensure the longevity of the project, it's also a great look for your company to your customers and potential customers! - -- Individuals can exchange help for free on our community forum at https://caddy.community. Remember that people give help out of their spare time and good will. The best way to get help is to give it first! - -Please use our [issue tracker](https://github.com/caddyserver/caddy/issues) only for bug reports and feature requests, i.e. actionable development items (support questions will usually be referred to the forums). - - - -## About - -**The name "Caddy" is trademarked.** The name of the software is "Caddy", not "Caddy Server" or "CaddyServer". Please call it "Caddy" or, if you wish to clarify, "the Caddy web server". Caddy is a registered trademark of Stack Holdings GmbH. - -- _Project on Twitter: [@caddyserver](https://twitter.com/caddyserver)_ -- _Author on Twitter: [@mholt6](https://twitter.com/mholt6)_ - -Caddy is a project of [ZeroSSL](https://zerossl.com), a Stack Holdings company. - -Debian package repository hosting is graciously provided by [Cloudsmith](https://cloudsmith.com). Cloudsmith is the only fully hosted, cloud-native, universal package management solution, that enables your organization to create, store and share packages in any format, to any place, with total confidence. \ No newline at end of file diff --git a/vendor/github.com/caddyserver/caddy/v2/admin.go b/vendor/github.com/caddyserver/caddy/v2/admin.go deleted file mode 100644 index fb451682..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/admin.go +++ /dev/null @@ -1,1245 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package caddy - -import ( - "bytes" - "context" - "crypto" - "crypto/tls" - "crypto/x509" - "encoding/base64" - "encoding/json" - "errors" - "expvar" - "fmt" - "io" - "io/ioutil" - "net" - "net/http" - "net/http/pprof" - "net/url" - "os" - "path" - "regexp" - "strconv" - "strings" - "sync" - "time" - - "github.com/caddyserver/caddy/v2/notify" - "github.com/caddyserver/certmagic" - "github.com/prometheus/client_golang/prometheus" - "go.uber.org/zap" -) - -// AdminConfig configures Caddy's API endpoint, which is used -// to manage Caddy while it is running. -type AdminConfig struct { - // If true, the admin endpoint will be completely disabled. - // Note that this makes any runtime changes to the config - // impossible, since the interface to do so is through the - // admin endpoint. - Disabled bool `json:"disabled,omitempty"` - - // The address to which the admin endpoint's listener should - // bind itself. Can be any single network address that can be - // parsed by Caddy. Default: localhost:2019 - Listen string `json:"listen,omitempty"` - - // If true, CORS headers will be emitted, and requests to the - // API will be rejected if their `Host` and `Origin` headers - // do not match the expected value(s). 
Use `origins` to - // customize which origins/hosts are allowed. If `origins` is - // not set, the listen address is the only value allowed by - // default. Enforced only on local (plaintext) endpoint. - EnforceOrigin bool `json:"enforce_origin,omitempty"` - - // The list of allowed origins/hosts for API requests. Only needed - // if accessing the admin endpoint from a host different from the - // socket's network interface or if `enforce_origin` is true. If not - // set, the listener address will be the default value. If set but - // empty, no origins will be allowed. Enforced only on local - // (plaintext) endpoint. - Origins []string `json:"origins,omitempty"` - - // Options pertaining to configuration management. - Config *ConfigSettings `json:"config,omitempty"` - - // Options that establish this server's identity. Identity refers to - // credentials which can be used to uniquely identify and authenticate - // this server instance. This is required if remote administration is - // enabled (but does not require remote administration to be enabled). - // Default: no identity management. - Identity *IdentityConfig `json:"identity,omitempty"` - - // Options pertaining to remote administration. By default, remote - // administration is disabled. If enabled, identity management must - // also be configured, as that is how the endpoint is secured. - // See the neighboring "identity" object. - // - // EXPERIMENTAL: This feature is subject to change. - Remote *RemoteAdmin `json:"remote,omitempty"` -} - -// ConfigSettings configures the management of configuration. -type ConfigSettings struct { - // Whether to keep a copy of the active config on disk. Default is true. - // Note that "pulled" dynamic configs (using the neighboring "load" module) - // are not persisted; only configs that are pushed to Caddy get persisted. - Persist *bool `json:"persist,omitempty"` - - // Loads a configuration to use. This is helpful if your configs are - // managed elsewhere, and you want Caddy to pull its config dynamically - // when it starts. The pulled config completely replaces the current - // one, just like any other config load. It is an error if a pulled - // config is configured to pull another config. - // - // EXPERIMENTAL: Subject to change. - LoadRaw json.RawMessage `json:"load,omitempty" caddy:"namespace=caddy.config_loaders inline_key=module"` - - // The interval to pull config. With a non-zero value, will pull config - // from config loader (eg. a http loader) with given interval. - // - // EXPERIMENTAL: Subject to change. - LoadInterval Duration `json:"load_interval,omitempty"` -} - -// IdentityConfig configures management of this server's identity. An identity -// consists of credentials that uniquely verify this instance; for example, -// TLS certificates (public + private key pairs). -type IdentityConfig struct { - // List of names or IP addresses which refer to this server. - // Certificates will be obtained for these identifiers so - // secure TLS connections can be made using them. - Identifiers []string `json:"identifiers,omitempty"` - - // Issuers that can provide this admin endpoint its identity - // certificate(s). Default: ACME issuers configured for - // ZeroSSL and Let's Encrypt. Be sure to change this if you - // require credentials for private identifiers. - IssuersRaw []json.RawMessage `json:"issuers,omitempty" caddy:"namespace=tls.issuance inline_key=module"` - - issuers []certmagic.Issuer -} - -// RemoteAdmin enables and configures remote administration. 
If enabled, -// a secure listener enforcing mutual TLS authentication will be started -// on a different port from the standard plaintext admin server. -// -// This endpoint is secured using identity management, which must be -// configured separately (because identity management does not depend -// on remote administration). See the admin/identity config struct. -// -// EXPERIMENTAL: Subject to change. -type RemoteAdmin struct { - // The address on which to start the secure listener. - // Default: :2021 - Listen string `json:"listen,omitempty"` - - // List of access controls for this secure admin endpoint. - // This configures TLS mutual authentication (i.e. authorized - // client certificates), but also application-layer permissions - // like which paths and methods each identity is authorized for. - AccessControl []*AdminAccess `json:"access_control,omitempty"` -} - -// AdminAccess specifies what permissions an identity or group -// of identities are granted. -type AdminAccess struct { - // Base64-encoded DER certificates containing public keys to accept. - // (The contents of PEM certificate blocks are base64-encoded DER.) - // Any of these public keys can appear in any part of a verified chain. - PublicKeys []string `json:"public_keys,omitempty"` - - // Limits what the associated identities are allowed to do. - // If unspecified, all permissions are granted. - Permissions []AdminPermissions `json:"permissions,omitempty"` - - publicKeys []crypto.PublicKey -} - -// AdminPermissions specifies what kinds of requests are allowed -// to be made to the admin endpoint. -type AdminPermissions struct { - // The API paths allowed. Paths are simple prefix matches. - // Any subpath of the specified paths will be allowed. - Paths []string `json:"paths,omitempty"` - - // The HTTP methods allowed for the given paths. - Methods []string `json:"methods,omitempty"` -} - -// newAdminHandler reads admin's config and returns an http.Handler suitable -// for use in an admin endpoint server, which will be listening on listenAddr. 
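
The structs above are plain JSON-config carriers, so the access-control model is easiest to see by building one. A rough sketch (the certificate value and listener here are hypothetical placeholders, and identity management, which remote admin requires, is omitted):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/caddyserver/caddy/v2"
)

func main() {
	// Hypothetical base64-DER certificate; in practice this is the
	// base64 payload of a PEM CERTIFICATE block for the client.
	const clientCert = "MIIB...snipped..."

	cfg := caddy.AdminConfig{
		Remote: &caddy.RemoteAdmin{
			Listen: ":2021", // same as DefaultRemoteAdminListen
			AccessControl: []*caddy.AdminAccess{{
				PublicKeys: []string{clientCert},
				Permissions: []caddy.AdminPermissions{{
					Paths:   []string{"/config/"}, // prefix match
					Methods: []string{"GET"},      // read-only
				}},
			}},
		},
	}

	out, err := json.MarshalIndent(cfg, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```
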
-func (admin AdminConfig) newAdminHandler(addr NetworkAddress, remote bool) adminHandler { - muxWrap := adminHandler{mux: http.NewServeMux()} - - // secure the local or remote endpoint respectively - if remote { - muxWrap.remoteControl = admin.Remote - } else { - muxWrap.enforceHost = !addr.isWildcardInterface() - muxWrap.allowedOrigins = admin.allowedOrigins(addr) - } - - addRouteWithMetrics := func(pattern string, handlerLabel string, h http.Handler) { - labels := prometheus.Labels{"path": pattern, "handler": handlerLabel} - h = instrumentHandlerCounter( - adminMetrics.requestCount.MustCurryWith(labels), - h, - ) - muxWrap.mux.Handle(pattern, h) - } - // addRoute just calls muxWrap.mux.Handle after - // wrapping the handler with error handling - addRoute := func(pattern string, handlerLabel string, h AdminHandler) { - wrapper := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - err := h.ServeHTTP(w, r) - if err != nil { - labels := prometheus.Labels{ - "path": pattern, - "handler": handlerLabel, - "method": strings.ToUpper(r.Method), - } - adminMetrics.requestErrors.With(labels).Inc() - } - muxWrap.handleError(w, r, err) - }) - addRouteWithMetrics(pattern, handlerLabel, wrapper) - } - - const handlerLabel = "admin" - - // register standard config control endpoints - addRoute("/"+rawConfigKey+"/", handlerLabel, AdminHandlerFunc(handleConfig)) - addRoute("/id/", handlerLabel, AdminHandlerFunc(handleConfigID)) - addRoute("/stop", handlerLabel, AdminHandlerFunc(handleStop)) - - // register debugging endpoints - addRouteWithMetrics("/debug/pprof/", handlerLabel, http.HandlerFunc(pprof.Index)) - addRouteWithMetrics("/debug/pprof/cmdline", handlerLabel, http.HandlerFunc(pprof.Cmdline)) - addRouteWithMetrics("/debug/pprof/profile", handlerLabel, http.HandlerFunc(pprof.Profile)) - addRouteWithMetrics("/debug/pprof/symbol", handlerLabel, http.HandlerFunc(pprof.Symbol)) - addRouteWithMetrics("/debug/pprof/trace", handlerLabel, http.HandlerFunc(pprof.Trace)) - addRouteWithMetrics("/debug/vars", handlerLabel, expvar.Handler()) - - // register third-party module endpoints - for _, m := range GetModules("admin.api") { - router := m.New().(AdminRouter) - handlerLabel := m.ID.Name() - for _, route := range router.Routes() { - addRoute(route.Pattern, handlerLabel, route.Handler) - } - } - - return muxWrap -} - -// allowedOrigins returns a list of origins that are allowed. -// If admin.Origins is nil (null), the provided listen address -// will be used as the default origin. If admin.Origins is -// empty, no origins will be allowed, effectively bricking the -// endpoint for non-unix-socket endpoints, but whatever. -func (admin AdminConfig) allowedOrigins(addr NetworkAddress) []string { - uniqueOrigins := make(map[string]struct{}) - for _, o := range admin.Origins { - uniqueOrigins[o] = struct{}{} - } - if admin.Origins == nil { - if addr.isLoopback() { - if addr.IsUnixNetwork() { - // RFC 2616, Section 14.26: - // "A client MUST include a Host header field in all HTTP/1.1 request - // messages. If the requested URI does not include an Internet host - // name for the service being requested, then the Host header field MUST - // be given with an empty value." 
- uniqueOrigins[""] = struct{}{} - } else { - uniqueOrigins[net.JoinHostPort("localhost", addr.port())] = struct{}{} - uniqueOrigins[net.JoinHostPort("::1", addr.port())] = struct{}{} - uniqueOrigins[net.JoinHostPort("127.0.0.1", addr.port())] = struct{}{} - } - } - if !addr.IsUnixNetwork() { - uniqueOrigins[addr.JoinHostPort(0)] = struct{}{} - } - } - allowed := make([]string, 0, len(uniqueOrigins)) - for origin := range uniqueOrigins { - allowed = append(allowed, origin) - } - return allowed -} - -// replaceLocalAdminServer replaces the running local admin server -// according to the relevant configuration in cfg. If no configuration -// for the admin endpoint exists in cfg, a default one is used, so -// that there is always an admin server (unless it is explicitly -// configured to be disabled). -func replaceLocalAdminServer(cfg *Config) error { - // always be sure to close down the old admin endpoint - // as gracefully as possible, even if the new one is - // disabled -- careful to use reference to the current - // (old) admin endpoint since it will be different - // when the function returns - oldAdminServer := localAdminServer - defer func() { - // do the shutdown asynchronously so that any - // current API request gets a response; this - // goroutine may last a few seconds - if oldAdminServer != nil { - go func(oldAdminServer *http.Server) { - err := stopAdminServer(oldAdminServer) - if err != nil { - Log().Named("admin").Error("stopping current admin endpoint", zap.Error(err)) - } - }(oldAdminServer) - } - }() - - // always get a valid admin config - adminConfig := DefaultAdminConfig - if cfg != nil && cfg.Admin != nil { - adminConfig = cfg.Admin - } - - // if new admin endpoint is to be disabled, we're done - if adminConfig.Disabled { - Log().Named("admin").Warn("admin endpoint disabled") - return nil - } - - // extract a singular listener address - addr, err := parseAdminListenAddr(adminConfig.Listen, DefaultAdminListen) - if err != nil { - return err - } - - handler := adminConfig.newAdminHandler(addr, false) - - ln, err := Listen(addr.Network, addr.JoinHostPort(0)) - if err != nil { - return err - } - - serverMu.Lock() - localAdminServer = &http.Server{ - Addr: addr.String(), // for logging purposes only - Handler: handler, - ReadTimeout: 10 * time.Second, - ReadHeaderTimeout: 5 * time.Second, - IdleTimeout: 60 * time.Second, - MaxHeaderBytes: 1024 * 64, - } - serverMu.Unlock() - - adminLogger := Log().Named("admin") - go func() { - serverMu.Lock() - server := localAdminServer - serverMu.Unlock() - if err := server.Serve(ln); !errors.Is(err, http.ErrServerClosed) { - adminLogger.Error("admin server shutdown for unknown reason", zap.Error(err)) - } - }() - - adminLogger.Info("admin endpoint started", - zap.String("address", addr.String()), - zap.Bool("enforce_origin", adminConfig.EnforceOrigin), - zap.Strings("origins", handler.allowedOrigins)) - - if !handler.enforceHost { - adminLogger.Warn("admin endpoint on open interface; host checking disabled", - zap.String("address", addr.String())) - } - - return nil -} - -// manageIdentity sets up automated identity management for this server. 
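
With the local endpoint running, the admin API is plain HTTP on the configured address (localhost:2019 by default; see DefaultAdminListen further down). A minimal client sketch against the /config/ route registered in newAdminHandler above:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// GET /config/ returns the currently running config as JSON.
	resp, err := http.Get("http://localhost:2019/config/")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Printf("status %d: %s\n", resp.StatusCode, body)
}
```
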
-func manageIdentity(ctx Context, cfg *Config) error { - if cfg == nil || cfg.Admin == nil || cfg.Admin.Identity == nil { - return nil - } - - // set default issuers; this is pretty hacky because we can't - // import the caddytls package -- but it works - if cfg.Admin.Identity.IssuersRaw == nil { - cfg.Admin.Identity.IssuersRaw = []json.RawMessage{ - json.RawMessage(`{"module": "zerossl"}`), - json.RawMessage(`{"module": "acme"}`), - } - } - - // load and provision issuer modules - if cfg.Admin.Identity.IssuersRaw != nil { - val, err := ctx.LoadModule(cfg.Admin.Identity, "IssuersRaw") - if err != nil { - return fmt.Errorf("loading identity issuer modules: %s", err) - } - for _, issVal := range val.([]interface{}) { - cfg.Admin.Identity.issuers = append(cfg.Admin.Identity.issuers, issVal.(certmagic.Issuer)) - } - } - - // we'll make a new cache when we make the CertMagic config, so stop any previous cache - if identityCertCache != nil { - identityCertCache.Stop() - } - - logger := Log().Named("admin.identity") - cmCfg := cfg.Admin.Identity.certmagicConfig(logger, true) - - // issuers have circular dependencies with the configs because, - // as explained in the caddytls package, they need access to the - // correct storage and cache to solve ACME challenges - for _, issuer := range cfg.Admin.Identity.issuers { - // avoid import cycle with caddytls package, so manually duplicate the interface here, yuck - if annoying, ok := issuer.(interface{ SetConfig(cfg *certmagic.Config) }); ok { - annoying.SetConfig(cmCfg) - } - } - - // obtain and renew server identity certificate(s) - return cmCfg.ManageAsync(ctx, cfg.Admin.Identity.Identifiers) -} - -// replaceRemoteAdminServer replaces the running remote admin server -// according to the relevant configuration in cfg. It stops any previous -// remote admin server and only starts a new one if configured. 
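
In JSON terms, the identity block consumed by manageIdentity looks roughly like the following. This is a sketch: the hostname is a hypothetical placeholder, and omitting "issuers" entirely falls back to the ZeroSSL/ACME defaults injected above.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/caddyserver/caddy/v2"
)

func main() {
	// The issuers shown match the defaults manageIdentity injects
	// when IssuersRaw is nil.
	raw := []byte(`{
		"identifiers": ["caddy.internal.example.com"],
		"issuers": [
			{"module": "zerossl"},
			{"module": "acme"}
		]
	}`)

	var ident caddy.IdentityConfig
	if err := json.Unmarshal(raw, &ident); err != nil {
		panic(err)
	}
	fmt.Printf("%d identifier(s), %d issuer(s)\n",
		len(ident.Identifiers), len(ident.IssuersRaw))
}
```
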
-func replaceRemoteAdminServer(ctx Context, cfg *Config) error { - if cfg == nil { - return nil - } - - remoteLogger := Log().Named("admin.remote") - - oldAdminServer := remoteAdminServer - defer func() { - if oldAdminServer != nil { - go func(oldAdminServer *http.Server) { - err := stopAdminServer(oldAdminServer) - if err != nil { - Log().Named("admin").Error("stopping current secure admin endpoint", zap.Error(err)) - } - }(oldAdminServer) - } - }() - - if cfg.Admin == nil || cfg.Admin.Remote == nil { - return nil - } - - addr, err := parseAdminListenAddr(cfg.Admin.Remote.Listen, DefaultRemoteAdminListen) - if err != nil { - return err - } - - // make the HTTP handler but disable Host/Origin enforcement - // because we are using TLS authentication instead - handler := cfg.Admin.newAdminHandler(addr, true) - - // create client certificate pool for TLS mutual auth, and extract public keys - // so that we can enforce access controls at the application layer - clientCertPool := x509.NewCertPool() - for i, accessControl := range cfg.Admin.Remote.AccessControl { - for j, certBase64 := range accessControl.PublicKeys { - cert, err := decodeBase64DERCert(certBase64) - if err != nil { - return fmt.Errorf("access control %d public key %d: parsing base64 certificate DER: %v", i, j, err) - } - accessControl.publicKeys = append(accessControl.publicKeys, cert.PublicKey) - clientCertPool.AddCert(cert) - } - } - - // create TLS config that will enforce mutual authentication - cmCfg := cfg.Admin.Identity.certmagicConfig(remoteLogger, false) - tlsConfig := cmCfg.TLSConfig() - tlsConfig.NextProtos = nil // this server does not solve ACME challenges - tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert - tlsConfig.ClientCAs = clientCertPool - - // convert logger to stdlib so it can be used by HTTP server - serverLogger, err := zap.NewStdLogAt(remoteLogger, zap.DebugLevel) - if err != nil { - return err - } - - serverMu.Lock() - // create secure HTTP server - remoteAdminServer = &http.Server{ - Addr: addr.String(), // for logging purposes only - Handler: handler, - TLSConfig: tlsConfig, - ReadTimeout: 10 * time.Second, - ReadHeaderTimeout: 5 * time.Second, - IdleTimeout: 60 * time.Second, - MaxHeaderBytes: 1024 * 64, - ErrorLog: serverLogger, - } - serverMu.Unlock() - - // start listener - ln, err := Listen(addr.Network, addr.JoinHostPort(0)) - if err != nil { - return err - } - ln = tls.NewListener(ln, tlsConfig) - - go func() { - serverMu.Lock() - server := remoteAdminServer - serverMu.Unlock() - if err := server.Serve(ln); !errors.Is(err, http.ErrServerClosed) { - remoteLogger.Error("admin remote server shutdown for unknown reason", zap.Error(err)) - } - }() - - remoteLogger.Info("secure admin remote control endpoint started", - zap.String("address", addr.String())) - - return nil -} - -func (ident *IdentityConfig) certmagicConfig(logger *zap.Logger, makeCache bool) *certmagic.Config { - if ident == nil { - // user might not have configured identity; that's OK, we can still make a - // certmagic config, although it'll be mostly useless for remote management - ident = new(IdentityConfig) - } - cmCfg := &certmagic.Config{ - Storage: DefaultStorage, // do not act as part of a cluster (this is for the server's local identity) - Logger: logger, - Issuers: ident.issuers, - } - if makeCache { - identityCertCache = certmagic.NewCache(certmagic.CacheOptions{ - GetConfigForCert: func(certmagic.Certificate) (*certmagic.Config, error) { - return cmCfg, nil - }, - }) - } - return certmagic.New(identityCertCache, 
*cmCfg) -} - -// IdentityCredentials returns this instance's configured, managed identity credentials -// that can be used in TLS client authentication. -func (ctx Context) IdentityCredentials(logger *zap.Logger) ([]tls.Certificate, error) { - if ctx.cfg == nil || ctx.cfg.Admin == nil || ctx.cfg.Admin.Identity == nil { - return nil, fmt.Errorf("no server identity configured") - } - ident := ctx.cfg.Admin.Identity - if len(ident.Identifiers) == 0 { - return nil, fmt.Errorf("no identifiers configured") - } - if logger == nil { - logger = Log() - } - magic := ident.certmagicConfig(logger, false) - return magic.ClientCredentials(ctx, ident.Identifiers) -} - -// enforceAccessControls enforces application-layer access controls for r based on remote. -// It expects that the TLS server has already established at least one verified chain of -// trust, and then looks for a matching, authorized public key that is allowed to access -// the defined path(s) using the defined method(s). -func (remote RemoteAdmin) enforceAccessControls(r *http.Request) error { - for _, chain := range r.TLS.VerifiedChains { - for _, peerCert := range chain { - for _, adminAccess := range remote.AccessControl { - for _, allowedKey := range adminAccess.publicKeys { - // see if we found a matching public key; the TLS server already verified the chain - // so we know the client possesses the associated private key; this handy interface - // doesn't appear to be defined anywhere in the std lib, but was implemented here: - // https://github.com/golang/go/commit/b5f2c0f50297fa5cd14af668ddd7fd923626cf8c - comparer, ok := peerCert.PublicKey.(interface{ Equal(crypto.PublicKey) bool }) - if !ok || !comparer.Equal(allowedKey) { - continue - } - - // key recognized; make sure its HTTP request is permitted - for _, accessPerm := range adminAccess.Permissions { - // verify method - methodFound := accessPerm.Methods == nil - for _, method := range accessPerm.Methods { - if method == r.Method { - methodFound = true - break - } - } - if !methodFound { - return APIError{ - HTTPStatus: http.StatusForbidden, - Message: "not authorized to use this method", - } - } - - // verify path - pathFound := accessPerm.Paths == nil - for _, allowedPath := range accessPerm.Paths { - if strings.HasPrefix(r.URL.Path, allowedPath) { - pathFound = true - break - } - } - if !pathFound { - return APIError{ - HTTPStatus: http.StatusForbidden, - Message: "not authorized to access this path", - } - } - } - - // public key authorized, method and path allowed - return nil - } - } - } - } - - // in theory, this should never happen; with an unverified chain, the TLS server - // should not accept the connection in the first place, and the acceptable cert - // pool is configured using the same list of public keys we verify against - return APIError{ - HTTPStatus: http.StatusUnauthorized, - Message: "client identity not authorized", - } -} - -func stopAdminServer(srv *http.Server) error { - if srv == nil { - return fmt.Errorf("no admin server") - } - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - err := srv.Shutdown(ctx) - if err != nil { - return fmt.Errorf("shutting down admin server: %v", err) - } - Log().Named("admin").Info("stopped previous server", zap.String("address", srv.Addr)) - return nil -} - -// AdminRouter is a type which can return routes for the admin API. -type AdminRouter interface { - Routes() []AdminRoute -} - -// AdminRoute represents a route for the admin endpoint. 
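
The authorization rules inside enforceAccessControls are easy to miss among the nested loops: a nil Methods or Paths list grants everything, and paths are simple prefix matches. A standalone sketch of just that decision logic (not the actual Caddy code, which additionally verifies the client's public key against the TLS chain):

```go
package main

import (
	"fmt"
	"strings"
)

// permitted mirrors the method/path checks from enforceAccessControls:
// nil slices allow everything; paths match by prefix.
func permitted(methods, paths []string, method, path string) bool {
	methodOK := methods == nil
	for _, m := range methods {
		if m == method {
			methodOK = true
			break
		}
	}
	pathOK := paths == nil
	for _, p := range paths {
		if strings.HasPrefix(path, p) {
			pathOK = true
			break
		}
	}
	return methodOK && pathOK
}

func main() {
	fmt.Println(permitted([]string{"GET"}, []string{"/config/"}, "GET", "/config/apps")) // true
	fmt.Println(permitted([]string{"GET"}, []string{"/config/"}, "POST", "/config/"))    // false
	fmt.Println(permitted(nil, nil, "DELETE", "/id/foo"))                                // true
}
```
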
-type AdminRoute struct { - Pattern string - Handler AdminHandler -} - -type adminHandler struct { - mux *http.ServeMux - - // security for local (plaintext) endpoint, on by default - enforceOrigin bool - enforceHost bool - allowedOrigins []string - - // security for remote/encrypted endpoint - remoteControl *RemoteAdmin -} - -// ServeHTTP is the external entry point for API requests. -// It will only be called once per request. -func (h adminHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - log := Log().Named("admin.api").With( - zap.String("method", r.Method), - zap.String("host", r.Host), - zap.String("uri", r.RequestURI), - zap.String("remote_addr", r.RemoteAddr), - zap.Reflect("headers", r.Header), - ) - if r.TLS != nil { - log = log.With( - zap.Bool("secure", true), - zap.Int("verified_chains", len(r.TLS.VerifiedChains)), - ) - } - if r.RequestURI == "/metrics" { - log.Debug("received request") - } else { - log.Info("received request") - } - h.serveHTTP(w, r) -} - -// serveHTTP is the internal entry point for API requests. It may -// be called more than once per request, for example if a request -// is rewritten (i.e. internal redirect). -func (h adminHandler) serveHTTP(w http.ResponseWriter, r *http.Request) { - if h.remoteControl != nil { - // enforce access controls on secure endpoint - if err := h.remoteControl.enforceAccessControls(r); err != nil { - h.handleError(w, r, err) - return - } - } - - if strings.Contains(r.Header.Get("Upgrade"), "websocket") { - // I've never been able to demonstrate a vulnerability myself, but apparently - // WebSocket connections originating from browsers aren't subject to CORS - // restrictions, so we'll just be on the safe side - h.handleError(w, r, fmt.Errorf("websocket connections aren't allowed")) - return - } - - if h.enforceHost { - // DNS rebinding mitigation - err := h.checkHost(r) - if err != nil { - h.handleError(w, r, err) - return - } - } - - if h.enforceOrigin { - // cross-site mitigation - origin, err := h.checkOrigin(r) - if err != nil { - h.handleError(w, r, err) - return - } - - if r.Method == http.MethodOptions { - w.Header().Set("Access-Control-Allow-Methods", "OPTIONS, GET, POST, PUT, PATCH, DELETE") - w.Header().Set("Access-Control-Allow-Headers", "Content-Type, Content-Length, Cache-Control") - w.Header().Set("Access-Control-Allow-Credentials", "true") - } - w.Header().Set("Access-Control-Allow-Origin", origin) - } - - h.mux.ServeHTTP(w, r) -} - -func (h adminHandler) handleError(w http.ResponseWriter, r *http.Request, err error) { - if err == nil { - return - } - if err == errInternalRedir { - h.serveHTTP(w, r) - return - } - - apiErr, ok := err.(APIError) - if !ok { - apiErr = APIError{ - HTTPStatus: http.StatusInternalServerError, - Err: err, - } - } - if apiErr.HTTPStatus == 0 { - apiErr.HTTPStatus = http.StatusInternalServerError - } - if apiErr.Message == "" && apiErr.Err != nil { - apiErr.Message = apiErr.Err.Error() - } - - Log().Named("admin.api").Error("request error", - zap.Error(err), - zap.Int("status_code", apiErr.HTTPStatus), - ) - - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(apiErr.HTTPStatus) - encErr := json.NewEncoder(w).Encode(apiErr) - if encErr != nil { - Log().Named("admin.api").Error("failed to encode error response", zap.Error(encErr)) - } -} - -// checkHost checks whether the request's Host header matches -// a trustworthy/expected value; if it does not, an error is returned. -// This helps to mitigate DNS rebinding attacks.
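
Putting checkHost and handleError together, a rejected request yields a 403 with a JSON body shaped by APIError (defined further down). A client-side sketch, assuming the default localhost:2019 listener:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Simulate a DNS-rebinding attempt: connect to the admin port but
	// present a foreign Host header. checkHost should reject this.
	req, err := http.NewRequest(http.MethodGet, "http://localhost:2019/config/", nil)
	if err != nil {
		panic(err)
	}
	req.Host = "evil.example" // not in allowedOrigins

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	// expect: 403 {"error":"host not allowed: evil.example"}
	fmt.Println(resp.StatusCode, string(body))
}
```
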
-func (h adminHandler) checkHost(r *http.Request) error { - var allowed bool - for _, allowedHost := range h.allowedOrigins { - if r.Host == allowedHost { - allowed = true - break - } - } - if !allowed { - return APIError{ - HTTPStatus: http.StatusForbidden, - Err: fmt.Errorf("host not allowed: %s", r.Host), - } - } - return nil -} - -// checkOrigin ensures that the Origin header, if -// set, matches the intended target; prevents arbitrary -// sites from issuing requests to our listener. It -// returns the origin that was obtained from r. -func (h adminHandler) checkOrigin(r *http.Request) (string, error) { - origin := h.getOriginHost(r) - if origin == "" { - return origin, APIError{ - HTTPStatus: http.StatusForbidden, - Err: fmt.Errorf("missing required Origin header"), - } - } - if !h.originAllowed(origin) { - return origin, APIError{ - HTTPStatus: http.StatusForbidden, - Err: fmt.Errorf("client is not allowed to access from origin %s", origin), - } - } - return origin, nil -} - -func (h adminHandler) getOriginHost(r *http.Request) string { - origin := r.Header.Get("Origin") - if origin == "" { - origin = r.Header.Get("Referer") - } - originURL, err := url.Parse(origin) - if err == nil && originURL.Host != "" { - origin = originURL.Host - } - return origin -} - -func (h adminHandler) originAllowed(origin string) bool { - for _, allowedOrigin := range h.allowedOrigins { - originCopy := origin - if !strings.Contains(allowedOrigin, "://") { - // no scheme specified, so allow both - originCopy = strings.TrimPrefix(originCopy, "http://") - originCopy = strings.TrimPrefix(originCopy, "https://") - } - if originCopy == allowedOrigin { - return true - } - } - return false -} - -func handleConfig(w http.ResponseWriter, r *http.Request) error { - switch r.Method { - case http.MethodGet: - w.Header().Set("Content-Type", "application/json") - - err := readConfig(r.URL.Path, w) - if err != nil { - return APIError{HTTPStatus: http.StatusBadRequest, Err: err} - } - - return nil - - case http.MethodPost, - http.MethodPut, - http.MethodPatch, - http.MethodDelete: - - // DELETE does not use a body, but the others do - var body []byte - if r.Method != http.MethodDelete { - if ct := r.Header.Get("Content-Type"); !strings.Contains(ct, "/json") { - return APIError{ - HTTPStatus: http.StatusBadRequest, - Err: fmt.Errorf("unacceptable content-type: %v; 'application/json' required", ct), - } - } - - buf := bufPool.Get().(*bytes.Buffer) - buf.Reset() - defer bufPool.Put(buf) - - _, err := io.Copy(buf, r.Body) - if err != nil { - return APIError{ - HTTPStatus: http.StatusBadRequest, - Err: fmt.Errorf("reading request body: %v", err), - } - } - body = buf.Bytes() - } - - forceReload := r.Header.Get("Cache-Control") == "must-revalidate" - - err := changeConfig(r.Method, r.URL.Path, body, forceReload) - if err != nil { - return err - } - - default: - return APIError{ - HTTPStatus: http.StatusMethodNotAllowed, - Err: fmt.Errorf("method %s not allowed", r.Method), - } - } - - return nil -} - -func handleConfigID(w http.ResponseWriter, r *http.Request) error { - idPath := r.URL.Path - - parts := strings.Split(idPath, "/") - if len(parts) < 3 || parts[2] == "" { - return fmt.Errorf("request path is missing object ID") - } - if parts[0] != "" || parts[1] != "id" { - return fmt.Errorf("malformed object path") - } - id := parts[2] - - // map the ID to the expanded path - currentCfgMu.RLock() - expanded, ok := rawCfgIndex[id] - defer currentCfgMu.RUnlock() - if !ok { - return fmt.Errorf("unknown object ID '%s'", id) - } - - 
// piece the full URL path back together - parts = append([]string{expanded}, parts[3:]...) - r.URL.Path = path.Join(parts...) - - return errInternalRedir -} - -func handleStop(w http.ResponseWriter, r *http.Request) error { - if r.Method != http.MethodPost { - return APIError{ - HTTPStatus: http.StatusMethodNotAllowed, - Err: fmt.Errorf("method not allowed"), - } - } - - if err := notify.NotifyStopping(); err != nil { - Log().Error("unable to notify stopping to service manager", zap.Error(err)) - } - - exitProcess(Log().Named("admin.api")) - return nil -} - -// unsyncedConfigAccess traverses into the current config and performs -// the operation at path according to method, using body and out as -// needed. This is a low-level, unsynchronized function; most callers -// will want to use changeConfig or readConfig instead. This requires a -// read or write lock on currentCfgMu, depending on method (GET needs -// only a read lock; all others need a write lock). -func unsyncedConfigAccess(method, path string, body []byte, out io.Writer) error { - var err error - var val interface{} - - // if there is a request body, decode it into the - // variable that will be set in the config according - // to method and path - if len(body) > 0 { - err = json.Unmarshal(body, &val) - if err != nil { - return fmt.Errorf("decoding request body: %v", err) - } - } - - enc := json.NewEncoder(out) - - cleanPath := strings.Trim(path, "/") - if cleanPath == "" { - return fmt.Errorf("no traversable path") - } - - parts := strings.Split(cleanPath, "/") - if len(parts) == 0 { - return fmt.Errorf("path missing") - } - - // A path that ends with "..." implies: - // 1) the part before it is an array - // 2) the payload is an array - // and means that the user wants to expand the elements - // in the payload array and append each one into the - // destination array, like so: - // array = append(array, elems...) - // This special case is handled below. - ellipses := parts[len(parts)-1] == "..." - if ellipses { - parts = parts[:len(parts)-1] - } - - var ptr interface{} = rawCfg - -traverseLoop: - for i, part := range parts { - switch v := ptr.(type) { - case map[string]interface{}: - // if the next part enters a slice, and the slice is our destination, - // handle it specially (because appending to the slice copies the slice - // header, which does not replace the original one like we want) - if arr, ok := v[part].([]interface{}); ok && i == len(parts)-2 { - var idx int - if method != http.MethodPost { - idxStr := parts[len(parts)-1] - idx, err = strconv.Atoi(idxStr) - if err != nil { - return fmt.Errorf("[%s] invalid array index '%s': %v", - path, idxStr, err) - } - if idx < 0 || idx >= len(arr) { - return fmt.Errorf("[%s] array index out of bounds: %s", path, idxStr) - } - } - - switch method { - case http.MethodGet: - err = enc.Encode(arr[idx]) - if err != nil { - return fmt.Errorf("encoding config: %v", err) - } - case http.MethodPost: - if ellipses { - valArray, ok := val.([]interface{}) - if !ok { - return fmt.Errorf("final element is not an array") - } - v[part] = append(arr, valArray...) - } else { - v[part] = append(arr, val) - } - case http.MethodPut: - // avoid creation of new slice and a second copy (see - // https://github.com/golang/go/wiki/SliceTricks#insert) - arr = append(arr, nil) - copy(arr[idx+1:], arr[idx:]) - arr[idx] = val - v[part] = arr - case http.MethodPatch: - arr[idx] = val - case http.MethodDelete: - v[part] = append(arr[:idx], arr[idx+1:]...) 
- default: - return fmt.Errorf("unrecognized method %s", method) - } - break traverseLoop - } - - if i == len(parts)-1 { - switch method { - case http.MethodGet: - err = enc.Encode(v[part]) - if err != nil { - return fmt.Errorf("encoding config: %v", err) - } - case http.MethodPost: - // if the part is an existing list, POST appends to - // it, otherwise it just sets or creates the value - if arr, ok := v[part].([]interface{}); ok { - if ellipses { - valArray, ok := val.([]interface{}) - if !ok { - return fmt.Errorf("final element is not an array") - } - v[part] = append(arr, valArray...) - } else { - v[part] = append(arr, val) - } - } else { - v[part] = val - } - case http.MethodPut: - if _, ok := v[part]; ok { - return fmt.Errorf("[%s] key already exists: %s", path, part) - } - v[part] = val - case http.MethodPatch: - if _, ok := v[part]; !ok { - return fmt.Errorf("[%s] key does not exist: %s", path, part) - } - v[part] = val - case http.MethodDelete: - delete(v, part) - default: - return fmt.Errorf("unrecognized method %s", method) - } - } else { - // if we are "PUTting" a new resource, the key(s) in its path - // might not exist yet; that's OK but we need to make them as - // we go, while we still have a pointer from the level above - if v[part] == nil && method == http.MethodPut { - v[part] = make(map[string]interface{}) - } - ptr = v[part] - } - - case []interface{}: - partInt, err := strconv.Atoi(part) - if err != nil { - return fmt.Errorf("[/%s] invalid array index '%s': %v", - strings.Join(parts[:i+1], "/"), part, err) - } - if partInt < 0 || partInt >= len(v) { - return fmt.Errorf("[/%s] array index out of bounds: %s", - strings.Join(parts[:i+1], "/"), part) - } - ptr = v[partInt] - - default: - return fmt.Errorf("invalid traversal path at: %s", strings.Join(parts[:i+1], "/")) - } - } - - return nil -} - -// RemoveMetaFields removes meta fields like "@id" from a JSON message -// by using a simple regular expression. (An alternate way to do this -// would be to delete them from the raw, map[string]interface{} -// representation as they are indexed, then iterate the index we made -// and add them back after encoding as JSON, but this is simpler.) -func RemoveMetaFields(rawJSON []byte) []byte { - return idRegexp.ReplaceAllFunc(rawJSON, func(in []byte) []byte { - // matches with a comma on both sides (when "@id" property is - // not the first or last in the object) need to keep exactly - // one comma for correct JSON syntax - comma := []byte{','} - if bytes.HasPrefix(in, comma) && bytes.HasSuffix(in, comma) { - return comma - } - return []byte{} - }) -} - -// AdminHandler is like http.Handler except ServeHTTP may return an error. -// -// If any handler encounters an error, it should be returned for proper -// handling. -type AdminHandler interface { - ServeHTTP(http.ResponseWriter, *http.Request) error -} - -// AdminHandlerFunc is a convenience type like http.HandlerFunc. -type AdminHandlerFunc func(http.ResponseWriter, *http.Request) error - -// ServeHTTP implements the Handler interface. -func (f AdminHandlerFunc) ServeHTTP(w http.ResponseWriter, r *http.Request) error { - return f(w, r) -} - -// APIError is a structured error that every API -// handler should return for consistency in logging -// and client responses. If Message is unset, then -// Err.Error() will be serialized in its place. 
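
AdminHandler/AdminHandlerFunc plus APIError (next) give admin routes a uniform error path: a handler returns a structured error and handleError above turns it into the JSON response. A sketch of a hypothetical handler written against these exported types:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/caddyserver/caddy/v2"
)

// statusHandler is a sketch of an admin-route handler: it returns an
// APIError rather than writing the error response itself, leaving
// serialization to the admin handler's error path.
var statusHandler caddy.AdminHandlerFunc = func(w http.ResponseWriter, r *http.Request) error {
	if r.Method != http.MethodGet {
		return caddy.APIError{
			HTTPStatus: http.StatusMethodNotAllowed,
			Err:        fmt.Errorf("method %s not allowed", r.Method),
		}
	}
	_, err := fmt.Fprintln(w, `{"status":"ok"}`)
	return err
}

func main() {
	// Exercise the handler directly; inside Caddy it would be wired up
	// through an AdminRouter's Routes() and served by the admin mux.
	rec := httptest.NewRecorder()
	req := httptest.NewRequest(http.MethodDelete, "/status", nil)
	fmt.Println(statusHandler.ServeHTTP(rec, req)) // method DELETE not allowed
}
```
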
-type APIError struct { - HTTPStatus int `json:"-"` - Err error `json:"-"` - Message string `json:"error"` -} - -func (e APIError) Error() string { - if e.Err != nil { - return e.Err.Error() - } - return e.Message -} - -// parseAdminListenAddr extracts a singular listen address from either addr -// or defaultAddr, returning the network and the address of the listener. -func parseAdminListenAddr(addr string, defaultAddr string) (NetworkAddress, error) { - input := addr - if input == "" { - input = defaultAddr - } - listenAddr, err := ParseNetworkAddress(input) - if err != nil { - return NetworkAddress{}, fmt.Errorf("parsing listener address: %v", err) - } - if listenAddr.PortRangeSize() != 1 { - return NetworkAddress{}, fmt.Errorf("must be exactly one listener address; cannot listen on: %s", listenAddr) - } - return listenAddr, nil -} - -// decodeBase64DERCert base64-decodes, then DER-decodes, certStr. -func decodeBase64DERCert(certStr string) (*x509.Certificate, error) { - derBytes, err := base64.StdEncoding.DecodeString(certStr) - if err != nil { - return nil, err - } - return x509.ParseCertificate(derBytes) -} - -var ( - // DefaultAdminListen is the address for the local admin - // listener, if none is specified at startup. - DefaultAdminListen = "localhost:2019" - - // DefaultRemoteAdminListen is the address for the remote - // (TLS-authenticated) admin listener, if enabled and not - // specified otherwise. - DefaultRemoteAdminListen = ":2021" - - // DefaultAdminConfig is the default configuration - // for the local administration endpoint. - DefaultAdminConfig = &AdminConfig{ - Listen: DefaultAdminListen, - } -) - -// PIDFile writes a pidfile to the file at filename. It -// will get deleted before the process gracefully exits. -func PIDFile(filename string) error { - pid := []byte(strconv.Itoa(os.Getpid()) + "\n") - err := ioutil.WriteFile(filename, pid, 0600) - if err != nil { - return err - } - pidfile = filename - return nil -} - -// idRegexp is used to match ID fields and their associated values -// in the config. It also matches adjacent commas so that syntax -// can be preserved no matter where in the object the field appears. -// It supports string and most numeric values. -var idRegexp = regexp.MustCompile(`(?m),?\s*"` + idKey + `"\s*:\s*(-?[0-9]+(\.[0-9]+)?|(?U)".*")\s*,?`) - -// pidfile is the name of the pidfile, if any. -var pidfile string - -// errInternalRedir indicates an internal redirect -// and is useful when admin API handlers rewrite -// the request; in that case, authentication and -// authorization needs to happen again for the -// rewritten request. -var errInternalRedir = fmt.Errorf("internal redirect; re-authorization required") - -const ( - rawConfigKey = "config" - idKey = "@id" -) - -var bufPool = sync.Pool{ - New: func() interface{} { - return new(bytes.Buffer) - }, -} - -// keep a reference to admin endpoint singletons while they're active -var ( - serverMu sync.Mutex - localAdminServer, remoteAdminServer *http.Server - identityCertCache *certmagic.Cache -) diff --git a/vendor/github.com/caddyserver/caddy/v2/caddy.go b/vendor/github.com/caddyserver/caddy/v2/caddy.go deleted file mode 100644 index ba025b18..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/caddy.go +++ /dev/null @@ -1,779 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package caddy - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "log" - "net/http" - "os" - "path" - "path/filepath" - "runtime/debug" - "strconv" - "strings" - "sync" - "time" - - "github.com/caddyserver/caddy/v2/notify" - "github.com/caddyserver/certmagic" - "github.com/google/uuid" - "go.uber.org/zap" -) - -// Config is the top (or beginning) of the Caddy configuration structure. -// Caddy config is expressed natively as a JSON document. If you prefer -// not to work with JSON directly, there are [many config adapters](/docs/config-adapters) -// available that can convert various inputs into Caddy JSON. -// -// Many parts of this config are extensible through the use of Caddy modules. -// Fields which have a json.RawMessage type and which appear as dots (•••) in -// the online docs can be fulfilled by modules in a certain module -// namespace. The docs show which modules can be used in a given place. -// -// Whenever a module is used, its name must be given either inline as part of -// the module, or as the key to the module's value. The docs will make it clear -// which to use. -// -// Generally, all config settings are optional, as it is Caddy convention to -// have good, documented default values. If a parameter is required, the docs -// should say so. -// -// Go programs which are directly building a Config struct value should take -// care to populate the JSON-encodable fields of the struct (i.e. the fields -// with `json` struct tags) if employing the module lifecycle (e.g. Provision -// method calls). -type Config struct { - Admin *AdminConfig `json:"admin,omitempty"` - Logging *Logging `json:"logging,omitempty"` - - // StorageRaw is a storage module that defines how/where Caddy - // stores assets (such as TLS certificates). The default storage - // module is `caddy.storage.file_system` (the local file system), - // and the default path - // [depends on the OS and environment](/docs/conventions#data-directory). - StorageRaw json.RawMessage `json:"storage,omitempty" caddy:"namespace=caddy.storage inline_key=module"` - - // AppsRaw are the apps that Caddy will load and run. The - // app module name is the key, and the app's config is the - // associated value. - AppsRaw ModuleMap `json:"apps,omitempty" caddy:"namespace="` - - apps map[string]App - storage certmagic.Storage - - cancelFunc context.CancelFunc -} - -// App is a thing that Caddy runs. -type App interface { - Start() error - Stop() error -} - -// Run runs the given config, replacing any existing config. -func Run(cfg *Config) error { - cfgJSON, err := json.Marshal(cfg) - if err != nil { - return err - } - return Load(cfgJSON, true) -} - -// Load loads the given config JSON and runs it only -// if it is different from the current config or -// forceReload is true. 
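
Per the Config doc comment, Go programs can build the struct directly and hand it to Run. A deliberately tiny sketch with no apps configured (AppsRaw would normally carry JSON for modules such as "http"):

```go
package main

import (
	"github.com/caddyserver/caddy/v2"
)

func main() {
	cfg := &caddy.Config{
		Admin: &caddy.AdminConfig{Listen: "localhost:2019"},
		AppsRaw: caddy.ModuleMap{
			// "http": json.RawMessage(`{...}`) would go here
		},
	}

	// Run replaces any existing config and starts the admin endpoint.
	if err := caddy.Run(cfg); err != nil {
		panic(err)
	}
	select {} // block forever; Caddy runs in the background
}
```
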
-func Load(cfgJSON []byte, forceReload bool) error { - if err := notify.NotifyReloading(); err != nil { - Log().Error("unable to notify reloading to service manager", zap.Error(err)) - } - - defer func() { - if err := notify.NotifyReadiness(); err != nil { - Log().Error("unable to notify readiness to service manager", zap.Error(err)) - } - }() - - return changeConfig(http.MethodPost, "/"+rawConfigKey, cfgJSON, forceReload) -} - -// changeConfig changes the current config (rawCfg) according to the -// method, traversed via the given path, and uses the given input as -// the new value (if applicable; i.e. "DELETE" doesn't have an input). -// If the resulting config is the same as the previous, no reload will -// occur unless forceReload is true. This function is safe for -// concurrent use. -func changeConfig(method, path string, input []byte, forceReload bool) error { - switch method { - case http.MethodGet, - http.MethodHead, - http.MethodOptions, - http.MethodConnect, - http.MethodTrace: - return fmt.Errorf("method not allowed") - } - - currentCfgMu.Lock() - defer currentCfgMu.Unlock() - - err := unsyncedConfigAccess(method, path, input, nil) - if err != nil { - return err - } - - // the mutation is complete, so encode the entire config as JSON - newCfg, err := json.Marshal(rawCfg[rawConfigKey]) - if err != nil { - return APIError{ - HTTPStatus: http.StatusBadRequest, - Err: fmt.Errorf("encoding new config: %v", err), - } - } - - // if nothing changed, no need to do a whole reload unless the client forces it - if !forceReload && bytes.Equal(rawCfgJSON, newCfg) { - Log().Named("admin.api").Info("config is unchanged") - return nil - } - - // find any IDs in this config and index them - idx := make(map[string]string) - err = indexConfigObjects(rawCfg[rawConfigKey], "/"+rawConfigKey, idx) - if err != nil { - return APIError{ - HTTPStatus: http.StatusInternalServerError, - Err: fmt.Errorf("indexing config: %v", err), - } - } - - // load this new config; if it fails, we need to revert to - // our old representation of caddy's actual config - err = unsyncedDecodeAndRun(newCfg, true) - if err != nil { - if len(rawCfgJSON) > 0 { - // restore old config state to keep it consistent - // with what caddy is still running; we need to - // unmarshal it again because it's likely that - // pointers deep in our rawCfg map were modified - var oldCfg interface{} - err2 := json.Unmarshal(rawCfgJSON, &oldCfg) - if err2 != nil { - err = fmt.Errorf("%v; additionally, restoring old config: %v", err, err2) - } - rawCfg[rawConfigKey] = oldCfg - } - - return fmt.Errorf("loading new config: %v", err) - } - - // success, so update our stored copy of the encoded - // config to keep it consistent with what caddy is now - // running (storing an encoded copy is not strictly - // necessary, but avoids an extra json.Marshal for - // each config change) - rawCfgJSON = newCfg - rawCfgIndex = idx - - return nil -} - -// readConfig traverses the current config to path -// and writes its JSON encoding to out. -func readConfig(path string, out io.Writer) error { - currentCfgMu.RLock() - defer currentCfgMu.RUnlock() - return unsyncedConfigAccess(http.MethodGet, path, nil, out) -} - -// indexConfigObjects recursively searches ptr for object fields named -// "@id" and maps that ID value to the full configPath in the index. -// This function is NOT safe for concurrent access; obtain a write lock -// on currentCfgMu. 
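
The ID index built by indexConfigObjects is what makes /id/ paths work: handleConfigID rewrites them to the expanded /config/ path. A client sketch, assuming a running config in which some object carries "@id": "my_server" (the expanded path shown is hypothetical):

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// These two requests are equivalent once the index maps the ID
	// to its full /config/ path.
	for _, url := range []string{
		"http://localhost:2019/config/apps/http/servers/srv0", // hypothetical expanded path
		"http://localhost:2019/id/my_server",
	} {
		resp, err := http.Get(url)
		if err != nil {
			panic(err)
		}
		body, _ := io.ReadAll(resp.Body)
		resp.Body.Close()
		fmt.Println(url, "->", resp.StatusCode, string(body))
	}
}
```
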
-func indexConfigObjects(ptr interface{}, configPath string, index map[string]string) error { - switch val := ptr.(type) { - case map[string]interface{}: - for k, v := range val { - if k == idKey { - switch idVal := v.(type) { - case string: - index[idVal] = configPath - case float64: // all JSON numbers decode as float64 - index[fmt.Sprintf("%v", idVal)] = configPath - default: - return fmt.Errorf("%s: %s field must be a string or number", configPath, idKey) - } - continue - } - // traverse this object property recursively - err := indexConfigObjects(val[k], path.Join(configPath, k), index) - if err != nil { - return err - } - } - case []interface{}: - // traverse each element of the array recursively - for i := range val { - err := indexConfigObjects(val[i], path.Join(configPath, strconv.Itoa(i)), index) - if err != nil { - return err - } - } - } - - return nil -} - -// unsyncedDecodeAndRun removes any meta fields (like @id tags) -// from cfgJSON, decodes the result into a *Config, and runs -// it as the new config, replacing any other current config. -// It does NOT update the raw config state, as this is a -// lower-level function; most callers will want to use Load -// instead. A write lock on currentCfgMu is required! If -// allowPersist is false, it will not be persisted to disk, -// even if it is configured to. -func unsyncedDecodeAndRun(cfgJSON []byte, allowPersist bool) error { - // remove any @id fields from the JSON, which would cause - // loading to break since the field wouldn't be recognized - strippedCfgJSON := RemoveMetaFields(cfgJSON) - - var newCfg *Config - err := strictUnmarshalJSON(strippedCfgJSON, &newCfg) - if err != nil { - return err - } - - // prevent recursive config loads; that is a user error, and - // although frequent config loads should be safe, we cannot - // guarantee that in the presence of third party plugins, nor - // do we want this error to go unnoticed (we assume it was a - // pulled config if we're not allowed to persist it) - if !allowPersist && - newCfg != nil && - newCfg.Admin != nil && - newCfg.Admin.Config != nil && - newCfg.Admin.Config.LoadRaw != nil && - newCfg.Admin.Config.LoadInterval <= 0 { - return fmt.Errorf("recursive config loading detected: pulled configs cannot pull other configs without positive load_interval") - } - - // run the new config and start all its apps - err = run(newCfg, true) - if err != nil { - return err - } - - // swap old config with the new one - oldCfg := currentCfg - currentCfg = newCfg - - // Stop, Cleanup each old app - unsyncedStop(oldCfg) - - // autosave a non-nil config, if not disabled - if allowPersist && - newCfg != nil && - (newCfg.Admin == nil || - newCfg.Admin.Config == nil || - newCfg.Admin.Config.Persist == nil || - *newCfg.Admin.Config.Persist) { - dir := filepath.Dir(ConfigAutosavePath) - err := os.MkdirAll(dir, 0700) - if err != nil { - Log().Error("unable to create folder for config autosave", - zap.String("dir", dir), - zap.Error(err)) - } else { - err := ioutil.WriteFile(ConfigAutosavePath, cfgJSON, 0600) - if err == nil { - Log().Info("autosaved config (load with --resume flag)", zap.String("file", ConfigAutosavePath)) - } else { - Log().Error("unable to autosave config", - zap.String("file", ConfigAutosavePath), - zap.Error(err)) - } - } - } - - return nil -} - -// run runs newCfg and starts all its apps if -// start is true. 
If any errors happen, cleanup -// is performed if any modules were provisioned; -// apps that were started already will be stopped, -// so this function should not leak resources if -// an error is returned. However, if no error is -// returned and start == false, you should cancel -// the config if you are not going to start it, -// so that each provisioned module will be -// cleaned up. -// -// This is a low-level function; most callers -// will want to use Run instead, which also -// updates the config's raw state. -func run(newCfg *Config, start bool) error { - // because we will need to roll back any state - // modifications if this function errors, we - // keep a single error value and scope all - // sub-operations to their own functions to - // ensure this error value does not get - // overridden or missed when it should have - // been set by a short assignment - var err error - - if newCfg == nil { - newCfg = new(Config) - } - - // create a context within which to load - // modules - essentially our new config's - // execution environment; be sure that - // cleanup occurs when we return if there - // was an error; if no error, it will get - // cleaned up on next config cycle - ctx, cancel := NewContext(Context{Context: context.Background(), cfg: newCfg}) - defer func() { - if err != nil { - // if there were any errors during startup, - // we should cancel the new context we created - // since the associated config won't be used; - // this will cause all modules that were newly - // provisioned to clean themselves up - cancel() - - // also undo any other state changes we made - if currentCfg != nil { - certmagic.Default.Storage = currentCfg.storage - } - } - }() - newCfg.cancelFunc = cancel // clean up later - - // set up logging before anything bad happens - if newCfg.Logging == nil { - newCfg.Logging = new(Logging) - } - err = newCfg.Logging.openLogs(ctx) - if err != nil { - return err - } - - // start the admin endpoint (and stop any prior one) - if start { - err = replaceLocalAdminServer(newCfg) - if err != nil { - return fmt.Errorf("starting caddy administration endpoint: %v", err) - } - } - - // prepare the new config for use - newCfg.apps = make(map[string]App) - - // set up global storage and make it CertMagic's default storage, too - err = func() error { - if newCfg.StorageRaw != nil { - val, err := ctx.LoadModule(newCfg, "StorageRaw") - if err != nil { - return fmt.Errorf("loading storage module: %v", err) - } - stor, err := val.(StorageConverter).CertMagicStorage() - if err != nil { - return fmt.Errorf("creating storage value: %v", err) - } - newCfg.storage = stor - } - - if newCfg.storage == nil { - newCfg.storage = DefaultStorage - } - certmagic.Default.Storage = newCfg.storage - - return nil - }() - if err != nil { - return err - } - - // Load and Provision each app and their submodules - err = func() error { - for appName := range newCfg.AppsRaw { - if _, err := ctx.App(appName); err != nil { - return err - } - } - return nil - }() - if err != nil { - return err - } - - if !start { - return nil - } - - // Start - err = func() error { - var started []string - for name, a := range newCfg.apps { - err := a.Start() - if err != nil { - // an app failed to start, so we need to stop - // all other apps that were already started - for _, otherAppName := range started { - err2 := newCfg.apps[otherAppName].Stop() - if err2 != nil { - err = fmt.Errorf("%v; additionally, aborting app %s: %v", - err, otherAppName, err2) - } - } - return fmt.Errorf("%s app module: start: %v", name, 
err) - } - started = append(started, name) - } - return nil - }() - if err != nil { - return err - } - - // now that the user's config is running, finish setting up anything else, - // such as remote admin endpoint, config loader, etc. - return finishSettingUp(ctx, newCfg) -} - -// finishSettingUp should be run after all apps have successfully started. -func finishSettingUp(ctx Context, cfg *Config) error { - // establish this server's identity (only after apps are loaded - // so that cert management of this endpoint doesn't prevent user's - // servers from starting which likely also use HTTP/HTTPS ports; - // but before remote management which may depend on these creds) - err := manageIdentity(ctx, cfg) - if err != nil { - return fmt.Errorf("provisioning remote admin endpoint: %v", err) - } - - // replace any remote admin endpoint - err = replaceRemoteAdminServer(ctx, cfg) - if err != nil { - return fmt.Errorf("provisioning remote admin endpoint: %v", err) - } - - // if dynamic config is requested, set that up and run it - if cfg != nil && cfg.Admin != nil && cfg.Admin.Config != nil && cfg.Admin.Config.LoadRaw != nil { - val, err := ctx.LoadModule(cfg.Admin.Config, "LoadRaw") - if err != nil { - return fmt.Errorf("loading config loader module: %s", err) - } - runLoadedConfig := func(config []byte) { - Log().Info("applying dynamically-loaded config", zap.String("loader_module", val.(Module).CaddyModule().ID.Name()), zap.Int("pull_interval", int(cfg.Admin.Config.LoadInterval))) - currentCfgMu.Lock() - err := unsyncedDecodeAndRun(config, false) - currentCfgMu.Unlock() - if err == nil { - Log().Info("dynamically-loaded config applied successfully") - } else { - Log().Error("running dynamically-loaded config failed", zap.Error(err)) - } - } - if cfg.Admin.Config.LoadInterval > 0 { - go func() { - select { - // if LoadInterval is positive, will wait for the interval and then run with new config - case <-time.After(time.Duration(cfg.Admin.Config.LoadInterval)): - loadedConfig, err := val.(ConfigLoader).LoadConfig(ctx) - if err != nil { - Log().Error("loading dynamic config failed", zap.Error(err)) - return - } - runLoadedConfig(loadedConfig) - case <-ctx.Done(): - return - } - }() - } else { - // if no LoadInterval is provided, will load config synchronously - loadedConfig, err := val.(ConfigLoader).LoadConfig(ctx) - if err != nil { - return fmt.Errorf("loading dynamic config from %T: %v", val, err) - } - // do this in a goroutine so current config can finish being loaded; otherwise deadlock - go runLoadedConfig(loadedConfig) - } - - } - - return nil -} - -// ConfigLoader is a type that can load a Caddy config. The -// returned config must be valid Caddy JSON. -type ConfigLoader interface { - LoadConfig(Context) ([]byte, error) -} - -// Stop stops running the current configuration. -// It is the antithesis of Run(). This function -// will log any errors that occur during the -// stopping of individual apps and continue to -// stop the others. Stop should only be called -// if not replacing with a new config. -func Stop() error { - currentCfgMu.Lock() - defer currentCfgMu.Unlock() - unsyncedStop(currentCfg) - currentCfg = nil - rawCfgJSON = nil - rawCfgIndex = nil - rawCfg[rawConfigKey] = nil - return nil -} - -// unsyncedStop stops cfg from running, but has -// no locking around cfg. It is a no-op if cfg is -// nil. If any app returns an error when stopping, -// it is logged and the function continues stopping -// the next app. 
This function assumes all apps in -// cfg were successfully started first. -func unsyncedStop(cfg *Config) { - if cfg == nil { - return - } - - // stop each app - for name, a := range cfg.apps { - err := a.Stop() - if err != nil { - log.Printf("[ERROR] stop %s: %v", name, err) - } - } - - // clean up all modules - cfg.cancelFunc() -} - -// Validate loads, provisions, and validates -// cfg, but does not start running it. -func Validate(cfg *Config) error { - err := run(cfg, false) - if err == nil { - cfg.cancelFunc() // call Cleanup on all modules - } - return err -} - -// exitProcess exits the process as gracefully as possible, -// but it always exits, even if there are errors doing so. -// It stops all apps, cleans up external locks, removes any -// PID file, and shuts down admin endpoint(s) in a goroutine. -// Errors are logged along the way, and an appropriate exit -// code is emitted. -func exitProcess(logger *zap.Logger) { - if logger == nil { - logger = Log() - } - logger.Warn("exiting; byeee!! 👋") - - exitCode := ExitCodeSuccess - - // stop all apps - if err := Stop(); err != nil { - logger.Error("failed to stop apps", zap.Error(err)) - exitCode = ExitCodeFailedQuit - } - - // clean up certmagic locks - certmagic.CleanUpOwnLocks(logger) - - // remove pidfile - if pidfile != "" { - err := os.Remove(pidfile) - if err != nil { - logger.Error("cleaning up PID file:", - zap.String("pidfile", pidfile), - zap.Error(err)) - exitCode = ExitCodeFailedQuit - } - } - - // shut down admin endpoint(s) in goroutines so that - // if this function was called from an admin handler, - // it has a chance to return gracefully - // use goroutine so that we can finish responding to API request - go func() { - defer func() { - logger = logger.With(zap.Int("exit_code", exitCode)) - if exitCode == ExitCodeSuccess { - logger.Info("shutdown complete") - } else { - logger.Error("unclean shutdown") - } - os.Exit(exitCode) - }() - - if remoteAdminServer != nil { - err := stopAdminServer(remoteAdminServer) - if err != nil { - exitCode = ExitCodeFailedQuit - logger.Error("failed to stop remote admin server gracefully", zap.Error(err)) - } - } - if localAdminServer != nil { - err := stopAdminServer(localAdminServer) - if err != nil { - exitCode = ExitCodeFailedQuit - logger.Error("failed to stop local admin server gracefully", zap.Error(err)) - } - } - }() -} - -// Duration can be an integer or a string. An integer is -// interpreted as nanoseconds. If a string, it is a Go -// time.Duration value such as `300ms`, `1.5h`, or `2h45m`; -// valid units are `ns`, `us`/`µs`, `ms`, `s`, `m`, `h`, and `d`. -type Duration time.Duration - -// UnmarshalJSON satisfies json.Unmarshaler. -func (d *Duration) UnmarshalJSON(b []byte) error { - if len(b) == 0 { - return io.EOF - } - var dur time.Duration - var err error - if b[0] == byte('"') && b[len(b)-1] == byte('"') { - dur, err = ParseDuration(strings.Trim(string(b), `"`)) - } else { - err = json.Unmarshal(b, &dur) - } - *d = Duration(dur) - return err -} - -// ParseDuration parses a duration string, adding -// support for the "d" unit meaning number of days, -// where a day is assumed to be 24h. 
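
Both accepted forms of Duration are easy to demonstrate; note how the "d" unit is rewritten into hours before time.ParseDuration runs. A small sketch:

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/caddyserver/caddy/v2"
)

func main() {
	var cfg struct {
		Interval caddy.Duration `json:"load_interval"`
	}

	// Strings go through ParseDuration, so the "d" unit works;
	// bare integers are interpreted as nanoseconds.
	for _, raw := range []string{
		`{"load_interval":"1d12h"}`,
		`{"load_interval":5000000000}`,
	} {
		if err := json.Unmarshal([]byte(raw), &cfg); err != nil {
			panic(err)
		}
		fmt.Println(time.Duration(cfg.Interval)) // 36h0m0s, then 5s
	}
}
```
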
-func ParseDuration(s string) (time.Duration, error) { - var inNumber bool - var numStart int - for i := 0; i < len(s); i++ { - ch := s[i] - if ch == 'd' { - daysStr := s[numStart:i] - days, err := strconv.ParseFloat(daysStr, 64) - if err != nil { - return 0, err - } - hours := days * 24.0 - hoursStr := strconv.FormatFloat(hours, 'f', -1, 64) - s = s[:numStart] + hoursStr + "h" + s[i+1:] - i-- - continue - } - if !inNumber { - numStart = i - } - inNumber = (ch >= '0' && ch <= '9') || ch == '.' || ch == '-' || ch == '+' - } - return time.ParseDuration(s) -} - -// InstanceID returns the UUID for this instance, and generates one if it -// does not already exist. The UUID is stored in the local data directory, -// regardless of storage configuration, since each instance is intended to -// have its own unique ID. -func InstanceID() (uuid.UUID, error) { - uuidFilePath := filepath.Join(AppDataDir(), "instance.uuid") - uuidFileBytes, err := ioutil.ReadFile(uuidFilePath) - if os.IsNotExist(err) { - uuid, err := uuid.NewRandom() - if err != nil { - return uuid, err - } - err = ioutil.WriteFile(uuidFilePath, []byte(uuid.String()), 0600) - return uuid, err - } else if err != nil { - return [16]byte{}, err - } - return uuid.ParseBytes(uuidFileBytes) -} - -// GoModule returns the build info of this Caddy -// build from debug.BuildInfo (requires Go modules). -// If no version information is available, a non-nil -// value will still be returned, but with an -// unknown version. -func GoModule() *debug.Module { - var mod debug.Module - return goModule(&mod) -} - -// goModule holds the actual implementation of GoModule. -// Allocating debug.Module in GoModule() and passing a -// reference to goModule enables mid-stack inlining. -func goModule(mod *debug.Module) *debug.Module { - mod.Version = "unknown" - bi, ok := debug.ReadBuildInfo() - if ok { - mod.Path = bi.Main.Path - // The recommended way to build Caddy involves - // creating a separate main module, which - // TODO: track related Go issue: https://github.com/golang/go/issues/29228 - // once that issue is fixed, we should just be able to use bi.Main... hopefully. - for _, dep := range bi.Deps { - if dep.Path == ImportPath { - return dep - } - } - return &bi.Main - } - return mod -} - -// CtxKey is a value type for use with context.WithValue. -type CtxKey string - -// This group of variables pertains to the current configuration. -var ( - // currentCfgMu protects everything in this var block. - currentCfgMu sync.RWMutex - - // currentCfg is the currently-running configuration. - currentCfg *Config - - // rawCfg is the current, generic-decoded configuration; - // we initialize it as a map with one field ("config") - // to maintain parity with the API endpoint and to avoid - // the special case of having to access/mutate the variable - // directly without traversing into it. - rawCfg = map[string]interface{}{ - rawConfigKey: nil, - } - - // rawCfgJSON is the JSON-encoded form of rawCfg. Keeping - // this around avoids an extra Marshal call during changes. - rawCfgJSON []byte - - // rawCfgIndex is the map of user-assigned ID to expanded - // path, for converting /id/ paths to /config/ paths. - rawCfgIndex map[string]string -) - -// ImportPath is the package import path for Caddy core. 
-const ImportPath = "github.com/caddyserver/caddy/v2"
diff --git a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/caddyfile/adapter.go b/vendor/github.com/caddyserver/caddy/v2/caddyconfig/caddyfile/adapter.go
deleted file mode 100644
index 5b80df3f..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/caddyfile/adapter.go
+++ /dev/null
@@ -1,149 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package caddyfile
-
-import (
-	"bytes"
-	"encoding/json"
-	"fmt"
-
-	"github.com/caddyserver/caddy/v2"
-	"github.com/caddyserver/caddy/v2/caddyconfig"
-)
-
-// Adapter adapts Caddyfile to Caddy JSON.
-type Adapter struct {
-	ServerType ServerType
-}
-
-// Adapt converts the Caddyfile config in body to Caddy JSON.
-func (a Adapter) Adapt(body []byte, options map[string]interface{}) ([]byte, []caddyconfig.Warning, error) {
-	if a.ServerType == nil {
-		return nil, nil, fmt.Errorf("no server type")
-	}
-	if options == nil {
-		options = make(map[string]interface{})
-	}
-
-	filename, _ := options["filename"].(string)
-	if filename == "" {
-		filename = "Caddyfile"
-	}
-
-	serverBlocks, err := Parse(filename, body)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	cfg, warnings, err := a.ServerType.Setup(serverBlocks, options)
-	if err != nil {
-		return nil, warnings, err
-	}
-
-	// lint check: see if input was properly formatted; sometimes messy files parse
-	// successfully but result in logical errors (the Caddyfile is a bad format, I'm sorry)
-	if warning, different := formattingDifference(filename, body); different {
-		warnings = append(warnings, warning)
-	}
-
-	result, err := json.Marshal(cfg)
-
-	return result, warnings, err
-}
-
-// formattingDifference returns a warning and true if the formatted version
-// is any different from the input; empty warning and false otherwise.
-// TODO: also perform this check on imported files
-func formattingDifference(filename string, body []byte) (caddyconfig.Warning, bool) {
-	// replace windows-style newlines to normalize comparison
-	normalizedBody := bytes.Replace(body, []byte("\r\n"), []byte("\n"), -1)
-
-	formatted := Format(normalizedBody)
-	if bytes.Equal(formatted, normalizedBody) {
-		return caddyconfig.Warning{}, false
-	}
-
-	// find where the difference is
-	line := 1
-	for i, ch := range normalizedBody {
-		if i >= len(formatted) || ch != formatted[i] {
-			break
-		}
-		if ch == '\n' {
-			line++
-		}
-	}
-	return caddyconfig.Warning{
-		File:    filename,
-		Line:    line,
-		Message: "input is not formatted with 'caddy fmt'",
-	}, true
-}
-
-// Unmarshaler is a type that can unmarshal
-// Caddyfile tokens to set itself up for a
-// JSON encoding. The goal of an unmarshaler
-// is not to set itself up for actual use,
-// but to set itself up for being marshaled
-// into JSON. Caddyfile-unmarshaled values
-// will not be used directly; they will be
-// encoded as JSON and then used from that.
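End to end, Adapt is a single call once a server type is registered. A hedged sketch using the "caddyfile" adapter name that the httpcaddyfile package registers:

caddyfileBody := []byte("localhost\n\nrespond \"hi\"\n")
adapter := caddyconfig.GetAdapter("caddyfile") // nil if nothing registered that name
cfgJSON, warnings, err := adapter.Adapt(caddyfileBody, map[string]interface{}{
	"filename": "Caddyfile", // used in warnings and errors; defaults to "Caddyfile"
})
// warnings may include the "input is not formatted with 'caddy fmt'" notice
// produced by formattingDifference above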
-// Implementations must be able to support -// multiple segments (instances of their -// directive or batch of tokens); typically -// this means wrapping all token logic in -// a loop: `for d.Next() { ... }`. -type Unmarshaler interface { - UnmarshalCaddyfile(d *Dispenser) error -} - -// ServerType is a type that can evaluate a Caddyfile and set up a caddy config. -type ServerType interface { - // Setup takes the server blocks which - // contain tokens, as well as options - // (e.g. CLI flags) and creates a Caddy - // config, along with any warnings or - // an error. - Setup([]ServerBlock, map[string]interface{}) (*caddy.Config, []caddyconfig.Warning, error) -} - -// UnmarshalModule instantiates a module with the given ID and invokes -// UnmarshalCaddyfile on the new value using the immediate next segment -// of d as input. In other words, d's next token should be the first -// token of the module's Caddyfile input. -// -// This function is used when the next segment of Caddyfile tokens -// belongs to another Caddy module. The returned value is often -// type-asserted to the module's associated type for practical use -// when setting up a config. -func UnmarshalModule(d *Dispenser, moduleID string) (Unmarshaler, error) { - mod, err := caddy.GetModule(moduleID) - if err != nil { - return nil, d.Errf("getting module named '%s': %v", moduleID, err) - } - inst := mod.New() - unm, ok := inst.(Unmarshaler) - if !ok { - return nil, d.Errf("module %s is not a Caddyfile unmarshaler; is %T", mod.ID, inst) - } - err = unm.UnmarshalCaddyfile(d.NewFromNextSegment()) - if err != nil { - return nil, err - } - return unm, nil -} - -// Interface guard -var _ caddyconfig.Adapter = (*Adapter)(nil) diff --git a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/caddyfile/dispenser.go b/vendor/github.com/caddyserver/caddy/v2/caddyconfig/caddyfile/dispenser.go deleted file mode 100644 index fa7f5e75..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/caddyfile/dispenser.go +++ /dev/null @@ -1,396 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package caddyfile - -import ( - "errors" - "fmt" - "io" - "log" - "strings" -) - -// Dispenser is a type that dispenses tokens, similarly to a lexer, -// except that it can do so with some notion of structure. An empty -// Dispenser is invalid; call NewDispenser to make a proper instance. -type Dispenser struct { - tokens []Token - cursor int - nesting int -} - -// NewDispenser returns a Dispenser filled with the given tokens. -func NewDispenser(tokens []Token) *Dispenser { - return &Dispenser{ - tokens: tokens, - cursor: -1, - } -} - -// NewTestDispenser parses input into tokens and creates a new -// Dispenser for test purposes only; any errors are fatal. 
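UnmarshalModule is how a parent module hands the next Caddyfile segment to a guest module from inside its own unmarshal function. A hypothetical sketch (the module ID and concrete type are illustrative):

// d's next token begins the guest module's config
unm, err := caddyfile.UnmarshalModule(d, "http.handlers.gizmo")
if err != nil {
	return nil, err
}
handler := unm.(*gizmo.Handler) // assert to the guest module's concrete type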
-func NewTestDispenser(input string) *Dispenser { - tokens, err := allTokens("Testfile", []byte(input)) - if err != nil && err != io.EOF { - log.Fatalf("getting all tokens from input: %v", err) - } - return NewDispenser(tokens) -} - -// Next loads the next token. Returns true if a token -// was loaded; false otherwise. If false, all tokens -// have been consumed. -func (d *Dispenser) Next() bool { - if d.cursor < len(d.tokens)-1 { - d.cursor++ - return true - } - return false -} - -// Prev moves to the previous token. It does the inverse -// of Next(), except this function may decrement the cursor -// to -1 so that the next call to Next() points to the -// first token; this allows dispensing to "start over". This -// method returns true if the cursor ends up pointing to a -// valid token. -func (d *Dispenser) Prev() bool { - if d.cursor > -1 { - d.cursor-- - return d.cursor > -1 - } - return false -} - -// NextArg loads the next token if it is on the same -// line and if it is not a block opening (open curly -// brace). Returns true if an argument token was -// loaded; false otherwise. If false, all tokens on -// the line have been consumed except for potentially -// a block opening. It handles imported tokens -// correctly. -func (d *Dispenser) NextArg() bool { - if !d.nextOnSameLine() { - return false - } - if d.Val() == "{" { - // roll back; a block opening is not an argument - d.cursor-- - return false - } - return true -} - -// nextOnSameLine advances the cursor if the next -// token is on the same line of the same file. -func (d *Dispenser) nextOnSameLine() bool { - if d.cursor < 0 { - d.cursor++ - return true - } - if d.cursor >= len(d.tokens) { - return false - } - if d.cursor < len(d.tokens)-1 && - d.tokens[d.cursor].File == d.tokens[d.cursor+1].File && - d.tokens[d.cursor].Line+d.numLineBreaks(d.cursor) == d.tokens[d.cursor+1].Line { - d.cursor++ - return true - } - return false -} - -// NextLine loads the next token only if it is not on the same -// line as the current token, and returns true if a token was -// loaded; false otherwise. If false, there is not another token -// or it is on the same line. It handles imported tokens correctly. -func (d *Dispenser) NextLine() bool { - if d.cursor < 0 { - d.cursor++ - return true - } - if d.cursor >= len(d.tokens) { - return false - } - if d.cursor < len(d.tokens)-1 && - (d.tokens[d.cursor].File != d.tokens[d.cursor+1].File || - d.tokens[d.cursor].Line+d.numLineBreaks(d.cursor) < d.tokens[d.cursor+1].Line) { - d.cursor++ - return true - } - return false -} - -// NextBlock can be used as the condition of a for loop -// to load the next token as long as it opens a block or -// is already in a block nested more than initialNestingLevel. -// In other words, a loop over NextBlock() will iterate -// all tokens in the block assuming the next token is an -// open curly brace, until the matching closing brace. -// The open and closing brace tokens for the outer-most -// block will be consumed internally and omitted from -// the iteration. -// -// Proper use of this method looks like this: -// -// for nesting := d.Nesting(); d.NextBlock(nesting); { -// } -// -// However, in simple cases where it is known that the -// Dispenser is new and has not already traversed state -// by a loop over NextBlock(), this will do: -// -// for d.NextBlock(0) { -// } -// -// As with other token parsing logic, a loop over -// NextBlock() should be contained within a loop over -// Next(), as it is usually prudent to skip the initial -// token. 
-func (d *Dispenser) NextBlock(initialNestingLevel int) bool { - if d.nesting > initialNestingLevel { - if !d.Next() { - return false // should be EOF error - } - if d.Val() == "}" && !d.nextOnSameLine() { - d.nesting-- - } else if d.Val() == "{" && !d.nextOnSameLine() { - d.nesting++ - } - return d.nesting > initialNestingLevel - } - if !d.nextOnSameLine() { // block must open on same line - return false - } - if d.Val() != "{" { - d.cursor-- // roll back if not opening brace - return false - } - d.Next() // consume open curly brace - if d.Val() == "}" { - return false // open and then closed right away - } - d.nesting++ - return true -} - -// Nesting returns the current nesting level. Necessary -// if using NextBlock() -func (d *Dispenser) Nesting() int { - return d.nesting -} - -// Val gets the text of the current token. If there is no token -// loaded, it returns empty string. -func (d *Dispenser) Val() string { - if d.cursor < 0 || d.cursor >= len(d.tokens) { - return "" - } - return d.tokens[d.cursor].Text -} - -// Line gets the line number of the current token. -// If there is no token loaded, it returns 0. -func (d *Dispenser) Line() int { - if d.cursor < 0 || d.cursor >= len(d.tokens) { - return 0 - } - return d.tokens[d.cursor].Line -} - -// File gets the filename where the current token originated. -func (d *Dispenser) File() string { - if d.cursor < 0 || d.cursor >= len(d.tokens) { - return "" - } - return d.tokens[d.cursor].File -} - -// Args is a convenience function that loads the next arguments -// (tokens on the same line) into an arbitrary number of strings -// pointed to in targets. If there are not enough argument tokens -// available to fill targets, false is returned and the remaining -// targets are left unchanged. If all the targets are filled, -// then true is returned. -func (d *Dispenser) Args(targets ...*string) bool { - for i := 0; i < len(targets); i++ { - if !d.NextArg() { - return false - } - *targets[i] = d.Val() - } - return true -} - -// AllArgs is like Args, but if there are more argument tokens -// available than there are targets, false is returned. The -// number of available argument tokens must match the number of -// targets exactly to return true. -func (d *Dispenser) AllArgs(targets ...*string) bool { - if !d.Args(targets...) { - return false - } - if d.NextArg() { - d.Prev() - return false - } - return true -} - -// RemainingArgs loads any more arguments (tokens on the same line) -// into a slice and returns them. Open curly brace tokens also indicate -// the end of arguments, and the curly brace is not included in -// the return value nor is it loaded. -func (d *Dispenser) RemainingArgs() []string { - var args []string - for d.NextArg() { - args = append(args, d.Val()) - } - return args -} - -// NewFromNextSegment returns a new dispenser with a copy of -// the tokens from the current token until the end of the -// "directive" whether that be to the end of the line or -// the end of a block that starts at the end of the line; -// in other words, until the end of the segment. -func (d *Dispenser) NewFromNextSegment() *Dispenser { - return NewDispenser(d.NextSegment()) -} - -// NextSegment returns a copy of the tokens from the current -// token until the end of the line or block that starts at -// the end of the line. 
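The methods above compose into the canonical unmarshal loop. A hypothetical directive of the form `gizmo <name> { speed <n> }` could be consumed like this (the Gizmo type is illustrative):

type Gizmo struct {
	Name  string
	Speed string
}

func (g *Gizmo) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
	for d.Next() { // consume the directive name itself
		if !d.Args(&g.Name) {
			return d.ArgErr()
		}
		for d.NextBlock(0) { // fresh dispenser, so initial nesting is 0
			switch d.Val() {
			case "speed":
				if !d.AllArgs(&g.Speed) {
					return d.ArgErr()
				}
			default:
				return d.Errf("unrecognized subdirective '%s'", d.Val())
			}
		}
	}
	return nil
}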
-func (d *Dispenser) NextSegment() Segment { - tkns := Segment{d.Token()} - for d.NextArg() { - tkns = append(tkns, d.Token()) - } - var openedBlock bool - for nesting := d.Nesting(); d.NextBlock(nesting); { - if !openedBlock { - // because NextBlock() consumes the initial open - // curly brace, we rewind here to append it, since - // our case is special in that we want the new - // dispenser to have all the tokens including - // surrounding curly braces - d.Prev() - tkns = append(tkns, d.Token()) - d.Next() - openedBlock = true - } - tkns = append(tkns, d.Token()) - } - if openedBlock { - // include closing brace - tkns = append(tkns, d.Token()) - - // do not consume the closing curly brace; the - // next iteration of the enclosing loop will - // call Next() and consume it - } - return tkns -} - -// Token returns the current token. -func (d *Dispenser) Token() Token { - if d.cursor < 0 || d.cursor >= len(d.tokens) { - return Token{} - } - return d.tokens[d.cursor] -} - -// Reset sets d's cursor to the beginning, as -// if this was a new and unused dispenser. -func (d *Dispenser) Reset() { - d.cursor = -1 - d.nesting = 0 -} - -// ArgErr returns an argument error, meaning that another -// argument was expected but not found. In other words, -// a line break or open curly brace was encountered instead of -// an argument. -func (d *Dispenser) ArgErr() error { - if d.Val() == "{" { - return d.Err("Unexpected token '{', expecting argument") - } - return d.Errf("Wrong argument count or unexpected line ending after '%s'", d.Val()) -} - -// SyntaxErr creates a generic syntax error which explains what was -// found and what was expected. -func (d *Dispenser) SyntaxErr(expected string) error { - msg := fmt.Sprintf("%s:%d - Syntax error: Unexpected token '%s', expecting '%s'", d.File(), d.Line(), d.Val(), expected) - return errors.New(msg) -} - -// EOFErr returns an error indicating that the dispenser reached -// the end of the input when searching for the next token. -func (d *Dispenser) EOFErr() error { - return d.Errf("Unexpected EOF") -} - -// Err generates a custom parse-time error with a message of msg. -func (d *Dispenser) Err(msg string) error { - return d.Errf(msg) -} - -// Errf is like Err, but for formatted error messages -func (d *Dispenser) Errf(format string, args ...interface{}) error { - err := fmt.Errorf(format, args...) - return fmt.Errorf("%s:%d - Error during parsing: %w", d.File(), d.Line(), err) -} - -// Delete deletes the current token and returns the updated slice -// of tokens. The cursor is not advanced to the next token. -// Because deletion modifies the underlying slice, this method -// should only be called if you have access to the original slice -// of tokens and/or are using the slice of tokens outside this -// Dispenser instance. If you do not re-assign the slice with the -// return value of this method, inconsistencies in the token -// array will become apparent (or worse, hide from you like they -// did me for 3 and a half freaking hours late one night). -func (d *Dispenser) Delete() []Token { - if d.cursor >= 0 && d.cursor <= len(d.tokens)-1 { - d.tokens = append(d.tokens[:d.cursor], d.tokens[d.cursor+1:]...) - d.cursor-- - } - return d.tokens -} - -// numLineBreaks counts how many line breaks are in the token -// value given by the token index tknIdx. It returns 0 if the -// token does not exist or there are no line breaks. 
-func (d *Dispenser) numLineBreaks(tknIdx int) int { - if tknIdx < 0 || tknIdx >= len(d.tokens) { - return 0 - } - return strings.Count(d.tokens[tknIdx].Text, "\n") -} - -// isNewLine determines whether the current token is on a different -// line (higher line number) than the previous token. It handles imported -// tokens correctly. If there isn't a previous token, it returns true. -func (d *Dispenser) isNewLine() bool { - if d.cursor < 1 { - return true - } - if d.cursor > len(d.tokens)-1 { - return false - } - return d.tokens[d.cursor-1].File != d.tokens[d.cursor].File || - d.tokens[d.cursor-1].Line+d.numLineBreaks(d.cursor-1) < d.tokens[d.cursor].Line -} diff --git a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/caddyfile/formatter.go b/vendor/github.com/caddyserver/caddy/v2/caddyconfig/caddyfile/formatter.go deleted file mode 100644 index cb0033f7..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/caddyfile/formatter.go +++ /dev/null @@ -1,216 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package caddyfile - -import ( - "bytes" - "io" - "unicode" -) - -// Format formats the input Caddyfile to a standard, nice-looking -// appearance. It works by reading each rune of the input and taking -// control over all the bracing and whitespace that is written; otherwise, -// words, comments, placeholders, and escaped characters are all treated -// literally and written as they appear in the input. 
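Format is also idempotent, so formatting already-formatted input is a fixed point; that is exactly what the FuzzFormat target further down asserts. A quick sketch of its effect:

in := []byte("example.com{\nrespond   \"hi\"\n}")
fmt.Print(string(caddyfile.Format(in)))
// example.com {
//	respond "hi"
// }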
-func Format(input []byte) []byte { - input = bytes.TrimSpace(input) - - out := new(bytes.Buffer) - rdr := bytes.NewReader(input) - - var ( - last rune // the last character that was written to the result - - space = true // whether current/previous character was whitespace (beginning of input counts as space) - beginningOfLine = true // whether we are at beginning of line - - openBrace bool // whether current word/token is or started with open curly brace - openBraceWritten bool // if openBrace, whether that brace was written or not - openBraceSpace bool // whether there was a non-newline space before open brace - - newLines int // count of newlines consumed - - comment bool // whether we're in a comment - quoted bool // whether we're in a quoted segment - escaped bool // whether current char is escaped - - nesting int // indentation level - ) - - write := func(ch rune) { - out.WriteRune(ch) - last = ch - } - - indent := func() { - for tabs := nesting; tabs > 0; tabs-- { - write('\t') - } - } - - nextLine := func() { - write('\n') - beginningOfLine = true - } - - for { - ch, _, err := rdr.ReadRune() - if err != nil { - if err == io.EOF { - break - } - panic(err) - } - - if comment { - if ch == '\n' { - comment = false - space = true - nextLine() - continue - } else { - write(ch) - continue - } - } - - if !escaped && ch == '\\' { - if space { - write(' ') - space = false - } - write(ch) - escaped = true - continue - } - - if escaped { - write(ch) - escaped = false - continue - } - - if quoted { - if ch == '"' { - quoted = false - } - write(ch) - continue - } - - if space && ch == '"' { - quoted = true - } - - if unicode.IsSpace(ch) { - space = true - if ch == '\n' { - newLines++ - } - continue - } - spacePrior := space - space = false - - ////////////////////////////////////////////////////////// - // I find it helpful to think of the formatting loop in two - // main sections; by the time we reach this point, we - // know we are in a "regular" part of the file: we know - // the character is not a space, not in a literal segment - // like a comment or quoted, it's not escaped, etc. - ////////////////////////////////////////////////////////// - - if ch == '#' { - comment = true - } - - if openBrace && spacePrior && !openBraceWritten { - if nesting == 0 && last == '}' { - nextLine() - nextLine() - } - - openBrace = false - if beginningOfLine { - indent() - } else if !openBraceSpace { - write(' ') - } - write('{') - openBraceWritten = true - nextLine() - newLines = 0 - nesting++ - } - - switch { - case ch == '{': - openBrace = true - openBraceWritten = false - openBraceSpace = spacePrior && !beginningOfLine - if openBraceSpace { - write(' ') - } - continue - - case ch == '}' && (spacePrior || !openBrace): - if last != '\n' { - nextLine() - } - if nesting > 0 { - nesting-- - } - indent() - write('}') - newLines = 0 - continue - } - - if newLines > 2 { - newLines = 2 - } - for i := 0; i < newLines; i++ { - nextLine() - } - newLines = 0 - if beginningOfLine { - indent() - } - if nesting == 0 && last == '}' && beginningOfLine { - nextLine() - nextLine() - } - - if !beginningOfLine && spacePrior { - write(' ') - } - - if openBrace && !openBraceWritten { - write('{') - openBraceWritten = true - } - write(ch) - - beginningOfLine = false - } - - // the Caddyfile does not need any leading or trailing spaces, but... 
- trimmedResult := bytes.TrimSpace(out.Bytes()) - - // ...Caddyfiles should, however, end with a newline because - // newlines are significant to the syntax of the file - return append(trimmedResult, '\n') -} diff --git a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/caddyfile/formatter_fuzz.go b/vendor/github.com/caddyserver/caddy/v2/caddyconfig/caddyfile/formatter_fuzz.go deleted file mode 100644 index b1bbd84a..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/caddyfile/formatter_fuzz.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build gofuzz - -package caddyfile - -import "bytes" - -func FuzzFormat(input []byte) int { - formatted := Format(input) - if bytes.Equal(formatted, Format(formatted)) { - return 1 - } - return 0 -} diff --git a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/caddyfile/importgraph.go b/vendor/github.com/caddyserver/caddy/v2/caddyconfig/caddyfile/importgraph.go deleted file mode 100644 index 659c3680..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/caddyfile/importgraph.go +++ /dev/null @@ -1,127 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package caddyfile - -import ( - "fmt" -) - -type adjacency map[string][]string - -type importGraph struct { - nodes map[string]bool - edges adjacency -} - -func (i *importGraph) addNode(name string) { - if i.nodes == nil { - i.nodes = make(map[string]bool) - } - if _, exists := i.nodes[name]; exists { - return - } - i.nodes[name] = true -} -func (i *importGraph) addNodes(names []string) { - for _, name := range names { - i.addNode(name) - } -} - -func (i *importGraph) removeNode(name string) { - delete(i.nodes, name) -} -func (i *importGraph) removeNodes(names []string) { - for _, name := range names { - i.removeNode(name) - } -} - -func (i *importGraph) addEdge(from, to string) error { - if !i.exists(from) || !i.exists(to) { - return fmt.Errorf("one of the nodes does not exist") - } - - if i.willCycle(to, from) { - return fmt.Errorf("a cycle of imports exists between %s and %s", from, to) - } - - if i.areConnected(from, to) { - // if connected, there's nothing to do - return nil - } - - if i.nodes == nil { - i.nodes = make(map[string]bool) - } - if i.edges == nil { - i.edges = make(adjacency) - } - - i.edges[from] = append(i.edges[from], to) - return nil -} -func (i *importGraph) addEdges(from string, tos []string) error { - for _, to := range tos { - err := i.addEdge(from, to) - if err != nil { - return err - } - } - return nil -} - -func (i *importGraph) areConnected(from, to string) bool { - al, ok := i.edges[from] - if !ok { - return false - } - for _, v := range al { - if v == to { - return true - } - } - return false -} - -func (i *importGraph) willCycle(from, to string) bool { - collector := make(map[string]bool) - - var visit func(string) - visit = func(start string) { - if !collector[start] { - collector[start] = true - for _, v := range i.edges[start] { - visit(v) - } - } - } - - for _, v := range i.edges[from] { - visit(v) - } - for k := range collector { - if to == k { - return true - } - } - - return false -} - -func (i *importGraph) exists(key string) bool { - _, exists := i.nodes[key] - return exists -} diff --git a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/caddyfile/lexer.go b/vendor/github.com/caddyserver/caddy/v2/caddyconfig/caddyfile/lexer.go deleted file mode 100644 index f4da2391..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/caddyfile/lexer.go +++ /dev/null @@ -1,191 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package caddyfile - -import ( - "bufio" - "bytes" - "io" - "unicode" -) - -type ( - // lexer is a utility which can get values, token by - // token, from a Reader. A token is a word, and tokens - // are separated by whitespace. A word can be enclosed - // in quotes if it contains whitespace. - lexer struct { - reader *bufio.Reader - token Token - line int - skippedLines int - } - - // Token represents a single parsable unit. 
- Token struct { - File string - Line int - Text string - inSnippet bool - snippetName string - } -) - -// load prepares the lexer to scan an input for tokens. -// It discards any leading byte order mark. -func (l *lexer) load(input io.Reader) error { - l.reader = bufio.NewReader(input) - l.line = 1 - - // discard byte order mark, if present - firstCh, _, err := l.reader.ReadRune() - if err != nil { - return err - } - if firstCh != 0xFEFF { - err := l.reader.UnreadRune() - if err != nil { - return err - } - } - - return nil -} - -// next loads the next token into the lexer. -// A token is delimited by whitespace, unless -// the token starts with a quotes character (") -// in which case the token goes until the closing -// quotes (the enclosing quotes are not included). -// Inside quoted strings, quotes may be escaped -// with a preceding \ character. No other chars -// may be escaped. The rest of the line is skipped -// if a "#" character is read in. Returns true if -// a token was loaded; false otherwise. -func (l *lexer) next() bool { - var val []rune - var comment, quoted, btQuoted, escaped bool - - makeToken := func() bool { - l.token.Text = string(val) - return true - } - - for { - ch, _, err := l.reader.ReadRune() - if err != nil { - if len(val) > 0 { - return makeToken() - } - if err == io.EOF { - return false - } - panic(err) - } - - if !escaped && !btQuoted && ch == '\\' { - escaped = true - continue - } - - if quoted || btQuoted { - if quoted && escaped { - // all is literal in quoted area, - // so only escape quotes - if ch != '"' { - val = append(val, '\\') - } - escaped = false - } else { - if quoted && ch == '"' { - return makeToken() - } - if btQuoted && ch == '`' { - return makeToken() - } - } - if ch == '\n' { - l.line += 1 + l.skippedLines - l.skippedLines = 0 - } - val = append(val, ch) - continue - } - - if unicode.IsSpace(ch) { - if ch == '\r' { - continue - } - if ch == '\n' { - if escaped { - l.skippedLines++ - escaped = false - } else { - l.line += 1 + l.skippedLines - l.skippedLines = 0 - } - comment = false - } - if len(val) > 0 { - return makeToken() - } - continue - } - - if ch == '#' && len(val) == 0 { - comment = true - } - if comment { - continue - } - - if len(val) == 0 { - l.token = Token{Line: l.line} - if ch == '"' { - quoted = true - continue - } - if ch == '`' { - btQuoted = true - continue - } - } - - if escaped { - val = append(val, '\\') - escaped = false - } - - val = append(val, ch) - } -} - -// Tokenize takes bytes as input and lexes it into -// a list of tokens that can be parsed as a Caddyfile. -// Also takes a filename to fill the token's File as -// the source of the tokens, which is important to -// determine relative paths for `import` directives. -func Tokenize(input []byte, filename string) ([]Token, error) { - l := lexer{} - if err := l.load(bytes.NewReader(input)); err != nil { - return nil, err - } - var tokens []Token - for l.next() { - l.token.File = filename - tokens = append(tokens, l.token) - } - return tokens, nil -} diff --git a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/caddyfile/lexer_fuzz.go b/vendor/github.com/caddyserver/caddy/v2/caddyconfig/caddyfile/lexer_fuzz.go deleted file mode 100644 index 29348446..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/caddyfile/lexer_fuzz.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build gofuzz
-
-package caddyfile
-
-func FuzzTokenize(input []byte) int {
-	tokens, err := Tokenize(input, "Caddyfile")
-	if err != nil {
-		return 0
-	}
-	if len(tokens) == 0 {
-		return -1
-	}
-	return 1
-}
diff --git a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/caddyfile/parse.go b/vendor/github.com/caddyserver/caddy/v2/caddyconfig/caddyfile/parse.go
deleted file mode 100644
index c0f60794..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/caddyfile/parse.go
+++ /dev/null
@@ -1,616 +0,0 @@
-// Copyright 2015 Light Code Labs, LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package caddyfile
-
-import (
-	"bytes"
-	"fmt"
-	"io/ioutil"
-	"log"
-	"os"
-	"path/filepath"
-	"strconv"
-	"strings"
-
-	"github.com/caddyserver/caddy/v2"
-)
-
-// Parse parses the input just enough to group tokens, in
-// order, by server block. No further parsing is performed.
-// Server blocks are returned in the order in which they appear.
-//
-// Environment variables in {$ENVIRONMENT_VARIABLE} notation
-// will be replaced before parsing begins.
-func Parse(filename string, input []byte) ([]ServerBlock, error) {
-	tokens, err := allTokens(filename, input)
-	if err != nil {
-		return nil, err
-	}
-	p := parser{
-		Dispenser: NewDispenser(tokens),
-		importGraph: importGraph{
-			nodes: make(map[string]bool),
-			edges: make(adjacency),
-		},
-	}
-	return p.parseAll()
-}
-
-// replaceEnvVars replaces all occurrences of environment variables.
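The {$VAR} syntax it handles supports an optional default after a colon. An in-package sketch (replaceEnvVars is unexported, so this only compiles inside package caddyfile):

os.Setenv("UPSTREAM", "localhost:9000")
out, _ := replaceEnvVars([]byte("reverse_proxy {$UPSTREAM}\nbind {$HOST:0.0.0.0}"))
fmt.Println(string(out))
// reverse_proxy localhost:9000
// bind 0.0.0.0   <- HOST is unset, so the default after ':' is used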
-func replaceEnvVars(input []byte) ([]byte, error) { - var offset int - for { - begin := bytes.Index(input[offset:], spanOpen) - if begin < 0 { - break - } - begin += offset // make beginning relative to input, not offset - end := bytes.Index(input[begin+len(spanOpen):], spanClose) - if end < 0 { - break - } - end += begin + len(spanOpen) // make end relative to input, not begin - - // get the name; if there is no name, skip it - envString := input[begin+len(spanOpen) : end] - if len(envString) == 0 { - offset = end + len(spanClose) - continue - } - - // split the string into a key and an optional default - envParts := strings.SplitN(string(envString), envVarDefaultDelimiter, 2) - - // do a lookup for the env var, replace with the default if not found - envVarValue, found := os.LookupEnv(envParts[0]) - if !found && len(envParts) == 2 { - envVarValue = envParts[1] - } - - // get the value of the environment variable - // note that this causes one-level deep chaining - envVarBytes := []byte(envVarValue) - - // splice in the value - input = append(input[:begin], - append(envVarBytes, input[end+len(spanClose):]...)...) - - // continue at the end of the replacement - offset = begin + len(envVarBytes) - } - return input, nil -} - -// allTokens lexes the entire input, but does not parse it. -// It returns all the tokens from the input, unstructured -// and in order. -func allTokens(filename string, input []byte) ([]Token, error) { - input, err := replaceEnvVars(input) - if err != nil { - return nil, err - } - tokens, err := Tokenize(input, filename) - if err != nil { - return nil, err - } - return tokens, nil -} - -type parser struct { - *Dispenser - block ServerBlock // current server block being parsed - eof bool // if we encounter a valid EOF in a hard place - definedSnippets map[string][]Token - nesting int - importGraph importGraph -} - -func (p *parser) parseAll() ([]ServerBlock, error) { - var blocks []ServerBlock - - for p.Next() { - err := p.parseOne() - if err != nil { - return blocks, err - } - if len(p.block.Keys) > 0 || len(p.block.Segments) > 0 { - blocks = append(blocks, p.block) - } - if p.nesting > 0 { - return blocks, p.EOFErr() - } - } - - return blocks, nil -} - -func (p *parser) parseOne() error { - p.block = ServerBlock{} - return p.begin() -} - -func (p *parser) begin() error { - if len(p.tokens) == 0 { - return nil - } - - err := p.addresses() - - if err != nil { - return err - } - - if p.eof { - // this happens if the Caddyfile consists of only - // a line of addresses and nothing else - return nil - } - - if ok, name := p.isSnippet(); ok { - if p.definedSnippets == nil { - p.definedSnippets = map[string][]Token{} - } - if _, found := p.definedSnippets[name]; found { - return p.Errf("redeclaration of previously declared snippet %s", name) - } - // consume all tokens til matched close brace - tokens, err := p.snippetTokens() - if err != nil { - return err - } - // Just as we need to track which file the token comes from, we need to - // keep track of which snippets do the tokens come from. This is helpful - // in tracking import cycles across files/snippets by namespacing them. Without - // this we end up with false-positives in cycle-detection. - for k, v := range tokens { - v.inSnippet = true - v.snippetName = name - tokens[k] = v - } - p.definedSnippets[name] = tokens - // empty block keys so we don't save this block as a real server. 
- p.block.Keys = nil - return nil - } - - return p.blockContents() -} - -func (p *parser) addresses() error { - var expectingAnother bool - - for { - tkn := p.Val() - - // special case: import directive replaces tokens during parse-time - if tkn == "import" && p.isNewLine() { - err := p.doImport() - if err != nil { - return err - } - continue - } - - // Open brace definitely indicates end of addresses - if tkn == "{" { - if expectingAnother { - return p.Errf("Expected another address but had '%s' - check for extra comma", tkn) - } - // Mark this server block as being defined with braces. - // This is used to provide a better error message when - // the user may have tried to define two server blocks - // without having used braces, which are required in - // that case. - p.block.HasBraces = true - break - } - - // Users commonly forget to place a space between the address and the '{' - if strings.HasSuffix(tkn, "{") { - return p.Errf("Site addresses cannot end with a curly brace: '%s' - put a space between the token and the brace", tkn) - } - - if tkn != "" { // empty token possible if user typed "" - // Trailing comma indicates another address will follow, which - // may possibly be on the next line - if tkn[len(tkn)-1] == ',' { - tkn = tkn[:len(tkn)-1] - expectingAnother = true - } else { - expectingAnother = false // but we may still see another one on this line - } - - // If there's a comma here, it's probably because they didn't use a space - // between their two domains, e.g. "foo.com,bar.com", which would not be - // parsed as two separate site addresses. - if strings.Contains(tkn, ",") { - return p.Errf("Site addresses cannot contain a comma ',': '%s' - put a space after the comma to separate site addresses", tkn) - } - - p.block.Keys = append(p.block.Keys, tkn) - } - - // Advance token and possibly break out of loop or return error - hasNext := p.Next() - if expectingAnother && !hasNext { - return p.EOFErr() - } - if !hasNext { - p.eof = true - break // EOF - } - if !expectingAnother && p.isNewLine() { - break - } - } - - return nil -} - -func (p *parser) blockContents() error { - errOpenCurlyBrace := p.openCurlyBrace() - if errOpenCurlyBrace != nil { - // single-server configs don't need curly braces - p.cursor-- - } - - err := p.directives() - if err != nil { - return err - } - - // only look for close curly brace if there was an opening - if errOpenCurlyBrace == nil { - err = p.closeCurlyBrace() - if err != nil { - return err - } - } - - return nil -} - -// directives parses through all the lines for directives -// and it expects the next token to be the first -// directive. It goes until EOF or closing curly brace -// which ends the server block. 
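The snippet and import-argument machinery implemented by begin, directives, and doImport is easiest to see from the input side. A hedged example Caddyfile, embedded as a Go string (the names are illustrative):

const exampleCaddyfile = `
(greet) {
	respond "hello, {args.0}"
}

example.com {
	import greet world
}
`
// doImport (below) splices the snippet's tokens over the import line and
// its arguments, then substitutes {args.0} with "world" via the replacer.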
-func (p *parser) directives() error { - for p.Next() { - // end of server block - if p.Val() == "}" { - // p.nesting has already been decremented - break - } - - // special case: import directive replaces tokens during parse-time - if p.Val() == "import" { - err := p.doImport() - if err != nil { - return err - } - p.cursor-- // cursor is advanced when we continue, so roll back one more - continue - } - - // normal case: parse a directive as a new segment - // (a "segment" is a line which starts with a directive - // and which ends at the end of the line or at the end of - // the block that is opened at the end of the line) - if err := p.directive(); err != nil { - return err - } - } - - return nil -} - -// doImport swaps out the import directive and its argument -// (a total of 2 tokens) with the tokens in the specified file -// or globbing pattern. When the function returns, the cursor -// is on the token before where the import directive was. In -// other words, call Next() to access the first token that was -// imported. -func (p *parser) doImport() error { - // syntax checks - if !p.NextArg() { - return p.ArgErr() - } - importPattern := p.Val() - if importPattern == "" { - return p.Err("Import requires a non-empty filepath") - } - - // grab remaining args as placeholder replacements - args := p.RemainingArgs() - - // add args to the replacer - repl := caddy.NewEmptyReplacer() - for index, arg := range args { - repl.Set("args."+strconv.Itoa(index), arg) - } - - // splice out the import directive and its arguments - // (2 tokens, plus the length of args) - tokensBefore := p.tokens[:p.cursor-1-len(args)] - tokensAfter := p.tokens[p.cursor+1:] - var importedTokens []Token - var nodes []string - - // first check snippets. That is a simple, non-recursive replacement - if p.definedSnippets != nil && p.definedSnippets[importPattern] != nil { - importedTokens = p.definedSnippets[importPattern] - if len(importedTokens) > 0 { - // just grab the first one - nodes = append(nodes, fmt.Sprintf("%s:%s", importedTokens[0].File, importedTokens[0].snippetName)) - } - } else { - // make path relative to the file of the _token_ being processed rather - // than current working directory (issue #867) and then use glob to get - // list of matching filenames - absFile, err := filepath.Abs(p.Dispenser.File()) - if err != nil { - return p.Errf("Failed to get absolute path of file: %s: %v", p.Dispenser.File(), err) - } - - var matches []string - var globPattern string - if !filepath.IsAbs(importPattern) { - globPattern = filepath.Join(filepath.Dir(absFile), importPattern) - } else { - globPattern = importPattern - } - if strings.Count(globPattern, "*") > 1 || strings.Count(globPattern, "?") > 1 || - (strings.Contains(globPattern, "[") && strings.Contains(globPattern, "]")) { - // See issue #2096 - a pattern with many glob expansions can hang for too long - return p.Errf("Glob pattern may only contain one wildcard (*), but has others: %s", globPattern) - } - matches, err = filepath.Glob(globPattern) - - if err != nil { - return p.Errf("Failed to use import pattern %s: %v", importPattern, err) - } - if len(matches) == 0 { - if strings.ContainsAny(globPattern, "*?[]") { - log.Printf("[WARNING] No files matching import glob pattern: %s", importPattern) - } else { - return p.Errf("File to import not found: %s", importPattern) - } - } - - // collect all the imported tokens - for _, importFile := range matches { - newTokens, err := p.doSingleImport(importFile) - if err != nil { - return err - } - importedTokens = 
append(importedTokens, newTokens...) - } - nodes = matches - } - - nodeName := p.File() - if p.Token().inSnippet { - nodeName += fmt.Sprintf(":%s", p.Token().snippetName) - } - p.importGraph.addNode(nodeName) - p.importGraph.addNodes(nodes) - if err := p.importGraph.addEdges(nodeName, nodes); err != nil { - p.importGraph.removeNodes(nodes) - return err - } - - // copy the tokens so we don't overwrite p.definedSnippets - tokensCopy := make([]Token, len(importedTokens)) - copy(tokensCopy, importedTokens) - - // run the argument replacer on the tokens - for index, token := range tokensCopy { - token.Text = repl.ReplaceKnown(token.Text, "") - tokensCopy[index] = token - } - - // splice the imported tokens in the place of the import statement - // and rewind cursor so Next() will land on first imported token - p.tokens = append(tokensBefore, append(tokensCopy, tokensAfter...)...) - p.cursor -= len(args) + 1 - - return nil -} - -// doSingleImport lexes the individual file at importFile and returns -// its tokens or an error, if any. -func (p *parser) doSingleImport(importFile string) ([]Token, error) { - file, err := os.Open(importFile) - if err != nil { - return nil, p.Errf("Could not import %s: %v", importFile, err) - } - defer file.Close() - - if info, err := file.Stat(); err != nil { - return nil, p.Errf("Could not import %s: %v", importFile, err) - } else if info.IsDir() { - return nil, p.Errf("Could not import %s: is a directory", importFile) - } - - input, err := ioutil.ReadAll(file) - if err != nil { - return nil, p.Errf("Could not read imported file %s: %v", importFile, err) - } - - importedTokens, err := allTokens(importFile, input) - if err != nil { - return nil, p.Errf("Could not read tokens while importing %s: %v", importFile, err) - } - - // Tack the file path onto these tokens so errors show the imported file's name - // (we use full, absolute path to avoid bugs: issue #1892) - filename, err := filepath.Abs(importFile) - if err != nil { - return nil, p.Errf("Failed to get absolute path of file: %s: %v", importFile, err) - } - for i := 0; i < len(importedTokens); i++ { - importedTokens[i].File = filename - } - - return importedTokens, nil -} - -// directive collects tokens until the directive's scope -// closes (either end of line or end of curly brace block). -// It expects the currently-loaded token to be a directive -// (or } that ends a server block). The collected tokens -// are loaded into the current server block for later use -// by directive setup functions. -func (p *parser) directive() error { - - // a segment is a list of tokens associated with this directive - var segment Segment - - // the directive itself is appended as a relevant token - segment = append(segment, p.Token()) - - for p.Next() { - if p.Val() == "{" { - p.nesting++ - } else if p.isNewLine() && p.nesting == 0 { - p.cursor-- // read too far - break - } else if p.Val() == "}" && p.nesting > 0 { - p.nesting-- - } else if p.Val() == "}" && p.nesting == 0 { - return p.Err("Unexpected '}' because no matching opening brace") - } else if p.Val() == "import" && p.isNewLine() { - if err := p.doImport(); err != nil { - return err - } - p.cursor-- // cursor is advanced when we continue, so roll back one more - continue - } - - segment = append(segment, p.Token()) - } - - p.block.Segments = append(p.block.Segments, segment) - - if p.nesting > 0 { - return p.EOFErr() - } - - return nil -} - -// openCurlyBrace expects the current token to be an -// opening curly brace. 
This acts like an assertion -// because it returns an error if the token is not -// a opening curly brace. It does NOT advance the token. -func (p *parser) openCurlyBrace() error { - if p.Val() != "{" { - return p.SyntaxErr("{") - } - return nil -} - -// closeCurlyBrace expects the current token to be -// a closing curly brace. This acts like an assertion -// because it returns an error if the token is not -// a closing curly brace. It does NOT advance the token. -func (p *parser) closeCurlyBrace() error { - if p.Val() != "}" { - return p.SyntaxErr("}") - } - return nil -} - -func (p *parser) isSnippet() (bool, string) { - keys := p.block.Keys - // A snippet block is a single key with parens. Nothing else qualifies. - if len(keys) == 1 && strings.HasPrefix(keys[0], "(") && strings.HasSuffix(keys[0], ")") { - return true, strings.TrimSuffix(keys[0][1:], ")") - } - return false, "" -} - -// read and store everything in a block for later replay. -func (p *parser) snippetTokens() ([]Token, error) { - // snippet must have curlies. - err := p.openCurlyBrace() - if err != nil { - return nil, err - } - nesting := 1 // count our own nesting in snippets - tokens := []Token{} - for p.Next() { - if p.Val() == "}" { - nesting-- - if nesting == 0 { - break - } - } - if p.Val() == "{" { - nesting++ - } - tokens = append(tokens, p.tokens[p.cursor]) - } - // make sure we're matched up - if nesting != 0 { - return nil, p.SyntaxErr("}") - } - return tokens, nil -} - -// ServerBlock associates any number of keys from the -// head of the server block with tokens, which are -// grouped by segments. -type ServerBlock struct { - HasBraces bool - Keys []string - Segments []Segment -} - -// DispenseDirective returns a dispenser that contains -// all the tokens in the server block. -func (sb ServerBlock) DispenseDirective(dir string) *Dispenser { - var tokens []Token - for _, seg := range sb.Segments { - if len(seg) > 0 && seg[0].Text == dir { - tokens = append(tokens, seg...) - } - } - return NewDispenser(tokens) -} - -// Segment is a list of tokens which begins with a directive -// and ends at the end of the directive (either at the end of -// the line, or at the end of a block it opens). -type Segment []Token - -// Directive returns the directive name for the segment. -// The directive name is the text of the first token. -func (s Segment) Directive() string { - if len(s) > 0 { - return s[0].Text - } - return "" -} - -// spanOpen and spanClose are used to bound spans that -// contain the name of an environment variable. -var ( - spanOpen, spanClose = []byte{'{', '$'}, []byte{'}'} - envVarDefaultDelimiter = ":" -) diff --git a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/configadapters.go b/vendor/github.com/caddyserver/caddy/v2/caddyconfig/configadapters.go deleted file mode 100644 index ccac5f88..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/configadapters.go +++ /dev/null @@ -1,138 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package caddyconfig - -import ( - "encoding/json" - "fmt" - - "github.com/caddyserver/caddy/v2" -) - -// Adapter is a type which can adapt a configuration to Caddy JSON. -// It returns the results and any warnings, or an error. -type Adapter interface { - Adapt(body []byte, options map[string]interface{}) ([]byte, []Warning, error) -} - -// Warning represents a warning or notice related to conversion. -type Warning struct { - File string `json:"file,omitempty"` - Line int `json:"line,omitempty"` - Directive string `json:"directive,omitempty"` - Message string `json:"message,omitempty"` -} - -func (w Warning) String() string { - var directive string - if w.Directive != "" { - directive = fmt.Sprintf(" (%s)", w.Directive) - } - return fmt.Sprintf("%s:%d%s: %s", w.File, w.Line, directive, w.Message) -} - -// JSON encodes val as JSON, returning it as a json.RawMessage. Any -// marshaling errors (which are highly unlikely with correct code) -// are converted to warnings. This is convenient when filling config -// structs that require a json.RawMessage, without having to worry -// about errors. -func JSON(val interface{}, warnings *[]Warning) json.RawMessage { - b, err := json.Marshal(val) - if err != nil { - if warnings != nil { - *warnings = append(*warnings, Warning{Message: err.Error()}) - } - return nil - } - return b -} - -// JSONModuleObject is like JSON(), except it marshals val into a JSON object -// with an added key named fieldName with the value fieldVal. This is useful -// for encoding module values where the module name has to be described within -// the object by a certain key; for example, `"handler": "file_server"` for a -// file server HTTP handler (fieldName="handler" and fieldVal="file_server"). -// The val parameter must encode into a map[string]interface{} (i.e. it must be -// a struct or map). Any errors are converted into warnings. -func JSONModuleObject(val interface{}, fieldName, fieldVal string, warnings *[]Warning) json.RawMessage { - // encode to a JSON object first - enc, err := json.Marshal(val) - if err != nil { - if warnings != nil { - *warnings = append(*warnings, Warning{Message: err.Error()}) - } - return nil - } - - // then decode the object - var tmp map[string]interface{} - err = json.Unmarshal(enc, &tmp) - if err != nil { - if warnings != nil { - *warnings = append(*warnings, Warning{Message: err.Error()}) - } - return nil - } - - // so we can easily add the module's field with its appointed value - tmp[fieldName] = fieldVal - - // then re-marshal as JSON - result, err := json.Marshal(tmp) - if err != nil { - if warnings != nil { - *warnings = append(*warnings, Warning{Message: err.Error()}) - } - return nil - } - - return result -} - -// RegisterAdapter registers a config adapter with the given name. -// This should usually be done at init-time. It panics if the -// adapter cannot be registered successfully. -func RegisterAdapter(name string, adapter Adapter) { - if _, ok := configAdapters[name]; ok { - panic(fmt.Errorf("%s: already registered", name)) - } - configAdapters[name] = adapter - caddy.RegisterModule(adapterModule{name, adapter}) -} - -// GetAdapter returns the adapter with the given name, -// or nil if one with that name is not registered. 
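JSON and JSONModuleObject in action; note how the module name is spliced into the object under the caller-chosen key. A small sketch:

var warns []caddyconfig.Warning
raw := caddyconfig.JSON(map[string]string{"root": "/srv"}, &warns)
obj := caddyconfig.JSONModuleObject(
	struct {
		Root string `json:"root"`
	}{Root: "/srv"},
	"handler", "file_server", &warns,
)
fmt.Println(string(raw)) // {"root":"/srv"}
fmt.Println(string(obj)) // {"handler":"file_server","root":"/srv"}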
-func GetAdapter(name string) Adapter { - return configAdapters[name] -} - -// adapterModule is a wrapper type that can turn any config -// adapter into a Caddy module, which has the benefit of being -// counted with other modules, even though they do not -// technically extend the Caddy configuration structure. -// See caddyserver/caddy#3132. -type adapterModule struct { - name string - Adapter -} - -func (am adapterModule) CaddyModule() caddy.ModuleInfo { - return caddy.ModuleInfo{ - ID: caddy.ModuleID("caddy.adapters." + am.name), - New: func() caddy.Module { return am }, - } -} - -var configAdapters = make(map[string]Adapter) diff --git a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile/addresses.go b/vendor/github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile/addresses.go deleted file mode 100644 index 71053204..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile/addresses.go +++ /dev/null @@ -1,400 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package httpcaddyfile - -import ( - "fmt" - "net" - "reflect" - "sort" - "strconv" - "strings" - "unicode" - - "github.com/caddyserver/caddy/v2" - "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile" - "github.com/caddyserver/caddy/v2/modules/caddyhttp" - "github.com/caddyserver/certmagic" -) - -// mapAddressToServerBlocks returns a map of listener address to list of server -// blocks that will be served on that address. To do this, each server block is -// expanded so that each one is considered individually, although keys of a -// server block that share the same address stay grouped together so the config -// isn't repeated unnecessarily. For example, this Caddyfile: -// -// example.com { -// bind 127.0.0.1 -// } -// www.example.com, example.net/path, localhost:9999 { -// bind 127.0.0.1 1.2.3.4 -// } -// -// has two server blocks to start with. But expressed in this Caddyfile are -// actually 4 listener addresses: 127.0.0.1:443, 1.2.3.4:443, 127.0.0.1:9999, -// and 127.0.0.1:9999. This is because the bind directive is applied to each -// key of its server block (specifying the host part), and each key may have -// a different port. And we definitely need to be sure that a site which is -// bound to be served on a specific interface is not served on others just -// because that is more convenient: it would be a potential security risk -// if the difference between interfaces means private vs. public. -// -// So what this function does for the example above is iterate each server -// block, and for each server block, iterate its keys. For the first, it -// finds one key (example.com) and determines its listener address -// (127.0.0.1:443 - because of 'bind' and automatic HTTPS). It then adds -// the listener address to the map value returned by this function, with -// the first server block as one of its associations. 
-// -// It then iterates each key on the second server block and associates them -// with one or more listener addresses. Indeed, each key in this block has -// two listener addresses because of the 'bind' directive. Once we know -// which addresses serve which keys, we can create a new server block for -// each address containing the contents of the server block and only those -// specific keys of the server block which use that address. -// -// It is possible and even likely that some keys in the returned map have -// the exact same list of server blocks (i.e. they are identical). This -// happens when multiple hosts are declared with a 'bind' directive and -// the resulting listener addresses are not shared by any other server -// block (or the other server blocks are exactly identical in their token -// contents). This happens with our example above because 1.2.3.4:443 -// and 1.2.3.4:9999 are used exclusively with the second server block. This -// repetition may be undesirable, so call consolidateAddrMappings() to map -// multiple addresses to the same lists of server blocks (a many:many mapping). -// (Doing this is essentially a map-reduce technique.) -func (st *ServerType) mapAddressToServerBlocks(originalServerBlocks []serverBlock, - options map[string]interface{}) (map[string][]serverBlock, error) { - sbmap := make(map[string][]serverBlock) - - for i, sblock := range originalServerBlocks { - // within a server block, we need to map all the listener addresses - // implied by the server block to the keys of the server block which - // will be served by them; this has the effect of treating each - // key of a server block as its own, but without having to repeat its - // contents in cases where multiple keys really can be served together - addrToKeys := make(map[string][]string) - for j, key := range sblock.block.Keys { - // a key can have multiple listener addresses if there are multiple - // arguments to the 'bind' directive (although they will all have - // the same port, since the port is defined by the key or is implicit - // through automatic HTTPS) - addrs, err := st.listenerAddrsForServerBlockKey(sblock, key, options) - if err != nil { - return nil, fmt.Errorf("server block %d, key %d (%s): determining listener address: %v", i, j, key, err) - } - - // associate this key with each listener address it is served on - for _, addr := range addrs { - addrToKeys[addr] = append(addrToKeys[addr], key) - } - } - - // now that we know which addresses serve which keys of this - // server block, we iterate that mapping and create a list of - // new server blocks for each address where the keys of the - // server block are only the ones which use the address; but - // the contents (tokens) are of course the same - for addr, keys := range addrToKeys { - // parse keys so that we only have to do it once - parsedKeys := make([]Address, 0, len(keys)) - for _, key := range keys { - addr, err := ParseAddress(key) - if err != nil { - return nil, fmt.Errorf("parsing key '%s': %v", key, err) - } - parsedKeys = append(parsedKeys, addr.Normalize()) - } - sbmap[addr] = append(sbmap[addr], serverBlock{ - block: caddyfile.ServerBlock{ - Keys: keys, - Segments: sblock.block.Segments, - }, - pile: sblock.pile, - keys: parsedKeys, - }) - } - } - - return sbmap, nil -} - -// consolidateAddrMappings eliminates repetition of identical server blocks in a mapping of -// single listener addresses to lists of server blocks. 
Since multiple addresses may serve -// identical sites (server block contents), this function turns a 1:many mapping into a -// many:many mapping. Server block contents (tokens) must be exactly identical so that -// reflect.DeepEqual returns true in order for the addresses to be combined. Identical -// entries are deleted from the addrToServerBlocks map. Essentially, each pairing (each -// association from multiple addresses to multiple server blocks; i.e. each element of -// the returned slice) becomes a server definition in the output JSON. -func (st *ServerType) consolidateAddrMappings(addrToServerBlocks map[string][]serverBlock) []sbAddrAssociation { - sbaddrs := make([]sbAddrAssociation, 0, len(addrToServerBlocks)) - for addr, sblocks := range addrToServerBlocks { - // we start with knowing that at least this address - // maps to these server blocks - a := sbAddrAssociation{ - addresses: []string{addr}, - serverBlocks: sblocks, - } - - // now find other addresses that map to identical - // server blocks and add them to our list of - // addresses, while removing them from the map - for otherAddr, otherSblocks := range addrToServerBlocks { - if addr == otherAddr { - continue - } - if reflect.DeepEqual(sblocks, otherSblocks) { - a.addresses = append(a.addresses, otherAddr) - delete(addrToServerBlocks, otherAddr) - } - } - - sbaddrs = append(sbaddrs, a) - } - - // sort them by their first address (we know there will always be at least one) - // to avoid problems with non-deterministic ordering (makes tests flaky) - sort.Slice(sbaddrs, func(i, j int) bool { - return sbaddrs[i].addresses[0] < sbaddrs[j].addresses[0] - }) - - return sbaddrs -} - -func (st *ServerType) listenerAddrsForServerBlockKey(sblock serverBlock, key string, - options map[string]interface{}) ([]string, error) { - addr, err := ParseAddress(key) - if err != nil { - return nil, fmt.Errorf("parsing key: %v", err) - } - addr = addr.Normalize() - - // figure out the HTTP and HTTPS ports; either - // use defaults, or override with user config - httpPort, httpsPort := strconv.Itoa(caddyhttp.DefaultHTTPPort), strconv.Itoa(caddyhttp.DefaultHTTPSPort) - if hport, ok := options["http_port"]; ok { - httpPort = strconv.Itoa(hport.(int)) - } - if hsport, ok := options["https_port"]; ok { - httpsPort = strconv.Itoa(hsport.(int)) - } - - // default port is the HTTPS port - lnPort := httpsPort - if addr.Port != "" { - // port explicitly defined - lnPort = addr.Port - } else if addr.Scheme == "http" { - // port inferred from scheme - lnPort = httpPort - } - - // error if scheme and port combination violate convention - if (addr.Scheme == "http" && lnPort == httpsPort) || (addr.Scheme == "https" && lnPort == httpPort) { - return nil, fmt.Errorf("[%s] scheme and port violate convention", key) - } - - // the bind directive specifies hosts, but is optional - lnHosts := make([]string, 0, len(sblock.pile)) - for _, cfgVal := range sblock.pile["bind"] { - lnHosts = append(lnHosts, cfgVal.Value.([]string)...) 
- } - if len(lnHosts) == 0 { - lnHosts = []string{""} - } - - // use a map to prevent duplication - listeners := make(map[string]struct{}) - for _, host := range lnHosts { - addr, err := caddy.ParseNetworkAddress(host) - if err == nil && addr.IsUnixNetwork() { - listeners[host] = struct{}{} - } else { - listeners[net.JoinHostPort(host, lnPort)] = struct{}{} - } - } - - // now turn map into list - listenersList := make([]string, 0, len(listeners)) - for lnStr := range listeners { - listenersList = append(listenersList, lnStr) - } - - return listenersList, nil -} - -// Address represents a site address. It contains -// the original input value, and the component -// parts of an address. The component parts may be -// updated to the correct values as setup proceeds, -// but the original value should never be changed. -// -// The Host field must be in a normalized form. -type Address struct { - Original, Scheme, Host, Port, Path string -} - -// ParseAddress parses an address string into a structured format with separate -// scheme, host, port, and path portions, as well as the original input string. -func ParseAddress(str string) (Address, error) { - const maxLen = 4096 - if len(str) > maxLen { - str = str[:maxLen] - } - remaining := strings.TrimSpace(str) - a := Address{Original: remaining} - - // extract scheme - splitScheme := strings.SplitN(remaining, "://", 2) - switch len(splitScheme) { - case 0: - return a, nil - case 1: - remaining = splitScheme[0] - case 2: - a.Scheme = splitScheme[0] - remaining = splitScheme[1] - } - - // extract host and port - hostSplit := strings.SplitN(remaining, "/", 2) - if len(hostSplit) > 0 { - host, port, err := net.SplitHostPort(hostSplit[0]) - if err != nil { - host, port, err = net.SplitHostPort(hostSplit[0] + ":") - if err != nil { - host = hostSplit[0] - } - } - a.Host = host - a.Port = port - } - if len(hostSplit) == 2 { - // all that remains is the path - a.Path = "/" + hostSplit[1] - } - - // make sure port is valid - if a.Port != "" { - if portNum, err := strconv.Atoi(a.Port); err != nil { - return Address{}, fmt.Errorf("invalid port '%s': %v", a.Port, err) - } else if portNum < 0 || portNum > 65535 { - return Address{}, fmt.Errorf("port %d is out of range", portNum) - } - } - - return a, nil -} - -// String returns a human-readable form of a. It will -// be a cleaned-up and filled-out URL string. -func (a Address) String() string { - if a.Host == "" && a.Port == "" { - return "" - } - scheme := a.Scheme - if scheme == "" { - if a.Port == strconv.Itoa(certmagic.HTTPSPort) { - scheme = "https" - } else { - scheme = "http" - } - } - s := scheme - if s != "" { - s += "://" - } - if a.Port != "" && - ((scheme == "https" && a.Port != strconv.Itoa(caddyhttp.DefaultHTTPSPort)) || - (scheme == "http" && a.Port != strconv.Itoa(caddyhttp.DefaultHTTPPort))) { - s += net.JoinHostPort(a.Host, a.Port) - } else { - s += a.Host - } - if a.Path != "" { - s += a.Path - } - return s -} - -// Normalize returns a normalized version of a. -func (a Address) Normalize() Address { - path := a.Path - - // ensure host is normalized if it's an IP address - host := strings.TrimSpace(a.Host) - if ip := net.ParseIP(host); ip != nil { - host = ip.String() - } - - return Address{ - Original: a.Original, - Scheme: lowerExceptPlaceholders(a.Scheme), - Host: lowerExceptPlaceholders(host), - Port: a.Port, - Path: path, - } -} - -// Key returns a string form of a, much like String() does, but this -// method doesn't add anything default that wasn't in the original. 
-func (a Address) Key() string { - res := "" - if a.Scheme != "" { - res += a.Scheme + "://" - } - if a.Host != "" { - res += a.Host - } - // insert port only if the original has its own explicit port - if a.Port != "" && - len(a.Original) >= len(res) && - strings.HasPrefix(a.Original[len(res):], ":"+a.Port) { - res += ":" + a.Port - } - if a.Path != "" { - res += a.Path - } - return res -} - -// lowerExceptPlaceholders lowercases s except within -// placeholders (substrings in non-escaped '{ }' spans). -// See https://github.com/caddyserver/caddy/issues/3264 -func lowerExceptPlaceholders(s string) string { - var sb strings.Builder - var escaped, inPlaceholder bool - for _, ch := range s { - if ch == '\\' && !escaped { - escaped = true - sb.WriteRune(ch) - continue - } - if ch == '{' && !escaped { - inPlaceholder = true - } - if ch == '}' && inPlaceholder && !escaped { - inPlaceholder = false - } - if inPlaceholder { - sb.WriteRune(ch) - } else { - sb.WriteRune(unicode.ToLower(ch)) - } - escaped = false - } - return sb.String() -} diff --git a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile/addresses_fuzz.go b/vendor/github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile/addresses_fuzz.go deleted file mode 100644 index 4ab62984..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile/addresses_fuzz.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build gofuzz - -package httpcaddyfile - -func FuzzParseAddress(data []byte) int { - addr, err := ParseAddress(string(data)) - if err != nil { - if addr == (Address{}) { - return 1 - } - return 0 - } - return 1 -} diff --git a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile/builtins.go b/vendor/github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile/builtins.go deleted file mode 100644 index d52c5ef8..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile/builtins.go +++ /dev/null @@ -1,789 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
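A quick way to sanity-check the address handling removed above (`ParseAddress`, `Normalize`, `Key`) is a table of representative inputs. A minimal sketch, assuming the exported identifiers exactly as they appear in the deleted hunk:

```go
package main

import (
	"fmt"

	"github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile"
)

func main() {
	for _, key := range []string{"EXAMPLE.com", "https://example.com:8443/path", ":9999"} {
		addr, err := httpcaddyfile.ParseAddress(key)
		if err != nil {
			panic(err)
		}
		// Normalize lowercases scheme and host (outside placeholders) and
		// canonicalizes IP literals; Key() re-adds the port only if the
		// original input spelled it out explicitly.
		addr = addr.Normalize()
		fmt.Printf("%-32s host=%q port=%q path=%q key=%q\n",
			key, addr.Host, addr.Port, addr.Path, addr.Key())
	}
}
```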
- -package httpcaddyfile - -import ( - "encoding/base64" - "encoding/pem" - "fmt" - "html" - "io/ioutil" - "net/http" - "reflect" - "strconv" - "strings" - - "github.com/caddyserver/caddy/v2" - "github.com/caddyserver/caddy/v2/caddyconfig" - "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile" - "github.com/caddyserver/caddy/v2/modules/caddyhttp" - "github.com/caddyserver/caddy/v2/modules/caddytls" - "github.com/caddyserver/certmagic" - "github.com/mholt/acmez/acme" - "go.uber.org/zap/zapcore" -) - -func init() { - RegisterDirective("bind", parseBind) - RegisterDirective("tls", parseTLS) - RegisterHandlerDirective("root", parseRoot) - RegisterHandlerDirective("redir", parseRedir) - RegisterHandlerDirective("respond", parseRespond) - RegisterHandlerDirective("abort", parseAbort) - RegisterHandlerDirective("error", parseError) - RegisterHandlerDirective("route", parseRoute) - RegisterHandlerDirective("handle", parseHandle) - RegisterDirective("handle_errors", parseHandleErrors) - RegisterDirective("log", parseLog) -} - -// parseBind parses the bind directive. Syntax: -// -// bind -// -func parseBind(h Helper) ([]ConfigValue, error) { - var lnHosts []string - for h.Next() { - lnHosts = append(lnHosts, h.RemainingArgs()...) - } - return h.NewBindAddresses(lnHosts), nil -} - -// parseTLS parses the tls directive. Syntax: -// -// tls [|internal]|[ ] { -// protocols [] -// ciphers -// curves -// client_auth { -// mode [request|require|verify_if_given|require_and_verify] -// trusted_ca_cert -// trusted_ca_cert_file -// trusted_leaf_cert -// trusted_leaf_cert_file -// } -// alpn -// load -// ca -// ca_root -// dns [...] -// on_demand -// eab -// issuer [...] -// } -// -func parseTLS(h Helper) ([]ConfigValue, error) { - cp := new(caddytls.ConnectionPolicy) - var fileLoader caddytls.FileLoader - var folderLoader caddytls.FolderLoader - var certSelector caddytls.CustomCertSelectionPolicy - var acmeIssuer *caddytls.ACMEIssuer - var keyType string - var internalIssuer *caddytls.InternalIssuer - var issuers []certmagic.Issuer - var onDemand bool - - for h.Next() { - // file certificate loader - firstLine := h.RemainingArgs() - switch len(firstLine) { - case 0: - case 1: - if firstLine[0] == "internal" { - internalIssuer = new(caddytls.InternalIssuer) - } else if !strings.Contains(firstLine[0], "@") { - return nil, h.Err("single argument must either be 'internal' or an email address") - } else { - if acmeIssuer == nil { - acmeIssuer = new(caddytls.ACMEIssuer) - } - acmeIssuer.Email = firstLine[0] - } - - case 2: - certFilename := firstLine[0] - keyFilename := firstLine[1] - - // tag this certificate so if multiple certs match, specifically - // this one that the user has provided will be used, see #2588: - // https://github.com/caddyserver/caddy/issues/2588 ... but we - // must be careful about how we do this; being careless will - // lead to failed handshakes - // - // we need to remember which cert files we've seen, since we - // must load each cert only once; otherwise, they each get a - // different tag... since a cert loaded twice has the same - // bytes, it will overwrite the first one in the cache, and - // only the last cert (and its tag) will survive, so any conn - // policy that is looking for any tag other than the last one - // to be loaded won't find it, and TLS handshakes will fail - // (see end of issue #3004) - // - // tlsCertTags maps certificate filenames to their tag. 
- // This is used to remember which tag is used for each - // certificate files, since we need to avoid loading - // the same certificate files more than once, overwriting - // previous tags - tlsCertTags, ok := h.State["tlsCertTags"].(map[string]string) - if !ok { - tlsCertTags = make(map[string]string) - h.State["tlsCertTags"] = tlsCertTags - } - - tag, ok := tlsCertTags[certFilename] - if !ok { - // haven't seen this cert file yet, let's give it a tag - // and add a loader for it - tag = fmt.Sprintf("cert%d", len(tlsCertTags)) - fileLoader = append(fileLoader, caddytls.CertKeyFilePair{ - Certificate: certFilename, - Key: keyFilename, - Tags: []string{tag}, - }) - // remember this for next time we see this cert file - tlsCertTags[certFilename] = tag - } - certSelector.AnyTag = append(certSelector.AnyTag, tag) - - default: - return nil, h.ArgErr() - } - - var hasBlock bool - for nesting := h.Nesting(); h.NextBlock(nesting); { - hasBlock = true - - switch h.Val() { - case "protocols": - args := h.RemainingArgs() - if len(args) == 0 { - return nil, h.SyntaxErr("one or two protocols") - } - if len(args) > 0 { - if _, ok := caddytls.SupportedProtocols[args[0]]; !ok { - return nil, h.Errf("Wrong protocol name or protocol not supported: '%s'", args[0]) - } - cp.ProtocolMin = args[0] - } - if len(args) > 1 { - if _, ok := caddytls.SupportedProtocols[args[1]]; !ok { - return nil, h.Errf("Wrong protocol name or protocol not supported: '%s'", args[1]) - } - cp.ProtocolMax = args[1] - } - - case "ciphers": - for h.NextArg() { - if !caddytls.CipherSuiteNameSupported(h.Val()) { - return nil, h.Errf("Wrong cipher suite name or cipher suite not supported: '%s'", h.Val()) - } - cp.CipherSuites = append(cp.CipherSuites, h.Val()) - } - - case "curves": - for h.NextArg() { - if _, ok := caddytls.SupportedCurves[h.Val()]; !ok { - return nil, h.Errf("Wrong curve name or curve not supported: '%s'", h.Val()) - } - cp.Curves = append(cp.Curves, h.Val()) - } - - case "client_auth": - cp.ClientAuthentication = &caddytls.ClientAuthentication{} - for nesting := h.Nesting(); h.NextBlock(nesting); { - subdir := h.Val() - switch subdir { - case "mode": - if !h.Args(&cp.ClientAuthentication.Mode) { - return nil, h.ArgErr() - } - if h.NextArg() { - return nil, h.ArgErr() - } - - case "trusted_ca_cert", - "trusted_leaf_cert": - if !h.NextArg() { - return nil, h.ArgErr() - } - if subdir == "trusted_ca_cert" { - cp.ClientAuthentication.TrustedCACerts = append(cp.ClientAuthentication.TrustedCACerts, h.Val()) - } else { - cp.ClientAuthentication.TrustedLeafCerts = append(cp.ClientAuthentication.TrustedLeafCerts, h.Val()) - } - - case "trusted_ca_cert_file", - "trusted_leaf_cert_file": - if !h.NextArg() { - return nil, h.ArgErr() - } - filename := h.Val() - certDataPEM, err := ioutil.ReadFile(filename) - if err != nil { - return nil, err - } - block, _ := pem.Decode(certDataPEM) - if block == nil || block.Type != "CERTIFICATE" { - return nil, h.Errf("no CERTIFICATE pem block found in %s", h.Val()) - } - if subdir == "trusted_ca_cert_file" { - cp.ClientAuthentication.TrustedCACerts = append(cp.ClientAuthentication.TrustedCACerts, - base64.StdEncoding.EncodeToString(block.Bytes)) - } else { - cp.ClientAuthentication.TrustedLeafCerts = append(cp.ClientAuthentication.TrustedLeafCerts, - base64.StdEncoding.EncodeToString(block.Bytes)) - } - - default: - return nil, h.Errf("unknown subdirective for client_auth: %s", subdir) - } - } - - case "alpn": - args := h.RemainingArgs() - if len(args) == 0 { - return nil, h.ArgErr() - } - 
cp.ALPN = args - - case "load": - folderLoader = append(folderLoader, h.RemainingArgs()...) - - case "ca": - arg := h.RemainingArgs() - if len(arg) != 1 { - return nil, h.ArgErr() - } - if acmeIssuer == nil { - acmeIssuer = new(caddytls.ACMEIssuer) - } - acmeIssuer.CA = arg[0] - - case "key_type": - arg := h.RemainingArgs() - if len(arg) != 1 { - return nil, h.ArgErr() - } - keyType = arg[0] - - case "eab": - arg := h.RemainingArgs() - if len(arg) != 2 { - return nil, h.ArgErr() - } - if acmeIssuer == nil { - acmeIssuer = new(caddytls.ACMEIssuer) - } - acmeIssuer.ExternalAccount = &acme.EAB{ - KeyID: arg[0], - MACKey: arg[1], - } - - case "issuer": - if !h.NextArg() { - return nil, h.ArgErr() - } - modName := h.Val() - modID := "tls.issuance." + modName - unm, err := caddyfile.UnmarshalModule(h.Dispenser, modID) - if err != nil { - return nil, err - } - issuer, ok := unm.(certmagic.Issuer) - if !ok { - return nil, h.Errf("module %s (%T) is not a certmagic.Issuer", modID, unm) - } - issuers = append(issuers, issuer) - - case "dns": - if !h.NextArg() { - return nil, h.ArgErr() - } - provName := h.Val() - if acmeIssuer == nil { - acmeIssuer = new(caddytls.ACMEIssuer) - } - if acmeIssuer.Challenges == nil { - acmeIssuer.Challenges = new(caddytls.ChallengesConfig) - } - if acmeIssuer.Challenges.DNS == nil { - acmeIssuer.Challenges.DNS = new(caddytls.DNSChallengeConfig) - } - modID := "dns.providers." + provName - unm, err := caddyfile.UnmarshalModule(h.Dispenser, modID) - if err != nil { - return nil, err - } - acmeIssuer.Challenges.DNS.ProviderRaw = caddyconfig.JSONModuleObject(unm, "name", provName, h.warnings) - - case "resolvers": - args := h.RemainingArgs() - if len(args) == 0 { - return nil, h.ArgErr() - } - if acmeIssuer == nil { - acmeIssuer = new(caddytls.ACMEIssuer) - } - if acmeIssuer.Challenges == nil { - acmeIssuer.Challenges = new(caddytls.ChallengesConfig) - } - if acmeIssuer.Challenges.DNS == nil { - acmeIssuer.Challenges.DNS = new(caddytls.DNSChallengeConfig) - } - acmeIssuer.Challenges.DNS.Resolvers = args - - case "ca_root": - arg := h.RemainingArgs() - if len(arg) != 1 { - return nil, h.ArgErr() - } - if acmeIssuer == nil { - acmeIssuer = new(caddytls.ACMEIssuer) - } - acmeIssuer.TrustedRootsPEMFiles = append(acmeIssuer.TrustedRootsPEMFiles, arg[0]) - - case "on_demand": - if h.NextArg() { - return nil, h.ArgErr() - } - onDemand = true - - default: - return nil, h.Errf("unknown subdirective: %s", h.Val()) - } - } - - // a naked tls directive is not allowed - if len(firstLine) == 0 && !hasBlock { - return nil, h.ArgErr() - } - } - - // begin building the final config values - configVals := []ConfigValue{} - - // certificate loaders - if len(fileLoader) > 0 { - configVals = append(configVals, ConfigValue{ - Class: "tls.cert_loader", - Value: fileLoader, - }) - } - if len(folderLoader) > 0 { - configVals = append(configVals, ConfigValue{ - Class: "tls.cert_loader", - Value: folderLoader, - }) - } - - // some tls subdirectives are shortcuts that implicitly configure issuers, and the - // user can also configure issuers explicitly using the issuer subdirective; the - // logic to support both would likely be complex, or at least unintuitive - if len(issuers) > 0 && (acmeIssuer != nil || internalIssuer != nil) { - return nil, h.Err("cannot mix issuer subdirective (explicit issuers) with other issuer-specific subdirectives (implicit issuers)") - } - if acmeIssuer != nil && internalIssuer != nil { - return nil, h.Err("cannot create both ACME and internal certificate issuers") - } - - 
// now we should either have: explicitly-created issuers, or an implicitly-created - // ACME or internal issuer, or no issuers at all - switch { - case len(issuers) > 0: - for _, issuer := range issuers { - configVals = append(configVals, ConfigValue{ - Class: "tls.cert_issuer", - Value: issuer, - }) - } - - case acmeIssuer != nil: - // implicit ACME issuers (from various subdirectives) - use defaults; there might be more than one - defaultIssuers := caddytls.DefaultIssuers() - - // if a CA endpoint was set, override multiple implicit issuers since it's a specific one - if acmeIssuer.CA != "" { - defaultIssuers = []certmagic.Issuer{acmeIssuer} - } - - for _, issuer := range defaultIssuers { - switch iss := issuer.(type) { - case *caddytls.ACMEIssuer: - issuer = acmeIssuer - case *caddytls.ZeroSSLIssuer: - iss.ACMEIssuer = acmeIssuer - } - configVals = append(configVals, ConfigValue{ - Class: "tls.cert_issuer", - Value: issuer, - }) - } - - case internalIssuer != nil: - configVals = append(configVals, ConfigValue{ - Class: "tls.cert_issuer", - Value: internalIssuer, - }) - } - - // certificate key type - if keyType != "" { - configVals = append(configVals, ConfigValue{ - Class: "tls.key_type", - Value: keyType, - }) - } - - // on-demand TLS - if onDemand { - configVals = append(configVals, ConfigValue{ - Class: "tls.on_demand", - Value: true, - }) - } - - // custom certificate selection - if len(certSelector.AnyTag) > 0 { - cp.CertSelection = &certSelector - } - - // connection policy -- always add one, to ensure that TLS - // is enabled, because this directive was used (this is - // needed, for instance, when a site block has a key of - // just ":5000" - i.e. no hostname, and only on-demand TLS - // is enabled) - configVals = append(configVals, ConfigValue{ - Class: "tls.connection_policy", - Value: cp, - }) - - return configVals, nil -} - -// parseRoot parses the root directive. Syntax: -// -// root [] -// -func parseRoot(h Helper) (caddyhttp.MiddlewareHandler, error) { - var root string - for h.Next() { - if !h.NextArg() { - return nil, h.ArgErr() - } - root = h.Val() - if h.NextArg() { - return nil, h.ArgErr() - } - } - return caddyhttp.VarsMiddleware{"root": root}, nil -} - -// parseRedir parses the redir directive. Syntax: -// -// redir [] [] -// -func parseRedir(h Helper) (caddyhttp.MiddlewareHandler, error) { - if !h.Next() { - return nil, h.ArgErr() - } - - if !h.NextArg() { - return nil, h.ArgErr() - } - to := h.Val() - - var code string - if h.NextArg() { - code = h.Val() - } - - var body string - switch code { - case "permanent": - code = "301" - case "temporary", "": - code = "302" - case "html": - // Script tag comes first since that will better imitate a redirect in the browser's - // history, but the meta tag is a fallback for most non-JS clients. - const metaRedir = ` - - - Redirecting... - - - - Redirecting to %s... - -` - safeTo := html.EscapeString(to) - body = fmt.Sprintf(metaRedir, safeTo, safeTo, safeTo, safeTo) - code = "302" - default: - codeInt, err := strconv.Atoi(code) - if err != nil { - return nil, h.Errf("Not a supported redir code type or not valid integer: '%s'", code) - } - if codeInt < 300 || codeInt > 399 { - return nil, h.Errf("Redir code not in the 3xx range: '%v'", codeInt) - } - } - - return caddyhttp.StaticResponse{ - StatusCode: caddyhttp.WeakString(code), - Headers: http.Header{"Location": []string{to}}, - Body: body, - }, nil -} - -// parseRespond parses the respond directive. 
-func parseRespond(h Helper) (caddyhttp.MiddlewareHandler, error) { - sr := new(caddyhttp.StaticResponse) - err := sr.UnmarshalCaddyfile(h.Dispenser) - if err != nil { - return nil, err - } - return sr, nil -} - -// parseAbort parses the abort directive. -func parseAbort(h Helper) (caddyhttp.MiddlewareHandler, error) { - h.Next() // consume directive - for h.Next() || h.NextBlock(0) { - return nil, h.ArgErr() - } - return &caddyhttp.StaticResponse{Abort: true}, nil -} - -// parseError parses the error directive. -func parseError(h Helper) (caddyhttp.MiddlewareHandler, error) { - se := new(caddyhttp.StaticError) - err := se.UnmarshalCaddyfile(h.Dispenser) - if err != nil { - return nil, err - } - return se, nil -} - -// parseRoute parses the route directive. -func parseRoute(h Helper) (caddyhttp.MiddlewareHandler, error) { - sr := new(caddyhttp.Subroute) - - allResults, err := parseSegmentAsConfig(h) - if err != nil { - return nil, err - } - - for _, result := range allResults { - switch handler := result.Value.(type) { - case caddyhttp.Route: - sr.Routes = append(sr.Routes, handler) - case caddyhttp.Subroute: - // directives which return a literal subroute instead of a route - // means they intend to keep those handlers together without - // them being reordered; we're doing that anyway since we're in - // the route directive, so just append its handlers - sr.Routes = append(sr.Routes, handler.Routes...) - default: - return nil, h.Errf("%s directive returned something other than an HTTP route or subroute: %#v (only handler directives can be used in routes)", result.directive, result.Value) - } - } - - return sr, nil -} - -func parseHandle(h Helper) (caddyhttp.MiddlewareHandler, error) { - return ParseSegmentAsSubroute(h) -} - -func parseHandleErrors(h Helper) ([]ConfigValue, error) { - subroute, err := ParseSegmentAsSubroute(h) - if err != nil { - return nil, err - } - return []ConfigValue{ - { - Class: "error_route", - Value: subroute, - }, - }, nil -} - -// parseLog parses the log directive. Syntax: -// -// log { -// output ... -// format ... -// level -// } -// -func parseLog(h Helper) ([]ConfigValue, error) { - return parseLogHelper(h, nil) -} - -// parseLogHelper is used both for the parseLog directive within Server Blocks, -// as well as the global "log" option for configuring loggers at the global -// level. The parseAsGlobalOption parameter is used to distinguish any differing logic -// between the two. -func parseLogHelper(h Helper, globalLogNames map[string]struct{}) ([]ConfigValue, error) { - // When the globalLogNames parameter is passed in, we make - // modifications to the parsing behavior. - parseAsGlobalOption := globalLogNames != nil - - var configValues []ConfigValue - for h.Next() { - // Logic below expects that a name is always present when a - // global option is being parsed. - var globalLogName string - if parseAsGlobalOption { - if h.NextArg() { - globalLogName = h.Val() - - // Only a single argument is supported. - if h.NextArg() { - return nil, h.ArgErr() - } - } else { - // If there is no log name specified, we - // reference the default logger. See the - // setupNewDefault function in the logging - // package for where this is configured. - globalLogName = "default" - } - - // Verify this name is unused. 
- _, used := globalLogNames[globalLogName] - if used { - return nil, h.Err("duplicate global log option for: " + globalLogName) - } - globalLogNames[globalLogName] = struct{}{} - } else { - // No arguments are supported for the server block log directive - if h.NextArg() { - return nil, h.ArgErr() - } - } - - cl := new(caddy.CustomLog) - - for h.NextBlock(0) { - switch h.Val() { - case "output": - if !h.NextArg() { - return nil, h.ArgErr() - } - moduleName := h.Val() - - // can't use the usual caddyfile.Unmarshaler flow with the - // standard writers because they are in the caddy package - // (because they are the default) and implementing that - // interface there would unfortunately create circular import - var wo caddy.WriterOpener - switch moduleName { - case "stdout": - wo = caddy.StdoutWriter{} - case "stderr": - wo = caddy.StderrWriter{} - case "discard": - wo = caddy.DiscardWriter{} - default: - modID := "caddy.logging.writers." + moduleName - unm, err := caddyfile.UnmarshalModule(h.Dispenser, modID) - if err != nil { - return nil, err - } - var ok bool - wo, ok = unm.(caddy.WriterOpener) - if !ok { - return nil, h.Errf("module %s (%T) is not a WriterOpener", modID, unm) - } - } - cl.WriterRaw = caddyconfig.JSONModuleObject(wo, "output", moduleName, h.warnings) - - case "format": - if !h.NextArg() { - return nil, h.ArgErr() - } - moduleName := h.Val() - moduleID := "caddy.logging.encoders." + moduleName - unm, err := caddyfile.UnmarshalModule(h.Dispenser, moduleID) - if err != nil { - return nil, err - } - enc, ok := unm.(zapcore.Encoder) - if !ok { - return nil, h.Errf("module %s (%T) is not a zapcore.Encoder", moduleID, unm) - } - cl.EncoderRaw = caddyconfig.JSONModuleObject(enc, "format", moduleName, h.warnings) - - case "level": - if !h.NextArg() { - return nil, h.ArgErr() - } - cl.Level = h.Val() - if h.NextArg() { - return nil, h.ArgErr() - } - - case "include": - // This configuration is only allowed in the global options - if !parseAsGlobalOption { - return nil, h.ArgErr() - } - for h.NextArg() { - cl.Include = append(cl.Include, h.Val()) - } - - case "exclude": - // This configuration is only allowed in the global options - if !parseAsGlobalOption { - return nil, h.ArgErr() - } - for h.NextArg() { - cl.Exclude = append(cl.Exclude, h.Val()) - } - - default: - return nil, h.Errf("unrecognized subdirective: %s", h.Val()) - } - } - - var val namedCustomLog - // Skip handling of empty logging configs - if !reflect.DeepEqual(cl, new(caddy.CustomLog)) { - if parseAsGlobalOption { - // Use indicated name for global log options - val.name = globalLogName - val.log = cl - } else { - // Construct a log name for server log streams - logCounter, ok := h.State["logCounter"].(int) - if !ok { - logCounter = 0 - } - val.name = fmt.Sprintf("log%d", logCounter) - cl.Include = []string{"http.log.access." 
+ val.name} - val.log = cl - logCounter++ - h.State["logCounter"] = logCounter - } - } - configValues = append(configValues, ConfigValue{ - Class: "custom_log", - Value: val, - }) - } - return configValues, nil -} diff --git a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile/directives.go b/vendor/github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile/directives.go deleted file mode 100644 index 360f91e7..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile/directives.go +++ /dev/null @@ -1,539 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package httpcaddyfile - -import ( - "encoding/json" - "net" - "sort" - "strconv" - "strings" - - "github.com/caddyserver/caddy/v2" - "github.com/caddyserver/caddy/v2/caddyconfig" - "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile" - "github.com/caddyserver/caddy/v2/modules/caddyhttp" -) - -// directiveOrder specifies the order -// to apply directives in HTTP routes. -// -// The root directive goes first in case rewrites or -// redirects depend on existence of files, i.e. the -// file matcher, which must know the root first. -// -// The header directive goes second so that headers -// can be manipulated before doing redirects. -var directiveOrder = []string{ - "map", - "root", - - "header", - "request_body", - - "redir", - - // URI manipulation - "rewrite", - "uri", - "try_files", - - // middleware handlers; some wrap responses - "basicauth", - "request_header", - "encode", - "push", - "templates", - - // special routing & dispatching directives - "handle", - "handle_path", - "route", - - // handlers that typically respond to requests - "abort", - "error", - "respond", - "metrics", - "reverse_proxy", - "php_fastcgi", - "file_server", - "acme_server", -} - -// directiveIsOrdered returns true if dir is -// a known, ordered (sorted) directive. -func directiveIsOrdered(dir string) bool { - for _, d := range directiveOrder { - if d == dir { - return true - } - } - return false -} - -// RegisterDirective registers a unique directive dir with an -// associated unmarshaling (setup) function. When directive dir -// is encountered in a Caddyfile, setupFunc will be called to -// unmarshal its tokens. -func RegisterDirective(dir string, setupFunc UnmarshalFunc) { - if _, ok := registeredDirectives[dir]; ok { - panic("directive " + dir + " already registered") - } - registeredDirectives[dir] = setupFunc -} - -// RegisterHandlerDirective is like RegisterDirective, but for -// directives which specifically output only an HTTP handler. -// Directives registered with this function will always have -// an optional matcher token as the first argument. 
-func RegisterHandlerDirective(dir string, setupFunc UnmarshalHandlerFunc) { - RegisterDirective(dir, func(h Helper) ([]ConfigValue, error) { - if !h.Next() { - return nil, h.ArgErr() - } - - matcherSet, err := h.ExtractMatcherSet() - if err != nil { - return nil, err - } - - val, err := setupFunc(h) - if err != nil { - return nil, err - } - - return h.NewRoute(matcherSet, val), nil - }) -} - -// RegisterGlobalOption registers a unique global option opt with -// an associated unmarshaling (setup) function. When the global -// option opt is encountered in a Caddyfile, setupFunc will be -// called to unmarshal its tokens. -func RegisterGlobalOption(opt string, setupFunc UnmarshalGlobalFunc) { - if _, ok := registeredGlobalOptions[opt]; ok { - panic("global option " + opt + " already registered") - } - registeredGlobalOptions[opt] = setupFunc -} - -// Helper is a type which helps setup a value from -// Caddyfile tokens. -type Helper struct { - *caddyfile.Dispenser - // State stores intermediate variables during caddyfile adaptation. - State map[string]interface{} - options map[string]interface{} - warnings *[]caddyconfig.Warning - matcherDefs map[string]caddy.ModuleMap - parentBlock caddyfile.ServerBlock - groupCounter counter -} - -// Option gets the option keyed by name. -func (h Helper) Option(name string) interface{} { - return h.options[name] -} - -// Caddyfiles returns the list of config files from -// which tokens in the current server block were loaded. -func (h Helper) Caddyfiles() []string { - // first obtain set of names of files involved - // in this server block, without duplicates - files := make(map[string]struct{}) - for _, segment := range h.parentBlock.Segments { - for _, token := range segment { - files[token.File] = struct{}{} - } - } - // then convert the set into a slice - filesSlice := make([]string, 0, len(files)) - for file := range files { - filesSlice = append(filesSlice, file) - } - return filesSlice -} - -// JSON converts val into JSON. Any errors are added to warnings. -func (h Helper) JSON(val interface{}) json.RawMessage { - return caddyconfig.JSON(val, h.warnings) -} - -// MatcherToken assumes the next argument token is (possibly) a matcher, -// and if so, returns the matcher set along with a true value. If the next -// token is not a matcher, nil and false is returned. Note that a true -// value may be returned with a nil matcher set if it is a catch-all. -func (h Helper) MatcherToken() (caddy.ModuleMap, bool, error) { - if !h.NextArg() { - return nil, false, nil - } - return matcherSetFromMatcherToken(h.Dispenser.Token(), h.matcherDefs, h.warnings) -} - -// ExtractMatcherSet is like MatcherToken, except this is a higher-level -// method that returns the matcher set described by the matcher token, -// or nil if there is none, and deletes the matcher token from the -// dispenser and resets it as if this look-ahead never happened. Useful -// when wrapping a route (one or more handlers) in a user-defined matcher. 
-func (h Helper) ExtractMatcherSet() (caddy.ModuleMap, error) { - matcherSet, hasMatcher, err := h.MatcherToken() - if err != nil { - return nil, err - } - if hasMatcher { - // strip matcher token; we don't need to - // use the return value here because a - // new dispenser should have been made - // solely for this directive's tokens, - // with no other uses of same slice - h.Dispenser.Delete() - } - h.Dispenser.Reset() // pretend this lookahead never happened - return matcherSet, nil -} - -// NewRoute returns config values relevant to creating a new HTTP route. -func (h Helper) NewRoute(matcherSet caddy.ModuleMap, - handler caddyhttp.MiddlewareHandler) []ConfigValue { - mod, err := caddy.GetModule(caddy.GetModuleID(handler)) - if err != nil { - *h.warnings = append(*h.warnings, caddyconfig.Warning{ - File: h.File(), - Line: h.Line(), - Message: err.Error(), - }) - return nil - } - var matcherSetsRaw []caddy.ModuleMap - if matcherSet != nil { - matcherSetsRaw = append(matcherSetsRaw, matcherSet) - } - return []ConfigValue{ - { - Class: "route", - Value: caddyhttp.Route{ - MatcherSetsRaw: matcherSetsRaw, - HandlersRaw: []json.RawMessage{caddyconfig.JSONModuleObject(handler, "handler", mod.ID.Name(), h.warnings)}, - }, - }, - } -} - -// GroupRoutes adds the routes (caddyhttp.Route type) in vals to the -// same group, if there is more than one route in vals. -func (h Helper) GroupRoutes(vals []ConfigValue) { - // ensure there's at least two routes; group of one is pointless - var count int - for _, v := range vals { - if _, ok := v.Value.(caddyhttp.Route); ok { - count++ - if count > 1 { - break - } - } - } - if count < 2 { - return - } - - // now that we know the group will have some effect, do it - groupName := h.groupCounter.nextGroup() - for i := range vals { - if route, ok := vals[i].Value.(caddyhttp.Route); ok { - route.Group = groupName - vals[i].Value = route - } - } -} - -// NewBindAddresses returns config values relevant to adding -// listener bind addresses to the config. -func (h Helper) NewBindAddresses(addrs []string) []ConfigValue { - return []ConfigValue{{Class: "bind", Value: addrs}} -} - -// WithDispenser returns a new instance based on d. All others Helper -// fields are copied, so typically maps are shared with this new instance. -func (h Helper) WithDispenser(d *caddyfile.Dispenser) Helper { - h.Dispenser = d - return h -} - -// ParseSegmentAsSubroute parses the segment such that its subdirectives -// are themselves treated as directives, from which a subroute is built -// and returned. -func ParseSegmentAsSubroute(h Helper) (caddyhttp.MiddlewareHandler, error) { - allResults, err := parseSegmentAsConfig(h) - if err != nil { - return nil, err - } - - return buildSubroute(allResults, h.groupCounter) -} - -// parseSegmentAsConfig parses the segment such that its subdirectives -// are themselves treated as directives, including named matcher definitions, -// and the raw Config structs are returned. 
-func parseSegmentAsConfig(h Helper) ([]ConfigValue, error) { - var allResults []ConfigValue - - for h.Next() { - // don't allow non-matcher args on the first line - if h.NextArg() { - return nil, h.ArgErr() - } - - // slice the linear list of tokens into top-level segments - var segments []caddyfile.Segment - for nesting := h.Nesting(); h.NextBlock(nesting); { - segments = append(segments, h.NextSegment()) - } - - // copy existing matcher definitions so we can augment - // new ones that are defined only in this scope - matcherDefs := make(map[string]caddy.ModuleMap, len(h.matcherDefs)) - for key, val := range h.matcherDefs { - matcherDefs[key] = val - } - - // find and extract any embedded matcher definitions in this scope - for i := 0; i < len(segments); i++ { - seg := segments[i] - if strings.HasPrefix(seg.Directive(), matcherPrefix) { - // parse, then add the matcher to matcherDefs - err := parseMatcherDefinitions(caddyfile.NewDispenser(seg), matcherDefs) - if err != nil { - return nil, err - } - // remove the matcher segment (consumed), then step back the loop - segments = append(segments[:i], segments[i+1:]...) - i-- - } - } - - // with matchers ready to go, evaluate each directive's segment - for _, seg := range segments { - dir := seg.Directive() - dirFunc, ok := registeredDirectives[dir] - if !ok { - return nil, h.Errf("unrecognized directive: %s - are you sure your Caddyfile structure (nesting and braces) is correct?", dir) - } - - subHelper := h - subHelper.Dispenser = caddyfile.NewDispenser(seg) - subHelper.matcherDefs = matcherDefs - - results, err := dirFunc(subHelper) - if err != nil { - return nil, h.Errf("parsing caddyfile tokens for '%s': %v", dir, err) - } - for _, result := range results { - result.directive = dir - allResults = append(allResults, result) - } - } - } - - return allResults, nil -} - -// ConfigValue represents a value to be added to the final -// configuration, or a value to be consulted when building -// the final configuration. -type ConfigValue struct { - // The kind of value this is. As the config is - // being built, the adapter will look in the - // "pile" for values belonging to a certain - // class when it is setting up a certain part - // of the config. The associated value will be - // type-asserted and placed accordingly. - Class string - - // The value to be used when building the config. - // Generally its type is associated with the - // name of the Class. 
- Value interface{} - - directive string -} - -func sortRoutes(routes []ConfigValue) { - dirPositions := make(map[string]int) - for i, dir := range directiveOrder { - dirPositions[dir] = i - } - - sort.SliceStable(routes, func(i, j int) bool { - // if the directives are different, just use the established directive order - iDir, jDir := routes[i].directive, routes[j].directive - if iDir != jDir { - return dirPositions[iDir] < dirPositions[jDir] - } - - // directives are the same; sub-sort by path matcher length if there's - // only one matcher set and one path (this is a very common case and - // usually -- but not always -- helpful/expected, oh well; user can - // always take manual control of order using handler or route blocks) - iRoute, ok := routes[i].Value.(caddyhttp.Route) - if !ok { - return false - } - jRoute, ok := routes[j].Value.(caddyhttp.Route) - if !ok { - return false - } - - // decode the path matchers, if there is just one of them - var iPM, jPM caddyhttp.MatchPath - if len(iRoute.MatcherSetsRaw) == 1 { - _ = json.Unmarshal(iRoute.MatcherSetsRaw[0]["path"], &iPM) - } - if len(jRoute.MatcherSetsRaw) == 1 { - _ = json.Unmarshal(jRoute.MatcherSetsRaw[0]["path"], &jPM) - } - - // sort by longer path (more specific) first; missing path - // matchers or multi-matchers are treated as zero-length paths - var iPathLen, jPathLen int - if len(iPM) > 0 { - iPathLen = len(iPM[0]) - } - if len(jPM) > 0 { - jPathLen = len(jPM[0]) - } - - // if both directives have no path matcher, use whichever one - // has any kind of matcher defined first. - if iPathLen == 0 && jPathLen == 0 { - return len(iRoute.MatcherSetsRaw) > 0 && len(jRoute.MatcherSetsRaw) == 0 - } - - // sort with the most-specific (longest) path first - return iPathLen > jPathLen - }) -} - -// serverBlock pairs a Caddyfile server block with -// a "pile" of config values, keyed by class name, -// as well as its parsed keys for convenience. -type serverBlock struct { - block caddyfile.ServerBlock - pile map[string][]ConfigValue // config values obtained from directives - keys []Address -} - -// hostsFromKeys returns a list of all the non-empty hostnames found in -// the keys of the server block sb. If logger mode is false, a key with -// an empty hostname portion will return an empty slice, since that -// server block is interpreted to effectively match all hosts. An empty -// string is never added to the slice. -// -// If loggerMode is true, then the non-standard ports of keys will be -// joined to the hostnames. This is to effectively match the Host -// header of requests that come in for that key. -// -// The resulting slice is not sorted but will never have duplicates. -func (sb serverBlock) hostsFromKeys(loggerMode bool) []string { - // ensure each entry in our list is unique - hostMap := make(map[string]struct{}) - for _, addr := range sb.keys { - if addr.Host == "" { - if !loggerMode { - // server block contains a key like ":443", i.e. 
the host portion - // is empty / catch-all, which means to match all hosts - return []string{} - } - // never append an empty string - continue - } - if loggerMode && - addr.Port != "" && - addr.Port != strconv.Itoa(caddyhttp.DefaultHTTPPort) && - addr.Port != strconv.Itoa(caddyhttp.DefaultHTTPSPort) { - hostMap[net.JoinHostPort(addr.Host, addr.Port)] = struct{}{} - } else { - hostMap[addr.Host] = struct{}{} - } - } - - // convert map to slice - sblockHosts := make([]string, 0, len(hostMap)) - for host := range hostMap { - sblockHosts = append(sblockHosts, host) - } - - return sblockHosts -} - -func (sb serverBlock) hostsFromKeysNotHTTP(httpPort string) []string { - // ensure each entry in our list is unique - hostMap := make(map[string]struct{}) - for _, addr := range sb.keys { - if addr.Host == "" { - continue - } - if addr.Scheme != "http" && addr.Port != httpPort { - hostMap[addr.Host] = struct{}{} - } - } - - // convert map to slice - sblockHosts := make([]string, 0, len(hostMap)) - for host := range hostMap { - sblockHosts = append(sblockHosts, host) - } - - return sblockHosts -} - -// hasHostCatchAllKey returns true if sb has a key that -// omits a host portion, i.e. it "catches all" hosts. -func (sb serverBlock) hasHostCatchAllKey() bool { - for _, addr := range sb.keys { - if addr.Host == "" { - return true - } - } - return false -} - -type ( - // UnmarshalFunc is a function which can unmarshal Caddyfile - // tokens into zero or more config values using a Helper type. - // These are passed in a call to RegisterDirective. - UnmarshalFunc func(h Helper) ([]ConfigValue, error) - - // UnmarshalHandlerFunc is like UnmarshalFunc, except the - // output of the unmarshaling is an HTTP handler. This - // function does not need to deal with HTTP request matching - // which is abstracted away. Since writing HTTP handlers - // with Caddyfile support is very common, this is a more - // convenient way to add a handler to the chain since a lot - // of the details common to HTTP handlers are taken care of - // for you. These are passed to a call to - // RegisterHandlerDirective. - UnmarshalHandlerFunc func(h Helper) (caddyhttp.MiddlewareHandler, error) - - // UnmarshalGlobalFunc is a function which can unmarshal Caddyfile - // tokens from a global option. It is passed the tokens to parse and - // existing value from the previous instance of this global option - // (if any). It returns the value to associate with this global option. - UnmarshalGlobalFunc func(d *caddyfile.Dispenser, existingVal interface{}) (interface{}, error) -) - -var registeredDirectives = make(map[string]UnmarshalFunc) - -var registeredGlobalOptions = make(map[string]UnmarshalGlobalFunc) diff --git a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile/httptype.go b/vendor/github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile/httptype.go deleted file mode 100644 index e5dafe6a..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile/httptype.go +++ /dev/null @@ -1,1352 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
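The directive registry in directives.go (deleted above) is the extension point third-party handlers use: a plugin registers a directive name plus an `UnmarshalHandlerFunc`, and `RegisterHandlerDirective` wraps it with matcher extraction and route construction. A sketch of that pattern; the `greeting` directive and its handler are hypothetical, not part of this diff:

```go
package myplugin

import (
	"github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile"
	"github.com/caddyserver/caddy/v2/modules/caddyhttp"
)

func init() {
	httpcaddyfile.RegisterHandlerDirective("greeting", parseGreeting)
}

// parseGreeting unmarshals:  greeting <message>
func parseGreeting(h httpcaddyfile.Helper) (caddyhttp.MiddlewareHandler, error) {
	h.Next() // consume directive name
	if !h.NextArg() {
		return nil, h.ArgErr()
	}
	msg := h.Val()
	if h.NextArg() {
		return nil, h.ArgErr() // at most one argument
	}
	// StaticResponse is the same handler the deleted redir/respond/abort
	// directives build on.
	return caddyhttp.StaticResponse{Body: msg}, nil
}
```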
-// See the License for the specific language governing permissions and -// limitations under the License. - -package httpcaddyfile - -import ( - "encoding/json" - "fmt" - "log" - "reflect" - "regexp" - "sort" - "strconv" - "strings" - - "github.com/caddyserver/caddy/v2" - "github.com/caddyserver/caddy/v2/caddyconfig" - "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile" - "github.com/caddyserver/caddy/v2/modules/caddyhttp" - "github.com/caddyserver/caddy/v2/modules/caddypki" - "github.com/caddyserver/caddy/v2/modules/caddytls" -) - -func init() { - caddyconfig.RegisterAdapter("caddyfile", caddyfile.Adapter{ServerType: ServerType{}}) -} - -// App represents the configuration for a non-standard -// Caddy app module (e.g. third-party plugin) which was -// parsed from a global options block. -type App struct { - // The JSON key for the app being configured - Name string - - // The raw app config as JSON - Value json.RawMessage -} - -// ServerType can set up a config from an HTTP Caddyfile. -type ServerType struct { -} - -// Setup makes a config from the tokens. -func (st ServerType) Setup(inputServerBlocks []caddyfile.ServerBlock, - options map[string]interface{}) (*caddy.Config, []caddyconfig.Warning, error) { - var warnings []caddyconfig.Warning - gc := counter{new(int)} - state := make(map[string]interface{}) - - // load all the server blocks and associate them with a "pile" - // of config values; also prohibit duplicate keys because they - // can make a config confusing if more than one server block is - // chosen to handle a request - we actually will make each - // server block's route terminal so that only one will run - sbKeys := make(map[string]struct{}) - originalServerBlocks := make([]serverBlock, 0, len(inputServerBlocks)) - for i, sblock := range inputServerBlocks { - for j, k := range sblock.Keys { - if j == 0 && strings.HasPrefix(k, "@") { - return nil, warnings, fmt.Errorf("cannot define a matcher outside of a site block: '%s'", k) - } - if _, ok := sbKeys[k]; ok { - return nil, warnings, fmt.Errorf("duplicate site address not allowed: '%s' in %v (site block %d, key %d)", k, sblock.Keys, i, j) - } - sbKeys[k] = struct{}{} - } - originalServerBlocks = append(originalServerBlocks, serverBlock{ - block: sblock, - pile: make(map[string][]ConfigValue), - }) - } - - // apply any global options - var err error - originalServerBlocks, err = st.evaluateGlobalOptionsBlock(originalServerBlocks, options) - if err != nil { - return nil, warnings, err - } - - // replace shorthand placeholders (which are - // convenient when writing a Caddyfile) with - // their actual placeholder identifiers or - // variable names - replacer := strings.NewReplacer( - "{dir}", "{http.request.uri.path.dir}", - "{file}", "{http.request.uri.path.file}", - "{host}", "{http.request.host}", - "{hostport}", "{http.request.hostport}", - "{port}", "{http.request.port}", - "{method}", "{http.request.method}", - "{path}", "{http.request.uri.path}", - "{query}", "{http.request.uri.query}", - "{remote}", "{http.request.remote}", - "{remote_host}", "{http.request.remote.host}", - "{remote_port}", "{http.request.remote.port}", - "{scheme}", "{http.request.scheme}", - "{uri}", "{http.request.uri}", - "{tls_cipher}", "{http.request.tls.cipher_suite}", - "{tls_version}", "{http.request.tls.version}", - "{tls_client_fingerprint}", "{http.request.tls.client.fingerprint}", - "{tls_client_issuer}", "{http.request.tls.client.issuer}", - "{tls_client_serial}", "{http.request.tls.client.serial}", - "{tls_client_subject}", 
"{http.request.tls.client.subject}", - "{tls_client_certificate_pem}", "{http.request.tls.client.certificate_pem}", - "{upstream_hostport}", "{http.reverse_proxy.upstream.hostport}", - ) - - // these are placeholders that allow a user-defined final - // parameters, but we still want to provide a shorthand - // for those, so we use a regexp to replace - regexpReplacements := []struct { - search *regexp.Regexp - replace string - }{ - {regexp.MustCompile(`{query\.([\w-]*)}`), "{http.request.uri.query.$1}"}, - {regexp.MustCompile(`{labels\.([\w-]*)}`), "{http.request.host.labels.$1}"}, - {regexp.MustCompile(`{header\.([\w-]*)}`), "{http.request.header.$1}"}, - {regexp.MustCompile(`{path\.([\w-]*)}`), "{http.request.uri.path.$1}"}, - {regexp.MustCompile(`{re\.([\w-]*)\.([\w-]*)}`), "{http.regexp.$1.$2}"}, - } - - for _, sb := range originalServerBlocks { - for _, segment := range sb.block.Segments { - for i := 0; i < len(segment); i++ { - // simple string replacements - segment[i].Text = replacer.Replace(segment[i].Text) - // complex regexp replacements - for _, r := range regexpReplacements { - segment[i].Text = r.search.ReplaceAllString(segment[i].Text, r.replace) - } - } - } - - if len(sb.block.Keys) == 0 { - return nil, warnings, fmt.Errorf("server block without any key is global configuration, and if used, it must be first") - } - - // extract matcher definitions - matcherDefs := make(map[string]caddy.ModuleMap) - for _, segment := range sb.block.Segments { - if dir := segment.Directive(); strings.HasPrefix(dir, matcherPrefix) { - d := sb.block.DispenseDirective(dir) - err := parseMatcherDefinitions(d, matcherDefs) - if err != nil { - return nil, warnings, err - } - } - } - - // evaluate each directive ("segment") in this block - for _, segment := range sb.block.Segments { - dir := segment.Directive() - - if strings.HasPrefix(dir, matcherPrefix) { - // matcher definitions were pre-processed - continue - } - - dirFunc, ok := registeredDirectives[dir] - if !ok { - tkn := segment[0] - message := "%s:%d: unrecognized directive: %s" - if !sb.block.HasBraces { - message += "\nDid you mean to define a second site? If so, you must use curly braces around each site to separate their configurations." - } - return nil, warnings, fmt.Errorf(message, tkn.File, tkn.Line, dir) - } - - h := Helper{ - Dispenser: caddyfile.NewDispenser(segment), - options: options, - warnings: &warnings, - matcherDefs: matcherDefs, - parentBlock: sb.block, - groupCounter: gc, - State: state, - } - - results, err := dirFunc(h) - if err != nil { - return nil, warnings, fmt.Errorf("parsing caddyfile tokens for '%s': %v", dir, err) - } - - // As a special case, we want "handle_path" to be sorted - // at the same level as "handle", so we force them to use - // the same directive name after their parsing is complete. 
- // See https://github.com/caddyserver/caddy/issues/3675#issuecomment-678042377 - if dir == "handle_path" { - dir = "handle" - } - - for _, result := range results { - result.directive = dir - sb.pile[result.Class] = append(sb.pile[result.Class], result) - } - } - } - - // map - sbmap, err := st.mapAddressToServerBlocks(originalServerBlocks, options) - if err != nil { - return nil, warnings, err - } - - // reduce - pairings := st.consolidateAddrMappings(sbmap) - - // each pairing of listener addresses to list of server - // blocks is basically a server definition - servers, err := st.serversFromPairings(pairings, options, &warnings, gc) - if err != nil { - return nil, warnings, err - } - - // now that each server is configured, make the HTTP app - httpApp := caddyhttp.App{ - HTTPPort: tryInt(options["http_port"], &warnings), - HTTPSPort: tryInt(options["https_port"], &warnings), - GracePeriod: tryDuration(options["grace_period"], &warnings), - Servers: servers, - } - - // then make the TLS app - tlsApp, warnings, err := st.buildTLSApp(pairings, options, warnings) - if err != nil { - return nil, warnings, err - } - - // then make the PKI app - pkiApp, warnings, err := st.buildPKIApp(pairings, options, warnings) - if err != nil { - return nil, warnings, err - } - - // extract any custom logs, and enforce configured levels - var customLogs []namedCustomLog - var hasDefaultLog bool - addCustomLog := func(ncl namedCustomLog) { - if ncl.name == "" { - return - } - if ncl.name == "default" { - hasDefaultLog = true - } - if _, ok := options["debug"]; ok && ncl.log.Level == "" { - ncl.log.Level = "DEBUG" - } - customLogs = append(customLogs, ncl) - } - // Apply global log options, when set - if options["log"] != nil { - for _, logValue := range options["log"].([]ConfigValue) { - addCustomLog(logValue.Value.(namedCustomLog)) - } - } - // Apply server-specific log options - for _, p := range pairings { - for _, sb := range p.serverBlocks { - for _, clVal := range sb.pile["custom_log"] { - addCustomLog(clVal.Value.(namedCustomLog)) - } - } - } - - if !hasDefaultLog { - // if the default log was not customized, ensure we - // configure it with any applicable options - if _, ok := options["debug"]; ok { - customLogs = append(customLogs, namedCustomLog{ - name: "default", - log: &caddy.CustomLog{Level: "DEBUG"}, - }) - } - } - - // annnd the top-level config, then we're done! 
- cfg := &caddy.Config{AppsRaw: make(caddy.ModuleMap)} - - // loop through the configured options, and if any of - // them are an httpcaddyfile App, then we insert them - // into the config as raw Caddy apps - for _, opt := range options { - if app, ok := opt.(App); ok { - cfg.AppsRaw[app.Name] = app.Value - } - } - - // insert the standard Caddy apps into the config - if len(httpApp.Servers) > 0 { - cfg.AppsRaw["http"] = caddyconfig.JSON(httpApp, &warnings) - } - if !reflect.DeepEqual(tlsApp, &caddytls.TLS{CertificatesRaw: make(caddy.ModuleMap)}) { - cfg.AppsRaw["tls"] = caddyconfig.JSON(tlsApp, &warnings) - } - if !reflect.DeepEqual(pkiApp, &caddypki.PKI{CAs: make(map[string]*caddypki.CA)}) { - cfg.AppsRaw["pki"] = caddyconfig.JSON(pkiApp, &warnings) - } - if storageCvtr, ok := options["storage"].(caddy.StorageConverter); ok { - cfg.StorageRaw = caddyconfig.JSONModuleObject(storageCvtr, - "module", - storageCvtr.(caddy.Module).CaddyModule().ID.Name(), - &warnings) - } - if adminConfig, ok := options["admin"].(*caddy.AdminConfig); ok && adminConfig != nil { - cfg.Admin = adminConfig - } - if len(customLogs) > 0 { - if cfg.Logging == nil { - cfg.Logging = &caddy.Logging{ - Logs: make(map[string]*caddy.CustomLog), - } - } - for _, ncl := range customLogs { - if ncl.name != "" { - cfg.Logging.Logs[ncl.name] = ncl.log - } - // most users seem to prefer not writing access logs - // to the default log when they are directed to a - // file or have any other special customization - if ncl.name != "default" && len(ncl.log.Include) > 0 { - defaultLog, ok := cfg.Logging.Logs["default"] - if !ok { - defaultLog = new(caddy.CustomLog) - cfg.Logging.Logs["default"] = defaultLog - } - defaultLog.Exclude = append(defaultLog.Exclude, ncl.log.Include...) - } - } - } - - return cfg, warnings, nil -} - -// evaluateGlobalOptionsBlock evaluates the global options block, -// which is expected to be the first server block if it has zero -// keys. It returns the updated list of server blocks with the -// global options block removed, and updates options accordingly. -func (ServerType) evaluateGlobalOptionsBlock(serverBlocks []serverBlock, options map[string]interface{}) ([]serverBlock, error) { - if len(serverBlocks) == 0 || len(serverBlocks[0].block.Keys) > 0 { - return serverBlocks, nil - } - - for _, segment := range serverBlocks[0].block.Segments { - opt := segment.Directive() - var val interface{} - var err error - disp := caddyfile.NewDispenser(segment) - - optFunc, ok := registeredGlobalOptions[opt] - if !ok { - tkn := segment[0] - return nil, fmt.Errorf("%s:%d: unrecognized global option: %s", tkn.File, tkn.Line, opt) - } - - val, err = optFunc(disp, options[opt]) - if err != nil { - return nil, fmt.Errorf("parsing caddyfile tokens for '%s': %v", opt, err) - } - - // As a special case, fold multiple "servers" options together - // in an array instead of overwriting a possible existing value - if opt == "servers" { - existingOpts, ok := options[opt].([]serverOptions) - if !ok { - existingOpts = []serverOptions{} - } - serverOpts, ok := val.(serverOptions) - if !ok { - return nil, fmt.Errorf("unexpected type from 'servers' global options: %T", val) - } - options[opt] = append(existingOpts, serverOpts) - continue - } - // Additionally, fold multiple "log" options together into an - // array so that multiple loggers can be configured. 
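Editor's aside: the Include/Exclude bookkeeping in the logging block above is easy to miss: hosts routed to a named access log are appended to the default logger's Exclude list so requests are not double-logged. Reduced to its slice logic (the `customLog` struct here is a stand-in for `caddy.CustomLog`, for illustration only):

```go
package main

import "fmt"

// customLog stands in for caddy.CustomLog in this sketch.
type customLog struct {
	name    string
	include []string
}

func main() {
	logs := []customLog{
		{name: "site1", include: []string{"http.log.access.site1"}},
		{name: "default"}, // the default log is never excluded from itself
	}
	var defaultExclude []string
	for _, ncl := range logs {
		if ncl.name != "default" && len(ncl.include) > 0 {
			defaultExclude = append(defaultExclude, ncl.include...)
		}
	}
	fmt.Println(defaultExclude) // [http.log.access.site1]
}
```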
- if opt == "log" { - existingOpts, ok := options[opt].([]ConfigValue) - if !ok { - existingOpts = []ConfigValue{} - } - logOpts, ok := val.([]ConfigValue) - if !ok { - return nil, fmt.Errorf("unexpected type from 'log' global options: %T", val) - } - options[opt] = append(existingOpts, logOpts...) - continue - } - - options[opt] = val - } - - // If we got "servers" options, we'll sort them by their listener address - if serverOpts, ok := options["servers"].([]serverOptions); ok { - sort.Slice(serverOpts, func(i, j int) bool { - return len(serverOpts[i].ListenerAddress) > len(serverOpts[j].ListenerAddress) - }) - - // Reject the config if there are duplicate listener address - seen := make(map[string]bool) - for _, entry := range serverOpts { - if _, alreadySeen := seen[entry.ListenerAddress]; alreadySeen { - return nil, fmt.Errorf("cannot have 'servers' global options with duplicate listener addresses: %s", entry.ListenerAddress) - } - seen[entry.ListenerAddress] = true - } - } - - return serverBlocks[1:], nil -} - -// serversFromPairings creates the servers for each pairing of addresses -// to server blocks. Each pairing is essentially a server definition. -func (st *ServerType) serversFromPairings( - pairings []sbAddrAssociation, - options map[string]interface{}, - warnings *[]caddyconfig.Warning, - groupCounter counter, -) (map[string]*caddyhttp.Server, error) { - servers := make(map[string]*caddyhttp.Server) - defaultSNI := tryString(options["default_sni"], warnings) - - httpPort := strconv.Itoa(caddyhttp.DefaultHTTPPort) - if hp, ok := options["http_port"].(int); ok { - httpPort = strconv.Itoa(hp) - } - httpsPort := strconv.Itoa(caddyhttp.DefaultHTTPSPort) - if hsp, ok := options["https_port"].(int); ok { - httpsPort = strconv.Itoa(hsp) - } - autoHTTPS := "on" - if ah, ok := options["auto_https"].(string); ok { - autoHTTPS = ah - } - - for i, p := range pairings { - srv := &caddyhttp.Server{ - Listen: p.addresses, - } - - // handle the auto_https global option - if autoHTTPS != "on" { - srv.AutoHTTPS = new(caddyhttp.AutoHTTPSConfig) - if autoHTTPS == "off" { - srv.AutoHTTPS.Disabled = true - } - if autoHTTPS == "disable_redirects" { - srv.AutoHTTPS.DisableRedir = true - } - if autoHTTPS == "ignore_loaded_certs" { - srv.AutoHTTPS.IgnoreLoadedCerts = true - } - } - - // sort server blocks by their keys; this is important because - // only the first matching site should be evaluated, and we should - // attempt to match most specific site first (host and path), in - // case their matchers overlap; we do this somewhat naively by - // descending sort by length of host then path - sort.SliceStable(p.serverBlocks, func(i, j int) bool { - // TODO: we could pre-process the specificities for efficiency, - // but I don't expect many blocks will have THAT many keys... 
- var iLongestPath, jLongestPath string - var iLongestHost, jLongestHost string - var iWildcardHost, jWildcardHost bool - for _, addr := range p.serverBlocks[i].keys { - if strings.Contains(addr.Host, "*") || addr.Host == "" { - iWildcardHost = true - } - if specificity(addr.Host) > specificity(iLongestHost) { - iLongestHost = addr.Host - } - if specificity(addr.Path) > specificity(iLongestPath) { - iLongestPath = addr.Path - } - } - for _, addr := range p.serverBlocks[j].keys { - if strings.Contains(addr.Host, "*") || addr.Host == "" { - jWildcardHost = true - } - if specificity(addr.Host) > specificity(jLongestHost) { - jLongestHost = addr.Host - } - if specificity(addr.Path) > specificity(jLongestPath) { - jLongestPath = addr.Path - } - } - // catch-all blocks (blocks with no hostname) should always go - // last, even after blocks with wildcard hosts - if specificity(iLongestHost) == 0 { - return false - } - if specificity(jLongestHost) == 0 { - return true - } - if iWildcardHost != jWildcardHost { - // site blocks that have a key with a wildcard in the hostname - // must always be less specific than blocks without one; see - // https://github.com/caddyserver/caddy/issues/3410 - return jWildcardHost && !iWildcardHost - } - if specificity(iLongestHost) == specificity(jLongestHost) { - return len(iLongestPath) > len(jLongestPath) - } - return specificity(iLongestHost) > specificity(jLongestHost) - }) - - var hasCatchAllTLSConnPolicy, addressQualifiesForTLS bool - autoHTTPSWillAddConnPolicy := autoHTTPS != "off" - - // if a catch-all server block (one which accepts all hostnames) exists in this pairing, - // we need to know that so that we can configure logs properly (see #3878) - var catchAllSblockExists bool - for _, sblock := range p.serverBlocks { - if len(sblock.hostsFromKeys(false)) == 0 { - catchAllSblockExists = true - } - } - - // if needed, the ServerLogConfig is initialized beforehand so - // that all server blocks can populate it with data, even when not - // coming with a log directive - for _, sblock := range p.serverBlocks { - if len(sblock.pile["custom_log"]) != 0 { - srv.Logs = new(caddyhttp.ServerLogConfig) - break - } - } - - // create a subroute for each site in the server block - for _, sblock := range p.serverBlocks { - matcherSetsEnc, err := st.compileEncodedMatcherSets(sblock) - if err != nil { - return nil, fmt.Errorf("server block %v: compiling matcher sets: %v", sblock.block.Keys, err) - } - - hosts := sblock.hostsFromKeys(false) - - // emit warnings if user put unspecified IP addresses; they probably want the bind directive - for _, h := range hosts { - if h == "0.0.0.0" || h == "::" { - log.Printf("[WARNING] Site block has unspecified IP address %s which only matches requests having that Host header; you probably want the 'bind' directive to configure the socket", h) - } - } - - // tls: connection policies - if cpVals, ok := sblock.pile["tls.connection_policy"]; ok { - // tls connection policies - for _, cpVal := range cpVals { - cp := cpVal.Value.(*caddytls.ConnectionPolicy) - - // make sure the policy covers all hostnames from the block - for _, h := range hosts { - if h == defaultSNI { - hosts = append(hosts, "") - cp.DefaultSNI = defaultSNI - break - } - } - - if len(hosts) > 0 { - cp.MatchersRaw = caddy.ModuleMap{ - "sni": caddyconfig.JSON(hosts, warnings), // make sure to match all hosts, not just auto-HTTPS-qualified ones - } - } else { - cp.DefaultSNI = defaultSNI - } - - // only append this policy if it actually changes something - if 
!cp.SettingsEmpty() { - srv.TLSConnPolicies = append(srv.TLSConnPolicies, cp) - hasCatchAllTLSConnPolicy = len(hosts) == 0 - } - } - } - - for _, addr := range sblock.keys { - // if server only uses HTTPS port, auto-HTTPS will not apply - if listenersUseAnyPortOtherThan(srv.Listen, httpPort) { - // exclude any hosts that were defined explicitly with "http://" - // in the key from automated cert management (issue #2998) - if addr.Scheme == "http" && addr.Host != "" { - if srv.AutoHTTPS == nil { - srv.AutoHTTPS = new(caddyhttp.AutoHTTPSConfig) - } - if !sliceContains(srv.AutoHTTPS.Skip, addr.Host) { - srv.AutoHTTPS.Skip = append(srv.AutoHTTPS.Skip, addr.Host) - } - } - } - - // we'll need to remember if the address qualifies for auto-HTTPS, so we - // can add a TLS conn policy if necessary - if addr.Scheme == "https" || - (addr.Scheme != "http" && addr.Host != "" && addr.Port != httpPort) { - addressQualifiesForTLS = true - } - // predict whether auto-HTTPS will add the conn policy for us; if so, we - // may not need to add one for this server - autoHTTPSWillAddConnPolicy = autoHTTPSWillAddConnPolicy && - (addr.Port == httpsPort || (addr.Port != httpPort && addr.Host != "")) - } - - // Look for any config values that provide listener wrappers on the server block - for _, listenerConfig := range sblock.pile["listener_wrapper"] { - listenerWrapper, ok := listenerConfig.Value.(caddy.ListenerWrapper) - if !ok { - return nil, fmt.Errorf("config for a listener wrapper did not provide a value that implements caddy.ListenerWrapper") - } - jsonListenerWrapper := caddyconfig.JSONModuleObject( - listenerWrapper, - "wrapper", - listenerWrapper.(caddy.Module).CaddyModule().ID.Name(), - warnings) - srv.ListenerWrappersRaw = append(srv.ListenerWrappersRaw, jsonListenerWrapper) - } - - // set up each handler directive, making sure to honor directive order - dirRoutes := sblock.pile["route"] - siteSubroute, err := buildSubroute(dirRoutes, groupCounter) - if err != nil { - return nil, err - } - - // add the site block's route(s) to the server - srv.Routes = appendSubrouteToRouteList(srv.Routes, siteSubroute, matcherSetsEnc, p, warnings) - - // if error routes are defined, add those too - if errorSubrouteVals, ok := sblock.pile["error_route"]; ok { - if srv.Errors == nil { - srv.Errors = new(caddyhttp.HTTPErrorConfig) - } - for _, val := range errorSubrouteVals { - sr := val.Value.(*caddyhttp.Subroute) - srv.Errors.Routes = appendSubrouteToRouteList(srv.Errors.Routes, sr, matcherSetsEnc, p, warnings) - } - } - - // add log associations - // see https://github.com/caddyserver/caddy/issues/3310 - sblockLogHosts := sblock.hostsFromKeys(true) - for _, cval := range sblock.pile["custom_log"] { - ncl := cval.Value.(namedCustomLog) - if sblock.hasHostCatchAllKey() { - // all requests for hosts not able to be listed should use - // this log because it's a catch-all-hosts server block - srv.Logs.DefaultLoggerName = ncl.name - } else { - // map each host to the user's desired logger name - for _, h := range sblockLogHosts { - // if the custom logger name is non-empty, add it to the map; - // otherwise, only map to an empty logger name if this or - // another site block on this server has a catch-all host (in - // which case only requests with mapped hostnames will be - // access-logged, so it'll be necessary to add them to the - // map even if they use default logger) - if ncl.name != "" || catchAllSblockExists { - if srv.Logs.LoggerNames == nil { - srv.Logs.LoggerNames = make(map[string]string) - } - 
srv.Logs.LoggerNames[h] = ncl.name - } - } - } - } - if srv.Logs != nil && len(sblock.pile["custom_log"]) == 0 { - // server has access logs enabled, but this server block does not - // enable access logs; therefore, all hosts of this server block - // should not be access-logged - if len(hosts) == 0 { - // if the server block has a catch-all-hosts key, then we should - // not log reqs to any host unless it appears in the map - srv.Logs.SkipUnmappedHosts = true - } - srv.Logs.SkipHosts = append(srv.Logs.SkipHosts, sblockLogHosts...) - } - } - - // a server cannot (natively) serve both HTTP and HTTPS at the - // same time, so make sure the configuration isn't in conflict - err := detectConflictingSchemes(srv, p.serverBlocks, options) - if err != nil { - return nil, err - } - - // a catch-all TLS conn policy is necessary to ensure TLS can - // be offered to all hostnames of the server; even though only - // one policy is needed to enable TLS for the server, that - // policy might apply to only certain TLS handshakes; but when - // using the Caddyfile, user would expect all handshakes to at - // least have a matching connection policy, so here we append a - // catch-all/default policy if there isn't one already (it's - // important that it goes at the end) - see issue #3004: - // https://github.com/caddyserver/caddy/issues/3004 - // TODO: maybe a smarter way to handle this might be to just make the - // auto-HTTPS logic at provision-time detect if there is any connection - // policy missing for any HTTPS-enabled hosts, if so, add it... maybe? - if addressQualifiesForTLS && - !hasCatchAllTLSConnPolicy && - (len(srv.TLSConnPolicies) > 0 || !autoHTTPSWillAddConnPolicy || defaultSNI != "") { - srv.TLSConnPolicies = append(srv.TLSConnPolicies, &caddytls.ConnectionPolicy{DefaultSNI: defaultSNI}) - } - - // tidy things up a bit - srv.TLSConnPolicies, err = consolidateConnPolicies(srv.TLSConnPolicies) - if err != nil { - return nil, fmt.Errorf("consolidating TLS connection policies for server %d: %v", i, err) - } - srv.Routes = consolidateRoutes(srv.Routes) - - servers[fmt.Sprintf("srv%d", i)] = srv - } - - err := applyServerOptions(servers, options, warnings) - if err != nil { - return nil, err - } - - return servers, nil -} - -func detectConflictingSchemes(srv *caddyhttp.Server, serverBlocks []serverBlock, options map[string]interface{}) error { - httpPort := strconv.Itoa(caddyhttp.DefaultHTTPPort) - if hp, ok := options["http_port"].(int); ok { - httpPort = strconv.Itoa(hp) - } - httpsPort := strconv.Itoa(caddyhttp.DefaultHTTPSPort) - if hsp, ok := options["https_port"].(int); ok { - httpsPort = strconv.Itoa(hsp) - } - - var httpOrHTTPS string - checkAndSetHTTP := func(addr Address) error { - if httpOrHTTPS == "HTTPS" { - errMsg := fmt.Errorf("server listening on %v is configured for HTTPS and cannot natively multiplex HTTP and HTTPS: %s", - srv.Listen, addr.Original) - if addr.Scheme == "" && addr.Host == "" { - errMsg = fmt.Errorf("%s (try specifying https:// in the address)", errMsg) - } - return errMsg - } - if len(srv.TLSConnPolicies) > 0 { - // any connection policies created for an HTTP server - // is a logical conflict, as it would enable HTTPS - return fmt.Errorf("server listening on %v is HTTP, but attempts to configure TLS connection policies", srv.Listen) - } - httpOrHTTPS = "HTTP" - return nil - } - checkAndSetHTTPS := func(addr Address) error { - if httpOrHTTPS == "HTTP" { - return fmt.Errorf("server listening on %v is configured for HTTP and cannot natively multiplex HTTP and HTTPS: 
%s", - srv.Listen, addr.Original) - } - httpOrHTTPS = "HTTPS" - return nil - } - - for _, sblock := range serverBlocks { - for _, addr := range sblock.keys { - if addr.Scheme == "http" || addr.Port == httpPort { - if err := checkAndSetHTTP(addr); err != nil { - return err - } - } else if addr.Scheme == "https" || addr.Port == httpsPort || len(srv.TLSConnPolicies) > 0 { - if err := checkAndSetHTTPS(addr); err != nil { - return err - } - } else if addr.Host == "" { - if err := checkAndSetHTTP(addr); err != nil { - return err - } - } - } - } - - return nil -} - -// consolidateConnPolicies sorts any catch-all policy to the end, removes empty TLS connection -// policies, and combines equivalent ones for a cleaner overall output. -func consolidateConnPolicies(cps caddytls.ConnectionPolicies) (caddytls.ConnectionPolicies, error) { - // catch-all policies (those without any matcher) should be at the - // end, otherwise it nullifies any more specific policies - sort.SliceStable(cps, func(i, j int) bool { - return cps[j].MatchersRaw == nil && cps[i].MatchersRaw != nil - }) - - for i := 0; i < len(cps); i++ { - // compare it to the others - for j := 0; j < len(cps); j++ { - if j == i { - continue - } - - // if they're exactly equal in every way, just keep one of them - if reflect.DeepEqual(cps[i], cps[j]) { - cps = append(cps[:j], cps[j+1:]...) - i-- - break - } - - // if they have the same matcher, try to reconcile each field: either they must - // be identical, or we have to be able to combine them safely - if reflect.DeepEqual(cps[i].MatchersRaw, cps[j].MatchersRaw) { - if len(cps[i].ALPN) > 0 && - len(cps[j].ALPN) > 0 && - !reflect.DeepEqual(cps[i].ALPN, cps[j].ALPN) { - return nil, fmt.Errorf("two policies with same match criteria have conflicting ALPN: %v vs. %v", - cps[i].ALPN, cps[j].ALPN) - } - if len(cps[i].CipherSuites) > 0 && - len(cps[j].CipherSuites) > 0 && - !reflect.DeepEqual(cps[i].CipherSuites, cps[j].CipherSuites) { - return nil, fmt.Errorf("two policies with same match criteria have conflicting cipher suites: %v vs. %v", - cps[i].CipherSuites, cps[j].CipherSuites) - } - if cps[i].ClientAuthentication == nil && - cps[j].ClientAuthentication != nil && - !reflect.DeepEqual(cps[i].ClientAuthentication, cps[j].ClientAuthentication) { - return nil, fmt.Errorf("two policies with same match criteria have conflicting client auth configuration: %+v vs. %+v", - cps[i].ClientAuthentication, cps[j].ClientAuthentication) - } - if len(cps[i].Curves) > 0 && - len(cps[j].Curves) > 0 && - !reflect.DeepEqual(cps[i].Curves, cps[j].Curves) { - return nil, fmt.Errorf("two policies with same match criteria have conflicting curves: %v vs. %v", - cps[i].Curves, cps[j].Curves) - } - if cps[i].DefaultSNI != "" && - cps[j].DefaultSNI != "" && - cps[i].DefaultSNI != cps[j].DefaultSNI { - return nil, fmt.Errorf("two policies with same match criteria have conflicting default SNI: %s vs. %s", - cps[i].DefaultSNI, cps[j].DefaultSNI) - } - if cps[i].ProtocolMin != "" && - cps[j].ProtocolMin != "" && - cps[i].ProtocolMin != cps[j].ProtocolMin { - return nil, fmt.Errorf("two policies with same match criteria have conflicting min protocol: %s vs. %s", - cps[i].ProtocolMin, cps[j].ProtocolMin) - } - if cps[i].ProtocolMax != "" && - cps[j].ProtocolMax != "" && - cps[i].ProtocolMax != cps[j].ProtocolMax { - return nil, fmt.Errorf("two policies with same match criteria have conflicting max protocol: %s vs. 
%s", - cps[i].ProtocolMax, cps[j].ProtocolMax) - } - if cps[i].CertSelection != nil && cps[j].CertSelection != nil { - // merging fields other than AnyTag is not implemented - if !reflect.DeepEqual(cps[i].CertSelection.SerialNumber, cps[j].CertSelection.SerialNumber) || - !reflect.DeepEqual(cps[i].CertSelection.SubjectOrganization, cps[j].CertSelection.SubjectOrganization) || - cps[i].CertSelection.PublicKeyAlgorithm != cps[j].CertSelection.PublicKeyAlgorithm || - !reflect.DeepEqual(cps[i].CertSelection.AllTags, cps[j].CertSelection.AllTags) { - return nil, fmt.Errorf("two policies with same match criteria have conflicting cert selections: %+v vs. %+v", - cps[i].CertSelection, cps[j].CertSelection) - } - } - - // by now we've decided that we can merge the two -- we'll keep i and drop j - - if len(cps[i].ALPN) == 0 && len(cps[j].ALPN) > 0 { - cps[i].ALPN = cps[j].ALPN - } - if len(cps[i].CipherSuites) == 0 && len(cps[j].CipherSuites) > 0 { - cps[i].CipherSuites = cps[j].CipherSuites - } - if cps[i].ClientAuthentication == nil && cps[j].ClientAuthentication != nil { - cps[i].ClientAuthentication = cps[j].ClientAuthentication - } - if len(cps[i].Curves) == 0 && len(cps[j].Curves) > 0 { - cps[i].Curves = cps[j].Curves - } - if cps[i].DefaultSNI == "" && cps[j].DefaultSNI != "" { - cps[i].DefaultSNI = cps[j].DefaultSNI - } - if cps[i].ProtocolMin == "" && cps[j].ProtocolMin != "" { - cps[i].ProtocolMin = cps[j].ProtocolMin - } - if cps[i].ProtocolMax == "" && cps[j].ProtocolMax != "" { - cps[i].ProtocolMax = cps[j].ProtocolMax - } - - if cps[i].CertSelection == nil && cps[j].CertSelection != nil { - // if j is the only one with a policy, move it over to i - cps[i].CertSelection = cps[j].CertSelection - } else if cps[i].CertSelection != nil && cps[j].CertSelection != nil { - // if both have one, then combine AnyTag - for _, tag := range cps[j].CertSelection.AnyTag { - if !sliceContains(cps[i].CertSelection.AnyTag, tag) { - cps[i].CertSelection.AnyTag = append(cps[i].CertSelection.AnyTag, tag) - } - } - } - - cps = append(cps[:j], cps[j+1:]...) - i-- - break - } - } - } - return cps, nil -} - -// appendSubrouteToRouteList appends the routes in subroute -// to the routeList, optionally qualified by matchers. -func appendSubrouteToRouteList(routeList caddyhttp.RouteList, - subroute *caddyhttp.Subroute, - matcherSetsEnc []caddy.ModuleMap, - p sbAddrAssociation, - warnings *[]caddyconfig.Warning) caddyhttp.RouteList { - - // nothing to do if... there's nothing to do - if len(matcherSetsEnc) == 0 && len(subroute.Routes) == 0 && subroute.Errors == nil { - return routeList - } - - if len(matcherSetsEnc) == 0 && len(p.serverBlocks) == 1 { - // no need to wrap the handlers in a subroute if this is - // the only server block and there is no matcher for it - routeList = append(routeList, subroute.Routes...) 
- } else { - route := caddyhttp.Route{ - // the semantics of a site block in the Caddyfile dictate - // that only the first matching one is evaluated, since - // site blocks do not cascade nor inherit - Terminal: true, - } - if len(matcherSetsEnc) > 0 { - route.MatcherSetsRaw = matcherSetsEnc - } - if len(subroute.Routes) > 0 || subroute.Errors != nil { - route.HandlersRaw = []json.RawMessage{ - caddyconfig.JSONModuleObject(subroute, "handler", "subroute", warnings), - } - } - if len(route.MatcherSetsRaw) > 0 || len(route.HandlersRaw) > 0 { - routeList = append(routeList, route) - } - } - return routeList -} - -// buildSubroute turns the config values, which are expected to be routes -// into a clean and orderly subroute that has all the routes within it. -func buildSubroute(routes []ConfigValue, groupCounter counter) (*caddyhttp.Subroute, error) { - for _, val := range routes { - if !directiveIsOrdered(val.directive) { - return nil, fmt.Errorf("directive '%s' is not ordered, so it cannot be used here", val.directive) - } - } - - sortRoutes(routes) - - subroute := new(caddyhttp.Subroute) - - // some directives are mutually exclusive (only first matching - // instance should be evaluated); this is done by putting their - // routes in the same group - mutuallyExclusiveDirs := map[string]*struct { - count int - groupName string - }{ - // as a special case, group rewrite directives so that they are mutually exclusive; - // this means that only the first matching rewrite will be evaluated, and that's - // probably a good thing, since there should never be a need to do more than one - // rewrite (I think?), and cascading rewrites smell bad... imagine these rewrites: - // rewrite /docs/json/* /docs/json/index.html - // rewrite /docs/* /docs/index.html - // (We use this on the Caddy website, or at least we did once.) The first rewrite's - // result is also matched by the second rewrite, making the first rewrite pointless. - // See issue #2959. 
- "rewrite": {}, - - // handle blocks are also mutually exclusive by definition - "handle": {}, - - // root just sets a variable, so if it was not mutually exclusive, intersecting - // root directives would overwrite previously-matched ones; they should not cascade - "root": {}, - } - - // we need to deterministically loop over each of these directives - // in order to keep the group numbers consistent - keys := make([]string, 0, len(mutuallyExclusiveDirs)) - for k := range mutuallyExclusiveDirs { - keys = append(keys, k) - } - sort.Strings(keys) - - for _, meDir := range keys { - info := mutuallyExclusiveDirs[meDir] - - // see how many instances of the directive there are - for _, r := range routes { - if r.directive == meDir { - info.count++ - if info.count > 1 { - break - } - } - } - // if there is more than one, put them in a group - // (special case: "rewrite" directive must always be in - // its own group--even if there is only one--because we - // do not want a rewrite to be consolidated into other - // adjacent routes that happen to have the same matcher, - // see caddyserver/caddy#3108 - because the implied - // intent of rewrite is to do an internal redirect, - // we can't assume that the request will continue to - // match the same matcher; anyway, giving a route a - // unique group name should keep it from consolidating) - if info.count > 1 || meDir == "rewrite" { - info.groupName = groupCounter.nextGroup() - } - } - - // add all the routes piled in from directives - for _, r := range routes { - // put this route into a group if it is mutually exclusive - if info, ok := mutuallyExclusiveDirs[r.directive]; ok { - route := r.Value.(caddyhttp.Route) - route.Group = info.groupName - r.Value = route - } - - switch route := r.Value.(type) { - case caddyhttp.Subroute: - // if a route-class config value is actually a Subroute handler - // with nothing but a list of routes, then it is the intention - // of the directive to keep these handlers together and in this - // same order, but not necessarily in a subroute (if it wanted - // to keep them in a subroute, the directive would have returned - // a route with a Subroute as its handler); this is useful to - // keep multiple handlers/routes together and in the same order - // so that the sorting procedure we did above doesn't reorder them - if route.Errors != nil { - // if error handlers are also set, this is confusing; it's - // probably supposed to be wrapped in a Route and encoded - // as a regular handler route... programmer error. - panic("found subroute with more than just routes; perhaps it should have been wrapped in a route?") - } - subroute.Routes = append(subroute.Routes, route.Routes...) - case caddyhttp.Route: - subroute.Routes = append(subroute.Routes, route) - } - } - - subroute.Routes = consolidateRoutes(subroute.Routes) - - return subroute, nil -} - -// consolidateRoutes combines routes with the same properties -// (same matchers, same Terminal and Group settings) for a -// cleaner overall output. -func consolidateRoutes(routes caddyhttp.RouteList) caddyhttp.RouteList { - for i := 0; i < len(routes)-1; i++ { - if reflect.DeepEqual(routes[i].MatcherSetsRaw, routes[i+1].MatcherSetsRaw) && - routes[i].Terminal == routes[i+1].Terminal && - routes[i].Group == routes[i+1].Group { - // keep the handlers in the same order, then splice out repetitive route - routes[i].HandlersRaw = append(routes[i].HandlersRaw, routes[i+1].HandlersRaw...) - routes = append(routes[:i+1], routes[i+2:]...) 
- i-- - } - } - return routes -} - -func matcherSetFromMatcherToken( - tkn caddyfile.Token, - matcherDefs map[string]caddy.ModuleMap, - warnings *[]caddyconfig.Warning, -) (caddy.ModuleMap, bool, error) { - // matcher tokens can be wildcards, simple path matchers, - // or refer to a pre-defined matcher by some name - if tkn.Text == "*" { - // match all requests == no matchers, so nothing to do - return nil, true, nil - } else if strings.HasPrefix(tkn.Text, "/") { - // convenient way to specify a single path match - return caddy.ModuleMap{ - "path": caddyconfig.JSON(caddyhttp.MatchPath{tkn.Text}, warnings), - }, true, nil - } else if strings.HasPrefix(tkn.Text, matcherPrefix) { - // pre-defined matcher - m, ok := matcherDefs[tkn.Text] - if !ok { - return nil, false, fmt.Errorf("unrecognized matcher name: %+v", tkn.Text) - } - return m, true, nil - } - return nil, false, nil -} - -func (st *ServerType) compileEncodedMatcherSets(sblock serverBlock) ([]caddy.ModuleMap, error) { - type hostPathPair struct { - hostm caddyhttp.MatchHost - pathm caddyhttp.MatchPath - } - - // keep routes with common host and path matchers together - var matcherPairs []*hostPathPair - - var catchAllHosts bool - for _, addr := range sblock.keys { - // choose a matcher pair that should be shared by this - // server block; if none exists yet, create one - var chosenMatcherPair *hostPathPair - for _, mp := range matcherPairs { - if (len(mp.pathm) == 0 && addr.Path == "") || - (len(mp.pathm) == 1 && mp.pathm[0] == addr.Path) { - chosenMatcherPair = mp - break - } - } - if chosenMatcherPair == nil { - chosenMatcherPair = new(hostPathPair) - if addr.Path != "" { - chosenMatcherPair.pathm = []string{addr.Path} - } - matcherPairs = append(matcherPairs, chosenMatcherPair) - } - - // if one of the keys has no host (i.e. 
is a catch-all for - // any hostname), then we need to null out the host matcher - // entirely so that it matches all hosts - if addr.Host == "" && !catchAllHosts { - chosenMatcherPair.hostm = nil - catchAllHosts = true - } - if catchAllHosts { - continue - } - - // add this server block's keys to the matcher - // pair if it doesn't already exist - if addr.Host != "" { - var found bool - for _, h := range chosenMatcherPair.hostm { - if h == addr.Host { - found = true - break - } - } - if !found { - chosenMatcherPair.hostm = append(chosenMatcherPair.hostm, addr.Host) - } - } - } - - // iterate each pairing of host and path matchers and - // put them into a map for JSON encoding - var matcherSets []map[string]caddyhttp.RequestMatcher - for _, mp := range matcherPairs { - matcherSet := make(map[string]caddyhttp.RequestMatcher) - if len(mp.hostm) > 0 { - matcherSet["host"] = mp.hostm - } - if len(mp.pathm) > 0 { - matcherSet["path"] = mp.pathm - } - if len(matcherSet) > 0 { - matcherSets = append(matcherSets, matcherSet) - } - } - - // finally, encode each of the matcher sets - matcherSetsEnc := make([]caddy.ModuleMap, 0, len(matcherSets)) - for _, ms := range matcherSets { - msEncoded, err := encodeMatcherSet(ms) - if err != nil { - return nil, fmt.Errorf("server block %v: %v", sblock.block.Keys, err) - } - matcherSetsEnc = append(matcherSetsEnc, msEncoded) - } - - return matcherSetsEnc, nil -} - -func parseMatcherDefinitions(d *caddyfile.Dispenser, matchers map[string]caddy.ModuleMap) error { - for d.Next() { - definitionName := d.Val() - - if _, ok := matchers[definitionName]; ok { - return fmt.Errorf("matcher is defined more than once: %s", definitionName) - } - matchers[definitionName] = make(caddy.ModuleMap) - - // in case there are multiple instances of the same matcher, concatenate - // their tokens (we expect that UnmarshalCaddyfile should be able to - // handle more than one segment); otherwise, we'd overwrite other - // instances of the matcher in this set - tokensByMatcherName := make(map[string][]caddyfile.Token) - for nesting := d.Nesting(); d.NextArg() || d.NextBlock(nesting); { - matcherName := d.Val() - tokensByMatcherName[matcherName] = append(tokensByMatcherName[matcherName], d.NextSegment()...) - } - for matcherName, tokens := range tokensByMatcherName { - mod, err := caddy.GetModule("http.matchers." + matcherName) - if err != nil { - return fmt.Errorf("getting matcher module '%s': %v", matcherName, err) - } - unm, ok := mod.New().(caddyfile.Unmarshaler) - if !ok { - return fmt.Errorf("matcher module '%s' is not a Caddyfile unmarshaler", matcherName) - } - err = unm.UnmarshalCaddyfile(caddyfile.NewDispenser(tokens)) - if err != nil { - return err - } - rm, ok := unm.(caddyhttp.RequestMatcher) - if !ok { - return fmt.Errorf("matcher module '%s' is not a request matcher", matcherName) - } - matchers[definitionName][matcherName] = caddyconfig.JSON(rm, nil) - } - } - return nil -} - -func encodeMatcherSet(matchers map[string]caddyhttp.RequestMatcher) (caddy.ModuleMap, error) { - msEncoded := make(caddy.ModuleMap) - for matcherName, val := range matchers { - jsonBytes, err := json.Marshal(val) - if err != nil { - return nil, fmt.Errorf("marshaling matcher set %#v: %v", matchers, err) - } - msEncoded[matcherName] = jsonBytes - } - return msEncoded, nil -} - -// tryInt tries to convert val to an integer. If it fails, -// it downgrades the error to a warning and returns 0. 
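Editor's aside: `encodeMatcherSet` above is simply a per-module `json.Marshal` into a module map. Stripped of the Caddy types (plain maps stand in for `caddy.ModuleMap` and the `RequestMatcher` values):

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	matcherSet := map[string]interface{}{
		"host": []string{"example.com"},
		"path": []string{"/api/*"},
	}
	encoded := make(map[string]json.RawMessage, len(matcherSet))
	for name, val := range matcherSet {
		b, err := json.Marshal(val)
		if err != nil {
			panic(fmt.Sprintf("marshaling matcher set: %v", err))
		}
		encoded[name] = b
	}
	fmt.Println(string(encoded["host"]), string(encoded["path"]))
	// ["example.com"] ["/api/*"]
}
```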
-func tryInt(val interface{}, warnings *[]caddyconfig.Warning) int { - intVal, ok := val.(int) - if val != nil && !ok && warnings != nil { - *warnings = append(*warnings, caddyconfig.Warning{Message: "not an integer type"}) - } - return intVal -} - -func tryString(val interface{}, warnings *[]caddyconfig.Warning) string { - stringVal, ok := val.(string) - if val != nil && !ok && warnings != nil { - *warnings = append(*warnings, caddyconfig.Warning{Message: "not a string type"}) - } - return stringVal -} - -func tryDuration(val interface{}, warnings *[]caddyconfig.Warning) caddy.Duration { - durationVal, ok := val.(caddy.Duration) - if val != nil && !ok && warnings != nil { - *warnings = append(*warnings, caddyconfig.Warning{Message: "not a duration type"}) - } - return durationVal -} - -// sliceContains returns true if needle is in haystack. -func sliceContains(haystack []string, needle string) bool { - for _, s := range haystack { - if s == needle { - return true - } - } - return false -} - -// listenersUseAnyPortOtherThan returns true if there are any -// listeners in addresses that use a port which is not otherPort. -// Mostly borrowed from unexported method in caddyhttp package. -func listenersUseAnyPortOtherThan(addresses []string, otherPort string) bool { - otherPortInt, err := strconv.Atoi(otherPort) - if err != nil { - return false - } - for _, lnAddr := range addresses { - laddrs, err := caddy.ParseNetworkAddress(lnAddr) - if err != nil { - continue - } - if uint(otherPortInt) > laddrs.EndPort || uint(otherPortInt) < laddrs.StartPort { - return true - } - } - return false -} - -// specificity returns len(s) minus any wildcards (*) and -// placeholders ({...}). Basically, it's a length count -// that penalizes the use of wildcards and placeholders. -// This is useful for comparing hostnames and paths. -// However, wildcards in paths are not a sure answer to -// the question of specificity. For example, -// '*.example.com' is clearly less specific than -// 'a.example.com', but is '/a' more or less specific -// than '/a*'? -func specificity(s string) int { - l := len(s) - strings.Count(s, "*") - for len(s) > 0 { - start := strings.Index(s, "{") - if start < 0 { - return l - } - end := strings.Index(s[start:], "}") + start + 1 - if end <= start { - return l - } - l -= end - start - s = s[end:] - } - return l -} - -type counter struct { - n *int -} - -func (c counter) nextGroup() string { - name := fmt.Sprintf("group%d", *c.n) - *c.n++ - return name -} - -type namedCustomLog struct { - name string - log *caddy.CustomLog -} - -// sbAddrAssociation is a mapping from a list of -// addresses to a list of server blocks that are -// served on those addresses. -type sbAddrAssociation struct { - addresses []string - serverBlocks []serverBlock -} - -const matcherPrefix = "@" - -// Interface guard -var _ caddyfile.ServerType = (*ServerType)(nil) diff --git a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile/options.go b/vendor/github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile/options.go deleted file mode 100644 index f693110e..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile/options.go +++ /dev/null @@ -1,460 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
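Editor's aside: `specificity()` above is the workhorse behind the site-block sort earlier in this file: raw length, minus one per wildcard, minus the full width of each `{placeholder}`. A quick check of its behavior (the function body is copied verbatim from the deleted source):

```go
package main

import (
	"fmt"
	"strings"
)

func specificity(s string) int {
	l := len(s) - strings.Count(s, "*")
	for len(s) > 0 {
		start := strings.Index(s, "{")
		if start < 0 {
			return l
		}
		end := strings.Index(s[start:], "}") + start + 1
		if end <= start {
			return l
		}
		l -= end - start
		s = s[end:]
	}
	return l
}

func main() {
	fmt.Println(specificity("a.example.com")) // 13
	fmt.Println(specificity("*.example.com")) // 12
	fmt.Println(specificity("{args.0}"))      // 0
}
```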
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package httpcaddyfile - -import ( - "strconv" - - "github.com/caddyserver/caddy/v2" - "github.com/caddyserver/caddy/v2/caddyconfig" - "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile" - "github.com/caddyserver/caddy/v2/modules/caddytls" - "github.com/caddyserver/certmagic" - "github.com/mholt/acmez/acme" -) - -func init() { - RegisterGlobalOption("debug", parseOptTrue) - RegisterGlobalOption("http_port", parseOptHTTPPort) - RegisterGlobalOption("https_port", parseOptHTTPSPort) - RegisterGlobalOption("grace_period", parseOptDuration) - RegisterGlobalOption("default_sni", parseOptSingleString) - RegisterGlobalOption("order", parseOptOrder) - RegisterGlobalOption("storage", parseOptStorage) - RegisterGlobalOption("storage_clean_interval", parseOptDuration) - RegisterGlobalOption("acme_ca", parseOptSingleString) - RegisterGlobalOption("acme_ca_root", parseOptSingleString) - RegisterGlobalOption("acme_dns", parseOptACMEDNS) - RegisterGlobalOption("acme_eab", parseOptACMEEAB) - RegisterGlobalOption("cert_issuer", parseOptCertIssuer) - RegisterGlobalOption("skip_install_trust", parseOptTrue) - RegisterGlobalOption("email", parseOptSingleString) - RegisterGlobalOption("admin", parseOptAdmin) - RegisterGlobalOption("on_demand_tls", parseOptOnDemand) - RegisterGlobalOption("local_certs", parseOptTrue) - RegisterGlobalOption("key_type", parseOptSingleString) - RegisterGlobalOption("auto_https", parseOptAutoHTTPS) - RegisterGlobalOption("servers", parseServerOptions) - RegisterGlobalOption("ocsp_stapling", parseOCSPStaplingOptions) - RegisterGlobalOption("log", parseLogOptions) - RegisterGlobalOption("preferred_chains", parseOptPreferredChains) -} - -func parseOptTrue(d *caddyfile.Dispenser, _ interface{}) (interface{}, error) { return true, nil } - -func parseOptHTTPPort(d *caddyfile.Dispenser, _ interface{}) (interface{}, error) { - var httpPort int - for d.Next() { - var httpPortStr string - if !d.AllArgs(&httpPortStr) { - return 0, d.ArgErr() - } - var err error - httpPort, err = strconv.Atoi(httpPortStr) - if err != nil { - return 0, d.Errf("converting port '%s' to integer value: %v", httpPortStr, err) - } - } - return httpPort, nil -} - -func parseOptHTTPSPort(d *caddyfile.Dispenser, _ interface{}) (interface{}, error) { - var httpsPort int - for d.Next() { - var httpsPortStr string - if !d.AllArgs(&httpsPortStr) { - return 0, d.ArgErr() - } - var err error - httpsPort, err = strconv.Atoi(httpsPortStr) - if err != nil { - return 0, d.Errf("converting port '%s' to integer value: %v", httpsPortStr, err) - } - } - return httpsPort, nil -} - -func parseOptOrder(d *caddyfile.Dispenser, _ interface{}) (interface{}, error) { - newOrder := directiveOrder - - for d.Next() { - // get directive name - if !d.Next() { - return nil, d.ArgErr() - } - dirName := d.Val() - if _, ok := registeredDirectives[dirName]; !ok { - return nil, d.Errf("%s is not a registered directive", dirName) - } - - // get positional token - if !d.Next() { - return nil, d.ArgErr() - } - pos := d.Val() - - // if directive exists, first remove it - for i, d := range newOrder { - if d == dirName { - newOrder = 
append(newOrder[:i], newOrder[i+1:]...) - break - } - } - - // act on the positional - switch pos { - case "first": - newOrder = append([]string{dirName}, newOrder...) - if d.NextArg() { - return nil, d.ArgErr() - } - directiveOrder = newOrder - return newOrder, nil - case "last": - newOrder = append(newOrder, dirName) - if d.NextArg() { - return nil, d.ArgErr() - } - directiveOrder = newOrder - return newOrder, nil - case "before": - case "after": - default: - return nil, d.Errf("unknown positional '%s'", pos) - } - - // get name of other directive - if !d.NextArg() { - return nil, d.ArgErr() - } - otherDir := d.Val() - if d.NextArg() { - return nil, d.ArgErr() - } - - // insert directive into proper position - for i, d := range newOrder { - if d == otherDir { - if pos == "before" { - newOrder = append(newOrder[:i], append([]string{dirName}, newOrder[i:]...)...) - } else if pos == "after" { - newOrder = append(newOrder[:i+1], append([]string{dirName}, newOrder[i+1:]...)...) - } - break - } - } - } - - directiveOrder = newOrder - - return newOrder, nil -} - -func parseOptStorage(d *caddyfile.Dispenser, _ interface{}) (interface{}, error) { - if !d.Next() { // consume option name - return nil, d.ArgErr() - } - if !d.Next() { // get storage module name - return nil, d.ArgErr() - } - modID := "caddy.storage." + d.Val() - unm, err := caddyfile.UnmarshalModule(d, modID) - if err != nil { - return nil, err - } - storage, ok := unm.(caddy.StorageConverter) - if !ok { - return nil, d.Errf("module %s is not a caddy.StorageConverter", modID) - } - return storage, nil -} - -func parseOptDuration(d *caddyfile.Dispenser, _ interface{}) (interface{}, error) { - if !d.Next() { // consume option name - return nil, d.ArgErr() - } - if !d.Next() { // get duration value - return nil, d.ArgErr() - } - dur, err := caddy.ParseDuration(d.Val()) - if err != nil { - return nil, err - } - return caddy.Duration(dur), nil -} - -func parseOptACMEDNS(d *caddyfile.Dispenser, _ interface{}) (interface{}, error) { - if !d.Next() { // consume option name - return nil, d.ArgErr() - } - if !d.Next() { // get DNS module name - return nil, d.ArgErr() - } - modID := "dns.providers." + d.Val() - unm, err := caddyfile.UnmarshalModule(d, modID) - if err != nil { - return nil, err - } - prov, ok := unm.(certmagic.ACMEDNSProvider) - if !ok { - return nil, d.Errf("module %s (%T) is not a certmagic.ACMEDNSProvider", modID, unm) - } - return prov, nil -} - -func parseOptACMEEAB(d *caddyfile.Dispenser, _ interface{}) (interface{}, error) { - eab := new(acme.EAB) - for d.Next() { - if d.NextArg() { - return nil, d.ArgErr() - } - for nesting := d.Nesting(); d.NextBlock(nesting); { - switch d.Val() { - case "key_id": - if !d.NextArg() { - return nil, d.ArgErr() - } - eab.KeyID = d.Val() - - case "mac_key": - if !d.NextArg() { - return nil, d.ArgErr() - } - eab.MACKey = d.Val() - - default: - return nil, d.Errf("unrecognized parameter '%s'", d.Val()) - } - } - } - return eab, nil -} - -func parseOptCertIssuer(d *caddyfile.Dispenser, existing interface{}) (interface{}, error) { - var issuers []certmagic.Issuer - if existing != nil { - issuers = existing.([]certmagic.Issuer) - } - for d.Next() { // consume option name - if !d.Next() { // get issuer module name - return nil, d.ArgErr() - } - modID := "tls.issuance." 
+ d.Val() - unm, err := caddyfile.UnmarshalModule(d, modID) - if err != nil { - return nil, err - } - iss, ok := unm.(certmagic.Issuer) - if !ok { - return nil, d.Errf("module %s (%T) is not a certmagic.Issuer", modID, unm) - } - issuers = append(issuers, iss) - } - return issuers, nil -} - -func parseOptSingleString(d *caddyfile.Dispenser, _ interface{}) (interface{}, error) { - d.Next() // consume parameter name - if !d.Next() { - return "", d.ArgErr() - } - val := d.Val() - if d.Next() { - return "", d.ArgErr() - } - return val, nil -} - -func parseOptAdmin(d *caddyfile.Dispenser, _ interface{}) (interface{}, error) { - adminCfg := new(caddy.AdminConfig) - for d.Next() { - if d.NextArg() { - listenAddress := d.Val() - if listenAddress == "off" { - adminCfg.Disabled = true - if d.Next() { // Do not accept any remaining options including block - return nil, d.Err("No more option is allowed after turning off admin config") - } - } else { - adminCfg.Listen = listenAddress - if d.NextArg() { // At most 1 arg is allowed - return nil, d.ArgErr() - } - } - } - for nesting := d.Nesting(); d.NextBlock(nesting); { - switch d.Val() { - case "enforce_origin": - adminCfg.EnforceOrigin = true - - case "origins": - adminCfg.Origins = d.RemainingArgs() - - default: - return nil, d.Errf("unrecognized parameter '%s'", d.Val()) - } - } - } - if adminCfg.Listen == "" && !adminCfg.Disabled { - adminCfg.Listen = caddy.DefaultAdminListen - } - return adminCfg, nil -} - -func parseOptOnDemand(d *caddyfile.Dispenser, _ interface{}) (interface{}, error) { - var ond *caddytls.OnDemandConfig - for d.Next() { - if d.NextArg() { - return nil, d.ArgErr() - } - for nesting := d.Nesting(); d.NextBlock(nesting); { - switch d.Val() { - case "ask": - if !d.NextArg() { - return nil, d.ArgErr() - } - if ond == nil { - ond = new(caddytls.OnDemandConfig) - } - ond.Ask = d.Val() - - case "interval": - if !d.NextArg() { - return nil, d.ArgErr() - } - dur, err := caddy.ParseDuration(d.Val()) - if err != nil { - return nil, err - } - if ond == nil { - ond = new(caddytls.OnDemandConfig) - } - if ond.RateLimit == nil { - ond.RateLimit = new(caddytls.RateLimit) - } - ond.RateLimit.Interval = caddy.Duration(dur) - - case "burst": - if !d.NextArg() { - return nil, d.ArgErr() - } - burst, err := strconv.Atoi(d.Val()) - if err != nil { - return nil, err - } - if ond == nil { - ond = new(caddytls.OnDemandConfig) - } - if ond.RateLimit == nil { - ond.RateLimit = new(caddytls.RateLimit) - } - ond.RateLimit.Burst = burst - - default: - return nil, d.Errf("unrecognized parameter '%s'", d.Val()) - } - } - } - if ond == nil { - return nil, d.Err("expected at least one config parameter for on_demand_tls") - } - return ond, nil -} - -func parseOptAutoHTTPS(d *caddyfile.Dispenser, _ interface{}) (interface{}, error) { - d.Next() // consume parameter name - if !d.Next() { - return "", d.ArgErr() - } - val := d.Val() - if d.Next() { - return "", d.ArgErr() - } - if val != "off" && val != "disable_redirects" && val != "ignore_loaded_certs" { - return "", d.Errf("auto_https must be one of 'off', 'disable_redirects' or 'ignore_loaded_certs'") - } - return val, nil -} - -func parseServerOptions(d *caddyfile.Dispenser, _ interface{}) (interface{}, error) { - return unmarshalCaddyfileServerOptions(d) -} - -func parseOCSPStaplingOptions(d *caddyfile.Dispenser, _ interface{}) (interface{}, error) { - d.Next() // consume option name - var val string - if !d.AllArgs(&val) { - return nil, d.ArgErr() - } - if val != "off" { - return nil, d.Errf("invalid 
argument '%s'", val) - } - return certmagic.OCSPConfig{ - DisableStapling: val == "off", - }, nil -} - -// parseLogOptions parses the global log option. Syntax: -// -// log [name] { -// output ... -// format ... -// level -// include -// exclude -// } -// -// When the name argument is unspecified, this directive modifies the default -// logger. -// -func parseLogOptions(d *caddyfile.Dispenser, existingVal interface{}) (interface{}, error) { - currentNames := make(map[string]struct{}) - if existingVal != nil { - innerVals, ok := existingVal.([]ConfigValue) - if !ok { - return nil, d.Errf("existing log values of unexpected type: %T", existingVal) - } - for _, rawVal := range innerVals { - val, ok := rawVal.Value.(namedCustomLog) - if !ok { - return nil, d.Errf("existing log value of unexpected type: %T", existingVal) - } - currentNames[val.name] = struct{}{} - } - } - - var warnings []caddyconfig.Warning - // Call out the same parser that handles server-specific log configuration. - configValues, err := parseLogHelper( - Helper{ - Dispenser: d, - warnings: &warnings, - }, - currentNames, - ) - if err != nil { - return nil, err - } - if len(warnings) > 0 { - return nil, d.Errf("warnings found in parsing global log options: %+v", warnings) - } - - return configValues, nil -} - -func parseOptPreferredChains(d *caddyfile.Dispenser, _ interface{}) (interface{}, error) { - d.Next() - return caddytls.ParseCaddyfilePreferredChainsOptions(d) -} diff --git a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile/pkiapp.go b/vendor/github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile/pkiapp.go deleted file mode 100644 index a21951db..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile/pkiapp.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package httpcaddyfile - -import ( - "github.com/caddyserver/caddy/v2/caddyconfig" - "github.com/caddyserver/caddy/v2/modules/caddypki" -) - -func (st ServerType) buildPKIApp( - pairings []sbAddrAssociation, - options map[string]interface{}, - warnings []caddyconfig.Warning, -) (*caddypki.PKI, []caddyconfig.Warning, error) { - - pkiApp := &caddypki.PKI{CAs: make(map[string]*caddypki.CA)} - - skipInstallTrust := false - if _, ok := options["skip_install_trust"]; ok { - skipInstallTrust = true - } - falseBool := false - - for _, p := range pairings { - for _, sblock := range p.serverBlocks { - // find all the CAs that were defined and add them to the app config - // i.e. 
from any "acme_server" directives - for _, caCfgValue := range sblock.pile["pki.ca"] { - ca := caCfgValue.Value.(*caddypki.CA) - if skipInstallTrust { - ca.InstallTrust = &falseBool - } - pkiApp.CAs[ca.ID] = ca - } - } - } - - // if there was no CAs defined in any of the servers, - // and we were requested to not install trust, then - // add one for the default/local CA to do so - if len(pkiApp.CAs) == 0 && skipInstallTrust { - ca := new(caddypki.CA) - ca.ID = caddypki.DefaultCAID - ca.InstallTrust = &falseBool - pkiApp.CAs[ca.ID] = ca - } - - return pkiApp, warnings, nil -} diff --git a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile/serveroptions.go b/vendor/github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile/serveroptions.go deleted file mode 100644 index 9e94b863..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile/serveroptions.go +++ /dev/null @@ -1,228 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package httpcaddyfile - -import ( - "encoding/json" - "fmt" - - "github.com/caddyserver/caddy/v2" - "github.com/caddyserver/caddy/v2/caddyconfig" - "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile" - "github.com/caddyserver/caddy/v2/modules/caddyhttp" - "github.com/dustin/go-humanize" -) - -// serverOptions collects server config overrides parsed from Caddyfile global options -type serverOptions struct { - // If set, will only apply these options to servers that contain a - // listener address that matches exactly. If empty, will apply to all - // servers that were not already matched by another serverOptions. - ListenerAddress string - - // These will all map 1:1 to the caddyhttp.Server struct - ListenerWrappersRaw []json.RawMessage - ReadTimeout caddy.Duration - ReadHeaderTimeout caddy.Duration - WriteTimeout caddy.Duration - IdleTimeout caddy.Duration - MaxHeaderBytes int - AllowH2C bool - ExperimentalHTTP3 bool - StrictSNIHost *bool -} - -func unmarshalCaddyfileServerOptions(d *caddyfile.Dispenser) (interface{}, error) { - serverOpts := serverOptions{} - for d.Next() { - if d.NextArg() { - serverOpts.ListenerAddress = d.Val() - if d.NextArg() { - return nil, d.ArgErr() - } - } - for nesting := d.Nesting(); d.NextBlock(nesting); { - switch d.Val() { - case "listener_wrappers": - for nesting := d.Nesting(); d.NextBlock(nesting); { - modID := "caddy.listeners." 
+ d.Val() - unm, err := caddyfile.UnmarshalModule(d, modID) - if err != nil { - return nil, err - } - listenerWrapper, ok := unm.(caddy.ListenerWrapper) - if !ok { - return nil, fmt.Errorf("module %s (%T) is not a listener wrapper", modID, unm) - } - jsonListenerWrapper := caddyconfig.JSONModuleObject( - listenerWrapper, - "wrapper", - listenerWrapper.(caddy.Module).CaddyModule().ID.Name(), - nil, - ) - serverOpts.ListenerWrappersRaw = append(serverOpts.ListenerWrappersRaw, jsonListenerWrapper) - } - - case "timeouts": - for nesting := d.Nesting(); d.NextBlock(nesting); { - switch d.Val() { - case "read_body": - if !d.NextArg() { - return nil, d.ArgErr() - } - dur, err := caddy.ParseDuration(d.Val()) - if err != nil { - return nil, d.Errf("parsing read_body timeout duration: %v", err) - } - serverOpts.ReadTimeout = caddy.Duration(dur) - - case "read_header": - if !d.NextArg() { - return nil, d.ArgErr() - } - dur, err := caddy.ParseDuration(d.Val()) - if err != nil { - return nil, d.Errf("parsing read_header timeout duration: %v", err) - } - serverOpts.ReadHeaderTimeout = caddy.Duration(dur) - - case "write": - if !d.NextArg() { - return nil, d.ArgErr() - } - dur, err := caddy.ParseDuration(d.Val()) - if err != nil { - return nil, d.Errf("parsing write timeout duration: %v", err) - } - serverOpts.WriteTimeout = caddy.Duration(dur) - - case "idle": - if !d.NextArg() { - return nil, d.ArgErr() - } - dur, err := caddy.ParseDuration(d.Val()) - if err != nil { - return nil, d.Errf("parsing idle timeout duration: %v", err) - } - serverOpts.IdleTimeout = caddy.Duration(dur) - - default: - return nil, d.Errf("unrecognized timeouts option '%s'", d.Val()) - } - } - - case "max_header_size": - var sizeStr string - if !d.AllArgs(&sizeStr) { - return nil, d.ArgErr() - } - size, err := humanize.ParseBytes(sizeStr) - if err != nil { - return nil, d.Errf("parsing max_header_size: %v", err) - } - serverOpts.MaxHeaderBytes = int(size) - - case "protocol": - for nesting := d.Nesting(); d.NextBlock(nesting); { - switch d.Val() { - case "allow_h2c": - if d.NextArg() { - return nil, d.ArgErr() - } - serverOpts.AllowH2C = true - - case "experimental_http3": - if d.NextArg() { - return nil, d.ArgErr() - } - serverOpts.ExperimentalHTTP3 = true - - case "strict_sni_host": - if d.NextArg() { - return nil, d.ArgErr() - } - trueBool := true - serverOpts.StrictSNIHost = &trueBool - - default: - return nil, d.Errf("unrecognized protocol option '%s'", d.Val()) - } - } - - default: - return nil, d.Errf("unrecognized servers option '%s'", d.Val()) - } - } - } - return serverOpts, nil -} - -// applyServerOptions sets the server options on the appropriate servers -func applyServerOptions( - servers map[string]*caddyhttp.Server, - options map[string]interface{}, - warnings *[]caddyconfig.Warning, -) error { - // If experimental HTTP/3 is enabled, enable it on each server. 
- // We already know there won't be a conflict with serverOptions because - // we validated earlier that "experimental_http3" cannot be set at the same - // time as "servers" - if enableH3, ok := options["experimental_http3"].(bool); ok && enableH3 { - *warnings = append(*warnings, caddyconfig.Warning{Message: "the 'experimental_http3' global option is deprecated, please use the 'servers > protocol > experimental_http3' option instead"}) - for _, srv := range servers { - srv.ExperimentalHTTP3 = true - } - } - - serverOpts, ok := options["servers"].([]serverOptions) - if !ok { - return nil - } - - for _, server := range servers { - // find the options that apply to this server - opts := func() *serverOptions { - for _, entry := range serverOpts { - if entry.ListenerAddress == "" { - return &entry - } - for _, listener := range server.Listen { - if entry.ListenerAddress == listener { - return &entry - } - } - } - return nil - }() - - // if none apply, then move to the next server - if opts == nil { - continue - } - - // set all the options - server.ListenerWrappersRaw = opts.ListenerWrappersRaw - server.ReadTimeout = opts.ReadTimeout - server.ReadHeaderTimeout = opts.ReadHeaderTimeout - server.WriteTimeout = opts.WriteTimeout - server.IdleTimeout = opts.IdleTimeout - server.MaxHeaderBytes = opts.MaxHeaderBytes - server.AllowH2C = opts.AllowH2C - server.ExperimentalHTTP3 = opts.ExperimentalHTTP3 - server.StrictSNIHost = opts.StrictSNIHost - } - - return nil -} diff --git a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile/tlsapp.go b/vendor/github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile/tlsapp.go deleted file mode 100644 index 0fe1fc5f..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile/tlsapp.go +++ /dev/null @@ -1,632 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
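Aside: the matching rule in applyServerOptions above — an entry with an empty ListenerAddress acts as a catch-all, otherwise it must match one of the server's listen addresses exactly, and the first applicable entry wins — can be shown in isolation. A minimal sketch with simplified stand-in types (not Caddy's real structs):

```go
package main

import "fmt"

// entry stands in for a parsed `servers` global-option block;
// an empty Addr means "applies to every server not matched elsewhere".
type entry struct {
	Addr        string
	IdleTimeout string
}

type server struct {
	Name   string
	Listen []string
}

// match returns the first entry that applies to srv: a catch-all
// entry with no address, or an exact listener-address match.
func match(entries []entry, srv server) *entry {
	for i, e := range entries {
		if e.Addr == "" {
			return &entries[i]
		}
		for _, l := range srv.Listen {
			if e.Addr == l {
				return &entries[i]
			}
		}
	}
	return nil
}

func main() {
	entries := []entry{
		{Addr: ":8443", IdleTimeout: "30s"},
		{Addr: "", IdleTimeout: "5m"}, // catch-all
	}
	for _, srv := range []server{
		{Name: "srv0", Listen: []string{":8443"}},
		{Name: "srv1", Listen: []string{":9000"}},
	} {
		if m := match(entries, srv); m != nil {
			fmt.Println(srv.Name, "->", m.IdleTimeout)
		}
	}
}
```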
- -package httpcaddyfile - -import ( - "bytes" - "encoding/json" - "fmt" - "reflect" - "sort" - "strconv" - "strings" - - "github.com/caddyserver/caddy/v2" - "github.com/caddyserver/caddy/v2/caddyconfig" - "github.com/caddyserver/caddy/v2/modules/caddyhttp" - "github.com/caddyserver/caddy/v2/modules/caddytls" - "github.com/caddyserver/certmagic" - "github.com/mholt/acmez/acme" -) - -func (st ServerType) buildTLSApp( - pairings []sbAddrAssociation, - options map[string]interface{}, - warnings []caddyconfig.Warning, -) (*caddytls.TLS, []caddyconfig.Warning, error) { - - tlsApp := &caddytls.TLS{CertificatesRaw: make(caddy.ModuleMap)} - var certLoaders []caddytls.CertificateLoader - - httpPort := strconv.Itoa(caddyhttp.DefaultHTTPPort) - if hp, ok := options["http_port"].(int); ok { - httpPort = strconv.Itoa(hp) - } - httpsPort := strconv.Itoa(caddyhttp.DefaultHTTPSPort) - if hsp, ok := options["https_port"].(int); ok { - httpsPort = strconv.Itoa(hsp) - } - - // count how many server blocks have a TLS-enabled key with - // no host, and find all hosts that share a server block with - // a hostless key, so that they don't get forgotten/omitted - // by auto-HTTPS (since they won't appear in route matchers) - var serverBlocksWithTLSHostlessKey int - httpsHostsSharedWithHostlessKey := make(map[string]struct{}) - for _, pair := range pairings { - for _, sb := range pair.serverBlocks { - for _, addr := range sb.keys { - if addr.Host == "" { - // this address has no hostname, but if it's explicitly set - // to HTTPS, then we need to count it as being TLS-enabled - if addr.Scheme == "https" || addr.Port == httpsPort { - serverBlocksWithTLSHostlessKey++ - } - // this server block has a hostless key, now - // go through and add all the hosts to the set - for _, otherAddr := range sb.keys { - if otherAddr.Original == addr.Original { - continue - } - if otherAddr.Host != "" && otherAddr.Scheme != "http" && otherAddr.Port != httpPort { - httpsHostsSharedWithHostlessKey[otherAddr.Host] = struct{}{} - } - } - break - } - } - } - } - - // a catch-all automation policy is used as a "default" for all subjects that - // don't have custom configuration explicitly associated with them; this - // is only to add if the global settings or defaults are non-empty - catchAllAP, err := newBaseAutomationPolicy(options, warnings, false) - if err != nil { - return nil, warnings, err - } - if catchAllAP != nil { - if tlsApp.Automation == nil { - tlsApp.Automation = new(caddytls.AutomationConfig) - } - tlsApp.Automation.Policies = append(tlsApp.Automation.Policies, catchAllAP) - } - - for _, p := range pairings { - // avoid setting up TLS automation policies for a server that is HTTP-only - if !listenersUseAnyPortOtherThan(p.addresses, httpPort) { - continue - } - - for _, sblock := range p.serverBlocks { - // get values that populate an automation policy for this block - ap, err := newBaseAutomationPolicy(options, warnings, true) - if err != nil { - return nil, warnings, err - } - - sblockHosts := sblock.hostsFromKeys(false) - if len(sblockHosts) == 0 && catchAllAP != nil { - ap = catchAllAP - } - - // on-demand tls - if _, ok := sblock.pile["tls.on_demand"]; ok { - ap.OnDemand = true - } - - if keyTypeVals, ok := sblock.pile["tls.key_type"]; ok { - ap.KeyType = keyTypeVals[0].Value.(string) - } - - // certificate issuers - if issuerVals, ok := sblock.pile["tls.cert_issuer"]; ok { - var issuers []certmagic.Issuer - for _, issuerVal := range issuerVals { - issuers = append(issuers, issuerVal.Value.(certmagic.Issuer)) - } - if 
ap == catchAllAP && !reflect.DeepEqual(ap.Issuers, issuers) { - return nil, warnings, fmt.Errorf("automation policy from site block is also default/catch-all policy because of key without hostname, and the two are in conflict: %#v != %#v", ap.Issuers, issuers) - } - ap.Issuers = issuers - } - - // custom bind host - for _, cfgVal := range sblock.pile["bind"] { - for _, iss := range ap.Issuers { - // if an issuer was already configured and it is NOT an ACME issuer, - // skip, since we intend to adjust only ACME issuers; ensure we - // include any issuer that embeds/wraps an underlying ACME issuer - var acmeIssuer *caddytls.ACMEIssuer - if acmeWrapper, ok := iss.(acmeCapable); ok { - acmeIssuer = acmeWrapper.GetACMEIssuer() - } - if acmeIssuer == nil { - continue - } - - // proceed to configure the ACME issuer's bind host, without - // overwriting any existing settings - if acmeIssuer.Challenges == nil { - acmeIssuer.Challenges = new(caddytls.ChallengesConfig) - } - if acmeIssuer.Challenges.BindHost == "" { - // only binding to one host is supported - var bindHost string - if bindHosts, ok := cfgVal.Value.([]string); ok && len(bindHosts) > 0 { - bindHost = bindHosts[0] - } - acmeIssuer.Challenges.BindHost = bindHost - } - } - } - - // first make sure this block is allowed to create an automation policy; - // doing so is forbidden if it has a key with no host (i.e. ":443") - // and if there is a different server block that also has a key with no - // host -- since a key with no host matches any host, we need its - // associated automation policy to have an empty Subjects list, i.e. no - // host filter, which is indistinguishable between the two server blocks - // because automation is not done in the context of a particular server... - // this is an example of a poor mapping from Caddyfile to JSON but that's - // the least-leaky abstraction I could figure out - if len(sblockHosts) == 0 { - if serverBlocksWithTLSHostlessKey > 1 { - // this server block and at least one other has a key with no host, - // making the two indistinguishable; it is misleading to define such - // a policy within one server block since it actually will apply to - // others as well - return nil, warnings, fmt.Errorf("cannot make a TLS automation policy from a server block that has a host-less address when there are other TLS-enabled server block addresses lacking a host") - } - if catchAllAP == nil { - // this server block has a key with no hosts, but there is not yet - // a catch-all automation policy (probably because no global options - // were set), so this one becomes it - catchAllAP = ap - } - } - - // associate our new automation policy with this server block's hosts - ap.Subjects = sblock.hostsFromKeysNotHTTP(httpPort) - sort.Strings(ap.Subjects) // solely for deterministic test results - - // if a combination of public and internal names were given - // for this same server block and no issuer was specified, we - // need to separate them out in the automation policies so - // that the internal names can use the internal issuer and - // the other names can use the default/public/ACME issuer - var ap2 *caddytls.AutomationPolicy - if len(ap.Issuers) == 0 { - var internal, external []string - for _, s := range ap.Subjects { - if !certmagic.SubjectQualifiesForCert(s) { - return nil, warnings, fmt.Errorf("subject does not qualify for certificate: '%s'", s) - } - // we don't use certmagic.SubjectQualifiesForPublicCert() because of one nuance: - // names like *.*.tld that may not qualify for a public certificate are 
actually - // fine when used with OnDemand, since OnDemand (currently) does not obtain - // wildcards (if it ever does, there will be a separate config option to enable - // it that we would need to check here) since the hostname is known at handshake; - // and it is unexpected to switch to internal issuer when the user wants to get - // regular certificates on-demand for a class of certs like *.*.tld. - if subjectQualifiesForPublicCert(ap, s) { - external = append(external, s) - } else { - internal = append(internal, s) - } - } - if len(external) > 0 && len(internal) > 0 { - ap.Subjects = external - apCopy := *ap - ap2 = &apCopy - ap2.Subjects = internal - ap2.IssuersRaw = []json.RawMessage{caddyconfig.JSONModuleObject(caddytls.InternalIssuer{}, "module", "internal", &warnings)} - } - } - if tlsApp.Automation == nil { - tlsApp.Automation = new(caddytls.AutomationConfig) - } - tlsApp.Automation.Policies = append(tlsApp.Automation.Policies, ap) - if ap2 != nil { - tlsApp.Automation.Policies = append(tlsApp.Automation.Policies, ap2) - } - - // certificate loaders - if clVals, ok := sblock.pile["tls.cert_loader"]; ok { - for _, clVal := range clVals { - certLoaders = append(certLoaders, clVal.Value.(caddytls.CertificateLoader)) - } - } - } - } - - // group certificate loaders by module name, then add to config - if len(certLoaders) > 0 { - loadersByName := make(map[string]caddytls.CertificateLoader) - for _, cl := range certLoaders { - name := caddy.GetModuleName(cl) - // ugh... technically, we may have multiple FileLoader and FolderLoader - // modules (because the tls directive returns one per occurrence), but - // the config structure expects only one instance of each kind of loader - // module, so we have to combine them... instead of enumerating each - // possible cert loader module in a type switch, we can use reflection, - // which works on any cert loaders that are slice types - if reflect.TypeOf(cl).Kind() == reflect.Slice { - combined := reflect.ValueOf(loadersByName[name]) - if !combined.IsValid() { - combined = reflect.New(reflect.TypeOf(cl)).Elem() - } - clVal := reflect.ValueOf(cl) - for i := 0; i < clVal.Len(); i++ { - combined = reflect.Append(combined, clVal.Index(i)) - } - loadersByName[name] = combined.Interface().(caddytls.CertificateLoader) - } - } - for certLoaderName, loaders := range loadersByName { - tlsApp.CertificatesRaw[certLoaderName] = caddyconfig.JSON(loaders, &warnings) - } - } - - // set any of the on-demand options, for if/when on-demand TLS is enabled - if onDemand, ok := options["on_demand_tls"].(*caddytls.OnDemandConfig); ok { - if tlsApp.Automation == nil { - tlsApp.Automation = new(caddytls.AutomationConfig) - } - tlsApp.Automation.OnDemand = onDemand - } - - // set the storage clean interval if configured - if storageCleanInterval, ok := options["storage_clean_interval"].(caddy.Duration); ok { - if tlsApp.Automation == nil { - tlsApp.Automation = new(caddytls.AutomationConfig) - } - tlsApp.Automation.StorageCleanInterval = storageCleanInterval - } - - // if any hostnames appear on the same server block as a key with - // no host, they will not be used with route matchers because the - // hostless key matches all hosts, therefore, it wouldn't be - // considered for auto-HTTPS, so we need to make sure those hosts - // are manually considered for managed certificates; we also need - // to make sure that any of these names which are internal-only - // get internal certificates by default rather than ACME - var al caddytls.AutomateLoader - internalAP := 
&caddytls.AutomationPolicy{ - IssuersRaw: []json.RawMessage{json.RawMessage(`{"module":"internal"}`)}, - } - for h := range httpsHostsSharedWithHostlessKey { - al = append(al, h) - if !certmagic.SubjectQualifiesForPublicCert(h) { - internalAP.Subjects = append(internalAP.Subjects, h) - } - } - if len(al) > 0 { - tlsApp.CertificatesRaw["automate"] = caddyconfig.JSON(al, &warnings) - } - if len(internalAP.Subjects) > 0 { - if tlsApp.Automation == nil { - tlsApp.Automation = new(caddytls.AutomationConfig) - } - tlsApp.Automation.Policies = append(tlsApp.Automation.Policies, internalAP) - } - - // if there are any global options set for issuers (ACME ones in particular), make sure they - // take effect in every automation policy that does not have any issuers - if tlsApp.Automation != nil { - globalEmail := options["email"] - globalACMECA := options["acme_ca"] - globalACMECARoot := options["acme_ca_root"] - globalACMEDNS := options["acme_dns"] - globalACMEEAB := options["acme_eab"] - globalPreferredChains := options["preferred_chains"] - hasGlobalACMEDefaults := globalEmail != nil || globalACMECA != nil || globalACMECARoot != nil || globalACMEDNS != nil || globalACMEEAB != nil || globalPreferredChains != nil - if hasGlobalACMEDefaults { - // for _, ap := range tlsApp.Automation.Policies { - for i := 0; i < len(tlsApp.Automation.Policies); i++ { - ap := tlsApp.Automation.Policies[i] - if len(ap.Issuers) == 0 && automationPolicyHasAllPublicNames(ap) { - // for public names, create default issuers which will later be filled in with configured global defaults - // (internal names will implicitly use the internal issuer at auto-https time) - ap.Issuers = caddytls.DefaultIssuers() - - // if a specific endpoint is configured, can't use multiple default issuers - if globalACMECA != nil { - if strings.Contains(globalACMECA.(string), "zerossl") { - ap.Issuers = []certmagic.Issuer{&caddytls.ZeroSSLIssuer{ACMEIssuer: new(caddytls.ACMEIssuer)}} - } else { - ap.Issuers = []certmagic.Issuer{new(caddytls.ACMEIssuer)} - } - } - } - } - } - } - - // finalize and verify policies; do cleanup - if tlsApp.Automation != nil { - for i, ap := range tlsApp.Automation.Policies { - // ensure all issuers have global defaults filled in - for j, issuer := range ap.Issuers { - err := fillInGlobalACMEDefaults(issuer, options) - if err != nil { - return nil, warnings, fmt.Errorf("filling in global issuer defaults for AP %d, issuer %d: %v", i, j, err) - } - } - - // encode all issuer values we created, so they will be rendered in the output - if len(ap.Issuers) > 0 && ap.IssuersRaw == nil { - for _, iss := range ap.Issuers { - issuerName := iss.(caddy.Module).CaddyModule().ID.Name() - ap.IssuersRaw = append(ap.IssuersRaw, caddyconfig.JSONModuleObject(iss, "module", issuerName, &warnings)) - } - } - } - - // consolidate automation policies that are the exact same - tlsApp.Automation.Policies = consolidateAutomationPolicies(tlsApp.Automation.Policies) - - // ensure automation policies don't overlap subjects (this should be - // an error at provision-time as well, but catch it in the adapt phase - // for convenience) - automationHostSet := make(map[string]struct{}) - for _, ap := range tlsApp.Automation.Policies { - for _, s := range ap.Subjects { - if _, ok := automationHostSet[s]; ok { - return nil, warnings, fmt.Errorf("hostname appears in more than one automation policy, making certificate management ambiguous: %s", s) - } - automationHostSet[s] = struct{}{} - } - } - - // if nothing remains, remove any excess values to clean 
up the resulting config - if len(tlsApp.Automation.Policies) == 0 { - tlsApp.Automation.Policies = nil - } - if reflect.DeepEqual(tlsApp.Automation, new(caddytls.AutomationConfig)) { - tlsApp.Automation = nil - } - } - - return tlsApp, warnings, nil -} - -type acmeCapable interface{ GetACMEIssuer() *caddytls.ACMEIssuer } - -func fillInGlobalACMEDefaults(issuer certmagic.Issuer, options map[string]interface{}) error { - acmeWrapper, ok := issuer.(acmeCapable) - if !ok { - return nil - } - acmeIssuer := acmeWrapper.GetACMEIssuer() - if acmeIssuer == nil { - return nil - } - - globalEmail := options["email"] - globalACMECA := options["acme_ca"] - globalACMECARoot := options["acme_ca_root"] - globalACMEDNS := options["acme_dns"] - globalACMEEAB := options["acme_eab"] - globalPreferredChains := options["preferred_chains"] - - if globalEmail != nil && acmeIssuer.Email == "" { - acmeIssuer.Email = globalEmail.(string) - } - if globalACMECA != nil && acmeIssuer.CA == "" { - acmeIssuer.CA = globalACMECA.(string) - } - if globalACMECARoot != nil && !sliceContains(acmeIssuer.TrustedRootsPEMFiles, globalACMECARoot.(string)) { - acmeIssuer.TrustedRootsPEMFiles = append(acmeIssuer.TrustedRootsPEMFiles, globalACMECARoot.(string)) - } - if globalACMEDNS != nil && (acmeIssuer.Challenges == nil || acmeIssuer.Challenges.DNS == nil) { - acmeIssuer.Challenges = &caddytls.ChallengesConfig{ - DNS: &caddytls.DNSChallengeConfig{ - ProviderRaw: caddyconfig.JSONModuleObject(globalACMEDNS, "name", globalACMEDNS.(caddy.Module).CaddyModule().ID.Name(), nil), - }, - } - } - if globalACMEEAB != nil && acmeIssuer.ExternalAccount == nil { - acmeIssuer.ExternalAccount = globalACMEEAB.(*acme.EAB) - } - if globalPreferredChains != nil && acmeIssuer.PreferredChains == nil { - acmeIssuer.PreferredChains = globalPreferredChains.(*caddytls.ChainPreference) - } - return nil -} - -// newBaseAutomationPolicy returns a new TLS automation policy that gets -// its values from the global options map. It should be used as the base -// for any other automation policies. A nil policy (and no error) will be -// returned if there are no default/global options. However, if always is -// true, a non-nil value will always be returned (unless there is an error). 
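The fill-in logic in fillInGlobalACMEDefaults above is deliberately non-destructive: a global option only lands on an issuer whose corresponding field is still zero, so per-site settings always win. A small self-contained sketch of that pattern (the field names here are illustrative, not Caddy's):

```go
package main

import "fmt"

// issuer is a stand-in for an ACME issuer config.
type issuer struct {
	Email string
	CA    string
}

// fillDefaults copies a global option onto iss only when the
// corresponding field was left empty, mirroring the shape of
// fillInGlobalACMEDefaults.
func fillDefaults(iss *issuer, globals map[string]string) {
	if v, ok := globals["email"]; ok && iss.Email == "" {
		iss.Email = v
	}
	if v, ok := globals["acme_ca"]; ok && iss.CA == "" {
		iss.CA = v
	}
}

func main() {
	globals := map[string]string{
		"email":   "ops@example.com",
		"acme_ca": "https://acme.example/dir",
	}
	iss := issuer{CA: "https://acme-staging.example/dir"} // CA set per-site
	fillDefaults(&iss, globals)
	fmt.Printf("%+v\n", iss) // Email filled from globals, CA untouched
}
```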
-func newBaseAutomationPolicy(options map[string]interface{}, warnings []caddyconfig.Warning, always bool) (*caddytls.AutomationPolicy, error) { - issuers, hasIssuers := options["cert_issuer"] - _, hasLocalCerts := options["local_certs"] - keyType, hasKeyType := options["key_type"] - ocspStapling, hasOCSPStapling := options["ocsp_stapling"] - - hasGlobalAutomationOpts := hasIssuers || hasLocalCerts || hasKeyType || hasOCSPStapling - - // if there are no global options related to automation policies - // set, then we can just return right away - if !hasGlobalAutomationOpts { - if always { - return new(caddytls.AutomationPolicy), nil - } - return nil, nil - } - - ap := new(caddytls.AutomationPolicy) - if hasKeyType { - ap.KeyType = keyType.(string) - } - - if hasIssuers && hasLocalCerts { - return nil, fmt.Errorf("global options are ambiguous: local_certs is confusing when combined with cert_issuer, because local_certs is also a specific kind of issuer") - } - - if hasIssuers { - ap.Issuers = issuers.([]certmagic.Issuer) - } else if hasLocalCerts { - ap.Issuers = []certmagic.Issuer{new(caddytls.InternalIssuer)} - } - - if hasOCSPStapling { - ocspConfig := ocspStapling.(certmagic.OCSPConfig) - ap.DisableOCSPStapling = ocspConfig.DisableStapling - ap.OCSPOverrides = ocspConfig.ResponderOverrides - } - - return ap, nil -} - -// consolidateAutomationPolicies combines automation policies that are the same, -// for a cleaner overall output. -func consolidateAutomationPolicies(aps []*caddytls.AutomationPolicy) []*caddytls.AutomationPolicy { - // sort from most specific to least specific; we depend on this ordering - sort.SliceStable(aps, func(i, j int) bool { - if automationPolicyIsSubset(aps[i], aps[j]) { - return true - } - if automationPolicyIsSubset(aps[j], aps[i]) { - return false - } - return len(aps[i].Subjects) > len(aps[j].Subjects) - }) - - emptyAPCount := 0 - origLenAPs := len(aps) - // compute the number of empty policies (disregarding subjects) - see #4128 - emptyAP := new(caddytls.AutomationPolicy) - for i := 0; i < len(aps); i++ { - emptyAP.Subjects = aps[i].Subjects - if reflect.DeepEqual(aps[i], emptyAP) { - emptyAPCount++ - if !automationPolicyHasAllPublicNames(aps[i]) { - // if this automation policy has internal names, we might as well remove it - // so auto-https can implicitly use the internal issuer - aps = append(aps[:i], aps[i+1:]...) - i-- - } - } - } - // If all policies are empty, we can return nil, as there is no need to set any policy - if emptyAPCount == origLenAPs { - return nil - } - - // remove or combine duplicate policies -outer: - for i := 0; i < len(aps); i++ { - // compare only with next policies; we sorted by specificity so we must not delete earlier policies - for j := i + 1; j < len(aps); j++ { - // if they're exactly equal in every way, just keep one of them - if reflect.DeepEqual(aps[i], aps[j]) { - aps = append(aps[:j], aps[j+1:]...) - // must re-evaluate current i against next j; can't skip it! 
- // even if i decrements to -1, will be incremented to 0 immediately - i-- - continue outer - } - - // if the policy is the same, we can keep just one, but we have - // to be careful which one we keep; if only one has any hostnames - // defined, then we need to keep the one without any hostnames, - // otherwise the one without any subjects (a catch-all) would be - // eaten up by the one with subjects; and if both have subjects, we - // need to combine their lists - if reflect.DeepEqual(aps[i].IssuersRaw, aps[j].IssuersRaw) && - bytes.Equal(aps[i].StorageRaw, aps[j].StorageRaw) && - aps[i].MustStaple == aps[j].MustStaple && - aps[i].KeyType == aps[j].KeyType && - aps[i].OnDemand == aps[j].OnDemand && - aps[i].RenewalWindowRatio == aps[j].RenewalWindowRatio { - if len(aps[i].Subjects) > 0 && len(aps[j].Subjects) == 0 { - // later policy (at j) has no subjects ("catch-all"), so we can - // remove the identical-but-more-specific policy that comes first - // AS LONG AS it is not shadowed by another policy before it; e.g. - // if policy i is for example.com, policy i+1 is '*.com', and policy - // j is catch-all, we cannot remove policy i because that would - // cause example.com to be served by the less specific policy for - // '*.com', which might be different (yes we've seen this happen) - if automationPolicyShadows(i, aps) >= j { - aps = append(aps[:i], aps[i+1:]...) - i-- - continue outer - } - } else { - // avoid repeated subjects - for _, subj := range aps[j].Subjects { - if !sliceContains(aps[i].Subjects, subj) { - aps[i].Subjects = append(aps[i].Subjects, subj) - } - } - aps = append(aps[:j], aps[j+1:]...) - j-- - } - } - } - } - - return aps -} - -// automationPolicyIsSubset returns true if a's subjects are a subset -// of b's subjects. -func automationPolicyIsSubset(a, b *caddytls.AutomationPolicy) bool { - if len(b.Subjects) == 0 { - return true - } - if len(a.Subjects) == 0 { - return false - } - for _, aSubj := range a.Subjects { - var inSuperset bool - for _, bSubj := range b.Subjects { - if certmagic.MatchWildcard(aSubj, bSubj) { - inSuperset = true - break - } - } - if !inSuperset { - return false - } - } - return true -} - -// automationPolicyShadows returns the index of a policy that aps[i] shadows; -// in other words, for all policies after position i, if that policy covers -// the same subjects but is less specific, that policy's position is returned, -// or -1 if no shadowing is found. For example, if policy i is for -// "foo.example.com" and policy i+2 is for "*.example.com", then i+2 will be -// returned, since that policy is shadowed by i, which is in front. -func automationPolicyShadows(i int, aps []*caddytls.AutomationPolicy) int { - for j := i + 1; j < len(aps); j++ { - if automationPolicyIsSubset(aps[i], aps[j]) { - return j - } - } - return -1 -} - -// subjectQualifiesForPublicCert is like certmagic.SubjectQualifiesForPublicCert() except -// that this allows domains with multiple wildcard levels like '*.*.example.com' to qualify -// if the automation policy has OnDemand enabled (i.e. this function is more lenient). 
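The subset test above leans on certmagic.MatchWildcard, which treats a pattern like *.example.com as covering exactly one additional label. A rough standalone approximation of the check (hand-rolled here, so details may differ from certmagic's implementation):

```go
package main

import (
	"fmt"
	"strings"
)

// matchWildcard reports whether subject is covered by pattern, where a
// leading "*." in the pattern stands for exactly one extra label.
func matchWildcard(subject, pattern string) bool {
	if subject == pattern {
		return true
	}
	if !strings.HasPrefix(pattern, "*.") {
		return false
	}
	// "*.example.com" covers "foo.example.com" but not
	// "bar.foo.example.com" or the bare "example.com".
	rest := strings.TrimPrefix(pattern, "*.")
	i := strings.Index(subject, ".")
	return i > 0 && subject[i+1:] == rest
}

// isSubset reports whether every subject in a is covered by some subject
// in b, mirroring automationPolicyIsSubset (empty b is a catch-all).
func isSubset(a, b []string) bool {
	if len(b) == 0 {
		return true
	}
	if len(a) == 0 {
		return false
	}
	for _, as := range a {
		covered := false
		for _, bs := range b {
			if matchWildcard(as, bs) {
				covered = true
				break
			}
		}
		if !covered {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(isSubset([]string{"foo.example.com"}, []string{"*.example.com"}))  // true
	fmt.Println(isSubset([]string{"a.b.example.com"}, []string{"*.example.com"})) // false
}
```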
-func subjectQualifiesForPublicCert(ap *caddytls.AutomationPolicy, subj string) bool { - return !certmagic.SubjectIsIP(subj) && - !certmagic.SubjectIsInternal(subj) && - (strings.Count(subj, "*.") < 2 || ap.OnDemand) -} - -func automationPolicyHasAllPublicNames(ap *caddytls.AutomationPolicy) bool { - for _, subj := range ap.Subjects { - if !subjectQualifiesForPublicCert(ap, subj) { - return false - } - } - return true -} diff --git a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/httploader.go b/vendor/github.com/caddyserver/caddy/v2/caddyconfig/httploader.go deleted file mode 100644 index aabd1035..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/httploader.go +++ /dev/null @@ -1,151 +0,0 @@ -package caddyconfig - -import ( - "crypto/tls" - "crypto/x509" - "fmt" - "io/ioutil" - "net/http" - "time" - - "github.com/caddyserver/caddy/v2" -) - -func init() { - caddy.RegisterModule(HTTPLoader{}) -} - -// HTTPLoader can load Caddy configs over HTTP(S). It can adapt the config -// based on the Content-Type header of the HTTP response. -type HTTPLoader struct { - // The method for the request. Default: GET - Method string `json:"method,omitempty"` - - // The URL of the request. - URL string `json:"url,omitempty"` - - // HTTP headers to add to the request. - Headers http.Header `json:"header,omitempty"` - - // Maximum time allowed for a complete connection and request. - Timeout caddy.Duration `json:"timeout,omitempty"` - - TLS *struct { - // Present this instance's managed remote identity credentials to the server. - UseServerIdentity bool `json:"use_server_identity,omitempty"` - - // PEM-encoded client certificate filename to present to the server. - ClientCertificateFile string `json:"client_certificate_file,omitempty"` - - // PEM-encoded key to use with the client certificate. - ClientCertificateKeyFile string `json:"client_certificate_key_file,omitempty"` - - // List of PEM-encoded CA certificate files to add to the same trust - // store as RootCAPool (or root_ca_pool in the JSON). - RootCAPEMFiles []string `json:"root_ca_pem_files,omitempty"` - } `json:"tls,omitempty"` -} - -// CaddyModule returns the Caddy module information. -func (HTTPLoader) CaddyModule() caddy.ModuleInfo { - return caddy.ModuleInfo{ - ID: "caddy.config_loaders.http", - New: func() caddy.Module { return new(HTTPLoader) }, - } -} - -// LoadConfig loads a Caddy config. 
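For flavor, the essentials of a loader like HTTPLoader above — a request bounded by a client-level timeout, an error on HTTP 4xx/5xx, and a body read — fit in a few lines of plain net/http. This is a generic sketch, not Caddy's module; the URL assumes Caddy's default admin address:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"time"
)

// fetchConfig GETs url and returns the body, failing on HTTP errors.
// The client Timeout bounds the whole exchange, as in HTTPLoader.
func fetchConfig(url string) ([]byte, error) {
	client := &http.Client{Timeout: 10 * time.Second}
	resp, err := client.Get(url)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 400 {
		return nil, fmt.Errorf("server responded with HTTP %d", resp.StatusCode)
	}
	return io.ReadAll(resp.Body)
}

func main() {
	cfg, err := fetchConfig("http://localhost:2019/config/")
	if err != nil {
		fmt.Println("load failed:", err)
		return
	}
	fmt.Printf("fetched %d bytes of config\n", len(cfg))
}
```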
-func (hl HTTPLoader) LoadConfig(ctx caddy.Context) ([]byte, error) { - client, err := hl.makeClient(ctx) - if err != nil { - return nil, err - } - - method := hl.Method - if method == "" { - method = http.MethodGet - } - - req, err := http.NewRequest(method, hl.URL, nil) - if err != nil { - return nil, err - } - req.Header = hl.Headers - - resp, err := client.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - if resp.StatusCode >= 400 { - return nil, fmt.Errorf("server responded with HTTP %d", resp.StatusCode) - } - - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, err - } - - result, warnings, err := adaptByContentType(resp.Header.Get("Content-Type"), body) - if err != nil { - return nil, err - } - for _, warn := range warnings { - ctx.Logger(hl).Warn(warn.String()) - } - - return result, nil -} - -func (hl HTTPLoader) makeClient(ctx caddy.Context) (*http.Client, error) { - client := &http.Client{ - Timeout: time.Duration(hl.Timeout), - } - - if hl.TLS != nil { - var tlsConfig *tls.Config - - // client authentication - if hl.TLS.UseServerIdentity { - certs, err := ctx.IdentityCredentials(ctx.Logger(hl)) - if err != nil { - return nil, fmt.Errorf("getting server identity credentials: %v", err) - } - if tlsConfig == nil { - tlsConfig = new(tls.Config) - } - tlsConfig.Certificates = certs - } else if hl.TLS.ClientCertificateFile != "" && hl.TLS.ClientCertificateKeyFile != "" { - cert, err := tls.LoadX509KeyPair(hl.TLS.ClientCertificateFile, hl.TLS.ClientCertificateKeyFile) - if err != nil { - return nil, err - } - if tlsConfig == nil { - tlsConfig = new(tls.Config) - } - tlsConfig.Certificates = []tls.Certificate{cert} - } - - // trusted server certs - if len(hl.TLS.RootCAPEMFiles) > 0 { - rootPool := x509.NewCertPool() - for _, pemFile := range hl.TLS.RootCAPEMFiles { - pemData, err := ioutil.ReadFile(pemFile) - if err != nil { - return nil, fmt.Errorf("failed reading ca cert: %v", err) - } - rootPool.AppendCertsFromPEM(pemData) - } - if tlsConfig == nil { - tlsConfig = new(tls.Config) - } - tlsConfig.RootCAs = rootPool - } - - client.Transport = &http.Transport{TLSClientConfig: tlsConfig} - } - - return client, nil -} - -var _ caddy.ConfigLoader = (*HTTPLoader)(nil) diff --git a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/load.go b/vendor/github.com/caddyserver/caddy/v2/caddyconfig/load.go deleted file mode 100644 index 7a390d0b..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/caddyconfig/load.go +++ /dev/null @@ -1,170 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package caddyconfig - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "mime" - "net/http" - "strings" - "sync" - - "github.com/caddyserver/caddy/v2" -) - -func init() { - caddy.RegisterModule(adminLoad{}) -} - -// adminLoad is a module that provides the /load endpoint -// for the Caddy admin API. 
The only reason it's not baked -// into the caddy package directly is because of the import -// of the caddyconfig package for its GetAdapter function. -// If the caddy package depends on the caddyconfig package, -// then the caddyconfig package will not be able to import -// the caddy package, and it can more easily cause backward -// edges in the dependency tree (i.e. import cycle). -// Fortunately, the admin API has first-class support for -// adding endpoints from modules. -type adminLoad struct{} - -// CaddyModule returns the Caddy module information. -func (adminLoad) CaddyModule() caddy.ModuleInfo { - return caddy.ModuleInfo{ - ID: "admin.api.load", - New: func() caddy.Module { return new(adminLoad) }, - } -} - -// Routes returns a route for the /load endpoint. -func (al adminLoad) Routes() []caddy.AdminRoute { - return []caddy.AdminRoute{ - { - Pattern: "/load", - Handler: caddy.AdminHandlerFunc(al.handleLoad), - }, - } -} - -// handleLoad replaces the entire current configuration with -// a new one provided in the response body. It supports config -// adapters through the use of the Content-Type header. A -// config that is identical to the currently-running config -// will be a no-op unless Cache-Control: must-revalidate is set. -func (adminLoad) handleLoad(w http.ResponseWriter, r *http.Request) error { - if r.Method != http.MethodPost { - return caddy.APIError{ - HTTPStatus: http.StatusMethodNotAllowed, - Err: fmt.Errorf("method not allowed"), - } - } - - buf := bufPool.Get().(*bytes.Buffer) - buf.Reset() - defer bufPool.Put(buf) - - _, err := io.Copy(buf, r.Body) - if err != nil { - return caddy.APIError{ - HTTPStatus: http.StatusBadRequest, - Err: fmt.Errorf("reading request body: %v", err), - } - } - body := buf.Bytes() - - // if the config is formatted other than Caddy's native - // JSON, we need to adapt it before loading it - if ctHeader := r.Header.Get("Content-Type"); ctHeader != "" { - result, warnings, err := adaptByContentType(ctHeader, body) - if err != nil { - return caddy.APIError{ - HTTPStatus: http.StatusBadRequest, - Err: err, - } - } - if len(warnings) > 0 { - respBody, err := json.Marshal(warnings) - if err != nil { - caddy.Log().Named("admin.api.load").Error(err.Error()) - } - _, _ = w.Write(respBody) - } - body = result - } - - forceReload := r.Header.Get("Cache-Control") == "must-revalidate" - - err = caddy.Load(body, forceReload) - if err != nil { - return caddy.APIError{ - HTTPStatus: http.StatusBadRequest, - Err: fmt.Errorf("loading config: %v", err), - } - } - - caddy.Log().Named("admin.api").Info("load complete") - - return nil -} - -// adaptByContentType adapts body to Caddy JSON using the adapter specified by contenType. -// If contentType is empty or ends with "/json", the input will be returned, as a no-op. 
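The content-type dance described above is compact enough to show standalone: parse the media type, short-circuit on */json, and take everything after the slash as the adapter name. A sketch of that derivation using the standard mime package (the adapter lookup itself is omitted):

```go
package main

import (
	"fmt"
	"mime"
	"strings"
)

// adapterFromContentType derives a config-adapter name from an HTTP
// Content-Type header: an empty header or a "*/json" type means the
// body is already JSON (empty name); otherwise the MIME subtype names
// the adapter, e.g. "text/caddyfile" -> "caddyfile".
func adapterFromContentType(contentType string) (string, error) {
	if contentType == "" {
		return "", nil // assume JSON
	}
	ct, _, err := mime.ParseMediaType(contentType)
	if err != nil {
		return "", fmt.Errorf("invalid Content-Type: %v", err)
	}
	if strings.HasSuffix(ct, "/json") {
		return "", nil // already JSON, nothing to adapt
	}
	slash := strings.Index(ct, "/")
	if slash < 0 {
		return "", fmt.Errorf("malformed Content-Type")
	}
	return ct[slash+1:], nil
}

func main() {
	for _, ct := range []string{"", "application/json", "text/caddyfile; charset=utf-8"} {
		name, err := adapterFromContentType(ct)
		fmt.Printf("%-35q -> adapter=%q err=%v\n", ct, name, err)
	}
}
```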
-func adaptByContentType(contentType string, body []byte) ([]byte, []Warning, error) { - // assume JSON as the default - if contentType == "" { - return body, nil, nil - } - - ct, _, err := mime.ParseMediaType(contentType) - if err != nil { - return nil, nil, caddy.APIError{ - HTTPStatus: http.StatusBadRequest, - Err: fmt.Errorf("invalid Content-Type: %v", err), - } - } - - // if already JSON, no need to adapt - if strings.HasSuffix(ct, "/json") { - return body, nil, nil - } - - // adapter name should be suffix of MIME type - slashIdx := strings.Index(ct, "/") - if slashIdx < 0 { - return nil, nil, fmt.Errorf("malformed Content-Type") - } - - adapterName := ct[slashIdx+1:] - cfgAdapter := GetAdapter(adapterName) - if cfgAdapter == nil { - return nil, nil, fmt.Errorf("unrecognized config adapter '%s'", adapterName) - } - - result, warnings, err := cfgAdapter.Adapt(body, nil) - if err != nil { - return nil, nil, fmt.Errorf("adapting config using %s adapter: %v", adapterName, err) - } - - return result, warnings, nil -} - -var bufPool = sync.Pool{ - New: func() interface{} { - return new(bytes.Buffer) - }, -} diff --git a/vendor/github.com/caddyserver/caddy/v2/cmd/commandfuncs.go b/vendor/github.com/caddyserver/caddy/v2/cmd/commandfuncs.go deleted file mode 100644 index 8f1c68c8..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/cmd/commandfuncs.go +++ /dev/null @@ -1,716 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package caddycmd - -import ( - "bytes" - "context" - "crypto/rand" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "log" - "net" - "net/http" - "os" - "os/exec" - "runtime" - "runtime/debug" - "sort" - "strings" - - "github.com/caddyserver/caddy/v2" - "github.com/caddyserver/caddy/v2/caddyconfig" - "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile" - "go.uber.org/zap" -) - -func cmdStart(fl Flags) (int, error) { - startCmdConfigFlag := fl.String("config") - startCmdConfigAdapterFlag := fl.String("adapter") - startCmdPidfileFlag := fl.String("pidfile") - startCmdWatchFlag := fl.Bool("watch") - startCmdEnvfileFlag := fl.String("envfile") - - // open a listener to which the child process will connect when - // it is ready to confirm that it has successfully started - ln, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - return caddy.ExitCodeFailedStartup, - fmt.Errorf("opening listener for success confirmation: %v", err) - } - defer ln.Close() - - // craft the command with a pingback address and with a - // pipe for its stdin, so we can tell it our confirmation - // code that we expect so that some random port scan at - // the most unfortunate time won't fool us into thinking - // the child succeeded (i.e. 
the alternative is to just - // wait for any connection on our listener, but better to - // ensure it's the process we're expecting - we can be - // sure by giving it some random bytes and having it echo - // them back to us) - cmd := exec.Command(os.Args[0], "run", "--pingback", ln.Addr().String()) - if startCmdConfigFlag != "" { - cmd.Args = append(cmd.Args, "--config", startCmdConfigFlag) - } - if startCmdEnvfileFlag != "" { - cmd.Args = append(cmd.Args, "--envfile", startCmdEnvfileFlag) - } - if startCmdConfigAdapterFlag != "" { - cmd.Args = append(cmd.Args, "--adapter", startCmdConfigAdapterFlag) - } - if startCmdWatchFlag { - cmd.Args = append(cmd.Args, "--watch") - } - if startCmdPidfileFlag != "" { - cmd.Args = append(cmd.Args, "--pidfile", startCmdPidfileFlag) - } - stdinpipe, err := cmd.StdinPipe() - if err != nil { - return caddy.ExitCodeFailedStartup, - fmt.Errorf("creating stdin pipe: %v", err) - } - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - - // generate the random bytes we'll send to the child process - expect := make([]byte, 32) - _, err = rand.Read(expect) - if err != nil { - return caddy.ExitCodeFailedStartup, fmt.Errorf("generating random confirmation bytes: %v", err) - } - - // begin writing the confirmation bytes to the child's - // stdin; use a goroutine since the child hasn't been - // started yet, and writing synchronously would result - // in a deadlock - go func() { - _, _ = stdinpipe.Write(expect) - stdinpipe.Close() - }() - - // start the process - err = cmd.Start() - if err != nil { - return caddy.ExitCodeFailedStartup, fmt.Errorf("starting caddy process: %v", err) - } - - // there are two ways we know we're done: either - // the process will connect to our listener, or - // it will exit with an error - success, exit := make(chan struct{}), make(chan error) - - // in one goroutine, we await the success of the child process - go func() { - for { - conn, err := ln.Accept() - if err != nil { - if !errors.Is(err, net.ErrClosed) { - log.Println(err) - } - break - } - err = handlePingbackConn(conn, expect) - if err == nil { - close(success) - break - } - log.Println(err) - } - }() - - // in another goroutine, we await the failure of the child process - go func() { - err := cmd.Wait() // don't send on this line! 
Wait blocks, but send starts before it unblocks - exit <- err // sending on separate line ensures select won't trigger until after Wait unblocks - }() - - // when one of the goroutines unblocks, we're done and can exit - select { - case <-success: - fmt.Printf("Successfully started Caddy (pid=%d) - Caddy is running in the background\n", cmd.Process.Pid) - case err := <-exit: - return caddy.ExitCodeFailedStartup, - fmt.Errorf("caddy process exited with error: %v", err) - } - - return caddy.ExitCodeSuccess, nil -} - -func cmdRun(fl Flags) (int, error) { - caddy.TrapSignals() - - runCmdConfigFlag := fl.String("config") - runCmdConfigAdapterFlag := fl.String("adapter") - runCmdResumeFlag := fl.Bool("resume") - runCmdLoadEnvfileFlag := fl.String("envfile") - runCmdPrintEnvFlag := fl.Bool("environ") - runCmdWatchFlag := fl.Bool("watch") - runCmdPidfileFlag := fl.String("pidfile") - runCmdPingbackFlag := fl.String("pingback") - - // load all additional envs as soon as possible - if runCmdLoadEnvfileFlag != "" { - if err := loadEnvFromFile(runCmdLoadEnvfileFlag); err != nil { - return caddy.ExitCodeFailedStartup, - fmt.Errorf("loading additional environment variables: %v", err) - } - } - - // if we are supposed to print the environment, do that first - if runCmdPrintEnvFlag { - printEnvironment() - } - - // load the config, depending on flags - var config []byte - var err error - if runCmdResumeFlag { - config, err = ioutil.ReadFile(caddy.ConfigAutosavePath) - if os.IsNotExist(err) { - // not a bad error; just can't resume if autosave file doesn't exist - caddy.Log().Info("no autosave file exists", zap.String("autosave_file", caddy.ConfigAutosavePath)) - runCmdResumeFlag = false - } else if err != nil { - return caddy.ExitCodeFailedStartup, err - } else { - if runCmdConfigFlag == "" { - caddy.Log().Info("resuming from last configuration", - zap.String("autosave_file", caddy.ConfigAutosavePath)) - } else { - // if they also specified a config file, user should be aware that we're not - // using it (doing so could lead to data/config loss by overwriting!) - caddy.Log().Warn("--config and --resume flags were used together; ignoring --config and resuming from last configuration", - zap.String("autosave_file", caddy.ConfigAutosavePath)) - } - } - } - // we don't use 'else' here since this value might have been changed in 'if' block; i.e. 
not mutually exclusive - var configFile string - if !runCmdResumeFlag { - config, configFile, err = loadConfig(runCmdConfigFlag, runCmdConfigAdapterFlag) - if err != nil { - return caddy.ExitCodeFailedStartup, err - } - } - - // run the initial config - err = caddy.Load(config, true) - if err != nil { - return caddy.ExitCodeFailedStartup, fmt.Errorf("loading initial config: %v", err) - } - caddy.Log().Info("serving initial configuration") - - // if we are to report to another process the successful start - // of the server, do so now by echoing back contents of stdin - if runCmdPingbackFlag != "" { - confirmationBytes, err := ioutil.ReadAll(os.Stdin) - if err != nil { - return caddy.ExitCodeFailedStartup, - fmt.Errorf("reading confirmation bytes from stdin: %v", err) - } - conn, err := net.Dial("tcp", runCmdPingbackFlag) - if err != nil { - return caddy.ExitCodeFailedStartup, - fmt.Errorf("dialing confirmation address: %v", err) - } - defer conn.Close() - _, err = conn.Write(confirmationBytes) - if err != nil { - return caddy.ExitCodeFailedStartup, - fmt.Errorf("writing confirmation bytes to %s: %v", runCmdPingbackFlag, err) - } - } - - // if enabled, reload config file automatically on changes - // (this better only be used in dev!) - if runCmdWatchFlag { - go watchConfigFile(configFile, runCmdConfigAdapterFlag) - } - - // create pidfile - if runCmdPidfileFlag != "" { - err := caddy.PIDFile(runCmdPidfileFlag) - if err != nil { - caddy.Log().Error("unable to write PID file", - zap.String("pidfile", runCmdPidfileFlag), - zap.Error(err)) - } - } - - // warn if the environment does not provide enough information about the disk - hasXDG := os.Getenv("XDG_DATA_HOME") != "" && - os.Getenv("XDG_CONFIG_HOME") != "" && - os.Getenv("XDG_CACHE_HOME") != "" - switch runtime.GOOS { - case "windows": - if os.Getenv("HOME") == "" && os.Getenv("USERPROFILE") == "" && !hasXDG { - caddy.Log().Warn("neither HOME nor USERPROFILE environment variables are set - please fix; some assets might be stored in ./caddy") - } - case "plan9": - if os.Getenv("home") == "" && !hasXDG { - caddy.Log().Warn("$home environment variable is empty - please fix; some assets might be stored in ./caddy") - } - default: - if os.Getenv("HOME") == "" && !hasXDG { - caddy.Log().Warn("$HOME environment variable is empty - please fix; some assets might be stored in ./caddy") - } - } - - select {} -} - -func cmdStop(fl Flags) (int, error) { - stopCmdAddrFlag := fl.String("address") - - err := apiRequest(stopCmdAddrFlag, http.MethodPost, "/stop", nil, nil) - if err != nil { - caddy.Log().Warn("failed using API to stop instance", zap.Error(err)) - return caddy.ExitCodeFailedStartup, err - } - - return caddy.ExitCodeSuccess, nil -} - -func cmdReload(fl Flags) (int, error) { - reloadCmdConfigFlag := fl.String("config") - reloadCmdConfigAdapterFlag := fl.String("adapter") - reloadCmdAddrFlag := fl.String("address") - reloadCmdForceFlag := fl.Bool("force") - - // get the config in caddy's native format - config, configFile, err := loadConfig(reloadCmdConfigFlag, reloadCmdConfigAdapterFlag) - if err != nil { - return caddy.ExitCodeFailedStartup, err - } - if configFile == "" { - return caddy.ExitCodeFailedStartup, fmt.Errorf("no config file to load") - } - - // get the address of the admin listener; use flag if specified - adminAddr := reloadCmdAddrFlag - if adminAddr == "" && len(config) > 0 { - var tmpStruct struct { - Admin caddy.AdminConfig `json:"admin"` - } - err = json.Unmarshal(config, &tmpStruct) - if err != nil { - return 
caddy.ExitCodeFailedStartup, - fmt.Errorf("unmarshaling admin listener address from config: %v", err) - } - adminAddr = tmpStruct.Admin.Listen - } - - // optionally force a config reload - headers := make(http.Header) - if reloadCmdForceFlag { - headers.Set("Cache-Control", "must-revalidate") - } - - err = apiRequest(adminAddr, http.MethodPost, "/load", headers, bytes.NewReader(config)) - if err != nil { - return caddy.ExitCodeFailedStartup, fmt.Errorf("sending configuration to instance: %v", err) - } - - return caddy.ExitCodeSuccess, nil -} - -func cmdVersion(_ Flags) (int, error) { - fmt.Println(CaddyVersion()) - return caddy.ExitCodeSuccess, nil -} - -func cmdBuildInfo(fl Flags) (int, error) { - bi, ok := debug.ReadBuildInfo() - if !ok { - return caddy.ExitCodeFailedStartup, fmt.Errorf("no build information") - } - - fmt.Printf("go_version: %s\n", runtime.Version()) - fmt.Printf("go_os: %s\n", runtime.GOOS) - fmt.Printf("go_arch: %s\n", runtime.GOARCH) - fmt.Printf("path: %s\n", bi.Path) - fmt.Printf("main: %s %s %s\n", bi.Main.Path, bi.Main.Version, bi.Main.Sum) - fmt.Println("dependencies:") - - for _, goMod := range bi.Deps { - fmt.Printf("%s %s %s", goMod.Path, goMod.Version, goMod.Sum) - if goMod.Replace != nil { - fmt.Printf(" => %s %s %s", goMod.Replace.Path, goMod.Replace.Version, goMod.Replace.Sum) - } - fmt.Println() - } - return caddy.ExitCodeSuccess, nil -} - -func cmdListModules(fl Flags) (int, error) { - packages := fl.Bool("packages") - versions := fl.Bool("versions") - - printModuleInfo := func(mi moduleInfo) { - fmt.Print(mi.caddyModuleID) - if versions && mi.goModule != nil { - fmt.Print(" " + mi.goModule.Version) - } - if packages && mi.goModule != nil { - fmt.Print(" " + mi.goModule.Path) - if mi.goModule.Replace != nil { - fmt.Print(" => " + mi.goModule.Replace.Path) - } - } - if mi.err != nil { - fmt.Printf(" [%v]", mi.err) - } - fmt.Println() - } - - // organize modules by whether they come with the standard distribution - standard, nonstandard, unknown, err := getModules() - if err != nil { - // oh well, just print the module IDs and exit - for _, m := range caddy.Modules() { - fmt.Println(m) - } - return caddy.ExitCodeSuccess, nil - } - - if len(standard) > 0 { - for _, mod := range standard { - printModuleInfo(mod) - } - } - fmt.Printf("\n Standard modules: %d\n", len(standard)) - if len(nonstandard) > 0 { - if len(standard) > 0 { - fmt.Println() - } - for _, mod := range nonstandard { - printModuleInfo(mod) - } - } - fmt.Printf("\n Non-standard modules: %d\n", len(nonstandard)) - if len(unknown) > 0 { - if len(standard) > 0 || len(nonstandard) > 0 { - fmt.Println() - } - for _, mod := range unknown { - printModuleInfo(mod) - } - } - fmt.Printf("\n Unknown modules: %d\n", len(unknown)) - - return caddy.ExitCodeSuccess, nil -} - -func cmdEnviron(_ Flags) (int, error) { - printEnvironment() - return caddy.ExitCodeSuccess, nil -} - -func cmdAdaptConfig(fl Flags) (int, error) { - adaptCmdInputFlag := fl.String("config") - adaptCmdAdapterFlag := fl.String("adapter") - adaptCmdPrettyFlag := fl.Bool("pretty") - adaptCmdValidateFlag := fl.Bool("validate") - - // if no input file was specified, try a default - // Caddyfile if the Caddyfile adapter is plugged in - if adaptCmdInputFlag == "" && caddyconfig.GetAdapter("caddyfile") != nil { - _, err := os.Stat("Caddyfile") - if err == nil { - // default Caddyfile exists - adaptCmdInputFlag = "Caddyfile" - caddy.Log().Info("using adjacent Caddyfile") - } else if !os.IsNotExist(err) { - // default Caddyfile exists, but error 
accessing it - return caddy.ExitCodeFailedStartup, fmt.Errorf("accessing default Caddyfile: %v", err) - } - } - - if adaptCmdInputFlag == "" { - return caddy.ExitCodeFailedStartup, - fmt.Errorf("input file required when there is no Caddyfile in current directory (use --config flag)") - } - if adaptCmdAdapterFlag == "" { - return caddy.ExitCodeFailedStartup, - fmt.Errorf("adapter name is required (use --adapt flag or leave unspecified for default)") - } - - cfgAdapter := caddyconfig.GetAdapter(adaptCmdAdapterFlag) - if cfgAdapter == nil { - return caddy.ExitCodeFailedStartup, - fmt.Errorf("unrecognized config adapter: %s", adaptCmdAdapterFlag) - } - - input, err := ioutil.ReadFile(adaptCmdInputFlag) - if err != nil { - return caddy.ExitCodeFailedStartup, - fmt.Errorf("reading input file: %v", err) - } - - opts := map[string]interface{}{"filename": adaptCmdInputFlag} - - adaptedConfig, warnings, err := cfgAdapter.Adapt(input, opts) - if err != nil { - return caddy.ExitCodeFailedStartup, err - } - - if adaptCmdPrettyFlag { - var prettyBuf bytes.Buffer - err = json.Indent(&prettyBuf, adaptedConfig, "", "\t") - if err != nil { - return caddy.ExitCodeFailedStartup, err - } - adaptedConfig = prettyBuf.Bytes() - } - - // print result to stdout - fmt.Println(string(adaptedConfig)) - - // print warnings to stderr - for _, warn := range warnings { - msg := warn.Message - if warn.Directive != "" { - msg = fmt.Sprintf("%s: %s", warn.Directive, warn.Message) - } - fmt.Fprintf(os.Stderr, "[WARNING][%s] %s:%d: %s\n", adaptCmdAdapterFlag, warn.File, warn.Line, msg) - } - - // validate output if requested - if adaptCmdValidateFlag { - var cfg *caddy.Config - err = json.Unmarshal(adaptedConfig, &cfg) - if err != nil { - return caddy.ExitCodeFailedStartup, fmt.Errorf("decoding config: %v", err) - } - err = caddy.Validate(cfg) - if err != nil { - return caddy.ExitCodeFailedStartup, fmt.Errorf("validation: %v", err) - } - } - - return caddy.ExitCodeSuccess, nil -} - -func cmdValidateConfig(fl Flags) (int, error) { - validateCmdConfigFlag := fl.String("config") - validateCmdAdapterFlag := fl.String("adapter") - - input, _, err := loadConfig(validateCmdConfigFlag, validateCmdAdapterFlag) - if err != nil { - return caddy.ExitCodeFailedStartup, err - } - input = caddy.RemoveMetaFields(input) - - var cfg *caddy.Config - err = json.Unmarshal(input, &cfg) - if err != nil { - return caddy.ExitCodeFailedStartup, fmt.Errorf("decoding config: %v", err) - } - - err = caddy.Validate(cfg) - if err != nil { - return caddy.ExitCodeFailedStartup, err - } - - fmt.Println("Valid configuration") - - return caddy.ExitCodeSuccess, nil -} - -func cmdFmt(fl Flags) (int, error) { - formatCmdConfigFile := fl.Arg(0) - if formatCmdConfigFile == "" { - formatCmdConfigFile = "Caddyfile" - } - - // as a special case, read from stdin if the file name is "-" - if formatCmdConfigFile == "-" { - input, err := ioutil.ReadAll(os.Stdin) - if err != nil { - return caddy.ExitCodeFailedStartup, - fmt.Errorf("reading stdin: %v", err) - } - fmt.Print(string(caddyfile.Format(input))) - return caddy.ExitCodeSuccess, nil - } - - input, err := ioutil.ReadFile(formatCmdConfigFile) - if err != nil { - return caddy.ExitCodeFailedStartup, - fmt.Errorf("reading input file: %v", err) - } - - output := caddyfile.Format(input) - - if fl.Bool("overwrite") { - if err := ioutil.WriteFile(formatCmdConfigFile, output, 0600); err != nil { - return caddy.ExitCodeFailedStartup, nil - } - } else { - fmt.Print(string(output)) - } - - return caddy.ExitCodeSuccess, nil -} - 
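The start/run pingback handshake above (random bytes written down the child's stdin, echoed back to the parent's loopback listener) is easy to prototype in-process. In this sketch the "child" is just a goroutine, standing in for the exec'd `caddy run --pingback` process:

```go
package main

import (
	"bytes"
	"crypto/rand"
	"fmt"
	"io"
	"net"
)

func main() {
	// Parent: listen on an ephemeral loopback port for the confirmation.
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	defer ln.Close()

	// Random bytes only the real child will know; a stray port scan
	// cannot fake a successful startup.
	expect := make([]byte, 32)
	if _, err := rand.Read(expect); err != nil {
		panic(err)
	}

	// "Child": in the real flow this is the spawned process, which reads
	// the bytes from stdin and dials --pingback once it is serving.
	go func(addr string, confirmation []byte) {
		conn, err := net.Dial("tcp", addr)
		if err != nil {
			return
		}
		defer conn.Close()
		conn.Write(confirmation)
	}(ln.Addr().String(), expect)

	conn, err := ln.Accept()
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	got, err := io.ReadAll(conn)
	if err != nil {
		panic(err)
	}
	if bytes.Equal(got, expect) {
		fmt.Println("child confirmed successful start")
	} else {
		fmt.Println("unexpected connection; ignoring")
	}
}
```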
-func cmdHelp(fl Flags) (int, error) { - const fullDocs = `Full documentation is available at: -https://caddyserver.com/docs/command-line` - - args := fl.Args() - if len(args) == 0 { - s := `Caddy is an extensible server platform. - -usage: - caddy [] - -commands: -` - keys := make([]string, 0, len(commands)) - for k := range commands { - keys = append(keys, k) - } - sort.Strings(keys) - for _, k := range keys { - cmd := commands[k] - short := strings.TrimSuffix(cmd.Short, ".") - s += fmt.Sprintf(" %-15s %s\n", cmd.Name, short) - } - - s += "\nUse 'caddy help ' for more information about a command.\n" - s += "\n" + fullDocs + "\n" - - fmt.Print(s) - - return caddy.ExitCodeSuccess, nil - } else if len(args) > 1 { - return caddy.ExitCodeFailedStartup, fmt.Errorf("can only give help with one command") - } - - subcommand, ok := commands[args[0]] - if !ok { - return caddy.ExitCodeFailedStartup, fmt.Errorf("unknown command: %s", args[0]) - } - - helpText := strings.TrimSpace(subcommand.Long) - if helpText == "" { - helpText = subcommand.Short - if !strings.HasSuffix(helpText, ".") { - helpText += "." - } - } - - result := fmt.Sprintf("%s\n\nusage:\n caddy %s %s\n", - helpText, - subcommand.Name, - strings.TrimSpace(subcommand.Usage), - ) - - if help := flagHelp(subcommand.Flags); help != "" { - result += fmt.Sprintf("\nflags:\n%s", help) - } - - result += "\n" + fullDocs + "\n" - - fmt.Print(result) - - return caddy.ExitCodeSuccess, nil -} - -// apiRequest makes an API request to the endpoint adminAddr with the -// given HTTP method and request URI. If body is non-nil, it will be -// assumed to be Content-Type application/json. -func apiRequest(adminAddr, method, uri string, headers http.Header, body io.Reader) error { - // parse the admin address - if adminAddr == "" { - adminAddr = caddy.DefaultAdminListen - } - parsedAddr, err := caddy.ParseNetworkAddress(adminAddr) - if err != nil || parsedAddr.PortRangeSize() > 1 { - return fmt.Errorf("invalid admin address %s: %v", adminAddr, err) - } - origin := parsedAddr.JoinHostPort(0) - if parsedAddr.IsUnixNetwork() { - origin = "unixsocket" // hack so that http.NewRequest() is happy - } - - // form the request - req, err := http.NewRequest(method, "http://"+origin+uri, body) - if err != nil { - return fmt.Errorf("making request: %v", err) - } - if parsedAddr.IsUnixNetwork() { - // When listening on a unix socket, the admin endpoint doesn't - // accept any Host header because there is no host:port for - // a unix socket's address. The server's host check is fairly - // strict for security reasons, so we don't allow just any - // Host header. For unix sockets, the Host header must be - // empty. Unfortunately, Go makes it impossible to make HTTP - // requests with an empty Host header... except with this one - // weird trick. (Hopefully they don't fix it. It's already - // hard enough to use HTTP over unix sockets.) 
- // - // An equivalent curl command would be something like: - // $ curl --unix-socket caddy.sock http:/:$REQUEST_URI - req.URL.Host = " " - req.Host = "" - } else { - req.Header.Set("Origin", origin) - } - if body != nil { - req.Header.Set("Content-Type", "application/json") - } - for k, v := range headers { - req.Header[k] = v - } - - // make an HTTP client that dials our network type, since admin - // endpoints aren't always TCP, which is what the default transport - // expects; reuse is not of particular concern here - client := http.Client{ - Transport: &http.Transport{ - DialContext: func(_ context.Context, _, _ string) (net.Conn, error) { - return net.Dial(parsedAddr.Network, parsedAddr.JoinHostPort(0)) - }, - }, - } - - resp, err := client.Do(req) - if err != nil { - return fmt.Errorf("performing request: %v", err) - } - defer resp.Body.Close() - - // if it didn't work, let the user know - if resp.StatusCode >= 400 { - respBody, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1024*10)) - if err != nil { - return fmt.Errorf("HTTP %d: reading error message: %v", resp.StatusCode, err) - } - return fmt.Errorf("caddy responded with error: HTTP %d: %s", resp.StatusCode, respBody) - } - - return nil -} - -type moduleInfo struct { - caddyModuleID string - goModule *debug.Module - err error -} diff --git a/vendor/github.com/caddyserver/caddy/v2/cmd/commands.go b/vendor/github.com/caddyserver/caddy/v2/cmd/commands.go deleted file mode 100644 index ccb82b11..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/cmd/commands.go +++ /dev/null @@ -1,359 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package caddycmd - -import ( - "flag" - "regexp" -) - -// Command represents a subcommand. Name, Func, -// and Short are required. -type Command struct { - // The name of the subcommand. Must conform to the - // format described by the RegisterCommand() godoc. - // Required. - Name string - - // Func is a function that executes a subcommand using - // the parsed flags. It returns an exit code and any - // associated error. - // Required. - Func CommandFunc - - // Usage is a brief message describing the syntax of - // the subcommand's flags and args. Use [] to indicate - // optional parameters and <> to enclose literal values - // intended to be replaced by the user. Do not prefix - // the string with "caddy" or the name of the command - // since these will be prepended for you; only include - // the actual parameters for this command. - Usage string - - // Short is a one-line message explaining what the - // command does. Should not end with punctuation. - // Required. - Short string - - // Long is the full help text shown to the user. - // Will be trimmed of whitespace on both ends before - // being printed. - Long string - - // Flags is the flagset for command. - Flags *flag.FlagSet -} - -// CommandFunc is a command's function. It runs the -// command and returns the proper exit code along with -// any error that occurred. 
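The custom-dialer trick in apiRequest above is worth isolating: an http.Client whose transport ignores the URL's host and always dials one fixed admin address, which is what makes non-TCP admin endpoints reachable. A sketch, assuming a default admin endpoint on localhost:2019:

package main

import (
	"context"
	"fmt"
	"io"
	"log"
	"net"
	"net/http"
)

// adminClient returns a client pinned to one admin address; network may be
// "tcp", "unix", etc., exactly as accepted by net.Dial.
func adminClient(network, addr string) *http.Client {
	return &http.Client{
		Transport: &http.Transport{
			// Ignore whatever host the URL names and dial the admin socket.
			DialContext: func(_ context.Context, _, _ string) (net.Conn, error) {
				return net.Dial(network, addr)
			},
		},
	}
}

func main() {
	client := adminClient("tcp", "localhost:2019") // assumed default admin address
	resp, err := client.Get("http://localhost:2019/config/")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(io.LimitReader(resp.Body, 10*1024))
	fmt.Println(resp.Status, string(body))
}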
-type CommandFunc func(Flags) (int, error) - -// Commands returns a list of commands initialised by -// RegisterCommand -func Commands() map[string]Command { - return commands -} - -var commands = make(map[string]Command) - -func init() { - RegisterCommand(Command{ - Name: "help", - Func: cmdHelp, - Usage: "<command>", - Short: "Shows help for a Caddy subcommand", - }) - - RegisterCommand(Command{ - Name: "start", - Func: cmdStart, - Usage: "[--config <path> [--adapter <name>]] [--envfile <path>] [--watch] [--pidfile <file>]", - Short: "Starts the Caddy process in the background and then returns", - Long: ` -Starts the Caddy process, optionally bootstrapped with an initial config file. -This command unblocks after the server starts running or fails to run. - -If --envfile is specified, an environment file with environment variables in -the KEY=VALUE format will be loaded into the Caddy process. - -On Windows, the spawned child process will remain attached to the terminal, so -closing the window will forcefully stop Caddy; to avoid forgetting this, try -using 'caddy run' instead to keep it in the foreground.`, - Flags: func() *flag.FlagSet { - fs := flag.NewFlagSet("start", flag.ExitOnError) - fs.String("config", "", "Configuration file") - fs.String("envfile", "", "Environment file to load") - fs.String("adapter", "", "Name of config adapter to apply") - fs.String("pidfile", "", "Path of file to which to write process ID") - fs.Bool("watch", false, "Reload changed config file automatically") - return fs - }(), - }) - - RegisterCommand(Command{ - Name: "run", - Func: cmdRun, - Usage: "[--config <path> [--adapter <name>]] [--envfile <path>] [--environ] [--resume] [--watch] [--pidfile <file>]", - Short: `Starts the Caddy process and blocks indefinitely`, - Long: ` -Starts the Caddy process, optionally bootstrapped with an initial config file, -and blocks indefinitely until the server is stopped; i.e. runs Caddy in -"daemon" mode (foreground). - -If a config file is specified, it will be applied immediately after the process -is running. If the config file is not in Caddy's native JSON format, you can -specify an adapter with --adapter to adapt the given config file to -Caddy's native format. The config adapter must be a registered module. Any -warnings will be printed to the log, but beware that any adaptation without -errors will immediately be used. If you want to review the results of the -adaptation first, use the 'adapt' subcommand. - -As a special case, if the current working directory has a file called -"Caddyfile" and the caddyfile config adapter is plugged in (default), then -that file will be loaded and used to configure Caddy, even without any command -line flags. - -If --envfile is specified, an environment file with environment variables in -the KEY=VALUE format will be loaded into the Caddy process. - -If --environ is specified, the environment as seen by the Caddy process will -be printed before starting. This is the same as the environ command but does -not quit after printing, and can be useful for troubleshooting. - -The --resume flag will override the --config flag if there is a config auto- -save file. It is not an error if --resume is used and no autosave file exists. - -If --watch is specified, the config file will be loaded automatically after -changes. ⚠️ This is dangerous in production!
Only use this option in a local -development environment.`, - Flags: func() *flag.FlagSet { - fs := flag.NewFlagSet("run", flag.ExitOnError) - fs.String("config", "", "Configuration file") - fs.String("adapter", "", "Name of config adapter to apply") - fs.String("envfile", "", "Environment file to load") - fs.Bool("environ", false, "Print environment") - fs.Bool("resume", false, "Use saved config, if any (and prefer over --config file)") - fs.Bool("watch", false, "Watch config file for changes and reload it automatically") - fs.String("pidfile", "", "Path of file to which to write process ID") - fs.String("pingback", "", "Echo confirmation bytes to this address on success") - return fs - }(), - }) - - RegisterCommand(Command{ - Name: "stop", - Func: cmdStop, - Short: "Gracefully stops a started Caddy process", - Long: ` -Stops the background Caddy process as gracefully as possible. - -It requires that the admin API is enabled and accessible, since it will -use the API's /stop endpoint. The address of this request can be -customized using the --address flag if it is not the default.`, - Flags: func() *flag.FlagSet { - fs := flag.NewFlagSet("stop", flag.ExitOnError) - fs.String("address", "", "The address to use to reach the admin API endpoint, if not the default") - return fs - }(), - }) - - RegisterCommand(Command{ - Name: "reload", - Func: cmdReload, - Usage: "--config <path> [--adapter <name>] [--address <interface>]", - Short: "Changes the config of the running Caddy instance", - Long: ` -Gives the running Caddy instance a new configuration. This has the same effect -as POSTing a document to the /load API endpoint, but is convenient for simple -workflows revolving around config files. - -Since the admin endpoint is configurable, the endpoint configuration is loaded -from the --address flag if specified; otherwise it is loaded from the given -config file; otherwise the default is assumed.`, - Flags: func() *flag.FlagSet { - fs := flag.NewFlagSet("reload", flag.ExitOnError) - fs.String("config", "", "Configuration file (required)") - fs.String("adapter", "", "Name of config adapter to apply") - fs.String("address", "", "Address of the administration listener, if different from config") - fs.Bool("force", false, "Force config reload, even if it is the same") - return fs - }(), - }) - - RegisterCommand(Command{ - Name: "version", - Func: cmdVersion, - Short: "Prints the version", - }) - - RegisterCommand(Command{ - Name: "list-modules", - Func: cmdListModules, - Usage: "[--packages] [--versions]", - Short: "Lists the installed Caddy modules", - Flags: func() *flag.FlagSet { - fs := flag.NewFlagSet("list-modules", flag.ExitOnError) - fs.Bool("packages", false, "Print package paths") - fs.Bool("versions", false, "Print version information") - return fs - }(), - }) - - RegisterCommand(Command{ - Name: "build-info", - Func: cmdBuildInfo, - Short: "Prints information about this build", - }) - - RegisterCommand(Command{ - Name: "environ", - Func: cmdEnviron, - Short: "Prints the environment", - }) - - RegisterCommand(Command{ - Name: "adapt", - Func: cmdAdaptConfig, - Usage: "--config <path> [--adapter <name>] [--pretty] [--validate]", - Short: "Adapts a configuration to Caddy's native JSON", - Long: ` -Adapts a configuration to Caddy's native JSON format and writes the -output to stdout, along with any warnings to stderr. - -If --pretty is specified, the output will be formatted with indentation -for human readability. - -If --validate is used, the adapted config will be checked for validity.
-If the config is invalid, an error will be printed to stderr and a non- -zero exit status will be returned.`, - Flags: func() *flag.FlagSet { - fs := flag.NewFlagSet("adapt", flag.ExitOnError) - fs.String("config", "", "Configuration file to adapt (required)") - fs.String("adapter", "caddyfile", "Name of config adapter") - fs.Bool("pretty", false, "Format the output for human readability") - fs.Bool("validate", false, "Validate the output") - return fs - }(), - }) - - RegisterCommand(Command{ - Name: "validate", - Func: cmdValidateConfig, - Usage: "--config <path> [--adapter <name>]", - Short: "Tests whether a configuration file is valid", - Long: ` -Loads and provisions the provided config, but does not start running it. -This reveals any errors with the configuration through the loading and -provisioning stages.`, - Flags: func() *flag.FlagSet { - fs := flag.NewFlagSet("load", flag.ExitOnError) - fs.String("config", "", "Input configuration file") - fs.String("adapter", "", "Name of config adapter") - return fs - }(), - }) - - RegisterCommand(Command{ - Name: "fmt", - Func: cmdFmt, - Usage: "[--overwrite] [<path>]", - Short: "Formats a Caddyfile", - Long: ` -Formats the Caddyfile by adding proper indentation and spaces to improve -human readability. It prints the result to stdout. - -If --overwrite is specified, the output will be written to the config file -directly instead of printing it. - -If you wish to use stdin instead of a regular file, use - as the path. -When reading from stdin, the --overwrite flag has no effect: the result -is always printed to stdout.`, - Flags: func() *flag.FlagSet { - fs := flag.NewFlagSet("format", flag.ExitOnError) - fs.Bool("overwrite", false, "Overwrite the input file with the results") - return fs - }(), - }) - - RegisterCommand(Command{ - Name: "upgrade", - Func: cmdUpgrade, - Short: "Upgrade Caddy (EXPERIMENTAL)", - Long: ` -Downloads an updated Caddy binary with the same modules/plugins at the -latest versions. EXPERIMENTAL: May be changed or removed.`, - }) - - RegisterCommand(Command{ - Name: "add-package", - Func: cmdAddPackage, - Usage: "<packages...>", - Short: "Adds Caddy packages (EXPERIMENTAL)", - Long: ` -Downloads an updated Caddy binary with the specified packages (module/plugin) -added. Retains existing packages. Returns an error if any of the packages are -already included. EXPERIMENTAL: May be changed or removed. -`, - }) - - RegisterCommand(Command{ - Name: "remove-package", - Func: cmdRemovePackage, - Usage: "<packages...>", - Short: "Removes Caddy packages (EXPERIMENTAL)", - Long: ` -Downloads an updated Caddy binary without the specified packages (module/plugin). -Returns an error if any of the packages are not included. -EXPERIMENTAL: May be changed or removed. -`, - }) - -} - -// RegisterCommand registers the command cmd. -// cmd.Name must be unique and conform to the -// following format: -// -// - lowercase -// - alphanumeric and hyphen characters only -// - cannot start or end with a hyphen -// - hyphen cannot be adjacent to another hyphen -// -// This function panics if the name is already registered, -// if the name does not meet the described format, or if -// any of the fields are missing from cmd. -// -// This function should be used in init().
-func RegisterCommand(cmd Command) { - if cmd.Name == "" { - panic("command name is required") - } - if cmd.Func == nil { - panic("command function missing") - } - if cmd.Short == "" { - panic("command short string is required") - } - if _, exists := commands[cmd.Name]; exists { - panic("command already registered: " + cmd.Name) - } - if !commandNameRegex.MatchString(cmd.Name) { - panic("invalid command name") - } - commands[cmd.Name] = cmd -} - -var commandNameRegex = regexp.MustCompile(`^[a-z0-9]$|^([a-z0-9]+-?[a-z0-9]*)+[a-z0-9]$`) diff --git a/vendor/github.com/caddyserver/caddy/v2/cmd/main.go b/vendor/github.com/caddyserver/caddy/v2/cmd/main.go deleted file mode 100644 index 8a9a2896..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/cmd/main.go +++ /dev/null @@ -1,457 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package caddycmd - -import ( - "bufio" - "bytes" - "flag" - "fmt" - "io" - "io/ioutil" - "log" - "net" - "os" - "path/filepath" - "runtime" - "runtime/debug" - "strconv" - "strings" - "time" - - "github.com/caddyserver/caddy/v2" - "github.com/caddyserver/caddy/v2/caddyconfig" - "github.com/caddyserver/certmagic" - "go.uber.org/zap" -) - -func init() { - // set a fitting User-Agent for ACME requests - goModule := caddy.GoModule() - cleanModVersion := strings.TrimPrefix(goModule.Version, "v") - certmagic.UserAgent = "Caddy/" + cleanModVersion - - // by using Caddy, user indicates agreement to CA terms - // (very important, or ACME account creation will fail!) - certmagic.DefaultACME.Agreed = true -} - -// Main implements the main function of the caddy command. -// Call this if Caddy is to be the main() of your program. -func Main() { - switch len(os.Args) { - case 0: - fmt.Printf("[FATAL] no arguments provided by OS; args[0] must be command\n") - os.Exit(caddy.ExitCodeFailedStartup) - case 1: - os.Args = append(os.Args, "help") - } - - subcommandName := os.Args[1] - subcommand, ok := commands[subcommandName] - if !ok { - if strings.HasPrefix(os.Args[1], "-") { - // user probably forgot to type the subcommand - fmt.Println("[ERROR] first argument must be a subcommand; see 'caddy help'") - } else { - fmt.Printf("[ERROR] '%s' is not a recognized subcommand; see 'caddy help'\n", os.Args[1]) - } - os.Exit(caddy.ExitCodeFailedStartup) - } - - fs := subcommand.Flags - if fs == nil { - fs = flag.NewFlagSet(subcommand.Name, flag.ExitOnError) - } - - err := fs.Parse(os.Args[2:]) - if err != nil { - fmt.Println(err) - os.Exit(caddy.ExitCodeFailedStartup) - } - - exitCode, err := subcommand.Func(Flags{fs}) - if err != nil { - fmt.Fprintf(os.Stderr, "%s: %v\n", subcommand.Name, err) - } - - os.Exit(exitCode) -} - -// handlePingbackConn reads from conn and ensures it matches -// the bytes in expect, or returns an error if it doesn't. 
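RegisterCommand above is the entire plugin-facing surface of this file. A sketch of how a third-party module could have added a subcommand from its init(); the package, command name, and function here are hypothetical:

package hello // hypothetical plugin package

import (
	"flag"
	"fmt"

	"github.com/caddyserver/caddy/v2"
	caddycmd "github.com/caddyserver/caddy/v2/cmd"
)

func init() {
	caddycmd.RegisterCommand(caddycmd.Command{
		// Name obeys the documented format: lowercase alphanumerics,
		// with single interior hyphens only.
		Name:  "hello-world",
		Func:  cmdHelloWorld,
		Usage: "[--name <who>]",
		Short: "Prints a greeting",
		Flags: func() *flag.FlagSet {
			fs := flag.NewFlagSet("hello-world", flag.ExitOnError)
			fs.String("name", "world", "Who to greet")
			return fs
		}(),
	})
}

func cmdHelloWorld(fl caddycmd.Flags) (int, error) {
	fmt.Printf("hello, %s!\n", fl.String("name"))
	return caddy.ExitCodeSuccess, nil
}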
-func handlePingbackConn(conn net.Conn, expect []byte) error { - defer conn.Close() - confirmationBytes, err := ioutil.ReadAll(io.LimitReader(conn, 32)) - if err != nil { - return err - } - if !bytes.Equal(confirmationBytes, expect) { - return fmt.Errorf("wrong confirmation: %x", confirmationBytes) - } - return nil -} - -// loadConfig loads the config from configFile and adapts it -// using adapterName. If adapterName is specified, configFile -// must be also. If no configFile is specified, it tries -// loading a default config file. The lack of a config file is -// not treated as an error, but false will be returned if -// there is no config available. It prints any warnings to stderr, -// and returns the resulting JSON config bytes along with -// whether a config file was loaded or not. -func loadConfig(configFile, adapterName string) ([]byte, string, error) { - // specifying an adapter without a config file is ambiguous - if adapterName != "" && configFile == "" { - return nil, "", fmt.Errorf("cannot adapt config without config file (use --config)") - } - - // load initial config and adapter - var config []byte - var cfgAdapter caddyconfig.Adapter - var err error - if configFile != "" { - if configFile == "-" { - config, err = ioutil.ReadAll(os.Stdin) - } else { - config, err = ioutil.ReadFile(configFile) - } - if err != nil { - return nil, "", fmt.Errorf("reading config file: %v", err) - } - caddy.Log().Info("using provided configuration", - zap.String("config_file", configFile), - zap.String("config_adapter", adapterName)) - } else if adapterName == "" { - // as a special case when no config file or adapter - // is specified, see if the Caddyfile adapter is - // plugged in, and if so, try using a default Caddyfile - cfgAdapter = caddyconfig.GetAdapter("caddyfile") - if cfgAdapter != nil { - config, err = ioutil.ReadFile("Caddyfile") - if os.IsNotExist(err) { - // okay, no default Caddyfile; pretend like this never happened - cfgAdapter = nil - } else if err != nil { - // default Caddyfile exists, but error reading it - return nil, "", fmt.Errorf("reading default Caddyfile: %v", err) - } else { - // success reading default Caddyfile - configFile = "Caddyfile" - caddy.Log().Info("using adjacent Caddyfile") - } - } - } - - // as a special case, if a config file called "Caddyfile" was - // specified, and no adapter is specified, assume caddyfile adapter - // for convenience - if strings.HasPrefix(filepath.Base(configFile), "Caddyfile") && - filepath.Ext(configFile) != ".json" && - adapterName == "" { - adapterName = "caddyfile" - } - - // load config adapter - if adapterName != "" { - cfgAdapter = caddyconfig.GetAdapter(adapterName) - if cfgAdapter == nil { - return nil, "", fmt.Errorf("unrecognized config adapter: %s", adapterName) - } - } - - // adapt config - if cfgAdapter != nil { - adaptedConfig, warnings, err := cfgAdapter.Adapt(config, map[string]interface{}{ - "filename": configFile, - }) - if err != nil { - return nil, "", fmt.Errorf("adapting config using %s: %v", adapterName, err) - } - for _, warn := range warnings { - msg := warn.Message - if warn.Directive != "" { - msg = fmt.Sprintf("%s: %s", warn.Directive, warn.Message) - } - caddy.Log().Warn(msg, zap.String("adapter", adapterName), zap.String("file", warn.File), zap.Int("line", warn.Line)) - } - config = adaptedConfig - } - - return config, configFile, nil -} - -// watchConfigFile watches the config file at filename for changes -// and reloads the config if the file was updated. 
This function -// blocks indefinitely; it only quits if the poller has errors for a -// long enough time. The filename passed in must be the actual -// config file used, not one to be discovered. -func watchConfigFile(filename, adapterName string) { - defer func() { - if err := recover(); err != nil { - log.Printf("[PANIC] watching config file: %v\n%s", err, debug.Stack()) - } - }() - - // make our logger; since config reloads can change the - // default logger, we need to get it dynamically each time - logger := func() *zap.Logger { - return caddy.Log(). - Named("watcher"). - With(zap.String("config_file", filename)) - } - - // get the initial timestamp on the config file - info, err := os.Stat(filename) - if err != nil { - logger().Error("cannot watch config file", zap.Error(err)) - return - } - lastModified := info.ModTime() - - logger().Info("watching config file for changes") - - // if the file disappears or something, we can - // stop polling if the error lasts long enough - var lastErr time.Time - finalError := func(err error) bool { - if lastErr.IsZero() { - lastErr = time.Now() - return false - } - if time.Since(lastErr) > 30*time.Second { - logger().Error("giving up watching config file; too many errors", - zap.Error(err)) - return true - } - return false - } - - // begin poller - //nolint:staticcheck - for range time.Tick(1 * time.Second) { - // get the file info - info, err := os.Stat(filename) - if err != nil { - if finalError(err) { - return - } - continue - } - lastErr = time.Time{} // no error, so clear any memory of one - - // if it hasn't changed, nothing to do - if !info.ModTime().After(lastModified) { - continue - } - - logger().Info("config file changed; reloading") - - // remember this timestamp - lastModified = info.ModTime() - - // load the contents of the file - config, _, err := loadConfig(filename, adapterName) - if err != nil { - logger().Error("unable to load latest config", zap.Error(err)) - continue - } - - // apply the updated config - err = caddy.Load(config, false) - if err != nil { - logger().Error("applying latest config", zap.Error(err)) - continue - } - } -} - -// Flags wraps a FlagSet so that typed values -// from flags can be easily retrieved. -type Flags struct { - *flag.FlagSet -} - -// String returns the string representation of the -// flag given by name. It panics if the flag is not -// in the flag set. -func (f Flags) String(name string) string { - return f.FlagSet.Lookup(name).Value.String() -} - -// Bool returns the boolean representation of the -// flag given by name. It returns false if the flag -// is not a boolean type. It panics if the flag is -// not in the flag set. -func (f Flags) Bool(name string) bool { - val, _ := strconv.ParseBool(f.String(name)) - return val -} - -// Int returns the integer representation of the -// flag given by name. It returns 0 if the flag -// is not an integer type. It panics if the flag is -// not in the flag set. -func (f Flags) Int(name string) int { - val, _ := strconv.ParseInt(f.String(name), 0, strconv.IntSize) - return int(val) -} - -// Float64 returns the float64 representation of the -// flag given by name. It returns 0 if the flag -// is not a float64 type. It panics if the flag is -// not in the flag set. -func (f Flags) Float64(name string) float64 { - val, _ := strconv.ParseFloat(f.String(name), 64) - return val -} - -// Duration returns the duration representation of the -// flag given by name. It returns 0 if the flag -// is not a duration type.
It panics if the flag is -// not in the flag set. -func (f Flags) Duration(name string) time.Duration { - val, _ := caddy.ParseDuration(f.String(name)) - return val -} - -// flagHelp returns the help text for fs. -func flagHelp(fs *flag.FlagSet) string { - if fs == nil { - return "" - } - - // temporarily redirect output - out := fs.Output() - defer fs.SetOutput(out) - - buf := new(bytes.Buffer) - fs.SetOutput(buf) - fs.PrintDefaults() - return buf.String() -} - -func loadEnvFromFile(envFile string) error { - file, err := os.Open(envFile) - if err != nil { - return fmt.Errorf("reading environment file: %v", err) - } - defer file.Close() - - envMap, err := parseEnvFile(file) - if err != nil { - return fmt.Errorf("parsing environment file: %v", err) - } - - for k, v := range envMap { - if err := os.Setenv(k, v); err != nil { - return fmt.Errorf("setting environment variables: %v", err) - } - } - - // Update the storage paths to ensure they have the proper - // value after loading a specified env file. - caddy.ConfigAutosavePath = filepath.Join(caddy.AppConfigDir(), "autosave.json") - caddy.DefaultStorage = &certmagic.FileStorage{Path: caddy.AppDataDir()} - - return nil -} - -func parseEnvFile(envInput io.Reader) (map[string]string, error) { - envMap := make(map[string]string) - - scanner := bufio.NewScanner(envInput) - var line string - lineNumber := 0 - - for scanner.Scan() { - line = strings.TrimSpace(scanner.Text()) - lineNumber++ - - // skip lines starting with comment - if strings.HasPrefix(line, "#") { - continue - } - - // skip empty line - if len(line) == 0 { - continue - } - - fields := strings.SplitN(line, "=", 2) - if len(fields) != 2 { - return nil, fmt.Errorf("can't parse line %d; line should be in KEY=VALUE format", lineNumber) - } - - if strings.Contains(fields[0], " ") { - return nil, fmt.Errorf("bad key on line %d: contains whitespace", lineNumber) - } - - key := fields[0] - val := fields[1] - - if key == "" { - return nil, fmt.Errorf("missing or empty key on line %d", lineNumber) - } - envMap[key] = val - } - - if err := scanner.Err(); err != nil { - return nil, err - } - - return envMap, nil -} - -func printEnvironment() { - fmt.Printf("caddy.HomeDir=%s\n", caddy.HomeDir()) - fmt.Printf("caddy.AppDataDir=%s\n", caddy.AppDataDir()) - fmt.Printf("caddy.AppConfigDir=%s\n", caddy.AppConfigDir()) - fmt.Printf("caddy.ConfigAutosavePath=%s\n", caddy.ConfigAutosavePath) - fmt.Printf("caddy.Version=%s\n", CaddyVersion()) - fmt.Printf("runtime.GOOS=%s\n", runtime.GOOS) - fmt.Printf("runtime.GOARCH=%s\n", runtime.GOARCH) - fmt.Printf("runtime.Compiler=%s\n", runtime.Compiler) - fmt.Printf("runtime.NumCPU=%d\n", runtime.NumCPU()) - fmt.Printf("runtime.GOMAXPROCS=%d\n", runtime.GOMAXPROCS(0)) - fmt.Printf("runtime.Version=%s\n", runtime.Version()) - cwd, err := os.Getwd() - if err != nil { - cwd = fmt.Sprintf("<error: %v>", err) - } - fmt.Printf("os.Getwd=%s\n\n", cwd) - for _, v := range os.Environ() { - fmt.Println(v) - } -} - -// CaddyVersion returns a detailed version string, if available.
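parseEnvFile's rules (comments and blank lines are skipped, only the first '=' splits, keys may not contain whitespace) pin down nicely in a test. A sketch that would have to live alongside this file in package caddycmd, since the function is unexported:

package caddycmd

import (
	"strings"
	"testing"
)

func TestParseEnvFileSketch(t *testing.T) {
	input := strings.NewReader(`
# comments and blank lines are ignored
DB_HOST=localhost
DB_DSN=postgres://user:pass@localhost:5432/db?sslmode=disable
`)
	env, err := parseEnvFile(input)
	if err != nil {
		t.Fatal(err)
	}
	// Only the first '=' splits, so values may themselves contain '='.
	if got := env["DB_DSN"]; got != "postgres://user:pass@localhost:5432/db?sslmode=disable" {
		t.Fatalf("unexpected value: %q", got)
	}
}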
-func CaddyVersion() string { - goModule := caddy.GoModule() - ver := goModule.Version - if goModule.Sum != "" { - ver += " " + goModule.Sum - } - if goModule.Replace != nil { - ver += " => " + goModule.Replace.Path - if goModule.Replace.Version != "" { - ver += "@" + goModule.Replace.Version - } - if goModule.Replace.Sum != "" { - ver += " " + goModule.Replace.Sum - } - } - return ver -} diff --git a/vendor/github.com/caddyserver/caddy/v2/cmd/packagesfuncs.go b/vendor/github.com/caddyserver/caddy/v2/cmd/packagesfuncs.go deleted file mode 100644 index 6aaf52bf..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/cmd/packagesfuncs.go +++ /dev/null @@ -1,306 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package caddycmd - -import ( - "encoding/json" - "fmt" - "io" - "net/http" - "net/url" - "os" - "os/exec" - "reflect" - "runtime" - "runtime/debug" - "strings" - - "github.com/caddyserver/caddy/v2" - "go.uber.org/zap" -) - -func cmdUpgrade(_ Flags) (int, error) { - _, nonstandard, _, err := getModules() - if err != nil { - return caddy.ExitCodeFailedStartup, fmt.Errorf("unable to enumerate installed plugins: %v", err) - } - pluginPkgs, err := getPluginPackages(nonstandard) - if err != nil { - return caddy.ExitCodeFailedStartup, err - } - - return upgradeBuild(pluginPkgs) -} - -func cmdAddPackage(fl Flags) (int, error) { - if len(fl.Args()) == 0 { - return caddy.ExitCodeFailedStartup, fmt.Errorf("at least one package name must be specified") - } - _, nonstandard, _, err := getModules() - if err != nil { - return caddy.ExitCodeFailedStartup, fmt.Errorf("unable to enumerate installed plugins: %v", err) - } - pluginPkgs, err := getPluginPackages(nonstandard) - if err != nil { - return caddy.ExitCodeFailedStartup, err - } - - for _, arg := range fl.Args() { - if _, ok := pluginPkgs[arg]; ok { - return caddy.ExitCodeFailedStartup, fmt.Errorf("package is already added") - } - pluginPkgs[arg] = struct{}{} - } - - return upgradeBuild(pluginPkgs) -} - -func cmdRemovePackage(fl Flags) (int, error) { - if len(fl.Args()) == 0 { - return caddy.ExitCodeFailedStartup, fmt.Errorf("at least one package name must be specified") - } - _, nonstandard, _, err := getModules() - if err != nil { - return caddy.ExitCodeFailedStartup, fmt.Errorf("unable to enumerate installed plugins: %v", err) - } - pluginPkgs, err := getPluginPackages(nonstandard) - if err != nil { - return caddy.ExitCodeFailedStartup, err - } - - for _, arg := range fl.Args() { - if _, ok := pluginPkgs[arg]; !ok { - // package does not exist - return caddy.ExitCodeFailedStartup, fmt.Errorf("package is not added") - } - delete(pluginPkgs, arg) - } - - return upgradeBuild(pluginPkgs) -} - -func upgradeBuild(pluginPkgs map[string]struct{}) (int, error) { - l := caddy.Log() - - thisExecPath, err := os.Executable() - if err != nil { - return caddy.ExitCodeFailedStartup, fmt.Errorf("determining current executable path: %v", err) - } - thisExecStat, err := os.Stat(thisExecPath) - 
if err != nil { - return caddy.ExitCodeFailedStartup, fmt.Errorf("retrieving current executable permission bits: %v", err) - } - l.Info("this executable will be replaced", zap.String("path", thisExecPath)) - - // build the request URL to download this custom build - qs := url.Values{ - "os": {runtime.GOOS}, - "arch": {runtime.GOARCH}, - } - for pkg := range pluginPkgs { - qs.Add("p", pkg) - } - - // initiate the build - resp, err := downloadBuild(qs) - if err != nil { - return caddy.ExitCodeFailedStartup, fmt.Errorf("download failed: %v", err) - } - defer resp.Body.Close() - - // back up the current binary, in case something goes wrong we can replace it - backupExecPath := thisExecPath + ".tmp" - l.Info("build acquired; backing up current executable", - zap.String("current_path", thisExecPath), - zap.String("backup_path", backupExecPath)) - err = os.Rename(thisExecPath, backupExecPath) - if err != nil { - return caddy.ExitCodeFailedStartup, fmt.Errorf("backing up current binary: %v", err) - } - defer func() { - if err != nil { - err2 := os.Rename(backupExecPath, thisExecPath) - if err2 != nil { - l.Error("restoring original executable failed; will need to be restored manually", - zap.String("backup_path", backupExecPath), - zap.String("original_path", thisExecPath), - zap.Error(err2)) - } - } - }() - - // download the file; do this in a closure to close reliably before we execute it - err = writeCaddyBinary(thisExecPath, &resp.Body, thisExecStat) - if err != nil { - return caddy.ExitCodeFailedStartup, err - } - - l.Info("download successful; displaying new binary details", zap.String("location", thisExecPath)) - - // use the new binary to print out version and module info - fmt.Print("\nModule versions:\n\n") - if err = listModules(thisExecPath); err != nil { - return caddy.ExitCodeFailedStartup, fmt.Errorf("download succeeded, but unable to execute: %v", err) - } - fmt.Println("\nVersion:") - if err = showVersion(thisExecPath); err != nil { - return caddy.ExitCodeFailedStartup, fmt.Errorf("download succeeded, but unable to execute: %v", err) - } - fmt.Println() - - // clean up the backup file - if err = os.Remove(backupExecPath); err != nil { - return caddy.ExitCodeFailedStartup, fmt.Errorf("download succeeded, but unable to clean up backup binary: %v", err) - } - l.Info("upgrade successful; please restart any running Caddy instances", zap.String("executable", thisExecPath)) - - return caddy.ExitCodeSuccess, nil -} - -func getModules() (standard, nonstandard, unknown []moduleInfo, err error) { - bi, ok := debug.ReadBuildInfo() - if !ok { - err = fmt.Errorf("no build info") - return - } - - for _, modID := range caddy.Modules() { - modInfo, err := caddy.GetModule(modID) - if err != nil { - // that's weird, shouldn't happen - unknown = append(unknown, moduleInfo{caddyModuleID: modID, err: err}) - continue - } - - // to get the Caddy plugin's version info, we need to know - // the package that the Caddy module's value comes from; we - // can use reflection but we need a non-pointer value (I'm - // not sure why), and since New() should return a pointer - // value, we need to dereference it first - iface := interface{}(modInfo.New()) - if rv := reflect.ValueOf(iface); rv.Kind() == reflect.Ptr { - iface = reflect.New(reflect.TypeOf(iface).Elem()).Elem().Interface() - } - modPkgPath := reflect.TypeOf(iface).PkgPath() - - // now we find the Go module that the Caddy module's package - // belongs to; we assume the Caddy module package path will - // be prefixed by its Go module path, and we will 
choose the - // longest matching prefix in case there are nested modules - var matched *debug.Module - for _, dep := range bi.Deps { - if strings.HasPrefix(modPkgPath, dep.Path) { - if matched == nil || len(dep.Path) > len(matched.Path) { - matched = dep - } - } - } - - caddyModGoMod := moduleInfo{caddyModuleID: modID, goModule: matched} - - if strings.HasPrefix(modPkgPath, caddy.ImportPath) { - standard = append(standard, caddyModGoMod) - } else { - nonstandard = append(nonstandard, caddyModGoMod) - } - } - return -} - -func listModules(path string) error { - cmd := exec.Command(path, "list-modules", "--versions") - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - err := cmd.Run() - if err != nil { - return fmt.Errorf("download succeeded, but unable to execute: %v", err) - } - return nil -} - -func showVersion(path string) error { - cmd := exec.Command(path, "version") - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - err := cmd.Run() - if err != nil { - return fmt.Errorf("download succeeded, but unable to execute: %v", err) - } - return nil -} - -func downloadBuild(qs url.Values) (*http.Response, error) { - l := caddy.Log() - l.Info("requesting build", - zap.String("os", qs.Get("os")), - zap.String("arch", qs.Get("arch")), - zap.Strings("packages", qs["p"])) - resp, err := http.Get(fmt.Sprintf("%s?%s", downloadPath, qs.Encode())) - if err != nil { - return nil, fmt.Errorf("secure request failed: %v", err) - } - if resp.StatusCode >= 400 { - var details struct { - StatusCode int `json:"status_code"` - Error struct { - Message string `json:"message"` - ID string `json:"id"` - } `json:"error"` - } - err2 := json.NewDecoder(resp.Body).Decode(&details) - if err2 != nil { - return nil, fmt.Errorf("download and error decoding failed: HTTP %d: %v", resp.StatusCode, err2) - } - return nil, fmt.Errorf("download failed: HTTP %d: %s (id=%s)", resp.StatusCode, details.Error.Message, details.Error.ID) - } - return resp, nil -} - -func getPluginPackages(modules []moduleInfo) (map[string]struct{}, error) { - pluginPkgs := make(map[string]struct{}) - for _, mod := range modules { - if mod.goModule.Replace != nil { - return nil, fmt.Errorf("cannot auto-upgrade when Go module has been replaced: %s => %s", - mod.goModule.Path, mod.goModule.Replace.Path) - } - pluginPkgs[mod.goModule.Path] = struct{}{} - } - return pluginPkgs, nil -} - -func writeCaddyBinary(path string, body *io.ReadCloser, fileInfo os.FileInfo) error { - l := caddy.Log() - destFile, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, fileInfo.Mode()) - if err != nil { - return fmt.Errorf("unable to open destination file: %v", err) - } - defer destFile.Close() - - l.Info("downloading binary", zap.String("destination", path)) - - _, err = io.Copy(destFile, *body) - if err != nil { - return fmt.Errorf("unable to download file: %v", err) - } - - err = destFile.Sync() - if err != nil { - return fmt.Errorf("syncing downloaded file to device: %v", err) - } - - return nil -} - -const downloadPath = "https://caddyserver.com/api/download" diff --git a/vendor/github.com/caddyserver/caddy/v2/context.go b/vendor/github.com/caddyserver/caddy/v2/context.go deleted file mode 100644 index a6386aa8..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/context.go +++ /dev/null @@ -1,442 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package caddy - -import ( - "context" - "encoding/json" - "fmt" - "log" - "reflect" - - "github.com/caddyserver/certmagic" - "go.uber.org/zap" -) - -// Context is a type which defines the lifetime of modules that -// are loaded and provides access to the parent configuration -// that spawned the modules which are loaded. It should be used -// with care and wrapped with derivation functions from the -// standard context package only if you don't need the Caddy -// specific features. These contexts are canceled when the -// lifetime of the modules loaded from it is over. -// -// Use NewContext() to get a valid value (but most modules will -// not actually need to do this). -type Context struct { - context.Context - moduleInstances map[string][]interface{} - cfg *Config - cleanupFuncs []func() -} - -// NewContext provides a new context derived from the given -// context ctx. Normally, you will not need to call this -// function unless you are loading modules which have a -// different lifespan than the ones for the context the -// module was provisioned with. Be sure to call the cancel -// func when the context is to be cleaned up so that -// modules which are loaded will be properly unloaded. -// See standard library context package's documentation. -func NewContext(ctx Context) (Context, context.CancelFunc) { - newCtx := Context{moduleInstances: make(map[string][]interface{}), cfg: ctx.cfg} - c, cancel := context.WithCancel(ctx.Context) - wrappedCancel := func() { - cancel() - - for _, f := range ctx.cleanupFuncs { - f() - } - - for modName, modInstances := range newCtx.moduleInstances { - for _, inst := range modInstances { - if cu, ok := inst.(CleanerUpper); ok { - err := cu.Cleanup() - if err != nil { - log.Printf("[ERROR] %s (%p): cleanup: %v", modName, inst, err) - } - } - } - } - } - newCtx.Context = c - return newCtx, wrappedCancel -} - -// OnCancel executes f when ctx is canceled. -func (ctx *Context) OnCancel(f func()) { - ctx.cleanupFuncs = append(ctx.cleanupFuncs, f) -} - -// LoadModule loads the Caddy module(s) from the specified field of the parent struct -// pointer and returns the loaded module(s). The struct pointer and its field name as -// a string are necessary so that reflection can be used to read the struct tag on the -// field to get the module namespace and inline module name key (if specified). -// -// The field can be any one of the supported raw module types: json.RawMessage, -// []json.RawMessage, map[string]json.RawMessage, or []map[string]json.RawMessage. -// ModuleMap may be used in place of map[string]json.RawMessage. The return value's -// underlying type mirrors the input field's type: -// -// json.RawMessage => interface{} -// []json.RawMessage => []interface{} -// [][]json.RawMessage => [][]interface{} -// map[string]json.RawMessage => map[string]interface{} -// []map[string]json.RawMessage => []map[string]interface{} -// -// The field must have a "caddy" struct tag in this format: -// -// caddy:"key1=val1 key2=val2" -// -// To load modules, a "namespace" key is required. 
For example, to load modules -// in the "http.handlers" namespace, you'd put: `namespace=http.handlers` in the -// Caddy struct tag. -// -// The module name must also be available. If the field type is a map or slice of maps, -// then key is assumed to be the module name if an "inline_key" is NOT specified in the -// caddy struct tag. In this case, the module name does NOT need to be specified in-line -// with the module itself. -// -// If not a map, or if inline_key is non-empty, then the module name must be embedded -// into the values, which must be objects; then there must be a key in those objects -// where its associated value is the module name. This is called the "inline key", -// meaning the key containing the module's name that is defined inline with the module -// itself. You must specify the inline key in a struct tag, along with the namespace: -// -// caddy:"namespace=http.handlers inline_key=handler" -// -// This will look for a key/value pair like `"handler": "..."` in the json.RawMessage -// in order to know the module name. -// -// To make use of the loaded module(s) (the return value), you will probably want -// to type-assert each interface{} value(s) to the types that are useful to you -// and store them on the same struct. Storing them on the same struct makes for -// easy garbage collection when your host module is no longer needed. -// -// Loaded modules have already been provisioned and validated. Upon returning -// successfully, this method clears the json.RawMessage(s) in the field since -// the raw JSON is no longer needed, and this allows the GC to free up memory. -func (ctx Context) LoadModule(structPointer interface{}, fieldName string) (interface{}, error) { - val := reflect.ValueOf(structPointer).Elem().FieldByName(fieldName) - typ := val.Type() - - field, ok := reflect.TypeOf(structPointer).Elem().FieldByName(fieldName) - if !ok { - panic(fmt.Sprintf("field %s does not exist in %#v", fieldName, structPointer)) - } - - opts, err := ParseStructTag(field.Tag.Get("caddy")) - if err != nil { - panic(fmt.Sprintf("malformed tag on field %s: %v", fieldName, err)) - } - - moduleNamespace, ok := opts["namespace"] - if !ok { - panic(fmt.Sprintf("missing 'namespace' key in struct tag on field %s", fieldName)) - } - inlineModuleKey := opts["inline_key"] - - var result interface{} - - switch val.Kind() { - case reflect.Slice: - if isJSONRawMessage(typ) { - // val is `json.RawMessage` ([]uint8 under the hood) - - if inlineModuleKey == "" { - panic("unable to determine module name without inline_key when type is not a ModuleMap") - } - val, err := ctx.loadModuleInline(inlineModuleKey, moduleNamespace, val.Interface().(json.RawMessage)) - if err != nil { - return nil, err - } - result = val - - } else if isJSONRawMessage(typ.Elem()) { - // val is `[]json.RawMessage` - - if inlineModuleKey == "" { - panic("unable to determine module name without inline_key because type is not a ModuleMap") - } - var all []interface{} - for i := 0; i < val.Len(); i++ { - val, err := ctx.loadModuleInline(inlineModuleKey, moduleNamespace, val.Index(i).Interface().(json.RawMessage)) - if err != nil { - return nil, fmt.Errorf("position %d: %v", i, err) - } - all = append(all, val) - } - result = all - - } else if typ.Elem().Kind() == reflect.Slice && isJSONRawMessage(typ.Elem().Elem()) { - // val is `[][]json.RawMessage` - - if inlineModuleKey == "" { - panic("unable to determine module name without inline_key because type is not a ModuleMap") - } - var all [][]interface{} - for i := 0; i < 
val.Len(); i++ { - innerVal := val.Index(i) - var allInner []interface{} - for j := 0; j < innerVal.Len(); j++ { - innerInnerVal, err := ctx.loadModuleInline(inlineModuleKey, moduleNamespace, innerVal.Index(j).Interface().(json.RawMessage)) - if err != nil { - return nil, fmt.Errorf("position %d: %v", j, err) - } - allInner = append(allInner, innerInnerVal) - } - all = append(all, allInner) - } - result = all - - } else if isModuleMapType(typ.Elem()) { - // val is `[]map[string]json.RawMessage` - - var all []map[string]interface{} - for i := 0; i < val.Len(); i++ { - thisSet, err := ctx.loadModulesFromSomeMap(moduleNamespace, inlineModuleKey, val.Index(i)) - if err != nil { - return nil, err - } - all = append(all, thisSet) - } - result = all - } - - case reflect.Map: - // val is a ModuleMap or some other kind of map - result, err = ctx.loadModulesFromSomeMap(moduleNamespace, inlineModuleKey, val) - if err != nil { - return nil, err - } - - default: - return nil, fmt.Errorf("unrecognized type for module: %s", typ) - } - - // we're done with the raw bytes; allow GC to deallocate - val.Set(reflect.Zero(typ)) - - return result, nil -} - -// loadModulesFromSomeMap loads modules from val, which must be a type of map[string]interface{}. -// Depending on inlineModuleKey, it will be interpreted as either a ModuleMap (key is the module -// name) or as a regular map (key is not the module name, and module name is defined inline). -func (ctx Context) loadModulesFromSomeMap(namespace, inlineModuleKey string, val reflect.Value) (map[string]interface{}, error) { - // if no inline_key is specified, then val must be a ModuleMap, - // where the key is the module name - if inlineModuleKey == "" { - if !isModuleMapType(val.Type()) { - panic(fmt.Sprintf("expected ModuleMap because inline_key is empty; but we do not recognize this type: %s", val.Type())) - } - return ctx.loadModuleMap(namespace, val) - } - - // otherwise, val is a map with modules, but the module name is - // inline with each value (the key means something else) - return ctx.loadModulesFromRegularMap(namespace, inlineModuleKey, val) -} - -// loadModulesFromRegularMap loads modules from val, where val is a map[string]json.RawMessage. -// Map keys are NOT interpreted as module names, so module names are still expected to appear -// inline with the objects. -func (ctx Context) loadModulesFromRegularMap(namespace, inlineModuleKey string, val reflect.Value) (map[string]interface{}, error) { - mods := make(map[string]interface{}) - iter := val.MapRange() - for iter.Next() { - k := iter.Key() - v := iter.Value() - mod, err := ctx.loadModuleInline(inlineModuleKey, namespace, v.Interface().(json.RawMessage)) - if err != nil { - return nil, fmt.Errorf("key %s: %v", k, err) - } - mods[k.String()] = mod - } - return mods, nil -} - -// loadModuleMap loads modules from a ModuleMap, i.e. map[string]interface{}, where the key is the -// module name. With a module map, module names do not need to be defined inline with their values. -func (ctx Context) loadModuleMap(namespace string, val reflect.Value) (map[string]interface{}, error) { - all := make(map[string]interface{}) - iter := val.MapRange() - for iter.Next() { - k := iter.Key().Interface().(string) - v := iter.Value().Interface().(json.RawMessage) - moduleName := namespace + "." 
+ k - if namespace == "" { - moduleName = k - } - val, err := ctx.LoadModuleByID(moduleName, v) - if err != nil { - return nil, fmt.Errorf("module name '%s': %v", k, err) - } - all[k] = val - } - return all, nil -} - -// LoadModuleByID decodes rawMsg into a new instance of mod and -// returns the value. If mod.New is nil, an error is returned. -// If the module implements Validator or Provisioner interfaces, -// those methods are invoked to ensure the module is fully -// configured and valid before being used. -// -// This is a lower-level method and will usually not be called -// directly by most modules. However, this method is useful when -// dynamically loading/unloading modules in their own context, -// like from embedded scripts, etc. -func (ctx Context) LoadModuleByID(id string, rawMsg json.RawMessage) (interface{}, error) { - modulesMu.RLock() - mod, ok := modules[id] - modulesMu.RUnlock() - if !ok { - return nil, fmt.Errorf("unknown module: %s", id) - } - - if mod.New == nil { - return nil, fmt.Errorf("module '%s' has no constructor", mod.ID) - } - - val := mod.New().(interface{}) - - // value must be a pointer for unmarshaling into concrete type, even if - // the module's concrete type is a slice or map; New() *should* return - // a pointer, otherwise unmarshaling errors or panics will occur - if rv := reflect.ValueOf(val); rv.Kind() != reflect.Ptr { - log.Printf("[WARNING] ModuleInfo.New() for module '%s' did not return a pointer,"+ - " so we are using reflection to make a pointer instead; please fix this by"+ - " using new(Type) or &Type notation in your module's New() function.", id) - val = reflect.New(rv.Type()).Elem().Addr().Interface().(Module) - } - - // fill in its config only if there is a config to fill in - if len(rawMsg) > 0 { - err := strictUnmarshalJSON(rawMsg, &val) - if err != nil { - return nil, fmt.Errorf("decoding module config: %s: %v", mod, err) - } - } - - if val == nil { - // returned module values are almost always type-asserted - // before being used, so a nil value would panic; and there - // is no good reason to explicitly declare null modules in - // a config; it might be because the user is trying to achieve - // a result the developer isn't expecting, which is a smell - return nil, fmt.Errorf("module value cannot be null") - } - - if prov, ok := val.(Provisioner); ok { - err := prov.Provision(ctx) - if err != nil { - // incomplete provisioning could have left state - // dangling, so make sure it gets cleaned up - if cleanerUpper, ok := val.(CleanerUpper); ok { - err2 := cleanerUpper.Cleanup() - if err2 != nil { - err = fmt.Errorf("%v; additionally, cleanup: %v", err, err2) - } - } - return nil, fmt.Errorf("provision %s: %v", mod, err) - } - } - - if validator, ok := val.(Validator); ok { - err := validator.Validate() - if err != nil { - // since the module was already provisioned, make sure we clean up - if cleanerUpper, ok := val.(CleanerUpper); ok { - err2 := cleanerUpper.Cleanup() - if err2 != nil { - err = fmt.Errorf("%v; additionally, cleanup: %v", err, err2) - } - } - return nil, fmt.Errorf("%s: invalid configuration: %v", mod, err) - } - } - - ctx.moduleInstances[id] = append(ctx.moduleInstances[id], val) - - return val, nil -} - -// loadModuleInline loads a module from a JSON raw message which decodes to -// a map[string]interface{}, where one of the object keys is moduleNameKey -// and the corresponding value is the module name (as a string) which can -// be found in the given scope. 
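From the host module's side, the namespace/inline_key tags described above look like this in practice. A compressed sketch with hypothetical names (Gizmo, HandlerRaw), omitting the CaddyModule registration boilerplate a real module would also need:

package gizmo // hypothetical host module

import (
	"encoding/json"

	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/modules/caddyhttp"
)

type Gizmo struct {
	// The guest handler's module name sits inline under the "handler" key,
	// matching inline_key=handler in the struct tag.
	HandlerRaw json.RawMessage `json:"handler,omitempty" caddy:"namespace=http.handlers inline_key=handler"`

	handler caddyhttp.MiddlewareHandler // decoded guest module
}

func (g *Gizmo) Provision(ctx caddy.Context) error {
	mod, err := ctx.LoadModule(g, "HandlerRaw") // zeroes HandlerRaw on success
	if err != nil {
		return err
	}
	g.handler = mod.(caddyhttp.MiddlewareHandler)
	return nil
}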
In other words, the module name is declared -// in-line with the module itself. -// -// This allows modules to be decoded into their concrete types and used when -// their names cannot be the unique key in a map, such as when there are -// multiple instances in the map or it appears in an array (where there are -// no custom keys). In other words, the key containing the module name is -// treated special/separate from all the other keys in the object. -func (ctx Context) loadModuleInline(moduleNameKey, moduleScope string, raw json.RawMessage) (interface{}, error) { - moduleName, raw, err := getModuleNameInline(moduleNameKey, raw) - if err != nil { - return nil, err - } - - val, err := ctx.LoadModuleByID(moduleScope+"."+moduleName, raw) - if err != nil { - return nil, fmt.Errorf("loading module '%s': %v", moduleName, err) - } - - return val, nil -} - -// App returns the configured app named name. If that app has -// not yet been loaded and provisioned, it will be immediately -// loaded and provisioned. If no app with that name is -// configured, a new empty one will be instantiated instead. -// (The app module must still be registered.) This must not be -// called during the Provision/Validate phase to reference a -// module's own host app (since the parent app module is still -// in the process of being provisioned, it is not yet ready). -func (ctx Context) App(name string) (interface{}, error) { - if app, ok := ctx.cfg.apps[name]; ok { - return app, nil - } - appRaw := ctx.cfg.AppsRaw[name] - modVal, err := ctx.LoadModuleByID(name, appRaw) - if err != nil { - return nil, fmt.Errorf("loading %s app module: %v", name, err) - } - if appRaw != nil { - ctx.cfg.AppsRaw[name] = nil // allow GC to deallocate - } - ctx.cfg.apps[name] = modVal.(App) - return modVal, nil -} - -// Storage returns the configured Caddy storage implementation. -func (ctx Context) Storage() certmagic.Storage { - return ctx.cfg.storage -} - -// Logger returns a logger that can be used by mod. -func (ctx Context) Logger(mod Module) *zap.Logger { - if ctx.cfg == nil { - // often the case in tests; just use a dev logger - l, err := zap.NewDevelopment() - if err != nil { - panic("config missing, unable to create dev logger: " + err.Error()) - } - return l - } - return ctx.cfg.Logging.Logger(mod) -} diff --git a/vendor/github.com/caddyserver/caddy/v2/duration_fuzz.go b/vendor/github.com/caddyserver/caddy/v2/duration_fuzz.go deleted file mode 100644 index e7afed3f..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/duration_fuzz.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// +build gofuzz - -package caddy - -func FuzzParseDuration(data []byte) int { - _, err := ParseDuration(string(data)) - if err != nil { - return 0 - } - return 1 -} diff --git a/vendor/github.com/caddyserver/caddy/v2/listeners.go b/vendor/github.com/caddyserver/caddy/v2/listeners.go deleted file mode 100644 index e1edcd6c..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/listeners.go +++ /dev/null @@ -1,461 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package caddy - -import ( - "fmt" - "log" - "net" - "strconv" - "strings" - "sync" - "sync/atomic" - "syscall" - "time" -) - -// Listen returns a listener suitable for use in a Caddy module. -// Always be sure to close listeners when you are done with them. -func Listen(network, addr string) (net.Listener, error) { - lnKey := network + "/" + addr - - listenersMu.Lock() - defer listenersMu.Unlock() - - // if listener already exists, increment usage counter, then return listener - if lnGlobal, ok := listeners[lnKey]; ok { - atomic.AddInt32(&lnGlobal.usage, 1) - return &fakeCloseListener{ - usage: &lnGlobal.usage, - deadline: &lnGlobal.deadline, - deadlineMu: &lnGlobal.deadlineMu, - key: lnKey, - Listener: lnGlobal.ln, - }, nil - } - - // or, create new one and save it - ln, err := net.Listen(network, addr) - if err != nil { - return nil, err - } - - // make sure to start its usage counter at 1 - lnGlobal := &globalListener{usage: 1, ln: ln} - listeners[lnKey] = lnGlobal - - return &fakeCloseListener{ - usage: &lnGlobal.usage, - deadline: &lnGlobal.deadline, - deadlineMu: &lnGlobal.deadlineMu, - key: lnKey, - Listener: ln, - }, nil -} - -// ListenPacket returns a net.PacketConn suitable for use in a Caddy module. -// Always be sure to close the PacketConn when you are done. -func ListenPacket(network, addr string) (net.PacketConn, error) { - lnKey := network + "/" + addr - - listenersMu.Lock() - defer listenersMu.Unlock() - - // if listener already exists, increment usage counter, then return listener - if lnGlobal, ok := listeners[lnKey]; ok { - atomic.AddInt32(&lnGlobal.usage, 1) - log.Printf("[DEBUG] %s: Usage counter should not go above 2 or maybe 3, is now: %d", lnKey, atomic.LoadInt32(&lnGlobal.usage)) // TODO: remove - return &fakeClosePacketConn{usage: &lnGlobal.usage, key: lnKey, PacketConn: lnGlobal.pc}, nil - } - - // or, create new one and save it - pc, err := net.ListenPacket(network, addr) - if err != nil { - return nil, err - } - - // make sure to start its usage counter at 1 - lnGlobal := &globalListener{usage: 1, pc: pc} - listeners[lnKey] = lnGlobal - - return &fakeClosePacketConn{usage: &lnGlobal.usage, key: lnKey, PacketConn: pc}, nil -} - -// fakeCloseListener's Close() method is a no-op. This allows -// stopping servers that are using the listener without giving -// up the socket; thus, servers become hot-swappable while the -// listener remains running. 
Listeners should be re-wrapped in -// a new fakeCloseListener each time the listener is reused. -// Other than the 'closed' field (which pertains to this value -// only), the other fields in this struct should be pointers to -// the associated globalListener's struct fields (except 'key' -// which is there for read-only purposes, so it can be a copy). -type fakeCloseListener struct { - closed int32 // accessed atomically; belongs to this struct only - usage *int32 // accessed atomically; global - deadline *bool // protected by deadlineMu; global - deadlineMu *sync.Mutex // global - key string // global, but read-only, so can be copy - net.Listener // global -} - -// Accept accepts connections until Close() is called. -func (fcl *fakeCloseListener) Accept() (net.Conn, error) { - // if the listener is already "closed", return error - if atomic.LoadInt32(&fcl.closed) == 1 { - return nil, fcl.fakeClosedErr() - } - - // wrap underlying accept - conn, err := fcl.Listener.Accept() - if err == nil { - return conn, nil - } - - // accept returned with error - // TODO: This may be better as a condition variable so the deadline is cleared only once? - fcl.deadlineMu.Lock() - if *fcl.deadline { - switch ln := fcl.Listener.(type) { - case *net.TCPListener: - _ = ln.SetDeadline(time.Time{}) - case *net.UnixListener: - _ = ln.SetDeadline(time.Time{}) - } - *fcl.deadline = false - } - fcl.deadlineMu.Unlock() - - if atomic.LoadInt32(&fcl.closed) == 1 { - // if we canceled the Accept() by setting a deadline - // on the listener, we need to make sure any callers of - // Accept() think the listener was actually closed; - // if we return the timeout error instead, callers might - // simply retry, leaking goroutines for longer - if netErr, ok := err.(net.Error); ok && netErr.Timeout() { - return nil, fcl.fakeClosedErr() - } - } - - return nil, err -} - -// Close stops accepting new connections without -// closing the underlying listener, unless no one -// else is using it. -func (fcl *fakeCloseListener) Close() error { - if atomic.CompareAndSwapInt32(&fcl.closed, 0, 1) { - // unfortunately, there is no way to cancel any - // currently-blocking calls to Accept() that are - // awaiting connections since we're not actually - // closing the listener; so we cheat by setting - // a deadline in the past, which forces it to - // time out; note that this only works for - // certain types of listeners... 
- fcl.deadlineMu.Lock() - if !*fcl.deadline { - switch ln := fcl.Listener.(type) { - case *net.TCPListener: - _ = ln.SetDeadline(time.Now().Add(-1 * time.Minute)) - case *net.UnixListener: - _ = ln.SetDeadline(time.Now().Add(-1 * time.Minute)) - } - *fcl.deadline = true - } - fcl.deadlineMu.Unlock() - - // since we're no longer using this listener, - // decrement the usage counter and, if no one - // else is using it, close underlying listener - if atomic.AddInt32(fcl.usage, -1) == 0 { - listenersMu.Lock() - delete(listeners, fcl.key) - listenersMu.Unlock() - err := fcl.Listener.Close() - if err != nil { - return err - } - } - - } - - return nil -} - -func (fcl *fakeCloseListener) fakeClosedErr() error { - return &net.OpError{ - Op: "accept", - Net: fcl.Listener.Addr().Network(), - Addr: fcl.Listener.Addr(), - Err: errFakeClosed, - } -} - -type fakeClosePacketConn struct { - closed int32 // accessed atomically - usage *int32 // accessed atomically - key string - net.PacketConn -} - -func (fcpc *fakeClosePacketConn) Close() error { - log.Println("[DEBUG] Fake-closing underlying packet conn") // TODO: remove this - - if atomic.CompareAndSwapInt32(&fcpc.closed, 0, 1) { - // since we're no longer using this listener, - // decrement the usage counter and, if no one - // else is using it, close underlying listener - if atomic.AddInt32(fcpc.usage, -1) == 0 { - listenersMu.Lock() - delete(listeners, fcpc.key) - listenersMu.Unlock() - err := fcpc.PacketConn.Close() - if err != nil { - return err - } - } - } - - return nil -} - -// Supports QUIC implementation: https://github.com/caddyserver/caddy/issues/3998 -func (fcpc fakeClosePacketConn) SetReadBuffer(bytes int) error { - if conn, ok := fcpc.PacketConn.(interface{ SetReadBuffer(int) error }); ok { - return conn.SetReadBuffer(bytes) - } - return fmt.Errorf("SetReadBuffer() not implemented for %T", fcpc.PacketConn) -} - -// Supports QUIC implementation: https://github.com/caddyserver/caddy/issues/3998 -func (fcpc fakeClosePacketConn) SyscallConn() (syscall.RawConn, error) { - if conn, ok := fcpc.PacketConn.(interface { - SyscallConn() (syscall.RawConn, error) - }); ok { - return conn.SyscallConn() - } - return nil, fmt.Errorf("SyscallConn() not implemented for %T", fcpc.PacketConn) -} - -// ErrFakeClosed is the underlying error value returned by -// fakeCloseListener.Accept() after Close() has been called, -// indicating that it is pretending to be closed so that the -// server using it can terminate, while the underlying -// socket is actually left open. -var errFakeClosed = fmt.Errorf("listener 'closed' 😉") - -// globalListener keeps global state for a listener -// that may be shared by multiple servers. In other -// words, values in this struct exist only once and -// all other uses of these values point to the ones -// in this struct. In particular, the usage count -// (how many callers are using the listener), the -// actual listener, and synchronization of the -// listener's deadline changes are singular, global -// values that must not be copied. -type globalListener struct { - usage int32 // accessed atomically - deadline bool - deadlineMu sync.Mutex - ln net.Listener - pc net.PacketConn -} - -// NetworkAddress contains the individual components -// for a parsed network address of the form accepted -// by ParseNetworkAddress(). Network should be a -// network value accepted by Go's net package. Port -// ranges are given by [StartPort, EndPort]. 
-type NetworkAddress struct { - Network string - Host string - StartPort uint - EndPort uint -} - -// IsUnixNetwork returns true if na.Network is -// unix, unixgram, or unixpacket. -func (na NetworkAddress) IsUnixNetwork() bool { - return isUnixNetwork(na.Network) -} - -// JoinHostPort is like net.JoinHostPort, but where the port -// is StartPort + offset. -func (na NetworkAddress) JoinHostPort(offset uint) string { - if na.IsUnixNetwork() { - return na.Host - } - return net.JoinHostPort(na.Host, strconv.Itoa(int(na.StartPort+offset))) -} - -// PortRangeSize returns how many ports are in -// pa's port range. Port ranges are inclusive, -// so the size is the difference of start and -// end ports plus one. -func (na NetworkAddress) PortRangeSize() uint { - return (na.EndPort - na.StartPort) + 1 -} - -func (na NetworkAddress) isLoopback() bool { - if na.IsUnixNetwork() { - return true - } - if na.Host == "localhost" { - return true - } - if ip := net.ParseIP(na.Host); ip != nil { - return ip.IsLoopback() - } - return false -} - -func (na NetworkAddress) isWildcardInterface() bool { - if na.Host == "" { - return true - } - if ip := net.ParseIP(na.Host); ip != nil { - return ip.IsUnspecified() - } - return false -} - -func (na NetworkAddress) port() string { - if na.StartPort == na.EndPort { - return strconv.FormatUint(uint64(na.StartPort), 10) - } - return fmt.Sprintf("%d-%d", na.StartPort, na.EndPort) -} - -// String reconstructs the address string to the form expected -// by ParseNetworkAddress(). If the address is a unix socket, -// any non-zero port will be dropped. -func (na NetworkAddress) String() string { - return JoinNetworkAddress(na.Network, na.Host, na.port()) -} - -func isUnixNetwork(netw string) bool { - return netw == "unix" || netw == "unixgram" || netw == "unixpacket" -} - -// ParseNetworkAddress parses addr into its individual -// components. The input string is expected to be of -// the form "network/host:port-range" where any part is -// optional. The default network, if unspecified, is tcp. -// Port ranges are inclusive. -// -// Network addresses are distinct from URLs and do not -// use URL syntax. -func ParseNetworkAddress(addr string) (NetworkAddress, error) { - var host, port string - network, host, port, err := SplitNetworkAddress(addr) - if network == "" { - network = "tcp" - } - if err != nil { - return NetworkAddress{}, err - } - if isUnixNetwork(network) { - return NetworkAddress{ - Network: network, - Host: host, - }, nil - } - ports := strings.SplitN(port, "-", 2) - if len(ports) == 1 { - ports = append(ports, ports[0]) - } - var start, end uint64 - start, err = strconv.ParseUint(ports[0], 10, 16) - if err != nil { - return NetworkAddress{}, fmt.Errorf("invalid start port: %v", err) - } - end, err = strconv.ParseUint(ports[1], 10, 16) - if err != nil { - return NetworkAddress{}, fmt.Errorf("invalid end port: %v", err) - } - if end < start { - return NetworkAddress{}, fmt.Errorf("end port must not be less than start port") - } - if (end - start) > maxPortSpan { - return NetworkAddress{}, fmt.Errorf("port range exceeds %d ports", maxPortSpan) - } - return NetworkAddress{ - Network: network, - Host: host, - StartPort: uint(start), - EndPort: uint(end), - }, nil -} - -// SplitNetworkAddress splits a into its network, host, and port components. -// Note that port may be a port range (:X-Y), or omitted for unix sockets. 
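For reference, here is how the address API above behaves for a port range, using the exported functions exactly as defined in this vendored version (the sample address is arbitrary):

```go
package main

import (
	"fmt"

	"github.com/caddyserver/caddy/v2"
)

func main() {
	// Network defaults to tcp when omitted; port ranges are inclusive.
	na, err := caddy.ParseNetworkAddress("udp/127.0.0.1:5000-5002")
	if err != nil {
		panic(err)
	}
	fmt.Println(na.Network, na.Host, na.StartPort, na.EndPort) // udp 127.0.0.1 5000 5002
	fmt.Println(na.PortRangeSize())                            // 3
	fmt.Println(na.JoinHostPort(1))                            // 127.0.0.1:5001
}
```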
-func SplitNetworkAddress(a string) (network, host, port string, err error) { - if idx := strings.Index(a, "/"); idx >= 0 { - network = strings.ToLower(strings.TrimSpace(a[:idx])) - a = a[idx+1:] - } - if isUnixNetwork(network) { - host = a - return - } - host, port, err = net.SplitHostPort(a) - return -} - -// JoinNetworkAddress combines network, host, and port into a single -// address string of the form accepted by ParseNetworkAddress(). For -// unix sockets, the network should be "unix" (or "unixgram" or -// "unixpacket") and the path to the socket should be given as the -// host parameter. -func JoinNetworkAddress(network, host, port string) string { - var a string - if network != "" { - a = network + "/" - } - if (host != "" && port == "") || isUnixNetwork(network) { - a += host - } else if port != "" { - a += net.JoinHostPort(host, port) - } - return a -} - -// ListenerWrapper is a type that wraps a listener -// so it can modify the input listener's methods. -// Modules that implement this interface are found -// in the caddy.listeners namespace. Usually, to -// wrap a listener, you will define your own struct -// type that embeds the input listener, then -// implement your own methods that you want to wrap, -// calling the underlying listener's methods where -// appropriate. -type ListenerWrapper interface { - WrapListener(net.Listener) net.Listener -} - -var ( - listeners = make(map[string]*globalListener) - listenersMu sync.Mutex -) - -const maxPortSpan = 65535 - -// Interface guards (see https://github.com/caddyserver/caddy/issues/3998) -var ( - _ (interface{ SetReadBuffer(int) error }) = (*fakeClosePacketConn)(nil) - _ (interface { - SyscallConn() (syscall.RawConn, error) - }) = (*fakeClosePacketConn)(nil) -) diff --git a/vendor/github.com/caddyserver/caddy/v2/listeners_fuzz.go b/vendor/github.com/caddyserver/caddy/v2/listeners_fuzz.go deleted file mode 100644 index 823d0beb..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/listeners_fuzz.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build gofuzz - -package caddy - -func FuzzParseNetworkAddress(data []byte) int { - _, err := ParseNetworkAddress(string(data)) - if err != nil { - return 0 - } - return 1 -} diff --git a/vendor/github.com/caddyserver/caddy/v2/logging.go b/vendor/github.com/caddyserver/caddy/v2/logging.go deleted file mode 100644 index 7837145d..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/logging.go +++ /dev/null @@ -1,705 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package caddy - -import ( - "encoding/json" - "fmt" - "io" - "io/ioutil" - "log" - "os" - "strings" - "sync" - "time" - - "go.uber.org/zap" - "go.uber.org/zap/zapcore" - "golang.org/x/term" -) - -func init() { - RegisterModule(StdoutWriter{}) - RegisterModule(StderrWriter{}) - RegisterModule(DiscardWriter{}) -} - -// Logging facilitates logging within Caddy. The default log is -// called "default" and you can customize it. You can also define -// additional logs. -// -// By default, all logs at INFO level and higher are written to -// standard error ("stderr" writer) in a human-readable format -// ("console" encoder if stdout is an interactive terminal, "json" -// encoder otherwise). -// -// All defined logs accept all log entries by default, but you -// can filter by level and module/logger names. A logger's name -// is the same as the module's name, but a module may append to -// logger names for more specificity. For example, you can -// filter logs emitted only by HTTP handlers using the name -// "http.handlers", because all HTTP handler module names have -// that prefix. -// -// Caddy logs (except the sink) are zero-allocation, so they are -// very high-performing in terms of memory and CPU time. Enabling -// sampling can further increase throughput on extremely high-load -// servers. -type Logging struct { - // Sink is the destination for all unstructured logs emitted - // from Go's standard library logger. These logs are common - // in dependencies that are not designed specifically for use - // in Caddy. Because it is global and unstructured, the sink - // lacks most advanced features and customizations. - Sink *StandardLibLog `json:"sink,omitempty"` - - // Logs are your logs, keyed by an arbitrary name of your - // choosing. The default log can be customized by defining - // a log called "default". You can further define other logs - // and filter what kinds of entries they accept. - Logs map[string]*CustomLog `json:"logs,omitempty"` - - // a list of all keys for open writers; all writers - // that are opened to provision this logging config - // must have their keys added to this list so they - // can be closed when cleaning up - writerKeys []string -} - -// openLogs sets up the config and opens all the configured writers. -// It closes its logs when ctx is canceled, so it should clean up -// after itself. 
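Because the Logging struct above is plain JSON-tagged config, a logging section can be decoded directly with encoding/json. A minimal sketch — the log names and levels here are arbitrary examples, not defaults:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/caddyserver/caddy/v2"
)

func main() {
	// A custom log that only accepts entries from HTTP handler
	// modules, per the Include semantics described above.
	raw := []byte(`{
		"logs": {
			"default":  {"level": "INFO"},
			"handlers": {"level": "DEBUG", "include": ["http.handlers"]}
		}
	}`)
	var logging caddy.Logging
	if err := json.Unmarshal(raw, &logging); err != nil {
		panic(err)
	}
	fmt.Println(logging.Logs["handlers"].Include) // [http.handlers]
}
```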
-func (logging *Logging) openLogs(ctx Context) error { - // make sure to deallocate resources when context is done - ctx.OnCancel(func() { - err := logging.closeLogs() - if err != nil { - Log().Error("closing logs", zap.Error(err)) - } - }) - - // set up the "sink" log first (std lib's default global logger) - if logging.Sink != nil { - err := logging.Sink.provision(ctx, logging) - if err != nil { - return fmt.Errorf("setting up sink log: %v", err) - } - } - - // as a special case, set up the default structured Caddy log next - if err := logging.setupNewDefault(ctx); err != nil { - return err - } - - // then set up any other custom logs - for name, l := range logging.Logs { - // the default log is already set up - if name == "default" { - continue - } - - err := l.provision(ctx, logging) - if err != nil { - return fmt.Errorf("setting up custom log '%s': %v", name, err) - } - - // Any other logs that use the discard writer can be deleted - // entirely. This avoids encoding and processing of each - // log entry that would just be thrown away anyway. Notably, - // we do not reach this point for the default log, which MUST - // exist, otherwise core log emissions would panic because - // they use the Log() function directly which expects a non-nil - // logger. Even if we keep logs with a discard writer, they - // have a nop core, and keeping them at all seems unnecessary. - if _, ok := l.writerOpener.(*DiscardWriter); ok { - delete(logging.Logs, name) - continue - } - } - - return nil -} - -func (logging *Logging) setupNewDefault(ctx Context) error { - if logging.Logs == nil { - logging.Logs = make(map[string]*CustomLog) - } - - // extract the user-defined default log, if any - newDefault := new(defaultCustomLog) - if userDefault, ok := logging.Logs["default"]; ok { - newDefault.CustomLog = userDefault - } else { - // if none, make one with our own default settings - var err error - newDefault, err = newDefaultProductionLog() - if err != nil { - return fmt.Errorf("setting up default Caddy log: %v", err) - } - logging.Logs["default"] = newDefault.CustomLog - } - - // set up this new log - err := newDefault.CustomLog.provision(ctx, logging) - if err != nil { - return fmt.Errorf("setting up default log: %v", err) - } - newDefault.logger = zap.New(newDefault.CustomLog.core) - - // redirect the default caddy logs - defaultLoggerMu.Lock() - oldDefault := defaultLogger - defaultLogger = newDefault - defaultLoggerMu.Unlock() - - // if the new writer is different, indicate it in the logs for convenience - var newDefaultLogWriterKey, currentDefaultLogWriterKey string - var newDefaultLogWriterStr, currentDefaultLogWriterStr string - if newDefault.writerOpener != nil { - newDefaultLogWriterKey = newDefault.writerOpener.WriterKey() - newDefaultLogWriterStr = newDefault.writerOpener.String() - } - if oldDefault.writerOpener != nil { - currentDefaultLogWriterKey = oldDefault.writerOpener.WriterKey() - currentDefaultLogWriterStr = oldDefault.writerOpener.String() - } - if newDefaultLogWriterKey != currentDefaultLogWriterKey { - oldDefault.logger.Info("redirected default logger", - zap.String("from", currentDefaultLogWriterStr), - zap.String("to", newDefaultLogWriterStr), - ) - } - - return nil -} - -// closeLogs cleans up resources allocated during openLogs. -// A successful call to openLogs calls this automatically -// when the context is canceled. 
-func (logging *Logging) closeLogs() error { - for _, key := range logging.writerKeys { - _, err := writers.Delete(key) - if err != nil { - log.Printf("[ERROR] Closing log writer %v: %v", key, err) - } - } - return nil -} - -// Logger returns a logger that is ready for the module to use. -func (logging *Logging) Logger(mod Module) *zap.Logger { - modID := string(mod.CaddyModule().ID) - var cores []zapcore.Core - - if logging != nil { - for _, l := range logging.Logs { - if l.matchesModule(modID) { - if len(l.Include) == 0 && len(l.Exclude) == 0 { - cores = append(cores, l.core) - continue - } - cores = append(cores, &filteringCore{Core: l.core, cl: l}) - } - } - } - - multiCore := zapcore.NewTee(cores...) - - return zap.New(multiCore).Named(modID) -} - -// openWriter opens a writer using opener, and returns true if -// the writer is new, or false if the writer already exists. -func (logging *Logging) openWriter(opener WriterOpener) (io.WriteCloser, bool, error) { - key := opener.WriterKey() - writer, loaded, err := writers.LoadOrNew(key, func() (Destructor, error) { - w, err := opener.OpenWriter() - return writerDestructor{w}, err - }) - if err != nil { - return nil, false, err - } - logging.writerKeys = append(logging.writerKeys, key) - return writer.(io.WriteCloser), !loaded, nil -} - -// WriterOpener is a module that can open a log writer. -// It can return a human-readable string representation -// of itself so that operators can understand where -// the logs are going. -type WriterOpener interface { - fmt.Stringer - - // WriterKey is a string that uniquely identifies this - // writer configuration. It is not shown to humans. - WriterKey() string - - // OpenWriter opens a log for writing. The writer - // should be safe for concurrent use but need not - // be synchronous. - OpenWriter() (io.WriteCloser, error) -} - -type writerDestructor struct { - io.WriteCloser -} - -func (wdest writerDestructor) Destruct() error { - return wdest.Close() -} - -// StandardLibLog configures the default Go standard library -// global logger in the log package. This is necessary because -// module dependencies which are not built specifically for -// Caddy will use the standard logger. This is also known as -// the "sink" logger. -type StandardLibLog struct { - // The module that writes out log entries for the sink. - WriterRaw json.RawMessage `json:"writer,omitempty" caddy:"namespace=caddy.logging.writers inline_key=output"` - - writer io.WriteCloser -} - -func (sll *StandardLibLog) provision(ctx Context, logging *Logging) error { - if sll.WriterRaw != nil { - mod, err := ctx.LoadModule(sll, "WriterRaw") - if err != nil { - return fmt.Errorf("loading sink log writer module: %v", err) - } - wo := mod.(WriterOpener) - - var isNew bool - sll.writer, isNew, err = logging.openWriter(wo) - if err != nil { - return fmt.Errorf("opening sink log writer %#v: %v", mod, err) - } - - if isNew { - log.Printf("[INFO] Redirecting sink to: %s", wo) - log.SetOutput(sll.writer) - log.Printf("[INFO] Redirected sink to here (%s)", wo) - } - } - - return nil -} - -// CustomLog represents a custom logger configuration. -// -// By default, a log will emit all log entries. Some entries -// will be skipped if sampling is enabled. Further, the Include -// and Exclude parameters define which loggers (by name) are -// allowed or rejected from emitting in this log. If both Include -// and Exclude are populated, their values must be mutually -// exclusive, and longer namespaces have priority. 
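Implementing the WriterOpener interface above is mostly boilerplate. Below is a sketch of a hypothetical file-backed opener — the type and its field are invented for illustration; a real Caddy writer module would also implement caddy.Module and register itself under caddy.logging.writers:

```go
package main

import (
	"io"
	"os"

	"github.com/caddyserver/caddy/v2"
)

// fileWriter is a hypothetical WriterOpener, not part of the vendored code.
type fileWriter struct {
	Path string
}

// String is the human-readable form shown in log output.
func (f fileWriter) String() string { return "file:" + f.Path }

// WriterKey dedupes writers: configs that produce the same key share
// one io.WriteCloser via openWriter's usage pool.
func (f fileWriter) WriterKey() string { return "file:" + f.Path }

func (f fileWriter) OpenWriter() (io.WriteCloser, error) {
	return os.OpenFile(f.Path, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o644)
}

// Interface guard, following the convention used in this package.
var _ caddy.WriterOpener = fileWriter{}

func main() {
	w, err := fileWriter{Path: "example.log"}.OpenWriter()
	if err != nil {
		panic(err)
	}
	defer w.Close()
	_, _ = io.WriteString(w, "hello\n")
}
```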
If neither -// are populated, all logs are emitted. -type CustomLog struct { - // The writer defines where log entries are emitted. - WriterRaw json.RawMessage `json:"writer,omitempty" caddy:"namespace=caddy.logging.writers inline_key=output"` - - // The encoder is how the log entries are formatted or encoded. - EncoderRaw json.RawMessage `json:"encoder,omitempty" caddy:"namespace=caddy.logging.encoders inline_key=format"` - - // Level is the minimum level to emit, and is inclusive. - // Possible levels: DEBUG, INFO, WARN, ERROR, PANIC, and FATAL - Level string `json:"level,omitempty"` - - // Sampling configures log entry sampling. If enabled, - // only some log entries will be emitted. This is useful - // for improving performance on extremely high-pressure - // servers. - Sampling *LogSampling `json:"sampling,omitempty"` - - // Include defines the names of loggers to emit in this - // log. For example, to include only logs emitted by the - // admin API, you would include "admin.api". - Include []string `json:"include,omitempty"` - - // Exclude defines the names of loggers that should be - // skipped by this log. For example, to exclude only - // HTTP access logs, you would exclude "http.log.access". - Exclude []string `json:"exclude,omitempty"` - - writerOpener WriterOpener - writer io.WriteCloser - encoder zapcore.Encoder - levelEnabler zapcore.LevelEnabler - core zapcore.Core -} - -func (cl *CustomLog) provision(ctx Context, logging *Logging) error { - // Replace placeholder for log level - repl := NewReplacer() - level, err := repl.ReplaceOrErr(cl.Level, true, true) - if err != nil { - return fmt.Errorf("invalid log level: %v", err) - } - level = strings.ToLower(level) - - // set up the log level - switch level { - case "debug": - cl.levelEnabler = zapcore.DebugLevel - case "", "info": - cl.levelEnabler = zapcore.InfoLevel - case "warn": - cl.levelEnabler = zapcore.WarnLevel - case "error": - cl.levelEnabler = zapcore.ErrorLevel - case "panic": - cl.levelEnabler = zapcore.PanicLevel - case "fatal": - cl.levelEnabler = zapcore.FatalLevel - default: - return fmt.Errorf("unrecognized log level: %s", cl.Level) - } - - // If both Include and Exclude lists are populated, then each item must - // be a superspace or subspace of an item in the other list, because - // populating both lists means that any given item is either a rule - // or an exception to another rule. But if the item is not a super- - // or sub-space of any item in the other list, it is neither a rule - // nor an exception, and is a contradiction. Ensure, too, that the - // sets do not intersect, which is also a contradiction. 
-	if len(cl.Include) > 0 && len(cl.Exclude) > 0 {
-		// prevent intersections
-		for _, allow := range cl.Include {
-			for _, deny := range cl.Exclude {
-				if allow == deny {
-					return fmt.Errorf("include and exclude must not intersect, but found %s in both lists", allow)
-				}
-			}
-		}
-
-		// ensure namespaces are nested
-	outer:
-		for _, allow := range cl.Include {
-			for _, deny := range cl.Exclude {
-				if strings.HasPrefix(allow+".", deny+".") ||
-					strings.HasPrefix(deny+".", allow+".") {
-					continue outer
-				}
-			}
-			return fmt.Errorf("when both include and exclude are populated, each element must be a superspace or subspace of one in the other list; check '%s' in include", allow)
-		}
-	}
-
-	if cl.WriterRaw != nil {
-		mod, err := ctx.LoadModule(cl, "WriterRaw")
-		if err != nil {
-			return fmt.Errorf("loading log writer module: %v", err)
-		}
-		cl.writerOpener = mod.(WriterOpener)
-	}
-	if cl.writerOpener == nil {
-		cl.writerOpener = StderrWriter{}
-	}
-
-	cl.writer, _, err = logging.openWriter(cl.writerOpener)
-	if err != nil {
-		return fmt.Errorf("opening log writer using %#v: %v", cl.writerOpener, err)
-	}
-
-	if cl.EncoderRaw != nil {
-		mod, err := ctx.LoadModule(cl, "EncoderRaw")
-		if err != nil {
-			return fmt.Errorf("loading log encoder module: %v", err)
-		}
-		cl.encoder = mod.(zapcore.Encoder)
-	}
-	if cl.encoder == nil {
-		// only allow colorized output if this log is going to stdout or stderr
-		var colorize bool
-		switch cl.writerOpener.(type) {
-		case StdoutWriter, StderrWriter,
-			*StdoutWriter, *StderrWriter:
-			colorize = true
-		}
-		cl.encoder = newDefaultProductionLogEncoder(colorize)
-	}
-
-	cl.buildCore()
-
-	return nil
-}
-
-func (cl *CustomLog) buildCore() {
-	// logs which only discard their output don't need
-	// to perform encoding or any other processing steps
-	// at all, so just shortcut to a nop core instead
-	if _, ok := cl.writerOpener.(*DiscardWriter); ok {
-		cl.core = zapcore.NewNopCore()
-		return
-	}
-	c := zapcore.NewCore(
-		cl.encoder,
-		zapcore.AddSync(cl.writer),
-		cl.levelEnabler,
-	)
-	if cl.Sampling != nil {
-		if cl.Sampling.Interval == 0 {
-			cl.Sampling.Interval = 1 * time.Second
-		}
-		if cl.Sampling.First == 0 {
-			cl.Sampling.First = 100
-		}
-		if cl.Sampling.Thereafter == 0 {
-			cl.Sampling.Thereafter = 100
-		}
-		c = zapcore.NewSamplerWithOptions(c, cl.Sampling.Interval,
-			cl.Sampling.First, cl.Sampling.Thereafter)
-	}
-	cl.core = c
-}
-
-func (cl *CustomLog) matchesModule(moduleID string) bool {
-	return cl.loggerAllowed(moduleID, true)
-}
-
-// loggerAllowed returns true if name is allowed to emit
-// to cl. isModule should be true if name is the name of
-// a module and you want to see if ANY of that module's
-// logs would be permitted.
-func (cl *CustomLog) loggerAllowed(name string, isModule bool) bool {
-	// accept all loggers by default
-	if len(cl.Include) == 0 && len(cl.Exclude) == 0 {
-		return true
-	}
-
-	// append a dot so that partial names don't match
-	// (i.e. we don't want "foo.b" to match "foo.bar"); we
-	// will also have to append a dot when we do HasPrefix
-	// below to compensate for when namespaces are equal
-	if name != "" && name != "*" && name != "." {
-		name += "."
- } - - var longestAccept, longestReject int - - if len(cl.Include) > 0 { - for _, namespace := range cl.Include { - var hasPrefix bool - if isModule { - hasPrefix = strings.HasPrefix(namespace+".", name) - } else { - hasPrefix = strings.HasPrefix(name, namespace+".") - } - if hasPrefix && len(namespace) > longestAccept { - longestAccept = len(namespace) - } - } - // the include list was populated, meaning that - // a match in this list is absolutely required - // if we are to accept the entry - if longestAccept == 0 { - return false - } - } - - if len(cl.Exclude) > 0 { - for _, namespace := range cl.Exclude { - // * == all logs emitted by modules - // . == all logs emitted by core - if (namespace == "*" && name != ".") || - (namespace == "." && name == ".") { - return false - } - if strings.HasPrefix(name, namespace+".") && - len(namespace) > longestReject { - longestReject = len(namespace) - } - } - // the reject list is populated, so we have to - // reject this entry if its match is better - // than the best from the accept list - if longestReject > longestAccept { - return false - } - } - - return (longestAccept > longestReject) || - (len(cl.Include) == 0 && longestReject == 0) -} - -// filteringCore filters log entries based on logger name, -// according to the rules of a CustomLog. -type filteringCore struct { - zapcore.Core - cl *CustomLog -} - -// With properly wraps With. -func (fc *filteringCore) With(fields []zapcore.Field) zapcore.Core { - return &filteringCore{ - Core: fc.Core.With(fields), - cl: fc.cl, - } -} - -// Check only allows the log entry if its logger name -// is allowed from the include/exclude rules of fc.cl. -func (fc *filteringCore) Check(e zapcore.Entry, ce *zapcore.CheckedEntry) *zapcore.CheckedEntry { - if fc.cl.loggerAllowed(e.LoggerName, false) { - return fc.Core.Check(e, ce) - } - return ce -} - -// LogSampling configures log entry sampling. -type LogSampling struct { - // The window over which to conduct sampling. - Interval time.Duration `json:"interval,omitempty"` - - // Log this many entries within a given level and - // message for each interval. - First int `json:"first,omitempty"` - - // If more entries with the same level and message - // are seen during the same interval, keep one in - // this many entries until the end of the interval. - Thereafter int `json:"thereafter,omitempty"` -} - -type ( - // StdoutWriter writes logs to standard out. - StdoutWriter struct{} - - // StderrWriter writes logs to standard error. - StderrWriter struct{} - - // DiscardWriter discards all writes. - DiscardWriter struct{} -) - -// CaddyModule returns the Caddy module information. -func (StdoutWriter) CaddyModule() ModuleInfo { - return ModuleInfo{ - ID: "caddy.logging.writers.stdout", - New: func() Module { return new(StdoutWriter) }, - } -} - -// CaddyModule returns the Caddy module information. -func (StderrWriter) CaddyModule() ModuleInfo { - return ModuleInfo{ - ID: "caddy.logging.writers.stderr", - New: func() Module { return new(StderrWriter) }, - } -} - -// CaddyModule returns the Caddy module information. -func (DiscardWriter) CaddyModule() ModuleInfo { - return ModuleInfo{ - ID: "caddy.logging.writers.discard", - New: func() Module { return new(DiscardWriter) }, - } -} - -func (StdoutWriter) String() string { return "stdout" } -func (StderrWriter) String() string { return "stderr" } -func (DiscardWriter) String() string { return "discard" } - -// WriterKey returns a unique key representing stdout. 
-func (StdoutWriter) WriterKey() string { return "std:out" } - -// WriterKey returns a unique key representing stderr. -func (StderrWriter) WriterKey() string { return "std:err" } - -// WriterKey returns a unique key representing discard. -func (DiscardWriter) WriterKey() string { return "discard" } - -// OpenWriter returns os.Stdout that can't be closed. -func (StdoutWriter) OpenWriter() (io.WriteCloser, error) { - return notClosable{os.Stdout}, nil -} - -// OpenWriter returns os.Stderr that can't be closed. -func (StderrWriter) OpenWriter() (io.WriteCloser, error) { - return notClosable{os.Stderr}, nil -} - -// OpenWriter returns ioutil.Discard that can't be closed. -func (DiscardWriter) OpenWriter() (io.WriteCloser, error) { - return notClosable{ioutil.Discard}, nil -} - -// notClosable is an io.WriteCloser that can't be closed. -type notClosable struct{ io.Writer } - -func (fc notClosable) Close() error { return nil } - -type defaultCustomLog struct { - *CustomLog - logger *zap.Logger -} - -// newDefaultProductionLog configures a custom log that is -// intended for use by default if no other log is specified -// in a config. It writes to stderr, uses the console encoder, -// and enables INFO-level logs and higher. -func newDefaultProductionLog() (*defaultCustomLog, error) { - cl := new(CustomLog) - cl.writerOpener = StderrWriter{} - var err error - cl.writer, err = cl.writerOpener.OpenWriter() - if err != nil { - return nil, err - } - cl.encoder = newDefaultProductionLogEncoder(true) - cl.levelEnabler = zapcore.InfoLevel - - cl.buildCore() - - return &defaultCustomLog{ - CustomLog: cl, - logger: zap.New(cl.core), - }, nil -} - -func newDefaultProductionLogEncoder(colorize bool) zapcore.Encoder { - encCfg := zap.NewProductionEncoderConfig() - if term.IsTerminal(int(os.Stdout.Fd())) { - // if interactive terminal, make output more human-readable by default - encCfg.EncodeTime = func(ts time.Time, encoder zapcore.PrimitiveArrayEncoder) { - encoder.AppendString(ts.UTC().Format("2006/01/02 15:04:05.000")) - } - if colorize { - encCfg.EncodeLevel = zapcore.CapitalColorLevelEncoder - } - return zapcore.NewConsoleEncoder(encCfg) - } - return zapcore.NewJSONEncoder(encCfg) -} - -// Log returns the current default logger. -func Log() *zap.Logger { - defaultLoggerMu.RLock() - defer defaultLoggerMu.RUnlock() - return defaultLogger.logger -} - -var ( - defaultLogger, _ = newDefaultProductionLog() - defaultLoggerMu sync.RWMutex -) - -var writers = NewUsagePool() - -// Interface guards -var ( - _ io.WriteCloser = (*notClosable)(nil) - _ WriterOpener = (*StdoutWriter)(nil) - _ WriterOpener = (*StderrWriter)(nil) -) diff --git a/vendor/github.com/caddyserver/caddy/v2/metrics.go b/vendor/github.com/caddyserver/caddy/v2/metrics.go deleted file mode 100644 index ab9d7978..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/metrics.go +++ /dev/null @@ -1,78 +0,0 @@ -package caddy - -import ( - "net/http" - "strconv" - "strings" - - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/collectors" - "github.com/prometheus/client_golang/prometheus/promauto" -) - -// define and register the metrics used in this package. 
-func init() { - prometheus.MustRegister(collectors.NewBuildInfoCollector()) - - const ns, sub = "caddy", "admin" - - adminMetrics.requestCount = promauto.NewCounterVec(prometheus.CounterOpts{ - Namespace: ns, - Subsystem: sub, - Name: "http_requests_total", - Help: "Counter of requests made to the Admin API's HTTP endpoints.", - }, []string{"handler", "path", "code", "method"}) - adminMetrics.requestErrors = promauto.NewCounterVec(prometheus.CounterOpts{ - Namespace: ns, - Subsystem: sub, - Name: "http_request_errors_total", - Help: "Number of requests resulting in middleware errors.", - }, []string{"handler", "path", "method"}) -} - -// adminMetrics is a collection of metrics that can be tracked for the admin API. -var adminMetrics = struct { - requestCount *prometheus.CounterVec - requestErrors *prometheus.CounterVec -}{} - -// Similar to promhttp.InstrumentHandlerCounter, but upper-cases method names -// instead of lower-casing them. -// -// Unlike promhttp.InstrumentHandlerCounter, this assumes a "code" and "method" -// label is present, and will panic otherwise. -func instrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler) http.HandlerFunc { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - d := newDelegator(w) - next.ServeHTTP(d, r) - counter.With(prometheus.Labels{ - "code": sanitizeCode(d.status), - "method": strings.ToUpper(r.Method), - }).Inc() - }) -} - -func newDelegator(w http.ResponseWriter) *delegator { - return &delegator{ - ResponseWriter: w, - } -} - -type delegator struct { - http.ResponseWriter - status int -} - -func (d *delegator) WriteHeader(code int) { - d.status = code - d.ResponseWriter.WriteHeader(code) -} - -func sanitizeCode(s int) string { - switch s { - case 0, 200: - return "200" - default: - return strconv.Itoa(s) - } -} diff --git a/vendor/github.com/caddyserver/caddy/v2/modules.go b/vendor/github.com/caddyserver/caddy/v2/modules.go deleted file mode 100644 index 0f4a563b..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/modules.go +++ /dev/null @@ -1,366 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package caddy - -import ( - "bytes" - "encoding/json" - "fmt" - "reflect" - "sort" - "strings" - "sync" -) - -// Module is a type that is used as a Caddy module. In -// addition to this interface, most modules will implement -// some interface expected by their host module in order -// to be useful. To learn which interface(s) to implement, -// see the documentation for the host module. At a bare -// minimum, this interface, when implemented, only provides -// the module's ID and constructor function. -// -// Modules will often implement additional interfaces -// including Provisioner, Validator, and CleanerUpper. -// If a module implements these interfaces, their -// methods are called during the module's lifespan. 
-//
-// When a module is loaded by a host module, the following
-// happens: 1) ModuleInfo.New() is called to get a new
-// instance of the module. 2) The module's configuration is
-// unmarshaled into that instance. 3) If the module is a
-// Provisioner, the Provision() method is called. 4) If the
-// module is a Validator, the Validate() method is called.
-// 5) The module will probably be type-asserted from
-// interface{} to some other, more useful interface expected
-// by the host module. For example, HTTP handler modules are
-// type-asserted as caddyhttp.MiddlewareHandler values.
-// 6) When a module's containing Context is canceled, if it is
-// a CleanerUpper, its Cleanup() method is called.
-type Module interface {
-	// This method indicates that the type is a Caddy
-	// module. The returned ModuleInfo must have both
-	// a name and a constructor function. This method
-	// must not have any side-effects.
-	CaddyModule() ModuleInfo
-}
-
-// ModuleInfo represents a registered Caddy module.
-type ModuleInfo struct {
-	// ID is the "full name" of the module. It
-	// must be unique and properly namespaced.
-	ID ModuleID
-
-	// New returns a pointer to a new, empty
-	// instance of the module's type. This
-	// method must not have any side-effects,
-	// and no other initialization should
-	// occur within it. Any initialization
-	// of the returned value should be done
-	// in a Provision() method (see the
-	// Provisioner interface).
-	New func() Module
-}
-
-// ModuleID is a string that uniquely identifies a Caddy module. A
-// module ID is lightly structured. It consists of dot-separated
-// labels which form a simple hierarchy from left to right. The last
-// label is the module name, and the labels before that constitute
-// the namespace (or scope).
-//
-// Thus, a module ID has the form: <namespace>.<name>
-//
-// An ID with no dot has the empty namespace, which is appropriate
-// for app modules (these are "top-level" modules that Caddy core
-// loads and runs).
-//
-// Module IDs should be lowercase and use underscores (_) instead of
-// spaces.
-//
-// Examples of valid IDs:
-// - http
-// - http.handlers.file_server
-// - caddy.logging.encoders.json
-type ModuleID string
-
-// Namespace returns the namespace (or scope) portion of a module ID,
-// which is all but the last label of the ID. If the ID has only one
-// label, then the namespace is empty.
-func (id ModuleID) Namespace() string {
-	lastDot := strings.LastIndex(string(id), ".")
-	if lastDot < 0 {
-		return ""
-	}
-	return string(id)[:lastDot]
-}
-
-// Name returns the Name (last element) of a module ID.
-func (id ModuleID) Name() string {
-	if id == "" {
-		return ""
-	}
-	parts := strings.Split(string(id), ".")
-	return parts[len(parts)-1]
-}
-
-func (mi ModuleInfo) String() string { return string(mi.ID) }
-
-// ModuleMap is a map that can contain multiple modules,
-// where the map key is the module's name. (The namespace
-// is usually read from an associated field's struct tag.)
-// Because the module's name is given as the key in a
-// module map, the name does not have to be given in the
-// json.RawMessage.
-type ModuleMap map[string]json.RawMessage
-
-// RegisterModule registers a module by receiving a
-// plain/empty value of the module. For registration to
-// be properly recorded, this should be called in the
-// init phase of runtime. Typically, the module package
-// will do this as a side-effect of being imported.
-// This function panics if the module's info is -// incomplete or invalid, or if the module is already -// registered. -func RegisterModule(instance Module) { - mod := instance.CaddyModule() - - if mod.ID == "" { - panic("module ID missing") - } - if mod.ID == "caddy" || mod.ID == "admin" { - panic(fmt.Sprintf("module ID '%s' is reserved", mod.ID)) - } - if mod.New == nil { - panic("missing ModuleInfo.New") - } - if val := mod.New(); val == nil { - panic("ModuleInfo.New must return a non-nil module instance") - } - - modulesMu.Lock() - defer modulesMu.Unlock() - - if _, ok := modules[string(mod.ID)]; ok { - panic(fmt.Sprintf("module already registered: %s", mod.ID)) - } - modules[string(mod.ID)] = mod -} - -// GetModule returns module information from its ID (full name). -func GetModule(name string) (ModuleInfo, error) { - modulesMu.RLock() - defer modulesMu.RUnlock() - m, ok := modules[name] - if !ok { - return ModuleInfo{}, fmt.Errorf("module not registered: %s", name) - } - return m, nil -} - -// GetModuleName returns a module's name (the last label of its ID) -// from an instance of its value. If the value is not a module, an -// empty string will be returned. -func GetModuleName(instance interface{}) string { - var name string - if mod, ok := instance.(Module); ok { - name = mod.CaddyModule().ID.Name() - } - return name -} - -// GetModuleID returns a module's ID from an instance of its value. -// If the value is not a module, an empty string will be returned. -func GetModuleID(instance interface{}) string { - var id string - if mod, ok := instance.(Module); ok { - id = string(mod.CaddyModule().ID) - } - return id -} - -// GetModules returns all modules in the given scope/namespace. -// For example, a scope of "foo" returns modules named "foo.bar", -// "foo.loo", but not "bar", "foo.bar.loo", etc. An empty scope -// returns top-level modules, for example "foo" or "bar". Partial -// scopes are not matched (i.e. scope "foo.ba" does not match -// name "foo.bar"). -// -// Because modules are registered to a map under the hood, the -// returned slice will be sorted to keep it deterministic. -func GetModules(scope string) []ModuleInfo { - modulesMu.RLock() - defer modulesMu.RUnlock() - - scopeParts := strings.Split(scope, ".") - - // handle the special case of an empty scope, which - // should match only the top-level modules - if scope == "" { - scopeParts = []string{} - } - - var mods []ModuleInfo -iterateModules: - for id, m := range modules { - modParts := strings.Split(id, ".") - - // match only the next level of nesting - if len(modParts) != len(scopeParts)+1 { - continue - } - - // specified parts must be exact matches - for i := range scopeParts { - if modParts[i] != scopeParts[i] { - continue iterateModules - } - } - - mods = append(mods, m) - } - - // make return value deterministic - sort.Slice(mods, func(i, j int) bool { - return mods[i].ID < mods[j].ID - }) - - return mods -} - -// Modules returns the names of all registered modules -// in ascending lexicographical order. -func Modules() []string { - modulesMu.RLock() - defer modulesMu.RUnlock() - - names := make([]string, 0, len(modules)) - for name := range modules { - names = append(names, name) - } - - sort.Strings(names) - - return names -} - -// getModuleNameInline loads the string value from raw of moduleNameKey, -// where raw must be a JSON encoding of a map. It returns that value, -// along with the result of removing that key from raw. 
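Tying the registration and lifecycle pieces together, a minimal module might look like the sketch below. The Gizmo type, its ID, and its fields are invented for illustration; registration uses the RegisterModule signature from this vendored version.

```go
package main

import (
	"fmt"

	"github.com/caddyserver/caddy/v2"
)

// Gizmo is a hypothetical module used only to illustrate the
// lifecycle: New -> unmarshal -> Provision -> Validate.
type Gizmo struct {
	Greeting string `json:"greeting,omitempty"`
}

func (Gizmo) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "foo.gizmo", // namespace "foo", name "gizmo"
		New: func() caddy.Module { return new(Gizmo) },
	}
}

// Provision fills in defaults after the config is unmarshaled.
func (g *Gizmo) Provision(ctx caddy.Context) error {
	if g.Greeting == "" {
		g.Greeting = "hello"
	}
	return nil
}

// Validate runs after Provision and rejects bad config.
func (g *Gizmo) Validate() error { return nil }

func init() {
	caddy.RegisterModule(Gizmo{})
}

func main() {
	fmt.Println(caddy.Modules()) // includes "foo.gizmo"
}
```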
-func getModuleNameInline(moduleNameKey string, raw json.RawMessage) (string, json.RawMessage, error) { - var tmp map[string]interface{} - err := json.Unmarshal(raw, &tmp) - if err != nil { - return "", nil, err - } - - moduleName, ok := tmp[moduleNameKey].(string) - if !ok || moduleName == "" { - return "", nil, fmt.Errorf("module name not specified with key '%s' in %+v", moduleNameKey, tmp) - } - - // remove key from the object, otherwise decoding it later - // will yield an error because the struct won't recognize it - // (this is only needed because we strictly enforce that - // all keys are recognized when loading modules) - delete(tmp, moduleNameKey) - result, err := json.Marshal(tmp) - if err != nil { - return "", nil, fmt.Errorf("re-encoding module configuration: %v", err) - } - - return moduleName, result, nil -} - -// Provisioner is implemented by modules which may need to perform -// some additional "setup" steps immediately after being loaded. -// Provisioning should be fast (imperceptible running time). If -// any side-effects result in the execution of this function (e.g. -// creating global state, any other allocations which require -// garbage collection, opening files, starting goroutines etc.), -// be sure to clean up properly by implementing the CleanerUpper -// interface to avoid leaking resources. -type Provisioner interface { - Provision(Context) error -} - -// Validator is implemented by modules which can verify that their -// configurations are valid. This method will be called after -// Provision() (if implemented). Validation should always be fast -// (imperceptible running time) and an error must be returned if -// the module's configuration is invalid. -type Validator interface { - Validate() error -} - -// CleanerUpper is implemented by modules which may have side-effects -// such as opened files, spawned goroutines, or allocated some sort -// of non-stack state when they were provisioned. This method should -// deallocate/cleanup those resources to prevent memory leaks. Cleanup -// should be fast and efficient. Cleanup should work even if Provision -// returns an error, to allow cleaning up from partial provisionings. -type CleanerUpper interface { - Cleanup() error -} - -// ParseStructTag parses a caddy struct tag into its keys and values. -// It is very simple. The expected syntax is: -// `caddy:"key1=val1 key2=val2 ..."` -func ParseStructTag(tag string) (map[string]string, error) { - results := make(map[string]string) - pairs := strings.Split(tag, " ") - for i, pair := range pairs { - if pair == "" { - continue - } - parts := strings.SplitN(pair, "=", 2) - if len(parts) != 2 { - return nil, fmt.Errorf("missing key in '%s' (pair %d)", pair, i) - } - results[parts[0]] = parts[1] - } - return results, nil -} - -// strictUnmarshalJSON is like json.Unmarshal but returns an error -// if any of the fields are unrecognized. Useful when decoding -// module configurations, where you want to be more sure they're -// correct. -func strictUnmarshalJSON(data []byte, v interface{}) error { - dec := json.NewDecoder(bytes.NewReader(data)) - dec.DisallowUnknownFields() - return dec.Decode(v) -} - -// isJSONRawMessage returns true if the type is encoding/json.RawMessage. -func isJSONRawMessage(typ reflect.Type) bool { - return typ.PkgPath() == "encoding/json" && typ.Name() == "RawMessage" -} - -// isModuleMapType returns true if the type is map[string]json.RawMessage. -// It assumes that the string key is the module name, but this is not -// always the case. 
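ParseStructTag above accepts the space-separated key=value syntax used throughout the caddy struct tags in this diff, for example:

```go
package main

import (
	"fmt"

	"github.com/caddyserver/caddy/v2"
)

func main() {
	// The same tag syntax seen on the WriterRaw/EncoderRaw fields above.
	tags, err := caddy.ParseStructTag("namespace=caddy.logging.writers inline_key=output")
	if err != nil {
		panic(err)
	}
	fmt.Println(tags["namespace"], tags["inline_key"])
	// caddy.logging.writers output
}
```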
To know for sure, this function must return true, but -// also the struct tag where this type appears must NOT define an inline_key -// attribute, which would mean that the module names appear inline with the -// values, not in the key. -func isModuleMapType(typ reflect.Type) bool { - return typ.Kind() == reflect.Map && - typ.Key().Kind() == reflect.String && - isJSONRawMessage(typ.Elem()) -} - -var ( - modules = make(map[string]ModuleInfo) - modulesMu sync.RWMutex -) diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/app.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/app.go deleted file mode 100644 index 8285200f..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/app.go +++ /dev/null @@ -1,481 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package caddyhttp - -import ( - "context" - "crypto/tls" - "fmt" - "net" - "net/http" - "strconv" - "time" - - "github.com/caddyserver/caddy/v2" - "github.com/caddyserver/caddy/v2/modules/caddytls" - "github.com/lucas-clemente/quic-go/http3" - "go.uber.org/zap" - "golang.org/x/net/http2" - "golang.org/x/net/http2/h2c" -) - -func init() { - caddy.RegisterModule(App{}) -} - -// App is a robust, production-ready HTTP server. -// -// HTTPS is enabled by default if host matchers with qualifying names are used -// in any of routes; certificates are automatically provisioned and renewed. -// Additionally, automatic HTTPS will also enable HTTPS for servers that listen -// only on the HTTPS port but which do not have any TLS connection policies -// defined by adding a good, default TLS connection policy. -// -// In HTTP routes, additional placeholders are available (replace any `*`): -// -// Placeholder | Description -// ------------|--------------- -// `{http.request.body}` | The request body (âš ï¸ inefficient; use only for debugging) -// `{http.request.cookie.*}` | HTTP request cookie -// `{http.request.duration}` | Time up to now spent handling the request (after decoding headers from client) -// `{http.request.header.*}` | Specific request header field -// `{http.request.host.labels.*}` | Request host labels (0-based from right); e.g. 
for foo.example.com: 0=com, 1=example, 2=foo -// `{http.request.host}` | The host part of the request's Host header -// `{http.request.hostport}` | The host and port from the request's Host header -// `{http.request.method}` | The request method -// `{http.request.orig_method}` | The request's original method -// `{http.request.orig_uri.path.dir}` | The request's original directory -// `{http.request.orig_uri.path.file}` | The request's original filename -// `{http.request.orig_uri.path}` | The request's original path -// `{http.request.orig_uri.query}` | The request's original query string (without `?`) -// `{http.request.orig_uri}` | The request's original URI -// `{http.request.port}` | The port part of the request's Host header -// `{http.request.proto}` | The protocol of the request -// `{http.request.remote.host}` | The host part of the remote client's address -// `{http.request.remote.port}` | The port part of the remote client's address -// `{http.request.remote}` | The address of the remote client -// `{http.request.scheme}` | The request scheme -// `{http.request.tls.version}` | The TLS version name -// `{http.request.tls.cipher_suite}` | The TLS cipher suite -// `{http.request.tls.resumed}` | The TLS connection resumed a previous connection -// `{http.request.tls.proto}` | The negotiated next protocol -// `{http.request.tls.proto_mutual}` | The negotiated next protocol was advertised by the server -// `{http.request.tls.server_name}` | The server name requested by the client, if any -// `{http.request.tls.client.fingerprint}` | The SHA256 checksum of the client certificate -// `{http.request.tls.client.public_key}` | The public key of the client certificate. -// `{http.request.tls.client.public_key_sha256}` | The SHA256 checksum of the client's public key. -// `{http.request.tls.client.certificate_pem}` | The PEM-encoded value of the certificate. -// `{http.request.tls.client.issuer}` | The issuer DN of the client certificate -// `{http.request.tls.client.serial}` | The serial number of the client certificate -// `{http.request.tls.client.subject}` | The subject DN of the client certificate -// `{http.request.tls.client.san.dns_names.*}` | SAN DNS names(index optional) -// `{http.request.tls.client.san.emails.*}` | SAN email addresses (index optional) -// `{http.request.tls.client.san.ips.*}` | SAN IP addresses (index optional) -// `{http.request.tls.client.san.uris.*}` | SAN URIs (index optional) -// `{http.request.uri.path.*}` | Parts of the path, split by `/` (0-based from left) -// `{http.request.uri.path.dir}` | The directory, excluding leaf filename -// `{http.request.uri.path.file}` | The filename of the path, excluding directory -// `{http.request.uri.path}` | The path component of the request URI -// `{http.request.uri.query.*}` | Individual query string value -// `{http.request.uri.query}` | The query string (without `?`) -// `{http.request.uri}` | The full request URI -// `{http.response.header.*}` | Specific response header field -// `{http.vars.*}` | Custom variables in the HTTP handler chain -type App struct { - // HTTPPort specifies the port to use for HTTP (as opposed to HTTPS), - // which is used when setting up HTTP->HTTPS redirects or ACME HTTP - // challenge solvers. Default: 80. - HTTPPort int `json:"http_port,omitempty"` - - // HTTPSPort specifies the port to use for HTTPS, which is used when - // solving the ACME TLS-ALPN challenges, or whenever HTTPS is needed - // but no specific port number is given. Default: 443. 
- HTTPSPort int `json:"https_port,omitempty"` - - // GracePeriod is how long to wait for active connections when shutting - // down the server. Once the grace period is over, connections will - // be forcefully closed. - GracePeriod caddy.Duration `json:"grace_period,omitempty"` - - // Servers is the list of servers, keyed by arbitrary names chosen - // at your discretion for your own convenience; the keys do not - // affect functionality. - Servers map[string]*Server `json:"servers,omitempty"` - - servers []*http.Server - h3servers []*http3.Server - h3listeners []net.PacketConn - - ctx caddy.Context - logger *zap.Logger - tlsApp *caddytls.TLS - - // used temporarily between phases 1 and 2 of auto HTTPS - allCertDomains []string -} - -// CaddyModule returns the Caddy module information. -func (App) CaddyModule() caddy.ModuleInfo { - return caddy.ModuleInfo{ - ID: "http", - New: func() caddy.Module { return new(App) }, - } -} - -// Provision sets up the app. -func (app *App) Provision(ctx caddy.Context) error { - // store some references - tlsAppIface, err := ctx.App("tls") - if err != nil { - return fmt.Errorf("getting tls app: %v", err) - } - app.tlsApp = tlsAppIface.(*caddytls.TLS) - app.ctx = ctx - app.logger = ctx.Logger(app) - - repl := caddy.NewReplacer() - - // this provisions the matchers for each route, - // and prepares auto HTTP->HTTPS redirects, and - // is required before we provision each server - err = app.automaticHTTPSPhase1(ctx, repl) - if err != nil { - return err - } - - // prepare each server - for srvName, srv := range app.Servers { - srv.name = srvName - srv.tlsApp = app.tlsApp - srv.logger = app.logger.Named("log") - srv.errorLogger = app.logger.Named("log.error") - - // only enable access logs if configured - if srv.Logs != nil { - srv.accessLogger = app.logger.Named("log.access") - } - - // if not explicitly configured by the user, disallow TLS - // client auth bypass (domain fronting) which could - // otherwise be exploited by sending an unprotected SNI - // value during a TLS handshake, then putting a protected - // domain in the Host header after establishing connection; - // this is a safe default, but we allow users to override - // it for example in the case of running a proxy where - // domain fronting is desired and access is not restricted - // based on hostname - if srv.StrictSNIHost == nil && srv.hasTLSClientAuth() { - app.logger.Warn("enabling strict SNI-Host enforcement because TLS client auth is configured", - zap.String("server_id", srvName), - ) - trueBool := true - srv.StrictSNIHost = &trueBool - } - - // process each listener address - for i := range srv.Listen { - lnOut, err := repl.ReplaceOrErr(srv.Listen[i], true, true) - if err != nil { - return fmt.Errorf("server %s, listener %d: %v", - srvName, i, err) - } - srv.Listen[i] = lnOut - } - - // set up each listener modifier - if srv.ListenerWrappersRaw != nil { - vals, err := ctx.LoadModule(srv, "ListenerWrappersRaw") - if err != nil { - return fmt.Errorf("loading listener wrapper modules: %v", err) - } - var hasTLSPlaceholder bool - for i, val := range vals.([]interface{}) { - if _, ok := val.(*tlsPlaceholderWrapper); ok { - if i == 0 { - // putting the tls placeholder wrapper first is nonsensical because - // that is the default, implicit setting: without it, all wrappers - // will go after the TLS listener anyway - return fmt.Errorf("it is unnecessary to specify the TLS listener wrapper in the first position because that is the default") - } - if hasTLSPlaceholder { - return fmt.Errorf("TLS 
listener wrapper can only be specified once") - } - hasTLSPlaceholder = true - } - srv.listenerWrappers = append(srv.listenerWrappers, val.(caddy.ListenerWrapper)) - } - // if any wrappers were configured but the TLS placeholder wrapper is - // absent, prepend it so all defined wrappers come after the TLS - // handshake; this simplifies logic when starting the server, since we - // can simply assume the TLS placeholder will always be there - if !hasTLSPlaceholder && len(srv.listenerWrappers) > 0 { - srv.listenerWrappers = append([]caddy.ListenerWrapper{new(tlsPlaceholderWrapper)}, srv.listenerWrappers...) - } - } - - // pre-compile the primary handler chain, and be sure to wrap it in our - // route handler so that important security checks are done, etc. - primaryRoute := emptyHandler - if srv.Routes != nil { - err := srv.Routes.ProvisionHandlers(ctx) - if err != nil { - return fmt.Errorf("server %s: setting up route handlers: %v", srvName, err) - } - primaryRoute = srv.Routes.Compile(emptyHandler) - } - srv.primaryHandlerChain = srv.wrapPrimaryRoute(primaryRoute) - - // pre-compile the error handler chain - if srv.Errors != nil { - err := srv.Errors.Routes.Provision(ctx) - if err != nil { - return fmt.Errorf("server %s: setting up server error handling routes: %v", srvName, err) - } - srv.errorHandlerChain = srv.Errors.Routes.Compile(errorEmptyHandler) - } - - // prepare the TLS connection policies - err = srv.TLSConnPolicies.Provision(ctx) - if err != nil { - return fmt.Errorf("server %s: setting up TLS connection policies: %v", srvName, err) - } - - // if there is no idle timeout, set a sane default; users have complained - // before that aggressive CDNs leave connections open until the server - // closes them, so if we don't close them it leads to resource exhaustion - if srv.IdleTimeout == 0 { - srv.IdleTimeout = defaultIdleTimeout - } - } - - return nil -} - -// Validate ensures the app's configuration is valid. -func (app *App) Validate() error { - // each server must use distinct listener addresses - lnAddrs := make(map[string]string) - for srvName, srv := range app.Servers { - for _, addr := range srv.Listen { - listenAddr, err := caddy.ParseNetworkAddress(addr) - if err != nil { - return fmt.Errorf("invalid listener address '%s': %v", addr, err) - } - // check that every address in the port range is unique to this server; - // we do not use <= here because PortRangeSize() adds 1 to EndPort for us - for i := uint(0); i < listenAddr.PortRangeSize(); i++ { - addr := caddy.JoinNetworkAddress(listenAddr.Network, listenAddr.Host, strconv.Itoa(int(listenAddr.StartPort+i))) - if sn, ok := lnAddrs[addr]; ok { - return fmt.Errorf("server %s: listener address repeated: %s (already claimed by server '%s')", srvName, addr, sn) - } - lnAddrs[addr] = srvName - } - } - } - return nil -} - -// Start runs the app. It finishes automatic HTTPS if enabled, -// including management of certificates. 
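Before Start, an aside on the Validate logic just above: the uniqueness check walks every port in every listener's range and records an owner for each concrete address. A standalone sketch of that technique, with stand-in types rather than the module's API (listener and checkDistinct are made up for illustration):

package main

import "fmt"

type listener struct {
	host      string
	startPort uint
	endPort   uint
}

// checkDistinct mirrors the Validate rule above: every port in a
// listener's range must be claimed by at most one server.
func checkDistinct(servers map[string][]listener) error {
	claimed := make(map[string]string) // "host:port" -> owning server name
	for name, lns := range servers {
		for _, ln := range lns {
			for p := ln.startPort; p <= ln.endPort; p++ {
				addr := fmt.Sprintf("%s:%d", ln.host, p)
				if owner, ok := claimed[addr]; ok {
					return fmt.Errorf("server %s: listener address repeated: %s (already claimed by server '%s')", name, addr, owner)
				}
				claimed[addr] = name
			}
		}
	}
	return nil
}

func main() {
	err := checkDistinct(map[string][]listener{
		"srv0": {{host: "localhost", startPort: 8080, endPort: 8083}},
		"srv1": {{host: "localhost", startPort: 8083, endPort: 8083}}, // overlaps srv0's range
	})
	fmt.Println(err) // reports localhost:8083 as claimed twice
}

Because Go map iteration order is unspecified, which of the two overlapping servers gets named in the error can vary from run to run, just as in the real Validate.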
-func (app *App) Start() error { - // get a logger compatible with http.Server - serverLogger, err := zap.NewStdLogAt(app.logger.Named("stdlib"), zap.DebugLevel) - if err != nil { - return fmt.Errorf("failed to set up server logger: %v", err) - } - - for srvName, srv := range app.Servers { - s := &http.Server{ - ReadTimeout: time.Duration(srv.ReadTimeout), - ReadHeaderTimeout: time.Duration(srv.ReadHeaderTimeout), - WriteTimeout: time.Duration(srv.WriteTimeout), - IdleTimeout: time.Duration(srv.IdleTimeout), - MaxHeaderBytes: srv.MaxHeaderBytes, - Handler: srv, - ErrorLog: serverLogger, - } - - // enable h2c if configured - if srv.AllowH2C { - h2server := &http2.Server{ - IdleTimeout: time.Duration(srv.IdleTimeout), - } - s.Handler = h2c.NewHandler(srv, h2server) - } - - for _, lnAddr := range srv.Listen { - listenAddr, err := caddy.ParseNetworkAddress(lnAddr) - if err != nil { - return fmt.Errorf("%s: parsing listen address '%s': %v", srvName, lnAddr, err) - } - for portOffset := uint(0); portOffset < listenAddr.PortRangeSize(); portOffset++ { - // create the listener for this socket - hostport := listenAddr.JoinHostPort(portOffset) - ln, err := caddy.Listen(listenAddr.Network, hostport) - if err != nil { - return fmt.Errorf("%s: listening on %s: %v", listenAddr.Network, hostport, err) - } - - // wrap listener before TLS (up to the TLS placeholder wrapper) - var lnWrapperIdx int - for i, lnWrapper := range srv.listenerWrappers { - if _, ok := lnWrapper.(*tlsPlaceholderWrapper); ok { - lnWrapperIdx = i + 1 // mark the next wrapper's spot - break - } - ln = lnWrapper.WrapListener(ln) - } - - // enable TLS if there is a policy and if this is not the HTTP port - useTLS := len(srv.TLSConnPolicies) > 0 && int(listenAddr.StartPort+portOffset) != app.httpPort() - if useTLS { - // create TLS listener - tlsCfg := srv.TLSConnPolicies.TLSConfig(app.ctx) - ln = tls.NewListener(ln, tlsCfg) - - ///////// - // TODO: HTTP/3 support is experimental for now - if srv.ExperimentalHTTP3 { - app.logger.Info("enabling experimental HTTP/3 listener", - zap.String("addr", hostport), - ) - h3ln, err := caddy.ListenPacket("udp", hostport) - if err != nil { - return fmt.Errorf("getting HTTP/3 UDP listener: %v", err) - } - h3srv := &http3.Server{ - Server: &http.Server{ - Addr: hostport, - Handler: srv, - TLSConfig: tlsCfg, - ErrorLog: serverLogger, - }, - } - //nolint:errcheck - go h3srv.Serve(h3ln) - app.h3servers = append(app.h3servers, h3srv) - app.h3listeners = append(app.h3listeners, h3ln) - srv.h3server = h3srv - } - ///////// - } - - // finish wrapping listener where we left off before TLS - for i := lnWrapperIdx; i < len(srv.listenerWrappers); i++ { - ln = srv.listenerWrappers[i].WrapListener(ln) - } - - // if binding to port 0, the OS chooses a port for us; - // but the user won't know the port unless we print it - if listenAddr.StartPort == 0 && listenAddr.EndPort == 0 { - app.logger.Info("port 0 listener", - zap.String("input_address", lnAddr), - zap.String("actual_address", ln.Addr().String()), - ) - } - - app.logger.Debug("starting server loop", - zap.String("address", ln.Addr().String()), - zap.Bool("http3", srv.ExperimentalHTTP3), - zap.Bool("tls", useTLS), - ) - - //nolint:errcheck - go s.Serve(ln) - app.servers = append(app.servers, s) - } - } - } - - // finish automatic HTTPS by finally beginning - // certificate management - err = app.automaticHTTPSPhase2() - if err != nil { - return fmt.Errorf("finalizing automatic HTTPS: %v", err) - } - - return nil -} - -// Stop gracefully shuts down the HTTP 
server. -func (app *App) Stop() error { - ctx := context.Background() - if app.GracePeriod > 0 { - var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, time.Duration(app.GracePeriod)) - defer cancel() - } - for _, s := range app.servers { - err := s.Shutdown(ctx) - if err != nil { - return err - } - } - - // close the http3 servers; it's unclear whether the bug reported in - // https://github.com/caddyserver/caddy/pull/2727#issuecomment-526856566 - // was ever truly fixed, since it seemed racey/nondeterministic; but - // recent tests in 2020 were unable to replicate the issue again after - // repeated attempts (the bug manifested after a config reload; i.e. - // reusing a http3 server or listener was problematic), but it seems - // to be working fine now - for _, s := range app.h3servers { - // TODO: CloseGracefully, once implemented upstream - // (see https://github.com/lucas-clemente/quic-go/issues/2103) - err := s.Close() - if err != nil { - return err - } - } - - // closing an http3.Server does not close their underlying listeners - // since apparently the listener can be used both by servers and - // clients at the same time; so we need to manually call Close() - // on the underlying h3 listeners (see lucas-clemente/quic-go#2103) - for _, pc := range app.h3listeners { - err := pc.Close() - if err != nil { - return err - } - } - return nil -} - -func (app *App) httpPort() int { - if app.HTTPPort == 0 { - return DefaultHTTPPort - } - return app.HTTPPort -} - -func (app *App) httpsPort() int { - if app.HTTPSPort == 0 { - return DefaultHTTPSPort - } - return app.HTTPSPort -} - -// defaultIdleTimeout is the default HTTP server timeout -// for closing idle connections; useful to avoid resource -// exhaustion behind hungry CDNs, for example (we've had -// several complaints without this). -const defaultIdleTimeout = caddy.Duration(5 * time.Minute) - -// Interface guards -var ( - _ caddy.App = (*App)(nil) - _ caddy.Provisioner = (*App)(nil) - _ caddy.Validator = (*App)(nil) -) diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/autohttps.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/autohttps.go deleted file mode 100644 index da4428db..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/autohttps.go +++ /dev/null @@ -1,656 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package caddyhttp - -import ( - "fmt" - "net/http" - "strconv" - "strings" - - "github.com/caddyserver/caddy/v2" - "github.com/caddyserver/caddy/v2/modules/caddytls" - "github.com/caddyserver/certmagic" - "go.uber.org/zap" -) - -// AutoHTTPSConfig is used to disable automatic HTTPS -// or certain aspects of it for a specific server. -// HTTPS is enabled automatically and by default when -// qualifying hostnames are available from the config. -type AutoHTTPSConfig struct { - // If true, automatic HTTPS will be entirely disabled. 
- Disabled bool `json:"disable,omitempty"` - - // If true, only automatic HTTP->HTTPS redirects will - // be disabled. - DisableRedir bool `json:"disable_redirects,omitempty"` - - // Hosts/domain names listed here will not be included - // in automatic HTTPS (they will not have certificates - // loaded nor redirects applied). - Skip []string `json:"skip,omitempty"` - - // Hosts/domain names listed here will still be enabled - // for automatic HTTPS (unless in the Skip list), except - // that certificates will not be provisioned and managed - // for these names. - SkipCerts []string `json:"skip_certificates,omitempty"` - - // By default, automatic HTTPS will obtain and renew - // certificates for qualifying hostnames. However, if - // a certificate with a matching SAN is already loaded - // into the cache, certificate management will not be - // enabled. To force automated certificate management - // regardless of loaded certificates, set this to true. - IgnoreLoadedCerts bool `json:"ignore_loaded_certificates,omitempty"` -} - -// Skipped returns true if name is in skipSlice, which -// should be either the Skip or SkipCerts field on ahc. -func (ahc AutoHTTPSConfig) Skipped(name string, skipSlice []string) bool { - for _, n := range skipSlice { - if name == n { - return true - } - } - return false -} - -// automaticHTTPSPhase1 provisions all route matchers, determines -// which domain names found in the routes qualify for automatic -// HTTPS, and sets up HTTP->HTTPS redirects. This phase must occur -// at the beginning of provisioning, because it may add routes and -// even servers to the app, which still need to be set up with the -// rest of them during provisioning. -func (app *App) automaticHTTPSPhase1(ctx caddy.Context, repl *caddy.Replacer) error { - // this map acts as a set to store the domain names - // for which we will manage certificates automatically - uniqueDomainsForCerts := make(map[string]struct{}) - - // this maps domain names for automatic HTTP->HTTPS - // redirects to their destination server addresses - // (there might be more than 1 if bind is used; see - // https://github.com/caddyserver/caddy/issues/3443) - redirDomains := make(map[string][]caddy.NetworkAddress) - - for srvName, srv := range app.Servers { - // as a prerequisite, provision route matchers; this is - // required for all routes on all servers, and must be - // done before we attempt to do phase 1 of auto HTTPS, - // since we have to access the decoded host matchers the - // handlers will be provisioned later - if srv.Routes != nil { - err := srv.Routes.ProvisionMatchers(ctx) - if err != nil { - return fmt.Errorf("server %s: setting up route matchers: %v", srvName, err) - } - } - - // prepare for automatic HTTPS - if srv.AutoHTTPS == nil { - srv.AutoHTTPS = new(AutoHTTPSConfig) - } - if srv.AutoHTTPS.Disabled { - continue - } - - // skip if all listeners use the HTTP port - if !srv.listenersUseAnyPortOtherThan(app.httpPort()) { - app.logger.Info("server is listening only on the HTTP port, so no automatic HTTPS will be applied to this server", - zap.String("server_name", srvName), - zap.Int("http_port", app.httpPort()), - ) - srv.AutoHTTPS.Disabled = true - continue - } - - // if all listeners are on the HTTPS port, make sure - // there is at least one TLS connection policy; it - // should be obvious that they want to use TLS without - // needing to specify one empty policy to enable it - if srv.TLSConnPolicies == nil && - !srv.listenersUseAnyPortOtherThan(app.httpsPort()) { - app.logger.Info("server is 
listening only on the HTTPS port but has no TLS connection policies; adding one to enable TLS", - zap.String("server_name", srvName), - zap.Int("https_port", app.httpsPort()), - ) - srv.TLSConnPolicies = caddytls.ConnectionPolicies{new(caddytls.ConnectionPolicy)} - } - - // find all qualifying domain names (deduplicated) in this server - // (this is where we need the provisioned, decoded request matchers) - serverDomainSet := make(map[string]struct{}) - for routeIdx, route := range srv.Routes { - for matcherSetIdx, matcherSet := range route.MatcherSets { - for matcherIdx, m := range matcherSet { - if hm, ok := m.(*MatchHost); ok { - for hostMatcherIdx, d := range *hm { - var err error - d, err = repl.ReplaceOrErr(d, true, false) - if err != nil { - return fmt.Errorf("%s: route %d, matcher set %d, matcher %d, host matcher %d: %v", - srvName, routeIdx, matcherSetIdx, matcherIdx, hostMatcherIdx, err) - } - if !srv.AutoHTTPS.Skipped(d, srv.AutoHTTPS.Skip) { - serverDomainSet[d] = struct{}{} - } - } - } - } - } - } - - // nothing more to do here if there are no domains that qualify for - // automatic HTTPS and there are no explicit TLS connection policies: - // if there is at least one domain but no TLS conn policy (F&&T), we'll - // add one below; if there are no domains but at least one TLS conn - // policy (meaning TLS is enabled) (T&&F), it could be a catch-all with - // on-demand TLS -- and in that case we would still need HTTP->HTTPS - // redirects, which we set up below; hence these two conditions - if len(serverDomainSet) == 0 && len(srv.TLSConnPolicies) == 0 { - continue - } - - // for all the hostnames we found, filter them so we have - // a deduplicated list of names for which to obtain certs - for d := range serverDomainSet { - if certmagic.SubjectQualifiesForCert(d) && - !srv.AutoHTTPS.Skipped(d, srv.AutoHTTPS.SkipCerts) { - // if a certificate for this name is already loaded, - // don't obtain another one for it, unless we are - // supposed to ignore loaded certificates - if !srv.AutoHTTPS.IgnoreLoadedCerts && - len(app.tlsApp.AllMatchingCertificates(d)) > 0 { - app.logger.Info("skipping automatic certificate management because one or more matching certificates are already loaded", - zap.String("domain", d), - zap.String("server_name", srvName), - ) - continue - } - - // most clients don't accept wildcards like *.tld... we - // can handle that, but as a courtesy, warn the user - if strings.Contains(d, "*") && - strings.Count(strings.Trim(d, "."), ".") == 1 { - app.logger.Warn("most clients do not trust second-level wildcard certificates (*.tld)", - zap.String("domain", d)) - } - - uniqueDomainsForCerts[d] = struct{}{} - } - } - - // tell the server to use TLS if it is not already doing so - if srv.TLSConnPolicies == nil { - srv.TLSConnPolicies = caddytls.ConnectionPolicies{new(caddytls.ConnectionPolicy)} - } - - // nothing left to do if auto redirects are disabled - if srv.AutoHTTPS.DisableRedir { - continue - } - - app.logger.Info("enabling automatic HTTP->HTTPS redirects", - zap.String("server_name", srvName), - ) - - // create HTTP->HTTPS redirects - for _, addr := range srv.Listen { - // figure out the address we will redirect to... - addr, err := caddy.ParseNetworkAddress(addr) - if err != nil { - return fmt.Errorf("%s: invalid listener address: %v", srvName, addr) - } - - // this address might not have a hostname, i.e. might be a - // catch-all address for a particular port; we need to keep - // track if it is, so we can set up redirects for it anyway - // (e.g. 
the user might have enabled on-demand TLS); we use - // an empty string to indicate a catch-all, which we have to - // treat special later - if len(serverDomainSet) == 0 { - redirDomains[""] = append(redirDomains[""], addr) - continue - } - - // ...and associate it with each domain in this server - for d := range serverDomainSet { - // if this domain is used on more than one HTTPS-enabled - // port, we'll have to choose one, so prefer the HTTPS port - if _, ok := redirDomains[d]; !ok || - addr.StartPort == uint(app.httpsPort()) { - redirDomains[d] = append(redirDomains[d], addr) - } - } - } - } - - // we now have a list of all the unique names for which we need certs; - // turn the set into a slice so that phase 2 can use it - app.allCertDomains = make([]string, 0, len(uniqueDomainsForCerts)) - var internal []string -uniqueDomainsLoop: - for d := range uniqueDomainsForCerts { - // whether or not there is already an automation policy for this - // name, we should add it to the list to manage a cert for it - app.allCertDomains = append(app.allCertDomains, d) - - // some names we've found might already have automation policies - // explicitly specified for them; we should exclude those from - // our hidden/implicit policy, since applying a name to more than - // one automation policy would be confusing and an error - if app.tlsApp.Automation != nil { - for _, ap := range app.tlsApp.Automation.Policies { - for _, apHost := range ap.Subjects { - if apHost == d { - continue uniqueDomainsLoop - } - } - } - } - - // if no automation policy exists for the name yet, we - // will associate it with an implicit one - if !certmagic.SubjectQualifiesForPublicCert(d) { - internal = append(internal, d) - } - } - - // ensure there is an automation policy to handle these certs - err := app.createAutomationPolicies(ctx, internal) - if err != nil { - return err - } - - // we need to reduce the mapping, i.e. 
group domains by address - // since new routes are appended to servers by their address - domainsByAddr := make(map[string][]string) - for domain, addrs := range redirDomains { - for _, addr := range addrs { - addrStr := addr.String() - domainsByAddr[addrStr] = append(domainsByAddr[addrStr], domain) - } - } - - // these keep track of the redirect server address(es) - // and the routes for those servers which actually - // respond with the redirects - redirServerAddrs := make(map[string]struct{}) - redirServers := make(map[string][]Route) - var redirRoutes RouteList - - for addrStr, domains := range domainsByAddr { - // build the matcher set for this redirect route; (note that we happen - // to bypass Provision and Validate steps for these matcher modules) - matcherSet := MatcherSet{MatchProtocol("http")} - // match on known domain names, unless it's our special case of a - // catch-all which is an empty string (common among catch-all sites - // that enable on-demand TLS for yet-unknown domain names) - if !(len(domains) == 1 && domains[0] == "") { - matcherSet = append(matcherSet, MatchHost(domains)) - } - - addr, err := caddy.ParseNetworkAddress(addrStr) - if err != nil { - return err - } - redirRoute := app.makeRedirRoute(addr.StartPort, matcherSet) - - // use the network/host information from the address, - // but change the port to the HTTP port then rebuild - redirAddr := addr - redirAddr.StartPort = uint(app.httpPort()) - redirAddr.EndPort = redirAddr.StartPort - redirAddrStr := redirAddr.String() - - redirServers[redirAddrStr] = append(redirServers[redirAddrStr], redirRoute) - } - - // on-demand TLS means that hostnames may be used which are not - // explicitly defined in the config, and we still need to redirect - // those; so we can append a single catch-all route (notice there - // is no Host matcher) after the other redirect routes which will - // allow us to handle unexpected/new hostnames... however, it's - // not entirely clear what the redirect destination should be, - // so I'm going to just hard-code the app's HTTPS port and call - // it good for now... - // TODO: This implies that all plaintext requests will be blindly - // redirected to their HTTPS equivalent, even if this server - // doesn't handle that hostname at all; I don't think this is a - // bad thing, and it also obscures the actual hostnames that this - // server is configured to match on, which may be desirable, but - // it's not something that should be relied on. We can change this - // if we want to. 
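The catch-all behavior described in the comment above amounts to: answer every plaintext request with a 308 to the HTTPS equivalent of the same host and URI. A minimal stdlib-only sketch of that redirect (this is not Caddy's route machinery, and port handling is simplified):

package main

import "net/http"

func main() {
	redirect := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Caddy's placeholder form is "https://{http.request.host}{http.request.uri}";
		// r.Host and r.URL.RequestURI() are the rough stdlib equivalents.
		http.Redirect(w, r, "https://"+r.Host+r.URL.RequestURI(), http.StatusPermanentRedirect)
	})
	// ignore the returned error for brevity
	_ = http.ListenAndServe(":80", redirect)
}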
- appendCatchAll := func(routes []Route) []Route { - return append(routes, app.makeRedirRoute(uint(app.httpsPort()), MatcherSet{MatchProtocol("http")})) - } - -redirServersLoop: - for redirServerAddr, routes := range redirServers { - // for each redirect listener, see if there's already a - // server configured to listen on that exact address; if so, - // insert the redirect route to the end of its route list - // after any other routes with host matchers; otherwise, - // we'll create a new server for all the listener addresses - // that are unused and serve the remaining redirects from it - for _, srv := range app.Servers { - if srv.hasListenerAddress(redirServerAddr) { - // find the index of the route after the last route with a host - // matcher, then insert the redirects there, but before any - // user-defined catch-all routes - // see https://github.com/caddyserver/caddy/issues/3212 - insertIndex := srv.findLastRouteWithHostMatcher() - srv.Routes = append(srv.Routes[:insertIndex], append(routes, srv.Routes[insertIndex:]...)...) - - // append our catch-all route in case the user didn't define their own - srv.Routes = appendCatchAll(srv.Routes) - - continue redirServersLoop - } - } - - // no server with this listener address exists; - // save this address and route for custom server - redirServerAddrs[redirServerAddr] = struct{}{} - redirRoutes = append(redirRoutes, routes...) - } - - // if there are routes remaining which do not belong - // in any existing server, make our own to serve the - // rest of the redirects - if len(redirServerAddrs) > 0 { - redirServerAddrsList := make([]string, 0, len(redirServerAddrs)) - for a := range redirServerAddrs { - redirServerAddrsList = append(redirServerAddrsList, a) - } - app.Servers["remaining_auto_https_redirects"] = &Server{ - Listen: redirServerAddrsList, - Routes: appendCatchAll(redirRoutes), - } - } - - return nil -} - -func (app *App) makeRedirRoute(redirToPort uint, matcherSet MatcherSet) Route { - redirTo := "https://{http.request.host}" - - // since this is an external redirect, we should only append an explicit - // port if we know it is not the officially standardized HTTPS port, and, - // notably, also not the port that Caddy thinks is the HTTPS port (the - // configurable HTTPSPort parameter) - we can't change the standard HTTPS - // port externally, so that config parameter is for internal use only; - // we also do not append the port if it happens to be the HTTP port as - // well, obviously (for example, user defines the HTTP port explicitly - // in the list of listen addresses for a server) - if redirToPort != uint(app.httpPort()) && - redirToPort != uint(app.httpsPort()) && - redirToPort != DefaultHTTPPort && - redirToPort != DefaultHTTPSPort { - redirTo += ":" + strconv.Itoa(int(redirToPort)) - } - - redirTo += "{http.request.uri}" - return Route{ - MatcherSets: []MatcherSet{matcherSet}, - Handlers: []MiddlewareHandler{ - StaticResponse{ - StatusCode: WeakString(strconv.Itoa(http.StatusPermanentRedirect)), - Headers: http.Header{ - "Location": []string{redirTo}, - }, - Close: true, - }, - }, - } -} - -// createAutomationPolicy ensures that automated certificates for this -// app are managed properly. This adds up to two automation policies: -// one for the public names, and one for the internal names. If a catch-all -// automation policy exists, it will be shallow-copied and used as the -// base for the new ones (this is important for preserving behavior the -// user intends to be "defaults"). 
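The "shallow-copied and used as the base" technique that the comment above refers to looks like this in isolation (policy is a stand-in type, not the real caddytls.AutomationPolicy):

package main

import "fmt"

type policy struct {
	subjects []string
	issuer   string
}

func main() {
	base := &policy{issuer: "acme"} // catch-all: no subjects

	// shallow-copy the base so the new policy inherits every field,
	// then override only what must differ
	policyCopy := *base
	newPolicy := &policyCopy
	newPolicy.subjects = []string{"localhost"}
	newPolicy.issuer = "internal"

	fmt.Println(base.issuer, newPolicy.issuer) // acme internal

	// caveat: a shallow copy shares slice/map backing storage with the
	// base, so replace such fields wholesale rather than mutating them
}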
-func (app *App) createAutomationPolicies(ctx caddy.Context, internalNames []string) error { - // before we begin, loop through the existing automation policies - // and, for any ACMEIssuers we find, make sure they're filled in - // with default values that might be specified in our HTTP app; also - // look for a base (or "catch-all" / default) automation policy, - // which we're going to essentially require, to make sure it has - // those defaults, too - var basePolicy *caddytls.AutomationPolicy - var foundBasePolicy bool - if app.tlsApp.Automation == nil { - // we will expect this to not be nil from now on - app.tlsApp.Automation = new(caddytls.AutomationConfig) - } - for _, ap := range app.tlsApp.Automation.Policies { - // set up default issuer -- honestly, this is only - // really necessary because the HTTP app is opinionated - // and has settings which could be inferred as new - // defaults for the ACMEIssuer in the TLS app (such as - // what the HTTP and HTTPS ports are) - if ap.Issuers == nil { - var err error - ap.Issuers, err = caddytls.DefaultIssuersProvisioned(ctx) - if err != nil { - return err - } - } - for _, iss := range ap.Issuers { - if acmeIssuer, ok := iss.(acmeCapable); ok { - err := app.fillInACMEIssuer(acmeIssuer.GetACMEIssuer()) - if err != nil { - return err - } - } - } - - // while we're here, is this the catch-all/base policy? - if !foundBasePolicy && len(ap.Subjects) == 0 { - basePolicy = ap - foundBasePolicy = true - } - } - - if basePolicy == nil { - // no base policy found, we will make one! - basePolicy = new(caddytls.AutomationPolicy) - } - - // if the basePolicy has an existing ACMEIssuer (particularly to - // include any type that embeds/wraps an ACMEIssuer), let's use it - // (I guess we just use the first one?), otherwise we'll make one - var baseACMEIssuer *caddytls.ACMEIssuer - for _, iss := range basePolicy.Issuers { - if acmeWrapper, ok := iss.(acmeCapable); ok { - baseACMEIssuer = acmeWrapper.GetACMEIssuer() - break - } - } - if baseACMEIssuer == nil { - // note that this happens if basePolicy.Issuer is nil - // OR if it is not nil but is not an ACMEIssuer - baseACMEIssuer = new(caddytls.ACMEIssuer) - } - - // if there was a base policy to begin with, we already - // filled in its issuer's defaults; if there wasn't, we - // still need to do that - if !foundBasePolicy { - err := app.fillInACMEIssuer(baseACMEIssuer) - if err != nil { - return err - } - } - - // never overwrite any other issuer that might already be configured - if basePolicy.Issuers == nil { - var err error - basePolicy.Issuers, err = caddytls.DefaultIssuersProvisioned(ctx) - if err != nil { - return err - } - for _, iss := range basePolicy.Issuers { - if acmeIssuer, ok := iss.(acmeCapable); ok { - err := app.fillInACMEIssuer(acmeIssuer.GetACMEIssuer()) - if err != nil { - return err - } - } - } - } - - if !foundBasePolicy { - // there was no base policy to begin with, so add - // our base/catch-all policy - this will serve the - // public-looking names as well as any other names - // that don't match any other policy - err := app.tlsApp.AddAutomationPolicy(basePolicy) - if err != nil { - return err - } - } else { - // a base policy already existed; we might have - // changed it, so re-provision it - err := basePolicy.Provision(app.tlsApp) - if err != nil { - return err - } - } - - // public names will be taken care of by the base (catch-all) - // policy, which we've ensured exists if not already specified; - // internal names, however, need to be handled by an internal - // issuer, 
which we need to make a new policy for, scoped to - // just those names (yes, this logic is a bit asymmetric, but - // it works, because our assumed/natural default issuer is an - // ACME issuer) - if len(internalNames) > 0 { - internalIssuer := new(caddytls.InternalIssuer) - - // shallow-copy the base policy; we want to inherit - // from it, not replace it... this takes two lines to - // overrule compiler optimizations - policyCopy := *basePolicy - newPolicy := &policyCopy - - // very important to provision the issuer, since we - // are bypassing the JSON-unmarshaling step - if err := internalIssuer.Provision(ctx); err != nil { - return err - } - - // this policy should apply only to the given names - // and should use our issuer -- yes, this overrides - // any issuer that may have been set in the base - // policy, but we do this because these names do not - // already have a policy associated with them, which - // is easy to do; consider the case of a Caddyfile - // that has only "localhost" as a name, but sets the - // default/global ACME CA to the Let's Encrypt staging - // endpoint... they probably don't intend to change the - // fundamental set of names that setting applies to, - // rather they just want to change the CA for the set - // of names that would normally use the production API; - // anyway, that gets into the weeds a bit... - newPolicy.Subjects = internalNames - newPolicy.Issuers = []certmagic.Issuer{internalIssuer} - err := app.tlsApp.AddAutomationPolicy(newPolicy) - if err != nil { - return err - } - } - - // we just changed a lot of stuff, so double-check that it's all good - err := app.tlsApp.Validate() - if err != nil { - return err - } - - return nil -} - -// fillInACMEIssuer fills in default values into acmeIssuer that -// are defined in app; these values at time of writing are just -// app.HTTPPort and app.HTTPSPort, which are used by ACMEIssuer. -// Sure, we could just use the global/CertMagic defaults, but if -// a user has configured those ports in the HTTP app, it makes -// sense to use them in the TLS app too, even if they forgot (or -// were too lazy, like me) to set it in each automation policy -// that uses it -- this just makes things a little less tedious -// for the user, so they don't have to repeat those ports in -// potentially many places. This function never steps on existing -// config values. If any changes are made, acmeIssuer is -// reprovisioned. acmeIssuer must not be nil. 
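A sketch of that fill-in-defaults pattern on its own, using stand-in types rather than the real caddytls ones: allocate any nil sub-structs, then assign only fields that are still zero, so explicit configuration is never stepped on:

package main

import "fmt"

type httpChallenge struct{ AlternatePort int }
type challenges struct{ HTTP *httpChallenge }
type issuer struct{ Challenges *challenges }

// fillIn sets the HTTP challenge port only where the user left it unset.
func fillIn(iss *issuer, httpPort int) {
	if httpPort <= 0 {
		return
	}
	if iss.Challenges == nil {
		iss.Challenges = new(challenges)
	}
	if iss.Challenges.HTTP == nil {
		iss.Challenges.HTTP = new(httpChallenge)
	}
	if iss.Challenges.HTTP.AlternatePort == 0 { // don't overwrite explicit config
		iss.Challenges.HTTP.AlternatePort = httpPort
	}
}

func main() {
	empty := &issuer{}
	fillIn(empty, 8080)
	fmt.Println(empty.Challenges.HTTP.AlternatePort) // 8080: default filled in

	explicit := &issuer{Challenges: &challenges{HTTP: &httpChallenge{AlternatePort: 9999}}}
	fillIn(explicit, 8080)
	fmt.Println(explicit.Challenges.HTTP.AlternatePort) // 9999: explicit value kept
}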
-func (app *App) fillInACMEIssuer(acmeIssuer *caddytls.ACMEIssuer) error { - if app.HTTPPort > 0 || app.HTTPSPort > 0 { - if acmeIssuer.Challenges == nil { - acmeIssuer.Challenges = new(caddytls.ChallengesConfig) - } - } - if app.HTTPPort > 0 { - if acmeIssuer.Challenges.HTTP == nil { - acmeIssuer.Challenges.HTTP = new(caddytls.HTTPChallengeConfig) - } - // don't overwrite existing explicit config - if acmeIssuer.Challenges.HTTP.AlternatePort == 0 { - acmeIssuer.Challenges.HTTP.AlternatePort = app.HTTPPort - } - } - if app.HTTPSPort > 0 { - if acmeIssuer.Challenges.TLSALPN == nil { - acmeIssuer.Challenges.TLSALPN = new(caddytls.TLSALPNChallengeConfig) - } - // don't overwrite existing explicit config - if acmeIssuer.Challenges.TLSALPN.AlternatePort == 0 { - acmeIssuer.Challenges.TLSALPN.AlternatePort = app.HTTPSPort - } - } - // we must provision all ACME issuers, even if nothing - // was changed, because we don't know if they are new - // and haven't been provisioned yet; if an ACME issuer - // never gets provisioned, its Agree field stays false, - // which leads to, um, problems later on - return acmeIssuer.Provision(app.ctx) -} - -// automaticHTTPSPhase2 begins certificate management for -// all names in the qualifying domain set for each server. -// This phase must occur after provisioning and at the end -// of app start, after all the servers have been started. -// Doing this last ensures that there won't be any race -// for listeners on the HTTP or HTTPS ports when management -// is async (if CertMagic's solvers bind to those ports -// first, then our servers would fail to bind to them, -// which would be bad, since CertMagic's bindings are -// temporary and don't serve the user's sites!). -func (app *App) automaticHTTPSPhase2() error { - if len(app.allCertDomains) == 0 { - return nil - } - app.logger.Info("enabling automatic TLS certificate management", - zap.Strings("domains", app.allCertDomains), - ) - err := app.tlsApp.Manage(app.allCertDomains) - if err != nil { - return fmt.Errorf("managing certificates for %v: %s", app.allCertDomains, err) - } - app.allCertDomains = nil // no longer needed; allow GC to deallocate - return nil -} - -type acmeCapable interface{ GetACMEIssuer() *caddytls.ACMEIssuer } diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/caddyhttp.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/caddyhttp.go deleted file mode 100644 index 784b2b90..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/caddyhttp.go +++ /dev/null @@ -1,276 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package caddyhttp - -import ( - "bytes" - "encoding/json" - "io" - "net" - "net/http" - "path/filepath" - "strconv" - "strings" - - "github.com/caddyserver/caddy/v2" - "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile" -) - -func init() { - caddy.RegisterModule(tlsPlaceholderWrapper{}) -} - -// RequestMatcher is a type that can match to a request. 
-// A route matcher MUST NOT modify the request, with the -// only exception being its context. -type RequestMatcher interface { - Match(*http.Request) bool -} - -// Handler is like http.Handler except ServeHTTP may return an error. -// -// If any handler encounters an error, it should be returned for proper -// handling. Return values should be propagated down the middleware chain -// by returning it unchanged. Returned errors should not be re-wrapped -// if they are already HandlerError values. -type Handler interface { - ServeHTTP(http.ResponseWriter, *http.Request) error -} - -// HandlerFunc is a convenience type like http.HandlerFunc. -type HandlerFunc func(http.ResponseWriter, *http.Request) error - -// ServeHTTP implements the Handler interface. -func (f HandlerFunc) ServeHTTP(w http.ResponseWriter, r *http.Request) error { - return f(w, r) -} - -// Middleware chains one Handler to the next by being passed -// the next Handler in the chain. -type Middleware func(Handler) Handler - -// MiddlewareHandler is like Handler except it takes as a third -// argument the next handler in the chain. The next handler will -// never be nil, but may be a no-op handler if this is the last -// handler in the chain. Handlers which act as middleware should -// call the next handler's ServeHTTP method so as to propagate -// the request down the chain properly. Handlers which act as -// responders (content origins) need not invoke the next handler, -// since the last handler in the chain should be the first to -// write the response. -type MiddlewareHandler interface { - ServeHTTP(http.ResponseWriter, *http.Request, Handler) error -} - -// emptyHandler is used as a no-op handler. -var emptyHandler Handler = HandlerFunc(func(http.ResponseWriter, *http.Request) error { return nil }) - -// An implicit suffix middleware that, if reached, sets the StatusCode to the -// error stored in the ErrorCtxKey. This is to prevent situations where the -// Error chain does not actually handle the error (for instance, it matches only -// on some errors). See #3053 -var errorEmptyHandler Handler = HandlerFunc(func(w http.ResponseWriter, r *http.Request) error { - httpError := r.Context().Value(ErrorCtxKey) - if handlerError, ok := httpError.(HandlerError); ok { - w.WriteHeader(handlerError.StatusCode) - } else { - w.WriteHeader(http.StatusInternalServerError) - } - return nil -}) - -// ResponseHandler pairs a response matcher with custom handling -// logic. Either the status code can be changed to something else -// while using the original response body, or, if a status code -// is not set, it can execute a custom route list; this is useful -// for executing handler routes based on the properties of an HTTP -// response that has not been written out to the client yet. -// -// To use this type, provision it at module load time, then when -// ready to use, match the response against its matcher; if it -// matches (or doesn't have a matcher), change the status code on -// the response if configured; otherwise invoke the routes by -// calling `rh.Routes.Compile(next).ServeHTTP(rw, req)` (or similar). -type ResponseHandler struct { - // The response matcher for this handler. If empty/nil, - // it always matches. - Match *ResponseMatcher `json:"match,omitempty"` - - // To write the original response body but with a different - // status code, set this field to the desired status code. - // If set, this takes priority over routes. 
-	StatusCode WeakString `json:"status_code,omitempty"`
-
-	// The list of HTTP routes to execute if no status code is
-	// specified. If evaluated, the original response body
-	// will not be written.
-	Routes RouteList `json:"routes,omitempty"`
-}
-
-// Provision sets up the routes in rh.
-func (rh *ResponseHandler) Provision(ctx caddy.Context) error {
-	if rh.Routes != nil {
-		err := rh.Routes.Provision(ctx)
-		if err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-// WeakString is a type that unmarshals any JSON value
-// as a string literal, with the following exceptions:
-//
-// 1. actual string values are decoded as strings; and
-// 2. null is decoded as empty string;
-//
-// and provides methods for getting the value as various
-// primitive types. However, using this type removes any
-// type safety as far as deserializing JSON is concerned.
-type WeakString string
-
-// UnmarshalJSON satisfies json.Unmarshaler according to
-// this type's documentation.
-func (ws *WeakString) UnmarshalJSON(b []byte) error {
-	if len(b) == 0 {
-		return io.EOF
-	}
-	if b[0] == byte('"') && b[len(b)-1] == byte('"') {
-		var s string
-		err := json.Unmarshal(b, &s)
-		if err != nil {
-			return err
-		}
-		*ws = WeakString(s)
-		return nil
-	}
-	if bytes.Equal(b, []byte("null")) {
-		return nil
-	}
-	*ws = WeakString(b)
-	return nil
-}
-
-// MarshalJSON marshals ws as a boolean if true or false,
-// as a number if an integer, or as a string otherwise.
-func (ws WeakString) MarshalJSON() ([]byte, error) {
-	if ws == "true" {
-		return []byte("true"), nil
-	}
-	if ws == "false" {
-		return []byte("false"), nil
-	}
-	if num, err := strconv.Atoi(string(ws)); err == nil {
-		return json.Marshal(num)
-	}
-	return json.Marshal(string(ws))
-}
-
-// Int returns ws as an integer. If ws is not an
-// integer, 0 is returned.
-func (ws WeakString) Int() int {
-	num, _ := strconv.Atoi(string(ws))
-	return num
-}
-
-// Float64 returns ws as a float64. If ws is not a
-// float value, the zero value is returned.
-func (ws WeakString) Float64() float64 {
-	num, _ := strconv.ParseFloat(string(ws), 64)
-	return num
-}
-
-// Bool returns ws as a boolean. If ws is not a
-// boolean, false is returned.
-func (ws WeakString) Bool() bool {
-	return string(ws) == "true"
-}
-
-// String returns ws as a string.
-func (ws WeakString) String() string {
-	return string(ws)
-}
-
-// StatusCodeMatches returns true if a real HTTP status code matches
-// the configured status code, which may be either a real HTTP status
-// code or an integer representing a class of codes (e.g. 4 for all
-// 4xx statuses).
-func StatusCodeMatches(actual, configured int) bool {
-	if actual == configured {
-		return true
-	}
-	if configured < 100 &&
-		actual >= configured*100 &&
-		actual < (configured+1)*100 {
-		return true
-	}
-	return false
-}
-
-// SanitizedPathJoin performs filepath.Join(root, reqPath) that
-// is safe against directory traversal attacks. It uses logic
-// similar to that in the Go standard library, specifically
-// in the implementation of http.Dir. The root is assumed to
-// be a trusted path, but reqPath is not; and the output will
-// never be outside of root. The resulting path can be used
-// with the local file system.
-func SanitizedPathJoin(root, reqPath string) string {
-	if root == "" {
-		root = "."
-	}
-
-	path := filepath.Join(root, filepath.Clean("/"+reqPath))
-
-	// filepath.Join also cleans the path, and cleaning strips
-	// the trailing slash, so we need to re-add it afterwards.
- // if the length is 1, then it's a path to the root, - // and that should return ".", so we don't append the separator. - if strings.HasSuffix(reqPath, "/") && len(reqPath) > 1 { - path += separator - } - - return path -} - -// tlsPlaceholderWrapper is a no-op listener wrapper that marks -// where the TLS listener should be in a chain of listener wrappers. -// It should only be used if another listener wrapper must be placed -// in front of the TLS handshake. -type tlsPlaceholderWrapper struct{} - -func (tlsPlaceholderWrapper) CaddyModule() caddy.ModuleInfo { - return caddy.ModuleInfo{ - ID: "caddy.listeners.tls", - New: func() caddy.Module { return new(tlsPlaceholderWrapper) }, - } -} - -func (tlsPlaceholderWrapper) WrapListener(ln net.Listener) net.Listener { return ln } - -func (tlsPlaceholderWrapper) UnmarshalCaddyfile(d *caddyfile.Dispenser) error { return nil } - -const ( - // DefaultHTTPPort is the default port for HTTP. - DefaultHTTPPort = 80 - - // DefaultHTTPSPort is the default port for HTTPS. - DefaultHTTPSPort = 443 -) - -const separator = string(filepath.Separator) - -// Interface guard -var _ caddy.ListenerWrapper = (*tlsPlaceholderWrapper)(nil) -var _ caddyfile.Unmarshaler = (*tlsPlaceholderWrapper)(nil) diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/celmatcher.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/celmatcher.go deleted file mode 100644 index d7d55d84..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/celmatcher.go +++ /dev/null @@ -1,260 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package caddyhttp - -import ( - "crypto/x509/pkix" - "encoding/json" - "fmt" - "net/http" - "reflect" - "regexp" - "strings" - "time" - - "github.com/caddyserver/caddy/v2" - "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile" - "github.com/google/cel-go/cel" - "github.com/google/cel-go/checker/decls" - "github.com/google/cel-go/common/types" - "github.com/google/cel-go/common/types/ref" - "github.com/google/cel-go/common/types/traits" - "github.com/google/cel-go/ext" - "github.com/google/cel-go/interpreter/functions" - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" - "google.golang.org/protobuf/proto" -) - -func init() { - caddy.RegisterModule(MatchExpression{}) -} - -// MatchExpression matches requests by evaluating a -// [CEL](https://github.com/google/cel-spec) expression. -// This enables complex logic to be expressed using a comfortable, -// familiar syntax. Please refer to -// [the standard definitions of CEL functions and operators](https://github.com/google/cel-spec/blob/master/doc/langdef.md#standard-definitions). -// -// This matcher's JSON interface is actually a string, not a struct. -// The generated docs are not correct because this type has custom -// marshaling logic. -// -// COMPATIBILITY NOTE: This module is still experimental and is not -// subject to Caddy's compatibility guarantee. 
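Concretely, because the matcher's JSON form is a bare string, a route fragment using it could look like the following (the hostname is illustrative; endsWith comes from the CEL strings extension loaded in Provision below):

{
  "match": [
    { "expression": "{http.request.host}.endsWith(\".localhost\")" }
  ]
}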
-type MatchExpression struct {
-	// The CEL expression to evaluate. Any Caddy placeholders
-	// will be expanded and situated into proper CEL function
-	// calls before evaluating.
-	Expr string
-
-	expandedExpr string
-	prg          cel.Program
-	ta           ref.TypeAdapter
-}
-
-// CaddyModule returns the Caddy module information.
-func (MatchExpression) CaddyModule() caddy.ModuleInfo {
-	return caddy.ModuleInfo{
-		ID:  "http.matchers.expression",
-		New: func() caddy.Module { return new(MatchExpression) },
-	}
-}
-
-// MarshalJSON marshals m's expression.
-func (m MatchExpression) MarshalJSON() ([]byte, error) {
-	return json.Marshal(m.Expr)
-}
-
-// UnmarshalJSON unmarshals m's expression.
-func (m *MatchExpression) UnmarshalJSON(data []byte) error {
-	return json.Unmarshal(data, &m.Expr)
-}
-
-// Provision sets up m.
-func (m *MatchExpression) Provision(_ caddy.Context) error {
-	// replace placeholders with a function call - this is just some
-	// light (and possibly naïve) syntactic sugar
-	m.expandedExpr = placeholderRegexp.ReplaceAllString(m.Expr, placeholderExpansion)
-
-	// our type adapter expands CEL's standard type support
-	m.ta = celTypeAdapter{}
-
-	// create the CEL environment
-	env, err := cel.NewEnv(
-		cel.Declarations(
-			decls.NewVar("request", httpRequestObjectType),
-			decls.NewFunction(placeholderFuncName,
-				decls.NewOverload(placeholderFuncName+"_httpRequest_string",
-					[]*exprpb.Type{httpRequestObjectType, decls.String},
-					decls.Any)),
-		),
-		cel.CustomTypeAdapter(m.ta),
-		ext.Strings(),
-	)
-	if err != nil {
-		return fmt.Errorf("setting up CEL environment: %v", err)
-	}
-
-	// parse and type-check the expression
-	checked, issues := env.Compile(m.expandedExpr)
-	if issues != nil && issues.Err() != nil {
-		return fmt.Errorf("compiling CEL program: %s", issues.Err())
-	}
-
-	// request matching is a boolean operation, so we don't really know
-	// what to do if the expression returns a non-boolean type
-	if !proto.Equal(checked.ResultType(), decls.Bool) {
-		return fmt.Errorf("CEL request matcher expects return type of bool, not %s", checked.ResultType())
-	}
-
-	// compile the "program"
-	m.prg, err = env.Program(checked,
-		cel.Functions(
-			&functions.Overload{
-				Operator: placeholderFuncName,
-				Binary:   m.caddyPlaceholderFunc,
-			},
-		),
-	)
-
-	if err != nil {
-		return fmt.Errorf("compiling CEL program: %s", err)
-	}
-	return nil
-}
-
-// Match returns true if r matches m.
-func (m MatchExpression) Match(r *http.Request) bool {
-	out, _, _ := m.prg.Eval(map[string]interface{}{
-		"request": celHTTPRequest{r},
-	})
-	if outBool, ok := out.Value().(bool); ok {
-		return outBool
-	}
-	return false
-
-}
-
-// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
-func (m *MatchExpression) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
-	for d.Next() {
-		m.Expr = strings.Join(d.RemainingArgs(), " ")
-	}
-	return nil
-}
-
-// caddyPlaceholderFunc implements the custom CEL function that accesses the
-// Replacer on a request and gets values from it.
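Before the placeholder function itself, the Provision-time rewrite is easy to see in isolation. This runs the same regexp and replacement that placeholderRegexp and placeholderExpansion define further down:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	placeholderRegexp := regexp.MustCompile(`{([\w.-]+)}`)
	placeholderExpansion := `caddyPlaceholder(request, "${1}")`

	expr := `{http.request.host}.endsWith(".localhost")`
	fmt.Println(placeholderRegexp.ReplaceAllString(expr, placeholderExpansion))
	// Output: caddyPlaceholder(request, "http.request.host").endsWith(".localhost")
}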
-func (m MatchExpression) caddyPlaceholderFunc(lhs, rhs ref.Val) ref.Val {
-	celReq, ok := lhs.(celHTTPRequest)
-	if !ok {
-		return types.NewErr(
-			"invalid request of type '%v' to "+placeholderFuncName+"(request, placeholderVarName)",
-			lhs.Type())
-	}
-	phStr, ok := rhs.(types.String)
-	if !ok {
-		return types.NewErr(
-			"invalid placeholder variable name of type '%v' to "+placeholderFuncName+"(request, placeholderVarName)",
-			rhs.Type())
-	}
-
-	repl := celReq.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
-	val, _ := repl.Get(string(phStr))
-
-	return m.ta.NativeToValue(val)
-}
-
-// httpRequestCELType is the type representation of a native HTTP request.
-var httpRequestCELType = types.NewTypeValue("http.Request", traits.ReceiverType)
-
-// celHTTPRequest wraps an http.Request with
-// methods to satisfy the ref.Val interface.
-type celHTTPRequest struct{ *http.Request }
-
-func (cr celHTTPRequest) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
-	return cr.Request, nil
-}
-func (celHTTPRequest) ConvertToType(typeVal ref.Type) ref.Val {
-	panic("not implemented")
-}
-func (cr celHTTPRequest) Equal(other ref.Val) ref.Val {
-	if o, ok := other.Value().(celHTTPRequest); ok {
-		return types.Bool(o.Request == cr.Request)
-	}
-	return types.ValOrErr(other, "%v is not comparable type", other)
-}
-func (celHTTPRequest) Type() ref.Type        { return httpRequestCELType }
-func (cr celHTTPRequest) Value() interface{} { return cr }
-
-var pkixNameCELType = types.NewTypeValue("pkix.Name", traits.ReceiverType)
-
-// celPkixName wraps a pkix.Name with
-// methods to satisfy the ref.Val interface.
-type celPkixName struct{ *pkix.Name }
-
-func (pn celPkixName) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
-	return pn.Name, nil
-}
-func (celPkixName) ConvertToType(typeVal ref.Type) ref.Val {
-	panic("not implemented")
-}
-func (pn celPkixName) Equal(other ref.Val) ref.Val {
-	if o, ok := other.Value().(string); ok {
-		return types.Bool(pn.Name.String() == o)
-	}
-	return types.ValOrErr(other, "%v is not comparable type", other)
-}
-func (celPkixName) Type() ref.Type        { return pkixNameCELType }
-func (pn celPkixName) Value() interface{} { return pn }
-
-// celTypeAdapter can adapt our custom types to a CEL value.
-type celTypeAdapter struct{}
-
-func (celTypeAdapter) NativeToValue(value interface{}) ref.Val {
-	switch v := value.(type) {
-	case celHTTPRequest:
-		return v
-	case pkix.Name:
-		return celPkixName{&v}
-	case time.Time:
-		return types.Timestamp{Time: v}
-	case error:
-		return types.NewErr(v.Error())
-	}
-	return types.DefaultTypeAdapter.NativeToValue(value)
-}
-
-// Variables used for replacing Caddy placeholders in CEL
-// expressions with a proper CEL function call; this is
-// just for syntactic sugar.
-var (
-	placeholderRegexp    = regexp.MustCompile(`{([\w.-]+)}`)
-	placeholderExpansion = `caddyPlaceholder(request, "${1}")`
-)
-
-var httpRequestObjectType = decls.NewObjectType("http.Request")
-
-// The name of the CEL function which accesses Replacer values.
-const placeholderFuncName = "caddyPlaceholder" - -// Interface guards -var ( - _ caddy.Provisioner = (*MatchExpression)(nil) - _ RequestMatcher = (*MatchExpression)(nil) - _ caddyfile.Unmarshaler = (*MatchExpression)(nil) - _ json.Marshaler = (*MatchExpression)(nil) - _ json.Unmarshaler = (*MatchExpression)(nil) -) diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/errors.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/errors.go deleted file mode 100644 index 85dc3dfb..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/errors.go +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package caddyhttp - -import ( - "fmt" - weakrand "math/rand" - "path" - "runtime" - "strings" - "time" - - "github.com/caddyserver/caddy/v2" -) - -func init() { - weakrand.Seed(time.Now().UnixNano()) -} - -// Error is a convenient way for a Handler to populate the -// essential fields of a HandlerError. If err is itself a -// HandlerError, then any essential fields that are not -// set will be populated. -func Error(statusCode int, err error) HandlerError { - const idLen = 9 - if he, ok := err.(HandlerError); ok { - if he.ID == "" { - he.ID = randString(idLen, true) - } - if he.Trace == "" { - he.Trace = trace() - } - if he.StatusCode == 0 { - he.StatusCode = statusCode - } - return he - } - return HandlerError{ - ID: randString(idLen, true), - StatusCode: statusCode, - Err: err, - Trace: trace(), - } -} - -// HandlerError is a serializable representation of -// an error from within an HTTP handler. -type HandlerError struct { - Err error // the original error value and message - StatusCode int // the HTTP status code to associate with this error - - ID string // generated; for identifying this error in logs - Trace string // produced from call stack -} - -func (e HandlerError) Error() string { - var s string - if e.ID != "" { - s += fmt.Sprintf("{id=%s}", e.ID) - } - if e.Trace != "" { - s += " " + e.Trace - } - if e.StatusCode != 0 { - s += fmt.Sprintf(": HTTP %d", e.StatusCode) - } - if e.Err != nil { - s += ": " + e.Err.Error() - } - return strings.TrimSpace(s) -} - -// randString returns a string of n random characters. -// It is not even remotely secure OR a proper distribution. -// But it's good enough for some things. It excludes certain -// confusing characters like I, l, 1, 0, O, etc. If sameCase -// is true, then uppercase letters are excluded. 
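For context on how the Error helper defined above is meant to be called, a hypothetical middleware; myHandler and the file path are made up, while the MiddlewareHandler signature and caddyhttp.Error come from this package:

package mymodule

import (
	"net/http"
	"os"

	"github.com/caddyserver/caddy/v2/modules/caddyhttp"
)

type myHandler struct{}

func (myHandler) ServeHTTP(w http.ResponseWriter, r *http.Request, next caddyhttp.Handler) error {
	f, err := os.Open("/some/required/file") // illustrative path
	if err != nil {
		// Error fills in a random ID, a call-site trace, and the status
		// code, so the server's error chain can log and render it
		return caddyhttp.Error(http.StatusBadGateway, err)
	}
	defer f.Close()
	return next.ServeHTTP(w, r)
}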
-func randString(n int, sameCase bool) string { - if n <= 0 { - return "" - } - dict := []byte("abcdefghijkmnopqrstuvwxyzABCDEFGHJKLMNPQRTUVWXY23456789") - if sameCase { - dict = []byte("abcdefghijkmnpqrstuvwxyz0123456789") - } - b := make([]byte, n) - for i := range b { - //nolint:gosec - b[i] = dict[weakrand.Int63()%int64(len(dict))] - } - return string(b) -} - -func trace() string { - if pc, file, line, ok := runtime.Caller(2); ok { - filename := path.Base(file) - pkgAndFuncName := path.Base(runtime.FuncForPC(pc).Name()) - return fmt.Sprintf("%s (%s:%d)", pkgAndFuncName, filename, line) - } - return "" -} - -// ErrorCtxKey is the context key to use when storing -// an error (for use with context.Context). -const ErrorCtxKey = caddy.CtxKey("handler_chain_error") diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/headers/caddyfile.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/headers/caddyfile.go deleted file mode 100644 index c6ea2fb0..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/headers/caddyfile.go +++ /dev/null @@ -1,270 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package headers - -import ( - "fmt" - "net/http" - "reflect" - "strings" - - "github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile" - "github.com/caddyserver/caddy/v2/modules/caddyhttp" -) - -func init() { - httpcaddyfile.RegisterDirective("header", parseCaddyfile) - httpcaddyfile.RegisterDirective("request_header", parseReqHdrCaddyfile) -} - -// parseCaddyfile sets up the handler for response headers from -// Caddyfile tokens. Syntax: -// -// header [] [[+|-|?] [] []] { -// [+] [ []] -// ? -// - -// [defer] -// } -// -// Either a block can be opened or a single header field can be configured -// in the first line, but not both in the same directive. Header operations -// are deferred to write-time if any headers are being deleted or if the -// 'defer' subdirective is used. + appends a header value, - deletes a field, -// and ? conditionally sets a value only if the header field is not already -// set. 
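An illustrative use of the block form documented above; the field names and values are arbitrary:

header {
	# set (overwrite) a response header
	Strict-Transport-Security "max-age=31536000"
	# "+" appends, "-" deletes, "?" sets only when the field is absent
	+Vary Accept-Encoding
	-Server
	?Cache-Control "max-age=3600"
	defer
}

Per the rules above, deleting a field already forces deferred (write-time) application, so the explicit defer here is redundant but harmless.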
-func parseCaddyfile(h httpcaddyfile.Helper) ([]httpcaddyfile.ConfigValue, error) { - if !h.Next() { - return nil, h.ArgErr() - } - - matcherSet, err := h.ExtractMatcherSet() - if err != nil { - return nil, err - } - - makeHandler := func() Handler { - return Handler{ - Response: &RespHeaderOps{ - HeaderOps: &HeaderOps{}, - }, - } - } - handler, handlerWithRequire := makeHandler(), makeHandler() - - for h.Next() { - // first see if headers are in the initial line - var hasArgs bool - if h.NextArg() { - hasArgs = true - field := h.Val() - var value, replacement string - if h.NextArg() { - value = h.Val() - } - if h.NextArg() { - replacement = h.Val() - } - err := applyHeaderOp( - handler.Response.HeaderOps, - handler.Response, - field, - value, - replacement, - ) - if err != nil { - return nil, h.Err(err.Error()) - } - if len(handler.Response.HeaderOps.Delete) > 0 { - handler.Response.Deferred = true - } - } - - // if not, they should be in a block - for h.NextBlock(0) { - field := h.Val() - if field == "defer" { - handler.Response.Deferred = true - continue - } - if hasArgs { - return nil, h.Err("cannot specify headers in both arguments and block") // because it would be weird - } - - // sometimes it is habitual for users to suffix a field name with a colon, - // as if they were writing a curl command or something; see - // https://caddy.community/t/v2-reverse-proxy-please-add-cors-example-to-the-docs/7349/19 - field = strings.TrimSuffix(field, ":") - - var value, replacement string - if h.NextArg() { - value = h.Val() - } - if h.NextArg() { - replacement = h.Val() - } - - handlerToUse := handler - if strings.HasPrefix(field, "?") { - handlerToUse = handlerWithRequire - } - - err := applyHeaderOp( - handlerToUse.Response.HeaderOps, - handlerToUse.Response, - field, - value, - replacement, - ) - if err != nil { - return nil, h.Err(err.Error()) - } - } - } - - var configValues []httpcaddyfile.ConfigValue - if !reflect.DeepEqual(handler, makeHandler()) { - configValues = append(configValues, h.NewRoute(matcherSet, handler)...) - } - if !reflect.DeepEqual(handlerWithRequire, makeHandler()) { - configValues = append(configValues, h.NewRoute(matcherSet, handlerWithRequire)...) - } - - return configValues, nil -} - -// parseReqHdrCaddyfile sets up the handler for request headers -// from Caddyfile tokens. Syntax: -// -// request_header [] [[+|-] [] []] -// -func parseReqHdrCaddyfile(h httpcaddyfile.Helper) ([]httpcaddyfile.ConfigValue, error) { - if !h.Next() { - return nil, h.ArgErr() - } - - matcherSet, err := h.ExtractMatcherSet() - if err != nil { - return nil, err - } - - configValues := []httpcaddyfile.ConfigValue{} - - for h.Next() { - if !h.NextArg() { - return nil, h.ArgErr() - } - field := h.Val() - - hdr := Handler{ - Request: &HeaderOps{}, - } - - // sometimes it is habitual for users to suffix a field name with a colon, - // as if they were writing a curl command or something; see - // https://caddy.community/t/v2-reverse-proxy-please-add-cors-example-to-the-docs/7349/19 - field = strings.TrimSuffix(field, ":") - - var value, replacement string - if h.NextArg() { - value = h.Val() - } - if h.NextArg() { - replacement = h.Val() - if h.NextArg() { - return nil, h.ArgErr() - } - } - - if hdr.Request == nil { - hdr.Request = new(HeaderOps) - } - if err := CaddyfileHeaderOp(hdr.Request, field, value, replacement); err != nil { - return nil, h.Err(err.Error()) - } - - configValues = append(configValues, h.NewRoute(matcherSet, hdr)...) 
- - if h.NextArg() { - return nil, h.ArgErr() - } - } - return configValues, nil -} - -// CaddyfileHeaderOp applies a new header operation according to -// field, value, and replacement. The field can be prefixed with -// "+" or "-" to specify adding or removing; otherwise, the value -// will be set (overriding any previous value). If replacement is -// non-empty, value will be treated as a regular expression which -// will be used to search and then replacement will be used to -// complete the substring replacement; in that case, any + or - -// prefix to field will be ignored. -func CaddyfileHeaderOp(ops *HeaderOps, field, value, replacement string) error { - return applyHeaderOp(ops, nil, field, value, replacement) -} - -func applyHeaderOp(ops *HeaderOps, respHeaderOps *RespHeaderOps, field, value, replacement string) error { - switch { - case strings.HasPrefix(field, "+"): // append - if ops.Add == nil { - ops.Add = make(http.Header) - } - ops.Add.Set(field[1:], value) - - case strings.HasPrefix(field, "-"): // delete - ops.Delete = append(ops.Delete, field[1:]) - if respHeaderOps != nil { - respHeaderOps.Deferred = true - } - - case strings.HasPrefix(field, "?"): // default (conditional on not existing) - response headers only - if respHeaderOps == nil { - return fmt.Errorf("%v: the default header modifier ('?') can only be used on response headers; for conditional manipulation of request headers, use matchers", field) - } - if respHeaderOps.Require == nil { - respHeaderOps.Require = &caddyhttp.ResponseMatcher{ - Headers: make(http.Header), - } - } - field = strings.TrimPrefix(field, "?") - respHeaderOps.Require.Headers[field] = nil - if respHeaderOps.Set == nil { - respHeaderOps.Set = make(http.Header) - } - respHeaderOps.Set.Set(field, value) - - case replacement != "": // replace - if ops.Replace == nil { - ops.Replace = make(map[string][]Replacement) - } - field = strings.TrimLeft(field, "+-?") - ops.Replace[field] = append( - ops.Replace[field], - Replacement{ - SearchRegexp: value, - Replace: replacement, - }, - ) - - default: // set (overwrite) - if ops.Set == nil { - ops.Set = make(http.Header) - } - ops.Set.Set(field, value) - } - - return nil -} diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/headers/headers.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/headers/headers.go deleted file mode 100644 index 3571dd92..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/headers/headers.go +++ /dev/null @@ -1,322 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package headers - -import ( - "fmt" - "net/http" - "regexp" - "strings" - - "github.com/caddyserver/caddy/v2" - "github.com/caddyserver/caddy/v2/modules/caddyhttp" -) - -func init() { - caddy.RegisterModule(Handler{}) -} - -// Handler is a middleware which modifies request and response headers. 
-// -// Changes to headers are applied immediately, except for the response -// headers when Deferred is true or when Required is set. In those cases, -// the changes are applied when the headers are written to the response. -// Note that deferred changes do not take effect if an error occurs later -// in the middleware chain. -// -// Properties in this module accept placeholders. -// -// Response header operations can be conditioned upon response status code -// and/or other header values. -type Handler struct { - Request *HeaderOps `json:"request,omitempty"` - Response *RespHeaderOps `json:"response,omitempty"` -} - -// CaddyModule returns the Caddy module information. -func (Handler) CaddyModule() caddy.ModuleInfo { - return caddy.ModuleInfo{ - ID: "http.handlers.headers", - New: func() caddy.Module { return new(Handler) }, - } -} - -// Provision sets up h's configuration. -func (h *Handler) Provision(ctx caddy.Context) error { - if h.Request != nil { - err := h.Request.Provision(ctx) - if err != nil { - return err - } - } - if h.Response != nil { - err := h.Response.Provision(ctx) - if err != nil { - return err - } - } - return nil -} - -// Validate ensures h's configuration is valid. -func (h Handler) Validate() error { - if h.Request != nil { - err := h.Request.validate() - if err != nil { - return err - } - } - if h.Response != nil { - err := h.Response.validate() - if err != nil { - return err - } - } - return nil -} - -func (h Handler) ServeHTTP(w http.ResponseWriter, r *http.Request, next caddyhttp.Handler) error { - repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer) - - if h.Request != nil { - h.Request.ApplyToRequest(r) - } - - if h.Response != nil { - if h.Response.Deferred || h.Response.Require != nil { - w = &responseWriterWrapper{ - ResponseWriterWrapper: &caddyhttp.ResponseWriterWrapper{ResponseWriter: w}, - replacer: repl, - require: h.Response.Require, - headerOps: h.Response.HeaderOps, - } - } else { - h.Response.ApplyTo(w.Header(), repl) - } - } - - return next.ServeHTTP(w, r) -} - -// HeaderOps defines manipulations for HTTP headers. -type HeaderOps struct { - // Adds HTTP headers; does not replace any existing header fields. - Add http.Header `json:"add,omitempty"` - - // Sets HTTP headers; replaces existing header fields. - Set http.Header `json:"set,omitempty"` - - // Names of HTTP header fields to delete. - Delete []string `json:"delete,omitempty"` - - // Performs substring replacements of HTTP headers in-situ. - Replace map[string][]Replacement `json:"replace,omitempty"` -} - -// Provision sets up the header operations. -func (ops *HeaderOps) Provision(_ caddy.Context) error { - for fieldName, replacements := range ops.Replace { - for i, r := range replacements { - if r.SearchRegexp != "" { - re, err := regexp.Compile(r.SearchRegexp) - if err != nil { - return fmt.Errorf("replacement %d for header field '%s': %v", i, fieldName, err) - } - replacements[i].re = re - } - } - } - return nil -} - -func (ops HeaderOps) validate() error { - for fieldName, replacements := range ops.Replace { - for _, r := range replacements { - if r.Search != "" && r.SearchRegexp != "" { - return fmt.Errorf("cannot specify both a substring search and a regular expression search for field '%s'", fieldName) - } - } - } - return nil -} - -// Replacement describes a string replacement, -// either a simple and fast substring search -// or a slower but more powerful regex search. -type Replacement struct { - // The substring to search for. 
- Search string `json:"search,omitempty"` - - // The regular expression to search with. - SearchRegexp string `json:"search_regexp,omitempty"` - - // The string with which to replace matches. - Replace string `json:"replace,omitempty"` - - re *regexp.Regexp -} - -// RespHeaderOps defines manipulations for response headers. -type RespHeaderOps struct { - *HeaderOps - - // If set, header operations will be deferred until - // they are written out and only performed if the - // response matches these criteria. - Require *caddyhttp.ResponseMatcher `json:"require,omitempty"` - - // If true, header operations will be deferred until - // they are written out. Superceded if Require is set. - // Usually you will need to set this to true if any - // fields are being deleted. - Deferred bool `json:"deferred,omitempty"` -} - -// ApplyTo applies ops to hdr using repl. -func (ops HeaderOps) ApplyTo(hdr http.Header, repl *caddy.Replacer) { - // add - for fieldName, vals := range ops.Add { - fieldName = repl.ReplaceAll(fieldName, "") - for _, v := range vals { - hdr.Add(fieldName, repl.ReplaceAll(v, "")) - } - } - - // set - for fieldName, vals := range ops.Set { - fieldName = repl.ReplaceAll(fieldName, "") - var newVals []string - for i := range vals { - // append to new slice so we don't overwrite - // the original values in ops.Set - newVals = append(newVals, repl.ReplaceAll(vals[i], "")) - } - hdr.Set(fieldName, strings.Join(newVals, ",")) - } - - // delete - for _, fieldName := range ops.Delete { - hdr.Del(repl.ReplaceAll(fieldName, "")) - } - - // replace - for fieldName, replacements := range ops.Replace { - fieldName = repl.ReplaceAll(fieldName, "") - - // all fields... - if fieldName == "*" { - for _, r := range replacements { - search := repl.ReplaceAll(r.Search, "") - replace := repl.ReplaceAll(r.Replace, "") - for fieldName, vals := range hdr { - for i := range vals { - if r.re != nil { - hdr[fieldName][i] = r.re.ReplaceAllString(hdr[fieldName][i], replace) - } else { - hdr[fieldName][i] = strings.ReplaceAll(hdr[fieldName][i], search, replace) - } - } - } - } - continue - } - - // ...or only with the named field - for _, r := range replacements { - search := repl.ReplaceAll(r.Search, "") - replace := repl.ReplaceAll(r.Replace, "") - for i := range hdr[fieldName] { - if r.re != nil { - hdr[fieldName][i] = r.re.ReplaceAllString(hdr[fieldName][i], replace) - } else { - hdr[fieldName][i] = strings.ReplaceAll(hdr[fieldName][i], search, replace) - } - } - } - } -} - -// ApplyToRequest applies ops to r, specially handling the Host -// header which the standard library does not include with the -// header map with all the others. This method mutates r.Host. 
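A short standard-library-only sketch of why `ApplyToRequest` below must special-case `Host`: Go's `net/http` promotes the Host header into `Request.Host` and strips it from the header map, so header operations have to splice it back in, apply the ops, and then restore `r.Host` from the result.

```go
package main

import (
	"fmt"
	"net/http/httptest"
)

func main() {
	r := httptest.NewRequest("GET", "http://app.test/", nil)
	fmt.Println(r.Host)           // "app.test"
	fmt.Println(r.Header["Host"]) // [] (absent from the header map)
}
```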
-func (ops HeaderOps) ApplyToRequest(r *http.Request) { - repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer) - - // capture the current Host header so we can - // reset to it when we're done - origHost, hadHost := r.Header["Host"] - - // append r.Host; this way, we know that our value - // was last in the list, and if an Add operation - // appended something else after it, that's probably - // fine because it's weird to have multiple Host - // headers anyway and presumably the one they added - // is the one they wanted - r.Header["Host"] = append(r.Header["Host"], r.Host) - - // apply header operations - ops.ApplyTo(r.Header, repl) - - // retrieve the last Host value (likely the one we appended) - if len(r.Header["Host"]) > 0 { - r.Host = r.Header["Host"][len(r.Header["Host"])-1] - } else { - r.Host = "" - } - - // reset the Host header slice - if hadHost { - r.Header["Host"] = origHost - } else { - delete(r.Header, "Host") - } -} - -// responseWriterWrapper defers response header -// operations until WriteHeader is called. -type responseWriterWrapper struct { - *caddyhttp.ResponseWriterWrapper - replacer *caddy.Replacer - require *caddyhttp.ResponseMatcher - headerOps *HeaderOps - wroteHeader bool -} - -func (rww *responseWriterWrapper) WriteHeader(status int) { - if rww.wroteHeader { - return - } - rww.wroteHeader = true - if rww.require == nil || rww.require.Match(status, rww.ResponseWriterWrapper.Header()) { - if rww.headerOps != nil { - rww.headerOps.ApplyTo(rww.ResponseWriterWrapper.Header(), rww.replacer) - } - } - rww.ResponseWriterWrapper.WriteHeader(status) -} - -func (rww *responseWriterWrapper) Write(d []byte) (int, error) { - if !rww.wroteHeader { - rww.WriteHeader(http.StatusOK) - } - return rww.ResponseWriterWrapper.Write(d) -} - -// Interface guards -var ( - _ caddy.Provisioner = (*Handler)(nil) - _ caddyhttp.MiddlewareHandler = (*Handler)(nil) - _ caddyhttp.HTTPInterfaces = (*responseWriterWrapper)(nil) -) diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/marshalers.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/marshalers.go deleted file mode 100644 index 8001bd8f..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/marshalers.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package caddyhttp - -import ( - "crypto/tls" - "net/http" - - "go.uber.org/zap/zapcore" -) - -// LoggableHTTPRequest makes an HTTP request loggable with zap.Object(). -type LoggableHTTPRequest struct{ *http.Request } - -// MarshalLogObject satisfies the zapcore.ObjectMarshaler interface. 
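For context, a hedged sketch of how a `zapcore.ObjectMarshaler` such as `LoggableHTTPRequest` is consumed: `zap.Object` defers field encoding until the log entry is actually written, which is what makes these wrapper types cheap to pass around. The `loggablePair` type here is hypothetical, purely for illustration.

```go
package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

// loggablePair is a stand-in for wrapper types like LoggableHTTPRequest.
type loggablePair struct{ key, val string }

func (p loggablePair) MarshalLogObject(enc zapcore.ObjectEncoder) error {
	enc.AddString(p.key, p.val)
	return nil
}

func main() {
	logger, _ := zap.NewDevelopment()
	defer func() { _ = logger.Sync() }()
	// Encoding of the pair happens only when this entry is written.
	logger.Info("request", zap.Object("req", loggablePair{"method", "GET"}))
}
```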
-func (r LoggableHTTPRequest) MarshalLogObject(enc zapcore.ObjectEncoder) error { - enc.AddString("remote_addr", r.RemoteAddr) - enc.AddString("proto", r.Proto) - enc.AddString("method", r.Method) - enc.AddString("host", r.Host) - enc.AddString("uri", r.RequestURI) - enc.AddObject("headers", LoggableHTTPHeader(r.Header)) - if r.TLS != nil { - enc.AddObject("tls", LoggableTLSConnState(*r.TLS)) - } - return nil -} - -// LoggableHTTPHeader makes an HTTP header loggable with zap.Object(). -type LoggableHTTPHeader http.Header - -// MarshalLogObject satisfies the zapcore.ObjectMarshaler interface. -func (h LoggableHTTPHeader) MarshalLogObject(enc zapcore.ObjectEncoder) error { - if h == nil { - return nil - } - for key, val := range h { - enc.AddArray(key, LoggableStringArray(val)) - } - return nil -} - -// LoggableStringArray makes a slice of strings marshalable for logging. -type LoggableStringArray []string - -// MarshalLogArray satisfies the zapcore.ArrayMarshaler interface. -func (sa LoggableStringArray) MarshalLogArray(enc zapcore.ArrayEncoder) error { - if sa == nil { - return nil - } - for _, s := range sa { - enc.AppendString(s) - } - return nil -} - -// LoggableTLSConnState makes a TLS connection state loggable with zap.Object(). -type LoggableTLSConnState tls.ConnectionState - -// MarshalLogObject satisfies the zapcore.ObjectMarshaler interface. -func (t LoggableTLSConnState) MarshalLogObject(enc zapcore.ObjectEncoder) error { - enc.AddBool("resumed", t.DidResume) - enc.AddUint16("version", t.Version) - enc.AddUint16("cipher_suite", t.CipherSuite) - enc.AddString("proto", t.NegotiatedProtocol) - // NegotiatedProtocolIsMutual is deprecated - it's always true - enc.AddBool("proto_mutual", true) - enc.AddString("server_name", t.ServerName) - if len(t.PeerCertificates) > 0 { - enc.AddString("client_common_name", t.PeerCertificates[0].Subject.CommonName) - enc.AddString("client_serial", t.PeerCertificates[0].SerialNumber.String()) - } - return nil -} - -// Interface guards -var ( - _ zapcore.ObjectMarshaler = (*LoggableHTTPRequest)(nil) - _ zapcore.ObjectMarshaler = (*LoggableHTTPHeader)(nil) - _ zapcore.ArrayMarshaler = (*LoggableStringArray)(nil) - _ zapcore.ObjectMarshaler = (*LoggableTLSConnState)(nil) -) diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/matchers.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/matchers.go deleted file mode 100644 index b452d48e..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/matchers.go +++ /dev/null @@ -1,1023 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package caddyhttp - -import ( - "encoding/json" - "fmt" - "net" - "net/http" - "net/textproto" - "net/url" - "path/filepath" - "regexp" - "sort" - "strconv" - "strings" - - "github.com/caddyserver/caddy/v2" - "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile" - "go.uber.org/zap" -) - -type ( - // MatchHost matches requests by the Host value (case-insensitive). 
- // - // When used in a top-level HTTP route, - // [qualifying domain names](/docs/automatic-https#hostname-requirements) - // may trigger [automatic HTTPS](/docs/automatic-https), which automatically - // provisions and renews certificates for you. Before doing this, you - // should ensure that DNS records for these domains are properly configured, - // especially A/AAAA pointed at your server. - // - // Automatic HTTPS can be - // [customized or disabled](/docs/modules/http#servers/automatic_https). - // - // Wildcards (`*`) may be used to represent exactly one label of the - // hostname, in accordance with RFC 1034 (because host matchers are also - // used for automatic HTTPS which influences TLS certificates). Thus, - // a host of `*` matches hosts like `localhost` or `internal` but not - // `example.com`. To catch all hosts, omit the host matcher entirely. - // - // The wildcard can be useful for matching all subdomains, for example: - // `*.example.com` matches `foo.example.com` but not `foo.bar.example.com`. - // - // Duplicate entries will return an error. - MatchHost []string - - // MatchPath matches requests by the URI's path (case-insensitive). Path - // matches are exact, but wildcards may be used: - // - // - At the end, for a prefix match (`/prefix/*`) - // - At the beginning, for a suffix match (`*.suffix`) - // - On both sides, for a substring match (`*/contains/*`) - // - In the middle, for a globular match (`/accounts/*/info`) - // - // This matcher is fast, so it does not support regular expressions or - // capture groups. For slower but more powerful matching, use the - // path_regexp matcher. - MatchPath []string - - // MatchPathRE matches requests by a regular expression on the URI's path. - // - // Upon a match, it adds placeholders to the request: `{http.regexp.name.capture_group}` - // where `name` is the regular expression's name, and `capture_group` is either - // the named or positional capture group from the expression itself. If no name - // is given, then the placeholder omits the name: `{http.regexp.capture_group}` - // (potentially leading to collisions). - MatchPathRE struct{ MatchRegexp } - - // MatchMethod matches requests by the method. - MatchMethod []string - - // MatchQuery matches requests by the URI's query string. It takes a JSON object - // keyed by the query keys, with an array of string values to match for that key. - // Query key matches are exact, but wildcards may be used for value matches. Both - // keys and values may be placeholders. - // An example of the structure to match `?key=value&topic=api&query=something` is: - // - // ```json - // { - // "key": ["value"], - // "topic": ["api"], - // "query": ["*"] - // } - // ``` - MatchQuery url.Values - - // MatchHeader matches requests by header fields. It performs fast, - // exact string comparisons of the field values. Fast prefix, suffix, - // and substring matches can also be done by suffixing, prefixing, or - // surrounding the value with the wildcard `*` character, respectively. - // If a list is null, the header must not exist. If the list is empty, - // the field must simply exist, regardless of its value. - MatchHeader http.Header - - // MatchHeaderRE matches requests by a regular expression on header fields. - // - // Upon a match, it adds placeholders to the request: `{http.regexp.name.capture_group}` - // where `name` is the regular expression's name, and `capture_group` is either - // the named or positional capture group from the expression itself. 
If no name - // is given, then the placeholder omits the name: `{http.regexp.capture_group}` - // (potentially leading to collisions). - MatchHeaderRE map[string]*MatchRegexp - - // MatchProtocol matches requests by protocol. - MatchProtocol string - - // MatchRemoteIP matches requests by client IP (or CIDR range). - MatchRemoteIP struct { - // The IPs or CIDR ranges to match. - Ranges []string `json:"ranges,omitempty"` - - // If true, prefer the first IP in the request's X-Forwarded-For - // header, if present, rather than the immediate peer's IP, as - // the reference IP against which to match. Note that it is easy - // to spoof request headers. Default: false - Forwarded bool `json:"forwarded,omitempty"` - - cidrs []*net.IPNet - logger *zap.Logger - } - - // MatchNot matches requests by negating the results of its matcher - // sets. A single "not" matcher takes one or more matcher sets. Each - // matcher set is OR'ed; in other words, if any matcher set returns - // true, the final result of the "not" matcher is false. Individual - // matchers within a set work the same (i.e. different matchers in - // the same set are AND'ed). - // - // Note that the generated docs which describe the structure of - // this module are wrong because of how this type unmarshals JSON - // in a custom way. The correct structure is: - // - // ```json - // [ - // {}, - // {} - // ] - // ``` - // - // where each of the array elements is a matcher set, i.e. an - // object keyed by matcher name. - MatchNot struct { - MatcherSetsRaw []caddy.ModuleMap `json:"-" caddy:"namespace=http.matchers"` - MatcherSets []MatcherSet `json:"-"` - } -) - -func init() { - caddy.RegisterModule(MatchHost{}) - caddy.RegisterModule(MatchPath{}) - caddy.RegisterModule(MatchPathRE{}) - caddy.RegisterModule(MatchMethod{}) - caddy.RegisterModule(MatchQuery{}) - caddy.RegisterModule(MatchHeader{}) - caddy.RegisterModule(MatchHeaderRE{}) - caddy.RegisterModule(new(MatchProtocol)) - caddy.RegisterModule(MatchRemoteIP{}) - caddy.RegisterModule(MatchNot{}) -} - -// CaddyModule returns the Caddy module information. -func (MatchHost) CaddyModule() caddy.ModuleInfo { - return caddy.ModuleInfo{ - ID: "http.matchers.host", - New: func() caddy.Module { return new(MatchHost) }, - } -} - -// UnmarshalCaddyfile implements caddyfile.Unmarshaler. -func (m *MatchHost) UnmarshalCaddyfile(d *caddyfile.Dispenser) error { - for d.Next() { - *m = append(*m, d.RemainingArgs()...) - if d.NextBlock(0) { - return d.Err("malformed host matcher: blocks are not supported") - } - } - return nil -} - -// Provision sets up and validates m, including making it more efficient for large lists. 
-func (m MatchHost) Provision(_ caddy.Context) error { - // check for duplicates; they are nonsensical and reduce efficiency - // (we could just remove them, but the user should know their config is erroneous) - seen := make(map[string]int) - for i, h := range m { - h = strings.ToLower(h) - if firstI, ok := seen[h]; ok { - return fmt.Errorf("host at index %d is repeated at index %d: %s", firstI, i, h) - } - seen[h] = i - } - - if m.large() { - // sort the slice lexicographically, grouping "fuzzy" entries (wildcards and placeholders) - // at the front of the list; this allows us to use binary search for exact matches, which - // we have seen from experience is the most common kind of value in large lists; and any - // other kinds of values (wildcards and placeholders) are grouped in front so the linear - // search should find a match fairly quickly - sort.Slice(m, func(i, j int) bool { - iInexact, jInexact := m.fuzzy(m[i]), m.fuzzy(m[j]) - if iInexact && !jInexact { - return true - } - if !iInexact && jInexact { - return false - } - return m[i] < m[j] - }) - } - - return nil -} - -// Match returns true if r matches m. -func (m MatchHost) Match(r *http.Request) bool { - reqHost, _, err := net.SplitHostPort(r.Host) - if err != nil { - // OK; probably didn't have a port - reqHost = r.Host - - // make sure we strip the brackets from IPv6 addresses - reqHost = strings.TrimPrefix(reqHost, "[") - reqHost = strings.TrimSuffix(reqHost, "]") - } - - if m.large() { - // fast path: locate exact match using binary search (about 100-1000x faster for large lists) - pos := sort.Search(len(m), func(i int) bool { - if m.fuzzy(m[i]) { - return false - } - return m[i] >= reqHost - }) - if pos < len(m) && m[pos] == reqHost { - return true - } - } - - repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer) - -outer: - for _, host := range m { - // fast path: if matcher is large, we already know we don't have an exact - // match, so we're only looking for fuzzy match now, which should be at the - // front of the list; if we have reached a value that is not fuzzy, there - // will be no match and we can short-circuit for efficiency - if m.large() && !m.fuzzy(host) { - break - } - - host = repl.ReplaceAll(host, "") - if strings.Contains(host, "*") { - patternParts := strings.Split(host, ".") - incomingParts := strings.Split(reqHost, ".") - if len(patternParts) != len(incomingParts) { - continue - } - for i := range patternParts { - if patternParts[i] == "*" { - continue - } - if !strings.EqualFold(patternParts[i], incomingParts[i]) { - continue outer - } - } - return true - } else if strings.EqualFold(reqHost, host) { - return true - } - } - - return false -} - -// fuzzy returns true if the given hostname h is not a specific -// hostname, e.g. has placeholders or wildcards. -func (MatchHost) fuzzy(h string) bool { return strings.ContainsAny(h, "{*") } - -// large returns true if m is considered to be large. Optimizing -// the matcher for smaller lists has diminishing returns. -// See related benchmark function in test file to conduct experiments. -func (m MatchHost) large() bool { return len(m) > 100 } - -// CaddyModule returns the Caddy module information. -func (MatchPath) CaddyModule() caddy.ModuleInfo { - return caddy.ModuleInfo{ - ID: "http.matchers.path", - New: func() caddy.Module { return new(MatchPath) }, - } -} - -// Provision lower-cases the paths in m to ensure case-insensitive matching. 
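A sketch of the label-wise wildcard comparison in `MatchHost.Match` above, reduced to its core (`hostMatches` is an invented helper; the real code also handles placeholders, port stripping, and the sorted fast path for large lists):

```go
package main

import (
	"fmt"
	"strings"
)

// hostMatches reproduces the per-label rule: "*" stands for exactly one
// DNS label, so the label counts must agree before comparing.
func hostMatches(pattern, host string) bool {
	pp := strings.Split(pattern, ".")
	hp := strings.Split(host, ".")
	if len(pp) != len(hp) {
		return false
	}
	for i := range pp {
		if pp[i] != "*" && !strings.EqualFold(pp[i], hp[i]) {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(hostMatches("*.example.com", "foo.example.com"))     // true
	fmt.Println(hostMatches("*.example.com", "foo.bar.example.com")) // false: one extra label
	fmt.Println(hostMatches("*", "localhost"))                       // true: single label
}
```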
-func (m MatchPath) Provision(_ caddy.Context) error { - for i := range m { - m[i] = strings.ToLower(m[i]) - } - return nil -} - -// Match returns true if r matches m. -func (m MatchPath) Match(r *http.Request) bool { - lowerPath := strings.ToLower(r.URL.Path) - - // see #2917; Windows ignores trailing dots and spaces - // when accessing files (sigh), potentially causing a - // security risk (cry) if PHP files end up being served - // as static files, exposing the source code, instead of - // being matched by *.php to be treated as PHP scripts - lowerPath = strings.TrimRight(lowerPath, ". ") - - repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer) - - for _, matchPath := range m { - matchPath = repl.ReplaceAll(matchPath, "") - - // special case: whole path is wildcard; this is unnecessary - // as it matches all requests, which is the same as no matcher - if matchPath == "*" { - return true - } - - // special case: first and last characters are wildcard, - // treat it as a fast substring match - if len(matchPath) > 1 && - strings.HasPrefix(matchPath, "*") && - strings.HasSuffix(matchPath, "*") { - if strings.Contains(lowerPath, matchPath[1:len(matchPath)-1]) { - return true - } - continue - } - - // special case: first character is a wildcard, - // treat it as a fast suffix match - if strings.HasPrefix(matchPath, "*") { - if strings.HasSuffix(lowerPath, matchPath[1:]) { - return true - } - continue - } - - // special case: last character is a wildcard, - // treat it as a fast prefix match - if strings.HasSuffix(matchPath, "*") { - if strings.HasPrefix(lowerPath, matchPath[:len(matchPath)-1]) { - return true - } - continue - } - - // for everything else, try globular matching, which also - // is exact matching if there are no glob/wildcard chars; - // can ignore error here because we can't handle it anyway - matches, _ := filepath.Match(matchPath, lowerPath) - if matches { - return true - } - } - return false -} - -// UnmarshalCaddyfile implements caddyfile.Unmarshaler. -func (m *MatchPath) UnmarshalCaddyfile(d *caddyfile.Dispenser) error { - for d.Next() { - *m = append(*m, d.RemainingArgs()...) - if d.NextBlock(0) { - return d.Err("malformed path matcher: blocks are not supported") - } - } - return nil -} - -// CaddyModule returns the Caddy module information. -func (MatchPathRE) CaddyModule() caddy.ModuleInfo { - return caddy.ModuleInfo{ - ID: "http.matchers.path_regexp", - New: func() caddy.Module { return new(MatchPathRE) }, - } -} - -// Match returns true if r matches m. -func (m MatchPathRE) Match(r *http.Request) bool { - repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer) - return m.MatchRegexp.Match(r.URL.Path, repl) -} - -// CaddyModule returns the Caddy module information. -func (MatchMethod) CaddyModule() caddy.ModuleInfo { - return caddy.ModuleInfo{ - ID: "http.matchers.method", - New: func() caddy.Module { return new(MatchMethod) }, - } -} - -// UnmarshalCaddyfile implements caddyfile.Unmarshaler. -func (m *MatchMethod) UnmarshalCaddyfile(d *caddyfile.Dispenser) error { - for d.Next() { - *m = append(*m, d.RemainingArgs()...) - if d.NextBlock(0) { - return d.Err("malformed method matcher: blocks are not supported") - } - } - return nil -} - -// Match returns true if r matches m. -func (m MatchMethod) Match(r *http.Request) bool { - for _, method := range m { - if r.Method == method { - return true - } - } - return false -} - -// CaddyModule returns the Caddy module information. 
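A condensed sketch of the four wildcard forms handled by `MatchPath.Match` above (`pathMatches` is an invented helper; the real code also trims trailing dots/spaces and expands placeholders):

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// pathMatches: leading '*' = suffix match, trailing '*' = prefix match,
// both = substring match, otherwise filepath.Match handles mid-path globs.
func pathMatches(pattern, p string) bool {
	pattern, p = strings.ToLower(pattern), strings.ToLower(p)
	switch {
	case pattern == "*":
		return true
	case len(pattern) > 1 && strings.HasPrefix(pattern, "*") && strings.HasSuffix(pattern, "*"):
		return strings.Contains(p, pattern[1:len(pattern)-1])
	case strings.HasPrefix(pattern, "*"):
		return strings.HasSuffix(p, pattern[1:])
	case strings.HasSuffix(pattern, "*"):
		return strings.HasPrefix(p, pattern[:len(pattern)-1])
	default:
		ok, _ := filepath.Match(pattern, p)
		return ok
	}
}

func main() {
	fmt.Println(pathMatches("/prefix/*", "/prefix/a/b"))              // true (prefix)
	fmt.Println(pathMatches("*.php", "/index.php"))                   // true (suffix)
	fmt.Println(pathMatches("/accounts/*/info", "/accounts/42/info")) // true (glob)
}
```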
-func (MatchQuery) CaddyModule() caddy.ModuleInfo { - return caddy.ModuleInfo{ - ID: "http.matchers.query", - New: func() caddy.Module { return new(MatchQuery) }, - } -} - -// UnmarshalCaddyfile implements caddyfile.Unmarshaler. -func (m *MatchQuery) UnmarshalCaddyfile(d *caddyfile.Dispenser) error { - if *m == nil { - *m = make(map[string][]string) - } - for d.Next() { - for _, query := range d.RemainingArgs() { - if query == "" { - continue - } - parts := strings.SplitN(query, "=", 2) - if len(parts) != 2 { - return d.Errf("malformed query matcher token: %s; must be in param=val format", d.Val()) - } - url.Values(*m).Add(parts[0], parts[1]) - } - if d.NextBlock(0) { - return d.Err("malformed query matcher: blocks are not supported") - } - } - return nil -} - -// Match returns true if r matches m. An empty m matches an empty query string. -func (m MatchQuery) Match(r *http.Request) bool { - repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer) - for param, vals := range m { - param = repl.ReplaceAll(param, "") - paramVal, found := r.URL.Query()[param] - if found { - for _, v := range vals { - v = repl.ReplaceAll(v, "") - if paramVal[0] == v || v == "*" { - return true - } - } - } - } - return len(m) == 0 && len(r.URL.Query()) == 0 -} - -// CaddyModule returns the Caddy module information. -func (MatchHeader) CaddyModule() caddy.ModuleInfo { - return caddy.ModuleInfo{ - ID: "http.matchers.header", - New: func() caddy.Module { return new(MatchHeader) }, - } -} - -// UnmarshalCaddyfile implements caddyfile.Unmarshaler. -func (m *MatchHeader) UnmarshalCaddyfile(d *caddyfile.Dispenser) error { - if *m == nil { - *m = make(map[string][]string) - } - for d.Next() { - var field, val string - if !d.Args(&field) { - return d.Errf("malformed header matcher: expected field") - } - - if strings.HasPrefix(field, "!") { - if len(field) == 1 { - return d.Errf("malformed header matcher: must have field name following ! character") - } - - field = field[1:] - headers := *m - headers[field] = nil - m = &headers - if d.NextArg() { - return d.Errf("malformed header matcher: null matching headers cannot have a field value") - } - } else { - if !d.NextArg() { - return d.Errf("malformed header matcher: expected both field and value") - } - - // If multiple header matchers with the same header field are defined, - // we want to add the existing to the list of headers (will be OR'ed) - val = d.Val() - http.Header(*m).Add(field, val) - } - - if d.NextBlock(0) { - return d.Err("malformed header matcher: blocks are not supported") - } - } - return nil -} - -// Match returns true if r matches m. -func (m MatchHeader) Match(r *http.Request) bool { - repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer) - return matchHeaders(r.Header, http.Header(m), r.Host, repl) -} - -// getHeaderFieldVals returns the field values for the given fieldName from input. -// The host parameter should be obtained from the http.Request.Host field since -// net/http removes it from the header map. -func getHeaderFieldVals(input http.Header, fieldName, host string) []string { - fieldName = textproto.CanonicalMIMEHeaderKey(fieldName) - if fieldName == "Host" && host != "" { - return []string{host} - } - return input[fieldName] -} - -// matchHeaders returns true if input matches the criteria in against without regex. -// The host parameter should be obtained from the http.Request.Host field since -// net/http removes it from the header map. 
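A sketch of the presence/absence rules applied at the top of `matchHeaders`, which follows: a nil value list means the header must be absent, while a non-nil but empty list means it must be present with any value (`presenceOK` is an invented helper name):

```go
package main

import (
	"fmt"
	"net/http"
)

// presenceOK mirrors the nil/empty-list checks in matchHeaders.
func presenceOK(allowed, actual []string) bool {
	if allowed == nil {
		return actual == nil // header must not exist
	}
	if len(allowed) == 0 {
		return actual != nil // header must exist; value irrelevant
	}
	return true // non-empty lists fall through to value comparison
}

func main() {
	hdr := http.Header{"X-Debug": []string{"1"}}
	fmt.Println(presenceOK(nil, hdr["X-Debug"]))        // false: must be absent
	fmt.Println(presenceOK([]string{}, hdr["X-Debug"])) // true: exists
	fmt.Println(presenceOK(nil, hdr["X-Trace"]))        // true: absent as required
}
```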
-func matchHeaders(input, against http.Header, host string, repl *caddy.Replacer) bool { - for field, allowedFieldVals := range against { - actualFieldVals := getHeaderFieldVals(input, field, host) - if allowedFieldVals != nil && len(allowedFieldVals) == 0 && actualFieldVals != nil { - // a non-nil but empty list of allowed values means - // match if the header field exists at all - continue - } - if allowedFieldVals == nil && actualFieldVals == nil { - // a nil list means match if the header does not exist at all - continue - } - var match bool - fieldVals: - for _, actualFieldVal := range actualFieldVals { - for _, allowedFieldVal := range allowedFieldVals { - if repl != nil { - allowedFieldVal = repl.ReplaceAll(allowedFieldVal, "") - } - switch { - case allowedFieldVal == "*": - match = true - case strings.HasPrefix(allowedFieldVal, "*") && strings.HasSuffix(allowedFieldVal, "*"): - match = strings.Contains(actualFieldVal, allowedFieldVal[1:len(allowedFieldVal)-1]) - case strings.HasPrefix(allowedFieldVal, "*"): - match = strings.HasSuffix(actualFieldVal, allowedFieldVal[1:]) - case strings.HasSuffix(allowedFieldVal, "*"): - match = strings.HasPrefix(actualFieldVal, allowedFieldVal[:len(allowedFieldVal)-1]) - default: - match = actualFieldVal == allowedFieldVal - } - if match { - break fieldVals - } - } - } - if !match { - return false - } - } - return true -} - -// CaddyModule returns the Caddy module information. -func (MatchHeaderRE) CaddyModule() caddy.ModuleInfo { - return caddy.ModuleInfo{ - ID: "http.matchers.header_regexp", - New: func() caddy.Module { return new(MatchHeaderRE) }, - } -} - -// UnmarshalCaddyfile implements caddyfile.Unmarshaler. -func (m *MatchHeaderRE) UnmarshalCaddyfile(d *caddyfile.Dispenser) error { - if *m == nil { - *m = make(map[string]*MatchRegexp) - } - for d.Next() { - var first, second, third string - if !d.Args(&first, &second) { - return d.ArgErr() - } - - var name, field, val string - if d.Args(&third) { - name = first - field = second - val = third - } else { - field = first - val = second - } - - (*m)[field] = &MatchRegexp{Pattern: val, Name: name} - - if d.NextBlock(0) { - return d.Err("malformed header_regexp matcher: blocks are not supported") - } - } - return nil -} - -// Match returns true if r matches m. -func (m MatchHeaderRE) Match(r *http.Request) bool { - for field, rm := range m { - actualFieldVals := getHeaderFieldVals(r.Header, field, r.Host) - match := false - fieldVal: - for _, actualFieldVal := range actualFieldVals { - repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer) - if rm.Match(actualFieldVal, repl) { - match = true - break fieldVal - } - } - if !match { - return false - } - } - return true -} - -// Provision compiles m's regular expressions. -func (m MatchHeaderRE) Provision(ctx caddy.Context) error { - for _, rm := range m { - err := rm.Provision(ctx) - if err != nil { - return err - } - } - return nil -} - -// Validate validates m's regular expressions. -func (m MatchHeaderRE) Validate() error { - for _, rm := range m { - err := rm.Validate() - if err != nil { - return err - } - } - return nil -} - -// CaddyModule returns the Caddy module information. -func (MatchProtocol) CaddyModule() caddy.ModuleInfo { - return caddy.ModuleInfo{ - ID: "http.matchers.protocol", - New: func() caddy.Module { return new(MatchProtocol) }, - } -} - -// Match returns true if r matches m. 
-func (m MatchProtocol) Match(r *http.Request) bool { - switch string(m) { - case "grpc": - return strings.HasPrefix(r.Header.Get("content-type"), "application/grpc") - case "https": - return r.TLS != nil - case "http": - return r.TLS == nil - } - return false -} - -// UnmarshalCaddyfile implements caddyfile.Unmarshaler. -func (m *MatchProtocol) UnmarshalCaddyfile(d *caddyfile.Dispenser) error { - for d.Next() { - var proto string - if !d.Args(&proto) { - return d.Err("expected exactly one protocol") - } - *m = MatchProtocol(proto) - } - return nil -} - -// CaddyModule returns the Caddy module information. -func (MatchNot) CaddyModule() caddy.ModuleInfo { - return caddy.ModuleInfo{ - ID: "http.matchers.not", - New: func() caddy.Module { return new(MatchNot) }, - } -} - -// UnmarshalCaddyfile implements caddyfile.Unmarshaler. -func (m *MatchNot) UnmarshalCaddyfile(d *caddyfile.Dispenser) error { - // first, unmarshal each matcher in the set from its tokens - type matcherPair struct { - raw caddy.ModuleMap - decoded MatcherSet - } - for d.Next() { - var mp matcherPair - matcherMap := make(map[string]RequestMatcher) - - // in case there are multiple instances of the same matcher, concatenate - // their tokens (we expect that UnmarshalCaddyfile should be able to - // handle more than one segment); otherwise, we'd overwrite other - // instances of the matcher in this set - tokensByMatcherName := make(map[string][]caddyfile.Token) - for nesting := d.Nesting(); d.NextArg() || d.NextBlock(nesting); { - matcherName := d.Val() - tokensByMatcherName[matcherName] = append(tokensByMatcherName[matcherName], d.NextSegment()...) - } - for matcherName, tokens := range tokensByMatcherName { - mod, err := caddy.GetModule("http.matchers." + matcherName) - if err != nil { - return d.Errf("getting matcher module '%s': %v", matcherName, err) - } - unm, ok := mod.New().(caddyfile.Unmarshaler) - if !ok { - return d.Errf("matcher module '%s' is not a Caddyfile unmarshaler", matcherName) - } - err = unm.UnmarshalCaddyfile(caddyfile.NewDispenser(tokens)) - if err != nil { - return err - } - rm, ok := unm.(RequestMatcher) - if !ok { - return fmt.Errorf("matcher module '%s' is not a request matcher", matcherName) - } - matcherMap[matcherName] = rm - mp.decoded = append(mp.decoded, rm) - } - - // we should now have a functional 'not' matcher, but we also - // need to be able to marshal as JSON, otherwise config - // adaptation will be missing the matchers! - mp.raw = make(caddy.ModuleMap) - for name, matcher := range matcherMap { - jsonBytes, err := json.Marshal(matcher) - if err != nil { - return fmt.Errorf("marshaling %T matcher: %v", matcher, err) - } - mp.raw[name] = jsonBytes - } - m.MatcherSetsRaw = append(m.MatcherSetsRaw, mp.raw) - } - return nil -} - -// UnmarshalJSON satisfies json.Unmarshaler. It puts the JSON -// bytes directly into m's MatcherSetsRaw field. -func (m *MatchNot) UnmarshalJSON(data []byte) error { - return json.Unmarshal(data, &m.MatcherSetsRaw) -} - -// MarshalJSON satisfies json.Marshaler by marshaling -// m's raw matcher sets. -func (m MatchNot) MarshalJSON() ([]byte, error) { - return json.Marshal(m.MatcherSetsRaw) -} - -// Provision loads the matcher modules to be negated. 
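A boolean sketch of the negation in `MatchNot.Match` below: matchers within a set are AND'ed, the sets are OR'ed, and the combined result is negated (`matcherSet` and `not` here are illustrative stand-ins for the real types):

```go
package main

import (
	"fmt"
	"strings"
)

// matcherSet: every matcher in the set must match (AND).
type matcherSet []func(path string) bool

func (ms matcherSet) match(p string) bool {
	for _, m := range ms {
		if !m(p) {
			return false
		}
	}
	return true
}

// not OR's the sets together and negates the result.
func not(sets []matcherSet, p string) bool {
	for _, ms := range sets {
		if ms.match(p) {
			return false
		}
	}
	return true
}

func main() {
	isAPI := func(p string) bool { return strings.HasPrefix(p, "/api") }
	fmt.Println(not([]matcherSet{{isAPI}}, "/api/v1")) // false: a set matched
	fmt.Println(not([]matcherSet{{isAPI}}, "/home"))   // true: no set matched
}
```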
-func (m *MatchNot) Provision(ctx caddy.Context) error { - matcherSets, err := ctx.LoadModule(m, "MatcherSetsRaw") - if err != nil { - return fmt.Errorf("loading matcher sets: %v", err) - } - for _, modMap := range matcherSets.([]map[string]interface{}) { - var ms MatcherSet - for _, modIface := range modMap { - ms = append(ms, modIface.(RequestMatcher)) - } - m.MatcherSets = append(m.MatcherSets, ms) - } - return nil -} - -// Match returns true if r matches m. Since this matcher negates -// the embedded matchers, false is returned if any of its matcher -// sets return true. -func (m MatchNot) Match(r *http.Request) bool { - for _, ms := range m.MatcherSets { - if ms.Match(r) { - return false - } - } - return true -} - -// CaddyModule returns the Caddy module information. -func (MatchRemoteIP) CaddyModule() caddy.ModuleInfo { - return caddy.ModuleInfo{ - ID: "http.matchers.remote_ip", - New: func() caddy.Module { return new(MatchRemoteIP) }, - } -} - -// UnmarshalCaddyfile implements caddyfile.Unmarshaler. -func (m *MatchRemoteIP) UnmarshalCaddyfile(d *caddyfile.Dispenser) error { - for d.Next() { - for d.NextArg() { - if d.Val() == "forwarded" { - if len(m.Ranges) > 0 { - return d.Err("if used, 'forwarded' must be first argument") - } - m.Forwarded = true - continue - } - m.Ranges = append(m.Ranges, d.Val()) - } - if d.NextBlock(0) { - return d.Err("malformed remote_ip matcher: blocks are not supported") - } - } - return nil -} - -// Provision parses m's IP ranges, either from IP or CIDR expressions. -func (m *MatchRemoteIP) Provision(ctx caddy.Context) error { - m.logger = ctx.Logger(m) - for _, str := range m.Ranges { - if strings.Contains(str, "/") { - _, ipNet, err := net.ParseCIDR(str) - if err != nil { - return fmt.Errorf("parsing CIDR expression: %v", err) - } - m.cidrs = append(m.cidrs, ipNet) - } else { - ip := net.ParseIP(str) - if ip == nil { - return fmt.Errorf("invalid IP address: %s", str) - } - mask := len(ip) * 8 - m.cidrs = append(m.cidrs, &net.IPNet{ - IP: ip, - Mask: net.CIDRMask(mask, mask), - }) - } - } - return nil -} - -func (m MatchRemoteIP) getClientIP(r *http.Request) (net.IP, error) { - remote := r.RemoteAddr - if m.Forwarded { - if fwdFor := r.Header.Get("X-Forwarded-For"); fwdFor != "" { - remote = strings.TrimSpace(strings.Split(fwdFor, ",")[0]) - } - } - ipStr, _, err := net.SplitHostPort(remote) - if err != nil { - ipStr = remote // OK; probably didn't have a port - } - ip := net.ParseIP(ipStr) - if ip == nil { - return nil, fmt.Errorf("invalid client IP address: %s", ipStr) - } - return ip, nil -} - -// Match returns true if r matches m. -func (m MatchRemoteIP) Match(r *http.Request) bool { - clientIP, err := m.getClientIP(r) - if err != nil { - m.logger.Error("getting client IP", zap.Error(err)) - return false - } - for _, ipRange := range m.cidrs { - if ipRange.Contains(clientIP) { - return true - } - } - return false -} - -// MatchRegexp is an embedable type for matching -// using regular expressions. It adds placeholders -// to the request's replacer. -type MatchRegexp struct { - // A unique name for this regular expression. Optional, - // but useful to prevent overwriting captures from other - // regexp matchers. - Name string `json:"name,omitempty"` - - // The regular expression to evaluate, in RE2 syntax, - // which is the same general syntax used by Go, Perl, - // and Python. For details, see - // [Go's regexp package](https://golang.org/pkg/regexp/). - // Captures are accessible via placeholders. 
Unnamed - // capture groups are exposed as their numeric, 1-based - // index, while named capture groups are available by - // the capture group name. - Pattern string `json:"pattern"` - - compiled *regexp.Regexp - phPrefix string -} - -// Provision compiles the regular expression. -func (mre *MatchRegexp) Provision(caddy.Context) error { - re, err := regexp.Compile(mre.Pattern) - if err != nil { - return fmt.Errorf("compiling matcher regexp %s: %v", mre.Pattern, err) - } - mre.compiled = re - mre.phPrefix = regexpPlaceholderPrefix - if mre.Name != "" { - mre.phPrefix += "." + mre.Name - } - return nil -} - -// Validate ensures mre is set up correctly. -func (mre *MatchRegexp) Validate() error { - if mre.Name != "" && !wordRE.MatchString(mre.Name) { - return fmt.Errorf("invalid regexp name (must contain only word characters): %s", mre.Name) - } - return nil -} - -// Match returns true if input matches the compiled regular -// expression in mre. It sets values on the replacer repl -// associated with capture groups, using the given scope -// (namespace). -func (mre *MatchRegexp) Match(input string, repl *caddy.Replacer) bool { - matches := mre.compiled.FindStringSubmatch(input) - if matches == nil { - return false - } - - // save all capture groups, first by index - for i, match := range matches { - key := mre.phPrefix + "." + strconv.Itoa(i) - repl.Set(key, match) - } - - // then by name - for i, name := range mre.compiled.SubexpNames() { - if i != 0 && name != "" { - key := mre.phPrefix + "." + name - repl.Set(key, matches[i]) - } - } - - return true -} - -// UnmarshalCaddyfile implements caddyfile.Unmarshaler. -func (mre *MatchRegexp) UnmarshalCaddyfile(d *caddyfile.Dispenser) error { - for d.Next() { - args := d.RemainingArgs() - switch len(args) { - case 1: - mre.Pattern = args[0] - case 2: - mre.Name = args[0] - mre.Pattern = args[1] - default: - return d.ArgErr() - } - if d.NextBlock(0) { - return d.Err("malformed path_regexp matcher: blocks are not supported") - } - } - return nil -} - -var wordRE = regexp.MustCompile(`\w+`) - -const regexpPlaceholderPrefix = "http.regexp" - -// Interface guards -var ( - _ RequestMatcher = (*MatchHost)(nil) - _ caddy.Provisioner = (*MatchHost)(nil) - _ RequestMatcher = (*MatchPath)(nil) - _ RequestMatcher = (*MatchPathRE)(nil) - _ caddy.Provisioner = (*MatchPathRE)(nil) - _ RequestMatcher = (*MatchMethod)(nil) - _ RequestMatcher = (*MatchQuery)(nil) - _ RequestMatcher = (*MatchHeader)(nil) - _ RequestMatcher = (*MatchHeaderRE)(nil) - _ caddy.Provisioner = (*MatchHeaderRE)(nil) - _ RequestMatcher = (*MatchProtocol)(nil) - _ RequestMatcher = (*MatchRemoteIP)(nil) - _ caddy.Provisioner = (*MatchRemoteIP)(nil) - _ RequestMatcher = (*MatchNot)(nil) - _ caddy.Provisioner = (*MatchNot)(nil) - _ caddy.Provisioner = (*MatchRegexp)(nil) - - _ caddyfile.Unmarshaler = (*MatchHost)(nil) - _ caddyfile.Unmarshaler = (*MatchPath)(nil) - _ caddyfile.Unmarshaler = (*MatchPathRE)(nil) - _ caddyfile.Unmarshaler = (*MatchMethod)(nil) - _ caddyfile.Unmarshaler = (*MatchQuery)(nil) - _ caddyfile.Unmarshaler = (*MatchHeader)(nil) - _ caddyfile.Unmarshaler = (*MatchHeaderRE)(nil) - _ caddyfile.Unmarshaler = (*MatchProtocol)(nil) - _ caddyfile.Unmarshaler = (*MatchRemoteIP)(nil) - _ caddyfile.Unmarshaler = (*VarsMatcher)(nil) - _ caddyfile.Unmarshaler = (*MatchVarsRE)(nil) - - _ json.Marshaler = (*MatchNot)(nil) - _ json.Unmarshaler = (*MatchNot)(nil) -) diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/metrics.go 
b/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/metrics.go deleted file mode 100644 index 3e5d6396..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/metrics.go +++ /dev/null @@ -1,186 +0,0 @@ -package caddyhttp - -import ( - "context" - "net/http" - "strconv" - "strings" - "sync" - "time" - - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" -) - -var httpMetrics = struct { - init sync.Once - requestInFlight *prometheus.GaugeVec - requestCount *prometheus.CounterVec - requestErrors *prometheus.CounterVec - requestDuration *prometheus.HistogramVec - requestSize *prometheus.HistogramVec - responseSize *prometheus.HistogramVec - responseDuration *prometheus.HistogramVec -}{ - init: sync.Once{}, -} - -func initHTTPMetrics() { - const ns, sub = "caddy", "http" - - basicLabels := []string{"server", "handler"} - httpMetrics.requestInFlight = promauto.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: ns, - Subsystem: sub, - Name: "requests_in_flight", - Help: "Number of requests currently handled by this server.", - }, basicLabels) - httpMetrics.requestErrors = promauto.NewCounterVec(prometheus.CounterOpts{ - Namespace: ns, - Subsystem: sub, - Name: "request_errors_total", - Help: "Number of requests resulting in middleware errors.", - }, basicLabels) - httpMetrics.requestCount = promauto.NewCounterVec(prometheus.CounterOpts{ - Namespace: ns, - Subsystem: sub, - Name: "requests_total", - Help: "Counter of HTTP(S) requests made.", - }, basicLabels) - - // TODO: allow these to be customized in the config - durationBuckets := prometheus.DefBuckets - sizeBuckets := prometheus.ExponentialBuckets(256, 4, 8) - - httpLabels := []string{"server", "handler", "code", "method"} - httpMetrics.requestDuration = promauto.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: ns, - Subsystem: sub, - Name: "request_duration_seconds", - Help: "Histogram of round-trip request durations.", - Buckets: durationBuckets, - }, httpLabels) - httpMetrics.requestSize = promauto.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: ns, - Subsystem: sub, - Name: "request_size_bytes", - Help: "Total size of the request. Includes body", - Buckets: sizeBuckets, - }, httpLabels) - httpMetrics.responseSize = promauto.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: ns, - Subsystem: sub, - Name: "response_size_bytes", - Help: "Size of the returned response.", - Buckets: sizeBuckets, - }, httpLabels) - httpMetrics.responseDuration = promauto.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: ns, - Subsystem: sub, - Name: "response_duration_seconds", - Help: "Histogram of times to first byte in response bodies.", - Buckets: durationBuckets, - }, httpLabels) -} - -// serverNameFromContext extracts the current server name from the context. -// Returns "UNKNOWN" if none is available (should probably never happen). 
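A sketch, assuming the standard Prometheus Go client, of how one of the histograms registered in `initHTTPMetrics` above gets observed per request; the metric name and label keys mirror the deleted code (`caddy_http_request_duration_seconds` with server/handler/code/method labels), while the label values here are invented:

```go
package main

import (
	"fmt"
	"strconv"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

// Registered once at startup, as initHTTPMetrics does (default buckets).
var requestDuration = promauto.NewHistogramVec(prometheus.HistogramOpts{
	Namespace: "caddy",
	Subsystem: "http",
	Name:      "request_duration_seconds",
	Help:      "Histogram of round-trip request durations.",
}, []string{"server", "handler", "code", "method"})

func main() {
	start := time.Now()
	// ... handle the request ...
	labels := prometheus.Labels{
		"server": "srv0", "handler": "reverse_proxy",
		"code": strconv.Itoa(200), "method": "GET",
	}
	requestDuration.With(labels).Observe(time.Since(start).Seconds())
	fmt.Println("observed")
}
```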
-func serverNameFromContext(ctx context.Context) string { - srv, ok := ctx.Value(ServerCtxKey).(*Server) - if !ok || srv == nil || srv.name == "" { - return "UNKNOWN" - } - return srv.name -} - -type metricsInstrumentedHandler struct { - handler string - mh MiddlewareHandler -} - -func newMetricsInstrumentedHandler(handler string, mh MiddlewareHandler) *metricsInstrumentedHandler { - httpMetrics.init.Do(func() { - initHTTPMetrics() - }) - - return &metricsInstrumentedHandler{handler, mh} -} - -func (h *metricsInstrumentedHandler) ServeHTTP(w http.ResponseWriter, r *http.Request, next Handler) error { - server := serverNameFromContext(r.Context()) - labels := prometheus.Labels{"server": server, "handler": h.handler} - method := strings.ToUpper(r.Method) - // the "code" value is set later, but initialized here to eliminate the possibility - // of a panic - statusLabels := prometheus.Labels{"server": server, "handler": h.handler, "method": method, "code": ""} - - inFlight := httpMetrics.requestInFlight.With(labels) - inFlight.Inc() - defer inFlight.Dec() - - start := time.Now() - - // This is a _bit_ of a hack - it depends on the ShouldBufferFunc always - // being called when the headers are written. - // Effectively the same behaviour as promhttp.InstrumentHandlerTimeToWriteHeader. - writeHeaderRecorder := ShouldBufferFunc(func(status int, header http.Header) bool { - statusLabels["code"] = sanitizeCode(status) - ttfb := time.Since(start).Seconds() - httpMetrics.responseDuration.With(statusLabels).Observe(ttfb) - return false - }) - wrec := NewResponseRecorder(w, nil, writeHeaderRecorder) - err := h.mh.ServeHTTP(wrec, r, next) - dur := time.Since(start).Seconds() - httpMetrics.requestCount.With(labels).Inc() - if err != nil { - httpMetrics.requestErrors.With(labels).Inc() - return err - } - - // If the code hasn't been set yet, and we didn't encounter an error, we're - // probably falling through with an empty handler. - if statusLabels["code"] == "" { - // we still sanitize it, even though it's likely to be 0. A 200 is - // returned on fallthrough so we want to reflect that. - statusLabels["code"] = sanitizeCode(wrec.Status()) - } - - httpMetrics.requestDuration.With(statusLabels).Observe(dur) - httpMetrics.requestSize.With(statusLabels).Observe(float64(computeApproximateRequestSize(r))) - httpMetrics.responseSize.With(statusLabels).Observe(float64(wrec.Size())) - - return nil -} - -func sanitizeCode(code int) string { - if code == 0 { - return "200" - } - return strconv.Itoa(code) -} - -// taken from https://github.com/prometheus/client_golang/blob/6007b2b5cae01203111de55f753e76d8dac1f529/prometheus/promhttp/instrument_server.go#L298 -func computeApproximateRequestSize(r *http.Request) int { - s := 0 - if r.URL != nil { - s += len(r.URL.String()) - } - - s += len(r.Method) - s += len(r.Proto) - for name, values := range r.Header { - s += len(name) - for _, value := range values { - s += len(value) - } - } - s += len(r.Host) - - // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL. 
- - if r.ContentLength != -1 { - s += int(r.ContentLength) - } - return s -} diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/replacer.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/replacer.go deleted file mode 100644 index d0767f09..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/replacer.go +++ /dev/null @@ -1,410 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package caddyhttp - -import ( - "bytes" - "context" - "crypto/ecdsa" - "crypto/ed25519" - "crypto/elliptic" - "crypto/rsa" - "crypto/sha256" - "crypto/tls" - "crypto/x509" - "encoding/asn1" - "encoding/pem" - "fmt" - "io" - "io/ioutil" - "net" - "net/http" - "net/textproto" - "net/url" - "path" - "strconv" - "strings" - "time" - - "github.com/caddyserver/caddy/v2" - "github.com/caddyserver/caddy/v2/modules/caddytls" -) - -// NewTestReplacer creates a replacer for an http.Request -// for use in tests that are not in this package -func NewTestReplacer(req *http.Request) *caddy.Replacer { - repl := caddy.NewReplacer() - ctx := context.WithValue(req.Context(), caddy.ReplacerCtxKey, repl) - *req = *req.WithContext(ctx) - addHTTPVarsToReplacer(repl, req, nil) - return repl -} - -func addHTTPVarsToReplacer(repl *caddy.Replacer, req *http.Request, w http.ResponseWriter) { - SetVar(req.Context(), "start_time", time.Now()) - - httpVars := func(key string) (interface{}, bool) { - if req != nil { - // query string parameters - if strings.HasPrefix(key, reqURIQueryReplPrefix) { - vals := req.URL.Query()[key[len(reqURIQueryReplPrefix):]] - // always return true, since the query param might - // be present only in some requests - return strings.Join(vals, ","), true - } - - // request header fields - if strings.HasPrefix(key, reqHeaderReplPrefix) { - field := key[len(reqHeaderReplPrefix):] - vals := req.Header[textproto.CanonicalMIMEHeaderKey(field)] - // always return true, since the header field might - // be present only in some requests - return strings.Join(vals, ","), true - } - - // cookies - if strings.HasPrefix(key, reqCookieReplPrefix) { - name := key[len(reqCookieReplPrefix):] - for _, cookie := range req.Cookies() { - if strings.EqualFold(name, cookie.Name) { - // always return true, since the cookie might - // be present only in some requests - return cookie.Value, true - } - } - } - - // http.request.tls.* - if strings.HasPrefix(key, reqTLSReplPrefix) { - return getReqTLSReplacement(req, key) - } - - switch key { - case "http.request.method": - return req.Method, true - case "http.request.scheme": - if req.TLS != nil { - return "https", true - } - return "http", true - case "http.request.proto": - return req.Proto, true - case "http.request.host": - host, _, err := net.SplitHostPort(req.Host) - if err != nil { - return req.Host, true // OK; there probably was no port - } - return host, true - case "http.request.port": - _, port, _ := net.SplitHostPort(req.Host) - if portNum, err := 
strconv.Atoi(port); err == nil { - return portNum, true - } - return port, true - case "http.request.hostport": - return req.Host, true - case "http.request.remote": - return req.RemoteAddr, true - case "http.request.remote.host": - host, _, err := net.SplitHostPort(req.RemoteAddr) - if err != nil { - return req.RemoteAddr, true - } - return host, true - case "http.request.remote.port": - _, port, _ := net.SplitHostPort(req.RemoteAddr) - if portNum, err := strconv.Atoi(port); err == nil { - return portNum, true - } - return port, true - - // current URI, including any internal rewrites - case "http.request.uri": - return req.URL.RequestURI(), true - case "http.request.uri.path": - return req.URL.Path, true - case "http.request.uri.path.file": - _, file := path.Split(req.URL.Path) - return file, true - case "http.request.uri.path.dir": - dir, _ := path.Split(req.URL.Path) - return dir, true - case "http.request.uri.query": - return req.URL.RawQuery, true - case "http.request.duration": - start := GetVar(req.Context(), "start_time").(time.Time) - return time.Since(start), true - case "http.request.body": - if req.Body == nil { - return "", true - } - // normally net/http will close the body for us, but since we - // are replacing it with a fake one, we have to ensure we close - // the real body ourselves when we're done - defer req.Body.Close() - // read the request body into a buffer (can't pool because we - // don't know its lifetime and would have to make a copy anyway) - buf := new(bytes.Buffer) - _, err := io.Copy(buf, req.Body) - if err != nil { - return "", true - } - // replace real body with buffered data - req.Body = ioutil.NopCloser(buf) - return buf.String(), true - - // original request, before any internal changes - case "http.request.orig_method": - or, _ := req.Context().Value(OriginalRequestCtxKey).(http.Request) - return or.Method, true - case "http.request.orig_uri": - or, _ := req.Context().Value(OriginalRequestCtxKey).(http.Request) - return or.RequestURI, true - case "http.request.orig_uri.path": - or, _ := req.Context().Value(OriginalRequestCtxKey).(http.Request) - return or.URL.Path, true - case "http.request.orig_uri.path.file": - or, _ := req.Context().Value(OriginalRequestCtxKey).(http.Request) - _, file := path.Split(or.URL.Path) - return file, true - case "http.request.orig_uri.path.dir": - or, _ := req.Context().Value(OriginalRequestCtxKey).(http.Request) - dir, _ := path.Split(or.URL.Path) - return dir, true - case "http.request.orig_uri.query": - or, _ := req.Context().Value(OriginalRequestCtxKey).(http.Request) - return or.URL.RawQuery, true - } - - // hostname labels - if strings.HasPrefix(key, reqHostLabelsReplPrefix) { - idxStr := key[len(reqHostLabelsReplPrefix):] - idx, err := strconv.Atoi(idxStr) - if err != nil || idx < 0 { - return "", false - } - reqHost, _, err := net.SplitHostPort(req.Host) - if err != nil { - reqHost = req.Host // OK; assume there was no port - } - hostLabels := strings.Split(reqHost, ".") - if idx >= len(hostLabels) { - return "", true - } - return hostLabels[len(hostLabels)-idx-1], true - } - - // path parts - if strings.HasPrefix(key, reqURIPathReplPrefix) { - idxStr := key[len(reqURIPathReplPrefix):] - idx, err := strconv.Atoi(idxStr) - if err != nil { - return "", false - } - pathParts := strings.Split(req.URL.Path, "/") - if len(pathParts) > 0 && pathParts[0] == "" { - pathParts = pathParts[1:] - } - if idx < 0 { - return "", false - } - if idx >= len(pathParts) { - return "", true - } - return pathParts[idx], true - } - - 
// middleware variables - if strings.HasPrefix(key, varsReplPrefix) { - varName := key[len(varsReplPrefix):] - tbl := req.Context().Value(VarsCtxKey).(map[string]interface{}) - raw := tbl[varName] - // variables can be dynamic, so always return true - // even when it may not be set; treat as empty then - return raw, true - } - } - - if w != nil { - // response header fields - if strings.HasPrefix(key, respHeaderReplPrefix) { - field := key[len(respHeaderReplPrefix):] - vals := w.Header()[textproto.CanonicalMIMEHeaderKey(field)] - // always return true, since the header field might - // be present only in some responses - return strings.Join(vals, ","), true - } - } - - return nil, false - } - - repl.Map(httpVars) -} - -func getReqTLSReplacement(req *http.Request, key string) (interface{}, bool) { - if req == nil || req.TLS == nil { - return nil, false - } - - if len(key) < len(reqTLSReplPrefix) { - return nil, false - } - - field := strings.ToLower(key[len(reqTLSReplPrefix):]) - - if strings.HasPrefix(field, "client.") { - cert := getTLSPeerCert(req.TLS) - if cert == nil { - return nil, false - } - - // subject alternate names (SANs) - if strings.HasPrefix(field, "client.san.") { - field = field[len("client.san."):] - var fieldName string - var fieldValue interface{} - switch { - case strings.HasPrefix(field, "dns_names"): - fieldName = "dns_names" - fieldValue = cert.DNSNames - case strings.HasPrefix(field, "emails"): - fieldName = "emails" - fieldValue = cert.EmailAddresses - case strings.HasPrefix(field, "ips"): - fieldName = "ips" - fieldValue = cert.IPAddresses - case strings.HasPrefix(field, "uris"): - fieldName = "uris" - fieldValue = cert.URIs - default: - return nil, false - } - field = field[len(fieldName):] - - // if no index was specified, return the whole list - if field == "" { - return fieldValue, true - } - if len(field) < 2 || field[0] != '.' { - return nil, false - } - field = field[1:] // trim '.' 
between field name and index - - // get the numeric index - idx, err := strconv.Atoi(field) - if err != nil || idx < 0 { - return nil, false - } - - // access the indexed element and return it - switch v := fieldValue.(type) { - case []string: - if idx >= len(v) { - return nil, true - } - return v[idx], true - case []net.IP: - if idx >= len(v) { - return nil, true - } - return v[idx], true - case []*url.URL: - if idx >= len(v) { - return nil, true - } - return v[idx], true - } - } - - switch field { - case "client.fingerprint": - return fmt.Sprintf("%x", sha256.Sum256(cert.Raw)), true - case "client.public_key", "client.public_key_sha256": - if cert.PublicKey == nil { - return nil, true - } - pubKeyBytes, err := marshalPublicKey(cert.PublicKey) - if err != nil { - return nil, true - } - if strings.HasSuffix(field, "_sha256") { - return fmt.Sprintf("%x", sha256.Sum256(pubKeyBytes)), true - } - return fmt.Sprintf("%x", pubKeyBytes), true - case "client.issuer": - return cert.Issuer, true - case "client.serial": - return cert.SerialNumber, true - case "client.subject": - return cert.Subject, true - case "client.certificate_pem": - block := pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw} - return pem.EncodeToMemory(&block), true - default: - return nil, false - } - } - - switch field { - case "version": - return caddytls.ProtocolName(req.TLS.Version), true - case "cipher_suite": - return tls.CipherSuiteName(req.TLS.CipherSuite), true - case "resumed": - return req.TLS.DidResume, true - case "proto": - return req.TLS.NegotiatedProtocol, true - case "proto_mutual": - // req.TLS.NegotiatedProtocolIsMutual is deprecated - it's always true. - return true, true - case "server_name": - return req.TLS.ServerName, true - } - return nil, false -} - -// marshalPublicKey returns the byte encoding of pubKey. -func marshalPublicKey(pubKey interface{}) ([]byte, error) { - switch key := pubKey.(type) { - case *rsa.PublicKey: - return asn1.Marshal(key) - case *ecdsa.PublicKey: - return elliptic.Marshal(key.Curve, key.X, key.Y), nil - case ed25519.PublicKey: - return key, nil - } - return nil, fmt.Errorf("unrecognized public key type: %T", pubKey) -} - -// getTLSPeerCert retrieves the first peer certificate from a TLS session. -// Returns nil if no peer cert is in use. -func getTLSPeerCert(cs *tls.ConnectionState) *x509.Certificate { - if len(cs.PeerCertificates) == 0 { - return nil - } - return cs.PeerCertificates[0] -} - -const ( - reqCookieReplPrefix = "http.request.cookie." - reqHeaderReplPrefix = "http.request.header." - reqHostLabelsReplPrefix = "http.request.host.labels." - reqTLSReplPrefix = "http.request.tls." - reqURIPathReplPrefix = "http.request.uri.path." - reqURIQueryReplPrefix = "http.request.uri.query." - respHeaderReplPrefix = "http.response.header." - varsReplPrefix = "http.vars." -) diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/responsematchers.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/responsematchers.go deleted file mode 100644 index d9ad8480..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/responsematchers.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package caddyhttp - -import ( - "net/http" - "strconv" - "strings" - - "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile" -) - -// ResponseMatcher is a type which can determine if an -// HTTP response matches some criteria. -type ResponseMatcher struct { - // If set, one of these status codes would be required. - // A one-digit status can be used to represent all codes - // in that class (e.g. 3 for all 3xx codes). - StatusCode []int `json:"status_code,omitempty"` - - // If set, each header specified must be one of the - // specified values, with the same logic used by the - // request header matcher. - Headers http.Header `json:"headers,omitempty"` -} - -// Match returns true if the given statusCode and hdr match rm. -func (rm ResponseMatcher) Match(statusCode int, hdr http.Header) bool { - if !rm.matchStatusCode(statusCode) { - return false - } - return matchHeaders(hdr, rm.Headers, "", nil) -} - -func (rm ResponseMatcher) matchStatusCode(statusCode int) bool { - if rm.StatusCode == nil { - return true - } - for _, code := range rm.StatusCode { - if StatusCodeMatches(statusCode, code) { - return true - } - } - return false -} - -// ParseNamedResponseMatcher parses the tokens of a named response matcher. -// -// @name { -// header [] -// status -// } -// -// Or, single line syntax: -// -// @name [header []] | [status ] -// -func ParseNamedResponseMatcher(d *caddyfile.Dispenser, matchers map[string]ResponseMatcher) error { - for d.Next() { - definitionName := d.Val() - - if _, ok := matchers[definitionName]; ok { - return d.Errf("matcher is defined more than once: %s", definitionName) - } - - matcher := ResponseMatcher{} - for nesting := d.Nesting(); d.NextArg() || d.NextBlock(nesting); { - switch d.Val() { - case "header": - if matcher.Headers == nil { - matcher.Headers = http.Header{} - } - - // reuse the header request matcher's unmarshaler - headerMatcher := MatchHeader(matcher.Headers) - err := headerMatcher.UnmarshalCaddyfile(d.NewFromNextSegment()) - if err != nil { - return err - } - - matcher.Headers = http.Header(headerMatcher) - case "status": - if matcher.StatusCode == nil { - matcher.StatusCode = []int{} - } - - args := d.RemainingArgs() - if len(args) == 0 { - return d.ArgErr() - } - - for _, arg := range args { - if len(arg) == 3 && strings.HasSuffix(arg, "xx") { - arg = arg[:1] - } - statusNum, err := strconv.Atoi(arg) - if err != nil { - return d.Errf("bad status value '%s': %v", arg, err) - } - matcher.StatusCode = append(matcher.StatusCode, statusNum) - } - default: - return d.Errf("unrecognized response matcher %s", d.Val()) - } - } - - matchers[definitionName] = matcher - } - return nil -} diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/responsewriter.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/responsewriter.go deleted file mode 100644 index 0ffb9320..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/responsewriter.go +++ /dev/null @@ -1,244 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you 
may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package caddyhttp - -import ( - "bufio" - "bytes" - "fmt" - "io" - "net" - "net/http" -) - -// ResponseWriterWrapper wraps an underlying ResponseWriter and -// promotes its Pusher/Flusher/Hijacker methods as well. To use -// this type, embed a pointer to it within your own struct type -// that implements the http.ResponseWriter interface, then call -// methods on the embedded value. You can make sure your type -// wraps correctly by asserting that it implements the -// HTTPInterfaces interface. -type ResponseWriterWrapper struct { - http.ResponseWriter -} - -// Hijack implements http.Hijacker. It simply calls the underlying -// ResponseWriter's Hijack method if there is one, or returns -// ErrNotImplemented otherwise. -func (rww *ResponseWriterWrapper) Hijack() (net.Conn, *bufio.ReadWriter, error) { - if hj, ok := rww.ResponseWriter.(http.Hijacker); ok { - return hj.Hijack() - } - return nil, nil, ErrNotImplemented -} - -// Flush implements http.Flusher. It simply calls the underlying -// ResponseWriter's Flush method if there is one. -func (rww *ResponseWriterWrapper) Flush() { - if f, ok := rww.ResponseWriter.(http.Flusher); ok { - f.Flush() - } -} - -// Push implements http.Pusher. It simply calls the underlying -// ResponseWriter's Push method if there is one, or returns -// ErrNotImplemented otherwise. -func (rww *ResponseWriterWrapper) Push(target string, opts *http.PushOptions) error { - if pusher, ok := rww.ResponseWriter.(http.Pusher); ok { - return pusher.Push(target, opts) - } - return ErrNotImplemented -} - -// HTTPInterfaces mix all the interfaces that middleware ResponseWriters need to support. -type HTTPInterfaces interface { - http.ResponseWriter - http.Pusher - http.Flusher - http.Hijacker -} - -// ErrNotImplemented is returned when an underlying -// ResponseWriter does not implement the required method. -var ErrNotImplemented = fmt.Errorf("method not implemented") - -type responseRecorder struct { - *ResponseWriterWrapper - statusCode int - buf *bytes.Buffer - shouldBuffer ShouldBufferFunc - size int - wroteHeader bool - stream bool -} - -// NewResponseRecorder returns a new ResponseRecorder that can be -// used instead of a standard http.ResponseWriter. The recorder is -// useful for middlewares which need to buffer a response and -// potentially process its entire body before actually writing the -// response to the underlying writer. Of course, buffering the entire -// body has a memory overhead, but sometimes there is no way to avoid -// buffering the whole response, hence the existence of this type. -// Still, if at all practical, handlers should strive to stream -// responses by wrapping Write and WriteHeader methods instead of -// buffering whole response bodies. -// -// Buffering is actually optional. The shouldBuffer function will -// be called just before the headers are written. 
If it returns -// true, the headers and body will be buffered by this recorder -// and not written to the underlying writer; if false, the headers -// will be written immediately and the body will be streamed out -// directly to the underlying writer. If shouldBuffer is nil, -// the response will never be buffered and will always be streamed -// directly to the writer. -// -// You can know if shouldBuffer returned true by calling Buffered(). -// -// The provided buffer buf should be obtained from a pool for best -// performance (see the sync.Pool type). -// -// Proper usage of a recorder looks like this: -// -// rec := caddyhttp.NewResponseRecorder(w, buf, shouldBuffer) -// err := next.ServeHTTP(rec, req) -// if err != nil { -// return err -// } -// if !rec.Buffered() { -// return nil -// } -// // process the buffered response here -// -// The header map is not buffered; i.e. the ResponseRecorder's Header() -// method returns the same header map of the underlying ResponseWriter. -// This is a crucial design decision to allow HTTP trailers to be -// flushed properly (https://github.com/caddyserver/caddy/issues/3236). -// -// Once you are ready to write the response, there are two ways you can -// do it. The easier way is to have the recorder do it: -// -// rec.WriteResponse() -// -// This writes the recorded response headers as well as the buffered body. -// Or, you may wish to do it yourself, especially if you manipulated the -// buffered body. First you will need to write the headers with the -// recorded status code, then write the body (this example writes the -// recorder's body buffer, but you might have your own body to write -// instead): -// -// w.WriteHeader(rec.Status()) -// io.Copy(w, rec.Buffer()) -// -func NewResponseRecorder(w http.ResponseWriter, buf *bytes.Buffer, shouldBuffer ShouldBufferFunc) ResponseRecorder { - return &responseRecorder{ - ResponseWriterWrapper: &ResponseWriterWrapper{ResponseWriter: w}, - buf: buf, - shouldBuffer: shouldBuffer, - } -} - -func (rr *responseRecorder) WriteHeader(statusCode int) { - if rr.wroteHeader { - return - } - rr.statusCode = statusCode - rr.wroteHeader = true - - // decide whether we should buffer the response - if rr.shouldBuffer == nil { - rr.stream = true - } else { - rr.stream = !rr.shouldBuffer(rr.statusCode, rr.ResponseWriterWrapper.Header()) - } - - // if not buffered, immediately write header - if rr.stream { - rr.ResponseWriterWrapper.WriteHeader(rr.statusCode) - } -} - -func (rr *responseRecorder) Write(data []byte) (int, error) { - rr.WriteHeader(http.StatusOK) - var n int - var err error - if rr.stream { - n, err = rr.ResponseWriterWrapper.Write(data) - } else { - n, err = rr.buf.Write(data) - } - if err == nil { - rr.size += n - } - return n, err -} - -// Status returns the status code that was written, if any. -func (rr *responseRecorder) Status() int { - return rr.statusCode -} - -// Size returns the number of bytes written, -// not including the response headers. -func (rr *responseRecorder) Size() int { - return rr.size -} - -// Buffer returns the body buffer that rr was created with. -// You should still have your original pointer, though. -func (rr *responseRecorder) Buffer() *bytes.Buffer { - return rr.buf -} - -// Buffered returns whether rr has decided to buffer the response. 
-func (rr *responseRecorder) Buffered() bool { - return !rr.stream -} - -func (rr *responseRecorder) WriteResponse() error { - if rr.stream { - return nil - } - if rr.statusCode == 0 { - // could happen if no handlers actually wrote anything, - // and this prevents a panic; status must be > 0 - rr.statusCode = http.StatusOK - } - rr.ResponseWriterWrapper.WriteHeader(rr.statusCode) - _, err := io.Copy(rr.ResponseWriterWrapper, rr.buf) - return err -} - -// ResponseRecorder is a http.ResponseWriter that records -// responses instead of writing them to the client. See -// docs for NewResponseRecorder for proper usage. -type ResponseRecorder interface { - HTTPInterfaces - Status() int - Buffer() *bytes.Buffer - Buffered() bool - Size() int - WriteResponse() error -} - -// ShouldBufferFunc is a function that returns true if the -// response should be buffered, given the pending HTTP status -// code and response headers. -type ShouldBufferFunc func(status int, header http.Header) bool - -// Interface guards -var ( - _ HTTPInterfaces = (*ResponseWriterWrapper)(nil) - _ ResponseRecorder = (*responseRecorder)(nil) -) diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy/admin.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy/admin.go deleted file mode 100644 index 25685a3a..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy/admin.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package reverseproxy - -import ( - "encoding/json" - "fmt" - "net/http" - - "github.com/caddyserver/caddy/v2" -) - -func init() { - caddy.RegisterModule(adminUpstreams{}) -} - -// adminUpstreams is a module that provides the -// /reverse_proxy/upstreams endpoint for the Caddy admin -// API. This allows for checking the health of configured -// reverse proxy upstreams in the pool. -type adminUpstreams struct{} - -// upstreamResults holds the status of a particular upstream -type upstreamStatus struct { - Address string `json:"address"` - Healthy bool `json:"healthy"` - NumRequests int `json:"num_requests"` - Fails int `json:"fails"` -} - -// CaddyModule returns the Caddy module information. -func (adminUpstreams) CaddyModule() caddy.ModuleInfo { - return caddy.ModuleInfo{ - ID: "admin.api.reverse_proxy", - New: func() caddy.Module { return new(adminUpstreams) }, - } -} - -// Routes returns a route for the /reverse_proxy/upstreams endpoint. -func (al adminUpstreams) Routes() []caddy.AdminRoute { - return []caddy.AdminRoute{ - { - Pattern: "/reverse_proxy/upstreams", - Handler: caddy.AdminHandlerFunc(al.handleUpstreams), - }, - } -} - -// handleUpstreams reports the status of the reverse proxy -// upstream pool. 
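// For example, with Caddy's default admin address the pool status is one
// GET away, returning JSON shaped by the upstreamStatus struct above
// (the sample values here are illustrative):
//
//	curl http://localhost:2019/reverse_proxy/upstreams
//	[{"address":"localhost:9000","healthy":true,"num_requests":0,"fails":0}]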
-func (adminUpstreams) handleUpstreams(w http.ResponseWriter, r *http.Request) error { - if r.Method != http.MethodGet { - return caddy.APIError{ - HTTPStatus: http.StatusMethodNotAllowed, - Err: fmt.Errorf("method not allowed"), - } - } - - // Prep for a JSON response - w.Header().Set("Content-Type", "application/json") - enc := json.NewEncoder(w) - - // Collect the results to respond with - results := []upstreamStatus{} - - // Iterate over the upstream pool (needs to be fast) - var rangeErr error - hosts.Range(func(key, val interface{}) bool { - address, ok := key.(string) - if !ok { - rangeErr = caddy.APIError{ - HTTPStatus: http.StatusInternalServerError, - Err: fmt.Errorf("could not type assert upstream address"), - } - return false - } - - upstream, ok := val.(*upstreamHost) - if !ok { - rangeErr = caddy.APIError{ - HTTPStatus: http.StatusInternalServerError, - Err: fmt.Errorf("could not type assert upstream struct"), - } - return false - } - - results = append(results, upstreamStatus{ - Address: address, - Healthy: !upstream.Unhealthy(), - NumRequests: upstream.NumRequests(), - Fails: upstream.Fails(), - }) - return true - }) - - // If an error happened during the range, return it - if rangeErr != nil { - return rangeErr - } - - err := enc.Encode(results) - if err != nil { - return caddy.APIError{ - HTTPStatus: http.StatusInternalServerError, - Err: err, - } - } - - return nil -} diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy/caddyfile.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy/caddyfile.go deleted file mode 100644 index c7f555f8..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy/caddyfile.go +++ /dev/null @@ -1,1033 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package reverseproxy - -import ( - "log" - "net" - "net/http" - "net/url" - "reflect" - "strconv" - "strings" - - "github.com/caddyserver/caddy/v2" - "github.com/caddyserver/caddy/v2/caddyconfig" - "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile" - "github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile" - "github.com/caddyserver/caddy/v2/modules/caddyhttp" - "github.com/caddyserver/caddy/v2/modules/caddyhttp/headers" - "github.com/dustin/go-humanize" -) - -func init() { - httpcaddyfile.RegisterHandlerDirective("reverse_proxy", parseCaddyfile) -} - -func parseCaddyfile(h httpcaddyfile.Helper) (caddyhttp.MiddlewareHandler, error) { - rp := new(Handler) - err := rp.UnmarshalCaddyfile(h.Dispenser) - if err != nil { - return nil, err - } - err = rp.FinalizeUnmarshalCaddyfile(h) - if err != nil { - return nil, err - } - return rp, nil -} - -// UnmarshalCaddyfile sets up the handler from Caddyfile tokens. 
Syntax: -// -// reverse_proxy [] [] { -// # upstreams -// to -// -// # load balancing -// lb_policy [] -// lb_try_duration -// lb_try_interval -// -// # active health checking -// health_uri -// health_port -// health_interval -// health_timeout -// health_status -// health_body -// health_headers { -// [] -// } -// -// # passive health checking -// max_fails -// fail_duration -// max_conns -// unhealthy_status -// unhealthy_latency -// -// # streaming -// flush_interval -// buffer_requests -// -// # header manipulation -// header_up [+|-] [ []] -// header_down [+|-] [ []] -// -// # round trip -// transport { -// ... -// } -// -// # handle responses -// @name { -// status -// header [] -// } -// handle_response [] [status_code] { -// -// } -// } -// -// Proxy upstream addresses should be network dial addresses such -// as `host:port`, or a URL such as `scheme://host:port`. Scheme -// and port may be inferred from other parts of the address/URL; if -// either are missing, defaults to HTTP. -// -// The FinalizeUnmarshalCaddyfile method should be called after this -// to finalize parsing of "handle_response" blocks, if possible. -func (h *Handler) UnmarshalCaddyfile(d *caddyfile.Dispenser) error { - // currently, all backends must use the same scheme/protocol (the - // underlying JSON does not yet support per-backend transports) - var commonScheme string - - // we'll wait until the very end of parsing before - // validating and encoding the transport - var transport http.RoundTripper - var transportModuleName string - - // collect the response matchers defined as subdirectives - // prefixed with "@" for use with "handle_response" blocks - h.responseMatchers = make(map[string]caddyhttp.ResponseMatcher) - - // TODO: the logic in this function is kind of sensitive, we need - // to write tests before making any more changes to it - upstreamDialAddress := func(upstreamAddr string) (string, error) { - var network, scheme, host, port string - - if strings.Contains(upstreamAddr, "://") { - // we get a parsing error if a placeholder is specified - // so we return a more user-friendly error message instead - // to explain what to do instead - if strings.Contains(upstreamAddr, "{") { - return "", d.Err("due to parsing difficulties, placeholders are not allowed when an upstream address contains a scheme") - } - - toURL, err := url.Parse(upstreamAddr) - if err != nil { - return "", d.Errf("parsing upstream URL: %v", err) - } - - // there is currently no way to perform a URL rewrite between choosing - // a backend and proxying to it, so we cannot allow extra components - // in backend URLs - if toURL.Path != "" || toURL.RawQuery != "" || toURL.Fragment != "" { - return "", d.Err("for now, URLs for proxy upstreams only support scheme, host, and port components") - } - - // ensure the port and scheme aren't in conflict - urlPort := toURL.Port() - if toURL.Scheme == "http" && urlPort == "443" { - return "", d.Err("upstream address has conflicting scheme (http://) and port (:443, the HTTPS port)") - } - if toURL.Scheme == "https" && urlPort == "80" { - return "", d.Err("upstream address has conflicting scheme (https://) and port (:80, the HTTP port)") - } - if toURL.Scheme == "h2c" && urlPort == "443" { - return "", d.Err("upstream address has conflicting scheme (h2c://) and port (:443, the HTTPS port)") - } - - // if port is missing, attempt to infer from scheme - if toURL.Port() == "" { - var toPort string - switch toURL.Scheme { - case "", "http", "h2c": - toPort = "80" - case "https": - toPort = "443" - } 
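// For instance, "h2c://localhost" infers port "80" and "https://example.com"
// infers "443"; the inferred port is joined back onto the host below so the
// resulting dial address is always host:port.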
- toURL.Host = net.JoinHostPort(toURL.Hostname(), toPort) - } - - scheme, host, port = toURL.Scheme, toURL.Hostname(), toURL.Port() - } else { - // extract network manually, since caddy.ParseNetworkAddress() will always add one - if idx := strings.Index(upstreamAddr, "/"); idx >= 0 { - network = strings.ToLower(strings.TrimSpace(upstreamAddr[:idx])) - upstreamAddr = upstreamAddr[idx+1:] - } - var err error - host, port, err = net.SplitHostPort(upstreamAddr) - if err != nil { - host = upstreamAddr - } - // we can assume a port if only a hostname is specified, but use of a - // placeholder without a port likely means a port will be filled in - if port == "" && !strings.Contains(host, "{") { - port = "80" - } - } - - // the underlying JSON does not yet support different - // transports (protocols or schemes) to each backend, - // so we remember the last one we see and compare them - if commonScheme != "" && scheme != commonScheme { - return "", d.Errf("for now, all proxy upstreams must use the same scheme (transport protocol); expecting '%s://' but got '%s://'", - commonScheme, scheme) - } - commonScheme = scheme - - // for simplest possible config, we only need to include - // the network portion if the user specified one - if network != "" { - return caddy.JoinNetworkAddress(network, host, port), nil - } - - // if the host is a placeholder, then we don't want to join with an empty port, - // because that would just append an extra ':' at the end of the address. - if port == "" && strings.Contains(host, "{") { - return host, nil - } - - return net.JoinHostPort(host, port), nil - } - - // appendUpstream creates an upstream for address and adds - // it to the list. If the address starts with "srv+" it is - // treated as a SRV-based upstream, and any port will be - // dropped. - appendUpstream := func(address string) error { - isSRV := strings.HasPrefix(address, "srv+") - if isSRV { - address = strings.TrimPrefix(address, "srv+") - } - dialAddr, err := upstreamDialAddress(address) - if err != nil { - return err - } - if isSRV { - if host, _, err := net.SplitHostPort(dialAddr); err == nil { - dialAddr = host - } - h.Upstreams = append(h.Upstreams, &Upstream{LookupSRV: dialAddr}) - } else { - h.Upstreams = append(h.Upstreams, &Upstream{Dial: dialAddr}) - } - return nil - } - - for d.Next() { - for _, up := range d.RemainingArgs() { - err := appendUpstream(up) - if err != nil { - return err - } - } - - for d.NextBlock(0) { - // if the subdirective has an "@" prefix then we - // parse it as a response matcher for use with "handle_response" - if strings.HasPrefix(d.Val(), matcherPrefix) { - err := caddyhttp.ParseNamedResponseMatcher(d.NewFromNextSegment(), h.responseMatchers) - if err != nil { - return err - } - continue - } - - switch d.Val() { - case "to": - args := d.RemainingArgs() - if len(args) == 0 { - return d.ArgErr() - } - for _, up := range args { - err := appendUpstream(up) - if err != nil { - return err - } - } - - case "lb_policy": - if !d.NextArg() { - return d.ArgErr() - } - if h.LoadBalancing != nil && h.LoadBalancing.SelectionPolicyRaw != nil { - return d.Err("load balancing selection policy already specified") - } - name := d.Val() - modID := "http.reverse_proxy.selection_policies." 
+ name - unm, err := caddyfile.UnmarshalModule(d, modID) - if err != nil { - return err - } - sel, ok := unm.(Selector) - if !ok { - return d.Errf("module %s (%T) is not a reverseproxy.Selector", modID, unm) - } - if h.LoadBalancing == nil { - h.LoadBalancing = new(LoadBalancing) - } - h.LoadBalancing.SelectionPolicyRaw = caddyconfig.JSONModuleObject(sel, "policy", name, nil) - - case "lb_try_duration": - if !d.NextArg() { - return d.ArgErr() - } - if h.LoadBalancing == nil { - h.LoadBalancing = new(LoadBalancing) - } - dur, err := caddy.ParseDuration(d.Val()) - if err != nil { - return d.Errf("bad duration value %s: %v", d.Val(), err) - } - h.LoadBalancing.TryDuration = caddy.Duration(dur) - - case "lb_try_interval": - if !d.NextArg() { - return d.ArgErr() - } - if h.LoadBalancing == nil { - h.LoadBalancing = new(LoadBalancing) - } - dur, err := caddy.ParseDuration(d.Val()) - if err != nil { - return d.Errf("bad interval value '%s': %v", d.Val(), err) - } - h.LoadBalancing.TryInterval = caddy.Duration(dur) - - case "health_uri": - if !d.NextArg() { - return d.ArgErr() - } - if h.HealthChecks == nil { - h.HealthChecks = new(HealthChecks) - } - if h.HealthChecks.Active == nil { - h.HealthChecks.Active = new(ActiveHealthChecks) - } - h.HealthChecks.Active.URI = d.Val() - - case "health_path": - if !d.NextArg() { - return d.ArgErr() - } - if h.HealthChecks == nil { - h.HealthChecks = new(HealthChecks) - } - if h.HealthChecks.Active == nil { - h.HealthChecks.Active = new(ActiveHealthChecks) - } - h.HealthChecks.Active.Path = d.Val() - caddy.Log().Named("config.adapter.caddyfile").Warn("the 'health_path' subdirective is deprecated, please use 'health_uri' instead!") - - case "health_port": - if !d.NextArg() { - return d.ArgErr() - } - if h.HealthChecks == nil { - h.HealthChecks = new(HealthChecks) - } - if h.HealthChecks.Active == nil { - h.HealthChecks.Active = new(ActiveHealthChecks) - } - portNum, err := strconv.Atoi(d.Val()) - if err != nil { - return d.Errf("bad port number '%s': %v", d.Val(), err) - } - h.HealthChecks.Active.Port = portNum - - case "health_headers": - healthHeaders := make(http.Header) - for d.Next() { - for d.NextBlock(0) { - key := d.Val() - values := d.RemainingArgs() - if len(values) == 0 { - values = append(values, "") - } - healthHeaders[key] = values - } - } - if h.HealthChecks == nil { - h.HealthChecks = new(HealthChecks) - } - if h.HealthChecks.Active == nil { - h.HealthChecks.Active = new(ActiveHealthChecks) - } - h.HealthChecks.Active.Headers = healthHeaders - - case "health_interval": - if !d.NextArg() { - return d.ArgErr() - } - if h.HealthChecks == nil { - h.HealthChecks = new(HealthChecks) - } - if h.HealthChecks.Active == nil { - h.HealthChecks.Active = new(ActiveHealthChecks) - } - dur, err := caddy.ParseDuration(d.Val()) - if err != nil { - return d.Errf("bad interval value %s: %v", d.Val(), err) - } - h.HealthChecks.Active.Interval = caddy.Duration(dur) - - case "health_timeout": - if !d.NextArg() { - return d.ArgErr() - } - if h.HealthChecks == nil { - h.HealthChecks = new(HealthChecks) - } - if h.HealthChecks.Active == nil { - h.HealthChecks.Active = new(ActiveHealthChecks) - } - dur, err := caddy.ParseDuration(d.Val()) - if err != nil { - return d.Errf("bad timeout value %s: %v", d.Val(), err) - } - h.HealthChecks.Active.Timeout = caddy.Duration(dur) - - case "health_status": - if !d.NextArg() { - return d.ArgErr() - } - if h.HealthChecks == nil { - h.HealthChecks = new(HealthChecks) - } - if h.HealthChecks.Active == nil { - h.HealthChecks.Active = 
new(ActiveHealthChecks) - } - val := d.Val() - if len(val) == 3 && strings.HasSuffix(val, "xx") { - val = val[:1] - } - statusNum, err := strconv.Atoi(val) - if err != nil { - return d.Errf("bad status value '%s': %v", d.Val(), err) - } - h.HealthChecks.Active.ExpectStatus = statusNum - - case "health_body": - if !d.NextArg() { - return d.ArgErr() - } - if h.HealthChecks == nil { - h.HealthChecks = new(HealthChecks) - } - if h.HealthChecks.Active == nil { - h.HealthChecks.Active = new(ActiveHealthChecks) - } - h.HealthChecks.Active.ExpectBody = d.Val() - - case "max_fails": - if !d.NextArg() { - return d.ArgErr() - } - if h.HealthChecks == nil { - h.HealthChecks = new(HealthChecks) - } - if h.HealthChecks.Passive == nil { - h.HealthChecks.Passive = new(PassiveHealthChecks) - } - maxFails, err := strconv.Atoi(d.Val()) - if err != nil { - return d.Errf("invalid maximum fail count '%s': %v", d.Val(), err) - } - h.HealthChecks.Passive.MaxFails = maxFails - - case "fail_duration": - if !d.NextArg() { - return d.ArgErr() - } - if h.HealthChecks == nil { - h.HealthChecks = new(HealthChecks) - } - if h.HealthChecks.Passive == nil { - h.HealthChecks.Passive = new(PassiveHealthChecks) - } - dur, err := caddy.ParseDuration(d.Val()) - if err != nil { - return d.Errf("bad duration value '%s': %v", d.Val(), err) - } - h.HealthChecks.Passive.FailDuration = caddy.Duration(dur) - - case "unhealthy_request_count": - if !d.NextArg() { - return d.ArgErr() - } - if h.HealthChecks == nil { - h.HealthChecks = new(HealthChecks) - } - if h.HealthChecks.Passive == nil { - h.HealthChecks.Passive = new(PassiveHealthChecks) - } - maxConns, err := strconv.Atoi(d.Val()) - if err != nil { - return d.Errf("invalid maximum connection count '%s': %v", d.Val(), err) - } - h.HealthChecks.Passive.UnhealthyRequestCount = maxConns - - case "unhealthy_status": - args := d.RemainingArgs() - if len(args) == 0 { - return d.ArgErr() - } - if h.HealthChecks == nil { - h.HealthChecks = new(HealthChecks) - } - if h.HealthChecks.Passive == nil { - h.HealthChecks.Passive = new(PassiveHealthChecks) - } - for _, arg := range args { - if len(arg) == 3 && strings.HasSuffix(arg, "xx") { - arg = arg[:1] - } - statusNum, err := strconv.Atoi(arg) - if err != nil { - return d.Errf("bad status value '%s': %v", d.Val(), err) - } - h.HealthChecks.Passive.UnhealthyStatus = append(h.HealthChecks.Passive.UnhealthyStatus, statusNum) - } - - case "unhealthy_latency": - if !d.NextArg() { - return d.ArgErr() - } - if h.HealthChecks == nil { - h.HealthChecks = new(HealthChecks) - } - if h.HealthChecks.Passive == nil { - h.HealthChecks.Passive = new(PassiveHealthChecks) - } - dur, err := caddy.ParseDuration(d.Val()) - if err != nil { - return d.Errf("bad duration value '%s': %v", d.Val(), err) - } - h.HealthChecks.Passive.UnhealthyLatency = caddy.Duration(dur) - - case "flush_interval": - if !d.NextArg() { - return d.ArgErr() - } - if fi, err := strconv.Atoi(d.Val()); err == nil { - h.FlushInterval = caddy.Duration(fi) - } else { - dur, err := caddy.ParseDuration(d.Val()) - if err != nil { - return d.Errf("bad duration value '%s': %v", d.Val(), err) - } - h.FlushInterval = caddy.Duration(dur) - } - - case "buffer_requests": - if d.NextArg() { - return d.ArgErr() - } - h.BufferRequests = true - - case "buffer_responses": - if d.NextArg() { - return d.ArgErr() - } - h.BufferResponses = true - - case "max_buffer_size": - if !d.NextArg() { - return d.ArgErr() - } - size, err := strconv.Atoi(d.Val()) - if err != nil { - return d.Errf("invalid size (bytes): %s", 
d.Val()) - } - if d.NextArg() { - return d.ArgErr() - } - h.MaxBufferSize = int64(size) - - case "header_up": - var err error - - if h.Headers == nil { - h.Headers = new(headers.Handler) - } - if h.Headers.Request == nil { - h.Headers.Request = new(headers.HeaderOps) - } - args := d.RemainingArgs() - - switch len(args) { - case 1: - err = headers.CaddyfileHeaderOp(h.Headers.Request, args[0], "", "") - case 2: - // some lint checks, I guess - if strings.EqualFold(args[0], "host") && (args[1] == "{hostport}" || args[1] == "{http.request.hostport}") { - log.Printf("[WARNING] Unnecessary header_up ('Host' field): the reverse proxy's default behavior is to pass headers to the upstream") - } - if strings.EqualFold(args[0], "x-forwarded-proto") && (args[1] == "{scheme}" || args[1] == "{http.request.scheme}") { - log.Printf("[WARNING] Unnecessary header_up ('X-Forwarded-Proto' field): the reverse proxy's default behavior is to pass headers to the upstream") - } - err = headers.CaddyfileHeaderOp(h.Headers.Request, args[0], args[1], "") - case 3: - err = headers.CaddyfileHeaderOp(h.Headers.Request, args[0], args[1], args[2]) - default: - return d.ArgErr() - } - - if err != nil { - return d.Err(err.Error()) - } - - case "header_down": - var err error - - if h.Headers == nil { - h.Headers = new(headers.Handler) - } - if h.Headers.Response == nil { - h.Headers.Response = &headers.RespHeaderOps{ - HeaderOps: new(headers.HeaderOps), - } - } - args := d.RemainingArgs() - switch len(args) { - case 1: - err = headers.CaddyfileHeaderOp(h.Headers.Response.HeaderOps, args[0], "", "") - case 2: - err = headers.CaddyfileHeaderOp(h.Headers.Response.HeaderOps, args[0], args[1], "") - case 3: - err = headers.CaddyfileHeaderOp(h.Headers.Response.HeaderOps, args[0], args[1], args[2]) - default: - return d.ArgErr() - } - - if err != nil { - return d.Err(err.Error()) - } - - case "transport": - if !d.NextArg() { - return d.ArgErr() - } - if h.TransportRaw != nil { - return d.Err("transport already specified") - } - transportModuleName = d.Val() - modID := "http.reverse_proxy.transport." + transportModuleName - unm, err := caddyfile.UnmarshalModule(d, modID) - if err != nil { - return err - } - rt, ok := unm.(http.RoundTripper) - if !ok { - return d.Errf("module %s (%T) is not a RoundTripper", modID, unm) - } - transport = rt - - case "handle_response": - // delegate the parsing of handle_response to the caller, - // since we need the httpcaddyfile.Helper to parse subroutes. - // See h.FinalizeUnmarshalCaddyfile - h.handleResponseSegments = append(h.handleResponseSegments, d.NewFromNextSegment()) - - default: - return d.Errf("unrecognized subdirective %s", d.Val()) - } - } - } - - // if the scheme inferred from the backends' addresses is - // HTTPS, we will need a non-nil transport to enable TLS, - // or if H2C, to set the transport versions. 
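// For example, `reverse_proxy https://example.com` names no transport at
// all, yet the inferred "https" scheme creates a default HTTPTransport here
// and enables TLS on it in the verification step that follows.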
- if (commonScheme == "https" || commonScheme == "h2c") && transport == nil { - transport = new(HTTPTransport) - transportModuleName = "http" - } - - // verify transport configuration, and finally encode it - if transport != nil { - if te, ok := transport.(TLSTransport); ok { - if commonScheme == "https" && !te.TLSEnabled() { - err := te.EnableTLS(new(TLSConfig)) - if err != nil { - return err - } - } - if commonScheme == "http" && te.TLSEnabled() { - return d.Errf("upstream address scheme is HTTP but transport is configured for HTTP+TLS (HTTPS)") - } - if te, ok := transport.(*HTTPTransport); ok && commonScheme == "h2c" { - te.Versions = []string{"h2c", "2"} - } - } else if commonScheme == "https" { - return d.Errf("upstreams are configured for HTTPS but transport module does not support TLS: %T", transport) - } - - // no need to encode empty default transport - if !reflect.DeepEqual(transport, new(HTTPTransport)) { - h.TransportRaw = caddyconfig.JSONModuleObject(transport, "protocol", transportModuleName, nil) - } - } - - return nil -} - -// FinalizeUnmarshalCaddyfile finalizes the Caddyfile parsing which -// requires having an httpcaddyfile.Helper to function, to parse subroutes. -func (h *Handler) FinalizeUnmarshalCaddyfile(helper httpcaddyfile.Helper) error { - for _, d := range h.handleResponseSegments { - // consume the "handle_response" token - d.Next() - - var matcher *caddyhttp.ResponseMatcher - args := d.RemainingArgs() - - // the first arg should be a matcher (optional) - // the second arg should be a status code (optional) - // any more than that isn't currently supported - if len(args) > 2 { - return d.Errf("too many arguments for 'handle_response': %s", args) - } - - // the first arg should always be a matcher. - // it doesn't really make sense to support status code without a matcher. - if len(args) > 0 { - if !strings.HasPrefix(args[0], matcherPrefix) { - return d.Errf("must use a named response matcher, starting with '@'") - } - - foundMatcher, ok := h.responseMatchers[args[0]] - if !ok { - return d.Errf("no named response matcher defined with name '%s'", args[0][1:]) - } - matcher = &foundMatcher - } - - // a second arg should be a status code, in which case - // we skip parsing the block for routes - if len(args) == 2 { - _, err := strconv.Atoi(args[1]) - if err != nil { - return d.Errf("bad integer value '%s': %v", args[1], err) - } - - // make sure there's no block, cause it doesn't make sense - if d.NextBlock(1) { - return d.Errf("cannot define routes for 'handle_response' when changing the status code") - } - - h.HandleResponse = append( - h.HandleResponse, - caddyhttp.ResponseHandler{ - Match: matcher, - StatusCode: caddyhttp.WeakString(args[1]), - }, - ) - continue - } - - // parse the block as routes - handler, err := httpcaddyfile.ParseSegmentAsSubroute(helper.WithDispenser(d.NewFromNextSegment())) - if err != nil { - return err - } - subroute, ok := handler.(*caddyhttp.Subroute) - if !ok { - return helper.Errf("segment was not parsed as a subroute") - } - h.HandleResponse = append( - h.HandleResponse, - caddyhttp.ResponseHandler{ - Match: matcher, - Routes: subroute.Routes, - }, - ) - } - - // move the handle_response entries without a matcher to the end. - // we can't use sort.SliceStable because it will reorder the rest of the - // entries which may be undesirable because we don't have a good - // heuristic to use for sorting. 
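// For example, entries parsed in the order [@ok, (no matcher), @err] come
// out as [@ok, @err, (no matcher)]: the two slices built below preserve the
// relative order within each group while moving catch-all entries last.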
- withoutMatchers := []caddyhttp.ResponseHandler{} - withMatchers := []caddyhttp.ResponseHandler{} - for _, hr := range h.HandleResponse { - if hr.Match == nil { - withoutMatchers = append(withoutMatchers, hr) - } else { - withMatchers = append(withMatchers, hr) - } - } - h.HandleResponse = append(withMatchers, withoutMatchers...) - - // clean up the bits we only needed for adapting - h.handleResponseSegments = nil - h.responseMatchers = nil - - return nil -} - -// UnmarshalCaddyfile deserializes Caddyfile tokens into h. -// -// transport http { -// read_buffer -// write_buffer -// max_response_header -// dial_timeout -// dial_fallback_delay -// response_header_timeout -// expect_continue_timeout -// tls -// tls_client_auth | -// tls_insecure_skip_verify -// tls_timeout -// tls_trusted_ca_certs -// tls_server_name -// keepalive [off|] -// keepalive_idle_conns -// versions -// compression off -// max_conns_per_host -// max_idle_conns_per_host -// } -// -func (h *HTTPTransport) UnmarshalCaddyfile(d *caddyfile.Dispenser) error { - for d.Next() { - for d.NextBlock(0) { - switch d.Val() { - case "read_buffer": - if !d.NextArg() { - return d.ArgErr() - } - size, err := humanize.ParseBytes(d.Val()) - if err != nil { - return d.Errf("invalid read buffer size '%s': %v", d.Val(), err) - } - h.ReadBufferSize = int(size) - - case "write_buffer": - if !d.NextArg() { - return d.ArgErr() - } - size, err := humanize.ParseBytes(d.Val()) - if err != nil { - return d.Errf("invalid write buffer size '%s': %v", d.Val(), err) - } - h.WriteBufferSize = int(size) - - case "max_response_header": - if !d.NextArg() { - return d.ArgErr() - } - size, err := humanize.ParseBytes(d.Val()) - if err != nil { - return d.Errf("invalid max response header size '%s': %v", d.Val(), err) - } - h.MaxResponseHeaderSize = int64(size) - - case "dial_timeout": - if !d.NextArg() { - return d.ArgErr() - } - dur, err := caddy.ParseDuration(d.Val()) - if err != nil { - return d.Errf("bad timeout value '%s': %v", d.Val(), err) - } - h.DialTimeout = caddy.Duration(dur) - - case "dial_fallback_delay": - if !d.NextArg() { - return d.ArgErr() - } - dur, err := caddy.ParseDuration(d.Val()) - if err != nil { - return d.Errf("bad fallback delay value '%s': %v", d.Val(), err) - } - h.FallbackDelay = caddy.Duration(dur) - - case "response_header_timeout": - if !d.NextArg() { - return d.ArgErr() - } - dur, err := caddy.ParseDuration(d.Val()) - if err != nil { - return d.Errf("bad timeout value '%s': %v", d.Val(), err) - } - h.ResponseHeaderTimeout = caddy.Duration(dur) - - case "expect_continue_timeout": - if !d.NextArg() { - return d.ArgErr() - } - dur, err := caddy.ParseDuration(d.Val()) - if err != nil { - return d.Errf("bad timeout value '%s': %v", d.Val(), err) - } - h.ExpectContinueTimeout = caddy.Duration(dur) - - case "tls_client_auth": - if h.TLS == nil { - h.TLS = new(TLSConfig) - } - args := d.RemainingArgs() - switch len(args) { - case 1: - h.TLS.ClientCertificateAutomate = args[0] - case 2: - h.TLS.ClientCertificateFile = args[0] - h.TLS.ClientCertificateKeyFile = args[1] - default: - return d.ArgErr() - } - - case "tls": - if h.TLS == nil { - h.TLS = new(TLSConfig) - } - - case "tls_insecure_skip_verify": - if d.NextArg() { - return d.ArgErr() - } - if h.TLS == nil { - h.TLS = new(TLSConfig) - } - h.TLS.InsecureSkipVerify = true - - case "tls_timeout": - if !d.NextArg() { - return d.ArgErr() - } - dur, err := caddy.ParseDuration(d.Val()) - if err != nil { - return d.Errf("bad timeout value '%s': %v", d.Val(), err) - } - if h.TLS == 
nil { - h.TLS = new(TLSConfig) - } - h.TLS.HandshakeTimeout = caddy.Duration(dur) - - case "tls_trusted_ca_certs": - args := d.RemainingArgs() - if len(args) == 0 { - return d.ArgErr() - } - if h.TLS == nil { - h.TLS = new(TLSConfig) - } - h.TLS.RootCAPEMFiles = args - - case "tls_server_name": - if !d.NextArg() { - return d.ArgErr() - } - if h.TLS == nil { - h.TLS = new(TLSConfig) - } - h.TLS.ServerName = d.Val() - - case "keepalive": - if !d.NextArg() { - return d.ArgErr() - } - if h.KeepAlive == nil { - h.KeepAlive = new(KeepAlive) - } - if d.Val() == "off" { - var disable bool - h.KeepAlive.Enabled = &disable - break - } - dur, err := caddy.ParseDuration(d.Val()) - if err != nil { - return d.Errf("bad duration value '%s': %v", d.Val(), err) - } - h.KeepAlive.IdleConnTimeout = caddy.Duration(dur) - - case "keepalive_idle_conns": - if !d.NextArg() { - return d.ArgErr() - } - num, err := strconv.Atoi(d.Val()) - if err != nil { - return d.Errf("bad integer value '%s': %v", d.Val(), err) - } - if h.KeepAlive == nil { - h.KeepAlive = new(KeepAlive) - } - h.KeepAlive.MaxIdleConns = num - - case "keepalive_idle_conns_per_host": - if !d.NextArg() { - return d.ArgErr() - } - num, err := strconv.Atoi(d.Val()) - if err != nil { - return d.Errf("bad integer value '%s': %v", d.Val(), err) - } - if h.KeepAlive == nil { - h.KeepAlive = new(KeepAlive) - } - h.KeepAlive.MaxIdleConnsPerHost = num - - case "versions": - h.Versions = d.RemainingArgs() - if len(h.Versions) == 0 { - return d.ArgErr() - } - - case "compression": - if d.NextArg() { - if d.Val() == "off" { - var disable bool - h.Compression = &disable - } - } - - case "max_conns_per_host": - if !d.NextArg() { - return d.ArgErr() - } - num, err := strconv.Atoi(d.Val()) - if err != nil { - return d.Errf("bad integer value '%s': %v", d.Val(), err) - } - h.MaxConnsPerHost = num - - default: - return d.Errf("unrecognized subdirective %s", d.Val()) - } - } - } - return nil -} - -const matcherPrefix = "@" - -// Interface guards -var ( - _ caddyfile.Unmarshaler = (*Handler)(nil) - _ caddyfile.Unmarshaler = (*HTTPTransport)(nil) -) diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy/command.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy/command.go deleted file mode 100644 index 4a6739e5..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy/command.go +++ /dev/null @@ -1,191 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
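// The `reverse-proxy` subcommand deleted below is invoked straight from the
// caddy CLI; a typical call using only the flags it registers would be:
//
//	caddy reverse-proxy --from localhost:2016 --to localhost:9000 --change-host-header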
- -package reverseproxy - -import ( - "encoding/json" - "flag" - "fmt" - "net" - "net/http" - "strconv" - - "github.com/caddyserver/caddy/v2" - "github.com/caddyserver/caddy/v2/caddyconfig" - "github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile" - caddycmd "github.com/caddyserver/caddy/v2/cmd" - "github.com/caddyserver/caddy/v2/modules/caddyhttp" - "github.com/caddyserver/caddy/v2/modules/caddyhttp/headers" -) - -func init() { - caddycmd.RegisterCommand(caddycmd.Command{ - Name: "reverse-proxy", - Func: cmdReverseProxy, - Usage: "[--from ] [--to ] [--change-host-header]", - Short: "A quick and production-ready reverse proxy", - Long: ` -A simple but production-ready reverse proxy. Useful for quick deployments, -demos, and development. - -Simply shuttles HTTP(S) traffic from the --from address to the --to address. - -Unless otherwise specified in the addresses, the --from address will be -assumed to be HTTPS if a hostname is given, and the --to address will be -assumed to be HTTP. - -If the --from address has a host or IP, Caddy will attempt to serve the -proxy over HTTPS with a certificate (unless overridden by the HTTP scheme -or port). - -If --change-host-header is set, the Host header on the request will be modified -from its original incoming value to the address of the upstream. (Otherwise, by -default, all incoming headers are passed through unmodified.) -`, - Flags: func() *flag.FlagSet { - fs := flag.NewFlagSet("reverse-proxy", flag.ExitOnError) - fs.String("from", "localhost", "Address on which to receive traffic") - fs.String("to", "", "Upstream address to which to to proxy traffic") - fs.Bool("change-host-header", false, "Set upstream Host header to address of upstream") - fs.Bool("insecure", false, "Disable TLS verification (WARNING: DISABLES SECURITY, WHY ARE YOU EVEN USING TLS?)") - return fs - }(), - }) -} - -func cmdReverseProxy(fs caddycmd.Flags) (int, error) { - caddy.TrapSignals() - - from := fs.String("from") - to := fs.String("to") - changeHost := fs.Bool("change-host-header") - insecure := fs.Bool("insecure") - - httpPort := strconv.Itoa(caddyhttp.DefaultHTTPPort) - httpsPort := strconv.Itoa(caddyhttp.DefaultHTTPSPort) - - if to == "" { - return caddy.ExitCodeFailedStartup, fmt.Errorf("--to is required") - } - - // set up the downstream address; assume missing information from given parts - fromAddr, err := httpcaddyfile.ParseAddress(from) - if err != nil { - return caddy.ExitCodeFailedStartup, fmt.Errorf("invalid downstream address %s: %v", from, err) - } - if fromAddr.Path != "" { - return caddy.ExitCodeFailedStartup, fmt.Errorf("paths are not allowed: %s", from) - } - if fromAddr.Scheme == "" { - if fromAddr.Port == httpPort || fromAddr.Host == "" { - fromAddr.Scheme = "http" - } else { - fromAddr.Scheme = "https" - } - } - if fromAddr.Port == "" { - if fromAddr.Scheme == "http" { - fromAddr.Port = httpPort - } else if fromAddr.Scheme == "https" { - fromAddr.Port = httpsPort - } - } - - // set up the upstream address; assume missing information from given parts - toAddr, err := httpcaddyfile.ParseAddress(to) - if err != nil { - return caddy.ExitCodeFailedStartup, fmt.Errorf("invalid upstream address %s: %v", to, err) - } - if toAddr.Path != "" { - return caddy.ExitCodeFailedStartup, fmt.Errorf("paths are not allowed: %s", to) - } - if toAddr.Scheme == "" { - if toAddr.Port == httpsPort { - toAddr.Scheme = "https" - } else { - toAddr.Scheme = "http" - } - } - if toAddr.Port == "" { - if toAddr.Scheme == "http" { - toAddr.Port = httpPort - } else if 
toAddr.Scheme == "https" { - toAddr.Port = httpsPort - } - } - - // proceed to build the handler and server - - ht := HTTPTransport{} - if toAddr.Scheme == "https" { - ht.TLS = new(TLSConfig) - if insecure { - ht.TLS.InsecureSkipVerify = true - } - } - - handler := Handler{ - TransportRaw: caddyconfig.JSONModuleObject(ht, "protocol", "http", nil), - Upstreams: UpstreamPool{{Dial: net.JoinHostPort(toAddr.Host, toAddr.Port)}}, - } - - if changeHost { - handler.Headers = &headers.Handler{ - Request: &headers.HeaderOps{ - Set: http.Header{ - "Host": []string{"{http.reverse_proxy.upstream.hostport}"}, - }, - }, - } - } - - route := caddyhttp.Route{ - HandlersRaw: []json.RawMessage{ - caddyconfig.JSONModuleObject(handler, "handler", "reverse_proxy", nil), - }, - } - if fromAddr.Host != "" { - route.MatcherSetsRaw = []caddy.ModuleMap{ - { - "host": caddyconfig.JSON(caddyhttp.MatchHost{fromAddr.Host}, nil), - }, - } - } - - server := &caddyhttp.Server{ - Routes: caddyhttp.RouteList{route}, - Listen: []string{":" + fromAddr.Port}, - } - - httpApp := caddyhttp.App{ - Servers: map[string]*caddyhttp.Server{"proxy": server}, - } - - cfg := &caddy.Config{ - Admin: &caddy.AdminConfig{Disabled: true}, - AppsRaw: caddy.ModuleMap{ - "http": caddyconfig.JSON(httpApp, nil), - }, - } - - err = caddy.Run(cfg) - if err != nil { - return caddy.ExitCodeFailedStartup, err - } - - fmt.Printf("Caddy proxying %s -> %s\n", fromAddr.String(), toAddr.String()) - - select {} -} diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy/healthchecks.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy/healthchecks.go deleted file mode 100644 index 8d5bd77e..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy/healthchecks.go +++ /dev/null @@ -1,393 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package reverseproxy - -import ( - "context" - "fmt" - "io" - "io/ioutil" - "log" - "net" - "net/http" - "net/url" - "regexp" - "runtime/debug" - "strconv" - "strings" - "time" - - "github.com/caddyserver/caddy/v2" - "github.com/caddyserver/caddy/v2/modules/caddyhttp" - "go.uber.org/zap" -) - -// HealthChecks configures active and passive health checks. -type HealthChecks struct { - // Active health checks run in the background on a timer. To - // minimally enable active health checks, set either path or - // port (or both). - Active *ActiveHealthChecks `json:"active,omitempty"` - - // Passive health checks monitor proxied requests for errors or timeouts. - // To minimally enable passive health checks, specify at least an empty - // config object. - Passive *PassiveHealthChecks `json:"passive,omitempty"` -} - -// ActiveHealthChecks holds configuration related to active -// health checks (that is, health checks which occur in a -// background goroutine independently). -type ActiveHealthChecks struct { - // The path to use for health checks. - // DEPRECATED: Use 'uri' instead. 
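// (The Caddyfile adapter shown earlier still maps the old 'health_path'
// subdirective to this field and logs a deprecation warning.)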
- Path string `json:"path,omitempty"` - - // The URI (path and query) to use for health checks - URI string `json:"uri,omitempty"` - - // The port to use (if different from the upstream's dial - // address) for health checks. - Port int `json:"port,omitempty"` - - // HTTP headers to set on health check requests. - Headers http.Header `json:"headers,omitempty"` - - // How frequently to perform active health checks (default 30s). - Interval caddy.Duration `json:"interval,omitempty"` - - // How long to wait for a response from a backend before - // considering it unhealthy (default 5s). - Timeout caddy.Duration `json:"timeout,omitempty"` - - // The maximum response body to download from the backend - // during a health check. - MaxSize int64 `json:"max_size,omitempty"` - - // The HTTP status code to expect from a healthy backend. - ExpectStatus int `json:"expect_status,omitempty"` - - // A regular expression against which to match the response - // body of a healthy backend. - ExpectBody string `json:"expect_body,omitempty"` - - uri *url.URL - httpClient *http.Client - bodyRegexp *regexp.Regexp - logger *zap.Logger -} - -// PassiveHealthChecks holds configuration related to passive -// health checks (that is, health checks which occur during -// the normal flow of request proxying). -type PassiveHealthChecks struct { - // How long to remember a failed request to a backend. A duration > 0 - // enables passive health checking. Default is 0. - FailDuration caddy.Duration `json:"fail_duration,omitempty"` - - // The number of failed requests within the FailDuration window to - // consider a backend as "down". Must be >= 1; default is 1. Requires - // that FailDuration be > 0. - MaxFails int `json:"max_fails,omitempty"` - - // Limits the number of simultaneous requests to a backend by - // marking the backend as "down" if it has this many concurrent - // requests or more. - UnhealthyRequestCount int `json:"unhealthy_request_count,omitempty"` - - // Count the request as failed if the response comes back with - // one of these status codes. - UnhealthyStatus []int `json:"unhealthy_status,omitempty"` - - // Count the request as failed if the response takes at least this - // long to receive. - UnhealthyLatency caddy.Duration `json:"unhealthy_latency,omitempty"` - - logger *zap.Logger -} - -// CircuitBreaker is a type that can act as an early-warning -// system for the health checker when backends are getting -// overloaded. This interface is still experimental and is -// subject to change. -type CircuitBreaker interface { - OK() bool - RecordMetric(statusCode int, latency time.Duration) -} - -// activeHealthChecker runs active health checks on a -// regular basis and blocks until -// h.HealthChecks.Active.stopChan is closed. -func (h *Handler) activeHealthChecker() { - defer func() { - if err := recover(); err != nil { - log.Printf("[PANIC] active health checks: %v\n%s", err, debug.Stack()) - } - }() - ticker := time.NewTicker(time.Duration(h.HealthChecks.Active.Interval)) - h.doActiveHealthCheckForAllHosts() - for { - select { - case <-ticker.C: - h.doActiveHealthCheckForAllHosts() - case <-h.ctx.Done(): - ticker.Stop() - return - } - } -} - -// doActiveHealthCheckForAllHosts immediately performs a -// health checks for all upstream hosts configured by h. 
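// Each upstream is checked in its own goroutine below, so a slow or
// unreachable backend never delays the health checks of the others.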
-func (h *Handler) doActiveHealthCheckForAllHosts() { - for _, upstream := range h.Upstreams { - go func(upstream *Upstream) { - defer func() { - if err := recover(); err != nil { - log.Printf("[PANIC] active health check: %v\n%s", err, debug.Stack()) - } - }() - - networkAddr, err := caddy.NewReplacer().ReplaceOrErr(upstream.Dial, true, true) - if err != nil { - h.HealthChecks.Active.logger.Error("invalid use of placeholders in dial address for active health checks", - zap.String("address", networkAddr), - zap.Error(err), - ) - return - } - addr, err := caddy.ParseNetworkAddress(networkAddr) - if err != nil { - h.HealthChecks.Active.logger.Error("bad network address", - zap.String("address", networkAddr), - zap.Error(err), - ) - return - } - if hcp := uint(upstream.activeHealthCheckPort); hcp != 0 { - if addr.IsUnixNetwork() { - addr.Network = "tcp" // I guess we just assume TCP since we are using a port?? - } - addr.StartPort, addr.EndPort = hcp, hcp - } - if upstream.LookupSRV == "" && addr.PortRangeSize() != 1 { - h.HealthChecks.Active.logger.Error("multiple addresses (upstream must map to only one address)", - zap.String("address", networkAddr), - ) - return - } - hostAddr := addr.JoinHostPort(0) - dialAddr := hostAddr - if addr.IsUnixNetwork() { - // this will be used as the Host portion of a http.Request URL, and - // paths to socket files would produce an error when creating URL, - // so use a fake Host value instead; unix sockets are usually local - hostAddr = "localhost" - } - err = h.doActiveHealthCheck(DialInfo{Network: addr.Network, Address: dialAddr}, hostAddr, upstream.Host) - if err != nil { - h.HealthChecks.Active.logger.Error("active health check failed", - zap.String("address", hostAddr), - zap.Error(err), - ) - } - }(upstream) - } -} - -// doActiveHealthCheck performs a health check to host which -// can be reached at address hostAddr. The actual address for -// the request will be built according to active health checker -// config. The health status of the host will be updated -// according to whether it passes the health check. An error is -// returned only if the health check fails to occur or if marking -// the host's health status fails. 
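// The per-upstream goroutines above each install their own recover, so a
// panic in one check cannot take down the process; a distilled sketch of
// the same fan-out pattern, independent of this file's types (the helper
// name is ours):
func checkAllConcurrently(targets []string, check func(string) error) {
	for _, t := range targets {
		go func(t string) {
			defer func() {
				if err := recover(); err != nil {
					log.Printf("[PANIC] health check: %v\n%s", err, debug.Stack())
				}
			}()
			if err := check(t); err != nil {
				log.Printf("health check failed for %s: %v", t, err)
			}
		}(t)
	}
}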
-func (h *Handler) doActiveHealthCheck(dialInfo DialInfo, hostAddr string, host Host) error { - // create the URL for the request that acts as a health check - scheme := "http" - if ht, ok := h.Transport.(TLSTransport); ok && ht.TLSEnabled() { - // this is kind of a hacky way to know if we should use HTTPS, but whatever - scheme = "https" - } - u := &url.URL{ - Scheme: scheme, - Host: hostAddr, - } - - // if we have a provisioned uri, use that, otherwise use - // the deprecated Path option - if h.HealthChecks.Active.uri != nil { - u.Path = h.HealthChecks.Active.uri.Path - u.RawQuery = h.HealthChecks.Active.uri.RawQuery - } else { - u.Path = h.HealthChecks.Active.Path - } - - // adjust the port, if configured to be different - if h.HealthChecks.Active.Port != 0 { - portStr := strconv.Itoa(h.HealthChecks.Active.Port) - host, _, err := net.SplitHostPort(hostAddr) - if err != nil { - host = hostAddr - } - u.Host = net.JoinHostPort(host, portStr) - } - - // attach dialing information to this request - ctx := h.ctx.Context - ctx = context.WithValue(ctx, caddy.ReplacerCtxKey, caddy.NewReplacer()) - ctx = context.WithValue(ctx, caddyhttp.VarsCtxKey, map[string]interface{}{ - dialInfoVarKey: dialInfo, - }) - req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil) - if err != nil { - return fmt.Errorf("making request: %v", err) - } - for key, hdrs := range h.HealthChecks.Active.Headers { - if strings.ToLower(key) == "host" { - req.Host = h.HealthChecks.Active.Headers.Get(key) - } else { - req.Header[key] = hdrs - } - } - - // do the request, being careful to tame the response body - resp, err := h.HealthChecks.Active.httpClient.Do(req) - if err != nil { - h.HealthChecks.Active.logger.Info("HTTP request failed", - zap.String("host", hostAddr), - zap.Error(err), - ) - _, err2 := host.SetHealthy(false) - if err2 != nil { - return fmt.Errorf("marking unhealthy: %v", err2) - } - return nil - } - var body io.Reader = resp.Body - if h.HealthChecks.Active.MaxSize > 0 { - body = io.LimitReader(body, h.HealthChecks.Active.MaxSize) - } - defer func() { - // drain any remaining body so connection could be re-used - _, _ = io.Copy(ioutil.Discard, body) - resp.Body.Close() - }() - - // if status code is outside criteria, mark down - if h.HealthChecks.Active.ExpectStatus > 0 { - if !caddyhttp.StatusCodeMatches(resp.StatusCode, h.HealthChecks.Active.ExpectStatus) { - h.HealthChecks.Active.logger.Info("unexpected status code", - zap.Int("status_code", resp.StatusCode), - zap.String("host", hostAddr), - ) - _, err := host.SetHealthy(false) - if err != nil { - return fmt.Errorf("marking unhealthy: %v", err) - } - return nil - } - } else if resp.StatusCode < 200 || resp.StatusCode >= 400 { - h.HealthChecks.Active.logger.Info("status code out of tolerances", - zap.Int("status_code", resp.StatusCode), - zap.String("host", hostAddr), - ) - _, err := host.SetHealthy(false) - if err != nil { - return fmt.Errorf("marking unhealthy: %v", err) - } - return nil - } - - // if body does not match regex, mark down - if h.HealthChecks.Active.bodyRegexp != nil { - bodyBytes, err := ioutil.ReadAll(body) - if err != nil { - h.HealthChecks.Active.logger.Info("failed to read response body", - zap.String("host", hostAddr), - zap.Error(err), - ) - _, err := host.SetHealthy(false) - if err != nil { - return fmt.Errorf("marking unhealthy: %v", err) - } - return nil - } - if !h.HealthChecks.Active.bodyRegexp.Match(bodyBytes) { - h.HealthChecks.Active.logger.Info("response body failed expectations", - zap.String("host", 
hostAddr), - ) - _, err := host.SetHealthy(false) - if err != nil { - return fmt.Errorf("marking unhealthy: %v", err) - } - return nil - } - } - - // passed health check parameters, so mark as healthy - swapped, err := host.SetHealthy(true) - if swapped { - h.HealthChecks.Active.logger.Info("host is up", - zap.String("host", hostAddr), - ) - } - if err != nil { - return fmt.Errorf("marking healthy: %v", err) - } - - return nil -} - -// countFailure is used with passive health checks. It -// remembers 1 failure for upstream for the configured -// duration. If passive health checks are disabled or -// failure expiry is 0, this is a no-op. -func (h *Handler) countFailure(upstream *Upstream) { - // only count failures if passive health checking is enabled - // and if failures are configured to have a non-zero expiry - if h.HealthChecks == nil || h.HealthChecks.Passive == nil { - return - } - failDuration := time.Duration(h.HealthChecks.Passive.FailDuration) - if failDuration == 0 { - return - } - - // count failure immediately - err := upstream.Host.CountFail(1) - if err != nil { - h.HealthChecks.Passive.logger.Error("could not count failure", - zap.String("host", upstream.Dial), - zap.Error(err)) - return - } - - // forget it later - go func(host Host, failDuration time.Duration) { - defer func() { - if err := recover(); err != nil { - log.Printf("[PANIC] health check failure forgetter: %v\n%s", err, debug.Stack()) - } - }() - time.Sleep(failDuration) - err := host.CountFail(-1) - if err != nil { - h.HealthChecks.Passive.logger.Error("could not forget failure", - zap.String("host", upstream.Dial), - zap.Error(err)) - } - }(upstream.Host, failDuration) -} diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy/hosts.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy/hosts.go deleted file mode 100644 index b9817d23..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy/hosts.go +++ /dev/null @@ -1,278 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package reverseproxy - -import ( - "context" - "fmt" - "net" - "net/http" - "strconv" - "sync/atomic" - - "github.com/caddyserver/caddy/v2" - "github.com/caddyserver/caddy/v2/modules/caddyhttp" -) - -// Host represents a remote host which can be proxied to. -// Its methods must be safe for concurrent use. -type Host interface { - // NumRequests returns the number of requests - // currently in process with the host. - NumRequests() int - - // Fails returns the count of recent failures. - Fails() int - - // Unhealthy returns true if the backend is unhealthy. - Unhealthy() bool - - // CountRequest atomically counts the given number of - // requests as currently in process with the host. The - // count should not go below 0. - CountRequest(int) error - - // CountFail atomically counts the given number of - // failures with the host. The count should not go - // below 0.
- CountFail(int) error - - // SetHealthy atomically marks the host as either - // healthy (true) or unhealthy (false). If the given - // status is the same, this should be a no-op and - // return false. It returns true if the status was - // changed; i.e. if it is now different from before. - SetHealthy(bool) (bool, error) -} - -// UpstreamPool is a collection of upstreams. -type UpstreamPool []*Upstream - -// Upstream bridges this proxy's configuration to the -// state of the backend host it is correlated with. -type Upstream struct { - Host `json:"-"` - - // The [network address](/docs/conventions#network-addresses) - // to dial to connect to the upstream. Must represent precisely - // one socket (i.e. no port ranges). A valid network address - // either has a host and port or is a unix socket address. - // - // Placeholders may be used to make the upstream dynamic, but be - // aware of the health check implications of this: a single - // upstream that represents numerous (perhaps arbitrary) backends - // can be considered down if one or enough of the arbitrary - // backends is down. Also be aware of open proxy vulnerabilities. - Dial string `json:"dial,omitempty"` - - // If DNS SRV records are used for service discovery with this - // upstream, specify the DNS name for which to look up SRV - // records here, instead of specifying a dial address. - LookupSRV string `json:"lookup_srv,omitempty"` - - // The maximum number of simultaneous requests to allow to - // this upstream. If set, overrides the global passive health - // check UnhealthyRequestCount value. - MaxRequests int `json:"max_requests,omitempty"` - - // TODO: This could be really useful, to bind requests - // with certain properties to specific backends - // HeaderAffinity string - // IPAffinity string - - activeHealthCheckPort int - healthCheckPolicy *PassiveHealthChecks - cb CircuitBreaker -} - -func (u Upstream) String() string { - if u.LookupSRV != "" { - return u.LookupSRV - } - return u.Dial -} - -// Available returns true if the remote host -// is available to receive requests. This is -// the method that should be used by selection -// policies, etc. to determine if a backend -// should be able to be sent a request. -func (u *Upstream) Available() bool { - return u.Healthy() && !u.Full() -} - -// Healthy returns true if the remote host -// is currently known to be healthy or "up". -// It consults the circuit breaker, if any. -func (u *Upstream) Healthy() bool { - healthy := !u.Host.Unhealthy() - if healthy && u.healthCheckPolicy != nil { - healthy = u.Host.Fails() < u.healthCheckPolicy.MaxFails - } - if healthy && u.cb != nil { - healthy = u.cb.OK() - } - return healthy -} - -// Full returns true if the remote host -// cannot receive more requests at this time. -func (u *Upstream) Full() bool { - return u.MaxRequests > 0 && u.Host.NumRequests() >= u.MaxRequests -} - -// fillDialInfo returns a filled DialInfo for upstream u, using the request -// context. If the upstream has a SRV lookup configured, that is done and a -// returned address is chosen; otherwise, the upstream's regular dial address -// field is used. Note that the returned value is not a pointer. 
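// How a selection policy is expected to consume the availability getters
// above, in a compact sketch (the helper name is ours, not the package's):
func firstAvailable(pool UpstreamPool) *Upstream {
	for _, u := range pool {
		// healthy per active/passive checks, below MaxRequests, and
		// not tripped by the circuit breaker
		if u.Available() {
			return u
		}
	}
	return nil
}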
-func (u *Upstream) fillDialInfo(r *http.Request) (DialInfo, error) { - repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer) - var addr caddy.NetworkAddress - - if u.LookupSRV != "" { - // perform DNS lookup for SRV records and choose one - srvName := repl.ReplaceAll(u.LookupSRV, "") - _, records, err := net.DefaultResolver.LookupSRV(r.Context(), "", "", srvName) - if err != nil { - return DialInfo{}, err - } - addr.Network = "tcp" - addr.Host = records[0].Target - addr.StartPort, addr.EndPort = uint(records[0].Port), uint(records[0].Port) - } else { - // use provided dial address - var err error - dial := repl.ReplaceAll(u.Dial, "") - addr, err = caddy.ParseNetworkAddress(dial) - if err != nil { - return DialInfo{}, fmt.Errorf("upstream %s: invalid dial address %s: %v", u.Dial, dial, err) - } - if numPorts := addr.PortRangeSize(); numPorts != 1 { - return DialInfo{}, fmt.Errorf("upstream %s: dial address must represent precisely one socket: %s represents %d", - u.Dial, dial, numPorts) - } - } - - return DialInfo{ - Upstream: u, - Network: addr.Network, - Address: addr.JoinHostPort(0), - Host: addr.Host, - Port: strconv.Itoa(int(addr.StartPort)), - }, nil -} - -// upstreamHost is the basic, in-memory representation -// of the state of a remote host. It implements the -// Host interface. -type upstreamHost struct { - numRequests int64 // must be 64-bit aligned on 32-bit systems (see https://golang.org/pkg/sync/atomic/#pkg-note-BUG) - fails int64 - unhealthy int32 -} - -// NumRequests returns the number of active requests to the upstream. -func (uh *upstreamHost) NumRequests() int { - return int(atomic.LoadInt64(&uh.numRequests)) -} - -// Fails returns the number of recent failures with the upstream. -func (uh *upstreamHost) Fails() int { - return int(atomic.LoadInt64(&uh.fails)) -} - -// Unhealthy returns whether the upstream is unhealthy. -func (uh *upstreamHost) Unhealthy() bool { - return atomic.LoadInt32(&uh.unhealthy) == 1 -} - -// CountRequest mutates the active request count by -// delta. It returns an error if the adjustment fails. -func (uh *upstreamHost) CountRequest(delta int) error { - result := atomic.AddInt64(&uh.numRequests, int64(delta)) - if result < 0 { - return fmt.Errorf("count below 0: %d", result) - } - return nil -} - -// CountFail mutates the recent failures count by -// delta. It returns an error if the adjustment fails. -func (uh *upstreamHost) CountFail(delta int) error { - result := atomic.AddInt64(&uh.fails, int64(delta)) - if result < 0 { - return fmt.Errorf("count below 0: %d", result) - } - return nil -} - -// SetHealthy sets the upstream as healthy or unhealthy -// and returns true if the new value is different. -func (uh *upstreamHost) SetHealthy(healthy bool) (bool, error) { - var unhealthy, compare int32 = 1, 0 - if healthy { - unhealthy, compare = 0, 1 - } - swapped := atomic.CompareAndSwapInt32(&uh.unhealthy, compare, unhealthy) - return swapped, nil -} - -// DialInfo contains information needed to dial a -// connection to an upstream host. This information -// may be different than that which is represented -// in a URL (for example, unix sockets don't have -// a host that can be represented in a URL, but -// they certainly have a network name and address). -type DialInfo struct { - // Upstream is the Upstream associated with - // this DialInfo. It may be nil. - Upstream *Upstream - - // The network to use.
This should be one of - // the values that is accepted by net.Dial: - // https://golang.org/pkg/net/#Dial - Network string - - // The address to dial. Follows the same - // semantics and rules as net.Dial. - Address string - - // Host and Port are components of Address. - Host, Port string -} - -// String returns the Caddy network address form -// by joining the network and address with a -// forward slash. -func (di DialInfo) String() string { - return caddy.JoinNetworkAddress(di.Network, di.Host, di.Port) -} - -// GetDialInfo gets the upstream dialing info out of the context, -// and returns true if there was a valid value; false otherwise. -func GetDialInfo(ctx context.Context) (DialInfo, bool) { - dialInfo, ok := caddyhttp.GetVar(ctx, dialInfoVarKey).(DialInfo) - return dialInfo, ok -} - -// hosts is the global repository for hosts that are -// currently in use by active configuration(s). This -// allows the state of remote hosts to be preserved -// through config reloads. -var hosts = caddy.NewUsagePool() - -// dialInfoVarKey is the key used for the variable that holds -// the dial info for the upstream connection. -const dialInfoVarKey = "reverse_proxy.dial_info" diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy/httptransport.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy/httptransport.go deleted file mode 100644 index 19328513..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy/httptransport.go +++ /dev/null @@ -1,449 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package reverseproxy - -import ( - "context" - "crypto/tls" - "crypto/x509" - "encoding/base64" - "fmt" - "io/ioutil" - weakrand "math/rand" - "net" - "net/http" - "reflect" - "time" - - "github.com/caddyserver/caddy/v2" - "github.com/caddyserver/caddy/v2/modules/caddytls" - "golang.org/x/net/http2" -) - -func init() { - caddy.RegisterModule(HTTPTransport{}) -} - -// HTTPTransport is essentially a configuration wrapper for http.Transport. -// It defines a JSON structure useful when configuring the HTTP transport -// for Caddy's reverse proxy. It builds its http.Transport at Provision. -type HTTPTransport struct { - // TODO: It's possible that other transports (like fastcgi) might be - // able to borrow/use at least some of these config fields; if so, - // maybe move them into a type called CommonTransport and embed it? - - // Configures the DNS resolver used to resolve the IP address of upstream hostnames. - Resolver *UpstreamResolver `json:"resolver,omitempty"` - - // Configures TLS to the upstream. Setting this to an empty struct - // is sufficient to enable TLS with reasonable defaults. - TLS *TLSConfig `json:"tls,omitempty"` - - // Configures HTTP Keep-Alive (enabled by default). Should only be - // necessary if rigorous testing has shown that tuning this helps - // improve performance. 
- KeepAlive *KeepAlive `json:"keep_alive,omitempty"` - - // Whether to enable compression to upstream. Default: true - Compression *bool `json:"compression,omitempty"` - - // Maximum number of connections per host. Default: 0 (no limit) - MaxConnsPerHost int `json:"max_conns_per_host,omitempty"` - - // How long to wait before timing out trying to connect to - // an upstream. - DialTimeout caddy.Duration `json:"dial_timeout,omitempty"` - - // How long to wait before spawning an RFC 6555 Fast Fallback - // connection. A negative value disables this. - FallbackDelay caddy.Duration `json:"dial_fallback_delay,omitempty"` - - // How long to wait for reading response headers from server. - ResponseHeaderTimeout caddy.Duration `json:"response_header_timeout,omitempty"` - - // The length of time to wait for a server's first response - // headers after fully writing the request headers if the - // request has a header "Expect: 100-continue". - ExpectContinueTimeout caddy.Duration `json:"expect_continue_timeout,omitempty"` - - // The maximum bytes to read from response headers. - MaxResponseHeaderSize int64 `json:"max_response_header_size,omitempty"` - - // The size of the write buffer in bytes. - WriteBufferSize int `json:"write_buffer_size,omitempty"` - - // The size of the read buffer in bytes. - ReadBufferSize int `json:"read_buffer_size,omitempty"` - - // The versions of HTTP to support. As a special case, "h2c" - // can be specified to use H2C (HTTP/2 over Cleartext) to the - // upstream (this feature is experimental and subject to - // change or removal). Default: ["1.1", "2"] - Versions []string `json:"versions,omitempty"` - - // The pre-configured underlying HTTP transport. - Transport *http.Transport `json:"-"` - - h2cTransport *http2.Transport -} - -// CaddyModule returns the Caddy module information. -func (HTTPTransport) CaddyModule() caddy.ModuleInfo { - return caddy.ModuleInfo{ - ID: "http.reverse_proxy.transport.http", - New: func() caddy.Module { return new(HTTPTransport) }, - } -} - -// Provision sets up h.Transport with a *http.Transport -// that is ready to use. -func (h *HTTPTransport) Provision(ctx caddy.Context) error { - if len(h.Versions) == 0 { - h.Versions = []string{"1.1", "2"} - } - - rt, err := h.NewTransport(ctx) - if err != nil { - return err - } - h.Transport = rt - - // if h2c is enabled, configure its transport (std lib http.Transport - // does not "speak" HTTP/2 over cleartext TCP) - if sliceContains(h.Versions, "h2c") { - // crafting our own http2.Transport doesn't allow us to utilize - // most of the customizations/preferences on the http.Transport, - // because, for some reason, only http2.ConfigureTransport() - // is allowed to set the unexported field that refers to a base - // http.Transport config; oh well - h2t := &http2.Transport{ - // kind of a hack, but for plaintext/H2C requests, pretend to dial TLS - DialTLS: func(network, addr string, _ *tls.Config) (net.Conn, error) { - // TODO: no context, thus potentially wrong dial info - return net.Dial(network, addr) - }, - AllowHTTP: true, - } - if h.Compression != nil { - h2t.DisableCompression = !*h.Compression - } - h.h2cTransport = h2t - } - - return nil -} - -// NewTransport builds a standard-lib-compatible http.Transport value from h.
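// A sketch of opting in to H2C with the fields above; per Provision,
// listing "h2c" in Versions is what wires up the alternate
// http2.Transport. Values are illustrative, not package defaults.
func exampleH2CTransport() *HTTPTransport {
	return &HTTPTransport{
		Versions:    []string{"h2c", "2"},
		DialTimeout: caddy.Duration(5 * time.Second),
		KeepAlive: &KeepAlive{
			MaxIdleConnsPerHost: 32,
		},
	}
}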
-func (h *HTTPTransport) NewTransport(ctx caddy.Context) (*http.Transport, error) { - dialer := &net.Dialer{ - Timeout: time.Duration(h.DialTimeout), - FallbackDelay: time.Duration(h.FallbackDelay), - } - - if h.Resolver != nil { - for _, v := range h.Resolver.Addresses { - addr, err := caddy.ParseNetworkAddress(v) - if err != nil { - return nil, err - } - if addr.PortRangeSize() != 1 { - return nil, fmt.Errorf("resolver address must have exactly one address; cannot call %v", addr) - } - h.Resolver.netAddrs = append(h.Resolver.netAddrs, addr) - } - d := &net.Dialer{ - Timeout: time.Duration(h.DialTimeout), - FallbackDelay: time.Duration(h.FallbackDelay), - } - dialer.Resolver = &net.Resolver{ - PreferGo: true, - Dial: func(ctx context.Context, _, _ string) (net.Conn, error) { - //nolint:gosec - addr := h.Resolver.netAddrs[weakrand.Intn(len(h.Resolver.netAddrs))] - return d.DialContext(ctx, addr.Network, addr.JoinHostPort(0)) - }, - } - } - - rt := &http.Transport{ - DialContext: func(ctx context.Context, network, address string) (net.Conn, error) { - // the proper dialing information should be embedded into the request's context - if dialInfo, ok := GetDialInfo(ctx); ok { - network = dialInfo.Network - address = dialInfo.Address - } - conn, err := dialer.DialContext(ctx, network, address) - if err != nil { - // identify this error as one that occurred during - // dialing, which can be important when trying to - // decide whether to retry a request - return nil, DialError{err} - } - return conn, nil - }, - MaxConnsPerHost: h.MaxConnsPerHost, - ResponseHeaderTimeout: time.Duration(h.ResponseHeaderTimeout), - ExpectContinueTimeout: time.Duration(h.ExpectContinueTimeout), - MaxResponseHeaderBytes: h.MaxResponseHeaderSize, - WriteBufferSize: h.WriteBufferSize, - ReadBufferSize: h.ReadBufferSize, - } - - if h.TLS != nil { - rt.TLSHandshakeTimeout = time.Duration(h.TLS.HandshakeTimeout) - var err error - rt.TLSClientConfig, err = h.TLS.MakeTLSClientConfig(ctx) - if err != nil { - return nil, fmt.Errorf("making TLS client config: %v", err) - } - } - - if h.KeepAlive != nil { - dialer.KeepAlive = time.Duration(h.KeepAlive.ProbeInterval) - if h.KeepAlive.Enabled != nil { - rt.DisableKeepAlives = !*h.KeepAlive.Enabled - } - rt.MaxIdleConns = h.KeepAlive.MaxIdleConns - rt.MaxIdleConnsPerHost = h.KeepAlive.MaxIdleConnsPerHost - rt.IdleConnTimeout = time.Duration(h.KeepAlive.IdleConnTimeout) - } - - if h.Compression != nil { - rt.DisableCompression = !*h.Compression - } - - if sliceContains(h.Versions, "2") { - if err := http2.ConfigureTransport(rt); err != nil { - return nil, err - } - } - - return rt, nil -} - -// RoundTrip implements http.RoundTripper. -func (h *HTTPTransport) RoundTrip(req *http.Request) (*http.Response, error) { - h.SetScheme(req) - - // if H2C ("HTTP/2 over cleartext") is enabled and the upstream request is - // HTTP/2 without TLS, use the alternate H2C-capable transport instead - if req.ProtoMajor == 2 && req.URL.Scheme == "http" && h.h2cTransport != nil { - return h.h2cTransport.RoundTrip(req) - } - - return h.Transport.RoundTrip(req) -} - -// SetScheme ensures that the outbound request req -// has the scheme set in its URL; the underlying -// http.Transport requires a scheme to be set. -func (h *HTTPTransport) SetScheme(req *http.Request) { - if req.URL.Scheme == "" { - req.URL.Scheme = "http" - if h.TLS != nil { - req.URL.Scheme = "https" - } - } -} - -// TLSEnabled returns true if TLS is enabled. 
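// The DialContext above takes its real target from the request context
// rather than from the URL; a minimal sketch of that consuming side, using
// GetDialInfo from hosts.go (the function name here is ours):
func dialUpstream(ctx context.Context, fallbackNetwork, fallbackAddr string) (net.Conn, error) {
	network, addr := fallbackNetwork, fallbackAddr
	if di, ok := GetDialInfo(ctx); ok {
		// e.g. "unix" + "/run/app.sock", which a URL cannot carry
		network, addr = di.Network, di.Address
	}
	var d net.Dialer
	return d.DialContext(ctx, network, addr)
}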
-func (h HTTPTransport) TLSEnabled() bool { - return h.TLS != nil -} - -// EnableTLS enables TLS on the transport. -func (h *HTTPTransport) EnableTLS(base *TLSConfig) error { - h.TLS = base - return nil -} - -// Cleanup implements caddy.CleanerUpper and closes any idle connections. -func (h HTTPTransport) Cleanup() error { - if h.Transport == nil { - return nil - } - h.Transport.CloseIdleConnections() - return nil -} - -// TLSConfig holds configuration related to the TLS configuration for the -// transport/client. -type TLSConfig struct { - // Optional list of base64-encoded DER-encoded CA certificates to trust. - RootCAPool []string `json:"root_ca_pool,omitempty"` - - // List of PEM-encoded CA certificate files to add to the same trust - // store as RootCAPool (or root_ca_pool in the JSON). - RootCAPEMFiles []string `json:"root_ca_pem_files,omitempty"` - - // PEM-encoded client certificate filename to present to servers. - ClientCertificateFile string `json:"client_certificate_file,omitempty"` - - // PEM-encoded key to use with the client certificate. - ClientCertificateKeyFile string `json:"client_certificate_key_file,omitempty"` - - // If specified, Caddy will use and automate a client certificate - // with this subject name. - ClientCertificateAutomate string `json:"client_certificate_automate,omitempty"` - - // If true, TLS verification of server certificates will be disabled. - // This is insecure and may be removed in the future. Do not use this - // option except in testing or local development environments. - InsecureSkipVerify bool `json:"insecure_skip_verify,omitempty"` - - // The duration to allow a TLS handshake to a server. - HandshakeTimeout caddy.Duration `json:"handshake_timeout,omitempty"` - - // The server name (SNI) to use in TLS handshakes. - ServerName string `json:"server_name,omitempty"` -} - -// MakeTLSClientConfig returns a tls.Config usable by a client to a backend. -// If there is no custom TLS configuration, a nil config may be returned. -func (t TLSConfig) MakeTLSClientConfig(ctx caddy.Context) (*tls.Config, error) { - cfg := new(tls.Config) - - // client auth - if t.ClientCertificateFile != "" && t.ClientCertificateKeyFile == "" { - return nil, fmt.Errorf("client_certificate_file specified without client_certificate_key_file") - } - if t.ClientCertificateFile == "" && t.ClientCertificateKeyFile != "" { - return nil, fmt.Errorf("client_certificate_key_file specified without client_certificate_file") - } - if t.ClientCertificateFile != "" && t.ClientCertificateKeyFile != "" { - cert, err := tls.LoadX509KeyPair(t.ClientCertificateFile, t.ClientCertificateKeyFile) - if err != nil { - return nil, fmt.Errorf("loading client certificate key pair: %v", err) - } - cfg.Certificates = []tls.Certificate{cert} - } - if t.ClientCertificateAutomate != "" { - // TODO: use or enable ctx.IdentityCredentials() ... 
- tlsAppIface, err := ctx.App("tls") - if err != nil { - return nil, fmt.Errorf("getting tls app: %v", err) - } - tlsApp := tlsAppIface.(*caddytls.TLS) - err = tlsApp.Manage([]string{t.ClientCertificateAutomate}) - if err != nil { - return nil, fmt.Errorf("managing client certificate: %v", err) - } - cfg.GetClientCertificate = func(cri *tls.CertificateRequestInfo) (*tls.Certificate, error) { - certs := tlsApp.AllMatchingCertificates(t.ClientCertificateAutomate) - var err error - for _, cert := range certs { - err = cri.SupportsCertificate(&cert.Certificate) - if err == nil { - return &cert.Certificate, nil - } - } - return nil, err - } - } - - // trusted root CAs - if len(t.RootCAPool) > 0 || len(t.RootCAPEMFiles) > 0 { - rootPool := x509.NewCertPool() - for _, encodedCACert := range t.RootCAPool { - caCert, err := decodeBase64DERCert(encodedCACert) - if err != nil { - return nil, fmt.Errorf("parsing CA certificate: %v", err) - } - rootPool.AddCert(caCert) - } - for _, pemFile := range t.RootCAPEMFiles { - pemData, err := ioutil.ReadFile(pemFile) - if err != nil { - return nil, fmt.Errorf("failed reading ca cert: %v", err) - } - rootPool.AppendCertsFromPEM(pemData) - - } - cfg.RootCAs = rootPool - } - - // custom SNI - cfg.ServerName = t.ServerName - - // throw all security out the window - cfg.InsecureSkipVerify = t.InsecureSkipVerify - - // only return a config if it's not empty - if reflect.DeepEqual(cfg, new(tls.Config)) { - return nil, nil - } - - return cfg, nil -} - -// UpstreamResolver holds the set of addresses of DNS resolvers of -// upstream addresses -type UpstreamResolver struct { - // The addresses of DNS resolvers to use when looking up the addresses of proxy upstreams. - // It accepts [network addresses](/docs/conventions#network-addresses) - // with port range of only 1. If the host is an IP address, it will be dialed directly to resolve the upstream server. - // If the host is not an IP address, the addresses are resolved using the [name resolution convention](https://golang.org/pkg/net/#hdr-Name_Resolution) of the Go standard library. - // If the array contains more than 1 resolver address, one is chosen at random. - Addresses []string `json:"addresses,omitempty"` - netAddrs []caddy.NetworkAddress -} - -// KeepAlive holds configuration pertaining to HTTP Keep-Alive. -type KeepAlive struct { - // Whether HTTP Keep-Alive is enabled. Default: true - Enabled *bool `json:"enabled,omitempty"` - - // How often to probe for liveness. - ProbeInterval caddy.Duration `json:"probe_interval,omitempty"` - - // Maximum number of idle connections. Default: 0, which means no limit. - MaxIdleConns int `json:"max_idle_conns,omitempty"` - - // Maximum number of idle connections per host. Default: 32. - MaxIdleConnsPerHost int `json:"max_idle_conns_per_host,omitempty"` - - // How long connections should be kept alive when idle. Default: 0, which means no timeout. - IdleConnTimeout caddy.Duration `json:"idle_timeout,omitempty"` -} - -// decodeBase64DERCert base64-decodes, then DER-decodes, certStr. -func decodeBase64DERCert(certStr string) (*x509.Certificate, error) { - // decode base64 - derBytes, err := base64.StdEncoding.DecodeString(certStr) - if err != nil { - return nil, err - } - - // parse the DER-encoded certificate - return x509.ParseCertificate(derBytes) -} - -// sliceContains returns true if needle is in haystack. 
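// A sketch of the trust knobs above in use; the CA path and server name
// are placeholders, not defaults of this package:
func exampleUpstreamTLS() *TLSConfig {
	return &TLSConfig{
		RootCAPEMFiles:   []string{"/etc/ssl/internal-ca.pem"}, // hypothetical path
		ServerName:       "internal.example",                   // SNI override
		HandshakeTimeout: caddy.Duration(10 * time.Second),
	}
}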
-func sliceContains(haystack []string, needle string) bool { - for _, s := range haystack { - if s == needle { - return true - } - } - return false -} - -// Interface guards -var ( - _ caddy.Provisioner = (*HTTPTransport)(nil) - _ http.RoundTripper = (*HTTPTransport)(nil) - _ caddy.CleanerUpper = (*HTTPTransport)(nil) - _ TLSTransport = (*HTTPTransport)(nil) -) diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy/reverseproxy.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy/reverseproxy.go deleted file mode 100644 index 671ea044..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy/reverseproxy.go +++ /dev/null @@ -1,964 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package reverseproxy - -import ( - "bytes" - "context" - "encoding/json" - "errors" - "fmt" - "io" - "net" - "net/http" - "net/textproto" - "net/url" - "regexp" - "strconv" - "strings" - "sync" - "time" - - "github.com/caddyserver/caddy/v2" - "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile" - "github.com/caddyserver/caddy/v2/modules/caddyhttp" - "github.com/caddyserver/caddy/v2/modules/caddyhttp/headers" - "go.uber.org/zap" - "golang.org/x/net/http/httpguts" -) - -func init() { - caddy.RegisterModule(Handler{}) -} - -// Handler implements a highly configurable and production-ready reverse proxy. -// -// Upon proxying, this module sets the following placeholders (which can be used -// both within and after this handler; for example, in response headers): -// -// Placeholder | Description -// ------------|------------- -// `{http.reverse_proxy.upstream.address}` | The full address to the upstream as given in the config -// `{http.reverse_proxy.upstream.hostport}` | The host:port of the upstream -// `{http.reverse_proxy.upstream.host}` | The host of the upstream -// `{http.reverse_proxy.upstream.port}` | The port of the upstream -// `{http.reverse_proxy.upstream.requests}` | The approximate current number of requests to the upstream -// `{http.reverse_proxy.upstream.max_requests}` | The maximum approximate number of requests allowed to the upstream -// `{http.reverse_proxy.upstream.fails}` | The number of recent failed requests to the upstream -// `{http.reverse_proxy.upstream.latency}` | How long it took the proxy upstream to write the response header. -// `{http.reverse_proxy.upstream.duration}` | Time spent proxying to the upstream, including writing response body to client. -// `{http.reverse_proxy.duration}` | Total time spent proxying, including selecting an upstream, retries, and writing response. -type Handler struct { - // Configures the method of transport for the proxy. A transport - // is what performs the actual "round trip" to the backend. - // The default transport is plaintext HTTP. 
- TransportRaw json.RawMessage `json:"transport,omitempty" caddy:"namespace=http.reverse_proxy.transport inline_key=protocol"` - - // A circuit breaker may be used to relieve pressure on a backend - // that is beginning to exhibit symptoms of stress or latency. - // By default, there is no circuit breaker. - CBRaw json.RawMessage `json:"circuit_breaker,omitempty" caddy:"namespace=http.reverse_proxy.circuit_breakers inline_key=type"` - - // Load balancing distributes load/requests between backends. - LoadBalancing *LoadBalancing `json:"load_balancing,omitempty"` - - // Health checks update the status of backends, whether they are - // up or down. Down backends will not be proxied to. - HealthChecks *HealthChecks `json:"health_checks,omitempty"` - - // Upstreams is the list of backends to proxy to. - Upstreams UpstreamPool `json:"upstreams,omitempty"` - - // Adjusts how often to flush the response buffer. By default, - // no periodic flushing is done. A negative value disables - // response buffering, and flushes immediately after each - // write to the client. This option is ignored when the upstream's - // response is recognized as a streaming response, or if its - // content length is -1; for such responses, writes are flushed - // to the client immediately. - FlushInterval caddy.Duration `json:"flush_interval,omitempty"` - - // Headers manipulates headers between Caddy and the backend. - // By default, all headers are passed-thru without changes, - // with the exceptions of special hop-by-hop headers. - // - // X-Forwarded-For and X-Forwarded-Proto are also set - // implicitly, but this may change in the future if the official - // standardized Forwarded header field gains more adoption. - Headers *headers.Handler `json:"headers,omitempty"` - - // If true, the entire request body will be read and buffered - // in memory before being proxied to the backend. This should - // be avoided if at all possible for performance reasons, but - // could be useful if the backend is intolerant of read latency. - BufferRequests bool `json:"buffer_requests,omitempty"` - - // If true, the entire response body will be read and buffered - // in memory before being proxied to the client. This should - // be avoided if at all possible for performance reasons, but - // could be useful if the backend has tighter memory constraints. - BufferResponses bool `json:"buffer_responses,omitempty"` - - // If body buffering is enabled, the maximum size of the buffers - // used for the requests and responses (in bytes). - MaxBufferSize int64 `json:"max_buffer_size,omitempty"` - - // List of handlers and their associated matchers to evaluate - // after successful roundtrips. The first handler that matches - // the response from a backend will be invoked. The response - // body from the backend will not be written to the client; - // it is up to the handler to finish handling the response. - // If passive health checks are enabled, any errors from the - // handler chain will not affect the health status of the - // backend. 
- // - // Three new placeholders are available in this handler chain: - // - `{http.reverse_proxy.status_code}` The status code from the response - // - `{http.reverse_proxy.status_text}` The status text from the response - // - `{http.reverse_proxy.header.*}` The headers from the response - HandleResponse []caddyhttp.ResponseHandler `json:"handle_response,omitempty"` - - Transport http.RoundTripper `json:"-"` - CB CircuitBreaker `json:"-"` - - // Holds the named response matchers from the Caddyfile while adapting - responseMatchers map[string]caddyhttp.ResponseMatcher - - // Holds the handle_response Caddyfile tokens while adapting - handleResponseSegments []*caddyfile.Dispenser - - ctx caddy.Context - logger *zap.Logger -} - -// CaddyModule returns the Caddy module information. -func (Handler) CaddyModule() caddy.ModuleInfo { - return caddy.ModuleInfo{ - ID: "http.handlers.reverse_proxy", - New: func() caddy.Module { return new(Handler) }, - } -} - -// Provision ensures that h is set up properly before use. -func (h *Handler) Provision(ctx caddy.Context) error { - h.ctx = ctx - h.logger = ctx.Logger(h) - - // verify SRV compatibility - for i, v := range h.Upstreams { - if v.LookupSRV == "" { - continue - } - if h.HealthChecks != nil && h.HealthChecks.Active != nil { - return fmt.Errorf(`upstream: lookup_srv is incompatible with active health checks: %d: {"dial": %q, "lookup_srv": %q}`, i, v.Dial, v.LookupSRV) - } - if v.Dial != "" { - return fmt.Errorf(`upstream: specifying dial address is incompatible with lookup_srv: %d: {"dial": %q, "lookup_srv": %q}`, i, v.Dial, v.LookupSRV) - } - } - - // start by loading modules - if h.TransportRaw != nil { - mod, err := ctx.LoadModule(h, "TransportRaw") - if err != nil { - return fmt.Errorf("loading transport: %v", err) - } - h.Transport = mod.(http.RoundTripper) - } - if h.LoadBalancing != nil && h.LoadBalancing.SelectionPolicyRaw != nil { - mod, err := ctx.LoadModule(h.LoadBalancing, "SelectionPolicyRaw") - if err != nil { - return fmt.Errorf("loading load balancing selection policy: %s", err) - } - h.LoadBalancing.SelectionPolicy = mod.(Selector) - } - if h.CBRaw != nil { - mod, err := ctx.LoadModule(h, "CBRaw") - if err != nil { - return fmt.Errorf("loading circuit breaker: %s", err) - } - h.CB = mod.(CircuitBreaker) - } - - // ensure any embedded headers handler module gets provisioned - // (see https://caddy.community/t/set-cookie-manipulation-in-reverse-proxy/7666?u=matt - // for what happens if we forget to provision it) - if h.Headers != nil { - err := h.Headers.Provision(ctx) - if err != nil { - return fmt.Errorf("provisioning embedded headers handler: %v", err) - } - } - - // set up transport - if h.Transport == nil { - t := &HTTPTransport{ - KeepAlive: &KeepAlive{ - ProbeInterval: caddy.Duration(30 * time.Second), - IdleConnTimeout: caddy.Duration(2 * time.Minute), - MaxIdleConnsPerHost: 32, // seems about optimal, see #2805 - }, - DialTimeout: caddy.Duration(10 * time.Second), - } - err := t.Provision(ctx) - if err != nil { - return fmt.Errorf("provisioning default transport: %v", err) - } - h.Transport = t - } - - // set up load balancing - if h.LoadBalancing == nil { - h.LoadBalancing = new(LoadBalancing) - } - if h.LoadBalancing.SelectionPolicy == nil { - h.LoadBalancing.SelectionPolicy = RandomSelection{} - } - if h.LoadBalancing.TryDuration > 0 && h.LoadBalancing.TryInterval == 0 { - // a non-zero try_duration with a zero try_interval - // will always spin the CPU for try_duration if the - // upstream is local or low-latency; 
avoid that by - // defaulting to a sane wait period between attempts - h.LoadBalancing.TryInterval = caddy.Duration(250 * time.Millisecond) - } - lbMatcherSets, err := ctx.LoadModule(h.LoadBalancing, "RetryMatchRaw") - if err != nil { - return err - } - err = h.LoadBalancing.RetryMatch.FromInterface(lbMatcherSets) - if err != nil { - return err - } - - // set up upstreams - for _, upstream := range h.Upstreams { - // create or get the host representation for this upstream - var host Host = new(upstreamHost) - existingHost, loaded := hosts.LoadOrStore(upstream.String(), host) - if loaded { - host = existingHost.(Host) - } - upstream.Host = host - - // give it the circuit breaker, if any - upstream.cb = h.CB - - // if the passive health checker has a non-zero UnhealthyRequestCount - // but the upstream has no MaxRequests set (they are the same thing, - // but the passive health checker is a default value for upstreams - // without MaxRequests), copy the value into this upstream, since the - // value in the upstream (MaxRequests) is what is used during - // availability checks - if h.HealthChecks != nil && h.HealthChecks.Passive != nil { - h.HealthChecks.Passive.logger = h.logger.Named("health_checker.passive") - if h.HealthChecks.Passive.UnhealthyRequestCount > 0 && - upstream.MaxRequests == 0 { - upstream.MaxRequests = h.HealthChecks.Passive.UnhealthyRequestCount - } - } - - // upstreams need independent access to the passive - // health check policy because passive health checks - // run without access to h. - if h.HealthChecks != nil { - upstream.healthCheckPolicy = h.HealthChecks.Passive - } - } - - if h.HealthChecks != nil { - // set defaults on passive health checks, if necessary - if h.HealthChecks.Passive != nil { - if h.HealthChecks.Passive.FailDuration > 0 && h.HealthChecks.Passive.MaxFails == 0 { - h.HealthChecks.Passive.MaxFails = 1 - } - } - - // if active health checks are enabled, configure them and start a worker - if h.HealthChecks.Active != nil && (h.HealthChecks.Active.Path != "" || - h.HealthChecks.Active.URI != "" || - h.HealthChecks.Active.Port != 0) { - - h.HealthChecks.Active.logger = h.logger.Named("health_checker.active") - - timeout := time.Duration(h.HealthChecks.Active.Timeout) - if timeout == 0 { - timeout = 5 * time.Second - } - - if h.HealthChecks.Active.Path != "" { - h.HealthChecks.Active.logger.Warn("the 'path' option is deprecated, please use 'uri' instead!") - } - - // parse the URI string (supports path and query) - if h.HealthChecks.Active.URI != "" { - parsedURI, err := url.Parse(h.HealthChecks.Active.URI) - if err != nil { - return err - } - h.HealthChecks.Active.uri = parsedURI - } - - h.HealthChecks.Active.httpClient = &http.Client{ - Timeout: timeout, - Transport: h.Transport, - } - - for _, upstream := range h.Upstreams { - // if there's an alternative port for health-check provided in the config, - // then use it, otherwise use the port of upstream.
- if h.HealthChecks.Active.Port != 0 { - upstream.activeHealthCheckPort = h.HealthChecks.Active.Port - } - } - - if h.HealthChecks.Active.Interval == 0 { - h.HealthChecks.Active.Interval = caddy.Duration(30 * time.Second) - } - - if h.HealthChecks.Active.ExpectBody != "" { - var err error - h.HealthChecks.Active.bodyRegexp, err = regexp.Compile(h.HealthChecks.Active.ExpectBody) - if err != nil { - return fmt.Errorf("expect_body: compiling regular expression: %v", err) - } - } - - go h.activeHealthChecker() - } - } - - // set up any response routes - for i, rh := range h.HandleResponse { - err := rh.Provision(ctx) - if err != nil { - return fmt.Errorf("provisioning response handler %d: %v", i, err) - } - } - - return nil -} - -// Cleanup cleans up the resources made by h during provisioning. -func (h *Handler) Cleanup() error { - // TODO: Close keepalive connections on reload? https://github.com/caddyserver/caddy/pull/2507/files#diff-70219fd88fe3f36834f474ce6537ed26R762 - - // remove hosts from our config from the pool - for _, upstream := range h.Upstreams { - _, _ = hosts.Delete(upstream.String()) - } - - return nil -} - -func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request, next caddyhttp.Handler) error { - repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer) - - // if enabled, buffer client request; - // this should only be enabled if the - // upstream requires it and does not - // work with "slow clients" (gunicorn, - // etc.) - this obviously has a perf - // overhead and makes the proxy at - // risk of exhausting memory and more - // susceptible to slowloris attacks, - // so it is strongly recommended to - // only use this feature if absolutely - // required, if read timeouts are set, - // and if body size is limited - if h.BufferRequests { - r.Body = h.bufferedBody(r.Body) - } - - // prepare the request for proxying; this is needed only once - err := h.prepareRequest(r) - if err != nil { - return caddyhttp.Error(http.StatusInternalServerError, - fmt.Errorf("preparing request for upstream round-trip: %v", err)) - } - - // we will need the original headers and Host value if - // header operations are configured; and we should - // restore them after we're done if they are changed - // (for example, changing the outbound Host header - // should not permanently change r.Host; issue #3509) - reqHost := r.Host - reqHeader := r.Header - defer func() { - r.Host = reqHost // TODO: data race, see #4038 - r.Header = reqHeader // TODO: data race, see #4038 - }() - - start := time.Now() - defer func() { - // total proxying duration, including time spent on LB and retries - repl.Set("http.reverse_proxy.duration", time.Since(start)) - }() - - var proxyErr error - for { - // choose an available upstream - upstream := h.LoadBalancing.SelectionPolicy.Select(h.Upstreams, r, w) - if upstream == nil { - if proxyErr == nil { - proxyErr = fmt.Errorf("no upstreams available") - } - if !h.LoadBalancing.tryAgain(h.ctx, start, proxyErr, r) { - break - } - continue - } - - // the dial address may vary per-request if placeholders are - // used, so perform those replacements here; the resulting - // DialInfo struct should have valid network address syntax - dialInfo, err := upstream.fillDialInfo(r) - if err != nil { - return statusError(fmt.Errorf("making dial info: %v", err)) - } - - // attach to the request information about how to dial the upstream; - // this is necessary because the information cannot be sufficiently - // or satisfactorily represented in a URL - 
caddyhttp.SetVar(r.Context(), dialInfoVarKey, dialInfo) - - // set placeholders with information about this upstream - repl.Set("http.reverse_proxy.upstream.address", dialInfo.String()) - repl.Set("http.reverse_proxy.upstream.hostport", dialInfo.Address) - repl.Set("http.reverse_proxy.upstream.host", dialInfo.Host) - repl.Set("http.reverse_proxy.upstream.port", dialInfo.Port) - repl.Set("http.reverse_proxy.upstream.requests", upstream.Host.NumRequests()) - repl.Set("http.reverse_proxy.upstream.max_requests", upstream.MaxRequests) - repl.Set("http.reverse_proxy.upstream.fails", upstream.Host.Fails()) - - // mutate request headers according to this upstream; - // because we're in a retry loop, we have to copy - // headers (and the r.Host value) from the original - // so that each retry is identical to the first - if h.Headers != nil && h.Headers.Request != nil { - r.Header = make(http.Header) - copyHeader(r.Header, reqHeader) - r.Host = reqHost - h.Headers.Request.ApplyToRequest(r) - } - - // proxy the request to that upstream - proxyErr = h.reverseProxy(w, r, repl, dialInfo, next) - if proxyErr == nil || proxyErr == context.Canceled { - // context.Canceled happens when the downstream client - // cancels the request, which is not our failure - return nil - } - - // if the roundtrip was successful, don't retry the request or - // ding the health status of the upstream (an error can still - // occur after the roundtrip if, for example, a response handler - // after the roundtrip returns an error) - if succ, ok := proxyErr.(roundtripSucceeded); ok { - return succ.error - } - - // remember this failure (if enabled) - h.countFailure(upstream) - - // if we've tried long enough, break - if !h.LoadBalancing.tryAgain(h.ctx, start, proxyErr, r) { - break - } - } - - return statusError(proxyErr) -} - -// prepareRequest modifies req so that it is ready to be proxied, -// except for directing to a specific upstream. This method mutates -// headers and other necessary properties of the request and should -// be done just once (before proxying) regardless of proxy retries. -// This assumes that no mutations of the request are performed -// by h during or after proxying. -func (h Handler) prepareRequest(req *http.Request) error { - // most of this is borrowed from the Go std lib reverse proxy - - if req.ContentLength == 0 { - req.Body = nil // Issue golang/go#16036: nil Body for http.Transport retries - } - - req.Close = false - - // if User-Agent is not set by client, then explicitly - // disable it so it's not set to default value by std lib - if _, ok := req.Header["User-Agent"]; !ok { - req.Header.Set("User-Agent", "") - } - - reqUpType := upgradeType(req.Header) - removeConnectionHeaders(req.Header) - - // Remove hop-by-hop headers to the backend. Especially - // important is "Connection" because we want a persistent - // connection, regardless of what the client sent to us. - // Issue golang/go#46313: don't skip if field is empty. - for _, h := range hopHeaders { - // Issue golang/go#21096: tell backend applications that care about trailer support - // that we support trailers. (We do, but we don't go out of our way to - // advertise that unless the incoming client request thought it was worth - // mentioning.) 
- if h == "Te" && httpguts.HeaderValuesContainsToken(req.Header["Te"], "trailers") { - req.Header.Set("Te", "trailers") - continue - } - req.Header.Del(h) - } - - // After stripping all the hop-by-hop connection headers above, add back any - // necessary for protocol upgrades, such as for websockets. - if reqUpType != "" { - req.Header.Set("Connection", "Upgrade") - req.Header.Set("Upgrade", reqUpType) - } - - if clientIP, _, err := net.SplitHostPort(req.RemoteAddr); err == nil { - // If we aren't the first proxy retain prior - // X-Forwarded-For information as a comma+space - // separated list and fold multiple headers into one. - prior, ok := req.Header["X-Forwarded-For"] - omit := ok && prior == nil // Issue 38079: nil now means don't populate the header - if len(prior) > 0 { - clientIP = strings.Join(prior, ", ") + ", " + clientIP - } - if !omit { - req.Header.Set("X-Forwarded-For", clientIP) - } - } - - prior, ok := req.Header["X-Forwarded-Proto"] - omit := ok && prior == nil - if len(prior) == 0 && !omit { - // set X-Forwarded-Proto; many backend apps expect this too - proto := "https" - if req.TLS == nil { - proto = "http" - } - req.Header.Set("X-Forwarded-Proto", proto) - } - - return nil -} - -// reverseProxy performs a round-trip to the given backend and processes the response with the client. -// (This method is mostly the beginning of what was borrowed from the net/http/httputil package in the -// Go standard library which was used as the foundation.) -func (h *Handler) reverseProxy(rw http.ResponseWriter, req *http.Request, repl *caddy.Replacer, di DialInfo, next caddyhttp.Handler) error { - _ = di.Upstream.Host.CountRequest(1) - //nolint:errcheck - defer di.Upstream.Host.CountRequest(-1) - - // point the request to this upstream - h.directRequest(req, di) - - // do the round-trip; emit debug log with values we know are - // safe, or if there is no error, emit fuller log entry - start := time.Now() - res, err := h.Transport.RoundTrip(req) - duration := time.Since(start) - logger := h.logger.With( - zap.String("upstream", di.Upstream.String()), - zap.Object("request", caddyhttp.LoggableHTTPRequest{Request: req}), - ) - if err != nil { - logger.Debug("upstream roundtrip", - zap.Duration("duration", duration), - zap.Error(err)) - return err - } - logger.Debug("upstream roundtrip", - zap.Object("headers", caddyhttp.LoggableHTTPHeader(res.Header)), - zap.Int("status", res.StatusCode)) - - // duration until upstream wrote response headers (roundtrip duration) - repl.Set("http.reverse_proxy.upstream.latency", duration) - - // update circuit breaker on current conditions - if di.Upstream.cb != nil { - di.Upstream.cb.RecordMetric(res.StatusCode, duration) - } - - // perform passive health checks (if enabled) - if h.HealthChecks != nil && h.HealthChecks.Passive != nil { - // strike if the status code matches one that is "bad" - for _, badStatus := range h.HealthChecks.Passive.UnhealthyStatus { - if caddyhttp.StatusCodeMatches(res.StatusCode, badStatus) { - h.countFailure(di.Upstream) - } - } - - // strike if the roundtrip took too long - if h.HealthChecks.Passive.UnhealthyLatency > 0 && - duration >= time.Duration(h.HealthChecks.Passive.UnhealthyLatency) { - h.countFailure(di.Upstream) - } - } - - // if enabled, buffer the response body - if h.BufferResponses { - res.Body = h.bufferedBody(res.Body) - } - - // see if any response handler is configured for this response from the backend - for i, rh := range h.HandleResponse { - if rh.Match != nil && !rh.Match.Match(res.StatusCode, 
res.Header) { - continue - } - - // if configured to only change the status code, do that then continue regular proxy response - if statusCodeStr := rh.StatusCode.String(); statusCodeStr != "" { - statusCode, err := strconv.Atoi(repl.ReplaceAll(statusCodeStr, "")) - if err != nil { - return caddyhttp.Error(http.StatusInternalServerError, err) - } - if statusCode != 0 { - res.StatusCode = statusCode - } - break - } - - // otherwise, if there are any routes configured, execute those as the - // actual response instead of what we got from the proxy backend - if len(rh.Routes) == 0 { - continue - } - - res.Body.Close() - - // set up the replacer so that parts of the original response can be - // used for routing decisions - for field, value := range res.Header { - repl.Set("http.reverse_proxy.header."+field, strings.Join(value, ",")) - } - repl.Set("http.reverse_proxy.status_code", res.StatusCode) - repl.Set("http.reverse_proxy.status_text", res.Status) - - h.logger.Debug("handling response", zap.Int("handler", i)) - if routeErr := rh.Routes.Compile(next).ServeHTTP(rw, req); routeErr != nil { - // wrap error in roundtripSucceeded so caller knows that - // the roundtrip was successful and to not retry - return roundtripSucceeded{routeErr} - } - } - - // deal with 101 Switching Protocols responses: (WebSocket, h2c, etc) - if res.StatusCode == http.StatusSwitchingProtocols { - h.handleUpgradeResponse(logger, rw, req, res) - return nil - } - - removeConnectionHeaders(res.Header) - - for _, h := range hopHeaders { - res.Header.Del(h) - } - - // apply any response header operations - if h.Headers != nil && h.Headers.Response != nil { - if h.Headers.Response.Require == nil || - h.Headers.Response.Require.Match(res.StatusCode, res.Header) { - h.Headers.Response.ApplyTo(res.Header, repl) - } - } - - copyHeader(rw.Header(), res.Header) - - // The "Trailer" header isn't included in the Transport's response, - // at least for *http.Transport. Build it up from Trailer. - announcedTrailers := len(res.Trailer) - if announcedTrailers > 0 { - trailerKeys := make([]string, 0, len(res.Trailer)) - for k := range res.Trailer { - trailerKeys = append(trailerKeys, k) - } - rw.Header().Add("Trailer", strings.Join(trailerKeys, ", ")) - } - - rw.WriteHeader(res.StatusCode) - err = h.copyResponse(rw, res.Body, h.flushInterval(req, res)) - res.Body.Close() // close now, instead of defer, to populate res.Trailer - if err != nil { - // we're streaming the response and we've already written headers, so - // there's nothing an error handler can do to recover at this point; - // the standard lib's proxy panics at this point, but we'll just log - // the error and abort the stream here - h.logger.Error("aborting with incomplete response", zap.Error(err)) - return nil - } - - if len(res.Trailer) > 0 { - // Force chunking if we saw a response trailer. - // This prevents net/http from calculating the length for short - // bodies and adding a Content-Length. 
- if fl, ok := rw.(http.Flusher); ok { - fl.Flush() - } - } - - // total duration spent proxying, including writing response body - repl.Set("http.reverse_proxy.upstream.duration", duration) - - if len(res.Trailer) == announcedTrailers { - copyHeader(rw.Header(), res.Trailer) - return nil - } - - for k, vv := range res.Trailer { - k = http.TrailerPrefix + k - for _, v := range vv { - rw.Header().Add(k, v) - } - } - - return nil -} - -// tryAgain takes the time that the handler was initially invoked -// as well as any error currently obtained, and the request being -// tried, and returns true if another attempt should be made at -// proxying the request. If true is returned, it has already blocked -// long enough before the next retry (i.e. no more sleeping is -// needed). If false is returned, the handler should stop trying to -// proxy the request. -func (lb LoadBalancing) tryAgain(ctx caddy.Context, start time.Time, proxyErr error, req *http.Request) bool { - // if we've tried long enough, break - if time.Since(start) >= time.Duration(lb.TryDuration) { - return false - } - - // if the error occurred while dialing (i.e. a connection - // could not even be established to the upstream), then it - // should be safe to retry, since without a connection, no - // HTTP request can be transmitted; but if the error is not - // specifically a dialer error, we need to be careful - if _, ok := proxyErr.(DialError); proxyErr != nil && !ok { - // if the error occurred after a connection was established, - // we have to assume the upstream received the request, and - // retries need to be carefully decided, because some requests - // are not idempotent - if lb.RetryMatch == nil && req.Method != "GET" { - // by default, don't retry requests if they aren't GET - return false - } - if !lb.RetryMatch.AnyMatch(req) { - return false - } - } - - // otherwise, wait and try the next available host - select { - case <-time.After(time.Duration(lb.TryInterval)): - return true - case <-ctx.Done(): - return false - } -} - -// directRequest modifies only req.URL so that it points to the upstream -// in the given DialInfo. It must modify ONLY the request URL. -func (h Handler) directRequest(req *http.Request, di DialInfo) { - // we need a host, so set the upstream's host address - reqHost := di.Address - - // if the port equates to the scheme, strip the port because - // it's weird to make a request like http://example.com:80/. - if (req.URL.Scheme == "http" && di.Port == "80") || - (req.URL.Scheme == "https" && di.Port == "443") { - reqHost = di.Host - } - - req.URL.Host = reqHost -} - -// bufferedBody reads originalBody into a buffer, then returns a reader for the buffer. -// Always close the return value when done with it, just like if it was the original body! 
-func (h Handler) bufferedBody(originalBody io.ReadCloser) io.ReadCloser { - buf := bufPool.Get().(*bytes.Buffer) - buf.Reset() - if h.MaxBufferSize > 0 { - n, err := io.CopyN(buf, originalBody, h.MaxBufferSize) - if err != nil || n == h.MaxBufferSize { - return bodyReadCloser{ - Reader: io.MultiReader(buf, originalBody), - buf: buf, - body: originalBody, - } - } - } else { - _, _ = io.Copy(buf, originalBody) - } - originalBody.Close() // no point in keeping it open - return bodyReadCloser{ - Reader: buf, - buf: buf, - } -} - -func copyHeader(dst, src http.Header) { - for k, vv := range src { - for _, v := range vv { - dst.Add(k, v) - } - } -} - -func upgradeType(h http.Header) string { - if !httpguts.HeaderValuesContainsToken(h["Connection"], "Upgrade") { - return "" - } - return strings.ToLower(h.Get("Upgrade")) -} - -// removeConnectionHeaders removes hop-by-hop headers listed in the "Connection" header of h. -// See RFC 7230, section 6.1 -func removeConnectionHeaders(h http.Header) { - for _, f := range h["Connection"] { - for _, sf := range strings.Split(f, ",") { - if sf = textproto.TrimString(sf); sf != "" { - h.Del(sf) - } - } - } -} - -// statusError returns an error value that has a status code. -func statusError(err error) error { - // errors proxying usually mean there is a problem with the upstream(s) - statusCode := http.StatusBadGateway - - // if the client canceled the request (usually this means they closed - // the connection, so they won't see any response), we can report it - // as a client error (4xx) and not a server error (5xx); unfortunately - // the Go standard library, at least at time of writing in late 2020, - // obnoxiously wraps the exported, standard context.Canceled error with - // an unexported garbage value that we have to do a substring check for: - // https://github.com/golang/go/blob/6965b01ea248cabb70c3749fd218b36089a21efb/src/net/net.go#L416-L430 - if errors.Is(err, context.Canceled) || strings.Contains(err.Error(), "operation was canceled") { - // regrettably, there is no standard error code for "client closed connection", but - // for historical reasons we can use a code that a lot of people are already using; - // using 5xx is problematic for users; see #3748 - statusCode = 499 - } - return caddyhttp.Error(statusCode, err) -} - -// LoadBalancing has parameters related to load balancing. -type LoadBalancing struct { - // A selection policy is how to choose an available backend. - // The default policy is random selection. - SelectionPolicyRaw json.RawMessage `json:"selection_policy,omitempty" caddy:"namespace=http.reverse_proxy.selection_policies inline_key=policy"` - - // How long to try selecting available backends for each request - // if the next available host is down. By default, this retry is - // disabled. Clients will wait for up to this long while the load - // balancer tries to find an available upstream host. - TryDuration caddy.Duration `json:"try_duration,omitempty"` - - // How long to wait between selecting the next host from the pool. Default - // is 250ms. Only relevant when a request to an upstream host fails. Be - // aware that setting this to 0 with a non-zero try_duration can cause the - // CPU to spin if all backends are down and latency is very low. - TryInterval caddy.Duration `json:"try_interval,omitempty"` - - // A list of matcher sets that restricts with which requests retries are - // allowed. 
A request must match any of the given matcher sets in order - // to be retried if the connection to the upstream succeeded but the - // subsequent round-trip failed. If the connection to the upstream failed, - // a retry is always allowed. If unspecified, only GET requests will be - // allowed to be retried. Note that a retry is done with the next available - // host according to the load balancing policy. - RetryMatchRaw caddyhttp.RawMatcherSets `json:"retry_match,omitempty" caddy:"namespace=http.matchers"` - - SelectionPolicy Selector `json:"-"` - RetryMatch caddyhttp.MatcherSets `json:"-"` -} - -// Selector selects an available upstream from the pool. -type Selector interface { - Select(UpstreamPool, *http.Request, http.ResponseWriter) *Upstream -} - -// Hop-by-hop headers. These are removed when sent to the backend. -// As of RFC 7230, hop-by-hop headers are required to appear in the -// Connection header field. These are the headers defined by the -// obsoleted RFC 2616 (section 13.5.1) and are used for backward -// compatibility. -var hopHeaders = []string{ - "Alt-Svc", - "Connection", - "Proxy-Connection", // non-standard but still sent by libcurl and rejected by e.g. google - "Keep-Alive", - "Proxy-Authenticate", - "Proxy-Authorization", - "Te", // canonicalized version of "TE" - "Trailer", // not Trailers per URL above; https://www.rfc-editor.org/errata_search.php?eid=4522 - "Transfer-Encoding", - "Upgrade", -} - -// DialError is an error that specifically occurs -// in a call to Dial or DialContext. -type DialError struct{ error } - -// TLSTransport is implemented by transports -// that are capable of using TLS. -type TLSTransport interface { - // TLSEnabled returns true if the transport - // has TLS enabled, false otherwise. - TLSEnabled() bool - - // EnableTLS enables TLS within the transport - // if it is not already, using the provided - // value as a basis for the TLS config. - EnableTLS(base *TLSConfig) error -} - -// roundtripSucceeded is an error type that is returned if the -// roundtrip succeeded, but an error occurred after-the-fact. -type roundtripSucceeded struct{ error } - -// bodyReadCloser is a reader that, upon closing, will return -// its buffer to the pool and close the underlying body reader. -type bodyReadCloser struct { - io.Reader - buf *bytes.Buffer - body io.ReadCloser -} - -func (brc bodyReadCloser) Close() error { - bufPool.Put(brc.buf) - if brc.body != nil { - return brc.body.Close() - } - return nil -} - -// bufPool is used for buffering requests and responses. -var bufPool = sync.Pool{ - New: func() interface{} { - return new(bytes.Buffer) - }, -} - -// Interface guards -var ( - _ caddy.Provisioner = (*Handler)(nil) - _ caddy.CleanerUpper = (*Handler)(nil) - _ caddyhttp.MiddlewareHandler = (*Handler)(nil) -) diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy/selectionpolicies.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy/selectionpolicies.go deleted file mode 100644 index 001f7f80..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy/selectionpolicies.go +++ /dev/null @@ -1,557 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package reverseproxy - -import ( - "crypto/hmac" - "crypto/sha256" - "encoding/hex" - "fmt" - "hash/fnv" - weakrand "math/rand" - "net" - "net/http" - "strconv" - "sync/atomic" - "time" - - "github.com/caddyserver/caddy/v2" - "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile" -) - -func init() { - caddy.RegisterModule(RandomSelection{}) - caddy.RegisterModule(RandomChoiceSelection{}) - caddy.RegisterModule(LeastConnSelection{}) - caddy.RegisterModule(RoundRobinSelection{}) - caddy.RegisterModule(FirstSelection{}) - caddy.RegisterModule(IPHashSelection{}) - caddy.RegisterModule(URIHashSelection{}) - caddy.RegisterModule(HeaderHashSelection{}) - caddy.RegisterModule(CookieHashSelection{}) - - weakrand.Seed(time.Now().UTC().UnixNano()) -} - -// RandomSelection is a policy that selects -// an available host at random. -type RandomSelection struct{} - -// CaddyModule returns the Caddy module information. -func (RandomSelection) CaddyModule() caddy.ModuleInfo { - return caddy.ModuleInfo{ - ID: "http.reverse_proxy.selection_policies.random", - New: func() caddy.Module { return new(RandomSelection) }, - } -} - -// Select returns an available host, if any. -func (r RandomSelection) Select(pool UpstreamPool, request *http.Request, _ http.ResponseWriter) *Upstream { - return selectRandomHost(pool) -} - -// UnmarshalCaddyfile sets up the module from Caddyfile tokens. -func (r *RandomSelection) UnmarshalCaddyfile(d *caddyfile.Dispenser) error { - for d.Next() { - if d.NextArg() { - return d.ArgErr() - } - } - return nil -} - -// RandomChoiceSelection is a policy that selects -// two or more available hosts at random, then -// chooses the one with the least load. -type RandomChoiceSelection struct { - // The size of the sub-pool created from the larger upstream pool. The default value - // is 2 and the maximum at selection time is the size of the upstream pool. - Choose int `json:"choose,omitempty"` -} - -// CaddyModule returns the Caddy module information. -func (RandomChoiceSelection) CaddyModule() caddy.ModuleInfo { - return caddy.ModuleInfo{ - ID: "http.reverse_proxy.selection_policies.random_choose", - New: func() caddy.Module { return new(RandomChoiceSelection) }, - } -} - -// UnmarshalCaddyfile sets up the module from Caddyfile tokens. -func (r *RandomChoiceSelection) UnmarshalCaddyfile(d *caddyfile.Dispenser) error { - for d.Next() { - if !d.NextArg() { - return d.ArgErr() - } - chooseStr := d.Val() - choose, err := strconv.Atoi(chooseStr) - if err != nil { - return d.Errf("invalid choice value '%s': %v", chooseStr, err) - } - r.Choose = choose - } - return nil -} - -// Provision sets up r. -func (r *RandomChoiceSelection) Provision(ctx caddy.Context) error { - if r.Choose == 0 { - r.Choose = 2 - } - return nil -} - -// Validate ensures that r's configuration is valid. -func (r RandomChoiceSelection) Validate() error { - if r.Choose < 2 { - return fmt.Errorf("choose must be at least 2") - } - return nil -} - -// Select returns an available host, if any. 
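RandomChoiceSelection, whose Select follows, is the classic "power of d choices" trade-off: sampling a couple of random hosts and keeping the less loaded one avoids both the imbalance of pure random selection and the cost of a full least-loaded scan. A minimal sketch with a hypothetical host type:

package main

import (
	"fmt"
	"math/rand"
)

// host is a stand-in for *Upstream with an in-flight request count.
type host struct {
	name string
	load int
}

// pickOfK samples k hosts uniformly at random and keeps the least loaded.
func pickOfK(pool []host, k int) host {
	best := pool[rand.Intn(len(pool))]
	for i := 1; i < k; i++ {
		if c := pool[rand.Intn(len(pool))]; c.load < best.load {
			best = c
		}
	}
	return best
}

func main() {
	pool := []host{{"a", 3}, {"b", 0}, {"c", 7}}
	fmt.Println(pickOfK(pool, 2).name) // "b" most of the time
}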
-func (r RandomChoiceSelection) Select(pool UpstreamPool, _ *http.Request, _ http.ResponseWriter) *Upstream { - k := r.Choose - if k > len(pool) { - k = len(pool) - } - choices := make([]*Upstream, k) - for i, upstream := range pool { - if !upstream.Available() { - continue - } - j := weakrand.Intn(i + 1) - if j < k { - choices[j] = upstream - } - } - return leastRequests(choices) -} - -// LeastConnSelection is a policy that selects the -// host with the least active requests. If multiple -// hosts have the same fewest number, one is chosen -// randomly. The term "conn" or "connection" is used -// in this policy name due to its similar meaning in -// other software, but our load balancer actually -// counts active requests rather than connections, -// since these days requests are multiplexed onto -// shared connections. -type LeastConnSelection struct{} - -// CaddyModule returns the Caddy module information. -func (LeastConnSelection) CaddyModule() caddy.ModuleInfo { - return caddy.ModuleInfo{ - ID: "http.reverse_proxy.selection_policies.least_conn", - New: func() caddy.Module { return new(LeastConnSelection) }, - } -} - -// Select selects the up host with the least number of connections in the -// pool. If more than one host has the same least number of connections, -// one of the hosts is chosen at random. -func (LeastConnSelection) Select(pool UpstreamPool, _ *http.Request, _ http.ResponseWriter) *Upstream { - var bestHost *Upstream - var count int - leastReqs := -1 - - for _, host := range pool { - if !host.Available() { - continue - } - numReqs := host.NumRequests() - if leastReqs == -1 || numReqs < leastReqs { - leastReqs = numReqs - count = 0 - } - - // among hosts with same least connections, perform a reservoir - // sample: https://en.wikipedia.org/wiki/Reservoir_sampling - if numReqs == leastReqs { - count++ - if (weakrand.Int() % count) == 0 { - bestHost = host - } - } - } - - return bestHost -} - -// UnmarshalCaddyfile sets up the module from Caddyfile tokens. -func (r *LeastConnSelection) UnmarshalCaddyfile(d *caddyfile.Dispenser) error { - for d.Next() { - if d.NextArg() { - return d.ArgErr() - } - } - return nil -} - -// RoundRobinSelection is a policy that selects -// a host based on round-robin ordering. -type RoundRobinSelection struct { - robin uint32 -} - -// CaddyModule returns the Caddy module information. -func (RoundRobinSelection) CaddyModule() caddy.ModuleInfo { - return caddy.ModuleInfo{ - ID: "http.reverse_proxy.selection_policies.round_robin", - New: func() caddy.Module { return new(RoundRobinSelection) }, - } -} - -// Select returns an available host, if any. -func (r *RoundRobinSelection) Select(pool UpstreamPool, _ *http.Request, _ http.ResponseWriter) *Upstream { - n := uint32(len(pool)) - if n == 0 { - return nil - } - for i := uint32(0); i < n; i++ { - robin := atomic.AddUint32(&r.robin, 1) - host := pool[robin%n] - if host.Available() { - return host - } - } - return nil -} - -// UnmarshalCaddyfile sets up the module from Caddyfile tokens. -func (r *RoundRobinSelection) UnmarshalCaddyfile(d *caddyfile.Dispenser) error { - for d.Next() { - if d.NextArg() { - return d.ArgErr() - } - } - return nil -} - -// FirstSelection is a policy that selects -// the first available host. -type FirstSelection struct{} - -// CaddyModule returns the Caddy module information. 
-func (FirstSelection) CaddyModule() caddy.ModuleInfo { - return caddy.ModuleInfo{ - ID: "http.reverse_proxy.selection_policies.first", - New: func() caddy.Module { return new(FirstSelection) }, - } -} - -// Select returns an available host, if any. -func (FirstSelection) Select(pool UpstreamPool, _ *http.Request, _ http.ResponseWriter) *Upstream { - for _, host := range pool { - if host.Available() { - return host - } - } - return nil -} - -// UnmarshalCaddyfile sets up the module from Caddyfile tokens. -func (r *FirstSelection) UnmarshalCaddyfile(d *caddyfile.Dispenser) error { - for d.Next() { - if d.NextArg() { - return d.ArgErr() - } - } - return nil -} - -// IPHashSelection is a policy that selects a host -// based on hashing the remote IP of the request. -type IPHashSelection struct{} - -// CaddyModule returns the Caddy module information. -func (IPHashSelection) CaddyModule() caddy.ModuleInfo { - return caddy.ModuleInfo{ - ID: "http.reverse_proxy.selection_policies.ip_hash", - New: func() caddy.Module { return new(IPHashSelection) }, - } -} - -// Select returns an available host, if any. -func (IPHashSelection) Select(pool UpstreamPool, req *http.Request, _ http.ResponseWriter) *Upstream { - clientIP, _, err := net.SplitHostPort(req.RemoteAddr) - if err != nil { - clientIP = req.RemoteAddr - } - return hostByHashing(pool, clientIP) -} - -// UnmarshalCaddyfile sets up the module from Caddyfile tokens. -func (r *IPHashSelection) UnmarshalCaddyfile(d *caddyfile.Dispenser) error { - for d.Next() { - if d.NextArg() { - return d.ArgErr() - } - } - return nil -} - -// URIHashSelection is a policy that selects a -// host by hashing the request URI. -type URIHashSelection struct{} - -// CaddyModule returns the Caddy module information. -func (URIHashSelection) CaddyModule() caddy.ModuleInfo { - return caddy.ModuleInfo{ - ID: "http.reverse_proxy.selection_policies.uri_hash", - New: func() caddy.Module { return new(URIHashSelection) }, - } -} - -// Select returns an available host, if any. -func (URIHashSelection) Select(pool UpstreamPool, req *http.Request, _ http.ResponseWriter) *Upstream { - return hostByHashing(pool, req.RequestURI) -} - -// UnmarshalCaddyfile sets up the module from Caddyfile tokens. -func (r *URIHashSelection) UnmarshalCaddyfile(d *caddyfile.Dispenser) error { - for d.Next() { - if d.NextArg() { - return d.ArgErr() - } - } - return nil -} - -// HeaderHashSelection is a policy that selects -// a host based on a given request header. -type HeaderHashSelection struct { - // The HTTP header field whose value is to be hashed and used for upstream selection. - Field string `json:"field,omitempty"` -} - -// CaddyModule returns the Caddy module information. -func (HeaderHashSelection) CaddyModule() caddy.ModuleInfo { - return caddy.ModuleInfo{ - ID: "http.reverse_proxy.selection_policies.header", - New: func() caddy.Module { return new(HeaderHashSelection) }, - } -} - -// Select returns an available host, if any. -func (s HeaderHashSelection) Select(pool UpstreamPool, req *http.Request, _ http.ResponseWriter) *Upstream { - if s.Field == "" { - return nil - } - - // The Host header should be obtained from the req.Host field - // since net/http removes it from the header map. - if s.Field == "Host" && req.Host != "" { - return hostByHashing(pool, req.Host) - } - - val := req.Header.Get(s.Field) - if val == "" { - return RandomSelection{}.Select(pool, req, nil) - } - return hostByHashing(pool, val) -} - -// UnmarshalCaddyfile sets up the module from Caddyfile tokens. 
-func (s *HeaderHashSelection) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
- for d.Next() {
- if !d.NextArg() {
- return d.ArgErr()
- }
- s.Field = d.Val()
- }
- return nil
-}
-
-// CookieHashSelection is a policy that selects
-// a host based on a given cookie name.
-type CookieHashSelection struct {
- // The HTTP cookie name whose value is to be hashed and used for upstream selection.
- Name string `json:"name,omitempty"`
- // Secret to hash (Hmac256) chosen upstream in cookie
- Secret string `json:"secret,omitempty"`
-}
-
-// CaddyModule returns the Caddy module information.
-func (CookieHashSelection) CaddyModule() caddy.ModuleInfo {
- return caddy.ModuleInfo{
- ID: "http.reverse_proxy.selection_policies.cookie",
- New: func() caddy.Module { return new(CookieHashSelection) },
- }
-}
-
-// Select returns an available host, if any.
-func (s CookieHashSelection) Select(pool UpstreamPool, req *http.Request, w http.ResponseWriter) *Upstream {
- if s.Name == "" {
- s.Name = "lb"
- }
- cookie, err := req.Cookie(s.Name)
- // If there's no cookie, select new random host
- if err != nil || cookie == nil {
- return selectNewHostWithCookieHashSelection(pool, w, s.Secret, s.Name)
- }
- // If the cookie is present, loop over the available upstreams until we find a match
- cookieValue := cookie.Value
- for _, upstream := range pool {
- if !upstream.Available() {
- continue
- }
- sha, err := hashCookie(s.Secret, upstream.Dial)
- if err == nil && sha == cookieValue {
- return upstream
- }
- }
- // If there is no matching host, select new random host
- return selectNewHostWithCookieHashSelection(pool, w, s.Secret, s.Name)
-}
-
-// UnmarshalCaddyfile sets up the module from Caddyfile tokens. Syntax:
-// lb_policy cookie [<name> [<secret>]]
-//
-// By default name is `lb`
-func (s *CookieHashSelection) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
- args := d.RemainingArgs()
- switch len(args) {
- case 1:
- case 2:
- s.Name = args[1]
- case 3:
- s.Name = args[1]
- s.Secret = args[2]
- default:
- return d.ArgErr()
- }
- return nil
-}
-
-// Select a new Host randomly and add a sticky session cookie
-func selectNewHostWithCookieHashSelection(pool []*Upstream, w http.ResponseWriter, cookieSecret string, cookieName string) *Upstream {
- randomHost := selectRandomHost(pool)
-
- if randomHost != nil {
- // Hash (HMAC with some key for privacy) the upstream.Dial string as the cookie value
- sha, err := hashCookie(cookieSecret, randomHost.Dial)
- if err == nil {
- // write the cookie.
- http.SetCookie(w, &http.Cookie{Name: cookieName, Value: sha, Path: "/", Secure: false})
- }
- }
- return randomHost
-}
-
-// hashCookie hashes (HMAC 256) some data with the secret
-func hashCookie(secret string, data string) (string, error) {
- h := hmac.New(sha256.New, []byte(secret))
- _, err := h.Write([]byte(data))
- if err != nil {
- return "", err
- }
- return hex.EncodeToString(h.Sum(nil)), nil
-}
-
-// selectRandomHost returns a random available host
-func selectRandomHost(pool []*Upstream) *Upstream {
- // use reservoir sampling because the number of available
- // hosts isn't known: https://en.wikipedia.org/wiki/Reservoir_sampling
- var randomHost *Upstream
- var count int
- for _, upstream := range pool {
- if !upstream.Available() {
- continue
- }
- // (n % 1 == 0) holds for all n, therefore an
- // upstream will always be chosen if there is at
- // least one available
- count++
- if (weakrand.Int() % count) == 0 {
- randomHost = upstream
- }
- }
- return randomHost
-}
-
-// leastRequests returns the host with the
-// least number of active requests to it.
-// If more than one host has the same
-// least number of active requests, then
-// one of those is chosen at random.
-func leastRequests(upstreams []*Upstream) *Upstream {
- if len(upstreams) == 0 {
- return nil
- }
- var best []*Upstream
- var bestReqs int = -1
- for _, upstream := range upstreams {
- if upstream == nil {
- continue
- }
- reqs := upstream.NumRequests()
- if reqs == 0 {
- return upstream
- }
- // If bestReqs was just initialized to -1
- // we need to append upstream also
- if reqs <= bestReqs || bestReqs == -1 {
- bestReqs = reqs
- best = append(best, upstream)
- }
- }
- if len(best) == 0 {
- return nil
- }
- return best[weakrand.Intn(len(best))]
-}
-
-// hostByHashing returns an available host
-// from pool based on a hashable string s.
-func hostByHashing(pool []*Upstream, s string) *Upstream {
- poolLen := uint32(len(pool))
- if poolLen == 0 {
- return nil
- }
- index := hash(s) % poolLen
- for i := uint32(0); i < poolLen; i++ {
- upstream := pool[(index+i)%poolLen]
- if upstream.Available() {
- return upstream
- }
- }
- return nil
-}
-
-// hash calculates a fast hash based on s.
-func hash(s string) uint32 {
- h := fnv.New32a()
- _, _ = h.Write([]byte(s))
- return h.Sum32()
-}
-
-// Interface guards
-var (
- _ Selector = (*RandomSelection)(nil)
- _ Selector = (*RandomChoiceSelection)(nil)
- _ Selector = (*LeastConnSelection)(nil)
- _ Selector = (*RoundRobinSelection)(nil)
- _ Selector = (*FirstSelection)(nil)
- _ Selector = (*IPHashSelection)(nil)
- _ Selector = (*URIHashSelection)(nil)
- _ Selector = (*HeaderHashSelection)(nil)
- _ Selector = (*CookieHashSelection)(nil)
-
- _ caddy.Validator = (*RandomChoiceSelection)(nil)
- _ caddy.Provisioner = (*RandomChoiceSelection)(nil)
-
- _ caddyfile.Unmarshaler = (*RandomChoiceSelection)(nil)
-)
diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy/streaming.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy/streaming.go
deleted file mode 100644
index 1db352b7..00000000
--- a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy/streaming.go
+++ /dev/null
@@ -1,287 +0,0 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Most of the code in this file was initially borrowed from the Go -// standard library and modified; It had this copyright notice: -// Copyright 2011 The Go Authors - -package reverseproxy - -import ( - "context" - "io" - "mime" - "net/http" - "sync" - "time" - - "go.uber.org/zap" -) - -func (h Handler) handleUpgradeResponse(logger *zap.Logger, rw http.ResponseWriter, req *http.Request, res *http.Response) { - reqUpType := upgradeType(req.Header) - resUpType := upgradeType(res.Header) - // TODO: Update to use "net/http/internal/ascii" once we bumped - // the minimum Go version to 1.17. - // See https://github.com/golang/go/commit/5c489514bc5e61ad9b5b07bd7d8ec65d66a0512a - if reqUpType != resUpType { - h.logger.Debug("backend tried to switch to unexpected protocol via Upgrade header", - zap.String("backend_upgrade", resUpType), - zap.String("requested_upgrade", reqUpType)) - return - } - - hj, ok := rw.(http.Hijacker) - if !ok { - h.logger.Sugar().Errorf("can't switch protocols using non-Hijacker ResponseWriter type %T", rw) - return - } - backConn, ok := res.Body.(io.ReadWriteCloser) - if !ok { - h.logger.Error("internal error: 101 switching protocols response with non-writable body") - return - } - - // adopted from https://github.com/golang/go/commit/8bcf2834afdf6a1f7937390903a41518715ef6f5 - backConnCloseCh := make(chan struct{}) - go func() { - // Ensure that the cancelation of a request closes the backend. - // See issue https://golang.org/issue/35559. - select { - case <-req.Context().Done(): - case <-backConnCloseCh: - } - backConn.Close() - }() - defer close(backConnCloseCh) - - logger.Debug("upgrading connection") - conn, brw, err := hj.Hijack() - if err != nil { - h.logger.Error("hijack failed on protocol switch", zap.Error(err)) - return - } - defer conn.Close() - - start := time.Now() - defer func() { - logger.Debug("connection closed", zap.Duration("duration", time.Since(start))) - }() - - copyHeader(rw.Header(), res.Header) - - res.Header = rw.Header() - res.Body = nil // so res.Write only writes the headers; we have res.Body in backConn above - if err := res.Write(brw); err != nil { - h.logger.Debug("response write", zap.Error(err)) - return - } - if err := brw.Flush(); err != nil { - h.logger.Debug("response flush", zap.Error(err)) - return - } - - errc := make(chan error, 1) - spc := switchProtocolCopier{user: conn, backend: backConn} - go spc.copyToBackend(errc) - go spc.copyFromBackend(errc) - <-errc -} - -// flushInterval returns the p.FlushInterval value, conditionally -// overriding its value for a specific request/response. -func (h Handler) flushInterval(req *http.Request, res *http.Response) time.Duration { - resCTHeader := res.Header.Get("Content-Type") - resCT, _, err := mime.ParseMediaType(resCTHeader) - - // For Server-Sent Events responses, flush immediately. - // The MIME type is defined in https://www.w3.org/TR/eventsource/#text-event-stream - if err == nil && resCT == "text/event-stream" { - return -1 // negative means immediately - } - - // We might have the case of streaming for which Content-Length might be unset. 
- if res.ContentLength == -1 { - return -1 - } - - // for h2 and h2c upstream streaming data to client (issues #3556 and #3606) - if h.isBidirectionalStream(req, res) { - return -1 - } - - return time.Duration(h.FlushInterval) -} - -// isBidirectionalStream returns whether we should work in bi-directional stream mode. -// -// See https://github.com/caddyserver/caddy/pull/3620 for discussion of nuances. -func (h Handler) isBidirectionalStream(req *http.Request, res *http.Response) bool { - // We have to check the encoding here; only flush headers with identity encoding. - // Non-identity encoding might combine with "encode" directive, and in that case, - // if body size larger than enc.MinLength, upper level encode handle might have - // Content-Encoding header to write. - // (see https://github.com/caddyserver/caddy/issues/3606 for use case) - ae := req.Header.Get("Accept-Encoding") - - return req.ProtoMajor == 2 && - res.ProtoMajor == 2 && - res.ContentLength == -1 && - (ae == "identity" || ae == "") -} - -func (h Handler) copyResponse(dst io.Writer, src io.Reader, flushInterval time.Duration) error { - if flushInterval != 0 { - if wf, ok := dst.(writeFlusher); ok { - mlw := &maxLatencyWriter{ - dst: wf, - latency: flushInterval, - } - defer mlw.stop() - - // set up initial timer so headers get flushed even if body writes are delayed - mlw.flushPending = true - mlw.t = time.AfterFunc(flushInterval, mlw.delayedFlush) - - dst = mlw - } - } - - buf := streamingBufPool.Get().(*[]byte) - defer streamingBufPool.Put(buf) - _, err := h.copyBuffer(dst, src, *buf) - return err -} - -// copyBuffer returns any write errors or non-EOF read errors, and the amount -// of bytes written. -func (h Handler) copyBuffer(dst io.Writer, src io.Reader, buf []byte) (int64, error) { - if len(buf) == 0 { - buf = make([]byte, defaultBufferSize) - } - var written int64 - for { - nr, rerr := src.Read(buf) - if rerr != nil && rerr != io.EOF && rerr != context.Canceled { - // TODO: this could be useful to know (indeed, it revealed an error in our - // fastcgi PoC earlier; but it's this single error report here that necessitates - // a function separate from io.CopyBuffer, since io.CopyBuffer does not distinguish - // between read or write errors; in a reverse proxy situation, write errors are not - // something we need to report to the client, but read errors are a problem on our - // end for sure. so we need to decide what we want.) 
- // p.logf("copyBuffer: ReverseProxy read error during body copy: %v", rerr) - h.logger.Error("reading from backend", zap.Error(rerr)) - } - if nr > 0 { - nw, werr := dst.Write(buf[:nr]) - if nw > 0 { - written += int64(nw) - } - if werr != nil { - return written, werr - } - if nr != nw { - return written, io.ErrShortWrite - } - } - if rerr != nil { - if rerr == io.EOF { - rerr = nil - } - return written, rerr - } - } -} - -type writeFlusher interface { - io.Writer - http.Flusher -} - -type maxLatencyWriter struct { - dst writeFlusher - latency time.Duration // non-zero; negative means to flush immediately - - mu sync.Mutex // protects t, flushPending, and dst.Flush - t *time.Timer - flushPending bool -} - -func (m *maxLatencyWriter) Write(p []byte) (n int, err error) { - m.mu.Lock() - defer m.mu.Unlock() - n, err = m.dst.Write(p) - if m.latency < 0 { - m.dst.Flush() - return - } - if m.flushPending { - return - } - if m.t == nil { - m.t = time.AfterFunc(m.latency, m.delayedFlush) - } else { - m.t.Reset(m.latency) - } - m.flushPending = true - return -} - -func (m *maxLatencyWriter) delayedFlush() { - m.mu.Lock() - defer m.mu.Unlock() - if !m.flushPending { // if stop was called but AfterFunc already started this goroutine - return - } - m.dst.Flush() - m.flushPending = false -} - -func (m *maxLatencyWriter) stop() { - m.mu.Lock() - defer m.mu.Unlock() - m.flushPending = false - if m.t != nil { - m.t.Stop() - } -} - -// switchProtocolCopier exists so goroutines proxying data back and -// forth have nice names in stacks. -type switchProtocolCopier struct { - user, backend io.ReadWriter -} - -func (c switchProtocolCopier) copyFromBackend(errc chan<- error) { - _, err := io.Copy(c.user, c.backend) - errc <- err -} - -func (c switchProtocolCopier) copyToBackend(errc chan<- error) { - _, err := io.Copy(c.backend, c.user) - errc <- err -} - -var streamingBufPool = sync.Pool{ - New: func() interface{} { - // The Pool's New function should generally only return pointer - // types, since a pointer can be put into the return interface - // value without an allocation - // - (from the package docs) - b := make([]byte, defaultBufferSize) - return &b - }, -} - -const defaultBufferSize = 32 * 1024 diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/routes.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/routes.go deleted file mode 100644 index ebd763c7..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/routes.go +++ /dev/null @@ -1,321 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package caddyhttp - -import ( - "encoding/json" - "fmt" - "net/http" - - "github.com/caddyserver/caddy/v2" -) - -// Route consists of a set of rules for matching HTTP requests, -// a list of handlers to execute, and optional flow control -// parameters which customize the handling of HTTP requests -// in a highly flexible and performant manner. 
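Before the Route type itself, it may help to see the shape of the chain it feeds: RouteList.Compile below folds middleware right to left, so the first route ends up outermost and requests traverse routes in declaration order. A self-contained sketch of that fold over simplified types:

package main

import "fmt"

type handler func(path string)
type middleware func(handler) handler

// compile folds the middleware slice right to left so mid[0] is outermost,
// mirroring how RouteList.Compile stacks routes.
func compile(mid []middleware, terminal handler) handler {
	stack := terminal
	for i := len(mid) - 1; i >= 0; i-- {
		stack = mid[i](stack)
	}
	return stack
}

// tag wraps a handler with enter/leave tracing to make the order visible.
func tag(name string) middleware {
	return func(next handler) handler {
		return func(path string) {
			fmt.Println("enter", name)
			next(path)
			fmt.Println("leave", name)
		}
	}
}

func main() {
	h := compile([]middleware{tag("encode"), tag("templates")}, func(path string) {
		fmt.Println("serve", path)
	})
	h("/index.html")
	// enter encode, enter templates, serve /index.html, leave templates, leave encode
}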
-type Route struct {
- // Group is an optional name for a group to which this
- // route belongs. Grouping a route makes it mutually
- // exclusive with others in its group; if a route belongs
- // to a group, only the first matching route in that group
- // will be executed.
- Group string `json:"group,omitempty"`
-
- // The matcher sets which will be used to qualify this
- // route for a request (essentially the "if" statement
- // of this route). Each matcher set is OR'ed, but matchers
- // within a set are AND'ed together.
- MatcherSetsRaw RawMatcherSets `json:"match,omitempty" caddy:"namespace=http.matchers"`
-
- // The list of handlers for this route. Upon matching a request, they are chained
- // together in a middleware fashion: requests flow from the first handler to the last
- // (top of the list to the bottom), with the possibility that any handler could stop
- // the chain and/or return an error. Responses flow back through the chain (bottom of
- // the list to the top) as they are written out to the client.
- //
- // Not all handlers call the next handler in the chain. For example, the reverse_proxy
- // handler always sends a request upstream or returns an error. Thus, configuring
- // handlers after reverse_proxy in the same route is illogical, since they would never
- // be executed. You will want to put handlers which originate the response at the very
- // end of your route(s). The documentation for a module should state whether it invokes
- // the next handler, but sometimes it is common sense.
- //
- // Some handlers manipulate the response. Remember that requests flow down the list, and
- // responses flow up the list.
- //
- // For example, if you wanted to use both `templates` and `encode` handlers, you would
- // need to put `templates` after `encode` in your route, because responses flow up.
- // Thus, `templates` will be able to parse and execute the plain-text response as a
- // template, and then return it up to the `encode` handler which will then compress it
- // into a binary format.
- //
- // If `templates` came before `encode`, then `encode` would write a compressed,
- // binary-encoded response to `templates` which would not be able to parse the response
- // properly.
- //
- // The correct order, then, is this:
- //
- // [
- // {"handler": "encode"},
- // {"handler": "templates"},
- // {"handler": "file_server"}
- // ]
- //
- // The request flows ⬇️ DOWN (`encode` -> `templates` -> `file_server`).
- //
- // 1. First, `encode` will choose how to `encode` the response and wrap the response.
- // 2. Then, `templates` will wrap the response with a buffer.
- // 3. Finally, `file_server` will originate the content from a file.
- //
- // The response flows ⬆️ UP (`file_server` -> `templates` -> `encode`):
- //
- // 1. First, `file_server` will write the file to the response.
- // 2. That write will be buffered and then executed by `templates`.
- // 3. Lastly, the write from `templates` will flow into `encode` which will compress the stream.
- //
- // If you think of routes in this way, it will be easy and even fun to solve the puzzle of writing correct routes.
- HandlersRaw []json.RawMessage `json:"handle,omitempty" caddy:"namespace=http.handlers inline_key=handler"`
-
- // If true, no more routes will be executed after this one.
- Terminal bool `json:"terminal,omitempty"` - - // decoded values - MatcherSets MatcherSets `json:"-"` - Handlers []MiddlewareHandler `json:"-"` - - middleware []Middleware -} - -// Empty returns true if the route has all zero/default values. -func (r Route) Empty() bool { - return len(r.MatcherSetsRaw) == 0 && - len(r.MatcherSets) == 0 && - len(r.HandlersRaw) == 0 && - len(r.Handlers) == 0 && - !r.Terminal && - r.Group == "" -} - -// RouteList is a list of server routes that can -// create a middleware chain. -type RouteList []Route - -// Provision sets up both the matchers and handlers in the routes. -func (routes RouteList) Provision(ctx caddy.Context) error { - err := routes.ProvisionMatchers(ctx) - if err != nil { - return err - } - return routes.ProvisionHandlers(ctx) -} - -// ProvisionMatchers sets up all the matchers by loading the -// matcher modules. Only call this method directly if you need -// to set up matchers and handlers separately without having -// to provision a second time; otherwise use Provision instead. -func (routes RouteList) ProvisionMatchers(ctx caddy.Context) error { - for i := range routes { - // matchers - matchersIface, err := ctx.LoadModule(&routes[i], "MatcherSetsRaw") - if err != nil { - return fmt.Errorf("route %d: loading matcher modules: %v", i, err) - } - err = routes[i].MatcherSets.FromInterface(matchersIface) - if err != nil { - return fmt.Errorf("route %d: %v", i, err) - } - } - return nil -} - -// ProvisionHandlers sets up all the handlers by loading the -// handler modules. Only call this method directly if you need -// to set up matchers and handlers separately without having -// to provision a second time; otherwise use Provision instead. -func (routes RouteList) ProvisionHandlers(ctx caddy.Context) error { - for i := range routes { - handlersIface, err := ctx.LoadModule(&routes[i], "HandlersRaw") - if err != nil { - return fmt.Errorf("route %d: loading handler modules: %v", i, err) - } - for _, handler := range handlersIface.([]interface{}) { - routes[i].Handlers = append(routes[i].Handlers, handler.(MiddlewareHandler)) - } - - // pre-compile the middleware handler chain - for _, midhandler := range routes[i].Handlers { - routes[i].middleware = append(routes[i].middleware, wrapMiddleware(ctx, midhandler)) - } - } - return nil -} - -// Compile prepares a middleware chain from the route list. -// This should only be done once: after all the routes have -// been provisioned, and before serving requests. -func (routes RouteList) Compile(next Handler) Handler { - mid := make([]Middleware, 0, len(routes)) - for _, route := range routes { - mid = append(mid, wrapRoute(route)) - } - stack := next - for i := len(mid) - 1; i >= 0; i-- { - stack = mid[i](stack) - } - return stack -} - -// wrapRoute wraps route with a middleware and handler so that it can -// be chained in and defer evaluation of its matchers to request-time. -// Like wrapMiddleware, it is vital that this wrapping takes place in -// its own stack frame so as to not overwrite the reference to the -// intended route by looping and changing the reference each time. -func wrapRoute(route Route) Middleware { - return func(next Handler) Handler { - return HandlerFunc(func(rw http.ResponseWriter, req *http.Request) error { - // TODO: Update this comment, it seems we've moved the copy into the handler? 
- // copy the next handler (it's an interface, so it's just
- // a very lightweight copy of a pointer); this is important
- // because this is a closure to the func below, which
- // re-assigns the value as it compiles the middleware stack;
- // if we don't make this copy, we'd affect the underlying
- // pointer for all future requests (yikes); we could
- // alternatively solve this by moving the func below out of
- // this closure and into a standalone package-level func,
- // but I just thought this made more sense
- nextCopy := next
-
- // route must match at least one of the matcher sets
- if !route.MatcherSets.AnyMatch(req) {
- return nextCopy.ServeHTTP(rw, req)
- }
-
- // if route is part of a group, ensure only the
- // first matching route in the group is applied
- if route.Group != "" {
- groups := req.Context().Value(routeGroupCtxKey).(map[string]struct{})
-
- if _, ok := groups[route.Group]; ok {
- // this group has already been
- // satisfied by a matching route
- return nextCopy.ServeHTTP(rw, req)
- }
-
- // this matching route satisfies the group
- groups[route.Group] = struct{}{}
- }
-
- // make terminal routes terminate
- if route.Terminal {
- if _, ok := req.Context().Value(ErrorCtxKey).(error); ok {
- nextCopy = errorEmptyHandler
- } else {
- nextCopy = emptyHandler
- }
- }
-
- // compile this route's handler stack
- for i := len(route.middleware) - 1; i >= 0; i-- {
- nextCopy = route.middleware[i](nextCopy)
- }
-
- return nextCopy.ServeHTTP(rw, req)
- })
- }
-}
-
-// wrapMiddleware wraps mh such that it can be correctly
-// appended to a list of middleware in preparation for
-// compiling into a handler chain. We can't do this inline
-// inside a loop, because it relies on a reference to mh
-// not changing until the execution of its handler (which
-// is deferred by multiple func closures). In other words,
-// we need to pull this particular MiddlewareHandler
-// pointer into its own stack frame to preserve it so it
-// won't be overwritten in future loop iterations.
-func wrapMiddleware(ctx caddy.Context, mh MiddlewareHandler) Middleware {
- // wrap the middleware with metrics instrumentation
- metricsHandler := newMetricsInstrumentedHandler(caddy.GetModuleName(mh), mh)
-
- return func(next Handler) Handler {
- // copy the next handler (it's an interface, so it's
- // just a very lightweight copy of a pointer); this
- // is a safeguard against the handler changing the
- // value, which could affect future requests (yikes)
- nextCopy := next
-
- return HandlerFunc(func(w http.ResponseWriter, r *http.Request) error {
- // TODO: This is where request tracing could be implemented
- // TODO: see what the std lib gives us in terms of stack tracing too
- return metricsHandler.ServeHTTP(w, r, nextCopy)
- })
- }
-}
-
-// MatcherSet is a set of matchers which
-// must all match in order for the request
-// to be matched successfully.
-type MatcherSet []RequestMatcher
-
-// Match returns true if the request matches all
-// matchers in mset or if there are no matchers.
-func (mset MatcherSet) Match(r *http.Request) bool {
- for _, m := range mset {
- if !m.Match(r) {
- return false
- }
- }
- return true
-}
-
-// RawMatcherSets is a group of matcher sets
-// in their raw, JSON form.
-type RawMatcherSets []caddy.ModuleMap
-
-// MatcherSets is a group of matcher sets capable
-// of checking whether a request matches any of
-// the sets.
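In short: matchers within a set are AND'ed, the sets themselves are OR'ed, and an empty list matches everything, as AnyMatch below encodes. A self-contained sketch of those semantics over a simplified matcher type:

package main

import (
	"fmt"
	"net/http"
	"strings"
)

type matcher func(*http.Request) bool
type matcherSet []matcher

// match requires every matcher in the set to pass (AND).
func (ms matcherSet) match(r *http.Request) bool {
	for _, m := range ms {
		if !m(r) {
			return false
		}
	}
	return true
}

// anyMatch passes if any set matches (OR); no sets at all means match-all.
func anyMatch(sets []matcherSet, r *http.Request) bool {
	for _, ms := range sets {
		if ms.match(r) {
			return true
		}
	}
	return len(sets) == 0
}

func main() {
	r, _ := http.NewRequest(http.MethodGet, "http://example.com/api/v1", nil)
	api := matcherSet{
		func(r *http.Request) bool { return r.URL.Host == "example.com" },
		func(r *http.Request) bool { return strings.HasPrefix(r.URL.Path, "/api/") },
	}
	fmt.Println(anyMatch([]matcherSet{api}, r)) // true
}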
-type MatcherSets []MatcherSet - -// AnyMatch returns true if req matches any of the -// matcher sets in ms or if there are no matchers, -// in which case the request always matches. -func (ms MatcherSets) AnyMatch(req *http.Request) bool { - for _, m := range ms { - if m.Match(req) { - return true - } - } - return len(ms) == 0 -} - -// FromInterface fills ms from an interface{} value obtained from LoadModule. -func (ms *MatcherSets) FromInterface(matcherSets interface{}) error { - for _, matcherSetIfaces := range matcherSets.([]map[string]interface{}) { - var matcherSet MatcherSet - for _, matcher := range matcherSetIfaces { - reqMatcher, ok := matcher.(RequestMatcher) - if !ok { - return fmt.Errorf("decoded module is not a RequestMatcher: %#v", matcher) - } - matcherSet = append(matcherSet, reqMatcher) - } - *ms = append(*ms, matcherSet) - } - return nil -} - -var routeGroupCtxKey = caddy.CtxKey("route_group") diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/server.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/server.go deleted file mode 100644 index 80948325..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/server.go +++ /dev/null @@ -1,645 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package caddyhttp - -import ( - "context" - "encoding/json" - "fmt" - "net" - "net/http" - "net/url" - "runtime" - "strings" - "time" - - "github.com/caddyserver/caddy/v2" - "github.com/caddyserver/caddy/v2/modules/caddytls" - "github.com/lucas-clemente/quic-go/http3" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" -) - -// Server describes an HTTP server. -type Server struct { - // Socket addresses to which to bind listeners. Accepts - // [network addresses](/docs/conventions#network-addresses) - // that may include port ranges. Listener addresses must - // be unique; they cannot be repeated across all defined - // servers. - Listen []string `json:"listen,omitempty"` - - // A list of listener wrapper modules, which can modify the behavior - // of the base listener. They are applied in the given order. - ListenerWrappersRaw []json.RawMessage `json:"listener_wrappers,omitempty" caddy:"namespace=caddy.listeners inline_key=wrapper"` - - // How long to allow a read from a client's upload. Setting this - // to a short, non-zero value can mitigate slowloris attacks, but - // may also affect legitimately slow clients. - ReadTimeout caddy.Duration `json:"read_timeout,omitempty"` - - // ReadHeaderTimeout is like ReadTimeout but for request headers. - ReadHeaderTimeout caddy.Duration `json:"read_header_timeout,omitempty"` - - // WriteTimeout is how long to allow a write to a client. Note - // that setting this to a small value when serving large files - // may negatively affect legitimately slow clients. - WriteTimeout caddy.Duration `json:"write_timeout,omitempty"` - - // IdleTimeout is the maximum time to wait for the next request - // when keep-alives are enabled. 
If zero, a default timeout of
- // 5m is applied to help avoid resource exhaustion.
- IdleTimeout caddy.Duration `json:"idle_timeout,omitempty"`
-
- // MaxHeaderBytes is the maximum size to parse from a client's
- // HTTP request headers.
- MaxHeaderBytes int `json:"max_header_bytes,omitempty"`
-
- // Routes describes how this server will handle requests.
- // Routes are executed sequentially. First a route's matchers
- // are evaluated, then its grouping. If it matches and has
- // not been mutually-excluded by its grouping, then its
- // handlers are executed sequentially. The sequence of invoked
- // handlers comprises a compiled middleware chain that flows
- // from each matching route and its handlers to the next.
- //
- // By default, all unrouted requests receive a 200 OK response
- // to indicate the server is working.
- Routes RouteList `json:"routes,omitempty"`
-
- // Errors is how this server will handle errors returned from any
- // of the handlers in the primary routes. If the primary handler
- // chain returns an error, the error along with its recommended
- // status code are bubbled back up to the HTTP server which
- // executes a separate error route, specified using this property.
- // The error routes work exactly like the normal routes.
- Errors *HTTPErrorConfig `json:"errors,omitempty"`
-
- // How to handle TLS connections. At least one policy is
- // required to enable HTTPS on this server if automatic
- // HTTPS is disabled or does not apply.
- TLSConnPolicies caddytls.ConnectionPolicies `json:"tls_connection_policies,omitempty"`
-
- // AutoHTTPS configures or disables automatic HTTPS within this server.
- // HTTPS is enabled automatically and by default when qualifying names
- // are present in a Host matcher and/or when the server is listening
- // only on the HTTPS port.
- AutoHTTPS *AutoHTTPSConfig `json:"automatic_https,omitempty"`
-
- // If true, will require that a request's Host header match
- // the value of the ServerName sent by the client's TLS
- // ClientHello; often a necessary safeguard when using TLS
- // client authentication.
- StrictSNIHost *bool `json:"strict_sni_host,omitempty"`
-
- // Enables access logging and configures how access logs are handled
- // in this server. To minimally enable access logs, simply set this
- // to a non-null, empty struct.
- Logs *ServerLogConfig `json:"logs,omitempty"`
-
- // Enable experimental HTTP/3 support. Note that HTTP/3 is not a
- // finished standard and has extremely limited client support.
- // This field is not subject to compatibility promises.
- ExperimentalHTTP3 bool `json:"experimental_http3,omitempty"`
-
- // Enables H2C ("Cleartext HTTP/2" or "H2 over TCP") support,
- // which will serve HTTP/2 over plaintext TCP connections if
- // the client supports it. Because this is not implemented by the
- // Go standard library, using H2C is incompatible with most
- // of the other options for this server. Do not enable this
- // only to achieve maximum client compatibility. In practice,
- // very few clients implement H2C, and even fewer require it.
- // This setting applies only to unencrypted HTTP listeners.
- // ⚠️ Experimental feature; subject to change or removal.
- AllowH2C bool `json:"allow_h2c,omitempty"` - - name string - - primaryHandlerChain Handler - errorHandlerChain Handler - listenerWrappers []caddy.ListenerWrapper - - tlsApp *caddytls.TLS - logger *zap.Logger - accessLogger *zap.Logger - errorLogger *zap.Logger - - h3server *http3.Server -} - -// ServeHTTP is the entry point for all HTTP requests. -func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Server", "Caddy") - - if s.h3server != nil { - err := s.h3server.SetQuicHeaders(w.Header()) - if err != nil { - s.logger.Error("setting HTTP/3 Alt-Svc header", zap.Error(err)) - } - } - - repl := caddy.NewReplacer() - r = PrepareRequest(r, repl, w, s) - - // encode the request for logging purposes before - // it enters any handler chain; this is necessary - // to capture the original request in case it gets - // modified during handling - loggableReq := zap.Object("request", LoggableHTTPRequest{r}) - errLog := s.errorLogger.With(loggableReq) - - var duration time.Duration - - if s.shouldLogRequest(r) { - wrec := NewResponseRecorder(w, nil, nil) - w = wrec - - // capture the original version of the request - accLog := s.accessLogger.With(loggableReq) - - defer func() { - repl.Set("http.response.status", wrec.Status()) - repl.Set("http.response.size", wrec.Size()) - repl.Set("http.response.duration", duration) - - logger := accLog - if s.Logs != nil { - logger = s.Logs.wrapLogger(logger, r.Host) - } - - log := logger.Info - if wrec.Status() >= 400 { - log = logger.Error - } - - userID, _ := repl.GetString("http.auth.user.id") - - log("handled request", - zap.String("common_log", repl.ReplaceAll(commonLogFormat, commonLogEmptyValue)), - zap.String("user_id", userID), - zap.Duration("duration", duration), - zap.Int("size", wrec.Size()), - zap.Int("status", wrec.Status()), - zap.Object("resp_headers", LoggableHTTPHeader(wrec.Header())), - ) - }() - } - - start := time.Now() - - // guarantee ACME HTTP challenges; handle them - // separately from any user-defined handlers - if s.tlsApp.HandleHTTPChallenge(w, r) { - duration = time.Since(start) - return - } - - // execute the primary handler chain - err := s.primaryHandlerChain.ServeHTTP(w, r) - duration = time.Since(start) - - // if no errors, we're done! - if err == nil { - return - } - - // restore original request before invoking error handler chain (issue #3717) - // TODO: this does not restore original headers, if modified (for efficiency) - origReq := r.Context().Value(OriginalRequestCtxKey).(http.Request) - r.Method = origReq.Method - r.RemoteAddr = origReq.RemoteAddr - r.RequestURI = origReq.RequestURI - cloneURL(origReq.URL, r.URL) - - // prepare the error log - logger := errLog - if s.Logs != nil { - logger = s.Logs.wrapLogger(logger, r.Host) - } - logger = logger.With(zap.Duration("duration", duration)) - - // get the values that will be used to log the error - errStatus, errMsg, errFields := errLogValues(err) - - // add HTTP error information to request context - r = s.Errors.WithError(r, err) - - if s.Errors != nil && len(s.Errors.Routes) > 0 { - // execute user-defined error handling route - err2 := s.errorHandlerChain.ServeHTTP(w, r) - if err2 == nil { - // user's error route handled the error response - // successfully, so now just log the error - if errStatus >= 500 { - logger.Error(errMsg, errFields...) - } - } else { - // well... this is awkward - errFields = append([]zapcore.Field{ - zap.String("error", err2.Error()), - zap.Namespace("first_error"), - zap.String("msg", errMsg), - }, errFields...) 
- logger.Error("error handling handler error", errFields...) - if handlerErr, ok := err.(HandlerError); ok { - w.WriteHeader(handlerErr.StatusCode) - } else { - w.WriteHeader(http.StatusInternalServerError) - } - } - } else { - if errStatus >= 500 { - logger.Error(errMsg, errFields...) - } - w.WriteHeader(errStatus) - } -} - -// wrapPrimaryRoute wraps stack (a compiled middleware handler chain) -// in s.enforcementHandler which performs crucial security checks, etc. -func (s *Server) wrapPrimaryRoute(stack Handler) Handler { - return HandlerFunc(func(w http.ResponseWriter, r *http.Request) error { - return s.enforcementHandler(w, r, stack) - }) -} - -// enforcementHandler is an implicit middleware which performs -// standard checks before executing the HTTP middleware chain. -func (s *Server) enforcementHandler(w http.ResponseWriter, r *http.Request, next Handler) error { - // enforce strict host matching, which ensures that the SNI - // value (if any), matches the Host header; essential for - // servers that rely on TLS ClientAuth sharing a listener - // with servers that do not; if not enforced, client could - // bypass by sending benign SNI then restricted Host header - if s.StrictSNIHost != nil && *s.StrictSNIHost && r.TLS != nil { - hostname, _, err := net.SplitHostPort(r.Host) - if err != nil { - hostname = r.Host // OK; probably lacked port - } - if !strings.EqualFold(r.TLS.ServerName, hostname) { - err := fmt.Errorf("strict host matching: TLS ServerName (%s) and HTTP Host (%s) values differ", - r.TLS.ServerName, hostname) - r.Close = true - return Error(http.StatusForbidden, err) - } - } - return next.ServeHTTP(w, r) -} - -// listenersUseAnyPortOtherThan returns true if there are any -// listeners in s that use a port which is not otherPort. -func (s *Server) listenersUseAnyPortOtherThan(otherPort int) bool { - for _, lnAddr := range s.Listen { - laddrs, err := caddy.ParseNetworkAddress(lnAddr) - if err != nil { - continue - } - if uint(otherPort) > laddrs.EndPort || uint(otherPort) < laddrs.StartPort { - return true - } - } - return false -} - -// hasListenerAddress returns true if s has a listener -// at the given address fullAddr. Currently, fullAddr -// must represent exactly one socket address (port -// ranges are not supported) -func (s *Server) hasListenerAddress(fullAddr string) bool { - laddrs, err := caddy.ParseNetworkAddress(fullAddr) - if err != nil { - return false - } - if laddrs.PortRangeSize() != 1 { - return false // TODO: support port ranges - } - - for _, lnAddr := range s.Listen { - thisAddrs, err := caddy.ParseNetworkAddress(lnAddr) - if err != nil { - continue - } - if thisAddrs.Network != laddrs.Network { - continue - } - - // Apparently, Linux requires all bound ports to be distinct - // *regardless of host interface* even if the addresses are - // in fact different; binding "192.168.0.1:9000" and then - // ":9000" will fail for ":9000" because "address is already - // in use" even though it's not, and the same bindings work - // fine on macOS. I also found on Linux that listening on - // "[::]:9000" would fail with a similar error, except with - // the address "0.0.0.0:9000", as if deliberately ignoring - // that I specified the IPv6 interface explicitly. This seems - // to be a major bug in the Linux network stack and I don't - // know why it hasn't been fixed yet, so for now we have to - // special-case ourselves around Linux like a doting parent. 
- // The second issue seems very similar to a discussion here: - // https://github.com/nodejs/node/issues/9390 - // - // This is very easy to reproduce by creating an HTTP server - // that listens to both addresses or just one with a host - // interface; or for a more confusing reproduction, try - // listening on "127.0.0.1:80" and ":443" and you'll see - // the error, if you take away the GOOS condition below. - // - // So, an address is equivalent if the port is in the port - // range, and if not on Linux, the host is the same... sigh. - if (runtime.GOOS == "linux" || thisAddrs.Host == laddrs.Host) && - (laddrs.StartPort <= thisAddrs.EndPort) && - (laddrs.StartPort >= thisAddrs.StartPort) { - return true - } - } - return false -} - -func (s *Server) hasTLSClientAuth() bool { - for _, cp := range s.TLSConnPolicies { - if cp.ClientAuthentication != nil && cp.ClientAuthentication.Active() { - return true - } - } - return false -} - -// findLastRouteWithHostMatcher returns the index of the last route -// in the server which has a host matcher. Used during Automatic HTTPS -// to determine where to insert the HTTP->HTTPS redirect route, such -// that it is after any other host matcher but before any "catch-all" -// route without a host matcher. -func (s *Server) findLastRouteWithHostMatcher() int { - foundHostMatcher := false - lastIndex := len(s.Routes) - - for i, route := range s.Routes { - // since we want to break out of an inner loop, use a closure - // to allow us to use 'return' when we found a host matcher - found := (func() bool { - for _, sets := range route.MatcherSets { - for _, matcher := range sets { - switch matcher.(type) { - case *MatchHost: - foundHostMatcher = true - return true - } - } - } - return false - })() - - // if we found the host matcher, change the lastIndex to - // just after the current route - if found { - lastIndex = i + 1 - } - } - - // If we didn't actually find a host matcher, return 0 - // because that means every defined route was a "catch-all". - // See https://caddy.community/t/how-to-set-priority-in-caddyfile/13002/8 - if !foundHostMatcher { - return 0 - } - - return lastIndex -} - -// HTTPErrorConfig determines how to handle errors -// from the HTTP handlers. -type HTTPErrorConfig struct { - // The routes to evaluate after the primary handler - // chain returns an error. In an error route, extra - // placeholders are available: - // - // Placeholder | Description - // ------------|--------------- - // `{http.error.status_code}` | The recommended HTTP status code - // `{http.error.status_text}` | The status text associated with the recommended status code - // `{http.error.message}` | The error message - // `{http.error.trace}` | The origin of the error - // `{http.error.id}` | An identifier for this occurrence of the error - Routes RouteList `json:"routes,omitempty"` -} - -// WithError makes a shallow copy of r to add the error to its -// context, and sets placeholders on the request's replacer -// related to err. It returns the modified request which has -// the error information in its context and replacer. It -// overwrites any existing error values that are stored. 
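
findLastRouteWithHostMatcher above decides where automatic HTTPS inserts its HTTP-to-HTTPS redirect route. Reduced to a slice of flags, the index arithmetic is this; findRedirectIndex is a hypothetical name for a sketch of the same walk, not the vendored code:

package main

import "fmt"

// findRedirectIndex mirrors findLastRouteWithHostMatcher: each entry
// says whether the route at that position has a host matcher. The
// redirect belongs after the last host-matched route but before any
// catch-all; if no route matches on host, it goes first (index 0).
func findRedirectIndex(hasHostMatcher []bool) int {
	found := false
	last := len(hasHostMatcher)
	for i, h := range hasHostMatcher {
		if h {
			found = true
			last = i + 1
		}
	}
	if !found {
		return 0
	}
	return last
}

func main() {
	fmt.Println(findRedirectIndex([]bool{true, false, true, false})) // 3
	fmt.Println(findRedirectIndex([]bool{false, false}))             // 0
}
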
-func (*HTTPErrorConfig) WithError(r *http.Request, err error) *http.Request { - // add the raw error value to the request context - // so it can be accessed by error handlers - c := context.WithValue(r.Context(), ErrorCtxKey, err) - r = r.WithContext(c) - - // add error values to the replacer - repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer) - repl.Set("http.error", err) - if handlerErr, ok := err.(HandlerError); ok { - repl.Set("http.error.status_code", handlerErr.StatusCode) - repl.Set("http.error.status_text", http.StatusText(handlerErr.StatusCode)) - repl.Set("http.error.trace", handlerErr.Trace) - repl.Set("http.error.id", handlerErr.ID) - } - - return r -} - -// shouldLogRequest returns true if this request should be logged. -func (s *Server) shouldLogRequest(r *http.Request) bool { - if s.accessLogger == nil || s.Logs == nil { - // logging is disabled - return false - } - for _, dh := range s.Logs.SkipHosts { - // logging for this particular host is disabled - if r.Host == dh { - return false - } - } - if _, ok := s.Logs.LoggerNames[r.Host]; ok { - // this host is mapped to a particular logger name - return true - } - if s.Logs.SkipUnmappedHosts { - // this host is not mapped and thus must not be logged - return false - } - return true -} - -// ServerLogConfig describes a server's logging configuration. If -// enabled without customization, all requests to this server are -// logged to the default logger; logger destinations may be -// customized per-request-host. -type ServerLogConfig struct { - // The default logger name for all logs emitted by this server for - // hostnames that are not in the LoggerNames (logger_names) map. - DefaultLoggerName string `json:"default_logger_name,omitempty"` - - // LoggerNames maps request hostnames to a custom logger name. - // For example, a mapping of "example.com" to "example" would - // cause access logs from requests with a Host of example.com - // to be emitted by a logger named "http.log.access.example". - LoggerNames map[string]string `json:"logger_names,omitempty"` - - // By default, all requests to this server will be logged if - // access logging is enabled. This field lists the request - // hosts for which access logging should be disabled. - SkipHosts []string `json:"skip_hosts,omitempty"` - - // If true, requests to any host not appearing in the - // LoggerNames (logger_names) map will not be logged. - SkipUnmappedHosts bool `json:"skip_unmapped_hosts,omitempty"` -} - -// wrapLogger wraps logger in a logger named according to user preferences for the given host. -func (slc ServerLogConfig) wrapLogger(logger *zap.Logger, host string) *zap.Logger { - if loggerName := slc.getLoggerName(host); loggerName != "" { - return logger.Named(loggerName) - } - return logger -} - -func (slc ServerLogConfig) getLoggerName(host string) string { - tryHost := func(key string) (string, bool) { - // first try exact match - if loggerName, ok := slc.LoggerNames[key]; ok { - return loggerName, ok - } - // strip port and try again (i.e. 
Host header of "example.com:1234" should - // match "example.com" if there is no "example.com:1234" in the map) - hostOnly, _, err := net.SplitHostPort(key) - if err != nil { - return "", false - } - loggerName, ok := slc.LoggerNames[hostOnly] - return loggerName, ok - } - - // try the exact hostname first - if loggerName, ok := tryHost(host); ok { - return loggerName - } - - // try matching wildcard domains if other non-specific loggers exist - labels := strings.Split(host, ".") - for i := range labels { - if labels[i] == "" { - continue - } - labels[i] = "*" - wildcardHost := strings.Join(labels, ".") - if loggerName, ok := tryHost(wildcardHost); ok { - return loggerName - } - } - - return slc.DefaultLoggerName -} - -// PrepareRequest fills the request r for use in a Caddy HTTP handler chain. w and s can -// be nil, but the handlers will lose response placeholders and access to the server. -func PrepareRequest(r *http.Request, repl *caddy.Replacer, w http.ResponseWriter, s *Server) *http.Request { - // set up the context for the request - ctx := context.WithValue(r.Context(), caddy.ReplacerCtxKey, repl) - ctx = context.WithValue(ctx, ServerCtxKey, s) - ctx = context.WithValue(ctx, VarsCtxKey, make(map[string]interface{})) - ctx = context.WithValue(ctx, routeGroupCtxKey, make(map[string]struct{})) - var url2 url.URL // avoid letting this escape to the heap - ctx = context.WithValue(ctx, OriginalRequestCtxKey, originalRequest(r, &url2)) - r = r.WithContext(ctx) - - // once the pointer to the request won't change - // anymore, finish setting up the replacer - addHTTPVarsToReplacer(repl, r, w) - - return r -} - -// errLogValues inspects err and returns the status code -// to use, the error log message, and any extra fields. -// If err is a HandlerError, the returned values will -// have richer information. -func errLogValues(err error) (status int, msg string, fields []zapcore.Field) { - if handlerErr, ok := err.(HandlerError); ok { - status = handlerErr.StatusCode - if handlerErr.Err == nil { - msg = err.Error() - } else { - msg = handlerErr.Err.Error() - } - fields = []zapcore.Field{ - zap.Int("status", handlerErr.StatusCode), - zap.String("err_id", handlerErr.ID), - zap.String("err_trace", handlerErr.Trace), - } - return - } - status = http.StatusInternalServerError - msg = err.Error() - return -} - -// originalRequest returns a partial, shallow copy of -// req, including: req.Method, deep copy of req.URL -// (into the urlCopy parameter, which should be on the -// stack), req.RequestURI, and req.RemoteAddr. Notably, -// headers are not copied. This function is designed to -// be very fast and efficient, and useful primarily for -// read-only/logging purposes. -func originalRequest(req *http.Request, urlCopy *url.URL) http.Request { - cloneURL(req.URL, urlCopy) - return http.Request{ - Method: req.Method, - RemoteAddr: req.RemoteAddr, - RequestURI: req.RequestURI, - URL: urlCopy, - } -} - -// cloneURL makes a copy of r.URL and returns a -// new value that doesn't reference the original. -func cloneURL(from, to *url.URL) { - *to = *from - if from.User != nil { - userInfo := new(url.Userinfo) - *userInfo = *from.User - to.User = userInfo - } -} - -const ( - // commonLogFormat is the common log format. 
https://en.wikipedia.org/wiki/Common_Log_Format - commonLogFormat = `{http.request.remote.host} ` + commonLogEmptyValue + ` {http.auth.user.id} [{time.now.common_log}] "{http.request.orig_method} {http.request.orig_uri} {http.request.proto}" {http.response.status} {http.response.size}` - - // commonLogEmptyValue is the common empty log value. - commonLogEmptyValue = "-" -) - -// Context keys for HTTP request context values. -const ( - // For referencing the server instance - ServerCtxKey caddy.CtxKey = "server" - - // For the request's variable table - VarsCtxKey caddy.CtxKey = "vars" - - // For a partial copy of the unmodified request that - // originally came into the server's entry handler - OriginalRequestCtxKey caddy.CtxKey = "original_request" -) diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/staticerror.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/staticerror.go deleted file mode 100644 index 914e6c14..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/staticerror.go +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package caddyhttp - -import ( - "fmt" - "net/http" - "strconv" - - "github.com/caddyserver/caddy/v2" - "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile" -) - -func init() { - caddy.RegisterModule(StaticError{}) -} - -// StaticError implements a simple handler that returns an error. -// This handler returns an error value, but does not write a response. -// This is useful when you want the server to act as if an error -// occurred; for example, to invoke your custom error handling logic. -// -// Since this handler does not write a response, the error information -// is for use by the server to know how to handle the error. -type StaticError struct { - // The error message. Optional. Default is no error message. - Error string `json:"error,omitempty"` - - // The recommended HTTP status code. Can be either an integer or a - // string if placeholders are needed. Optional. Default is 500. - StatusCode WeakString `json:"status_code,omitempty"` -} - -// CaddyModule returns the Caddy module information. -func (StaticError) CaddyModule() caddy.ModuleInfo { - return caddy.ModuleInfo{ - ID: "http.handlers.error", - New: func() caddy.Module { return new(StaticError) }, - } -} - -// UnmarshalCaddyfile sets up the handler from Caddyfile tokens. Syntax: -// -// error [] | [] { -// message -// } -// -// If there is just one argument (other than the matcher), it is considered -// to be a status code if it's a valid positive integer of 3 digits. 
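
Both the error directive here and the respond directive later in this diff share the same one-argument heuristic, implemented by the UnmarshalCaddyfile that follows: a lone argument is treated as a status code only if it is exactly three digits and parses as a positive integer. A sketch of just that decision; parseShortcutArg is a hypothetical helper name:

package main

import (
	"fmt"
	"strconv"
)

// parseShortcutArg applies the directive's shortcut rule: a lone
// argument that is exactly three digits and a positive integer is a
// status code; anything else is the error message (or response body).
func parseShortcutArg(arg string) (status, message string) {
	if len(arg) == 3 {
		if n, err := strconv.Atoi(arg); err == nil && n > 0 {
			return arg, ""
		}
	}
	return "", arg
}

func main() {
	fmt.Println(parseShortcutArg("404"))  // status "404"
	fmt.Println(parseShortcutArg("oops")) // message "oops"
	fmt.Println(parseShortcutArg("40x"))  // message "40x"
}
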
-func (e *StaticError) UnmarshalCaddyfile(d *caddyfile.Dispenser) error { - for d.Next() { - args := d.RemainingArgs() - switch len(args) { - case 1: - if len(args[0]) == 3 { - if num, err := strconv.Atoi(args[0]); err == nil && num > 0 { - e.StatusCode = WeakString(args[0]) - break - } - } - e.Error = args[0] - case 2: - e.Error = args[0] - e.StatusCode = WeakString(args[1]) - default: - return d.ArgErr() - } - - for d.NextBlock(0) { - switch d.Val() { - case "message": - if e.Error != "" { - return d.Err("message already specified") - } - if !d.AllArgs(&e.Error) { - return d.ArgErr() - } - default: - return d.Errf("unrecognized subdirective '%s'", d.Val()) - } - } - } - return nil -} - -func (e StaticError) ServeHTTP(w http.ResponseWriter, r *http.Request, _ Handler) error { - repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer) - - statusCode := http.StatusInternalServerError - if codeStr := e.StatusCode.String(); codeStr != "" { - intVal, err := strconv.Atoi(repl.ReplaceAll(codeStr, "")) - if err != nil { - return Error(http.StatusInternalServerError, err) - } - statusCode = intVal - } - - return Error(statusCode, fmt.Errorf("%s", e.Error)) -} - -// Interface guard -var ( - _ MiddlewareHandler = (*StaticError)(nil) - _ caddyfile.Unmarshaler = (*StaticError)(nil) -) diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/staticresp.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/staticresp.go deleted file mode 100644 index c587f5ee..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/staticresp.go +++ /dev/null @@ -1,172 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package caddyhttp - -import ( - "fmt" - "net/http" - "strconv" - - "github.com/caddyserver/caddy/v2" - "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile" -) - -func init() { - caddy.RegisterModule(StaticResponse{}) -} - -// StaticResponse implements a simple responder for static responses. -type StaticResponse struct { - // The HTTP status code to respond with. Can be an integer or, - // if needing to use a placeholder, a string. - StatusCode WeakString `json:"status_code,omitempty"` - - // Header fields to set on the response. - Headers http.Header `json:"headers,omitempty"` - - // The response body. - Body string `json:"body,omitempty"` - - // If true, the server will close the client's connection - // after writing the response. - Close bool `json:"close,omitempty"` - - // Immediately and forcefully closes the connection without - // writing a response. Interrupts any other HTTP streams on - // the same connection. - Abort bool `json:"abort,omitempty"` -} - -// CaddyModule returns the Caddy module information. -func (StaticResponse) CaddyModule() caddy.ModuleInfo { - return caddy.ModuleInfo{ - ID: "http.handlers.static_response", - New: func() caddy.Module { return new(StaticResponse) }, - } -} - -// UnmarshalCaddyfile sets up the handler from Caddyfile tokens. 
Syntax: -// -// respond [] | [] { -// body -// close -// } -// -// If there is just one argument (other than the matcher), it is considered -// to be a status code if it's a valid positive integer of 3 digits. -func (s *StaticResponse) UnmarshalCaddyfile(d *caddyfile.Dispenser) error { - for d.Next() { - args := d.RemainingArgs() - switch len(args) { - case 1: - if len(args[0]) == 3 { - if num, err := strconv.Atoi(args[0]); err == nil && num > 0 { - s.StatusCode = WeakString(args[0]) - break - } - } - s.Body = args[0] - case 2: - s.Body = args[0] - s.StatusCode = WeakString(args[1]) - default: - return d.ArgErr() - } - - for d.NextBlock(0) { - switch d.Val() { - case "body": - if s.Body != "" { - return d.Err("body already specified") - } - if !d.AllArgs(&s.Body) { - return d.ArgErr() - } - case "close": - if s.Close { - return d.Err("close already specified") - } - s.Close = true - default: - return d.Errf("unrecognized subdirective '%s'", d.Val()) - } - } - } - return nil -} - -func (s StaticResponse) ServeHTTP(w http.ResponseWriter, r *http.Request, _ Handler) error { - // close the connection immediately - if s.Abort { - panic(http.ErrAbortHandler) - } - - // close the connection after responding - if s.Close { - r.Close = true - w.Header().Set("Connection", "close") - } - - repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer) - - // set all headers - for field, vals := range s.Headers { - field = repl.ReplaceAll(field, "") - newVals := make([]string, len(vals)) - for i := range vals { - newVals[i] = repl.ReplaceAll(vals[i], "") - } - w.Header()[field] = newVals - } - - // do not allow Go to sniff the content-type - if w.Header().Get("Content-Type") == "" { - w.Header()["Content-Type"] = nil - } - - // get the status code; if this handler exists in an error route, - // use the recommended status code as the default; otherwise 200 - statusCode := http.StatusOK - if reqErr, ok := r.Context().Value(ErrorCtxKey).(error); ok { - if handlerErr, ok := reqErr.(HandlerError); ok { - if handlerErr.StatusCode > 0 { - statusCode = handlerErr.StatusCode - } - } - } - if codeStr := s.StatusCode.String(); codeStr != "" { - intVal, err := strconv.Atoi(repl.ReplaceAll(codeStr, "")) - if err != nil { - return Error(http.StatusInternalServerError, err) - } - statusCode = intVal - } - - // write headers - w.WriteHeader(statusCode) - - // write response body - if s.Body != "" { - fmt.Fprint(w, repl.ReplaceKnown(s.Body, "")) - } - - return nil -} - -// Interface guards -var ( - _ MiddlewareHandler = (*StaticResponse)(nil) - _ caddyfile.Unmarshaler = (*StaticResponse)(nil) -) diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/subroute.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/subroute.go deleted file mode 100644 index 2e80d88d..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/subroute.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
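
One subtle line in StaticResponse.ServeHTTP above deserves a note: assigning a nil slice to the Content-Type key (rather than deleting the key) is what tells net/http not to sniff a Content-Type for the body. A standalone sketch of the same trick; writeWithoutSniffing is an illustrative name:

package main

import (
	"fmt"
	"log"
	"net/http"
)

// writeWithoutSniffing reproduces the trick used by StaticResponse: a
// nil entry under "Content-Type" suppresses net/http's content-type
// sniffing, so the response goes out with no Content-Type at all.
func writeWithoutSniffing(w http.ResponseWriter, body string) {
	if w.Header().Get("Content-Type") == "" {
		w.Header()["Content-Type"] = nil
	}
	w.WriteHeader(http.StatusOK)
	fmt.Fprint(w, body)
}

func main() {
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		writeWithoutSniffing(w, "<b>not sniffed as text/html</b>")
	})
	log.Fatal(http.ListenAndServe("127.0.0.1:8080", nil))
}
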
- -package caddyhttp - -import ( - "fmt" - "net/http" - - "github.com/caddyserver/caddy/v2" -) - -func init() { - caddy.RegisterModule(Subroute{}) -} - -// Subroute implements a handler that compiles and executes routes. -// This is useful for a batch of routes that all inherit the same -// matchers, or for multiple routes that should be treated as a -// single route. -// -// You can also use subroutes to handle errors from its handlers. -// First the primary routes will be executed, and if they return an -// error, the errors routes will be executed; in that case, an error -// is only returned to the entry point at the server if there is an -// additional error returned from the errors routes. -type Subroute struct { - // The primary list of routes to compile and execute. - Routes RouteList `json:"routes,omitempty"` - - // If the primary routes return an error, error handling - // can be promoted to this configuration instead. - Errors *HTTPErrorConfig `json:"errors,omitempty"` -} - -// CaddyModule returns the Caddy module information. -func (Subroute) CaddyModule() caddy.ModuleInfo { - return caddy.ModuleInfo{ - ID: "http.handlers.subroute", - New: func() caddy.Module { return new(Subroute) }, - } -} - -// Provision sets up subrouting. -func (sr *Subroute) Provision(ctx caddy.Context) error { - if sr.Routes != nil { - err := sr.Routes.Provision(ctx) - if err != nil { - return fmt.Errorf("setting up subroutes: %v", err) - } - if sr.Errors != nil { - err := sr.Errors.Routes.Provision(ctx) - if err != nil { - return fmt.Errorf("setting up error subroutes: %v", err) - } - } - } - return nil -} - -func (sr *Subroute) ServeHTTP(w http.ResponseWriter, r *http.Request, next Handler) error { - subroute := sr.Routes.Compile(next) - err := subroute.ServeHTTP(w, r) - if err != nil && sr.Errors != nil { - r = sr.Errors.WithError(r, err) - errRoute := sr.Errors.Routes.Compile(next) - return errRoute.ServeHTTP(w, r) - } - return err -} - -// Interface guards -var ( - _ caddy.Provisioner = (*Subroute)(nil) - _ MiddlewareHandler = (*Subroute)(nil) -) diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/vars.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/vars.go deleted file mode 100644 index 479ef0a7..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/modules/caddyhttp/vars.go +++ /dev/null @@ -1,233 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package caddyhttp - -import ( - "context" - "fmt" - "net/http" - - "github.com/caddyserver/caddy/v2" - "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile" -) - -func init() { - caddy.RegisterModule(VarsMiddleware{}) - caddy.RegisterModule(VarsMatcher{}) - caddy.RegisterModule(MatchVarsRE{}) -} - -// VarsMiddleware is an HTTP middleware which sets variables -// in the context, mainly for use by placeholders. 
The -// placeholders have the form: `{http.vars.variable_name}` -type VarsMiddleware map[string]string - -// CaddyModule returns the Caddy module information. -func (VarsMiddleware) CaddyModule() caddy.ModuleInfo { - return caddy.ModuleInfo{ - ID: "http.handlers.vars", - New: func() caddy.Module { return new(VarsMiddleware) }, - } -} - -func (t VarsMiddleware) ServeHTTP(w http.ResponseWriter, r *http.Request, next Handler) error { - vars := r.Context().Value(VarsCtxKey).(map[string]interface{}) - repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer) - for k, v := range t { - keyExpanded := repl.ReplaceAll(k, "") - valExpanded := repl.ReplaceAll(v, "") - vars[keyExpanded] = valExpanded - } - return next.ServeHTTP(w, r) -} - -// VarsMatcher is an HTTP request matcher which can match -// requests based on variables in the context. -type VarsMatcher map[string]string - -// CaddyModule returns the Caddy module information. -func (VarsMatcher) CaddyModule() caddy.ModuleInfo { - return caddy.ModuleInfo{ - ID: "http.matchers.vars", - New: func() caddy.Module { return new(VarsMatcher) }, - } -} - -// UnmarshalCaddyfile implements caddyfile.Unmarshaler. -func (m *VarsMatcher) UnmarshalCaddyfile(d *caddyfile.Dispenser) error { - if *m == nil { - *m = make(map[string]string) - } - for d.Next() { - var field, val string - if !d.Args(&field, &val) { - return d.Errf("malformed vars matcher: expected both field and value") - } - (*m)[field] = val - if d.NextBlock(0) { - return d.Err("malformed vars matcher: blocks are not supported") - } - } - return nil -} - -// Match matches a request based on variables in the context. -func (m VarsMatcher) Match(r *http.Request) bool { - vars := r.Context().Value(VarsCtxKey).(map[string]interface{}) - repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer) - for k, v := range m { - keyExpanded := repl.ReplaceAll(k, "") - valExpanded := repl.ReplaceAll(v, "") - var varStr string - switch vv := vars[keyExpanded].(type) { - case string: - varStr = vv - case fmt.Stringer: - varStr = vv.String() - case error: - varStr = vv.Error() - default: - varStr = fmt.Sprintf("%v", vv) - } - if varStr != valExpanded { - return false - } - } - return true -} - -// MatchVarsRE matches the value of the context variables by a given regular expression. -// -// Upon a match, it adds placeholders to the request: `{http.regexp.name.capture_group}` -// where `name` is the regular expression's name, and `capture_group` is either -// the named or positional capture group from the expression itself. If no name -// is given, then the placeholder omits the name: `{http.regexp.capture_group}` -// (potentially leading to collisions). -type MatchVarsRE map[string]*MatchRegexp - -// CaddyModule returns the Caddy module information. -func (MatchVarsRE) CaddyModule() caddy.ModuleInfo { - return caddy.ModuleInfo{ - ID: "http.matchers.vars_regexp", - New: func() caddy.Module { return new(MatchVarsRE) }, - } -} - -// UnmarshalCaddyfile implements caddyfile.Unmarshaler. 
-func (m *MatchVarsRE) UnmarshalCaddyfile(d *caddyfile.Dispenser) error { - if *m == nil { - *m = make(map[string]*MatchRegexp) - } - for d.Next() { - var first, second, third string - if !d.Args(&first, &second) { - return d.ArgErr() - } - - var name, field, val string - if d.Args(&third) { - name = first - field = second - val = third - } else { - field = first - val = second - } - - (*m)[field] = &MatchRegexp{Pattern: val, Name: name} - if d.NextBlock(0) { - return d.Err("malformed vars_regexp matcher: blocks are not supported") - } - } - return nil -} - -// Provision compiles m's regular expressions. -func (m MatchVarsRE) Provision(ctx caddy.Context) error { - for _, rm := range m { - err := rm.Provision(ctx) - if err != nil { - return err - } - } - return nil -} - -// Match returns true if r matches m. -func (m MatchVarsRE) Match(r *http.Request) bool { - vars := r.Context().Value(VarsCtxKey).(map[string]interface{}) - repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer) - for k, rm := range m { - var varStr string - switch vv := vars[k].(type) { - case string: - varStr = vv - case fmt.Stringer: - varStr = vv.String() - case error: - varStr = vv.Error() - default: - varStr = fmt.Sprintf("%v", vv) - } - valExpanded := repl.ReplaceAll(varStr, "") - if match := rm.Match(valExpanded, repl); match { - return match - } - - replacedVal := repl.ReplaceAll(k, "") - if match := rm.Match(replacedVal, repl); match { - return match - } - } - return false -} - -// Validate validates m's regular expressions. -func (m MatchVarsRE) Validate() error { - for _, rm := range m { - err := rm.Validate() - if err != nil { - return err - } - } - return nil -} - -// GetVar gets a value out of the context's variable table by key. -// If the key does not exist, the return value will be nil. -func GetVar(ctx context.Context, key string) interface{} { - varMap, ok := ctx.Value(VarsCtxKey).(map[string]interface{}) - if !ok { - return nil - } - return varMap[key] -} - -// SetVar sets a value in the context's variable table with -// the given key. It overwrites any previous value with the -// same key. -func SetVar(ctx context.Context, key string, value interface{}) { - varMap, ok := ctx.Value(VarsCtxKey).(map[string]interface{}) - if !ok { - return - } - varMap[key] = value -} - -// Interface guards -var ( - _ MiddlewareHandler = (*VarsMiddleware)(nil) - _ RequestMatcher = (*VarsMatcher)(nil) -) diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddypki/ca.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddypki/ca.go deleted file mode 100644 index e3102fba..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/modules/caddypki/ca.go +++ /dev/null @@ -1,403 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
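
The vars table that VarsMiddleware, the matchers, and GetVar/SetVar above all share is simply a mutable map seeded into the request context by PrepareRequest; writes are visible to later handlers because the map itself is shared, not the context value. A reduced model of that mechanism, independent of Caddy (withVars, setVar, and getVar are illustrative names):

package main

import (
	"context"
	"fmt"
)

type ctxKey string

const varsKey ctxKey = "vars"

// withVars seeds a context with a mutable variable table, as
// PrepareRequest does with VarsCtxKey.
func withVars(ctx context.Context) context.Context {
	return context.WithValue(ctx, varsKey, map[string]interface{}{})
}

// setVar and getVar never replace the context value; they mutate and
// read the shared map, which is why later handlers see earlier writes.
func setVar(ctx context.Context, k string, v interface{}) {
	if m, ok := ctx.Value(varsKey).(map[string]interface{}); ok {
		m[k] = v
	}
}

func getVar(ctx context.Context, k string) interface{} {
	if m, ok := ctx.Value(varsKey).(map[string]interface{}); ok {
		return m[k]
	}
	return nil
}

func main() {
	ctx := withVars(context.Background())
	setVar(ctx, "magic_number", 3)
	fmt.Println(getVar(ctx, "magic_number")) // 3
}
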
- -package caddypki - -import ( - "crypto" - "crypto/x509" - "encoding/json" - "fmt" - "path" - "sync" - "time" - - "github.com/caddyserver/caddy/v2" - "github.com/caddyserver/certmagic" - "github.com/smallstep/certificates/authority" - "github.com/smallstep/certificates/db" - "github.com/smallstep/truststore" - "go.uber.org/zap" -) - -// CA describes a certificate authority, which consists of -// root/signing certificates and various settings pertaining -// to the issuance of certificates and trusting them. -type CA struct { - // The user-facing name of the certificate authority. - Name string `json:"name,omitempty"` - - // The name to put in the CommonName field of the - // root certificate. - RootCommonName string `json:"root_common_name,omitempty"` - - // The name to put in the CommonName field of the - // intermediate certificates. - IntermediateCommonName string `json:"intermediate_common_name,omitempty"` - - // Whether Caddy will attempt to install the CA's root - // into the system trust store, as well as into Java - // and Mozilla Firefox trust stores. Default: true. - InstallTrust *bool `json:"install_trust,omitempty"` - - // The root certificate to use; if null, one will be generated. - Root *KeyPair `json:"root,omitempty"` - - // The intermediate (signing) certificate; if null, one will be generated. - Intermediate *KeyPair `json:"intermediate,omitempty"` - - // Optionally configure a separate storage module associated with this - // issuer, instead of using Caddy's global/default-configured storage. - // This can be useful if you want to keep your signing keys in a - // separate location from your leaf certificates. - StorageRaw json.RawMessage `json:"storage,omitempty" caddy:"namespace=caddy.storage inline_key=module"` - - // The unique config-facing ID of the certificate authority. - // Since the ID is set in JSON config via object key, this - // field is exported only for purposes of config generation - // and module provisioning. - ID string `json:"-"` - - storage certmagic.Storage - root, inter *x509.Certificate - interKey interface{} // TODO: should we just store these as crypto.Signer? - mu *sync.RWMutex - - rootCertPath string // mainly used for logging purposes if trusting - log *zap.Logger -} - -// Provision sets up the CA. -func (ca *CA) Provision(ctx caddy.Context, id string, log *zap.Logger) error { - ca.mu = new(sync.RWMutex) - ca.log = log.Named("ca." 
+ id) - - if id == "" { - return fmt.Errorf("CA ID is required (use 'local' for the default CA)") - } - ca.mu.Lock() - ca.ID = id - ca.mu.Unlock() - - if ca.StorageRaw != nil { - val, err := ctx.LoadModule(ca, "StorageRaw") - if err != nil { - return fmt.Errorf("loading storage module: %v", err) - } - cmStorage, err := val.(caddy.StorageConverter).CertMagicStorage() - if err != nil { - return fmt.Errorf("creating storage configuration: %v", err) - } - ca.storage = cmStorage - } - if ca.storage == nil { - ca.storage = ctx.Storage() - } - - if ca.Name == "" { - ca.Name = defaultCAName - } - if ca.RootCommonName == "" { - ca.RootCommonName = defaultRootCommonName - } - if ca.IntermediateCommonName == "" { - ca.IntermediateCommonName = defaultIntermediateCommonName - } - - // load the certs and key that will be used for signing - var rootCert, interCert *x509.Certificate - var rootKey, interKey interface{} - var err error - if ca.Root != nil { - if ca.Root.Format == "" || ca.Root.Format == "pem_file" { - ca.rootCertPath = ca.Root.Certificate - } - rootCert, rootKey, err = ca.Root.Load() - } else { - ca.rootCertPath = "storage:" + ca.storageKeyRootCert() - rootCert, rootKey, err = ca.loadOrGenRoot() - } - if err != nil { - return err - } - if ca.Intermediate != nil { - interCert, interKey, err = ca.Intermediate.Load() - } else { - interCert, interKey, err = ca.loadOrGenIntermediate(rootCert, rootKey) - } - if err != nil { - return err - } - - ca.mu.Lock() - ca.root, ca.inter, ca.interKey = rootCert, interCert, interKey - ca.mu.Unlock() - - return nil -} - -// RootCertificate returns the CA's root certificate (public key). -func (ca CA) RootCertificate() *x509.Certificate { - ca.mu.RLock() - defer ca.mu.RUnlock() - return ca.root -} - -// RootKey returns the CA's root private key. Since the root key is -// not cached in memory long-term, it needs to be loaded from storage, -// which could yield an error. -func (ca CA) RootKey() (interface{}, error) { - _, rootKey, err := ca.loadOrGenRoot() - return rootKey, err -} - -// IntermediateCertificate returns the CA's intermediate -// certificate (public key). -func (ca CA) IntermediateCertificate() *x509.Certificate { - ca.mu.RLock() - defer ca.mu.RUnlock() - return ca.inter -} - -// IntermediateKey returns the CA's intermediate private key. -func (ca CA) IntermediateKey() interface{} { - ca.mu.RLock() - defer ca.mu.RUnlock() - return ca.interKey -} - -// NewAuthority returns a new Smallstep-powered signing authority for this CA. -func (ca CA) NewAuthority(authorityConfig AuthorityConfig) (*authority.Authority, error) { - // get the root certificate and the issuer cert+key - rootCert := ca.RootCertificate() - var issuerCert *x509.Certificate - var issuerKey interface{} - if authorityConfig.SignWithRoot { - issuerCert = rootCert - var err error - issuerKey, err = ca.RootKey() - if err != nil { - return nil, fmt.Errorf("loading signing key: %v", err) - } - } else { - issuerCert = ca.IntermediateCertificate() - issuerKey = ca.IntermediateKey() - } - - opts := []authority.Option{ - authority.WithConfig(&authority.Config{ - AuthorityConfig: authorityConfig.AuthConfig, - }), - authority.WithX509Signer(issuerCert, issuerKey.(crypto.Signer)), - authority.WithX509RootCerts(rootCert), - } - // Add a database if we have one - if authorityConfig.DB != nil { - opts = append(opts, authority.WithDatabase(*authorityConfig.DB)) - } - auth, err := authority.NewEmbedded(opts...) 
- if err != nil { - return nil, fmt.Errorf("initializing certificate authority: %v", err) - } - - return auth, nil -} - -func (ca CA) loadOrGenRoot() (rootCert *x509.Certificate, rootKey interface{}, err error) { - rootCertPEM, err := ca.storage.Load(ca.storageKeyRootCert()) - if err != nil { - if _, ok := err.(certmagic.ErrNotExist); !ok { - return nil, nil, fmt.Errorf("loading root cert: %v", err) - } - - // TODO: should we require that all or none of the assets are required before overwriting anything? - rootCert, rootKey, err = ca.genRoot() - if err != nil { - return nil, nil, fmt.Errorf("generating root: %v", err) - } - } - - if rootCert == nil { - rootCert, err = pemDecodeSingleCert(rootCertPEM) - if err != nil { - return nil, nil, fmt.Errorf("parsing root certificate PEM: %v", err) - } - } - if rootKey == nil { - rootKeyPEM, err := ca.storage.Load(ca.storageKeyRootKey()) - if err != nil { - return nil, nil, fmt.Errorf("loading root key: %v", err) - } - rootKey, err = pemDecodePrivateKey(rootKeyPEM) - if err != nil { - return nil, nil, fmt.Errorf("decoding root key: %v", err) - } - } - - return rootCert, rootKey, nil -} - -func (ca CA) genRoot() (rootCert *x509.Certificate, rootKey interface{}, err error) { - repl := ca.newReplacer() - - rootCert, rootKey, err = generateRoot(repl.ReplaceAll(ca.RootCommonName, "")) - if err != nil { - return nil, nil, fmt.Errorf("generating CA root: %v", err) - } - rootCertPEM, err := pemEncodeCert(rootCert.Raw) - if err != nil { - return nil, nil, fmt.Errorf("encoding root certificate: %v", err) - } - err = ca.storage.Store(ca.storageKeyRootCert(), rootCertPEM) - if err != nil { - return nil, nil, fmt.Errorf("saving root certificate: %v", err) - } - rootKeyPEM, err := pemEncodePrivateKey(rootKey) - if err != nil { - return nil, nil, fmt.Errorf("encoding root key: %v", err) - } - err = ca.storage.Store(ca.storageKeyRootKey(), rootKeyPEM) - if err != nil { - return nil, nil, fmt.Errorf("saving root key: %v", err) - } - - return rootCert, rootKey, nil -} - -func (ca CA) loadOrGenIntermediate(rootCert *x509.Certificate, rootKey interface{}) (interCert *x509.Certificate, interKey interface{}, err error) { - interCertPEM, err := ca.storage.Load(ca.storageKeyIntermediateCert()) - if err != nil { - if _, ok := err.(certmagic.ErrNotExist); !ok { - return nil, nil, fmt.Errorf("loading intermediate cert: %v", err) - } - - // TODO: should we require that all or none of the assets are required before overwriting anything? 
- interCert, interKey, err = ca.genIntermediate(rootCert, rootKey) - if err != nil { - return nil, nil, fmt.Errorf("generating new intermediate cert: %v", err) - } - } - - if interCert == nil { - interCert, err = pemDecodeSingleCert(interCertPEM) - if err != nil { - return nil, nil, fmt.Errorf("decoding intermediate certificate PEM: %v", err) - } - } - - if interKey == nil { - interKeyPEM, err := ca.storage.Load(ca.storageKeyIntermediateKey()) - if err != nil { - return nil, nil, fmt.Errorf("loading intermediate key: %v", err) - } - interKey, err = pemDecodePrivateKey(interKeyPEM) - if err != nil { - return nil, nil, fmt.Errorf("decoding intermediate key: %v", err) - } - } - - return interCert, interKey, nil -} - -func (ca CA) genIntermediate(rootCert *x509.Certificate, rootKey interface{}) (interCert *x509.Certificate, interKey interface{}, err error) { - repl := ca.newReplacer() - - interCert, interKey, err = generateIntermediate(repl.ReplaceAll(ca.IntermediateCommonName, ""), rootCert, rootKey) - if err != nil { - return nil, nil, fmt.Errorf("generating CA intermediate: %v", err) - } - interCertPEM, err := pemEncodeCert(interCert.Raw) - if err != nil { - return nil, nil, fmt.Errorf("encoding intermediate certificate: %v", err) - } - err = ca.storage.Store(ca.storageKeyIntermediateCert(), interCertPEM) - if err != nil { - return nil, nil, fmt.Errorf("saving intermediate certificate: %v", err) - } - interKeyPEM, err := pemEncodePrivateKey(interKey) - if err != nil { - return nil, nil, fmt.Errorf("encoding intermediate key: %v", err) - } - err = ca.storage.Store(ca.storageKeyIntermediateKey(), interKeyPEM) - if err != nil { - return nil, nil, fmt.Errorf("saving intermediate key: %v", err) - } - - return interCert, interKey, nil -} - -func (ca CA) storageKeyCAPrefix() string { - return path.Join("pki", "authorities", certmagic.StorageKeys.Safe(ca.ID)) -} -func (ca CA) storageKeyRootCert() string { - return path.Join(ca.storageKeyCAPrefix(), "root.crt") -} -func (ca CA) storageKeyRootKey() string { - return path.Join(ca.storageKeyCAPrefix(), "root.key") -} -func (ca CA) storageKeyIntermediateCert() string { - return path.Join(ca.storageKeyCAPrefix(), "intermediate.crt") -} -func (ca CA) storageKeyIntermediateKey() string { - return path.Join(ca.storageKeyCAPrefix(), "intermediate.key") -} - -func (ca CA) newReplacer() *caddy.Replacer { - repl := caddy.NewReplacer() - repl.Set("pki.ca.name", ca.Name) - return repl -} - -// installRoot installs this CA's root certificate into the -// local trust store(s) if it is not already trusted. The CA -// must already be provisioned. -func (ca CA) installRoot() error { - // avoid password prompt if already trusted - if trusted(ca.root) { - ca.log.Info("root certificate is already trusted by system", - zap.String("path", ca.rootCertPath)) - return nil - } - - ca.log.Warn("installing root certificate (you might be prompted for password)", - zap.String("path", ca.rootCertPath)) - - return truststore.Install(ca.root, - truststore.WithDebug(), - truststore.WithFirefox(), - truststore.WithJava(), - ) -} - -// AuthorityConfig is used to help a CA configure -// the underlying signing authority. -type AuthorityConfig struct { - SignWithRoot bool - - // TODO: should we just embed the underlying authority.Config struct type? - DB *db.AuthDB - AuthConfig *authority.AuthConfig -} - -const ( - // DefaultCAID is the default CA ID. 
- DefaultCAID = "local" - - defaultCAName = "Caddy Local Authority" - defaultRootCommonName = "{pki.ca.name} - {time.now.year} ECC Root" - defaultIntermediateCommonName = "{pki.ca.name} - ECC Intermediate" - - defaultRootLifetime = 24 * time.Hour * 30 * 12 * 10 - defaultIntermediateLifetime = 24 * time.Hour * 7 -) diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddypki/certificates.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddypki/certificates.go deleted file mode 100644 index a55c1658..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/modules/caddypki/certificates.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package caddypki - -import ( - "crypto/x509" - "time" - - "github.com/smallstep/cli/crypto/x509util" -) - -func generateRoot(commonName string) (rootCrt *x509.Certificate, privateKey interface{}, err error) { - rootProfile, err := x509util.NewRootProfile(commonName) - if err != nil { - return - } - rootProfile.Subject().NotAfter = time.Now().Add(defaultRootLifetime) // TODO: make configurable - return newCert(rootProfile) -} - -func generateIntermediate(commonName string, rootCrt *x509.Certificate, rootKey interface{}) (cert *x509.Certificate, privateKey interface{}, err error) { - interProfile, err := x509util.NewIntermediateProfile(commonName, rootCrt, rootKey) - if err != nil { - return - } - interProfile.Subject().NotAfter = time.Now().Add(defaultIntermediateLifetime) // TODO: make configurable - return newCert(interProfile) -} - -func newCert(profile x509util.Profile) (cert *x509.Certificate, privateKey interface{}, err error) { - certBytes, err := profile.CreateCertificate() - if err != nil { - return - } - privateKey = profile.SubjectPrivateKey() - cert, err = x509.ParseCertificate(certBytes) - return -} diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddypki/command.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddypki/command.go deleted file mode 100644 index 34daefaa..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/modules/caddypki/command.go +++ /dev/null @@ -1,133 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
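
generateRoot above leans on smallstep's x509util profiles, but the core of what it produces is a self-signed ECDSA CA certificate with the ten-year default root lifetime from the const block above. A stdlib-only sketch under that assumption; generateRootStdlib is a hypothetical name, and the real profile additionally sets random serials, subject key IDs, and signature parameters:

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"crypto/x509/pkix"
	"fmt"
	"log"
	"math/big"
	"time"
)

// generateRootStdlib creates a self-signed ECDSA CA certificate with
// the ~10-year default root lifetime (24h * 30 * 12 * 10).
func generateRootStdlib(commonName string) (*x509.Certificate, *ecdsa.PrivateKey, error) {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		return nil, nil, err
	}
	tmpl := &x509.Certificate{
		SerialNumber:          big.NewInt(1), // real code must use a random serial
		Subject:               pkix.Name{CommonName: commonName},
		NotBefore:             time.Now(),
		NotAfter:              time.Now().Add(24 * time.Hour * 30 * 12 * 10),
		IsCA:                  true,
		KeyUsage:              x509.KeyUsageCertSign | x509.KeyUsageCRLSign,
		BasicConstraintsValid: true,
	}
	der, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &key.PublicKey, key)
	if err != nil {
		return nil, nil, err
	}
	cert, err := x509.ParseCertificate(der)
	return cert, key, err
}

func main() {
	cert, _, err := generateRootStdlib("Example Local Authority - ECC Root")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(cert.Subject.CommonName, "expires", cert.NotAfter.Format(time.RFC3339))
}
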
- -package caddypki - -import ( - "context" - "flag" - "fmt" - "os" - "path/filepath" - - "github.com/caddyserver/caddy/v2" - caddycmd "github.com/caddyserver/caddy/v2/cmd" - "github.com/smallstep/truststore" -) - -func init() { - caddycmd.RegisterCommand(caddycmd.Command{ - Name: "trust", - Func: cmdTrust, - Short: "Installs a CA certificate into local trust stores", - Long: ` -Adds a root certificate into the local trust stores. Intended for -development environments only. - -Since Caddy will install its root certificates into the local trust -stores automatically when they are first generated, this command is -only necessary if you need to pre-install the certificates before -using them; for example, if you have elevated privileges at one -point but not later, you will want to use this command so that a -password prompt is not required later. - -This command installs the root certificate only for Caddy's -default CA.`, - }) - - caddycmd.RegisterCommand(caddycmd.Command{ - Name: "untrust", - Func: cmdUntrust, - Usage: "[--ca | --cert ]", - Short: "Untrusts a locally-trusted CA certificate", - Long: ` -Untrusts a root certificate from the local trust store(s). Intended -for development environments only. - -This command uninstalls trust; it does not necessarily delete the -root certificate from trust stores entirely. Thus, repeatedly -trusting and untrusting new certificates can fill up trust databases. - -This command does not delete or modify certificate files. - -Specify which certificate to untrust either by the ID of its CA with -the --ca flag, or the direct path to the certificate file with the ---cert flag. If the --ca flag is used, only the default storage paths -are assumed (i.e. using --ca flag with custom storage backends or file -paths will not work). - -If no flags are specified, --ca=local is assumed.`, - Flags: func() *flag.FlagSet { - fs := flag.NewFlagSet("untrust", flag.ExitOnError) - fs.String("ca", "", "The ID of the CA to untrust") - fs.String("cert", "", "The path to the CA certificate to untrust") - return fs - }(), - }) -} - -func cmdTrust(fs caddycmd.Flags) (int, error) { - // we have to create a sort of dummy context so that - // the CA can provision itself... 
- ctx, cancel := caddy.NewContext(caddy.Context{Context: context.Background()}) - defer cancel() - - // provision the CA, which generates and stores a root - // certificate if one doesn't already exist in storage - ca := CA{ - storage: caddy.DefaultStorage, - } - err := ca.Provision(ctx, DefaultCAID, caddy.Log()) - if err != nil { - return caddy.ExitCodeFailedStartup, err - } - - err = ca.installRoot() - if err != nil { - return caddy.ExitCodeFailedStartup, err - } - - return caddy.ExitCodeSuccess, nil -} - -func cmdUntrust(fs caddycmd.Flags) (int, error) { - ca := fs.String("ca") - cert := fs.String("cert") - - if ca != "" && cert != "" { - return caddy.ExitCodeFailedStartup, fmt.Errorf("conflicting command line arguments") - } - if ca == "" && cert == "" { - ca = DefaultCAID - } - if ca != "" { - cert = filepath.Join(caddy.AppDataDir(), "pki", "authorities", ca, "root.crt") - } - - // sanity check, make sure cert file exists first - _, err := os.Stat(cert) - if err != nil { - return caddy.ExitCodeFailedStartup, fmt.Errorf("accessing certificate file: %v", err) - } - - err = truststore.UninstallFile(cert, - truststore.WithDebug(), - truststore.WithFirefox(), - truststore.WithJava()) - if err != nil { - return caddy.ExitCodeFailedStartup, err - } - - return caddy.ExitCodeSuccess, nil -} diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddypki/crypto.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddypki/crypto.go deleted file mode 100644 index e1a0e354..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/modules/caddypki/crypto.go +++ /dev/null @@ -1,163 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package caddypki - -import ( - "bytes" - "crypto" - "crypto/ecdsa" - "crypto/ed25519" - "crypto/rsa" - "crypto/x509" - "encoding/pem" - "fmt" - "io/ioutil" - "strings" -) - -func pemDecodeSingleCert(pemDER []byte) (*x509.Certificate, error) { - pemBlock, remaining := pem.Decode(pemDER) - if pemBlock == nil { - return nil, fmt.Errorf("no PEM block found") - } - if len(remaining) > 0 { - return nil, fmt.Errorf("input contained more than a single PEM block") - } - if pemBlock.Type != "CERTIFICATE" { - return nil, fmt.Errorf("expected PEM block type to be CERTIFICATE, but got '%s'", pemBlock.Type) - } - return x509.ParseCertificate(pemBlock.Bytes) -} - -func pemEncodeCert(der []byte) ([]byte, error) { - return pemEncode("CERTIFICATE", der) -} - -// pemEncodePrivateKey marshals a EC or RSA private key into a PEM-encoded array of bytes. -// TODO: this is the same thing as in certmagic. Should we reuse that code somehow? It's unexported. 
-func pemEncodePrivateKey(key crypto.PrivateKey) ([]byte, error) { - var pemType string - var keyBytes []byte - switch key := key.(type) { - case *ecdsa.PrivateKey: - var err error - pemType = "EC" - keyBytes, err = x509.MarshalECPrivateKey(key) - if err != nil { - return nil, err - } - case *rsa.PrivateKey: - pemType = "RSA" - keyBytes = x509.MarshalPKCS1PrivateKey(key) - case *ed25519.PrivateKey: - var err error - pemType = "ED25519" - keyBytes, err = x509.MarshalPKCS8PrivateKey(key) - if err != nil { - return nil, err - } - default: - return nil, fmt.Errorf("unsupported key type: %T", key) - } - return pemEncode(pemType+" PRIVATE KEY", keyBytes) -} - -// pemDecodePrivateKey loads a PEM-encoded ECC/RSA private key from an array of bytes. -// Borrowed from Go standard library, to handle various private key and PEM block types. -// https://github.com/golang/go/blob/693748e9fa385f1e2c3b91ca9acbb6c0ad2d133d/src/crypto/tls/tls.go#L291-L308 -// https://github.com/golang/go/blob/693748e9fa385f1e2c3b91ca9acbb6c0ad2d133d/src/crypto/tls/tls.go#L238) -// TODO: this is the same thing as in certmagic. Should we reuse that code somehow? It's unexported. -func pemDecodePrivateKey(keyPEMBytes []byte) (crypto.PrivateKey, error) { - keyBlockDER, _ := pem.Decode(keyPEMBytes) - - if keyBlockDER.Type != "PRIVATE KEY" && !strings.HasSuffix(keyBlockDER.Type, " PRIVATE KEY") { - return nil, fmt.Errorf("unknown PEM header %q", keyBlockDER.Type) - } - - if key, err := x509.ParsePKCS1PrivateKey(keyBlockDER.Bytes); err == nil { - return key, nil - } - - if key, err := x509.ParsePKCS8PrivateKey(keyBlockDER.Bytes); err == nil { - switch key := key.(type) { - case *rsa.PrivateKey, *ecdsa.PrivateKey, ed25519.PrivateKey: - return key, nil - default: - return nil, fmt.Errorf("found unknown private key type in PKCS#8 wrapping: %T", key) - } - } - - if key, err := x509.ParseECPrivateKey(keyBlockDER.Bytes); err == nil { - return key, nil - } - - return nil, fmt.Errorf("unknown private key type") -} - -func pemEncode(blockType string, b []byte) ([]byte, error) { - var buf bytes.Buffer - err := pem.Encode(&buf, &pem.Block{Type: blockType, Bytes: b}) - return buf.Bytes(), err -} - -func trusted(cert *x509.Certificate) bool { - chains, err := cert.Verify(x509.VerifyOptions{}) - return len(chains) > 0 && err == nil -} - -// KeyPair represents a public-private key pair, where the -// public key is also called a certificate. -type KeyPair struct { - // The certificate. By default, this should be the path to - // a PEM file unless format is something else. - Certificate string `json:"certificate,omitempty"` - - // The private key. By default, this should be the path to - // a PEM file unless format is something else. - PrivateKey string `json:"private_key,omitempty"` - - // The format in which the certificate and private - // key are provided. Default: pem_file - Format string `json:"format,omitempty"` -} - -// Load loads the certificate and key. 
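
The encode and decode helpers above are symmetric: keys go out as type-specific DER under a "... PRIVATE KEY" header and come back in by trying the matching parsers. The round trip for an ECDSA key, with only the standard library (roundTrip is an illustrative name); KeyPair.Load, just below, applies the same decoders to files on disk:

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"log"
	"strings"
)

// roundTrip encodes an ECDSA key the way pemEncodePrivateKey does
// (SEC 1 DER under an "EC PRIVATE KEY" header) and recovers it the way
// pemDecodePrivateKey does, checking the header suffix first.
func roundTrip() error {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		return err
	}
	der, err := x509.MarshalECPrivateKey(key)
	if err != nil {
		return err
	}
	pemBytes := pem.EncodeToMemory(&pem.Block{Type: "EC PRIVATE KEY", Bytes: der})

	block, _ := pem.Decode(pemBytes)
	if block == nil || !strings.HasSuffix(block.Type, " PRIVATE KEY") {
		return fmt.Errorf("unexpected PEM block")
	}
	parsed, err := x509.ParseECPrivateKey(block.Bytes)
	if err != nil {
		return err
	}
	if !parsed.Equal(key) {
		return fmt.Errorf("round trip changed the key")
	}
	return nil
}

func main() {
	if err := roundTrip(); err != nil {
		log.Fatal(err)
	}
	fmt.Println("ok")
}
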
-func (kp KeyPair) Load() (*x509.Certificate, interface{}, error) { - switch kp.Format { - case "", "pem_file": - certData, err := ioutil.ReadFile(kp.Certificate) - if err != nil { - return nil, nil, err - } - keyData, err := ioutil.ReadFile(kp.PrivateKey) - if err != nil { - return nil, nil, err - } - - cert, err := pemDecodeSingleCert(certData) - if err != nil { - return nil, nil, err - } - key, err := pemDecodePrivateKey(keyData) - if err != nil { - return nil, nil, err - } - - return cert, key, nil - - default: - return nil, nil, fmt.Errorf("unsupported format: %s", kp.Format) - } -} diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddypki/maintain.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddypki/maintain.go deleted file mode 100644 index 31e453ff..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/modules/caddypki/maintain.go +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package caddypki - -import ( - "crypto/x509" - "fmt" - "log" - "runtime/debug" - "time" - - "go.uber.org/zap" -) - -func (p *PKI) maintenance() { - defer func() { - if err := recover(); err != nil { - log.Printf("[PANIC] PKI maintenance: %v\n%s", err, debug.Stack()) - } - }() - - ticker := time.NewTicker(10 * time.Minute) // TODO: make configurable - defer ticker.Stop() - - for { - select { - case <-ticker.C: - p.renewCerts() - case <-p.ctx.Done(): - return - } - } -} - -func (p *PKI) renewCerts() { - for _, ca := range p.CAs { - err := p.renewCertsForCA(ca) - if err != nil { - p.log.Error("renewing intermediate certificates", - zap.Error(err), - zap.String("ca", ca.ID)) - } - } -} - -func (p *PKI) renewCertsForCA(ca *CA) error { - ca.mu.Lock() - defer ca.mu.Unlock() - - log := p.log.With(zap.String("ca", ca.ID)) - - // only maintain the root if it's not manually provided in the config - if ca.Root == nil { - if needsRenewal(ca.root) { - // TODO: implement root renewal (use same key) - log.Warn("root certificate expiring soon (FIXME: ROOT RENEWAL NOT YET IMPLEMENTED)", - zap.Duration("time_remaining", time.Until(ca.inter.NotAfter)), - ) - } - } - - // only maintain the intermediate if it's not manually provided in the config - if ca.Intermediate == nil { - if needsRenewal(ca.inter) { - log.Info("intermediate expires soon; renewing", - zap.Duration("time_remaining", time.Until(ca.inter.NotAfter)), - ) - - rootCert, rootKey, err := ca.loadOrGenRoot() - if err != nil { - return fmt.Errorf("loading root key: %v", err) - } - interCert, interKey, err := ca.genIntermediate(rootCert, rootKey) - if err != nil { - return fmt.Errorf("generating new certificate: %v", err) - } - ca.inter, ca.interKey = interCert, interKey - - log.Info("renewed intermediate", - zap.Time("new_expiration", ca.inter.NotAfter), - ) - } - } - - return nil -} - -func needsRenewal(cert *x509.Certificate) bool { - lifetime := cert.NotAfter.Sub(cert.NotBefore) - renewalWindow := time.Duration(float64(lifetime) * renewalWindowRatio) - 
renewalWindowStart := cert.NotAfter.Add(-renewalWindow) - return time.Now().After(renewalWindowStart) -} - -const renewalWindowRatio = 0.2 // TODO: make configurable diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddypki/pki.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddypki/pki.go deleted file mode 100644 index b6f08b18..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/modules/caddypki/pki.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package caddypki - -import ( - "fmt" - - "github.com/caddyserver/caddy/v2" - "go.uber.org/zap" -) - -func init() { - caddy.RegisterModule(PKI{}) -} - -// PKI provides Public Key Infrastructure facilities for Caddy. -type PKI struct { - // The CAs to manage. Each CA is keyed by an ID that is used - // to uniquely identify it from other CAs. The default CA ID - // is "local". - CAs map[string]*CA `json:"certificate_authorities,omitempty"` - - ctx caddy.Context - log *zap.Logger -} - -// CaddyModule returns the Caddy module information. -func (PKI) CaddyModule() caddy.ModuleInfo { - return caddy.ModuleInfo{ - ID: "pki", - New: func() caddy.Module { return new(PKI) }, - } -} - -// Provision sets up the configuration for the PKI app. -func (p *PKI) Provision(ctx caddy.Context) error { - p.ctx = ctx - p.log = ctx.Logger(p) - - // if this app is initialized at all, ensure there's at - // least a default CA that can be used: the standard CA - // which is used implicitly for signing local-use certs - if p.CAs == nil { - p.CAs = make(map[string]*CA) - } - if _, ok := p.CAs[DefaultCAID]; !ok { - p.CAs[DefaultCAID] = new(CA) - } - - for caID, ca := range p.CAs { - err := ca.Provision(ctx, caID, p.log) - if err != nil { - return fmt.Errorf("provisioning CA '%s': %v", caID, err) - } - } - - return nil -} - -// Start starts the PKI app. -func (p *PKI) Start() error { - // install roots to trust store, if not disabled - for _, ca := range p.CAs { - if ca.InstallTrust != nil && !*ca.InstallTrust { - ca.log.Warn("root certificate trust store installation disabled; unconfigured clients may show warnings", - zap.String("path", ca.rootCertPath)) - continue - } - - if err := ca.installRoot(); err != nil { - // could be some system dependencies that are missing; - // shouldn't totally prevent startup, but we should log it - ca.log.Error("failed to install root certificate", - zap.Error(err), - zap.String("certificate_file", ca.rootCertPath)) - } - } - - // see if root/intermediates need renewal... - p.renewCerts() - - // ...and keep them renewed - go p.maintenance() - - return nil -} - -// Stop stops the PKI app. 
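
The renewal policy that Start kicks off above is proportional: needsRenewal (earlier in maintain.go) triggers once less than renewalWindowRatio (0.2) of a certificate's lifetime remains, which for the 7-day default intermediate means the final 33.6 hours. The arithmetic worked through as a sketch; renewalStart is an illustrative name:

package main

import (
	"fmt"
	"time"
)

// renewalStart computes when needsRenewal begins returning true: the
// moment at which only ratio of the lifetime is left before NotAfter.
func renewalStart(notBefore, notAfter time.Time, ratio float64) time.Time {
	lifetime := notAfter.Sub(notBefore)
	window := time.Duration(float64(lifetime) * ratio)
	return notAfter.Add(-window)
}

func main() {
	issued := time.Now()
	expires := issued.Add(24 * time.Hour * 7) // default intermediate lifetime
	start := renewalStart(issued, expires, 0.2)
	fmt.Println(expires.Sub(start)) // 33h36m0s
}
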
-func (p *PKI) Stop() error { - return nil -} - -// Interface guards -var ( - _ caddy.Provisioner = (*PKI)(nil) - _ caddy.App = (*PKI)(nil) -) diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/acmeissuer.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/acmeissuer.go deleted file mode 100644 index b60e560e..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/acmeissuer.go +++ /dev/null @@ -1,541 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package caddytls - -import ( - "context" - "crypto/x509" - "fmt" - "io/ioutil" - "net/url" - "strconv" - "time" - - "github.com/caddyserver/caddy/v2" - "github.com/caddyserver/caddy/v2/caddyconfig" - "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile" - "github.com/caddyserver/certmagic" - "github.com/mholt/acmez" - "github.com/mholt/acmez/acme" - "go.uber.org/zap" -) - -func init() { - caddy.RegisterModule(ACMEIssuer{}) -} - -// ACMEIssuer makes an ACME manager -// for managing certificates using ACME. -// -// TODO: support multiple ACME endpoints (probably -// requires an array of these structs) - caddy would -// also have to load certs from the backup CAs if the -// first one is expired... -type ACMEIssuer struct { - // The URL to the CA's ACME directory endpoint. - CA string `json:"ca,omitempty"` - - // The URL to the test CA's ACME directory endpoint. - // This endpoint is only used during retries if there - // is a failure using the primary CA. - TestCA string `json:"test_ca,omitempty"` - - // Your email address, so the CA can contact you if necessary. - // Not required, but strongly recommended to provide one so - // you can be reached if there is a problem. Your email is - // not sent to any Caddy mothership or used for any purpose - // other than ACME transactions. - Email string `json:"email,omitempty"` - - // If you have an existing account with the ACME server, put - // the private key here in PEM format. The ACME client will - // look up your account information with this key first before - // trying to create a new one. You can use placeholders here, - // for example if you have it in an environment variable. - AccountKey string `json:"account_key,omitempty"` - - // If using an ACME CA that requires an external account - // binding, specify the CA-provided credentials here. - ExternalAccount *acme.EAB `json:"external_account,omitempty"` - - // Time to wait before timing out an ACME operation. - ACMETimeout caddy.Duration `json:"acme_timeout,omitempty"` - - // Configures the various ACME challenge types. - Challenges *ChallengesConfig `json:"challenges,omitempty"` - - // An array of files of CA certificates to accept when connecting to the - // ACME CA. Generally, you should only use this if the ACME CA endpoint - // is internal or for development/testing purposes. 
- TrustedRootsPEMFiles []string `json:"trusted_roots_pem_files,omitempty"` - - // Preferences for selecting alternate certificate chains, if offered - // by the CA. By default, the first offered chain will be selected. - // If configured, the chains may be sorted and the first matching chain - // will be selected. - PreferredChains *ChainPreference `json:"preferred_chains,omitempty"` - - rootPool *x509.CertPool - template certmagic.ACMEManager - magic *certmagic.Config - logger *zap.Logger -} - -// CaddyModule returns the Caddy module information. -func (ACMEIssuer) CaddyModule() caddy.ModuleInfo { - return caddy.ModuleInfo{ - ID: "tls.issuance.acme", - New: func() caddy.Module { return new(ACMEIssuer) }, - } -} - -// Provision sets up iss. -func (iss *ACMEIssuer) Provision(ctx caddy.Context) error { - iss.logger = ctx.Logger(iss) - - repl := caddy.NewReplacer() - - // expand email address, if non-empty - if iss.Email != "" { - email, err := repl.ReplaceOrErr(iss.Email, true, true) - if err != nil { - return fmt.Errorf("expanding email address '%s': %v", iss.Email, err) - } - iss.Email = email - } - - // expand account key, if non-empty - if iss.AccountKey != "" { - accountKey, err := repl.ReplaceOrErr(iss.AccountKey, true, true) - if err != nil { - return fmt.Errorf("expanding account key PEM '%s': %v", iss.AccountKey, err) - } - iss.AccountKey = accountKey - } - - // DNS providers - if iss.Challenges != nil && iss.Challenges.DNS != nil && iss.Challenges.DNS.ProviderRaw != nil { - val, err := ctx.LoadModule(iss.Challenges.DNS, "ProviderRaw") - if err != nil { - return fmt.Errorf("loading DNS provider module: %v", err) - } - - if deprecatedProvider, ok := val.(acmez.Solver); ok { - // TODO: For a temporary amount of time, we are allowing the use of DNS - // providers from go-acme/lego since there are so many providers implemented - // using that API -- they are adapted as an all-in-one Caddy module in this - // repository: https://github.com/caddy-dns/lego-deprecated - the module is a - // acmez.Solver type, so we use it directly. The user must set environment - // variables to configure it. Remove this shim once a sufficient number of - // DNS providers are implemented for the libdns APIs instead. 
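Because Provision above runs `Email` and `AccountKey` through Caddy's replacer, both fields accept placeholders such as environment variables. A small sketch of that expansion; `ACME_EMAIL` is a hypothetical variable name, not something the module defines:

```go
package main

import (
	"fmt"
	"os"

	"github.com/caddyserver/caddy/v2"
)

func main() {
	// Simulate an operator-provided environment variable.
	os.Setenv("ACME_EMAIL", "admin@example.com")

	// caddy.NewReplacer comes preloaded with the global defaults,
	// including {env.*}; ReplaceOrErr errors on empty/unknown keys,
	// matching how Provision expands the email field.
	repl := caddy.NewReplacer()
	email, err := repl.ReplaceOrErr("{env.ACME_EMAIL}", true, true)
	if err != nil {
		panic(err)
	}
	fmt.Println(email) // admin@example.com
}
```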
- iss.Challenges.DNS.solver = deprecatedProvider - } else { - iss.Challenges.DNS.solver = &certmagic.DNS01Solver{ - DNSProvider: val.(certmagic.ACMEDNSProvider), - TTL: time.Duration(iss.Challenges.DNS.TTL), - PropagationTimeout: time.Duration(iss.Challenges.DNS.PropagationTimeout), - Resolvers: iss.Challenges.DNS.Resolvers, - } - } - } - - // add any custom CAs to trust store - if len(iss.TrustedRootsPEMFiles) > 0 { - iss.rootPool = x509.NewCertPool() - for _, pemFile := range iss.TrustedRootsPEMFiles { - pemData, err := ioutil.ReadFile(pemFile) - if err != nil { - return fmt.Errorf("loading trusted root CA's PEM file: %s: %v", pemFile, err) - } - if !iss.rootPool.AppendCertsFromPEM(pemData) { - return fmt.Errorf("unable to add %s to trust pool: %v", pemFile, err) - } - } - } - - var err error - iss.template, err = iss.makeIssuerTemplate() - if err != nil { - return err - } - - return nil -} - -func (iss *ACMEIssuer) makeIssuerTemplate() (certmagic.ACMEManager, error) { - template := certmagic.ACMEManager{ - CA: iss.CA, - TestCA: iss.TestCA, - Email: iss.Email, - AccountKeyPEM: iss.AccountKey, - CertObtainTimeout: time.Duration(iss.ACMETimeout), - TrustedRoots: iss.rootPool, - ExternalAccount: iss.ExternalAccount, - Logger: iss.logger, - } - - if iss.Challenges != nil { - if iss.Challenges.HTTP != nil { - template.DisableHTTPChallenge = iss.Challenges.HTTP.Disabled - template.AltHTTPPort = iss.Challenges.HTTP.AlternatePort - } - if iss.Challenges.TLSALPN != nil { - template.DisableTLSALPNChallenge = iss.Challenges.TLSALPN.Disabled - template.AltTLSALPNPort = iss.Challenges.TLSALPN.AlternatePort - } - if iss.Challenges.DNS != nil { - template.DNS01Solver = iss.Challenges.DNS.solver - } - template.ListenHost = iss.Challenges.BindHost - } - - if iss.PreferredChains != nil { - template.PreferredChains = certmagic.ChainPreference{ - Smallest: iss.PreferredChains.Smallest, - AnyCommonName: iss.PreferredChains.AnyCommonName, - RootCommonName: iss.PreferredChains.RootCommonName, - } - } - - return template, nil -} - -// SetConfig sets the associated certmagic config for this issuer. -// This is required because ACME needs values from the config in -// order to solve the challenges during issuance. This implements -// the ConfigSetter interface. -func (iss *ACMEIssuer) SetConfig(cfg *certmagic.Config) { - iss.magic = cfg -} - -// TODO: I kind of hate how each call to these methods needs to -// make a new ACME manager to fill in defaults before using; can -// we find the right place to do that just once and then re-use? - -// PreCheck implements the certmagic.PreChecker interface. -func (iss *ACMEIssuer) PreCheck(ctx context.Context, names []string, interactive bool) error { - return certmagic.NewACMEManager(iss.magic, iss.template).PreCheck(ctx, names, interactive) -} - -// Issue obtains a certificate for the given csr. -func (iss *ACMEIssuer) Issue(ctx context.Context, csr *x509.CertificateRequest) (*certmagic.IssuedCertificate, error) { - return certmagic.NewACMEManager(iss.magic, iss.template).Issue(ctx, csr) -} - -// IssuerKey returns the unique issuer key for the configured CA endpoint. -func (iss *ACMEIssuer) IssuerKey() string { - return certmagic.NewACMEManager(iss.magic, iss.template).IssuerKey() -} - -// Revoke revokes the given certificate. -func (iss *ACMEIssuer) Revoke(ctx context.Context, cert certmagic.CertificateResource, reason int) error { - return certmagic.NewACMEManager(iss.magic, iss.template).Revoke(ctx, cert, reason) -} - -// GetACMEIssuer returns iss. 
This is useful when other types embed ACMEIssuer, because
-// type-asserting them to *ACMEIssuer will fail, but type-asserting them to an interface
-// with only this method will succeed, and will still allow the embedded ACMEIssuer
-// to be accessed and manipulated.
-func (iss *ACMEIssuer) GetACMEIssuer() *ACMEIssuer { return iss }
-
-// UnmarshalCaddyfile deserializes Caddyfile tokens into iss.
-//
-//     ... acme [<directory>] {
-//         dir <directory>
-//         test_dir <directory>
-//         email <email>
-//         timeout <duration>
-//         disable_http_challenge
-//         disable_tlsalpn_challenge
-//         alt_http_port <port>
-//         alt_tlsalpn_port <port>
-//         eab <key_id> <mac_key>
-//         trusted_roots <pem_files...>
-//         dns <provider_name> [<options>]
-//         resolvers <dns_servers...>
-//         preferred_chains [smallest] {
-//             root_common_name <common_names...>
-//             any_common_name <common_names...>
-//         }
-//     }
-//
-func (iss *ACMEIssuer) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
-	for d.Next() {
-		if d.NextArg() {
-			iss.CA = d.Val()
-			if d.NextArg() {
-				return d.ArgErr()
-			}
-		}
-		for nesting := d.Nesting(); d.NextBlock(nesting); {
-			switch d.Val() {
-			case "dir":
-				if iss.CA != "" {
-					return d.Errf("directory is already specified: %s", iss.CA)
-				}
-				if !d.AllArgs(&iss.CA) {
-					return d.ArgErr()
-				}
-
-			case "test_dir":
-				if !d.AllArgs(&iss.TestCA) {
-					return d.ArgErr()
-				}
-
-			case "email":
-				if !d.AllArgs(&iss.Email) {
-					return d.ArgErr()
-				}
-
-			case "timeout":
-				var timeoutStr string
-				if !d.AllArgs(&timeoutStr) {
-					return d.ArgErr()
-				}
-				timeout, err := caddy.ParseDuration(timeoutStr)
-				if err != nil {
-					return d.Errf("invalid timeout duration %s: %v", timeoutStr, err)
-				}
-				iss.ACMETimeout = caddy.Duration(timeout)
-
-			case "disable_http_challenge":
-				if d.NextArg() {
-					return d.ArgErr()
-				}
-				if iss.Challenges == nil {
-					iss.Challenges = new(ChallengesConfig)
-				}
-				if iss.Challenges.HTTP == nil {
-					iss.Challenges.HTTP = new(HTTPChallengeConfig)
-				}
-				iss.Challenges.HTTP.Disabled = true
-
-			case "disable_tlsalpn_challenge":
-				if d.NextArg() {
-					return d.ArgErr()
-				}
-				if iss.Challenges == nil {
-					iss.Challenges = new(ChallengesConfig)
-				}
-				if iss.Challenges.TLSALPN == nil {
-					iss.Challenges.TLSALPN = new(TLSALPNChallengeConfig)
-				}
-				iss.Challenges.TLSALPN.Disabled = true
-
-			case "alt_http_port":
-				if !d.NextArg() {
-					return d.ArgErr()
-				}
-				port, err := strconv.Atoi(d.Val())
-				if err != nil {
-					return d.Errf("invalid port %s: %v", d.Val(), err)
-				}
-				if iss.Challenges == nil {
-					iss.Challenges = new(ChallengesConfig)
-				}
-				if iss.Challenges.HTTP == nil {
-					iss.Challenges.HTTP = new(HTTPChallengeConfig)
-				}
-				iss.Challenges.HTTP.AlternatePort = port
-
-			case "alt_tlsalpn_port":
-				if !d.NextArg() {
-					return d.ArgErr()
-				}
-				port, err := strconv.Atoi(d.Val())
-				if err != nil {
-					return d.Errf("invalid port %s: %v", d.Val(), err)
-				}
-				if iss.Challenges == nil {
-					iss.Challenges = new(ChallengesConfig)
-				}
-				if iss.Challenges.TLSALPN == nil {
-					iss.Challenges.TLSALPN = new(TLSALPNChallengeConfig)
-				}
-				iss.Challenges.TLSALPN.AlternatePort = port
-
-			case "eab":
-				iss.ExternalAccount = new(acme.EAB)
-				if !d.AllArgs(&iss.ExternalAccount.KeyID, &iss.ExternalAccount.MACKey) {
-					return d.ArgErr()
-				}
-
-			case "trusted_roots":
-				iss.TrustedRootsPEMFiles = d.RemainingArgs()
-
-			case "dns":
-				if !d.NextArg() {
-					return d.ArgErr()
-				}
-				provName := d.Val()
-				if iss.Challenges == nil {
-					iss.Challenges = new(ChallengesConfig)
-				}
-				if iss.Challenges.DNS == nil {
-					iss.Challenges.DNS = new(DNSChallengeConfig)
-				}
-				unm, err := caddyfile.UnmarshalModule(d, "dns.providers."+provName)
-				if err != nil {
-					return err
-				}
-				iss.Challenges.DNS.ProviderRaw = caddyconfig.JSONModuleObject(unm, "name", provName, nil)
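The grammar above can be exercised directly with the caddyfile package's test dispenser. A sketch, assuming the vendored module path; the directive body and values are illustrative:

```go
package main

import (
	"fmt"

	"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
	"github.com/caddyserver/caddy/v2/modules/caddytls"
)

func main() {
	// Tokenize a small `acme` block and feed it through the same
	// UnmarshalCaddyfile path shown above.
	d := caddyfile.NewTestDispenser(`acme {
		email admin@example.com
		disable_http_challenge
		alt_tlsalpn_port 10443
	}`)

	iss := new(caddytls.ACMEIssuer)
	if err := iss.UnmarshalCaddyfile(d); err != nil {
		panic(err)
	}
	fmt.Println(iss.Email)                           // admin@example.com
	fmt.Println(iss.Challenges.HTTP.Disabled)        // true
	fmt.Println(iss.Challenges.TLSALPN.AlternatePort) // 10443
}
```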
- case "propagation_timeout": - if !d.NextArg() { - return d.ArgErr() - } - timeoutStr := d.Val() - timeout, err := caddy.ParseDuration(timeoutStr) - if err != nil { - return d.Errf("invalid propagation_timeout duration %s: %v", timeoutStr, err) - } - if iss.Challenges == nil { - iss.Challenges = new(ChallengesConfig) - } - if iss.Challenges.DNS == nil { - iss.Challenges.DNS = new(DNSChallengeConfig) - } - iss.Challenges.DNS.PropagationTimeout = caddy.Duration(timeout) - - case "resolvers": - if iss.Challenges == nil { - iss.Challenges = new(ChallengesConfig) - } - if iss.Challenges.DNS == nil { - iss.Challenges.DNS = new(DNSChallengeConfig) - } - iss.Challenges.DNS.Resolvers = d.RemainingArgs() - if len(iss.Challenges.DNS.Resolvers) == 0 { - return d.ArgErr() - } - - case "preferred_chains": - chainPref, err := ParseCaddyfilePreferredChainsOptions(d) - if err != nil { - return err - } - iss.PreferredChains = chainPref - - default: - return d.Errf("unrecognized ACME issuer property: %s", d.Val()) - } - } - } - return nil -} - -// onDemandAskRequest makes a request to the ask URL -// to see if a certificate can be obtained for name. -// The certificate request should be denied if this -// returns an error. -func onDemandAskRequest(ask string, name string) error { - askURL, err := url.Parse(ask) - if err != nil { - return fmt.Errorf("parsing ask URL: %v", err) - } - qs := askURL.Query() - qs.Set("domain", name) - askURL.RawQuery = qs.Encode() - - resp, err := onDemandAskClient.Get(askURL.String()) - if err != nil { - return fmt.Errorf("error checking %v to determine if certificate for hostname '%s' should be allowed: %v", - ask, name, err) - } - resp.Body.Close() - - if resp.StatusCode < 200 || resp.StatusCode > 299 { - return fmt.Errorf("certificate for hostname '%s' not allowed; non-2xx status code %d returned from %v", - name, resp.StatusCode, ask) - } - - return nil -} - -func ParseCaddyfilePreferredChainsOptions(d *caddyfile.Dispenser) (*ChainPreference, error) { - chainPref := new(ChainPreference) - if d.NextArg() { - smallestOpt := d.Val() - if smallestOpt == "smallest" { - trueBool := true - chainPref.Smallest = &trueBool - if d.NextArg() { // Only one argument allowed - return nil, d.ArgErr() - } - if d.NextBlock(d.Nesting()) { // Don't allow other options when smallest == true - return nil, d.Err("No more options are accepted when using the 'smallest' option") - } - } else { // Smallest option should always be 'smallest' or unset - return nil, d.Errf("Invalid argument '%s'", smallestOpt) - } - } - for nesting := d.Nesting(); d.NextBlock(nesting); { - switch d.Val() { - case "root_common_name": - rootCommonNameOpt := d.RemainingArgs() - chainPref.RootCommonName = rootCommonNameOpt - if rootCommonNameOpt == nil { - return nil, d.ArgErr() - } - if chainPref.AnyCommonName != nil { - return nil, d.Err("Can't set root_common_name when any_common_name is already set") - } - - case "any_common_name": - anyCommonNameOpt := d.RemainingArgs() - chainPref.AnyCommonName = anyCommonNameOpt - if anyCommonNameOpt == nil { - return nil, d.ArgErr() - } - if chainPref.RootCommonName != nil { - return nil, d.Err("Can't set any_common_name when root_common_name is already set") - } - - default: - return nil, d.Errf("Received unrecognized parameter '%s'", d.Val()) - } - } - - if chainPref.Smallest == nil && chainPref.RootCommonName == nil && chainPref.AnyCommonName == nil { - return nil, d.Err("No options for preferred_chains received") - } - - return chainPref, nil -} - -// ChainPreference describes 
the client's preferred certificate chain, -// useful if the CA offers alternate chains. The first matching chain -// will be selected. -type ChainPreference struct { - // Prefer chains with the fewest number of bytes. - Smallest *bool `json:"smallest,omitempty"` - - // Select first chain having a root with one of - // these common names. - RootCommonName []string `json:"root_common_name,omitempty"` - - // Select first chain that has any issuer with one - // of these common names. - AnyCommonName []string `json:"any_common_name,omitempty"` -} - -// Interface guards -var ( - _ certmagic.PreChecker = (*ACMEIssuer)(nil) - _ certmagic.Issuer = (*ACMEIssuer)(nil) - _ certmagic.Revoker = (*ACMEIssuer)(nil) - _ caddy.Provisioner = (*ACMEIssuer)(nil) - _ ConfigSetter = (*ACMEIssuer)(nil) - _ caddyfile.Unmarshaler = (*ACMEIssuer)(nil) -) diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/automation.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/automation.go deleted file mode 100644 index c4a90a84..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/automation.go +++ /dev/null @@ -1,399 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package caddytls - -import ( - "encoding/json" - "fmt" - "net/http" - "time" - - "github.com/caddyserver/caddy/v2" - "github.com/caddyserver/certmagic" - "github.com/mholt/acmez" -) - -// AutomationConfig governs the automated management of TLS certificates. -type AutomationConfig struct { - // The list of automation policies. The first matching - // policy will be applied for a given certificate/name. - Policies []*AutomationPolicy `json:"policies,omitempty"` - - // On-Demand TLS defers certificate operations to the - // moment they are needed, e.g. during a TLS handshake. - // Useful when you don't know all the hostnames at - // config-time, or when you are not in control of the - // domain names you are managing certificates for. - // In 2015, Caddy became the first web server to - // implement this experimental technology. - // - // Note that this field does not enable on-demand TLS, - // it only configures it for when it is used. To enable - // it, create an automation policy with `on_demand`. - OnDemand *OnDemandConfig `json:"on_demand,omitempty"` - - // Caddy staples OCSP (and caches the response) for all - // qualifying certificates by default. This setting - // changes how often it scans responses for freshness, - // and updates them if they are getting stale. Default: 1h - OCSPCheckInterval caddy.Duration `json:"ocsp_interval,omitempty"` - - // Every so often, Caddy will scan all loaded, managed - // certificates for expiration. This setting changes how - // frequently the scan for expiring certificates is - // performed. Default: 10m - RenewCheckInterval caddy.Duration `json:"renew_interval,omitempty"` - - // How often to scan storage units for old or expired - // assets and remove them. 
These scans exert lots of
-	// reads (and list operations) on the storage module, so
-	// choose a longer interval for large deployments.
-	// Default: 24h
-	//
-	// Storage will always be cleaned when the process first
-	// starts. Then, a new cleaning will be started this
-	// duration after the previous cleaning started if the
-	// previous cleaning finished in less than half the time
-	// of this interval (otherwise next start will be skipped).
-	StorageCleanInterval caddy.Duration `json:"storage_clean_interval,omitempty"`
-
-	defaultPublicAutomationPolicy   *AutomationPolicy
-	defaultInternalAutomationPolicy *AutomationPolicy // only initialized if necessary
-}
-
-// AutomationPolicy designates the policy for automating the
-// management (obtaining, renewal, and revocation) of managed
-// TLS certificates.
-//
-// An AutomationPolicy value is not valid until it has been
-// provisioned; use the `AddAutomationPolicy()` method on the
-// TLS app to properly provision a new policy.
-type AutomationPolicy struct {
-	// Which subjects (hostnames or IP addresses) this policy applies to.
-	Subjects []string `json:"subjects,omitempty"`
-
-	// The modules that may issue certificates. Default: internal if all
-	// subjects do not qualify for public certificates; otherwise acme and
-	// zerossl.
-	IssuersRaw []json.RawMessage `json:"issuers,omitempty" caddy:"namespace=tls.issuance inline_key=module"`
-
-	// If true, certificates will be requested with MustStaple. Not all
-	// CAs support this, and there are potentially serious consequences
-	// of enabling this feature without proper threat modeling.
-	MustStaple bool `json:"must_staple,omitempty"`
-
-	// How long before a certificate's expiration to try renewing it,
-	// as a function of its total lifetime. As a general and conservative
-	// rule, it is a good idea to renew a certificate when it has about
-	// 1/3 of its total lifetime remaining. This utilizes the majority
-	// of the certificate's lifetime while still saving time to
-	// troubleshoot problems. However, for extremely short-lived certs,
-	// you may wish to increase the ratio to ~1/2.
-	RenewalWindowRatio float64 `json:"renewal_window_ratio,omitempty"`
-
-	// The type of key to generate for certificates.
-	// Supported values: `ed25519`, `p256`, `p384`, `rsa2048`, `rsa4096`.
-	KeyType string `json:"key_type,omitempty"`
-
-	// Optionally configure a separate storage module associated with this
-	// manager, instead of using Caddy's global/default-configured storage.
-	StorageRaw json.RawMessage `json:"storage,omitempty" caddy:"namespace=caddy.storage inline_key=module"`
-
-	// If true, certificates will be managed "on demand"; that is, during
-	// TLS handshakes or when needed, as opposed to at startup or config
-	// load.
-	OnDemand bool `json:"on_demand,omitempty"`
-
-	// Disables OCSP stapling. Disabling OCSP stapling puts clients at
-	// greater risk, reduces their privacy, and usually lowers client
-	// performance. It is NOT recommended to disable this unless you
-	// are able to justify the costs.
-	// EXPERIMENTAL. Subject to change.
-	DisableOCSPStapling bool `json:"disable_ocsp_stapling,omitempty"`
-
-	// Overrides the URLs of OCSP responders embedded in certificates.
-	// Each key is an OCSP server URL to override, and its value is the
-	// replacement. An empty value will disable querying of that server.
-	// EXPERIMENTAL. Subject to change.
-	OCSPOverrides map[string]string `json:"ocsp_overrides,omitempty"`
-
-	// Issuers stores the decoded issuer parameters.
This is only - // used to populate an underlying certmagic.Config's Issuers - // field; it is not referenced thereafter. - Issuers []certmagic.Issuer `json:"-"` - - magic *certmagic.Config - storage certmagic.Storage -} - -// Provision sets up ap and builds its underlying CertMagic config. -func (ap *AutomationPolicy) Provision(tlsApp *TLS) error { - // policy-specific storage implementation - if ap.StorageRaw != nil { - val, err := tlsApp.ctx.LoadModule(ap, "StorageRaw") - if err != nil { - return fmt.Errorf("loading TLS storage module: %v", err) - } - cmStorage, err := val.(caddy.StorageConverter).CertMagicStorage() - if err != nil { - return fmt.Errorf("creating TLS storage configuration: %v", err) - } - ap.storage = cmStorage - } - - var ond *certmagic.OnDemandConfig - if ap.OnDemand { - ond = &certmagic.OnDemandConfig{ - DecisionFunc: func(name string) error { - // if an "ask" endpoint was defined, consult it first - if tlsApp.Automation != nil && - tlsApp.Automation.OnDemand != nil && - tlsApp.Automation.OnDemand.Ask != "" { - err := onDemandAskRequest(tlsApp.Automation.OnDemand.Ask, name) - if err != nil { - return err - } - } - // check the rate limiter last because - // doing so makes a reservation - if !onDemandRateLimiter.Allow() { - return fmt.Errorf("on-demand rate limit exceeded") - } - return nil - }, - } - } - - // load and provision any explicitly-configured issuer modules - if ap.IssuersRaw != nil { - val, err := tlsApp.ctx.LoadModule(ap, "IssuersRaw") - if err != nil { - return fmt.Errorf("loading TLS automation management module: %s", err) - } - for _, issVal := range val.([]interface{}) { - ap.Issuers = append(ap.Issuers, issVal.(certmagic.Issuer)) - } - } - - issuers := ap.Issuers - if len(issuers) == 0 { - var err error - issuers, err = DefaultIssuersProvisioned(tlsApp.ctx) - if err != nil { - return err - } - } - - keyType := ap.KeyType - if keyType != "" { - var err error - keyType, err = caddy.NewReplacer().ReplaceOrErr(ap.KeyType, true, true) - if err != nil { - return fmt.Errorf("invalid key type %s: %s", ap.KeyType, err) - } - if _, ok := supportedCertKeyTypes[keyType]; !ok { - return fmt.Errorf("unrecognized key type: %s", keyType) - } - } - keySource := certmagic.StandardKeyGenerator{ - KeyType: supportedCertKeyTypes[keyType], - } - - storage := ap.storage - if storage == nil { - storage = tlsApp.ctx.Storage() - } - - template := certmagic.Config{ - MustStaple: ap.MustStaple, - RenewalWindowRatio: ap.RenewalWindowRatio, - KeySource: keySource, - OnDemand: ond, - OCSP: certmagic.OCSPConfig{ - DisableStapling: ap.DisableOCSPStapling, - ResponderOverrides: ap.OCSPOverrides, - }, - Storage: storage, - Issuers: issuers, - Logger: tlsApp.logger, - } - ap.magic = certmagic.New(tlsApp.certCache, template) - - // sometimes issuers may need the parent certmagic.Config in - // order to function properly (for example, ACMEIssuer needs - // access to the correct storage and cache so it can solve - // ACME challenges -- it's an annoying, inelegant circular - // dependency that I don't know how to resolve nicely!) - for _, issuer := range ap.magic.Issuers { - if annoying, ok := issuer.(ConfigSetter); ok { - annoying.SetConfig(ap.magic) - } - } - - return nil -} - -// DefaultIssuers returns empty Issuers (not provisioned) to be used as defaults. -// This function is experimental and has no compatibility promises. 
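The on-demand `DecisionFunc` assembled in `Provision` above composes an optional "ask" endpoint check with a reservation-making rate limiter. A standalone sketch of that shape against the same certmagic APIs used by the deleted code; the limit values are illustrative:

```go
package main

import (
	"fmt"
	"time"

	"github.com/caddyserver/certmagic"
)

func main() {
	// Reservation-style limiter: at most 10 new certificates per
	// minute across all handshakes (values chosen for illustration).
	limiter := certmagic.NewRateLimiter(10, time.Minute)

	decision := func(name string) error {
		// An "ask" endpoint check would run here first, as in
		// AutomationPolicy.Provision above; check the limiter
		// last because Allow() makes a reservation.
		if !limiter.Allow() {
			return fmt.Errorf("on-demand rate limit exceeded")
		}
		return nil
	}

	cfg := certmagic.Config{
		OnDemand: &certmagic.OnDemandConfig{DecisionFunc: decision},
	}
	_ = cfg // would be passed to certmagic.New with a cache
}
```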
-func DefaultIssuers() []certmagic.Issuer { - return []certmagic.Issuer{ - new(ACMEIssuer), - &ZeroSSLIssuer{ACMEIssuer: new(ACMEIssuer)}, - } -} - -// DefaultIssuersProvisioned returns empty but provisioned default Issuers from -// DefaultIssuers(). This function is experimental and has no compatibility promises. -func DefaultIssuersProvisioned(ctx caddy.Context) ([]certmagic.Issuer, error) { - issuers := DefaultIssuers() - for i, iss := range issuers { - if prov, ok := iss.(caddy.Provisioner); ok { - err := prov.Provision(ctx) - if err != nil { - return nil, fmt.Errorf("provisioning default issuer %d: %T: %v", i, iss, err) - } - } - } - return issuers, nil -} - -// ChallengesConfig configures the ACME challenges. -type ChallengesConfig struct { - // HTTP configures the ACME HTTP challenge. This - // challenge is enabled and used automatically - // and by default. - HTTP *HTTPChallengeConfig `json:"http,omitempty"` - - // TLSALPN configures the ACME TLS-ALPN challenge. - // This challenge is enabled and used automatically - // and by default. - TLSALPN *TLSALPNChallengeConfig `json:"tls-alpn,omitempty"` - - // Configures the ACME DNS challenge. Because this - // challenge typically requires credentials for - // interfacing with a DNS provider, this challenge is - // not enabled by default. This is the only challenge - // type which does not require a direct connection - // to Caddy from an external server. - // - // NOTE: DNS providers are currently being upgraded, - // and this API is subject to change, but should be - // stabilized soon. - DNS *DNSChallengeConfig `json:"dns,omitempty"` - - // Optionally customize the host to which a listener - // is bound if required for solving a challenge. - BindHost string `json:"bind_host,omitempty"` -} - -// HTTPChallengeConfig configures the ACME HTTP challenge. -type HTTPChallengeConfig struct { - // If true, the HTTP challenge will be disabled. - Disabled bool `json:"disabled,omitempty"` - - // An alternate port on which to service this - // challenge. Note that the HTTP challenge port is - // hard-coded into the spec and cannot be changed, - // so you would have to forward packets from the - // standard HTTP challenge port to this one. - AlternatePort int `json:"alternate_port,omitempty"` -} - -// TLSALPNChallengeConfig configures the ACME TLS-ALPN challenge. -type TLSALPNChallengeConfig struct { - // If true, the TLS-ALPN challenge will be disabled. - Disabled bool `json:"disabled,omitempty"` - - // An alternate port on which to service this - // challenge. Note that the TLS-ALPN challenge port - // is hard-coded into the spec and cannot be changed, - // so you would have to forward packets from the - // standard TLS-ALPN challenge port to this one. - AlternatePort int `json:"alternate_port,omitempty"` -} - -// DNSChallengeConfig configures the ACME DNS challenge. -// -// NOTE: This API is still experimental and is subject to change. -type DNSChallengeConfig struct { - // The DNS provider module to use which will manage - // the DNS records relevant to the ACME challenge. - ProviderRaw json.RawMessage `json:"provider,omitempty" caddy:"namespace=dns.providers inline_key=name"` - - // The TTL of the TXT record used for the DNS challenge. - TTL caddy.Duration `json:"ttl,omitempty"` - - // How long to wait for DNS record to propagate. - PropagationTimeout caddy.Duration `json:"propagation_timeout,omitempty"` - - // Custom DNS resolvers to prefer over system/built-in defaults. - // Often necessary to configure when using split-horizon DNS. 
- Resolvers []string `json:"resolvers,omitempty"` - - solver acmez.Solver -} - -// OnDemandConfig configures on-demand TLS, for obtaining -// needed certificates at handshake-time. Because this -// feature can easily be abused, you should use this to -// establish rate limits and/or an internal endpoint that -// Caddy can "ask" if it should be allowed to manage -// certificates for a given hostname. -type OnDemandConfig struct { - // An optional rate limit to throttle the - // issuance of certificates from handshakes. - RateLimit *RateLimit `json:"rate_limit,omitempty"` - - // If Caddy needs to obtain or renew a certificate - // during a TLS handshake, it will perform a quick - // HTTP request to this URL to check if it should be - // allowed to try to get a certificate for the name - // in the "domain" query string parameter, like so: - // `?domain=example.com`. The endpoint must return a - // 200 OK status if a certificate is allowed; - // anything else will cause it to be denied. - // Redirects are not followed. - Ask string `json:"ask,omitempty"` -} - -// RateLimit specifies an interval with optional burst size. -type RateLimit struct { - // A duration value. A certificate may be obtained 'burst' - // times during this interval. - Interval caddy.Duration `json:"interval,omitempty"` - - // How many times during an interval a certificate can be obtained. - Burst int `json:"burst,omitempty"` -} - -// ConfigSetter is implemented by certmagic.Issuers that -// need access to a parent certmagic.Config as part of -// their provisioning phase. For example, the ACMEIssuer -// requires a config so it can access storage and the -// cache to solve ACME challenges. -type ConfigSetter interface { - SetConfig(cfg *certmagic.Config) -} - -// These perpetual values are used for on-demand TLS. -var ( - onDemandRateLimiter = certmagic.NewRateLimiter(0, 0) - onDemandAskClient = &http.Client{ - Timeout: 10 * time.Second, - CheckRedirect: func(req *http.Request, via []*http.Request) error { - return fmt.Errorf("following http redirects is not allowed") - }, - } -) diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/certselection.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/certselection.go deleted file mode 100644 index 0311f116..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/certselection.go +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package caddytls - -import ( - "crypto/tls" - "crypto/x509" - "encoding/json" - "fmt" - "math/big" - - "github.com/caddyserver/certmagic" -) - -// CustomCertSelectionPolicy represents a policy for selecting the certificate -// used to complete a handshake when there may be multiple options. All fields -// specified must match the candidate certificate for it to be chosen. -// This was needed to solve https://github.com/caddyserver/caddy/issues/2588. 
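The `ask` contract described above is simply "2xx means allowed, anything else denies, with the hostname in the `domain` query parameter". A minimal sketch of a compatible server; the allowlist, path, and listen address are illustrative:

```go
package main

import (
	"log"
	"net/http"
)

func main() {
	// Domains this deployment is willing to obtain certificates for.
	allowed := map[string]bool{"example.com": true, "www.example.com": true}

	// Caddy issues a GET like /check?domain=example.com; return 200
	// to permit issuance and a non-2xx status to deny it.
	http.HandleFunc("/check", func(w http.ResponseWriter, r *http.Request) {
		if allowed[r.URL.Query().Get("domain")] {
			w.WriteHeader(http.StatusOK)
			return
		}
		http.Error(w, "domain not allowed", http.StatusForbidden)
	})

	log.Fatal(http.ListenAndServe("127.0.0.1:9123", nil))
}
```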
-type CustomCertSelectionPolicy struct { - // The certificate must have one of these serial numbers. - SerialNumber []bigInt `json:"serial_number,omitempty"` - - // The certificate must have one of these organization names. - SubjectOrganization []string `json:"subject_organization,omitempty"` - - // The certificate must use this public key algorithm. - PublicKeyAlgorithm PublicKeyAlgorithm `json:"public_key_algorithm,omitempty"` - - // The certificate must have at least one of the tags in the list. - AnyTag []string `json:"any_tag,omitempty"` - - // The certificate must have all of the tags in the list. - AllTags []string `json:"all_tags,omitempty"` -} - -// SelectCertificate implements certmagic.CertificateSelector. It -// only chooses a certificate that at least meets the criteria in -// p. It then chooses the first non-expired certificate that is -// compatible with the client. If none are valid, it chooses the -// first viable candidate anyway. -func (p CustomCertSelectionPolicy) SelectCertificate(hello *tls.ClientHelloInfo, choices []certmagic.Certificate) (certmagic.Certificate, error) { - viable := make([]certmagic.Certificate, 0, len(choices)) - -nextChoice: - for _, cert := range choices { - if len(p.SerialNumber) > 0 { - var found bool - for _, sn := range p.SerialNumber { - if cert.Leaf.SerialNumber.Cmp(&sn.Int) == 0 { - found = true - break - } - } - if !found { - continue - } - } - - if len(p.SubjectOrganization) > 0 { - var found bool - for _, subjOrg := range p.SubjectOrganization { - for _, org := range cert.Leaf.Subject.Organization { - if subjOrg == org { - found = true - break - } - } - } - if !found { - continue - } - } - - if p.PublicKeyAlgorithm != PublicKeyAlgorithm(x509.UnknownPublicKeyAlgorithm) && - PublicKeyAlgorithm(cert.Leaf.PublicKeyAlgorithm) != p.PublicKeyAlgorithm { - continue - } - - if len(p.AnyTag) > 0 { - var found bool - for _, tag := range p.AnyTag { - if cert.HasTag(tag) { - found = true - break - } - } - if !found { - continue - } - } - - if len(p.AllTags) > 0 { - for _, tag := range p.AllTags { - if !cert.HasTag(tag) { - continue nextChoice - } - } - } - - // this certificate at least meets the policy's requirements, - // but we still have to check expiration and compatibility - viable = append(viable, cert) - } - - if len(viable) == 0 { - return certmagic.Certificate{}, fmt.Errorf("no certificates matched custom selection policy") - } - - return certmagic.DefaultCertificateSelector(hello, viable) -} - -// bigInt is a big.Int type that interops with JSON encodings as a string. -type bigInt struct{ big.Int } - -func (bi bigInt) MarshalJSON() ([]byte, error) { - return json.Marshal(bi.String()) -} - -func (bi *bigInt) UnmarshalJSON(p []byte) error { - if string(p) == "null" { - return nil - } - var stringRep string - err := json.Unmarshal(p, &stringRep) - if err != nil { - return err - } - _, ok := bi.SetString(stringRep, 10) - if !ok { - return fmt.Errorf("not a valid big integer: %s", p) - } - return nil -} diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/connpolicy.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/connpolicy.go deleted file mode 100644 index 6c7fe3f4..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/connpolicy.go +++ /dev/null @@ -1,484 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package caddytls - -import ( - "crypto/tls" - "crypto/x509" - "encoding/base64" - "fmt" - "io/ioutil" - "strings" - - "github.com/caddyserver/caddy/v2" - "github.com/mholt/acmez" -) - -// ConnectionPolicies govern the establishment of TLS connections. It is -// an ordered group of connection policies; the first matching policy will -// be used to configure TLS connections at handshake-time. -type ConnectionPolicies []*ConnectionPolicy - -// Provision sets up each connection policy. It should be called -// during the Validate() phase, after the TLS app (if any) is -// already set up. -func (cp ConnectionPolicies) Provision(ctx caddy.Context) error { - for i, pol := range cp { - // matchers - mods, err := ctx.LoadModule(pol, "MatchersRaw") - if err != nil { - return fmt.Errorf("loading handshake matchers: %v", err) - } - for _, modIface := range mods.(map[string]interface{}) { - cp[i].matchers = append(cp[i].matchers, modIface.(ConnectionMatcher)) - } - - // enable HTTP/2 by default - if len(pol.ALPN) == 0 { - pol.ALPN = append(pol.ALPN, defaultALPN...) - } - - // pre-build standard TLS config so we don't have to at handshake-time - err = pol.buildStandardTLSConfig(ctx) - if err != nil { - return fmt.Errorf("connection policy %d: building standard TLS config: %s", i, err) - } - } - - return nil -} - -// TLSConfig returns a standard-lib-compatible TLS configuration which -// selects the first matching policy based on the ClientHello. -func (cp ConnectionPolicies) TLSConfig(ctx caddy.Context) *tls.Config { - // using ServerName to match policies is extremely common, especially in configs - // with lots and lots of different policies; we can fast-track those by indexing - // them by SNI, so we don't have to iterate potentially thousands of policies - // (TODO: this map does not account for wildcards, see if this is a problem in practice? look for reports of high connection latency with wildcard certs but low latency for non-wildcards in multi-thousand-cert deployments) - indexedBySNI := make(map[string]ConnectionPolicies) - if len(cp) > 30 { - for _, p := range cp { - for _, m := range p.matchers { - if sni, ok := m.(MatchServerName); ok { - for _, sniName := range sni { - indexedBySNI[sniName] = append(indexedBySNI[sniName], p) - } - } - } - } - } - - return &tls.Config{ - MinVersion: tls.VersionTLS12, - GetConfigForClient: func(hello *tls.ClientHelloInfo) (*tls.Config, error) { - // filter policies by SNI first, if possible, to speed things up - // when there may be lots of policies - possiblePolicies := cp - if indexedPolicies, ok := indexedBySNI[hello.ServerName]; ok { - possiblePolicies = indexedPolicies - } - - policyLoop: - for _, pol := range possiblePolicies { - for _, matcher := range pol.matchers { - if !matcher.Match(hello) { - continue policyLoop - } - } - return pol.stdTLSConfig, nil - } - - return nil, fmt.Errorf("no server TLS configuration available for ClientHello: %+v", hello) - }, - } -} - -// ConnectionPolicy specifies the logic for handling a TLS handshake. -// An empty policy is valid; safe and sensible defaults will be used. 
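`TLSConfig` above dispatches each ClientHello to the first policy whose matchers all accept it, with an SNI index as a fast path for large policy sets. A toy sketch of that first-match dispatch, with illustrative hostnames and configs:

```go
package main

import (
	"crypto/tls"
	"fmt"
)

func main() {
	// Ordered policies: the first whose matcher accepts the
	// ClientHello wins, mirroring the policyLoop above.
	type policy struct {
		match func(*tls.ClientHelloInfo) bool
		cfg   *tls.Config
	}
	ordered := []policy{
		{func(h *tls.ClientHelloInfo) bool { return h.ServerName == "app.test" },
			&tls.Config{MinVersion: tls.VersionTLS13}},
		{func(h *tls.ClientHelloInfo) bool { return true }, // catch-all
			&tls.Config{MinVersion: tls.VersionTLS12}},
	}

	getConfig := func(hello *tls.ClientHelloInfo) (*tls.Config, error) {
		for _, p := range ordered {
			if p.match(hello) {
				return p.cfg, nil
			}
		}
		return nil, fmt.Errorf("no policy matched")
	}

	cfg, _ := getConfig(&tls.ClientHelloInfo{ServerName: "app.test"})
	fmt.Println(cfg.MinVersion == tls.VersionTLS13) // true
}
```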
-type ConnectionPolicy struct { - // How to match this policy with a TLS ClientHello. If - // this policy is the first to match, it will be used. - MatchersRaw caddy.ModuleMap `json:"match,omitempty" caddy:"namespace=tls.handshake_match"` - - // How to choose a certificate if more than one matched - // the given ServerName (SNI) value. - CertSelection *CustomCertSelectionPolicy `json:"certificate_selection,omitempty"` - - // The list of cipher suites to support. Caddy's - // defaults are modern and secure. - CipherSuites []string `json:"cipher_suites,omitempty"` - - // The list of elliptic curves to support. Caddy's - // defaults are modern and secure. - Curves []string `json:"curves,omitempty"` - - // Protocols to use for Application-Layer Protocol - // Negotiation (ALPN) during the handshake. - ALPN []string `json:"alpn,omitempty"` - - // Minimum TLS protocol version to allow. Default: `tls1.2` - ProtocolMin string `json:"protocol_min,omitempty"` - - // Maximum TLS protocol version to allow. Default: `tls1.3` - ProtocolMax string `json:"protocol_max,omitempty"` - - // Enables and configures TLS client authentication. - ClientAuthentication *ClientAuthentication `json:"client_authentication,omitempty"` - - // DefaultSNI becomes the ServerName in a ClientHello if there - // is no policy configured for the empty SNI value. - DefaultSNI string `json:"default_sni,omitempty"` - - matchers []ConnectionMatcher - stdTLSConfig *tls.Config -} - -func (p *ConnectionPolicy) buildStandardTLSConfig(ctx caddy.Context) error { - tlsAppIface, err := ctx.App("tls") - if err != nil { - return fmt.Errorf("getting tls app: %v", err) - } - tlsApp := tlsAppIface.(*TLS) - - // fill in some "easy" default values, but for other values - // (such as slices), we should ensure that they start empty - // so the user-provided config can fill them in; then we will - // fill in a default config at the end if they are still unset - cfg := &tls.Config{ - NextProtos: p.ALPN, - PreferServerCipherSuites: true, - GetCertificate: func(hello *tls.ClientHelloInfo) (*tls.Certificate, error) { - // TODO: I don't love how this works: we pre-build certmagic configs - // so that handshakes are faster. Unfortunately, certmagic configs are - // comprised of settings from both a TLS connection policy and a TLS - // automation policy. The only two fields (as of March 2020; v2 beta 17) - // of a certmagic config that come from the TLS connection policy are - // CertSelection and DefaultServerName, so an automation policy is what - // builds the base certmagic config. Since the pre-built config is - // shared, I don't think we can change any of its fields per-handshake, - // hence the awkward shallow copy (dereference) here and the subsequent - // changing of some of its fields. I'm worried this dereference allocates - // more at handshake-time, but I don't know how to practically pre-build - // a certmagic config for each combination of conn policy + automation policy... 
- cfg := *tlsApp.getConfigForName(hello.ServerName) - if p.CertSelection != nil { - // you would think we could just set this whether or not - // p.CertSelection is nil, but that leads to panics if - // it is, because cfg.CertSelection is an interface, - // so it will have a non-nil value even if the actual - // value underlying it is nil (sigh) - cfg.CertSelection = p.CertSelection - } - cfg.DefaultServerName = p.DefaultSNI - return cfg.GetCertificate(hello) - }, - MinVersion: tls.VersionTLS12, - MaxVersion: tls.VersionTLS13, - } - - // session tickets support - if tlsApp.SessionTickets != nil { - cfg.SessionTicketsDisabled = tlsApp.SessionTickets.Disabled - - // session ticket key rotation - tlsApp.SessionTickets.register(cfg) - ctx.OnCancel(func() { - // do cleanup when the context is canceled because, - // though unlikely, it is possible that a context - // needing a TLS server config could exist for less - // than the lifetime of the whole app - tlsApp.SessionTickets.unregister(cfg) - }) - } - - // TODO: Clean up session ticket active locks in storage if app (or process) is being closed! - - // add all the cipher suites in order, without duplicates - cipherSuitesAdded := make(map[uint16]struct{}) - for _, csName := range p.CipherSuites { - csID := CipherSuiteID(csName) - if csID == 0 { - return fmt.Errorf("unsupported cipher suite: %s", csName) - } - if _, ok := cipherSuitesAdded[csID]; !ok { - cipherSuitesAdded[csID] = struct{}{} - cfg.CipherSuites = append(cfg.CipherSuites, csID) - } - } - - // add all the curve preferences in order, without duplicates - curvesAdded := make(map[tls.CurveID]struct{}) - for _, curveName := range p.Curves { - curveID := SupportedCurves[curveName] - if _, ok := curvesAdded[curveID]; !ok { - curvesAdded[curveID] = struct{}{} - cfg.CurvePreferences = append(cfg.CurvePreferences, curveID) - } - } - - // ensure ALPN includes the ACME TLS-ALPN protocol - var alpnFound bool - for _, a := range p.ALPN { - if a == acmez.ACMETLS1Protocol { - alpnFound = true - break - } - } - if !alpnFound { - cfg.NextProtos = append(cfg.NextProtos, acmez.ACMETLS1Protocol) - } - - // min and max protocol versions - if (p.ProtocolMin != "" && p.ProtocolMax != "") && p.ProtocolMin > p.ProtocolMax { - return fmt.Errorf("protocol min (%x) cannot be greater than protocol max (%x)", p.ProtocolMin, p.ProtocolMax) - } - if p.ProtocolMin != "" { - cfg.MinVersion = SupportedProtocols[p.ProtocolMin] - } - if p.ProtocolMax != "" { - cfg.MaxVersion = SupportedProtocols[p.ProtocolMax] - } - - // client authentication - if p.ClientAuthentication != nil { - err := p.ClientAuthentication.ConfigureTLSConfig(cfg) - if err != nil { - return fmt.Errorf("configuring TLS client authentication: %v", err) - } - } - - setDefaultTLSParams(cfg) - - p.stdTLSConfig = cfg - - return nil -} - -// SettingsEmpty returns true if p's settings (fields -// except the matchers) are all empty/unset. -func (p ConnectionPolicy) SettingsEmpty() bool { - return p.CertSelection == nil && - p.CipherSuites == nil && - p.Curves == nil && - p.ALPN == nil && - p.ProtocolMin == "" && - p.ProtocolMax == "" && - p.ClientAuthentication == nil && - p.DefaultSNI == "" -} - -// ClientAuthentication configures TLS client auth. -type ClientAuthentication struct { - // A list of base64 DER-encoded CA certificates - // against which to validate client certificates. - // Client certs which are not signed by any of - // these CAs will be rejected. 
- TrustedCACerts []string `json:"trusted_ca_certs,omitempty"` - - // TrustedCACertPEMFiles is a list of PEM file names - // from which to load certificates of trusted CAs. - // Client certificates which are not signed by any of - // these CA certificates will be rejected. - TrustedCACertPEMFiles []string `json:"trusted_ca_certs_pem_files,omitempty"` - - // A list of base64 DER-encoded client leaf certs - // to accept. If this list is not empty, client certs - // which are not in this list will be rejected. - TrustedLeafCerts []string `json:"trusted_leaf_certs,omitempty"` - - // The mode for authenticating the client. Allowed values are: - // - // Mode | Description - // -----|--------------- - // `request` | Ask clients for a certificate, but allow even if there isn't one; do not verify it - // `require` | Require clients to present a certificate, but do not verify it - // `verify_if_given` | Ask clients for a certificate; allow even if there isn't one, but verify it if there is - // `require_and_verify` | Require clients to present a valid certificate that is verified - // - // The default mode is `require_and_verify` if any - // TrustedCACerts or TrustedCACertPEMFiles or TrustedLeafCerts - // are provided; otherwise, the default mode is `require`. - Mode string `json:"mode,omitempty"` - - // state established with the last call to ConfigureTLSConfig - trustedLeafCerts []*x509.Certificate - existingVerifyPeerCert func([][]byte, [][]*x509.Certificate) error -} - -// Active returns true if clientauth has an actionable configuration. -func (clientauth ClientAuthentication) Active() bool { - return len(clientauth.TrustedCACerts) > 0 || - len(clientauth.TrustedCACertPEMFiles) > 0 || - len(clientauth.TrustedLeafCerts) > 0 || - len(clientauth.Mode) > 0 -} - -// ConfigureTLSConfig sets up cfg to enforce clientauth's configuration. 
-func (clientauth *ClientAuthentication) ConfigureTLSConfig(cfg *tls.Config) error { - // if there's no actionable client auth, simply disable it - if !clientauth.Active() { - cfg.ClientAuth = tls.NoClientCert - return nil - } - - // enforce desired mode of client authentication - if len(clientauth.Mode) > 0 { - switch clientauth.Mode { - case "request": - cfg.ClientAuth = tls.RequestClientCert - case "require": - cfg.ClientAuth = tls.RequireAnyClientCert - case "verify_if_given": - cfg.ClientAuth = tls.VerifyClientCertIfGiven - case "require_and_verify": - cfg.ClientAuth = tls.RequireAndVerifyClientCert - default: - return fmt.Errorf("client auth mode not recognized: %s", clientauth.Mode) - } - } else { - // otherwise, set a safe default mode - if len(clientauth.TrustedCACerts) > 0 || - len(clientauth.TrustedCACertPEMFiles) > 0 || - len(clientauth.TrustedLeafCerts) > 0 { - cfg.ClientAuth = tls.RequireAndVerifyClientCert - } else { - cfg.ClientAuth = tls.RequireAnyClientCert - } - } - - // enforce CA verification by adding CA certs to the ClientCAs pool - if len(clientauth.TrustedCACerts) > 0 || len(clientauth.TrustedCACertPEMFiles) > 0 { - caPool := x509.NewCertPool() - for _, clientCAString := range clientauth.TrustedCACerts { - clientCA, err := decodeBase64DERCert(clientCAString) - if err != nil { - return fmt.Errorf("parsing certificate: %v", err) - } - caPool.AddCert(clientCA) - } - for _, pemFile := range clientauth.TrustedCACertPEMFiles { - pemContents, err := ioutil.ReadFile(pemFile) - if err != nil { - return fmt.Errorf("reading %s: %v", pemFile, err) - } - caPool.AppendCertsFromPEM(pemContents) - } - cfg.ClientCAs = caPool - } - - // enforce leaf verification by writing our own verify function - if len(clientauth.TrustedLeafCerts) > 0 { - clientauth.trustedLeafCerts = []*x509.Certificate{} - for _, clientCertString := range clientauth.TrustedLeafCerts { - clientCert, err := decodeBase64DERCert(clientCertString) - if err != nil { - return fmt.Errorf("parsing certificate: %v", err) - } - clientauth.trustedLeafCerts = append(clientauth.trustedLeafCerts, clientCert) - } - // if a custom verification function already exists, wrap it - clientauth.existingVerifyPeerCert = cfg.VerifyPeerCertificate - cfg.VerifyPeerCertificate = clientauth.verifyPeerCertificate - } - - return nil -} - -// verifyPeerCertificate is for use as a tls.Config.VerifyPeerCertificate -// callback to do custom client certificate verification. It is intended -// for installation only by clientauth.ConfigureTLSConfig(). -func (clientauth ClientAuthentication) verifyPeerCertificate(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { - // first use any pre-existing custom verification function - if clientauth.existingVerifyPeerCert != nil { - err := clientauth.existingVerifyPeerCert(rawCerts, verifiedChains) - if err != nil { - return err - } - } - - if len(rawCerts) == 0 { - return fmt.Errorf("no client certificate provided") - } - - remoteLeafCert, err := x509.ParseCertificate(rawCerts[0]) - if err != nil { - return fmt.Errorf("can't parse the given certificate: %s", err.Error()) - } - - for _, trustedLeafCert := range clientauth.trustedLeafCerts { - if remoteLeafCert.Equal(trustedLeafCert) { - return nil - } - } - - return fmt.Errorf("client leaf certificate failed validation") -} - -// decodeBase64DERCert base64-decodes, then DER-decodes, certStr. 
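Since `decodeBase64DERCert` below expects the raw DER bytes in base64, the config values for `trusted_ca_certs` and `trusted_leaf_certs` are exactly the body of a PEM `CERTIFICATE` block with its header, footer, and line breaks removed. A sketch of producing such a value; the file name is illustrative:

```go
package main

import (
	"encoding/base64"
	"encoding/pem"
	"fmt"
	"os"
)

func main() {
	// Read a PEM certificate and re-emit its DER payload as a
	// single base64 line suitable for trusted_leaf_certs.
	pemBytes, err := os.ReadFile("client.crt")
	if err != nil {
		panic(err)
	}
	block, _ := pem.Decode(pemBytes)
	if block == nil || block.Type != "CERTIFICATE" {
		panic("no CERTIFICATE block found")
	}
	fmt.Println(base64.StdEncoding.EncodeToString(block.Bytes))
}
```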
-func decodeBase64DERCert(certStr string) (*x509.Certificate, error) { - derBytes, err := base64.StdEncoding.DecodeString(certStr) - if err != nil { - return nil, err - } - return x509.ParseCertificate(derBytes) -} - -// setDefaultTLSParams sets the default TLS cipher suites, protocol versions, -// and server preferences of cfg if they are not already set; it does not -// overwrite values, only fills in missing values. -func setDefaultTLSParams(cfg *tls.Config) { - if len(cfg.CipherSuites) == 0 { - cfg.CipherSuites = getOptimalDefaultCipherSuites() - } - - // Not a cipher suite, but still important for mitigating protocol downgrade attacks - // (prepend since having it at end breaks http2 due to non-h2-approved suites before it) - cfg.CipherSuites = append([]uint16{tls.TLS_FALLBACK_SCSV}, cfg.CipherSuites...) - - if len(cfg.CurvePreferences) == 0 { - cfg.CurvePreferences = defaultCurves - } - - if cfg.MinVersion == 0 { - cfg.MinVersion = tls.VersionTLS12 - } - if cfg.MaxVersion == 0 { - cfg.MaxVersion = tls.VersionTLS13 - } - - cfg.PreferServerCipherSuites = true -} - -// PublicKeyAlgorithm is a JSON-unmarshalable wrapper type. -type PublicKeyAlgorithm x509.PublicKeyAlgorithm - -// UnmarshalJSON satisfies json.Unmarshaler. -func (a *PublicKeyAlgorithm) UnmarshalJSON(b []byte) error { - algoStr := strings.ToLower(strings.Trim(string(b), `"`)) - algo, ok := publicKeyAlgorithms[algoStr] - if !ok { - return fmt.Errorf("unrecognized public key algorithm: %s (expected one of %v)", - algoStr, publicKeyAlgorithms) - } - *a = PublicKeyAlgorithm(algo) - return nil -} - -// ConnectionMatcher is a type which matches TLS handshakes. -type ConnectionMatcher interface { - Match(*tls.ClientHelloInfo) bool -} - -var defaultALPN = []string{"h2", "http/1.1"} diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/fileloader.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/fileloader.go deleted file mode 100644 index fdf54864..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/fileloader.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package caddytls - -import ( - "crypto/tls" - "fmt" - "io/ioutil" - - "github.com/caddyserver/caddy/v2" -) - -func init() { - caddy.RegisterModule(FileLoader{}) -} - -// FileLoader loads certificates and their associated keys from disk. -type FileLoader []CertKeyFilePair - -// CaddyModule returns the Caddy module information. -func (FileLoader) CaddyModule() caddy.ModuleInfo { - return caddy.ModuleInfo{ - ID: "tls.certificates.load_files", - New: func() caddy.Module { return new(FileLoader) }, - } -} - -// CertKeyFilePair pairs certificate and key file names along with their -// encoding format so that they can be loaded from disk. -type CertKeyFilePair struct { - // Path to the certificate (public key) file. - Certificate string `json:"certificate"` - - // Path to the private key file. 
- Key string `json:"key"` - - // The format of the cert and key. Can be "pem". Default: "pem" - Format string `json:"format,omitempty"` - - // Arbitrary values to associate with this certificate. - // Can be useful when you want to select a particular - // certificate when there may be multiple valid candidates. - Tags []string `json:"tags,omitempty"` -} - -// LoadCertificates returns the certificates to be loaded by fl. -func (fl FileLoader) LoadCertificates() ([]Certificate, error) { - certs := make([]Certificate, 0, len(fl)) - for _, pair := range fl { - certData, err := ioutil.ReadFile(pair.Certificate) - if err != nil { - return nil, err - } - keyData, err := ioutil.ReadFile(pair.Key) - if err != nil { - return nil, err - } - - var cert tls.Certificate - switch pair.Format { - case "": - fallthrough - case "pem": - cert, err = tls.X509KeyPair(certData, keyData) - default: - return nil, fmt.Errorf("unrecognized certificate/key encoding format: %s", pair.Format) - } - if err != nil { - return nil, err - } - - certs = append(certs, Certificate{Certificate: cert, Tags: pair.Tags}) - } - return certs, nil -} - -// Interface guard -var _ CertificateLoader = (FileLoader)(nil) diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/folderloader.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/folderloader.go deleted file mode 100644 index 10b017ee..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/folderloader.go +++ /dev/null @@ -1,155 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package caddytls - -import ( - "bytes" - "crypto/tls" - "encoding/pem" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "strings" - - "github.com/caddyserver/caddy/v2" -) - -func init() { - caddy.RegisterModule(FolderLoader{}) -} - -// FolderLoader loads certificates and their associated keys from disk -// by recursively walking the specified directories, looking for PEM -// files which contain both a certificate and a key. -type FolderLoader []string - -// CaddyModule returns the Caddy module information. -func (FolderLoader) CaddyModule() caddy.ModuleInfo { - return caddy.ModuleInfo{ - ID: "tls.certificates.load_folders", - New: func() caddy.Module { return new(FolderLoader) }, - } -} - -// LoadCertificates loads all the certificates+keys in the directories -// listed in fl from all files ending with .pem. This method of loading -// certificates expects the certificate and key to be bundled into the -// same file. 
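The folder loader described above expects the certificate chain and its private key concatenated into a single `.pem` file. A sketch of producing such a bundle from separate files; the file names are illustrative:

```go
package main

import (
	"io"
	"os"
)

func main() {
	// Concatenate cert then key into one bundle, the layout
	// x509CertFromCertAndKeyPEMFile below knows how to split.
	out, err := os.Create("site.pem")
	if err != nil {
		panic(err)
	}
	defer out.Close()

	for _, name := range []string{"site.crt", "site.key"} {
		f, err := os.Open(name)
		if err != nil {
			panic(err)
		}
		if _, err := io.Copy(out, f); err != nil {
			panic(err)
		}
		f.Close()
	}
}
```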
-func (fl FolderLoader) LoadCertificates() ([]Certificate, error) { - var certs []Certificate - for _, dir := range fl { - err := filepath.Walk(dir, func(fpath string, info os.FileInfo, err error) error { - if err != nil { - return fmt.Errorf("unable to traverse into path: %s", fpath) - } - if info.IsDir() { - return nil - } - if !strings.HasSuffix(strings.ToLower(info.Name()), ".pem") { - return nil - } - - cert, err := x509CertFromCertAndKeyPEMFile(fpath) - if err != nil { - return err - } - - certs = append(certs, Certificate{Certificate: cert}) - - return nil - }) - if err != nil { - return nil, err - } - } - return certs, nil -} - -func x509CertFromCertAndKeyPEMFile(fpath string) (tls.Certificate, error) { - bundle, err := ioutil.ReadFile(fpath) - if err != nil { - return tls.Certificate{}, err - } - - certBuilder, keyBuilder := new(bytes.Buffer), new(bytes.Buffer) - var foundKey bool // use only the first key in the file - - for { - // Decode next block so we can see what type it is - var derBlock *pem.Block - derBlock, bundle = pem.Decode(bundle) - if derBlock == nil { - break - } - - if derBlock.Type == "CERTIFICATE" { - // Re-encode certificate as PEM, appending to certificate chain - err = pem.Encode(certBuilder, derBlock) - if err != nil { - return tls.Certificate{}, err - } - } else if derBlock.Type == "EC PARAMETERS" { - // EC keys generated from openssl can be composed of two blocks: - // parameters and key (parameter block should come first) - if !foundKey { - // Encode parameters - err = pem.Encode(keyBuilder, derBlock) - if err != nil { - return tls.Certificate{}, err - } - - // Key must immediately follow - derBlock, bundle = pem.Decode(bundle) - if derBlock == nil || derBlock.Type != "EC PRIVATE KEY" { - return tls.Certificate{}, fmt.Errorf("%s: expected elliptic private key to immediately follow EC parameters", fpath) - } - err = pem.Encode(keyBuilder, derBlock) - if err != nil { - return tls.Certificate{}, err - } - foundKey = true - } - } else if derBlock.Type == "PRIVATE KEY" || strings.HasSuffix(derBlock.Type, " PRIVATE KEY") { - // RSA key - if !foundKey { - err = pem.Encode(keyBuilder, derBlock) - if err != nil { - return tls.Certificate{}, err - } - foundKey = true - } - } else { - return tls.Certificate{}, fmt.Errorf("%s: unrecognized PEM block type: %s", fpath, derBlock.Type) - } - } - - certPEMBytes, keyPEMBytes := certBuilder.Bytes(), keyBuilder.Bytes() - if len(certPEMBytes) == 0 { - return tls.Certificate{}, fmt.Errorf("%s: failed to parse PEM data", fpath) - } - if len(keyPEMBytes) == 0 { - return tls.Certificate{}, fmt.Errorf("%s: no private key block found", fpath) - } - - cert, err := tls.X509KeyPair(certPEMBytes, keyPEMBytes) - if err != nil { - return tls.Certificate{}, fmt.Errorf("%s: making X509 key pair: %v", fpath, err) - } - - return cert, nil -} - -var _ CertificateLoader = (FolderLoader)(nil) diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/internalissuer.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/internalissuer.go deleted file mode 100644 index 7a25f6d3..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/internalissuer.go +++ /dev/null @@ -1,185 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package caddytls - -import ( - "bytes" - "context" - "crypto/x509" - "encoding/pem" - "fmt" - "time" - - "github.com/caddyserver/caddy/v2" - "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile" - "github.com/caddyserver/caddy/v2/modules/caddypki" - "github.com/caddyserver/certmagic" - "github.com/smallstep/certificates/authority/provisioner" - "go.uber.org/zap" -) - -func init() { - caddy.RegisterModule(InternalIssuer{}) -} - -// InternalIssuer is a certificate issuer that generates -// certificates internally using a locally-configured -// CA which can be customized using the `pki` app. -type InternalIssuer struct { - // The ID of the CA to use for signing. The default - // CA ID is "local". The CA can be configured with the - // `pki` app. - CA string `json:"ca,omitempty"` - - // The validity period of certificates. - Lifetime caddy.Duration `json:"lifetime,omitempty"` - - // If true, the root will be the issuer instead of - // the intermediate. This is NOT recommended and should - // only be used when devices/clients do not properly - // validate certificate chains. - SignWithRoot bool `json:"sign_with_root,omitempty"` - - ca *caddypki.CA - logger *zap.Logger -} - -// CaddyModule returns the Caddy module information. -func (InternalIssuer) CaddyModule() caddy.ModuleInfo { - return caddy.ModuleInfo{ - ID: "tls.issuance.internal", - New: func() caddy.Module { return new(InternalIssuer) }, - } -} - -// Provision sets up the issuer. -func (iss *InternalIssuer) Provision(ctx caddy.Context) error { - iss.logger = ctx.Logger(iss) - - // get a reference to the configured CA - appModule, err := ctx.App("pki") - if err != nil { - return err - } - pkiApp := appModule.(*caddypki.PKI) - if iss.CA == "" { - iss.CA = caddypki.DefaultCAID - } - ca, ok := pkiApp.CAs[iss.CA] - if !ok { - return fmt.Errorf("no certificate authority configured with id: %s", iss.CA) - } - iss.ca = ca - - // set any other default values - if iss.Lifetime == 0 { - iss.Lifetime = caddy.Duration(defaultInternalCertLifetime) - } - - return nil -} - -// IssuerKey returns the unique issuer key for the -// configured CA endpoint. -func (iss InternalIssuer) IssuerKey() string { - return iss.ca.ID -} - -// Issue issues a certificate to satisfy the CSR.
-func (iss InternalIssuer) Issue(ctx context.Context, csr *x509.CertificateRequest) (*certmagic.IssuedCertificate, error) { - // prepare the signing authority - authCfg := caddypki.AuthorityConfig{ - SignWithRoot: iss.SignWithRoot, - } - auth, err := iss.ca.NewAuthority(authCfg) - if err != nil { - return nil, err - } - - // get the cert (public key) that will be used for signing - var issuerCert *x509.Certificate - if iss.SignWithRoot { - issuerCert = iss.ca.RootCertificate() - } else { - issuerCert = iss.ca.IntermediateCertificate() - } - - // ensure issued certificate does not expire later than its issuer - lifetime := time.Duration(iss.Lifetime) - if time.Now().Add(lifetime).After(issuerCert.NotAfter) { - lifetime = time.Until(issuerCert.NotAfter) - iss.logger.Warn("cert lifetime would exceed issuer NotAfter, clamping lifetime", - zap.Duration("orig_lifetime", time.Duration(iss.Lifetime)), - zap.Duration("lifetime", lifetime), - zap.Time("not_after", issuerCert.NotAfter), - ) - } - - certChain, err := auth.Sign(csr, provisioner.SignOptions{}, customCertLifetime(caddy.Duration(lifetime))) - if err != nil { - return nil, err - } - - var buf bytes.Buffer - for _, cert := range certChain { - err := pem.Encode(&buf, &pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw}) - if err != nil { - return nil, err - } - } - - return &certmagic.IssuedCertificate{ - Certificate: buf.Bytes(), - }, nil -} - -// UnmarshalCaddyfile deserializes Caddyfile tokens into iss. -// -// ... internal { -// ca -// } -// -func (iss *InternalIssuer) UnmarshalCaddyfile(d *caddyfile.Dispenser) error { - for d.Next() { - for d.NextBlock(0) { - switch d.Val() { - case "ca": - if !d.AllArgs(&iss.CA) { - return d.ArgErr() - } - } - } - } - return nil -} - -// customCertLifetime allows us to customize certificates that are issued -// by Smallstep libs, particularly the NotBefore & NotAfter dates. -type customCertLifetime time.Duration - -func (d customCertLifetime) Modify(cert *x509.Certificate, _ provisioner.SignOptions) error { - cert.NotBefore = time.Now() - cert.NotAfter = cert.NotBefore.Add(time.Duration(d)) - return nil -} - -const defaultInternalCertLifetime = 12 * time.Hour - -// Interface guards -var ( - _ caddy.Provisioner = (*InternalIssuer)(nil) - _ certmagic.Issuer = (*InternalIssuer)(nil) - _ provisioner.CertificateModifier = (*customCertLifetime)(nil) -) diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/matchers.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/matchers.go deleted file mode 100644 index aee0e726..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/matchers.go +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
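Stepping back to the internal issuer above: the lifetime clamp inside Issue boils down to a single time comparison. A minimal, self-contained sketch of that step:

package main

import (
	"fmt"
	"time"
)

// clampLifetime shortens a requested certificate lifetime so that a
// certificate issued now cannot outlive its issuer, mirroring the
// check in InternalIssuer.Issue.
func clampLifetime(requested time.Duration, issuerNotAfter time.Time) time.Duration {
	if time.Now().Add(requested).After(issuerNotAfter) {
		return time.Until(issuerNotAfter)
	}
	return requested
}

func main() {
	issuerNotAfter := time.Now().Add(6 * time.Hour)
	// A 12h request against an issuer expiring in 6h is clamped to ~6h.
	fmt.Println(clampLifetime(12*time.Hour, issuerNotAfter))
}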
- -package caddytls - -import ( - "crypto/tls" - "fmt" - "net" - "strings" - - "github.com/caddyserver/caddy/v2" - "github.com/caddyserver/certmagic" - "go.uber.org/zap" -) - -func init() { - caddy.RegisterModule(MatchServerName{}) - caddy.RegisterModule(MatchRemoteIP{}) -} - -// MatchServerName matches based on SNI. Names in -// this list may use left-most-label wildcards, -// similar to wildcard certificates. -type MatchServerName []string - -// CaddyModule returns the Caddy module information. -func (MatchServerName) CaddyModule() caddy.ModuleInfo { - return caddy.ModuleInfo{ - ID: "tls.handshake_match.sni", - New: func() caddy.Module { return new(MatchServerName) }, - } -} - -// Match matches hello based on SNI. -func (m MatchServerName) Match(hello *tls.ClientHelloInfo) bool { - for _, name := range m { - if certmagic.MatchWildcard(hello.ServerName, name) { - return true - } - } - return false -} - -// MatchRemoteIP matches based on the remote IP of the -// connection. Specific IPs or CIDR ranges can be specified. -// -// Note that IPs can sometimes be spoofed, so do not rely -// on this as a replacement for actual authentication. -type MatchRemoteIP struct { - // The IPs or CIDR ranges to match. - Ranges []string `json:"ranges,omitempty"` - - // The IPs or CIDR ranges to *NOT* match. - NotRanges []string `json:"not_ranges,omitempty"` - - cidrs []*net.IPNet - notCidrs []*net.IPNet - logger *zap.Logger -} - -// CaddyModule returns the Caddy module information. -func (MatchRemoteIP) CaddyModule() caddy.ModuleInfo { - return caddy.ModuleInfo{ - ID: "tls.handshake_match.remote_ip", - New: func() caddy.Module { return new(MatchRemoteIP) }, - } -} - -// Provision parses m's IP ranges, either from IP or CIDR expressions. -func (m *MatchRemoteIP) Provision(ctx caddy.Context) error { - m.logger = ctx.Logger(m) - for _, str := range m.Ranges { - cidrs, err := m.parseIPRange(str) - if err != nil { - return err - } - m.cidrs = cidrs - } - for _, str := range m.NotRanges { - cidrs, err := m.parseIPRange(str) - if err != nil { - return err - } - m.notCidrs = cidrs - } - return nil -} - -// Match matches hello based on the connection's remote IP. -func (m MatchRemoteIP) Match(hello *tls.ClientHelloInfo) bool { - remoteAddr := hello.Conn.RemoteAddr().String() - ipStr, _, err := net.SplitHostPort(remoteAddr) - if err != nil { - ipStr = remoteAddr // weird; maybe no port? 
- } - ip := net.ParseIP(ipStr) - if ip == nil { - m.logger.Error("invalid client IP address", zap.String("ip", ipStr)) - return false - } - return (len(m.cidrs) == 0 || m.matches(ip, m.cidrs)) && - (len(m.notCidrs) == 0 || !m.matches(ip, m.notCidrs)) -} - -func (MatchRemoteIP) parseIPRange(str string) ([]*net.IPNet, error) { - var cidrs []*net.IPNet - if strings.Contains(str, "/") { - _, ipNet, err := net.ParseCIDR(str) - if err != nil { - return nil, fmt.Errorf("parsing CIDR expression: %v", err) - } - cidrs = append(cidrs, ipNet) - } else { - ip := net.ParseIP(str) - if ip == nil { - return nil, fmt.Errorf("invalid IP address: %s", str) - } - mask := len(ip) * 8 - cidrs = append(cidrs, &net.IPNet{ - IP: ip, - Mask: net.CIDRMask(mask, mask), - }) - } - return cidrs, nil -} - -func (MatchRemoteIP) matches(ip net.IP, ranges []*net.IPNet) bool { - for _, ipRange := range ranges { - if ipRange.Contains(ip) { - return true - } - } - return false -} - -// Interface guards -var ( - _ ConnectionMatcher = (*MatchServerName)(nil) - _ ConnectionMatcher = (*MatchRemoteIP)(nil) -) diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/pemloader.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/pemloader.go deleted file mode 100644 index 61b08851..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/pemloader.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package caddytls - -import ( - "crypto/tls" - "fmt" - - "github.com/caddyserver/caddy/v2" -) - -func init() { - caddy.RegisterModule(PEMLoader{}) -} - -// PEMLoader loads certificates and their associated keys by -// decoding their PEM blocks directly. This has the advantage -// of not needing to store them on disk at all. -type PEMLoader []CertKeyPEMPair - -// CaddyModule returns the Caddy module information. -func (PEMLoader) CaddyModule() caddy.ModuleInfo { - return caddy.ModuleInfo{ - ID: "tls.certificates.load_pem", - New: func() caddy.Module { return new(PEMLoader) }, - } -} - -// CertKeyPEMPair pairs certificate and key PEM blocks. -type CertKeyPEMPair struct { - // The certificate (public key) in PEM format. - CertificatePEM string `json:"certificate"` - - // The private key in PEM format. - KeyPEM string `json:"key"` - - // Arbitrary values to associate with this certificate. - // Can be useful when you want to select a particular - // certificate when there may be multiple valid candidates. - Tags []string `json:"tags,omitempty"` -} - -// LoadCertificates returns the certificates contained in pl.
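Returning to the remote-IP matcher above for a moment: a bare IP becomes a full-length mask (/32 or /128), a CIDR expression is parsed as-is, and a match is just net.IPNet.Contains. A stdlib-only sketch of that rule:

package main

import (
	"fmt"
	"net"
)

func main() {
	// Parse a CIDR range the same way MatchRemoteIP.parseIPRange does.
	_, cidr, err := net.ParseCIDR("192.168.0.0/16")
	if err != nil {
		panic(err)
	}

	// A single IP is equivalent to a CIDR with a full-length mask.
	single := net.ParseIP("10.0.0.1")
	singleNet := &net.IPNet{IP: single, Mask: net.CIDRMask(len(single)*8, len(single)*8)}

	for _, s := range []string{"192.168.5.1", "10.0.0.1", "10.0.0.2"} {
		ip := net.ParseIP(s)
		fmt.Println(s, cidr.Contains(ip) || singleNet.Contains(ip))
	}
	// Output:
	// 192.168.5.1 true
	// 10.0.0.1 true
	// 10.0.0.2 false
}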
-func (pl PEMLoader) LoadCertificates() ([]Certificate, error) { - certs := make([]Certificate, 0, len(pl)) - for i, pair := range pl { - cert, err := tls.X509KeyPair([]byte(pair.CertificatePEM), []byte(pair.KeyPEM)) - if err != nil { - return nil, fmt.Errorf("PEM pair %d: %v", i, err) - } - certs = append(certs, Certificate{ - Certificate: cert, - Tags: pair.Tags, - }) - } - return certs, nil -} - -// Interface guard -var _ CertificateLoader = (PEMLoader)(nil) diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/sessiontickets.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/sessiontickets.go deleted file mode 100644 index bfc5628a..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/sessiontickets.go +++ /dev/null @@ -1,246 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package caddytls - -import ( - "crypto/rand" - "crypto/tls" - "encoding/json" - "fmt" - "io" - "log" - "runtime/debug" - "sync" - "time" - - "github.com/caddyserver/caddy/v2" -) - -// SessionTicketService configures and manages TLS session tickets. -type SessionTicketService struct { - // KeySource is the method by which Caddy produces or obtains - // TLS session ticket keys (STEKs). By default, Caddy generates - // them internally using a secure pseudorandom source. - KeySource json.RawMessage `json:"key_source,omitempty" caddy:"namespace=tls.stek inline_key=provider"` - - // How often Caddy rotates STEKs. Default: 12h. - RotationInterval caddy.Duration `json:"rotation_interval,omitempty"` - - // The maximum number of keys to keep in rotation. Default: 4. - MaxKeys int `json:"max_keys,omitempty"` - - // Disables STEK rotation. - DisableRotation bool `json:"disable_rotation,omitempty"` - - // Disables TLS session resumption by tickets. 
- Disabled bool `json:"disabled,omitempty"` - - keySource STEKProvider - configs map[*tls.Config]struct{} - stopChan chan struct{} - currentKeys [][32]byte - mu *sync.Mutex -} - -func (s *SessionTicketService) provision(ctx caddy.Context) error { - s.configs = make(map[*tls.Config]struct{}) - s.mu = new(sync.Mutex) - - // establish sane defaults - if s.RotationInterval == 0 { - s.RotationInterval = caddy.Duration(defaultSTEKRotationInterval) - } - if s.MaxKeys <= 0 { - s.MaxKeys = defaultMaxSTEKs - } - if s.KeySource == nil { - s.KeySource = json.RawMessage(`{"provider":"standard"}`) - } - - // load the STEK module, which will provide keys - val, err := ctx.LoadModule(s, "KeySource") - if err != nil { - return fmt.Errorf("loading TLS session ticket ephemeral keys provider module: %s", err) - } - s.keySource = val.(STEKProvider) - - // if session tickets or just rotation are - // disabled, no need to start service - if s.Disabled || s.DisableRotation { - return nil - } - - // start the STEK module; this ensures we have - // a starting key before any config needs one - return s.start() -} - -// start loads the starting STEKs and spawns a goroutine -// which loops to rotate the STEKs, which continues until -// stop() is called. If start() was already called, this -// is a no-op. -func (s *SessionTicketService) start() error { - if s.stopChan != nil { - return nil - } - s.stopChan = make(chan struct{}) - - // initializing the key source gives us our - // initial key(s) to start with; if successful, - // we need to be sure to call Next() so that - // the key source can know when it is done - initialKeys, err := s.keySource.Initialize(s) - if err != nil { - return fmt.Errorf("setting STEK module configuration: %v", err) - } - - s.mu.Lock() - s.currentKeys = initialKeys - s.mu.Unlock() - - // keep the keys rotated - go s.stayUpdated() - - return nil -} - -// stayUpdated is a blocking function which rotates -// the keys whenever new ones are sent. It reads -// from keysChan until s.stop() is called. -func (s *SessionTicketService) stayUpdated() { - defer func() { - if err := recover(); err != nil { - log.Printf("[PANIC] session ticket service: %v\n%s", err, debug.Stack()) - } - }() - - // this call is essential when Initialize() - // returns without error, because the stop - // channel is the only way the key source - // will know when to clean up - keysChan := s.keySource.Next(s.stopChan) - - for { - select { - case newKeys := <-keysChan: - s.mu.Lock() - s.currentKeys = newKeys - configs := s.configs - s.mu.Unlock() - for cfg := range configs { - cfg.SetSessionTicketKeys(newKeys) - } - case <-s.stopChan: - return - } - } -} - -// stop terminates the key rotation goroutine. -func (s *SessionTicketService) stop() { - if s.stopChan != nil { - close(s.stopChan) - } -} - -// register sets the session ticket keys on cfg -// and keeps them updated. Any values registered -// must be unregistered, or they will not be -// garbage-collected. s.start() must have been -// called first. If session tickets are disabled -// or if ticket key rotation is disabled, this -// function is a no-op. -func (s *SessionTicketService) register(cfg *tls.Config) { - if s.Disabled || s.DisableRotation { - return - } - s.mu.Lock() - cfg.SetSessionTicketKeys(s.currentKeys) - s.configs[cfg] = struct{}{} - s.mu.Unlock() -} - -// unregister stops session key management on cfg and -// removes the internal stored reference to cfg. If -// session tickets are disabled or if ticket key rotation -// is disabled, this function is a no-op. 
-func (s *SessionTicketService) unregister(cfg *tls.Config) { - if s.Disabled || s.DisableRotation { - return - } - s.mu.Lock() - delete(s.configs, cfg) - s.mu.Unlock() -} - -// RotateSTEKs rotates the keys in keys by producing a new key and eliding -// the oldest one. The new slice of keys is returned. -func (s SessionTicketService) RotateSTEKs(keys [][32]byte) ([][32]byte, error) { - // produce a new key - newKey, err := s.generateSTEK() - if err != nil { - return nil, fmt.Errorf("generating STEK: %v", err) - } - - // we need to prepend this new key to the list of - // keys so that it is preferred, but we need to be - // careful that we do not grow the slice larger - // than MaxKeys, otherwise we'll be storing one - // more key in memory than we expect; so be sure - // that the slice does not grow beyond the limit - // even for a brief period of time, since there's - // no guarantee when that extra allocation will - // be overwritten; this is why we first trim the - // length to one less the max, THEN prepend the - // new key - if len(keys) >= s.MaxKeys { - keys[len(keys)-1] = [32]byte{} // zero-out memory of oldest key - keys = keys[:s.MaxKeys-1] // trim length of slice - } - keys = append([][32]byte{newKey}, keys...) // prepend new key - - return keys, nil -} - -// generateSTEK generates key material suitable for use as a -// session ticket ephemeral key. -func (s *SessionTicketService) generateSTEK() ([32]byte, error) { - var newTicketKey [32]byte - _, err := io.ReadFull(rand.Reader, newTicketKey[:]) - return newTicketKey, err -} - -// STEKProvider is a type that can provide session ticket ephemeral -// keys (STEKs). -type STEKProvider interface { - // Initialize provides the STEK configuration to the STEK - // module so that it can obtain and manage keys accordingly. - // It returns the initial key(s) to use. Implementations can - // rely on Next() being called if Initialize() returns - // without error, so that it may know when it is done. - Initialize(config *SessionTicketService) ([][32]byte, error) - - // Next returns the channel through which the next session - // ticket keys will be transmitted until doneChan is closed. - // Keys should be sent on keysChan as they are updated. - // When doneChan is closed, any resources allocated in - // Initialize() must be cleaned up. - Next(doneChan <-chan struct{}) (keysChan <-chan [][32]byte) -} - -const ( - defaultSTEKRotationInterval = 12 * time.Hour - defaultMaxSTEKs = 4 -) diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/storageloader.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/storageloader.go deleted file mode 100644 index e78996f1..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/storageloader.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
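The STEKProvider contract above is small enough to implement in a few lines. A hedged sketch of a custom provider that issues one random key and never rotates; the module ID "tls.stek.static_example" is hypothetical, and a real deployment should prefer the standard rotating provider:

// Sketch of a custom STEK provider, assuming it is compiled into a
// Caddy build alongside the caddytls package shown above.
package staticstek

import (
	"crypto/rand"
	"io"

	"github.com/caddyserver/caddy/v2"
	"github.com/caddyserver/caddy/v2/modules/caddytls"
)

func init() {
	caddy.RegisterModule(StaticSTEK{})
}

// StaticSTEK returns one random key at startup and never rotates it.
type StaticSTEK struct{}

// CaddyModule returns the Caddy module information.
func (StaticSTEK) CaddyModule() caddy.ModuleInfo {
	return caddy.ModuleInfo{
		ID:  "tls.stek.static_example", // hypothetical ID
		New: func() caddy.Module { return new(StaticSTEK) },
	}
}

// Initialize produces the initial (and only) key.
func (StaticSTEK) Initialize(_ *caddytls.SessionTicketService) ([][32]byte, error) {
	var key [32]byte
	if _, err := io.ReadFull(rand.Reader, key[:]); err != nil {
		return nil, err
	}
	return [][32]byte{key}, nil
}

// Next never sends new keys; it only cleans up when doneChan closes.
func (StaticSTEK) Next(doneChan <-chan struct{}) <-chan [][32]byte {
	keysChan := make(chan [][32]byte)
	go func() {
		<-doneChan
		close(keysChan)
	}()
	return keysChan
}

// Interface guard, in the style used throughout this package.
var _ caddytls.STEKProvider = (*StaticSTEK)(nil)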
- -package caddytls - -import ( - "crypto/tls" - "fmt" - - "github.com/caddyserver/caddy/v2" - "github.com/caddyserver/certmagic" -) - -func init() { - caddy.RegisterModule(StorageLoader{}) -} - -// StorageLoader loads certificates and their associated keys -// from the globally configured storage module. -type StorageLoader struct { - // A list of pairs of certificate and key file names along with their - // encoding format so that they can be loaded from storage. - Pairs []CertKeyFilePair `json:"pairs,omitempty"` - - // Reference to the globally configured storage module. - storage certmagic.Storage -} - -// CaddyModule returns the Caddy module information. -func (StorageLoader) CaddyModule() caddy.ModuleInfo { - return caddy.ModuleInfo{ - ID: "tls.certificates.load_storage", - New: func() caddy.Module { return new(StorageLoader) }, - } -} - -// Provision loads the storage module for sl. -func (sl *StorageLoader) Provision(ctx caddy.Context) error { - sl.storage = ctx.Storage() - return nil -} - -// LoadCertificates returns the certificates to be loaded by sl. -func (sl StorageLoader) LoadCertificates() ([]Certificate, error) { - certs := make([]Certificate, 0, len(sl.Pairs)) - for _, pair := range sl.Pairs { - certData, err := sl.storage.Load(pair.Certificate) - if err != nil { - return nil, err - } - keyData, err := sl.storage.Load(pair.Key) - if err != nil { - return nil, err - } - - var cert tls.Certificate - switch pair.Format { - case "": - fallthrough - case "pem": - cert, err = tls.X509KeyPair(certData, keyData) - default: - return nil, fmt.Errorf("unrecognized certificate/key encoding format: %s", pair.Format) - } - if err != nil { - return nil, err - } - - certs = append(certs, Certificate{Certificate: cert, Tags: pair.Tags}) - } - return certs, nil -} - -// Interface guard -var ( - _ CertificateLoader = (*StorageLoader)(nil) - _ caddy.Provisioner = (*StorageLoader)(nil) -) diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/tls.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/tls.go deleted file mode 100644 index a93183e6..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/tls.go +++ /dev/null @@ -1,571 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package caddytls - -import ( - "crypto/tls" - "encoding/json" - "fmt" - "log" - "net/http" - "runtime/debug" - "sync" - "time" - - "github.com/caddyserver/caddy/v2" - "github.com/caddyserver/certmagic" - "go.uber.org/zap" -) - -func init() { - caddy.RegisterModule(TLS{}) - caddy.RegisterModule(AutomateLoader{}) -} - -// TLS provides TLS facilities including certificate -// loading and management, client auth, and more. -type TLS struct { - // Caches certificates in memory for quick use during - // TLS handshakes. Each key is the name of a certificate - // loader module. 
All loaded certificates get pooled - // into the same cache and may be used to complete TLS - // handshakes for the relevant server names (SNI). - // Certificates loaded manually (anything other than - // "automate") are not automatically managed and will - // have to be refreshed manually before they expire. - CertificatesRaw caddy.ModuleMap `json:"certificates,omitempty" caddy:"namespace=tls.certificates"` - - // Configures the automation of certificate management. - Automation *AutomationConfig `json:"automation,omitempty"` - - // Configures session ticket ephemeral keys (STEKs). - SessionTickets *SessionTicketService `json:"session_tickets,omitempty"` - - // Configures the in-memory certificate cache. - Cache *CertCacheOptions `json:"cache,omitempty"` - - // Disables OCSP stapling for manually-managed certificates only. - // To configure OCSP stapling for automated certificates, use an - // automation policy instead. - // - // Disabling OCSP stapling puts clients at greater risk, reduces their - // privacy, and usually lowers client performance. It is NOT recommended - // to disable this unless you are able to justify the costs. - // EXPERIMENTAL. Subject to change. - DisableOCSPStapling bool `json:"disable_ocsp_stapling,omitempty"` - - certificateLoaders []CertificateLoader - automateNames []string - certCache *certmagic.Cache - ctx caddy.Context - storageCleanTicker *time.Ticker - storageCleanStop chan struct{} - logger *zap.Logger -} - -// CaddyModule returns the Caddy module information. -func (TLS) CaddyModule() caddy.ModuleInfo { - return caddy.ModuleInfo{ - ID: "tls", - New: func() caddy.Module { return new(TLS) }, - } -} - -// Provision sets up the configuration for the TLS app. -func (t *TLS) Provision(ctx caddy.Context) error { - t.ctx = ctx - t.logger = ctx.Logger(t) - repl := caddy.NewReplacer() - - // set up a new certificate cache; this (re)loads all certificates - cacheOpts := certmagic.CacheOptions{ - GetConfigForCert: func(cert certmagic.Certificate) (*certmagic.Config, error) { - return t.getConfigForName(cert.Names[0]), nil - }, - Logger: t.logger.Named("cache"), - } - if t.Automation != nil { - cacheOpts.OCSPCheckInterval = time.Duration(t.Automation.OCSPCheckInterval) - cacheOpts.RenewCheckInterval = time.Duration(t.Automation.RenewCheckInterval) - } - if t.Cache != nil { - cacheOpts.Capacity = t.Cache.Capacity - } - if cacheOpts.Capacity <= 0 { - cacheOpts.Capacity = 10000 - } - t.certCache = certmagic.NewCache(cacheOpts) - - // certificate loaders - val, err := ctx.LoadModule(t, "CertificatesRaw") - if err != nil { - return fmt.Errorf("loading certificate loader modules: %s", err) - } - for modName, modIface := range val.(map[string]interface{}) { - if modName == "automate" { - // special case; these will be loaded in later using our automation facilities, - // which we want to avoid doing during provisioning - if automateNames, ok := modIface.(*AutomateLoader); ok && automateNames != nil { - t.automateNames = []string(*automateNames) - } else { - return fmt.Errorf("loading certificates with 'automate' requires array of strings, got: %T", modIface) - } - continue - } - t.certificateLoaders = append(t.certificateLoaders, modIface.(CertificateLoader)) - } - - // automation/management policies - if t.Automation == nil { - t.Automation = new(AutomationConfig) - } - t.Automation.defaultPublicAutomationPolicy = new(AutomationPolicy) - err = t.Automation.defaultPublicAutomationPolicy.Provision(t) - if err != nil { - return fmt.Errorf("provisioning default public 
automation policy: %v", err) - } - for _, n := range t.automateNames { - // if any names specified by the "automate" loader do not qualify for a public - // certificate, we should initialize a default internal automation policy - // (but we don't want to do this unnecessarily, since it may prompt for password!) - if certmagic.SubjectQualifiesForPublicCert(n) { - continue - } - t.Automation.defaultInternalAutomationPolicy = &AutomationPolicy{ - IssuersRaw: []json.RawMessage{json.RawMessage(`{"module":"internal"}`)}, - } - err = t.Automation.defaultInternalAutomationPolicy.Provision(t) - if err != nil { - return fmt.Errorf("provisioning default internal automation policy: %v", err) - } - break - } - for i, ap := range t.Automation.Policies { - err := ap.Provision(t) - if err != nil { - return fmt.Errorf("provisioning automation policy %d: %v", i, err) - } - } - - // session ticket ephemeral keys (STEK) service and provider - if t.SessionTickets != nil { - err := t.SessionTickets.provision(ctx) - if err != nil { - return fmt.Errorf("provisioning session tickets configuration: %v", err) - } - } - - // on-demand rate limiting - if t.Automation != nil && t.Automation.OnDemand != nil && t.Automation.OnDemand.RateLimit != nil { - onDemandRateLimiter.SetMaxEvents(t.Automation.OnDemand.RateLimit.Burst) - onDemandRateLimiter.SetWindow(time.Duration(t.Automation.OnDemand.RateLimit.Interval)) - } else { - // remove any existing rate limiter - onDemandRateLimiter.SetMaxEvents(0) - onDemandRateLimiter.SetWindow(0) - } - - // run replacer on ask URL (for environment variables) - if t.Automation != nil && t.Automation.OnDemand != nil && t.Automation.OnDemand.Ask != "" { - t.Automation.OnDemand.Ask = repl.ReplaceAll(t.Automation.OnDemand.Ask, "") - } - - // load manual/static (unmanaged) certificates - we do this in - // provision so that other apps (such as http) can know which - // certificates have been manually loaded, and also so that - // commands like validate can be a better test - magic := certmagic.New(t.certCache, certmagic.Config{ - Storage: ctx.Storage(), - Logger: t.logger, - OCSP: certmagic.OCSPConfig{ - DisableStapling: t.DisableOCSPStapling, - }, - }) - for _, loader := range t.certificateLoaders { - certs, err := loader.LoadCertificates() - if err != nil { - return fmt.Errorf("loading certificates: %v", err) - } - for _, cert := range certs { - err := magic.CacheUnmanagedTLSCertificate(cert.Certificate, cert.Tags) - if err != nil { - return fmt.Errorf("caching unmanaged certificate: %v", err) - } - } - } - - return nil -} - -// Validate validates t's configuration. 
-func (t *TLS) Validate() error { - if t.Automation != nil { - // ensure that hosts aren't repeated; since only the first - // automation policy is used, repeating a host in the lists - // isn't useful and is probably a mistake; same for two - // catch-all/default policies - var hasDefault bool - hostSet := make(map[string]int) - for i, ap := range t.Automation.Policies { - if len(ap.Subjects) == 0 { - if hasDefault { - return fmt.Errorf("automation policy %d is the second policy that acts as default/catch-all, but will never be used", i) - } - hasDefault = true - } - for _, h := range ap.Subjects { - if first, ok := hostSet[h]; ok { - return fmt.Errorf("automation policy %d: cannot apply more than one automation policy to host: %s (first match in policy %d)", i, h, first) - } - hostSet[h] = i - } - } - } - if t.Cache != nil { - if t.Cache.Capacity < 0 { - return fmt.Errorf("cache capacity must be >= 0") - } - } - return nil -} - -// Start activates the TLS module. -func (t *TLS) Start() error { - // warn if on-demand TLS is enabled but no restrictions are in place - if t.Automation.OnDemand == nil || - (t.Automation.OnDemand.Ask == "" && t.Automation.OnDemand.RateLimit == nil) { - for _, ap := range t.Automation.Policies { - if ap.OnDemand { - t.logger.Warn("YOUR SERVER MAY BE VULNERABLE TO ABUSE: on-demand TLS is enabled, but no protections are in place", - zap.String("docs", "https://caddyserver.com/docs/automatic-https#on-demand-tls")) - break - } - } - } - - // now that we are running, and all manual certificates have - // been loaded, time to load the automated/managed certificates - err := t.Manage(t.automateNames) - if err != nil { - return fmt.Errorf("automate: managing %v: %v", t.automateNames, err) - } - - t.keepStorageClean() - - return nil -} - -// Stop stops the TLS module and cleans up any allocations. -func (t *TLS) Stop() error { - // stop the storage cleaner goroutine and ticker - if t.storageCleanStop != nil { - close(t.storageCleanStop) - } - if t.storageCleanTicker != nil { - t.storageCleanTicker.Stop() - } - return nil -} - -// Cleanup frees up resources allocated during Provision. -func (t *TLS) Cleanup() error { - // stop the certificate cache - if t.certCache != nil { - t.certCache.Stop() - } - - // stop the session ticket rotation goroutine - if t.SessionTickets != nil { - t.SessionTickets.stop() - } - - return nil -} - -// Manage immediately begins managing names according to the -// matching automation policy. -func (t *TLS) Manage(names []string) error { - // for a large number of names, we can be more memory-efficient - // by making only one certmagic.Config for all the names that - // use that config, rather than calling ManageAsync once for - // every name; so first, bin names by AutomationPolicy - policyToNames := make(map[*AutomationPolicy][]string) - for _, name := range names { - ap := t.getAutomationPolicyForName(name) - policyToNames[ap] = append(policyToNames[ap], name) - } - - // now that names are grouped by policy, we can simply make one - // certmagic.Config for each (potentially large) group of names - // and call ManageAsync just once for the whole batch - for ap, names := range policyToNames { - err := ap.magic.ManageAsync(t.ctx.Context, names) - if err != nil { - return fmt.Errorf("automate: manage %v: %v", names, err) - } - } - - return nil -} - -// HandleHTTPChallenge ensures that the HTTP challenge is handled for the -// certificate named by r.Host, if it is an HTTP challenge request.
It -// requires that the automation policy for r.Host has an issuer of type -// *certmagic.ACMEManager, or one that is ACME-enabled (GetACMEIssuer()). -func (t *TLS) HandleHTTPChallenge(w http.ResponseWriter, r *http.Request) bool { - // no-op if it's not an ACME challenge request - if !certmagic.LooksLikeHTTPChallenge(r) { - return false - } - - // try all the issuers until we find the one that initiated the challenge - ap := t.getAutomationPolicyForName(r.Host) - type acmeCapable interface{ GetACMEIssuer() *ACMEIssuer } - for _, iss := range ap.magic.Issuers { - if am, ok := iss.(acmeCapable); ok { - iss := am.GetACMEIssuer() - if certmagic.NewACMEManager(iss.magic, iss.template).HandleHTTPChallenge(w, r) { - return true - } - } - } - - // it's possible another server in this process initiated the challenge; - // users have requested that Caddy only handle HTTP challenges it initiated, - // so that users can proxy the others through to their backends; but we - // might not have an automation policy for all identifiers that are trying - // to get certificates (e.g. the admin endpoint), so we do this manual check - if challenge, ok := certmagic.GetACMEChallenge(r.Host); ok { - return certmagic.SolveHTTPChallenge(t.logger, w, r, challenge.Challenge) - } - - return false -} - -// AddAutomationPolicy provisions and adds ap to the list of the app's -// automation policies. If an existing automation policy exists that has -// fewer hosts in its list than ap does, ap will be inserted before that -// other policy (this helps ensure that ap will be prioritized/chosen -// over, say, a catch-all policy). -func (t *TLS) AddAutomationPolicy(ap *AutomationPolicy) error { - if t.Automation == nil { - t.Automation = new(AutomationConfig) - } - err := ap.Provision(t) - if err != nil { - return err - } - // sort new automation policies just before any other which is a superset - // of this one; if we find an existing policy that covers every subject in - // ap but less specifically (e.g. a catch-all policy, or one with wildcards - // or with fewer subjects), insert ap just before it, otherwise ap would - // never be used because the first matching policy is more general - for i, existing := range t.Automation.Policies { - // first see if existing is superset of ap for all names - var otherIsSuperset bool - outer: - for _, thisSubj := range ap.Subjects { - for _, otherSubj := range existing.Subjects { - if certmagic.MatchWildcard(thisSubj, otherSubj) { - otherIsSuperset = true - break outer - } - } - } - // if existing AP is a superset or if it contains fewer names (i.e. is - // more general), then new AP is more specific, so insert before it - if otherIsSuperset || len(existing.Subjects) < len(ap.Subjects) { - t.Automation.Policies = append(t.Automation.Policies[:i], - append([]*AutomationPolicy{ap}, t.Automation.Policies[i:]...)...) - return nil - } - } - // otherwise just append the new one - t.Automation.Policies = append(t.Automation.Policies, ap) - return nil -} - -func (t *TLS) getConfigForName(name string) *certmagic.Config { - ap := t.getAutomationPolicyForName(name) - return ap.magic -} - -// getAutomationPolicyForName returns the first matching automation policy -// for the given subject name. If no matching policy can be found, the -// default policy is used, depending on whether the name qualifies for a -// public certificate or not. 
-func (t *TLS) getAutomationPolicyForName(name string) *AutomationPolicy { - for _, ap := range t.Automation.Policies { - if len(ap.Subjects) == 0 { - return ap // no host filter is an automatic match - } - for _, h := range ap.Subjects { - if certmagic.MatchWildcard(name, h) { - return ap - } - } - } - if certmagic.SubjectQualifiesForPublicCert(name) || t.Automation.defaultInternalAutomationPolicy == nil { - return t.Automation.defaultPublicAutomationPolicy - } - return t.Automation.defaultInternalAutomationPolicy -} - -// AllMatchingCertificates returns the list of all certificates in -// the cache which could be used to satisfy the given SAN. -func (t *TLS) AllMatchingCertificates(san string) []certmagic.Certificate { - return t.certCache.AllMatchingCertificates(san) -} - -// keepStorageClean starts a goroutine that immediately cleans up all -// known storage units if it was not recently done, and then runs the -// operation at every tick from t.storageCleanTicker. -func (t *TLS) keepStorageClean() { - t.storageCleanTicker = time.NewTicker(t.storageCleanInterval()) - t.storageCleanStop = make(chan struct{}) - go func() { - defer func() { - if err := recover(); err != nil { - log.Printf("[PANIC] storage cleaner: %v\n%s", err, debug.Stack()) - } - }() - t.cleanStorageUnits() - for { - select { - case <-t.storageCleanStop: - return - case <-t.storageCleanTicker.C: - t.cleanStorageUnits() - } - } - }() -} - -func (t *TLS) cleanStorageUnits() { - storageCleanMu.Lock() - defer storageCleanMu.Unlock() - - // If storage was cleaned recently, don't do it again for now. Although the ticker - // drops missed ticks for us, config reloads discard the old ticker and replace it - // with a new one, possibly invoking a cleaning to happen again too soon. - // (We divide the interval by 2 because the actual cleaning takes non-zero time, - // and we don't want to skip cleanings if we don't have to; whereas if a cleaning - // took the entire interval, we'd probably want to skip the next one so we aren't - // constantly cleaning. This allows cleanings to take up to half the interval's - // duration before we decide to skip the next one.) 
- if !storageClean.IsZero() && time.Since(storageClean) < t.storageCleanInterval()/2 { - return - } - - // mark when storage cleaning was last initiated - storageClean = time.Now() - - options := certmagic.CleanStorageOptions{ - OCSPStaples: true, - ExpiredCerts: true, - ExpiredCertGracePeriod: 24 * time.Hour * 14, - } - - // avoid cleaning same storage more than once per cleaning cycle - storagesCleaned := make(map[string]struct{}) - - // start with the default/global storage - storage := t.ctx.Storage() - storageStr := fmt.Sprintf("%v", storage) - t.logger.Info("cleaning storage unit", zap.String("description", storageStr)) - certmagic.CleanStorage(t.ctx, storage, options) - storagesCleaned[storageStr] = struct{}{} - - // then clean each storage defined in ACME automation policies - if t.Automation != nil { - for _, ap := range t.Automation.Policies { - if ap.storage == nil { - continue - } - storageStr := fmt.Sprintf("%v", ap.storage) - if _, ok := storagesCleaned[storageStr]; ok { - continue - } - t.logger.Info("cleaning storage unit", zap.String("description", storageStr)) - certmagic.CleanStorage(t.ctx, ap.storage, options) - storagesCleaned[storageStr] = struct{}{} - } - } - - t.logger.Info("finished cleaning storage units") -} - -func (t *TLS) storageCleanInterval() time.Duration { - if t.Automation != nil && t.Automation.StorageCleanInterval > 0 { - return time.Duration(t.Automation.StorageCleanInterval) - } - return defaultStorageCleanInterval -} - -// CertificateLoader is a type that can load certificates. -// Certificates can optionally be associated with tags. -type CertificateLoader interface { - LoadCertificates() ([]Certificate, error) -} - -// Certificate is a TLS certificate, optionally -// associated with arbitrary tags. -type Certificate struct { - tls.Certificate - Tags []string -} - -// AutomateLoader will automatically manage certificates for the names -// in the list, including obtaining and renewing certificates. Automated -// certificates are managed according to their matching automation policy, -// configured elsewhere in this app. -// -// This is a no-op certificate loader module that is treated as a special -// case: it uses this app's automation features to load certificates for the -// list of hostnames, rather than loading certificates manually. -type AutomateLoader []string - -// CaddyModule returns the Caddy module information. -func (AutomateLoader) CaddyModule() caddy.ModuleInfo { - return caddy.ModuleInfo{ - ID: "tls.certificates.automate", - New: func() caddy.Module { return new(AutomateLoader) }, - } -} - -// CertCacheOptions configures the certificate cache. -type CertCacheOptions struct { - // Maximum number of certificates to allow in the - // cache. If reached, certificates will be randomly - // evicted to make room for new ones. Default: 0 - // (no limit). - Capacity int `json:"capacity,omitempty"` -} - -// Variables related to storage cleaning. 
-var ( - defaultStorageCleanInterval = 24 * time.Hour - - storageClean time.Time - storageCleanMu sync.Mutex -) - -// Interface guards -var ( - _ caddy.App = (*TLS)(nil) - _ caddy.Provisioner = (*TLS)(nil) - _ caddy.Validator = (*TLS)(nil) - _ caddy.CleanerUpper = (*TLS)(nil) -) diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/values.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/values.go deleted file mode 100644 index 4e8c1adc..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/values.go +++ /dev/null @@ -1,155 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package caddytls - -import ( - "crypto/tls" - "crypto/x509" - "fmt" - - "github.com/caddyserver/certmagic" - "github.com/klauspost/cpuid/v2" -) - -// CipherSuiteNameSupported returns true if name is -// a supported cipher suite. -func CipherSuiteNameSupported(name string) bool { - return CipherSuiteID(name) != 0 -} - -// CipherSuiteID returns the ID of the cipher suite associated with -// the given name, or 0 if the name is not recognized/supported. -func CipherSuiteID(name string) uint16 { - for _, cs := range SupportedCipherSuites() { - if cs.Name == name { - return cs.ID - } - } - return 0 -} - -// SupportedCipherSuites returns a list of all the cipher suites -// Caddy supports. The list is NOT ordered by security preference. -func SupportedCipherSuites() []*tls.CipherSuite { - return tls.CipherSuites() -} - -// defaultCipherSuites is the ordered list of all the cipher -// suites we want to support by default, assuming AES-NI -// (hardware acceleration for AES). -var defaultCipherSuitesWithAESNI = []uint16{ - tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, - tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, -} - -// defaultCipherSuites is the ordered list of all the cipher -// suites we want to support by default, assuming lack of -// AES-NI (NO hardware acceleration for AES). -var defaultCipherSuitesWithoutAESNI = []uint16{ - tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, - tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, - tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, -} - -// getOptimalDefaultCipherSuites returns an appropriate cipher -// suite to use depending on the hardware support for AES. -// -// See https://github.com/caddyserver/caddy/issues/1674 -func getOptimalDefaultCipherSuites() []uint16 { - if cpuid.CPU.Supports(cpuid.AESNI) { - return defaultCipherSuitesWithAESNI - } - return defaultCipherSuitesWithoutAESNI -} - -// SupportedCurves is the unordered map of supported curves. 
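CipherSuiteID above is a linear scan of the standard library's supported list; the same lookup works standalone:

package main

import (
	"crypto/tls"
	"fmt"
)

// cipherSuiteID resolves a cipher suite name to its IANA ID, the same
// way CipherSuiteID above does; 0 means the name is unsupported.
func cipherSuiteID(name string) uint16 {
	for _, cs := range tls.CipherSuites() {
		if cs.Name == name {
			return cs.ID
		}
	}
	return 0
}

func main() {
	fmt.Printf("0x%04x\n", cipherSuiteID("TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256"))
	// Output: 0xc02b
}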
-// https://golang.org/pkg/crypto/tls/#CurveID -var SupportedCurves = map[string]tls.CurveID{ - "x25519": tls.X25519, - "secp256r1": tls.CurveP256, - "secp384r1": tls.CurveP384, - "secp521r1": tls.CurveP521, -} - -// supportedCertKeyTypes is all the key types that are supported -// for certificates that are obtained through ACME. -var supportedCertKeyTypes = map[string]certmagic.KeyType{ - "rsa2048": certmagic.RSA2048, - "rsa4096": certmagic.RSA4096, - "p256": certmagic.P256, - "p384": certmagic.P384, - "ed25519": certmagic.ED25519, -} - -// defaultCurves is the list of only the curves we want to use -// by default, in descending order of preference. -// -// This list should only include curves which are fast by design -// (e.g. X25519) and those for which an optimized assembly -// implementation exists (e.g. P256). The latter ones can be -// found here: -// https://github.com/golang/go/tree/master/src/crypto/elliptic -var defaultCurves = []tls.CurveID{ - tls.X25519, - tls.CurveP256, -} - -// SupportedProtocols is a map of supported protocols. -var SupportedProtocols = map[string]uint16{ - "tls1.2": tls.VersionTLS12, - "tls1.3": tls.VersionTLS13, -} - -// unsupportedProtocols is a map of unsupported protocols. -// Used for logging only, not enforcement. -var unsupportedProtocols = map[string]uint16{ - //nolint:staticcheck - "ssl3.0": tls.VersionSSL30, - "tls1.0": tls.VersionTLS10, - "tls1.1": tls.VersionTLS11, -} - -// publicKeyAlgorithms is the map of supported public key algorithms. -var publicKeyAlgorithms = map[string]x509.PublicKeyAlgorithm{ - "rsa": x509.RSA, - "dsa": x509.DSA, - "ecdsa": x509.ECDSA, -} - -// ProtocolName returns the standard name for the passed protocol version ID -// (e.g. "TLS1.3") or a fallback representation of the ID value if the version -// is not supported. -func ProtocolName(id uint16) string { - for k, v := range SupportedProtocols { - if v == id { - return k - } - } - - for k, v := range unsupportedProtocols { - if v == id { - return k - } - } - - return fmt.Sprintf("0x%04x", id) -} diff --git a/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/zerosslissuer.go b/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/zerosslissuer.go deleted file mode 100644 index a8830a0b..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/modules/caddytls/zerosslissuer.go +++ /dev/null @@ -1,234 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package caddytls - -import ( - "context" - "crypto/x509" - "encoding/json" - "fmt" - "io" - "net/http" - "net/url" - "strings" - "sync" - - "github.com/caddyserver/caddy/v2" - "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile" - "github.com/caddyserver/certmagic" - "github.com/mholt/acmez/acme" - "go.uber.org/zap" -) - -func init() { - caddy.RegisterModule(new(ZeroSSLIssuer)) -} - -// ZeroSSLIssuer makes an ACME manager -// for managing certificates using ACME. 
-type ZeroSSLIssuer struct { - *ACMEIssuer - - // The API key (or "access key") for using the ZeroSSL API. - APIKey string `json:"api_key,omitempty"` - - mu sync.Mutex - logger *zap.Logger -} - -// CaddyModule returns the Caddy module information. -func (*ZeroSSLIssuer) CaddyModule() caddy.ModuleInfo { - return caddy.ModuleInfo{ - ID: "tls.issuance.zerossl", - New: func() caddy.Module { return new(ZeroSSLIssuer) }, - } -} - -// Provision sets up iss. -func (iss *ZeroSSLIssuer) Provision(ctx caddy.Context) error { - iss.logger = ctx.Logger(iss) - if iss.ACMEIssuer == nil { - iss.ACMEIssuer = new(ACMEIssuer) - } - if iss.ACMEIssuer.CA == "" { - iss.ACMEIssuer.CA = certmagic.ZeroSSLProductionCA - } - return iss.ACMEIssuer.Provision(ctx) -} - -// newAccountCallback generates EAB if not already provided. It also sets a valid default contact on the account if not set. -func (iss *ZeroSSLIssuer) newAccountCallback(ctx context.Context, am *certmagic.ACMEManager, acct acme.Account) (acme.Account, error) { - if am.ExternalAccount != nil { - return acct, nil - } - var err error - am.ExternalAccount, acct, err = iss.generateEABCredentials(ctx, acct) - return acct, err -} - -// generateEABCredentials generates EAB credentials using the API key if provided, -// otherwise using the primary contact email on the issuer. If an email is not set -// on the issuer, a default generic email is used. -func (iss *ZeroSSLIssuer) generateEABCredentials(ctx context.Context, acct acme.Account) (*acme.EAB, acme.Account, error) { - var endpoint string - var body io.Reader - - // there are two ways to generate EAB credentials: authenticated with - // their API key, or unauthenticated with their email address - if iss.APIKey != "" { - apiKey := caddy.NewReplacer().ReplaceAll(iss.APIKey, "") - if apiKey == "" { - return nil, acct, fmt.Errorf("missing API key: '%v'", iss.APIKey) - } - qs := url.Values{"access_key": []string{apiKey}} - endpoint = fmt.Sprintf("%s/eab-credentials?%s", zerosslAPIBase, qs.Encode()) - } else { - email := iss.Email - if email == "" { - iss.logger.Warn("missing email address for ZeroSSL; it is strongly recommended to set one for next time") - email = "caddy@zerossl.com" // special email address that preserves backwards-compat, but which black-holes dashboard features, oh well - } - if len(acct.Contact) == 0 { - // we borrow the email from config or the default email, so ensure it's saved with the account - acct.Contact = []string{"mailto:" + email} - } - endpoint = zerosslAPIBase + "/eab-credentials-email" - form := url.Values{"email": []string{email}} - body = strings.NewReader(form.Encode()) - } - - req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, body) - if err != nil { - return nil, acct, fmt.Errorf("forming request: %v", err) - } - if body != nil { - req.Header.Set("Content-Type", "application/x-www-form-urlencoded") - } - req.Header.Set("User-Agent", certmagic.UserAgent) - - resp, err := http.DefaultClient.Do(req) - if err != nil { - return nil, acct, fmt.Errorf("performing EAB credentials request: %v", err) - } - defer resp.Body.Close() - - var result struct { - Success bool `json:"success"` - Error struct { - Code int `json:"code"` - Type string `json:"type"` - } `json:"error"` - EABKID string `json:"eab_kid"` - EABHMACKey string `json:"eab_hmac_key"` - } - err = json.NewDecoder(resp.Body).Decode(&result) - if err != nil { - return nil, acct, fmt.Errorf("decoding API response: %v", err) - } - if result.Error.Code != 0 { - return nil, acct, fmt.Errorf("failed getting 
EAB credentials: HTTP %d: %s (code %d)", - resp.StatusCode, result.Error.Type, result.Error.Code) - } - if resp.StatusCode != http.StatusOK { - return nil, acct, fmt.Errorf("failed getting EAB credentials: HTTP %d", resp.StatusCode) - } - - iss.logger.Info("generated EAB credentials", zap.String("key_id", result.EABKID)) - - return &acme.EAB{ - KeyID: result.EABKID, - MACKey: result.EABHMACKey, - }, acct, nil -} - -// initialize modifies the template for the underlying ACMEManager -// values by setting the CA endpoint to the ZeroSSL directory and -// setting the NewAccountFunc callback to one which allows us to -// generate EAB credentials only if a new account is being made. -// Since it modifies the stored template, its effect should only -// be needed once, but it is fine to call it repeatedly. -func (iss *ZeroSSLIssuer) initialize() { - iss.mu.Lock() - defer iss.mu.Unlock() - if iss.template.NewAccountFunc == nil { - iss.template.NewAccountFunc = iss.newAccountCallback - } -} - -// PreCheck implements the certmagic.PreChecker interface. -func (iss *ZeroSSLIssuer) PreCheck(ctx context.Context, names []string, interactive bool) error { - iss.initialize() - return iss.ACMEIssuer.PreCheck(ctx, names, interactive) -} - -// Issue obtains a certificate for the given csr. -func (iss *ZeroSSLIssuer) Issue(ctx context.Context, csr *x509.CertificateRequest) (*certmagic.IssuedCertificate, error) { - iss.initialize() - return iss.ACMEIssuer.Issue(ctx, csr) -} - -// IssuerKey returns the unique issuer key for the configured CA endpoint. -func (iss *ZeroSSLIssuer) IssuerKey() string { - iss.initialize() - return iss.ACMEIssuer.IssuerKey() -} - -// Revoke revokes the given certificate. -func (iss *ZeroSSLIssuer) Revoke(ctx context.Context, cert certmagic.CertificateResource, reason int) error { - iss.initialize() - return iss.ACMEIssuer.Revoke(ctx, cert, reason) -} - -// UnmarshalCaddyfile deserializes Caddyfile tokens into iss. -// -// ... zerossl [] { -// ... -// } -// -// Any of the subdirectives for the ACME issuer can be used in the block. -func (iss *ZeroSSLIssuer) UnmarshalCaddyfile(d *caddyfile.Dispenser) error { - for d.Next() { - if d.NextArg() { - iss.APIKey = d.Val() - if d.NextArg() { - return d.ArgErr() - } - } - - if iss.ACMEIssuer == nil { - iss.ACMEIssuer = new(ACMEIssuer) - } - err := iss.ACMEIssuer.UnmarshalCaddyfile(d.NewFromNextSegment()) - if err != nil { - return err - } - } - return nil -} - -const zerosslAPIBase = "https://api.zerossl.com/acme" - -// Interface guards -var ( - _ certmagic.PreChecker = (*ZeroSSLIssuer)(nil) - _ certmagic.Issuer = (*ZeroSSLIssuer)(nil) - _ certmagic.Revoker = (*ZeroSSLIssuer)(nil) - _ caddy.Provisioner = (*ZeroSSLIssuer)(nil) - _ ConfigSetter = (*ZeroSSLIssuer)(nil) - - // a type which properly embeds an ACMEIssuer should implement - // this interface so it can be treated as an ACMEIssuer - _ interface{ GetACMEIssuer() *ACMEIssuer } = (*ZeroSSLIssuer)(nil) -) diff --git a/vendor/github.com/caddyserver/caddy/v2/notify/notify.go b/vendor/github.com/caddyserver/caddy/v2/notify/notify.go deleted file mode 100644 index bca80c1f..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/notify/notify.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package notify - -// NotifyReadiness notifies process manager of readiness. -func NotifyReadiness() error { - return notifyReadiness() -} - -// NotifyReloading notifies process manager of reloading. -func NotifyReloading() error { - return notifyReloading() -} - -// NotifyStopping notifies process manager of stopping. -func NotifyStopping() error { - return notifyStopping() -} diff --git a/vendor/github.com/caddyserver/caddy/v2/notify/notify_linux.go b/vendor/github.com/caddyserver/caddy/v2/notify/notify_linux.go deleted file mode 100644 index 8ba49d2c..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/notify/notify_linux.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package notify - -import ( - "io" - "net" - "os" - "strings" -) - -// The documentation about this IPC protocol is available here: -// https://www.freedesktop.org/software/systemd/man/sd_notify.html - -func sdNotify(path, payload string) error { - socketAddr := &net.UnixAddr{ - Name: path, - Net: "unixgram", - } - - conn, err := net.DialUnix(socketAddr.Net, nil, socketAddr) - if err != nil { - return err - } - defer conn.Close() - - if _, err := io.Copy(conn, strings.NewReader(payload)); err != nil { - return err - } - return nil -} - -// notifyReadiness notifies systemd that caddy has finished its -// initialization routines. -func notifyReadiness() error { - val, ok := os.LookupEnv("NOTIFY_SOCKET") - if !ok || val == "" { - return nil - } - if err := sdNotify(val, "READY=1"); err != nil { - return err - } - return nil -} - -// notifyReloading notifies systemd that caddy is reloading its config. -func notifyReloading() error { - val, ok := os.LookupEnv("NOTIFY_SOCKET") - if !ok || val == "" { - return nil - } - if err := sdNotify(val, "RELOADING=1"); err != nil { - return err - } - return nil -} - -// notifyStopping notifies systemd that caddy is stopping. 
-func notifyStopping() error { - val, ok := os.LookupEnv("NOTIFY_SOCKET") - if !ok || val == "" { - return nil - } - if err := sdNotify(val, "STOPPING=1"); err != nil { - return err - } - return nil -} diff --git a/vendor/github.com/caddyserver/caddy/v2/notify/notify_other.go b/vendor/github.com/caddyserver/caddy/v2/notify/notify_other.go deleted file mode 100644 index 17f62bab..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/notify/notify_other.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build !linux - -package notify - -func notifyReadiness() error { - return nil -} - -func notifyReloading() error { - return nil -} - -func notifyStopping() error { - return nil -} diff --git a/vendor/github.com/caddyserver/caddy/v2/replacer.go b/vendor/github.com/caddyserver/caddy/v2/replacer.go deleted file mode 100644 index bffc4244..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/replacer.go +++ /dev/null @@ -1,328 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package caddy - -import ( - "fmt" - "os" - "path/filepath" - "runtime" - "strconv" - "strings" - "time" -) - -// NewReplacer returns a new Replacer. -func NewReplacer() *Replacer { - rep := &Replacer{ - static: make(map[string]interface{}), - } - rep.providers = []ReplacerFunc{ - globalDefaultReplacements, - rep.fromStatic, - } - return rep -} - -// NewEmptyReplacer returns a new Replacer, -// without the global default replacements. -func NewEmptyReplacer() *Replacer { - rep := &Replacer{ - static: make(map[string]interface{}), - } - rep.providers = []ReplacerFunc{ - rep.fromStatic, - } - return rep -} - -// Replacer can replace values in strings. -// A default/empty Replacer is not valid; -// use NewReplacer to make one. -type Replacer struct { - providers []ReplacerFunc - static map[string]interface{} -} - -// Map adds mapFunc to the list of value providers. -// mapFunc will be executed only at replace-time. -func (r *Replacer) Map(mapFunc ReplacerFunc) { - r.providers = append(r.providers, mapFunc) -} - -// Set sets a custom variable to a static value. -func (r *Replacer) Set(variable string, value interface{}) { - r.static[variable] = value -} - -// Get gets a value from the replacer. It returns -// the value and whether the variable was known. 
-func (r *Replacer) Get(variable string) (interface{}, bool) { - for _, mapFunc := range r.providers { - if val, ok := mapFunc(variable); ok { - return val, true - } - } - return nil, false -} - -// GetString is the same as Get, but coerces the value to a -// string representation. -func (r *Replacer) GetString(variable string) (string, bool) { - s, found := r.Get(variable) - return toString(s), found -} - -// Delete removes a variable with a static value -// that was created using Set. -func (r *Replacer) Delete(variable string) { - delete(r.static, variable) -} - -// fromStatic provides values from r.static. -func (r *Replacer) fromStatic(key string) (interface{}, bool) { - val, ok := r.static[key] - return val, ok -} - -// ReplaceOrErr is like ReplaceAll, but any placeholders -// that are empty or not recognized will cause an error to -// be returned. -func (r *Replacer) ReplaceOrErr(input string, errOnEmpty, errOnUnknown bool) (string, error) { - return r.replace(input, "", false, errOnEmpty, errOnUnknown, nil) -} - -// ReplaceKnown is like ReplaceAll but only replaces -// placeholders that are known (recognized). Unrecognized -// placeholders will remain in the output. -func (r *Replacer) ReplaceKnown(input, empty string) string { - out, _ := r.replace(input, empty, false, false, false, nil) - return out -} - -// ReplaceAll efficiently replaces placeholders in input with -// their values. All placeholders are replaced in the output -// whether they are recognized or not. Values that are empty -// string will be substituted with empty. -func (r *Replacer) ReplaceAll(input, empty string) string { - out, _ := r.replace(input, empty, true, false, false, nil) - return out -} - -// ReplaceFunc is the same as ReplaceAll, but calls f for every -// replacement to be made, in case f wants to change or inspect -// the replacement. 
-func (r *Replacer) ReplaceFunc(input string, f ReplacementFunc) (string, error) { - return r.replace(input, "", true, false, false, f) -} - -func (r *Replacer) replace(input, empty string, - treatUnknownAsEmpty, errOnEmpty, errOnUnknown bool, - f ReplacementFunc) (string, error) { - if !strings.Contains(input, string(phOpen)) { - return input, nil - } - - var sb strings.Builder - - // it is reasonable to assume that the output - // will be approximately as long as the input - sb.Grow(len(input)) - - // iterate the input to find each placeholder - var lastWriteCursor int - -scan: - for i := 0; i < len(input); i++ { - - // check for escaped braces - if i > 0 && input[i-1] == phEscape && (input[i] == phClose || input[i] == phOpen) { - sb.WriteString(input[lastWriteCursor : i-1]) - lastWriteCursor = i - continue - } - - if input[i] != phOpen { - continue - } - - // find the end of the placeholder - end := strings.Index(input[i:], string(phClose)) + i - if end < i { - continue - } - - // if necessary look for the first closing brace that is not escaped - for end > 0 && end < len(input)-1 && input[end-1] == phEscape { - nextEnd := strings.Index(input[end+1:], string(phClose)) - if nextEnd < 0 { - continue scan - } - end += nextEnd + 1 - } - - // write the substring from the last cursor to this point - sb.WriteString(input[lastWriteCursor:i]) - - // trim opening bracket - key := input[i+1 : end] - - // try to get a value for this key, handle empty values accordingly - val, found := r.Get(key) - if !found { - // placeholder is unknown (unrecognized); handle accordingly - if errOnUnknown { - return "", fmt.Errorf("unrecognized placeholder %s%s%s", - string(phOpen), key, string(phClose)) - } else if !treatUnknownAsEmpty { - // if treatUnknownAsEmpty is true, we'll handle an empty - // val later; so only continue otherwise - lastWriteCursor = i - continue - } - } - - // apply any transformations - if f != nil { - var err error - val, err = f(key, val) - if err != nil { - return "", err - } - } - - // convert val to a string as efficiently as possible - valStr := toString(val) - - // write the value; if it's empty, either return - // an error or write a default value - if valStr == "" { - if errOnEmpty { - return "", fmt.Errorf("evaluated placeholder %s%s%s is empty", - string(phOpen), key, string(phClose)) - } else if empty != "" { - sb.WriteString(empty) - } - } else { - sb.WriteString(valStr) - } - - // advance cursor to end of placeholder - i = end - lastWriteCursor = i + 1 - } - - // flush any unwritten remainder - sb.WriteString(input[lastWriteCursor:]) - - return sb.String(), nil -} - -func toString(val interface{}) string { - switch v := val.(type) { - case nil: - return "" - case string: - return v - case fmt.Stringer: - return v.String() - case byte: - return string(v) - case []byte: - return string(v) - case []rune: - return string(v) - case int: - return strconv.Itoa(v) - case int32: - return strconv.Itoa(int(v)) - case int64: - return strconv.Itoa(int(v)) - case uint: - return strconv.Itoa(int(v)) - case uint32: - return strconv.Itoa(int(v)) - case uint64: - return strconv.Itoa(int(v)) - case float32: - return strconv.FormatFloat(float64(v), 'f', -1, 32) - case float64: - return strconv.FormatFloat(v, 'f', -1, 64) - case bool: - if v { - return "true" - } - return "false" - default: - return fmt.Sprintf("%+v", v) - } -} - -// ReplacerFunc is a function that returns a replacement -// for the given key along with true if the function is able -// to service that key (even if the value is 
blank). If the -// function does not recognize the key, false should be -// returned. -type ReplacerFunc func(key string) (interface{}, bool) - -func globalDefaultReplacements(key string) (interface{}, bool) { - // check environment variable - const envPrefix = "env." - if strings.HasPrefix(key, envPrefix) { - return os.Getenv(key[len(envPrefix):]), true - } - - switch key { - case "system.hostname": - // OK if there is an error; just return empty string - name, _ := os.Hostname() - return name, true - case "system.slash": - return string(filepath.Separator), true - case "system.os": - return runtime.GOOS, true - case "system.arch": - return runtime.GOARCH, true - case "time.now": - return nowFunc(), true - case "time.now.common_log": - return nowFunc().Format("02/Jan/2006:15:04:05 -0700"), true - case "time.now.year": - return strconv.Itoa(nowFunc().Year()), true - case "time.now.unix": - return strconv.FormatInt(nowFunc().Unix(), 10), true - case "time.now.unix_ms": - return strconv.FormatInt(nowFunc().UnixNano()/int64(time.Millisecond), 10), true - } - - return nil, false -} - -// ReplacementFunc is a function that is called when a -// replacement is being performed. It receives the -// variable (i.e. placeholder name) and the value that -// will be the replacement, and returns the value that -// will actually be the replacement, or an error. Note -// that errors are sometimes ignored by replacers. -type ReplacementFunc func(variable string, val interface{}) (interface{}, error) - -// nowFunc is a variable so tests can change it -// in order to obtain a deterministic time. -var nowFunc = time.Now - -// ReplacerCtxKey is the context key for a replacer. -const ReplacerCtxKey CtxKey = "replacer" - -const phOpen, phClose, phEscape = '{', '}', '\\' diff --git a/vendor/github.com/caddyserver/caddy/v2/replacer_fuzz.go b/vendor/github.com/caddyserver/caddy/v2/replacer_fuzz.go deleted file mode 100644 index 2c27f01a..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/replacer_fuzz.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build gofuzz - -package caddy - -func FuzzReplacer(data []byte) (score int) { - NewReplacer().ReplaceAll(string(data), "") - NewReplacer().ReplaceAll(NewReplacer().ReplaceAll(string(data), ""), "") - NewReplacer().ReplaceAll(NewReplacer().ReplaceAll(string(data), ""), NewReplacer().ReplaceAll(string(data), "")) - NewReplacer().ReplaceAll(string(data[:len(data)/2]), string(data[len(data)/2:])) - return 0 -} diff --git a/vendor/github.com/caddyserver/caddy/v2/sigtrap.go b/vendor/github.com/caddyserver/caddy/v2/sigtrap.go deleted file mode 100644 index 0fce6d0d..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/sigtrap.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package caddy - -import ( - "os" - "os/signal" - - "go.uber.org/zap" -) - -// TrapSignals create signal/interrupt handlers as best it can for the -// current OS. This is a rather invasive function to call in a Go program -// that captures signals already, so in that case it would be better to -// implement these handlers yourself. -func TrapSignals() { - trapSignalsCrossPlatform() - trapSignalsPosix() -} - -// trapSignalsCrossPlatform captures SIGINT or interrupt (depending -// on the OS), which initiates a graceful shutdown. A second SIGINT -// or interrupt will forcefully exit the process immediately. -func trapSignalsCrossPlatform() { - go func() { - shutdown := make(chan os.Signal, 1) - signal.Notify(shutdown, os.Interrupt) - - for i := 0; true; i++ { - <-shutdown - - if i > 0 { - Log().Warn("force quit", zap.String("signal", "SIGINT")) - os.Exit(ExitCodeForceQuit) - } - - Log().Info("shutting down", zap.String("signal", "SIGINT")) - go exitProcessFromSignal("SIGINT") - } - }() -} - -// exitProcessFromSignal exits the process from a system signal. -func exitProcessFromSignal(sigName string) { - logger := Log().With(zap.String("signal", sigName)) - exitProcess(logger) -} - -// Exit codes. Generally, you should NOT -// automatically restart the process if the -// exit code is ExitCodeFailedStartup (1). -const ( - ExitCodeSuccess = iota - ExitCodeFailedStartup - ExitCodeForceQuit - ExitCodeFailedQuit -) diff --git a/vendor/github.com/caddyserver/caddy/v2/sigtrap_nonposix.go b/vendor/github.com/caddyserver/caddy/v2/sigtrap_nonposix.go deleted file mode 100644 index 3b4595a6..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/sigtrap_nonposix.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build windows plan9 nacl js - -package caddy - -func trapSignalsPosix() {} diff --git a/vendor/github.com/caddyserver/caddy/v2/sigtrap_posix.go b/vendor/github.com/caddyserver/caddy/v2/sigtrap_posix.go deleted file mode 100644 index d5a03a94..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/sigtrap_posix.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build !windows,!plan9,!nacl,!js - -package caddy - -import ( - "os" - "os/signal" - "syscall" - - "github.com/caddyserver/certmagic" - "go.uber.org/zap" -) - -// trapSignalsPosix captures POSIX-only signals. -func trapSignalsPosix() { - go func() { - sigchan := make(chan os.Signal, 1) - signal.Notify(sigchan, syscall.SIGTERM, syscall.SIGHUP, syscall.SIGQUIT, syscall.SIGUSR1, syscall.SIGUSR2) - - for sig := range sigchan { - switch sig { - case syscall.SIGQUIT: - Log().Info("quitting process immediately", zap.String("signal", "SIGQUIT")) - certmagic.CleanUpOwnLocks(Log()) // try to clean up locks anyway, it's important - os.Exit(ExitCodeForceQuit) - - case syscall.SIGTERM: - Log().Info("shutting down apps, then terminating", zap.String("signal", "SIGTERM")) - exitProcessFromSignal("SIGTERM") - - case syscall.SIGUSR1: - Log().Info("not implemented", zap.String("signal", "SIGUSR1")) - - case syscall.SIGUSR2: - Log().Info("not implemented", zap.String("signal", "SIGUSR2")) - - case syscall.SIGHUP: - // ignore; this signal is sometimes sent outside of the user's control - Log().Info("not implemented", zap.String("signal", "SIGHUP")) - } - } - }() -} diff --git a/vendor/github.com/caddyserver/caddy/v2/storage.go b/vendor/github.com/caddyserver/caddy/v2/storage.go deleted file mode 100644 index 62f9b1c6..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/storage.go +++ /dev/null @@ -1,160 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package caddy - -import ( - "os" - "path/filepath" - "runtime" - - "github.com/caddyserver/certmagic" - "go.uber.org/zap" -) - -// StorageConverter is a type that can convert itself -// to a valid, usable certmagic.Storage value. (The -// value might be short-lived.) This interface allows -// us to adapt any CertMagic storage implementation -// into a consistent API for Caddy configuration. -type StorageConverter interface { - CertMagicStorage() (certmagic.Storage, error) -} - -// HomeDir returns the best guess of the current user's home -// directory from environment variables. If unknown, "." (the -// current directory) is returned instead, except GOOS=android, -// which returns "/sdcard". -func HomeDir() string { - home := homeDirUnsafe() - if home == "" && runtime.GOOS == "android" { - home = "/sdcard" - } - if home == "" { - home = "." - } - return home -} - -// homeDirUnsafe is a low-level function that returns -// the user's home directory from environment -// variables. Careful: if it cannot be determined, an -// empty string is returned. 
If not accounting for -// that case, use HomeDir() instead; otherwise you -// may end up using the root of the file system. -func homeDirUnsafe() string { - home := os.Getenv("HOME") - if home == "" && runtime.GOOS == "windows" { - drive := os.Getenv("HOMEDRIVE") - path := os.Getenv("HOMEPATH") - home = drive + path - if drive == "" || path == "" { - home = os.Getenv("USERPROFILE") - } - } - if home == "" && runtime.GOOS == "plan9" { - home = os.Getenv("home") - } - return home -} - -// AppConfigDir returns the directory where to store user's config. -// -// If XDG_CONFIG_HOME is set, it returns: $XDG_CONFIG_HOME/caddy. -// Otherwise, os.UserConfigDir() is used; if successful, it appends -// "Caddy" (Windows & Mac) or "caddy" (every other OS) to the path. -// If it returns an error, the fallback path "./caddy" is returned. -// -// The config directory is not guaranteed to be different from -// AppDataDir(). -// -// Unlike os.UserConfigDir(), this function prefers the -// XDG_CONFIG_HOME env var on all platforms, not just Unix. -// -// Ref: https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html -func AppConfigDir() string { - if basedir := os.Getenv("XDG_CONFIG_HOME"); basedir != "" { - return filepath.Join(basedir, "caddy") - } - basedir, err := os.UserConfigDir() - if err != nil { - Log().Warn("unable to determine directory for user configuration; falling back to current directory", zap.Error(err)) - return "./caddy" - } - subdir := "caddy" - switch runtime.GOOS { - case "windows", "darwin": - subdir = "Caddy" - } - return filepath.Join(basedir, subdir) -} - -// AppDataDir returns a directory path that is suitable for storing -// application data on disk. It uses the environment for finding the -// best place to store data, and appends a "caddy" or "Caddy" (depending -// on OS and environment) subdirectory. -// -// For a base directory path: -// If XDG_DATA_HOME is set, it returns: $XDG_DATA_HOME/caddy; otherwise, -// on Windows it returns: %AppData%/Caddy, -// on Mac: $HOME/Library/Application Support/Caddy, -// on Plan9: $home/lib/caddy, -// on Android: $HOME/caddy, -// and on everything else: $HOME/.local/share/caddy. -// -// If a data directory cannot be determined, it returns "./caddy" -// (this is not ideal, and the environment should be fixed). -// -// The data directory is not guaranteed to be different from AppConfigDir(). -// -// Ref: https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html -func AppDataDir() string { - if basedir := os.Getenv("XDG_DATA_HOME"); basedir != "" { - return filepath.Join(basedir, "caddy") - } - switch runtime.GOOS { - case "windows": - appData := os.Getenv("AppData") - if appData != "" { - return filepath.Join(appData, "Caddy") - } - case "darwin": - home := homeDirUnsafe() - if home != "" { - return filepath.Join(home, "Library", "Application Support", "Caddy") - } - case "plan9": - home := homeDirUnsafe() - if home != "" { - return filepath.Join(home, "lib", "caddy") - } - case "android": - home := homeDirUnsafe() - if home != "" { - return filepath.Join(home, "caddy") - } - default: - home := homeDirUnsafe() - if home != "" { - return filepath.Join(home, ".local", "share", "caddy") - } - } - return "./caddy" -} - -// ConfigAutosavePath is the default path to which the last config will be persisted. -var ConfigAutosavePath = filepath.Join(AppConfigDir(), "autosave.json") - -// DefaultStorage is Caddy's default storage module. 
-var DefaultStorage = &certmagic.FileStorage{Path: AppDataDir()} diff --git a/vendor/github.com/caddyserver/caddy/v2/usagepool.go b/vendor/github.com/caddyserver/caddy/v2/usagepool.go deleted file mode 100644 index 6fd48f5b..00000000 --- a/vendor/github.com/caddyserver/caddy/v2/usagepool.go +++ /dev/null @@ -1,205 +0,0 @@ -// Copyright 2015 Matthew Holt and The Caddy Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package caddy - -import ( - "fmt" - "sync" - "sync/atomic" -) - -// UsagePool is a thread-safe map that pools values -// based on usage (reference counting). Values are -// only inserted if they do not already exist. There -// are two ways to add values to the pool: -// -// 1) LoadOrStore will increment usage and store the -// value immediately if it does not already exist. -// 2) LoadOrNew will atomically check for existence -// and construct the value immediately if it does -// not already exist, or increment the usage -// otherwise, then store that value in the pool. -// When the constructed value is finally deleted -// from the pool (when its usage reaches 0), it -// will be cleaned up by calling Destruct(). -// -// The use of LoadOrNew allows values to be created -// and reused and finally cleaned up only once, even -// though they may have many references throughout -// their lifespan. This is helpful, for example, when -// sharing thread-safe io.Writers that you only want -// to open and close once. -// -// There is no way to overwrite existing keys in the -// pool without first deleting it as many times as it -// was stored. Deleting too many times will panic. -// -// The implementation does not use a sync.Pool because -// UsagePool needs additional atomicity to run the -// constructor functions when creating a new value when -// LoadOrNew is used. (We could probably use sync.Pool -// but we'd still have to layer our own additional locks -// on top.) -// -// An empty UsagePool is NOT safe to use; always call -// NewUsagePool() to make a new one. -type UsagePool struct { - sync.RWMutex - pool map[interface{}]*usagePoolVal -} - -// NewUsagePool returns a new usage pool that is ready to use. -func NewUsagePool() *UsagePool { - return &UsagePool{ - pool: make(map[interface{}]*usagePoolVal), - } -} - -// LoadOrNew loads the value associated with key from the pool if it -// already exists. If the key doesn't exist, it will call construct -// to create a new value and then stores that in the pool. An error -// is only returned if the constructor returns an error. The loaded -// or constructed value is returned. The loaded return value is true -// if the value already existed and was loaded, or false if it was -// newly constructed. 
-func (up *UsagePool) LoadOrNew(key interface{}, construct Constructor) (value interface{}, loaded bool, err error) { - var upv *usagePoolVal - up.Lock() - upv, loaded = up.pool[key] - if loaded { - atomic.AddInt32(&upv.refs, 1) - up.Unlock() - upv.RLock() - value = upv.value - err = upv.err - upv.RUnlock() - } else { - upv = &usagePoolVal{refs: 1} - upv.Lock() - up.pool[key] = upv - up.Unlock() - value, err = construct() - if err == nil { - upv.value = value - } else { - // TODO: remove error'ed entries from map - upv.err = err - } - upv.Unlock() - } - return -} - -// LoadOrStore loads the value associated with key from the pool if it -// already exists, or stores it if it does not exist. It returns the -// value that was either loaded or stored, and true if the value already -// existed and was -func (up *UsagePool) LoadOrStore(key, val interface{}) (value interface{}, loaded bool) { - var upv *usagePoolVal - up.Lock() - upv, loaded = up.pool[key] - if loaded { - atomic.AddInt32(&upv.refs, 1) - up.Unlock() - upv.Lock() - if upv.err == nil { - value = upv.value - } else { - upv.value = val - upv.err = nil - } - upv.Unlock() - } else { - upv = &usagePoolVal{refs: 1, value: val} - up.pool[key] = upv - up.Unlock() - value = val - } - return -} - -// Range iterates the pool similarly to how sync.Map.Range() does: -// it calls f for every key in the pool, and if f returns false, -// iteration is stopped. Ranging does not affect usage counts. -// -// This method is somewhat naive and acquires a read lock on the -// entire pool during iteration, so do your best to make f() really -// fast, m'kay? -func (up *UsagePool) Range(f func(key, value interface{}) bool) { - up.RLock() - defer up.RUnlock() - for key, upv := range up.pool { - upv.RLock() - if upv.err != nil { - upv.RUnlock() - continue - } - val := upv.value - upv.RUnlock() - if !f(key, val) { - break - } - } -} - -// Delete decrements the usage count for key and removes the -// value from the underlying map if the usage is 0. It returns -// true if the usage count reached 0 and the value was deleted. -// It panics if the usage count drops below 0; always call -// Delete precisely as many times as LoadOrStore. -func (up *UsagePool) Delete(key interface{}) (deleted bool, err error) { - up.Lock() - upv, ok := up.pool[key] - if !ok { - up.Unlock() - return false, nil - } - refs := atomic.AddInt32(&upv.refs, -1) - if refs == 0 { - delete(up.pool, key) - up.Unlock() - upv.RLock() - val := upv.value - upv.RUnlock() - if destructor, ok := val.(Destructor); ok { - err = destructor.Destruct() - } - deleted = true - } else { - up.Unlock() - if refs < 0 { - panic(fmt.Sprintf("deleted more than stored: %#v (usage: %d)", - upv.value, upv.refs)) - } - } - return -} - -// Constructor is a function that returns a new value -// that can destruct itself when it is no longer needed. -type Constructor func() (Destructor, error) - -// Destructor is a value that can clean itself up when -// it is deallocated. 
-type Destructor interface { - Destruct() error -} - -type usagePoolVal struct { - refs int32 // accessed atomically; must be 64-bit aligned for 32-bit systems - value interface{} - err error - sync.RWMutex -} diff --git a/vendor/github.com/caddyserver/certmagic/.gitignore b/vendor/github.com/caddyserver/certmagic/.gitignore deleted file mode 100644 index fbd281d1..00000000 --- a/vendor/github.com/caddyserver/certmagic/.gitignore +++ /dev/null @@ -1 +0,0 @@ -_gitignore/ diff --git a/vendor/github.com/caddyserver/certmagic/LICENSE.txt b/vendor/github.com/caddyserver/certmagic/LICENSE.txt deleted file mode 100644 index 8dada3ed..00000000 --- a/vendor/github.com/caddyserver/certmagic/LICENSE.txt +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/caddyserver/certmagic/README.md b/vendor/github.com/caddyserver/certmagic/README.md deleted file mode 100644 index 015faf84..00000000 --- a/vendor/github.com/caddyserver/certmagic/README.md +++ /dev/null @@ -1,522 +0,0 @@ -
-CertMagic
-
-Easy and Powerful TLS Automation
-
-The same library used by the Caddy Web Server
-
- - -Caddy's [automagic TLS features](https://caddyserver.com/docs/automatic-https)—now for your own Go programs—in one powerful and easy-to-use library! - -CertMagic is the most mature, robust, and powerful ACME client integration for Go... and perhaps ever. - -With CertMagic, you can add one line to your Go application to serve securely over TLS, without ever having to touch certificates. - -Instead of: - -```go -// plaintext HTTP, gross 🤢 -http.ListenAndServe(":80", mux) -``` - -Use CertMagic: - -```go -// encrypted HTTPS with HTTP->HTTPS redirects - yay! 🔒😠-certmagic.HTTPS([]string{"example.com"}, mux) -``` - -That line of code will serve your HTTP router `mux` over HTTPS, complete with HTTP->HTTPS redirects. It obtains and renews the TLS certificates. It staples OCSP responses for greater privacy and security. As long as your domain name points to your server, CertMagic will keep its connections secure. - -Compared to other ACME client libraries for Go, only CertMagic supports the full suite of ACME features, and no other library matches CertMagic's maturity and reliability. - - - - -CertMagic - Automatic HTTPS using Let's Encrypt -=============================================== - -## Menu - -- [Features](#features) -- [Requirements](#requirements) -- [Installation](#installation) -- [Usage](#usage) - - [Package Overview](#package-overview) - - [Certificate authority](#certificate-authority) - - [The `Config` type](#the-config-type) - - [Defaults](#defaults) - - [Providing an email address](#providing-an-email-address) - - [Rate limiting](#rate-limiting) - - [Development and testing](#development-and-testing) - - [Examples](#examples) - - [Serving HTTP handlers with HTTPS](#serving-http-handlers-with-https) - - [Starting a TLS listener](#starting-a-tls-listener) - - [Getting a tls.Config](#getting-a-tlsconfig) - - [Advanced use](#advanced-use) - - [Wildcard Certificates](#wildcard-certificates) - - [Behind a load balancer (or in a cluster)](#behind-a-load-balancer-or-in-a-cluster) - - [The ACME Challenges](#the-acme-challenges) - - [HTTP Challenge](#http-challenge) - - [TLS-ALPN Challenge](#tls-alpn-challenge) - - [DNS Challenge](#dns-challenge) - - [On-Demand TLS](#on-demand-tls) - - [Storage](#storage) - - [Cache](#cache) -- [Contributing](#contributing) -- [Project History](#project-history) -- [Credits and License](#credits-and-license) - - -## Features - -- Fully automated certificate management including issuance and renewal -- One-liner, fully managed HTTPS servers -- Full control over almost every aspect of the system -- HTTP->HTTPS redirects -- Solves all 3 ACME challenges: HTTP, TLS-ALPN, and DNS -- Most robust error handling of _any_ ACME client - - Challenges are randomized to avoid accidental dependence - - Challenges are rotated to overcome certain network blockages - - Robust retries for up to 30 days - - Exponential backoff with carefully-tuned intervals - - Retries with optional test/staging CA endpoint instead of production, to avoid rate limits -- Written in Go, a language with memory-safety guarantees -- Powered by [ACMEz](https://github.com/mholt/acmez), _the_ premier ACME client library for Go -- All [libdns](https://github.com/libdns) DNS providers work out-of-the-box -- Pluggable storage implementations (default: file system) -- Wildcard certificates -- Automatic OCSP stapling ([done right](https://gist.github.com/sleevi/5efe9ef98961ecfb4da8#gistcomment-2336055)) [keeps your sites online!](https://twitter.com/caddyserver/status/1234874273724084226) - - Will 
[automatically attempt](https://twitter.com/mholt6/status/1235577699541762048) to replace [revoked certificates](https://community.letsencrypt.org/t/2020-02-29-caa-rechecking-bug/114591/3?u=mholt)! - - Staples stored to disk in case of responder outages -- Distributed solving of all challenges (works behind load balancers) - - Highly efficient, coordinated management in a fleet - - Active locking - - Smart queueing -- Supports "on-demand" issuance of certificates (during TLS handshakes!) - - Caddy / CertMagic pioneered this technology - - Custom decision functions to regulate and throttle on-demand behavior -- Optional event hooks for observation -- Works with any certificate authority (CA) compliant with the ACME specification -- Certificate revocation (please, only if private key is compromised) -- Must-Staple (optional; not default) -- Cross-platform support! Mac, Windows, Linux, BSD, Android... -- Scales to hundreds of thousands of names/certificates per instance -- Use in conjunction with your own certificates - - -## Requirements - -0. ACME server (can be a publicly-trusted CA, or your own) -1. Public DNS name(s) you control -2. Server reachable from public Internet - - Or use the DNS challenge to waive this requirement -3. Control over port 80 (HTTP) and/or 443 (HTTPS) - - Or they can be forwarded to other ports you control - - Or use the DNS challenge to waive this requirement - - (This is a requirement of the ACME protocol, not a library limitation) -4. Persistent storage - - Typically the local file system (default) - - Other integrations available/possible - -**_Before using this library, your domain names MUST be pointed (A/AAAA records) at your server (unless you use the DNS challenge)!_** - - -## Installation - -```bash -$ go get github.com/caddyserver/certmagic -``` - - -## Usage - -### Package Overview - -#### Certificate authority - -This library uses Let's Encrypt by default, but you can use any certificate authority that conforms to the ACME specification. Known/common CAs are provided as consts in the package, for example `LetsEncryptStagingCA` and `LetsEncryptProductionCA`. - -#### The `Config` type - -The `certmagic.Config` struct is how you can wield the power of this fully armed and operational battle station. However, an empty/uninitialized `Config` is _not_ a valid one! In time, you will learn to use the force of `certmagic.NewDefault()` as I have. - -#### Defaults - -The default `Config` value is called `certmagic.Default`. Change its fields to suit your needs, then call `certmagic.NewDefault()` when you need a valid `Config` value. In other words, `certmagic.Default` is a template and is not valid for use directly. - -You can set the default values easily, for example: `certmagic.Default.Issuer = ...`. - -Similarly, to configure ACME-specific defaults, use `certmagic.DefaultACME`. - -The high-level functions in this package (`HTTPS()`, `Listen()`, `ManageSync()`, and `ManageAsync()`) use the default config exclusively. This is how most of you will interact with the package. This is suitable when all your certificates are managed the same way. However, if you need to manage certificates differently depending on their name, you will need to make your own cache and configs (keep reading). - - -#### Providing an email address - -Although not strictly required, this is highly recommended best practice. 
It allows you to receive expiration emails if your certificates are expiring for some reason, and also allows the CA's engineers to potentially get in touch with you if something is wrong. I recommend setting `certmagic.DefaultACME.Email` or always setting the `Email` field of a new `Config` struct. - - -#### Rate limiting - -To avoid firehosing the CA's servers, CertMagic has built-in rate limiting. Currently, its default limit is up to 10 transactions (obtain or renew) every 1 minute (sliding window). This can be changed by setting the `RateLimitEvents` and `RateLimitEventsWindow` variables, if desired. - -The CA may still enforce their own rate limits, and there's nothing (well, nothing ethical) CertMagic can do to bypass them for you. - -Additionally, CertMagic will retry failed validations with exponential backoff for up to 30 days, with a reasonable maximum interval between attempts (an "attempt" means trying each enabled challenge type once). - - -### Development and Testing - -Note that Let's Encrypt imposes [strict rate limits](https://letsencrypt.org/docs/rate-limits/) at its production endpoint, so using it while developing your application may lock you out for a few days if you aren't careful! - -While developing your application and testing it, use [their staging endpoint](https://letsencrypt.org/docs/staging-environment/) which has much higher rate limits. Even then, don't hammer it: but it's much safer for when you're testing. When deploying, though, use their production CA because their staging CA doesn't issue trusted certificates. - -To use staging, set `certmagic.DefaultACME.CA = certmagic.LetsEncryptStagingCA` or set `CA` of every `ACMEManager` struct. - - - -### Examples - -There are many ways to use this library. We'll start with the highest-level (simplest) and work down (more control). - -All these high-level examples use `certmagic.Default` and `certmagic.DefaultACME` for the config and the default cache and storage for serving up certificates. - -First, we'll follow best practices and do the following: - -```go -// read and agree to your CA's legal documents -certmagic.DefaultACME.Agreed = true - -// provide an email address -certmagic.DefaultACME.Email = "you@yours.com" - -// use the staging endpoint while we're developing -certmagic.DefaultACME.CA = certmagic.LetsEncryptStagingCA -``` - -For fully-functional program examples, check out [this Twitter thread](https://twitter.com/mholt6/status/1073103805112147968) (or read it [unrolled into a single post](https://threadreaderapp.com/thread/1073103805112147968.html)). (Note that the package API has changed slightly since these posts.) - - -#### Serving HTTP handlers with HTTPS - -```go -err := certmagic.HTTPS([]string{"example.com", "www.example.com"}, mux) -if err != nil { - return err -} -``` - -This starts HTTP and HTTPS listeners and redirects HTTP to HTTPS! 
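For a fuller picture, here is a minimal sketch of a complete program combining that one-liner with the best-practice defaults shown above (the domain and email address are placeholder values to replace with your own):

```go
package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/caddyserver/certmagic"
)

func main() {
	// best-practice defaults from the snippets above
	// (email and domain are placeholder values)
	certmagic.DefaultACME.Agreed = true
	certmagic.DefaultACME.Email = "you@example.com"
	certmagic.DefaultACME.CA = certmagic.LetsEncryptStagingCA // switch to the production CA when you deploy

	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "Hello over HTTPS!")
	})

	// HTTPS blocks while serving: it obtains and renews certificates,
	// serves mux over TLS, and redirects HTTP to HTTPS
	log.Fatal(certmagic.HTTPS([]string{"example.com"}, mux))
}
```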
- -#### Starting a TLS listener - -```go -ln, err := certmagic.Listen([]string{"example.com"}) -if err != nil { - return err -} -``` - - -#### Getting a tls.Config - -```go -tlsConfig, err := certmagic.TLS([]string{"example.com"}) -if err != nil { - return err -} -``` - - -#### Advanced use - -For more control (particularly, if you need a different way of managing each certificate), you'll make and use a `Cache` and a `Config` like so: - -```go -cache := certmagic.NewCache(certmagic.CacheOptions{ - GetConfigForCert: func(cert certmagic.Certificate) (*certmagic.Config, error) { - // do whatever you need to do to get the right - // configuration for this certificate; keep in - // mind that this config value is used as a - // template, and will be completed with any - // defaults that are set in the Default config - return &certmagic.Config{ - // ... - }, nil - }, - ... -}) - -magic := certmagic.New(cache, certmagic.Config{ - // any customizations you need go here -}) - -myACME := certmagic.NewACMEManager(magic, certmagic.ACMEManager{ - CA: certmagic.LetsEncryptStagingCA, - Email: "you@yours.com", - Agreed: true, - // plus any other customizations you need -}) - -magic.Issuer = myACME - -// this obtains certificates or renews them if necessary -err := magic.ManageSync([]string{"example.com", "sub.example.com"}) -if err != nil { - return err -} - -// to use its certificates and solve the TLS-ALPN challenge, -// you can get a TLS config to use in a TLS listener! -tlsConfig := magic.TLSConfig() - -//// OR //// - -// if you already have a TLS config you don't want to replace, -// we can simply set its GetCertificate field and append the -// TLS-ALPN challenge protocol to the NextProtos -myTLSConfig.GetCertificate = magic.GetCertificate -myTLSConfig.NextProtos = append(myTLSConfig.NextProtos, tlsalpn01.ACMETLS1Protocol) - -// the HTTP challenge has to be handled by your HTTP server; -// if you don't have one, you should have disabled it earlier -// when you made the certmagic.Config -httpMux = myACME.HTTPChallengeHandler(httpMux) -``` - -Great! This example grants you much more flexibility for advanced programs. However, _the vast majority of you will only use the high-level functions described earlier_, especially since you can still customize them by setting the package-level `Default` config. - - -### Wildcard certificates - -At time of writing (December 2018), Let's Encrypt only issues wildcard certificates with the DNS challenge. You can easily enable the DNS challenge with CertMagic for numerous providers (see the relevant section in the docs). - - -### Behind a load balancer (or in a cluster) - -CertMagic runs effectively behind load balancers and/or in cluster/fleet environments. In other words, you can have 10 or 1,000 servers all serving the same domain names, all sharing certificates and OCSP staples. - -To do so, simply ensure that each instance is using the same Storage. That is the sole criteria for determining whether an instance is part of a cluster. - -The default Storage is implemented using the file system, so mounting the same shared folder is sufficient (see [Storage](#storage) for more on that)! If you need an alternate Storage implementation, feel free to use one, provided that all the instances use the _same_ one. :) - -See [Storage](#storage) and the associated [pkg.go.dev](https://pkg.go.dev/github.com/caddyserver/certmagic?tab=doc#Storage) for more information! - - -## The ACME Challenges - -This section describes how to solve the ACME challenges. 
Challenges are how you demonstrate to the certificate authority some control over your domain name, thus authorizing them to grant you a certificate for that name. [The great innovation of ACME](https://www.dotconferences.com/2016/10/matthew-holt-go-with-acme) is that verification by CAs can now be automated, rather than having to click links in emails (who ever thought that was a good idea??). - -If you're using the high-level convenience functions like `HTTPS()`, `Listen()`, or `TLS()`, the HTTP and/or TLS-ALPN challenges are solved for you because they also start listeners. However, if you're making a `Config` and you start your own server manually, you'll need to be sure the ACME challenges can be solved so certificates can be renewed. - -The HTTP and TLS-ALPN challenges are the defaults because they don't require configuration from you, but they require that your server is accessible from external IPs on low ports. If that is not possible in your situation, you can enable the DNS challenge, which will disable the HTTP and TLS-ALPN challenges and use the DNS challenge exclusively. - -Technically, only one challenge needs to be enabled for things to work, but using multiple is good for reliability in case a challenge is discontinued by the CA. This happened to the TLS-SNI challenge in early 2018: many popular ACME clients such as Traefik and Autocert broke, resulting in downtime for some sites until new releases were made and patches deployed, because they relied on a single challenge; Caddy, however (this library's forerunner), was unaffected because it also used the HTTP challenge. If multiple challenges are enabled, they are chosen randomly to help prevent false reliance on a single challenge type. And if one fails, any remaining enabled challenges are tried before giving up. - - -### HTTP Challenge - -Per the ACME spec, the HTTP challenge requires port 80, or at least packet forwarding from port 80. It works by serving a specific HTTP response, which only the genuine server would have, to a normal HTTP request at a special endpoint. - -If you are running an HTTP server, solving this challenge is very easy: just wrap your handler in `HTTPChallengeHandler` _or_ call `HandleHTTPChallenge()` inside your own `ServeHTTP()` method. - -For example, if you're using the standard library: - -```go -mux := http.NewServeMux() -mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) { - fmt.Fprintf(w, "Lookit my cool website over HTTPS!") -}) - -http.ListenAndServe(":80", myACME.HTTPChallengeHandler(mux)) -``` - -If wrapping your handler is not a good solution, try this inside your `ServeHTTP()` instead: - -```go -magic := certmagic.NewDefault() -myACME := certmagic.NewACMEManager(magic, certmagic.DefaultACME) - -func ServeHTTP(w http.ResponseWriter, r *http.Request) { - if myACME.HandleHTTPChallenge(w, r) { - return // challenge handled; nothing else to do - } - ... -} -``` - -If you are not running an HTTP server, you should disable the HTTP challenge _or_ run an HTTP server whose sole job is to solve the HTTP challenge. - - -### TLS-ALPN Challenge - -Per the ACME spec, the TLS-ALPN challenge requires port 443, or at least packet forwarding from port 443. It works by providing a special certificate, via the standard Application Layer Protocol Negotiation (ALPN) TLS extension, that carries a special value. This is the most convenient challenge type because it usually requires no extra configuration and uses the standard TLS port, which is where the certificates are used anyway.
- -This challenge is easy to solve: just use the provided `tls.Config` when you make your TLS listener: - -```go -// use this to configure a TLS listener -tlsConfig := magic.TLSConfig() -``` - -Or make two simple changes to an existing `tls.Config`: - -```go -myTLSConfig.GetCertificate = magic.GetCertificate -myTLSConfig.NextProtos = append(myTLSConfig.NextProtos, tlsalpn01.ACMETLS1Protocol) -``` - -Then just make sure your TLS listener is listening on port 443: - -```go -ln, err := tls.Listen("tcp", ":443", myTLSConfig) -``` - - -### DNS Challenge - -The DNS challenge is perhaps the most useful challenge because it allows you to obtain certificates without your server needing to be publicly accessible on the Internet, and it's the only challenge by which Let's Encrypt will issue wildcard certificates. - -This challenge works by setting a special record in the domain's zone. To do this automatically, your DNS provider needs to offer an API by which changes can be made to domain names, and the changes need to take effect immediately for best results. CertMagic supports [all DNS providers with `libdns` implementations](https://github.com/libdns)! It always cleans up the temporary record after the challenge completes. - -To enable it, just set the `DNS01Solver` field on a `certmagic.ACMEManager` struct, or set the default `certmagic.DefaultACME.DNS01Solver` variable. For example, if my domains' DNS were served by Cloudflare: - -```go -import "github.com/libdns/cloudflare" - -certmagic.DefaultACME.DNS01Solver = &certmagic.DNS01Solver{ - DNSProvider: &cloudflare.Provider{ - APIToken: "topsecret", - }, -} -``` - -Now the DNS challenge will be used by default, and I can obtain certificates for wildcard domains, too. Enabling the DNS challenge disables the other challenges for that `certmagic.ACMEManager` instance. - - -## On-Demand TLS - -Normally, certificates are obtained and renewed before a listener starts serving, and then those certificates are maintained throughout the lifetime of the program. In other words, the certificate names are static. But sometimes you don't know all the names ahead of time, or you don't want to manage all the certificates up front. This is where On-Demand TLS shines. - -Originally invented for use in Caddy (which was the first program to use such technology), On-Demand TLS makes it possible and easy to serve certificates for arbitrary or specific names during the lifetime of the server. When a TLS handshake is received, CertMagic will read the Server Name Indication (SNI) value and either load and present that certificate in the ServerHello or, if one does not exist, obtain it from the CA right then and there. - -Of course, this has some obvious security implications. You don't want to DoS a CA or allow arbitrary clients to fill your storage with spammy TLS handshakes. That's why, when you enable On-Demand issuance, you should set limits or a policy that controls which certificates may be obtained. CertMagic has an implicit whitelist built in, which is sufficient for nearly everyone, but it also has a more advanced way to control on-demand issuance. - -The simplest way to enable on-demand issuance is to set the OnDemand field of a Config (or the default package-level value): - -```go -certmagic.Default.OnDemand = new(certmagic.OnDemandConfig) -``` - -By setting this to a non-nil value, on-demand TLS is enabled for that config.
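
As a rough end-to-end sketch (assuming you run your own TLS listener; `myHandler` is a placeholder), enabling on-demand issuance and serving with it might look like this:

```go
// enable on-demand TLS for the default config
certmagic.Default.OnDemand = new(certmagic.OnDemandConfig)

magic := certmagic.NewDefault()

tlsConfig := magic.TLSConfig()
// advertise ordinary HTTP protocols ahead of the ACME ALPN protocol
tlsConfig.NextProtos = append([]string{"h2", "http/1.1"}, tlsConfig.NextProtos...)

ln, err := tls.Listen("tcp", ":443", tlsConfig)
if err != nil {
	return err
}

// certificates are now obtained lazily, during the first
// TLS handshake for each (allowed) SNI name
err = http.Serve(ln, myHandler)
```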
For convenient security, CertMagic's high-level abstraction functions such as `HTTPS()`, `TLS()`, `ManageSync()`, `ManageAsync()`, and `Listen()` (which all accept a list of domain names) will whitelist those names automatically, so only certificates for those names can be obtained when using the Default config. This is sufficient for most users. - -However, if you require advanced control over which domains can be issued certificates on-demand (for example, if you do not know ahead of time which domain names you are managing, or need to defer the decision until the handshake), you should implement your own DecisionFunc: - -```go -// if the decision function returns an error, a certificate -// may not be obtained for that name at that time -certmagic.Default.OnDemand = &certmagic.OnDemandConfig{ - DecisionFunc: func(name string) error { - if name != "example.com" { - return fmt.Errorf("not allowed") - } - return nil - }, -} -``` - -The [pkg.go.dev](https://pkg.go.dev/github.com/caddyserver/certmagic?tab=doc#OnDemandConfig) describes how to use this in full detail, so please check it out! - - -## Storage - -CertMagic relies on storage to store certificates and other TLS assets (OCSP staple cache, coordinating locks, etc). Persistent storage is a requirement when using CertMagic: ephemeral storage will likely lead to rate limiting on the CA side, as CertMagic will always have to get new certificates. - -By default, CertMagic stores assets on the local file system in `$HOME/.local/share/certmagic` (and honors `$XDG_DATA_HOME` if set). CertMagic will create the directory if it does not exist. If writes are denied, things will not be happy, so make sure CertMagic can write to it! - -The notion of a "cluster" or "fleet" of instances that may be serving the same site and sharing certificates, etc., is tied to storage. Put simply, any instances that use the same storage facilities are considered part of the cluster. So if you deploy 100 instances of CertMagic behind a load balancer, they are all part of the same cluster if they share the same storage configuration. Sharing storage could mean mounting a shared folder, or implementing some other distributed storage backend such as a database server or KV store. - -The easiest way to change the storage being used is to set `certmagic.DefaultStorage` to a value that satisfies the [Storage interface](https://pkg.go.dev/github.com/caddyserver/certmagic?tab=doc#Storage). Keep in mind that a valid `Storage` must be able to perform some operations atomically in order to provide locking and synchronization. - -If you write a Storage implementation, please add it to the [project wiki](https://github.com/caddyserver/certmagic/wiki/Storage-Implementations) so people can find it! - - -## Cache - -All of the certificates in use are de-duplicated and cached in memory for optimal performance at handshake time. This cache must be backed by persistent storage as described above. - -Most applications will not need to interact with certificate caches directly. Usually, the closest you will come is to set the package-wide `certmagic.DefaultStorage` variable (before attempting to create any Configs). However, if your use case requires using different storage facilities for different Configs (that's highly unlikely and NOT recommended! Even Caddy doesn't get that crazy), you will need to call `certmagic.NewCache()` and pass in the storage you want to use, then get new `Config` structs with `certmagic.NewWithCache()` and pass in the cache.
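
If you genuinely need separate storage per Config, a sketch of that wiring might look like the following, where the `FileStorage` path is a placeholder for whatever shared storage you actually use:

```go
// a custom storage shared by this config (placeholder path)
customStorage := &certmagic.FileStorage{Path: "/mnt/shared/certmagic"}

cache := certmagic.NewCache(certmagic.CacheOptions{
	GetConfigForCert: func(cert certmagic.Certificate) (*certmagic.Config, error) {
		// return a config template for managing this certificate
		return &certmagic.Config{Storage: customStorage}, nil
	},
})
defer cache.Stop() // stop maintenance and release locks when done

magic := certmagic.NewWithCache(cache, certmagic.Config{
	Storage: customStorage,
})

err := magic.ManageSync([]string{"example.com"})
```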
- -Again, if you need to do this, you've probably over-complicated your application design. - - -## FAQ - -### Can I use some of my own certificates while using CertMagic? - -Yes, just call the relevant method on the `Config` to add your own certificate to the cache: - -- [`CacheUnmanagedCertificatePEMBytes()`](https://pkg.go.dev/github.com/caddyserver/certmagic?tab=doc#Config.CacheUnmanagedCertificatePEMBytes) -- [`CacheUnmanagedCertificatePEMFile()`](https://pkg.go.dev/github.com/caddyserver/certmagic?tab=doc#Config.CacheUnmanagedCertificatePEMFile) -- [`CacheUnmanagedTLSCertificate()`](https://pkg.go.dev/github.com/caddyserver/certmagic?tab=doc#Config.CacheUnmanagedTLSCertificate) - -Keep in mind that unmanaged certificates are (obviously) not renewed for you, so you'll have to replace them yourself before they expire. However, OCSP stapling is performed even for unmanaged certificates that qualify. - - -### Does CertMagic obtain SAN certificates? - -Technically all certificates these days are SAN certificates because CommonName is deprecated. But if you're asking whether CertMagic issues and manages certificates with multiple SANs, the answer is no. It does, however, support serving them if you provide your own. - - -### How can I listen on ports 80 and 443? Do I have to run as root? - -On Linux, you can use `setcap` to grant your binary the permission to bind low ports: - -```bash -$ sudo setcap cap_net_bind_service=+ep /path/to/your/binary -``` - -and then you will not need to run with root privileges. - - -## Contributing - -We welcome your contributions! Please see our **[contributing guidelines](https://github.com/caddyserver/certmagic/blob/master/.github/CONTRIBUTING.md)** for instructions. - - -## Project History - -CertMagic is the core of Caddy's advanced TLS automation code, extracted into a library. The underlying ACME client implementation is [ACMEz](https://github.com/mholt/acmez). CertMagic's code was originally a central part of Caddy even before Let's Encrypt entered public beta in 2015. - -In the years since then, Caddy's TLS automation techniques have been widely adopted, tried and tested in production, and have served millions of sites and secured trillions of connections. - -Now, CertMagic is _the actual library used by Caddy_. It's incredibly powerful and feature-rich, but also easy to use for simple Go programs: one line of code can enable fully-automated HTTPS applications with HTTP->HTTPS redirects. - -Caddy is known for its robust HTTPS+ACME features. When ACME certificate authorities have had outages, in some cases Caddy was the only major client that didn't experience any downtime. Caddy can weather OCSP outages lasting days, or CA outages lasting weeks, without taking your sites offline. - -Caddy was also the first to sport "on-demand" issuance technology, which obtains certificates during the first TLS handshake for an allowed SNI name. - -Consequently, CertMagic brings all these (and more) features and capabilities right into your own Go programs.
- -You can [watch a 2016 dotGo talk](https://www.dotconferences.com/2016/10/matthew-holt-go-with-acme) by the author of this library about using ACME to automate certificate management in Go programs: - -[![Matthew Holt speaking at dotGo 2016 about ACME in Go](https://user-images.githubusercontent.com/1128849/49921557-2d506780-fe6b-11e8-97bf-6053b6b4eb48.png)](https://www.dotconferences.com/2016/10/matthew-holt-go-with-acme) - - - -## Credits and License - -CertMagic is a project by [Matthew Holt](https://twitter.com/mholt6), who is the author; and various contributors, who are credited in the commit history of either CertMagic or Caddy. - -CertMagic is licensed under Apache 2.0, an open source license. For convenience, its main points are summarized as follows (but this is no replacement for the actual license text): - -- The author owns the copyright to this code -- Use, distribute, and modify the software freely -- Private and internal use is allowed -- License text and copyright notices must stay intact and be included with distributions -- Any and all changes to the code must be documented diff --git a/vendor/github.com/caddyserver/certmagic/account.go b/vendor/github.com/caddyserver/certmagic/account.go deleted file mode 100644 index 8633f92f..00000000 --- a/vendor/github.com/caddyserver/certmagic/account.go +++ /dev/null @@ -1,419 +0,0 @@ -// Copyright 2015 Matthew Holt -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package certmagic - -import ( - "bufio" - "bytes" - "context" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "encoding/json" - "fmt" - "io" - "os" - "path" - "sort" - "strings" - "sync" - - "github.com/mholt/acmez/acme" -) - -// getAccount either loads or creates a new account, depending on if -// an account can be found in storage for the given CA + email combo. -func (am *ACMEManager) getAccount(ca, email string) (acme.Account, error) { - acct, err := am.loadAccount(ca, email) - if err != nil { - if _, ok := err.(ErrNotExist); ok { - return am.newAccount(email) - } - return acct, err - } - return acct, err -} - -// loadAccount loads an account from storage, but does not create a new one. -func (am *ACMEManager) loadAccount(ca, email string) (acme.Account, error) { - regBytes, err := am.config.Storage.Load(am.storageKeyUserReg(ca, email)) - if err != nil { - return acme.Account{}, err - } - keyBytes, err := am.config.Storage.Load(am.storageKeyUserPrivateKey(ca, email)) - if err != nil { - return acme.Account{}, err - } - - var acct acme.Account - err = json.Unmarshal(regBytes, &acct) - if err != nil { - return acct, err - } - acct.PrivateKey, err = decodePrivateKey(keyBytes) - if err != nil { - return acct, fmt.Errorf("could not decode account's private key: %v", err) - } - - return acct, nil -} - -// newAccount generates a new private key for a new ACME account, but -// it does not register or save the account. 
-func (*ACMEManager) newAccount(email string) (acme.Account, error) { - var acct acme.Account - if email != "" { - acct.Contact = []string{"mailto:" + email} // TODO: should we abstract the contact scheme? - } - privateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - if err != nil { - return acct, fmt.Errorf("generating private key: %v", err) - } - acct.PrivateKey = privateKey - return acct, nil -} - -// GetAccount first tries loading the account with the associated private key from storage. -// If it does not exist in storage, it will be retrieved from the ACME server and added to storage. -// The account must already exist; it does not create a new account. -func (am *ACMEManager) GetAccount(ctx context.Context, privateKeyPEM []byte) (acme.Account, error) { - account, err := am.loadAccountByKey(ctx, privateKeyPEM) - if err != nil { - if _, ok := err.(ErrNotExist); ok { - account, err = am.lookUpAccount(ctx, privateKeyPEM) - } else { - return account, err - } - } - return account, err -} - -// loadAccountByKey loads the account with the given private key from storage, if it exists. -// If it does not exist, an error of type ErrNotExist is returned. This is not very efficient -// for lots of accounts. -func (am *ACMEManager) loadAccountByKey(ctx context.Context, privateKeyPEM []byte) (acme.Account, error) { - accountList, err := am.config.Storage.List(am.storageKeyUsersPrefix(am.CA), false) - if err != nil { - return acme.Account{}, err - } - for _, accountFolderKey := range accountList { - email := path.Base(accountFolderKey) - keyBytes, err := am.config.Storage.Load(am.storageKeyUserPrivateKey(am.CA, email)) - if err != nil { - return acme.Account{}, err - } - if bytes.Equal(bytes.TrimSpace(keyBytes), bytes.TrimSpace(privateKeyPEM)) { - return am.loadAccount(am.CA, email) - } - } - return acme.Account{}, ErrNotExist(fmt.Errorf("no account found with that key")) -} - -// lookUpAccount looks up the account associated with privateKeyPEM from the ACME server. -// If the account is found by the server, it will be saved to storage and returned. -func (am *ACMEManager) lookUpAccount(ctx context.Context, privateKeyPEM []byte) (acme.Account, error) { - client, err := am.newACMEClient(false) - if err != nil { - return acme.Account{}, fmt.Errorf("creating ACME client: %v", err) - } - - privateKey, err := decodePrivateKey([]byte(privateKeyPEM)) - if err != nil { - return acme.Account{}, fmt.Errorf("decoding private key: %v", err) - } - - // look up the account - account := acme.Account{PrivateKey: privateKey} - account, err = client.GetAccount(ctx, account) - if err != nil { - return acme.Account{}, fmt.Errorf("looking up account with server: %v", err) - } - - // save the account details to storage - err = am.saveAccount(client.Directory, account) - if err != nil { - return account, fmt.Errorf("could not save account to storage: %v", err) - } - - return account, nil -} - -// saveAccount persists an ACME account's info and private key to storage. -// It does NOT register the account via ACME or prompt the user. -func (am *ACMEManager) saveAccount(ca string, account acme.Account) error { - regBytes, err := json.MarshalIndent(account, "", "\t") - if err != nil { - return err - } - keyBytes, err := encodePrivateKey(account.PrivateKey) - if err != nil { - return err - } - // extract primary contact (email), without scheme (e.g. 
"mailto:") - primaryContact := getPrimaryContact(account) - all := []keyValue{ - { - key: am.storageKeyUserReg(ca, primaryContact), - value: regBytes, - }, - { - key: am.storageKeyUserPrivateKey(ca, primaryContact), - value: keyBytes, - }, - } - return storeTx(am.config.Storage, all) -} - -// getEmail does everything it can to obtain an email address -// from the user within the scope of memory and storage to use -// for ACME TLS. If it cannot get an email address, it does nothing -// (If user is prompted, it will warn the user of -// the consequences of an empty email.) This function MAY prompt -// the user for input. If allowPrompts is false, the user -// will NOT be prompted and an empty email may be returned. -func (am *ACMEManager) getEmail(allowPrompts bool) error { - leEmail := am.Email - - // First try package default email, or a discovered email address - if leEmail == "" { - leEmail = DefaultACME.Email - } - if leEmail == "" { - discoveredEmailMu.Lock() - leEmail = discoveredEmail - discoveredEmailMu.Unlock() - } - - // Then try to get most recent user email from storage - var gotRecentEmail bool - if leEmail == "" { - leEmail, gotRecentEmail = am.mostRecentAccountEmail(am.CA) - } - if !gotRecentEmail && leEmail == "" && allowPrompts { - // Looks like there is no email address readily available, - // so we will have to ask the user if we can. - var err error - leEmail, err = am.promptUserForEmail() - if err != nil { - return err - } - - // User might have just signified their agreement - am.Agreed = DefaultACME.Agreed - } - - // save the email for later and ensure it is consistent - // for repeated use; then update cfg with the email - leEmail = strings.TrimSpace(strings.ToLower(leEmail)) - discoveredEmailMu.Lock() - if discoveredEmail == "" { - discoveredEmail = leEmail - } - discoveredEmailMu.Unlock() - am.Email = leEmail - - return nil -} - -// promptUserForEmail prompts the user for an email address -// and returns the email address they entered (which could -// be the empty string). If no error is returned, then Agreed -// will also be set to true, since continuing through the -// prompt signifies agreement. -func (am *ACMEManager) promptUserForEmail() (string, error) { - // prompt the user for an email address and terms agreement - reader := bufio.NewReader(stdin) - am.promptUserAgreement("") - fmt.Println("Please enter your email address to signify agreement and to be notified") - fmt.Println("in case of issues. You can leave it blank, but we don't recommend it.") - fmt.Print(" Email address: ") - leEmail, err := reader.ReadString('\n') - if err != nil && err != io.EOF { - return "", fmt.Errorf("reading email address: %v", err) - } - leEmail = strings.TrimSpace(leEmail) - DefaultACME.Agreed = true - return leEmail, nil -} - -// promptUserAgreement simply outputs the standard user -// agreement prompt with the given agreement URL. -// It outputs a newline after the message. -func (am *ACMEManager) promptUserAgreement(agreementURL string) { - userAgreementPrompt := `Your sites will be served over HTTPS automatically using an automated CA. -By continuing, you agree to the CA's terms of service` - if agreementURL == "" { - fmt.Printf("\n\n%s.\n", userAgreementPrompt) - return - } - fmt.Printf("\n\n%s at:\n %s\n", userAgreementPrompt, agreementURL) -} - -// askUserAgreement prompts the user to agree to the agreement -// at the given agreement URL via stdin. It returns whether the -// user agreed or not. 
-func (am *ACMEManager) askUserAgreement(agreementURL string) bool { - am.promptUserAgreement(agreementURL) - fmt.Print("Do you agree to the terms? (y/n): ") - - reader := bufio.NewReader(stdin) - answer, err := reader.ReadString('\n') - if err != nil { - return false - } - answer = strings.ToLower(strings.TrimSpace(answer)) - - return answer == "y" || answer == "yes" -} - -func storageKeyACMECAPrefix(issuerKey string) string { - return path.Join(prefixACME, StorageKeys.Safe(issuerKey)) -} - -func (am *ACMEManager) storageKeyCAPrefix(caURL string) string { - return storageKeyACMECAPrefix(am.issuerKey(caURL)) -} - -func (am *ACMEManager) storageKeyUsersPrefix(caURL string) string { - return path.Join(am.storageKeyCAPrefix(caURL), "users") -} - -func (am *ACMEManager) storageKeyUserPrefix(caURL, email string) string { - if email == "" { - email = emptyEmail - } - return path.Join(am.storageKeyUsersPrefix(caURL), StorageKeys.Safe(email)) -} - -func (am *ACMEManager) storageKeyUserReg(caURL, email string) string { - return am.storageSafeUserKey(caURL, email, "registration", ".json") -} - -func (am *ACMEManager) storageKeyUserPrivateKey(caURL, email string) string { - return am.storageSafeUserKey(caURL, email, "private", ".key") -} - -// storageSafeUserKey returns a key for the given email, with the default -// filename, and the filename ending in the given extension. -func (am *ACMEManager) storageSafeUserKey(ca, email, defaultFilename, extension string) string { - if email == "" { - email = emptyEmail - } - email = strings.ToLower(email) - filename := am.emailUsername(email) - if filename == "" { - filename = defaultFilename - } - filename = StorageKeys.Safe(filename) - return path.Join(am.storageKeyUserPrefix(ca, email), filename+extension) -} - -// emailUsername returns the username portion of an email address (part before -// '@') or the original input if it can't find the "@" symbol. -func (*ACMEManager) emailUsername(email string) string { - at := strings.Index(email, "@") - if at == -1 { - return email - } else if at == 0 { - return email[1:] - } - return email[:at] -} - -// mostRecentAccountEmail finds the most recently-written account file -// in storage. Since this is part of a complex sequence to get a user -// account, errors here are discarded to simplify code flow in -// the caller, and errors are not important here anyway. -func (am *ACMEManager) mostRecentAccountEmail(caURL string) (string, bool) { - accountList, err := am.config.Storage.List(am.storageKeyUsersPrefix(caURL), false) - if err != nil || len(accountList) == 0 { - return "", false - } - - // get all the key infos ahead of sorting, because - // we might filter some out - stats := make(map[string]KeyInfo) - for i := 0; i < len(accountList); i++ { - u := accountList[i] - keyInfo, err := am.config.Storage.Stat(u) - if err != nil { - continue - } - if keyInfo.IsTerminal { - // I found a bug when macOS created a .DS_Store file in - // the users folder, and CertMagic tried to use that as - // the user email because it was newer than the other one - // which existed... sure, this isn't a perfect fix but - // frankly one's OS shouldn't mess with the data folder - // in the first place. - accountList = append(accountList[:i], accountList[i+1:]...) 
- i-- - continue - } - stats[u] = keyInfo - } - - sort.Slice(accountList, func(i, j int) bool { - iInfo := stats[accountList[i]] - jInfo := stats[accountList[j]] - return jInfo.Modified.Before(iInfo.Modified) - }) - - if len(accountList) == 0 { - return "", false - } - - account, err := am.getAccount(caURL, path.Base(accountList[0])) - if err != nil { - return "", false - } - - return getPrimaryContact(account), true -} - -// getPrimaryContact returns the first contact on the account (if any) -// without the scheme. (I guess we assume an email address.) -func getPrimaryContact(account acme.Account) string { - // TODO: should this be abstracted with some lower-level helper? - var primaryContact string - if len(account.Contact) > 0 { - primaryContact = account.Contact[0] - if idx := strings.Index(primaryContact, ":"); idx >= 0 { - primaryContact = primaryContact[idx+1:] - } - } - return primaryContact -} - -// When an email address is not explicitly specified, we can remember -// the last one we discovered to avoid having to ask again later. -// (We used to store this in DefaultACME.Email but it was racey; see #127) -var ( - discoveredEmail string - discoveredEmailMu sync.Mutex -) - -// agreementTestURL is set during tests to skip requiring -// setting up an entire ACME CA endpoint. -var agreementTestURL string - -// stdin is used to read the user's input if prompted; -// this is changed by tests during tests. -var stdin = io.ReadWriter(os.Stdin) - -// The name of the folder for accounts where the email -// address was not provided; default 'username' if you will, -// but only for local/storage use, not with the CA. -const emptyEmail = "default" diff --git a/vendor/github.com/caddyserver/certmagic/acmeclient.go b/vendor/github.com/caddyserver/certmagic/acmeclient.go deleted file mode 100644 index cc876b92..00000000 --- a/vendor/github.com/caddyserver/certmagic/acmeclient.go +++ /dev/null @@ -1,384 +0,0 @@ -// Copyright 2015 Matthew Holt -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package certmagic - -import ( - "context" - "crypto/tls" - "crypto/x509" - "fmt" - weakrand "math/rand" - "net" - "net/http" - "net/url" - "strconv" - "strings" - "sync" - "time" - - "github.com/mholt/acmez" - "github.com/mholt/acmez/acme" - "go.uber.org/zap" -) - -func init() { - weakrand.Seed(time.Now().UnixNano()) -} - -// acmeClient holds state necessary to perform ACME operations -// for certificate management with an ACME account. Call -// ACMEManager.newACMEClientWithAccount() to get a valid one. -type acmeClient struct { - mgr *ACMEManager - acmeClient *acmez.Client - account acme.Account -} - -// newACMEClientWithAccount creates an ACME client ready to use with an account, including -// loading one from storage or registering a new account with the CA if necessary. If -// useTestCA is true, am.TestCA will be used if set; otherwise, the primary CA will be used. 
-func (am *ACMEManager) newACMEClientWithAccount(ctx context.Context, useTestCA, interactive bool) (*acmeClient, error) { - // first, get underlying ACME client - client, err := am.newACMEClient(useTestCA) - if err != nil { - return nil, err - } - - // look up or create the ACME account - var account acme.Account - if am.AccountKeyPEM != "" { - account, err = am.GetAccount(ctx, []byte(am.AccountKeyPEM)) - } else { - account, err = am.getAccount(client.Directory, am.Email) - } - if err != nil { - return nil, fmt.Errorf("getting ACME account: %v", err) - } - - // register account if it is new - if account.Status == "" { - if am.NewAccountFunc != nil { - account, err = am.NewAccountFunc(ctx, am, account) - if err != nil { - return nil, fmt.Errorf("account pre-registration callback: %v", err) - } - } - - // agree to terms - if interactive { - if !am.Agreed { - var termsURL string - dir, err := client.GetDirectory(ctx) - if err != nil { - return nil, fmt.Errorf("getting directory: %w", err) - } - if dir.Meta != nil { - termsURL = dir.Meta.TermsOfService - } - if termsURL != "" { - am.Agreed = am.askUserAgreement(termsURL) - if !am.Agreed { - return nil, fmt.Errorf("user must agree to CA terms") - } - } - } - } else { - // can't prompt a user who isn't there; they should - // have reviewed the terms beforehand - am.Agreed = true - } - account.TermsOfServiceAgreed = am.Agreed - - // associate account with external binding, if configured - if am.ExternalAccount != nil { - err := account.SetExternalAccountBinding(ctx, client.Client, *am.ExternalAccount) - if err != nil { - return nil, err - } - } - - // create account - account, err = client.NewAccount(ctx, account) - if err != nil { - return nil, fmt.Errorf("registering account %v with server: %w", account.Contact, err) - } - - // persist the account to storage - err = am.saveAccount(client.Directory, account) - if err != nil { - return nil, fmt.Errorf("could not save account %v: %v", account.Contact, err) - } - } - - c := &acmeClient{ - mgr: am, - acmeClient: client, - account: account, - } - - return c, nil -} - -// newACMEClient creates a new underlying ACME client using the settings in am, -// independent of any particular ACME account. If useTestCA is true, am.TestCA -// will be used if it is set; otherwise, the primary CA will be used. 
-func (am *ACMEManager) newACMEClient(useTestCA bool) (*acmez.Client, error) { - // ensure defaults are filled in - var caURL string - if useTestCA { - caURL = am.TestCA - } - if caURL == "" { - caURL = am.CA - } - if caURL == "" { - caURL = DefaultACME.CA - } - certObtainTimeout := am.CertObtainTimeout - if certObtainTimeout == 0 { - certObtainTimeout = DefaultACME.CertObtainTimeout - } - - // ensure endpoint is secure (assume HTTPS if scheme is missing) - if !strings.Contains(caURL, "://") { - caURL = "https://" + caURL - } - u, err := url.Parse(caURL) - if err != nil { - return nil, err - } - if u.Scheme != "https" && !isLoopback(u.Host) && !isInternal(u.Host) { - return nil, fmt.Errorf("%s: insecure CA URL (HTTPS required)", caURL) - } - - // set up the dialers and resolver for the ACME client's HTTP client - dialer := &net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 2 * time.Minute, - } - if am.Resolver != "" { - dialer.Resolver = &net.Resolver{ - PreferGo: true, - Dial: func(ctx context.Context, network, _ string) (net.Conn, error) { - return (&net.Dialer{ - Timeout: 15 * time.Second, - }).DialContext(ctx, network, am.Resolver) - }, - } - } - - // TODO: we could potentially reuse the HTTP transport and client - hc := am.httpClient // TODO: is this racey? - if am.httpClient == nil { - transport := &http.Transport{ - Proxy: http.ProxyFromEnvironment, - DialContext: dialer.DialContext, - TLSHandshakeTimeout: 15 * time.Second, - ResponseHeaderTimeout: 15 * time.Second, - ExpectContinueTimeout: 2 * time.Second, - ForceAttemptHTTP2: true, - } - if am.TrustedRoots != nil { - transport.TLSClientConfig = &tls.Config{ - RootCAs: am.TrustedRoots, - } - } - - hc = &http.Client{ - Transport: transport, - Timeout: HTTPTimeout, - } - - am.httpClient = hc - } - - client := &acmez.Client{ - Client: &acme.Client{ - Directory: caURL, - PollTimeout: certObtainTimeout, - UserAgent: buildUAString(), - HTTPClient: hc, - }, - ChallengeSolvers: make(map[string]acmez.Solver), - } - if am.Logger != nil { - l := am.Logger.Named("acme_client") - client.Client.Logger, client.Logger = l, l - } - - // configure challenges (most of the time, DNS challenge is - // exclusive of other ones because it is usually only used - // in situations where the default challenges would fail) - if am.DNS01Solver == nil { - // enable HTTP-01 challenge - if !am.DisableHTTPChallenge { - useHTTPPort := HTTPChallengePort - if HTTPPort > 0 && HTTPPort != HTTPChallengePort { - useHTTPPort = HTTPPort - } - if am.AltHTTPPort > 0 { - useHTTPPort = am.AltHTTPPort - } - client.ChallengeSolvers[acme.ChallengeTypeHTTP01] = distributedSolver{ - storage: am.config.Storage, - storageKeyIssuerPrefix: am.storageKeyCAPrefix(client.Directory), - solver: &httpSolver{ - acmeManager: am, - address: net.JoinHostPort(am.ListenHost, strconv.Itoa(useHTTPPort)), - }, - } - } - - // enable TLS-ALPN-01 challenge - if !am.DisableTLSALPNChallenge { - useTLSALPNPort := TLSALPNChallengePort - if HTTPSPort > 0 && HTTPSPort != TLSALPNChallengePort { - useTLSALPNPort = HTTPSPort - } - if am.AltTLSALPNPort > 0 { - useTLSALPNPort = am.AltTLSALPNPort - } - client.ChallengeSolvers[acme.ChallengeTypeTLSALPN01] = distributedSolver{ - storage: am.config.Storage, - storageKeyIssuerPrefix: am.storageKeyCAPrefix(client.Directory), - solver: &tlsALPNSolver{ - config: am.config, - address: net.JoinHostPort(am.ListenHost, strconv.Itoa(useTLSALPNPort)), - }, - } - } - } else { - // use DNS challenge exclusively - client.ChallengeSolvers[acme.ChallengeTypeDNS01] = 
am.DNS01Solver - } - - // wrap solvers in our wrapper so that we can keep track of challenge - // info: this is useful for solving challenges globally as a process; - // for example, usually there is only one process that can solve the - // HTTP and TLS-ALPN challenges, and only one server in that process - // that can bind the necessary port(s), so if a server listening on - // a different port needed a certificate, it would have to know about - // the other server listening on that port, and somehow convey its - // challenge info or share its config, but this isn't always feasible; - // what the wrapper does is it accesses a global challenge memory so - // that unrelated servers in this process can all solve each others' - // challenges without having to know about each other - Caddy's admin - // endpoint uses this functionality since it and the HTTP/TLS modules - // do not know about each other - // (doing this here in a separate loop ensures that even if we expose - // solver config to users later, we will even wrap their own solvers) - for name, solver := range client.ChallengeSolvers { - client.ChallengeSolvers[name] = solverWrapper{solver} - } - - return client, nil -} - -func (c *acmeClient) throttle(ctx context.Context, names []string) error { - // throttling is scoped to CA + account email - rateLimiterKey := c.acmeClient.Directory + "," + c.mgr.Email - rateLimitersMu.Lock() - rl, ok := rateLimiters[rateLimiterKey] - if !ok { - rl = NewRateLimiter(RateLimitEvents, RateLimitEventsWindow) - rateLimiters[rateLimiterKey] = rl - // TODO: stop rate limiter when it is garbage-collected... - } - rateLimitersMu.Unlock() - if c.mgr.Logger != nil { - c.mgr.Logger.Info("waiting on internal rate limiter", - zap.Strings("identifiers", names), - zap.String("ca", c.acmeClient.Directory), - zap.String("account", c.mgr.Email), - ) - } - err := rl.Wait(ctx) - if err != nil { - return err - } - if c.mgr.Logger != nil { - c.mgr.Logger.Info("done waiting on internal rate limiter", - zap.Strings("identifiers", names), - zap.String("ca", c.acmeClient.Directory), - zap.String("account", c.mgr.Email), - ) - } - return nil -} - -func (c *acmeClient) usingTestCA() bool { - return c.mgr.TestCA != "" && c.acmeClient.Directory == c.mgr.TestCA -} - -func (c *acmeClient) revoke(ctx context.Context, cert *x509.Certificate, reason int) error { - return c.acmeClient.RevokeCertificate(ctx, c.account, - cert, c.account.PrivateKey, reason) -} - -func buildUAString() string { - ua := "CertMagic" - if UserAgent != "" { - ua = UserAgent + " " + ua - } - return ua -} - -// These internal rate limits are designed to prevent accidentally -// firehosing a CA's ACME endpoints. They are not intended to -// replace or replicate the CA's actual rate limits. -// -// Let's Encrypt's rate limits can be found here: -// https://letsencrypt.org/docs/rate-limits/ -// -// Currently (as of December 2019), Let's Encrypt's most relevant -// rate limit for large deployments is 300 new orders per account -// per 3 hours (on average, or best case, that's about 1 every 36 -// seconds, or 2 every 72 seconds, etc.); but it's not reasonable -// to try to assume that our internal state is the same as the CA's -// (due to process restarts, config changes, failed validations, -// etc.) and ultimately, only the CA's actual rate limiter is the -// authority. Thus, our own rate limiters do not attempt to enforce -// external rate limits. Doing so causes problems when the domains -// are not in our control (i.e. 
serving customer sites) and/or lots -// of domains fail validation: they clog our internal rate limiter -// and nearly starve out (or at least slow down) the other domains -// that need certificates. Failed transactions are already retried -// with exponential backoff, so adding in rate limiting can slow -// things down even more. -// -// Instead, the point of our internal rate limiter is to avoid -// hammering the CA's endpoint when there are thousands or even -// millions of certificates under management. Our goal is to -// allow small bursts in a relatively short timeframe so as to -// not block any one domain for too long, without unleashing -// thousands of requests to the CA at once. -var ( - rateLimiters = make(map[string]*RingBufferRateLimiter) - rateLimitersMu sync.RWMutex - - // RateLimitEvents is how many new events can be allowed - // in RateLimitEventsWindow. - RateLimitEvents = 20 - - // RateLimitEventsWindow is the size of the sliding - // window that throttles events. - RateLimitEventsWindow = 1 * time.Minute -) - -// Some default values passed down to the underlying ACME client. -var ( - UserAgent string - HTTPTimeout = 30 * time.Second -) diff --git a/vendor/github.com/caddyserver/certmagic/acmemanager.go b/vendor/github.com/caddyserver/certmagic/acmemanager.go deleted file mode 100644 index 82b6cc12..00000000 --- a/vendor/github.com/caddyserver/certmagic/acmemanager.go +++ /dev/null @@ -1,466 +0,0 @@ -package certmagic - -import ( - "context" - "crypto/x509" - "errors" - "fmt" - "net/http" - "net/url" - "sort" - "strings" - "time" - - "github.com/mholt/acmez" - "github.com/mholt/acmez/acme" - "go.uber.org/zap" -) - -// ACMEManager gets certificates using ACME. It implements the PreChecker, -// Issuer, and Revoker interfaces. -// -// It is NOT VALID to use an ACMEManager without calling NewACMEManager(). -// It fills in any default values from DefaultACME as well as setting up -// internal state that is necessary for valid use. Always call -// NewACMEManager() to get a valid ACMEManager value. 
-type ACMEManager struct { - // The endpoint of the directory for the ACME - // CA we are to use - CA string - - // TestCA is the endpoint of the directory for - // an ACME CA to use to test domain validation, - // but any certs obtained from this CA are - // discarded - TestCA string - - // The email address to use when creating or - // selecting an existing ACME server account - Email string - - // The PEM-encoded private key of the ACME - // account to use; only needed if the account - // is already created on the server and - // can be looked up with the ACME protocol - AccountKeyPEM string - - // Set to true if agreed to the CA's - // subscriber agreement - Agreed bool - - // An optional external account to associate - // with this ACME account - ExternalAccount *acme.EAB - - // Disable all HTTP challenges - DisableHTTPChallenge bool - - // Disable all TLS-ALPN challenges - DisableTLSALPNChallenge bool - - // The host (ONLY the host, not port) to listen - // on if necessary to start a listener to solve - // an ACME challenge - ListenHost string - - // The alternate port to use for the ACME HTTP - // challenge; if non-empty, this port will be - // used instead of HTTPChallengePort to spin up - // a listener for the HTTP challenge - AltHTTPPort int - - // The alternate port to use for the ACME - // TLS-ALPN challenge; the system must forward - // TLSALPNChallengePort to this port for - // challenge to succeed - AltTLSALPNPort int - - // The solver for the dns-01 challenge; - // usually this is a DNS01Solver value - // from this package - DNS01Solver acmez.Solver - - // TrustedRoots specifies a pool of root CA - // certificates to trust when communicating - // over a network to a peer. - TrustedRoots *x509.CertPool - - // The maximum amount of time to allow for - // obtaining a certificate. If empty, the - // default from the underlying ACME lib is - // used. If set, it must not be too low so - // as to cancel challenges too early. - CertObtainTimeout time.Duration - - // Address of custom DNS resolver to be used - // when communicating with ACME server - Resolver string - - // Callback function that is called before a - // new ACME account is registered with the CA; - // it allows for last-second config changes - // of the ACMEManager and the Account. - // (TODO: this feature is still EXPERIMENTAL and subject to change) - NewAccountFunc func(context.Context, *ACMEManager, acme.Account) (acme.Account, error) - - // Preferences for selecting alternate - // certificate chains - PreferredChains ChainPreference - - // Set a logger to enable logging - Logger *zap.Logger - - config *Config - httpClient *http.Client -} - -// NewACMEManager constructs a valid ACMEManager based on a template -// configuration; any empty values will be filled in by defaults in -// DefaultACME, and if any required values are still empty, sensible -// defaults will be used. -// -// Typically, you'll create the Config first with New() or NewDefault(), -// then call NewACMEManager(), then assign the return value to the Issuers -// field of the Config. 
-func NewACMEManager(cfg *Config, template ACMEManager) *ACMEManager { - if cfg == nil { - panic("cannot make valid ACMEManager without an associated CertMagic config") - } - if template.CA == "" { - template.CA = DefaultACME.CA - } - if template.TestCA == "" && template.CA == DefaultACME.CA { - // only use the default test CA if the CA is also - // the default CA; no point in testing against - // Let's Encrypt's staging server if we are not - // using their production server too - template.TestCA = DefaultACME.TestCA - } - if template.Email == "" { - template.Email = DefaultACME.Email - } - if template.AccountKeyPEM == "" { - template.AccountKeyPEM = DefaultACME.AccountKeyPEM - } - if !template.Agreed { - template.Agreed = DefaultACME.Agreed - } - if template.ExternalAccount == nil { - template.ExternalAccount = DefaultACME.ExternalAccount - } - if !template.DisableHTTPChallenge { - template.DisableHTTPChallenge = DefaultACME.DisableHTTPChallenge - } - if !template.DisableTLSALPNChallenge { - template.DisableTLSALPNChallenge = DefaultACME.DisableTLSALPNChallenge - } - if template.ListenHost == "" { - template.ListenHost = DefaultACME.ListenHost - } - if template.AltHTTPPort == 0 { - template.AltHTTPPort = DefaultACME.AltHTTPPort - } - if template.AltTLSALPNPort == 0 { - template.AltTLSALPNPort = DefaultACME.AltTLSALPNPort - } - if template.DNS01Solver == nil { - template.DNS01Solver = DefaultACME.DNS01Solver - } - if template.TrustedRoots == nil { - template.TrustedRoots = DefaultACME.TrustedRoots - } - if template.CertObtainTimeout == 0 { - template.CertObtainTimeout = DefaultACME.CertObtainTimeout - } - if template.Resolver == "" { - template.Resolver = DefaultACME.Resolver - } - if template.NewAccountFunc == nil { - template.NewAccountFunc = DefaultACME.NewAccountFunc - } - if template.Logger == nil { - template.Logger = DefaultACME.Logger - } - template.config = cfg - return &template -} - -// IssuerKey returns the unique issuer key for the -// confgured CA endpoint. -func (am *ACMEManager) IssuerKey() string { - return am.issuerKey(am.CA) -} - -func (*ACMEManager) issuerKey(ca string) string { - key := ca - if caURL, err := url.Parse(key); err == nil { - key = caURL.Host - if caURL.Path != "" { - // keep the path, but make sure it's a single - // component (i.e. no forward slashes, and for - // good measure, no backward slashes either) - const hyphen = "-" - repl := strings.NewReplacer( - "/", hyphen, - "\\", hyphen, - ) - path := strings.Trim(repl.Replace(caURL.Path), hyphen) - if path != "" { - key += hyphen + path - } - } - } - return key -} - -// PreCheck performs a few simple checks before obtaining or -// renewing a certificate with ACME, and returns whether this -// batch is eligible for certificates if using Let's Encrypt. -// It also ensures that an email address is available. -func (am *ACMEManager) PreCheck(_ context.Context, names []string, interactive bool) error { - publicCA := strings.Contains(am.CA, "api.letsencrypt.org") || strings.Contains(am.CA, "acme.zerossl.com") - if publicCA { - for _, name := range names { - if !SubjectQualifiesForPublicCert(name) { - return fmt.Errorf("subject does not qualify for a public certificate: %s", name) - } - } - } - return am.getEmail(interactive) -} - -// Issue implements the Issuer interface. It obtains a certificate for the given csr using -// the ACME configuration am. 
-func (am *ACMEManager) Issue(ctx context.Context, csr *x509.CertificateRequest) (*IssuedCertificate, error) { - if am.config == nil { - panic("missing config pointer (must use NewACMEManager)") - } - - var isRetry bool - if attempts, ok := ctx.Value(AttemptsCtxKey).(*int); ok { - isRetry = *attempts > 0 - } - - cert, usedTestCA, err := am.doIssue(ctx, csr, isRetry) - if err != nil { - return nil, err - } - - // important to note that usedTestCA is not necessarily the same as isRetry - // (usedTestCA can be true if the main CA and the test CA happen to be the same) - if isRetry && usedTestCA && am.CA != am.TestCA { - // succeeded with testing endpoint, so try again with production endpoint - // (only if the production endpoint is different from the testing endpoint) - // TODO: This logic is imperfect and could benefit from some refinement. - // The two CA endpoints likely have different states, which could cause one - // to succeed and the other to fail, even if it's not a validation error. - // Two common cases would be: - // 1) Rate limiter state. This is more likely to cause prod to fail while - // staging succeeds, since prod usually has tighter rate limits. Thus, if - // initial attempt failed in prod due to rate limit, first retry (on staging) - // might succeed, and then trying prod again right way would probably still - // fail; normally this would terminate retries but the right thing to do in - // this case is to back off and retry again later. We could refine this logic - // to stick with the production endpoint on retries unless the error changes. - // 2) Cached authorizations state. If a domain validates successfully with - // one endpoint, but then the other endpoint is used, it might fail, e.g. if - // DNS was just changed or is still propagating. In this case, the second CA - // should continue to be retried with backoff, without switching back to the - // other endpoint. This is more likely to happen if a user is testing with - // the staging CA as the main CA, then changes their configuration once they - // think they are ready for the production endpoint. - cert, _, err = am.doIssue(ctx, csr, false) - if err != nil { - // succeeded with test CA but failed just now with the production CA; - // either we are observing differing internal states of each CA that will - // work out with time, or there is a bug/misconfiguration somewhere - // externally; it is hard to tell which! one easy cue is whether the - // error is specifically a 429 (Too Many Requests); if so, we should - // probably keep retrying - var problem acme.Problem - if errors.As(err, &problem) { - if problem.Status == http.StatusTooManyRequests { - // DON'T abort retries; the test CA succeeded (even - // if it's cached, it recently succeeded!) so we just - // need to keep trying (with backoff) until this CA's - // rate limits expire... 
- // TODO: as mentioned in comment above, we would benefit - // by pinning the main CA at this point instead of - // needlessly retrying with the test CA first each time - return nil, err - } - } - return nil, ErrNoRetry{err} - } - } - - return cert, err -} - -func (am *ACMEManager) doIssue(ctx context.Context, csr *x509.CertificateRequest, useTestCA bool) (*IssuedCertificate, bool, error) { - client, err := am.newACMEClientWithAccount(ctx, useTestCA, false) - if err != nil { - return nil, false, err - } - usingTestCA := client.usingTestCA() - - nameSet := namesFromCSR(csr) - - if !useTestCA { - if err := client.throttle(ctx, nameSet); err != nil { - return nil, usingTestCA, err - } - } - - certChains, err := client.acmeClient.ObtainCertificateUsingCSR(ctx, client.account, csr) - if err != nil { - return nil, usingTestCA, fmt.Errorf("%v %w (ca=%s)", nameSet, err, client.acmeClient.Directory) - } - if len(certChains) == 0 { - return nil, usingTestCA, fmt.Errorf("no certificate chains") - } - - preferredChain := am.selectPreferredChain(certChains) - - ic := &IssuedCertificate{ - Certificate: preferredChain.ChainPEM, - Metadata: preferredChain, - } - - return ic, usingTestCA, nil -} - -// selectPreferredChain sorts and then filters the certificate chains to find the optimal -// chain preferred by the client. If there's only one chain, that is returned without any -// processing. If there are no matches, the first chain is returned. -func (am *ACMEManager) selectPreferredChain(certChains []acme.Certificate) acme.Certificate { - if len(certChains) == 1 { - if am.Logger != nil && (len(am.PreferredChains.AnyCommonName) > 0 || len(am.PreferredChains.RootCommonName) > 0) { - am.Logger.Debug("there is only one chain offered; selecting it regardless of preferences", - zap.String("chain_url", certChains[0].URL)) - } - return certChains[0] - } - - if am.PreferredChains.Smallest != nil { - if *am.PreferredChains.Smallest { - sort.Slice(certChains, func(i, j int) bool { - return len(certChains[i].ChainPEM) < len(certChains[j].ChainPEM) - }) - } else { - sort.Slice(certChains, func(i, j int) bool { - return len(certChains[i].ChainPEM) > len(certChains[j].ChainPEM) - }) - } - } - - if len(am.PreferredChains.AnyCommonName) > 0 || len(am.PreferredChains.RootCommonName) > 0 { - // in order to inspect, we need to decode their PEM contents - decodedChains := make([][]*x509.Certificate, len(certChains)) - for i, chain := range certChains { - certs, err := parseCertsFromPEMBundle(chain.ChainPEM) - if err != nil { - if am.Logger != nil { - am.Logger.Error("unable to parse PEM certificate chain", - zap.Int("chain", i), - zap.Error(err)) - } - continue - } - decodedChains[i] = certs - } - - if len(am.PreferredChains.AnyCommonName) > 0 { - for _, prefAnyCN := range am.PreferredChains.AnyCommonName { - for i, chain := range decodedChains { - for _, cert := range chain { - if cert.Issuer.CommonName == prefAnyCN { - if am.Logger != nil { - am.Logger.Debug("found preferred certificate chain by issuer common name", - zap.String("preference", prefAnyCN), - zap.Int("chain", i)) - } - return certChains[i] - } - } - } - } - } - - if len(am.PreferredChains.RootCommonName) > 0 { - for _, prefRootCN := range am.PreferredChains.RootCommonName { - for i, chain := range decodedChains { - if chain[len(chain)-1].Issuer.CommonName == prefRootCN { - if am.Logger != nil { - am.Logger.Debug("found preferred certificate chain by root common name", - zap.String("preference", prefRootCN), - zap.Int("chain", i)) - } - return certChains[i] - 
} - } - } - } - - if am.Logger != nil { - am.Logger.Warn("did not find chain matching preferences; using first") - } - } - - return certChains[0] -} - -// Revoke implements the Revoker interface. It revokes the given certificate. -func (am *ACMEManager) Revoke(ctx context.Context, cert CertificateResource, reason int) error { - client, err := am.newACMEClientWithAccount(ctx, false, false) - if err != nil { - return err - } - - certs, err := parseCertsFromPEMBundle(cert.CertificatePEM) - if err != nil { - return err - } - - return client.revoke(ctx, certs[0], reason) -} - -// ChainPreference describes the client's preferred certificate chain, -// useful if the CA offers alternate chains. The first matching chain -// will be selected. -type ChainPreference struct { - // Prefer chains with the fewest number of bytes. - Smallest *bool - - // Select first chain having a root with one of - // these common names. - RootCommonName []string - - // Select first chain that has any issuer with one - // of these common names. - AnyCommonName []string -} - -// DefaultACME specifies default settings to use for ACMEManagers. -// Using this value is optional but can be convenient. -var DefaultACME = ACMEManager{ - CA: LetsEncryptProductionCA, - TestCA: LetsEncryptStagingCA, -} - -// Some well-known CA endpoints available to use. -const ( - LetsEncryptStagingCA = "https://acme-staging-v02.api.letsencrypt.org/directory" - LetsEncryptProductionCA = "https://acme-v02.api.letsencrypt.org/directory" - ZeroSSLProductionCA = "https://acme.zerossl.com/v2/DV90" -) - -// prefixACME is the storage key prefix used for ACME-specific assets. -const prefixACME = "acme" - -// Interface guards -var ( - _ PreChecker = (*ACMEManager)(nil) - _ Issuer = (*ACMEManager)(nil) - _ Revoker = (*ACMEManager)(nil) -) diff --git a/vendor/github.com/caddyserver/certmagic/async.go b/vendor/github.com/caddyserver/certmagic/async.go deleted file mode 100644 index 67627b25..00000000 --- a/vendor/github.com/caddyserver/certmagic/async.go +++ /dev/null @@ -1,187 +0,0 @@ -package certmagic - -import ( - "context" - "errors" - "log" - "runtime" - "sync" - "time" - - "go.uber.org/zap" -) - -var jm = &jobManager{maxConcurrentJobs: 1000} - -type jobManager struct { - mu sync.Mutex - maxConcurrentJobs int - activeWorkers int - queue []namedJob - names map[string]struct{} -} - -type namedJob struct { - name string - job func() error - logger *zap.Logger -} - -// Submit enqueues the given job with the given name. If name is non-empty -// and a job with the same name is already enqueued or running, this is a -// no-op. If name is empty, no duplicate prevention will occur. The job -// manager will then run this job as soon as it is able. 
-func (jm *jobManager) Submit(logger *zap.Logger, name string, job func() error) { - jm.mu.Lock() - defer jm.mu.Unlock() - if jm.names == nil { - jm.names = make(map[string]struct{}) - } - if name != "" { - // prevent duplicate jobs - if _, ok := jm.names[name]; ok { - return - } - jm.names[name] = struct{}{} - } - jm.queue = append(jm.queue, namedJob{name, job, logger}) - if jm.activeWorkers < jm.maxConcurrentJobs { - jm.activeWorkers++ - go jm.worker() - } -} - -func (jm *jobManager) worker() { - defer func() { - if err := recover(); err != nil { - buf := make([]byte, stackTraceBufferSize) - buf = buf[:runtime.Stack(buf, false)] - log.Printf("panic: certificate worker: %v\n%s", err, buf) - } - }() - - for { - jm.mu.Lock() - if len(jm.queue) == 0 { - jm.activeWorkers-- - jm.mu.Unlock() - return - } - next := jm.queue[0] - jm.queue = jm.queue[1:] - jm.mu.Unlock() - if err := next.job(); err != nil { - if next.logger != nil { - next.logger.Error("job failed", zap.Error(err)) - } - } - if next.name != "" { - jm.mu.Lock() - delete(jm.names, next.name) - jm.mu.Unlock() - } - } -} - -func doWithRetry(ctx context.Context, log *zap.Logger, f func(context.Context) error) error { - var attempts int - ctx = context.WithValue(ctx, AttemptsCtxKey, &attempts) - - // the initial intervalIndex is -1, signaling - // that we should not wait for the first attempt - start, intervalIndex := time.Now(), -1 - var err error - - for time.Since(start) < maxRetryDuration { - var wait time.Duration - if intervalIndex >= 0 { - wait = retryIntervals[intervalIndex] - } - timer := time.NewTimer(wait) - select { - case <-ctx.Done(): - timer.Stop() - return context.Canceled - case <-timer.C: - err = f(ctx) - attempts++ - if err == nil || errors.Is(err, context.Canceled) { - return err - } - var errNoRetry ErrNoRetry - if errors.As(err, &errNoRetry) { - return err - } - if intervalIndex < len(retryIntervals)-1 { - intervalIndex++ - } - if time.Since(start) < maxRetryDuration { - if log != nil { - log.Error("will retry", - zap.Error(err), - zap.Int("attempt", attempts), - zap.Duration("retrying_in", retryIntervals[intervalIndex]), - zap.Duration("elapsed", time.Since(start)), - zap.Duration("max_duration", maxRetryDuration)) - } - } else { - if log != nil { - log.Error("final attempt; giving up", - zap.Error(err), - zap.Int("attempt", attempts), - zap.Duration("elapsed", time.Since(start)), - zap.Duration("max_duration", maxRetryDuration)) - } - return nil - } - } - } - return err -} - -// ErrNoRetry is an error type which signals -// to stop retries early. -type ErrNoRetry struct{ Err error } - -// Unwrap makes it so that e wraps e.Err. -func (e ErrNoRetry) Unwrap() error { return e.Err } -func (e ErrNoRetry) Error() string { return e.Err.Error() } - -type retryStateCtxKey struct{} - -// AttemptsCtxKey is the context key for the value -// that holds the attempt counter. The value counts -// how many times the operation has been attempted. -// A value of 0 means first attempt. -var AttemptsCtxKey retryStateCtxKey - -// retryIntervals are based on the idea of exponential -// backoff, but weighed a little more heavily to the -// front. We figure that intermittent errors would be -// resolved after the first retry, but any errors after -// that would probably require at least a few minutes -// to clear up: either for DNS to propagate, for the -// administrator to fix their DNS or network properties, -// or some other external factor needs to change. 
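Editor's note: the retry loop above backs off for up to 30 days unless the returned error unwraps to ErrNoRetry, which stops retries immediately. A minimal standalone sketch of that contract, using the exported certmagic.ErrNoRetry type; the failing operation is a hypothetical stand-in, not part of this diff:

package main

import (
    "errors"
    "fmt"

    "github.com/caddyserver/certmagic"
)

// flakyOp stands in for an operation the retry loop would call.
// Wrapping an error in certmagic.ErrNoRetry tells the loop to stop
// immediately instead of backing off for up to maxRetryDuration.
func flakyOp() error {
    return certmagic.ErrNoRetry{Err: errors.New("CA rejected the account key")}
}

func main() {
    err := flakyOp()
    var noRetry certmagic.ErrNoRetry
    if errors.As(err, &noRetry) {
        // mirrors the errors.As check in doWithRetry above
        fmt.Println("permanent failure, not retrying:", noRetry.Err)
    }
}

Because ErrNoRetry implements Unwrap, errors.As also finds it when it is buried several wraps deep in the error chain.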
We -// chose intervals that we think will be most useful -// without introducing unnecessary delay. The last -// interval in this list will be used until the time -// of maxRetryDuration has elapsed. -var retryIntervals = []time.Duration{ - 1 * time.Minute, - 2 * time.Minute, - 2 * time.Minute, - 5 * time.Minute, // elapsed: 10 min - 10 * time.Minute, - 20 * time.Minute, - 20 * time.Minute, // elapsed: 1 hr - 30 * time.Minute, - 30 * time.Minute, // elapsed: 2 hr - 1 * time.Hour, - 3 * time.Hour, // elapsed: 6 hr - 6 * time.Hour, // for up to maxRetryDuration -} - -// maxRetryDuration is the maximum duration to try -// doing retries using the above intervals. -const maxRetryDuration = 24 * time.Hour * 30 diff --git a/vendor/github.com/caddyserver/certmagic/cache.go b/vendor/github.com/caddyserver/certmagic/cache.go deleted file mode 100644 index 673379af..00000000 --- a/vendor/github.com/caddyserver/certmagic/cache.go +++ /dev/null @@ -1,360 +0,0 @@ -// Copyright 2015 Matthew Holt -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package certmagic - -import ( - "fmt" - weakrand "math/rand" // seeded elsewhere - "strings" - "sync" - "time" - - "go.uber.org/zap" -) - -// Cache is a structure that stores certificates in memory. -// A Cache indexes certificates by name for quick access -// during TLS handshakes, and avoids duplicating certificates -// in memory. Generally, there should only be one per process. -// However, that is not a strict requirement; but using more -// than one is a code smell, and may indicate an -// over-engineered design. -// -// An empty cache is INVALID and must not be used. Be sure -// to call NewCache to get a valid value. -// -// These should be very long-lived values and must not be -// copied. Before all references leave scope to be garbage -// collected, ensure you call Stop() to stop maintenance on -// the certificates stored in this cache and release locks. -// -// Caches are not usually manipulated directly; create a -// Config value with a pointer to a Cache, and then use -// the Config to interact with the cache. Caches are -// agnostic of any particular storage or ACME config, -// since each certificate may be managed and stored -// differently. -type Cache struct { - // User configuration of the cache - options CacheOptions - - // The cache is keyed by certificate hash - cache map[string]Certificate - - // cacheIndex is a map of SAN to cache key (cert hash) - cacheIndex map[string][]string - - // Protects the cache and index maps - mu sync.RWMutex - - // Close this channel to cancel asset maintenance - stopChan chan struct{} - - // Used to signal when stopping is completed - doneChan chan struct{} - - logger *zap.Logger -} - -// NewCache returns a new, valid Cache for efficiently -// accessing certificates in memory. It also begins a -// maintenance goroutine to tend to the certificates -// in the cache. Call Stop() when you are done with the -// cache so it can clean up locks and stuff. 
-// -// Most users of this package will not need to call this -// because a default certificate cache is created for you. -// Only advanced use cases require creating a new cache. -// -// This function panics if opts.GetConfigForCert is not -// set. The reason is that a cache absolutely needs to -// be able to get a Config with which to manage TLS -// assets, and it is not safe to assume that the Default -// config is always the correct one, since you have -// created the cache yourself. -// -// See the godoc for Cache to use it properly. When -// no longer needed, caches should be stopped with -// Stop() to clean up resources even if the process -// is being terminated, so that it can clean up -// any locks for other processes to unblock! -func NewCache(opts CacheOptions) *Cache { - // assume default options if necessary - if opts.OCSPCheckInterval <= 0 { - opts.OCSPCheckInterval = DefaultOCSPCheckInterval - } - if opts.RenewCheckInterval <= 0 { - opts.RenewCheckInterval = DefaultRenewCheckInterval - } - if opts.Capacity < 0 { - opts.Capacity = 0 - } - - // this must be set, because we cannot not - // safely assume that the Default Config - // is always the correct one to use - if opts.GetConfigForCert == nil { - panic("cache must be initialized with a GetConfigForCert callback") - } - - c := &Cache{ - options: opts, - cache: make(map[string]Certificate), - cacheIndex: make(map[string][]string), - stopChan: make(chan struct{}), - doneChan: make(chan struct{}), - logger: opts.Logger, - } - - go c.maintainAssets(0) - - return c -} - -// Stop stops the maintenance goroutine for -// certificates in certCache. It blocks until -// stopping is complete. Once a cache is -// stopped, it cannot be reused. -func (certCache *Cache) Stop() { - close(certCache.stopChan) // signal to stop - <-certCache.doneChan // wait for stop to complete -} - -// CacheOptions is used to configure certificate caches. -// Once a cache has been created with certain options, -// those settings cannot be changed. -type CacheOptions struct { - // REQUIRED. A function that returns a configuration - // used for managing a certificate, or for accessing - // that certificate's asset storage (e.g. for - // OCSP staples, etc). The returned Config MUST - // be associated with the same Cache as the caller. - // - // The reason this is a callback function, dynamically - // returning a Config (instead of attaching a static - // pointer to a Config on each certificate) is because - // the config for how to manage a domain's certificate - // might change from maintenance to maintenance. The - // cache is so long-lived, we cannot assume that the - // host's situation will always be the same; e.g. the - // certificate might switch DNS providers, so the DNS - // challenge (if used) would need to be adjusted from - // the last time it was run ~8 weeks ago. - GetConfigForCert ConfigGetter - - // How often to check certificates for renewal; - // if unset, DefaultOCSPCheckInterval will be used. - OCSPCheckInterval time.Duration - - // How often to check certificates for renewal; - // if unset, DefaultRenewCheckInterval will be used. - RenewCheckInterval time.Duration - - // Maximum number of certificates to allow in the cache. - // If reached, certificates will be randomly evicted to - // make room for new ones. 0 means unlimited. 
- Capacity int - - // Set a logger to enable logging - Logger *zap.Logger -} - -// ConfigGetter is a function that returns a prepared, -// valid config that should be used when managing the -// given certificate or its assets. -type ConfigGetter func(Certificate) (*Config, error) - -// cacheCertificate calls unsyncedCacheCertificate with a write lock. -// -// This function is safe for concurrent use. -func (certCache *Cache) cacheCertificate(cert Certificate) { - certCache.mu.Lock() - certCache.unsyncedCacheCertificate(cert) - certCache.mu.Unlock() -} - -// unsyncedCacheCertificate adds cert to the in-memory cache unless -// it already exists in the cache (according to cert.Hash). It -// updates the name index. -// -// This function is NOT safe for concurrent use. Callers MUST acquire -// a write lock on certCache.mu first. -func (certCache *Cache) unsyncedCacheCertificate(cert Certificate) { - // no-op if this certificate already exists in the cache - if _, ok := certCache.cache[cert.hash]; ok { - if certCache.logger != nil { - certCache.logger.Debug("certificate already cached", - zap.Strings("subjects", cert.Names), - zap.Time("expiration", cert.Leaf.NotAfter), - zap.Bool("managed", cert.managed), - zap.String("issuer_key", cert.issuerKey), - zap.String("hash", cert.hash)) - } - return - } - - // if the cache is at capacity, make room for new cert - cacheSize := len(certCache.cache) - if certCache.options.Capacity > 0 && cacheSize >= certCache.options.Capacity { - // Go maps are "nondeterministic" but not actually random, - // so although we could just chop off the "front" of the - // map with less code, that is a heavily skewed eviction - // strategy; generating random numbers is cheap and - // ensures a much better distribution. - rnd := weakrand.Intn(cacheSize) - i := 0 - for _, randomCert := range certCache.cache { - if i == rnd { - if certCache.logger != nil { - certCache.logger.Debug("cache full; evicting random certificate", - zap.Strings("removing_subjects", randomCert.Names), - zap.String("removing_hash", randomCert.hash), - zap.Strings("inserting_subjects", cert.Names), - zap.String("inserting_hash", cert.hash)) - } - certCache.removeCertificate(randomCert) - break - } - i++ - } - } - - // store the certificate - certCache.cache[cert.hash] = cert - - // update the index so we can access it by name - for _, name := range cert.Names { - certCache.cacheIndex[name] = append(certCache.cacheIndex[name], cert.hash) - } - - if certCache.logger != nil { - certCache.logger.Debug("added certificate to cache", - zap.Strings("subjects", cert.Names), - zap.Time("expiration", cert.Leaf.NotAfter), - zap.Bool("managed", cert.managed), - zap.String("issuer_key", cert.issuerKey), - zap.String("hash", cert.hash)) - } -} - -// removeCertificate removes cert from the cache. -// -// This function is NOT safe for concurrent use; callers -// MUST first acquire a write lock on certCache.mu. -func (certCache *Cache) removeCertificate(cert Certificate) { - // delete all mentions of this cert from the name index - for _, name := range cert.Names { - keyList := certCache.cacheIndex[name] - for i := 0; i < len(keyList); i++ { - if keyList[i] == cert.hash { - keyList = append(keyList[:i], keyList[i+1:]...) 
- i-- - } - } - if len(keyList) == 0 { - delete(certCache.cacheIndex, name) - } else { - certCache.cacheIndex[name] = keyList - } - } - - // delete the actual cert from the cache - delete(certCache.cache, cert.hash) - - if certCache.logger != nil { - certCache.logger.Debug("removed certificate from cache", - zap.Strings("subjects", cert.Names), - zap.Time("expiration", cert.Leaf.NotAfter), - zap.Bool("managed", cert.managed), - zap.String("issuer_key", cert.issuerKey), - zap.String("hash", cert.hash)) - } -} - -// replaceCertificate atomically replaces oldCert with newCert in -// the cache. -// -// This method is safe for concurrent use. -func (certCache *Cache) replaceCertificate(oldCert, newCert Certificate) { - certCache.mu.Lock() - certCache.removeCertificate(oldCert) - certCache.unsyncedCacheCertificate(newCert) - certCache.mu.Unlock() - if certCache.logger != nil { - certCache.logger.Info("replaced certificate in cache", - zap.Strings("subjects", newCert.Names), - zap.Time("new_expiration", newCert.Leaf.NotAfter)) - } -} - -func (certCache *Cache) getAllMatchingCerts(name string) []Certificate { - certCache.mu.RLock() - defer certCache.mu.RUnlock() - - allCertKeys := certCache.cacheIndex[name] - - certs := make([]Certificate, len(allCertKeys)) - for i := range allCertKeys { - certs[i] = certCache.cache[allCertKeys[i]] - } - - return certs -} - -func (certCache *Cache) getAllCerts() []Certificate { - certCache.mu.RLock() - defer certCache.mu.RUnlock() - certs := make([]Certificate, 0, len(certCache.cache)) - for _, cert := range certCache.cache { - certs = append(certs, cert) - } - return certs -} - -func (certCache *Cache) getConfig(cert Certificate) (*Config, error) { - cfg, err := certCache.options.GetConfigForCert(cert) - if err != nil { - return nil, err - } - if cfg.certCache != nil && cfg.certCache != certCache { - return nil, fmt.Errorf("config returned for certificate %v is not nil and points to different cache; got %p, expected %p (this one)", - cert.Names, cfg.certCache, certCache) - } - return cfg, nil -} - -// AllMatchingCertificates returns a list of all certificates that could -// be used to serve the given SNI name, including exact SAN matches and -// wildcard matches. -func (certCache *Cache) AllMatchingCertificates(name string) []Certificate { - // get exact matches first - certs := certCache.getAllMatchingCerts(name) - - // then look for wildcard matches by replacing each - // label of the domain name with wildcards - labels := strings.Split(name, ".") - for i := range labels { - labels[i] = "*" - candidate := strings.Join(labels, ".") - certs = append(certs, certCache.getAllMatchingCerts(candidate)...) - } - - return certs -} - -var ( - defaultCache *Cache - defaultCacheMu sync.Mutex -) diff --git a/vendor/github.com/caddyserver/certmagic/certificates.go b/vendor/github.com/caddyserver/certmagic/certificates.go deleted file mode 100644 index 067bfc50..00000000 --- a/vendor/github.com/caddyserver/certmagic/certificates.go +++ /dev/null @@ -1,414 +0,0 @@ -// Copyright 2015 Matthew Holt -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package certmagic - -import ( - "crypto/tls" - "crypto/x509" - "fmt" - "io/ioutil" - "net" - "strings" - "time" - - "go.uber.org/zap" - "golang.org/x/crypto/ocsp" -) - -// Certificate is a tls.Certificate with associated metadata tacked on. -// Even if the metadata can be obtained by parsing the certificate, -// we are more efficient by extracting the metadata onto this struct, -// but at the cost of slightly higher memory use. -type Certificate struct { - tls.Certificate - - // Names is the list of subject names this - // certificate is signed for. - Names []string - - // Optional; user-provided, and arbitrary. - Tags []string - - // OCSP contains the certificate's parsed OCSP response. - ocsp *ocsp.Response - - // The hex-encoded hash of this cert's chain's bytes. - hash string - - // Whether this certificate is under our management. - managed bool - - // The unique string identifying the issuer of this certificate. - issuerKey string -} - -// NeedsRenewal returns true if the certificate is -// expiring soon (according to cfg) or has expired. -func (cert Certificate) NeedsRenewal(cfg *Config) bool { - return currentlyInRenewalWindow(cert.Leaf.NotBefore, cert.Leaf.NotAfter, cfg.RenewalWindowRatio) -} - -// Expired returns true if the certificate has expired. -func (cert Certificate) Expired() bool { - if cert.Leaf == nil { - // ideally cert.Leaf would never be nil, but this can happen for - // "synthetic" certs like those made to solve the TLS-ALPN challenge - // which adds a special cert directly to the cache, since - // tls.X509KeyPair() discards the leaf; oh well - return false - } - return time.Now().After(cert.Leaf.NotAfter) -} - -// currentlyInRenewalWindow returns true if the current time is -// within the renewal window, according to the given start/end -// dates and the ratio of the renewal window. If true is returned, -// the certificate being considered is due for renewal. -func currentlyInRenewalWindow(notBefore, notAfter time.Time, renewalWindowRatio float64) bool { - if notAfter.IsZero() { - return false - } - lifetime := notAfter.Sub(notBefore) - if renewalWindowRatio == 0 { - renewalWindowRatio = DefaultRenewalWindowRatio - } - renewalWindow := time.Duration(float64(lifetime) * renewalWindowRatio) - renewalWindowStart := notAfter.Add(-renewalWindow) - return time.Now().After(renewalWindowStart) -} - -// HasTag returns true if cert.Tags has tag. -func (cert Certificate) HasTag(tag string) bool { - for _, t := range cert.Tags { - if t == tag { - return true - } - } - return false -} - -// CacheManagedCertificate loads the certificate for domain into the -// cache, from the TLS storage for managed certificates. It returns a -// copy of the Certificate that was put into the cache. -// -// This is a lower-level method; normally you'll call Manage() instead. -// -// This method is safe for concurrent use. -func (cfg *Config) CacheManagedCertificate(domain string) (Certificate, error) { - cert, err := cfg.loadManagedCertificate(domain) - if err != nil { - return cert, err - } - cfg.certCache.cacheCertificate(cert) - cfg.emit("cached_managed_cert", cert.Names) - return cert, nil -} - -// loadManagedCertificate loads the managed certificate for domain from any -// of the configured issuers' storage locations, but it does not add it to -// the cache. It just loads from storage and returns it. 
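Editor's note: the window arithmetic in currentlyInRenewalWindow above is easiest to see with concrete numbers. A standalone sketch of the same math; the 1/3 default ratio is an assumption, since DefaultRenewalWindowRatio is not shown in this hunk:

package main

import (
    "fmt"
    "time"
)

// Reimplements the window math from currentlyInRenewalWindow for
// illustration. With a 90-day certificate and an assumed default
// ratio of 1/3, renewal begins 30 days before expiry.
func main() {
    notBefore := time.Date(2021, 1, 1, 0, 0, 0, 0, time.UTC)
    notAfter := notBefore.Add(90 * 24 * time.Hour)
    ratio := 1.0 / 3.0 // assumed default; see DefaultRenewalWindowRatio

    lifetime := notAfter.Sub(notBefore)
    window := time.Duration(float64(lifetime) * ratio)
    start := notAfter.Add(-window)

    fmt.Println("renewal window opens:", start) // 2021-03-02, 30 days before expiry
}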
-func (cfg *Config) loadManagedCertificate(domain string) (Certificate, error) { - certRes, err := cfg.loadCertResourceAnyIssuer(domain) - if err != nil { - return Certificate{}, err - } - cert, err := cfg.makeCertificateWithOCSP(certRes.CertificatePEM, certRes.PrivateKeyPEM) - if err != nil { - return cert, err - } - cert.managed = true - cert.issuerKey = certRes.issuerKey - return cert, nil -} - -// CacheUnmanagedCertificatePEMFile loads a certificate for host using certFile -// and keyFile, which must be in PEM format. It stores the certificate in -// the in-memory cache. -// -// This method is safe for concurrent use. -func (cfg *Config) CacheUnmanagedCertificatePEMFile(certFile, keyFile string, tags []string) error { - cert, err := cfg.makeCertificateFromDiskWithOCSP(cfg.Storage, certFile, keyFile) - if err != nil { - return err - } - cert.Tags = tags - cfg.certCache.cacheCertificate(cert) - cfg.emit("cached_unmanaged_cert", cert.Names) - return nil -} - -// CacheUnmanagedTLSCertificate adds tlsCert to the certificate cache. -// It staples OCSP if possible. -// -// This method is safe for concurrent use. -func (cfg *Config) CacheUnmanagedTLSCertificate(tlsCert tls.Certificate, tags []string) error { - var cert Certificate - err := fillCertFromLeaf(&cert, tlsCert) - if err != nil { - return err - } - _, err = stapleOCSP(cfg.OCSP, cfg.Storage, &cert, nil) - if err != nil && cfg.Logger != nil { - cfg.Logger.Warn("stapling OCSP", zap.Error(err)) - } - cfg.emit("cached_unmanaged_cert", cert.Names) - cert.Tags = tags - cfg.certCache.cacheCertificate(cert) - return nil -} - -// CacheUnmanagedCertificatePEMBytes makes a certificate out of the PEM bytes -// of the certificate and key, then caches it in memory. -// -// This method is safe for concurrent use. -func (cfg *Config) CacheUnmanagedCertificatePEMBytes(certBytes, keyBytes []byte, tags []string) error { - cert, err := cfg.makeCertificateWithOCSP(certBytes, keyBytes) - if err != nil { - return err - } - cert.Tags = tags - cfg.certCache.cacheCertificate(cert) - cfg.emit("cached_unmanaged_cert", cert.Names) - return nil -} - -// makeCertificateFromDiskWithOCSP makes a Certificate by loading the -// certificate and key files. It fills out all the fields in -// the certificate except for the Managed and OnDemand flags. -// (It is up to the caller to set those.) It staples OCSP. -func (cfg Config) makeCertificateFromDiskWithOCSP(storage Storage, certFile, keyFile string) (Certificate, error) { - certPEMBlock, err := ioutil.ReadFile(certFile) - if err != nil { - return Certificate{}, err - } - keyPEMBlock, err := ioutil.ReadFile(keyFile) - if err != nil { - return Certificate{}, err - } - return cfg.makeCertificateWithOCSP(certPEMBlock, keyPEMBlock) -} - -// makeCertificateWithOCSP is the same as makeCertificate except that it also -// staples OCSP to the certificate. -func (cfg Config) makeCertificateWithOCSP(certPEMBlock, keyPEMBlock []byte) (Certificate, error) { - cert, err := makeCertificate(certPEMBlock, keyPEMBlock) - if err != nil { - return cert, err - } - _, err = stapleOCSP(cfg.OCSP, cfg.Storage, &cert, certPEMBlock) - if err != nil && cfg.Logger != nil { - cfg.Logger.Warn("stapling OCSP", zap.Error(err)) - } - return cert, nil -} - -// makeCertificate turns a certificate PEM bundle and a key PEM block into -// a Certificate with necessary metadata from parsing its bytes filled into -// its struct fields for convenience (except for the OnDemand and Managed -// flags; it is up to the caller to set those properties!). 
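Editor's note: the CacheUnmanaged* helpers above accept certificates the caller provisions out-of-band. A sketch of loading one from disk, matching the signatures shown in this hunk; the paths and tag are placeholders:

package main

import (
    "log"

    "github.com/caddyserver/certmagic"
)

func main() {
    cfg := certmagic.NewDefault()

    // Cache a certificate you manage yourself (placeholder paths);
    // tags are arbitrary, optional labels for later lookup via HasTag.
    err := cfg.CacheUnmanagedCertificatePEMFile(
        "/etc/ssl/site.crt", "/etc/ssl/site.key", []string{"manual"})
    if err != nil {
        log.Fatal(err)
    }
}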
This function -// does NOT staple OCSP. -func makeCertificate(certPEMBlock, keyPEMBlock []byte) (Certificate, error) { - var cert Certificate - - // Convert to a tls.Certificate - tlsCert, err := tls.X509KeyPair(certPEMBlock, keyPEMBlock) - if err != nil { - return cert, err - } - - // Extract necessary metadata - err = fillCertFromLeaf(&cert, tlsCert) - if err != nil { - return cert, err - } - - return cert, nil -} - -// fillCertFromLeaf populates cert from tlsCert. If it succeeds, it -// guarantees that cert.Leaf is non-nil. -func fillCertFromLeaf(cert *Certificate, tlsCert tls.Certificate) error { - if len(tlsCert.Certificate) == 0 { - return fmt.Errorf("certificate is empty") - } - cert.Certificate = tlsCert - - // the leaf cert should be the one for the site; we must set - // the tls.Certificate.Leaf field so that TLS handshakes are - // more efficient - leaf, err := x509.ParseCertificate(tlsCert.Certificate[0]) - if err != nil { - return err - } - cert.Certificate.Leaf = leaf - - // for convenience, we do want to assemble all the - // subjects on the certificate into one list - if leaf.Subject.CommonName != "" { // TODO: CommonName is deprecated - cert.Names = []string{strings.ToLower(leaf.Subject.CommonName)} - } - for _, name := range leaf.DNSNames { - if name != leaf.Subject.CommonName { // TODO: CommonName is deprecated - cert.Names = append(cert.Names, strings.ToLower(name)) - } - } - for _, ip := range leaf.IPAddresses { - if ipStr := ip.String(); ipStr != leaf.Subject.CommonName { // TODO: CommonName is deprecated - cert.Names = append(cert.Names, strings.ToLower(ipStr)) - } - } - for _, email := range leaf.EmailAddresses { - if email != leaf.Subject.CommonName { // TODO: CommonName is deprecated - cert.Names = append(cert.Names, strings.ToLower(email)) - } - } - for _, u := range leaf.URIs { - if u.String() != leaf.Subject.CommonName { // TODO: CommonName is deprecated - cert.Names = append(cert.Names, u.String()) - } - } - if len(cert.Names) == 0 { - return fmt.Errorf("certificate has no names") - } - - cert.hash = hashCertificateChain(cert.Certificate.Certificate) - - return nil -} - -// managedCertInStorageExpiresSoon returns true if cert (being a -// managed certificate) is expiring within RenewDurationBefore. -// It returns false if there was an error checking the expiration -// of the certificate as found in storage, or if the certificate -// in storage is NOT expiring soon. A certificate that is expiring -// soon in our cache but is not expiring soon in storage probably -// means that another instance renewed the certificate in the -// meantime, and it would be a good idea to simply load the cert -// into our cache rather than repeating the renewal process again. -func (cfg *Config) managedCertInStorageExpiresSoon(cert Certificate) (bool, error) { - certRes, err := cfg.loadCertResourceAnyIssuer(cert.Names[0]) - if err != nil { - return false, err - } - _, needsRenew := cfg.managedCertNeedsRenewal(certRes) - return needsRenew, nil -} - -// reloadManagedCertificate reloads the certificate corresponding to the name(s) -// on oldCert into the cache, from storage. This also replaces the old certificate -// with the new one, so that all configurations that used the old cert now point -// to the new cert. It assumes that the new certificate for oldCert.Names[0] is -// already in storage. 
-func (cfg *Config) reloadManagedCertificate(oldCert Certificate) error { - if cfg.Logger != nil { - cfg.Logger.Info("reloading managed certificate", zap.Strings("identifiers", oldCert.Names)) - } - newCert, err := cfg.loadManagedCertificate(oldCert.Names[0]) - if err != nil { - return fmt.Errorf("loading managed certificate for %v from storage: %v", oldCert.Names, err) - } - cfg.certCache.replaceCertificate(oldCert, newCert) - return nil -} - -// SubjectQualifiesForCert returns true if subj is a name which, -// as a quick sanity check, looks like it could be the subject -// of a certificate. Requirements are: -// - must not be empty -// - must not start or end with a dot (RFC 1034) -// - must not contain common accidental special characters -func SubjectQualifiesForCert(subj string) bool { - // must not be empty - return strings.TrimSpace(subj) != "" && - - // must not start or end with a dot - !strings.HasPrefix(subj, ".") && - !strings.HasSuffix(subj, ".") && - - // if it has a wildcard, must be a left-most label (or exactly "*" - // which won't be trusted by browsers but still technically works) - (!strings.Contains(subj, "*") || strings.HasPrefix(subj, "*.") || subj == "*") && - - // must not contain other common special characters - !strings.ContainsAny(subj, "()[]{}<> \t\n\"\\!@#$%^&|;'+=") -} - -// SubjectQualifiesForPublicCert returns true if the subject -// name appears eligible for automagic TLS with a public -// CA such as Let's Encrypt. For example: localhost and IP -// addresses are not eligible because we cannot obtain certs -// for those names with a public CA. Wildcard names are -// allowed, as long as they conform to CABF requirements (only -// one wildcard label, and it must be the left-most label). -func SubjectQualifiesForPublicCert(subj string) bool { - // must at least qualify for a certificate - return SubjectQualifiesForCert(subj) && - - // localhost, .localhost TLD, and .local TLD are ineligible - !SubjectIsInternal(subj) && - - // cannot be an IP address (as of yet), see - // https://community.letsencrypt.org/t/certificate-for-static-ip/84/2?u=mholt - !SubjectIsIP(subj) && - - // only one wildcard label allowed, and it must be left-most, with 3+ labels - (!strings.Contains(subj, "*") || - (strings.Count(subj, "*") == 1 && - strings.Count(subj, ".") > 1 && - len(subj) > 2 && - strings.HasPrefix(subj, "*."))) -} - -// SubjectIsIP returns true if subj is an IP address. -func SubjectIsIP(subj string) bool { - return net.ParseIP(subj) != nil -} - -// SubjectIsInternal returns true if subj is an internal-facing -// hostname or address. -func SubjectIsInternal(subj string) bool { - return subj == "localhost" || - strings.HasSuffix(subj, ".localhost") || - strings.HasSuffix(subj, ".local") -} - -// MatchWildcard returns true if subject (a candidate DNS name) -// matches wildcard (a reference DNS name), mostly according to -// RFC 6125-compliant wildcard rules. See also RFC 2818 which -// states that IP addresses must match exactly, but this function -// does not attempt to distinguish IP addresses from internal or -// external DNS names that happen to look like IP addresses. -// It uses DNS wildcard matching logic. 
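Editor's note: the eligibility and wildcard rules above are compact but dense; a short sketch exercising the exported helpers makes the edge cases concrete (MatchWildcard's body follows below). Expected results follow from the rules stated in the comments:

package main

import (
    "fmt"

    "github.com/caddyserver/certmagic"
)

func main() {
    // Eligibility for a publicly-trusted certificate.
    for _, subj := range []string{
        "example.com",   // true: ordinary public name
        "*.example.com", // true: single left-most wildcard label
        "localhost",     // false: internal name
        "10.0.0.1",      // false: IP addresses are excluded
        "*.com",         // false: wildcard requires 3+ labels
    } {
        fmt.Println(subj, certmagic.SubjectQualifiesForPublicCert(subj))
    }

    // DNS wildcard matching, per MatchWildcard below.
    fmt.Println(certmagic.MatchWildcard("sub.example.com", "*.example.com"))  // true
    fmt.Println(certmagic.MatchWildcard("a.b.example.com", "*.example.com")) // false: only one label may vary
}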
-// https://tools.ietf.org/html/rfc2818#section-3.1 -func MatchWildcard(subject, wildcard string) bool { - if subject == wildcard { - return true - } - if !strings.Contains(wildcard, "*") { - return false - } - labels := strings.Split(subject, ".") - for i := range labels { - if labels[i] == "" { - continue // invalid label - } - labels[i] = "*" - candidate := strings.Join(labels, ".") - if candidate == wildcard { - return true - } - } - return false -} diff --git a/vendor/github.com/caddyserver/certmagic/certmagic.go b/vendor/github.com/caddyserver/certmagic/certmagic.go deleted file mode 100644 index bb33b90c..00000000 --- a/vendor/github.com/caddyserver/certmagic/certmagic.go +++ /dev/null @@ -1,490 +0,0 @@ -// Copyright 2015 Matthew Holt -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package certmagic automates the obtaining and renewal of TLS certificates, -// including TLS & HTTPS best practices such as robust OCSP stapling, caching, -// HTTP->HTTPS redirects, and more. -// -// Its high-level API serves your HTTP handlers over HTTPS if you simply give -// the domain name(s) and the http.Handler; CertMagic will create and run -// the HTTPS server for you, fully managing certificates during the lifetime -// of the server. Similarly, it can be used to start TLS listeners or return -// a ready-to-use tls.Config -- whatever layer you need TLS for, CertMagic -// makes it easy. See the HTTPS, Listen, and TLS functions for that. -// -// If you need more control, create a Cache using NewCache() and then make -// a Config using New(). You can then call Manage() on the config. But if -// you use this lower-level API, you'll have to be sure to solve the HTTP -// and TLS-ALPN challenges yourself (unless you disabled them or use the -// DNS challenge) by using the provided Config.GetCertificate function -// in your tls.Config and/or Config.HTTPChallangeHandler in your HTTP -// handler. -// -// See the package's README for more instruction. -package certmagic - -import ( - "context" - "crypto" - "crypto/tls" - "crypto/x509" - "fmt" - "log" - "net" - "net/http" - "sort" - "strings" - "sync" - "time" -) - -// HTTPS serves mux for all domainNames using the HTTP -// and HTTPS ports, redirecting all HTTP requests to HTTPS. -// It uses the Default config. -// -// This high-level convenience function is opinionated and -// applies sane defaults for production use, including -// timeouts for HTTP requests and responses. To allow very -// long-lived connections, you should make your own -// http.Server values and use this package's Listen(), TLS(), -// or Config.TLSConfig() functions to customize to your needs. -// For example, servers which need to support large uploads or -// downloads with slow clients may need to use longer timeouts, -// thus this function is not suitable. -// -// Calling this function signifies your acceptance to -// the CA's Subscriber Agreement and/or Terms of Service. 
-func HTTPS(domainNames []string, mux http.Handler) error { - if mux == nil { - mux = http.DefaultServeMux - } - - DefaultACME.Agreed = true - cfg := NewDefault() - - err := cfg.ManageSync(domainNames) - if err != nil { - return err - } - - httpWg.Add(1) - defer httpWg.Done() - - // if we haven't made listeners yet, do so now, - // and clean them up when all servers are done - lnMu.Lock() - if httpLn == nil && httpsLn == nil { - httpLn, err = net.Listen("tcp", fmt.Sprintf(":%d", HTTPPort)) - if err != nil { - lnMu.Unlock() - return err - } - - tlsConfig := cfg.TLSConfig() - tlsConfig.NextProtos = append([]string{"h2", "http/1.1"}, tlsConfig.NextProtos...) - - httpsLn, err = tls.Listen("tcp", fmt.Sprintf(":%d", HTTPSPort), tlsConfig) - if err != nil { - httpLn.Close() - httpLn = nil - lnMu.Unlock() - return err - } - - go func() { - httpWg.Wait() - lnMu.Lock() - httpLn.Close() - httpsLn.Close() - lnMu.Unlock() - }() - } - hln, hsln := httpLn, httpsLn - lnMu.Unlock() - - // create HTTP/S servers that are configured - // with sane default timeouts and appropriate - // handlers (the HTTP server solves the HTTP - // challenge and issues redirects to HTTPS, - // while the HTTPS server simply serves the - // user's handler) - httpServer := &http.Server{ - ReadHeaderTimeout: 5 * time.Second, - ReadTimeout: 5 * time.Second, - WriteTimeout: 5 * time.Second, - IdleTimeout: 5 * time.Second, - } - if len(cfg.Issuers) > 0 { - if am, ok := cfg.Issuers[0].(*ACMEManager); ok { - httpServer.Handler = am.HTTPChallengeHandler(http.HandlerFunc(httpRedirectHandler)) - } - } - httpsServer := &http.Server{ - ReadHeaderTimeout: 10 * time.Second, - ReadTimeout: 30 * time.Second, - WriteTimeout: 2 * time.Minute, - IdleTimeout: 5 * time.Minute, - Handler: mux, - } - - log.Printf("%v Serving HTTP->HTTPS on %s and %s", - domainNames, hln.Addr(), hsln.Addr()) - - go httpServer.Serve(hln) - return httpsServer.Serve(hsln) -} - -func httpRedirectHandler(w http.ResponseWriter, r *http.Request) { - toURL := "https://" - - // since we redirect to the standard HTTPS port, we - // do not need to include it in the redirect URL - requestHost := hostOnly(r.Host) - - toURL += requestHost - toURL += r.URL.RequestURI() - - // get rid of this disgusting unencrypted HTTP connection 🤢 - w.Header().Set("Connection", "close") - - http.Redirect(w, r, toURL, http.StatusMovedPermanently) -} - -// TLS enables management of certificates for domainNames -// and returns a valid tls.Config. It uses the Default -// config. -// -// Because this is a convenience function that returns -// only a tls.Config, it does not assume HTTP is being -// served on the HTTP port, so the HTTP challenge is -// disabled (no HTTPChallengeHandler is necessary). The -// package variable Default is modified so that the -// HTTP challenge is disabled. -// -// Calling this function signifies your acceptance to -// the CA's Subscriber Agreement and/or Terms of Service. -func TLS(domainNames []string) (*tls.Config, error) { - DefaultACME.Agreed = true - DefaultACME.DisableHTTPChallenge = true - cfg := NewDefault() - return cfg.TLSConfig(), cfg.ManageSync(domainNames) -} - -// Listen manages certificates for domainName and returns a -// TLS listener. It uses the Default config. -// -// Because this convenience function returns only a TLS-enabled -// listener and does not presume HTTP is also being served, -// the HTTP challenge will be disabled. The package variable -// Default is modified so that the HTTP challenge is disabled. 
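Editor's note: the typical call site for HTTPS above is a one-liner around a mux. A sketch assuming the vendored signature shown in this hunk; the domain and email are placeholders, and the Email field on DefaultACME is an assumption not visible in this hunk:

package main

import (
    "fmt"
    "log"
    "net/http"

    "github.com/caddyserver/certmagic"
)

func main() {
    // Optional: identify yourself to the CA (placeholder address;
    // Email is assumed to be a field on the default ACME manager).
    certmagic.DefaultACME.Email = "admin@example.com"

    mux := http.NewServeMux()
    mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
        fmt.Fprintln(w, "hello over HTTPS")
    })

    // Serves mux on ports 80/443, redirecting HTTP to HTTPS, and
    // implies agreement to the CA's terms as documented above.
    log.Fatal(certmagic.HTTPS([]string{"example.com"}, mux))
}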
-// -// Calling this function signifies your acceptance to -// the CA's Subscriber Agreement and/or Terms of Service. -func Listen(domainNames []string) (net.Listener, error) { - DefaultACME.Agreed = true - DefaultACME.DisableHTTPChallenge = true - cfg := NewDefault() - err := cfg.ManageSync(domainNames) - if err != nil { - return nil, err - } - return tls.Listen("tcp", fmt.Sprintf(":%d", HTTPSPort), cfg.TLSConfig()) -} - -// ManageSync obtains certificates for domainNames and keeps them -// renewed using the Default config. -// -// This is a slightly lower-level function; you will need to -// wire up support for the ACME challenges yourself. You can -// obtain a Config to help you do that by calling NewDefault(). -// -// You will need to ensure that you use a TLS config that gets -// certificates from this Config and that the HTTP and TLS-ALPN -// challenges can be solved. The easiest way to do this is to -// use NewDefault().TLSConfig() as your TLS config and to wrap -// your HTTP handler with NewDefault().HTTPChallengeHandler(). -// If you don't have an HTTP server, you will need to disable -// the HTTP challenge. -// -// If you already have a TLS config you want to use, you can -// simply set its GetCertificate field to -// NewDefault().GetCertificate. -// -// Calling this function signifies your acceptance to -// the CA's Subscriber Agreement and/or Terms of Service. -func ManageSync(domainNames []string) error { - DefaultACME.Agreed = true - return NewDefault().ManageSync(domainNames) -} - -// ManageAsync is the same as ManageSync, except that -// certificates are managed asynchronously. This means -// that the function will return before certificates -// are ready, and errors that occur during certificate -// obtain or renew operations are only logged. It is -// vital that you monitor the logs if using this method, -// which is only recommended for automated/non-interactive -// environments. -func ManageAsync(ctx context.Context, domainNames []string) error { - DefaultACME.Agreed = true - return NewDefault().ManageAsync(ctx, domainNames) -} - -// OnDemandConfig configures on-demand TLS (certificate -// operations as-needed, like during TLS handshakes, -// rather than immediately). -// -// When this package's high-level convenience functions -// are used (HTTPS, Manage, etc., where the Default -// config is used as a template), this struct regulates -// certificate operations using an implicit whitelist -// containing the names passed into those functions if -// no DecisionFunc is set. This ensures some degree of -// control by default to avoid certificate operations for -// aribtrary domain names. To override this whitelist, -// manually specify a DecisionFunc. To impose rate limits, -// specify your own DecisionFunc. -type OnDemandConfig struct { - // If set, this function will be called to determine - // whether a certificate can be obtained or renewed - // for the given name. If an error is returned, the - // request will be denied. - DecisionFunc func(name string) error - - // List of whitelisted hostnames (SNI values) for - // deferred (on-demand) obtaining of certificates. - // Used only by higher-level functions in this - // package to persist the list of hostnames that - // the config is supposed to manage. 
This is done - // because it seems reasonable that if you say - // "Manage [domain names...]", then only those - // domain names should be able to have certs; - // we don't NEED this feature, but it makes sense - // for higher-level convenience functions to be - // able to retain their convenience (alternative - // is: the user manually creates a DecisionFunc - // that whitelists the same names it already - // passed into Manage) and without letting clients - // have their run of any domain names they want. - // Only enforced if len > 0. - hostWhitelist []string -} - -func (o *OnDemandConfig) whitelistContains(name string) bool { - for _, n := range o.hostWhitelist { - if strings.EqualFold(n, name) { - return true - } - } - return false -} - -// isLoopback returns true if the hostname of addr looks -// explicitly like a common local hostname. addr must only -// be a host or a host:port combination. -func isLoopback(addr string) bool { - host := hostOnly(addr) - return host == "localhost" || - strings.Trim(host, "[]") == "::1" || - strings.HasPrefix(host, "127.") -} - -// isInternal returns true if the IP of addr -// belongs to a private network IP range. addr -// must only be an IP or an IP:port combination. -// Loopback addresses are considered false. -func isInternal(addr string) bool { - privateNetworks := []string{ - "10.0.0.0/8", - "172.16.0.0/12", - "192.168.0.0/16", - "fc00::/7", - } - host := hostOnly(addr) - ip := net.ParseIP(host) - if ip == nil { - return false - } - for _, privateNetwork := range privateNetworks { - _, ipnet, _ := net.ParseCIDR(privateNetwork) - if ipnet.Contains(ip) { - return true - } - } - return false -} - -// hostOnly returns only the host portion of hostport. -// If there is no port or if there is an error splitting -// the port off, the whole input string is returned. -func hostOnly(hostport string) string { - host, _, err := net.SplitHostPort(hostport) - if err != nil { - return hostport // OK; probably had no port to begin with - } - return host -} - -// PreChecker is an interface that can be optionally implemented by -// Issuers. Pre-checks are performed before each call (or batch of -// identical calls) to Issue(), giving the issuer the option to ensure -// it has all the necessary information/state. -type PreChecker interface { - PreCheck(ctx context.Context, names []string, interactive bool) error -} - -// Issuer is a type that can issue certificates. -type Issuer interface { - // Issue obtains a certificate for the given CSR. It - // must honor context cancellation if it is long-running. - // It can also use the context to find out if the current - // call is part of a retry, via AttemptsCtxKey. - Issue(ctx context.Context, request *x509.CertificateRequest) (*IssuedCertificate, error) - - // IssuerKey must return a string that uniquely identifies - // this particular configuration of the Issuer such that - // any certificates obtained by this Issuer will be treated - // as identical if they have the same SANs. - // - // Certificates obtained from Issuers with the same IssuerKey - // will overwrite others with the same SANs. For example, an - // Issuer might be able to obtain certificates from different - // CAs, say A and B. It is likely that the CAs have different - // use cases and purposes (e.g. testing and production), so - // their respective certificates should not overwrite eaach - // other. - IssuerKey() string -} - -// Revoker can revoke certificates. 
Reason codes are defined -// by RFC 5280 §5.3.1: https://tools.ietf.org/html/rfc5280#section-5.3.1 -// and are available as constants in our ACME library. -type Revoker interface { - Revoke(ctx context.Context, cert CertificateResource, reason int) error -} - -// KeyGenerator can generate a private key. -type KeyGenerator interface { - // GenerateKey generates a private key. The returned - // PrivateKey must be able to expose its associated - // public key. - GenerateKey() (crypto.PrivateKey, error) -} - -// IssuedCertificate represents a certificate that was just issued. -type IssuedCertificate struct { - // The PEM-encoding of DER-encoded ASN.1 data. - Certificate []byte - - // Any extra information to serialize alongside the - // certificate in storage. - Metadata interface{} -} - -// CertificateResource associates a certificate with its private -// key and other useful information, for use in maintaining the -// certificate. -type CertificateResource struct { - // The list of names on the certificate; - // for convenience only. - SANs []string `json:"sans,omitempty"` - - // The PEM-encoding of DER-encoded ASN.1 data - // for the cert or chain. - CertificatePEM []byte `json:"-"` - - // The PEM-encoding of the certificate's private key. - PrivateKeyPEM []byte `json:"-"` - - // Any extra information associated with the certificate, - // usually provided by the issuer implementation. - IssuerData interface{} `json:"issuer_data,omitempty"` - - // The unique string identifying the issuer of the - // certificate; internally useful for storage access. - issuerKey string `json:"-"` -} - -// NamesKey returns the list of SANs as a single string, -// truncated to some ridiculously long size limit. It -// can act as a key for the set of names on the resource. -func (cr *CertificateResource) NamesKey() string { - sort.Strings(cr.SANs) - result := strings.Join(cr.SANs, ",") - if len(result) > 1024 { - const trunc = "_trunc" - result = result[:1024-len(trunc)] + trunc - } - return result -} - -// Default contains the package defaults for the -// various Config fields. This is used as a template -// when creating your own Configs with New() or -// NewDefault(), and it is also used as the Config -// by all the high-level functions in this package -// that abstract away most configuration (HTTPS(), -// TLS(), Listen(), etc). -// -// The fields of this value will be used for Config -// fields which are unset. Feel free to modify these -// defaults, but do not use this Config by itself: it -// is only a template. Valid configurations can be -// obtained by calling New() (if you have your own -// certificate cache) or NewDefault() (if you only -// need a single config and want to use the default -// cache). -// -// Even if the Issuers or Storage fields are not set, -// defaults will be applied in the call to New(). -var Default = Config{ - RenewalWindowRatio: DefaultRenewalWindowRatio, - Storage: defaultFileStorage, - KeySource: DefaultKeyGenerator, -} - -const ( - // HTTPChallengePort is the officially-designated port for - // the HTTP challenge according to the ACME spec. - HTTPChallengePort = 80 - - // TLSALPNChallengePort is the officially-designated port for - // the TLS-ALPN challenge according to the ACME spec. - TLSALPNChallengePort = 443 -) - -// Port variables must remain their defaults unless you -// forward packets from the defaults to whatever these -// are set to; otherwise ACME challenges will fail. 
-var ( - // HTTPPort is the port on which to serve HTTP - // and, as such, the HTTP challenge (unless - // Default.AltHTTPPort is set). - HTTPPort = 80 - - // HTTPSPort is the port on which to serve HTTPS - // and, as such, the TLS-ALPN challenge - // (unless Default.AltTLSALPNPort is set). - HTTPSPort = 443 -) - -// Variables for conveniently serving HTTPS. -var ( - httpLn, httpsLn net.Listener - lnMu sync.Mutex - httpWg sync.WaitGroup -) - -// Maximum size for the stack trace when recovering from panics. -const stackTraceBufferSize = 1024 * 128 diff --git a/vendor/github.com/caddyserver/certmagic/config.go b/vendor/github.com/caddyserver/certmagic/config.go deleted file mode 100644 index d408418f..00000000 --- a/vendor/github.com/caddyserver/certmagic/config.go +++ /dev/null @@ -1,1086 +0,0 @@ -// Copyright 2015 Matthew Holt -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package certmagic - -import ( - "bytes" - "context" - "crypto" - "crypto/rand" - "crypto/tls" - "crypto/x509" - "crypto/x509/pkix" - "encoding/asn1" - "encoding/json" - "errors" - "fmt" - weakrand "math/rand" - "net" - "net/url" - "strings" - "time" - - "github.com/mholt/acmez" - "github.com/mholt/acmez/acme" - "go.uber.org/zap" - "golang.org/x/net/idna" -) - -// Config configures a certificate manager instance. -// An empty Config is not valid: use New() to obtain -// a valid Config. -type Config struct { - // How much of a certificate's lifetime becomes the - // renewal window, which is the span of time at the - // end of the certificate's validity period in which - // it should be renewed; for most certificates, the - // global default is good, but for extremely short- - // lived certs, you may want to raise this to ~0.5. - RenewalWindowRatio float64 - - // An optional event callback clients can set - // to subscribe to certain things happening - // internally by this config; invocations are - // synchronous, so make them return quickly! - OnEvent func(event string, data interface{}) - - // DefaultServerName specifies a server name - // to use when choosing a certificate if the - // ClientHello's ServerName field is empty. - DefaultServerName string - - // The state needed to operate on-demand TLS; - // if non-nil, on-demand TLS is enabled and - // certificate operations are deferred to - // TLS handshakes (or as-needed). - // TODO: Can we call this feature "Reactive/Lazy/Passive TLS" instead? - OnDemand *OnDemandConfig - - // Adds the must staple TLS extension to the CSR. - MustStaple bool - - // The source for getting new certificates; the - // default Issuer is ACMEManager. If multiple - // issuers are specified, they will be tried in - // turn until one succeeds. - Issuers []Issuer - - // The source of new private keys for certificates; - // the default KeySource is StandardKeyGenerator. - KeySource KeyGenerator - - // CertSelection chooses one of the certificates - // with which the ClientHello will be completed; - // if not set, DefaultCertificateSelector will - // be used. 
- CertSelection CertificateSelector - - // OCSP configures how OCSP is handled. By default, - // OCSP responses are fetched for every certificate - // with a responder URL, and cached on disk. Changing - // these defaults is STRONGLY discouraged unless you - // have a compelling reason to put clients at greater - // risk and reduce their privacy. - OCSP OCSPConfig - - // The storage to access when storing or loading - // TLS assets. Default is the local file system. - Storage Storage - - // Set a logger to enable logging. - Logger *zap.Logger - - // required pointer to the in-memory cert cache - certCache *Cache -} - -// NewDefault makes a valid config based on the package -// Default config. Most users will call this function -// instead of New() since most use cases require only a -// single config for any and all certificates. -// -// If your requirements are more advanced (for example, -// multiple configs depending on the certificate), then use -// New() instead. (You will need to make your own Cache -// first.) If you only need a single Config to manage your -// certs (even if that config changes, as long as it is the -// only one), customize the Default package variable before -// calling NewDefault(). -// -// All calls to NewDefault() will return configs that use the -// same, default certificate cache. All configs returned -// by NewDefault() are based on the values of the fields of -// Default at the time it is called. -// -// This is the only way to get a config that uses the -// default certificate cache. -func NewDefault() *Config { - defaultCacheMu.Lock() - if defaultCache == nil { - defaultCache = NewCache(CacheOptions{ - // the cache will likely need to renew certificates, - // so it will need to know how to do that, which - // depends on the certificate being managed and which - // can change during the lifetime of the cache; this - // callback makes it possible to get the latest and - // correct config with which to manage the cert, - // but if the user does not provide one, we can only - // assume that we are to use the default config - GetConfigForCert: func(Certificate) (*Config, error) { - return NewDefault(), nil - }, - }) - } - certCache := defaultCache - defaultCacheMu.Unlock() - - return newWithCache(certCache, Default) -} - -// New makes a new, valid config based on cfg and -// uses the provided certificate cache. certCache -// MUST NOT be nil or this function will panic. -// -// Use this method when you have an advanced use case -// that requires a custom certificate cache and config -// that may differ from the Default. For example, if -// not all certificates are managed/renewed the same -// way, you need to make your own Cache value with a -// GetConfigForCert callback that returns the correct -// configuration for each certificate. However, for -// the vast majority of cases, there will be only a -// single Config, thus the default cache (which always -// uses the default Config) and default config will -// suffice, and you should use NewDefault() instead. -func New(certCache *Cache, cfg Config) *Config { - if certCache == nil { - panic("a certificate cache is required") - } - if certCache.options.GetConfigForCert == nil { - panic("cache must have GetConfigForCert set in its options") - } - return newWithCache(certCache, cfg) -} - -// newWithCache ensures that cfg is a valid config by populating -// zero-value fields from the Default Config. If certCache is -// nil, this function panics. 
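Editor's note: the lower-level path that New and NewCache describe requires the caller to close the Config/Cache loop themselves, since the cache calls back for the Config that manages each certificate. A sketch under the signatures shown in this diff; FileStorage and its path are assumptions used only for illustration:

package main

import (
    "log"

    "github.com/caddyserver/certmagic"
)

func main() {
    var cfg *certmagic.Config

    // With a single Config, returning it unconditionally from the
    // required callback is enough; the closure captures cfg below.
    cache := certmagic.NewCache(certmagic.CacheOptions{
        GetConfigForCert: func(certmagic.Certificate) (*certmagic.Config, error) {
            return cfg, nil
        },
    })
    defer cache.Stop() // stop maintenance and release any locks

    cfg = certmagic.New(cache, certmagic.Config{
        // assumed storage implementation; placeholder path
        Storage: &certmagic.FileStorage{Path: "/tmp/certmagic"},
    })

    if err := cfg.ManageSync([]string{"example.com"}); err != nil {
        log.Fatal(err)
    }
}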
-func newWithCache(certCache *Cache, cfg Config) *Config { - if certCache == nil { - panic("cannot make a valid config without a pointer to a certificate cache") - } - - if cfg.OnDemand == nil { - cfg.OnDemand = Default.OnDemand - } - if cfg.RenewalWindowRatio == 0 { - cfg.RenewalWindowRatio = Default.RenewalWindowRatio - } - if cfg.OnEvent == nil { - cfg.OnEvent = Default.OnEvent - } - if cfg.KeySource == nil { - cfg.KeySource = Default.KeySource - } - if cfg.DefaultServerName == "" { - cfg.DefaultServerName = Default.DefaultServerName - } - if cfg.OnDemand == nil { - cfg.OnDemand = Default.OnDemand - } - if !cfg.MustStaple { - cfg.MustStaple = Default.MustStaple - } - if cfg.Storage == nil { - cfg.Storage = Default.Storage - } - if len(cfg.Issuers) == 0 { - cfg.Issuers = Default.Issuers - if len(cfg.Issuers) == 0 { - // at least one issuer is absolutely required - cfg.Issuers = []Issuer{NewACMEManager(&cfg, DefaultACME)} - } - } - - // absolutely don't allow a nil storage, - // because that would make almost anything - // a config can do pointless - if cfg.Storage == nil { - cfg.Storage = defaultFileStorage - } - - cfg.certCache = certCache - - return &cfg -} - -// ManageSync causes the certificates for domainNames to be managed -// according to cfg. If cfg.OnDemand is not nil, then this simply -// whitelists the domain names and defers the certificate operations -// to when they are needed. Otherwise, the certificates for each -// name are loaded from storage or obtained from the CA. If loaded -// from storage, they are renewed if they are expiring or expired. -// It then caches the certificate in memory and is prepared to serve -// them up during TLS handshakes. -// -// Note that name whitelisting for on-demand management only takes -// effect if cfg.OnDemand.DecisionFunc is not set (is nil); it will -// not overwrite an existing DecisionFunc, nor will it overwrite -// its decision; i.e. the implicit whitelist is only used if no -// DecisionFunc is set. -// -// This method is synchronous, meaning that certificates for all -// domainNames must be successfully obtained (or renewed) before -// it returns. It returns immediately on the first error for any -// of the given domainNames. This behavior is recommended for -// interactive use (i.e. when an administrator is present) so -// that errors can be reported and fixed immediately. -func (cfg *Config) ManageSync(domainNames []string) error { - return cfg.manageAll(context.Background(), domainNames, false) -} - -// ClientCredentials returns a list of TLS client certificate chains for the given identifiers. -// The return value can be used in a tls.Config to enable client authentication using managed certificates. -// Any certificates that need to be obtained or renewed for these identifiers will be managed accordingly. -func (cfg *Config) ClientCredentials(ctx context.Context, identifiers []string) ([]tls.Certificate, error) { - err := cfg.manageAll(ctx, identifiers, false) - if err != nil { - return nil, err - } - var chains []tls.Certificate - for _, id := range identifiers { - certRes, err := cfg.loadCertResourceAnyIssuer(id) - if err != nil { - return chains, err - } - chain, err := tls.X509KeyPair(certRes.CertificatePEM, certRes.PrivateKeyPEM) - if err != nil { - return chains, err - } - chains = append(chains, chain) - } - return chains, nil -} - -// ManageAsync is the same as ManageSync, except that ACME -// operations are performed asynchronously (in the background). -// This method returns before certificates are ready. 
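Editor's note: ClientCredentials above is the client-side counterpart to serving: it manages the given identifiers, then returns chains ready to drop into a tls.Config. A sketch matching the signature shown in this hunk; the identifier is a placeholder:

package main

import (
    "context"
    "crypto/tls"
    "log"

    "github.com/caddyserver/certmagic"
)

func main() {
    cfg := certmagic.NewDefault()

    // Obtain/renew as needed, then load the chains for client auth.
    chains, err := cfg.ClientCredentials(context.Background(), []string{"client.example.com"})
    if err != nil {
        log.Fatal(err)
    }

    tlsCfg := &tls.Config{Certificates: chains}
    _ = tlsCfg // hand to e.g. tls.Dial or an http.Transport
}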
It is -// crucial that the administrator monitors the logs and is -// notified of any errors so that corrective action can be -// taken as soon as possible. Any errors returned from this -// method occurred before ACME transactions started. -// -// As long as logs are monitored, this method is typically -// recommended for non-interactive environments. -// -// If there are failures loading, obtaining, or renewing a -// certificate, it will be retried with exponential backoff -// for up to about 30 days, with a maximum interval of about -// 24 hours. Cancelling ctx will cancel retries and shut down -// any goroutines spawned by ManageAsync. -func (cfg *Config) ManageAsync(ctx context.Context, domainNames []string) error { - return cfg.manageAll(ctx, domainNames, true) -} - -func (cfg *Config) manageAll(ctx context.Context, domainNames []string, async bool) error { - if ctx == nil { - ctx = context.Background() - } - - for _, domainName := range domainNames { - // if on-demand is configured, defer obtain and renew operations - if cfg.OnDemand != nil { - if !cfg.OnDemand.whitelistContains(domainName) { - cfg.OnDemand.hostWhitelist = append(cfg.OnDemand.hostWhitelist, domainName) - } - continue - } - - // otherwise, begin management immediately - err := cfg.manageOne(ctx, domainName, async) - if err != nil { - return err - } - } - - return nil -} - -func (cfg *Config) manageOne(ctx context.Context, domainName string, async bool) error { - // first try loading existing certificate from storage - cert, err := cfg.CacheManagedCertificate(domainName) - if err != nil { - if _, ok := err.(ErrNotExist); !ok { - return fmt.Errorf("%s: caching certificate: %v", domainName, err) - } - // if we don't have one in storage, obtain one - obtain := func() error { - var err error - if async { - err = cfg.ObtainCertAsync(ctx, domainName) - } else { - err = cfg.ObtainCertSync(ctx, domainName) - } - if err != nil { - return fmt.Errorf("%s: obtaining certificate: %w", domainName, err) - } - cert, err = cfg.CacheManagedCertificate(domainName) - if err != nil { - return fmt.Errorf("%s: caching certificate after obtaining it: %v", domainName, err) - } - return nil - } - if async { - // Leave the job name empty so as to allow duplicate 'obtain' - // jobs; this is because Caddy calls ManageAsync() before the - // previous config is stopped (and before its context is - // canceled), which means that if an obtain job is still - // running for the same domain, Submit() would not queue the - // new one because it is still running, even though it is - // (probably) about to be canceled (it might not if the new - // config fails to finish loading, however). In any case, we - // presume it is safe to enqueue a duplicate obtain job because - // either the old one (or sometimes the new one) is about to be - // canceled. This seems like reasonable logic for any consumer - // of this lib. 
See https://github.com/caddyserver/caddy/issues/3202 - jm.Submit(cfg.Logger, "", obtain) - return nil - } - return obtain() - } - - // for an existing certificate, make sure it is renewed - renew := func() error { - var err error - if async { - err = cfg.RenewCertAsync(ctx, domainName, false) - } else { - err = cfg.RenewCertSync(ctx, domainName, false) - } - if err != nil { - return fmt.Errorf("%s: renewing certificate: %w", domainName, err) - } - // successful renewal, so update in-memory cache - err = cfg.reloadManagedCertificate(cert) - if err != nil { - return fmt.Errorf("%s: reloading renewed certificate into memory: %v", domainName, err) - } - return nil - } - if cert.NeedsRenewal(cfg) { - if async { - jm.Submit(cfg.Logger, "renew_"+domainName, renew) - return nil - } - return renew() - } - - return nil -} - -// Unmanage causes the certificates for domainNames to stop being managed. -// If there are certificates for the supplied domain names in the cache, they -// are evicted from the cache. -func (cfg *Config) Unmanage(domainNames []string) { - var deleteQueue []Certificate - for _, domainName := range domainNames { - certs := cfg.certCache.AllMatchingCertificates(domainName) - for _, cert := range certs { - if !cert.managed { - continue - } - deleteQueue = append(deleteQueue, cert) - } - } - - cfg.certCache.mu.Lock() - for _, cert := range deleteQueue { - cfg.certCache.removeCertificate(cert) - } - cfg.certCache.mu.Unlock() -} - -// ObtainCertSync generates a new private key and obtains a certificate for -// name using cfg in the foreground; i.e. interactively and without retries. -// It stows the renewed certificate and its assets in storage if successful. -// It DOES NOT load the certificate into the in-memory cache. This method -// is a no-op if storage already has a certificate for name. -func (cfg *Config) ObtainCertSync(ctx context.Context, name string) error { - return cfg.obtainCert(ctx, name, true) -} - -// ObtainCertAsync is the same as ObtainCertSync(), except it runs in the -// background; i.e. non-interactively, and with retries if it fails. -func (cfg *Config) ObtainCertAsync(ctx context.Context, name string) error { - return cfg.obtainCert(ctx, name, false) -} - -func (cfg *Config) obtainCert(ctx context.Context, name string, interactive bool) error { - if len(cfg.Issuers) == 0 { - return fmt.Errorf("no issuers configured; impossible to obtain or check for existing certificate in storage") - } - - // if storage has all resources for this certificate, obtain is a no-op - if cfg.storageHasCertResourcesAnyIssuer(name) { - return nil - } - - // ensure storage is writeable and readable - // TODO: this is not necessary every time; should only perform check once every so often for each storage, which may require some global state... 
- err := cfg.checkStorage() - if err != nil { - return fmt.Errorf("failed storage check: %v - storage is probably misconfigured", err) - } - - log := loggerNamed(cfg.Logger, "obtain") - - if log != nil { - log.Info("acquiring lock", zap.String("identifier", name)) - } - - // ensure idempotency of the obtain operation for this name - lockKey := cfg.lockKey(certIssueLockOp, name) - err = acquireLock(ctx, cfg.Storage, lockKey) - if err != nil { - return fmt.Errorf("unable to acquire lock '%s': %v", lockKey, err) - } - defer func() { - if log != nil { - log.Info("releasing lock", zap.String("identifier", name)) - } - if err := releaseLock(cfg.Storage, lockKey); err != nil { - if log != nil { - log.Error("unable to unlock", - zap.String("identifier", name), - zap.String("lock_key", lockKey), - zap.Error(err)) - } - } - }() - if log != nil { - log.Info("lock acquired", zap.String("identifier", name)) - } - - f := func(ctx context.Context) error { - // check if obtain is still needed -- might have been obtained during lock - if cfg.storageHasCertResourcesAnyIssuer(name) { - if log != nil { - log.Info("certificate already exists in storage", zap.String("identifier", name)) - } - return nil - } - - // if storage has a private key already, use it; otherwise, - // we'll generate our own - privKey, privKeyPEM, issuers, err := cfg.reusePrivateKey(name) - if err != nil { - return err - } - if privKey == nil { - privKey, err = cfg.KeySource.GenerateKey() - if err != nil { - return err - } - privKeyPEM, err = encodePrivateKey(privKey) - if err != nil { - return err - } - } - - csr, err := cfg.generateCSR(privKey, []string{name}) - if err != nil { - return err - } - - // try to obtain from each issuer until we succeed - var issuedCert *IssuedCertificate - var issuerUsed Issuer - for i, issuer := range issuers { - if log != nil { - log.Debug(fmt.Sprintf("trying issuer %d/%d", i+1, len(cfg.Issuers)), - zap.String("issuer", issuer.IssuerKey())) - } - - if prechecker, ok := issuer.(PreChecker); ok { - err = prechecker.PreCheck(ctx, []string{name}, interactive) - if err != nil { - continue - } - } - - issuedCert, err = issuer.Issue(ctx, csr) - if err == nil { - issuerUsed = issuer - break - } - - // err is usually wrapped, which is nice for simply printing it, but - // with our structured error logs we only need the problem string - errToLog := err - var problem acme.Problem - if errors.As(err, &problem) { - errToLog = problem - } - if log != nil { - log.Error("could not get certificate from issuer", - zap.String("identifier", name), - zap.String("issuer", issuer.IssuerKey()), - zap.Error(errToLog)) - } - } - if err != nil { - // only the error from the last issuer will be returned, but we logged the others - return fmt.Errorf("[%s] Obtain: %w", name, err) - } - - // success - immediately save the certificate resource - certRes := CertificateResource{ - SANs: namesFromCSR(csr), - CertificatePEM: issuedCert.Certificate, - PrivateKeyPEM: privKeyPEM, - IssuerData: issuedCert.Metadata, - } - err = cfg.saveCertResource(issuerUsed, certRes) - if err != nil { - return fmt.Errorf("[%s] Obtain: saving assets: %v", name, err) - } - - cfg.emit("cert_obtained", name) - - if log != nil { - log.Info("certificate obtained successfully", zap.String("identifier", name)) - } - - return nil - } - - if interactive { - err = f(ctx) - } else { - err = doWithRetry(ctx, log, f) - } - - return err -} - -// reusePrivateKey looks for a private key for domain in storage in the configured issuers -// paths. 
For the first private key it finds, it returns that key both decoded and PEM-encoded, -// as well as the reordered list of issuers to use instead of cfg.Issuers (because if a key -// is found, that issuer should be tried first, so it is moved to the front in a copy of -// cfg.Issuers). -func (cfg *Config) reusePrivateKey(domain string) (privKey crypto.PrivateKey, privKeyPEM []byte, issuers []Issuer, err error) { - // make a copy of cfg.Issuers so that if we have to reorder elements, we don't - // inadvertently mutate the configured issuers (see append calls below) - issuers = make([]Issuer, len(cfg.Issuers)) - copy(issuers, cfg.Issuers) - - for i, issuer := range issuers { - // see if this issuer location in storage has a private key for the domain - privateKeyStorageKey := StorageKeys.SitePrivateKey(issuer.IssuerKey(), domain) - privKeyPEM, err = cfg.Storage.Load(privateKeyStorageKey) - if _, ok := err.(ErrNotExist); ok { - err = nil // obviously, it's OK to not have a private key; so don't prevent obtaining a cert - continue - } - if err != nil { - return nil, nil, nil, fmt.Errorf("loading existing private key for reuse with issuer %s: %v", issuer.IssuerKey(), err) - } - - // we loaded a private key; try decoding it so we can use it - privKey, err = decodePrivateKey(privKeyPEM) - if err != nil { - return nil, nil, nil, err - } - - // since the private key was found in storage for this issuer, move it - // to the front of the list so we prefer this issuer first - issuers = append([]Issuer{issuer}, append(issuers[:i], issuers[i+1:]...)...) - break - } - - return -} - -// storageHasCertResourcesAnyIssuer returns true if storage has all the -// certificate resources in storage from any configured issuer. It checks -// all configured issuers in order. -func (cfg *Config) storageHasCertResourcesAnyIssuer(name string) bool { - for _, iss := range cfg.Issuers { - if cfg.storageHasCertResources(iss, name) { - return true - } - } - return false -} - -// RenewCertSync renews the certificate for name using cfg in the foreground; -// i.e. interactively and without retries. It stows the renewed certificate -// and its assets in storage if successful. It DOES NOT update the in-memory -// cache with the new certificate. The certificate will not be renewed if it -// is not close to expiring unless force is true. -// -// Renewing a certificate is the same as obtaining a certificate, except that -// the existing private key already in storage is reused. -func (cfg *Config) RenewCertSync(ctx context.Context, name string, force bool) error { - return cfg.renewCert(ctx, name, force, true) -} - -// RenewCertAsync is the same as RenewCertSync(), except it runs in the -// background; i.e. non-interactively, and with retries if it fails. -func (cfg *Config) RenewCertAsync(ctx context.Context, name string, force bool) error { - return cfg.renewCert(ctx, name, force, false) -} - -func (cfg *Config) renewCert(ctx context.Context, name string, force, interactive bool) error { - if len(cfg.Issuers) == 0 { - return fmt.Errorf("no issuers configured; impossible to renew or check existing certificate in storage") - } - - // ensure storage is writeable and readable - // TODO: this is not necessary every time; should only perform check once every so often for each storage, which may require some global state... 
- err := cfg.checkStorage() - if err != nil { - return fmt.Errorf("failed storage check: %v - storage is probably misconfigured", err) - } - - log := loggerNamed(cfg.Logger, "renew") - - if log != nil { - log.Info("acquiring lock", zap.String("identifier", name)) - } - - // ensure idempotency of the renew operation for this name - lockKey := cfg.lockKey(certIssueLockOp, name) - err = acquireLock(ctx, cfg.Storage, lockKey) - if err != nil { - return fmt.Errorf("unable to acquire lock '%s': %v", lockKey, err) - } - defer func() { - if log != nil { - log.Info("releasing lock", zap.String("identifier", name)) - } - if err := releaseLock(cfg.Storage, lockKey); err != nil { - if log != nil { - log.Error("unable to unlock", - zap.String("identifier", name), - zap.String("lock_key", lockKey), - zap.Error(err)) - } - } - }() - if log != nil { - log.Info("lock acquired", zap.String("identifier", name)) - } - - f := func(ctx context.Context) error { - // prepare for renewal (load PEM cert, key, and meta) - certRes, err := cfg.loadCertResourceAnyIssuer(name) - if err != nil { - return err - } - - // check if renew is still needed - might have been renewed while waiting for lock - timeLeft, needsRenew := cfg.managedCertNeedsRenewal(certRes) - if !needsRenew { - if force { - if log != nil { - log.Info("certificate does not need to be renewed, but renewal is being forced", - zap.String("identifier", name), - zap.Duration("remaining", timeLeft)) - } - } else { - if log != nil { - log.Info("certificate appears to have been renewed already", - zap.String("identifier", name), - zap.Duration("remaining", timeLeft)) - } - return nil - } - } - - if log != nil { - log.Info("renewing certificate", - zap.String("identifier", name), - zap.Duration("remaining", timeLeft)) - } - - privateKey, err := decodePrivateKey(certRes.PrivateKeyPEM) - if err != nil { - return err - } - csr, err := cfg.generateCSR(privateKey, []string{name}) - if err != nil { - return err - } - - // try to obtain from each issuer until we succeed - var issuedCert *IssuedCertificate - var issuerUsed Issuer - for _, issuer := range cfg.Issuers { - if prechecker, ok := issuer.(PreChecker); ok { - err = prechecker.PreCheck(ctx, []string{name}, interactive) - if err != nil { - continue - } - } - - issuedCert, err = issuer.Issue(ctx, csr) - if err == nil { - issuerUsed = issuer - break - } - - // err is usually wrapped, which is nice for simply printing it, but - // with our structured error logs we only need the problem string - errToLog := err - var problem acme.Problem - if errors.As(err, &problem) { - errToLog = problem - } - if log != nil { - log.Error("could not get certificate from issuer", - zap.String("identifier", name), - zap.String("issuer", issuer.IssuerKey()), - zap.Error(errToLog)) - } - } - if err != nil { - // only the error from the last issuer will be returned, but we logged the others - return fmt.Errorf("[%s] Renew: %w", name, err) - } - - // success - immediately save the renewed certificate resource - newCertRes := CertificateResource{ - SANs: namesFromCSR(csr), - CertificatePEM: issuedCert.Certificate, - PrivateKeyPEM: certRes.PrivateKeyPEM, - IssuerData: issuedCert.Metadata, - } - err = cfg.saveCertResource(issuerUsed, newCertRes) - if err != nil { - return fmt.Errorf("[%s] Renew: saving assets: %v", name, err) - } - - cfg.emit("cert_renewed", name) - - if log != nil { - log.Info("certificate renewed successfully", zap.String("identifier", name)) - } - - return nil - } - - if interactive { - err = f(ctx) - } else { - err = 
doWithRetry(ctx, log, f) - } - - return err -} - -func (cfg *Config) generateCSR(privateKey crypto.PrivateKey, sans []string) (*x509.CertificateRequest, error) { - csrTemplate := new(x509.CertificateRequest) - - for _, name := range sans { - if ip := net.ParseIP(name); ip != nil { - csrTemplate.IPAddresses = append(csrTemplate.IPAddresses, ip) - } else if strings.Contains(name, "@") { - csrTemplate.EmailAddresses = append(csrTemplate.EmailAddresses, name) - } else if u, err := url.Parse(name); err == nil && strings.Contains(name, "/") { - csrTemplate.URIs = append(csrTemplate.URIs, u) - } else { - // convert IDNs to ASCII according to RFC 5280 section 7 - normalizedName, err := idna.ToASCII(name) - if err != nil { - return nil, fmt.Errorf("converting identifier '%s' to ASCII: %v", name, err) - } - csrTemplate.DNSNames = append(csrTemplate.DNSNames, normalizedName) - } - } - - if cfg.MustStaple { - csrTemplate.ExtraExtensions = append(csrTemplate.ExtraExtensions, mustStapleExtension) - } - - csrDER, err := x509.CreateCertificateRequest(rand.Reader, csrTemplate, privateKey) - if err != nil { - return nil, err - } - - return x509.ParseCertificateRequest(csrDER) -} - -// RevokeCert revokes the certificate for domain via ACME protocol. It requires -// that cfg.Issuers is properly configured with the same issuer that issued the -// certificate being revoked. See RFC 5280 §5.3.1 for reason codes. -// -// The certificate assets are deleted from storage after successful revocation -// to prevent reuse. -func (cfg *Config) RevokeCert(ctx context.Context, domain string, reason int, interactive bool) error { - for i, issuer := range cfg.Issuers { - issuerKey := issuer.IssuerKey() - - rev, ok := issuer.(Revoker) - if !ok { - return fmt.Errorf("issuer %d (%s) is not a Revoker", i, issuerKey) - } - - certRes, err := cfg.loadCertResource(issuer, domain) - if err != nil { - return err - } - - if !cfg.Storage.Exists(StorageKeys.SitePrivateKey(issuerKey, domain)) { - return fmt.Errorf("private key not found for %s", certRes.SANs) - } - - err = rev.Revoke(ctx, certRes, reason) - if err != nil { - return fmt.Errorf("issuer %d (%s): %v", i, issuerKey, err) - } - - cfg.emit("cert_revoked", domain) - - err = cfg.deleteSiteAssets(issuerKey, domain) - if err != nil { - return fmt.Errorf("certificate revoked, but unable to fully clean up assets from issuer %s: %v", issuerKey, err) - } - } - - return nil -} - -// TLSConfig is an opinionated method that returns a -// recommended, modern TLS configuration that can be -// used to configure TLS listeners, which also supports -// the TLS-ALPN challenge and serves up certificates -// managed by cfg. -// -// Unlike the package TLS() function, this method does -// not, by itself, enable certificate management for -// any domain names. -// -// Feel free to further customize the returned tls.Config, -// but do not mess with the GetCertificate or NextProtos -// fields unless you know what you're doing, as they're -// necessary to solve the TLS-ALPN challenge. 
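A sketch of the asynchronous path described above: ManageAsync returns before certificates are ready, so the caller owns the context lifetime and should watch the logs (the domain names are illustrative):

package main

import (
	"context"
	"log"

	"github.com/caddyserver/certmagic"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel() // cancelling stops retries and background goroutines

	magic := certmagic.NewDefault()

	// any error returned here occurred before ACME transactions started;
	// later failures are retried with exponential backoff for up to ~30 days
	if err := magic.ManageAsync(ctx, []string{"example.com", "www.example.com"}); err != nil {
		log.Fatal(err)
	}

	select {} // block forever; a real program would run its server here
}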
-func (cfg *Config) TLSConfig() *tls.Config { - return &tls.Config{ - // these two fields necessary for TLS-ALPN challenge - GetCertificate: cfg.GetCertificate, - NextProtos: []string{acmez.ACMETLS1Protocol}, - - // the rest recommended for modern TLS servers - MinVersion: tls.VersionTLS12, - CurvePreferences: []tls.CurveID{ - tls.X25519, - tls.CurveP256, - }, - CipherSuites: preferredDefaultCipherSuites(), - PreferServerCipherSuites: true, - } -} - -// getChallengeInfo loads the challenge info from either the internal challenge memory -// or the external storage (implying distributed solving). The second return value -// indicates whether challenge info was loaded from external storage. If true, the -// challenge is being solved in a distributed fashion; if false, from internal memory. -// If no matching challenge information can be found, an error is returned. -func (cfg *Config) getChallengeInfo(identifier string) (Challenge, bool, error) { - // first, check if our process initiated this challenge; if so, just return it - chalData, ok := GetACMEChallenge(identifier) - if ok { - return chalData, false, nil - } - - // otherwise, perhaps another instance in the cluster initiated it; check - // the configured storage to retrieve challenge data - - var chalInfo acme.Challenge - var chalInfoBytes []byte - var tokenKey string - for _, issuer := range cfg.Issuers { - ds := distributedSolver{ - storage: cfg.Storage, - storageKeyIssuerPrefix: storageKeyACMECAPrefix(issuer.IssuerKey()), - } - tokenKey = ds.challengeTokensKey(identifier) - var err error - chalInfoBytes, err = cfg.Storage.Load(tokenKey) - if err == nil { - break - } - if _, ok := err.(ErrNotExist); ok { - continue - } - return Challenge{}, false, fmt.Errorf("opening distributed challenge token file %s: %v", tokenKey, err) - } - if len(chalInfoBytes) == 0 { - return Challenge{}, false, fmt.Errorf("no information found to solve challenge for identifier: %s", identifier) - } - - err := json.Unmarshal(chalInfoBytes, &chalInfo) - if err != nil { - return Challenge{}, false, fmt.Errorf("decoding challenge token file %s (corrupted?): %v", tokenKey, err) - } - - return Challenge{Challenge: chalInfo}, true, nil -} - -// checkStorage tests the storage by writing random bytes -// to a random key, and then loading those bytes and -// comparing the loaded value. If this fails, the provided -// cfg.Storage mechanism should not be used. 
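Putting TLSConfig (above) together with ManageSync, a minimal HTTPS server sketch. Note the returned config's NextProtos advertises only the ACME TLS-ALPN protocol, so the caller prepends its application protocols here; that step is the caller's responsibility, not something TLSConfig does itself:

package main

import (
	"crypto/tls"
	"net/http"

	"github.com/caddyserver/certmagic"
)

func main() {
	magic := certmagic.NewDefault()
	if err := magic.ManageSync([]string{"example.com"}); err != nil {
		panic(err)
	}

	tlsCfg := magic.TLSConfig()
	// keep acmez.ACMETLS1Protocol at the end so the TLS-ALPN challenge still works
	tlsCfg.NextProtos = append([]string{"h2", "http/1.1"}, tlsCfg.NextProtos...)

	ln, err := tls.Listen("tcp", ":443", tlsCfg)
	if err != nil {
		panic(err)
	}
	_ = http.Serve(ln, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		_, _ = w.Write([]byte("hello, TLS\n"))
	}))
}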
-func (cfg *Config) checkStorage() error { - key := fmt.Sprintf("rw_test_%d", weakrand.Int()) - contents := make([]byte, 1024*10) // size sufficient for one or two ACME resources - _, err := weakrand.Read(contents) - if err != nil { - return err - } - err = cfg.Storage.Store(key, contents) - if err != nil { - return err - } - defer func() { - deleteErr := cfg.Storage.Delete(key) - if deleteErr != nil { - if cfg.Logger != nil { - cfg.Logger.Error("deleting test key from storage", - zap.String("key", key), zap.Error(err)) - } - } - // if there was no other error, make sure - // to return any error returned from Delete - if err == nil { - err = deleteErr - } - }() - loaded, err := cfg.Storage.Load(key) - if err != nil { - return err - } - if !bytes.Equal(contents, loaded) { - return fmt.Errorf("load yielded different value than was stored; expected %d bytes, got %d bytes of differing elements", len(contents), len(loaded)) - } - return nil -} - -// storageHasCertResources returns true if the storage -// associated with cfg's certificate cache has all the -// resources related to the certificate for domain: the -// certificate, the private key, and the metadata. -func (cfg *Config) storageHasCertResources(issuer Issuer, domain string) bool { - issuerKey := issuer.IssuerKey() - certKey := StorageKeys.SiteCert(issuerKey, domain) - keyKey := StorageKeys.SitePrivateKey(issuerKey, domain) - metaKey := StorageKeys.SiteMeta(issuerKey, domain) - return cfg.Storage.Exists(certKey) && - cfg.Storage.Exists(keyKey) && - cfg.Storage.Exists(metaKey) -} - -// deleteSiteAssets deletes the folder in storage containing the -// certificate, private key, and metadata file for domain from the -// issuer with the given issuer key. -func (cfg *Config) deleteSiteAssets(issuerKey, domain string) error { - err := cfg.Storage.Delete(StorageKeys.SiteCert(issuerKey, domain)) - if err != nil { - return fmt.Errorf("deleting certificate file: %v", err) - } - err = cfg.Storage.Delete(StorageKeys.SitePrivateKey(issuerKey, domain)) - if err != nil { - return fmt.Errorf("deleting private key: %v", err) - } - err = cfg.Storage.Delete(StorageKeys.SiteMeta(issuerKey, domain)) - if err != nil { - return fmt.Errorf("deleting metadata file: %v", err) - } - err = cfg.Storage.Delete(StorageKeys.CertsSitePrefix(issuerKey, domain)) - if err != nil { - return fmt.Errorf("deleting site asset folder: %v", err) - } - return nil -} - -// lockKey returns a key for a lock that is specific to the operation -// named op being performed related to domainName and this config's CA. -func (cfg *Config) lockKey(op, domainName string) string { - return fmt.Sprintf("%s_%s", op, domainName) -} - -// managedCertNeedsRenewal returns true if certRes is expiring soon or already expired, -// or if the process of decoding the cert and checking its expiration returned an error. 
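The same write-read-compare property that checkStorage (above) verifies can be exercised directly against any Storage implementation; a test sketch using FileStorage from this same package:

package certmagic_test

import (
	"bytes"
	"testing"

	"github.com/caddyserver/certmagic"
)

func TestStorageRoundTrip(t *testing.T) {
	s := &certmagic.FileStorage{Path: t.TempDir()}

	want := []byte("rw_test payload")
	if err := s.Store("probe/key", want); err != nil {
		t.Fatal(err)
	}

	got, err := s.Load("probe/key")
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(got, want) {
		t.Fatalf("loaded %d bytes, stored %d", len(got), len(want))
	}
}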
-func (cfg *Config) managedCertNeedsRenewal(certRes CertificateResource) (time.Duration, bool) { - certChain, err := parseCertsFromPEMBundle(certRes.CertificatePEM) - if err != nil { - return 0, true - } - remaining := time.Until(certChain[0].NotAfter) - needsRenew := currentlyInRenewalWindow(certChain[0].NotBefore, certChain[0].NotAfter, cfg.RenewalWindowRatio) - return remaining, needsRenew -} - -func (cfg *Config) emit(eventName string, data interface{}) { - if cfg.OnEvent == nil { - return - } - cfg.OnEvent(eventName, data) -} - -func loggerNamed(l *zap.Logger, name string) *zap.Logger { - if l == nil { - return nil - } - return l.Named(name) -} - -// CertificateSelector is a type which can select a certificate to use given multiple choices. -type CertificateSelector interface { - SelectCertificate(*tls.ClientHelloInfo, []Certificate) (Certificate, error) -} - -// OCSPConfig configures how OCSP is handled. -type OCSPConfig struct { - // Disable automatic OCSP stapling; strongly - // discouraged unless you have a good reason. - // Disabling this puts clients at greater risk - // and reduces their privacy. - DisableStapling bool - - // A map of OCSP responder domains to replacement - // domains for querying OCSP servers. Used for - // overriding the OCSP responder URL that is - // embedded in certificates. Mapping to an empty - // URL will disable OCSP from that responder. - ResponderOverrides map[string]string -} - -// certIssueLockOp is the name of the operation used -// when naming a lock to make it mutually exclusive -// with other certificate issuance operations for a -// certain name. -const certIssueLockOp = "issue_cert" - -// Constants for PKIX MustStaple extension. -var ( - tlsFeatureExtensionOID = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 24} - ocspMustStapleFeature = []byte{0x30, 0x03, 0x02, 0x01, 0x05} - mustStapleExtension = pkix.Extension{ - Id: tlsFeatureExtensionOID, - Value: ocspMustStapleFeature, - } -) diff --git a/vendor/github.com/caddyserver/certmagic/crypto.go b/vendor/github.com/caddyserver/certmagic/crypto.go deleted file mode 100644 index a705cdde..00000000 --- a/vendor/github.com/caddyserver/certmagic/crypto.go +++ /dev/null @@ -1,357 +0,0 @@ -// Copyright 2015 Matthew Holt -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package certmagic - -import ( - "crypto" - "crypto/ecdsa" - "crypto/ed25519" - "crypto/elliptic" - "crypto/rand" - "crypto/rsa" - "crypto/sha256" - "crypto/tls" - "crypto/x509" - "encoding/json" - "encoding/pem" - "fmt" - "hash/fnv" - "sort" - "strings" - - "github.com/klauspost/cpuid/v2" - "go.uber.org/zap" - "golang.org/x/net/idna" -) - -// encodePrivateKey marshals a EC or RSA private key into a PEM-encoded array of bytes. 
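The renewal decision above boils down to window arithmetic; a hedged sketch of the idea behind currentlyInRenewalWindow (its intent, not necessarily its exact implementation): with the usual ratio of 1/3, a 90-day certificate enters its renewal window 30 days before NotAfter.

package main

import (
	"fmt"
	"time"
)

// inRenewalWindow mirrors the intent of the renewal-window check:
// renew once the remaining lifetime falls below ratio * total lifetime.
func inRenewalWindow(notBefore, notAfter time.Time, ratio float64) bool {
	window := time.Duration(float64(notAfter.Sub(notBefore)) * ratio)
	return time.Until(notAfter) <= window
}

func main() {
	notBefore := time.Now().Add(-80 * 24 * time.Hour)
	notAfter := notBefore.Add(90 * 24 * time.Hour) // 10 days left of a 90-day cert

	fmt.Println(inRenewalWindow(notBefore, notAfter, 1.0/3.0)) // true: inside the final 30 days
}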
-func encodePrivateKey(key crypto.PrivateKey) ([]byte, error) { - var pemType string - var keyBytes []byte - switch key := key.(type) { - case *ecdsa.PrivateKey: - var err error - pemType = "EC" - keyBytes, err = x509.MarshalECPrivateKey(key) - if err != nil { - return nil, err - } - case *rsa.PrivateKey: - pemType = "RSA" - keyBytes = x509.MarshalPKCS1PrivateKey(key) - case ed25519.PrivateKey: - var err error - pemType = "ED25519" - keyBytes, err = x509.MarshalPKCS8PrivateKey(key) - if err != nil { - return nil, err - } - default: - return nil, fmt.Errorf("unsupported key type: %T", key) - } - pemKey := pem.Block{Type: pemType + " PRIVATE KEY", Bytes: keyBytes} - return pem.EncodeToMemory(&pemKey), nil -} - -// decodePrivateKey loads a PEM-encoded ECC/RSA private key from an array of bytes. -// Borrowed from Go standard library, to handle various private key and PEM block types. -// https://github.com/golang/go/blob/693748e9fa385f1e2c3b91ca9acbb6c0ad2d133d/src/crypto/tls/tls.go#L291-L308 -// https://github.com/golang/go/blob/693748e9fa385f1e2c3b91ca9acbb6c0ad2d133d/src/crypto/tls/tls.go#L238) -func decodePrivateKey(keyPEMBytes []byte) (crypto.Signer, error) { - keyBlockDER, _ := pem.Decode(keyPEMBytes) - - if keyBlockDER.Type != "PRIVATE KEY" && !strings.HasSuffix(keyBlockDER.Type, " PRIVATE KEY") { - return nil, fmt.Errorf("unknown PEM header %q", keyBlockDER.Type) - } - - if key, err := x509.ParsePKCS1PrivateKey(keyBlockDER.Bytes); err == nil { - return key, nil - } - - if key, err := x509.ParsePKCS8PrivateKey(keyBlockDER.Bytes); err == nil { - switch key := key.(type) { - case *rsa.PrivateKey, *ecdsa.PrivateKey, ed25519.PrivateKey: - return key.(crypto.Signer), nil - default: - return nil, fmt.Errorf("found unknown private key type in PKCS#8 wrapping: %T", key) - } - } - - if key, err := x509.ParseECPrivateKey(keyBlockDER.Bytes); err == nil { - return key, nil - } - - return nil, fmt.Errorf("unknown private key type") -} - -// parseCertsFromPEMBundle parses a certificate bundle from top to bottom and returns -// a slice of x509 certificates. This function will error if no certificates are found. -func parseCertsFromPEMBundle(bundle []byte) ([]*x509.Certificate, error) { - var certificates []*x509.Certificate - var certDERBlock *pem.Block - for { - certDERBlock, bundle = pem.Decode(bundle) - if certDERBlock == nil { - break - } - if certDERBlock.Type == "CERTIFICATE" { - cert, err := x509.ParseCertificate(certDERBlock.Bytes) - if err != nil { - return nil, err - } - certificates = append(certificates, cert) - } - } - if len(certificates) == 0 { - return nil, fmt.Errorf("no certificates found in bundle") - } - return certificates, nil -} - -// fastHash hashes input using a hashing algorithm that -// is fast, and returns the hash as a hex-encoded string. -// Do not use this for cryptographic purposes. -func fastHash(input []byte) string { - h := fnv.New32a() - h.Write(input) - return fmt.Sprintf("%x", h.Sum32()) -} - -// saveCertResource saves the certificate resource to disk. This -// includes the certificate file itself, the private key, and the -// metadata file. 
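The encode/decode helpers above follow standard PEM conventions; the same round trip can be reproduced with the standard library alone, shown here for an ECDSA key:

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"encoding/pem"
	"fmt"
)

func main() {
	key, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)

	// encode: DER inside a PEM block, as encodePrivateKey does for EC keys
	der, _ := x509.MarshalECPrivateKey(key)
	pemBytes := pem.EncodeToMemory(&pem.Block{Type: "EC PRIVATE KEY", Bytes: der})

	// decode: back to the same key material, as decodePrivateKey does
	block, _ := pem.Decode(pemBytes)
	back, _ := x509.ParseECPrivateKey(block.Bytes)
	fmt.Println(back.Equal(key)) // true
}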
-func (cfg *Config) saveCertResource(issuer Issuer, cert CertificateResource) error { - metaBytes, err := json.MarshalIndent(cert, "", "\t") - if err != nil { - return fmt.Errorf("encoding certificate metadata: %v", err) - } - - issuerKey := issuer.IssuerKey() - certKey := cert.NamesKey() - - all := []keyValue{ - { - key: StorageKeys.SiteCert(issuerKey, certKey), - value: cert.CertificatePEM, - }, - { - key: StorageKeys.SitePrivateKey(issuerKey, certKey), - value: cert.PrivateKeyPEM, - }, - { - key: StorageKeys.SiteMeta(issuerKey, certKey), - value: metaBytes, - }, - } - - return storeTx(cfg.Storage, all) -} - -// loadCertResourceAnyIssuer loads and returns the certificate resource from any -// of the configured issuers. If multiple are found (e.g. if there are 3 issuers -// configured, and all 3 have a resource matching certNamesKey), then the newest -// (latest NotBefore date) resource will be chosen. -func (cfg *Config) loadCertResourceAnyIssuer(certNamesKey string) (CertificateResource, error) { - // we can save some extra decoding steps if there's only one issuer, since - // we don't need to compare potentially multiple available resources to - // select the best one, when there's only one choice anyway - if len(cfg.Issuers) == 1 { - return cfg.loadCertResource(cfg.Issuers[0], certNamesKey) - } - - type decodedCertResource struct { - CertificateResource - issuer Issuer - decoded *x509.Certificate - } - var certResources []decodedCertResource - var lastErr error - - // load and decode all certificate resources found with the - // configured issuers so we can sort by newest - for _, issuer := range cfg.Issuers { - certRes, err := cfg.loadCertResource(issuer, certNamesKey) - if err != nil { - if _, ok := err.(ErrNotExist); ok { - // not a problem, but we need to remember the error - // in case we end up not finding any cert resources - // since we'll need an error to return in that case - lastErr = err - continue - } - return CertificateResource{}, err - } - certs, err := parseCertsFromPEMBundle(certRes.CertificatePEM) - if err != nil { - return CertificateResource{}, err - } - certResources = append(certResources, decodedCertResource{ - CertificateResource: certRes, - issuer: issuer, - decoded: certs[0], - }) - } - if len(certResources) == 0 { - if lastErr == nil { - lastErr = fmt.Errorf("no certificate resources found") // just in case; e.g. no Issuers configured - } - return CertificateResource{}, lastErr - } - - // sort by date so the most recently issued comes first - sort.Slice(certResources, func(i, j int) bool { - return certResources[j].decoded.NotBefore.Before(certResources[i].decoded.NotBefore) - }) - - if cfg.Logger != nil { - cfg.Logger.Debug("loading managed certificate", - zap.String("domain", certNamesKey), - zap.Time("expiration", certResources[0].decoded.NotAfter), - zap.String("issuer_key", certResources[0].issuer.IssuerKey()), - zap.Any("storage", cfg.Storage), - ) - } - - return certResources[0].CertificateResource, nil -} - -// loadCertResource loads a certificate resource from the given issuer's storage location. 
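For orientation, the three keys saveCertResource writes (and loadCertResource reads back) follow certmagic's well-known storage layout; the issuer key shown is Let's Encrypt's production directory and the domain is illustrative:

certificates/acme-v02.api.letsencrypt.org-directory/example.com/example.com.crt
certificates/acme-v02.api.letsencrypt.org-directory/example.com/example.com.key
certificates/acme-v02.api.letsencrypt.org-directory/example.com/example.com.json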
-func (cfg *Config) loadCertResource(issuer Issuer, certNamesKey string) (CertificateResource, error) { - certRes := CertificateResource{issuerKey: issuer.IssuerKey()} - - normalizedName, err := idna.ToASCII(certNamesKey) - if err != nil { - return CertificateResource{}, fmt.Errorf("converting '%s' to ASCII: %v", certNamesKey, err) - } - - certBytes, err := cfg.Storage.Load(StorageKeys.SiteCert(certRes.issuerKey, normalizedName)) - if err != nil { - return CertificateResource{}, err - } - certRes.CertificatePEM = certBytes - keyBytes, err := cfg.Storage.Load(StorageKeys.SitePrivateKey(certRes.issuerKey, normalizedName)) - if err != nil { - return CertificateResource{}, err - } - certRes.PrivateKeyPEM = keyBytes - metaBytes, err := cfg.Storage.Load(StorageKeys.SiteMeta(certRes.issuerKey, normalizedName)) - if err != nil { - return CertificateResource{}, err - } - err = json.Unmarshal(metaBytes, &certRes) - if err != nil { - return CertificateResource{}, fmt.Errorf("decoding certificate metadata: %v", err) - } - - return certRes, nil -} - -// hashCertificateChain computes the unique hash of certChain, -// which is the chain of DER-encoded bytes. It returns the -// hex encoding of the hash. -func hashCertificateChain(certChain [][]byte) string { - h := sha256.New() - for _, certInChain := range certChain { - h.Write(certInChain) - } - return fmt.Sprintf("%x", h.Sum(nil)) -} - -func namesFromCSR(csr *x509.CertificateRequest) []string { - var nameSet []string - nameSet = append(nameSet, csr.DNSNames...) - nameSet = append(nameSet, csr.EmailAddresses...) - for _, v := range csr.IPAddresses { - nameSet = append(nameSet, v.String()) - } - for _, v := range csr.URIs { - nameSet = append(nameSet, v.String()) - } - return nameSet -} - -// preferredDefaultCipherSuites returns an appropriate -// cipher suite to use depending on hardware support -// for AES-NI. -// -// See https://github.com/mholt/caddy/issues/1674 -func preferredDefaultCipherSuites() []uint16 { - if cpuid.CPU.Supports(cpuid.AESNI) { - return defaultCiphersPreferAES - } - return defaultCiphersPreferChaCha -} - -var ( - defaultCiphersPreferAES = []uint16{ - tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, - tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, - } - defaultCiphersPreferChaCha = []uint16{ - tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, - tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, - tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, - } -) - -// StandardKeyGenerator is the standard, in-memory key source -// that uses crypto/rand. -type StandardKeyGenerator struct { - // The type of keys to generate. - KeyType KeyType -} - -// GenerateKey generates a new private key according to kg.KeyType. 
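Selecting a key type via StandardKeyGenerator (declared above) is a one-line config change; a sketch, where RSA2048 and the domain are chosen only for illustration:

package main

import "github.com/caddyserver/certmagic"

func main() {
	magic := certmagic.NewDefault()

	// the default key type is P256; override it via the KeySource field
	magic.KeySource = certmagic.StandardKeyGenerator{KeyType: certmagic.RSA2048}

	_ = magic.ManageSync([]string{"example.com"})
}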
-func (kg StandardKeyGenerator) GenerateKey() (crypto.PrivateKey, error) { - switch kg.KeyType { - case ED25519: - _, priv, err := ed25519.GenerateKey(rand.Reader) - return priv, err - case "", P256: - return ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - case P384: - return ecdsa.GenerateKey(elliptic.P384(), rand.Reader) - case RSA2048: - return rsa.GenerateKey(rand.Reader, 2048) - case RSA4096: - return rsa.GenerateKey(rand.Reader, 4096) - case RSA8192: - return rsa.GenerateKey(rand.Reader, 8192) - } - return nil, fmt.Errorf("unrecognized or unsupported key type: %s", kg.KeyType) -} - -// DefaultKeyGenerator is the default key source. -var DefaultKeyGenerator = StandardKeyGenerator{KeyType: P256} - -// KeyType enumerates the known/supported key types. -type KeyType string - -// Constants for all key types we support. -const ( - ED25519 = KeyType("ed25519") - P256 = KeyType("p256") - P384 = KeyType("p384") - RSA2048 = KeyType("rsa2048") - RSA4096 = KeyType("rsa4096") - RSA8192 = KeyType("rsa8192") -) diff --git a/vendor/github.com/caddyserver/certmagic/dnsutil.go b/vendor/github.com/caddyserver/certmagic/dnsutil.go deleted file mode 100644 index 2573cb96..00000000 --- a/vendor/github.com/caddyserver/certmagic/dnsutil.go +++ /dev/null @@ -1,345 +0,0 @@ -package certmagic - -import ( - "errors" - "fmt" - "net" - "strings" - "sync" - "time" - - "github.com/miekg/dns" -) - -// Code in this file adapted from go-acme/lego, July 2020: -// https://github.com/go-acme/lego -// by Ludovic Fernandez and Dominik Menke -// -// It has been modified. - -// findZoneByFQDN determines the zone apex for the given fqdn by recursing -// up the domain labels until the nameserver returns a SOA record in the -// answer section. -func findZoneByFQDN(fqdn string, nameservers []string) (string, error) { - if !strings.HasSuffix(fqdn, ".") { - fqdn += "." - } - soa, err := lookupSoaByFqdn(fqdn, nameservers) - if err != nil { - return "", err - } - return soa.zone, nil -} - -func lookupSoaByFqdn(fqdn string, nameservers []string) (*soaCacheEntry, error) { - if !strings.HasSuffix(fqdn, ".") { - fqdn += "." - } - - fqdnSOACacheMu.Lock() - defer fqdnSOACacheMu.Unlock() - - // prefer cached version if fresh - if ent := fqdnSOACache[fqdn]; ent != nil && !ent.isExpired() { - return ent, nil - } - - ent, err := fetchSoaByFqdn(fqdn, nameservers) - if err != nil { - return nil, err - } - - // save result to cache, but don't allow - // the cache to grow out of control - if len(fqdnSOACache) >= 1000 { - for key := range fqdnSOACache { - delete(fqdnSOACache, key) - break - } - } - fqdnSOACache[fqdn] = ent - - return ent, nil -} - -func fetchSoaByFqdn(fqdn string, nameservers []string) (*soaCacheEntry, error) { - var err error - var in *dns.Msg - - labelIndexes := dns.Split(fqdn) - for _, index := range labelIndexes { - domain := fqdn[index:] - - in, err = dnsQuery(domain, dns.TypeSOA, nameservers, true) - if err != nil { - continue - } - if in == nil { - continue - } - - switch in.Rcode { - case dns.RcodeSuccess: - // Check if we got a SOA RR in the answer section - if len(in.Answer) == 0 { - continue - } - - // CNAME records cannot/should not exist at the root of a zone. - // So we skip a domain when a CNAME is found. 
- if dnsMsgContainsCNAME(in) { - continue - } - - for _, ans := range in.Answer { - if soa, ok := ans.(*dns.SOA); ok { - return newSoaCacheEntry(soa), nil - } - } - case dns.RcodeNameError: - // NXDOMAIN - default: - // Any response code other than NOERROR and NXDOMAIN is treated as error - return nil, fmt.Errorf("unexpected response code '%s' for %s", dns.RcodeToString[in.Rcode], domain) - } - } - - return nil, fmt.Errorf("could not find the start of authority for %s%s", fqdn, formatDNSError(in, err)) -} - -// dnsMsgContainsCNAME checks for a CNAME answer in msg -func dnsMsgContainsCNAME(msg *dns.Msg) bool { - for _, ans := range msg.Answer { - if _, ok := ans.(*dns.CNAME); ok { - return true - } - } - return false -} - -func dnsQuery(fqdn string, rtype uint16, nameservers []string, recursive bool) (*dns.Msg, error) { - m := createDNSMsg(fqdn, rtype, recursive) - var in *dns.Msg - var err error - for _, ns := range nameservers { - in, err = sendDNSQuery(m, ns) - if err == nil && len(in.Answer) > 0 { - break - } - } - return in, err -} - -func createDNSMsg(fqdn string, rtype uint16, recursive bool) *dns.Msg { - m := new(dns.Msg) - m.SetQuestion(fqdn, rtype) - m.SetEdns0(4096, false) - if !recursive { - m.RecursionDesired = false - } - return m -} - -func sendDNSQuery(m *dns.Msg, ns string) (*dns.Msg, error) { - udp := &dns.Client{Net: "udp", Timeout: dnsTimeout} - in, _, err := udp.Exchange(m, ns) - // two kinds of errors we can handle by retrying with TCP: - // truncation and timeout; see https://github.com/caddyserver/caddy/issues/3639 - truncated := in != nil && in.Truncated - timeoutErr := err != nil && strings.Contains(err.Error(), "timeout") - if truncated || timeoutErr { - tcp := &dns.Client{Net: "tcp", Timeout: dnsTimeout} - in, _, err = tcp.Exchange(m, ns) - } - return in, err -} - -func formatDNSError(msg *dns.Msg, err error) string { - var parts []string - if msg != nil { - parts = append(parts, dns.RcodeToString[msg.Rcode]) - } - if err != nil { - parts = append(parts, err.Error()) - } - if len(parts) > 0 { - return ": " + strings.Join(parts, " ") - } - return "" -} - -// soaCacheEntry holds a cached SOA record (only selected fields) -type soaCacheEntry struct { - zone string // zone apex (a domain name) - primaryNs string // primary nameserver for the zone apex - expires time.Time // time when this cache entry should be evicted -} - -func newSoaCacheEntry(soa *dns.SOA) *soaCacheEntry { - return &soaCacheEntry{ - zone: soa.Hdr.Name, - primaryNs: soa.Ns, - expires: time.Now().Add(time.Duration(soa.Refresh) * time.Second), - } -} - -// isExpired checks whether a cache entry should be considered expired. -func (cache *soaCacheEntry) isExpired() bool { - return time.Now().After(cache.expires) -} - -// systemOrDefaultNameservers attempts to get system nameservers from the -// resolv.conf file given by path before falling back to hard-coded defaults. -func systemOrDefaultNameservers(path string, defaults []string) []string { - config, err := dns.ClientConfigFromFile(path) - if err != nil || len(config.Servers) == 0 { - return defaults - } - return config.Servers -} - -// populateNameserverPorts ensures that all nameservers have a port number. -func populateNameserverPorts(servers []string) { - for i := range servers { - _, port, _ := net.SplitHostPort(servers[i]) - if port == "" { - servers[i] = net.JoinHostPort(servers[i], "53") - } - } -} - -// checkDNSPropagation checks if the expected TXT record has been propagated to all authoritative nameservers. 
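A minimal standalone query in the style of dnsQuery/sendDNSQuery above, using the same github.com/miekg/dns package; the resolver address and record name are illustrative:

package main

import (
	"fmt"
	"time"

	"github.com/miekg/dns"
)

func main() {
	m := new(dns.Msg)
	m.SetQuestion(dns.Fqdn("_acme-challenge.example.com"), dns.TypeTXT)
	m.SetEdns0(4096, false)

	c := &dns.Client{Net: "udp", Timeout: 10 * time.Second}
	in, _, err := c.Exchange(m, "8.8.8.8:53")
	if err != nil {
		panic(err)
	}

	for _, rr := range in.Answer {
		if txt, ok := rr.(*dns.TXT); ok {
			fmt.Println(txt.Txt)
		}
	}
}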
-func checkDNSPropagation(fqdn, value string, resolvers []string) (bool, error) { - if !strings.HasSuffix(fqdn, ".") { - fqdn += "." - } - - // Initial attempt to resolve at the recursive NS - r, err := dnsQuery(fqdn, dns.TypeTXT, resolvers, true) - if err != nil { - return false, err - } - - // TODO: make this configurable, maybe - // if !p.requireCompletePropagation { - // return true, nil - // } - - if r.Rcode == dns.RcodeSuccess { - fqdn = updateDomainWithCName(r, fqdn) - } - - authoritativeNss, err := lookupNameservers(fqdn, resolvers) - if err != nil { - return false, err - } - - return checkAuthoritativeNss(fqdn, value, authoritativeNss) -} - -// checkAuthoritativeNss queries each of the given nameservers for the expected TXT record. -func checkAuthoritativeNss(fqdn, value string, nameservers []string) (bool, error) { - for _, ns := range nameservers { - r, err := dnsQuery(fqdn, dns.TypeTXT, []string{net.JoinHostPort(ns, "53")}, false) - if err != nil { - return false, err - } - - if r.Rcode != dns.RcodeSuccess { - if r.Rcode == dns.RcodeNameError { - // if Present() succeeded, then it must show up eventually, or else - // something is really broken in the DNS provider or their API; - // no need for error here, simply have the caller try again - return false, nil - } - return false, fmt.Errorf("NS %s returned %s for %s", ns, dns.RcodeToString[r.Rcode], fqdn) - } - - var found bool - for _, rr := range r.Answer { - if txt, ok := rr.(*dns.TXT); ok { - record := strings.Join(txt.Txt, "") - if record == value { - found = true - break - } - } - } - - if !found { - return false, nil - } - } - - return true, nil -} - -// lookupNameservers returns the authoritative nameservers for the given fqdn. -func lookupNameservers(fqdn string, resolvers []string) ([]string, error) { - var authoritativeNss []string - - zone, err := findZoneByFQDN(fqdn, resolvers) - if err != nil { - return nil, fmt.Errorf("could not determine the zone: %w", err) - } - - r, err := dnsQuery(zone, dns.TypeNS, resolvers, true) - if err != nil { - return nil, err - } - - for _, rr := range r.Answer { - if ns, ok := rr.(*dns.NS); ok { - authoritativeNss = append(authoritativeNss, strings.ToLower(ns.Ns)) - } - } - - if len(authoritativeNss) > 0 { - return authoritativeNss, nil - } - return nil, errors.New("could not determine authoritative nameservers") -} - -// Update FQDN with CNAME if any -func updateDomainWithCName(r *dns.Msg, fqdn string) string { - for _, rr := range r.Answer { - if cn, ok := rr.(*dns.CNAME); ok { - if cn.Hdr.Name == fqdn { - return cn.Target - } - } - } - return fqdn -} - -// recursiveNameservers are used to pre-check DNS propagation. It -// picks user-configured nameservers (custom) OR the defaults -// obtained from resolv.conf and defaultNameservers if none is -// configured and ensures that all server addresses have a port value. 
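Concretely, updateDomainWithCName (above) is what lets delegated DNS challenges work: if the challenge name is a CNAME, the propagation check follows it and verifies the TXT record at the target zone's authoritative nameservers instead. In zone-file terms (names and TTLs illustrative):

_acme-challenge.example.com.          300 IN CNAME _acme-challenge.dns-host.example.net.
_acme-challenge.dns-host.example.net. 300 IN TXT   "<key-authorization-digest>"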
-func recursiveNameservers(custom []string) []string { - var servers []string - if len(custom) == 0 { - servers = systemOrDefaultNameservers(defaultResolvConf, defaultNameservers) - } else { - servers = make([]string, len(custom)) - copy(servers, custom) - } - populateNameserverPorts(servers) - return servers -} - -var defaultNameservers = []string{ - "8.8.8.8:53", - "8.8.4.4:53", - "1.1.1.1:53", - "1.0.0.1:53", -} - -var dnsTimeout = 10 * time.Second - -var ( - fqdnSOACache = map[string]*soaCacheEntry{} - fqdnSOACacheMu sync.Mutex -) - -const defaultResolvConf = "/etc/resolv.conf" diff --git a/vendor/github.com/caddyserver/certmagic/filestorage.go b/vendor/github.com/caddyserver/certmagic/filestorage.go deleted file mode 100644 index 8adc2ce7..00000000 --- a/vendor/github.com/caddyserver/certmagic/filestorage.go +++ /dev/null @@ -1,392 +0,0 @@ -// Copyright 2015 Matthew Holt -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package certmagic - -import ( - "context" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "log" - "os" - "path" - "path/filepath" - "runtime" - "time" -) - -// FileStorage facilitates forming file paths derived from a root -// directory. It is used to get file paths in a consistent, -// cross-platform way or persisting ACME assets on the file system. -type FileStorage struct { - Path string -} - -// Exists returns true if key exists in fs. -func (fs *FileStorage) Exists(key string) bool { - _, err := os.Stat(fs.Filename(key)) - return !os.IsNotExist(err) -} - -// Store saves value at key. -func (fs *FileStorage) Store(key string, value []byte) error { - filename := fs.Filename(key) - err := os.MkdirAll(filepath.Dir(filename), 0700) - if err != nil { - return err - } - return ioutil.WriteFile(filename, value, 0600) -} - -// Load retrieves the value at key. -func (fs *FileStorage) Load(key string) ([]byte, error) { - contents, err := ioutil.ReadFile(fs.Filename(key)) - if os.IsNotExist(err) { - return nil, ErrNotExist(err) - } - return contents, nil -} - -// Delete deletes the value at key. -func (fs *FileStorage) Delete(key string) error { - err := os.Remove(fs.Filename(key)) - if os.IsNotExist(err) { - return ErrNotExist(err) - } - return err -} - -// List returns all keys that match prefix. -func (fs *FileStorage) List(prefix string, recursive bool) ([]string, error) { - var keys []string - walkPrefix := fs.Filename(prefix) - - err := filepath.Walk(walkPrefix, func(fpath string, info os.FileInfo, err error) error { - if err != nil { - return err - } - if info == nil { - return fmt.Errorf("%s: file info is nil", fpath) - } - if fpath == walkPrefix { - return nil - } - - suffix, err := filepath.Rel(walkPrefix, fpath) - if err != nil { - return fmt.Errorf("%s: could not make path relative: %v", fpath, err) - } - keys = append(keys, path.Join(prefix, suffix)) - - if !recursive && info.IsDir() { - return filepath.SkipDir - } - return nil - }) - - return keys, err -} - -// Stat returns information about key. 
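The FileStorage methods above compose into a simple key-value workflow; a sketch (the path and keys are illustrative):

package main

import (
	"fmt"

	"github.com/caddyserver/certmagic"
)

func main() {
	s := &certmagic.FileStorage{Path: "/tmp/certmagic-demo"}

	if err := s.Store("acme/example.com/example.com.crt", []byte("...")); err != nil {
		panic(err)
	}
	fmt.Println(s.Exists("acme/example.com/example.com.crt")) // true

	keys, _ := s.List("acme", true) // recursive listing under the prefix
	fmt.Println(keys)
}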
-func (fs *FileStorage) Stat(key string) (KeyInfo, error) { - fi, err := os.Stat(fs.Filename(key)) - if os.IsNotExist(err) { - return KeyInfo{}, ErrNotExist(err) - } - if err != nil { - return KeyInfo{}, err - } - return KeyInfo{ - Key: key, - Modified: fi.ModTime(), - Size: fi.Size(), - IsTerminal: !fi.IsDir(), - }, nil -} - -// Filename returns the key as a path on the file -// system prefixed by fs.Path. -func (fs *FileStorage) Filename(key string) string { - return filepath.Join(fs.Path, filepath.FromSlash(key)) -} - -// Lock obtains a lock named by the given key. It blocks -// until the lock can be obtained or an error is returned. -func (fs *FileStorage) Lock(ctx context.Context, key string) error { - filename := fs.lockFilename(key) - - for { - err := createLockfile(filename) - if err == nil { - // got the lock, yay - return nil - } - if !os.IsExist(err) { - // unexpected error - return fmt.Errorf("creating lock file: %v", err) - } - - // lock file already exists - - var meta lockMeta - f, err := os.Open(filename) - if err == nil { - err2 := json.NewDecoder(f).Decode(&meta) - f.Close() - if err2 != nil { - return fmt.Errorf("decoding lockfile contents: %w", err2) - } - } - - switch { - case os.IsNotExist(err): - // must have just been removed; try again to create it - continue - - case err != nil: - // unexpected error - return fmt.Errorf("accessing lock file: %v", err) - - case fileLockIsStale(meta): - // lock file is stale - delete it and try again to create one - log.Printf("[INFO][%s] Lock for '%s' is stale (created: %s, last update: %s); removing then retrying: %s", - fs, key, meta.Created, meta.Updated, filename) - removeLockfile(filename) - continue - - default: - // lockfile exists and is not stale; - // just wait a moment and try again, - // or return if context cancelled - select { - case <-time.After(fileLockPollInterval): - case <-ctx.Done(): - return ctx.Err() - } - } - } -} - -// Unlock releases the lock for name. -func (fs *FileStorage) Unlock(key string) error { - return removeLockfile(fs.lockFilename(key)) -} - -func (fs *FileStorage) String() string { - return "FileStorage:" + fs.Path -} - -func (fs *FileStorage) lockFilename(key string) string { - return filepath.Join(fs.lockDir(), StorageKeys.Safe(key)+".lock") -} - -func (fs *FileStorage) lockDir() string { - return filepath.Join(fs.Path, "locks") -} - -func fileLockIsStale(meta lockMeta) bool { - ref := meta.Updated - if ref.IsZero() { - ref = meta.Created - } - // since updates are exactly every lockFreshnessInterval, - // add a grace period for the actual file read+write to - // take place - return time.Since(ref) > lockFreshnessInterval*2 -} - -// createLockfile atomically creates the lockfile -// identified by filename. A successfully created -// lockfile should be removed with removeLockfile. -func createLockfile(filename string) error { - err := atomicallyCreateFile(filename, true) - if err != nil { - return err - } - - go keepLockfileFresh(filename) - - // if the app crashes in removeLockfile(), there is a - // small chance the .unlock file is left behind; it's - // safe to simply remove it as it's a guard against - // double removal of the .lock file. - _ = os.Remove(filename + ".unlock") - return nil -} - -// removeLockfile atomically removes filename, -// which must be a lockfile created by createLockfile. 
-// See discussion in PR #7 for more background: -// https://github.com/caddyserver/certmagic/pull/7 -func removeLockfile(filename string) error { - unlockFilename := filename + ".unlock" - if err := atomicallyCreateFile(unlockFilename, false); err != nil { - if os.IsExist(err) { - // another process is handling the unlocking - return nil - } - return err - } - defer os.Remove(unlockFilename) - return os.Remove(filename) -} - -// keepLockfileFresh continuously updates the lock file -// at filename with the current timestamp. It stops -// when the file disappears (happy path = lock released), -// or when there is an error at any point. Since it polls -// every lockFreshnessInterval, this function might -// not terminate until up to lockFreshnessInterval after -// the lock is released. -func keepLockfileFresh(filename string) { - defer func() { - if err := recover(); err != nil { - buf := make([]byte, stackTraceBufferSize) - buf = buf[:runtime.Stack(buf, false)] - log.Printf("panic: active locking: %v\n%s", err, buf) - } - }() - - for { - time.Sleep(lockFreshnessInterval) - done, err := updateLockfileFreshness(filename) - if err != nil { - log.Printf("[ERROR] Keeping lock file fresh: %v - terminating lock maintenance (lockfile: %s)", err, filename) - return - } - if done { - return - } - } -} - -// updateLockfileFreshness updates the lock file at filename -// with the current timestamp. It returns true if the parent -// loop can terminate (i.e. no more need to update the lock). -func updateLockfileFreshness(filename string) (bool, error) { - f, err := os.OpenFile(filename, os.O_RDWR, 0644) - if os.IsNotExist(err) { - return true, nil // lock released - } - if err != nil { - return true, err - } - defer f.Close() - - // read contents - metaBytes, err := ioutil.ReadAll(io.LimitReader(f, 2048)) - if err != nil { - return true, err - } - var meta lockMeta - if err := json.Unmarshal(metaBytes, &meta); err != nil { - return true, err - } - - // truncate file and reset I/O offset to beginning - if err := f.Truncate(0); err != nil { - return true, err - } - if _, err := f.Seek(0, 0); err != nil { - return true, err - } - - // write updated timestamp - meta.Updated = time.Now() - if err = json.NewEncoder(f).Encode(meta); err != nil { - return false, err - } - - // sync to device; we suspect that sometimes file systems - // (particularly AWS EFS) don't do this on their own, - // leaving the file empty when we close it; see - // https://github.com/caddyserver/caddy/issues/3954 - return false, f.Sync() -} - -// atomicallyCreateFile atomically creates the file -// identified by filename if it doesn't already exist. -func atomicallyCreateFile(filename string, writeLockInfo bool) error { - // no need to check this error, we only really care about the file creation error - _ = os.MkdirAll(filepath.Dir(filename), 0700) - f, err := os.OpenFile(filename, os.O_CREATE|os.O_WRONLY|os.O_EXCL, 0644) - if err != nil { - return err - } - defer f.Close() - if writeLockInfo { - now := time.Now() - meta := lockMeta{ - Created: now, - Updated: now, - } - if err := json.NewEncoder(f).Encode(meta); err != nil { - return err - } - // see https://github.com/caddyserver/caddy/issues/3954 - if err := f.Sync(); err != nil { - return err - } - } - return nil -} - -// homeDir returns the best guess of the current user's home -// directory from environment variables. If unknown, "." (the -// current directory) is returned instead. 
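The lockfile machinery above is exposed through Lock/Unlock; a usage sketch with a bounded wait. The key shown follows the issue_cert_<domain> pattern used elsewhere in this package, with an illustrative domain:

package main

import (
	"context"
	"log"
	"time"

	"github.com/caddyserver/certmagic"
)

func main() {
	s := &certmagic.FileStorage{Path: "/tmp/certmagic-demo"}

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	const key = "issue_cert_example.com"
	if err := s.Lock(ctx, key); err != nil {
		log.Fatal(err) // timed out waiting, or the lockfile could not be created
	}
	defer func() {
		if err := s.Unlock(key); err != nil {
			log.Printf("unlock: %v", err)
		}
	}()

	// ... perform the mutually exclusive work here ...
}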
-func homeDir() string { - home := os.Getenv("HOME") - if home == "" && runtime.GOOS == "windows" { - drive := os.Getenv("HOMEDRIVE") - path := os.Getenv("HOMEPATH") - home = drive + path - if drive == "" || path == "" { - home = os.Getenv("USERPROFILE") - } - } - if home == "" { - home = "." - } - return home -} - -func dataDir() string { - baseDir := filepath.Join(homeDir(), ".local", "share") - if xdgData := os.Getenv("XDG_DATA_HOME"); xdgData != "" { - baseDir = xdgData - } - return filepath.Join(baseDir, "certmagic") -} - -// lockMeta is written into a lock file. -type lockMeta struct { - Created time.Time `json:"created,omitempty"` - Updated time.Time `json:"updated,omitempty"` -} - -// lockFreshnessInterval is how often to update -// a lock's timestamp. Locks with a timestamp -// more than this duration in the past (plus a -// grace period for latency) can be considered -// stale. -const lockFreshnessInterval = 5 * time.Second - -// fileLockPollInterval is how frequently -// to check the existence of a lock file -const fileLockPollInterval = 1 * time.Second - -// Interface guard -var _ Storage = (*FileStorage)(nil) diff --git a/vendor/github.com/caddyserver/certmagic/handshake.go b/vendor/github.com/caddyserver/certmagic/handshake.go deleted file mode 100644 index 5b749fdc..00000000 --- a/vendor/github.com/caddyserver/certmagic/handshake.go +++ /dev/null @@ -1,686 +0,0 @@ -// Copyright 2015 Matthew Holt -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package certmagic - -import ( - "context" - "crypto/tls" - "fmt" - "net" - "strings" - "sync" - "time" - - "github.com/mholt/acmez" - "go.uber.org/zap" -) - -// GetCertificate gets a certificate to satisfy clientHello. In getting -// the certificate, it abides the rules and settings defined in the -// Config that matches clientHello.ServerName. It first checks the in- -// memory cache, then, if the config enables "OnDemand", it accesses -// disk, then accesses the network if it must obtain a new certificate -// via ACME. -// -// This method is safe for use as a tls.Config.GetCertificate callback. 
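GetCertificate (documented above) can also be wired into a hand-rolled tls.Config when TLSConfig() is too opinionated; a sketch that keeps the ACME TLS-ALPN protocol advertised so challenges still work:

package main

import (
	"crypto/tls"

	"github.com/caddyserver/certmagic"
	"github.com/mholt/acmez"
)

func main() {
	magic := certmagic.NewDefault()

	tlsCfg := &tls.Config{
		GetCertificate: magic.GetCertificate,
		MinVersion:     tls.VersionTLS12,
		NextProtos:     []string{"h2", "http/1.1", acmez.ACMETLS1Protocol},
	}

	ln, err := tls.Listen("tcp", ":443", tlsCfg)
	if err != nil {
		panic(err)
	}
	defer ln.Close()
	// ... accept connections ...
}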
-func (cfg *Config) GetCertificate(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) { - cfg.emit("tls_handshake_started", clientHello) - - // special case: serve up the certificate for a TLS-ALPN ACME challenge - // (https://tools.ietf.org/html/draft-ietf-acme-tls-alpn-05) - for _, proto := range clientHello.SupportedProtos { - if proto == acmez.ACMETLS1Protocol { - challengeCert, distributed, err := cfg.getTLSALPNChallengeCert(clientHello) - if err != nil { - if cfg.Logger != nil { - cfg.Logger.Error("tls-alpn challenge", - zap.String("server_name", clientHello.ServerName), - zap.Error(err)) - } - return nil, err - } - if cfg.Logger != nil { - cfg.Logger.Info("served key authentication certificate", - zap.String("server_name", clientHello.ServerName), - zap.String("challenge", "tls-alpn-01"), - zap.String("remote", clientHello.Conn.RemoteAddr().String()), - zap.Bool("distributed", distributed)) - } - return challengeCert, nil - } - } - - // get the certificate and serve it up - cert, err := cfg.getCertDuringHandshake(clientHello, true, true) - if err == nil { - cfg.emit("tls_handshake_completed", clientHello) - } - return &cert.Certificate, err -} - -// getCertificate gets a certificate that matches name from the in-memory -// cache, according to the lookup table associated with cfg. The lookup then -// points to a certificate in the Instance certificate cache. -// -// The name is expected to already be normalized (e.g. lowercased). -// -// If there is no exact match for name, it will be checked against names of -// the form '*.example.com' (wildcard certificates) according to RFC 6125. -// If a match is found, matched will be true. If no matches are found, matched -// will be false and a "default" certificate will be returned with defaulted -// set to true. If defaulted is false, then no certificates were available. -// -// The logic in this function is adapted from the Go standard library, -// which is by the Go Authors. -// -// This function is safe for concurrent use. -func (cfg *Config) getCertificate(hello *tls.ClientHelloInfo) (cert Certificate, matched, defaulted bool) { - name := normalizedName(hello.ServerName) - - if name == "" { - // if SNI is empty, prefer matching IP address - if hello.Conn != nil { - addr := localIPFromConn(hello.Conn) - cert, matched = cfg.selectCert(hello, addr) - if matched { - return - } - } - - // fall back to a "default" certificate, if specified - if cfg.DefaultServerName != "" { - normDefault := normalizedName(cfg.DefaultServerName) - cert, defaulted = cfg.selectCert(hello, normDefault) - if defaulted { - return - } - } - } else { - // if SNI is specified, try an exact match first - cert, matched = cfg.selectCert(hello, name) - if matched { - return - } - - // try replacing labels in the name with - // wildcards until we get a match - labels := strings.Split(name, ".") - for i := range labels { - labels[i] = "*" - candidate := strings.Join(labels, ".") - cert, matched = cfg.selectCert(hello, candidate) - if matched { - return - } - } - } - - // otherwise, we're bingo on ammo; see issues - // caddyserver/caddy#2035 and caddyserver/caddy#1303 (any - // change to certificate matching behavior must - // account for hosts defined where the hostname - // is empty or a catch-all, like ":443" or - // "0.0.0.0:443") - - return -} - -// selectCert uses hello to select a certificate from the -// cache for name. If cfg.CertSelection is set, it will be -// used to make the decision. Otherwise, the first matching -// unexpired cert is returned. 
As a special case, if no -// certificates match name and cfg.CertSelection is set, -// then all certificates in the cache will be passed in -// for the cfg.CertSelection to make the final decision. -func (cfg *Config) selectCert(hello *tls.ClientHelloInfo, name string) (Certificate, bool) { - logger := loggerNamed(cfg.Logger, "handshake") - choices := cfg.certCache.getAllMatchingCerts(name) - if len(choices) == 0 { - if cfg.CertSelection == nil { - if logger != nil { - logger.Debug("no matching certificates and no custom selection logic", zap.String("identifier", name)) - } - return Certificate{}, false - } - if logger != nil { - logger.Debug("no matching certificate; will choose from all certificates", zap.String("identifier", name)) - } - choices = cfg.certCache.getAllCerts() - } - if logger != nil { - logger.Debug("choosing certificate", - zap.String("identifier", name), - zap.Int("num_choices", len(choices))) - } - if cfg.CertSelection == nil { - cert, err := DefaultCertificateSelector(hello, choices) - if logger != nil { - logger.Debug("default certificate selection results", - zap.Error(err), - zap.String("identifier", name), - zap.Strings("subjects", cert.Names), - zap.Bool("managed", cert.managed), - zap.String("issuer_key", cert.issuerKey), - zap.String("hash", cert.hash)) - } - return cert, err == nil - } - cert, err := cfg.CertSelection.SelectCertificate(hello, choices) - if logger != nil { - logger.Debug("custom certificate selection results", - zap.Error(err), - zap.String("identifier", name), - zap.Strings("subjects", cert.Names), - zap.Bool("managed", cert.managed), - zap.String("issuer_key", cert.issuerKey), - zap.String("hash", cert.hash)) - } - return cert, err == nil -} - -// DefaultCertificateSelector is the default certificate selection logic -// given a choice of certificates. If there is at least one certificate in -// choices, it always returns a certificate without error. It chooses the -// first non-expired certificate that the client supports if possible, -// otherwise it returns an expired certificate that the client supports, -// otherwise it just returns the first certificate in the list of choices. -func DefaultCertificateSelector(hello *tls.ClientHelloInfo, choices []Certificate) (Certificate, error) { - if len(choices) == 0 { - return Certificate{}, fmt.Errorf("no certificates available") - } - now := time.Now() - best := choices[0] - for _, choice := range choices { - if err := hello.SupportsCertificate(&choice.Certificate); err != nil { - continue - } - best = choice // at least the client supports it... - if now.After(choice.Leaf.NotBefore) && now.Before(choice.Leaf.NotAfter) { - return choice, nil // ...and unexpired, great! "Certificate, I choose you!" - } - } - return best, nil // all matching certs are expired or incompatible, oh well -} - -// getCertDuringHandshake will get a certificate for hello. It first tries -// the in-memory cache. If no certificate for hello is in the cache, the -// config most closely corresponding to hello will be loaded. If that config -// allows it (OnDemand==true) and if loadIfNecessary == true, it goes to disk -// to load it into the cache and serve it. If it's not on disk and if -// obtainIfNecessary == true, the certificate will be obtained from the CA, -// cached, and served. If obtainIfNecessary is true, then loadIfNecessary -// must also be set to true. An error will be returned if and only if no -// certificate is available. -// -// This function is safe for concurrent use. 
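// [Editor's note] The wildcard fallback in getCertificate above replaces
// name labels with "*" cumulatively, left to right. A standalone sketch of
// the same loop (hypothetical helper, for illustration only):
//
//	func wildcardCandidates(name string) []string {
//		labels := strings.Split(name, ".")
//		var out []string
//		for i := range labels {
//			labels[i] = "*"
//			out = append(out, strings.Join(labels, "."))
//		}
//		return out
//	}
//
// For "a.b.example.com" this yields "*.b.example.com", "*.*.example.com",
// "*.*.*.com", and "*.*.*.*"; only the first is a conventional wildcard,
// and the extra candidates simply fail to match anything in the cache.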
-func (cfg *Config) getCertDuringHandshake(hello *tls.ClientHelloInfo, loadIfNecessary, obtainIfNecessary bool) (Certificate, error) { - log := loggerNamed(cfg.Logger, "handshake") - - // First check our in-memory cache to see if we've already loaded it - cert, matched, defaulted := cfg.getCertificate(hello) - if matched { - if log != nil { - log.Debug("matched certificate in cache", - zap.Strings("subjects", cert.Names), - zap.Bool("managed", cert.managed), - zap.Time("expiration", cert.Leaf.NotAfter), - zap.String("hash", cert.hash)) - } - if cert.managed && cfg.OnDemand != nil && obtainIfNecessary { - // It's been reported before that if the machine goes to sleep (or - // suspends the process) that certs which are already loaded into - // memory won't get renewed in the background, so we need to check - // expiry on each handshake too, sigh: - // https://caddy.community/t/local-certificates-not-renewing-on-demand/9482 - return cfg.optionalMaintenance(loggerNamed(cfg.Logger, "on_demand"), cert, hello) - } - return cert, nil - } - - name := cfg.getNameFromClientHello(hello) - - // We might be able to load or obtain a needed certificate. Load from - // storage even if OnDemand isn't enabled in case a statically-managed - // cert was evicted from a full cache. - cfg.certCache.mu.RLock() - cacheSize := len(cfg.certCache.cache) - cfg.certCache.mu.RUnlock() - loadDynamically := cfg.OnDemand != nil || cacheSize >= cfg.certCache.options.Capacity - - if loadDynamically && loadIfNecessary { - // Then check to see if we have one on disk - loadedCert, err := cfg.CacheManagedCertificate(name) - if _, ok := err.(ErrNotExist); ok { - // If no exact match, try a wildcard variant, which is something we can still use - labels := strings.Split(name, ".") - labels[0] = "*" - loadedCert, err = cfg.CacheManagedCertificate(strings.Join(labels, ".")) - } - if err == nil { - if log != nil { - log.Debug("loaded certificate from storage", - zap.Strings("subjects", loadedCert.Names), - zap.Bool("managed", loadedCert.managed), - zap.Time("expiration", loadedCert.Leaf.NotAfter), - zap.String("hash", loadedCert.hash)) - } - loadedCert, err = cfg.handshakeMaintenance(hello, loadedCert) - if err != nil { - if log != nil { - log.Error("maintaining newly-loaded certificate", - zap.String("server_name", name), - zap.Error(err)) - } - } - return loadedCert, nil - } - if cfg.OnDemand != nil && obtainIfNecessary { - // By this point, we need to ask the CA for a certificate - return cfg.obtainOnDemandCertificate(hello) - } - } - - // Fall back to the default certificate if there is one - if defaulted { - if log != nil { - log.Debug("fell back to default certificate", - zap.Strings("subjects", cert.Names), - zap.Bool("managed", cert.managed), - zap.Time("expiration", cert.Leaf.NotAfter), - zap.String("hash", cert.hash)) - } - return cert, nil - } - - if log != nil { - log.Debug("no certificate matching TLS ClientHello", - zap.String("server_name", hello.ServerName), - zap.String("remote", hello.Conn.RemoteAddr().String()), - zap.String("identifier", name), - zap.Uint16s("cipher_suites", hello.CipherSuites), - zap.Int("cache_size", cacheSize), - zap.Int("cache_capacity", cfg.certCache.options.Capacity), - zap.Bool("load_if_necessary", loadIfNecessary), - zap.Bool("obtain_if_necessary", obtainIfNecessary), - zap.Bool("on_demand", cfg.OnDemand != nil)) - } - - return Certificate{}, fmt.Errorf("no certificate available for '%s'", name) -} - -// optionalMaintenance will perform maintenance on the certificate (if necessary) and -//
will return the resulting certificate. This should only be done if the certificate -// is managed, OnDemand is enabled, and the scope is allowed to obtain certificates. -func (cfg *Config) optionalMaintenance(log *zap.Logger, cert Certificate, hello *tls.ClientHelloInfo) (Certificate, error) { - newCert, err := cfg.handshakeMaintenance(hello, cert) - if err == nil { - return newCert, nil - } - - if log != nil { - log.Error("renewing certificate on-demand failed", - zap.Strings("subjects", cert.Names), - zap.Time("not_after", cert.Leaf.NotAfter), - zap.Error(err)) - } - - if cert.Expired() { - return cert, err - } - - // still has time remaining, so serve it anyway - return cert, nil -} - -// checkIfCertShouldBeObtained checks to see if an on-demand TLS certificate -// should be obtained for a given domain based upon the config settings. If -// a non-nil error is returned, do not issue a new certificate for name. -func (cfg *Config) checkIfCertShouldBeObtained(name string) error { - if cfg.OnDemand == nil { - return fmt.Errorf("not configured for on-demand certificate issuance") - } - if !SubjectQualifiesForCert(name) { - return fmt.Errorf("subject name does not qualify for certificate: %s", name) - } - if cfg.OnDemand.DecisionFunc != nil { - return cfg.OnDemand.DecisionFunc(name) - } - if len(cfg.OnDemand.hostWhitelist) > 0 && - !cfg.OnDemand.whitelistContains(name) { - return fmt.Errorf("certificate for '%s' is not managed", name) - } - return nil -} - -// obtainOnDemandCertificate obtains a certificate for hello. -// If another goroutine has already started obtaining a cert for -// hello, it will wait and use what the other goroutine obtained. -// -// This function is safe for use by multiple concurrent goroutines. -func (cfg *Config) obtainOnDemandCertificate(hello *tls.ClientHelloInfo) (Certificate, error) { - log := loggerNamed(cfg.Logger, "on_demand") - - name := cfg.getNameFromClientHello(hello) - - getCertWithoutReobtaining := func() (Certificate, error) { - // very important to set the obtainIfNecessary argument to false, so we don't repeat this infinitely - return cfg.getCertDuringHandshake(hello, true, false) - } - - // We must protect this process from happening concurrently, so synchronize. - obtainCertWaitChansMu.Lock() - wait, ok := obtainCertWaitChans[name] - if ok { - // lucky us -- another goroutine is already obtaining the certificate. - // wait for it to finish obtaining the cert and then we'll use it. - obtainCertWaitChansMu.Unlock() - - // TODO: see if we can get a proper context in here, for true cancellation - timeout := time.NewTimer(2 * time.Minute) - select { - case <-timeout.C: - return Certificate{}, fmt.Errorf("timed out waiting to obtain certificate for %s", name) - case <-wait: - timeout.Stop() - } - - return getCertWithoutReobtaining() - } - - // looks like it's up to us to do all the work and obtain the cert. 
- // make a chan others can wait on if needed - wait = make(chan struct{}) - obtainCertWaitChans[name] = wait - obtainCertWaitChansMu.Unlock() - - unblockWaiters := func() { - obtainCertWaitChansMu.Lock() - close(wait) - delete(obtainCertWaitChans, name) - obtainCertWaitChansMu.Unlock() - } - - // Make sure the certificate should be obtained based on config - err := cfg.checkIfCertShouldBeObtained(name) - if err != nil { - unblockWaiters() - return Certificate{}, err - } - - if log != nil { - log.Info("obtaining new certificate", zap.String("server_name", name)) - } - - // TODO: use a proper context; we use one with timeout because retries are enabled because interactive is false - ctx, cancel := context.WithTimeout(context.TODO(), 90*time.Second) - defer cancel() - - // Obtain the certificate - err = cfg.ObtainCertAsync(ctx, name) - - // immediately unblock anyone waiting for it; doing this in - // a defer would risk deadlock because of the recursive call - // to getCertDuringHandshake below when we return! - unblockWaiters() - - if err != nil { - // shucks; failed to solve challenge on-demand - return Certificate{}, err - } - - // success; certificate was just placed on disk, so - // we need only restart serving the certificate - return getCertWithoutReobtaining() -} - -// handshakeMaintenance performs a check on cert for expiration and OCSP validity. -// If necessary, it will renew the certificate and/or refresh the OCSP staple. -// OCSP stapling errors are not returned, only logged. -// -// This function is safe for use by multiple concurrent goroutines. -func (cfg *Config) handshakeMaintenance(hello *tls.ClientHelloInfo, cert Certificate) (Certificate, error) { - log := loggerNamed(cfg.Logger, "on_demand") - - // Check cert expiration - if currentlyInRenewalWindow(cert.Leaf.NotBefore, cert.Leaf.NotAfter, cfg.RenewalWindowRatio) { - return cfg.renewDynamicCertificate(hello, cert) - } - - // Check OCSP staple validity - if cert.ocsp != nil { - refreshTime := cert.ocsp.ThisUpdate.Add(cert.ocsp.NextUpdate.Sub(cert.ocsp.ThisUpdate) / 2) - if time.Now().After(refreshTime) { - _, err := stapleOCSP(cfg.OCSP, cfg.Storage, &cert, nil) - if err != nil { - // An error with OCSP stapling is not the end of the world, and in fact, is - // quite common considering not all certs have issuer URLs that support it. - if log != nil { - log.Warn("stapling OCSP", - zap.String("server_name", hello.ServerName), - zap.Error(err)) - } - } - cfg.certCache.mu.Lock() - cfg.certCache.cache[cert.hash] = cert - cfg.certCache.mu.Unlock() - } - } - - return cert, nil -} - -// renewDynamicCertificate renews the certificate for name using cfg. It returns the -// certificate to use and an error, if any. name should already be lower-cased before -// calling this function. name is the name obtained directly from the handshake's -// ClientHello. If the certificate hasn't yet expired, currentCert will be returned -// and the renewal will happen in the background; otherwise this blocks until the -// certificate has been renewed, and returns the renewed certificate. -// -// This function is safe for use by multiple concurrent goroutines. 
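// [Editor's note] A minimal sketch of the coordination pattern used by
// obtainOnDemandCertificate and renewDynamicCertificate: the first
// goroutine to claim a name does the work and closes the channel; later
// goroutines find the channel and wait on it instead of duplicating the
// expensive ACME order. Names here are illustrative, not from the source.
//
//	var (
//		waitChans   = make(map[string]chan struct{})
//		waitChansMu sync.Mutex
//	)
//
//	func doOnce(name string, work func()) {
//		waitChansMu.Lock()
//		if ch, ok := waitChans[name]; ok {
//			waitChansMu.Unlock()
//			<-ch // another goroutine is already on it; wait for it
//			return
//		}
//		ch := make(chan struct{})
//		waitChans[name] = ch
//		waitChansMu.Unlock()
//
//		work()
//
//		waitChansMu.Lock()
//		close(ch) // unblock all waiters
//		delete(waitChans, name)
//		waitChansMu.Unlock()
//	}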
-func (cfg *Config) renewDynamicCertificate(hello *tls.ClientHelloInfo, currentCert Certificate) (Certificate, error) { - log := loggerNamed(cfg.Logger, "on_demand") - - name := cfg.getNameFromClientHello(hello) - timeLeft := time.Until(currentCert.Leaf.NotAfter) - - getCertWithoutReobtaining := func() (Certificate, error) { - // very important to set the obtainIfNecessary argument to false, so we don't repeat this infinitely - return cfg.getCertDuringHandshake(hello, true, false) - } - - // see if another goroutine is already working on this certificate - obtainCertWaitChansMu.Lock() - wait, ok := obtainCertWaitChans[name] - if ok { - // lucky us -- another goroutine is already renewing the certificate - obtainCertWaitChansMu.Unlock() - - if timeLeft > 0 { - // the current certificate hasn't expired, and another goroutine is already - // renewing it, so we might as well serve what we have without blocking - if log != nil { - log.Debug("certificate expires soon but is already being renewed; serving current certificate", - zap.Strings("subjects", currentCert.Names), - zap.Duration("remaining", timeLeft)) - } - return currentCert, nil - } - - // otherwise, we'll have to wait for the renewal to finish so we don't serve - // an expired certificate - - if log != nil { - log.Debug("certificate has expired, but is already being renewed; waiting for renewal to complete", - zap.Strings("subjects", currentCert.Names), - zap.Time("expired", currentCert.Leaf.NotAfter)) - } - - // TODO: see if we can get a proper context in here, for true cancellation - timeout := time.NewTimer(2 * time.Minute) - select { - case <-timeout.C: - return Certificate{}, fmt.Errorf("timed out waiting for certificate renewal of %s", name) - case <-wait: - timeout.Stop() - } - - return getCertWithoutReobtaining() - } - - // looks like it's up to us to do all the work and renew the cert - wait = make(chan struct{}) - obtainCertWaitChans[name] = wait - obtainCertWaitChansMu.Unlock() - - unblockWaiters := func() { - obtainCertWaitChansMu.Lock() - close(wait) - delete(obtainCertWaitChans, name) - obtainCertWaitChansMu.Unlock() - } - - if log != nil { - log.Info("attempting certificate renewal", - zap.String("server_name", name), - zap.Strings("subjects", currentCert.Names), - zap.Time("expiration", currentCert.Leaf.NotAfter), - zap.Duration("remaining", timeLeft)) - } - - // Make sure a certificate for this name should be obtained on-demand - err := cfg.checkIfCertShouldBeObtained(name) - if err != nil { - // if not, remove from cache (it will be deleted from storage later) - cfg.certCache.mu.Lock() - cfg.certCache.removeCertificate(currentCert) - cfg.certCache.mu.Unlock() - unblockWaiters() - return Certificate{}, err - } - - // Renew and reload the certificate - renewAndReload := func(ctx context.Context, cancel context.CancelFunc) (Certificate, error) { - defer cancel() - err = cfg.RenewCertAsync(ctx, name, false) - if err == nil { - // even though the recursive nature of the dynamic cert loading - // would just call this function anyway, we do it here to - // make the replacement as atomic as possible. 
- newCert, err := cfg.CacheManagedCertificate(name) - if err != nil { - if log != nil { - log.Error("loading renewed certificate", zap.String("server_name", name), zap.Error(err)) - } - } else { - // replace the old certificate with the new one - cfg.certCache.replaceCertificate(currentCert, newCert) - } - } - - // immediately unblock anyone waiting for it; doing this in - // a defer would risk deadlock because of the recursive call - // to getCertDuringHandshake below when we return! - unblockWaiters() - - if err != nil { - return Certificate{}, err - } - - return getCertWithoutReobtaining() - } - - // if the certificate hasn't expired, we can serve what we have and renew in the background - if timeLeft > 0 { - // TODO: get a proper context; we use one with timeout because retries are enabled because interactive is false - ctx, cancel := context.WithTimeout(context.TODO(), 5*time.Minute) - go renewAndReload(ctx, cancel) - return currentCert, nil - } - - // otherwise, we have to block while we renew an expired certificate - ctx, cancel := context.WithTimeout(context.TODO(), 90*time.Second) - return renewAndReload(ctx, cancel) -} - -// getTLSALPNChallengeCert is to be called when the clientHello pertains to -// a TLS-ALPN challenge and a certificate is required to solve it. This method gets -// the relevant challenge info and then returns the associated certificate (if any) -// or generates it anew if it's not available (as is the case when distributed -// solving). True is returned if the challenge is being solved distributed (there -// is no semantic difference with distributed solving; it is mainly for logging). -func (cfg *Config) getTLSALPNChallengeCert(clientHello *tls.ClientHelloInfo) (*tls.Certificate, bool, error) { - chalData, distributed, err := cfg.getChallengeInfo(clientHello.ServerName) - if err != nil { - return nil, distributed, err - } - - // fast path: we already created the certificate (this avoids having to re-create - // it at every handshake that tries to verify, e.g. multi-perspective validation) - if chalData.data != nil { - return chalData.data.(*tls.Certificate), distributed, nil - } - - // otherwise, we can re-create the solution certificate, but it takes a few cycles - cert, err := acmez.TLSALPN01ChallengeCert(chalData.Challenge) - if err != nil { - return nil, distributed, fmt.Errorf("making TLS-ALPN challenge certificate: %v", err) - } - if cert == nil { - return nil, distributed, fmt.Errorf("got nil TLS-ALPN challenge certificate but no error") - } - - return cert, distributed, nil -} - -// getNameFromClientHello returns a normalized form of hello.ServerName. -// If hello.ServerName is empty (i.e. client did not use SNI), then the -// associated connection's local address is used to extract an IP address. -func (*Config) getNameFromClientHello(hello *tls.ClientHelloInfo) string { - if name := normalizedName(hello.ServerName); name != "" { - return name - } - return localIPFromConn(hello.Conn) -} - -// localIPFromConn returns the host portion of c's local address -// and strips the scope ID if one exists (see RFC 4007). -func localIPFromConn(c net.Conn) string { - if c == nil { - return "" - } - localAddr := c.LocalAddr().String() - ip, _, err := net.SplitHostPort(localAddr) - if err != nil { - // OK; assume there was no port - ip = localAddr - } - // IPv6 addresses can have scope IDs, e.g. 
"fe80::4c3:3cff:fe4f:7e0b%eth0", - // but for our purposes, these are useless (unless a valid use case proves - // otherwise; see issue #3911) - if scopeIDStart := strings.Index(ip, "%"); scopeIDStart > -1 { - ip = ip[:scopeIDStart] - } - return ip -} - -// normalizedName returns a cleaned form of serverName that is -// used for consistency when referring to a SNI value. -func normalizedName(serverName string) string { - return strings.ToLower(strings.TrimSpace(serverName)) -} - -// obtainCertWaitChans is used to coordinate obtaining certs for each hostname. -var obtainCertWaitChans = make(map[string]chan struct{}) -var obtainCertWaitChansMu sync.Mutex diff --git a/vendor/github.com/caddyserver/certmagic/httphandler.go b/vendor/github.com/caddyserver/certmagic/httphandler.go deleted file mode 100644 index d17cfaab..00000000 --- a/vendor/github.com/caddyserver/certmagic/httphandler.go +++ /dev/null @@ -1,124 +0,0 @@ -// Copyright 2015 Matthew Holt -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package certmagic - -import ( - "net/http" - "strings" - - "github.com/mholt/acmez/acme" - "go.uber.org/zap" -) - -// HTTPChallengeHandler wraps h in a handler that can solve the ACME -// HTTP challenge. cfg is required, and it must have a certificate -// cache backed by a functional storage facility, since that is where -// the challenge state is stored between initiation and solution. -// -// If a request is not an ACME HTTP challenge, h will be invoked. -func (am *ACMEManager) HTTPChallengeHandler(h http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if am.HandleHTTPChallenge(w, r) { - return - } - h.ServeHTTP(w, r) - }) -} - -// HandleHTTPChallenge uses am to solve challenge requests from an ACME -// server that were initiated by this instance or any other instance in -// this cluster (being, any instances using the same storage am does). -// -// If the HTTP challenge is disabled, this function is a no-op. -// -// If am is nil or if am does not have a certificate cache backed by -// usable storage, solving the HTTP challenge will fail. -// -// It returns true if it handled the request; if so, the response has -// already been written. If false is returned, this call was a no-op and -// the request has not been handled. -func (am *ACMEManager) HandleHTTPChallenge(w http.ResponseWriter, r *http.Request) bool { - if am == nil { - return false - } - if am.DisableHTTPChallenge { - return false - } - if !LooksLikeHTTPChallenge(r) { - return false - } - return am.distributedHTTPChallengeSolver(w, r) -} - -// distributedHTTPChallengeSolver checks to see if this challenge -// request was initiated by this or another instance which uses the -// same storage as am does, and attempts to complete the challenge for -// it. It returns true if the request was handled; false otherwise. 
-func (am *ACMEManager) distributedHTTPChallengeSolver(w http.ResponseWriter, r *http.Request) bool { - if am == nil { - return false - } - host := hostOnly(r.Host) - chalInfo, distributed, err := am.config.getChallengeInfo(host) - if err != nil { - if am.Logger != nil { - am.Logger.Error("looking up info for HTTP challenge", - zap.String("host", host), - zap.Error(err)) - } - return false - } - return solveHTTPChallenge(am.Logger, w, r, chalInfo.Challenge, distributed) -} - -// solveHTTPChallenge solves the HTTP challenge using the given challenge information. -// If the challenge is being solved in a distributed fashion, set distributed to true for logging purposes. -// It returns true if the properties of the request check out in relation to the HTTP challenge. -// Most of this code borrowed from xenolf's built-in HTTP-01 challenge solver in March 2018. -func solveHTTPChallenge(logger *zap.Logger, w http.ResponseWriter, r *http.Request, challenge acme.Challenge, distributed bool) bool { - challengeReqPath := challenge.HTTP01ResourcePath() - if r.URL.Path == challengeReqPath && - strings.EqualFold(hostOnly(r.Host), challenge.Identifier.Value) && // mitigate DNS rebinding attacks - r.Method == "GET" { - w.Header().Add("Content-Type", "text/plain") - w.Write([]byte(challenge.KeyAuthorization)) - r.Close = true - if logger != nil { - logger.Info("served key authentication", - zap.String("identifier", challenge.Identifier.Value), - zap.String("challenge", "http-01"), - zap.String("remote", r.RemoteAddr), - zap.Bool("distributed", distributed)) - } - return true - } - return false -} - -// SolveHTTPChallenge solves the HTTP challenge. It should be used only on HTTP requests that are -// from ACME servers trying to validate an identifier (i.e. LooksLikeHTTPChallenge() == true). It -// returns true if the request criteria check out and it answered with key authentication, in which -// case no further handling of the request is necessary. -func SolveHTTPChallenge(logger *zap.Logger, w http.ResponseWriter, r *http.Request, challenge acme.Challenge) bool { - return solveHTTPChallenge(logger, w, r, challenge, false) -} - -// LooksLikeHTTPChallenge returns true if r looks like an ACME -// HTTP challenge request from an ACME server. -func LooksLikeHTTPChallenge(r *http.Request) bool { - return r.Method == "GET" && strings.HasPrefix(r.URL.Path, challengeBasePath) -} - -const challengeBasePath = "/.well-known/acme-challenge" diff --git a/vendor/github.com/caddyserver/certmagic/maintain.go b/vendor/github.com/caddyserver/certmagic/maintain.go deleted file mode 100644 index 63d475ce..00000000 --- a/vendor/github.com/caddyserver/certmagic/maintain.go +++ /dev/null @@ -1,648 +0,0 @@ -// Copyright 2015 Matthew Holt -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License.
- -package certmagic - -import ( - "context" - "crypto/x509" - "encoding/pem" - "fmt" - "log" - "path" - "runtime" - "strings" - "time" - - "github.com/mholt/acmez/acme" - "go.uber.org/zap" - "golang.org/x/crypto/ocsp" -) - -// maintainAssets is a permanently-blocking function -// that loops indefinitely and, on a regular schedule, checks -// certificates for expiration and initiates a renewal of certs -// that are expiring soon. It also updates OCSP stapling. It -// should only be called once per cache. Panics are recovered, -// and if panicCount < 10, the function is called recursively, -// incrementing panicCount each time. Initial invocation should -// start panicCount at 0. -func (certCache *Cache) maintainAssets(panicCount int) { - log := loggerNamed(certCache.logger, "maintenance") - if log != nil { - log = log.With(zap.String("cache", fmt.Sprintf("%p", certCache))) - } - - defer func() { - if err := recover(); err != nil { - buf := make([]byte, stackTraceBufferSize) - buf = buf[:runtime.Stack(buf, false)] - if log != nil { - log.Error("panic", zap.Any("error", err), zap.ByteString("stack", buf)) - } - if panicCount < 10 { - certCache.maintainAssets(panicCount + 1) - } - } - }() - - renewalTicker := time.NewTicker(certCache.options.RenewCheckInterval) - ocspTicker := time.NewTicker(certCache.options.OCSPCheckInterval) - - if log != nil { - log.Info("started background certificate maintenance") - } - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - for { - select { - case <-renewalTicker.C: - err := certCache.RenewManagedCertificates(ctx) - if err != nil && log != nil { - log.Error("renewing managed certificates", zap.Error(err)) - } - case <-ocspTicker.C: - certCache.updateOCSPStaples(ctx) - case <-certCache.stopChan: - renewalTicker.Stop() - ocspTicker.Stop() - // TODO: stop any in-progress maintenance operations and clear locks we made (this might be done now with our use of context) - if log != nil { - log.Info("stopped background certificate maintenance") - } - close(certCache.doneChan) - return - } - } -} - -// RenewManagedCertificates renews managed certificates, -// including ones loaded on-demand. Note that this is done -// automatically on a regular basis; normally you will not -// need to call this. This method assumes non-interactive -// mode (i.e. operating in the background). -func (certCache *Cache) RenewManagedCertificates(ctx context.Context) error { - log := loggerNamed(certCache.logger, "maintenance") - - // configs will hold a map of certificate name to the config - // to use when managing that certificate - configs := make(map[string]*Config) - - // we use the queues for a very important reason: to do any and all - // operations that could require an exclusive write lock outside - // of the read lock! otherwise we get a deadlock, yikes. in other - // words, our first iteration through the certificate cache does NOT - // perform any operations--only queues them--so that more fine-grained - // write locks may be obtained during the actual operations. - var renewQueue, reloadQueue, deleteQueue []Certificate - - certCache.mu.RLock() - for certKey, cert := range certCache.cache { - if !cert.managed { - continue - } - - // the list of names on this cert should never be empty... programmer error? 
- if cert.Names == nil || len(cert.Names) == 0 { - if log != nil { - log.Warn("certificate has no names; removing from cache", zap.String("cert_key", certKey)) - } - deleteQueue = append(deleteQueue, cert) - continue - } - - // get the config associated with this certificate - cfg, err := certCache.getConfig(cert) - if err != nil { - if log != nil { - log.Error("unable to get configuration to manage certificate; unable to renew", - zap.Strings("identifiers", cert.Names), - zap.Error(err)) - } - continue - } - if cfg == nil { - // this is bad if this happens, probably a programmer error (oops) - if log != nil { - log.Error("no configuration associated with certificate; unable to manage", - zap.Strings("identifiers", cert.Names)) - } - continue - } - if cfg.OnDemand != nil { - continue - } - - // if time is up or expires soon, we need to try to renew it - if cert.NeedsRenewal(cfg) { - configs[cert.Names[0]] = cfg - - // see if the certificate in storage has already been renewed, possibly by another - // instance that didn't coordinate with this one; if so, just load it (this - // might happen if another instance already renewed it - kinda sloppy but checking disk - // first is a simple way to possibly drastically reduce rate limit problems) - storedCertExpiring, err := cfg.managedCertInStorageExpiresSoon(cert) - if err != nil { - // hmm, weird, but not a big deal, maybe it was deleted or something - if log != nil { - log.Warn("error while checking if stored certificate is also expiring soon", - zap.Strings("identifiers", cert.Names), - zap.Error(err)) - } - } else if !storedCertExpiring { - // if the certificate is NOT expiring soon and there was no error, then we - // are good to just reload the certificate from storage instead of repeating - // a likely-unnecessary renewal procedure - reloadQueue = append(reloadQueue, cert) - continue - } - - // the certificate in storage has not been renewed yet, so we will do it - // NOTE: It is super-important to note that the TLS-ALPN challenge requires - // a write lock on the cache in order to complete its challenge, so it is extra - // vital that this renew operation does not happen inside our read lock! 
- renewQueue = append(renewQueue, cert) - } - } - certCache.mu.RUnlock() - - // Reload certificates that merely need to be updated in memory - for _, oldCert := range reloadQueue { - timeLeft := oldCert.Leaf.NotAfter.Sub(time.Now().UTC()) - if log != nil { - log.Info("certificate expires soon, but is already renewed in storage; reloading stored certificate", - zap.Strings("identifiers", oldCert.Names), - zap.Duration("remaining", timeLeft)) - } - - cfg := configs[oldCert.Names[0]] - - // crucially, this happens OUTSIDE a lock on the certCache - err := cfg.reloadManagedCertificate(oldCert) - if err != nil { - if log != nil { - log.Error("loading renewed certificate", - zap.Strings("identifiers", oldCert.Names), - zap.Error(err)) - } - continue - } - } - - // Renewal queue - for _, oldCert := range renewQueue { - cfg := configs[oldCert.Names[0]] - err := certCache.queueRenewalTask(ctx, oldCert, cfg) - if err != nil { - if log != nil { - log.Error("queueing renewal task", - zap.Strings("identifiers", oldCert.Names), - zap.Error(err)) - } - continue - } - } - - // Deletion queue - certCache.mu.Lock() - for _, cert := range deleteQueue { - certCache.removeCertificate(cert) - } - certCache.mu.Unlock() - - return nil -} - -func (certCache *Cache) queueRenewalTask(ctx context.Context, oldCert Certificate, cfg *Config) error { - log := loggerNamed(certCache.logger, "maintenance") - - timeLeft := oldCert.Leaf.NotAfter.Sub(time.Now().UTC()) - if log != nil { - log.Info("certificate expires soon; queuing for renewal", - zap.Strings("identifiers", oldCert.Names), - zap.Duration("remaining", timeLeft)) - } - - // Get the name which we should use to renew this certificate; - // we only support managing certificates with one name per cert, - // so this should be easy. - renewName := oldCert.Names[0] - - // queue up this renewal job (is a no-op if already active or queued) - jm.Submit(cfg.Logger, "renew_"+renewName, func() error { - timeLeft := oldCert.Leaf.NotAfter.Sub(time.Now().UTC()) - if log != nil { - log.Info("attempting certificate renewal", - zap.Strings("identifiers", oldCert.Names), - zap.Duration("remaining", timeLeft)) - } - - // perform renewal - crucially, this happens OUTSIDE a lock on certCache - err := cfg.RenewCertAsync(ctx, renewName, false) - if err != nil { - if cfg.OnDemand != nil { - // loaded dynamically, remove dynamically - certCache.mu.Lock() - certCache.removeCertificate(oldCert) - certCache.mu.Unlock() - } - return fmt.Errorf("%v %v", oldCert.Names, err) - } - - // successful renewal, so update in-memory cache by loading - // renewed certificate so it will be used with handshakes - err = cfg.reloadManagedCertificate(oldCert) - if err != nil { - return ErrNoRetry{fmt.Errorf("%v %v", oldCert.Names, err)} - } - return nil - }) - - return nil -} - -// updateOCSPStaples updates the OCSP stapling in all -// eligible, cached certificates. 
-// -// OCSP maintenance strives to abide the relevant points on -// Ryan Sleevi's recommendations for good OCSP support: -// https://gist.github.com/sleevi/5efe9ef98961ecfb4da8 -func (certCache *Cache) updateOCSPStaples(ctx context.Context) { - logger := loggerNamed(certCache.logger, "maintenance") - - // temporary structures to store updates or tasks - // so that we can keep our locks short-lived - type ocspUpdate struct { - rawBytes []byte - parsed *ocsp.Response - } - type updateQueueEntry struct { - cert Certificate - certHash string - lastNextUpdate time.Time - } - type renewQueueEntry struct { - oldCert Certificate - ocspResp *ocsp.Response - } - updated := make(map[string]ocspUpdate) - var updateQueue []updateQueueEntry // certs that need a refreshed staple - var renewQueue []renewQueueEntry // certs that need to be renewed (due to revocation) - configs := make(map[string]*Config) - - // obtain brief read lock during our scan to see which staples need updating - certCache.mu.RLock() - for certHash, cert := range certCache.cache { - // no point in updating OCSP for expired or "synthetic" certificates - if cert.Leaf == nil || cert.Expired() { - continue - } - var lastNextUpdate time.Time - if cert.ocsp != nil { - lastNextUpdate = cert.ocsp.NextUpdate - if freshOCSP(cert.ocsp) { - continue // no need to update staple if ours is still fresh - } - } - updateQueue = append(updateQueue, updateQueueEntry{cert, certHash, lastNextUpdate}) - } - certCache.mu.RUnlock() - - // perform updates outside of any lock on certCache - for _, qe := range updateQueue { - cert := qe.cert - certHash := qe.certHash - lastNextUpdate := qe.lastNextUpdate - - cfg, err := certCache.getConfig(cert) - if err != nil { - if logger != nil { - logger.Error("unable to refresh OCSP staple because getting automation config for certificate failed", - zap.Strings("identifiers", cert.Names), - zap.Error(err)) - } - continue - } - if cfg == nil { - // this is bad if this happens, probably a programmer error (oops) - if logger != nil { - logger.Error("no configuration associated with certificate; unable to manage OCSP staples", - zap.Strings("identifiers", cert.Names)) - } - continue - } - - ocspResp, err := stapleOCSP(cfg.OCSP, cfg.Storage, &cert, nil) - if err != nil || ocspResp == nil { - if cert.ocsp != nil { - // if there was no staple before, that's fine; otherwise we should log the error - if logger != nil { - logger.Error("stapling OCSP", - zap.Strings("identifiers", cert.Names), - zap.Error(err)) - } - } - continue - } - - // By this point, we've obtained the latest OCSP response. - // If there was no staple before, or if the response is updated, make - // sure we apply the update to all names on the certificate. - if cert.ocsp != nil && (lastNextUpdate.IsZero() || lastNextUpdate != cert.ocsp.NextUpdate) { - if logger != nil { - logger.Info("advancing OCSP staple", - zap.Strings("identifiers", cert.Names), - zap.Time("from", lastNextUpdate), - zap.Time("to", cert.ocsp.NextUpdate)) - } - updated[certHash] = ocspUpdate{rawBytes: cert.Certificate.OCSPStaple, parsed: cert.ocsp} - } - - // If a managed certificate was revoked, we should attempt to replace it with a new one. - if cert.managed && ocspResp.Status == ocsp.Revoked && len(cert.Names) > 0 { - renewQueue = append(renewQueue, renewQueueEntry{ - oldCert: cert, - ocspResp: ocspResp, - }) - configs[cert.Names[0]] = cfg - } - } - - // These write locks should be brief since we have all the info we need now. 
- for certKey, update := range updated { - certCache.mu.Lock() - cert := certCache.cache[certKey] - cert.ocsp = update.parsed - cert.Certificate.OCSPStaple = update.rawBytes - certCache.cache[certKey] = cert - certCache.mu.Unlock() - } - - // We attempt to replace any certificates that were revoked. - // Crucially, this happens OUTSIDE a lock on the certCache. - for _, renew := range renewQueue { - if logger != nil { - logger.Warn("OCSP status for managed certificate is REVOKED; attempting to replace with new certificate", - zap.Strings("identifiers", renew.oldCert.Names), - zap.Time("expiration", renew.oldCert.Leaf.NotAfter)) - } - - renewName := renew.oldCert.Names[0] - cfg := configs[renewName] - - // if revoked for key compromise, we can't be sure whether the storage of - // the key is still safe; however, we KNOW the old key is not safe, and we - // can only hope by the time of revocation that storage has been secured; - // key management is not something we want to get into, but in this case - // it seems prudent to replace the key - and since renewal requires reuse - // of a prior key, we can't do a "renew" to replace the cert if we need a - // new key, so we'll have to do an obtain instead - var obtainInsteadOfRenew bool - if renew.ocspResp.RevocationReason == acme.ReasonKeyCompromise { - err := cfg.moveCompromisedPrivateKey(renew.oldCert, logger) - if err != nil && logger != nil { - logger.Error("could not remove compromised private key from use", - zap.Strings("identifiers", renew.oldCert.Names), - zap.String("issuer", renew.oldCert.issuerKey), - zap.Error(err)) - } - obtainInsteadOfRenew = true - } - - var err error - if obtainInsteadOfRenew { - err = cfg.ObtainCertAsync(ctx, renewName) - } else { - // notice that we force renewal; otherwise, it might see that the - // certificate isn't close to expiring and return, but we really - // need a replacement certificate! see issue #4191 - err = cfg.RenewCertAsync(ctx, renewName, true) - } - if err != nil { - // probably better to not serve a revoked certificate at all - if logger != nil { - logger.Error("unable to obtain new certificate after OCSP status of REVOKED; removing from cache", - zap.Strings("identifiers", renew.oldCert.Names), - zap.Error(err)) - } - certCache.mu.Lock() - certCache.removeCertificate(renew.oldCert) - certCache.mu.Unlock() - continue - } - err = cfg.reloadManagedCertificate(renew.oldCert) - if err != nil { - if logger != nil { - logger.Error("after obtaining new certificate due to OCSP status of REVOKED", - zap.Strings("identifiers", renew.oldCert.Names), - zap.Error(err)) - } - continue - } - } -} - -// CleanStorageOptions specifies how to clean up a storage unit. -type CleanStorageOptions struct { - OCSPStaples bool - ExpiredCerts bool - ExpiredCertGracePeriod time.Duration -} - -// CleanStorage removes assets which are no longer useful, -// according to opts. -func CleanStorage(ctx context.Context, storage Storage, opts CleanStorageOptions) { - if opts.OCSPStaples { - err := deleteOldOCSPStaples(ctx, storage) - if err != nil { - log.Printf("[ERROR] Deleting old OCSP staples: %v", err) - } - } - if opts.ExpiredCerts { - err := deleteExpiredCerts(ctx, storage, opts.ExpiredCertGracePeriod) - if err != nil { - log.Printf("[ERROR] Deleting expired certificates: %v", err) - } - } - // TODO: delete stale locks?
-} - -func deleteOldOCSPStaples(ctx context.Context, storage Storage) error { - ocspKeys, err := storage.List(prefixOCSP, false) - if err != nil { - // maybe just hasn't been created yet; no big deal - return nil - } - for _, key := range ocspKeys { - // if context was cancelled, quit early; otherwise proceed - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - ocspBytes, err := storage.Load(key) - if err != nil { - log.Printf("[ERROR] While deleting old OCSP staples, unable to load staple file: %v", err) - continue - } - resp, err := ocsp.ParseResponse(ocspBytes, nil) - if err != nil { - // contents are invalid; delete it - err = storage.Delete(key) - if err != nil { - log.Printf("[ERROR] Purging corrupt staple file %s: %v", key, err) - } - continue - } - if time.Now().After(resp.NextUpdate) { - // response has expired; delete it - err = storage.Delete(key) - if err != nil { - log.Printf("[ERROR] Purging expired staple file %s: %v", key, err) - } - } - } - return nil -} - -func deleteExpiredCerts(ctx context.Context, storage Storage, gracePeriod time.Duration) error { - issuerKeys, err := storage.List(prefixCerts, false) - if err != nil { - // maybe just hasn't been created yet; no big deal - return nil - } - - for _, issuerKey := range issuerKeys { - siteKeys, err := storage.List(issuerKey, false) - if err != nil { - log.Printf("[ERROR] Listing contents of %s: %v", issuerKey, err) - continue - } - - for _, siteKey := range siteKeys { - // if context was cancelled, quit early; otherwise proceed - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - siteAssets, err := storage.List(siteKey, false) - if err != nil { - log.Printf("[ERROR] Listing contents of %s: %v", siteKey, err) - continue - } - - for _, assetKey := range siteAssets { - if path.Ext(assetKey) != ".crt" { - continue - } - - certFile, err := storage.Load(assetKey) - if err != nil { - return fmt.Errorf("loading certificate file %s: %v", assetKey, err) - } - block, _ := pem.Decode(certFile) - if block == nil || block.Type != "CERTIFICATE" { - return fmt.Errorf("certificate file %s does not contain PEM-encoded certificate", assetKey) - } - cert, err := x509.ParseCertificate(block.Bytes) - if err != nil { - return fmt.Errorf("certificate file %s is malformed; error parsing PEM: %v", assetKey, err) - } - - if expiredTime := time.Since(cert.NotAfter); expiredTime >= gracePeriod { - log.Printf("[INFO] Certificate %s expired %s ago; cleaning up", assetKey, expiredTime) - baseName := strings.TrimSuffix(assetKey, ".crt") - for _, relatedAsset := range []string{ - assetKey, - baseName + ".key", - baseName + ".json", - } { - log.Printf("[INFO] Deleting %s because resource expired", relatedAsset) - err := storage.Delete(relatedAsset) - if err != nil { - log.Printf("[ERROR] Cleaning up asset related to expired certificate for %s: %s: %v", - baseName, relatedAsset, err) - } - } - } - } - - // update listing; if folder is empty, delete it - siteAssets, err = storage.List(siteKey, false) - if err != nil { - continue - } - if len(siteAssets) == 0 { - log.Printf("[INFO] Deleting %s because key is empty", siteKey) - err := storage.Delete(siteKey) - if err != nil { - return fmt.Errorf("deleting empty site folder %s: %v", siteKey, err) - } - } - } - } - return nil -} - -// moveCompromisedPrivateKey moves the private key for cert to a ".compromised" file -// by copying the data to the new file, then deleting the old one. 
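// [Editor's note] A minimal sketch of invoking the storage sweep defined
// above; the ctx and storage values and the grace period are assumptions
// for illustration.
//
//	CleanStorage(ctx, storage, CleanStorageOptions{
//		OCSPStaples:            true,
//		ExpiredCerts:           true,
//		ExpiredCertGracePeriod: 24 * time.Hour,
//	})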
-func (cfg *Config) moveCompromisedPrivateKey(cert Certificate, logger *zap.Logger) error { - privKeyStorageKey := StorageKeys.SitePrivateKey(cert.issuerKey, cert.Names[0]) - - privKeyPEM, err := cfg.Storage.Load(privKeyStorageKey) - if err != nil { - return err - } - - compromisedPrivKeyStorageKey := privKeyStorageKey + ".compromised" - err = cfg.Storage.Store(compromisedPrivKeyStorageKey, privKeyPEM) - if err != nil { - // better safe than sorry: as a last resort, try deleting the key so it won't be reused - cfg.Storage.Delete(privKeyStorageKey) - return err - } - - err = cfg.Storage.Delete(privKeyStorageKey) - if err != nil { - return err - } - - logger.Info("removed certificate's compromised private key from use", - zap.String("storage_path", compromisedPrivKeyStorageKey), - zap.Strings("identifiers", cert.Names), - zap.String("issuer", cert.issuerKey)) - - return nil -} - -const ( - // DefaultRenewCheckInterval is how often to check certificates for expiration. - // Scans are very lightweight, so this can be semi-frequent. This default should - // be smaller than <certificate lifetime>*DefaultRenewalWindowRatio/3, which - // gives certificates plenty of chance to be renewed on time. - DefaultRenewCheckInterval = 10 * time.Minute - - // DefaultRenewalWindowRatio is how much of a certificate's lifetime becomes the - // renewal window. The renewal window is the span of time at the end of the - // certificate's validity period in which it should be renewed. A default value - // of ~1/3 is pretty safe and recommended for most certificates. - DefaultRenewalWindowRatio = 1.0 / 3.0 - - // DefaultOCSPCheckInterval is how often to check if OCSP stapling needs updating. - DefaultOCSPCheckInterval = 1 * time.Hour -) diff --git a/vendor/github.com/caddyserver/certmagic/ocsp.go b/vendor/github.com/caddyserver/certmagic/ocsp.go deleted file mode 100644 index 4a21546d..00000000 --- a/vendor/github.com/caddyserver/certmagic/ocsp.go +++ /dev/null @@ -1,229 +0,0 @@ -// Copyright 2015 Matthew Holt -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package certmagic - -import ( - "bytes" - "crypto/x509" - "encoding/pem" - "fmt" - "io" - "io/ioutil" - "log" - "net/http" - "time" - - "golang.org/x/crypto/ocsp" -) - -// stapleOCSP staples OCSP information to cert for hostname name. -// If you have it handy, you should pass in the PEM-encoded certificate -// bundle; otherwise the DER-encoded cert will have to be PEM-encoded. -// If you don't have the PEM blocks already, just pass in nil. -// -// Errors here are not necessarily fatal, it could just be that the -// certificate doesn't have an issuer URL. This function may return -// both nil values if OCSP stapling is disabled according to ocspConfig. -// -// If a status was received, it returns that status. Note that the -// returned status is not always stapled to the certificate.
-func stapleOCSP(ocspConfig OCSPConfig, storage Storage, cert *Certificate, pemBundle []byte) (*ocsp.Response, error) { - if ocspConfig.DisableStapling { - return nil, nil - } - - if pemBundle == nil { - // we need a PEM encoding only for some function calls below - bundle := new(bytes.Buffer) - for _, derBytes := range cert.Certificate.Certificate { - pem.Encode(bundle, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}) - } - pemBundle = bundle.Bytes() - } - - var ocspBytes []byte - var ocspResp *ocsp.Response - var ocspErr error - var gotNewOCSP bool - - // First try to load OCSP staple from storage and see if - // we can still use it. - ocspStapleKey := StorageKeys.OCSPStaple(cert, pemBundle) - cachedOCSP, err := storage.Load(ocspStapleKey) - if err == nil { - resp, err := ocsp.ParseResponse(cachedOCSP, nil) - if err == nil { - if freshOCSP(resp) { - // staple is still fresh; use it - ocspBytes = cachedOCSP - ocspResp = resp - } - } else { - // invalid contents; delete the file - // (we do this independently of the maintenance routine because - // in this case we know for sure this should be a staple file - // because we loaded it by name, whereas the maintenance routine - // just iterates the list of files, even if somehow a non-staple - // file gets in the folder. in this case we are sure it is corrupt.) - err := storage.Delete(ocspStapleKey) - if err != nil { - log.Printf("[WARNING] Unable to delete invalid OCSP staple file: %v", err) - } - } - } - - // If we couldn't get a fresh staple by reading the cache, - // then we need to request it from the OCSP responder - if ocspResp == nil || len(ocspBytes) == 0 { - ocspBytes, ocspResp, ocspErr = getOCSPForCert(ocspConfig, pemBundle) - if ocspErr != nil { - // An error here is not a problem because a certificate may simply - // not contain a link to an OCSP server. But we should log it anyway. - // There's nothing else we can do to get OCSP for this certificate, - // so we can return here with the error. - return nil, fmt.Errorf("no OCSP stapling for %v: %v", cert.Names, ocspErr) - } - gotNewOCSP = true - } - - // By now, we should have a response. If good, staple it to - // the certificate. If the OCSP response was not loaded from - // storage, we persist it for next time. - if ocspResp.Status == ocsp.Good { - if ocspResp.NextUpdate.After(cert.Leaf.NotAfter) { - // uh oh, this OCSP response expires AFTER the certificate does, that's kinda bogus. - // it was the reason a lot of Symantec-validated sites (not Caddy) went down - // in October 2017. https://twitter.com/mattiasgeniar/status/919432824708648961 - return ocspResp, fmt.Errorf("invalid: OCSP response for %v valid after certificate expiration (%s)", - cert.Names, cert.Leaf.NotAfter.Sub(ocspResp.NextUpdate)) - } - cert.Certificate.OCSPStaple = ocspBytes - cert.ocsp = ocspResp - if gotNewOCSP { - err := storage.Store(ocspStapleKey, ocspBytes) - if err != nil { - return ocspResp, fmt.Errorf("unable to write OCSP staple file for %v: %v", cert.Names, err) - } - } - } - - return ocspResp, nil -} - -// getOCSPForCert takes a PEM encoded cert or cert bundle returning the raw OCSP response, -// the parsed response, and an error, if any. The returned []byte can be passed directly -// into the OCSPStaple property of a tls.Certificate. If the bundle only contains the -// issued certificate, this function will try to get the issuer certificate from the -// IssuingCertificateURL in the certificate. 
If the []byte and/or ocsp.Response return -// values are nil, the OCSP status may be assumed OCSPUnknown. -// -// Borrowed from xenolf. -func getOCSPForCert(ocspConfig OCSPConfig, bundle []byte) ([]byte, *ocsp.Response, error) { - // TODO: Perhaps this should be synchronized too, with a Locker? - - certificates, err := parseCertsFromPEMBundle(bundle) - if err != nil { - return nil, nil, err - } - - // We expect the certificate slice to be ordered downwards the chain. - // SRV CRT -> CA. We need to pull the leaf and issuer certs out of it, - // which should always be the first two certificates. If there's no - // OCSP server listed in the leaf cert, there's nothing to do. And if - // we have only one certificate so far, we need to get the issuer cert. - issuedCert := certificates[0] - if len(issuedCert.OCSPServer) == 0 { - return nil, nil, fmt.Errorf("no OCSP server specified in certificate") - } - - // apply override for responder URL - respURL := issuedCert.OCSPServer[0] - if len(ocspConfig.ResponderOverrides) > 0 { - if override, ok := ocspConfig.ResponderOverrides[respURL]; ok { - respURL = override - } - } - if respURL == "" { - return nil, nil, fmt.Errorf("override disables querying OCSP responder: %v", issuedCert.OCSPServer[0]) - } - - if len(certificates) == 1 { - if len(issuedCert.IssuingCertificateURL) == 0 { - return nil, nil, fmt.Errorf("no URL to issuing certificate") - } - - resp, err := http.Get(issuedCert.IssuingCertificateURL[0]) - if err != nil { - return nil, nil, fmt.Errorf("getting issuer certificate: %v", err) - } - defer resp.Body.Close() - - issuerBytes, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1024*1024)) - if err != nil { - return nil, nil, fmt.Errorf("reading issuer certificate: %v", err) - } - - issuerCert, err := x509.ParseCertificate(issuerBytes) - if err != nil { - return nil, nil, fmt.Errorf("parsing issuer certificate: %v", err) - } - - // insert it into the slice on position 0; - // we want it ordered right SRV CRT -> CA - certificates = append(certificates, issuerCert) - } - - issuerCert := certificates[1] - - ocspReq, err := ocsp.CreateRequest(issuedCert, issuerCert, nil) - if err != nil { - return nil, nil, fmt.Errorf("creating OCSP request: %v", err) - } - - reader := bytes.NewReader(ocspReq) - req, err := http.Post(respURL, "application/ocsp-request", reader) - if err != nil { - return nil, nil, fmt.Errorf("making OCSP request: %v", err) - } - defer req.Body.Close() - - ocspResBytes, err := ioutil.ReadAll(io.LimitReader(req.Body, 1024*1024)) - if err != nil { - return nil, nil, fmt.Errorf("reading OCSP response: %v", err) - } - - ocspRes, err := ocsp.ParseResponse(ocspResBytes, issuerCert) - if err != nil { - return nil, nil, fmt.Errorf("parsing OCSP response: %v", err) - } - - return ocspResBytes, ocspRes, nil -} - -// freshOCSP returns true if resp is still fresh, -// meaning that it is not expedient to get an -// updated response from the OCSP server. -func freshOCSP(resp *ocsp.Response) bool { - nextUpdate := resp.NextUpdate - // If there is an OCSP responder certificate, and it expires before the - // OCSP response, use its expiration date as the end of the OCSP - // response's validity period. 
- if resp.Certificate != nil && resp.Certificate.NotAfter.Before(nextUpdate) { - nextUpdate = resp.Certificate.NotAfter - } - // start checking OCSP staple about halfway through validity period for good measure - refreshTime := resp.ThisUpdate.Add(nextUpdate.Sub(resp.ThisUpdate) / 2) - return time.Now().Before(refreshTime) -} diff --git a/vendor/github.com/caddyserver/certmagic/ratelimiter.go b/vendor/github.com/caddyserver/certmagic/ratelimiter.go deleted file mode 100644 index 6a3b7b18..00000000 --- a/vendor/github.com/caddyserver/certmagic/ratelimiter.go +++ /dev/null @@ -1,243 +0,0 @@ -// Copyright 2015 Matthew Holt -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package certmagic - -import ( - "context" - "log" - "runtime" - "sync" - "time" -) - -// NewRateLimiter returns a rate limiter that allows up to maxEvents -// in a sliding window of size window. If maxEvents and window are -// both 0, or if maxEvents is non-zero and window is 0, rate limiting -// is disabled. This function panics if maxEvents is less than 0 or -// if maxEvents is 0 and window is non-zero, which is considered to be -// an invalid configuration, as it would never allow events. -func NewRateLimiter(maxEvents int, window time.Duration) *RingBufferRateLimiter { - if maxEvents < 0 { - panic("maxEvents cannot be less than zero") - } - if maxEvents == 0 && window != 0 { - panic("invalid configuration: maxEvents = 0 and window != 0 would not allow any events") - } - rbrl := &RingBufferRateLimiter{ - window: window, - ring: make([]time.Time, maxEvents), - started: make(chan struct{}), - stopped: make(chan struct{}), - ticket: make(chan struct{}), - } - go rbrl.loop() - <-rbrl.started // make sure loop is ready to receive before we return - return rbrl -} - -// RingBufferRateLimiter uses a ring to enforce rate limits -// consisting of a maximum number of events within a single -// sliding window of a given duration. An empty value is -// not valid; use NewRateLimiter to get one. -type RingBufferRateLimiter struct { - window time.Duration - ring []time.Time // maxEvents == len(ring) - cursor int // always points to the oldest timestamp - mu sync.Mutex // protects ring, cursor, and window - started chan struct{} - stopped chan struct{} - ticket chan struct{} -} - -// Stop cleans up r's scheduling goroutine. 
-func (r *RingBufferRateLimiter) Stop() { - close(r.stopped) -} - -func (r *RingBufferRateLimiter) loop() { - defer func() { - if err := recover(); err != nil { - buf := make([]byte, stackTraceBufferSize) - buf = buf[:runtime.Stack(buf, false)] - log.Printf("panic: ring buffer rate limiter: %v\n%s", err, buf) - } - }() - - for { - // if we've been stopped, return - select { - case <-r.stopped: - return - default: - } - - if len(r.ring) == 0 { - if r.window == 0 { - // rate limiting is disabled; always allow immediately - r.permit() - continue - } - panic("invalid configuration: maxEvents = 0 and window != 0 does not allow any events") - } - - // wait until next slot is available or until we've been stopped - r.mu.Lock() - then := r.ring[r.cursor].Add(r.window) - r.mu.Unlock() - waitDuration := time.Until(then) - waitTimer := time.NewTimer(waitDuration) - select { - case <-waitTimer.C: - r.permit() - case <-r.stopped: - waitTimer.Stop() - return - } - } -} - -// Allow returns true if the event is allowed to -// happen right now. It does not wait. If the event -// is allowed, a ticket is claimed. -func (r *RingBufferRateLimiter) Allow() bool { - select { - case <-r.ticket: - return true - default: - return false - } -} - -// Wait blocks until the event is allowed to occur. It returns an -// error if the context is cancelled. -func (r *RingBufferRateLimiter) Wait(ctx context.Context) error { - select { - case <-ctx.Done(): - return context.Canceled - case <-r.ticket: - return nil - } -} - -// MaxEvents returns the maximum number of events that -// are allowed within the sliding window. -func (r *RingBufferRateLimiter) MaxEvents() int { - r.mu.Lock() - defer r.mu.Unlock() - return len(r.ring) -} - -// SetMaxEvents changes the maximum number of events that are -// allowed in the sliding window. If the new limit is lower, -// the oldest events will be forgotten. If the new limit is -// higher, the window will suddenly have capacity for new -// reservations. It panics if maxEvents is 0 and window size -// is not zero. -func (r *RingBufferRateLimiter) SetMaxEvents(maxEvents int) { - newRing := make([]time.Time, maxEvents) - r.mu.Lock() - defer r.mu.Unlock() - - if r.window != 0 && maxEvents == 0 { - panic("invalid configuration: maxEvents = 0 and window != 0 would not allow any events") - } - - // only make the change if the new limit is different - if maxEvents == len(r.ring) { - return - } - - // the new ring may be smaller; fast-forward to the - // oldest timestamp that will be kept in the new - // ring so the oldest ones are forgotten and the - // newest ones will be remembered - sizeDiff := len(r.ring) - maxEvents - for i := 0; i < sizeDiff; i++ { - r.advance() - } - - if len(r.ring) > 0 { - // copy timestamps into the new ring until we - // have either copied all of them or have reached - // the capacity of the new ring - startCursor := r.cursor - for i := 0; i < len(newRing); i++ { - newRing[i] = r.ring[r.cursor] - r.advance() - if r.cursor == startCursor { - // new ring is larger than old one; - // "we've come full circle" - break - } - } - } - - r.ring = newRing - r.cursor = 0 -} - -// Window returns the size of the sliding window. -func (r *RingBufferRateLimiter) Window() time.Duration { - r.mu.Lock() - defer r.mu.Unlock() - return r.window -} - -// SetWindow changes r's sliding window duration to window. -// Goroutines that are already blocked on a call to Wait() -// will not be affected. It panics if window is non-zero -// but the max event limit is 0. 
-func (r *RingBufferRateLimiter) SetWindow(window time.Duration) { - r.mu.Lock() - defer r.mu.Unlock() - if window != 0 && len(r.ring) == 0 { - panic("invalid configuration: maxEvents = 0 and window != 0 would not allow any events") - } - r.window = window -} - -// permit allows one event through the throttle. This method -// blocks until a goroutine is waiting for a ticket or until -// the rate limiter is stopped. -func (r *RingBufferRateLimiter) permit() { - for { - select { - case r.started <- struct{}{}: - // notify parent goroutine that we've started; should - // only happen once, before constructor returns - continue - case <-r.stopped: - return - case r.ticket <- struct{}{}: - r.mu.Lock() - defer r.mu.Unlock() - if len(r.ring) > 0 { - r.ring[r.cursor] = time.Now() - r.advance() - } - return - } - } -} - -// advance moves the cursor to the next position. -// It is NOT safe for concurrent use, so it must -// be called inside a lock on r.mu. -func (r *RingBufferRateLimiter) advance() { - r.cursor++ - if r.cursor >= len(r.ring) { - r.cursor = 0 - } -} diff --git a/vendor/github.com/caddyserver/certmagic/solvers.go b/vendor/github.com/caddyserver/certmagic/solvers.go deleted file mode 100644 index 8cdaeaf8..00000000 --- a/vendor/github.com/caddyserver/certmagic/solvers.go +++ /dev/null @@ -1,686 +0,0 @@ -// Copyright 2015 Matthew Holt -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package certmagic - -import ( - "context" - "crypto/tls" - "encoding/json" - "fmt" - "log" - "net" - "net/http" - "path" - "runtime" - "strings" - "sync" - "sync/atomic" - "time" - - "github.com/libdns/libdns" - "github.com/mholt/acmez" - "github.com/mholt/acmez/acme" - "github.com/miekg/dns" -) - -// httpSolver solves the HTTP challenge. It must be -// associated with a config and an address to use -// for solving the challenge. If multiple httpSolvers -// are initialized concurrently, the first one to -// begin will start the server, and the last one to -// finish will stop the server. This solver must be -// wrapped by a distributedSolver to work properly, -// because the only way the HTTP challenge handler -// can access the keyAuth material is by loading it -// from storage, which is done by distributedSolver. -type httpSolver struct { - closed int32 // accessed atomically - acmeManager *ACMEManager - address string -} - -// Present starts an HTTP server if none is already listening on s.address. 
-func (s *httpSolver) Present(ctx context.Context, _ acme.Challenge) error { - solversMu.Lock() - defer solversMu.Unlock() - - si := getSolverInfo(s.address) - si.count++ - if si.listener != nil { - return nil // already be served by us - } - - // notice the unusual error handling here; we - // only continue to start a challenge server if - // we got a listener; in all other cases return - ln, err := robustTryListen(s.address) - if ln == nil { - return err - } - - // successfully bound socket, so save listener and start key auth HTTP server - si.listener = ln - go s.serve(si) - - return nil -} - -// serve is an HTTP server that serves only HTTP challenge responses. -func (s *httpSolver) serve(si *solverInfo) { - defer func() { - if err := recover(); err != nil { - buf := make([]byte, stackTraceBufferSize) - buf = buf[:runtime.Stack(buf, false)] - log.Printf("panic: http solver server: %v\n%s", err, buf) - } - }() - defer close(si.done) - httpServer := &http.Server{Handler: s.acmeManager.HTTPChallengeHandler(http.NewServeMux())} - httpServer.SetKeepAlivesEnabled(false) - err := httpServer.Serve(si.listener) - if err != nil && atomic.LoadInt32(&s.closed) != 1 { - log.Printf("[ERROR] key auth HTTP server: %v", err) - } -} - -// CleanUp cleans up the HTTP server if it is the last one to finish. -func (s *httpSolver) CleanUp(ctx context.Context, _ acme.Challenge) error { - solversMu.Lock() - defer solversMu.Unlock() - si := getSolverInfo(s.address) - si.count-- - if si.count == 0 { - // last one out turns off the lights - atomic.StoreInt32(&s.closed, 1) - if si.listener != nil { - si.listener.Close() - <-si.done - } - delete(solvers, s.address) - } - return nil -} - -// tlsALPNSolver is a type that can solve TLS-ALPN challenges. -// It must have an associated config and address on which to -// serve the challenge. -type tlsALPNSolver struct { - config *Config - address string -} - -// Present adds the certificate to the certificate cache and, if -// needed, starts a TLS server for answering TLS-ALPN challenges. 
-func (s *tlsALPNSolver) Present(ctx context.Context, chal acme.Challenge) error { - // we pre-generate the certificate for efficiency with multi-perspective - // validation, so it only has to be done once (at least, by this instance; - // distributed solving does not have that luxury, oh well) - update the - // challenge data in memory to be the generated certificate - cert, err := acmez.TLSALPN01ChallengeCert(chal) - if err != nil { - return err - } - - key := challengeKey(chal) - activeChallengesMu.Lock() - chalData := activeChallenges[key] - chalData.data = cert - activeChallenges[key] = chalData - activeChallengesMu.Unlock() - - // the rest of this function increments the - // challenge count for the solver at this - // listener address, and if necessary, starts - // a simple TLS server - - solversMu.Lock() - defer solversMu.Unlock() - - si := getSolverInfo(s.address) - si.count++ - if si.listener != nil { - return nil // already be served by us - } - - // notice the unusual error handling here; we - // only continue to start a challenge server if - // we got a listener; in all other cases return - ln, err := robustTryListen(s.address) - if ln == nil { - return err - } - - // we were able to bind the socket, so make it into a TLS - // listener, store it with the solverInfo, and start the - // challenge server - - si.listener = tls.NewListener(ln, s.config.TLSConfig()) - - go func() { - defer func() { - if err := recover(); err != nil { - buf := make([]byte, stackTraceBufferSize) - buf = buf[:runtime.Stack(buf, false)] - log.Printf("panic: tls-alpn solver server: %v\n%s", err, buf) - } - }() - defer close(si.done) - for { - conn, err := si.listener.Accept() - if err != nil { - if atomic.LoadInt32(&si.closed) == 1 { - return - } - log.Printf("[ERROR] TLS-ALPN challenge server: accept: %v", err) - continue - } - go s.handleConn(conn) - } - }() - - return nil -} - -// handleConn completes the TLS handshake and then closes conn. -func (*tlsALPNSolver) handleConn(conn net.Conn) { - defer func() { - if err := recover(); err != nil { - buf := make([]byte, stackTraceBufferSize) - buf = buf[:runtime.Stack(buf, false)] - log.Printf("panic: tls-alpn solver handler: %v\n%s", err, buf) - } - }() - defer conn.Close() - tlsConn, ok := conn.(*tls.Conn) - if !ok { - log.Printf("[ERROR] TLS-ALPN challenge server: expected tls.Conn but got %T: %#v", conn, conn) - return - } - err := tlsConn.Handshake() - if err != nil { - log.Printf("[ERROR] TLS-ALPN challenge server: handshake: %v", err) - return - } -} - -// CleanUp removes the challenge certificate from the cache, and if -// it is the last one to finish, stops the TLS server. -func (s *tlsALPNSolver) CleanUp(ctx context.Context, chal acme.Challenge) error { - solversMu.Lock() - defer solversMu.Unlock() - si := getSolverInfo(s.address) - si.count-- - if si.count == 0 { - // last one out turns off the lights - atomic.StoreInt32(&si.closed, 1) - if si.listener != nil { - si.listener.Close() - <-si.done - } - delete(solvers, s.address) - } - - return nil -} - -// DNS01Solver is a type that makes libdns providers usable -// as ACME dns-01 challenge solvers. -// See https://github.com/libdns/libdns -type DNS01Solver struct { - // The implementation that interacts with the DNS - // provider to set or delete records. (REQUIRED) - DNSProvider ACMEDNSProvider - - // The TTL for the temporary challenge records. - TTL time.Duration - - // Maximum time to wait for temporary record to appear. 
- PropagationTimeout time.Duration - - // Preferred DNS resolver(s) to use when doing DNS lookups. - Resolvers []string - - txtRecords map[string]dnsPresentMemory // keyed by domain name - txtRecordsMu sync.Mutex -} - -// Present creates the DNS TXT record for the given ACME challenge. -func (s *DNS01Solver) Present(ctx context.Context, challenge acme.Challenge) error { - dnsName := challenge.DNS01TXTRecordName() - keyAuth := challenge.DNS01KeyAuthorization() - - // multiple identifiers can have the same ACME challenge - // domain (e.g. example.com and *.example.com) so we need - // to ensure that we don't solve those concurrently and - // step on each challenges' metaphorical toes; see - // https://github.com/caddyserver/caddy/issues/3474 - activeDNSChallenges.Lock(dnsName) - - zone, err := findZoneByFQDN(dnsName, recursiveNameservers(s.Resolvers)) - if err != nil { - return fmt.Errorf("could not determine zone for domain %q: %v", dnsName, err) - } - - rec := libdns.Record{ - Type: "TXT", - Name: libdns.RelativeName(dnsName+".", zone), - Value: keyAuth, - TTL: s.TTL, - } - - results, err := s.DNSProvider.AppendRecords(ctx, zone, []libdns.Record{rec}) - if err != nil { - return fmt.Errorf("adding temporary record for zone %s: %w", zone, err) - } - if len(results) != 1 { - return fmt.Errorf("expected one record, got %d: %v", len(results), results) - } - - // remember the record and zone we got so we can clean up more efficiently - s.txtRecordsMu.Lock() - if s.txtRecords == nil { - s.txtRecords = make(map[string]dnsPresentMemory) - } - s.txtRecords[dnsName] = dnsPresentMemory{dnsZone: zone, rec: results[0]} - s.txtRecordsMu.Unlock() - - return nil -} - -// Wait blocks until the TXT record created in Present() appears in -// authoritative lookups, i.e. until it has propagated, or until -// timeout, whichever is first. -func (s *DNS01Solver) Wait(ctx context.Context, challenge acme.Challenge) error { - dnsName := challenge.DNS01TXTRecordName() - keyAuth := challenge.DNS01KeyAuthorization() - - timeout := s.PropagationTimeout - if timeout == 0 { - timeout = 2 * time.Minute - } - const interval = 2 * time.Second - - resolvers := recursiveNameservers(s.Resolvers) - - var err error - start := time.Now() - for time.Since(start) < timeout { - select { - case <-time.After(interval): - case <-ctx.Done(): - return ctx.Err() - } - var ready bool - ready, err = checkDNSPropagation(dnsName, keyAuth, resolvers) - if err != nil { - return fmt.Errorf("checking DNS propagation of %s: %w", dnsName, err) - } - if ready { - return nil - } - } - - return fmt.Errorf("timed out waiting for record to fully propagate; verify DNS provider configuration is correct - last error: %v", err) -} - -// CleanUp deletes the DNS TXT record created in Present(). -func (s *DNS01Solver) CleanUp(ctx context.Context, challenge acme.Challenge) error { - dnsName := challenge.DNS01TXTRecordName() - - defer func() { - // always forget about it so we don't leak memory - s.txtRecordsMu.Lock() - delete(s.txtRecords, dnsName) - s.txtRecordsMu.Unlock() - - // always do this last - but always do it! 
- activeDNSChallenges.Unlock(dnsName) - }() - - // recall the record we created and zone we looked up - s.txtRecordsMu.Lock() - memory, ok := s.txtRecords[dnsName] - if !ok { - s.txtRecordsMu.Unlock() - return fmt.Errorf("no memory of presenting a DNS record for %s (probably OK if presenting failed)", challenge.Identifier.Value) - } - s.txtRecordsMu.Unlock() - - // clean up the record - _, err := s.DNSProvider.DeleteRecords(ctx, memory.dnsZone, []libdns.Record{memory.rec}) - if err != nil { - return fmt.Errorf("deleting temporary record for zone %s: %w", memory.dnsZone, err) - } - - return nil -} - -type dnsPresentMemory struct { - dnsZone string - rec libdns.Record -} - -// ACMEDNSProvider defines the set of operations required for -// ACME challenges. A DNS provider must be able to append and -// delete records in order to solve ACME challenges. Find one -// you can use at https://github.com/libdns. If your provider -// isn't implemented yet, feel free to contribute! -type ACMEDNSProvider interface { - libdns.RecordAppender - libdns.RecordDeleter -} - -// activeDNSChallenges synchronizes DNS challenges for -// names to ensure that challenges for the same ACME -// DNS name do not overlap; for example, the TXT record -// to make for both example.com and *.example.com are -// the same; thus we cannot solve them concurrently. -var activeDNSChallenges = newMapMutex() - -// mapMutex implements named mutexes. -type mapMutex struct { - cond *sync.Cond - set map[interface{}]struct{} -} - -func newMapMutex() *mapMutex { - return &mapMutex{ - cond: sync.NewCond(new(sync.Mutex)), - set: make(map[interface{}]struct{}), - } -} - -func (mmu *mapMutex) Lock(key interface{}) { - mmu.cond.L.Lock() - defer mmu.cond.L.Unlock() - for mmu.locked(key) { - mmu.cond.Wait() - } - mmu.set[key] = struct{}{} -} - -func (mmu *mapMutex) Unlock(key interface{}) { - mmu.cond.L.Lock() - defer mmu.cond.L.Unlock() - delete(mmu.set, key) - mmu.cond.Broadcast() -} - -func (mmu *mapMutex) locked(key interface{}) (ok bool) { - _, ok = mmu.set[key] - return -} - -// distributedSolver allows the ACME HTTP-01 and TLS-ALPN challenges -// to be solved by an instance other than the one which initiated it. -// This is useful behind load balancers or in other cluster/fleet -// configurations. The only requirement is that the instance which -// initiates the challenge shares the same storage and locker with -// the others in the cluster. The storage backing the certificate -// cache in distributedSolver.config is crucial. -// -// Obviously, the instance which completes the challenge must be -// serving on the HTTPChallengePort for the HTTP-01 challenge or the -// TLSALPNChallengePort for the TLS-ALPN-01 challenge (or have all -// the packets port-forwarded) to receive and handle the request. The -// server which receives the challenge must handle it by checking to -// see if the challenge token exists in storage, and if so, decode it -// and use it to serve up the correct response. HTTPChallengeHandler -// in this package as well as the GetCertificate method implemented -// by a Config support and even require this behavior. -// -// In short: the only two requirements for cluster operation are -// sharing sync and storage, and using the facilities provided by -// this package for solving the challenges. -type distributedSolver struct { - // The storage backing the distributed solver. It must be - // the same storage configuration as what is solving the - // challenge in order to be effective. 
-	storage Storage
-
-	// The storage key prefix, associated with the issuer
-	// that is solving the challenge.
-	storageKeyIssuerPrefix string
-
-	// Since the distributedSolver is only a
-	// wrapper over an actual solver, place
-	// the actual solver here.
-	solver acmez.Solver
-}
-
-// Present invokes the underlying solver's Present method
-// and also stores the serialized challenge info in dhs.storage
-// so that another instance in the cluster can solve it.
-func (dhs distributedSolver) Present(ctx context.Context, chal acme.Challenge) error {
-	infoBytes, err := json.Marshal(chal)
-	if err != nil {
-		return err
-	}
-
-	err = dhs.storage.Store(dhs.challengeTokensKey(challengeKey(chal)), infoBytes)
-	if err != nil {
-		return err
-	}
-
-	err = dhs.solver.Present(ctx, chal)
-	if err != nil {
-		return fmt.Errorf("presenting with embedded solver: %v", err)
-	}
-	return nil
-}
-
-// Wait wraps the underlying solver's Wait() method, if any. Implements acmez.Waiter.
-func (dhs distributedSolver) Wait(ctx context.Context, challenge acme.Challenge) error {
-	if waiter, ok := dhs.solver.(acmez.Waiter); ok {
-		return waiter.Wait(ctx, challenge)
-	}
-	return nil
-}
-
-// CleanUp invokes the underlying solver's CleanUp method
-// and also cleans up any assets saved to storage.
-func (dhs distributedSolver) CleanUp(ctx context.Context, chal acme.Challenge) error {
-	err := dhs.storage.Delete(dhs.challengeTokensKey(challengeKey(chal)))
-	if err != nil {
-		return err
-	}
-	err = dhs.solver.CleanUp(ctx, chal)
-	if err != nil {
-		return fmt.Errorf("cleaning up embedded provider: %v", err)
-	}
-	return nil
-}
-
-// challengeTokensPrefix returns the key prefix for challenge info.
-func (dhs distributedSolver) challengeTokensPrefix() string {
-	return path.Join(dhs.storageKeyIssuerPrefix, "challenge_tokens")
-}
-
-// challengeTokensKey returns the key to use to store and access
-// challenge info for domain.
-func (dhs distributedSolver) challengeTokensKey(domain string) string {
-	return path.Join(dhs.challengeTokensPrefix(), StorageKeys.Safe(domain)+".json")
-}
-
-// solverInfo associates a listener with the
-// number of challenges currently using it.
-type solverInfo struct {
-	closed   int32 // accessed atomically
-	count    int
-	listener net.Listener
-	done     chan struct{} // used to signal when our own solver server is done
-}
-
-// getSolverInfo gets a valid solverInfo struct for address.
-func getSolverInfo(address string) *solverInfo {
-	si, ok := solvers[address]
-	if !ok {
-		si = &solverInfo{done: make(chan struct{})}
-		solvers[address] = si
-	}
-	return si
-}
-
-// robustTryListen calls net.Listen for a TCP socket at addr.
-// This function may return both a nil listener and a nil error!
-// If it was able to bind the socket, it returns the listener
-// and no error. If it wasn't able to bind the socket because
-// the socket is already in use, then it returns a nil listener
-// and nil error. If it had any other error, it returns the
-// error. The intended error handling logic for this function
-// is to proceed if the returned listener is not nil; otherwise
-// return err (which may also be nil). In other words, this
-// function ignores errors if the socket is already in use,
-// which is useful for our challenge servers, where we assume
-// that whatever is already listening can solve the challenges.
-func robustTryListen(addr string) (net.Listener, error) {
-	var listenErr error
-	for i := 0; i < 2; i++ {
-		// doesn't hurt to sleep briefly before the second
-		// attempt in case the OS has timing issues
-		if i > 0 {
-			time.Sleep(100 * time.Millisecond)
-		}
-
-		// if we can bind the socket right away, great!
-		var ln net.Listener
-		ln, listenErr = net.Listen("tcp", addr)
-		if listenErr == nil {
-			return ln, nil
-		}
-
-		// if it failed just because the socket is already in use, we
-		// have no choice but to assume that whatever is using the socket
-		// can answer the challenge already, so we ignore the error
-		connectErr := dialTCPSocket(addr)
-		if connectErr == nil {
-			return nil, nil
-		}
-
-		// hmm, we couldn't connect to the socket, so something else must
-		// be wrong, right? wrong!! we've had reports across multiple OSes
-		// now that sometimes connections fail even though the OS told us
-		// that the address was already in use; either the listener is
-		// fluctuating between open and closed very, very quickly, or the
-		// OS is inconsistent and contradicting itself; I have been unable
-		// to reproduce this, so I'm now resorting to hard-coding substring
-		// matching in error messages as a really hacky and unreliable
-		// safeguard against this, until we can identify exactly what was
-		// happening; see the following threads for more info:
-		// https://caddy.community/t/caddy-retry-error/7317
-		// https://caddy.community/t/v2-upgrade-to-caddy2-failing-with-errors/7423
-		if strings.Contains(listenErr.Error(), "address already in use") ||
-			strings.Contains(listenErr.Error(), "one usage of each socket address") {
-			log.Printf("[WARNING] OS reports a contradiction: %v - but we cannot connect to it, with this error: %v; continuing anyway 🤞 (I don't know what causes this... if you do, please help?)", listenErr, connectErr)
-			return nil, nil
-		}
-	}
-	return nil, fmt.Errorf("could not start listener for challenge server at %s: %v", addr, listenErr)
-}
-
-// dialTCPSocket connects to a TCP address just for the sake of
-// seeing if it is open. It returns a nil error if a TCP connection
-// can successfully be made to addr within a short timeout.
-func dialTCPSocket(addr string) error {
-	conn, err := net.DialTimeout("tcp", addr, 250*time.Millisecond)
-	if err == nil {
-		conn.Close()
-	}
-	return err
-}
-
-// GetACMEChallenge returns an active ACME challenge for the given identifier,
-// or false if no active challenge for that identifier is known.
-func GetACMEChallenge(identifier string) (Challenge, bool) {
-	activeChallengesMu.Lock()
-	chalData, ok := activeChallenges[identifier]
-	activeChallengesMu.Unlock()
-	return chalData, ok
-}
-
-// The active challenge solvers, keyed by listener address,
-// and protected by a mutex. Note that the creation of
-// solver listeners and the incrementing of their counts
-// are atomic operations guarded by this mutex.
-var (
-	solvers   = make(map[string]*solverInfo)
-	solversMu sync.Mutex
-)
-
-// activeChallenges holds information about all known, currently-active
-// ACME challenges, keyed by identifier. CertMagic guarantees that
-// challenges for the same identifier do not overlap, by its locking
-// mechanisms; thus if a challenge comes in for a certain identifier,
-// we can be confident that if this process initiated the challenge,
-// the correct information to solve it is in this map. (It may have
-// alternatively been initiated by another instance in a cluster, in
-// which case the distributed solver will take care of that.)
-var ( - activeChallenges = make(map[string]Challenge) - activeChallengesMu sync.Mutex -) - -// Challenge is an ACME challenge, but optionally paired with -// data that can make it easier or more efficient to solve. -type Challenge struct { - acme.Challenge - data interface{} -} - -// challengeKey returns the map key for a given challenge; it is the identifier -// unless it is an IP address using the TLS-ALPN challenge. -func challengeKey(chal acme.Challenge) string { - if chal.Type == acme.ChallengeTypeTLSALPN01 && chal.Identifier.Type == "ip" { - reversed, err := dns.ReverseAddr(chal.Identifier.Value) - if err == nil { - return reversed[:len(reversed)-1] // strip off '.' - } - } - return chal.Identifier.Value -} - -// solverWrapper should be used to wrap all challenge solvers so that -// we can add the challenge info to memory; this makes challenges globally -// solvable by a single HTTP or TLS server even if multiple servers with -// different configurations/scopes need to get certificates. -type solverWrapper struct{ acmez.Solver } - -func (sw solverWrapper) Present(ctx context.Context, chal acme.Challenge) error { - activeChallengesMu.Lock() - activeChallenges[challengeKey(chal)] = Challenge{Challenge: chal} - activeChallengesMu.Unlock() - return sw.Solver.Present(ctx, chal) -} - -func (sw solverWrapper) Wait(ctx context.Context, chal acme.Challenge) error { - if waiter, ok := sw.Solver.(acmez.Waiter); ok { - return waiter.Wait(ctx, chal) - } - return nil -} - -func (sw solverWrapper) CleanUp(ctx context.Context, chal acme.Challenge) error { - activeChallengesMu.Lock() - delete(activeChallenges, challengeKey(chal)) - activeChallengesMu.Unlock() - return sw.Solver.CleanUp(ctx, chal) -} - -// Interface guards -var ( - _ acmez.Solver = (*solverWrapper)(nil) - _ acmez.Waiter = (*solverWrapper)(nil) - _ acmez.Waiter = (*distributedSolver)(nil) -) diff --git a/vendor/github.com/caddyserver/certmagic/storage.go b/vendor/github.com/caddyserver/certmagic/storage.go deleted file mode 100644 index 804a4740..00000000 --- a/vendor/github.com/caddyserver/certmagic/storage.go +++ /dev/null @@ -1,287 +0,0 @@ -// Copyright 2015 Matthew Holt -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package certmagic - -import ( - "context" - "path" - "regexp" - "strings" - "sync" - "time" - - "go.uber.org/zap" -) - -// Storage is a type that implements a key-value store. -// Keys are prefix-based, with forward slash '/' as separators -// and without a leading slash. -// -// Processes running in a cluster will wish to use the -// same Storage value (its implementation and configuration) -// in order to share certificates and other TLS resources -// with the cluster. -// -// The Load, Delete, List, and Stat methods should return -// ErrNotExist if the key does not exist. -// -// Implementations of Storage must be safe for concurrent use. -type Storage interface { - // Locker provides atomic synchronization - // operations, making Storage safe to share. - Locker - - // Store puts value at key. 
- Store(key string, value []byte) error - - // Load retrieves the value at key. - Load(key string) ([]byte, error) - - // Delete deletes key. An error should be - // returned only if the key still exists - // when the method returns. - Delete(key string) error - - // Exists returns true if the key exists - // and there was no error checking. - Exists(key string) bool - - // List returns all keys that match prefix. - // If recursive is true, non-terminal keys - // will be enumerated (i.e. "directories" - // should be walked); otherwise, only keys - // prefixed exactly by prefix will be listed. - List(prefix string, recursive bool) ([]string, error) - - // Stat returns information about key. - Stat(key string) (KeyInfo, error) -} - -// Locker facilitates synchronization of certificate tasks across -// machines and networks. -type Locker interface { - // Lock acquires the lock for key, blocking until the lock - // can be obtained or an error is returned. Note that, even - // after acquiring a lock, an idempotent operation may have - // already been performed by another process that acquired - // the lock before - so always check to make sure idempotent - // operations still need to be performed after acquiring the - // lock. - // - // The actual implementation of obtaining of a lock must be - // an atomic operation so that multiple Lock calls at the - // same time always results in only one caller receiving the - // lock at any given time. - // - // To prevent deadlocks, all implementations (where this concern - // is relevant) should put a reasonable expiration on the lock in - // case Unlock is unable to be called due to some sort of network - // failure or system crash. Additionally, implementations should - // honor context cancellation as much as possible (in case the - // caller wishes to give up and free resources before the lock - // can be obtained). - Lock(ctx context.Context, key string) error - - // Unlock releases the lock for key. This method must ONLY be - // called after a successful call to Lock, and only after the - // critical section is finished, even if it errored or timed - // out. Unlock cleans up any resources allocated during Lock. - Unlock(key string) error -} - -// KeyInfo holds information about a key in storage. -// Key and IsTerminal are required; Modified and Size -// are optional if the storage implementation is not -// able to get that information. Setting them will -// make certain operations more consistent or -// predictable, but it is not crucial to basic -// functionality. -type KeyInfo struct { - Key string - Modified time.Time - Size int64 - IsTerminal bool // false for keys that only contain other keys (like directories) -} - -// storeTx stores all the values or none at all. -func storeTx(s Storage, all []keyValue) error { - for i, kv := range all { - err := s.Store(kv.key, kv.value) - if err != nil { - for j := i - 1; j >= 0; j-- { - s.Delete(all[j].key) - } - return err - } - } - return nil -} - -// keyValue pairs a key and a value. -type keyValue struct { - key string - value []byte -} - -// KeyBuilder provides a namespace for methods that -// build keys and key prefixes, for addressing items -// in a Storage implementation. -type KeyBuilder struct{} - -// CertsPrefix returns the storage key prefix for -// the given certificate issuer. 
-func (keys KeyBuilder) CertsPrefix(issuerKey string) string { - return path.Join(prefixCerts, keys.Safe(issuerKey)) -} - -// CertsSitePrefix returns a key prefix for items associated with -// the site given by domain using the given issuer key. -func (keys KeyBuilder) CertsSitePrefix(issuerKey, domain string) string { - return path.Join(keys.CertsPrefix(issuerKey), keys.Safe(domain)) -} - -// SiteCert returns the path to the certificate file for domain -// that is associated with the issuer with the given issuerKey. -func (keys KeyBuilder) SiteCert(issuerKey, domain string) string { - safeDomain := keys.Safe(domain) - return path.Join(keys.CertsSitePrefix(issuerKey, domain), safeDomain+".crt") -} - -// SitePrivateKey returns the path to the private key file for domain -// that is associated with the certificate from the given issuer with -// the given issuerKey. -func (keys KeyBuilder) SitePrivateKey(issuerKey, domain string) string { - safeDomain := keys.Safe(domain) - return path.Join(keys.CertsSitePrefix(issuerKey, domain), safeDomain+".key") -} - -// SiteMeta returns the path to the metadata file for domain that -// is associated with the certificate from the given issuer with -// the given issuerKey. -func (keys KeyBuilder) SiteMeta(issuerKey, domain string) string { - safeDomain := keys.Safe(domain) - return path.Join(keys.CertsSitePrefix(issuerKey, domain), safeDomain+".json") -} - -// OCSPStaple returns a key for the OCSP staple associated -// with the given certificate. If you have the PEM bundle -// handy, pass that in to save an extra encoding step. -func (keys KeyBuilder) OCSPStaple(cert *Certificate, pemBundle []byte) string { - var ocspFileName string - if len(cert.Names) > 0 { - firstName := keys.Safe(cert.Names[0]) - ocspFileName = firstName + "-" - } - ocspFileName += fastHash(pemBundle) - return path.Join(prefixOCSP, ocspFileName) -} - -// Safe standardizes and sanitizes str for use as -// a single component of a storage key. This method -// is idempotent. -func (keys KeyBuilder) Safe(str string) string { - str = strings.ToLower(str) - str = strings.TrimSpace(str) - - // replace a few specific characters - repl := strings.NewReplacer( - " ", "_", - "+", "_plus_", - "*", "wildcard_", - ":", "-", - "..", "", // prevent directory traversal (regex allows single dots) - ) - str = repl.Replace(str) - - // finally remove all non-word characters - return safeKeyRE.ReplaceAllLiteralString(str, "") -} - -// CleanUpOwnLocks immediately cleans up all -// current locks obtained by this process. Since -// this does not cancel the operations that -// the locks are synchronizing, this should be -// called only immediately before process exit. -// Errors are only reported if a logger is given. 
-func CleanUpOwnLocks(logger *zap.Logger) { - locksMu.Lock() - defer locksMu.Unlock() - for lockKey, storage := range locks { - err := storage.Unlock(lockKey) - if err == nil { - delete(locks, lockKey) - } else if logger != nil { - logger.Error("unable to clean up lock in storage backend", - zap.Any("storage", storage), - zap.String("lock_key", lockKey), - zap.Error(err), - ) - } - } -} - -func acquireLock(ctx context.Context, storage Storage, lockKey string) error { - err := storage.Lock(ctx, lockKey) - if err == nil { - locksMu.Lock() - locks[lockKey] = storage - locksMu.Unlock() - } - return err -} - -func releaseLock(storage Storage, lockKey string) error { - err := storage.Unlock(lockKey) - if err == nil { - locksMu.Lock() - delete(locks, lockKey) - locksMu.Unlock() - } - return err -} - -// locks stores a reference to all the current -// locks obtained by this process. -var locks = make(map[string]Storage) -var locksMu sync.Mutex - -// StorageKeys provides methods for accessing -// keys and key prefixes for items in a Storage. -// Typically, you will not need to use this -// because accessing storage is abstracted away -// for most cases. Only use this if you need to -// directly access TLS assets in your application. -var StorageKeys KeyBuilder - -const ( - prefixCerts = "certificates" - prefixOCSP = "ocsp" -) - -// safeKeyRE matches any undesirable characters in storage keys. -// Note that this allows dots, so you'll have to strip ".." manually. -var safeKeyRE = regexp.MustCompile(`[^\w@.-]`) - -// ErrNotExist is returned by Storage implementations when -// a resource is not found. It is similar to os.IsNotExist -// except this is a type, not a variable. -// TODO: use new Go error wrapping conventions -type ErrNotExist interface { - error -} - -// defaultFileStorage is a convenient, default storage -// implementation using the local file system. -var defaultFileStorage = &FileStorage{Path: dataDir()} diff --git a/vendor/github.com/cespare/xxhash/LICENSE.txt b/vendor/github.com/cespare/xxhash/LICENSE.txt deleted file mode 100644 index 24b53065..00000000 --- a/vendor/github.com/cespare/xxhash/LICENSE.txt +++ /dev/null @@ -1,22 +0,0 @@ -Copyright (c) 2016 Caleb Spare - -MIT License - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
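For readers tracking what this deletion removes from the vendor tree: the `Storage`/`Locker` contract in the storage.go above is CertMagic's main extension point. As a rough illustration of its semantics only — not Candy's or CertMagic's actual implementation; `memStorage` and `newMemStorage` are names invented for this sketch, and `List`/`Stat` are omitted for brevity — a process-local, in-memory version might look like this:

```go
package storagesketch

import (
	"context"
	"fmt"
	"sync"
)

// memStorage sketches the Storage/Locker semantics: a key-value store
// plus named locks. Being process-local, it cannot coordinate a cluster,
// which is the whole point of the real interface; it only illustrates
// the method contract.
type memStorage struct {
	mu    sync.Mutex
	data  map[string][]byte
	locks map[string]chan struct{} // one channel per held lock; closed on Unlock
}

func newMemStorage() *memStorage {
	return &memStorage{
		data:  make(map[string][]byte),
		locks: make(map[string]chan struct{}),
	}
}

// Lock blocks until the named lock is acquired or ctx is done.
func (m *memStorage) Lock(ctx context.Context, key string) error {
	for {
		m.mu.Lock()
		ch, held := m.locks[key]
		if !held {
			m.locks[key] = make(chan struct{})
			m.mu.Unlock()
			return nil
		}
		m.mu.Unlock()
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-ch: // holder called Unlock; try to acquire again
		}
	}
}

// Unlock releases the named lock and wakes any waiters.
func (m *memStorage) Unlock(key string) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	ch, held := m.locks[key]
	if !held {
		return fmt.Errorf("%s: not locked", key)
	}
	delete(m.locks, key)
	close(ch)
	return nil
}

// Store puts value at key.
func (m *memStorage) Store(key string, value []byte) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.data[key] = append([]byte(nil), value...) // copy; callers may reuse value
	return nil
}

// Load retrieves the value at key, erroring if it does not exist.
func (m *memStorage) Load(key string) ([]byte, error) {
	m.mu.Lock()
	defer m.mu.Unlock()
	v, ok := m.data[key]
	if !ok {
		return nil, fmt.Errorf("%s: no such key", key) // the real interface returns ErrNotExist
	}
	return append([]byte(nil), v...), nil
}

// Exists reports whether key is present.
func (m *memStorage) Exists(key string) bool {
	m.mu.Lock()
	defer m.mu.Unlock()
	_, ok := m.data[key]
	return ok
}

// Delete removes key.
func (m *memStorage) Delete(key string) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	delete(m.data, key)
	return nil
}
```

The deadlock caveat in the deleted `Locker` docs applies to this sketch too: without an expiration, a crashed lock holder would wedge the key forever, which is why the real implementations put a reasonable expiry on locks.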
diff --git a/vendor/github.com/cespare/xxhash/README.md b/vendor/github.com/cespare/xxhash/README.md deleted file mode 100644 index 0982fd25..00000000 --- a/vendor/github.com/cespare/xxhash/README.md +++ /dev/null @@ -1,50 +0,0 @@ -# xxhash - -[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash) - -xxhash is a Go implementation of the 64-bit -[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a -high-quality hashing algorithm that is much faster than anything in the Go -standard library. - -The API is very small, taking its cue from the other hashing packages in the -standard library: - - $ go doc github.com/cespare/xxhash ! - package xxhash // import "github.com/cespare/xxhash" - - Package xxhash implements the 64-bit variant of xxHash (XXH64) as described - at http://cyan4973.github.io/xxHash/. - - func New() hash.Hash64 - func Sum64(b []byte) uint64 - func Sum64String(s string) uint64 - -This implementation provides a fast pure-Go implementation and an even faster -assembly implementation for amd64. - -## Benchmarks - -Here are some quick benchmarks comparing the pure-Go and assembly -implementations of Sum64 against another popular Go XXH64 implementation, -[github.com/OneOfOne/xxhash](https://github.com/OneOfOne/xxhash): - -| input size | OneOfOne | cespare (purego) | cespare | -| --- | --- | --- | --- | -| 5 B | 416 MB/s | 720 MB/s | 872 MB/s | -| 100 B | 3980 MB/s | 5013 MB/s | 5252 MB/s | -| 4 KB | 12727 MB/s | 12999 MB/s | 13026 MB/s | -| 10 MB | 9879 MB/s | 10775 MB/s | 10913 MB/s | - -These numbers were generated with: - -``` -$ go test -benchtime 10s -bench '/OneOfOne,' -$ go test -tags purego -benchtime 10s -bench '/xxhash,' -$ go test -benchtime 10s -bench '/xxhash,' -``` - -## Projects using this package - -- [InfluxDB](https://github.com/influxdata/influxdb) -- [Prometheus](https://github.com/prometheus/prometheus) diff --git a/vendor/github.com/cespare/xxhash/rotate.go b/vendor/github.com/cespare/xxhash/rotate.go deleted file mode 100644 index f3eac5eb..00000000 --- a/vendor/github.com/cespare/xxhash/rotate.go +++ /dev/null @@ -1,14 +0,0 @@ -// +build !go1.9 - -package xxhash - -// TODO(caleb): After Go 1.10 comes out, remove this fallback code. 
- -func rol1(x uint64) uint64 { return (x << 1) | (x >> (64 - 1)) } -func rol7(x uint64) uint64 { return (x << 7) | (x >> (64 - 7)) } -func rol11(x uint64) uint64 { return (x << 11) | (x >> (64 - 11)) } -func rol12(x uint64) uint64 { return (x << 12) | (x >> (64 - 12)) } -func rol18(x uint64) uint64 { return (x << 18) | (x >> (64 - 18)) } -func rol23(x uint64) uint64 { return (x << 23) | (x >> (64 - 23)) } -func rol27(x uint64) uint64 { return (x << 27) | (x >> (64 - 27)) } -func rol31(x uint64) uint64 { return (x << 31) | (x >> (64 - 31)) } diff --git a/vendor/github.com/cespare/xxhash/rotate19.go b/vendor/github.com/cespare/xxhash/rotate19.go deleted file mode 100644 index b99612ba..00000000 --- a/vendor/github.com/cespare/xxhash/rotate19.go +++ /dev/null @@ -1,14 +0,0 @@ -// +build go1.9 - -package xxhash - -import "math/bits" - -func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) } -func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) } -func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) } -func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) } -func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) } -func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) } -func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) } -func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) } diff --git a/vendor/github.com/cespare/xxhash/v2/.travis.yml b/vendor/github.com/cespare/xxhash/v2/.travis.yml deleted file mode 100644 index c516ea88..00000000 --- a/vendor/github.com/cespare/xxhash/v2/.travis.yml +++ /dev/null @@ -1,8 +0,0 @@ -language: go -go: - - "1.x" - - master -env: - - TAGS="" - - TAGS="-tags purego" -script: go test $TAGS -v ./... diff --git a/vendor/github.com/cespare/xxhash/v2/LICENSE.txt b/vendor/github.com/cespare/xxhash/v2/LICENSE.txt deleted file mode 100644 index 24b53065..00000000 --- a/vendor/github.com/cespare/xxhash/v2/LICENSE.txt +++ /dev/null @@ -1,22 +0,0 @@ -Copyright (c) 2016 Caleb Spare - -MIT License - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
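The last vendored dependency dropped below is cespare/xxhash (v1 and v2). Its API surface is tiny — the README that follows lists it in full — so as a reference point for anyone who was depending on the vendored copy, here is a minimal usage sketch of both the one-shot and streaming forms, against the v2 module path:

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	// One-shot hashing.
	fmt.Println(xxhash.Sum64([]byte("hello"))) // hash of a byte slice
	fmt.Println(xxhash.Sum64String("hello"))   // same hash, avoids a []byte copy

	// Streaming: Digest implements hash.Hash64, so large inputs can be
	// fed incrementally and hashed once at the end.
	d := xxhash.New()
	d.WriteString("hello, ")
	d.Write([]byte("world"))
	fmt.Printf("%#x\n", d.Sum64())
}
```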
diff --git a/vendor/github.com/cespare/xxhash/v2/README.md b/vendor/github.com/cespare/xxhash/v2/README.md deleted file mode 100644 index 2fd8693c..00000000 --- a/vendor/github.com/cespare/xxhash/v2/README.md +++ /dev/null @@ -1,67 +0,0 @@ -# xxhash - -[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash) -[![Build Status](https://travis-ci.org/cespare/xxhash.svg?branch=master)](https://travis-ci.org/cespare/xxhash) - -xxhash is a Go implementation of the 64-bit -[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a -high-quality hashing algorithm that is much faster than anything in the Go -standard library. - -This package provides a straightforward API: - -``` -func Sum64(b []byte) uint64 -func Sum64String(s string) uint64 -type Digest struct{ ... } - func New() *Digest -``` - -The `Digest` type implements hash.Hash64. Its key methods are: - -``` -func (*Digest) Write([]byte) (int, error) -func (*Digest) WriteString(string) (int, error) -func (*Digest) Sum64() uint64 -``` - -This implementation provides a fast pure-Go implementation and an even faster -assembly implementation for amd64. - -## Compatibility - -This package is in a module and the latest code is in version 2 of the module. -You need a version of Go with at least "minimal module compatibility" to use -github.com/cespare/xxhash/v2: - -* 1.9.7+ for Go 1.9 -* 1.10.3+ for Go 1.10 -* Go 1.11 or later - -I recommend using the latest release of Go. - -## Benchmarks - -Here are some quick benchmarks comparing the pure-Go and assembly -implementations of Sum64. - -| input size | purego | asm | -| --- | --- | --- | -| 5 B | 979.66 MB/s | 1291.17 MB/s | -| 100 B | 7475.26 MB/s | 7973.40 MB/s | -| 4 KB | 17573.46 MB/s | 17602.65 MB/s | -| 10 MB | 17131.46 MB/s | 17142.16 MB/s | - -These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using -the following commands under Go 1.11.2: - -``` -$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes' -$ go test -benchtime 10s -bench '/xxhash,direct,bytes' -``` - -## Projects using this package - -- [InfluxDB](https://github.com/influxdata/influxdb) -- [Prometheus](https://github.com/prometheus/prometheus) -- [FreeCache](https://github.com/coocood/freecache) diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash.go b/vendor/github.com/cespare/xxhash/v2/xxhash.go deleted file mode 100644 index db0b35fb..00000000 --- a/vendor/github.com/cespare/xxhash/v2/xxhash.go +++ /dev/null @@ -1,236 +0,0 @@ -// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described -// at http://cyan4973.github.io/xxHash/. -package xxhash - -import ( - "encoding/binary" - "errors" - "math/bits" -) - -const ( - prime1 uint64 = 11400714785074694791 - prime2 uint64 = 14029467366897019727 - prime3 uint64 = 1609587929392839161 - prime4 uint64 = 9650029242287828579 - prime5 uint64 = 2870177450012600261 -) - -// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where -// possible in the Go code is worth a small (but measurable) performance boost -// by avoiding some MOVQs. Vars are needed for the asm and also are useful for -// convenience in the Go code in a few places where we need to intentionally -// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the -// result overflows a uint64). -var ( - prime1v = prime1 - prime2v = prime2 - prime3v = prime3 - prime4v = prime4 - prime5v = prime5 -) - -// Digest implements hash.Hash64. 
-type Digest struct { - v1 uint64 - v2 uint64 - v3 uint64 - v4 uint64 - total uint64 - mem [32]byte - n int // how much of mem is used -} - -// New creates a new Digest that computes the 64-bit xxHash algorithm. -func New() *Digest { - var d Digest - d.Reset() - return &d -} - -// Reset clears the Digest's state so that it can be reused. -func (d *Digest) Reset() { - d.v1 = prime1v + prime2 - d.v2 = prime2 - d.v3 = 0 - d.v4 = -prime1v - d.total = 0 - d.n = 0 -} - -// Size always returns 8 bytes. -func (d *Digest) Size() int { return 8 } - -// BlockSize always returns 32 bytes. -func (d *Digest) BlockSize() int { return 32 } - -// Write adds more data to d. It always returns len(b), nil. -func (d *Digest) Write(b []byte) (n int, err error) { - n = len(b) - d.total += uint64(n) - - if d.n+n < 32 { - // This new data doesn't even fill the current block. - copy(d.mem[d.n:], b) - d.n += n - return - } - - if d.n > 0 { - // Finish off the partial block. - copy(d.mem[d.n:], b) - d.v1 = round(d.v1, u64(d.mem[0:8])) - d.v2 = round(d.v2, u64(d.mem[8:16])) - d.v3 = round(d.v3, u64(d.mem[16:24])) - d.v4 = round(d.v4, u64(d.mem[24:32])) - b = b[32-d.n:] - d.n = 0 - } - - if len(b) >= 32 { - // One or more full blocks left. - nw := writeBlocks(d, b) - b = b[nw:] - } - - // Store any remaining partial block. - copy(d.mem[:], b) - d.n = len(b) - - return -} - -// Sum appends the current hash to b and returns the resulting slice. -func (d *Digest) Sum(b []byte) []byte { - s := d.Sum64() - return append( - b, - byte(s>>56), - byte(s>>48), - byte(s>>40), - byte(s>>32), - byte(s>>24), - byte(s>>16), - byte(s>>8), - byte(s), - ) -} - -// Sum64 returns the current hash. -func (d *Digest) Sum64() uint64 { - var h uint64 - - if d.total >= 32 { - v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 - h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) - h = mergeRound(h, v1) - h = mergeRound(h, v2) - h = mergeRound(h, v3) - h = mergeRound(h, v4) - } else { - h = d.v3 + prime5 - } - - h += d.total - - i, end := 0, d.n - for ; i+8 <= end; i += 8 { - k1 := round(0, u64(d.mem[i:i+8])) - h ^= k1 - h = rol27(h)*prime1 + prime4 - } - if i+4 <= end { - h ^= uint64(u32(d.mem[i:i+4])) * prime1 - h = rol23(h)*prime2 + prime3 - i += 4 - } - for i < end { - h ^= uint64(d.mem[i]) * prime5 - h = rol11(h) * prime1 - i++ - } - - h ^= h >> 33 - h *= prime2 - h ^= h >> 29 - h *= prime3 - h ^= h >> 32 - - return h -} - -const ( - magic = "xxh\x06" - marshaledSize = len(magic) + 8*5 + 32 -) - -// MarshalBinary implements the encoding.BinaryMarshaler interface. -func (d *Digest) MarshalBinary() ([]byte, error) { - b := make([]byte, 0, marshaledSize) - b = append(b, magic...) - b = appendUint64(b, d.v1) - b = appendUint64(b, d.v2) - b = appendUint64(b, d.v3) - b = appendUint64(b, d.v4) - b = appendUint64(b, d.total) - b = append(b, d.mem[:d.n]...) - b = b[:len(b)+len(d.mem)-d.n] - return b, nil -} - -// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. 
-func (d *Digest) UnmarshalBinary(b []byte) error { - if len(b) < len(magic) || string(b[:len(magic)]) != magic { - return errors.New("xxhash: invalid hash state identifier") - } - if len(b) != marshaledSize { - return errors.New("xxhash: invalid hash state size") - } - b = b[len(magic):] - b, d.v1 = consumeUint64(b) - b, d.v2 = consumeUint64(b) - b, d.v3 = consumeUint64(b) - b, d.v4 = consumeUint64(b) - b, d.total = consumeUint64(b) - copy(d.mem[:], b) - b = b[len(d.mem):] - d.n = int(d.total % uint64(len(d.mem))) - return nil -} - -func appendUint64(b []byte, x uint64) []byte { - var a [8]byte - binary.LittleEndian.PutUint64(a[:], x) - return append(b, a[:]...) -} - -func consumeUint64(b []byte) ([]byte, uint64) { - x := u64(b) - return b[8:], x -} - -func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) } -func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) } - -func round(acc, input uint64) uint64 { - acc += input * prime2 - acc = rol31(acc) - acc *= prime1 - return acc -} - -func mergeRound(acc, val uint64) uint64 { - val = round(0, val) - acc ^= val - acc = acc*prime1 + prime4 - return acc -} - -func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) } -func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) } -func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) } -func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) } -func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) } -func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) } -func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) } -func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) } diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go deleted file mode 100644 index ad14b807..00000000 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build !appengine -// +build gc -// +build !purego - -package xxhash - -// Sum64 computes the 64-bit xxHash digest of b. -// -//go:noescape -func Sum64(b []byte) uint64 - -//go:noescape -func writeBlocks(d *Digest, b []byte) int diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s deleted file mode 100644 index d580e32a..00000000 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s +++ /dev/null @@ -1,215 +0,0 @@ -// +build !appengine -// +build gc -// +build !purego - -#include "textflag.h" - -// Register allocation: -// AX h -// CX pointer to advance through b -// DX n -// BX loop end -// R8 v1, k1 -// R9 v2 -// R10 v3 -// R11 v4 -// R12 tmp -// R13 prime1v -// R14 prime2v -// R15 prime4v - -// round reads from and advances the buffer pointer in CX. -// It assumes that R13 has prime1v and R14 has prime2v. -#define round(r) \ - MOVQ (CX), R12 \ - ADDQ $8, CX \ - IMULQ R14, R12 \ - ADDQ R12, r \ - ROLQ $31, r \ - IMULQ R13, r - -// mergeRound applies a merge round on the two registers acc and val. -// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v. -#define mergeRound(acc, val) \ - IMULQ R14, val \ - ROLQ $31, val \ - IMULQ R13, val \ - XORQ val, acc \ - IMULQ R13, acc \ - ADDQ R15, acc - -// func Sum64(b []byte) uint64 -TEXT ·Sum64(SB), NOSPLIT, $0-32 - // Load fixed primes. - MOVQ ·prime1v(SB), R13 - MOVQ ·prime2v(SB), R14 - MOVQ ·prime4v(SB), R15 - - // Load slice. - MOVQ b_base+0(FP), CX - MOVQ b_len+8(FP), DX - LEAQ (CX)(DX*1), BX - - // The first loop limit will be len(b)-32. 
- SUBQ $32, BX - - // Check whether we have at least one block. - CMPQ DX, $32 - JLT noBlocks - - // Set up initial state (v1, v2, v3, v4). - MOVQ R13, R8 - ADDQ R14, R8 - MOVQ R14, R9 - XORQ R10, R10 - XORQ R11, R11 - SUBQ R13, R11 - - // Loop until CX > BX. -blockLoop: - round(R8) - round(R9) - round(R10) - round(R11) - - CMPQ CX, BX - JLE blockLoop - - MOVQ R8, AX - ROLQ $1, AX - MOVQ R9, R12 - ROLQ $7, R12 - ADDQ R12, AX - MOVQ R10, R12 - ROLQ $12, R12 - ADDQ R12, AX - MOVQ R11, R12 - ROLQ $18, R12 - ADDQ R12, AX - - mergeRound(AX, R8) - mergeRound(AX, R9) - mergeRound(AX, R10) - mergeRound(AX, R11) - - JMP afterBlocks - -noBlocks: - MOVQ ·prime5v(SB), AX - -afterBlocks: - ADDQ DX, AX - - // Right now BX has len(b)-32, and we want to loop until CX > len(b)-8. - ADDQ $24, BX - - CMPQ CX, BX - JG fourByte - -wordLoop: - // Calculate k1. - MOVQ (CX), R8 - ADDQ $8, CX - IMULQ R14, R8 - ROLQ $31, R8 - IMULQ R13, R8 - - XORQ R8, AX - ROLQ $27, AX - IMULQ R13, AX - ADDQ R15, AX - - CMPQ CX, BX - JLE wordLoop - -fourByte: - ADDQ $4, BX - CMPQ CX, BX - JG singles - - MOVL (CX), R8 - ADDQ $4, CX - IMULQ R13, R8 - XORQ R8, AX - - ROLQ $23, AX - IMULQ R14, AX - ADDQ ·prime3v(SB), AX - -singles: - ADDQ $4, BX - CMPQ CX, BX - JGE finalize - -singlesLoop: - MOVBQZX (CX), R12 - ADDQ $1, CX - IMULQ ·prime5v(SB), R12 - XORQ R12, AX - - ROLQ $11, AX - IMULQ R13, AX - - CMPQ CX, BX - JL singlesLoop - -finalize: - MOVQ AX, R12 - SHRQ $33, R12 - XORQ R12, AX - IMULQ R14, AX - MOVQ AX, R12 - SHRQ $29, R12 - XORQ R12, AX - IMULQ ·prime3v(SB), AX - MOVQ AX, R12 - SHRQ $32, R12 - XORQ R12, AX - - MOVQ AX, ret+24(FP) - RET - -// writeBlocks uses the same registers as above except that it uses AX to store -// the d pointer. - -// func writeBlocks(d *Digest, b []byte) int -TEXT ·writeBlocks(SB), NOSPLIT, $0-40 - // Load fixed primes needed for round. - MOVQ ·prime1v(SB), R13 - MOVQ ·prime2v(SB), R14 - - // Load slice. - MOVQ b_base+8(FP), CX - MOVQ b_len+16(FP), DX - LEAQ (CX)(DX*1), BX - SUBQ $32, BX - - // Load vN from d. - MOVQ d+0(FP), AX - MOVQ 0(AX), R8 // v1 - MOVQ 8(AX), R9 // v2 - MOVQ 16(AX), R10 // v3 - MOVQ 24(AX), R11 // v4 - - // We don't need to check the loop condition here; this function is - // always called with at least one block of data to process. -blockLoop: - round(R8) - round(R9) - round(R10) - round(R11) - - CMPQ CX, BX - JLE blockLoop - - // Copy vN back to d. - MOVQ R8, 0(AX) - MOVQ R9, 8(AX) - MOVQ R10, 16(AX) - MOVQ R11, 24(AX) - - // The number of bytes written is CX minus the old base pointer. - SUBQ b_base+8(FP), CX - MOVQ CX, ret+32(FP) - - RET diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go deleted file mode 100644 index 4a5a8216..00000000 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go +++ /dev/null @@ -1,76 +0,0 @@ -// +build !amd64 appengine !gc purego - -package xxhash - -// Sum64 computes the 64-bit xxHash digest of b. -func Sum64(b []byte) uint64 { - // A simpler version would be - // d := New() - // d.Write(b) - // return d.Sum64() - // but this is faster, particularly for small inputs. 
- - n := len(b) - var h uint64 - - if n >= 32 { - v1 := prime1v + prime2 - v2 := prime2 - v3 := uint64(0) - v4 := -prime1v - for len(b) >= 32 { - v1 = round(v1, u64(b[0:8:len(b)])) - v2 = round(v2, u64(b[8:16:len(b)])) - v3 = round(v3, u64(b[16:24:len(b)])) - v4 = round(v4, u64(b[24:32:len(b)])) - b = b[32:len(b):len(b)] - } - h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) - h = mergeRound(h, v1) - h = mergeRound(h, v2) - h = mergeRound(h, v3) - h = mergeRound(h, v4) - } else { - h = prime5 - } - - h += uint64(n) - - i, end := 0, len(b) - for ; i+8 <= end; i += 8 { - k1 := round(0, u64(b[i:i+8:len(b)])) - h ^= k1 - h = rol27(h)*prime1 + prime4 - } - if i+4 <= end { - h ^= uint64(u32(b[i:i+4:len(b)])) * prime1 - h = rol23(h)*prime2 + prime3 - i += 4 - } - for ; i < end; i++ { - h ^= uint64(b[i]) * prime5 - h = rol11(h) * prime1 - } - - h ^= h >> 33 - h *= prime2 - h ^= h >> 29 - h *= prime3 - h ^= h >> 32 - - return h -} - -func writeBlocks(d *Digest, b []byte) int { - v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 - n := len(b) - for len(b) >= 32 { - v1 = round(v1, u64(b[0:8:len(b)])) - v2 = round(v2, u64(b[8:16:len(b)])) - v3 = round(v3, u64(b[16:24:len(b)])) - v4 = round(v4, u64(b[24:32:len(b)])) - b = b[32:len(b):len(b)] - } - d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4 - return n - len(b) -} diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go deleted file mode 100644 index fc9bea7a..00000000 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build appengine - -// This file contains the safe implementations of otherwise unsafe-using code. - -package xxhash - -// Sum64String computes the 64-bit xxHash digest of s. -func Sum64String(s string) uint64 { - return Sum64([]byte(s)) -} - -// WriteString adds more data to d. It always returns len(s), nil. -func (d *Digest) WriteString(s string) (n int, err error) { - return d.Write([]byte(s)) -} diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go deleted file mode 100644 index 53bf76ef..00000000 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go +++ /dev/null @@ -1,46 +0,0 @@ -// +build !appengine - -// This file encapsulates usage of unsafe. -// xxhash_safe.go contains the safe implementations. - -package xxhash - -import ( - "reflect" - "unsafe" -) - -// Notes: -// -// See https://groups.google.com/d/msg/golang-nuts/dcjzJy-bSpw/tcZYBzQqAQAJ -// for some discussion about these unsafe conversions. -// -// In the future it's possible that compiler optimizations will make these -// unsafe operations unnecessary: https://golang.org/issue/2205. -// -// Both of these wrapper functions still incur function call overhead since they -// will not be inlined. We could write Go/asm copies of Sum64 and Digest.Write -// for strings to squeeze out a bit more speed. Mid-stack inlining should -// eventually fix this. - -// Sum64String computes the 64-bit xxHash digest of s. -// It may be faster than Sum64([]byte(s)) by avoiding a copy. -func Sum64String(s string) uint64 { - var b []byte - bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data - bh.Len = len(s) - bh.Cap = len(s) - return Sum64(b) -} - -// WriteString adds more data to d. It always returns len(s), nil. -// It may be faster than Write([]byte(s)) by avoiding a copy. 
-func (d *Digest) WriteString(s string) (n int, err error) { - var b []byte - bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data - bh.Len = len(s) - bh.Cap = len(s) - return d.Write(b) -} diff --git a/vendor/github.com/cespare/xxhash/xxhash.go b/vendor/github.com/cespare/xxhash/xxhash.go deleted file mode 100644 index f896bd28..00000000 --- a/vendor/github.com/cespare/xxhash/xxhash.go +++ /dev/null @@ -1,168 +0,0 @@ -// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described -// at http://cyan4973.github.io/xxHash/. -package xxhash - -import ( - "encoding/binary" - "hash" -) - -const ( - prime1 uint64 = 11400714785074694791 - prime2 uint64 = 14029467366897019727 - prime3 uint64 = 1609587929392839161 - prime4 uint64 = 9650029242287828579 - prime5 uint64 = 2870177450012600261 -) - -// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where -// possible in the Go code is worth a small (but measurable) performance boost -// by avoiding some MOVQs. Vars are needed for the asm and also are useful for -// convenience in the Go code in a few places where we need to intentionally -// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the -// result overflows a uint64). -var ( - prime1v = prime1 - prime2v = prime2 - prime3v = prime3 - prime4v = prime4 - prime5v = prime5 -) - -type xxh struct { - v1 uint64 - v2 uint64 - v3 uint64 - v4 uint64 - total int - mem [32]byte - n int // how much of mem is used -} - -// New creates a new hash.Hash64 that implements the 64-bit xxHash algorithm. -func New() hash.Hash64 { - var x xxh - x.Reset() - return &x -} - -func (x *xxh) Reset() { - x.n = 0 - x.total = 0 - x.v1 = prime1v + prime2 - x.v2 = prime2 - x.v3 = 0 - x.v4 = -prime1v -} - -func (x *xxh) Size() int { return 8 } -func (x *xxh) BlockSize() int { return 32 } - -// Write adds more data to x. It always returns len(b), nil. -func (x *xxh) Write(b []byte) (n int, err error) { - n = len(b) - x.total += len(b) - - if x.n+len(b) < 32 { - // This new data doesn't even fill the current block. - copy(x.mem[x.n:], b) - x.n += len(b) - return - } - - if x.n > 0 { - // Finish off the partial block. - copy(x.mem[x.n:], b) - x.v1 = round(x.v1, u64(x.mem[0:8])) - x.v2 = round(x.v2, u64(x.mem[8:16])) - x.v3 = round(x.v3, u64(x.mem[16:24])) - x.v4 = round(x.v4, u64(x.mem[24:32])) - b = b[32-x.n:] - x.n = 0 - } - - if len(b) >= 32 { - // One or more full blocks left. - b = writeBlocks(x, b) - } - - // Store any remaining partial block. 
- copy(x.mem[:], b) - x.n = len(b) - - return -} - -func (x *xxh) Sum(b []byte) []byte { - s := x.Sum64() - return append( - b, - byte(s>>56), - byte(s>>48), - byte(s>>40), - byte(s>>32), - byte(s>>24), - byte(s>>16), - byte(s>>8), - byte(s), - ) -} - -func (x *xxh) Sum64() uint64 { - var h uint64 - - if x.total >= 32 { - v1, v2, v3, v4 := x.v1, x.v2, x.v3, x.v4 - h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) - h = mergeRound(h, v1) - h = mergeRound(h, v2) - h = mergeRound(h, v3) - h = mergeRound(h, v4) - } else { - h = x.v3 + prime5 - } - - h += uint64(x.total) - - i, end := 0, x.n - for ; i+8 <= end; i += 8 { - k1 := round(0, u64(x.mem[i:i+8])) - h ^= k1 - h = rol27(h)*prime1 + prime4 - } - if i+4 <= end { - h ^= uint64(u32(x.mem[i:i+4])) * prime1 - h = rol23(h)*prime2 + prime3 - i += 4 - } - for i < end { - h ^= uint64(x.mem[i]) * prime5 - h = rol11(h) * prime1 - i++ - } - - h ^= h >> 33 - h *= prime2 - h ^= h >> 29 - h *= prime3 - h ^= h >> 32 - - return h -} - -func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) } -func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) } - -func round(acc, input uint64) uint64 { - acc += input * prime2 - acc = rol31(acc) - acc *= prime1 - return acc -} - -func mergeRound(acc, val uint64) uint64 { - val = round(0, val) - acc ^= val - acc = acc*prime1 + prime4 - return acc -} diff --git a/vendor/github.com/cespare/xxhash/xxhash_amd64.go b/vendor/github.com/cespare/xxhash/xxhash_amd64.go deleted file mode 100644 index d6176526..00000000 --- a/vendor/github.com/cespare/xxhash/xxhash_amd64.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build !appengine -// +build gc -// +build !purego - -package xxhash - -// Sum64 computes the 64-bit xxHash digest of b. -// -//go:noescape -func Sum64(b []byte) uint64 - -func writeBlocks(x *xxh, b []byte) []byte diff --git a/vendor/github.com/cespare/xxhash/xxhash_amd64.s b/vendor/github.com/cespare/xxhash/xxhash_amd64.s deleted file mode 100644 index 757f2011..00000000 --- a/vendor/github.com/cespare/xxhash/xxhash_amd64.s +++ /dev/null @@ -1,233 +0,0 @@ -// +build !appengine -// +build gc -// +build !purego - -#include "textflag.h" - -// Register allocation: -// AX h -// CX pointer to advance through b -// DX n -// BX loop end -// R8 v1, k1 -// R9 v2 -// R10 v3 -// R11 v4 -// R12 tmp -// R13 prime1v -// R14 prime2v -// R15 prime4v - -// round reads from and advances the buffer pointer in CX. -// It assumes that R13 has prime1v and R14 has prime2v. -#define round(r) \ - MOVQ (CX), R12 \ - ADDQ $8, CX \ - IMULQ R14, R12 \ - ADDQ R12, r \ - ROLQ $31, r \ - IMULQ R13, r - -// mergeRound applies a merge round on the two registers acc and val. -// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v. -#define mergeRound(acc, val) \ - IMULQ R14, val \ - ROLQ $31, val \ - IMULQ R13, val \ - XORQ val, acc \ - IMULQ R13, acc \ - ADDQ R15, acc - -// func Sum64(b []byte) uint64 -TEXT ·Sum64(SB), NOSPLIT, $0-32 - // Load fixed primes. - MOVQ ·prime1v(SB), R13 - MOVQ ·prime2v(SB), R14 - MOVQ ·prime4v(SB), R15 - - // Load slice. - MOVQ b_base+0(FP), CX - MOVQ b_len+8(FP), DX - LEAQ (CX)(DX*1), BX - - // The first loop limit will be len(b)-32. - SUBQ $32, BX - - // Check whether we have at least one block. - CMPQ DX, $32 - JLT noBlocks - - // Set up initial state (v1, v2, v3, v4). - MOVQ R13, R8 - ADDQ R14, R8 - MOVQ R14, R9 - XORQ R10, R10 - XORQ R11, R11 - SUBQ R13, R11 - - // Loop until CX > BX. 
-blockLoop: - round(R8) - round(R9) - round(R10) - round(R11) - - CMPQ CX, BX - JLE blockLoop - - MOVQ R8, AX - ROLQ $1, AX - MOVQ R9, R12 - ROLQ $7, R12 - ADDQ R12, AX - MOVQ R10, R12 - ROLQ $12, R12 - ADDQ R12, AX - MOVQ R11, R12 - ROLQ $18, R12 - ADDQ R12, AX - - mergeRound(AX, R8) - mergeRound(AX, R9) - mergeRound(AX, R10) - mergeRound(AX, R11) - - JMP afterBlocks - -noBlocks: - MOVQ ·prime5v(SB), AX - -afterBlocks: - ADDQ DX, AX - - // Right now BX has len(b)-32, and we want to loop until CX > len(b)-8. - ADDQ $24, BX - - CMPQ CX, BX - JG fourByte - -wordLoop: - // Calculate k1. - MOVQ (CX), R8 - ADDQ $8, CX - IMULQ R14, R8 - ROLQ $31, R8 - IMULQ R13, R8 - - XORQ R8, AX - ROLQ $27, AX - IMULQ R13, AX - ADDQ R15, AX - - CMPQ CX, BX - JLE wordLoop - -fourByte: - ADDQ $4, BX - CMPQ CX, BX - JG singles - - MOVL (CX), R8 - ADDQ $4, CX - IMULQ R13, R8 - XORQ R8, AX - - ROLQ $23, AX - IMULQ R14, AX - ADDQ ·prime3v(SB), AX - -singles: - ADDQ $4, BX - CMPQ CX, BX - JGE finalize - -singlesLoop: - MOVBQZX (CX), R12 - ADDQ $1, CX - IMULQ ·prime5v(SB), R12 - XORQ R12, AX - - ROLQ $11, AX - IMULQ R13, AX - - CMPQ CX, BX - JL singlesLoop - -finalize: - MOVQ AX, R12 - SHRQ $33, R12 - XORQ R12, AX - IMULQ R14, AX - MOVQ AX, R12 - SHRQ $29, R12 - XORQ R12, AX - IMULQ ·prime3v(SB), AX - MOVQ AX, R12 - SHRQ $32, R12 - XORQ R12, AX - - MOVQ AX, ret+24(FP) - RET - -// writeBlocks uses the same registers as above except that it uses AX to store -// the x pointer. - -// func writeBlocks(x *xxh, b []byte) []byte -TEXT ·writeBlocks(SB), NOSPLIT, $0-56 - // Load fixed primes needed for round. - MOVQ ·prime1v(SB), R13 - MOVQ ·prime2v(SB), R14 - - // Load slice. - MOVQ b_base+8(FP), CX - MOVQ CX, ret_base+32(FP) // initialize return base pointer; see NOTE below - MOVQ b_len+16(FP), DX - LEAQ (CX)(DX*1), BX - SUBQ $32, BX - - // Load vN from x. - MOVQ x+0(FP), AX - MOVQ 0(AX), R8 // v1 - MOVQ 8(AX), R9 // v2 - MOVQ 16(AX), R10 // v3 - MOVQ 24(AX), R11 // v4 - - // We don't need to check the loop condition here; this function is - // always called with at least one block of data to process. -blockLoop: - round(R8) - round(R9) - round(R10) - round(R11) - - CMPQ CX, BX - JLE blockLoop - - // Copy vN back to x. - MOVQ R8, 0(AX) - MOVQ R9, 8(AX) - MOVQ R10, 16(AX) - MOVQ R11, 24(AX) - - // Construct return slice. - // NOTE: It's important that we don't construct a slice that has a base - // pointer off the end of the original slice, as in Go 1.7+ this will - // cause runtime crashes. (See discussion in, for example, - // https://github.com/golang/go/issues/16772.) - // Therefore, we calculate the length/cap first, and if they're zero, we - // keep the old base. This is what the compiler does as well if you - // write code like - // b = b[len(b):] - - // New length is 32 - (CX - BX) -> BX+32 - CX. - ADDQ $32, BX - SUBQ CX, BX - JZ afterSetBase - - MOVQ CX, ret_base+32(FP) - -afterSetBase: - MOVQ BX, ret_len+40(FP) - MOVQ BX, ret_cap+48(FP) // set cap == len - - RET diff --git a/vendor/github.com/cespare/xxhash/xxhash_other.go b/vendor/github.com/cespare/xxhash/xxhash_other.go deleted file mode 100644 index c68d13f8..00000000 --- a/vendor/github.com/cespare/xxhash/xxhash_other.go +++ /dev/null @@ -1,75 +0,0 @@ -// +build !amd64 appengine !gc purego - -package xxhash - -// Sum64 computes the 64-bit xxHash digest of b. -func Sum64(b []byte) uint64 { - // A simpler version would be - // x := New() - // x.Write(b) - // return x.Sum64() - // but this is faster, particularly for small inputs. 
- - n := len(b) - var h uint64 - - if n >= 32 { - v1 := prime1v + prime2 - v2 := prime2 - v3 := uint64(0) - v4 := -prime1v - for len(b) >= 32 { - v1 = round(v1, u64(b[0:8:len(b)])) - v2 = round(v2, u64(b[8:16:len(b)])) - v3 = round(v3, u64(b[16:24:len(b)])) - v4 = round(v4, u64(b[24:32:len(b)])) - b = b[32:len(b):len(b)] - } - h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) - h = mergeRound(h, v1) - h = mergeRound(h, v2) - h = mergeRound(h, v3) - h = mergeRound(h, v4) - } else { - h = prime5 - } - - h += uint64(n) - - i, end := 0, len(b) - for ; i+8 <= end; i += 8 { - k1 := round(0, u64(b[i:i+8:len(b)])) - h ^= k1 - h = rol27(h)*prime1 + prime4 - } - if i+4 <= end { - h ^= uint64(u32(b[i:i+4:len(b)])) * prime1 - h = rol23(h)*prime2 + prime3 - i += 4 - } - for ; i < end; i++ { - h ^= uint64(b[i]) * prime5 - h = rol11(h) * prime1 - } - - h ^= h >> 33 - h *= prime2 - h ^= h >> 29 - h *= prime3 - h ^= h >> 32 - - return h -} - -func writeBlocks(x *xxh, b []byte) []byte { - v1, v2, v3, v4 := x.v1, x.v2, x.v3, x.v4 - for len(b) >= 32 { - v1 = round(v1, u64(b[0:8:len(b)])) - v2 = round(v2, u64(b[8:16:len(b)])) - v3 = round(v3, u64(b[16:24:len(b)])) - v4 = round(v4, u64(b[24:32:len(b)])) - b = b[32:len(b):len(b)] - } - x.v1, x.v2, x.v3, x.v4 = v1, v2, v3, v4 - return b -} diff --git a/vendor/github.com/cespare/xxhash/xxhash_safe.go b/vendor/github.com/cespare/xxhash/xxhash_safe.go deleted file mode 100644 index dfa15ab7..00000000 --- a/vendor/github.com/cespare/xxhash/xxhash_safe.go +++ /dev/null @@ -1,10 +0,0 @@ -// +build appengine - -// This file contains the safe implementations of otherwise unsafe-using code. - -package xxhash - -// Sum64String computes the 64-bit xxHash digest of s. -func Sum64String(s string) uint64 { - return Sum64([]byte(s)) -} diff --git a/vendor/github.com/cespare/xxhash/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/xxhash_unsafe.go deleted file mode 100644 index d2b64e8b..00000000 --- a/vendor/github.com/cespare/xxhash/xxhash_unsafe.go +++ /dev/null @@ -1,30 +0,0 @@ -// +build !appengine - -// This file encapsulates usage of unsafe. -// xxhash_safe.go contains the safe implementations. - -package xxhash - -import ( - "reflect" - "unsafe" -) - -// Sum64String computes the 64-bit xxHash digest of s. -// It may be faster than Sum64([]byte(s)) by avoiding a copy. -// -// TODO(caleb): Consider removing this if an optimization is ever added to make -// it unnecessary: https://golang.org/issue/2205. -// -// TODO(caleb): We still have a function call; we could instead write Go/asm -// copies of Sum64 for strings to squeeze out a bit more speed. -func Sum64String(s string) uint64 { - // See https://groups.google.com/d/msg/golang-nuts/dcjzJy-bSpw/tcZYBzQqAQAJ - // for some discussion about this unsafe conversion. 
- var b []byte - bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data - bh.Len = len(s) - bh.Cap = len(s) - return Sum64(b) -} diff --git a/vendor/github.com/cheekybits/genny/.gitignore b/vendor/github.com/cheekybits/genny/.gitignore deleted file mode 100644 index c62d148c..00000000 --- a/vendor/github.com/cheekybits/genny/.gitignore +++ /dev/null @@ -1,26 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof - -genny diff --git a/vendor/github.com/cheekybits/genny/.travis.yml b/vendor/github.com/cheekybits/genny/.travis.yml deleted file mode 100644 index 78ba5f2d..00000000 --- a/vendor/github.com/cheekybits/genny/.travis.yml +++ /dev/null @@ -1,6 +0,0 @@ -language: go - -go: - - 1.7 - - 1.8 - - 1.9 diff --git a/vendor/github.com/cheekybits/genny/LICENSE b/vendor/github.com/cheekybits/genny/LICENSE deleted file mode 100644 index 519d7f22..00000000 --- a/vendor/github.com/cheekybits/genny/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 cheekybits - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - diff --git a/vendor/github.com/cheekybits/genny/README.md b/vendor/github.com/cheekybits/genny/README.md deleted file mode 100644 index 64a28ac7..00000000 --- a/vendor/github.com/cheekybits/genny/README.md +++ /dev/null @@ -1,245 +0,0 @@ -# genny - Generics for Go - -[![Build Status](https://travis-ci.org/cheekybits/genny.svg?branch=master)](https://travis-ci.org/cheekybits/genny) [![GoDoc](https://godoc.org/github.com/cheekybits/genny/parse?status.png)](http://godoc.org/github.com/cheekybits/genny/parse) - -Install: - -``` -go get github.com/cheekybits/genny -``` - -===== - -(pron. Jenny) by Mat Ryer ([@matryer](https://twitter.com/matryer)) and Tyler Bunnell ([@TylerJBunnell](https://twitter.com/TylerJBunnell)). - -Until the Go core team include support for [generics in Go](http://golang.org/doc/faq#generics), `genny` is a code-generation generics solution. It allows you write normal buildable and testable Go code which, when processed by the `genny gen` tool, will replace the generics with specific types. 
- - * Generic code is valid Go code - * Generic code compiles and can be tested - * Use `stdin` and `stdout` or specify in and out files - * Supports Go 1.4's [go generate](http://tip.golang.org/doc/go1.4#gogenerate) - * Multiple specific types will generate every permutation - * Use `BUILTINS` and `NUMBERS` wildtype to generate specific code for all built-in (and number) Go types - * Function names and comments also get updated - -## Library - -We have started building a [library of common things](https://github.com/cheekybits/gennylib), and you can use `genny get` to generate the specific versions you need. - -For example: `genny get maps/concurrentmap.go "KeyType=BUILTINS ValueType=BUILTINS"` will print out generated code for all types for a concurrent map. Any file in the library may be generated locally in this way using all the same options given to `genny gen`. - -## Usage - -``` -genny [{flags}] gen "{types}" - -gen - generates type specific code from generic code. -get - fetch a generic template from the online library and gen it. - -{flags} - (optional) Command line flags (see below) -{types} - (required) Specific types for each generic type in the source -{types} format: {generic}={specific}[,another][ {generic2}={specific2}] - -Examples: - Generic=Specific - Generic1=Specific1 Generic2=Specific2 - Generic1=Specific1,Specific2 Generic2=Specific3,Specific4 - -Flags: - -in="": file to parse instead of stdin - -out="": file to save output to instead of stdout - -pkg="": package name for generated files -``` - - * Comma separated type lists will generate code for each type - -### Flags - - * `-in` - specify the input file (rather than using stdin) - * `-out` - specify the output file (rather than using stdout) - -### go generate - -To use Go 1.4's `go generate` capability, insert the following comment in your source code file: - -``` -//go:generate genny -in=$GOFILE -out=gen-$GOFILE gen "KeyType=string,int ValueType=string,int" -``` - - * Start the line with `//go:generate ` - * Use the `-in` and `-out` flags to specify the files to work on - * Use the `genny` command as usual after the flags - -Now, running `go generate` (in a shell) for the package will cause the generic versions of the files to be generated. - - * The output file will be overwritten, so it's safe to call `go generate` many times - * Use `$GOFILE` to refer to the current file - * The `//go:generate` line will be removed from the output - -To see a real example of how to use `genny` with `go generate`, look in the [example/go-generate directory](https://github.com/cheekybits/genny/tree/master/examples/go-generate). - -## How it works - -Define your generic types using the special `generic.Type` placeholder type: - -```go -type KeyType generic.Type -type ValueType generic.Type -``` - - * You can use as many as you like - * Give them meaningful names - -Then write the generic code referencing the types as your normally would: - -```go -func SetValueTypeForKeyType(key KeyType, value ValueType) { /* ... */ } -``` - - * Generic type names will also be replaced in comments and function names (see Real example below) - -Since `generic.Type` is a real Go type, your code will compile, and you can even write unit tests against your generic code. 
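Because the placeholders are ordinary Go types, tests can run against the generic source before `genny gen` is ever invoked. A minimal sketch reusing the `SetValueTypeForKeyType` example above; the map-backed body is hypothetical, added only so the test has something to assert:

```go
package kv

import (
	"testing"

	"github.com/cheekybits/genny/generic"
)

type KeyType generic.Type
type ValueType generic.Type

// Hypothetical body for the example function above, so the test has
// something to observe; the real body is elided in the README.
var store = map[KeyType]ValueType{}

func SetValueTypeForKeyType(key KeyType, value ValueType) { store[key] = value }

// The generic source compiles as-is, so it is testable before any
// generation step: KeyType and ValueType are interface{} here, and
// any concrete values will do.
func TestSetValueTypeForKeyType(t *testing.T) {
	SetValueTypeForKeyType(KeyType("answer"), ValueType(42))
	if got := store[KeyType("answer")]; got != ValueType(42) {
		t.Fatalf("got %v, want 42", got)
	}
}
```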
- -#### Generating specific versions - -Pass the file through the `genny gen` tool with the specific types as the argument: - -``` -cat generic.go | genny gen "KeyType=string ValueType=interface{}" -``` - -The output will be the complete Go source file with the generic types replaced with the types specified in the arguments. - -## Real example - -Given [this generic Go code](https://github.com/cheekybits/genny/tree/master/examples/queue) which compiles and is tested: - -```go -package queue - -import "github.com/cheekybits/genny/generic" - -// NOTE: this is how easy it is to define a generic type -type Something generic.Type - -// SomethingQueue is a queue of Somethings. -type SomethingQueue struct { - items []Something -} - -func NewSomethingQueue() *SomethingQueue { - return &SomethingQueue{items: make([]Something, 0)} -} -func (q *SomethingQueue) Push(item Something) { - q.items = append(q.items, item) -} -func (q *SomethingQueue) Pop() Something { - item := q.items[0] - q.items = q.items[1:] - return item -} -``` - -When `genny gen` is invoked like this: - -``` -cat source.go | genny gen "Something=string" -``` - -It outputs: - -```go -// This file was automatically generated by genny. -// Any changes will be lost if this file is regenerated. -// see https://github.com/cheekybits/genny - -package queue - -// StringQueue is a queue of Strings. -type StringQueue struct { - items []string -} - -func NewStringQueue() *StringQueue { - return &StringQueue{items: make([]string, 0)} -} -func (q *StringQueue) Push(item string) { - q.items = append(q.items, item) -} -func (q *StringQueue) Pop() string { - item := q.items[0] - q.items = q.items[1:] - return item -} -``` - -To get a _something_ for every built-in Go type plus one of your own types, you could run: - -``` -cat source.go | genny gen "Something=BUILTINS,*MyType" -``` - -#### More examples - -Check out the [test code files](https://github.com/cheekybits/genny/tree/master/parse/test) for more real examples. - -## Writing test code - -Once you have defined a generic type with some code worth testing: - -```go -package slice - -import ( - "log" - "reflect" - - "github.com/stretchr/gogen/generic" -) - -type MyType generic.Type - -func EnsureMyTypeSlice(objectOrSlice interface{}) []MyType { - log.Printf("%v", reflect.TypeOf(objectOrSlice)) - switch obj := objectOrSlice.(type) { - case []MyType: - log.Println(" returning it untouched") - return obj - case MyType: - log.Println(" wrapping in slice") - return []MyType{obj} - default: - panic("ensure slice needs MyType or []MyType") - } -} -``` - -You can treat it like any normal Go type in your test code: - -```go -func TestEnsureMyTypeSlice(t *testing.T) { - - myType := new(MyType) - slice := EnsureMyTypeSlice(myType) - if assert.NotNil(t, slice) { - assert.Equal(t, slice[0], myType) - } - - slice = EnsureMyTypeSlice(slice) - log.Printf("%#v", slice[0]) - if assert.NotNil(t, slice) { - assert.Equal(t, slice[0], myType) - } - -} -``` - -### Understanding what `generic.Type` is - -Because `generic.Type` is an empty interface type (literally `interface{}`) every other type will be considered to be a `generic.Type` if you are switching on the type of an object. Of course, once the specific versions are generated, this issue goes away but it's worth knowing when you are writing your tests against generic code. 
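A minimal sketch of that caveat (names are illustrative, not from the patch): before generation, a `case MyType:` arm matches every value, because `MyType` is literally `interface{}`:

```go
package main

import "fmt"

// MyType stands in for a genny placeholder: before generation it is
// literally interface{}, which every Go value satisfies.
type MyType interface{}

func describe(v interface{}) string {
	switch v.(type) {
	case []MyType:
		return "slice of MyType"
	case MyType:
		// Matches *any* value, not just "real" MyType values.
		return "single MyType"
	default:
		return "unreachable" // nothing gets here before generation
	}
}

func main() {
	fmt.Println(describe(42))           // "single MyType", although 42 is an int
	fmt.Println(describe([]MyType{42})) // "slice of MyType"
	fmt.Println(describe([]int{42}))    // "single MyType": []int is not []interface{}
}
```

Once `genny gen` substitutes a concrete type for `MyType`, the same switch discriminates normally, which is why the issue goes away in the generated code.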
- -### Contributions - - * See the [API documentation for the parse package](http://godoc.org/github.com/cheekybits/genny/parse) - * Please do TDD - * All input welcome diff --git a/vendor/github.com/cheekybits/genny/doc.go b/vendor/github.com/cheekybits/genny/doc.go deleted file mode 100644 index 4c31e22b..00000000 --- a/vendor/github.com/cheekybits/genny/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package main is the command line tool for Genny. -package main diff --git a/vendor/github.com/cheekybits/genny/generic/doc.go b/vendor/github.com/cheekybits/genny/generic/doc.go deleted file mode 100644 index 3bd6c869..00000000 --- a/vendor/github.com/cheekybits/genny/generic/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package generic contains the generic marker types. -package generic diff --git a/vendor/github.com/cheekybits/genny/generic/generic.go b/vendor/github.com/cheekybits/genny/generic/generic.go deleted file mode 100644 index 04a2306c..00000000 --- a/vendor/github.com/cheekybits/genny/generic/generic.go +++ /dev/null @@ -1,13 +0,0 @@ -package generic - -// Type is the placeholder type that indicates a generic value. -// When genny is executed, variables of this type will be replaced with -// references to the specific types. -// var GenericType generic.Type -type Type interface{} - -// Number is the placehoder type that indiccates a generic numerical value. -// When genny is executed, variables of this type will be replaced with -// references to the specific types. -// var GenericType generic.Number -type Number float64 diff --git a/vendor/github.com/cheekybits/genny/main.go b/vendor/github.com/cheekybits/genny/main.go deleted file mode 100644 index fe06a6c0..00000000 --- a/vendor/github.com/cheekybits/genny/main.go +++ /dev/null @@ -1,154 +0,0 @@ -package main - -import ( - "bytes" - "flag" - "fmt" - "io" - "io/ioutil" - "net/http" - "os" - "strings" - - "github.com/cheekybits/genny/out" - "github.com/cheekybits/genny/parse" -) - -/* - - source | genny gen [-in=""] [-out=""] [-pkg=""] "KeyType=string,int ValueType=string,int" - -*/ - -const ( - _ = iota - exitcodeInvalidArgs - exitcodeInvalidTypeSet - exitcodeStdinFailed - exitcodeGenFailed - exitcodeGetFailed - exitcodeSourceFileInvalid - exitcodeDestFileFailed -) - -func main() { - var ( - in = flag.String("in", "", "file to parse instead of stdin") - out = flag.String("out", "", "file to save output to instead of stdout") - pkgName = flag.String("pkg", "", "package name for generated files") - prefix = "https://github.com/metabition/gennylib/raw/master/" - ) - flag.Parse() - args := flag.Args() - - if len(args) < 2 { - usage() - os.Exit(exitcodeInvalidArgs) - } - - if strings.ToLower(args[0]) != "gen" && strings.ToLower(args[0]) != "get" { - usage() - os.Exit(exitcodeInvalidArgs) - } - - // parse the typesets - var setsArg = args[1] - if strings.ToLower(args[0]) == "get" { - setsArg = args[2] - } - typeSets, err := parse.TypeSet(setsArg) - if err != nil { - fatal(exitcodeInvalidTypeSet, err) - } - - outWriter := newWriter(*out) - - if strings.ToLower(args[0]) == "get" { - if len(args) != 3 { - fmt.Println("not enough arguments to get") - usage() - os.Exit(exitcodeInvalidArgs) - } - r, err := http.Get(prefix + args[1]) - if err != nil { - fatal(exitcodeGetFailed, err) - } - b, err := ioutil.ReadAll(r.Body) - if err != nil { - fatal(exitcodeGetFailed, err) - } - r.Body.Close() - br := bytes.NewReader(b) - err = gen(*in, *pkgName, br, typeSets, outWriter) - } else if len(*in) > 0 { - var file *os.File - file, err = os.Open(*in) - if err 
!= nil { - fatal(exitcodeSourceFileInvalid, err) - } - defer file.Close() - err = gen(*in, *pkgName, file, typeSets, outWriter) - } else { - var source []byte - source, err = ioutil.ReadAll(os.Stdin) - if err != nil { - fatal(exitcodeStdinFailed, err) - } - reader := bytes.NewReader(source) - err = gen("stdin", *pkgName, reader, typeSets, outWriter) - } - - // do the work - if err != nil { - fatal(exitcodeGenFailed, err) - } - -} - -func usage() { - fmt.Fprintln(os.Stderr, `usage: genny [{flags}] gen "{types}" - -gen - generates type specific code from generic code. -get - fetch a generic template from the online library and gen it. - -{flags} - (optional) Command line flags (see below) -{types} - (required) Specific types for each generic type in the source -{types} format: {generic}={specific}[,another][ {generic2}={specific2}] - -Examples: - Generic=Specific - Generic1=Specific1 Generic2=Specific2 - Generic1=Specific1,Specific2 Generic2=Specific3,Specific4 - -Flags:`) - flag.PrintDefaults() -} - -func newWriter(fileName string) io.Writer { - if fileName == "" { - return os.Stdout - } - lf := &out.LazyFile{FileName: fileName} - defer lf.Close() - return lf -} - -func fatal(code int, a ...interface{}) { - fmt.Println(a...) - os.Exit(code) -} - -// gen performs the generic generation. -func gen(filename, pkgName string, in io.ReadSeeker, typesets []map[string]string, out io.Writer) error { - - var output []byte - var err error - - output, err = parse.Generics(filename, pkgName, in, typesets) - if err != nil { - return err - } - - out.Write(output) - return nil -} diff --git a/vendor/github.com/cheekybits/genny/out/lazy_file.go b/vendor/github.com/cheekybits/genny/out/lazy_file.go deleted file mode 100644 index 7c8815f5..00000000 --- a/vendor/github.com/cheekybits/genny/out/lazy_file.go +++ /dev/null @@ -1,38 +0,0 @@ -package out - -import ( - "os" - "path" -) - -// LazyFile is an io.WriteCloser which defers creation of the file it is supposed to write in -// till the first call to its write function in order to prevent creation of file, if no write -// is supposed to happen. -type LazyFile struct { - // FileName is path to the file to which genny will write. - FileName string - file *os.File -} - -// Close closes the file if it is created. Returns nil if no file is created. -func (lw *LazyFile) Close() error { - if lw.file != nil { - return lw.file.Close() - } - return nil -} - -// Write writes to the specified file and creates the file first time it is called. -func (lw *LazyFile) Write(p []byte) (int, error) { - if lw.file == nil { - err := os.MkdirAll(path.Dir(lw.FileName), 0755) - if err != nil { - return 0, err - } - lw.file, err = os.Create(lw.FileName) - if err != nil { - return 0, err - } - } - return lw.file.Write(p) -} diff --git a/vendor/github.com/cheekybits/genny/parse/builtins.go b/vendor/github.com/cheekybits/genny/parse/builtins.go deleted file mode 100644 index e0299544..00000000 --- a/vendor/github.com/cheekybits/genny/parse/builtins.go +++ /dev/null @@ -1,41 +0,0 @@ -package parse - -// Builtins contains a slice of all built-in Go types. -var Builtins = []string{ - "bool", - "byte", - "complex128", - "complex64", - "error", - "float32", - "float64", - "int", - "int16", - "int32", - "int64", - "int8", - "rune", - "string", - "uint", - "uint16", - "uint32", - "uint64", - "uint8", - "uintptr", -} - -// Numbers contains a slice of all built-in number types. 
-var Numbers = []string{ - "float32", - "float64", - "int", - "int16", - "int32", - "int64", - "int8", - "uint", - "uint16", - "uint32", - "uint64", - "uint8", -} diff --git a/vendor/github.com/cheekybits/genny/parse/doc.go b/vendor/github.com/cheekybits/genny/parse/doc.go deleted file mode 100644 index 1be4fed8..00000000 --- a/vendor/github.com/cheekybits/genny/parse/doc.go +++ /dev/null @@ -1,14 +0,0 @@ -// Package parse contains the generic code generation capabilities -// that power genny. -// -// genny gen "{types}" -// -// gen - generates type specific code (to stdout) from generic code (via stdin) -// -// {types} - (required) Specific types for each generic type in the source -// {types} format: {generic}={specific}[,another][ {generic2}={specific2}] -// Examples: -// Generic=Specific -// Generic1=Specific1 Generic2=Specific2 -// Generic1=Specific1,Specific2 Generic2=Specific3,Specific4 -package parse diff --git a/vendor/github.com/cheekybits/genny/parse/errors.go b/vendor/github.com/cheekybits/genny/parse/errors.go deleted file mode 100644 index ab812bf9..00000000 --- a/vendor/github.com/cheekybits/genny/parse/errors.go +++ /dev/null @@ -1,47 +0,0 @@ -package parse - -import ( - "errors" -) - -// errMissingSpecificType represents an error when a generic type is not -// satisfied by a specific type. -type errMissingSpecificType struct { - GenericType string -} - -// Error gets a human readable string describing this error. -func (e errMissingSpecificType) Error() string { - return "Missing specific type for '" + e.GenericType + "' generic type" -} - -// errImports represents an error from goimports. -type errImports struct { - Err error -} - -// Error gets a human readable string describing this error. -func (e errImports) Error() string { - return "Failed to goimports the generated code: " + e.Err.Error() -} - -// errSource represents an error with the source file. -type errSource struct { - Err error -} - -// Error gets a human readable string describing this error. -func (e errSource) Error() string { - return "Failed to parse source file: " + e.Err.Error() -} - -type errBadTypeArgs struct { - Message string - Arg string -} - -func (e errBadTypeArgs) Error() string { - return "\"" + e.Arg + "\" is bad: " + e.Message -} - -var errMissingTypeInformation = errors.New("No type arguments were specified and no \"// +gogen\" tag was found in the source.") diff --git a/vendor/github.com/cheekybits/genny/parse/parse.go b/vendor/github.com/cheekybits/genny/parse/parse.go deleted file mode 100644 index 08eb48b1..00000000 --- a/vendor/github.com/cheekybits/genny/parse/parse.go +++ /dev/null @@ -1,298 +0,0 @@ -package parse - -import ( - "bufio" - "bytes" - "fmt" - "go/ast" - "go/parser" - "go/scanner" - "go/token" - "io" - "os" - "strings" - "unicode" - - "golang.org/x/tools/imports" -) - -var header = []byte(` - -// This file was automatically generated by genny. -// Any changes will be lost if this file is regenerated. 
-// see https://github.com/cheekybits/genny - -`) - -var ( - packageKeyword = []byte("package") - importKeyword = []byte("import") - openBrace = []byte("(") - closeBrace = []byte(")") - genericPackage = "generic" - genericType = "generic.Type" - genericNumber = "generic.Number" - linefeed = "\r\n" -) -var unwantedLinePrefixes = [][]byte{ - []byte("//go:generate genny "), -} - -func subIntoLiteral(lit, typeTemplate, specificType string) string { - if lit == typeTemplate { - return specificType - } - if !strings.Contains(lit, typeTemplate) { - return lit - } - specificLg := wordify(specificType, true) - specificSm := wordify(specificType, false) - result := strings.Replace(lit, typeTemplate, specificLg, -1) - if strings.HasPrefix(result, specificLg) && !isExported(lit) { - return strings.Replace(result, specificLg, specificSm, 1) - } - return result -} - -func subTypeIntoComment(line, typeTemplate, specificType string) string { - var subbed string - for _, w := range strings.Fields(line) { - subbed = subbed + subIntoLiteral(w, typeTemplate, specificType) + " " - } - return subbed -} - -// Does the heavy lifting of taking a line of our code and -// sbustituting a type into there for our generic type -func subTypeIntoLine(line, typeTemplate, specificType string) string { - src := []byte(line) - var s scanner.Scanner - fset := token.NewFileSet() - file := fset.AddFile("", fset.Base(), len(src)) - s.Init(file, src, nil, scanner.ScanComments) - output := "" - for { - _, tok, lit := s.Scan() - if tok == token.EOF { - break - } else if tok == token.COMMENT { - subbed := subTypeIntoComment(lit, typeTemplate, specificType) - output = output + subbed + " " - } else if tok.IsLiteral() { - subbed := subIntoLiteral(lit, typeTemplate, specificType) - output = output + subbed + " " - } else { - output = output + tok.String() + " " - } - } - return output -} - -// typeSet looks like "KeyType: int, ValueType: string" -func generateSpecific(filename string, in io.ReadSeeker, typeSet map[string]string) ([]byte, error) { - - // ensure we are at the beginning of the file - in.Seek(0, os.SEEK_SET) - - // parse the source file - fs := token.NewFileSet() - file, err := parser.ParseFile(fs, filename, in, 0) - if err != nil { - return nil, &errSource{Err: err} - } - - // make sure every generic.Type is represented in the types - // argument. - for _, decl := range file.Decls { - switch it := decl.(type) { - case *ast.GenDecl: - for _, spec := range it.Specs { - ts, ok := spec.(*ast.TypeSpec) - if !ok { - continue - } - switch tt := ts.Type.(type) { - case *ast.SelectorExpr: - if name, ok := tt.X.(*ast.Ident); ok { - if name.Name == genericPackage { - if _, ok := typeSet[ts.Name.Name]; !ok { - return nil, &errMissingSpecificType{GenericType: ts.Name.Name} - } - } - } - } - } - } - } - - in.Seek(0, os.SEEK_SET) - - var buf bytes.Buffer - - comment := "" - scanner := bufio.NewScanner(in) - for scanner.Scan() { - - line := scanner.Text() - - // does this line contain generic.Type? - if strings.Contains(line, genericType) || strings.Contains(line, genericNumber) { - comment = "" - continue - } - - for t, specificType := range typeSet { - if strings.Contains(line, t) { - newLine := subTypeIntoLine(line, t, specificType) - line = newLine - } - } - - if comment != "" { - buf.WriteString(makeLine(comment)) - comment = "" - } - - // is this line a comment? - // TODO: should we handle /* */ comments? 
- if strings.HasPrefix(line, "//") { - // record this line to print later - comment = line - continue - } - - // write the line - buf.WriteString(makeLine(line)) - } - - // write it out - return buf.Bytes(), nil -} - -// Generics parses the source file and generates the bytes replacing the -// generic types for the keys map with the specific types (its value). -func Generics(filename, pkgName string, in io.ReadSeeker, typeSets []map[string]string) ([]byte, error) { - - totalOutput := header - - for _, typeSet := range typeSets { - - // generate the specifics - parsed, err := generateSpecific(filename, in, typeSet) - if err != nil { - return nil, err - } - - totalOutput = append(totalOutput, parsed...) - - } - - // clean up the code line by line - packageFound := false - insideImportBlock := false - var cleanOutputLines []string - scanner := bufio.NewScanner(bytes.NewReader(totalOutput)) - for scanner.Scan() { - - // end of imports block? - if insideImportBlock { - if bytes.HasSuffix(scanner.Bytes(), closeBrace) { - insideImportBlock = false - } - continue - } - - if bytes.HasPrefix(scanner.Bytes(), packageKeyword) { - if packageFound { - continue - } else { - packageFound = true - } - } else if bytes.HasPrefix(scanner.Bytes(), importKeyword) { - if bytes.HasSuffix(scanner.Bytes(), openBrace) { - insideImportBlock = true - } - continue - } - - // check all unwantedLinePrefixes - and skip them - skipline := false - for _, prefix := range unwantedLinePrefixes { - if bytes.HasPrefix(scanner.Bytes(), prefix) { - skipline = true - continue - } - } - - if skipline { - continue - } - - cleanOutputLines = append(cleanOutputLines, makeLine(scanner.Text())) - } - - cleanOutput := strings.Join(cleanOutputLines, "") - - output := []byte(cleanOutput) - var err error - - // change package name - if pkgName != "" { - output = changePackage(bytes.NewReader([]byte(output)), pkgName) - } - // fix the imports - output, err = imports.Process(filename, output, nil) - if err != nil { - return nil, &errImports{Err: err} - } - - return output, nil -} - -func makeLine(s string) string { - return fmt.Sprintln(strings.TrimRight(s, linefeed)) -} - -// isAlphaNumeric gets whether the rune is alphanumeric or _. -func isAlphaNumeric(r rune) bool { - return r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r) -} - -// wordify turns a type into a nice word for function and type -// names etc. 
-func wordify(s string, exported bool) string { - s = strings.TrimRight(s, "{}") - s = strings.TrimLeft(s, "*&") - s = strings.Replace(s, ".", "", -1) - if !exported { - return s - } - return strings.ToUpper(string(s[0])) + s[1:] -} - -func changePackage(r io.Reader, pkgName string) []byte { - var out bytes.Buffer - sc := bufio.NewScanner(r) - done := false - - for sc.Scan() { - s := sc.Text() - - if !done && strings.HasPrefix(s, "package") { - parts := strings.Split(s, " ") - parts[1] = pkgName - s = strings.Join(parts, " ") - done = true - } - - fmt.Fprintln(&out, s) - } - return out.Bytes() -} - -func isExported(lit string) bool { - if len(lit) == 0 { - return false - } - return unicode.IsUpper(rune(lit[0])) -} diff --git a/vendor/github.com/cheekybits/genny/parse/typesets.go b/vendor/github.com/cheekybits/genny/parse/typesets.go deleted file mode 100644 index c30b9728..00000000 --- a/vendor/github.com/cheekybits/genny/parse/typesets.go +++ /dev/null @@ -1,89 +0,0 @@ -package parse - -import "strings" - -const ( - typeSep = " " - keyValueSep = "=" - valuesSep = "," - builtins = "BUILTINS" - numbers = "NUMBERS" -) - -// TypeSet turns a type string into a []map[string]string -// that can be given to parse.Generics for it to do its magic. -// -// Acceptable args are: -// -// Person=man -// Person=man Animal=dog -// Person=man Animal=dog Animal2=cat -// Person=man,woman Animal=dog,cat -// Person=man,woman,child Animal=dog,cat Place=london,paris -func TypeSet(arg string) ([]map[string]string, error) { - - types := make(map[string][]string) - var keys []string - for _, pair := range strings.Split(arg, typeSep) { - segs := strings.Split(pair, keyValueSep) - if len(segs) != 2 { - return nil, &errBadTypeArgs{Arg: arg, Message: "Generic=Specific expected"} - } - key := segs[0] - keys = append(keys, key) - types[key] = make([]string, 0) - for _, t := range strings.Split(segs[1], valuesSep) { - if t == builtins { - types[key] = append(types[key], Builtins...) - } else if t == numbers { - types[key] = append(types[key], Numbers...) 
- } else { - types[key] = append(types[key], t) - } - } - } - - cursors := make(map[string]int) - for _, key := range keys { - cursors[key] = 0 - } - - outChan := make(chan map[string]string) - go func() { - buildTypeSet(keys, 0, cursors, types, outChan) - close(outChan) - }() - - var typeSets []map[string]string - for typeSet := range outChan { - typeSets = append(typeSets, typeSet) - } - - return typeSets, nil - -} - -func buildTypeSet(keys []string, keyI int, cursors map[string]int, types map[string][]string, out chan<- map[string]string) { - key := keys[keyI] - for cursors[key] < len(types[key]) { - if keyI < len(keys)-1 { - buildTypeSet(keys, keyI+1, copycursors(cursors), types, out) - } else { - // build the typeset for this combination - ts := make(map[string]string) - for k, vals := range types { - ts[k] = vals[cursors[k]] - } - out <- ts - } - cursors[key]++ - } -} - -func copycursors(source map[string]int) map[string]int { - copy := make(map[string]int) - for k, v := range source { - copy[k] = v - } - return copy -} diff --git a/vendor/github.com/chzyer/readline/.gitignore b/vendor/github.com/chzyer/readline/.gitignore deleted file mode 100644 index a3062bea..00000000 --- a/vendor/github.com/chzyer/readline/.gitignore +++ /dev/null @@ -1 +0,0 @@ -.vscode/* diff --git a/vendor/github.com/chzyer/readline/.travis.yml b/vendor/github.com/chzyer/readline/.travis.yml deleted file mode 100644 index 9c359554..00000000 --- a/vendor/github.com/chzyer/readline/.travis.yml +++ /dev/null @@ -1,8 +0,0 @@ -language: go -go: - - 1.x -script: - - GOOS=windows go install github.com/chzyer/readline/example/... - - GOOS=linux go install github.com/chzyer/readline/example/... - - GOOS=darwin go install github.com/chzyer/readline/example/... - - go test -race -v diff --git a/vendor/github.com/chzyer/readline/CHANGELOG.md b/vendor/github.com/chzyer/readline/CHANGELOG.md deleted file mode 100644 index 14ff5be1..00000000 --- a/vendor/github.com/chzyer/readline/CHANGELOG.md +++ /dev/null @@ -1,58 +0,0 @@ -# ChangeLog - -### 1.4 - 2016-07-25 - -* [#60][60] Support dynamic autocompletion -* Fix ANSI parser on Windows -* Fix wrong column width in complete mode on Windows -* Remove dependent package "golang.org/x/crypto/ssh/terminal" - -### 1.3 - 2016-05-09 - -* [#38][38] add SetChildren for prefix completer interface -* [#42][42] improve multiple lines compatibility -* [#43][43] remove sub-package(runes) for gopkg compatibility -* [#46][46] Auto complete with space prefixed line -* [#48][48] support suspend process (ctrl+Z) -* [#49][49] fix bug that check equals with previous command -* [#53][53] Fix bug which causes integer divide by zero panicking when input buffer is empty - -### 1.2 - 2016-03-05 - -* Add a demo for checking password strength [example/readline-pass-strength](https://github.com/chzyer/readline/blob/master/example/readline-pass-strength/readline-pass-strength.go), , written by [@sahib](https://github.com/sahib) -* [#23][23], support stdin remapping -* [#27][27], add a `UniqueEditLine` to `Config`, which will erase the editing line after user submited it, usually use in IM. -* Add a demo for multiline [example/readline-multiline](https://github.com/chzyer/readline/blob/master/example/readline-multiline/readline-multiline.go) which can submit one SQL by multiple lines. -* Supports performs even stdin/stdout is not a tty. -* Add a new simple apis for single instance, check by [here](https://github.com/chzyer/readline/blob/master/std.go). It need to save history manually if using this api. 
-* [#28][28], fixes the history is not working as expected. -* [#33][33], vim mode now support `c`, `d`, `x (delete character)`, `r (replace character)` - -### 1.1 - 2015-11-20 - -* [#12][12] Add support for key ``/``/`` -* Only enter raw mode as needed (calling `Readline()`), program will receive signal(e.g. Ctrl+C) if not interact with `readline`. -* Bugs fixed for `PrefixCompleter` -* Press `Ctrl+D` in empty line will cause `io.EOF` in error, Press `Ctrl+C` in anytime will cause `ErrInterrupt` instead of `io.EOF`, this will privodes a shell-like user experience. -* Customable Interrupt/EOF prompt in `Config` -* [#17][17] Change atomic package to use 32bit function to let it runnable on arm 32bit devices -* Provides a new password user experience(`readline.ReadPasswordEx()`). - -### 1.0 - 2015-10-14 - -* Initial public release. - -[12]: https://github.com/chzyer/readline/pull/12 -[17]: https://github.com/chzyer/readline/pull/17 -[23]: https://github.com/chzyer/readline/pull/23 -[27]: https://github.com/chzyer/readline/pull/27 -[28]: https://github.com/chzyer/readline/pull/28 -[33]: https://github.com/chzyer/readline/pull/33 -[38]: https://github.com/chzyer/readline/pull/38 -[42]: https://github.com/chzyer/readline/pull/42 -[43]: https://github.com/chzyer/readline/pull/43 -[46]: https://github.com/chzyer/readline/pull/46 -[48]: https://github.com/chzyer/readline/pull/48 -[49]: https://github.com/chzyer/readline/pull/49 -[53]: https://github.com/chzyer/readline/pull/53 -[60]: https://github.com/chzyer/readline/pull/60 diff --git a/vendor/github.com/chzyer/readline/LICENSE b/vendor/github.com/chzyer/readline/LICENSE deleted file mode 100644 index c9afab3d..00000000 --- a/vendor/github.com/chzyer/readline/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Chzyer - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
- diff --git a/vendor/github.com/chzyer/readline/README.md b/vendor/github.com/chzyer/readline/README.md deleted file mode 100644 index fab974b7..00000000 --- a/vendor/github.com/chzyer/readline/README.md +++ /dev/null @@ -1,114 +0,0 @@ -[![Build Status](https://travis-ci.org/chzyer/readline.svg?branch=master)](https://travis-ci.org/chzyer/readline) -[![Software License](https://img.shields.io/badge/license-MIT-brightgreen.svg)](LICENSE.md) -[![Version](https://img.shields.io/github/tag/chzyer/readline.svg)](https://github.com/chzyer/readline/releases) -[![GoDoc](https://godoc.org/github.com/chzyer/readline?status.svg)](https://godoc.org/github.com/chzyer/readline) -[![OpenCollective](https://opencollective.com/readline/badge/backers.svg)](#backers) -[![OpenCollective](https://opencollective.com/readline/badge/sponsors.svg)](#sponsors) - -
- -A powerful readline library in `Linux` `macOS` `Windows` `Solaris` - -## Guide - -* [Demo](example/readline-demo/readline-demo.go) -* [Shortcut](doc/shortcut.md) - -## Repos using readline - -[![cockroachdb](https://img.shields.io/github/stars/cockroachdb/cockroach.svg?label=cockroachdb/cockroach)](https://github.com/cockroachdb/cockroach) -[![robertkrimen/otto](https://img.shields.io/github/stars/robertkrimen/otto.svg?label=robertkrimen/otto)](https://github.com/robertkrimen/otto) -[![empire](https://img.shields.io/github/stars/remind101/empire.svg?label=remind101/empire)](https://github.com/remind101/empire) -[![mehrdadrad/mylg](https://img.shields.io/github/stars/mehrdadrad/mylg.svg?label=mehrdadrad/mylg)](https://github.com/mehrdadrad/mylg) -[![knq/usql](https://img.shields.io/github/stars/knq/usql.svg?label=knq/usql)](https://github.com/knq/usql) -[![youtube/doorman](https://img.shields.io/github/stars/youtube/doorman.svg?label=youtube/doorman)](https://github.com/youtube/doorman) -[![bom-d-van/harp](https://img.shields.io/github/stars/bom-d-van/harp.svg?label=bom-d-van/harp)](https://github.com/bom-d-van/harp) -[![abiosoft/ishell](https://img.shields.io/github/stars/abiosoft/ishell.svg?label=abiosoft/ishell)](https://github.com/abiosoft/ishell) -[![Netflix/hal-9001](https://img.shields.io/github/stars/Netflix/hal-9001.svg?label=Netflix/hal-9001)](https://github.com/Netflix/hal-9001) -[![docker/go-p9p](https://img.shields.io/github/stars/docker/go-p9p.svg?label=docker/go-p9p)](https://github.com/docker/go-p9p) - - -## Feedback - -If you have any questions, please submit a github issue and any pull requests is welcomed :) - -* [https://twitter.com/chzyer](https://twitter.com/chzyer) -* [http://weibo.com/2145262190](http://weibo.com/2145262190) - - -## Backers - -Love Readline? Help me keep it alive by donating funds to cover project expenses!
-[[Become a backer](https://opencollective.com/readline#backer)] - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -## Sponsors - -Become a sponsor and get your logo here on our Github page. [[Become a sponsor](https://opencollective.com/readline#sponsor)] - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/vendor/github.com/chzyer/readline/ansi_windows.go b/vendor/github.com/chzyer/readline/ansi_windows.go deleted file mode 100644 index 63b908c1..00000000 --- a/vendor/github.com/chzyer/readline/ansi_windows.go +++ /dev/null @@ -1,249 +0,0 @@ -// +build windows - -package readline - -import ( - "bufio" - "io" - "strconv" - "strings" - "sync" - "unicode/utf8" - "unsafe" -) - -const ( - _ = uint16(0) - COLOR_FBLUE = 0x0001 - COLOR_FGREEN = 0x0002 - COLOR_FRED = 0x0004 - COLOR_FINTENSITY = 0x0008 - - COLOR_BBLUE = 0x0010 - COLOR_BGREEN = 0x0020 - COLOR_BRED = 0x0040 - COLOR_BINTENSITY = 0x0080 - - COMMON_LVB_UNDERSCORE = 0x8000 - COMMON_LVB_BOLD = 0x0007 -) - -var ColorTableFg = []word{ - 0, // 30: Black - COLOR_FRED, // 31: Red - COLOR_FGREEN, // 32: Green - COLOR_FRED | COLOR_FGREEN, // 33: Yellow - COLOR_FBLUE, // 34: Blue - COLOR_FRED | COLOR_FBLUE, // 35: Magenta - COLOR_FGREEN | COLOR_FBLUE, // 36: Cyan - COLOR_FRED | COLOR_FBLUE | COLOR_FGREEN, // 37: White -} - -var ColorTableBg = []word{ - 0, // 40: Black - COLOR_BRED, // 41: Red - COLOR_BGREEN, // 42: Green - COLOR_BRED | COLOR_BGREEN, // 43: Yellow - COLOR_BBLUE, // 44: Blue - COLOR_BRED | COLOR_BBLUE, // 45: Magenta - COLOR_BGREEN | COLOR_BBLUE, // 46: Cyan - COLOR_BRED | COLOR_BBLUE | COLOR_BGREEN, // 47: White -} - -type ANSIWriter struct { - target io.Writer - wg sync.WaitGroup - ctx *ANSIWriterCtx - sync.Mutex -} - -func NewANSIWriter(w io.Writer) *ANSIWriter { - a := &ANSIWriter{ - target: w, - ctx: NewANSIWriterCtx(w), - } - return a -} - -func (a *ANSIWriter) Close() error { - a.wg.Wait() - return nil -} - -type ANSIWriterCtx struct { - isEsc bool - isEscSeq bool - arg []string - target *bufio.Writer - wantFlush bool -} - -func NewANSIWriterCtx(target io.Writer) *ANSIWriterCtx { - return &ANSIWriterCtx{ - target: bufio.NewWriter(target), - } -} - -func (a *ANSIWriterCtx) Flush() { - a.target.Flush() -} - -func (a *ANSIWriterCtx) process(r rune) bool { - if a.wantFlush { - if r == 0 || r == CharEsc { - a.wantFlush = false - a.target.Flush() - } - } - if a.isEscSeq { - a.isEscSeq = a.ioloopEscSeq(a.target, r, &a.arg) - return true - } - - switch r { - case CharEsc: - a.isEsc = true - case '[': - if a.isEsc { - a.arg = nil - a.isEscSeq = true - a.isEsc = false - break - } - fallthrough - default: - a.target.WriteRune(r) - a.wantFlush = true - } - return true -} - -func (a *ANSIWriterCtx) ioloopEscSeq(w *bufio.Writer, r rune, argptr *[]string) bool { - arg := *argptr - var err error - - if r >= 'A' && r <= 'D' { - count := short(GetInt(arg, 1)) - info, err := GetConsoleScreenBufferInfo() - if err != nil { - return false - } - switch r { - case 'A': // up - info.dwCursorPosition.y -= count - case 'B': // down - info.dwCursorPosition.y += count - case 'C': // right - info.dwCursorPosition.x += count - case 'D': // left - info.dwCursorPosition.x -= count - } - SetConsoleCursorPosition(&info.dwCursorPosition) - return false - } - - switch r { - case 'J': - killLines() - case 'K': - eraseLine() - case 'm': - color := word(0) - for _, item := range arg { - var c int - c, err = strconv.Atoi(item) - if err != nil { - w.WriteString("[" + strings.Join(arg, ";") + "m") - break - } - if c >= 30 && c 
< 40 { - color ^= COLOR_FINTENSITY - color |= ColorTableFg[c-30] - } else if c >= 40 && c < 50 { - color ^= COLOR_BINTENSITY - color |= ColorTableBg[c-40] - } else if c == 4 { - color |= COMMON_LVB_UNDERSCORE | ColorTableFg[7] - } else if c == 1 { - color |= COMMON_LVB_BOLD | COLOR_FINTENSITY - } else { // unknown code treat as reset - color = ColorTableFg[7] - } - } - if err != nil { - break - } - kernel.SetConsoleTextAttribute(stdout, uintptr(color)) - case '\007': // set title - case ';': - if len(arg) == 0 || arg[len(arg)-1] != "" { - arg = append(arg, "") - *argptr = arg - } - return true - default: - if len(arg) == 0 { - arg = append(arg, "") - } - arg[len(arg)-1] += string(r) - *argptr = arg - return true - } - *argptr = nil - return false -} - -func (a *ANSIWriter) Write(b []byte) (int, error) { - a.Lock() - defer a.Unlock() - - off := 0 - for len(b) > off { - r, size := utf8.DecodeRune(b[off:]) - if size == 0 { - return off, io.ErrShortWrite - } - off += size - a.ctx.process(r) - } - a.ctx.Flush() - return off, nil -} - -func killLines() error { - sbi, err := GetConsoleScreenBufferInfo() - if err != nil { - return err - } - - size := (sbi.dwCursorPosition.y - sbi.dwSize.y) * sbi.dwSize.x - size += sbi.dwCursorPosition.x - - var written int - kernel.FillConsoleOutputAttribute(stdout, uintptr(ColorTableFg[7]), - uintptr(size), - sbi.dwCursorPosition.ptr(), - uintptr(unsafe.Pointer(&written)), - ) - return kernel.FillConsoleOutputCharacterW(stdout, uintptr(' '), - uintptr(size), - sbi.dwCursorPosition.ptr(), - uintptr(unsafe.Pointer(&written)), - ) -} - -func eraseLine() error { - sbi, err := GetConsoleScreenBufferInfo() - if err != nil { - return err - } - - size := sbi.dwSize.x - sbi.dwCursorPosition.x = 0 - var written int - return kernel.FillConsoleOutputCharacterW(stdout, uintptr(' '), - uintptr(size), - sbi.dwCursorPosition.ptr(), - uintptr(unsafe.Pointer(&written)), - ) -} diff --git a/vendor/github.com/chzyer/readline/complete.go b/vendor/github.com/chzyer/readline/complete.go deleted file mode 100644 index c08c9941..00000000 --- a/vendor/github.com/chzyer/readline/complete.go +++ /dev/null @@ -1,285 +0,0 @@ -package readline - -import ( - "bufio" - "bytes" - "fmt" - "io" -) - -type AutoCompleter interface { - // Readline will pass the whole line and current offset to it - // Completer need to pass all the candidates, and how long they shared the same characters in line - // Example: - // [go, git, git-shell, grep] - // Do("g", 1) => ["o", "it", "it-shell", "rep"], 1 - // Do("gi", 2) => ["t", "t-shell"], 2 - // Do("git", 3) => ["", "-shell"], 3 - Do(line []rune, pos int) (newLine [][]rune, length int) -} - -type TabCompleter struct{} - -func (t *TabCompleter) Do([]rune, int) ([][]rune, int) { - return [][]rune{[]rune("\t")}, 0 -} - -type opCompleter struct { - w io.Writer - op *Operation - width int - - inCompleteMode bool - inSelectMode bool - candidate [][]rune - candidateSource []rune - candidateOff int - candidateChoise int - candidateColNum int -} - -func newOpCompleter(w io.Writer, op *Operation, width int) *opCompleter { - return &opCompleter{ - w: w, - op: op, - width: width, - } -} - -func (o *opCompleter) doSelect() { - if len(o.candidate) == 1 { - o.op.buf.WriteRunes(o.candidate[0]) - o.ExitCompleteMode(false) - return - } - o.nextCandidate(1) - o.CompleteRefresh() -} - -func (o *opCompleter) nextCandidate(i int) { - o.candidateChoise += i - o.candidateChoise = o.candidateChoise % len(o.candidate) - if o.candidateChoise < 0 { - o.candidateChoise = len(o.candidate) 
+ o.candidateChoise - } -} - -func (o *opCompleter) OnComplete() bool { - if o.width == 0 { - return false - } - if o.IsInCompleteSelectMode() { - o.doSelect() - return true - } - - buf := o.op.buf - rs := buf.Runes() - - if o.IsInCompleteMode() && o.candidateSource != nil && runes.Equal(rs, o.candidateSource) { - o.EnterCompleteSelectMode() - o.doSelect() - return true - } - - o.ExitCompleteSelectMode() - o.candidateSource = rs - newLines, offset := o.op.cfg.AutoComplete.Do(rs, buf.idx) - if len(newLines) == 0 { - o.ExitCompleteMode(false) - return true - } - - // only Aggregate candidates in non-complete mode - if !o.IsInCompleteMode() { - if len(newLines) == 1 { - buf.WriteRunes(newLines[0]) - o.ExitCompleteMode(false) - return true - } - - same, size := runes.Aggregate(newLines) - if size > 0 { - buf.WriteRunes(same) - o.ExitCompleteMode(false) - return true - } - } - - o.EnterCompleteMode(offset, newLines) - return true -} - -func (o *opCompleter) IsInCompleteSelectMode() bool { - return o.inSelectMode -} - -func (o *opCompleter) IsInCompleteMode() bool { - return o.inCompleteMode -} - -func (o *opCompleter) HandleCompleteSelect(r rune) bool { - next := true - switch r { - case CharEnter, CharCtrlJ: - next = false - o.op.buf.WriteRunes(o.op.candidate[o.op.candidateChoise]) - o.ExitCompleteMode(false) - case CharLineStart: - num := o.candidateChoise % o.candidateColNum - o.nextCandidate(-num) - case CharLineEnd: - num := o.candidateColNum - o.candidateChoise%o.candidateColNum - 1 - o.candidateChoise += num - if o.candidateChoise >= len(o.candidate) { - o.candidateChoise = len(o.candidate) - 1 - } - case CharBackspace: - o.ExitCompleteSelectMode() - next = false - case CharTab, CharForward: - o.doSelect() - case CharBell, CharInterrupt: - o.ExitCompleteMode(true) - next = false - case CharNext: - tmpChoise := o.candidateChoise + o.candidateColNum - if tmpChoise >= o.getMatrixSize() { - tmpChoise -= o.getMatrixSize() - } else if tmpChoise >= len(o.candidate) { - tmpChoise += o.candidateColNum - tmpChoise -= o.getMatrixSize() - } - o.candidateChoise = tmpChoise - case CharBackward: - o.nextCandidate(-1) - case CharPrev: - tmpChoise := o.candidateChoise - o.candidateColNum - if tmpChoise < 0 { - tmpChoise += o.getMatrixSize() - if tmpChoise >= len(o.candidate) { - tmpChoise -= o.candidateColNum - } - } - o.candidateChoise = tmpChoise - default: - next = false - o.ExitCompleteSelectMode() - } - if next { - o.CompleteRefresh() - return true - } - return false -} - -func (o *opCompleter) getMatrixSize() int { - line := len(o.candidate) / o.candidateColNum - if len(o.candidate)%o.candidateColNum != 0 { - line++ - } - return line * o.candidateColNum -} - -func (o *opCompleter) OnWidthChange(newWidth int) { - o.width = newWidth -} - -func (o *opCompleter) CompleteRefresh() { - if !o.inCompleteMode { - return - } - lineCnt := o.op.buf.CursorLineCount() - colWidth := 0 - for _, c := range o.candidate { - w := runes.WidthAll(c) - if w > colWidth { - colWidth = w - } - } - colWidth += o.candidateOff + 1 - same := o.op.buf.RuneSlice(-o.candidateOff) - - // -1 to avoid reach the end of line - width := o.width - 1 - colNum := width / colWidth - if colNum != 0 { - colWidth += (width - (colWidth * colNum)) / colNum - } - - o.candidateColNum = colNum - buf := bufio.NewWriter(o.w) - buf.Write(bytes.Repeat([]byte("\n"), lineCnt)) - - colIdx := 0 - lines := 1 - buf.WriteString("\033[J") - for idx, c := range o.candidate { - inSelect := idx == o.candidateChoise && o.IsInCompleteSelectMode() - if inSelect { - 
buf.WriteString("\033[30;47m") - } - buf.WriteString(string(same)) - buf.WriteString(string(c)) - buf.Write(bytes.Repeat([]byte(" "), colWidth-runes.WidthAll(c)-runes.WidthAll(same))) - - if inSelect { - buf.WriteString("\033[0m") - } - - colIdx++ - if colIdx == colNum { - buf.WriteString("\n") - lines++ - colIdx = 0 - } - } - - // move back - fmt.Fprintf(buf, "\033[%dA\r", lineCnt-1+lines) - fmt.Fprintf(buf, "\033[%dC", o.op.buf.idx+o.op.buf.PromptLen()) - buf.Flush() -} - -func (o *opCompleter) aggCandidate(candidate [][]rune) int { - offset := 0 - for i := 0; i < len(candidate[0]); i++ { - for j := 0; j < len(candidate)-1; j++ { - if i > len(candidate[j]) { - goto aggregate - } - if candidate[j][i] != candidate[j+1][i] { - goto aggregate - } - } - offset = i - } -aggregate: - return offset -} - -func (o *opCompleter) EnterCompleteSelectMode() { - o.inSelectMode = true - o.candidateChoise = -1 - o.CompleteRefresh() -} - -func (o *opCompleter) EnterCompleteMode(offset int, candidate [][]rune) { - o.inCompleteMode = true - o.candidate = candidate - o.candidateOff = offset - o.CompleteRefresh() -} - -func (o *opCompleter) ExitCompleteSelectMode() { - o.inSelectMode = false - o.candidate = nil - o.candidateChoise = -1 - o.candidateOff = -1 - o.candidateSource = nil -} - -func (o *opCompleter) ExitCompleteMode(revent bool) { - o.inCompleteMode = false - o.ExitCompleteSelectMode() -} diff --git a/vendor/github.com/chzyer/readline/complete_helper.go b/vendor/github.com/chzyer/readline/complete_helper.go deleted file mode 100644 index 58d72487..00000000 --- a/vendor/github.com/chzyer/readline/complete_helper.go +++ /dev/null @@ -1,165 +0,0 @@ -package readline - -import ( - "bytes" - "strings" -) - -// Caller type for dynamic completion -type DynamicCompleteFunc func(string) []string - -type PrefixCompleterInterface interface { - Print(prefix string, level int, buf *bytes.Buffer) - Do(line []rune, pos int) (newLine [][]rune, length int) - GetName() []rune - GetChildren() []PrefixCompleterInterface - SetChildren(children []PrefixCompleterInterface) -} - -type DynamicPrefixCompleterInterface interface { - PrefixCompleterInterface - IsDynamic() bool - GetDynamicNames(line []rune) [][]rune -} - -type PrefixCompleter struct { - Name []rune - Dynamic bool - Callback DynamicCompleteFunc - Children []PrefixCompleterInterface -} - -func (p *PrefixCompleter) Tree(prefix string) string { - buf := bytes.NewBuffer(nil) - p.Print(prefix, 0, buf) - return buf.String() -} - -func Print(p PrefixCompleterInterface, prefix string, level int, buf *bytes.Buffer) { - if strings.TrimSpace(string(p.GetName())) != "" { - buf.WriteString(prefix) - if level > 0 { - buf.WriteString("├") - buf.WriteString(strings.Repeat("─", (level*4)-2)) - buf.WriteString(" ") - } - buf.WriteString(string(p.GetName()) + "\n") - level++ - } - for _, ch := range p.GetChildren() { - ch.Print(prefix, level, buf) - } -} - -func (p *PrefixCompleter) Print(prefix string, level int, buf *bytes.Buffer) { - Print(p, prefix, level, buf) -} - -func (p *PrefixCompleter) IsDynamic() bool { - return p.Dynamic -} - -func (p *PrefixCompleter) GetName() []rune { - return p.Name -} - -func (p *PrefixCompleter) GetDynamicNames(line []rune) [][]rune { - var names = [][]rune{} - for _, name := range p.Callback(string(line)) { - names = append(names, []rune(name+" ")) - } - return names -} - -func (p *PrefixCompleter) GetChildren() []PrefixCompleterInterface { - return p.Children -} - -func (p *PrefixCompleter) SetChildren(children []PrefixCompleterInterface) { 
- p.Children = children -} - -func NewPrefixCompleter(pc ...PrefixCompleterInterface) *PrefixCompleter { - return PcItem("", pc...) -} - -func PcItem(name string, pc ...PrefixCompleterInterface) *PrefixCompleter { - name += " " - return &PrefixCompleter{ - Name: []rune(name), - Dynamic: false, - Children: pc, - } -} - -func PcItemDynamic(callback DynamicCompleteFunc, pc ...PrefixCompleterInterface) *PrefixCompleter { - return &PrefixCompleter{ - Callback: callback, - Dynamic: true, - Children: pc, - } -} - -func (p *PrefixCompleter) Do(line []rune, pos int) (newLine [][]rune, offset int) { - return doInternal(p, line, pos, line) -} - -func Do(p PrefixCompleterInterface, line []rune, pos int) (newLine [][]rune, offset int) { - return doInternal(p, line, pos, line) -} - -func doInternal(p PrefixCompleterInterface, line []rune, pos int, origLine []rune) (newLine [][]rune, offset int) { - line = runes.TrimSpaceLeft(line[:pos]) - goNext := false - var lineCompleter PrefixCompleterInterface - for _, child := range p.GetChildren() { - childNames := make([][]rune, 1) - - childDynamic, ok := child.(DynamicPrefixCompleterInterface) - if ok && childDynamic.IsDynamic() { - childNames = childDynamic.GetDynamicNames(origLine) - } else { - childNames[0] = child.GetName() - } - - for _, childName := range childNames { - if len(line) >= len(childName) { - if runes.HasPrefix(line, childName) { - if len(line) == len(childName) { - newLine = append(newLine, []rune{' '}) - } else { - newLine = append(newLine, childName) - } - offset = len(childName) - lineCompleter = child - goNext = true - } - } else { - if runes.HasPrefix(childName, line) { - newLine = append(newLine, childName[len(line):]) - offset = len(line) - lineCompleter = child - } - } - } - } - - if len(newLine) != 1 { - return - } - - tmpLine := make([]rune, 0, len(line)) - for i := offset; i < len(line); i++ { - if line[i] == ' ' { - continue - } - - tmpLine = append(tmpLine, line[i:]...) 
- return doInternal(lineCompleter, tmpLine, len(tmpLine), origLine) - } - - if goNext { - return doInternal(lineCompleter, nil, 0, origLine) - } - return -} diff --git a/vendor/github.com/chzyer/readline/complete_segment.go b/vendor/github.com/chzyer/readline/complete_segment.go deleted file mode 100644 index 5ceadd80..00000000 --- a/vendor/github.com/chzyer/readline/complete_segment.go +++ /dev/null @@ -1,82 +0,0 @@ -package readline - -type SegmentCompleter interface { - // a - // |- a1 - // |--- a11 - // |- a2 - // b - // input: - // DoTree([], 0) [a, b] - // DoTree([a], 1) [a] - // DoTree([a, ], 0) [a1, a2] - // DoTree([a, a], 1) [a1, a2] - // DoTree([a, a1], 2) [a1] - // DoTree([a, a1, ], 0) [a11] - // DoTree([a, a1, a], 1) [a11] - DoSegment([][]rune, int) [][]rune -} - -type dumpSegmentCompleter struct { - f func([][]rune, int) [][]rune -} - -func (d *dumpSegmentCompleter) DoSegment(segment [][]rune, n int) [][]rune { - return d.f(segment, n) -} - -func SegmentFunc(f func([][]rune, int) [][]rune) AutoCompleter { - return &SegmentComplete{&dumpSegmentCompleter{f}} -} - -func SegmentAutoComplete(completer SegmentCompleter) *SegmentComplete { - return &SegmentComplete{ - SegmentCompleter: completer, - } -} - -type SegmentComplete struct { - SegmentCompleter -} - -func RetSegment(segments [][]rune, cands [][]rune, idx int) ([][]rune, int) { - ret := make([][]rune, 0, len(cands)) - lastSegment := segments[len(segments)-1] - for _, cand := range cands { - if !runes.HasPrefix(cand, lastSegment) { - continue - } - ret = append(ret, cand[len(lastSegment):]) - } - return ret, idx -} - -func SplitSegment(line []rune, pos int) ([][]rune, int) { - segs := [][]rune{} - lastIdx := -1 - line = line[:pos] - pos = 0 - for idx, l := range line { - if l == ' ' { - pos = 0 - segs = append(segs, line[lastIdx+1:idx]) - lastIdx = idx - } else { - pos++ - } - } - segs = append(segs, line[lastIdx+1:]) - return segs, pos -} - -func (c *SegmentComplete) Do(line []rune, pos int) (newLine [][]rune, offset int) { - - segment, idx := SplitSegment(line, pos) - - cands := c.DoSegment(segment, idx) - newLine, offset = RetSegment(segment, cands, idx) - for idx := range newLine { - newLine[idx] = append(newLine[idx], ' ') - } - return newLine, offset -} diff --git a/vendor/github.com/chzyer/readline/history.go b/vendor/github.com/chzyer/readline/history.go deleted file mode 100644 index 6b17c464..00000000 --- a/vendor/github.com/chzyer/readline/history.go +++ /dev/null @@ -1,330 +0,0 @@ -package readline - -import ( - "bufio" - "container/list" - "fmt" - "os" - "strings" - "sync" -) - -type hisItem struct { - Source []rune - Version int64 - Tmp []rune -} - -func (h *hisItem) Clean() { - h.Source = nil - h.Tmp = nil -} - -type opHistory struct { - cfg *Config - history *list.List - historyVer int64 - current *list.Element - fd *os.File - fdLock sync.Mutex - enable bool -} - -func newOpHistory(cfg *Config) (o *opHistory) { - o = &opHistory{ - cfg: cfg, - history: list.New(), - enable: true, - } - return o -} - -func (o *opHistory) Reset() { - o.history = list.New() - o.current = nil -} - -func (o *opHistory) IsHistoryClosed() bool { - o.fdLock.Lock() - defer o.fdLock.Unlock() - return o.fd.Fd() == ^(uintptr(0)) -} - -func (o *opHistory) Init() { - if o.IsHistoryClosed() { - o.initHistory() - } -} - -func (o *opHistory) initHistory() { - if o.cfg.HistoryFile != "" { - o.historyUpdatePath(o.cfg.HistoryFile) - } -} - -// only called by newOpHistory -func (o *opHistory) historyUpdatePath(path string) { - o.fdLock.Lock() - 
defer o.fdLock.Unlock() - f, err := os.OpenFile(path, os.O_APPEND|os.O_CREATE|os.O_RDWR, 0666) - if err != nil { - return - } - o.fd = f - r := bufio.NewReader(o.fd) - total := 0 - for ; ; total++ { - line, err := r.ReadString('\n') - if err != nil { - break - } - // ignore the empty line - line = strings.TrimSpace(line) - if len(line) == 0 { - continue - } - o.Push([]rune(line)) - o.Compact() - } - if total > o.cfg.HistoryLimit { - o.rewriteLocked() - } - o.historyVer++ - o.Push(nil) - return -} - -func (o *opHistory) Compact() { - for o.history.Len() > o.cfg.HistoryLimit && o.history.Len() > 0 { - o.history.Remove(o.history.Front()) - } -} - -func (o *opHistory) Rewrite() { - o.fdLock.Lock() - defer o.fdLock.Unlock() - o.rewriteLocked() -} - -func (o *opHistory) rewriteLocked() { - if o.cfg.HistoryFile == "" { - return - } - - tmpFile := o.cfg.HistoryFile + ".tmp" - fd, err := os.OpenFile(tmpFile, os.O_CREATE|os.O_WRONLY|os.O_TRUNC|os.O_APPEND, 0666) - if err != nil { - return - } - - buf := bufio.NewWriter(fd) - for elem := o.history.Front(); elem != nil; elem = elem.Next() { - buf.WriteString(string(elem.Value.(*hisItem).Source) + "\n") - } - buf.Flush() - - // replace history file - if err = os.Rename(tmpFile, o.cfg.HistoryFile); err != nil { - fd.Close() - return - } - - if o.fd != nil { - o.fd.Close() - } - // fd is write only, just satisfy what we need. - o.fd = fd -} - -func (o *opHistory) Close() { - o.fdLock.Lock() - defer o.fdLock.Unlock() - if o.fd != nil { - o.fd.Close() - } -} - -func (o *opHistory) FindBck(isNewSearch bool, rs []rune, start int) (int, *list.Element) { - for elem := o.current; elem != nil; elem = elem.Prev() { - item := o.showItem(elem.Value) - if isNewSearch { - start += len(rs) - } - if elem == o.current { - if len(item) >= start { - item = item[:start] - } - } - idx := runes.IndexAllBckEx(item, rs, o.cfg.HistorySearchFold) - if idx < 0 { - continue - } - return idx, elem - } - return -1, nil -} - -func (o *opHistory) FindFwd(isNewSearch bool, rs []rune, start int) (int, *list.Element) { - for elem := o.current; elem != nil; elem = elem.Next() { - item := o.showItem(elem.Value) - if isNewSearch { - start -= len(rs) - if start < 0 { - start = 0 - } - } - if elem == o.current { - if len(item)-1 >= start { - item = item[start:] - } else { - continue - } - } - idx := runes.IndexAllEx(item, rs, o.cfg.HistorySearchFold) - if idx < 0 { - continue - } - if elem == o.current { - idx += start - } - return idx, elem - } - return -1, nil -} - -func (o *opHistory) showItem(obj interface{}) []rune { - item := obj.(*hisItem) - if item.Version == o.historyVer { - return item.Tmp - } - return item.Source -} - -func (o *opHistory) Prev() []rune { - if o.current == nil { - return nil - } - current := o.current.Prev() - if current == nil { - return nil - } - o.current = current - return runes.Copy(o.showItem(current.Value)) -} - -func (o *opHistory) Next() ([]rune, bool) { - if o.current == nil { - return nil, false - } - current := o.current.Next() - if current == nil { - return nil, false - } - - o.current = current - return runes.Copy(o.showItem(current.Value)), true -} - -// Disable the current history -func (o *opHistory) Disable() { - o.enable = false -} - -// Enable the current history -func (o *opHistory) Enable() { - o.enable = true -} - -func (o *opHistory) debug() { - Debug("-------") - for item := o.history.Front(); item != nil; item = item.Next() { - Debug(fmt.Sprintf("%+v", item.Value)) - } -} - -// save history -func (o *opHistory) New(current []rune) (err 
error) { - - // history deactivated - if !o.enable { - return nil - } - - current = runes.Copy(current) - - // if just use last command without modify - // just clean lastest history - if back := o.history.Back(); back != nil { - prev := back.Prev() - if prev != nil { - if runes.Equal(current, prev.Value.(*hisItem).Source) { - o.current = o.history.Back() - o.current.Value.(*hisItem).Clean() - o.historyVer++ - return nil - } - } - } - - if len(current) == 0 { - o.current = o.history.Back() - if o.current != nil { - o.current.Value.(*hisItem).Clean() - o.historyVer++ - return nil - } - } - - if o.current != o.history.Back() { - // move history item to current command - currentItem := o.current.Value.(*hisItem) - // set current to last item - o.current = o.history.Back() - - current = runes.Copy(currentItem.Tmp) - } - - // err only can be a IO error, just report - err = o.Update(current, true) - - // push a new one to commit current command - o.historyVer++ - o.Push(nil) - return -} - -func (o *opHistory) Revert() { - o.historyVer++ - o.current = o.history.Back() -} - -func (o *opHistory) Update(s []rune, commit bool) (err error) { - o.fdLock.Lock() - defer o.fdLock.Unlock() - s = runes.Copy(s) - if o.current == nil { - o.Push(s) - o.Compact() - return - } - r := o.current.Value.(*hisItem) - r.Version = o.historyVer - if commit { - r.Source = s - if o.fd != nil { - // just report the error - _, err = o.fd.Write([]byte(string(r.Source) + "\n")) - } - } else { - r.Tmp = append(r.Tmp[:0], s...) - } - o.current.Value = r - o.Compact() - return -} - -func (o *opHistory) Push(s []rune) { - s = runes.Copy(s) - elem := o.history.PushBack(&hisItem{Source: s}) - o.current = elem -} diff --git a/vendor/github.com/chzyer/readline/operation.go b/vendor/github.com/chzyer/readline/operation.go deleted file mode 100644 index 4c31624f..00000000 --- a/vendor/github.com/chzyer/readline/operation.go +++ /dev/null @@ -1,531 +0,0 @@ -package readline - -import ( - "errors" - "io" - "sync" -) - -var ( - ErrInterrupt = errors.New("Interrupt") -) - -type InterruptError struct { - Line []rune -} - -func (*InterruptError) Error() string { - return "Interrupted" -} - -type Operation struct { - m sync.Mutex - cfg *Config - t *Terminal - buf *RuneBuffer - outchan chan []rune - errchan chan error - w io.Writer - - history *opHistory - *opSearch - *opCompleter - *opPassword - *opVim -} - -func (o *Operation) SetBuffer(what string) { - o.buf.Set([]rune(what)) -} - -type wrapWriter struct { - r *Operation - t *Terminal - target io.Writer -} - -func (w *wrapWriter) Write(b []byte) (int, error) { - if !w.t.IsReading() { - return w.target.Write(b) - } - - var ( - n int - err error - ) - w.r.buf.Refresh(func() { - n, err = w.target.Write(b) - }) - - if w.r.IsSearchMode() { - w.r.SearchRefresh(-1) - } - if w.r.IsInCompleteMode() { - w.r.CompleteRefresh() - } - return n, err -} - -func NewOperation(t *Terminal, cfg *Config) *Operation { - width := cfg.FuncGetWidth() - op := &Operation{ - t: t, - buf: NewRuneBuffer(t, cfg.Prompt, cfg, width), - outchan: make(chan []rune), - errchan: make(chan error, 1), - } - op.w = op.buf.w - op.SetConfig(cfg) - op.opVim = newVimMode(op) - op.opCompleter = newOpCompleter(op.buf.w, op, width) - op.opPassword = newOpPassword(op) - op.cfg.FuncOnWidthChanged(func() { - newWidth := cfg.FuncGetWidth() - op.opCompleter.OnWidthChange(newWidth) - op.opSearch.OnWidthChange(newWidth) - op.buf.OnWidthChange(newWidth) - }) - go op.ioloop() - return op -} - -func (o *Operation) SetPrompt(s string) { - 
o.buf.SetPrompt(s) -} - -func (o *Operation) SetMaskRune(r rune) { - o.buf.SetMask(r) -} - -func (o *Operation) GetConfig() *Config { - o.m.Lock() - cfg := *o.cfg - o.m.Unlock() - return &cfg -} - -func (o *Operation) ioloop() { - for { - keepInSearchMode := false - keepInCompleteMode := false - r := o.t.ReadRune() - if o.GetConfig().FuncFilterInputRune != nil { - var process bool - r, process = o.GetConfig().FuncFilterInputRune(r) - if !process { - o.buf.Refresh(nil) // to refresh the line - continue // ignore this rune - } - } - - if r == 0 { // io.EOF - if o.buf.Len() == 0 { - o.buf.Clean() - select { - case o.errchan <- io.EOF: - } - break - } else { - // if stdin got io.EOF and there is something left in buffer, - // let's flush them by sending CharEnter. - // And we will got io.EOF int next loop. - r = CharEnter - } - } - isUpdateHistory := true - - if o.IsInCompleteSelectMode() { - keepInCompleteMode = o.HandleCompleteSelect(r) - if keepInCompleteMode { - continue - } - - o.buf.Refresh(nil) - switch r { - case CharEnter, CharCtrlJ: - o.history.Update(o.buf.Runes(), false) - fallthrough - case CharInterrupt: - o.t.KickRead() - fallthrough - case CharBell: - continue - } - } - - if o.IsEnableVimMode() { - r = o.HandleVim(r, o.t.ReadRune) - if r == 0 { - continue - } - } - - switch r { - case CharBell: - if o.IsSearchMode() { - o.ExitSearchMode(true) - o.buf.Refresh(nil) - } - if o.IsInCompleteMode() { - o.ExitCompleteMode(true) - o.buf.Refresh(nil) - } - case CharTab: - if o.GetConfig().AutoComplete == nil { - o.t.Bell() - break - } - if o.OnComplete() { - keepInCompleteMode = true - } else { - o.t.Bell() - break - } - - case CharBckSearch: - if !o.SearchMode(S_DIR_BCK) { - o.t.Bell() - break - } - keepInSearchMode = true - case CharCtrlU: - o.buf.KillFront() - case CharFwdSearch: - if !o.SearchMode(S_DIR_FWD) { - o.t.Bell() - break - } - keepInSearchMode = true - case CharKill: - o.buf.Kill() - keepInCompleteMode = true - case MetaForward: - o.buf.MoveToNextWord() - case CharTranspose: - o.buf.Transpose() - case MetaBackward: - o.buf.MoveToPrevWord() - case MetaDelete: - o.buf.DeleteWord() - case CharLineStart: - o.buf.MoveToLineStart() - case CharLineEnd: - o.buf.MoveToLineEnd() - case CharBackspace, CharCtrlH: - if o.IsSearchMode() { - o.SearchBackspace() - keepInSearchMode = true - break - } - - if o.buf.Len() == 0 { - o.t.Bell() - break - } - o.buf.Backspace() - if o.IsInCompleteMode() { - o.OnComplete() - } - case CharCtrlZ: - o.buf.Clean() - o.t.SleepToResume() - o.Refresh() - case CharCtrlL: - ClearScreen(o.w) - o.Refresh() - case MetaBackspace, CharCtrlW: - o.buf.BackEscapeWord() - case CharCtrlY: - o.buf.Yank() - case CharEnter, CharCtrlJ: - if o.IsSearchMode() { - o.ExitSearchMode(false) - } - o.buf.MoveToLineEnd() - var data []rune - if !o.GetConfig().UniqueEditLine { - o.buf.WriteRune('\n') - data = o.buf.Reset() - data = data[:len(data)-1] // trim \n - } else { - o.buf.Clean() - data = o.buf.Reset() - } - o.outchan <- data - if !o.GetConfig().DisableAutoSaveHistory { - // ignore IO error - _ = o.history.New(data) - } else { - isUpdateHistory = false - } - case CharBackward: - o.buf.MoveBackward() - case CharForward: - o.buf.MoveForward() - case CharPrev: - buf := o.history.Prev() - if buf != nil { - o.buf.Set(buf) - } else { - o.t.Bell() - } - case CharNext: - buf, ok := o.history.Next() - if ok { - o.buf.Set(buf) - } else { - o.t.Bell() - } - case CharDelete: - if o.buf.Len() > 0 || !o.IsNormalMode() { - o.t.KickRead() - if !o.buf.Delete() { - o.t.Bell() - } - break - } 
- - // treat as EOF - if !o.GetConfig().UniqueEditLine { - o.buf.WriteString(o.GetConfig().EOFPrompt + "\n") - } - o.buf.Reset() - isUpdateHistory = false - o.history.Revert() - o.errchan <- io.EOF - if o.GetConfig().UniqueEditLine { - o.buf.Clean() - } - case CharInterrupt: - if o.IsSearchMode() { - o.t.KickRead() - o.ExitSearchMode(true) - break - } - if o.IsInCompleteMode() { - o.t.KickRead() - o.ExitCompleteMode(true) - o.buf.Refresh(nil) - break - } - o.buf.MoveToLineEnd() - o.buf.Refresh(nil) - hint := o.GetConfig().InterruptPrompt + "\n" - if !o.GetConfig().UniqueEditLine { - o.buf.WriteString(hint) - } - remain := o.buf.Reset() - if !o.GetConfig().UniqueEditLine { - remain = remain[:len(remain)-len([]rune(hint))] - } - isUpdateHistory = false - o.history.Revert() - o.errchan <- &InterruptError{remain} - default: - if o.IsSearchMode() { - o.SearchChar(r) - keepInSearchMode = true - break - } - o.buf.WriteRune(r) - if o.IsInCompleteMode() { - o.OnComplete() - keepInCompleteMode = true - } - } - - listener := o.GetConfig().Listener - if listener != nil { - newLine, newPos, ok := listener.OnChange(o.buf.Runes(), o.buf.Pos(), r) - if ok { - o.buf.SetWithIdx(newPos, newLine) - } - } - - o.m.Lock() - if !keepInSearchMode && o.IsSearchMode() { - o.ExitSearchMode(false) - o.buf.Refresh(nil) - } else if o.IsInCompleteMode() { - if !keepInCompleteMode { - o.ExitCompleteMode(false) - o.Refresh() - } else { - o.buf.Refresh(nil) - o.CompleteRefresh() - } - } - if isUpdateHistory && !o.IsSearchMode() { - // it will cause null history - o.history.Update(o.buf.Runes(), false) - } - o.m.Unlock() - } -} - -func (o *Operation) Stderr() io.Writer { - return &wrapWriter{target: o.GetConfig().Stderr, r: o, t: o.t} -} - -func (o *Operation) Stdout() io.Writer { - return &wrapWriter{target: o.GetConfig().Stdout, r: o, t: o.t} -} - -func (o *Operation) String() (string, error) { - r, err := o.Runes() - return string(r), err -} - -func (o *Operation) Runes() ([]rune, error) { - o.t.EnterRawMode() - defer o.t.ExitRawMode() - - listener := o.GetConfig().Listener - if listener != nil { - listener.OnChange(nil, 0, 0) - } - - o.buf.Refresh(nil) // print prompt - o.t.KickRead() - select { - case r := <-o.outchan: - return r, nil - case err := <-o.errchan: - if e, ok := err.(*InterruptError); ok { - return e.Line, ErrInterrupt - } - return nil, err - } -} - -func (o *Operation) PasswordEx(prompt string, l Listener) ([]byte, error) { - cfg := o.GenPasswordConfig() - cfg.Prompt = prompt - cfg.Listener = l - return o.PasswordWithConfig(cfg) -} - -func (o *Operation) GenPasswordConfig() *Config { - return o.opPassword.PasswordConfig() -} - -func (o *Operation) PasswordWithConfig(cfg *Config) ([]byte, error) { - if err := o.opPassword.EnterPasswordMode(cfg); err != nil { - return nil, err - } - defer o.opPassword.ExitPasswordMode() - return o.Slice() -} - -func (o *Operation) Password(prompt string) ([]byte, error) { - return o.PasswordEx(prompt, nil) -} - -func (o *Operation) SetTitle(t string) { - o.w.Write([]byte("\033[2;" + t + "\007")) -} - -func (o *Operation) Slice() ([]byte, error) { - r, err := o.Runes() - if err != nil { - return nil, err - } - return []byte(string(r)), nil -} - -func (o *Operation) Close() { - o.history.Close() -} - -func (o *Operation) SetHistoryPath(path string) { - if o.history != nil { - o.history.Close() - } - o.cfg.HistoryFile = path - o.history = newOpHistory(o.cfg) -} - -func (o *Operation) IsNormalMode() bool { - return !o.IsInCompleteMode() && !o.IsSearchMode() -} - -func (op 
*Operation) SetConfig(cfg *Config) (*Config, error) { - op.m.Lock() - defer op.m.Unlock() - if op.cfg == cfg { - return op.cfg, nil - } - if err := cfg.Init(); err != nil { - return op.cfg, err - } - old := op.cfg - op.cfg = cfg - op.SetPrompt(cfg.Prompt) - op.SetMaskRune(cfg.MaskRune) - op.buf.SetConfig(cfg) - width := op.cfg.FuncGetWidth() - - if cfg.opHistory == nil { - op.SetHistoryPath(cfg.HistoryFile) - cfg.opHistory = op.history - cfg.opSearch = newOpSearch(op.buf.w, op.buf, op.history, cfg, width) - } - op.history = cfg.opHistory - - // SetHistoryPath will close opHistory which already exists - // so if we use it next time, we need to reopen it by `InitHistory()` - op.history.Init() - - if op.cfg.AutoComplete != nil { - op.opCompleter = newOpCompleter(op.buf.w, op, width) - } - - op.opSearch = cfg.opSearch - return old, nil -} - -func (o *Operation) ResetHistory() { - o.history.Reset() -} - -// if err is not nil, it just mean it fail to write to file -// other things goes fine. -func (o *Operation) SaveHistory(content string) error { - return o.history.New([]rune(content)) -} - -func (o *Operation) Refresh() { - if o.t.IsReading() { - o.buf.Refresh(nil) - } -} - -func (o *Operation) Clean() { - o.buf.Clean() -} - -func FuncListener(f func(line []rune, pos int, key rune) (newLine []rune, newPos int, ok bool)) Listener { - return &DumpListener{f: f} -} - -type DumpListener struct { - f func(line []rune, pos int, key rune) (newLine []rune, newPos int, ok bool) -} - -func (d *DumpListener) OnChange(line []rune, pos int, key rune) (newLine []rune, newPos int, ok bool) { - return d.f(line, pos, key) -} - -type Listener interface { - OnChange(line []rune, pos int, key rune) (newLine []rune, newPos int, ok bool) -} - -type Painter interface { - Paint(line []rune, pos int) []rune -} - -type defaultPainter struct{} - -func (p *defaultPainter) Paint(line []rune, _ int) []rune { - return line -} diff --git a/vendor/github.com/chzyer/readline/password.go b/vendor/github.com/chzyer/readline/password.go deleted file mode 100644 index 414288c2..00000000 --- a/vendor/github.com/chzyer/readline/password.go +++ /dev/null @@ -1,33 +0,0 @@ -package readline - -type opPassword struct { - o *Operation - backupCfg *Config -} - -func newOpPassword(o *Operation) *opPassword { - return &opPassword{o: o} -} - -func (o *opPassword) ExitPasswordMode() { - o.o.SetConfig(o.backupCfg) - o.backupCfg = nil -} - -func (o *opPassword) EnterPasswordMode(cfg *Config) (err error) { - o.backupCfg, err = o.o.SetConfig(cfg) - return -} - -func (o *opPassword) PasswordConfig() *Config { - return &Config{ - EnableMask: true, - InterruptPrompt: "\n", - EOFPrompt: "\n", - HistoryLimit: -1, - Painter: &defaultPainter{}, - - Stdout: o.o.cfg.Stdout, - Stderr: o.o.cfg.Stderr, - } -} diff --git a/vendor/github.com/chzyer/readline/rawreader_windows.go b/vendor/github.com/chzyer/readline/rawreader_windows.go deleted file mode 100644 index 073ef150..00000000 --- a/vendor/github.com/chzyer/readline/rawreader_windows.go +++ /dev/null @@ -1,125 +0,0 @@ -// +build windows - -package readline - -import "unsafe" - -const ( - VK_CANCEL = 0x03 - VK_BACK = 0x08 - VK_TAB = 0x09 - VK_RETURN = 0x0D - VK_SHIFT = 0x10 - VK_CONTROL = 0x11 - VK_MENU = 0x12 - VK_ESCAPE = 0x1B - VK_LEFT = 0x25 - VK_UP = 0x26 - VK_RIGHT = 0x27 - VK_DOWN = 0x28 - VK_DELETE = 0x2E - VK_LSHIFT = 0xA0 - VK_RSHIFT = 0xA1 - VK_LCONTROL = 0xA2 - VK_RCONTROL = 0xA3 -) - -// RawReader translate input record to ANSI escape sequence. -// To provides same behavior as unix terminal. 
-type RawReader struct { - ctrlKey bool - altKey bool -} - -func NewRawReader() *RawReader { - r := new(RawReader) - return r -} - -// only process one action in one read -func (r *RawReader) Read(buf []byte) (int, error) { - ir := new(_INPUT_RECORD) - var read int - var err error -next: - err = kernel.ReadConsoleInputW(stdin, - uintptr(unsafe.Pointer(ir)), - 1, - uintptr(unsafe.Pointer(&read)), - ) - if err != nil { - return 0, err - } - if ir.EventType != EVENT_KEY { - goto next - } - ker := (*_KEY_EVENT_RECORD)(unsafe.Pointer(&ir.Event[0])) - if ker.bKeyDown == 0 { // keyup - if r.ctrlKey || r.altKey { - switch ker.wVirtualKeyCode { - case VK_RCONTROL, VK_LCONTROL: - r.ctrlKey = false - case VK_MENU: //alt - r.altKey = false - } - } - goto next - } - - if ker.unicodeChar == 0 { - var target rune - switch ker.wVirtualKeyCode { - case VK_RCONTROL, VK_LCONTROL: - r.ctrlKey = true - case VK_MENU: //alt - r.altKey = true - case VK_LEFT: - target = CharBackward - case VK_RIGHT: - target = CharForward - case VK_UP: - target = CharPrev - case VK_DOWN: - target = CharNext - } - if target != 0 { - return r.write(buf, target) - } - goto next - } - char := rune(ker.unicodeChar) - if r.ctrlKey { - switch char { - case 'A': - char = CharLineStart - case 'E': - char = CharLineEnd - case 'R': - char = CharBckSearch - case 'S': - char = CharFwdSearch - } - } else if r.altKey { - switch char { - case VK_BACK: - char = CharBackspace - } - return r.writeEsc(buf, char) - } - return r.write(buf, char) -} - -func (r *RawReader) writeEsc(b []byte, char rune) (int, error) { - b[0] = '\033' - n := copy(b[1:], []byte(string(char))) - return n + 1, nil -} - -func (r *RawReader) write(b []byte, char rune) (int, error) { - n := copy(b, []byte(string(char))) - return n, nil -} - -func (r *RawReader) Close() error { - return nil -} diff --git a/vendor/github.com/chzyer/readline/readline.go b/vendor/github.com/chzyer/readline/readline.go deleted file mode 100644 index 0e7aca06..00000000 --- a/vendor/github.com/chzyer/readline/readline.go +++ /dev/null @@ -1,326 +0,0 @@ -// Readline is a pure go implementation for GNU-Readline kind library. -// -// example: -// rl, err := readline.New("> ") -// if err != nil { -// panic(err) -// } -// defer rl.Close() -// -// for { -// line, err := rl.Readline() -// if err != nil { // io.EOF -// break -// } -// println(line) -// } -// -package readline - -import "io" - -type Instance struct { - Config *Config - Terminal *Terminal - Operation *Operation -} - -type Config struct { - // prompt supports ANSI escape sequence, so we can color some characters even in windows - Prompt string - - // readline will persist historys to file where HistoryFile specified - HistoryFile string - // specify the max length of historys, it's 500 by default, set it to -1 to disable history - HistoryLimit int - DisableAutoSaveHistory bool - // enable case-insensitive history searching - HistorySearchFold bool - - // AutoCompleter will called once user press TAB - AutoComplete AutoCompleter - - // Any key press will pass to Listener - // NOTE: Listener will be triggered by (nil, 0, 0) immediately - Listener Listener - - Painter Painter - - // If VimMode is true, readline will in vim.insert mode by default - VimMode bool - - InterruptPrompt string - EOFPrompt string - - FuncGetWidth func() int - - Stdin io.ReadCloser - StdinWriter io.Writer - Stdout io.Writer - Stderr io.Writer - - EnableMask bool - MaskRune rune - - // erase the editing line after user submited it - // it use in IM usually. 
- UniqueEditLine bool - - // filter input runes (may be used to disable CtrlZ or for translating some keys to different actions) - // -> output = new (translated) rune and true/false if continue with processing this one - FuncFilterInputRune func(rune) (rune, bool) - - // force use interactive even stdout is not a tty - FuncIsTerminal func() bool - FuncMakeRaw func() error - FuncExitRaw func() error - FuncOnWidthChanged func(func()) - ForceUseInteractive bool - - // private fields - inited bool - opHistory *opHistory - opSearch *opSearch -} - -func (c *Config) useInteractive() bool { - if c.ForceUseInteractive { - return true - } - return c.FuncIsTerminal() -} - -func (c *Config) Init() error { - if c.inited { - return nil - } - c.inited = true - if c.Stdin == nil { - c.Stdin = NewCancelableStdin(Stdin) - } - - c.Stdin, c.StdinWriter = NewFillableStdin(c.Stdin) - - if c.Stdout == nil { - c.Stdout = Stdout - } - if c.Stderr == nil { - c.Stderr = Stderr - } - if c.HistoryLimit == 0 { - c.HistoryLimit = 500 - } - - if c.InterruptPrompt == "" { - c.InterruptPrompt = "^C" - } else if c.InterruptPrompt == "\n" { - c.InterruptPrompt = "" - } - if c.EOFPrompt == "" { - c.EOFPrompt = "^D" - } else if c.EOFPrompt == "\n" { - c.EOFPrompt = "" - } - - if c.AutoComplete == nil { - c.AutoComplete = &TabCompleter{} - } - if c.FuncGetWidth == nil { - c.FuncGetWidth = GetScreenWidth - } - if c.FuncIsTerminal == nil { - c.FuncIsTerminal = DefaultIsTerminal - } - rm := new(RawMode) - if c.FuncMakeRaw == nil { - c.FuncMakeRaw = rm.Enter - } - if c.FuncExitRaw == nil { - c.FuncExitRaw = rm.Exit - } - if c.FuncOnWidthChanged == nil { - c.FuncOnWidthChanged = DefaultOnWidthChanged - } - - return nil -} - -func (c Config) Clone() *Config { - c.opHistory = nil - c.opSearch = nil - return &c -} - -func (c *Config) SetListener(f func(line []rune, pos int, key rune) (newLine []rune, newPos int, ok bool)) { - c.Listener = FuncListener(f) -} - -func (c *Config) SetPainter(p Painter) { - c.Painter = p -} - -func NewEx(cfg *Config) (*Instance, error) { - t, err := NewTerminal(cfg) - if err != nil { - return nil, err - } - rl := t.Readline() - if cfg.Painter == nil { - cfg.Painter = &defaultPainter{} - } - return &Instance{ - Config: cfg, - Terminal: t, - Operation: rl, - }, nil -} - -func New(prompt string) (*Instance, error) { - return NewEx(&Config{Prompt: prompt}) -} - -func (i *Instance) ResetHistory() { - i.Operation.ResetHistory() -} - -func (i *Instance) SetPrompt(s string) { - i.Operation.SetPrompt(s) -} - -func (i *Instance) SetMaskRune(r rune) { - i.Operation.SetMaskRune(r) -} - -// change history persistence in runtime -func (i *Instance) SetHistoryPath(p string) { - i.Operation.SetHistoryPath(p) -} - -// readline will refresh automatic when write through Stdout() -func (i *Instance) Stdout() io.Writer { - return i.Operation.Stdout() -} - -// readline will refresh automatic when write through Stdout() -func (i *Instance) Stderr() io.Writer { - return i.Operation.Stderr() -} - -// switch VimMode in runtime -func (i *Instance) SetVimMode(on bool) { - i.Operation.SetVimMode(on) -} - -func (i *Instance) IsVimMode() bool { - return i.Operation.IsEnableVimMode() -} - -func (i *Instance) GenPasswordConfig() *Config { - return i.Operation.GenPasswordConfig() -} - -// we can generate a config by `i.GenPasswordConfig()` -func (i *Instance) ReadPasswordWithConfig(cfg *Config) ([]byte, error) { - return i.Operation.PasswordWithConfig(cfg) -} - -func (i *Instance) ReadPasswordEx(prompt string, l Listener) ([]byte, error) { 
- return i.Operation.PasswordEx(prompt, l) -} - -func (i *Instance) ReadPassword(prompt string) ([]byte, error) { - return i.Operation.Password(prompt) -} - -type Result struct { - Line string - Error error -} - -func (l *Result) CanContinue() bool { - return len(l.Line) != 0 && l.Error == ErrInterrupt -} - -func (l *Result) CanBreak() bool { - return !l.CanContinue() && l.Error != nil -} - -func (i *Instance) Line() *Result { - ret, err := i.Readline() - return &Result{ret, err} -} - -// err is one of (nil, io.EOF, readline.ErrInterrupt) -func (i *Instance) Readline() (string, error) { - return i.Operation.String() -} - -func (i *Instance) ReadlineWithDefault(what string) (string, error) { - i.Operation.SetBuffer(what) - return i.Operation.String() -} - -func (i *Instance) SaveHistory(content string) error { - return i.Operation.SaveHistory(content) -} - -// same as readline -func (i *Instance) ReadSlice() ([]byte, error) { - return i.Operation.Slice() -} - -// we must make sure that call Close() before process exit. -func (i *Instance) Close() error { - if err := i.Terminal.Close(); err != nil { - return err - } - i.Config.Stdin.Close() - i.Operation.Close() - return nil -} -func (i *Instance) Clean() { - i.Operation.Clean() -} - -func (i *Instance) Write(b []byte) (int, error) { - return i.Stdout().Write(b) -} - -// WriteStdin prefill the next Stdin fetch -// Next time you call ReadLine() this value will be writen before the user input -// ie : -// i := readline.New() -// i.WriteStdin([]byte("test")) -// _, _= i.Readline() -// -// gives -// -// > test[cursor] -func (i *Instance) WriteStdin(val []byte) (int, error) { - return i.Terminal.WriteStdin(val) -} - -func (i *Instance) SetConfig(cfg *Config) *Config { - if i.Config == cfg { - return cfg - } - old := i.Config - i.Config = cfg - i.Operation.SetConfig(cfg) - i.Terminal.SetConfig(cfg) - return old -} - -func (i *Instance) Refresh() { - i.Operation.Refresh() -} - -// HistoryDisable the save of the commands into the history -func (i *Instance) HistoryDisable() { - i.Operation.history.Disable() -} - -// HistoryEnable the save of the commands into the history (default on) -func (i *Instance) HistoryEnable() { - i.Operation.history.Enable() -} diff --git a/vendor/github.com/chzyer/readline/remote.go b/vendor/github.com/chzyer/readline/remote.go deleted file mode 100644 index 74dbf569..00000000 --- a/vendor/github.com/chzyer/readline/remote.go +++ /dev/null @@ -1,475 +0,0 @@ -package readline - -import ( - "bufio" - "bytes" - "encoding/binary" - "fmt" - "io" - "net" - "os" - "sync" - "sync/atomic" -) - -type MsgType int16 - -const ( - T_DATA = MsgType(iota) - T_WIDTH - T_WIDTH_REPORT - T_ISTTY_REPORT - T_RAW - T_ERAW // exit raw - T_EOF -) - -type RemoteSvr struct { - eof int32 - closed int32 - width int32 - reciveChan chan struct{} - writeChan chan *writeCtx - conn net.Conn - isTerminal bool - funcWidthChan func() - stopChan chan struct{} - - dataBufM sync.Mutex - dataBuf bytes.Buffer -} - -type writeReply struct { - n int - err error -} - -type writeCtx struct { - msg *Message - reply chan *writeReply -} - -func newWriteCtx(msg *Message) *writeCtx { - return &writeCtx{ - msg: msg, - reply: make(chan *writeReply), - } -} - -func NewRemoteSvr(conn net.Conn) (*RemoteSvr, error) { - rs := &RemoteSvr{ - width: -1, - conn: conn, - writeChan: make(chan *writeCtx), - reciveChan: make(chan struct{}), - stopChan: make(chan struct{}), - } - buf := bufio.NewReader(rs.conn) - - if err := rs.init(buf); err != nil { - return nil, err - } - - go 
rs.readLoop(buf) - go rs.writeLoop() - return rs, nil -} - -func (r *RemoteSvr) init(buf *bufio.Reader) error { - m, err := ReadMessage(buf) - if err != nil { - return err - } - // receive isTerminal - if m.Type != T_ISTTY_REPORT { - return fmt.Errorf("unexpected init message") - } - r.GotIsTerminal(m.Data) - - // receive width - m, err = ReadMessage(buf) - if err != nil { - return err - } - if m.Type != T_WIDTH_REPORT { - return fmt.Errorf("unexpected init message") - } - r.GotReportWidth(m.Data) - - return nil -} - -func (r *RemoteSvr) HandleConfig(cfg *Config) { - cfg.Stderr = r - cfg.Stdout = r - cfg.Stdin = r - cfg.FuncExitRaw = r.ExitRawMode - cfg.FuncIsTerminal = r.IsTerminal - cfg.FuncMakeRaw = r.EnterRawMode - cfg.FuncExitRaw = r.ExitRawMode - cfg.FuncGetWidth = r.GetWidth - cfg.FuncOnWidthChanged = func(f func()) { - r.funcWidthChan = f - } -} - -func (r *RemoteSvr) IsTerminal() bool { - return r.isTerminal -} - -func (r *RemoteSvr) checkEOF() error { - if atomic.LoadInt32(&r.eof) == 1 { - return io.EOF - } - return nil -} - -func (r *RemoteSvr) Read(b []byte) (int, error) { - r.dataBufM.Lock() - n, err := r.dataBuf.Read(b) - r.dataBufM.Unlock() - if n == 0 { - if err := r.checkEOF(); err != nil { - return 0, err - } - } - - if n == 0 && err == io.EOF { - <-r.reciveChan - r.dataBufM.Lock() - n, err = r.dataBuf.Read(b) - r.dataBufM.Unlock() - } - if n == 0 { - if err := r.checkEOF(); err != nil { - return 0, err - } - } - - return n, err -} - -func (r *RemoteSvr) writeMsg(m *Message) error { - ctx := newWriteCtx(m) - r.writeChan <- ctx - reply := <-ctx.reply - return reply.err -} - -func (r *RemoteSvr) Write(b []byte) (int, error) { - ctx := newWriteCtx(NewMessage(T_DATA, b)) - r.writeChan <- ctx - reply := <-ctx.reply - return reply.n, reply.err -} - -func (r *RemoteSvr) EnterRawMode() error { - return r.writeMsg(NewMessage(T_RAW, nil)) -} - -func (r *RemoteSvr) ExitRawMode() error { - return r.writeMsg(NewMessage(T_ERAW, nil)) -} - -func (r *RemoteSvr) writeLoop() { - defer r.Close() - -loop: - for { - select { - case ctx, ok := <-r.writeChan: - if !ok { - break - } - n, err := ctx.msg.WriteTo(r.conn) - ctx.reply <- &writeReply{n, err} - case <-r.stopChan: - break loop - } - } -} - -func (r *RemoteSvr) Close() error { - if atomic.CompareAndSwapInt32(&r.closed, 0, 1) { - close(r.stopChan) - r.conn.Close() - } - return nil -} - -func (r *RemoteSvr) readLoop(buf *bufio.Reader) { - defer r.Close() - for { - m, err := ReadMessage(buf) - if err != nil { - break - } - switch m.Type { - case T_EOF: - atomic.StoreInt32(&r.eof, 1) - select { - case r.reciveChan <- struct{}{}: - default: - } - case T_DATA: - r.dataBufM.Lock() - r.dataBuf.Write(m.Data) - r.dataBufM.Unlock() - select { - case r.reciveChan <- struct{}{}: - default: - } - case T_WIDTH_REPORT: - r.GotReportWidth(m.Data) - case T_ISTTY_REPORT: - r.GotIsTerminal(m.Data) - } - } -} - -func (r *RemoteSvr) GotIsTerminal(data []byte) { - if binary.BigEndian.Uint16(data) == 0 { - r.isTerminal = false - } else { - r.isTerminal = true - } -} - -func (r *RemoteSvr) GotReportWidth(data []byte) { - atomic.StoreInt32(&r.width, int32(binary.BigEndian.Uint16(data))) - if r.funcWidthChan != nil { - r.funcWidthChan() - } -} - -func (r *RemoteSvr) GetWidth() int { - return int(atomic.LoadInt32(&r.width)) -} - -// ----------------------------------------------------------------------------- - -type Message struct { - Type MsgType - Data []byte -} - -func ReadMessage(r io.Reader) (*Message, error) { - m := new(Message) - var length int32 - if err 
:= binary.Read(r, binary.BigEndian, &length); err != nil { - return nil, err - } - if err := binary.Read(r, binary.BigEndian, &m.Type); err != nil { - return nil, err - } - m.Data = make([]byte, int(length)-2) - if _, err := io.ReadFull(r, m.Data); err != nil { - return nil, err - } - return m, nil -} - -func NewMessage(t MsgType, data []byte) *Message { - return &Message{t, data} -} - -func (m *Message) WriteTo(w io.Writer) (int, error) { - buf := bytes.NewBuffer(make([]byte, 0, len(m.Data)+2+4)) - binary.Write(buf, binary.BigEndian, int32(len(m.Data)+2)) - binary.Write(buf, binary.BigEndian, m.Type) - buf.Write(m.Data) - n, err := buf.WriteTo(w) - return int(n), err -} - -// ----------------------------------------------------------------------------- - -type RemoteCli struct { - conn net.Conn - raw RawMode - receiveChan chan struct{} - inited int32 - isTerminal *bool - - data bytes.Buffer - dataM sync.Mutex -} - -func NewRemoteCli(conn net.Conn) (*RemoteCli, error) { - r := &RemoteCli{ - conn: conn, - receiveChan: make(chan struct{}), - } - return r, nil -} - -func (r *RemoteCli) MarkIsTerminal(is bool) { - r.isTerminal = &is -} - -func (r *RemoteCli) init() error { - if !atomic.CompareAndSwapInt32(&r.inited, 0, 1) { - return nil - } - - if err := r.reportIsTerminal(); err != nil { - return err - } - - if err := r.reportWidth(); err != nil { - return err - } - - // register sig for width changed - DefaultOnWidthChanged(func() { - r.reportWidth() - }) - return nil -} - -func (r *RemoteCli) writeMsg(m *Message) error { - r.dataM.Lock() - _, err := m.WriteTo(r.conn) - r.dataM.Unlock() - return err -} - -func (r *RemoteCli) Write(b []byte) (int, error) { - m := NewMessage(T_DATA, b) - r.dataM.Lock() - _, err := m.WriteTo(r.conn) - r.dataM.Unlock() - return len(b), err -} - -func (r *RemoteCli) reportWidth() error { - screenWidth := GetScreenWidth() - data := make([]byte, 2) - binary.BigEndian.PutUint16(data, uint16(screenWidth)) - msg := NewMessage(T_WIDTH_REPORT, data) - - if err := r.writeMsg(msg); err != nil { - return err - } - return nil -} - -func (r *RemoteCli) reportIsTerminal() error { - var isTerminal bool - if r.isTerminal != nil { - isTerminal = *r.isTerminal - } else { - isTerminal = DefaultIsTerminal() - } - data := make([]byte, 2) - if isTerminal { - binary.BigEndian.PutUint16(data, 1) - } else { - binary.BigEndian.PutUint16(data, 0) - } - msg := NewMessage(T_ISTTY_REPORT, data) - if err := r.writeMsg(msg); err != nil { - return err - } - return nil -} - -func (r *RemoteCli) readLoop() { - buf := bufio.NewReader(r.conn) - for { - msg, err := ReadMessage(buf) - if err != nil { - break - } - switch msg.Type { - case T_ERAW: - r.raw.Exit() - case T_RAW: - r.raw.Enter() - case T_DATA: - os.Stdout.Write(msg.Data) - } - } -} - -func (r *RemoteCli) ServeBy(source io.Reader) error { - if err := r.init(); err != nil { - return err - } - - go func() { - defer r.Close() - for { - n, _ := io.Copy(r, source) - if n == 0 { - break - } - } - }() - defer r.raw.Exit() - r.readLoop() - return nil -} - -func (r *RemoteCli) Close() { - r.writeMsg(NewMessage(T_EOF, nil)) -} - -func (r *RemoteCli) Serve() error { - return r.ServeBy(os.Stdin) -} - -func ListenRemote(n, addr string, cfg *Config, h func(*Instance), onListen ...func(net.Listener) error) error { - ln, err := net.Listen(n, addr) - if err != nil { - return err - } - if len(onListen) > 0 { - if err := onListen[0](ln); err != nil { - return err - } - } - for { - conn, err := ln.Accept() - if err != nil { - break - } - go func() { - defer 
conn.Close() - rl, err := HandleConn(*cfg, conn) - if err != nil { - return - } - h(rl) - }() - } - return nil -} - -func HandleConn(cfg Config, conn net.Conn) (*Instance, error) { - r, err := NewRemoteSvr(conn) - if err != nil { - return nil, err - } - r.HandleConfig(&cfg) - - rl, err := NewEx(&cfg) - if err != nil { - return nil, err - } - return rl, nil -} - -func DialRemote(n, addr string) error { - conn, err := net.Dial(n, addr) - if err != nil { - return err - } - defer conn.Close() - - cli, err := NewRemoteCli(conn) - if err != nil { - return err - } - return cli.Serve() -} diff --git a/vendor/github.com/chzyer/readline/runebuf.go b/vendor/github.com/chzyer/readline/runebuf.go deleted file mode 100644 index 81d2da50..00000000 --- a/vendor/github.com/chzyer/readline/runebuf.go +++ /dev/null @@ -1,629 +0,0 @@ -package readline - -import ( - "bufio" - "bytes" - "io" - "strconv" - "strings" - "sync" -) - -type runeBufferBck struct { - buf []rune - idx int -} - -type RuneBuffer struct { - buf []rune - idx int - prompt []rune - w io.Writer - - hadClean bool - interactive bool - cfg *Config - - width int - - bck *runeBufferBck - - offset string - - lastKill []rune - - sync.Mutex -} - -func (r* RuneBuffer) pushKill(text []rune) { - r.lastKill = append([]rune{}, text...) -} - -func (r *RuneBuffer) OnWidthChange(newWidth int) { - r.Lock() - r.width = newWidth - r.Unlock() -} - -func (r *RuneBuffer) Backup() { - r.Lock() - r.bck = &runeBufferBck{r.buf, r.idx} - r.Unlock() -} - -func (r *RuneBuffer) Restore() { - r.Refresh(func() { - if r.bck == nil { - return - } - r.buf = r.bck.buf - r.idx = r.bck.idx - }) -} - -func NewRuneBuffer(w io.Writer, prompt string, cfg *Config, width int) *RuneBuffer { - rb := &RuneBuffer{ - w: w, - interactive: cfg.useInteractive(), - cfg: cfg, - width: width, - } - rb.SetPrompt(prompt) - return rb -} - -func (r *RuneBuffer) SetConfig(cfg *Config) { - r.Lock() - r.cfg = cfg - r.interactive = cfg.useInteractive() - r.Unlock() -} - -func (r *RuneBuffer) SetMask(m rune) { - r.Lock() - r.cfg.MaskRune = m - r.Unlock() -} - -func (r *RuneBuffer) CurrentWidth(x int) int { - r.Lock() - defer r.Unlock() - return runes.WidthAll(r.buf[:x]) -} - -func (r *RuneBuffer) PromptLen() int { - r.Lock() - width := r.promptLen() - r.Unlock() - return width -} - -func (r *RuneBuffer) promptLen() int { - return runes.WidthAll(runes.ColorFilter(r.prompt)) -} - -func (r *RuneBuffer) RuneSlice(i int) []rune { - r.Lock() - defer r.Unlock() - - if i > 0 { - rs := make([]rune, i) - copy(rs, r.buf[r.idx:r.idx+i]) - return rs - } - rs := make([]rune, -i) - copy(rs, r.buf[r.idx+i:r.idx]) - return rs -} - -func (r *RuneBuffer) Runes() []rune { - r.Lock() - newr := make([]rune, len(r.buf)) - copy(newr, r.buf) - r.Unlock() - return newr -} - -func (r *RuneBuffer) Pos() int { - r.Lock() - defer r.Unlock() - return r.idx -} - -func (r *RuneBuffer) Len() int { - r.Lock() - defer r.Unlock() - return len(r.buf) -} - -func (r *RuneBuffer) MoveToLineStart() { - r.Refresh(func() { - if r.idx == 0 { - return - } - r.idx = 0 - }) -} - -func (r *RuneBuffer) MoveBackward() { - r.Refresh(func() { - if r.idx == 0 { - return - } - r.idx-- - }) -} - -func (r *RuneBuffer) WriteString(s string) { - r.WriteRunes([]rune(s)) -} - -func (r *RuneBuffer) WriteRune(s rune) { - r.WriteRunes([]rune{s}) -} - -func (r *RuneBuffer) WriteRunes(s []rune) { - r.Refresh(func() { - tail := append(s, r.buf[r.idx:]...) - r.buf = append(r.buf[:r.idx], tail...) 
- r.idx += len(s) - }) -} - -func (r *RuneBuffer) MoveForward() { - r.Refresh(func() { - if r.idx == len(r.buf) { - return - } - r.idx++ - }) -} - -func (r *RuneBuffer) IsCursorInEnd() bool { - r.Lock() - defer r.Unlock() - return r.idx == len(r.buf) -} - -func (r *RuneBuffer) Replace(ch rune) { - r.Refresh(func() { - r.buf[r.idx] = ch - }) -} - -func (r *RuneBuffer) Erase() { - r.Refresh(func() { - r.idx = 0 - r.pushKill(r.buf[:]) - r.buf = r.buf[:0] - }) -} - -func (r *RuneBuffer) Delete() (success bool) { - r.Refresh(func() { - if r.idx == len(r.buf) { - return - } - r.pushKill(r.buf[r.idx : r.idx+1]) - r.buf = append(r.buf[:r.idx], r.buf[r.idx+1:]...) - success = true - }) - return -} - -func (r *RuneBuffer) DeleteWord() { - if r.idx == len(r.buf) { - return - } - init := r.idx - for init < len(r.buf) && IsWordBreak(r.buf[init]) { - init++ - } - for i := init + 1; i < len(r.buf); i++ { - if !IsWordBreak(r.buf[i]) && IsWordBreak(r.buf[i-1]) { - r.pushKill(r.buf[r.idx:i-1]) - r.Refresh(func() { - r.buf = append(r.buf[:r.idx], r.buf[i-1:]...) - }) - return - } - } - r.Kill() -} - -func (r *RuneBuffer) MoveToPrevWord() (success bool) { - r.Refresh(func() { - if r.idx == 0 { - return - } - - for i := r.idx - 1; i > 0; i-- { - if !IsWordBreak(r.buf[i]) && IsWordBreak(r.buf[i-1]) { - r.idx = i - success = true - return - } - } - r.idx = 0 - success = true - }) - return -} - -func (r *RuneBuffer) KillFront() { - r.Refresh(func() { - if r.idx == 0 { - return - } - - length := len(r.buf) - r.idx - r.pushKill(r.buf[:r.idx]) - copy(r.buf[:length], r.buf[r.idx:]) - r.idx = 0 - r.buf = r.buf[:length] - }) -} - -func (r *RuneBuffer) Kill() { - r.Refresh(func() { - r.pushKill(r.buf[r.idx:]) - r.buf = r.buf[:r.idx] - }) -} - -func (r *RuneBuffer) Transpose() { - r.Refresh(func() { - if len(r.buf) == 1 { - r.idx++ - } - - if len(r.buf) < 2 { - return - } - - if r.idx == 0 { - r.idx = 1 - } else if r.idx >= len(r.buf) { - r.idx = len(r.buf) - 1 - } - r.buf[r.idx], r.buf[r.idx-1] = r.buf[r.idx-1], r.buf[r.idx] - r.idx++ - }) -} - -func (r *RuneBuffer) MoveToNextWord() { - r.Refresh(func() { - for i := r.idx + 1; i < len(r.buf); i++ { - if !IsWordBreak(r.buf[i]) && IsWordBreak(r.buf[i-1]) { - r.idx = i - return - } - } - - r.idx = len(r.buf) - }) -} - -func (r *RuneBuffer) MoveToEndWord() { - r.Refresh(func() { - // already at the end, so do nothing - if r.idx == len(r.buf) { - return - } - // if we are at the end of a word already, go to next - if !IsWordBreak(r.buf[r.idx]) && IsWordBreak(r.buf[r.idx+1]) { - r.idx++ - } - - // keep going until at the end of a word - for i := r.idx + 1; i < len(r.buf); i++ { - if IsWordBreak(r.buf[i]) && !IsWordBreak(r.buf[i-1]) { - r.idx = i - 1 - return - } - } - r.idx = len(r.buf) - }) -} - -func (r *RuneBuffer) BackEscapeWord() { - r.Refresh(func() { - if r.idx == 0 { - return - } - for i := r.idx - 1; i > 0; i-- { - if !IsWordBreak(r.buf[i]) && IsWordBreak(r.buf[i-1]) { - r.pushKill(r.buf[i:r.idx]) - r.buf = append(r.buf[:i], r.buf[r.idx:]...) - r.idx = i - return - } - } - - r.buf = r.buf[:0] - r.idx = 0 - }) -} - -func (r *RuneBuffer) Yank() { - if len(r.lastKill) == 0 { - return - } - r.Refresh(func() { - buf := make([]rune, 0, len(r.buf) + len(r.lastKill)) - buf = append(buf, r.buf[:r.idx]...) - buf = append(buf, r.lastKill...) - buf = append(buf, r.buf[r.idx:]...) - r.buf = buf - r.idx += len(r.lastKill) - }) -} - -func (r *RuneBuffer) Backspace() { - r.Refresh(func() { - if r.idx == 0 { - return - } - - r.idx-- - r.buf = append(r.buf[:r.idx], r.buf[r.idx+1:]...) 
- }) -} - -func (r *RuneBuffer) MoveToLineEnd() { - r.Refresh(func() { - if r.idx == len(r.buf) { - return - } - - r.idx = len(r.buf) - }) -} - -func (r *RuneBuffer) LineCount(width int) int { - if width == -1 { - width = r.width - } - return LineCount(width, - runes.WidthAll(r.buf)+r.PromptLen()) -} - -func (r *RuneBuffer) MoveTo(ch rune, prevChar, reverse bool) (success bool) { - r.Refresh(func() { - if reverse { - for i := r.idx - 1; i >= 0; i-- { - if r.buf[i] == ch { - r.idx = i - if prevChar { - r.idx++ - } - success = true - return - } - } - return - } - for i := r.idx + 1; i < len(r.buf); i++ { - if r.buf[i] == ch { - r.idx = i - if prevChar { - r.idx-- - } - success = true - return - } - } - }) - return -} - -func (r *RuneBuffer) isInLineEdge() bool { - if isWindows { - return false - } - sp := r.getSplitByLine(r.buf) - return len(sp[len(sp)-1]) == 0 -} - -func (r *RuneBuffer) getSplitByLine(rs []rune) []string { - return SplitByLine(r.promptLen(), r.width, rs) -} - -func (r *RuneBuffer) IdxLine(width int) int { - r.Lock() - defer r.Unlock() - return r.idxLine(width) -} - -func (r *RuneBuffer) idxLine(width int) int { - if width == 0 { - return 0 - } - sp := r.getSplitByLine(r.buf[:r.idx]) - return len(sp) - 1 -} - -func (r *RuneBuffer) CursorLineCount() int { - return r.LineCount(r.width) - r.IdxLine(r.width) -} - -func (r *RuneBuffer) Refresh(f func()) { - r.Lock() - defer r.Unlock() - - if !r.interactive { - if f != nil { - f() - } - return - } - - r.clean() - if f != nil { - f() - } - r.print() -} - -func (r *RuneBuffer) SetOffset(offset string) { - r.Lock() - r.offset = offset - r.Unlock() -} - -func (r *RuneBuffer) print() { - r.w.Write(r.output()) - r.hadClean = false -} - -func (r *RuneBuffer) output() []byte { - buf := bytes.NewBuffer(nil) - buf.WriteString(string(r.prompt)) - if r.cfg.EnableMask && len(r.buf) > 0 { - buf.Write([]byte(strings.Repeat(string(r.cfg.MaskRune), len(r.buf)-1))) - if r.buf[len(r.buf)-1] == '\n' { - buf.Write([]byte{'\n'}) - } else { - buf.Write([]byte(string(r.cfg.MaskRune))) - } - if len(r.buf) > r.idx { - buf.Write(r.getBackspaceSequence()) - } - - } else { - for _, e := range r.cfg.Painter.Paint(r.buf, r.idx) { - if e == '\t' { - buf.WriteString(strings.Repeat(" ", TabWidth)) - } else { - buf.WriteRune(e) - } - } - if r.isInLineEdge() { - buf.Write([]byte(" \b")) - } - } - // cursor position - if len(r.buf) > r.idx { - buf.Write(r.getBackspaceSequence()) - } - return buf.Bytes() -} - -func (r *RuneBuffer) getBackspaceSequence() []byte { - var sep = map[int]bool{} - - var i int - for { - if i >= runes.WidthAll(r.buf) { - break - } - - if i == 0 { - i -= r.promptLen() - } - i += r.width - - sep[i] = true - } - var buf []byte - for i := len(r.buf); i > r.idx; i-- { - // move input to the left of one - buf = append(buf, '\b') - if sep[i] { - // up one line, go to the start of the line and move cursor right to the end (r.width) - buf = append(buf, "\033[A\r"+"\033["+strconv.Itoa(r.width)+"C"...) 
- } - } - - return buf - -} - -func (r *RuneBuffer) Reset() []rune { - ret := runes.Copy(r.buf) - r.buf = r.buf[:0] - r.idx = 0 - return ret -} - -func (r *RuneBuffer) calWidth(m int) int { - if m > 0 { - return runes.WidthAll(r.buf[r.idx : r.idx+m]) - } - return runes.WidthAll(r.buf[r.idx+m : r.idx]) -} - -func (r *RuneBuffer) SetStyle(start, end int, style string) { - if end < start { - panic("end < start") - } - - // goto start - move := start - r.idx - if move > 0 { - r.w.Write([]byte(string(r.buf[r.idx : r.idx+move]))) - } else { - r.w.Write(bytes.Repeat([]byte("\b"), r.calWidth(move))) - } - r.w.Write([]byte("\033[" + style + "m")) - r.w.Write([]byte(string(r.buf[start:end]))) - r.w.Write([]byte("\033[0m")) - // TODO: move back -} - -func (r *RuneBuffer) SetWithIdx(idx int, buf []rune) { - r.Refresh(func() { - r.buf = buf - r.idx = idx - }) -} - -func (r *RuneBuffer) Set(buf []rune) { - r.SetWithIdx(len(buf), buf) -} - -func (r *RuneBuffer) SetPrompt(prompt string) { - r.Lock() - r.prompt = []rune(prompt) - r.Unlock() -} - -func (r *RuneBuffer) cleanOutput(w io.Writer, idxLine int) { - buf := bufio.NewWriter(w) - - if r.width == 0 { - buf.WriteString(strings.Repeat("\r\b", len(r.buf)+r.promptLen())) - buf.Write([]byte("\033[J")) - } else { - buf.Write([]byte("\033[J")) // just like ^k :) - if idxLine == 0 { - buf.WriteString("\033[2K") - buf.WriteString("\r") - } else { - for i := 0; i < idxLine; i++ { - io.WriteString(buf, "\033[2K\r\033[A") - } - io.WriteString(buf, "\033[2K\r") - } - } - buf.Flush() - return -} - -func (r *RuneBuffer) Clean() { - r.Lock() - r.clean() - r.Unlock() -} - -func (r *RuneBuffer) clean() { - r.cleanWithIdxLine(r.idxLine(r.width)) -} - -func (r *RuneBuffer) cleanWithIdxLine(idxLine int) { - if r.hadClean || !r.interactive { - return - } - r.hadClean = true - r.cleanOutput(r.w, idxLine) -} diff --git a/vendor/github.com/chzyer/readline/runes.go b/vendor/github.com/chzyer/readline/runes.go deleted file mode 100644 index a669bc48..00000000 --- a/vendor/github.com/chzyer/readline/runes.go +++ /dev/null @@ -1,223 +0,0 @@ -package readline - -import ( - "bytes" - "unicode" - "unicode/utf8" -) - -var runes = Runes{} -var TabWidth = 4 - -type Runes struct{} - -func (Runes) EqualRune(a, b rune, fold bool) bool { - if a == b { - return true - } - if !fold { - return false - } - if a > b { - a, b = b, a - } - if b < utf8.RuneSelf && 'A' <= a && a <= 'Z' { - if b == a+'a'-'A' { - return true - } - } - return false -} - -func (r Runes) EqualRuneFold(a, b rune) bool { - return r.EqualRune(a, b, true) -} - -func (r Runes) EqualFold(a, b []rune) bool { - if len(a) != len(b) { - return false - } - for i := 0; i < len(a); i++ { - if r.EqualRuneFold(a[i], b[i]) { - continue - } - return false - } - - return true -} - -func (Runes) Equal(a, b []rune) bool { - if len(a) != len(b) { - return false - } - for i := 0; i < len(a); i++ { - if a[i] != b[i] { - return false - } - } - return true -} - -func (rs Runes) IndexAllBckEx(r, sub []rune, fold bool) int { - for i := len(r) - len(sub); i >= 0; i-- { - found := true - for j := 0; j < len(sub); j++ { - if !rs.EqualRune(r[i+j], sub[j], fold) { - found = false - break - } - } - if found { - return i - } - } - return -1 -} - -// Search in runes from end to front -func (rs Runes) IndexAllBck(r, sub []rune) int { - return rs.IndexAllBckEx(r, sub, false) -} - -// Search in runes from front to end -func (rs Runes) IndexAll(r, sub []rune) int { - return rs.IndexAllEx(r, sub, false) -} - -func (rs Runes) IndexAllEx(r, sub []rune, fold 
bool) int { - for i := 0; i < len(r); i++ { - found := true - if len(r[i:]) < len(sub) { - return -1 - } - for j := 0; j < len(sub); j++ { - if !rs.EqualRune(r[i+j], sub[j], fold) { - found = false - break - } - } - if found { - return i - } - } - return -1 -} - -func (Runes) Index(r rune, rs []rune) int { - for i := 0; i < len(rs); i++ { - if rs[i] == r { - return i - } - } - return -1 -} - -func (Runes) ColorFilter(r []rune) []rune { - newr := make([]rune, 0, len(r)) - for pos := 0; pos < len(r); pos++ { - if r[pos] == '\033' && r[pos+1] == '[' { - idx := runes.Index('m', r[pos+2:]) - if idx == -1 { - continue - } - pos += idx + 2 - continue - } - newr = append(newr, r[pos]) - } - return newr -} - -var zeroWidth = []*unicode.RangeTable{ - unicode.Mn, - unicode.Me, - unicode.Cc, - unicode.Cf, -} - -var doubleWidth = []*unicode.RangeTable{ - unicode.Han, - unicode.Hangul, - unicode.Hiragana, - unicode.Katakana, -} - -func (Runes) Width(r rune) int { - if r == '\t' { - return TabWidth - } - if unicode.IsOneOf(zeroWidth, r) { - return 0 - } - if unicode.IsOneOf(doubleWidth, r) { - return 2 - } - return 1 -} - -func (Runes) WidthAll(r []rune) (length int) { - for i := 0; i < len(r); i++ { - length += runes.Width(r[i]) - } - return -} - -func (Runes) Backspace(r []rune) []byte { - return bytes.Repeat([]byte{'\b'}, runes.WidthAll(r)) -} - -func (Runes) Copy(r []rune) []rune { - n := make([]rune, len(r)) - copy(n, r) - return n -} - -func (Runes) HasPrefixFold(r, prefix []rune) bool { - if len(r) < len(prefix) { - return false - } - return runes.EqualFold(r[:len(prefix)], prefix) -} - -func (Runes) HasPrefix(r, prefix []rune) bool { - if len(r) < len(prefix) { - return false - } - return runes.Equal(r[:len(prefix)], prefix) -} - -func (Runes) Aggregate(candicate [][]rune) (same []rune, size int) { - for i := 0; i < len(candicate[0]); i++ { - for j := 0; j < len(candicate)-1; j++ { - if i >= len(candicate[j]) || i >= len(candicate[j+1]) { - goto aggregate - } - if candicate[j][i] != candicate[j+1][i] { - goto aggregate - } - } - size = i + 1 - } -aggregate: - if size > 0 { - same = runes.Copy(candicate[0][:size]) - for i := 0; i < len(candicate); i++ { - n := runes.Copy(candicate[i]) - copy(n, n[size:]) - candicate[i] = n[:len(n)-size] - } - } - return -} - -func (Runes) TrimSpaceLeft(in []rune) []rune { - firstIndex := len(in) - for i, r := range in { - if unicode.IsSpace(r) == false { - firstIndex = i - break - } - } - return in[firstIndex:] -} diff --git a/vendor/github.com/chzyer/readline/search.go b/vendor/github.com/chzyer/readline/search.go deleted file mode 100644 index 52e8ff09..00000000 --- a/vendor/github.com/chzyer/readline/search.go +++ /dev/null @@ -1,164 +0,0 @@ -package readline - -import ( - "bytes" - "container/list" - "fmt" - "io" -) - -const ( - S_STATE_FOUND = iota - S_STATE_FAILING -) - -const ( - S_DIR_BCK = iota - S_DIR_FWD -) - -type opSearch struct { - inMode bool - state int - dir int - source *list.Element - w io.Writer - buf *RuneBuffer - data []rune - history *opHistory - cfg *Config - markStart int - markEnd int - width int -} - -func newOpSearch(w io.Writer, buf *RuneBuffer, history *opHistory, cfg *Config, width int) *opSearch { - return &opSearch{ - w: w, - buf: buf, - cfg: cfg, - history: history, - width: width, - } -} - -func (o *opSearch) OnWidthChange(newWidth int) { - o.width = newWidth -} - -func (o *opSearch) IsSearchMode() bool { - return o.inMode -} - -func (o *opSearch) SearchBackspace() { - if len(o.data) > 0 { - o.data = o.data[:len(o.data)-1] - 
o.search(true) - } -} - -func (o *opSearch) findHistoryBy(isNewSearch bool) (int, *list.Element) { - if o.dir == S_DIR_BCK { - return o.history.FindBck(isNewSearch, o.data, o.buf.idx) - } - return o.history.FindFwd(isNewSearch, o.data, o.buf.idx) -} - -func (o *opSearch) search(isChange bool) bool { - if len(o.data) == 0 { - o.state = S_STATE_FOUND - o.SearchRefresh(-1) - return true - } - idx, elem := o.findHistoryBy(isChange) - if elem == nil { - o.SearchRefresh(-2) - return false - } - o.history.current = elem - - item := o.history.showItem(o.history.current.Value) - start, end := 0, 0 - if o.dir == S_DIR_BCK { - start, end = idx, idx+len(o.data) - } else { - start, end = idx, idx+len(o.data) - idx += len(o.data) - } - o.buf.SetWithIdx(idx, item) - o.markStart, o.markEnd = start, end - o.SearchRefresh(idx) - return true -} - -func (o *opSearch) SearchChar(r rune) { - o.data = append(o.data, r) - o.search(true) -} - -func (o *opSearch) SearchMode(dir int) bool { - if o.width == 0 { - return false - } - alreadyInMode := o.inMode - o.inMode = true - o.dir = dir - o.source = o.history.current - if alreadyInMode { - o.search(false) - } else { - o.SearchRefresh(-1) - } - return true -} - -func (o *opSearch) ExitSearchMode(revert bool) { - if revert { - o.history.current = o.source - o.buf.Set(o.history.showItem(o.history.current.Value)) - } - o.markStart, o.markEnd = 0, 0 - o.state = S_STATE_FOUND - o.inMode = false - o.source = nil - o.data = nil -} - -func (o *opSearch) SearchRefresh(x int) { - if x == -2 { - o.state = S_STATE_FAILING - } else if x >= 0 { - o.state = S_STATE_FOUND - } - if x < 0 { - x = o.buf.idx - } - x = o.buf.CurrentWidth(x) - x += o.buf.PromptLen() - x = x % o.width - - if o.markStart > 0 { - o.buf.SetStyle(o.markStart, o.markEnd, "4") - } - - lineCnt := o.buf.CursorLineCount() - buf := bytes.NewBuffer(nil) - buf.Write(bytes.Repeat([]byte("\n"), lineCnt)) - buf.WriteString("\033[J") - if o.state == S_STATE_FAILING { - buf.WriteString("failing ") - } - if o.dir == S_DIR_BCK { - buf.WriteString("bck") - } else if o.dir == S_DIR_FWD { - buf.WriteString("fwd") - } - buf.WriteString("-i-search: ") - buf.WriteString(string(o.data)) // keyword - buf.WriteString("\033[4m \033[0m") // _ - fmt.Fprintf(buf, "\r\033[%dA", lineCnt) // move prev - if x > 0 { - fmt.Fprintf(buf, "\033[%dC", x) // move forward - } - o.w.Write(buf.Bytes()) -} diff --git a/vendor/github.com/chzyer/readline/std.go b/vendor/github.com/chzyer/readline/std.go deleted file mode 100644 index 61d44b75..00000000 --- a/vendor/github.com/chzyer/readline/std.go +++ /dev/null @@ -1,197 +0,0 @@ -package readline - -import ( - "io" - "os" - "sync" - "sync/atomic" -) - -var ( - Stdin io.ReadCloser = os.Stdin - Stdout io.WriteCloser = os.Stdout - Stderr io.WriteCloser = os.Stderr -) - -var ( - std *Instance - stdOnce sync.Once -) - -// global instance will not submit history automatic -func getInstance() *Instance { - stdOnce.Do(func() { - std, _ = NewEx(&Config{ - DisableAutoSaveHistory: true, - }) - }) - return std -} - -// let readline load history from filepath -// and try to persist history into disk -// set fp to "" to prevent readline persisting history to disk -// so the `AddHistory` will return nil error forever. 
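// A minimal usage sketch of the package-level helpers below (the history
// path and prompt are illustrative, not part of the original source):
//
//	SetHistoryPath("/tmp/readline.history")
//	line, err := Line("> ")
//	if err == nil {
//		_ = AddHistory(line)
//	}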
-func SetHistoryPath(fp string) { - ins := getInstance() - cfg := ins.Config.Clone() - cfg.HistoryFile = fp - ins.SetConfig(cfg) -} - -// set auto completer to global instance -func SetAutoComplete(completer AutoCompleter) { - ins := getInstance() - cfg := ins.Config.Clone() - cfg.AutoComplete = completer - ins.SetConfig(cfg) -} - -// add history to global instance manually -// raise error only if `SetHistoryPath` is set with a non-empty path -func AddHistory(content string) error { - ins := getInstance() - return ins.SaveHistory(content) -} - -func Password(prompt string) ([]byte, error) { - ins := getInstance() - return ins.ReadPassword(prompt) -} - -// readline with global configs -func Line(prompt string) (string, error) { - ins := getInstance() - ins.SetPrompt(prompt) - return ins.Readline() -} - -type CancelableStdin struct { - r io.Reader - mutex sync.Mutex - stop chan struct{} - closed int32 - notify chan struct{} - data []byte - read int - err error -} - -func NewCancelableStdin(r io.Reader) *CancelableStdin { - c := &CancelableStdin{ - r: r, - notify: make(chan struct{}), - stop: make(chan struct{}), - } - go c.ioloop() - return c -} - -func (c *CancelableStdin) ioloop() { -loop: - for { - select { - case <-c.notify: - c.read, c.err = c.r.Read(c.data) - select { - case c.notify <- struct{}{}: - case <-c.stop: - break loop - } - case <-c.stop: - break loop - } - } -} - -func (c *CancelableStdin) Read(b []byte) (n int, err error) { - c.mutex.Lock() - defer c.mutex.Unlock() - if atomic.LoadInt32(&c.closed) == 1 { - return 0, io.EOF - } - - c.data = b - select { - case c.notify <- struct{}{}: - case <-c.stop: - return 0, io.EOF - } - select { - case <-c.notify: - return c.read, c.err - case <-c.stop: - return 0, io.EOF - } -} - -func (c *CancelableStdin) Close() error { - if atomic.CompareAndSwapInt32(&c.closed, 0, 1) { - close(c.stop) - } - return nil -} - -// FillableStdin is a stdin reader which can prepend some data before -// reading into the real stdin -type FillableStdin struct { - sync.Mutex - stdin io.Reader - stdinBuffer io.ReadCloser - buf []byte - bufErr error -} - -// NewFillableStdin gives you FillableStdin -func NewFillableStdin(stdin io.Reader) (io.ReadCloser, io.Writer) { - r, w := io.Pipe() - s := &FillableStdin{ - stdinBuffer: r, - stdin: stdin, - } - s.ioloop() - return s, w -} - -func (s *FillableStdin) ioloop() { - go func() { - for { - bufR := make([]byte, 100) - var n int - n, s.bufErr = s.stdinBuffer.Read(bufR) - if s.bufErr != nil { - if s.bufErr == io.ErrClosedPipe { - break - } - } - s.Lock() - s.buf = append(s.buf, bufR[:n]...) 
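// Bytes accumulated in s.buf here are served by the next Read call
// before it falls back to the real stdin (see Read below).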
- s.Unlock() - } - }() -} - -// Read will read from the local buffer and if no data, read from stdin -func (s *FillableStdin) Read(p []byte) (n int, err error) { - s.Lock() - i := len(s.buf) - if len(p) < i { - i = len(p) - } - if i > 0 { - n := copy(p, s.buf) - s.buf = s.buf[:0] - cerr := s.bufErr - s.bufErr = nil - s.Unlock() - return n, cerr - } - s.Unlock() - n, err = s.stdin.Read(p) - return n, err -} - -func (s *FillableStdin) Close() error { - s.stdinBuffer.Close() - return nil -} diff --git a/vendor/github.com/chzyer/readline/std_windows.go b/vendor/github.com/chzyer/readline/std_windows.go deleted file mode 100644 index b10f91bc..00000000 --- a/vendor/github.com/chzyer/readline/std_windows.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build windows - -package readline - -func init() { - Stdin = NewRawReader() - Stdout = NewANSIWriter(Stdout) - Stderr = NewANSIWriter(Stderr) -} diff --git a/vendor/github.com/chzyer/readline/term.go b/vendor/github.com/chzyer/readline/term.go deleted file mode 100644 index 133993ca..00000000 --- a/vendor/github.com/chzyer/readline/term.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build darwin dragonfly freebsd linux,!appengine netbsd openbsd solaris - -// Package terminal provides support functions for dealing with terminals, as -// commonly found on UNIX systems. -// -// Putting a terminal into raw mode is the most common requirement: -// -// oldState, err := terminal.MakeRaw(0) -// if err != nil { -// panic(err) -// } -// defer terminal.Restore(0, oldState) -package readline - -import ( - "io" - "syscall" -) - -// State contains the state of a terminal. -type State struct { - termios Termios -} - -// IsTerminal returns true if the given file descriptor is a terminal. -func IsTerminal(fd int) bool { - _, err := getTermios(fd) - return err == nil -} - -// MakeRaw put the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. -func MakeRaw(fd int) (*State, error) { - var oldState State - - if termios, err := getTermios(fd); err != nil { - return nil, err - } else { - oldState.termios = *termios - } - - newState := oldState.termios - // This attempts to replicate the behaviour documented for cfmakeraw in - // the termios(3) manpage. - newState.Iflag &^= syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON - // newState.Oflag &^= syscall.OPOST - newState.Lflag &^= syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN - newState.Cflag &^= syscall.CSIZE | syscall.PARENB - newState.Cflag |= syscall.CS8 - - newState.Cc[syscall.VMIN] = 1 - newState.Cc[syscall.VTIME] = 0 - - return &oldState, setTermios(fd, &newState) -} - -// GetState returns the current state of a terminal which may be useful to -// restore the terminal after a signal. -func GetState(fd int) (*State, error) { - termios, err := getTermios(fd) - if err != nil { - return nil, err - } - - return &State{termios: *termios}, nil -} - -// Restore restores the terminal connected to the given file descriptor to a -// previous state. -func restoreTerm(fd int, state *State) error { - return setTermios(fd, &state.termios) -} - -// ReadPassword reads a line of input from a terminal without local echo. 
This -// is commonly used for inputting passwords and other sensitive data. The slice -// returned does not include the \n. -func ReadPassword(fd int) ([]byte, error) { - oldState, err := getTermios(fd) - if err != nil { - return nil, err - } - - newState := oldState - newState.Lflag &^= syscall.ECHO - newState.Lflag |= syscall.ICANON | syscall.ISIG - newState.Iflag |= syscall.ICRNL - if err := setTermios(fd, newState); err != nil { - return nil, err - } - - defer func() { - setTermios(fd, oldState) - }() - - var buf [16]byte - var ret []byte - for { - n, err := syscall.Read(fd, buf[:]) - if err != nil { - return nil, err - } - if n == 0 { - if len(ret) == 0 { - return nil, io.EOF - } - break - } - if buf[n-1] == '\n' { - n-- - } - ret = append(ret, buf[:n]...) - if n < len(buf) { - break - } - } - - return ret, nil -} diff --git a/vendor/github.com/chzyer/readline/term_bsd.go b/vendor/github.com/chzyer/readline/term_bsd.go deleted file mode 100644 index 68b56ea6..00000000 --- a/vendor/github.com/chzyer/readline/term_bsd.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build darwin dragonfly freebsd netbsd openbsd - -package readline - -import ( - "syscall" - "unsafe" -) - -func getTermios(fd int) (*Termios, error) { - termios := new(Termios) - _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), syscall.TIOCGETA, uintptr(unsafe.Pointer(termios)), 0, 0, 0) - if err != 0 { - return nil, err - } - return termios, nil -} - -func setTermios(fd int, termios *Termios) error { - _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), syscall.TIOCSETA, uintptr(unsafe.Pointer(termios)), 0, 0, 0) - if err != 0 { - return err - } - return nil -} diff --git a/vendor/github.com/chzyer/readline/term_linux.go b/vendor/github.com/chzyer/readline/term_linux.go deleted file mode 100644 index e3392b4a..00000000 --- a/vendor/github.com/chzyer/readline/term_linux.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package readline - -import ( - "syscall" - "unsafe" -) - -// These constants are declared here, rather than importing -// them from the syscall package as some syscall packages, even -// on linux, for example gccgo, do not declare them. -const ioctlReadTermios = 0x5401 // syscall.TCGETS -const ioctlWriteTermios = 0x5402 // syscall.TCSETS - -func getTermios(fd int) (*Termios, error) { - termios := new(Termios) - _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(termios)), 0, 0, 0) - if err != 0 { - return nil, err - } - return termios, nil -} - -func setTermios(fd int, termios *Termios) error { - _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(termios)), 0, 0, 0) - if err != 0 { - return err - } - return nil -} diff --git a/vendor/github.com/chzyer/readline/term_solaris.go b/vendor/github.com/chzyer/readline/term_solaris.go deleted file mode 100644 index 4c27273c..00000000 --- a/vendor/github.com/chzyer/readline/term_solaris.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build solaris - -package readline - -import "golang.org/x/sys/unix" - -// GetSize returns the dimensions of the given terminal. -func GetSize(fd int) (int, int, error) { - ws, err := unix.IoctlGetWinsize(fd, unix.TIOCGWINSZ) - if err != nil { - return 0, 0, err - } - return int(ws.Col), int(ws.Row), nil -} - -type Termios unix.Termios - -func getTermios(fd int) (*Termios, error) { - termios, err := unix.IoctlGetTermios(fd, unix.TCGETS) - if err != nil { - return nil, err - } - return (*Termios)(termios), nil -} - -func setTermios(fd int, termios *Termios) error { - return unix.IoctlSetTermios(fd, unix.TCSETSF, (*unix.Termios)(termios)) -} diff --git a/vendor/github.com/chzyer/readline/term_unix.go b/vendor/github.com/chzyer/readline/term_unix.go deleted file mode 100644 index d3ea2424..00000000 --- a/vendor/github.com/chzyer/readline/term_unix.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build darwin dragonfly freebsd linux,!appengine netbsd openbsd - -package readline - -import ( - "syscall" - "unsafe" -) - -type Termios syscall.Termios - -// GetSize returns the dimensions of the given terminal. -func GetSize(fd int) (int, int, error) { - var dimensions [4]uint16 - _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(&dimensions)), 0, 0, 0) - if err != 0 { - return 0, 0, err - } - return int(dimensions[1]), int(dimensions[0]), nil -} diff --git a/vendor/github.com/chzyer/readline/term_windows.go b/vendor/github.com/chzyer/readline/term_windows.go deleted file mode 100644 index 1290e00b..00000000 --- a/vendor/github.com/chzyer/readline/term_windows.go +++ /dev/null @@ -1,171 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build windows - -// Package terminal provides support functions for dealing with terminals, as -// commonly found on UNIX systems. -// -// Putting a terminal into raw mode is the most common requirement: -// -// oldState, err := terminal.MakeRaw(0) -// if err != nil { -// panic(err) -// } -// defer terminal.Restore(0, oldState) -package readline - -import ( - "io" - "syscall" - "unsafe" -) - -const ( - enableLineInput = 2 - enableEchoInput = 4 - enableProcessedInput = 1 - enableWindowInput = 8 - enableMouseInput = 16 - enableInsertMode = 32 - enableQuickEditMode = 64 - enableExtendedFlags = 128 - enableAutoPosition = 256 - enableProcessedOutput = 1 - enableWrapAtEolOutput = 2 -) - -var kernel32 = syscall.NewLazyDLL("kernel32.dll") - -var ( - procGetConsoleMode = kernel32.NewProc("GetConsoleMode") - procSetConsoleMode = kernel32.NewProc("SetConsoleMode") - procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo") -) - -type ( - coord struct { - x short - y short - } - smallRect struct { - left short - top short - right short - bottom short - } - consoleScreenBufferInfo struct { - size coord - cursorPosition coord - attributes word - window smallRect - maximumWindowSize coord - } -) - -type State struct { - mode uint32 -} - -// IsTerminal returns true if the given file descriptor is a terminal. 
-func IsTerminal(fd int) bool { - var st uint32 - r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) - return r != 0 && e == 0 -} - -// MakeRaw put the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. -func MakeRaw(fd int) (*State, error) { - var st uint32 - _, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) - if e != 0 { - return nil, error(e) - } - raw := st &^ (enableEchoInput | enableProcessedInput | enableLineInput | enableProcessedOutput) - _, _, e = syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(raw), 0) - if e != 0 { - return nil, error(e) - } - return &State{st}, nil -} - -// GetState returns the current state of a terminal which may be useful to -// restore the terminal after a signal. -func GetState(fd int) (*State, error) { - var st uint32 - _, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) - if e != 0 { - return nil, error(e) - } - return &State{st}, nil -} - -// Restore restores the terminal connected to the given file descriptor to a -// previous state. -func restoreTerm(fd int, state *State) error { - _, _, err := syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(state.mode), 0) - return err -} - -// GetSize returns the dimensions of the given terminal. -func GetSize(fd int) (width, height int, err error) { - var info consoleScreenBufferInfo - _, _, e := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&info)), 0) - if e != 0 { - return 0, 0, error(e) - } - return int(info.size.x), int(info.size.y), nil -} - -// ReadPassword reads a line of input from a terminal without local echo. This -// is commonly used for inputting passwords and other sensitive data. The slice -// returned does not include the \n. -func ReadPassword(fd int) ([]byte, error) { - var st uint32 - _, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) - if e != 0 { - return nil, error(e) - } - old := st - - st &^= (enableEchoInput) - st |= (enableProcessedInput | enableLineInput | enableProcessedOutput) - _, _, e = syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(st), 0) - if e != 0 { - return nil, error(e) - } - - defer func() { - syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(old), 0) - }() - - var buf [16]byte - var ret []byte - for { - n, err := syscall.Read(syscall.Handle(fd), buf[:]) - if err != nil { - return nil, err - } - if n == 0 { - if len(ret) == 0 { - return nil, io.EOF - } - break - } - if buf[n-1] == '\n' { - n-- - } - if n > 0 && buf[n-1] == '\r' { - n-- - } - ret = append(ret, buf[:n]...) 
- if n < len(buf) { - break - } - } - - return ret, nil -} diff --git a/vendor/github.com/chzyer/readline/terminal.go b/vendor/github.com/chzyer/readline/terminal.go deleted file mode 100644 index 1078631c..00000000 --- a/vendor/github.com/chzyer/readline/terminal.go +++ /dev/null @@ -1,238 +0,0 @@ -package readline - -import ( - "bufio" - "fmt" - "io" - "strings" - "sync" - "sync/atomic" -) - -type Terminal struct { - m sync.Mutex - cfg *Config - outchan chan rune - closed int32 - stopChan chan struct{} - kickChan chan struct{} - wg sync.WaitGroup - isReading int32 - sleeping int32 - - sizeChan chan string -} - -func NewTerminal(cfg *Config) (*Terminal, error) { - if err := cfg.Init(); err != nil { - return nil, err - } - t := &Terminal{ - cfg: cfg, - kickChan: make(chan struct{}, 1), - outchan: make(chan rune), - stopChan: make(chan struct{}, 1), - sizeChan: make(chan string, 1), - } - - go t.ioloop() - return t, nil -} - -// SleepToResume will sleep myself, and return only if I'm resumed. -func (t *Terminal) SleepToResume() { - if !atomic.CompareAndSwapInt32(&t.sleeping, 0, 1) { - return - } - defer atomic.StoreInt32(&t.sleeping, 0) - - t.ExitRawMode() - ch := WaitForResume() - SuspendMe() - <-ch - t.EnterRawMode() -} - -func (t *Terminal) EnterRawMode() (err error) { - return t.cfg.FuncMakeRaw() -} - -func (t *Terminal) ExitRawMode() (err error) { - return t.cfg.FuncExitRaw() -} - -func (t *Terminal) Write(b []byte) (int, error) { - return t.cfg.Stdout.Write(b) -} - -// WriteStdin prefill the next Stdin fetch -// Next time you call ReadLine() this value will be writen before the user input -func (t *Terminal) WriteStdin(b []byte) (int, error) { - return t.cfg.StdinWriter.Write(b) -} - -type termSize struct { - left int - top int -} - -func (t *Terminal) GetOffset(f func(offset string)) { - go func() { - f(<-t.sizeChan) - }() - t.Write([]byte("\033[6n")) -} - -func (t *Terminal) Print(s string) { - fmt.Fprintf(t.cfg.Stdout, "%s", s) -} - -func (t *Terminal) PrintRune(r rune) { - fmt.Fprintf(t.cfg.Stdout, "%c", r) -} - -func (t *Terminal) Readline() *Operation { - return NewOperation(t, t.cfg) -} - -// return rune(0) if meet EOF -func (t *Terminal) ReadRune() rune { - ch, ok := <-t.outchan - if !ok { - return rune(0) - } - return ch -} - -func (t *Terminal) IsReading() bool { - return atomic.LoadInt32(&t.isReading) == 1 -} - -func (t *Terminal) KickRead() { - select { - case t.kickChan <- struct{}{}: - default: - } -} - -func (t *Terminal) ioloop() { - t.wg.Add(1) - defer func() { - t.wg.Done() - close(t.outchan) - }() - - var ( - isEscape bool - isEscapeEx bool - expectNextChar bool - ) - - buf := bufio.NewReader(t.getStdin()) - for { - if !expectNextChar { - atomic.StoreInt32(&t.isReading, 0) - select { - case <-t.kickChan: - atomic.StoreInt32(&t.isReading, 1) - case <-t.stopChan: - return - } - } - expectNextChar = false - r, _, err := buf.ReadRune() - if err != nil { - if strings.Contains(err.Error(), "interrupted system call") { - expectNextChar = true - continue - } - break - } - - if isEscape { - isEscape = false - if r == CharEscapeEx { - expectNextChar = true - isEscapeEx = true - continue - } - r = escapeKey(r, buf) - } else if isEscapeEx { - isEscapeEx = false - if key := readEscKey(r, buf); key != nil { - r = escapeExKey(key) - // offset - if key.typ == 'R' { - if _, _, ok := key.Get2(); ok { - select { - case t.sizeChan <- key.attr: - default: - } - } - expectNextChar = true - continue - } - } - if r == 0 { - expectNextChar = true - continue - } - } - - expectNextChar = true 
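// At this point r is a fully decoded key: escape sequences have already
// been translated into Char*/Meta* codes above. Interrupt, enter, ctrl-J
// and delete clear expectNextChar again, so after sending them the loop
// parks on kickChan until the next read is kicked off; every other rune
// keeps the reader hot for the following keystroke.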
- switch r { - case CharEsc: - if t.cfg.VimMode { - t.outchan <- r - break - } - isEscape = true - case CharInterrupt, CharEnter, CharCtrlJ, CharDelete: - expectNextChar = false - fallthrough - default: - t.outchan <- r - } - } - -} - -func (t *Terminal) Bell() { - fmt.Fprintf(t, "%c", CharBell) -} - -func (t *Terminal) Close() error { - if atomic.SwapInt32(&t.closed, 1) != 0 { - return nil - } - if closer, ok := t.cfg.Stdin.(io.Closer); ok { - closer.Close() - } - close(t.stopChan) - t.wg.Wait() - return t.ExitRawMode() -} - -func (t *Terminal) GetConfig() *Config { - t.m.Lock() - cfg := *t.cfg - t.m.Unlock() - return &cfg -} - -func (t *Terminal) getStdin() io.Reader { - t.m.Lock() - r := t.cfg.Stdin - t.m.Unlock() - return r -} - -func (t *Terminal) SetConfig(c *Config) error { - if err := c.Init(); err != nil { - return err - } - t.m.Lock() - t.cfg = c - t.m.Unlock() - return nil -} diff --git a/vendor/github.com/chzyer/readline/utils.go b/vendor/github.com/chzyer/readline/utils.go deleted file mode 100644 index af4e0052..00000000 --- a/vendor/github.com/chzyer/readline/utils.go +++ /dev/null @@ -1,277 +0,0 @@ -package readline - -import ( - "bufio" - "bytes" - "container/list" - "fmt" - "os" - "strconv" - "strings" - "sync" - "time" - "unicode" -) - -var ( - isWindows = false -) - -const ( - CharLineStart = 1 - CharBackward = 2 - CharInterrupt = 3 - CharDelete = 4 - CharLineEnd = 5 - CharForward = 6 - CharBell = 7 - CharCtrlH = 8 - CharTab = 9 - CharCtrlJ = 10 - CharKill = 11 - CharCtrlL = 12 - CharEnter = 13 - CharNext = 14 - CharPrev = 16 - CharBckSearch = 18 - CharFwdSearch = 19 - CharTranspose = 20 - CharCtrlU = 21 - CharCtrlW = 23 - CharCtrlY = 25 - CharCtrlZ = 26 - CharEsc = 27 - CharEscapeEx = 91 - CharBackspace = 127 -) - -const ( - MetaBackward rune = -iota - 1 - MetaForward - MetaDelete - MetaBackspace - MetaTranspose -) - -// WaitForResume need to call before current process got suspend. -// It will run a ticker until a long duration is occurs, -// which means this process is resumed. 
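// Concretely: a 10ms ticker runs and the goroutine watches the gap
// between ticks; a gap longer than 100ms can only happen if the process
// was stopped in between, and at that point the returned channel fires.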
-func WaitForResume() chan struct{} { - ch := make(chan struct{}) - var wg sync.WaitGroup - wg.Add(1) - go func() { - ticker := time.NewTicker(10 * time.Millisecond) - t := time.Now() - wg.Done() - for { - now := <-ticker.C - if now.Sub(t) > 100*time.Millisecond { - break - } - t = now - } - ticker.Stop() - ch <- struct{}{} - }() - wg.Wait() - return ch -} - -func Restore(fd int, state *State) error { - err := restoreTerm(fd, state) - if err != nil { - // errno 0 means everything is ok :) - if err.Error() == "errno 0" { - return nil - } else { - return err - } - } - return nil -} - -func IsPrintable(key rune) bool { - isInSurrogateArea := key >= 0xd800 && key <= 0xdbff - return key >= 32 && !isInSurrogateArea -} - -// translate Esc[X -func escapeExKey(key *escapeKeyPair) rune { - var r rune - switch key.typ { - case 'D': - r = CharBackward - case 'C': - r = CharForward - case 'A': - r = CharPrev - case 'B': - r = CharNext - case 'H': - r = CharLineStart - case 'F': - r = CharLineEnd - case '~': - if key.attr == "3" { - r = CharDelete - } - default: - } - return r -} - -type escapeKeyPair struct { - attr string - typ rune -} - -func (e *escapeKeyPair) Get2() (int, int, bool) { - sp := strings.Split(e.attr, ";") - if len(sp) < 2 { - return -1, -1, false - } - s1, err := strconv.Atoi(sp[0]) - if err != nil { - return -1, -1, false - } - s2, err := strconv.Atoi(sp[1]) - if err != nil { - return -1, -1, false - } - return s1, s2, true -} - -func readEscKey(r rune, reader *bufio.Reader) *escapeKeyPair { - p := escapeKeyPair{} - buf := bytes.NewBuffer(nil) - for { - if r == ';' { - } else if unicode.IsNumber(r) { - } else { - p.typ = r - break - } - buf.WriteRune(r) - r, _, _ = reader.ReadRune() - } - p.attr = buf.String() - return &p -} - -// translate EscX to Meta+X -func escapeKey(r rune, reader *bufio.Reader) rune { - switch r { - case 'b': - r = MetaBackward - case 'f': - r = MetaForward - case 'd': - r = MetaDelete - case CharTranspose: - r = MetaTranspose - case CharBackspace: - r = MetaBackspace - case 'O': - d, _, _ := reader.ReadRune() - switch d { - case 'H': - r = CharLineStart - case 'F': - r = CharLineEnd - default: - reader.UnreadRune() - } - case CharEsc: - - } - return r -} - -func SplitByLine(start, screenWidth int, rs []rune) []string { - var ret []string - buf := bytes.NewBuffer(nil) - currentWidth := start - for _, r := range rs { - w := runes.Width(r) - currentWidth += w - buf.WriteRune(r) - if currentWidth >= screenWidth { - ret = append(ret, buf.String()) - buf.Reset() - currentWidth = 0 - } - } - ret = append(ret, buf.String()) - return ret -} - -// calculate how many lines for N character -func LineCount(screenWidth, w int) int { - r := w / screenWidth - if w%screenWidth != 0 { - r++ - } - return r -} - -func IsWordBreak(i rune) bool { - switch { - case i >= 'a' && i <= 'z': - case i >= 'A' && i <= 'Z': - case i >= '0' && i <= '9': - default: - return true - } - return false -} - -func GetInt(s []string, def int) int { - if len(s) == 0 { - return def - } - c, err := strconv.Atoi(s[0]) - if err != nil { - return def - } - return c -} - -type RawMode struct { - state *State -} - -func (r *RawMode) Enter() (err error) { - r.state, err = MakeRaw(GetStdin()) - return err -} - -func (r *RawMode) Exit() error { - if r.state == nil { - return nil - } - return Restore(GetStdin(), r.state) -} - -// ----------------------------------------------------------------------------- - -func sleep(n int) { - Debug(n) - time.Sleep(2000 * time.Millisecond) -} - -// print a linked list to 
Debug() -func debugList(l *list.List) { - idx := 0 - for e := l.Front(); e != nil; e = e.Next() { - Debug(idx, fmt.Sprintf("%+v", e.Value)) - idx++ - } -} - -// append log info to another file -func Debug(o ...interface{}) { - f, _ := os.OpenFile("debug.tmp", os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666) - fmt.Fprintln(f, o...) - f.Close() -} diff --git a/vendor/github.com/chzyer/readline/utils_unix.go b/vendor/github.com/chzyer/readline/utils_unix.go deleted file mode 100644 index f88dac97..00000000 --- a/vendor/github.com/chzyer/readline/utils_unix.go +++ /dev/null @@ -1,83 +0,0 @@ -// +build darwin dragonfly freebsd linux,!appengine netbsd openbsd solaris - -package readline - -import ( - "io" - "os" - "os/signal" - "sync" - "syscall" -) - -type winsize struct { - Row uint16 - Col uint16 - Xpixel uint16 - Ypixel uint16 -} - -// SuspendMe use to send suspend signal to myself, when we in the raw mode. -// For OSX it need to send to parent's pid -// For Linux it need to send to myself -func SuspendMe() { - p, _ := os.FindProcess(os.Getppid()) - p.Signal(syscall.SIGTSTP) - p, _ = os.FindProcess(os.Getpid()) - p.Signal(syscall.SIGTSTP) -} - -// get width of the terminal -func getWidth(stdoutFd int) int { - cols, _, err := GetSize(stdoutFd) - if err != nil { - return -1 - } - return cols -} - -func GetScreenWidth() int { - w := getWidth(syscall.Stdout) - if w < 0 { - w = getWidth(syscall.Stderr) - } - return w -} - -// ClearScreen clears the console screen -func ClearScreen(w io.Writer) (int, error) { - return w.Write([]byte("\033[H")) -} - -func DefaultIsTerminal() bool { - return IsTerminal(syscall.Stdin) && (IsTerminal(syscall.Stdout) || IsTerminal(syscall.Stderr)) -} - -func GetStdin() int { - return syscall.Stdin -} - -// ----------------------------------------------------------------------------- - -var ( - widthChange sync.Once - widthChangeCallback func() -) - -func DefaultOnWidthChanged(f func()) { - widthChangeCallback = f - widthChange.Do(func() { - ch := make(chan os.Signal, 1) - signal.Notify(ch, syscall.SIGWINCH) - - go func() { - for { - _, ok := <-ch - if !ok { - break - } - widthChangeCallback() - } - }() - }) -} diff --git a/vendor/github.com/chzyer/readline/utils_windows.go b/vendor/github.com/chzyer/readline/utils_windows.go deleted file mode 100644 index 5bfa55dc..00000000 --- a/vendor/github.com/chzyer/readline/utils_windows.go +++ /dev/null @@ -1,41 +0,0 @@ -// +build windows - -package readline - -import ( - "io" - "syscall" -) - -func SuspendMe() { -} - -func GetStdin() int { - return int(syscall.Stdin) -} - -func init() { - isWindows = true -} - -// get width of the terminal -func GetScreenWidth() int { - info, _ := GetConsoleScreenBufferInfo() - if info == nil { - return -1 - } - return int(info.dwSize.x) -} - -// ClearScreen clears the console screen -func ClearScreen(_ io.Writer) error { - return SetConsoleCursorPosition(&_COORD{0, 0}) -} - -func DefaultIsTerminal() bool { - return true -} - -func DefaultOnWidthChanged(func()) { - -} diff --git a/vendor/github.com/chzyer/readline/vim.go b/vendor/github.com/chzyer/readline/vim.go deleted file mode 100644 index bedf2c1a..00000000 --- a/vendor/github.com/chzyer/readline/vim.go +++ /dev/null @@ -1,176 +0,0 @@ -package readline - -const ( - VIM_NORMAL = iota - VIM_INSERT - VIM_VISUAL -) - -type opVim struct { - cfg *Config - op *Operation - vimMode int -} - -func newVimMode(op *Operation) *opVim { - ov := &opVim{ - cfg: op.cfg, - op: op, - } - ov.SetVimMode(ov.cfg.VimMode) - return ov -} - -func (o *opVim) SetVimMode(on 
bool) { - if o.cfg.VimMode && !on { // turn off - o.ExitVimMode() - } - o.cfg.VimMode = on - o.vimMode = VIM_INSERT -} - -func (o *opVim) ExitVimMode() { - o.vimMode = VIM_INSERT -} - -func (o *opVim) IsEnableVimMode() bool { - return o.cfg.VimMode -} - -func (o *opVim) handleVimNormalMovement(r rune, readNext func() rune) (t rune, handled bool) { - rb := o.op.buf - handled = true - switch r { - case 'h': - t = CharBackward - case 'j': - t = CharNext - case 'k': - t = CharPrev - case 'l': - t = CharForward - case '0', '^': - rb.MoveToLineStart() - case '$': - rb.MoveToLineEnd() - case 'x': - rb.Delete() - if rb.IsCursorInEnd() { - rb.MoveBackward() - } - case 'r': - rb.Replace(readNext()) - case 'd': - next := readNext() - switch next { - case 'd': - rb.Erase() - case 'w': - rb.DeleteWord() - case 'h': - rb.Backspace() - case 'l': - rb.Delete() - } - case 'p': - rb.Yank() - case 'b', 'B': - rb.MoveToPrevWord() - case 'w', 'W': - rb.MoveToNextWord() - case 'e', 'E': - rb.MoveToEndWord() - case 'f', 'F', 't', 'T': - next := readNext() - prevChar := r == 't' || r == 'T' - reverse := r == 'F' || r == 'T' - switch next { - case CharEsc: - default: - rb.MoveTo(next, prevChar, reverse) - } - default: - return r, false - } - return t, true -} - -func (o *opVim) handleVimNormalEnterInsert(r rune, readNext func() rune) (t rune, handled bool) { - rb := o.op.buf - handled = true - switch r { - case 'i': - case 'I': - rb.MoveToLineStart() - case 'a': - rb.MoveForward() - case 'A': - rb.MoveToLineEnd() - case 's': - rb.Delete() - case 'S': - rb.Erase() - case 'c': - next := readNext() - switch next { - case 'c': - rb.Erase() - case 'w': - rb.DeleteWord() - case 'h': - rb.Backspace() - case 'l': - rb.Delete() - } - default: - return r, false - } - - o.EnterVimInsertMode() - return -} - -func (o *opVim) HandleVimNormal(r rune, readNext func() rune) (t rune) { - switch r { - case CharEnter, CharInterrupt: - o.ExitVimMode() - return r - } - - if r, handled := o.handleVimNormalMovement(r, readNext); handled { - return r - } - - if r, handled := o.handleVimNormalEnterInsert(r, readNext); handled { - return r - } - - // invalid operation - o.op.t.Bell() - return 0 -} - -func (o *opVim) EnterVimInsertMode() { - o.vimMode = VIM_INSERT -} - -func (o *opVim) ExitVimInsertMode() { - o.vimMode = VIM_NORMAL -} - -func (o *opVim) HandleVim(r rune, readNext func() rune) rune { - if o.vimMode == VIM_NORMAL { - return o.HandleVimNormal(r, readNext) - } - if r == CharEsc { - o.ExitVimInsertMode() - return 0 - } - - switch o.vimMode { - case VIM_INSERT: - return r - case VIM_VISUAL: - } - return r -} diff --git a/vendor/github.com/chzyer/readline/windows_api.go b/vendor/github.com/chzyer/readline/windows_api.go deleted file mode 100644 index 63f4f7b7..00000000 --- a/vendor/github.com/chzyer/readline/windows_api.go +++ /dev/null @@ -1,152 +0,0 @@ -// +build windows - -package readline - -import ( - "reflect" - "syscall" - "unsafe" -) - -var ( - kernel = NewKernel() - stdout = uintptr(syscall.Stdout) - stdin = uintptr(syscall.Stdin) -) - -type Kernel struct { - SetConsoleCursorPosition, - SetConsoleTextAttribute, - FillConsoleOutputCharacterW, - FillConsoleOutputAttribute, - ReadConsoleInputW, - GetConsoleScreenBufferInfo, - GetConsoleCursorInfo, - GetStdHandle CallFunc -} - -type short int16 -type word uint16 -type dword uint32 -type wchar uint16 - -type _COORD struct { - x short - y short -} - -func (c *_COORD) ptr() uintptr { - return uintptr(*(*int32)(unsafe.Pointer(c))) -} - -const ( - EVENT_KEY = 0x0001 - EVENT_MOUSE = 
0x0002 - EVENT_WINDOW_BUFFER_SIZE = 0x0004 - EVENT_MENU = 0x0008 - EVENT_FOCUS = 0x0010 -) - -type _KEY_EVENT_RECORD struct { - bKeyDown int32 - wRepeatCount word - wVirtualKeyCode word - wVirtualScanCode word - unicodeChar wchar - dwControlKeyState dword -} - -// KEY_EVENT_RECORD KeyEvent; -// MOUSE_EVENT_RECORD MouseEvent; -// WINDOW_BUFFER_SIZE_RECORD WindowBufferSizeEvent; -// MENU_EVENT_RECORD MenuEvent; -// FOCUS_EVENT_RECORD FocusEvent; -type _INPUT_RECORD struct { - EventType word - Padding uint16 - Event [16]byte -} - -type _CONSOLE_SCREEN_BUFFER_INFO struct { - dwSize _COORD - dwCursorPosition _COORD - wAttributes word - srWindow _SMALL_RECT - dwMaximumWindowSize _COORD -} - -type _SMALL_RECT struct { - left short - top short - right short - bottom short -} - -type _CONSOLE_CURSOR_INFO struct { - dwSize dword - bVisible bool -} - -type CallFunc func(u ...uintptr) error - -func NewKernel() *Kernel { - k := &Kernel{} - kernel32 := syscall.NewLazyDLL("kernel32.dll") - v := reflect.ValueOf(k).Elem() - t := v.Type() - for i := 0; i < t.NumField(); i++ { - name := t.Field(i).Name - f := kernel32.NewProc(name) - v.Field(i).Set(reflect.ValueOf(k.Wrap(f))) - } - return k -} - -func (k *Kernel) Wrap(p *syscall.LazyProc) CallFunc { - return func(args ...uintptr) error { - var r0 uintptr - var e1 syscall.Errno - size := uintptr(len(args)) - if len(args) <= 3 { - buf := make([]uintptr, 3) - copy(buf, args) - r0, _, e1 = syscall.Syscall(p.Addr(), size, - buf[0], buf[1], buf[2]) - } else { - buf := make([]uintptr, 6) - copy(buf, args) - r0, _, e1 = syscall.Syscall6(p.Addr(), size, - buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], - ) - } - - if int(r0) == 0 { - if e1 != 0 { - return error(e1) - } else { - return syscall.EINVAL - } - } - return nil - } - -} - -func GetConsoleScreenBufferInfo() (*_CONSOLE_SCREEN_BUFFER_INFO, error) { - t := new(_CONSOLE_SCREEN_BUFFER_INFO) - err := kernel.GetConsoleScreenBufferInfo( - stdout, - uintptr(unsafe.Pointer(t)), - ) - return t, err -} - -func GetConsoleCursorInfo() (*_CONSOLE_CURSOR_INFO, error) { - t := new(_CONSOLE_CURSOR_INFO) - err := kernel.GetConsoleCursorInfo(stdout, uintptr(unsafe.Pointer(t))) - return t, err -} - -func SetConsoleCursorPosition(c *_COORD) error { - return kernel.SetConsoleCursorPosition(stdout, c.ptr()) -} diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/LICENSE.md b/vendor/github.com/cpuguy83/go-md2man/v2/LICENSE.md deleted file mode 100644 index 1cade6ce..00000000 --- a/vendor/github.com/cpuguy83/go-md2man/v2/LICENSE.md +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Brian Goff - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go deleted file mode 100644 index b4800567..00000000 --- a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go +++ /dev/null @@ -1,14 +0,0 @@ -package md2man - -import ( - "github.com/russross/blackfriday/v2" -) - -// Render converts a markdown document into a roff formatted document. -func Render(doc []byte) []byte { - renderer := NewRoffRenderer() - - return blackfriday.Run(doc, - []blackfriday.Option{blackfriday.WithRenderer(renderer), - blackfriday.WithExtensions(renderer.GetExtensions())}...) -} diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go deleted file mode 100644 index 0668a66c..00000000 --- a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go +++ /dev/null @@ -1,345 +0,0 @@ -package md2man - -import ( - "fmt" - "io" - "os" - "strings" - - "github.com/russross/blackfriday/v2" -) - -// roffRenderer implements the blackfriday.Renderer interface for creating -// roff format (manpages) from markdown text -type roffRenderer struct { - extensions blackfriday.Extensions - listCounters []int - firstHeader bool - defineTerm bool - listDepth int -} - -const ( - titleHeader = ".TH " - topLevelHeader = "\n\n.SH " - secondLevelHdr = "\n.SH " - otherHeader = "\n.SS " - crTag = "\n" - emphTag = "\\fI" - emphCloseTag = "\\fP" - strongTag = "\\fB" - strongCloseTag = "\\fP" - breakTag = "\n.br\n" - paraTag = "\n.PP\n" - hruleTag = "\n.ti 0\n\\l'\\n(.lu'\n" - linkTag = "\n\\[la]" - linkCloseTag = "\\[ra]" - codespanTag = "\\fB\\fC" - codespanCloseTag = "\\fR" - codeTag = "\n.PP\n.RS\n\n.nf\n" - codeCloseTag = "\n.fi\n.RE\n" - quoteTag = "\n.PP\n.RS\n" - quoteCloseTag = "\n.RE\n" - listTag = "\n.RS\n" - listCloseTag = "\n.RE\n" - arglistTag = "\n.TP\n" - tableStart = "\n.TS\nallbox;\n" - tableEnd = ".TE\n" - tableCellStart = "T{\n" - tableCellEnd = "\nT}\n" -) - -// NewRoffRenderer creates a new blackfriday Renderer for generating roff documents -// from markdown -func NewRoffRenderer() *roffRenderer { // nolint: golint - var extensions blackfriday.Extensions - - extensions |= blackfriday.NoIntraEmphasis - extensions |= blackfriday.Tables - extensions |= blackfriday.FencedCode - extensions |= blackfriday.SpaceHeadings - extensions |= blackfriday.Footnotes - extensions |= blackfriday.Titleblock - extensions |= blackfriday.DefinitionLists - return &roffRenderer{ - extensions: extensions, - } -} - -// GetExtensions returns the list of extensions used by this renderer implementation -func (r *roffRenderer) GetExtensions() blackfriday.Extensions { - return r.extensions -} - -// RenderHeader handles outputting the header at document start -func (r *roffRenderer) RenderHeader(w io.Writer, ast *blackfriday.Node) { - // disable hyphenation - out(w, ".nh\n") -} - -// RenderFooter handles outputting the footer at the document end; the roff -// renderer has no footer information -func (r *roffRenderer) RenderFooter(w io.Writer, ast *blackfriday.Node) { -} - -// RenderNode is called for each node in a markdown document; based on the node -// type the equivalent roff output is sent to the writer -func (r *roffRenderer) RenderNode(w io.Writer, node 
*blackfriday.Node, entering bool) blackfriday.WalkStatus { - - var walkAction = blackfriday.GoToNext - - switch node.Type { - case blackfriday.Text: - r.handleText(w, node, entering) - case blackfriday.Softbreak: - out(w, crTag) - case blackfriday.Hardbreak: - out(w, breakTag) - case blackfriday.Emph: - if entering { - out(w, emphTag) - } else { - out(w, emphCloseTag) - } - case blackfriday.Strong: - if entering { - out(w, strongTag) - } else { - out(w, strongCloseTag) - } - case blackfriday.Link: - if !entering { - out(w, linkTag+string(node.LinkData.Destination)+linkCloseTag) - } - case blackfriday.Image: - // ignore images - walkAction = blackfriday.SkipChildren - case blackfriday.Code: - out(w, codespanTag) - escapeSpecialChars(w, node.Literal) - out(w, codespanCloseTag) - case blackfriday.Document: - break - case blackfriday.Paragraph: - // roff .PP markers break lists - if r.listDepth > 0 { - return blackfriday.GoToNext - } - if entering { - out(w, paraTag) - } else { - out(w, crTag) - } - case blackfriday.BlockQuote: - if entering { - out(w, quoteTag) - } else { - out(w, quoteCloseTag) - } - case blackfriday.Heading: - r.handleHeading(w, node, entering) - case blackfriday.HorizontalRule: - out(w, hruleTag) - case blackfriday.List: - r.handleList(w, node, entering) - case blackfriday.Item: - r.handleItem(w, node, entering) - case blackfriday.CodeBlock: - out(w, codeTag) - escapeSpecialChars(w, node.Literal) - out(w, codeCloseTag) - case blackfriday.Table: - r.handleTable(w, node, entering) - case blackfriday.TableCell: - r.handleTableCell(w, node, entering) - case blackfriday.TableHead: - case blackfriday.TableBody: - case blackfriday.TableRow: - // no action as cell entries do all the nroff formatting - return blackfriday.GoToNext - default: - fmt.Fprintln(os.Stderr, "WARNING: go-md2man does not handle node type "+node.Type.String()) - } - return walkAction -} - -func (r *roffRenderer) handleText(w io.Writer, node *blackfriday.Node, entering bool) { - var ( - start, end string - ) - // handle special roff table cell text encapsulation - if node.Parent.Type == blackfriday.TableCell { - if len(node.Literal) > 30 { - start = tableCellStart - end = tableCellEnd - } else { - // end rows that aren't terminated by "tableCellEnd" with a cr if end of row - if node.Parent.Next == nil && !node.Parent.IsHeader { - end = crTag - } - } - } - out(w, start) - escapeSpecialChars(w, node.Literal) - out(w, end) -} - -func (r *roffRenderer) handleHeading(w io.Writer, node *blackfriday.Node, entering bool) { - if entering { - switch node.Level { - case 1: - if !r.firstHeader { - out(w, titleHeader) - r.firstHeader = true - break - } - out(w, topLevelHeader) - case 2: - out(w, secondLevelHdr) - default: - out(w, otherHeader) - } - } -} - -func (r *roffRenderer) handleList(w io.Writer, node *blackfriday.Node, entering bool) { - openTag := listTag - closeTag := listCloseTag - if node.ListFlags&blackfriday.ListTypeDefinition != 0 { - // tags for definition lists handled within Item node - openTag = "" - closeTag = "" - } - if entering { - r.listDepth++ - if node.ListFlags&blackfriday.ListTypeOrdered != 0 { - r.listCounters = append(r.listCounters, 1) - } - out(w, openTag) - } else { - if node.ListFlags&blackfriday.ListTypeOrdered != 0 { - r.listCounters = r.listCounters[:len(r.listCounters)-1] - } - out(w, closeTag) - r.listDepth-- - } -} - -func (r *roffRenderer) handleItem(w io.Writer, node *blackfriday.Node, entering bool) { - if entering { - if node.ListFlags&blackfriday.ListTypeOrdered != 0 { - out(w, 
fmt.Sprintf(".IP \"%3d.\" 5\n", r.listCounters[len(r.listCounters)-1])) - r.listCounters[len(r.listCounters)-1]++ - } else if node.ListFlags&blackfriday.ListTypeDefinition != 0 { - // state machine for handling terms and following definitions - // since blackfriday does not distinguish them properly, nor - // does it seperate them into separate lists as it should - if !r.defineTerm { - out(w, arglistTag) - r.defineTerm = true - } else { - r.defineTerm = false - } - } else { - out(w, ".IP \\(bu 2\n") - } - } else { - out(w, "\n") - } -} - -func (r *roffRenderer) handleTable(w io.Writer, node *blackfriday.Node, entering bool) { - if entering { - out(w, tableStart) - //call walker to count cells (and rows?) so format section can be produced - columns := countColumns(node) - out(w, strings.Repeat("l ", columns)+"\n") - out(w, strings.Repeat("l ", columns)+".\n") - } else { - out(w, tableEnd) - } -} - -func (r *roffRenderer) handleTableCell(w io.Writer, node *blackfriday.Node, entering bool) { - var ( - start, end string - ) - if node.IsHeader { - start = codespanTag - end = codespanCloseTag - } - if entering { - if node.Prev != nil && node.Prev.Type == blackfriday.TableCell { - out(w, "\t"+start) - } else { - out(w, start) - } - } else { - // need to carriage return if we are at the end of the header row - if node.IsHeader && node.Next == nil { - end = end + crTag - } - out(w, end) - } -} - -// because roff format requires knowing the column count before outputting any table -// data we need to walk a table tree and count the columns -func countColumns(node *blackfriday.Node) int { - var columns int - - node.Walk(func(node *blackfriday.Node, entering bool) blackfriday.WalkStatus { - switch node.Type { - case blackfriday.TableRow: - if !entering { - return blackfriday.Terminate - } - case blackfriday.TableCell: - if entering { - columns++ - } - default: - } - return blackfriday.GoToNext - }) - return columns -} - -func out(w io.Writer, output string) { - io.WriteString(w, output) // nolint: errcheck -} - -func needsBackslash(c byte) bool { - for _, r := range []byte("-_&\\~") { - if c == r { - return true - } - } - return false -} - -func escapeSpecialChars(w io.Writer, text []byte) { - for i := 0; i < len(text); i++ { - // escape initial apostrophe or period - if len(text) >= 1 && (text[0] == '\'' || text[0] == '.') { - out(w, "\\&") - } - - // directly copy normal characters - org := i - - for i < len(text) && !needsBackslash(text[i]) { - i++ - } - if i > org { - w.Write(text[org:i]) // nolint: errcheck - } - - // escape a character - if i >= len(text) { - break - } - - w.Write([]byte{'\\', text[i]}) // nolint: errcheck - } -} diff --git a/vendor/github.com/dgraph-io/badger/.deepsource.toml b/vendor/github.com/dgraph-io/badger/.deepsource.toml deleted file mode 100644 index 266045f0..00000000 --- a/vendor/github.com/dgraph-io/badger/.deepsource.toml +++ /dev/null @@ -1,18 +0,0 @@ -version = 1 - -test_patterns = [ - 'integration/testgc/**', - '**/*_test.go' -] - -exclude_patterns = [ - -] - -[[analyzers]] -name = 'go' -enabled = true - - - [analyzers.meta] - import_path = 'github.com/dgraph-io/badger' diff --git a/vendor/github.com/dgraph-io/badger/.gitignore b/vendor/github.com/dgraph-io/badger/.gitignore deleted file mode 100644 index e3efdf58..00000000 --- a/vendor/github.com/dgraph-io/badger/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -p/ -badger-test*/ diff --git a/vendor/github.com/dgraph-io/badger/.golangci.yml b/vendor/github.com/dgraph-io/badger/.golangci.yml deleted file mode 100644 
index fecb8644..00000000 --- a/vendor/github.com/dgraph-io/badger/.golangci.yml +++ /dev/null @@ -1,27 +0,0 @@ -run: - tests: false - -linters-settings: - lll: - line-length: 100 - -linters: - disable-all: true - enable: - - errcheck - - ineffassign - - gas - - gofmt - - golint - - gosimple - - govet - - lll - - varcheck - - unused - -issues: - exclude-rules: - - linters: - - gosec - text: "G404: " - \ No newline at end of file diff --git a/vendor/github.com/dgraph-io/badger/.travis.yml b/vendor/github.com/dgraph-io/badger/.travis.yml deleted file mode 100644 index ea05101a..00000000 --- a/vendor/github.com/dgraph-io/badger/.travis.yml +++ /dev/null @@ -1,45 +0,0 @@ -language: go - -go: - - "1.12" - - "1.13" - - tip -os: - - osx -env: - jobs: - - GOARCH=386 - - GOARCH=amd64 - global: - - secure: CRkV2+/jlO0gXzzS50XGxfMS117FNwiVjxNY/LeWq06RKD+dDCPxTJl3JCNe3l0cYEPAglV2uMMYukDiTqJ7e+HI4nh4N4mv6lwx39N8dAvJe1x5ITS2T4qk4kTjuQb1Q1vw/ZOxoQqmvNKj2uRmBdJ/HHmysbRJ1OzCWML3OXdUwJf0AYlJzTjpMfkOKr7sTtE4rwyyQtd4tKH1fGdurgI9ZuFd9qvYxK2qcJhsQ6CNqMXt+7FkVkN1rIPmofjjBTNryzUr4COFXuWH95aDAif19DeBW4lbNgo1+FpDsrgmqtuhl6NAuptI8q/imow2KXBYJ8JPXsxW8DVFj0IIp0RCd3GjaEnwBEbxAyiIHLfW7AudyTS/dJOvZffPqXnuJ8xj3OPIdNe4xY0hWl8Ju2HhKfLOAHq7VadHZWd3IHLil70EiL4/JLD1rNbMImUZisFaA8pyrcIvYYebjOnk4TscwKFLedClRSX1XsMjWWd0oykQtrdkHM2IxknnBpaLu7mFnfE07f6dkG0nlpyu4SCLey7hr5FdcEmljA0nIxTSYDg6035fQkBEAbe7hlESOekkVNT9IZPwG+lmt3vU4ofi6NqNbJecOuSB+h36IiZ9s4YQtxYNnLgW14zjuFGGyT5smc3IjBT7qngDjKIgyrSVoRkY/8udy9qbUgvBeW8= - - -jobs: - allow_failures: - - go: tip - exclude: - # Exclude builds for 386 architecture on 1.12 and tip - # Since we don't want it to run for 32 bit - - go: "1.12" - env: GOARCH=386 - - go: tip - env: GOARCH=386 - -notifications: - email: false - slack: - secure: X7uBLWYbuUhf8QFE16CoS5z7WvFR8EN9j6cEectMW6mKZ3vwXGwVXRIPsgUq/606DsQdCCx34MR8MRWYGlu6TBolbSe9y0EP0i46yipPz22YtuT7umcVUbGEyx8MZKgG0v1u/zA0O4aCsOBpGAA3gxz8h3JlEHDt+hv6U8xRsSllVLzLSNb5lwxDtcfEDxVVqP47GMEgjLPM28Pyt5qwjk7o5a4YSVzkfdxBXxd3gWzFUWzJ5E3cTacli50dK4GVfiLcQY2aQYoYO7AAvDnvP+TPfjDkBlUEE4MUz5CDIN51Xb+WW33sX7g+r3Bj7V5IRcF973RiYkpEh+3eoiPnyWyxhDZBYilty3b+Hysp6d4Ov/3I3ll7Bcny5+cYjakjkMH3l9w3gs6Y82GlpSLSJshKWS8vPRsxFe0Pstj6QSJXTd9EBaFr+l1ScXjJv/Sya9j8N9FfTuOTESWuaL1auX4Y7zEEVHlA8SCNOO8K0eTfxGZnC/YcIHsR8rePEAcFxfOYQppkyLF/XvAtnb/LMUuu0g4y2qNdme6Oelvyar1tFEMRtbl4mRCdu/krXBFtkrsfUaVY6WTPdvXAGotsFJ0wuA53zGVhlcd3+xAlSlR3c1QX95HIMeivJKb5L4nTjP+xnrmQNtnVk+tG4LSH2ltuwcZSSczModtcBmRefrk= - -script: >- - if [ $TRAVIS_OS_NAME = "linux" ] && [ $go_32 ]; then - uname -a - GOOS=linux GOARCH=arm go test -v ./... - # Another round of tests after turning off mmap. - GOOS=linux GOARCH=arm go test -v -vlog_mmap=false github.com/dgraph-io/badger - else - go test -v ./... - # Another round of tests after turning off mmap. - go test -v -vlog_mmap=false github.com/dgraph-io/badger - # Cross-compile for Plan 9 - GOOS=plan9 go build ./... - fi diff --git a/vendor/github.com/dgraph-io/badger/CHANGELOG.md b/vendor/github.com/dgraph-io/badger/CHANGELOG.md deleted file mode 100644 index fce00ab5..00000000 --- a/vendor/github.com/dgraph-io/badger/CHANGELOG.md +++ /dev/null @@ -1,270 +0,0 @@ -# Changelog -All notable changes to this project will be documented in this file. - -The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) -and this project adheres to [Serialization Versioning](VERSIONING.md). 
- -## [Unreleased] - -## [1.6.2] - 2020-09-10 - -### Fixed - - Fix Sequence generates duplicate values (#1281) - - Ensure `bitValuePointer` flag is cleared for LSM entry values written to LSM (#1313) - - Confirm `badgerMove` entry required before rewrite (#1302) - - Drop move keys when its key prefix is dropped (#1331) - - Compaction: Expired keys and delete markers are never purged (#1354) - - Restore: Account for value size as well (#1358) - - GC: Consider size of value while rewriting (#1357) - - Rework DB.DropPrefix (#1381) - - Update head while replaying value log (#1372) - - Remove vlog file if bootstrap, syncDir or mmap fails (#1434) - - Levels: Compaction incorrectly drops some delete markers (#1422) - - Fix(replay) - Update head for LSM entries also (#1456) - - Fix(Backup/Restore): Keep all versions (#1462) - - Fix build on Plan 9 (#1451) - -## [1.6.1] - 2020-03-26 - -### New APIs - - Badger.DB - - NewWriteBatchAt (#948) - - Badger.Options - - WithEventLogging (#1035) - - WithVerifyValueChecksum (#1052) - - WithBypassLockGuard (#1243) - -### Features - - Support checksum verification for values read from vlog (#1052) - - Add EventLogging option (#1035) - - Support WriteBatch API in managed mode (#948) - - Add support for watching nil prefix in Subscribe API (#1246) - -### Fixed - - Initialize vlog before starting compactions in db.Open (#1226) - - Fix int overflow for 32bit (#1216) - - Remove the 'this entry should've caught' log from value.go (#1170) - - Fix merge iterator duplicates issue (#1157) - - Fix segmentation fault in vlog.Read (header.Decode) (#1150) - - Fix VerifyValueChecksum checks (#1138) - - Fix windows dataloss issue (#1134) - - Fix request increment ref bug (#1121) - - Limit manifest's change set size (#1119) - - Fix deadlock in discard stats (#1070) - - Acquire lock before unmapping vlog files (#1050) - - Set move key's expiresAt for keys with TTL (#1006) - - Fix deadlock when flushing discard stats. (#976) - - Fix table.Smallest/Biggest and iterator Prefix bug (#997) - - Fix boundaries on GC batch size (#987) - - Lock log file before munmap (#949) - - VlogSize to store correct directory name to expvar.Map (#956) - - Fix transaction too big issue in restore (#957) - - Fix race condition in updateDiscardStats (#973) - - Cast results of len to uint32 to fix compilation in i386 arch. (#961) - - Drop discard stats if we can't unmarshal it (#936) - - Open all vlog files in RDWR mode (#923) - - Fix race condition in flushDiscardStats function (#921) - - Ensure rewrite in vlog is within transactional limits (#911) - - Fix prefix bug in key iterator and allow all versions (#950) - - Fix discard stats moved by GC bug (#929) - -### Performance - - Use fastRand instead of locked-rand in skiplist (#1173) - - Fix checkOverlap in compaction (#1166) - - Optimize createTable in stream_writer.go (#1132) - - Add capacity to slice creation when capacity is known (#1103) - - Introduce fast merge iterator (#1080) - - Introduce StreamDone in Stream Writer (#1061) - - Flush vlog buffer if it grows beyond threshold (#1067) - - Binary search based table picker (#983) - - Making the stream writer APIs goroutine-safe (#959) - - Replace FarmHash with AESHash for Oracle conflicts (#952) - - Change file picking strategy in compaction (#894) - - Use trie for prefix matching (#851) - - Fix busy-wait loop in Watermark (#920) - - -## [1.6.0] - 2019-07-01 - -This is a release including almost 200 commits, so expect many changes - some of them -not backward compatible. 
- -Regarding backward compatibility in Badger versions, you might be interested on reading -[VERSIONING.md](VERSIONING.md). - -_Note_: The hashes in parentheses correspond to the commits that impacted the given feature. - -### New APIs - -- badger.DB - - DropPrefix (291295e) - - Flatten (7e41bba) - - KeySplits (4751ef1) - - MaxBatchCount (b65e2a3) - - MaxBatchSize (b65e2a3) - - PrintKeyValueHistogram (fd59907) - - Subscribe (26128a7) - - Sync (851e462) - -- badger.DefaultOptions() and badger.LSMOnlyOptions() (91ce687) - - badger.Options.WithX methods - -- badger.Entry (e9447c9) - - NewEntry - - WithMeta - - WithDiscard - - WithTTL - -- badger.Item - - KeySize (fd59907) - - ValueSize (5242a99) - -- badger.IteratorOptions - - PickTable (7d46029, 49a49e3) - - Prefix (7d46029) - -- badger.Logger (fbb2778) - -- badger.Options - - CompactL0OnClose (7e41bba) - - Logger (3f66663) - - LogRotatesToFlush (2237832) - -- badger.Stream (14cbd89, 3258067) -- badger.StreamWriter (7116e16) -- badger.TableInfo.KeyCount (fd59907) -- badger.TableManifest (2017987) -- badger.Tx.NewKeyIterator (49a49e3) -- badger.WriteBatch (6daccf9, 7e78e80) - -### Modified APIs - -#### Breaking changes: - -- badger.DefaultOptions and badger.LSMOnlyOptions are now functions rather than variables (91ce687) -- badger.Item.Value now receives a function that returns an error (439fd46) -- badger.Txn.Commit doesn't receive any params now (6daccf9) -- badger.DB.Tables now receives a boolean (76b5341) - -#### Not breaking changes: - -- badger.LSMOptions changed values (799c33f) -- badger.DB.NewIterator now allows multiple iterators per RO txn (41d9656) -- badger.Options.TableLoadingMode's new default is options.MemoryMap (6b97bac) - -### Removed APIs - -- badger.ManagedDB (d22c0e8) -- badger.Options.DoNotCompact (7e41bba) -- badger.Txn.SetWithX (e9447c9) - -### Tools: - -- badger bank disect (13db058) -- badger bank test (13db058) --mmap (03870e3) -- badger fill (7e41bba) -- badger flatten (7e41bba) -- badger info --histogram (fd59907) --history --lookup --show-keys --show-meta --with-prefix (09e9b63) --show-internal (fb2eed9) -- badger benchmark read (239041e) -- badger benchmark write (6d3b67d) - -## [1.5.5] - 2019-06-20 - -* Introduce support for Go Modules - -## [1.5.3] - 2018-07-11 -Bug Fixes: -* Fix a panic caused due to item.vptr not copying over vs.Value, when looking - for a move key. - -## [1.5.2] - 2018-06-19 -Bug Fixes: -* Fix the way move key gets generated. -* If a transaction has unclosed, or multiple iterators running simultaneously, - throw a panic. Every iterator must be properly closed. At any point in time, - only one iterator per transaction can be running. This is to avoid bugs in a - transaction data structure which is thread unsafe. - -* *Warning: This change might cause panics in user code. Fix is to properly - close your iterators, and only have one running at a time per transaction.* - -## [1.5.1] - 2018-06-04 -Bug Fixes: -* Fix for infinite yieldItemValue recursion. #503 -* Fix recursive addition of `badgerMove` prefix. https://github.com/dgraph-io/badger/commit/2e3a32f0ccac3066fb4206b28deb39c210c5266f -* Use file size based window size for sampling, instead of fixing it to 10MB. #501 - -Cleanup: -* Clarify comments and documentation. -* Move badger tool one directory level up. - -## [1.5.0] - 2018-05-08 -* Introduce `NumVersionsToKeep` option. This option is used to discard many - versions of the same key, which saves space. 
-* Add a new `SetWithDiscard` method, which would indicate that all the older - versions of the key are now invalid. Those versions would be discarded during - compactions. -* Value log GC moves are now bound to another keyspace to ensure latest versions - of data are always at the top in LSM tree. -* Introduce `ValueLogMaxEntries` to restrict the number of key-value pairs per - value log file. This helps bound the time it takes to garbage collect one - file. - -## [1.4.0] - 2018-05-04 -* Make mmap-ing of value log optional. -* Run GC multiple times, based on recorded discard statistics. -* Add MergeOperator. -* Force compact L0 on clsoe (#439). -* Add truncate option to warn about data loss (#452). -* Discard key versions during compaction (#464). -* Introduce new `LSMOnlyOptions`, to make Badger act like a typical LSM based DB. - -Bug fix: -* (Temporary) Check max version across all tables in Get (removed in next - release). -* Update commit and read ts while loading from backup. -* Ensure all transaction entries are part of the same value log file. -* On commit, run unlock callbacks before doing writes (#413). -* Wait for goroutines to finish before closing iterators (#421). - -## [1.3.0] - 2017-12-12 -* Add `DB.NextSequence()` method to generate monotonically increasing integer - sequences. -* Add `DB.Size()` method to return the size of LSM and value log files. -* Tweaked mmap code to make Windows 32-bit builds work. -* Tweaked build tags on some files to make iOS builds work. -* Fix `DB.PurgeOlderVersions()` to not violate some constraints. - -## [1.2.0] - 2017-11-30 -* Expose a `Txn.SetEntry()` method to allow setting the key-value pair - and all the metadata at the same time. - -## [1.1.1] - 2017-11-28 -* Fix bug where txn.Get was returing key deleted in same transaction. -* Fix race condition while decrementing reference in oracle. -* Update doneCommit in the callback for CommitAsync. -* Iterator see writes of current txn. - -## [1.1.0] - 2017-11-13 -* Create Badger directory if it does not exist when `badger.Open` is called. 
-* Added `Item.ValueCopy()` to avoid deadlocks in long-running iterations -* Fixed 64-bit alignment issues to make Badger run on Arm v7 - -## [1.0.1] - 2017-11-06 -* Fix an uint16 overflow when resizing key slice - -[Unreleased]: https://github.com/dgraph-io/badger/compare/v1.6.2...HEAD -[1.6.2]: https://github.com/dgraph-io/badger/compare/v1.6.1...v1.6.2 -[1.6.1]: https://github.com/dgraph-io/badger/compare/v1.6.0...v1.6.1 -[1.6.0]: https://github.com/dgraph-io/badger/compare/v1.5.5...v1.6.0 -[1.5.5]: https://github.com/dgraph-io/badger/compare/v1.5.3...v1.5.5 -[1.5.3]: https://github.com/dgraph-io/badger/compare/v1.5.2...v1.5.3 -[1.5.2]: https://github.com/dgraph-io/badger/compare/v1.5.1...v1.5.2 -[1.5.1]: https://github.com/dgraph-io/badger/compare/v1.5.0...v1.5.1 -[1.5.0]: https://github.com/dgraph-io/badger/compare/v1.4.0...v1.5.0 -[1.4.0]: https://github.com/dgraph-io/badger/compare/v1.3.0...v1.4.0 -[1.3.0]: https://github.com/dgraph-io/badger/compare/v1.2.0...v1.3.0 -[1.2.0]: https://github.com/dgraph-io/badger/compare/v1.1.1...v1.2.0 -[1.1.1]: https://github.com/dgraph-io/badger/compare/v1.1.0...v1.1.1 -[1.1.0]: https://github.com/dgraph-io/badger/compare/v1.0.1...v1.1.0 -[1.0.1]: https://github.com/dgraph-io/badger/compare/v1.0.0...v1.0.1 diff --git a/vendor/github.com/dgraph-io/badger/CODE_OF_CONDUCT.md b/vendor/github.com/dgraph-io/badger/CODE_OF_CONDUCT.md deleted file mode 100644 index bf7bbc29..00000000 --- a/vendor/github.com/dgraph-io/badger/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,5 +0,0 @@ -# Code of Conduct - -Our Code of Conduct can be found here: - -https://dgraph.io/conduct diff --git a/vendor/github.com/dgraph-io/badger/LICENSE b/vendor/github.com/dgraph-io/badger/LICENSE deleted file mode 100644 index d9a10c0d..00000000 --- a/vendor/github.com/dgraph-io/badger/LICENSE +++ /dev/null @@ -1,176 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS diff --git a/vendor/github.com/dgraph-io/badger/README.md b/vendor/github.com/dgraph-io/badger/README.md deleted file mode 100644 index 535f2a0d..00000000 --- a/vendor/github.com/dgraph-io/badger/README.md +++ /dev/null @@ -1,898 +0,0 @@ -# BadgerDB [![GoDoc](https://godoc.org/github.com/dgraph-io/badger?status.svg)](https://godoc.org/github.com/dgraph-io/badger) [![Go Report Card](https://goreportcard.com/badge/github.com/dgraph-io/badger)](https://goreportcard.com/report/github.com/dgraph-io/badger) [![Sourcegraph](https://sourcegraph.com/github.com/dgraph-io/badger/-/badge.svg)](https://sourcegraph.com/github.com/dgraph-io/badger?badge) [![Build Status](https://teamcity.dgraph.io/guestAuth/app/rest/builds/buildType:(id:Badger_UnitTests)/statusIcon.svg)](https://teamcity.dgraph.io/viewLog.html?buildTypeId=Badger_UnitTests&buildId=lastFinished&guest=1) ![Appveyor](https://ci.appveyor.com/api/projects/status/github/dgraph-io/badger?branch=master&svg=true) [![Coverage Status](https://coveralls.io/repos/github/dgraph-io/badger/badge.svg?branch=master)](https://coveralls.io/github/dgraph-io/badger?branch=master) - -![Badger mascot](images/diggy-shadow.png) - -BadgerDB is an embeddable, persistent and fast key-value (KV) database written -in pure Go. It is the underlying database for [Dgraph](https://dgraph.io), a -fast, distributed graph database. It's meant to be a performant alternative to -non-Go-based key-value stores like RocksDB. - -## Project Status [Jun 26, 2019] - -Badger is stable and is being used to serve data sets worth hundreds of -terabytes. Badger supports concurrent ACID transactions with serializable -snapshot isolation (SSI) guarantees. A Jepsen-style bank test runs nightly for -8h, with `--race` flag and ensures the maintenance of transactional guarantees. -Badger has also been tested to work with filesystem level anomalies, to ensure -persistence and consistency. - -Badger v1.0 was released in Nov 2017, and the latest version that is data-compatible -with v1.0 is v1.6.0. - -Badger v2.0, a new release coming up very soon will use a new storage format which won't -be compatible with all of the v1.x. 
The [Changelog] is kept fairly up-to-date. - -For more details on our version naming schema please read [Choosing a version](#choosing-a-version). - -[Changelog]:https://github.com/dgraph-io/badger/blob/master/CHANGELOG.md - -## Table of Contents - * [Getting Started](#getting-started) - + [Installing](#installing) - - [Choosing a version](#choosing-a-version) - + [Opening a database](#opening-a-database) - + [Transactions](#transactions) - - [Read-only transactions](#read-only-transactions) - - [Read-write transactions](#read-write-transactions) - - [Managing transactions manually](#managing-transactions-manually) - + [Using key/value pairs](#using-keyvalue-pairs) - + [Monotonically increasing integers](#monotonically-increasing-integers) - * [Merge Operations](#merge-operations) - + [Setting Time To Live(TTL) and User Metadata on Keys](#setting-time-to-livettl-and-user-metadata-on-keys) - + [Iterating over keys](#iterating-over-keys) - - [Prefix scans](#prefix-scans) - - [Key-only iteration](#key-only-iteration) - + [Stream](#stream) - + [Garbage Collection](#garbage-collection) - + [Database backup](#database-backup) - + [Memory usage](#memory-usage) - + [Statistics](#statistics) - * [Resources](#resources) - + [Blog Posts](#blog-posts) - * [Contact](#contact) - * [Design](#design) - + [Comparisons](#comparisons) - + [Benchmarks](#benchmarks) - * [Other Projects Using Badger](#other-projects-using-badger) - * [Frequently Asked Questions](#frequently-asked-questions) - -## Getting Started - -### Installing -To start using Badger, install Go 1.11 or above and run `go get`: - -```sh -$ go get github.com/dgraph-io/badger/... -``` - -This will retrieve the library and install the `badger` command line -utility into your `$GOBIN` path. - -#### Choosing a version - -BadgerDB is a pretty special package from the point of view that the most important change we can -make to it is not on its API but rather on how data is stored on disk. - -This is why we follow a version naming schema that differs from Semantic Versioning. - -- New major versions are released when the data format on disk changes in an incompatible way. -- New minor versions are released whenever the API changes but data compatibility is maintained. - Note that the changes on the API could be backward-incompatible - unlike Semantic Versioning. -- New patch versions are released when there's no changes to the data format nor the API. - -Following these rules: - -- v1.5.0 and v1.6.0 can be used on top of the same files without any concerns, as their major - version is the same, therefore the data format on disk is compatible. -- v1.6.0 and v2.0.0 are data incompatible as their major version implies, so files created with - v1.6.0 will need to be converted into the new format before they can be used by v2.0.0. - -For a longer explanation on the reasons behind using a new versioning naming schema, you can read -[VERSIONING.md](VERSIONING.md). - -### Opening a database -The top-level object in Badger is a `DB`. It represents multiple files on disk -in specific directories, which contain the data for a single database. - -To open your database, use the `badger.Open()` function, with the appropriate -options. The `Dir` and `ValueDir` options are mandatory and must be -specified by the client. They can be set to the same value to simplify things. - -```go -package main - -import ( - "log" - - badger "github.com/dgraph-io/badger" -) - -func main() { - // Open the Badger database located in the /tmp/badger directory. 
- // It will be created if it doesn't exist. - db, err := badger.Open(badger.DefaultOptions("/tmp/badger")) - if err != nil { - log.Fatal(err) - } - defer db.Close() -  // Your code here… -} -``` - -Please note that Badger obtains a lock on the directories so multiple processes -cannot open the same database at the same time. - -### Transactions - -#### Read-only transactions -To start a read-only transaction, you can use the `DB.View()` method: - -```go -err := db.View(func(txn *badger.Txn) error { -  // Your code here… -  return nil -}) -``` - -You cannot perform any writes or deletes within this transaction. Badger -ensures that you get a consistent view of the database within this closure. Any -writes that happen elsewhere after the transaction has started, will not be -seen by calls made within the closure. - -#### Read-write transactions -To start a read-write transaction, you can use the `DB.Update()` method: - -```go -err := db.Update(func(txn *badger.Txn) error { -  // Your code here… -  return nil -}) -``` - -All database operations are allowed inside a read-write transaction. - -Always check the returned error value. If you return an error -within your closure it will be passed through. - -An `ErrConflict` error will be reported in case of a conflict. Depending on the state -of your application, you have the option to retry the operation if you receive -this error. - -An `ErrTxnTooBig` will be reported in case the number of pending writes/deletes in -the transaction exceeds a certain limit. In that case, it is best to commit the -transaction and start a new transaction immediately. Here is an example (we are -not checking for errors in some places for simplicity): - -```go -updates := make(map[string]string) -txn := db.NewTransaction(true) -for k,v := range updates { - if err := txn.Set([]byte(k),[]byte(v)); err == badger.ErrTxnTooBig { - _ = txn.Commit() - txn = db.NewTransaction(true) - _ = txn.Set([]byte(k),[]byte(v)) - } -} -_ = txn.Commit() -``` - -#### Managing transactions manually -The `DB.View()` and `DB.Update()` methods are wrappers around the -`DB.NewTransaction()` and `Txn.Commit()` methods (or `Txn.Discard()` in case of -read-only transactions). These helper methods will start the transaction, -execute a function, and then safely discard your transaction if an error is -returned. This is the recommended way to use Badger transactions. - -However, sometimes you may want to manually create and commit your -transactions. You can use the `DB.NewTransaction()` function directly, which -takes in a boolean argument to specify whether a read-write transaction is -required. For read-write transactions, it is necessary to call `Txn.Commit()` -to ensure the transaction is committed. For read-only transactions, calling -`Txn.Discard()` is sufficient. `Txn.Commit()` also calls `Txn.Discard()` -internally to cleanup the transaction, so just calling `Txn.Commit()` is -sufficient for read-write transaction. However, if your code doesn’t call -`Txn.Commit()` for some reason (for e.g it returns prematurely with an error), -then please make sure you call `Txn.Discard()` in a `defer` block. Refer to the -code below. - -```go -// Start a writable transaction. -txn := db.NewTransaction(true) -defer txn.Discard() - -// Use the transaction... -err := txn.Set([]byte("answer"), []byte("42")) -if err != nil { - return err -} - -// Commit the transaction and check for error. 
-if err := txn.Commit(); err != nil { - return err -} -``` - -The first argument to `DB.NewTransaction()` is a boolean stating if the transaction -should be writable. - -Badger allows an optional callback to the `Txn.Commit()` method. Normally, the -callback can be set to `nil`, and the method will return after all the writes -have succeeded. However, if this callback is provided, the `Txn.Commit()` -method returns as soon as it has checked for any conflicts. The actual writing -to the disk happens asynchronously, and the callback is invoked once the -writing has finished, or an error has occurred. This can improve the throughput -of the application in some cases. But it also means that a transaction is not -durable until the callback has been invoked with a `nil` error value. - -### Using key/value pairs -To save a key/value pair, use the `Txn.Set()` method: - -```go -err := db.Update(func(txn *badger.Txn) error { - err := txn.Set([]byte("answer"), []byte("42")) - return err -}) -``` - -Key/Value pair can also be saved by first creating `Entry`, then setting this -`Entry` using `Txn.SetEntry()`. `Entry` also exposes methods to set properties -on it. - -```go -err := db.Update(func(txn *badger.Txn) error { - e := badger.NewEntry([]byte("answer"), []byte("42")) - err := txn.SetEntry(e) - return err -}) -``` - -This will set the value of the `"answer"` key to `"42"`. To retrieve this -value, we can use the `Txn.Get()` method: - -```go -err := db.View(func(txn *badger.Txn) error { - item, err := txn.Get([]byte("answer")) - handle(err) - - var valNot, valCopy []byte - err := item.Value(func(val []byte) error { - // This func with val would only be called if item.Value encounters no error. - - // Accessing val here is valid. - fmt.Printf("The answer is: %s\n", val) - - // Copying or parsing val is valid. - valCopy = append([]byte{}, val...) - - // Assigning val slice to another variable is NOT OK. - valNot = val // Do not do this. - return nil - }) - handle(err) - - // DO NOT access val here. It is the most common cause of bugs. - fmt.Printf("NEVER do this. %s\n", valNot) - - // You must copy it to use it outside item.Value(...). - fmt.Printf("The answer is: %s\n", valCopy) - - // Alternatively, you could also use item.ValueCopy(). - valCopy, err = item.ValueCopy(nil) - handle(err) - fmt.Printf("The answer is: %s\n", valCopy) - - return nil -}) -``` - -`Txn.Get()` returns `ErrKeyNotFound` if the value is not found. - -Please note that values returned from `Get()` are only valid while the -transaction is open. If you need to use a value outside of the transaction -then you must use `copy()` to copy it to another byte slice. - -Use the `Txn.Delete()` method to delete a key. - -### Monotonically increasing integers - -To get unique monotonically increasing integers with strong durability, you can -use the `DB.GetSequence` method. This method returns a `Sequence` object, which -is thread-safe and can be used concurrently via various goroutines. - -Badger would lease a range of integers to hand out from memory, with the -bandwidth provided to `DB.GetSequence`. The frequency at which disk writes are -done is determined by this lease bandwidth and the frequency of `Next` -invocations. Setting a bandwidth too low would do more disk writes, setting it -too high would result in wasted integers if Badger is closed or crashes. -To avoid wasted integers, call `Release` before closing Badger. 
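As a complement to the snippet just below, here is a slightly fuller sketch of `DB.GetSequence` with the error handling spelled out; the `user-ids` key, the bandwidth of 1000, and the loop bound are illustrative placeholders, not values taken from this document:

```go
// Lease integers in blocks of 1000 under an application-chosen key.
seq, err := db.GetSequence([]byte("user-ids"), 1000)
if err != nil {
	return err
}
// Release hands back the unused part of the lease so those
// integers are not wasted when Badger shuts down.
defer seq.Release()

for i := 0; i < 10; i++ {
	num, err := seq.Next()
	if err != nil {
		return err
	}
	fmt.Printf("allocated id: %d\n", num)
}
```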
- -```go -seq, err := db.GetSequence(key, 1000) -defer seq.Release() -for { - num, err := seq.Next() -} -``` - -### Merge Operations -Badger provides support for ordered merge operations. You can define a func -of type `MergeFunc` which takes in an existing value, and a value to be -_merged_ with it. It returns a new value which is the result of the _merge_ -operation. All values are specified in byte arrays. For e.g., here is a merge -function (`add`) which appends a `[]byte` value to an existing `[]byte` value. - -```Go -// Merge function to append one byte slice to another -func add(originalValue, newValue []byte) []byte { - return append(originalValue, newValue...) -} -``` - -This function can then be passed to the `DB.GetMergeOperator()` method, along -with a key, and a duration value. The duration specifies how often the merge -function is run on values that have been added using the `MergeOperator.Add()` -method. - -`MergeOperator.Get()` method can be used to retrieve the cumulative value of the key -associated with the merge operation. - -```Go -key := []byte("merge") - -m := db.GetMergeOperator(key, add, 200*time.Millisecond) -defer m.Stop() - -m.Add([]byte("A")) -m.Add([]byte("B")) -m.Add([]byte("C")) - -res, _ := m.Get() // res should have value ABC encoded -``` - -Example: Merge operator which increments a counter - -```Go -func uint64ToBytes(i uint64) []byte { - var buf [8]byte - binary.BigEndian.PutUint64(buf[:], i) - return buf[:] -} - -func bytesToUint64(b []byte) uint64 { - return binary.BigEndian.Uint64(b) -} - -// Merge function to add two uint64 numbers -func add(existing, new []byte) []byte { - return uint64ToBytes(bytesToUint64(existing) + bytesToUint64(new)) -} -``` -It can be used as -```Go -key := []byte("merge") - -m := db.GetMergeOperator(key, add, 200*time.Millisecond) -defer m.Stop() - -m.Add(uint64ToBytes(1)) -m.Add(uint64ToBytes(2)) -m.Add(uint64ToBytes(3)) - -res, _ := m.Get() // res should have value 6 encoded -``` - -### Setting Time To Live(TTL) and User Metadata on Keys -Badger allows setting an optional Time to Live (TTL) value on keys. Once the TTL has -elapsed, the key will no longer be retrievable and will be eligible for garbage -collection. A TTL can be set as a `time.Duration` value using the `Entry.WithTTL()` -and `Txn.SetEntry()` API methods. - -```go -err := db.Update(func(txn *badger.Txn) error { - e := badger.NewEntry([]byte("answer"), []byte("42")).WithTTL(time.Hour) - err := txn.SetEntry(e) - return err -}) -``` - -An optional user metadata value can be set on each key. A user metadata value -is represented by a single byte. It can be used to set certain bits along -with the key to aid in interpreting or decoding the key-value pair. User -metadata can be set using `Entry.WithMeta()` and `Txn.SetEntry()` API methods. - -```go -err := db.Update(func(txn *badger.Txn) error { - e := badger.NewEntry([]byte("answer"), []byte("42")).WithMeta(byte(1)) - err := txn.SetEntry(e) - return err -}) -``` - -`Entry` APIs can be used to add the user metadata and TTL for same key. This `Entry` -then can be set using `Txn.SetEntry()`. - -```go -err := db.Update(func(txn *badger.Txn) error { - e := badger.NewEntry([]byte("answer"), []byte("42")).WithMeta(byte(1)).WithTTL(time.Hour) - err := txn.SetEntry(e) - return err -}) -``` - -### Iterating over keys -To iterate over keys, we can use an `Iterator`, which can be obtained using the -`Txn.NewIterator()` method. Iteration happens in byte-wise lexicographical sorting -order. 
- - -```go -err := db.View(func(txn *badger.Txn) error { - opts := badger.DefaultIteratorOptions - opts.PrefetchSize = 10 - it := txn.NewIterator(opts) - defer it.Close() - for it.Rewind(); it.Valid(); it.Next() { - item := it.Item() - k := item.Key() - err := item.Value(func(v []byte) error { - fmt.Printf("key=%s, value=%s\n", k, v) - return nil - }) - if err != nil { - return err - } - } - return nil -}) -``` - -The iterator allows you to move to a specific point in the list of keys and move -forward or backward through the keys one at a time. - -By default, Badger prefetches the values of the next 100 items. You can adjust -that with the `IteratorOptions.PrefetchSize` field. However, setting it to -a value higher than `GOMAXPROCS` (which we recommend to be 128 or higher) -shouldn’t give any additional benefits. You can also turn off the fetching of -values altogether. See section below on key-only iteration. - -#### Prefix scans -To iterate over a key prefix, you can combine `Seek()` and `ValidForPrefix()`: - -```go -db.View(func(txn *badger.Txn) error { - it := txn.NewIterator(badger.DefaultIteratorOptions) - defer it.Close() - prefix := []byte("1234") - for it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() { - item := it.Item() - k := item.Key() - err := item.Value(func(v []byte) error { - fmt.Printf("key=%s, value=%s\n", k, v) - return nil - }) - if err != nil { - return err - } - } - return nil -}) -``` - -#### Key-only iteration -Badger supports a unique mode of iteration called _key-only_ iteration. It is -several order of magnitudes faster than regular iteration, because it involves -access to the LSM-tree only, which is usually resident entirely in RAM. To -enable key-only iteration, you need to set the `IteratorOptions.PrefetchValues` -field to `false`. This can also be used to do sparse reads for selected keys -during an iteration, by calling `item.Value()` only when required. - -```go -err := db.View(func(txn *badger.Txn) error { - opts := badger.DefaultIteratorOptions - opts.PrefetchValues = false - it := txn.NewIterator(opts) - defer it.Close() - for it.Rewind(); it.Valid(); it.Next() { - item := it.Item() - k := item.Key() - fmt.Printf("key=%s\n", k) - } - return nil -}) -``` - -### Stream -Badger provides a Stream framework, which concurrently iterates over all or a -portion of the DB, converting data into custom key-values, and streams it out -serially to be sent over network, written to disk, or even written back to -Badger. This is a lot faster way to iterate over Badger than using a single -Iterator. Stream supports Badger in both managed and normal mode. - -Stream uses the natural boundaries created by SSTables within the LSM tree, to -quickly generate key ranges. Each goroutine then picks a range and runs an -iterator to iterate over it. Each iterator iterates over all versions of values -and is created from the same transaction, thus working over a snapshot of the -DB. Every time a new key is encountered, it calls `ChooseKey(item)`, followed -by `KeyToList(key, itr)`. This allows a user to select or reject that key, and -if selected, convert the value versions into custom key-values. The goroutine -batches up 4MB worth of key-values, before sending it over to a channel. -Another goroutine further batches up data from this channel using *smart -batching* algorithm and calls `Send` serially. - -This framework is designed for high throughput key-value iteration, spreading -the work of iteration across many goroutines. 
`DB.Backup` uses this framework to -provide full and incremental backups quickly. Dgraph is a heavy user of this -framework. In fact, this framework was developed and used within Dgraph, before -getting ported over to Badger. - -```go -stream := db.NewStream() -// db.NewStreamAt(readTs) for managed mode. - -// -- Optional settings -stream.NumGo = 16 // Set number of goroutines to use for iteration. -stream.Prefix = []byte("some-prefix") // Leave nil for iteration over the whole DB. -stream.LogPrefix = "Badger.Streaming" // For identifying stream logs. Outputs to Logger. - -// ChooseKey is called concurrently for every key. If left nil, assumes true by default. -stream.ChooseKey = func(item *badger.Item) bool { - return bytes.HasSuffix(item.Key(), []byte("er")) -} - -// KeyToList is called concurrently for chosen keys. This can be used to convert -// Badger data into custom key-values. If nil, uses stream.ToList, a default -// implementation, which picks all valid key-values. -stream.KeyToList = nil - -// -- End of optional settings. - -// Send is called serially, while Stream.Orchestrate is running. -stream.Send = func(list *pb.KVList) error { - return proto.MarshalText(w, list) // Write to w. -} - -// Run the stream -if err := stream.Orchestrate(context.Background()); err != nil { - return err -} -// Done. -``` - -### Garbage Collection -Badger values need to be garbage collected, because of two reasons: - -* Badger keeps values separately from the LSM tree. This means that the compaction operations -that clean up the LSM tree do not touch the values at all. Values need to be cleaned up -separately. - -* Concurrent read/write transactions could leave behind multiple values for a single key, because they -are stored with different versions. These could accumulate, and take up unneeded space beyond the -time these older versions are needed. - -Badger relies on the client to perform garbage collection at a time of their choosing. It provides -the following method, which can be invoked at an appropriate time: - -* `DB.RunValueLogGC()`: This method is designed to do garbage collection while - Badger is online. Along with randomly picking a file, it uses statistics generated by the - LSM-tree compactions to pick files that are likely to lead to maximum space - reclamation. It is recommended to be called during periods of low activity in - your system, or periodically. One call would only result in removal of at max - one log file. As an optimization, you could also immediately re-run it whenever - it returns nil error (indicating a successful value log GC), as shown below. - - ```go - ticker := time.NewTicker(5 * time.Minute) - defer ticker.Stop() - for range ticker.C { - again: - err := db.RunValueLogGC(0.7) - if err == nil { - goto again - } - } - ``` - -* `DB.PurgeOlderVersions()`: This method is **DEPRECATED** since v1.5.0. Now, Badger's LSM tree automatically discards older/invalid versions of keys. - -**Note: The RunValueLogGC method would not garbage collect the latest value log.** - -### Database backup -There are two public API methods `DB.Backup()` and `DB.Load()` which can be -used to do online backups and restores. Badger v0.9 provides a CLI tool -`badger`, which can do offline backup/restore. Make sure you have `$GOPATH/bin` -in your PATH to use this tool. 
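Before the CLI flow, a minimal sketch of the API route, assuming the v1.x signatures `DB.Backup(w io.Writer, since uint64) (uint64, error)` and `DB.Load(r io.Reader, maxPendingWrites int) error`; the file name and the pending-writes value are arbitrary:

```go
// Full online backup: since = 0 means "back up everything".
f, err := os.Create("badger.bak")
if err != nil {
	return err
}
defer f.Close()

// Backup returns a version timestamp; feed it back as `since`
// on a later call to produce an incremental backup.
since, err := db.Backup(f, 0)
if err != nil {
	return err
}
log.Printf("backup done; pass since=%d for the next incremental run", since)

// Restore into a (typically empty) DB from the backup file.
r, err := os.Open("badger.bak")
if err != nil {
	return err
}
defer r.Close()

// 256 caps pending writes during the load; an arbitrary batching
// knob for this sketch, not a recommendation.
if err := db.Load(r, 256); err != nil {
	return err
}
```

Because `Backup` iterates over a transaction snapshot (via the Stream framework described earlier), it can run while the database is serving traffic, which is what distinguishes it from the offline CLI flow below.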
- -The command below will create a version-agnostic backup of the database, to a -file `badger.bak` in the current working directory - -``` -badger backup --dir -``` - -To restore `badger.bak` in the current working directory to a new database: - -``` -badger restore --dir -``` - -See `badger --help` for more details. - -If you have a Badger database that was created using v0.8 (or below), you can -use the `badger_backup` tool provided in v0.8.1, and then restore it using the -command above to upgrade your database to work with the latest version. - -``` -badger_backup --dir --backup-file badger.bak -``` - -We recommend all users to use the `Backup` and `Restore` APIs and tools. However, -Badger is also rsync-friendly because all files are immutable, barring the -latest value log which is append-only. So, rsync can be used as rudimentary way -to perform a backup. In the following script, we repeat rsync to ensure that the -LSM tree remains consistent with the MANIFEST file while doing a full backup. - -``` -#!/bin/bash -set -o history -set -o histexpand -# Makes a complete copy of a Badger database directory. -# Repeat rsync if the MANIFEST and SSTables are updated. -rsync -avz --delete db/ dst -while !! | grep -q "(MANIFEST\|\.sst)$"; do :; done -``` - -### Memory usage -Badger's memory usage can be managed by tweaking several options available in -the `Options` struct that is passed in when opening the database using -`DB.Open`. - -- `Options.ValueLogLoadingMode` can be set to `options.FileIO` (instead of the - default `options.MemoryMap`) to avoid memory-mapping log files. This can be - useful in environments with low RAM. -- Number of memtables (`Options.NumMemtables`) - - If you modify `Options.NumMemtables`, also adjust `Options.NumLevelZeroTables` and - `Options.NumLevelZeroTablesStall` accordingly. -- Number of concurrent compactions (`Options.NumCompactors`) -- Mode in which LSM tree is loaded (`Options.TableLoadingMode`) -- Size of table (`Options.MaxTableSize`) -- Size of value log file (`Options.ValueLogFileSize`) - -If you want to decrease the memory usage of Badger instance, tweak these -options (ideally one at a time) until you achieve the desired -memory usage. - -### Statistics -Badger records metrics using the [expvar] package, which is included in the Go -standard library. All the metrics are documented in [y/metrics.go][metrics] -file. - -`expvar` package adds a handler in to the default HTTP server (which has to be -started explicitly), and serves up the metrics at the `/debug/vars` endpoint. -These metrics can then be collected by a system like [Prometheus], to get -better visibility into what Badger is doing. - -[expvar]: https://golang.org/pkg/expvar/ -[metrics]: https://github.com/dgraph-io/badger/blob/master/y/metrics.go -[Prometheus]: https://prometheus.io/ - -## Resources - -### Blog Posts -1. [Introducing Badger: A fast key-value store written natively in -Go](https://open.dgraph.io/post/badger/) -2. [Make Badger crash resilient with ALICE](https://blog.dgraph.io/post/alice/) -3. [Badger vs LMDB vs BoltDB: Benchmarking key-value databases in Go](https://blog.dgraph.io/post/badger-lmdb-boltdb/) -4. [Concurrent ACID Transactions in Badger](https://blog.dgraph.io/post/badger-txn/) - -## Design -Badger was written with these design goals in mind: - -- Write a key-value database in pure Go. -- Use latest research to build the fastest KV database for data sets spanning terabytes. -- Optimize for SSDs. 
- -Badger’s design is based on a paper titled _[WiscKey: Separating Keys from -Values in SSD-conscious Storage][wisckey]_. - -[wisckey]: https://www.usenix.org/system/files/conference/fast16/fast16-papers-lu.pdf - -### Comparisons -| Feature | Badger | RocksDB | BoltDB | -| ------- | ------ | ------- | ------ | -| Design | LSM tree with value log | LSM tree only | B+ tree | -| High Read throughput | Yes | No | Yes | -| High Write throughput | Yes | Yes | No | -| Designed for SSDs | Yes (with latest research 1) | Not specifically 2 | No | -| Embeddable | Yes | Yes | Yes | -| Sorted KV access | Yes | Yes | Yes | -| Pure Go (no Cgo) | Yes | No | Yes | -| Transactions | Yes, ACID, concurrent with SSI3 | Yes (but non-ACID) | Yes, ACID | -| Snapshots | Yes | Yes | Yes | -| TTL support | Yes | Yes | No | -| 3D access (key-value-version) | Yes4 | No | No | - -1 The [WISCKEY paper][wisckey] (on which Badger is based) saw big -wins with separating values from keys, significantly reducing the write -amplification compared to a typical LSM tree. - -2 RocksDB is an SSD optimized version of LevelDB, which was designed specifically for rotating disks. -As such RocksDB's design isn't aimed at SSDs. - -3 SSI: Serializable Snapshot Isolation. For more details, see the blog post [Concurrent ACID Transactions in Badger](https://blog.dgraph.io/post/badger-txn/) - -4 Badger provides direct access to value versions via its Iterator API. -Users can also specify how many versions to keep per key via Options. - -### Benchmarks -We have run comprehensive benchmarks against RocksDB, Bolt and LMDB. The -benchmarking code, and the detailed logs for the benchmarks can be found in the -[badger-bench] repo. More explanation, including graphs can be found the blog posts (linked -above). - -[badger-bench]: https://github.com/dgraph-io/badger-bench - -## Other Projects Using Badger -Below is a list of known projects that use Badger: - -* [0-stor](https://github.com/zero-os/0-stor) - Single device object store. -* [Dgraph](https://github.com/dgraph-io/dgraph) - Distributed graph database. -* [Jaeger](https://github.com/jaegertracing/jaeger) - Distributed tracing platform. -* [TalariaDB](https://github.com/grab/talaria) - Distributed, low latency time-series database. -* [Dispatch Protocol](https://github.com/dispatchlabs/disgo) - Blockchain protocol for distributed application data analytics. -* [Sandglass](https://github.com/celrenheit/sandglass) - distributed, horizontally scalable, persistent, time sorted message queue. -* [Usenet Express](https://usenetexpress.com/) - Serving over 300TB of data with Badger. -* [go-ipfs](https://github.com/ipfs/go-ipfs) - Go client for the InterPlanetary File System (IPFS), a new hypermedia distribution protocol. -* [gorush](https://github.com/appleboy/gorush) - A push notification server written in Go. -* [emitter](https://github.com/emitter-io/emitter) - Scalable, low latency, distributed pub/sub broker with message storage, uses MQTT, gossip and badger. -* [GarageMQ](https://github.com/valinurovam/garagemq) - AMQP server written in Go. -* [RedixDB](https://alash3al.github.io/redix/) - A real-time persistent key-value store with the same redis protocol. -* [BBVA](https://github.com/BBVA/raft-badger) - Raft backend implementation using BadgerDB for Hashicorp raft. -* [Riot](https://github.com/go-ego/riot) - An open-source, distributed search engine. -* [Fantom](https://github.com/Fantom-foundation/go-lachesis) - aBFT Consensus platform for distributed applications. 
-* [decred](https://github.com/decred/dcrdata) - An open, progressive, and self-funding cryptocurrency with a system of community-based governance integrated into its blockchain. -* [OpenNetSys](https://github.com/opennetsys/c3-go) - Create useful dApps in any software language. -* [HoneyTrap](https://github.com/honeytrap/honeytrap) - An extensible and opensource system for running, monitoring and managing honeypots. -* [Insolar](https://github.com/insolar/insolar) - Enterprise-ready blockchain platform. -* [IoTeX](https://github.com/iotexproject/iotex-core) - The next generation of the decentralized network for IoT powered by scalability- and privacy-centric blockchains. -* [go-sessions](https://github.com/kataras/go-sessions) - The sessions manager for Go net/http and fasthttp. -* [Babble](https://github.com/mosaicnetworks/babble) - BFT Consensus platform for distributed applications. -* [Tormenta](https://github.com/jpincas/tormenta) - Embedded object-persistence layer / simple JSON database for Go projects. -* [BadgerHold](https://github.com/timshannon/badgerhold) - An embeddable NoSQL store for querying Go types built on Badger -* [Goblero](https://github.com/didil/goblero) - Pure Go embedded persistent job queue backed by BadgerDB -* [Surfline](https://www.surfline.com) - Serving global wave and weather forecast data with Badger. -* [Cete](https://github.com/mosuka/cete) - Simple and highly available distributed key-value store built on Badger. Makes it easy bringing up a cluster of Badger with Raft consensus algorithm by hashicorp/raft. -* [Volument](https://volument.com/) - A new take on website analytics backed by Badger. -* [Sloop](https://github.com/salesforce/sloop) - Kubernetes History Visualization. -* [KVdb](https://kvdb.io/) - Hosted key-value store and serverless platform built on top of Badger. -* [Dkron](https://dkron.io/) - Distributed, fault tolerant job scheduling system. - -If you are using Badger in a project please send a pull request to add it to the list. - -## Frequently Asked Questions -### My writes are getting stuck. Why? - -**Update: With the new `Value(func(v []byte))` API, this deadlock can no longer -happen.** - -The following is true for users on Badger v1.x. - -This can happen if a long running iteration with `Prefetch` is set to false, but -a `Item::Value` call is made internally in the loop. That causes Badger to -acquire read locks over the value log files to avoid value log GC removing the -file from underneath. As a side effect, this also blocks a new value log GC -file from being created, when the value log file boundary is hit. - -Please see Github issues [#293](https://github.com/dgraph-io/badger/issues/293) -and [#315](https://github.com/dgraph-io/badger/issues/315). - -There are multiple workarounds during iteration: - -1. Use `Item::ValueCopy` instead of `Item::Value` when retrieving value. -1. Set `Prefetch` to true. Badger would then copy over the value and release the - file lock immediately. -1. When `Prefetch` is false, don't call `Item::Value` and do a pure key-only - iteration. This might be useful if you just want to delete a lot of keys. -1. Do the writes in a separate transaction after the reads. - -### My writes are really slow. Why? - -Are you creating a new transaction for every single key update, and waiting for -it to `Commit` fully before creating a new one? This will lead to very low -throughput. 
- -We have created `WriteBatch` API which provides a way to batch up -many updates into a single transaction and `Commit` that transaction using -callbacks to avoid blocking. This amortizes the cost of a transaction really -well, and provides the most efficient way to do bulk writes. - -```go -wb := db.NewWriteBatch() -defer wb.Cancel() - -for i := 0; i < N; i++ { - err := wb.Set(key(i), value(i), 0) // Will create txns as needed. - handle(err) -} -handle(wb.Flush()) // Wait for all txns to finish. -``` - -Note that `WriteBatch` API does not allow any reads. For read-modify-write -workloads, you should be using the `Transaction` API. - -### I don't see any disk writes. Why? - -If you're using Badger with `SyncWrites=false`, then your writes might not be written to value log -and won't get synced to disk immediately. Writes to LSM tree are done inmemory first, before they -get compacted to disk. The compaction would only happen once `MaxTableSize` has been reached. So, if -you're doing a few writes and then checking, you might not see anything on disk. Once you `Close` -the database, you'll see these writes on disk. - -### Reverse iteration doesn't give me the right results. - -Just like forward iteration goes to the first key which is equal or greater than the SEEK key, reverse iteration goes to the first key which is equal or lesser than the SEEK key. Therefore, SEEK key would not be part of the results. You can typically add a `0xff` byte as a suffix to the SEEK key to include it in the results. See the following issues: [#436](https://github.com/dgraph-io/badger/issues/436) and [#347](https://github.com/dgraph-io/badger/issues/347). - -### Which instances should I use for Badger? - -We recommend using instances which provide local SSD storage, without any limit -on the maximum IOPS. In AWS, these are storage optimized instances like i3. They -provide local SSDs which clock 100K IOPS over 4KB blocks easily. - -### I'm getting a closed channel error. Why? - -``` -panic: close of closed channel -panic: send on closed channel -``` - -If you're seeing panics like above, this would be because you're operating on a closed DB. This can happen, if you call `Close()` before sending a write, or multiple times. You should ensure that you only call `Close()` once, and all your read/write operations finish before closing. - -### Are there any Go specific settings that I should use? - -We *highly* recommend setting a high number for `GOMAXPROCS`, which allows Go to -observe the full IOPS throughput provided by modern SSDs. In Dgraph, we have set -it to 128. For more details, [see this -thread](https://groups.google.com/d/topic/golang-nuts/jPb_h3TvlKE/discussion). - -### Are there any Linux specific settings that I should use? - -We recommend setting `max file descriptors` to a high number depending upon the expected size of -your data. On Linux and Mac, you can check the file descriptor limit with `ulimit -n -H` for the -hard limit and `ulimit -n -S` for the soft limit. A soft limit of `65535` is a good lower bound. -You can adjust the limit as needed. - -### I see "manifest has unsupported version: X (we support Y)" error. - -This error means you have a badger directory which was created by an older version of badger and -you're trying to open in a newer version of badger. The underlying data format can change across -badger versions and users will have to migrate their data directory. -Badger data can be migrated from version X of badger to version Y of badger by following the steps -listed below. 
-Assume you are on badger v1.6.0 and you wish to migrate to v2.0.0.
-1. Install badger version v1.6.0
-   - `cd $GOPATH/src/github.com/dgraph-io/badger`
-   - `git checkout v1.6.0`
-   - `cd badger && go install`
-   - This should install the old badger binary in your $GOBIN.
-2. Create a backup
-   - `badger backup --dir path/to/badger/directory -f badger.backup`
-3. Install badger version v2.0.0
-   - `cd $GOPATH/src/github.com/dgraph-io/badger`
-   - `git checkout v2.0.0`
-   - `cd badger && go install`
-   - This should install the new badger binary in your $GOBIN.
-4. Restore from the backup
-   - `badger restore --dir path/to/new/badger/directory -f badger.backup`
-   - This will create a new directory at `path/to/new/badger/directory` and add the badger data to it
-     in the newer format.
-
-NOTE - The above steps shouldn't cause any data loss, but please ensure the new data is valid before
-deleting the old badger directory.
-## Contact
-- Please use [discuss.dgraph.io](https://discuss.dgraph.io) for questions, feature requests and discussions.
-- Please use the [GitHub issue tracker](https://github.com/dgraph-io/badger/issues) for filing bugs or feature requests.
-- Join [![Slack Status](http://slack.dgraph.io/badge.svg)](http://slack.dgraph.io).
-- Follow us on Twitter [@dgraphlabs](https://twitter.com/dgraphlabs).
-
diff --git a/vendor/github.com/dgraph-io/badger/VERSIONING.md b/vendor/github.com/dgraph-io/badger/VERSIONING.md
deleted file mode 100644
index a890a36f..00000000
--- a/vendor/github.com/dgraph-io/badger/VERSIONING.md
+++ /dev/null
@@ -1,47 +0,0 @@
-# Serialization Versioning: Semantic Versioning for databases
-
-Semantic Versioning, commonly known as SemVer, is a great idea that has been very widely adopted as
-a way to decide how to name software versions. The whole concept is very well summarized on
-semver.org with the following lines:
-
-> Given a version number MAJOR.MINOR.PATCH, increment the:
->
-> 1. MAJOR version when you make incompatible API changes,
-> 2. MINOR version when you add functionality in a backwards-compatible manner, and
-> 3. PATCH version when you make backwards-compatible bug fixes.
->
-> Additional labels for pre-release and build metadata are available as extensions to the
-> MAJOR.MINOR.PATCH format.
-
-Unfortunately, API changes are not the most important changes for libraries that serialize data for
-later consumption. For these libraries, such as BadgerDB, changes to the API are much easier to
-handle than changes to the data format used to store data on disk.
-
-## Serialization Version specification
-
-Serialization Versioning, like Semantic Versioning, uses 3 numbers and also calls them
-MAJOR.MINOR.PATCH, but the semantics of the numbers are slightly modified:
-
-Given a version number MAJOR.MINOR.PATCH, increment the:
-
-- MAJOR version when you make changes that require a transformation of the dataset before it can be
-used again.
-- MINOR version when old datasets are still readable but the API might have changed in
-backwards-compatible or incompatible ways.
-- PATCH version when you make backwards-compatible bug fixes.
-
-Additional labels for pre-release and build metadata are available as extensions to the
-MAJOR.MINOR.PATCH format.
-
-Following this naming strategy, migration from v1.x to v2.x requires a migration strategy for your
-existing dataset, and as such has to be carefully planned. Migrations in between different minor
-versions (e.g.
v1.5.x and v1.6.x) might break your build, as the API *might* have changed, but once -your code compiles there's no need for any data migration. Lastly, changes in between two different -patch versions should never break your build or dataset. - -For more background on our decision to adopt Serialization Versioning, read the blog post -[Semantic Versioning, Go Modules, and Databases][blog] and the original proposal on -[this comment on Dgraph's Discuss forum][discuss]. - -[blog]: https://blog.dgraph.io/post/serialization-versioning/ -[discuss]: https://discuss.dgraph.io/t/go-modules-on-badger-and-dgraph/4662/7 \ No newline at end of file diff --git a/vendor/github.com/dgraph-io/badger/appveyor.yml b/vendor/github.com/dgraph-io/badger/appveyor.yml deleted file mode 100644 index 36853e9d..00000000 --- a/vendor/github.com/dgraph-io/badger/appveyor.yml +++ /dev/null @@ -1,49 +0,0 @@ -# version format -version: "{build}" - -# Operating system (build VM template) -os: Windows Server 2012 R2 - -# Platform. -platform: x64 - -clone_folder: c:\gopath\src\github.com\dgraph-io\badger - -# Environment variables -environment: - GOVERSION: 1.12 - GOPATH: c:\gopath - GO111MODULE: on - -# scripts that run after cloning repository -install: - - set PATH=%GOPATH%\bin;c:\go\bin;%PATH% - - go version - - go env - - python --version - -# To run your custom scripts instead of automatic MSBuild -build_script: - # We need to disable firewall - https://github.com/appveyor/ci/issues/1579#issuecomment-309830648 - - ps: Disable-NetFirewallRule -DisplayName 'File and Printer Sharing (SMB-Out)' - - cd c:\gopath\src\github.com\dgraph-io\badger - - git branch - - go get -t ./... - -# To run your custom scripts instead of automatic tests -test_script: - # Unit tests - - ps: Add-AppveyorTest "Unit Tests" -Outcome Running - - go test -v github.com/dgraph-io/badger/... - - go test -v -vlog_mmap=false github.com/dgraph-io/badger/... - - ps: Update-AppveyorTest "Unit Tests" -Outcome Passed - -notifications: - - provider: Email - to: - - pawan@dgraph.io - on_build_failure: true - on_build_status_changed: true -# to disable deployment -deploy: off - diff --git a/vendor/github.com/dgraph-io/badger/backup.go b/vendor/github.com/dgraph-io/badger/backup.go deleted file mode 100644 index 365668d0..00000000 --- a/vendor/github.com/dgraph-io/badger/backup.go +++ /dev/null @@ -1,264 +0,0 @@ -/* - * Copyright 2017 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package badger - -import ( - "bufio" - "bytes" - "context" - "encoding/binary" - "io" - - "github.com/dgraph-io/badger/pb" - "github.com/dgraph-io/badger/y" - "github.com/golang/protobuf/proto" -) - -// flushThreshold determines when a buffer will be flushed. When performing a -// backup/restore, the entries will be batched up until the total size of batch -// is more than flushThreshold or entry size (without the value size) is more -// than the maxBatchSize. 
-const flushThreshold = 100 << 20 - -// Backup is a wrapper function over Stream.Backup to generate full and incremental backups of the -// DB. For more control over how many goroutines are used to generate the backup, or if you wish to -// backup only a certain range of keys, use Stream.Backup directly. -func (db *DB) Backup(w io.Writer, since uint64) (uint64, error) { - stream := db.NewStream() - stream.LogPrefix = "DB.Backup" - return stream.Backup(w, since) -} - -// Backup dumps a protobuf-encoded list of all entries in the database into the -// given writer, that are newer than the specified version. It returns a -// timestamp indicating when the entries were dumped which can be passed into a -// later invocation to generate an incremental dump, of entries that have been -// added/modified since the last invocation of Stream.Backup(). -// -// This can be used to backup the data in a database at a given point in time. -func (stream *Stream) Backup(w io.Writer, since uint64) (uint64, error) { - stream.KeyToList = func(key []byte, itr *Iterator) (*pb.KVList, error) { - list := &pb.KVList{} - for ; itr.Valid(); itr.Next() { - item := itr.Item() - if !bytes.Equal(item.Key(), key) { - return list, nil - } - if item.Version() < since { - // Ignore versions less than given timestamp, or skip older - // versions of the given key. - return list, nil - } - - var valCopy []byte - if !item.IsDeletedOrExpired() { - // No need to copy value, if item is deleted or expired. - var err error - valCopy, err = item.ValueCopy(nil) - if err != nil { - stream.db.opt.Errorf("Key [%x, %d]. Error while fetching value [%v]\n", - item.Key(), item.Version(), err) - return nil, err - } - } - - // clear txn bits - meta := item.meta &^ (bitTxn | bitFinTxn) - kv := &pb.KV{ - Key: item.KeyCopy(nil), - Value: valCopy, - UserMeta: []byte{item.UserMeta()}, - Version: item.Version(), - ExpiresAt: item.ExpiresAt(), - Meta: []byte{meta}, - } - list.Kv = append(list.Kv, kv) - - switch { - case item.DiscardEarlierVersions(): - // If we need to discard earlier versions of this item, add a delete - // marker just below the current version. - list.Kv = append(list.Kv, &pb.KV{ - Key: item.KeyCopy(nil), - Version: item.Version() - 1, - Meta: []byte{bitDelete}, - }) - return list, nil - - case item.IsDeletedOrExpired(): - return list, nil - } - } - return list, nil - } - - var maxVersion uint64 - stream.Send = func(list *pb.KVList) error { - for _, kv := range list.Kv { - if maxVersion < kv.Version { - maxVersion = kv.Version - } - } - return writeTo(list, w) - } - - if err := stream.Orchestrate(context.Background()); err != nil { - return 0, err - } - return maxVersion, nil -} - -func writeTo(list *pb.KVList, w io.Writer) error { - if err := binary.Write(w, binary.LittleEndian, uint64(proto.Size(list))); err != nil { - return err - } - buf, err := proto.Marshal(list) - if err != nil { - return err - } - _, err = w.Write(buf) - return err -} - -// KVLoader is used to write KVList objects in to badger. It can be used to restore a backup. -type KVLoader struct { - db *DB - throttle *y.Throttle - entries []*Entry - entriesSize int64 - totalSize int64 -} - -// NewKVLoader returns a new instance of KVLoader. -func (db *DB) NewKVLoader(maxPendingWrites int) *KVLoader { - return &KVLoader{ - db: db, - throttle: y.NewThrottle(maxPendingWrites), - entries: make([]*Entry, 0, db.opt.maxBatchCount), - } -} - -// Set writes the key-value pair to the database. 
-func (l *KVLoader) Set(kv *pb.KV) error { - var userMeta, meta byte - if len(kv.UserMeta) > 0 { - userMeta = kv.UserMeta[0] - } - if len(kv.Meta) > 0 { - meta = kv.Meta[0] - } - e := &Entry{ - Key: y.KeyWithTs(kv.Key, kv.Version), - Value: kv.Value, - UserMeta: userMeta, - ExpiresAt: kv.ExpiresAt, - meta: meta, - } - estimatedSize := int64(e.estimateSize(l.db.opt.ValueThreshold)) - // Flush entries if inserting the next entry would overflow the transactional limits. - if int64(len(l.entries))+1 >= l.db.opt.maxBatchCount || - l.entriesSize+estimatedSize >= l.db.opt.maxBatchSize || - l.totalSize >= flushThreshold { - if err := l.send(); err != nil { - return err - } - } - l.entries = append(l.entries, e) - l.entriesSize += estimatedSize - l.totalSize += estimatedSize + int64(len(e.Value)) - return nil -} - -func (l *KVLoader) send() error { - if err := l.throttle.Do(); err != nil { - return err - } - if err := l.db.batchSetAsync(l.entries, func(err error) { - l.throttle.Done(err) - }); err != nil { - return err - } - - l.entries = make([]*Entry, 0, l.db.opt.maxBatchCount) - l.entriesSize = 0 - l.totalSize = 0 - return nil -} - -// Finish is meant to be called after all the key-value pairs have been loaded. -func (l *KVLoader) Finish() error { - if len(l.entries) > 0 { - if err := l.send(); err != nil { - return err - } - } - return l.throttle.Finish() -} - -// Load reads a protobuf-encoded list of all entries from a reader and writes -// them to the database. This can be used to restore the database from a backup -// made by calling DB.Backup(). If more complex logic is needed to restore a badger -// backup, the KVLoader interface should be used instead. -// -// DB.Load() should be called on a database that is not running any other -// concurrent transactions while it is running. -func (db *DB) Load(r io.Reader, maxPendingWrites int) error { - br := bufio.NewReaderSize(r, 16<<10) - unmarshalBuf := make([]byte, 1<<10) - - ldr := db.NewKVLoader(maxPendingWrites) - for { - var sz uint64 - err := binary.Read(br, binary.LittleEndian, &sz) - if err == io.EOF { - break - } else if err != nil { - return err - } - - if cap(unmarshalBuf) < int(sz) { - unmarshalBuf = make([]byte, sz) - } - - if _, err = io.ReadFull(br, unmarshalBuf[:sz]); err != nil { - return err - } - - list := &pb.KVList{} - if err := proto.Unmarshal(unmarshalBuf[:sz], list); err != nil { - return err - } - - for _, kv := range list.Kv { - if err := ldr.Set(kv); err != nil { - return err - } - - // Update nextTxnTs, memtable stores this - // timestamp in badger head when flushed. - if kv.Version >= db.orc.nextTxnTs { - db.orc.nextTxnTs = kv.Version + 1 - } - } - } - - if err := ldr.Finish(); err != nil { - return err - } - db.orc.txnMark.Done(db.orc.nextTxnTs - 1) - return nil -} diff --git a/vendor/github.com/dgraph-io/badger/batch.go b/vendor/github.com/dgraph-io/badger/batch.go deleted file mode 100644 index 76230a0b..00000000 --- a/vendor/github.com/dgraph-io/badger/batch.go +++ /dev/null @@ -1,171 +0,0 @@ -/* - * Copyright 2018 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package badger - -import ( - "sync" - - "github.com/dgraph-io/badger/y" -) - -// WriteBatch holds the necessary info to perform batched writes. -type WriteBatch struct { - sync.Mutex - txn *Txn - db *DB - throttle *y.Throttle - err error - commitTs uint64 -} - -// NewWriteBatch creates a new WriteBatch. This provides a way to conveniently do a lot of writes, -// batching them up as tightly as possible in a single transaction and using callbacks to avoid -// waiting for them to commit, thus achieving good performance. This API hides away the logic of -// creating and committing transactions. Due to the nature of SSI guaratees provided by Badger, -// blind writes can never encounter transaction conflicts (ErrConflict). -func (db *DB) NewWriteBatch() *WriteBatch { - if db.opt.managedTxns { - panic("cannot use NewWriteBatch in managed mode. Use NewWriteBatchAt instead") - } - return db.newWriteBatch() -} - -func (db *DB) newWriteBatch() *WriteBatch { - return &WriteBatch{ - db: db, - txn: db.newTransaction(true, true), - throttle: y.NewThrottle(16), - } -} - -// SetMaxPendingTxns sets a limit on maximum number of pending transactions while writing batches. -// This function should be called before using WriteBatch. Default value of MaxPendingTxns is -// 16 to minimise memory usage. -func (wb *WriteBatch) SetMaxPendingTxns(max int) { - wb.throttle = y.NewThrottle(max) -} - -// Cancel function must be called if there's a chance that Flush might not get -// called. If neither Flush or Cancel is called, the transaction oracle would -// never get a chance to clear out the row commit timestamp map, thus causing an -// unbounded memory consumption. Typically, you can call Cancel as a defer -// statement right after NewWriteBatch is called. -// -// Note that any committed writes would still go through despite calling Cancel. -func (wb *WriteBatch) Cancel() { - if err := wb.throttle.Finish(); err != nil { - wb.db.opt.Errorf("WatchBatch.Cancel error while finishing: %v", err) - } - wb.txn.Discard() -} - -func (wb *WriteBatch) callback(err error) { - // sync.WaitGroup is thread-safe, so it doesn't need to be run inside wb.Lock. - defer wb.throttle.Done(err) - if err == nil { - return - } - - wb.Lock() - defer wb.Unlock() - if wb.err != nil { - return - } - wb.err = err -} - -// SetEntry is the equivalent of Txn.SetEntry. -func (wb *WriteBatch) SetEntry(e *Entry) error { - wb.Lock() - defer wb.Unlock() - - if err := wb.txn.SetEntry(e); err != ErrTxnTooBig { - return err - } - // Txn has reached it's zenith. Commit now. - if cerr := wb.commit(); cerr != nil { - return cerr - } - // This time the error must not be ErrTxnTooBig, otherwise, we make the - // error permanent. - if err := wb.txn.SetEntry(e); err != nil { - wb.err = err - return err - } - return nil -} - -// Set is equivalent of Txn.Set(). -func (wb *WriteBatch) Set(k, v []byte) error { - e := &Entry{Key: k, Value: v} - return wb.SetEntry(e) -} - -// Delete is equivalent of Txn.Delete. -func (wb *WriteBatch) Delete(k []byte) error { - wb.Lock() - defer wb.Unlock() - - if err := wb.txn.Delete(k); err != ErrTxnTooBig { - return err - } - if err := wb.commit(); err != nil { - return err - } - if err := wb.txn.Delete(k); err != nil { - wb.err = err - return err - } - return nil -} - -// Caller to commit must hold a write lock. 
-func (wb *WriteBatch) commit() error { - if wb.err != nil { - return wb.err - } - if err := wb.throttle.Do(); err != nil { - return err - } - wb.txn.CommitWith(wb.callback) - wb.txn = wb.db.newTransaction(true, true) - wb.txn.readTs = 0 // We're not reading anything. - wb.txn.commitTs = wb.commitTs - return wb.err -} - -// Flush must be called at the end to ensure that any pending writes get committed to Badger. Flush -// returns any error stored by WriteBatch. -func (wb *WriteBatch) Flush() error { - wb.Lock() - _ = wb.commit() - wb.txn.Discard() - wb.Unlock() - - if err := wb.throttle.Finish(); err != nil { - return err - } - - return wb.err -} - -// Error returns any errors encountered so far. No commits would be run once an error is detected. -func (wb *WriteBatch) Error() error { - wb.Lock() - defer wb.Unlock() - return wb.err -} diff --git a/vendor/github.com/dgraph-io/badger/compaction.go b/vendor/github.com/dgraph-io/badger/compaction.go deleted file mode 100644 index 375e40be..00000000 --- a/vendor/github.com/dgraph-io/badger/compaction.go +++ /dev/null @@ -1,213 +0,0 @@ -/* - * Copyright 2017 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package badger - -import ( - "bytes" - "fmt" - "log" - "math" - "sync" - - "golang.org/x/net/trace" - - "github.com/dgraph-io/badger/table" - "github.com/dgraph-io/badger/y" -) - -type keyRange struct { - left []byte - right []byte - inf bool -} - -var infRange = keyRange{inf: true} - -func (r keyRange) String() string { - return fmt.Sprintf("[left=%x, right=%x, inf=%v]", r.left, r.right, r.inf) -} - -func (r keyRange) equals(dst keyRange) bool { - return bytes.Equal(r.left, dst.left) && - bytes.Equal(r.right, dst.right) && - r.inf == dst.inf -} - -func (r keyRange) overlapsWith(dst keyRange) bool { - if r.inf || dst.inf { - return true - } - - // If my left is greater than dst right, we have no overlap. - if y.CompareKeys(r.left, dst.right) > 0 { - return false - } - // If my right is less than dst left, we have no overlap. - if y.CompareKeys(r.right, dst.left) < 0 { - return false - } - // We have overlap. - return true -} - -func getKeyRange(tables ...*table.Table) keyRange { - if len(tables) == 0 { - return keyRange{} - } - smallest := tables[0].Smallest() - biggest := tables[0].Biggest() - for i := 1; i < len(tables); i++ { - if y.CompareKeys(tables[i].Smallest(), smallest) < 0 { - smallest = tables[i].Smallest() - } - if y.CompareKeys(tables[i].Biggest(), biggest) > 0 { - biggest = tables[i].Biggest() - } - } - - // We pick all the versions of the smallest and the biggest key. Note that version zero would - // be the rightmost key, considering versions are default sorted in descending order. 
- return keyRange{ - left: y.KeyWithTs(y.ParseKey(smallest), math.MaxUint64), - right: y.KeyWithTs(y.ParseKey(biggest), 0), - } -} - -type levelCompactStatus struct { - ranges []keyRange - delSize int64 -} - -func (lcs *levelCompactStatus) debug() string { - var b bytes.Buffer - for _, r := range lcs.ranges { - b.WriteString(r.String()) - } - return b.String() -} - -func (lcs *levelCompactStatus) overlapsWith(dst keyRange) bool { - for _, r := range lcs.ranges { - if r.overlapsWith(dst) { - return true - } - } - return false -} - -func (lcs *levelCompactStatus) remove(dst keyRange) bool { - final := lcs.ranges[:0] - var found bool - for _, r := range lcs.ranges { - if !r.equals(dst) { - final = append(final, r) - } else { - found = true - } - } - lcs.ranges = final - return found -} - -type compactStatus struct { - sync.RWMutex - levels []*levelCompactStatus -} - -func (cs *compactStatus) toLog(tr trace.Trace) { - cs.RLock() - defer cs.RUnlock() - - tr.LazyPrintf("Compaction status:") - for i, l := range cs.levels { - if l.debug() == "" { - continue - } - tr.LazyPrintf("[%d] %s", i, l.debug()) - } -} - -func (cs *compactStatus) overlapsWith(level int, this keyRange) bool { - cs.RLock() - defer cs.RUnlock() - - thisLevel := cs.levels[level] - return thisLevel.overlapsWith(this) -} - -func (cs *compactStatus) delSize(l int) int64 { - cs.RLock() - defer cs.RUnlock() - return cs.levels[l].delSize -} - -type thisAndNextLevelRLocked struct{} - -// compareAndAdd will check whether we can run this compactDef. That it doesn't overlap with any -// other running compaction. If it can be run, it would store this run in the compactStatus state. -func (cs *compactStatus) compareAndAdd(_ thisAndNextLevelRLocked, cd compactDef) bool { - cs.Lock() - defer cs.Unlock() - - level := cd.thisLevel.level - - y.AssertTruef(level < len(cs.levels)-1, "Got level %d. Max levels: %d", level, len(cs.levels)) - thisLevel := cs.levels[level] - nextLevel := cs.levels[level+1] - - if thisLevel.overlapsWith(cd.thisRange) { - return false - } - if nextLevel.overlapsWith(cd.nextRange) { - return false - } - // Check whether this level really needs compaction or not. Otherwise, we'll end up - // running parallel compactions for the same level. - // Update: We should not be checking size here. Compaction priority already did the size checks. - // Here we should just be executing the wish of others. - - thisLevel.ranges = append(thisLevel.ranges, cd.thisRange) - nextLevel.ranges = append(nextLevel.ranges, cd.nextRange) - thisLevel.delSize += cd.thisSize - return true -} - -func (cs *compactStatus) delete(cd compactDef) { - cs.Lock() - defer cs.Unlock() - - level := cd.thisLevel.level - y.AssertTruef(level < len(cs.levels)-1, "Got level %d. 
Max levels: %d", level, len(cs.levels)) - - thisLevel := cs.levels[level] - nextLevel := cs.levels[level+1] - - thisLevel.delSize -= cd.thisSize - found := thisLevel.remove(cd.thisRange) - found = nextLevel.remove(cd.nextRange) && found - - if !found { - this := cd.thisRange - next := cd.nextRange - fmt.Printf("Looking for: [%q, %q, %v] in this level.\n", this.left, this.right, this.inf) - fmt.Printf("This Level:\n%s\n", thisLevel.debug()) - fmt.Println() - fmt.Printf("Looking for: [%q, %q, %v] in next level.\n", next.left, next.right, next.inf) - fmt.Printf("Next Level:\n%s\n", nextLevel.debug()) - log.Fatal("keyRange not found") - } -} diff --git a/vendor/github.com/dgraph-io/badger/db.go b/vendor/github.com/dgraph-io/badger/db.go deleted file mode 100644 index 1d340ecd..00000000 --- a/vendor/github.com/dgraph-io/badger/db.go +++ /dev/null @@ -1,1560 +0,0 @@ -/* - * Copyright 2017 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package badger - -import ( - "bytes" - "context" - "encoding/binary" - "expvar" - "io" - "math" - "os" - "path/filepath" - "sort" - "strconv" - "sync" - "sync/atomic" - "time" - - "github.com/dgraph-io/badger/options" - "github.com/dgraph-io/badger/pb" - "github.com/dgraph-io/badger/skl" - "github.com/dgraph-io/badger/table" - "github.com/dgraph-io/badger/y" - humanize "github.com/dustin/go-humanize" - "github.com/pkg/errors" - "golang.org/x/net/trace" -) - -var ( - badgerPrefix = []byte("!badger!") // Prefix for internal keys used by badger. - head = []byte("!badger!head") // For storing value offset for replay. - txnKey = []byte("!badger!txn") // For indicating end of entries in txn. - badgerMove = []byte("!badger!move") // For key-value pairs which got moved during GC. - lfDiscardStatsKey = []byte("!badger!discard") // For storing lfDiscardStats -) - -type closers struct { - updateSize *y.Closer - compactors *y.Closer - memtable *y.Closer - writes *y.Closer - valueGC *y.Closer - pub *y.Closer -} - -// DB provides the various functions required to interact with Badger. -// DB is thread-safe. -type DB struct { - sync.RWMutex // Guards list of inmemory tables, not individual reads and writes. - - dirLockGuard *directoryLockGuard - // nil if Dir and ValueDir are the same - valueDirGuard *directoryLockGuard - - closers closers - elog trace.EventLog - mt *skl.Skiplist // Our latest (actively written) in-memory table - imm []*skl.Skiplist // Add here only AFTER pushing to flushChan. - opt Options - manifest *manifestFile - lc *levelsController - vlog valueLog - vhead valuePointer // less than or equal to a pointer to the last vlog value put into mt - writeCh chan *request - flushChan chan flushTask // For flushing memtables. - closeOnce sync.Once // For closing DB only once. - - // Number of log rotates since the last memtable flush. We will access this field via atomic - // functions. Since we are not going to use any 64bit atomic functions, there is no need for - // 64 bit alignment of this struct(see #311). 
- logRotates int32 - - blockWrites int32 - - orc *oracle - - pub *publisher -} - -const ( - kvWriteChCapacity = 1000 -) - -func (db *DB) replayFunction() func(Entry, valuePointer) error { - type txnEntry struct { - nk []byte - v y.ValueStruct - } - - var txn []txnEntry - var lastCommit uint64 - - toLSM := func(nk []byte, vs y.ValueStruct) { - for err := db.ensureRoomForWrite(); err != nil; err = db.ensureRoomForWrite() { - db.elog.Printf("Replay: Making room for writes") - time.Sleep(10 * time.Millisecond) - } - db.mt.Put(nk, vs) - } - - first := true - return func(e Entry, vp valuePointer) error { // Function for replaying. - if first { - db.elog.Printf("First key=%q\n", e.Key) - } - first = false - db.orc.Lock() - if db.orc.nextTxnTs < y.ParseTs(e.Key) { - db.orc.nextTxnTs = y.ParseTs(e.Key) - } - db.orc.Unlock() - - nk := make([]byte, len(e.Key)) - copy(nk, e.Key) - var nv []byte - meta := e.meta - if db.shouldWriteValueToLSM(e) { - nv = make([]byte, len(e.Value)) - copy(nv, e.Value) - } else { - nv = make([]byte, vptrSize) - vp.Encode(nv) - meta = meta | bitValuePointer - } - // Update vhead. If the crash happens while replay was in progess - // and the head is not updated, we will end up replaying all the - // files starting from file zero, again. - db.updateHead([]valuePointer{vp}) - - v := y.ValueStruct{ - Value: nv, - Meta: meta, - UserMeta: e.UserMeta, - ExpiresAt: e.ExpiresAt, - } - - if e.meta&bitFinTxn > 0 { - txnTs, err := strconv.ParseUint(string(e.Value), 10, 64) - if err != nil { - return errors.Wrapf(err, "Unable to parse txn fin: %q", e.Value) - } - y.AssertTrue(lastCommit == txnTs) - y.AssertTrue(len(txn) > 0) - // Got the end of txn. Now we can store them. - for _, t := range txn { - toLSM(t.nk, t.v) - } - txn = txn[:0] - lastCommit = 0 - - } else if e.meta&bitTxn > 0 { - txnTs := y.ParseTs(nk) - if lastCommit == 0 { - lastCommit = txnTs - } - if lastCommit != txnTs { - db.opt.Warningf("Found an incomplete txn at timestamp %d. Discarding it.\n", - lastCommit) - txn = txn[:0] - lastCommit = txnTs - } - te := txnEntry{nk: nk, v: v} - txn = append(txn, te) - - } else { - // This entry is from a rewrite. - toLSM(nk, v) - - // We shouldn't get this entry in the middle of a transaction. - y.AssertTrue(lastCommit == 0) - y.AssertTrue(len(txn) == 0) - } - return nil - } -} - -// Open returns a new DB object. -func Open(opt Options) (db *DB, err error) { - opt.maxBatchSize = (15 * opt.MaxTableSize) / 100 - opt.maxBatchCount = opt.maxBatchSize / int64(skl.MaxNodeSize) - - if opt.ValueThreshold > ValueThresholdLimit { - return nil, ErrValueThreshold - } - - if opt.ReadOnly { - // Can't truncate if the DB is read only. - opt.Truncate = false - // Do not perform compaction in read only mode. 
- opt.CompactL0OnClose = false - } - - for _, path := range []string{opt.Dir, opt.ValueDir} { - dirExists, err := exists(path) - if err != nil { - return nil, y.Wrapf(err, "Invalid Dir: %q", path) - } - if !dirExists { - if opt.ReadOnly { - return nil, errors.Errorf("Cannot find directory %q for read-only open", path) - } - // Try to create the directory - err = os.Mkdir(path, 0700) - if err != nil { - return nil, y.Wrapf(err, "Error Creating Dir: %q", path) - } - } - } - var dirLockGuard, valueDirLockGuard *directoryLockGuard - if !opt.BypassLockGuard { - absDir, err := filepath.Abs(opt.Dir) - if err != nil { - return nil, err - } - absValueDir, err := filepath.Abs(opt.ValueDir) - if err != nil { - return nil, err - } - dirLockGuard, err = acquireDirectoryLock(opt.Dir, lockFile, opt.ReadOnly) - if err != nil { - return nil, err - } - defer func() { - if dirLockGuard != nil { - _ = dirLockGuard.release() - } - }() - if absValueDir != absDir { - valueDirLockGuard, err = acquireDirectoryLock(opt.ValueDir, lockFile, opt.ReadOnly) - if err != nil { - return nil, err - } - defer func() { - if valueDirLockGuard != nil { - _ = valueDirLockGuard.release() - } - }() - } - } - if !(opt.ValueLogFileSize <= 2<<30 && opt.ValueLogFileSize >= 1<<20) { - return nil, ErrValueLogSize - } - if !(opt.ValueLogLoadingMode == options.FileIO || - opt.ValueLogLoadingMode == options.MemoryMap) { - return nil, ErrInvalidLoadingMode - } - manifestFile, manifest, err := openOrCreateManifestFile(opt.Dir, opt.ReadOnly) - if err != nil { - return nil, err - } - defer func() { - if manifestFile != nil { - _ = manifestFile.close() - } - }() - - elog := y.NoEventLog - if opt.EventLogging { - elog = trace.NewEventLog("Badger", "DB") - } - - db = &DB{ - imm: make([]*skl.Skiplist, 0, opt.NumMemtables), - flushChan: make(chan flushTask, opt.NumMemtables), - writeCh: make(chan *request, kvWriteChCapacity), - opt: opt, - manifest: manifestFile, - elog: elog, - dirLockGuard: dirLockGuard, - valueDirGuard: valueDirLockGuard, - orc: newOracle(opt), - pub: newPublisher(), - } - - // Calculate initial size. - db.calculateSize() - db.closers.updateSize = y.NewCloser(1) - go db.updateSize(db.closers.updateSize) - db.mt = skl.NewSkiplist(arenaSize(opt)) - - // newLevelsController potentially loads files in directory. - if db.lc, err = newLevelsController(db, &manifest); err != nil { - return nil, err - } - - // Initialize vlog struct. - db.vlog.init(db) - - if !opt.ReadOnly { - db.closers.compactors = y.NewCloser(1) - db.lc.startCompact(db.closers.compactors) - - db.closers.memtable = y.NewCloser(1) - go func() { - _ = db.flushMemtable(db.closers.memtable) // Need levels controller to be up. - }() - } - - headKey := y.KeyWithTs(head, math.MaxUint64) - // Need to pass with timestamp, lsm get removes the last 8 bytes and compares key - vs, err := db.get(headKey) - if err != nil { - return nil, errors.Wrap(err, "Retrieving head") - } - db.orc.nextTxnTs = vs.Version - var vptr valuePointer - if len(vs.Value) > 0 { - vptr.Decode(vs.Value) - } - - replayCloser := y.NewCloser(1) - go db.doWrites(replayCloser) - - if err = db.vlog.open(db, vptr, db.replayFunction()); err != nil { - return db, err - } - replayCloser.SignalAndWait() // Wait for replay to be applied first. - - // Let's advance nextTxnTs to one more than whatever we observed via - // replaying the logs. 
- db.orc.txnMark.Done(db.orc.nextTxnTs) - // In normal mode, we must update readMark so older versions of keys can be removed during - // compaction when run in offline mode via the flatten tool. - db.orc.readMark.Done(db.orc.nextTxnTs) - db.orc.incrementNextTs() - - db.writeCh = make(chan *request, kvWriteChCapacity) - db.closers.writes = y.NewCloser(1) - go db.doWrites(db.closers.writes) - - db.closers.valueGC = y.NewCloser(1) - go db.vlog.waitOnGC(db.closers.valueGC) - - db.closers.pub = y.NewCloser(1) - go db.pub.listenForUpdates(db.closers.pub) - - valueDirLockGuard = nil - dirLockGuard = nil - manifestFile = nil - return db, nil -} - -// Close closes a DB. It's crucial to call it to ensure all the pending updates make their way to -// disk. Calling DB.Close() multiple times would still only close the DB once. -func (db *DB) Close() error { - var err error - db.closeOnce.Do(func() { - err = db.close() - }) - return err -} - -func (db *DB) close() (err error) { - db.elog.Printf("Closing database") - - atomic.StoreInt32(&db.blockWrites, 1) - - // Stop value GC first. - db.closers.valueGC.SignalAndWait() - - // Stop writes next. - db.closers.writes.SignalAndWait() - - // Don't accept any more write. - close(db.writeCh) - - db.closers.pub.SignalAndWait() - - // Now close the value log. - if vlogErr := db.vlog.Close(); vlogErr != nil { - err = errors.Wrap(vlogErr, "DB.Close") - } - - // Make sure that block writer is done pushing stuff into memtable! - // Otherwise, you will have a race condition: we are trying to flush memtables - // and remove them completely, while the block / memtable writer is still - // trying to push stuff into the memtable. This will also resolve the value - // offset problem: as we push into memtable, we update value offsets there. - if !db.mt.Empty() { - db.elog.Printf("Flushing memtable") - for { - pushedFlushTask := func() bool { - db.Lock() - defer db.Unlock() - y.AssertTrue(db.mt != nil) - select { - case db.flushChan <- flushTask{mt: db.mt, vptr: db.vhead}: - db.imm = append(db.imm, db.mt) // Flusher will attempt to remove this from s.imm. - db.mt = nil // Will segfault if we try writing! - db.elog.Printf("pushed to flush chan\n") - return true - default: - // If we fail to push, we need to unlock and wait for a short while. - // The flushing operation needs to update s.imm. Otherwise, we have a deadlock. - // TODO: Think about how to do this more cleanly, maybe without any locks. - } - return false - }() - if pushedFlushTask { - break - } - time.Sleep(10 * time.Millisecond) - } - } - db.stopMemoryFlush() - db.stopCompactions() - - // Force Compact L0 - // We don't need to care about cstatus since no parallel compaction is running. - if db.opt.CompactL0OnClose { - err := db.lc.doCompact(compactionPriority{level: 0, score: 1.73}) - switch err { - case errFillTables: - // This error only means that there might be enough tables to do a compaction. So, we - // should not report it to the end user to avoid confusing them. 
- case nil: - db.opt.Infof("Force compaction on level 0 done") - default: - db.opt.Warningf("While forcing compaction on level 0: %v", err) - } - } - - if lcErr := db.lc.close(); err == nil { - err = errors.Wrap(lcErr, "DB.Close") - } - db.elog.Printf("Waiting for closer") - db.closers.updateSize.SignalAndWait() - db.orc.Stop() - - db.elog.Finish() - - if db.dirLockGuard != nil { - if guardErr := db.dirLockGuard.release(); err == nil { - err = errors.Wrap(guardErr, "DB.Close") - } - } - if db.valueDirGuard != nil { - if guardErr := db.valueDirGuard.release(); err == nil { - err = errors.Wrap(guardErr, "DB.Close") - } - } - if manifestErr := db.manifest.close(); err == nil { - err = errors.Wrap(manifestErr, "DB.Close") - } - - // Fsync directories to ensure that lock file, and any other removed files whose directory - // we haven't specifically fsynced, are guaranteed to have their directory entry removal - // persisted to disk. - if syncErr := syncDir(db.opt.Dir); err == nil { - err = errors.Wrap(syncErr, "DB.Close") - } - if syncErr := syncDir(db.opt.ValueDir); err == nil { - err = errors.Wrap(syncErr, "DB.Close") - } - - return err -} - -const ( - lockFile = "LOCK" -) - -// Sync syncs database content to disk. This function provides -// more control to user to sync data whenever required. -func (db *DB) Sync() error { - return db.vlog.sync(math.MaxUint32) -} - -// getMemtables returns the current memtables and get references. -func (db *DB) getMemTables() ([]*skl.Skiplist, func()) { - db.RLock() - defer db.RUnlock() - - tables := make([]*skl.Skiplist, len(db.imm)+1) - - // Get mutable memtable. - tables[0] = db.mt - tables[0].IncrRef() - - // Get immutable memtables. - last := len(db.imm) - 1 - for i := range db.imm { - tables[i+1] = db.imm[last-i] - tables[i+1].IncrRef() - } - return tables, func() { - for _, tbl := range tables { - tbl.DecrRef() - } - } -} - -// get returns the value in memtable or disk for given key. -// Note that value will include meta byte. -// -// IMPORTANT: We should never write an entry with an older timestamp for the same key, We need to -// maintain this invariant to search for the latest value of a key, or else we need to search in all -// tables and find the max version among them. To maintain this invariant, we also need to ensure -// that all versions of a key are always present in the same table from level 1, because compaction -// can push any table down. -// -// Update (Sep 22, 2018): To maintain the above invariant, and to allow keys to be moved from one -// value log to another (while reclaiming space during value log GC), we have logically moved this -// need to write "old versions after new versions" to the badgerMove keyspace. Thus, for normal -// gets, we can stop going down the LSM tree once we find any version of the key (note however that -// we will ALWAYS skip versions with ts greater than the key version). However, if that key has -// been moved, then for the corresponding movekey, we'll look through all the levels of the tree -// to ensure that we pick the highest version of the movekey present. -func (db *DB) get(key []byte) (y.ValueStruct, error) { - tables, decr := db.getMemTables() // Lock should be released. - defer decr() - - var maxVs *y.ValueStruct - var version uint64 - if bytes.HasPrefix(key, badgerMove) { - // If we are checking badgerMove key, we should look into all the - // levels, so we can pick up the newer versions, which might have been - // compacted down the tree. 
- maxVs = &y.ValueStruct{} - version = y.ParseTs(key) - } - - y.NumGets.Add(1) - for i := 0; i < len(tables); i++ { - vs := tables[i].Get(key) - y.NumMemtableGets.Add(1) - if vs.Meta == 0 && vs.Value == nil { - continue - } - // Found a version of the key. For user keyspace, return immediately. For move keyspace, - // continue iterating, unless we found a version == given key version. - if maxVs == nil || vs.Version == version { - return vs, nil - } - if maxVs.Version < vs.Version { - *maxVs = vs - } - } - return db.lc.get(key, maxVs) -} - -// updateHead should not be called without the db.Lock() since db.vhead is used -// by the writer go routines and memtable flushing goroutine. -func (db *DB) updateHead(ptrs []valuePointer) { - var ptr valuePointer - for i := len(ptrs) - 1; i >= 0; i-- { - p := ptrs[i] - if !p.IsZero() { - ptr = p - break - } - } - if ptr.IsZero() { - return - } - - y.AssertTrue(!ptr.Less(db.vhead)) - db.vhead = ptr -} - -var requestPool = sync.Pool{ - New: func() interface{} { - return new(request) - }, -} - -func (db *DB) shouldWriteValueToLSM(e Entry) bool { - return len(e.Value) < db.opt.ValueThreshold -} - -func (db *DB) writeToLSM(b *request) error { - if len(b.Ptrs) != len(b.Entries) { - return errors.Errorf("Ptrs and Entries don't match: %+v", b) - } - - for i, entry := range b.Entries { - if entry.meta&bitFinTxn != 0 { - continue - } - if db.shouldWriteValueToLSM(*entry) { // Will include deletion / tombstone case. - db.mt.Put(entry.Key, - y.ValueStruct{ - Value: entry.Value, - // Ensure value pointer flag is removed. Otherwise, the value will fail - // to be retrieved during iterator prefetch. `bitValuePointer` is only - // known to be set in write to LSM when the entry is loaded from a backup - // with lower ValueThreshold and its value was stored in the value log. - Meta: entry.meta &^ bitValuePointer, - UserMeta: entry.UserMeta, - ExpiresAt: entry.ExpiresAt, - }) - } else { - var offsetBuf [vptrSize]byte - db.mt.Put(entry.Key, - y.ValueStruct{ - Value: b.Ptrs[i].Encode(offsetBuf[:]), - Meta: entry.meta | bitValuePointer, - UserMeta: entry.UserMeta, - ExpiresAt: entry.ExpiresAt, - }) - } - } - return nil -} - -// writeRequests is called serially by only one goroutine. -func (db *DB) writeRequests(reqs []*request) error { - if len(reqs) == 0 { - return nil - } - - done := func(err error) { - for _, r := range reqs { - r.Err = err - r.Wg.Done() - } - } - db.elog.Printf("writeRequests called. Writing to value log") - - err := db.vlog.write(reqs) - if err != nil { - done(err) - return err - } - - db.elog.Printf("Sending updates to subscribers") - db.pub.sendUpdates(reqs) - db.elog.Printf("Writing to memtable") - var count int - for _, b := range reqs { - if len(b.Entries) == 0 { - continue - } - count += len(b.Entries) - var i uint64 - for err = db.ensureRoomForWrite(); err == errNoRoom; err = db.ensureRoomForWrite() { - i++ - if i%100 == 0 { - db.elog.Printf("Making room for writes") - } - // We need to poll a bit because both hasRoomForWrite and the flusher need access to s.imm. - // When flushChan is full and you are blocked there, and the flusher is trying to update s.imm, - // you will get a deadlock. 
- time.Sleep(10 * time.Millisecond) - } - if err != nil { - done(err) - return errors.Wrap(err, "writeRequests") - } - if err := db.writeToLSM(b); err != nil { - done(err) - return errors.Wrap(err, "writeRequests") - } - db.Lock() - db.updateHead(b.Ptrs) - db.Unlock() - } - done(nil) - db.elog.Printf("%d entries written", count) - return nil -} - -func (db *DB) sendToWriteCh(entries []*Entry) (*request, error) { - if atomic.LoadInt32(&db.blockWrites) == 1 { - return nil, ErrBlockedWrites - } - var count, size int64 - for _, e := range entries { - size += int64(e.estimateSize(db.opt.ValueThreshold)) - count++ - } - if count >= db.opt.maxBatchCount || size >= db.opt.maxBatchSize { - return nil, ErrTxnTooBig - } - - // We can only service one request because we need each txn to be stored in a contigous section. - // Txns should not interleave among other txns or rewrites. - req := requestPool.Get().(*request) - req.reset() - req.Entries = entries - req.Wg.Add(1) - req.IncrRef() // for db write - db.writeCh <- req // Handled in doWrites. - y.NumPuts.Add(int64(len(entries))) - - return req, nil -} - -func (db *DB) doWrites(lc *y.Closer) { - defer lc.Done() - pendingCh := make(chan struct{}, 1) - - writeRequests := func(reqs []*request) { - if err := db.writeRequests(reqs); err != nil { - db.opt.Errorf("writeRequests: %v", err) - } - <-pendingCh - } - - // This variable tracks the number of pending writes. - reqLen := new(expvar.Int) - y.PendingWrites.Set(db.opt.Dir, reqLen) - - reqs := make([]*request, 0, 10) - for { - var r *request - select { - case r = <-db.writeCh: - case <-lc.HasBeenClosed(): - goto closedCase - } - - for { - reqs = append(reqs, r) - reqLen.Set(int64(len(reqs))) - - if len(reqs) >= 3*kvWriteChCapacity { - pendingCh <- struct{}{} // blocking. - goto writeCase - } - - select { - // Either push to pending, or continue to pick from writeCh. - case r = <-db.writeCh: - case pendingCh <- struct{}{}: - goto writeCase - case <-lc.HasBeenClosed(): - goto closedCase - } - } - - closedCase: - // All the pending request are drained. - // Don't close the writeCh, because it has be used in several places. - for { - select { - case r = <-db.writeCh: - reqs = append(reqs, r) - default: - pendingCh <- struct{}{} // Push to pending before doing a write. - writeRequests(reqs) - return - } - } - - writeCase: - go writeRequests(reqs) - reqs = make([]*request, 0, 10) - reqLen.Set(0) - } -} - -// batchSet applies a list of badger.Entry. If a request level error occurs it -// will be returned. -// Check(kv.BatchSet(entries)) -func (db *DB) batchSet(entries []*Entry) error { - req, err := db.sendToWriteCh(entries) - if err != nil { - return err - } - - return req.Wait() -} - -// batchSetAsync is the asynchronous version of batchSet. It accepts a callback -// function which is called when all the sets are complete. If a request level -// error occurs, it will be passed back via the callback. -// err := kv.BatchSetAsync(entries, func(err error)) { -// Check(err) -// } -func (db *DB) batchSetAsync(entries []*Entry, f func(error)) error { - req, err := db.sendToWriteCh(entries) - if err != nil { - return err - } - go func() { - err := req.Wait() - // Write is complete. Let's call the callback function now. - f(err) - }() - return nil -} - -var errNoRoom = errors.New("No room for write") - -// ensureRoomForWrite is always called serially. -func (db *DB) ensureRoomForWrite() error { - var err error - db.Lock() - defer db.Unlock() - - // Here we determine if we need to force flush memtable. 
Given we rotated log file, it would - // make sense to force flush a memtable, so the updated value head would have a chance to be - // pushed to L0. Otherwise, it would not go to L0, until the memtable has been fully filled, - // which can take a lot longer if the write load has fewer keys and larger values. This force - // flush, thus avoids the need to read through a lot of log files on a crash and restart. - // Above approach is quite simple with small drawback. We are calling ensureRoomForWrite before - // inserting every entry in Memtable. We will get latest db.head after all entries for a request - // are inserted in Memtable. If we have done >= db.logRotates rotations, then while inserting - // first entry in Memtable, below condition will be true and we will endup flushing old value of - // db.head. Hence we are limiting no of value log files to be read to db.logRotates only. - forceFlush := atomic.LoadInt32(&db.logRotates) >= db.opt.LogRotatesToFlush - - if !forceFlush && db.mt.MemSize() < db.opt.MaxTableSize { - return nil - } - - y.AssertTrue(db.mt != nil) // A nil mt indicates that DB is being closed. - select { - case db.flushChan <- flushTask{mt: db.mt, vptr: db.vhead}: - // After every memtable flush, let's reset the counter. - atomic.StoreInt32(&db.logRotates, 0) - - // Ensure value log is synced to disk so this memtable's contents wouldn't be lost. - err = db.vlog.sync(db.vhead.Fid) - if err != nil { - return err - } - - db.opt.Debugf("Flushing memtable, mt.size=%d size of flushChan: %d\n", - db.mt.MemSize(), len(db.flushChan)) - // We manage to push this task. Let's modify imm. - db.imm = append(db.imm, db.mt) - db.mt = skl.NewSkiplist(arenaSize(db.opt)) - // New memtable is empty. We certainly have room. - return nil - default: - // We need to do this to unlock and allow the flusher to modify imm. - return errNoRoom - } -} - -func arenaSize(opt Options) int64 { - return opt.MaxTableSize + opt.maxBatchSize + opt.maxBatchCount*int64(skl.MaxNodeSize) -} - -// WriteLevel0Table flushes memtable. -func writeLevel0Table(ft flushTask, f io.Writer) error { - iter := ft.mt.NewIterator() - defer iter.Close() - b := table.NewTableBuilder() - defer b.Close() - for iter.SeekToFirst(); iter.Valid(); iter.Next() { - if len(ft.dropPrefixes) > 0 && hasAnyPrefixes(iter.Key(), ft.dropPrefixes) { - continue - } - b.Add(iter.Key(), iter.Value()) - } - _, err := f.Write(b.Finish()) - return err -} - -type flushTask struct { - mt *skl.Skiplist - vptr valuePointer - dropPrefixes [][]byte -} - -func (db *DB) pushHead(ft flushTask) error { - // Ensure we never push a zero valued head pointer. - if ft.vptr.IsZero() { - return errors.New("Head should not be zero") - } - - // Store badger head even if vptr is zero, need it for readTs - db.opt.Debugf("Storing value log head: %+v\n", ft.vptr) - offset := make([]byte, vptrSize) - ft.vptr.Encode(offset) - - // Pick the max commit ts, so in case of crash, our read ts would be higher than all the - // commits. - headTs := y.KeyWithTs(head, db.orc.nextTs()) - ft.mt.Put(headTs, y.ValueStruct{Value: offset}) - - return nil -} - -// handleFlushTask must be run serially. -func (db *DB) handleFlushTask(ft flushTask) error { - // There can be a scenario, when empty memtable is flushed. For example, memtable is empty and - // after writing request to value log, rotation count exceeds db.LogRotatesToFlush. 
- if ft.mt.Empty() { - return nil - } - - if err := db.pushHead(ft); err != nil { - return err - } - - fileID := db.lc.reserveFileID() - fd, err := y.CreateSyncedFile(table.NewFilename(fileID, db.opt.Dir), true) - if err != nil { - return y.Wrap(err) - } - - // Don't block just to sync the directory entry. - dirSyncCh := make(chan error) - go func() { dirSyncCh <- syncDir(db.opt.Dir) }() - - err = writeLevel0Table(ft, fd) - dirSyncErr := <-dirSyncCh - - if err != nil { - db.elog.Errorf("ERROR while writing to level 0: %v", err) - return err - } - if dirSyncErr != nil { - // Do dir sync as best effort. No need to return due to an error there. - db.elog.Errorf("ERROR while syncing level directory: %v", dirSyncErr) - } - - tbl, err := table.OpenTable(fd, db.opt.TableLoadingMode, nil) - if err != nil { - db.elog.Printf("ERROR while opening table: %v", err) - return err - } - // We own a ref on tbl. - err = db.lc.addLevel0Table(tbl) // This will incrRef (if we don't error, sure) - _ = tbl.DecrRef() // Releases our ref. - return err -} - -// flushMemtable must keep running until we send it an empty flushTask. If there -// are errors during handling the flush task, we'll retry indefinitely. -func (db *DB) flushMemtable(lc *y.Closer) error { - defer lc.Done() - - for ft := range db.flushChan { - if ft.mt == nil { - // We close db.flushChan now, instead of sending a nil ft.mt. - continue - } - for { - err := db.handleFlushTask(ft) - if err == nil { - // Update s.imm. Need a lock. - db.Lock() - // This is a single-threaded operation. ft.mt corresponds to the head of - // db.imm list. Once we flush it, we advance db.imm. The next ft.mt - // which would arrive here would match db.imm[0], because we acquire a - // lock over DB when pushing to flushChan. - // TODO: This logic is dirty AF. Any change and this could easily break. - y.AssertTrue(ft.mt == db.imm[0]) - db.imm = db.imm[1:] - ft.mt.DecrRef() // Return memory. - db.Unlock() - - break - } - // Encountered error. Retry indefinitely. - db.opt.Errorf("Failure while flushing memtable to disk: %v. Retrying...\n", err) - time.Sleep(time.Second) - } - } - return nil -} - -func exists(path string) (bool, error) { - _, err := os.Stat(path) - if err == nil { - return true, nil - } - if os.IsNotExist(err) { - return false, nil - } - return true, err -} - -// This function does a filewalk, calculates the size of vlog and sst files and stores it in -// y.LSMSize and y.VlogSize. -func (db *DB) calculateSize() { - newInt := func(val int64) *expvar.Int { - v := new(expvar.Int) - v.Add(val) - return v - } - - totalSize := func(dir string) (int64, int64) { - var lsmSize, vlogSize int64 - err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - ext := filepath.Ext(path) - if ext == ".sst" { - lsmSize += info.Size() - } else if ext == ".vlog" { - vlogSize += info.Size() - } - return nil - }) - if err != nil { - db.elog.Printf("Got error while calculating total size of directory: %s", dir) - } - return lsmSize, vlogSize - } - - lsmSize, vlogSize := totalSize(db.opt.Dir) - y.LSMSize.Set(db.opt.Dir, newInt(lsmSize)) - // If valueDir is different from dir, we'd have to do another walk. 
- if db.opt.ValueDir != db.opt.Dir { - _, vlogSize = totalSize(db.opt.ValueDir) - } - y.VlogSize.Set(db.opt.ValueDir, newInt(vlogSize)) -} - -func (db *DB) updateSize(lc *y.Closer) { - defer lc.Done() - - metricsTicker := time.NewTicker(time.Minute) - defer metricsTicker.Stop() - - for { - select { - case <-metricsTicker.C: - db.calculateSize() - case <-lc.HasBeenClosed(): - return - } - } -} - -// RunValueLogGC triggers a value log garbage collection. -// -// It picks value log files to perform GC based on statistics that are collected -// during compactions. If no such statistics are available, then log files are -// picked in random order. The process stops as soon as the first log file is -// encountered which does not result in garbage collection. -// -// When a log file is picked, it is first sampled. If the sample shows that we -// can discard at least discardRatio space of that file, it would be rewritten. -// -// If a call to RunValueLogGC results in no rewrites, then an ErrNoRewrite is -// thrown indicating that the call resulted in no file rewrites. -// -// We recommend setting discardRatio to 0.5, thus indicating that a file be -// rewritten if half the space can be discarded. This results in a lifetime -// value log write amplification of 2 (1 from original write + 0.5 rewrite + -// 0.25 + 0.125 + ... = 2). Setting it to higher value would result in fewer -// space reclaims, while setting it to a lower value would result in more space -// reclaims at the cost of increased activity on the LSM tree. discardRatio -// must be in the range (0.0, 1.0), both endpoints excluded, otherwise an -// ErrInvalidRequest is returned. -// -// Only one GC is allowed at a time. If another value log GC is running, or DB -// has been closed, this would return an ErrRejected. -// -// Note: Every time GC is run, it would produce a spike of activity on the LSM -// tree. -func (db *DB) RunValueLogGC(discardRatio float64) error { - if discardRatio >= 1.0 || discardRatio <= 0.0 { - return ErrInvalidRequest - } - - // Find head on disk - headKey := y.KeyWithTs(head, math.MaxUint64) - // Need to pass with timestamp, lsm get removes the last 8 bytes and compares key - val, err := db.lc.get(headKey, nil) - if err != nil { - return errors.Wrap(err, "Retrieving head from on-disk LSM") - } - - var head valuePointer - if len(val.Value) > 0 { - head.Decode(val.Value) - } - - // Pick a log file and run GC - return db.vlog.runGC(discardRatio, head) -} - -// Size returns the size of lsm and value log files in bytes. It can be used to decide how often to -// call RunValueLogGC. -func (db *DB) Size() (lsm, vlog int64) { - if y.LSMSize.Get(db.opt.Dir) == nil { - lsm, vlog = 0, 0 - return - } - lsm = y.LSMSize.Get(db.opt.Dir).(*expvar.Int).Value() - vlog = y.VlogSize.Get(db.opt.ValueDir).(*expvar.Int).Value() - return -} - -// Sequence represents a Badger sequence. -type Sequence struct { - sync.Mutex - db *DB - key []byte - next uint64 - leased uint64 - bandwidth uint64 -} - -// Next would return the next integer in the sequence, updating the lease by running a transaction -// if needed. -func (seq *Sequence) Next() (uint64, error) { - seq.Lock() - defer seq.Unlock() - if seq.next >= seq.leased { - if err := seq.updateLease(); err != nil { - return 0, err - } - } - val := seq.next - seq.next++ - return val, nil -} - -// Release the leased sequence to avoid wasted integers. This should be done right -// before closing the associated DB. 
However it is valid to use the sequence after -// it was released, causing a new lease with full bandwidth. -func (seq *Sequence) Release() error { - seq.Lock() - defer seq.Unlock() - err := seq.db.Update(func(txn *Txn) error { - item, err := txn.Get(seq.key) - if err != nil { - return err - } - - var num uint64 - if err := item.Value(func(v []byte) error { - num = binary.BigEndian.Uint64(v) - return nil - }); err != nil { - return err - } - - if num == seq.leased { - var buf [8]byte - binary.BigEndian.PutUint64(buf[:], seq.next) - return txn.SetEntry(NewEntry(seq.key, buf[:])) - } - - return nil - }) - if err != nil { - return err - } - seq.leased = seq.next - return nil -} - -func (seq *Sequence) updateLease() error { - return seq.db.Update(func(txn *Txn) error { - item, err := txn.Get(seq.key) - if err == ErrKeyNotFound { - seq.next = 0 - } else if err != nil { - return err - } else { - var num uint64 - if err := item.Value(func(v []byte) error { - num = binary.BigEndian.Uint64(v) - return nil - }); err != nil { - return err - } - seq.next = num - } - - lease := seq.next + seq.bandwidth - var buf [8]byte - binary.BigEndian.PutUint64(buf[:], lease) - if err = txn.SetEntry(NewEntry(seq.key, buf[:])); err != nil { - return err - } - seq.leased = lease - return nil - }) -} - -// GetSequence would initiate a new sequence object, generating it from the stored lease, if -// available, in the database. Sequence can be used to get a list of monotonically increasing -// integers. Multiple sequences can be created by providing different keys. Bandwidth sets the -// size of the lease, determining how many Next() requests can be served from memory. -// -// GetSequence is not supported on ManagedDB. Calling this would result in a panic. -func (db *DB) GetSequence(key []byte, bandwidth uint64) (*Sequence, error) { - if db.opt.managedTxns { - panic("Cannot use GetSequence with managedDB=true.") - } - - switch { - case len(key) == 0: - return nil, ErrEmptyKey - case bandwidth == 0: - return nil, ErrZeroBandwidth - } - seq := &Sequence{ - db: db, - key: key, - next: 0, - leased: 0, - bandwidth: bandwidth, - } - err := seq.updateLease() - return seq, err -} - -// Tables gets the TableInfo objects from the level controller. If withKeysCount -// is true, TableInfo objects also contain counts of keys for the tables. -func (db *DB) Tables(withKeysCount bool) []TableInfo { - return db.lc.getTableInfo(withKeysCount) -} - -// KeySplits can be used to get rough key ranges to divide up iteration over -// the DB. -func (db *DB) KeySplits(prefix []byte) []string { - var splits []string - // We just want table ranges here and not keys count. - for _, ti := range db.Tables(false) { - // We don't use ti.Left, because that has a tendency to store !badger - // keys. - if bytes.HasPrefix(ti.Right, prefix) { - splits = append(splits, string(ti.Right)) - } - } - sort.Strings(splits) - return splits -} - -// MaxBatchCount returns max possible entries in batch -func (db *DB) MaxBatchCount() int64 { - return db.opt.maxBatchCount -} - -// MaxBatchSize returns max possible batch size -func (db *DB) MaxBatchSize() int64 { - return db.opt.maxBatchSize -} - -func (db *DB) stopMemoryFlush() { - // Stop memtable flushes. - if db.closers.memtable != nil { - close(db.flushChan) - db.closers.memtable.SignalAndWait() - } -} - -func (db *DB) stopCompactions() { - // Stop compactions. - if db.closers.compactors != nil { - db.closers.compactors.SignalAndWait() - } -} - -func (db *DB) startCompactions() { - // Resume compactions. 
- if db.closers.compactors != nil {
- db.closers.compactors = y.NewCloser(1)
- db.lc.startCompact(db.closers.compactors)
- }
-}
-
-func (db *DB) startMemoryFlush() {
- // Start the memory flusher.
- if db.closers.memtable != nil {
- db.flushChan = make(chan flushTask, db.opt.NumMemtables)
- db.closers.memtable = y.NewCloser(1)
- go func() {
- _ = db.flushMemtable(db.closers.memtable)
- }()
- }
-}
-
-// Flatten can be used to force compactions on the LSM tree so all the tables fall on the same
-// level. This ensures that all the versions of keys are colocated and not split across multiple
-// levels, which is necessary after a restore from backup. During Flatten, live compactions are
-// stopped. Ideally, no writes are going on during Flatten. Otherwise, it would create competition
-// between flattening the tree and new tables being created at level zero.
-func (db *DB) Flatten(workers int) error {
- db.stopCompactions()
- defer db.startCompactions()
-
- compactAway := func(cp compactionPriority) error {
- db.opt.Infof("Attempting to compact with %+v\n", cp)
- errCh := make(chan error, 1)
- for i := 0; i < workers; i++ {
- go func() {
- errCh <- db.lc.doCompact(cp)
- }()
- }
- var success int
- var rerr error
- for i := 0; i < workers; i++ {
- err := <-errCh
- if err != nil {
- rerr = err
- db.opt.Warningf("While running doCompact with %+v. Error: %v\n", cp, err)
- } else {
- success++
- }
- }
- if success == 0 {
- return rerr
- }
- // We could do at least one successful compaction. So, we'll consider this a success.
- db.opt.Infof("%d compactor(s) succeeded. One or more tables from level %d compacted.\n",
- success, cp.level)
- return nil
- }
-
- hbytes := func(sz int64) string {
- return humanize.Bytes(uint64(sz))
- }
-
- for {
- db.opt.Infof("\n")
- var levels []int
- for i, l := range db.lc.levels {
- sz := l.getTotalSize()
- db.opt.Infof("Level: %d. %8s Size. %8s Max.\n",
- i, hbytes(l.getTotalSize()), hbytes(l.maxTotalSize))
- if sz > 0 {
- levels = append(levels, i)
- }
- }
- if len(levels) <= 1 {
- prios := db.lc.pickCompactLevels()
- if len(prios) == 0 || prios[0].score <= 1.0 {
- db.opt.Infof("All tables consolidated into one level. Flattening done.\n")
- return nil
- }
- if err := compactAway(prios[0]); err != nil {
- return err
- }
- continue
- }
- // Create an artificial compaction priority, to ensure that we compact the level.
- cp := compactionPriority{level: levels[0], score: 1.71}
- if err := compactAway(cp); err != nil {
- return err
- }
- }
-}
-
-func (db *DB) blockWrite() {
- // Stop accepting new writes.
- atomic.StoreInt32(&db.blockWrites, 1)
-
- // Make all pending writes finish. The following will also close writeCh.
- db.closers.writes.SignalAndWait()
- db.opt.Infof("Writes flushed. Stopping compactions now...")
-}
-
-func (db *DB) unblockWrite() {
- db.closers.writes = y.NewCloser(1)
- go db.doWrites(db.closers.writes)
-
- // Resume writes.
- atomic.StoreInt32(&db.blockWrites, 0)
-}
-
-func (db *DB) prepareToDrop() func() {
- if db.opt.ReadOnly {
- panic("Attempting to drop data in read-only mode.")
- }
- // In order to prepare for the drop, we need to block the incoming writes
- // and write them to the db. Then, flush all the pending flush tasks, so
- // that we don't miss any entries.
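Flatten and RunValueLogGC (both documented in the vendored code above) are usually driven together after a restore: flatten first, then GC the value log in a loop until nothing is left to rewrite. A minimal sketch under that assumption; the helper name flattenAndGC and the worker count are hypothetical, not part of the removed code:

```go
package example

import (
	badger "github.com/dgraph-io/badger"
)

// flattenAndGC consolidates the LSM tree onto a single level, then runs
// value log GC in a loop until Badger reports nothing is left to rewrite.
func flattenAndGC(db *badger.DB) error {
	// Two compaction workers is arbitrary; any small positive count works.
	if err := db.Flatten(2); err != nil {
		return err
	}
	for {
		// 0.5 rewrites a value log file once half of it is discardable,
		// the ratio recommended in the RunValueLogGC docs above.
		err := db.RunValueLogGC(0.5)
		if err == badger.ErrNoRewrite {
			return nil // nothing more to reclaim
		}
		if err != nil {
			return err
		}
	}
}
```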
- db.blockWrite()
- reqs := make([]*request, 0, 10)
- for {
- select {
- case r := <-db.writeCh:
- reqs = append(reqs, r)
- default:
- if err := db.writeRequests(reqs); err != nil {
- db.opt.Errorf("writeRequests: %v", err)
- }
- db.stopMemoryFlush()
- return func() {
- db.opt.Infof("Resuming writes")
- db.startMemoryFlush()
- db.unblockWrite()
- }
- }
- }
-}
-
-// DropAll would drop all the data stored in Badger. It does this in the following way.
-// - Stop accepting new writes.
-// - Pause memtable flushes and compactions.
-// - Pick all tables from all levels, create a changeset to delete all these
-// tables and apply it to manifest.
-// - Pick all log files from value log, and delete all of them. Restart value log files from zero.
-// - Resume memtable flushes and compactions.
-//
-// NOTE: DropAll is resilient to concurrent writes, but not to reads. It is up to the user to not do
-// any reads while DropAll is going on, otherwise they may result in panics. Ideally, both reads and
-// writes are paused before running DropAll, and resumed after it is finished.
-func (db *DB) DropAll() error {
- f, err := db.dropAll()
- defer f()
- if err != nil {
- return err
- }
- return nil
-}
-
-func (db *DB) dropAll() (func(), error) {
- db.opt.Infof("DropAll called. Blocking writes...")
- f := db.prepareToDrop()
- // prepareToDrop stops all incoming writes and flushes any pending flush tasks.
- // Before we drop, we'll stop the compactions, because all the data is going
- // to be deleted anyway.
- db.stopCompactions()
- resume := func() {
- db.startCompactions()
- f()
- }
- // Block all foreign interactions with memory tables.
- db.Lock()
- defer db.Unlock()
-
- // Remove in-memory tables. Calling DecrRef for safety. Not sure if they're absolutely needed.
- db.mt.DecrRef()
- for _, mt := range db.imm {
- mt.DecrRef()
- }
- db.imm = db.imm[:0]
- db.mt = skl.NewSkiplist(arenaSize(db.opt)) // Set it up for future writes.
-
- num, err := db.lc.dropTree()
- if err != nil {
- return resume, err
- }
- db.opt.Infof("Deleted %d SSTables. Now deleting value logs...\n", num)
-
- num, err = db.vlog.dropAll()
- if err != nil {
- return resume, err
- }
- db.vhead = valuePointer{} // Zero it out.
- db.lc.nextFileID = 1
- db.opt.Infof("Deleted %d value log files. DropAll done.\n", num)
- return resume, nil
-}
-
-// DropPrefix would drop all the keys with the provided prefix. It does this in the following way:
-// - Stop accepting new writes.
-// - Stop memtable flushes before acquiring the lock, because a memtable flush
-// also stalls on that lock, which would lead to a deadlock.
-// - Flush out all memtables, skipping over keys with the given prefix, Kp.
-// - Write out the value log header to memtables when flushing, so we don't accidentally bring Kp
-// back after a restart.
-// - Stop compaction.
-// - Compact L0->L1, skipping over Kp.
-// - Compact rest of the levels, Li->Li, picking tables which have Kp.
-// - Resume memtable flushes, compactions and writes.
-func (db *DB) DropPrefix(prefixes ...[]byte) error {
- db.opt.Infof("DropPrefix Called")
- f := db.prepareToDrop()
- defer f()
- // Block all foreign interactions with memory tables.
- db.Lock()
- defer db.Unlock()
-
- db.imm = append(db.imm, db.mt)
- for _, memtable := range db.imm {
- if memtable.Empty() {
- memtable.DecrRef()
- continue
- }
- task := flushTask{
- mt: memtable,
- // Ensure that the head of value log gets persisted to disk.
- vptr: db.vhead, - dropPrefixes: prefixes, - } - db.opt.Debugf("Flushing memtable") - if err := db.handleFlushTask(task); err != nil { - db.opt.Errorf("While trying to flush memtable: %v", err) - return err - } - memtable.DecrRef() - } - db.stopCompactions() - defer db.startCompactions() - db.imm = db.imm[:0] - db.mt = skl.NewSkiplist(arenaSize(db.opt)) - - // Drop prefixes from the levels. - if err := db.lc.dropPrefixes(prefixes); err != nil { - return err - } - db.opt.Infof("DropPrefix done") - return nil -} - -// KVList contains a list of key-value pairs. -type KVList = pb.KVList - -// Subscribe can be used to watch key changes for the given key prefixes. -// At least one prefix should be passed, or an error will be returned. -// You can use an empty prefix to monitor all changes to the DB. -// This function blocks until the given context is done or an error occurs. -// The given function will be called with a new KVList containing the modified keys and the -// corresponding values. -func (db *DB) Subscribe(ctx context.Context, cb func(kv *KVList) error, prefixes ...[]byte) error { - if cb == nil { - return ErrNilCallback - } - - c := y.NewCloser(1) - recvCh, id := db.pub.newSubscriber(c, prefixes...) - slurp := func(batch *pb.KVList) error { - for { - select { - case kvs := <-recvCh: - batch.Kv = append(batch.Kv, kvs.Kv...) - default: - if len(batch.GetKv()) > 0 { - return cb(batch) - } - return nil - } - } - } - for { - select { - case <-c.HasBeenClosed(): - // No need to delete here. Closer will be called only while - // closing DB. Subscriber will be deleted by cleanSubscribers. - err := slurp(new(pb.KVList)) - // Drain if any pending updates. - c.Done() - return err - case <-ctx.Done(): - c.Done() - db.pub.deleteSubscriber(id) - // Delete the subscriber to avoid further updates. - return ctx.Err() - case batch := <-recvCh: - err := slurp(batch) - if err != nil { - c.Done() - // Delete the subscriber if there is an error by the callback. - db.pub.deleteSubscriber(id) - return err - } - } - } -} diff --git a/vendor/github.com/dgraph-io/badger/dir_plan9.go b/vendor/github.com/dgraph-io/badger/dir_plan9.go deleted file mode 100644 index ad323d70..00000000 --- a/vendor/github.com/dgraph-io/badger/dir_plan9.go +++ /dev/null @@ -1,150 +0,0 @@ -/* - * Copyright 2020 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package badger - -import ( - "fmt" - "os" - "path/filepath" - "strings" - - "github.com/pkg/errors" -) - -// directoryLockGuard holds a lock on a directory and a pid file inside. The pid file isn't part -// of the locking mechanism, it's just advisory. -type directoryLockGuard struct { - // File handle on the directory, which we've locked. - f *os.File - // The absolute path to our pid file. - path string -} - -// acquireDirectoryLock gets a lock on the directory. -// It will also write our pid to dirPath/pidFileName for convenience. -// readOnly is not supported on Plan 9. 
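Backing up to DB.Subscribe, which is removed just above: its callback contract is easy to misuse. A minimal subscriber sketch; the helper name watchPrefix and the surrounding wiring are hypothetical, but the Subscribe signature and KVList type match the vendored code:

```go
package example

import (
	"context"
	"log"

	badger "github.com/dgraph-io/badger"
)

// watchPrefix blocks until ctx is canceled, logging each batch of changes
// under the given prefix. Returning an error from the callback unsubscribes.
func watchPrefix(ctx context.Context, db *badger.DB, prefix []byte) error {
	return db.Subscribe(ctx, func(list *badger.KVList) error {
		for _, kv := range list.Kv {
			log.Printf("changed: %q (%d value bytes)", kv.Key, len(kv.Value))
		}
		return nil
	}, prefix)
}
```

Per the code above, when ctx ends Subscribe returns ctx.Err(), so callers should treat context.Canceled as a clean shutdown rather than a failure.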
-func acquireDirectoryLock(dirPath string, pidFileName string, readOnly bool) ( - *directoryLockGuard, error) { - if readOnly { - return nil, ErrPlan9NotSupported - } - - // Convert to absolute path so that Release still works even if we do an unbalanced - // chdir in the meantime. - absPidFilePath, err := filepath.Abs(filepath.Join(dirPath, pidFileName)) - if err != nil { - return nil, errors.Wrap(err, "cannot get absolute path for pid lock file") - } - - // If the file was unpacked or created by some other program, it might not - // have the ModeExclusive bit set. Set it before we call OpenFile, so that we - // can be confident that a successful OpenFile implies exclusive use. - // - // OpenFile fails if the file ModeExclusive bit set *and* the file is already open. - // So, if the file is closed when the DB crashed, we're fine. When the process - // that was managing the DB crashes, the OS will close the file for us. - // - // This bit of code is copied from Go's lockedfile internal package: - // https://github.com/golang/go/blob/go1.15rc1/src/cmd/go/internal/lockedfile/lockedfile_plan9.go#L58 - if fi, err := os.Stat(absPidFilePath); err == nil { - if fi.Mode()&os.ModeExclusive == 0 { - if err := os.Chmod(absPidFilePath, fi.Mode()|os.ModeExclusive); err != nil { - return nil, errors.Wrapf(err, "could not set exclusive mode bit") - } - } - } else if !os.IsNotExist(err) { - return nil, err - } - f, err := os.OpenFile(absPidFilePath, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0666|os.ModeExclusive) - if err != nil { - if isLocked(err) { - return nil, errors.Wrapf(err, - "Cannot open pid lock file %q. Another process is using this Badger database", - absPidFilePath) - } - return nil, errors.Wrapf(err, "Cannot open pid lock file %q", absPidFilePath) - } - - if _, err = fmt.Fprintf(f, "%d\n", os.Getpid()); err != nil { - f.Close() - return nil, errors.Wrapf(err, "could not write pid") - } - return &directoryLockGuard{f, absPidFilePath}, nil -} - -// Release deletes the pid file and releases our lock on the directory. -func (guard *directoryLockGuard) release() error { - // It's important that we remove the pid file first. - err := os.Remove(guard.path) - - if closeErr := guard.f.Close(); err == nil { - err = closeErr - } - guard.path = "" - guard.f = nil - - return err -} - -// openDir opens a directory for syncing. -func openDir(path string) (*os.File, error) { return os.Open(path) } - -// When you create or delete a file, you have to ensure the directory entry for the file is synced -// in order to guarantee the file is visible (if the system crashes). (See the man page for fsync, -// or see https://github.com/coreos/etcd/issues/6368 for an example.) -func syncDir(dir string) error { - f, err := openDir(dir) - if err != nil { - return errors.Wrapf(err, "While opening directory: %s.", dir) - } - - err = f.Sync() - closeErr := f.Close() - if err != nil { - return errors.Wrapf(err, "While syncing directory: %s.", dir) - } - return errors.Wrapf(closeErr, "While closing directory: %s.", dir) -} - -// Opening an exclusive-use file returns an error. 
-// The expected error strings are: -// -// - "open/create -- file is locked" (cwfs, kfs) -// - "exclusive lock" (fossil) -// - "exclusive use file already open" (ramfs) -// -// See https://github.com/golang/go/blob/go1.15rc1/src/cmd/go/internal/lockedfile/lockedfile_plan9.go#L16 -var lockedErrStrings = [...]string{ - "file is locked", - "exclusive lock", - "exclusive use file already open", -} - -// Even though plan9 doesn't support the Lock/RLock/Unlock functions to -// manipulate already-open files, IsLocked is still meaningful: os.OpenFile -// itself may return errors that indicate that a file with the ModeExclusive bit -// set is already open. -func isLocked(err error) bool { - s := err.Error() - - for _, frag := range lockedErrStrings { - if strings.Contains(s, frag) { - return true - } - } - return false -} diff --git a/vendor/github.com/dgraph-io/badger/dir_unix.go b/vendor/github.com/dgraph-io/badger/dir_unix.go deleted file mode 100644 index a3fef669..00000000 --- a/vendor/github.com/dgraph-io/badger/dir_unix.go +++ /dev/null @@ -1,118 +0,0 @@ -// +build !windows,!plan9 - -/* - * Copyright 2017 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package badger - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - - "github.com/dgraph-io/badger/y" - "github.com/pkg/errors" - "golang.org/x/sys/unix" -) - -// directoryLockGuard holds a lock on a directory and a pid file inside. The pid file isn't part -// of the locking mechanism, it's just advisory. -type directoryLockGuard struct { - // File handle on the directory, which we've flocked. - f *os.File - // The absolute path to our pid file. - path string - // Was this a shared lock for a read-only database? - readOnly bool -} - -// acquireDirectoryLock gets a lock on the directory (using flock). If -// this is not read-only, it will also write our pid to -// dirPath/pidFileName for convenience. -func acquireDirectoryLock(dirPath string, pidFileName string, readOnly bool) ( - *directoryLockGuard, error) { - // Convert to absolute path so that Release still works even if we do an unbalanced - // chdir in the meantime. - absPidFilePath, err := filepath.Abs(filepath.Join(dirPath, pidFileName)) - if err != nil { - return nil, errors.Wrap(err, "cannot get absolute path for pid lock file") - } - f, err := os.Open(dirPath) - if err != nil { - return nil, errors.Wrapf(err, "cannot open directory %q", dirPath) - } - opts := unix.LOCK_EX | unix.LOCK_NB - if readOnly { - opts = unix.LOCK_SH | unix.LOCK_NB - } - - err = unix.Flock(int(f.Fd()), opts) - if err != nil { - f.Close() - return nil, errors.Wrapf(err, - "Cannot acquire directory lock on %q. Another process is using this Badger database.", - dirPath) - } - - if !readOnly { - // Yes, we happily overwrite a pre-existing pid file. We're the - // only read-write badger process using this directory. 
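The flock-based guard in acquireDirectoryLock above is a generally useful pattern beyond Badger: the OS drops the lock automatically if the process dies, which is what makes it crash-safe. A standalone sketch of the same idea, assuming a Unix platform; the helper name lockDir is hypothetical:

```go
package example

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

// lockDir takes a non-blocking exclusive flock on dirPath. Closing the
// returned file releases the lock; the OS also releases it automatically
// when the holding process exits.
func lockDir(dirPath string) (*os.File, error) {
	f, err := os.Open(dirPath)
	if err != nil {
		return nil, err
	}
	// LOCK_NB turns "wait for the lock" into an immediate error.
	if err := unix.Flock(int(f.Fd()), unix.LOCK_EX|unix.LOCK_NB); err != nil {
		f.Close()
		return nil, fmt.Errorf("directory %q is locked by another process: %v", dirPath, err)
	}
	return f, nil
}
```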
- err = ioutil.WriteFile(absPidFilePath, []byte(fmt.Sprintf("%d\n", os.Getpid())), 0666) - if err != nil { - f.Close() - return nil, errors.Wrapf(err, - "Cannot write pid file %q", absPidFilePath) - } - } - return &directoryLockGuard{f, absPidFilePath, readOnly}, nil -} - -// Release deletes the pid file and releases our lock on the directory. -func (guard *directoryLockGuard) release() error { - var err error - if !guard.readOnly { - // It's important that we remove the pid file first. - err = os.Remove(guard.path) - } - - if closeErr := guard.f.Close(); err == nil { - err = closeErr - } - guard.path = "" - guard.f = nil - - return err -} - -// openDir opens a directory for syncing. -func openDir(path string) (*os.File, error) { return os.Open(path) } - -// When you create or delete a file, you have to ensure the directory entry for the file is synced -// in order to guarantee the file is visible (if the system crashes). (See the man page for fsync, -// or see https://github.com/coreos/etcd/issues/6368 for an example.) -func syncDir(dir string) error { - f, err := openDir(dir) - if err != nil { - return errors.Wrapf(err, "While opening directory: %s.", dir) - } - err = y.FileSync(f) - closeErr := f.Close() - if err != nil { - return errors.Wrapf(err, "While syncing directory: %s.", dir) - } - return errors.Wrapf(closeErr, "While closing directory: %s.", dir) -} diff --git a/vendor/github.com/dgraph-io/badger/dir_windows.go b/vendor/github.com/dgraph-io/badger/dir_windows.go deleted file mode 100644 index 60f982e2..00000000 --- a/vendor/github.com/dgraph-io/badger/dir_windows.go +++ /dev/null @@ -1,110 +0,0 @@ -// +build windows - -/* - * Copyright 2017 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package badger - -// OpenDir opens a directory in windows with write access for syncing. -import ( - "os" - "path/filepath" - "syscall" - - "github.com/pkg/errors" -) - -// FILE_ATTRIBUTE_TEMPORARY - A file that is being used for temporary storage. -// FILE_FLAG_DELETE_ON_CLOSE - The file is to be deleted immediately after all of its handles are -// closed, which includes the specified handle and any other open or duplicated handles. 
-// See: https://docs.microsoft.com/en-us/windows/desktop/FileIO/file-attribute-constants -// NOTE: Added here to avoid importing golang.org/x/sys/windows -const ( - FILE_ATTRIBUTE_TEMPORARY = 0x00000100 - FILE_FLAG_DELETE_ON_CLOSE = 0x04000000 -) - -func openDir(path string) (*os.File, error) { - fd, err := openDirWin(path) - if err != nil { - return nil, err - } - return os.NewFile(uintptr(fd), path), nil -} - -func openDirWin(path string) (fd syscall.Handle, err error) { - if len(path) == 0 { - return syscall.InvalidHandle, syscall.ERROR_FILE_NOT_FOUND - } - pathp, err := syscall.UTF16PtrFromString(path) - if err != nil { - return syscall.InvalidHandle, err - } - access := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE) - sharemode := uint32(syscall.FILE_SHARE_READ | syscall.FILE_SHARE_WRITE) - createmode := uint32(syscall.OPEN_EXISTING) - fl := uint32(syscall.FILE_FLAG_BACKUP_SEMANTICS) - return syscall.CreateFile(pathp, access, sharemode, nil, createmode, fl, 0) -} - -// DirectoryLockGuard holds a lock on the directory. -type directoryLockGuard struct { - h syscall.Handle - path string -} - -// AcquireDirectoryLock acquires exclusive access to a directory. -func acquireDirectoryLock(dirPath string, pidFileName string, readOnly bool) (*directoryLockGuard, error) { - if readOnly { - return nil, ErrWindowsNotSupported - } - - // Convert to absolute path so that Release still works even if we do an unbalanced - // chdir in the meantime. - absLockFilePath, err := filepath.Abs(filepath.Join(dirPath, pidFileName)) - if err != nil { - return nil, errors.Wrap(err, "Cannot get absolute path for pid lock file") - } - - // This call creates a file handler in memory that only one process can use at a time. When - // that process ends, the file is deleted by the system. - // FILE_ATTRIBUTE_TEMPORARY is used to tell Windows to try to create the handle in memory. - // FILE_FLAG_DELETE_ON_CLOSE is not specified in syscall_windows.go but tells Windows to delete - // the file when all processes holding the handler are closed. - // XXX: this works but it's a bit klunky. i'd prefer to use LockFileEx but it needs unsafe pkg. - h, err := syscall.CreateFile( - syscall.StringToUTF16Ptr(absLockFilePath), 0, 0, nil, - syscall.OPEN_ALWAYS, - uint32(FILE_ATTRIBUTE_TEMPORARY|FILE_FLAG_DELETE_ON_CLOSE), - 0) - if err != nil { - return nil, errors.Wrapf(err, - "Cannot create lock file %q. Another process is using this Badger database", - absLockFilePath) - } - - return &directoryLockGuard{h: h, path: absLockFilePath}, nil -} - -// Release removes the directory lock. -func (g *directoryLockGuard) release() error { - g.path = "" - return syscall.CloseHandle(g.h) -} - -// Windows doesn't support syncing directories to the file system. See -// https://github.com/dgraph-io/badger/issues/699#issuecomment-504133587 for more details. -func syncDir(dir string) error { return nil } diff --git a/vendor/github.com/dgraph-io/badger/doc.go b/vendor/github.com/dgraph-io/badger/doc.go deleted file mode 100644 index 83dc9a28..00000000 --- a/vendor/github.com/dgraph-io/badger/doc.go +++ /dev/null @@ -1,28 +0,0 @@ -/* -Package badger implements an embeddable, simple and fast key-value database, -written in pure Go. It is designed to be highly performant for both reads and -writes simultaneously. Badger uses Multi-Version Concurrency Control (MVCC), and -supports transactions. It runs transactions concurrently, with serializable -snapshot isolation guarantees. 
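To make the transactional workflow described in this package comment concrete, a minimal sketch of opening a DB and running read-write and read-only transactions. The path is illustrative, and this assumes the v1 API vendored here, where DefaultOptions takes the directory:

```go
package main

import (
	"log"

	badger "github.com/dgraph-io/badger"
)

func main() {
	// DefaultOptions uses the same directory for keys and values.
	db, err := badger.Open(badger.DefaultOptions("/tmp/badger-example"))
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Read-write transaction.
	if err := db.Update(func(txn *badger.Txn) error {
		return txn.Set([]byte("answer"), []byte("42"))
	}); err != nil {
		log.Fatal(err)
	}

	// Read-only transaction; the value is only valid inside the closure.
	if err := db.View(func(txn *badger.Txn) error {
		item, err := txn.Get([]byte("answer"))
		if err != nil {
			return err
		}
		return item.Value(func(val []byte) error {
			log.Printf("answer = %s", val)
			return nil
		})
	}); err != nil {
		log.Fatal(err)
	}
}
```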
- -Badger uses an LSM tree along with a value log to separate keys from values, -hence reducing both write amplification and the size of the LSM tree. This -allows LSM tree to be served entirely from RAM, while the values are served -from SSD. - - -Usage - -Badger has the following main types: DB, Txn, Item and Iterator. DB contains -keys that are associated with values. It must be opened with the appropriate -options before it can be accessed. - -All operations happen inside a Txn. Txn represents a transaction, which can -be read-only or read-write. Read-only transactions can read values for a -given key (which are returned inside an Item), or iterate over a set of -key-value pairs using an Iterator (which are returned as Item type values as -well). Read-write transactions can also update and delete keys from the DB. - -See the examples for more usage details. -*/ -package badger diff --git a/vendor/github.com/dgraph-io/badger/errors.go b/vendor/github.com/dgraph-io/badger/errors.go deleted file mode 100644 index 933cd130..00000000 --- a/vendor/github.com/dgraph-io/badger/errors.go +++ /dev/null @@ -1,120 +0,0 @@ -/* - * Copyright 2017 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package badger - -import ( - "math" - - "github.com/pkg/errors" -) - -const ( - // ValueThresholdLimit is the maximum permissible value of opt.ValueThreshold. - ValueThresholdLimit = math.MaxUint16 - 16 + 1 -) - -var ( - // ErrValueLogSize is returned when opt.ValueLogFileSize option is not within the valid - // range. - ErrValueLogSize = errors.New("Invalid ValueLogFileSize, must be between 1MB and 2GB") - - // ErrValueThreshold is returned when ValueThreshold is set to a value close to or greater than - // uint16. - ErrValueThreshold = errors.Errorf( - "Invalid ValueThreshold, must be less than %d", ValueThresholdLimit) - - // ErrKeyNotFound is returned when key isn't found on a txn.Get. - ErrKeyNotFound = errors.New("Key not found") - - // ErrTxnTooBig is returned if too many writes are fit into a single transaction. - ErrTxnTooBig = errors.New("Txn is too big to fit into one request") - - // ErrConflict is returned when a transaction conflicts with another transaction. This can - // happen if the read rows had been updated concurrently by another transaction. - ErrConflict = errors.New("Transaction Conflict. Please retry") - - // ErrReadOnlyTxn is returned if an update function is called on a read-only transaction. - ErrReadOnlyTxn = errors.New("No sets or deletes are allowed in a read-only transaction") - - // ErrDiscardedTxn is returned if a previously discarded transaction is re-used. - ErrDiscardedTxn = errors.New("This transaction has been discarded. Create a new one") - - // ErrEmptyKey is returned if an empty key is passed on an update function. - ErrEmptyKey = errors.New("Key cannot be empty") - - // ErrInvalidKey is returned if the key has a special !badger! prefix, - // reserved for internal usage. 
- ErrInvalidKey = errors.New("Key is using a reserved !badger! prefix") - - // ErrRetry is returned when a log file containing the value is not found. - // This usually indicates that it may have been garbage collected, and the - // operation needs to be retried. - ErrRetry = errors.New("Unable to find log file. Please retry") - - // ErrThresholdZero is returned if threshold is set to zero, and value log GC is called. - // In such a case, GC can't be run. - ErrThresholdZero = errors.New( - "Value log GC can't run because threshold is set to zero") - - // ErrNoRewrite is returned if a call for value log GC doesn't result in a log file rewrite. - ErrNoRewrite = errors.New( - "Value log GC attempt didn't result in any cleanup") - - // ErrRejected is returned if a value log GC is called either while another GC is running, or - // after DB::Close has been called. - ErrRejected = errors.New("Value log GC request rejected") - - // ErrInvalidRequest is returned if the user request is invalid. - ErrInvalidRequest = errors.New("Invalid request") - - // ErrManagedTxn is returned if the user tries to use an API which isn't - // allowed due to external management of transactions, when using ManagedDB. - ErrManagedTxn = errors.New( - "Invalid API request. Not allowed to perform this action using ManagedDB") - - // ErrInvalidDump if a data dump made previously cannot be loaded into the database. - ErrInvalidDump = errors.New("Data dump cannot be read") - - // ErrZeroBandwidth is returned if the user passes in zero bandwidth for sequence. - ErrZeroBandwidth = errors.New("Bandwidth must be greater than zero") - - // ErrInvalidLoadingMode is returned when opt.ValueLogLoadingMode option is not - // within the valid range - ErrInvalidLoadingMode = errors.New("Invalid ValueLogLoadingMode, must be FileIO or MemoryMap") - - // ErrReplayNeeded is returned when opt.ReadOnly is set but the - // database requires a value log replay. - ErrReplayNeeded = errors.New("Database was not properly closed, cannot open read-only") - - // ErrWindowsNotSupported is returned when opt.ReadOnly is used on Windows - ErrWindowsNotSupported = errors.New("Read-only mode is not supported on Windows") - - // ErrPlan9NotSupported is returned when opt.ReadOnly is used on Plan 9 - ErrPlan9NotSupported = errors.New("Read-only mode is not supported on Plan 9") - - // ErrTruncateNeeded is returned when the value log gets corrupt, and requires truncation of - // corrupt data to allow Badger to run properly. - ErrTruncateNeeded = errors.New( - "Value log truncate required to run DB. This might result in data loss") - - // ErrBlockedWrites is returned if the user called DropAll. During the process of dropping all - // data from Badger, we stop accepting new writes, by returning this error. - ErrBlockedWrites = errors.New("Writes are blocked, possibly due to DropAll or Close") - - // ErrNilCallback is returned when subscriber's callback is nil. - ErrNilCallback = errors.New("Callback cannot be nil") -) diff --git a/vendor/github.com/dgraph-io/badger/histogram.go b/vendor/github.com/dgraph-io/badger/histogram.go deleted file mode 100644 index d8c94bb7..00000000 --- a/vendor/github.com/dgraph-io/badger/histogram.go +++ /dev/null @@ -1,169 +0,0 @@ -/* - * Copyright 2019 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-import (
- "fmt"
- "math"
-)
-
-// PrintHistogram builds and displays the key-value size histogram.
-// When keyPrefix is set, only the keys that have prefix "keyPrefix" are
-// considered for creating the histogram
-func (db *DB) PrintHistogram(keyPrefix []byte) {
- if db == nil {
- fmt.Println("\nCannot build histogram: DB is nil.")
- return
- }
- histogram := db.buildHistogram(keyPrefix)
- fmt.Printf("Histogram of key sizes (in bytes)\n")
- histogram.keySizeHistogram.printHistogram()
- fmt.Printf("Histogram of value sizes (in bytes)\n")
- histogram.valueSizeHistogram.printHistogram()
-}
-
-// histogramData stores information about a histogram
-type histogramData struct {
- bins []int64
- countPerBin []int64
- totalCount int64
- min int64
- max int64
- sum int64
-}
-
-// sizeHistogram contains keySize histogram and valueSize histogram
-type sizeHistogram struct {
- keySizeHistogram, valueSizeHistogram histogramData
-}
-
-// newSizeHistogram returns a new instance of keyValueSizeHistogram with
-// properly initialized fields.
-func newSizeHistogram() *sizeHistogram {
- // TODO(ibrahim): find appropriate bin size.
- keyBins := createHistogramBins(1, 16)
- valueBins := createHistogramBins(1, 30)
- return &sizeHistogram{
- keySizeHistogram: histogramData{
- bins: keyBins,
- countPerBin: make([]int64, len(keyBins)+1),
- max: math.MinInt64,
- min: math.MaxInt64,
- sum: 0,
- },
- valueSizeHistogram: histogramData{
- bins: valueBins,
- countPerBin: make([]int64, len(valueBins)+1),
- max: math.MinInt64,
- min: math.MaxInt64,
- sum: 0,
- },
- }
-}
-
-// createHistogramBins creates bins for an histogram. The bin sizes are powers
-// of two of the form [2^min_exponent, ..., 2^max_exponent].
-func createHistogramBins(minExponent, maxExponent uint32) []int64 {
- var bins []int64
- for i := minExponent; i <= maxExponent; i++ {
- bins = append(bins, int64(1)<<i)
- }
- return bins
-}
-
-// Update records value in the histogram: it adjusts the min, max and sum
-// fields, and increments the count of the bin that value falls into.
-func (histogram *histogramData) Update(value int64) {
- if value > histogram.max {
- histogram.max = value
- }
- if value < histogram.min {
- histogram.min = value
- }
-
- histogram.sum += value
- histogram.totalCount++
-
- for index := 0; index <= len(histogram.bins); index++ {
- // Allocate value in the last buckets if we reached the end of the Bounds array.
- if index == len(histogram.bins) {
- histogram.countPerBin[index]++
- break
- }
-
- // Check if the value should be added to the "index" bin
- if value < int64(histogram.bins[index]) {
- histogram.countPerBin[index]++
- break
- }
- }
-}
-
-// buildHistogram builds the key-value size histogram.
-// When keyPrefix is set, only the keys that have prefix "keyPrefix" are
-// considered for creating the histogram
-func (db *DB) buildHistogram(keyPrefix []byte) *sizeHistogram {
- txn := db.NewTransaction(false)
- defer txn.Discard()
-
- itr := txn.NewIterator(DefaultIteratorOptions)
- defer itr.Close()
-
- badgerHistogram := newSizeHistogram()
-
- // Collect key and value sizes.
- for itr.Seek(keyPrefix); itr.ValidForPrefix(keyPrefix); itr.Next() { - item := itr.Item() - badgerHistogram.keySizeHistogram.Update(item.KeySize()) - badgerHistogram.valueSizeHistogram.Update(item.ValueSize()) - } - return badgerHistogram -} - -// printHistogram prints the histogram data in a human-readable format. -func (histogram histogramData) printHistogram() { - fmt.Printf("Total count: %d\n", histogram.totalCount) - fmt.Printf("Min value: %d\n", histogram.min) - fmt.Printf("Max value: %d\n", histogram.max) - fmt.Printf("Mean: %.2f\n", float64(histogram.sum)/float64(histogram.totalCount)) - fmt.Printf("%24s %9s\n", "Range", "Count") - - numBins := len(histogram.bins) - for index, count := range histogram.countPerBin { - if count == 0 { - continue - } - - // The last bin represents the bin that contains the range from - // the last bin up to infinity so it's processed differently than the - // other bins. - if index == len(histogram.countPerBin)-1 { - lowerBound := int(histogram.bins[numBins-1]) - fmt.Printf("[%10d, %10s) %9d\n", lowerBound, "infinity", count) - continue - } - - upperBound := int(histogram.bins[index]) - lowerBound := 0 - if index > 0 { - lowerBound = int(histogram.bins[index-1]) - } - - fmt.Printf("[%10d, %10d) %9d\n", lowerBound, upperBound, count) - } - fmt.Println() -} diff --git a/vendor/github.com/dgraph-io/badger/iterator.go b/vendor/github.com/dgraph-io/badger/iterator.go deleted file mode 100644 index c11f2549..00000000 --- a/vendor/github.com/dgraph-io/badger/iterator.go +++ /dev/null @@ -1,736 +0,0 @@ -/* - * Copyright 2017 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package badger - -import ( - "bytes" - "fmt" - "hash/crc32" - "sort" - "sync" - "sync/atomic" - "time" - - "github.com/dgraph-io/badger/options" - "github.com/dgraph-io/badger/table" - - "github.com/dgraph-io/badger/y" -) - -type prefetchStatus uint8 - -const ( - prefetched prefetchStatus = iota + 1 -) - -// Item is returned during iteration. Both the Key() and Value() output is only valid until -// iterator.Next() is called. -type Item struct { - status prefetchStatus - err error - wg sync.WaitGroup - db *DB - key []byte - vptr []byte - meta byte // We need to store meta to know about bitValuePointer. - userMeta byte - expiresAt uint64 - val []byte - slice *y.Slice // Used only during prefetching. - next *Item - version uint64 - txn *Txn -} - -// String returns a string representation of Item -func (item *Item) String() string { - return fmt.Sprintf("key=%q, version=%d, meta=%x", item.Key(), item.Version(), item.meta) -} - -// Key returns the key. -// -// Key is only valid as long as item is valid, or transaction is valid. If you need to use it -// outside its validity, please use KeyCopy. -func (item *Item) Key() []byte { - return item.key -} - -// KeyCopy returns a copy of the key of the item, writing it to dst slice. -// If nil is passed, or capacity of dst isn't sufficient, a new slice would be allocated and -// returned. 
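The copy contract above is the most common pitfall in this API: Key and Value slices are reused once Next() is called, so anything retained past the current step must go through KeyCopy or ValueCopy. A short sketch, assuming an open *badger.DB; the helper name collectKeys is hypothetical:

```go
package example

import (
	badger "github.com/dgraph-io/badger"
)

// collectKeys copies every key out of a read-only transaction so the
// slices remain valid after the transaction and iterator are closed.
func collectKeys(db *badger.DB) ([][]byte, error) {
	var keys [][]byte
	err := db.View(func(txn *badger.Txn) error {
		opts := badger.DefaultIteratorOptions
		opts.PrefetchValues = false // keys only; skip value prefetch
		it := txn.NewIterator(opts)
		defer it.Close()
		for it.Rewind(); it.Valid(); it.Next() {
			// Item.Key() is reused on Next(); KeyCopy(nil) allocates a copy.
			keys = append(keys, it.Item().KeyCopy(nil))
		}
		return nil
	})
	return keys, err
}
```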
-func (item *Item) KeyCopy(dst []byte) []byte { - return y.SafeCopy(dst, item.key) -} - -// Version returns the commit timestamp of the item. -func (item *Item) Version() uint64 { - return item.version -} - -// Value retrieves the value of the item from the value log. -// -// This method must be called within a transaction. Calling it outside a -// transaction is considered undefined behavior. If an iterator is being used, -// then Item.Value() is defined in the current iteration only, because items are -// reused. -// -// If you need to use a value outside a transaction, please use Item.ValueCopy -// instead, or copy it yourself. Value might change once discard or commit is called. -// Use ValueCopy if you want to do a Set after Get. -func (item *Item) Value(fn func(val []byte) error) error { - item.wg.Wait() - if item.status == prefetched { - if item.err == nil && fn != nil { - if err := fn(item.val); err != nil { - return err - } - } - return item.err - } - buf, cb, err := item.yieldItemValue() - defer runCallback(cb) - if err != nil { - return err - } - if fn != nil { - return fn(buf) - } - return nil -} - -// ValueCopy returns a copy of the value of the item from the value log, writing it to dst slice. -// If nil is passed, or capacity of dst isn't sufficient, a new slice would be allocated and -// returned. Tip: It might make sense to reuse the returned slice as dst argument for the next call. -// -// This function is useful in long running iterate/update transactions to avoid a write deadlock. -// See Github issue: https://github.com/dgraph-io/badger/issues/315 -func (item *Item) ValueCopy(dst []byte) ([]byte, error) { - item.wg.Wait() - if item.status == prefetched { - return y.SafeCopy(dst, item.val), item.err - } - buf, cb, err := item.yieldItemValue() - defer runCallback(cb) - return y.SafeCopy(dst, buf), err -} - -func (item *Item) hasValue() bool { - if item.meta == 0 && item.vptr == nil { - // key not found - return false - } - return true -} - -// IsDeletedOrExpired returns true if item contains deleted or expired value. -func (item *Item) IsDeletedOrExpired() bool { - return isDeletedOrExpired(item.meta, item.expiresAt) -} - -// DiscardEarlierVersions returns whether the item was created with the -// option to discard earlier versions of a key when multiple are available. -func (item *Item) DiscardEarlierVersions() bool { - return item.meta&bitDiscardEarlierVersions > 0 -} - -func (item *Item) yieldItemValue() ([]byte, func(), error) { - key := item.Key() // No need to copy. - for { - if !item.hasValue() { - return nil, nil, nil - } - - if item.slice == nil { - item.slice = new(y.Slice) - } - - if (item.meta & bitValuePointer) == 0 { - val := item.slice.Resize(len(item.vptr)) - copy(val, item.vptr) - return val, nil, nil - } - - var vp valuePointer - vp.Decode(item.vptr) - result, cb, err := item.db.vlog.Read(vp, item.slice) - if err != ErrRetry { - return result, cb, err - } - if bytes.HasPrefix(key, badgerMove) { - // err == ErrRetry - // Error is retry even after checking the move keyspace. So, let's - // just assume that value is not present. - return nil, cb, nil - } - - // The value pointer is pointing to a deleted value log. Look for the - // move key and read that instead. - runCallback(cb) - // Do not put badgerMove on the left in append. It seems to cause some sort of manipulation. 
- keyTs := y.KeyWithTs(item.Key(), item.Version()) - key = make([]byte, len(badgerMove)+len(keyTs)) - n := copy(key, badgerMove) - copy(key[n:], keyTs) - // Note that we can't set item.key to move key, because that would - // change the key user sees before and after this call. Also, this move - // logic is internal logic and should not impact the external behavior - // of the retrieval. - vs, err := item.db.get(key) - if err != nil { - return nil, nil, err - } - if vs.Version != item.Version() { - return nil, nil, nil - } - // Bug fix: Always copy the vs.Value into vptr here. Otherwise, when item is reused this - // slice gets overwritten. - item.vptr = y.SafeCopy(item.vptr, vs.Value) - item.meta &^= bitValuePointer // Clear the value pointer bit. - if vs.Meta&bitValuePointer > 0 { - item.meta |= bitValuePointer // This meta would only be about value pointer. - } - } -} - -func runCallback(cb func()) { - if cb != nil { - cb() - } -} - -func (item *Item) prefetchValue() { - val, cb, err := item.yieldItemValue() - defer runCallback(cb) - - item.err = err - item.status = prefetched - if val == nil { - return - } - if item.db.opt.ValueLogLoadingMode == options.MemoryMap { - buf := item.slice.Resize(len(val)) - copy(buf, val) - item.val = buf - } else { - item.val = val - } -} - -// EstimatedSize returns the approximate size of the key-value pair. -// -// This can be called while iterating through a store to quickly estimate the -// size of a range of key-value pairs (without fetching the corresponding -// values). -func (item *Item) EstimatedSize() int64 { - if !item.hasValue() { - return 0 - } - if (item.meta & bitValuePointer) == 0 { - return int64(len(item.key) + len(item.vptr)) - } - var vp valuePointer - vp.Decode(item.vptr) - return int64(vp.Len) // includes key length. -} - -// KeySize returns the size of the key. -// Exact size of the key is key + 8 bytes of timestamp -func (item *Item) KeySize() int64 { - return int64(len(item.key)) -} - -// ValueSize returns the exact size of the value. -// -// This can be called to quickly estimate the size of a value without fetching -// it. -func (item *Item) ValueSize() int64 { - if !item.hasValue() { - return 0 - } - if (item.meta & bitValuePointer) == 0 { - return int64(len(item.vptr)) - } - var vp valuePointer - vp.Decode(item.vptr) - - klen := int64(len(item.key) + 8) // 8 bytes for timestamp. - return int64(vp.Len) - klen - headerBufSize - crc32.Size -} - -// UserMeta returns the userMeta set by the user. Typically, this byte, optionally set by the user -// is used to interpret the value. -func (item *Item) UserMeta() byte { - return item.userMeta -} - -// ExpiresAt returns a Unix time value indicating when the item will be -// considered expired. 0 indicates that the item will never expire. -func (item *Item) ExpiresAt() uint64 { - return item.expiresAt -} - -// TODO: Switch this to use linked list container in Go. -type list struct { - head *Item - tail *Item -} - -func (l *list) push(i *Item) { - i.next = nil - if l.tail == nil { - l.head = i - l.tail = i - return - } - l.tail.next = i - l.tail = i -} - -func (l *list) pop() *Item { - if l.head == nil { - return nil - } - i := l.head - if l.head == l.tail { - l.tail = nil - l.head = nil - } else { - l.head = i.next - } - i.next = nil - return i -} - -// IteratorOptions is used to set options when iterating over Badger key-value -// stores. -// -// This package provides DefaultIteratorOptions which contains options that -// should work for most applications. 
Consider using that as a starting point -// before customizing it for your own needs. -type IteratorOptions struct { - // Indicates whether we should prefetch values during iteration and store them. - PrefetchValues bool - // How many KV pairs to prefetch while iterating. Valid only if PrefetchValues is true. - PrefetchSize int - Reverse bool // Direction of iteration. False is forward, true is backward. - AllVersions bool // Fetch all valid versions of the same key. - - // The following option is used to narrow down the SSTables that iterator picks up. If - // Prefix is specified, only tables which could have this prefix are picked based on their range - // of keys. - Prefix []byte // Only iterate over this given prefix. - prefixIsKey bool // If set, use the prefix for bloom filter lookup. - - InternalAccess bool // Used to allow internal access to badger keys. -} - -func (opt *IteratorOptions) compareToPrefix(key []byte) int { - // We should compare key without timestamp. For example key - a[TS] might be > "aa" prefix. - key = y.ParseKey(key) - if len(key) > len(opt.Prefix) { - key = key[:len(opt.Prefix)] - } - return bytes.Compare(key, opt.Prefix) -} - -func (opt *IteratorOptions) pickTable(t table.TableInterface) bool { - if len(opt.Prefix) == 0 { - return true - } - if opt.compareToPrefix(t.Smallest()) > 0 { - return false - } - if opt.compareToPrefix(t.Biggest()) < 0 { - return false - } - // Bloom filter lookup would only work if opt.Prefix does NOT have the read - // timestamp as part of the key. - if opt.prefixIsKey && t.DoesNotHave(opt.Prefix) { - return false - } - return true -} - -// pickTables picks the necessary table for the iterator. This function also assumes -// that the tables are sorted in the right order. -func (opt *IteratorOptions) pickTables(all []*table.Table) []*table.Table { - if len(opt.Prefix) == 0 { - out := make([]*table.Table, len(all)) - copy(out, all) - return out - } - sIdx := sort.Search(len(all), func(i int) bool { - return opt.compareToPrefix(all[i].Biggest()) >= 0 - }) - if sIdx == len(all) { - // Not found. - return []*table.Table{} - } - - filtered := all[sIdx:] - if !opt.prefixIsKey { - eIdx := sort.Search(len(filtered), func(i int) bool { - return opt.compareToPrefix(filtered[i].Smallest()) > 0 - }) - out := make([]*table.Table, len(filtered[:eIdx])) - copy(out, filtered[:eIdx]) - return out - } - - var out []*table.Table - for _, t := range filtered { - // When we encounter the first table whose smallest key is higher than - // opt.Prefix, we can stop. - if opt.compareToPrefix(t.Smallest()) > 0 { - return out - } - // opt.Prefix is actually the key. So, we can run bloom filter checks - // as well. - if t.DoesNotHave(opt.Prefix) { - continue - } - out = append(out, t) - } - return out -} - -// DefaultIteratorOptions contains default options when iterating over Badger key-value stores. -var DefaultIteratorOptions = IteratorOptions{ - PrefetchValues: true, - PrefetchSize: 100, - Reverse: false, - AllVersions: false, -} - -// Iterator helps iterating over the KV pairs in a lexicographically sorted order. -type Iterator struct { - iitr y.Iterator - txn *Txn - readTs uint64 - - opt IteratorOptions - item *Item - data list - waste list - - lastKey []byte // Used to skip over multiple versions of the same key. - - closed bool -} - -// NewIterator returns a new iterator. Depending upon the options, either only keys, or both -// key-value pairs would be fetched. The keys are returned in lexicographically sorted order. 
-// Using prefetch is recommended if you're doing a long running iteration, for performance. -// -// Multiple Iterators: -// For a read-only txn, multiple iterators can be running simultaneously. However, for a read-write -// txn, only one can be running at one time to avoid race conditions, because Txn is thread-unsafe. -func (txn *Txn) NewIterator(opt IteratorOptions) *Iterator { - if txn.discarded { - panic("Transaction has already been discarded") - } - // Do not change the order of the next if. We must track the number of running iterators. - if atomic.AddInt32(&txn.numIterators, 1) > 1 && txn.update { - atomic.AddInt32(&txn.numIterators, -1) - panic("Only one iterator can be active at one time, for a RW txn.") - } - - // TODO: If Prefix is set, only pick those memtables which have keys with - // the prefix. - tables, decr := txn.db.getMemTables() - defer decr() - txn.db.vlog.incrIteratorCount() - var iters []y.Iterator - if itr := txn.newPendingWritesIterator(opt.Reverse); itr != nil { - iters = append(iters, itr) - } - for i := 0; i < len(tables); i++ { - iters = append(iters, tables[i].NewUniIterator(opt.Reverse)) - } - iters = txn.db.lc.appendIterators(iters, &opt) // This will increment references. - - res := &Iterator{ - txn: txn, - iitr: table.NewMergeIterator(iters, opt.Reverse), - opt: opt, - readTs: txn.readTs, - } - return res -} - -// NewKeyIterator is just like NewIterator, but allows the user to iterate over all versions of a -// single key. Internally, it sets the Prefix option in provided opt, and uses that prefix to -// additionally run bloom filter lookups before picking tables from the LSM tree. -func (txn *Txn) NewKeyIterator(key []byte, opt IteratorOptions) *Iterator { - if len(opt.Prefix) > 0 { - panic("opt.Prefix should be nil for NewKeyIterator.") - } - opt.Prefix = key // This key must be without the timestamp. - opt.prefixIsKey = true - opt.AllVersions = true - return txn.NewIterator(opt) -} - -func (it *Iterator) newItem() *Item { - item := it.waste.pop() - if item == nil { - item = &Item{slice: new(y.Slice), db: it.txn.db, txn: it.txn} - } - return item -} - -// Item returns pointer to the current key-value pair. -// This item is only valid until it.Next() gets called. -func (it *Iterator) Item() *Item { - tx := it.txn - tx.addReadKey(it.item.Key()) - return it.item -} - -// Valid returns false when iteration is done. -func (it *Iterator) Valid() bool { - if it.item == nil { - return false - } - if it.opt.prefixIsKey { - return bytes.Equal(it.item.key, it.opt.Prefix) - } - return bytes.HasPrefix(it.item.key, it.opt.Prefix) -} - -// ValidForPrefix returns false when iteration is done -// or when the current key is not prefixed by the specified prefix. -func (it *Iterator) ValidForPrefix(prefix []byte) bool { - return it.Valid() && bytes.HasPrefix(it.item.key, prefix) -} - -// Close would close the iterator. It is important to call this when you're done with iteration. -func (it *Iterator) Close() { - if it.closed { - return - } - it.closed = true - - it.iitr.Close() - // It is important to wait for the fill goroutines to finish. Otherwise, we might leave zombie - // goroutines behind, which are waiting to acquire file read locks after DB has been closed. - waitFor := func(l list) { - item := l.pop() - for item != nil { - item.wg.Wait() - item = l.pop() - } - } - waitFor(it.waste) - waitFor(it.data) - - // TODO: We could handle this error. 
- _ = it.txn.db.vlog.decrIteratorCount()
- atomic.AddInt32(&it.txn.numIterators, -1)
-}
-
-// Next would advance the iterator by one. Always check it.Valid() after a Next()
-// to ensure you have access to a valid it.Item().
-func (it *Iterator) Next() {
- // Reuse current item
- it.item.wg.Wait() // Just cleaner to wait before pushing to avoid doing ref counting.
- it.waste.push(it.item)
-
- // Set next item to current
- it.item = it.data.pop()
-
- for it.iitr.Valid() {
- if it.parseItem() {
- // parseItem calls one extra next.
- // This is used to deal with the complexity of reverse iteration.
- break
- }
- }
-}
-
-func isDeletedOrExpired(meta byte, expiresAt uint64) bool {
- if meta&bitDelete > 0 {
- return true
- }
- if expiresAt == 0 {
- return false
- }
- return expiresAt <= uint64(time.Now().Unix())
-}
-
-// parseItem is a complex function because it needs to handle both the forward and reverse
-// iteration implementations. We store keys such that their versions are sorted in descending
-// order. This makes forward iteration efficient, but reverse iteration complicated. This
-// tradeoff is better because forward iteration is more common than reverse.
-//
-// This function advances the iterator.
-func (it *Iterator) parseItem() bool {
- mi := it.iitr
- key := mi.Key()
-
- setItem := func(item *Item) {
- if it.item == nil {
- it.item = item
- } else {
- it.data.push(item)
- }
- }
-
- // Skip badger keys.
- if !it.opt.InternalAccess && bytes.HasPrefix(key, badgerPrefix) {
- mi.Next()
- return false
- }
-
- // Skip any versions which are beyond the readTs.
- version := y.ParseTs(key)
- if version > it.readTs {
- mi.Next()
- return false
- }
-
- if it.opt.AllVersions {
- // Return deleted or expired values also, otherwise user can't figure out
- // whether the key was deleted.
- item := it.newItem()
- it.fill(item)
- setItem(item)
- mi.Next()
- return true
- }
-
- // If iterating in forward direction, then just checking the last key against current key would
- // be sufficient.
- if !it.opt.Reverse {
- if y.SameKey(it.lastKey, key) {
- mi.Next()
- return false
- }
- // Only track in forward direction.
- // We should update lastKey as soon as we find a different key in our snapshot.
- // Consider keys: a 5, b 7 (del), b 5. When iterating, lastKey = a.
- // Then we see b 7, which is deleted. If we don't store lastKey = b, we'll then return b 5,
- // which is wrong. Therefore, update lastKey here.
- it.lastKey = y.SafeCopy(it.lastKey, mi.Key())
- }
-
-FILL:
- // If deleted, advance and return.
- vs := mi.Value()
- if isDeletedOrExpired(vs.Meta, vs.ExpiresAt) {
- mi.Next()
- return false
- }
-
- item := it.newItem()
- it.fill(item)
- // fill item based on current cursor position. All Next calls have returned, so reaching here
- // means no Next was called.
-
- mi.Next() // Advance but no fill item yet.
- if !it.opt.Reverse || !mi.Valid() { // Forward direction, or invalid.
- setItem(item)
- return true
- }
-
- // Reverse direction.
- nextTs := y.ParseTs(mi.Key())
- mik := y.ParseKey(mi.Key())
- if nextTs <= it.readTs && bytes.Equal(mik, item.key) {
- // This is a valid potential candidate.
- goto FILL
- }
- // Ignore the next candidate. Return the current one.
- setItem(item) - return true -} - -func (it *Iterator) fill(item *Item) { - vs := it.iitr.Value() - item.meta = vs.Meta - item.userMeta = vs.UserMeta - item.expiresAt = vs.ExpiresAt - - item.version = y.ParseTs(it.iitr.Key()) - item.key = y.SafeCopy(item.key, y.ParseKey(it.iitr.Key())) - - item.vptr = y.SafeCopy(item.vptr, vs.Value) - item.val = nil - if it.opt.PrefetchValues { - item.wg.Add(1) - go func() { - // FIXME we are not handling errors here. - item.prefetchValue() - item.wg.Done() - }() - } -} - -func (it *Iterator) prefetch() { - prefetchSize := 2 - if it.opt.PrefetchValues && it.opt.PrefetchSize > 1 { - prefetchSize = it.opt.PrefetchSize - } - - i := it.iitr - var count int - it.item = nil - for i.Valid() { - if !it.parseItem() { - continue - } - count++ - if count == prefetchSize { - break - } - } -} - -// Seek would seek to the provided key if present. If absent, it would seek to the next -// smallest key greater than the provided key if iterating in the forward direction. -// Behavior would be reversed if iterating backwards. -func (it *Iterator) Seek(key []byte) { - for i := it.data.pop(); i != nil; i = it.data.pop() { - i.wg.Wait() - it.waste.push(i) - } - - it.lastKey = it.lastKey[:0] - if len(key) == 0 { - key = it.opt.Prefix - } - if len(key) == 0 { - it.iitr.Rewind() - it.prefetch() - return - } - - if !it.opt.Reverse { - key = y.KeyWithTs(key, it.txn.readTs) - } else { - key = y.KeyWithTs(key, 0) - } - it.iitr.Seek(key) - it.prefetch() -} - -// Rewind would rewind the iterator cursor all the way to zero-th position, which would be the -// smallest key if iterating forward, and largest if iterating backward. It does not keep track of -// whether the cursor started with a Seek(). -func (it *Iterator) Rewind() { - it.Seek(nil) -} diff --git a/vendor/github.com/dgraph-io/badger/level_handler.go b/vendor/github.com/dgraph-io/badger/level_handler.go deleted file mode 100644 index 1ea2af22..00000000 --- a/vendor/github.com/dgraph-io/badger/level_handler.go +++ /dev/null @@ -1,326 +0,0 @@ -/* - * Copyright 2017 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package badger - -import ( - "fmt" - "sort" - "sync" - - "github.com/dgraph-io/badger/table" - "github.com/dgraph-io/badger/y" - "github.com/pkg/errors" -) - -type levelHandler struct { - // Guards tables, totalSize. - sync.RWMutex - - // For level >= 1, tables are sorted by key ranges, which do not overlap. - // For level 0, tables are sorted by time. - // For level 0, newest table are at the back. Compact the oldest one first, which is at the front. - tables []*table.Table - totalSize int64 - - // The following are initialized once and const. - level int - strLevel string - maxTotalSize int64 - db *DB -} - -func (s *levelHandler) getTotalSize() int64 { - s.RLock() - defer s.RUnlock() - return s.totalSize -} - -// initTables replaces s.tables with given tables. This is done during loading. 
-func (s *levelHandler) initTables(tables []*table.Table) {
-	s.Lock()
-	defer s.Unlock()
-
-	s.tables = tables
-	s.totalSize = 0
-	for _, t := range tables {
-		s.totalSize += t.Size()
-	}
-
-	if s.level == 0 {
-		// Key range will overlap. Just sort by fileID in ascending order
-		// because newer tables are at the end of level 0.
-		sort.Slice(s.tables, func(i, j int) bool {
-			return s.tables[i].ID() < s.tables[j].ID()
-		})
-	} else {
-		// Sort tables by keys.
-		sort.Slice(s.tables, func(i, j int) bool {
-			return y.CompareKeys(s.tables[i].Smallest(), s.tables[j].Smallest()) < 0
-		})
-	}
-}
-
-// deleteTables removes the given tables from the level and decrements their references.
-func (s *levelHandler) deleteTables(toDel []*table.Table) error {
-	s.Lock() // s.Unlock() below
-
-	toDelMap := make(map[uint64]struct{})
-	for _, t := range toDel {
-		toDelMap[t.ID()] = struct{}{}
-	}
-
-	// Make a copy as iterators might be keeping a slice of tables.
-	var newTables []*table.Table
-	for _, t := range s.tables {
-		_, found := toDelMap[t.ID()]
-		if !found {
-			newTables = append(newTables, t)
-			continue
-		}
-		s.totalSize -= t.Size()
-	}
-	s.tables = newTables
-
-	s.Unlock() // Unlock s _before_ we DecrRef our tables, which can be slow.
-
-	return decrRefs(toDel)
-}
-
-// replaceTables will replace tables[left:right] with newTables. Note this EXCLUDES tables[right].
-// You must call decr() to delete the old tables _after_ writing the update to the manifest.
-func (s *levelHandler) replaceTables(toDel, toAdd []*table.Table) error {
-	// Need to re-search the range of tables in this level to be replaced as other goroutines might
-	// be changing it as well. (They can't touch our tables, but if they add/remove other tables,
-	// the indices get shifted around.)
-	s.Lock() // We s.Unlock() below.
-
-	toDelMap := make(map[uint64]struct{})
-	for _, t := range toDel {
-		toDelMap[t.ID()] = struct{}{}
-	}
-	var newTables []*table.Table
-	for _, t := range s.tables {
-		_, found := toDelMap[t.ID()]
-		if !found {
-			newTables = append(newTables, t)
-			continue
-		}
-		s.totalSize -= t.Size()
-	}
-
-	// Increase totalSize first.
-	for _, t := range toAdd {
-		s.totalSize += t.Size()
-		t.IncrRef()
-		newTables = append(newTables, t)
-	}
-
-	// Assign tables.
-	s.tables = newTables
-	sort.Slice(s.tables, func(i, j int) bool {
-		return y.CompareKeys(s.tables[i].Smallest(), s.tables[j].Smallest()) < 0
-	})
-	s.Unlock() // s.Unlock before we DecrRef tables -- that can be slow.
-	return decrRefs(toDel)
-}
-
-// addTable adds toAdd table to levelHandler. Normally when we add tables to levelHandler, we sort
-// tables based on table.Smallest. This is required for correctness of the system. But in case of
-// stream writer this can be avoided. We can just add tables to levelHandler's table list
-// and after all addTable calls, we can sort the table list (see sortTables).
-// NOTE: levelHandler.sortTables() should be called after all addTable calls are done.
-func (s *levelHandler) addTable(t *table.Table) {
-	s.Lock()
-	defer s.Unlock()
-
-	s.totalSize += t.Size() // Increase totalSize first.
-	t.IncrRef()
-	s.tables = append(s.tables, t)
-}
-
-// sortTables sorts tables of levelHandler based on table.Smallest.
-// Normally it should be called after all addTable calls.
-func (s *levelHandler) sortTables() { - s.RLock() - defer s.RUnlock() - - sort.Slice(s.tables, func(i, j int) bool { - return y.CompareKeys(s.tables[i].Smallest(), s.tables[j].Smallest()) < 0 - }) -} - -func decrRefs(tables []*table.Table) error { - for _, table := range tables { - if err := table.DecrRef(); err != nil { - return err - } - } - return nil -} - -func newLevelHandler(db *DB, level int) *levelHandler { - return &levelHandler{ - level: level, - strLevel: fmt.Sprintf("l%d", level), - db: db, - } -} - -// tryAddLevel0Table returns true if ok and no stalling. -func (s *levelHandler) tryAddLevel0Table(t *table.Table) bool { - y.AssertTrue(s.level == 0) - // Need lock as we may be deleting the first table during a level 0 compaction. - s.Lock() - defer s.Unlock() - if len(s.tables) >= s.db.opt.NumLevelZeroTablesStall { - return false - } - - s.tables = append(s.tables, t) - t.IncrRef() - s.totalSize += t.Size() - - return true -} - -func (s *levelHandler) numTables() int { - s.RLock() - defer s.RUnlock() - return len(s.tables) -} - -func (s *levelHandler) close() error { - s.RLock() - defer s.RUnlock() - var err error - for _, t := range s.tables { - if closeErr := t.Close(); closeErr != nil && err == nil { - err = closeErr - } - } - return errors.Wrap(err, "levelHandler.close") -} - -// getTableForKey acquires a read-lock to access s.tables. It returns a list of tableHandlers. -func (s *levelHandler) getTableForKey(key []byte) ([]*table.Table, func() error) { - s.RLock() - defer s.RUnlock() - - if s.level == 0 { - // For level 0, we need to check every table. Remember to make a copy as s.tables may change - // once we exit this function, and we don't want to lock s.tables while seeking in tables. - // CAUTION: Reverse the tables. - out := make([]*table.Table, 0, len(s.tables)) - for i := len(s.tables) - 1; i >= 0; i-- { - out = append(out, s.tables[i]) - s.tables[i].IncrRef() - } - return out, func() error { - for _, t := range out { - if err := t.DecrRef(); err != nil { - return err - } - } - return nil - } - } - // For level >= 1, we can do a binary search as key range does not overlap. - idx := sort.Search(len(s.tables), func(i int) bool { - return y.CompareKeys(s.tables[i].Biggest(), key) >= 0 - }) - if idx >= len(s.tables) { - // Given key is strictly > than every element we have. - return nil, func() error { return nil } - } - tbl := s.tables[idx] - tbl.IncrRef() - return []*table.Table{tbl}, tbl.DecrRef -} - -// get returns value for a given key or the key after that. If not found, return nil. -func (s *levelHandler) get(key []byte) (y.ValueStruct, error) { - tables, decr := s.getTableForKey(key) - keyNoTs := y.ParseKey(key) - - var maxVs y.ValueStruct - for _, th := range tables { - if th.DoesNotHave(keyNoTs) { - y.NumLSMBloomHits.Add(s.strLevel, 1) - continue - } - - it := th.NewIterator(false) - defer it.Close() - - y.NumLSMGets.Add(s.strLevel, 1) - it.Seek(key) - if !it.Valid() { - continue - } - if y.SameKey(key, it.Key()) { - if version := y.ParseTs(it.Key()); maxVs.Version < version { - maxVs = it.Value() - maxVs.Version = version - } - } - } - return maxVs, decr() -} - -// appendIterators appends iterators to an array of iterators, for merging. -// Note: This obtains references for the table handlers. Remember to close these iterators. -func (s *levelHandler) appendIterators(iters []y.Iterator, opt *IteratorOptions) []y.Iterator { - s.RLock() - defer s.RUnlock() - - if s.level == 0 { - // Remember to add in reverse order! 
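getTableForKey's level >= 1 fast path depends on that ordering: sort.Search locates the single table whose biggest key is at or after the lookup key, and that table is the only possible home for it. A stripped-down sketch with stand-in types:

package main

import (
	"bytes"
	"fmt"
	"sort"
)

type tbl struct{ smallest, biggest []byte }

// candidate returns the index of the only table that can contain key on a
// level whose ranges are sorted and disjoint, or -1 if key is past them all.
func candidate(tables []tbl, key []byte) int {
	idx := sort.Search(len(tables), func(i int) bool {
		return bytes.Compare(tables[i].biggest, key) >= 0
	})
	if idx == len(tables) {
		return -1 // strictly greater than every range
	}
	return idx
}

func main() {
	tables := []tbl{
		{[]byte("a"), []byte("f")},
		{[]byte("g"), []byte("p")},
		{[]byte("q"), []byte("z")},
	}
	fmt.Println(candidate(tables, []byte("h")))  // 1
	fmt.Println(candidate(tables, []byte("zz"))) // -1
}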
- // The newer table at the end of s.tables should be added first as it takes precedence. - // Level 0 tables are not in key sorted order, so we need to consider them one by one. - var out []*table.Table - for _, t := range s.tables { - if opt.pickTable(t) { - out = append(out, t) - } - } - return appendIteratorsReversed(iters, out, opt.Reverse) - } - - tables := opt.pickTables(s.tables) - if len(tables) == 0 { - return iters - } - return append(iters, table.NewConcatIterator(tables, opt.Reverse)) -} - -type levelHandlerRLocked struct{} - -// overlappingTables returns the tables that intersect with key range. Returns a half-interval. -// This function should already have acquired a read lock, and this is so important the caller must -// pass an empty parameter declaring such. -func (s *levelHandler) overlappingTables(_ levelHandlerRLocked, kr keyRange) (int, int) { - if len(kr.left) == 0 || len(kr.right) == 0 { - return 0, 0 - } - left := sort.Search(len(s.tables), func(i int) bool { - return y.CompareKeys(kr.left, s.tables[i].Biggest()) <= 0 - }) - right := sort.Search(len(s.tables), func(i int) bool { - return y.CompareKeys(kr.right, s.tables[i].Smallest()) < 0 - }) - return left, right -} diff --git a/vendor/github.com/dgraph-io/badger/levels.go b/vendor/github.com/dgraph-io/badger/levels.go deleted file mode 100644 index 96f1264b..00000000 --- a/vendor/github.com/dgraph-io/badger/levels.go +++ /dev/null @@ -1,1092 +0,0 @@ -/* - * Copyright 2017 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package badger - -import ( - "bytes" - "fmt" - "math/rand" - "os" - "sort" - "strings" - "sync" - "sync/atomic" - "time" - - "golang.org/x/net/trace" - - "github.com/dgraph-io/badger/pb" - "github.com/dgraph-io/badger/table" - "github.com/dgraph-io/badger/y" - "github.com/pkg/errors" -) - -type levelsController struct { - nextFileID uint64 // Atomic - elog trace.EventLog - - // The following are initialized once and const. - levels []*levelHandler - kv *DB - - cstatus compactStatus -} - -var ( - // This is for getting timings between stalls. - lastUnstalled time.Time -) - -// revertToManifest checks that all necessary table files exist and removes all table files not -// referenced by the manifest. idMap is a set of table file id's that were read from the directory -// listing. -func revertToManifest(kv *DB, mf *Manifest, idMap map[uint64]struct{}) error { - // 1. Check all files in manifest exist. - for id := range mf.Tables { - if _, ok := idMap[id]; !ok { - return fmt.Errorf("file does not exist for table %d", id) - } - } - - // 2. Delete files that shouldn't exist. 
- for id := range idMap { - if _, ok := mf.Tables[id]; !ok { - kv.elog.Printf("Table file %d not referenced in MANIFEST\n", id) - filename := table.NewFilename(id, kv.opt.Dir) - if err := os.Remove(filename); err != nil { - return y.Wrapf(err, "While removing table %d", id) - } - } - } - - return nil -} - -func newLevelsController(db *DB, mf *Manifest) (*levelsController, error) { - y.AssertTrue(db.opt.NumLevelZeroTablesStall > db.opt.NumLevelZeroTables) - s := &levelsController{ - kv: db, - elog: db.elog, - levels: make([]*levelHandler, db.opt.MaxLevels), - } - s.cstatus.levels = make([]*levelCompactStatus, db.opt.MaxLevels) - - for i := 0; i < db.opt.MaxLevels; i++ { - s.levels[i] = newLevelHandler(db, i) - if i == 0 { - // Do nothing. - } else if i == 1 { - // Level 1 probably shouldn't be too much bigger than level 0. - s.levels[i].maxTotalSize = db.opt.LevelOneSize - } else { - s.levels[i].maxTotalSize = s.levels[i-1].maxTotalSize * int64(db.opt.LevelSizeMultiplier) - } - s.cstatus.levels[i] = new(levelCompactStatus) - } - - // Compare manifest against directory, check for existent/non-existent files, and remove. - if err := revertToManifest(db, mf, getIDMap(db.opt.Dir)); err != nil { - return nil, err - } - - // Some files may be deleted. Let's reload. - var flags uint32 = y.Sync - if db.opt.ReadOnly { - flags |= y.ReadOnly - } - - var mu sync.Mutex - tables := make([][]*table.Table, db.opt.MaxLevels) - var maxFileID uint64 - - // We found that using 3 goroutines allows disk throughput to be utilized to its max. - // Disk utilization is the main thing we should focus on, while trying to read the data. That's - // the one factor that remains constant between HDD and SSD. - throttle := y.NewThrottle(3) - - start := time.Now() - var numOpened int32 - tick := time.NewTicker(3 * time.Second) - defer tick.Stop() - - for fileID, tf := range mf.Tables { - fname := table.NewFilename(fileID, db.opt.Dir) - select { - case <-tick.C: - db.opt.Infof("%d tables out of %d opened in %s\n", atomic.LoadInt32(&numOpened), - len(mf.Tables), time.Since(start).Round(time.Millisecond)) - default: - } - if err := throttle.Do(); err != nil { - closeAllTables(tables) - return nil, err - } - if fileID > maxFileID { - maxFileID = fileID - } - go func(fname string, tf TableManifest) { - var rerr error - defer func() { - throttle.Done(rerr) - atomic.AddInt32(&numOpened, 1) - }() - fd, err := y.OpenExistingFile(fname, flags) - if err != nil { - rerr = errors.Wrapf(err, "Opening file: %q", fname) - return - } - - t, err := table.OpenTable(fd, db.opt.TableLoadingMode, tf.Checksum) - if err != nil { - if strings.HasPrefix(err.Error(), "CHECKSUM_MISMATCH:") { - db.opt.Errorf(err.Error()) - db.opt.Errorf("Ignoring table %s", fd.Name()) - // Do not set rerr. We will continue without this table. - } else { - rerr = errors.Wrapf(err, "Opening table: %q", fname) - } - return - } - - mu.Lock() - tables[tf.Level] = append(tables[tf.Level], t) - mu.Unlock() - }(fname, tf) - } - if err := throttle.Finish(); err != nil { - closeAllTables(tables) - return nil, err - } - db.opt.Infof("All %d tables opened in %s\n", atomic.LoadInt32(&numOpened), - time.Since(start).Round(time.Millisecond)) - s.nextFileID = maxFileID + 1 - for i, tbls := range tables { - s.levels[i].initTables(tbls) - } - - // Make sure key ranges do not overlap etc. 
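The maxTotalSize assignment above grows level budgets geometrically: level 1 gets LevelOneSize, and each deeper level is LevelSizeMultiplier times its parent. A quick worked example; the 256 MiB and 10x figures are assumed defaults for illustration, not values read from this code:

package main

import "fmt"

func main() {
	const (
		levelOneSize = int64(256 << 20) // assumed LevelOneSize
		multiplier   = int64(10)        // assumed LevelSizeMultiplier
		maxLevels    = 7
	)
	sizes := make([]int64, maxLevels)
	sizes[1] = levelOneSize
	for i := 2; i < maxLevels; i++ {
		sizes[i] = sizes[i-1] * multiplier
	}
	for i := 1; i < maxLevels; i++ {
		fmt.Printf("L%d budget: %d MiB\n", i, sizes[i]>>20)
	}
	// L1: 256, L2: 2560, L3: 25600, L4: 256000, L5: 2560000, L6: 25600000
}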
- if err := s.validate(); err != nil { - _ = s.cleanupLevels() - return nil, errors.Wrap(err, "Level validation") - } - - // Sync directory (because we have at least removed some files, or previously created the - // manifest file). - if err := syncDir(db.opt.Dir); err != nil { - _ = s.close() - return nil, err - } - - return s, nil -} - -// Closes the tables, for cleanup in newLevelsController. (We Close() instead of using DecrRef() -// because that would delete the underlying files.) We ignore errors, which is OK because tables -// are read-only. -func closeAllTables(tables [][]*table.Table) { - for _, tableSlice := range tables { - for _, table := range tableSlice { - _ = table.Close() - } - } -} - -func (s *levelsController) cleanupLevels() error { - var firstErr error - for _, l := range s.levels { - if err := l.close(); err != nil && firstErr == nil { - firstErr = err - } - } - return firstErr -} - -// dropTree picks all tables from all levels, creates a manifest changeset, -// applies it, and then decrements the refs of these tables, which would result -// in their deletion. -func (s *levelsController) dropTree() (int, error) { - // First pick all tables, so we can create a manifest changelog. - var all []*table.Table - for _, l := range s.levels { - l.RLock() - all = append(all, l.tables...) - l.RUnlock() - } - if len(all) == 0 { - return 0, nil - } - - // Generate the manifest changes. - changes := []*pb.ManifestChange{} - for _, table := range all { - changes = append(changes, newDeleteChange(table.ID())) - } - changeSet := pb.ManifestChangeSet{Changes: changes} - if err := s.kv.manifest.addChanges(changeSet.Changes); err != nil { - return 0, err - } - - // Now that manifest has been successfully written, we can delete the tables. - for _, l := range s.levels { - l.Lock() - l.totalSize = 0 - l.tables = l.tables[:0] - l.Unlock() - } - for _, table := range all { - if err := table.DecrRef(); err != nil { - return 0, err - } - } - return len(all), nil -} - -// dropPrefix runs a L0->L1 compaction, and then runs same level compaction on the rest of the -// levels. For L0->L1 compaction, it runs compactions normally, but skips over all the keys with the -// provided prefix and also the internal move keys for the same prefix. -// For Li->Li compactions, it picks up the tables which would have the prefix. The -// tables who only have keys with this prefix are quickly dropped. The ones which have other keys -// are run through MergeIterator and compacted to create new tables. All the mechanisms of -// compactions apply, i.e. level sizes and MANIFEST are updated as in the normal flow. -func (s *levelsController) dropPrefixes(prefixes [][]byte) error { - // Internal move keys related to the given prefix should also be skipped. - for _, prefix := range prefixes { - key := make([]byte, 0, len(badgerMove)+len(prefix)) - key = append(key, badgerMove...) - key = append(key, prefix...) - prefixes = append(prefixes, key) - } - - opt := s.kv.opt - // Iterate levels in the reverse order because if we were to iterate from - // lower level (say level 0) to a higher level (say level 3) we could have - // a state in which level 0 is compacted and an older version of a key exists in lower level. - // At this point, if someone creates an iterator, they would see an old - // value for a key from lower levels. Iterating in reverse order ensures we - // drop the oldest data first so that lookups never return stale data. 
- for i := len(s.levels) - 1; i >= 0; i-- { - l := s.levels[i] - - l.RLock() - if l.level == 0 { - size := len(l.tables) - l.RUnlock() - - if size > 0 { - cp := compactionPriority{ - level: 0, - score: 1.74, - // A unique number greater than 1.0 does two things. Helps identify this - // function in logs, and forces a compaction. - dropPrefixes: prefixes, - } - if err := s.doCompact(cp); err != nil { - opt.Warningf("While compacting level 0: %v", err) - return nil - } - } - continue - } - - // Build a list of compaction tableGroups affecting all the prefixes we - // need to drop. We need to build tableGroups that satisfy the invariant that - // bottom tables are consecutive. - // tableGroup contains groups of consecutive tables. - var tableGroups [][]*table.Table - var tableGroup []*table.Table - - finishGroup := func() { - if len(tableGroup) > 0 { - tableGroups = append(tableGroups, tableGroup) - tableGroup = nil - } - } - - for _, table := range l.tables { - if containsAnyPrefixes(table.Smallest(), table.Biggest(), prefixes) { - tableGroup = append(tableGroup, table) - } else { - finishGroup() - } - } - finishGroup() - - l.RUnlock() - - if len(tableGroups) == 0 { - continue - } - - opt.Infof("Dropping prefix at level %d (%d tableGroups)", l.level, len(tableGroups)) - for _, operation := range tableGroups { - cd := compactDef{ - elog: trace.New(fmt.Sprintf("Badger.L%d", l.level), "Compact"), - thisLevel: l, - nextLevel: l, - top: nil, - bot: operation, - dropPrefixes: prefixes, - } - if err := s.runCompactDef(l.level, cd); err != nil { - opt.Warningf("While running compact def: %+v. Error: %v", cd, err) - return err - } - } - } - return nil -} - -func (s *levelsController) startCompact(lc *y.Closer) { - n := s.kv.opt.NumCompactors - lc.AddRunning(n - 1) - for i := 0; i < n; i++ { - go s.runWorker(lc) - } -} - -func (s *levelsController) runWorker(lc *y.Closer) { - defer lc.Done() - - randomDelay := time.NewTimer(time.Duration(rand.Int31n(1000)) * time.Millisecond) - select { - case <-randomDelay.C: - case <-lc.HasBeenClosed(): - randomDelay.Stop() - return - } - - ticker := time.NewTicker(time.Second) - defer ticker.Stop() - - for { - select { - // Can add a done channel or other stuff. - case <-ticker.C: - prios := s.pickCompactLevels() - for _, p := range prios { - if err := s.doCompact(p); err == nil { - break - } else if err == errFillTables { - // pass - } else { - s.kv.opt.Warningf("While running doCompact: %v\n", err) - } - } - case <-lc.HasBeenClosed(): - return - } - } -} - -// Returns true if level zero may be compacted, without accounting for compactions that already -// might be happening. -func (s *levelsController) isLevel0Compactable() bool { - return s.levels[0].numTables() >= s.kv.opt.NumLevelZeroTables -} - -// Returns true if the non-zero level may be compacted. delSize provides the size of the tables -// which are currently being compacted so that we treat them as already having started being -// compacted (because they have been, yet their size is already counted in getTotalSize). -func (l *levelHandler) isCompactable(delSize int64) bool { - return l.getTotalSize()-delSize >= l.maxTotalSize -} - -type compactionPriority struct { - level int - score float64 - dropPrefixes [][]byte -} - -// pickCompactLevel determines which level to compact. 
-// Based on: https://github.com/facebook/rocksdb/wiki/Leveled-Compaction -func (s *levelsController) pickCompactLevels() (prios []compactionPriority) { - // This function must use identical criteria for guaranteeing compaction's progress that - // addLevel0Table uses. - - // cstatus is checked to see if level 0's tables are already being compacted - if !s.cstatus.overlapsWith(0, infRange) && s.isLevel0Compactable() { - pri := compactionPriority{ - level: 0, - score: float64(s.levels[0].numTables()) / float64(s.kv.opt.NumLevelZeroTables), - } - prios = append(prios, pri) - } - - for i, l := range s.levels[1:] { - // Don't consider those tables that are already being compacted right now. - delSize := s.cstatus.delSize(i + 1) - - if l.isCompactable(delSize) { - pri := compactionPriority{ - level: i + 1, - score: float64(l.getTotalSize()-delSize) / float64(l.maxTotalSize), - } - prios = append(prios, pri) - } - } - sort.Slice(prios, func(i, j int) bool { - return prios[i].score > prios[j].score - }) - return prios -} - -// checkOverlap checks if the given tables overlap with any level from the given "lev" onwards. -func (s *levelsController) checkOverlap(tables []*table.Table, lev int) bool { - kr := getKeyRange(tables...) - for i, lh := range s.levels { - if i < lev { // Skip upper levels. - continue - } - lh.RLock() - left, right := lh.overlappingTables(levelHandlerRLocked{}, kr) - lh.RUnlock() - if right-left > 0 { - return true - } - } - return false -} - -// compactBuildTables merges topTables and botTables to form a list of new tables. -func (s *levelsController) compactBuildTables( - lev int, cd compactDef) ([]*table.Table, func() error, error) { - topTables := cd.top - botTables := cd.bot - - // Check overlap of the top level with the levels which are not being - // compacted in this compaction. - hasOverlap := s.checkOverlap(cd.allTables(), cd.nextLevel.level+1) - - // Try to collect stats so that we can inform value log about GC. That would help us find which - // value log file should be GCed. - discardStats := make(map[uint32]int64) - updateStats := func(vs y.ValueStruct) { - if vs.Meta&bitValuePointer > 0 { - var vp valuePointer - vp.Decode(vs.Value) - discardStats[vp.Fid] += int64(vp.Len) - } - } - - // Create iterators across all the tables involved first. - var iters []y.Iterator - if lev == 0 { - iters = appendIteratorsReversed(iters, topTables, false) - } else if len(topTables) > 0 { - y.AssertTrue(len(topTables) == 1) - iters = []y.Iterator{topTables[0].NewIterator(false)} - } - - // Next level has level>=1 and we can use ConcatIterator as key ranges do not overlap. - var valid []*table.Table - -nextTable: - for _, table := range botTables { - if len(cd.dropPrefixes) > 0 { - for _, prefix := range cd.dropPrefixes { - if bytes.HasPrefix(table.Smallest(), prefix) && - bytes.HasPrefix(table.Biggest(), prefix) { - // All the keys in this table have the dropPrefix. So, this - // table does not need to be in the iterator and can be - // dropped immediately. - continue nextTable - } - } - } - valid = append(valid, table) - } - iters = append(iters, table.NewConcatIterator(valid, false)) - it := table.NewMergeIterator(iters, false) - defer it.Close() // Important to close the iterator to do ref counting. - - it.Rewind() - - // Pick a discard ts, so we can discard versions below this ts. We should - // never discard any versions starting from above this timestamp, because - // that would affect the snapshot view guarantee provided by transactions. 
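pickCompactLevels above reduces to one scoring rule: level 0 is scored by table count against NumLevelZeroTables, deeper levels by live bytes against their size budget, and the highest score is compacted first. A standalone restatement on toy numbers:

package main

import (
	"fmt"
	"sort"
)

type prio struct {
	level int
	score float64
}

func main() {
	// Assumed knobs and sizes, for illustration only.
	const numLevelZeroTables = 5
	l0Tables := 7
	budgets := map[int]int64{1: 256 << 20, 2: 2560 << 20}
	sizes := map[int]int64{1: 300 << 20, 2: 1000 << 20}

	var prios []prio
	if l0Tables >= numLevelZeroTables {
		prios = append(prios, prio{0, float64(l0Tables) / numLevelZeroTables})
	}
	for lvl, size := range sizes {
		if size >= budgets[lvl] { // over budget: eligible
			prios = append(prios, prio{lvl, float64(size) / float64(budgets[lvl])})
		}
	}
	sort.Slice(prios, func(i, j int) bool { return prios[i].score > prios[j].score })
	fmt.Println(prios) // [{0 1.4} {1 1.171875}]
}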
-	discardTs := s.kv.orc.discardAtOrBelow()
-
-	// Start generating new tables.
-	type newTableResult struct {
-		table *table.Table
-		err   error
-	}
-	resultCh := make(chan newTableResult)
-	var numBuilds, numVersions int
-	var lastKey, skipKey []byte
-	for it.Valid() {
-		timeStart := time.Now()
-		builder := table.NewTableBuilder()
-		var numKeys, numSkips uint64
-		for ; it.Valid(); it.Next() {
-			// See if we need to skip the prefix.
-			if len(cd.dropPrefixes) > 0 && hasAnyPrefixes(it.Key(), cd.dropPrefixes) {
-				numSkips++
-				updateStats(it.Value())
-				continue
-			}
-
-			// See if we need to skip this key.
-			if len(skipKey) > 0 {
-				if y.SameKey(it.Key(), skipKey) {
-					numSkips++
-					updateStats(it.Value())
-					continue
-				} else {
-					skipKey = skipKey[:0]
-				}
-			}
-
-			if !y.SameKey(it.Key(), lastKey) {
-				if builder.ReachedCapacity(s.kv.opt.MaxTableSize) {
-					// Only break if we are on a different key, and have reached capacity. We want
-					// to ensure that all versions of the key are stored in the same sstable, and
-					// not divided across multiple tables at the same level.
-					break
-				}
-				lastKey = y.SafeCopy(lastKey, it.Key())
-				numVersions = 0
-			}
-
-			vs := it.Value()
-			version := y.ParseTs(it.Key())
-			// Do not discard entries inserted by the merge operator. These entries will be
-			// discarded once they're merged.
-			if version <= discardTs && vs.Meta&bitMergeEntry == 0 {
-				// Keep track of the number of versions encountered for this key. Only consider the
-				// versions which are below the minReadTs, otherwise, we might end up discarding the
-				// only valid version for a running transaction.
-				numVersions++
-
-				// Keep the current version and discard all the next versions if
-				// - The `discardEarlierVersions` bit is set OR
-				// - We've already processed `NumVersionsToKeep` number of versions
-				// (including the current item being processed)
-				lastValidVersion := vs.Meta&bitDiscardEarlierVersions > 0 ||
-					numVersions == s.kv.opt.NumVersionsToKeep
-
-				isExpired := isDeletedOrExpired(vs.Meta, vs.ExpiresAt)
-
-				if isExpired || lastValidVersion {
-					// If this version of the key is deleted or expired, skip all the rest of the
-					// versions. Ensure that we're only removing versions below readTs.
-					skipKey = y.SafeCopy(skipKey, it.Key())
-
-					switch {
-					// Add the key to the table only if it has not expired.
-					// We don't want to add the deleted/expired keys.
-					case !isExpired && lastValidVersion:
-						// Add this key. We have set skipKey, so the following key versions
-						// would be skipped.
-					case hasOverlap:
-						// If this key range has overlap with lower levels, then keep the deletion
-						// marker with the latest version, discarding the rest. We have set skipKey,
-						// so the following key versions would be skipped.
-					default:
-						// If no overlap, we can skip all the versions, by continuing here.
-						numSkips++
-						updateStats(vs)
-						continue // Skip adding this key.
-					}
-				}
-			}
-			numKeys++
-			builder.Add(it.Key(), it.Value())
-		}
-		// It was true that it.Valid() at least once in the loop above, which means we
-		// called Add() at least once, and builder is not Empty().
-		s.kv.opt.Debugf("LOG Compact. Added %d keys. Skipped %d keys. Iteration took: %v",
-			numKeys, numSkips, time.Since(timeStart))
-		if !builder.Empty() {
-			numBuilds++
-			fileID := s.reserveFileID()
-			go func(builder *table.Builder) {
-				defer builder.Close()
-
-				fd, err := y.CreateSyncedFile(table.NewFilename(fileID, s.kv.opt.Dir), true)
-				if err != nil {
-					resultCh <- newTableResult{nil, errors.Wrapf(err, "While opening new table: %d", fileID)}
-					return
-				}
-
-				if _, err := fd.Write(builder.Finish()); err != nil {
-					resultCh <- newTableResult{nil, errors.Wrapf(err, "Unable to write to file: %d", fileID)}
-					return
-				}
-
-				tbl, err := table.OpenTable(fd, s.kv.opt.TableLoadingMode, nil)
-				// decrRef is added below.
-				resultCh <- newTableResult{tbl, errors.Wrapf(err, "Unable to open table: %q", fd.Name())}
-			}(builder)
-		}
-	}
-
-	newTables := make([]*table.Table, 0, 20)
-	// Wait for all table builders to finish.
-	var firstErr error
-	for x := 0; x < numBuilds; x++ {
-		res := <-resultCh
-		newTables = append(newTables, res.table)
-		if firstErr == nil {
-			firstErr = res.err
-		}
-	}
-
-	if firstErr == nil {
-		// Ensure created files' directory entries are visible. We don't mind the extra latency
-		// from not doing this ASAP after all file creation has finished because this is a
-		// background operation.
-		firstErr = syncDir(s.kv.opt.Dir)
-	}
-
-	if firstErr != nil {
-		// An error happened. Delete all the newly created table files (by calling DecrRef
-		// -- we're the only holders of a ref).
-		for j := 0; j < numBuilds; j++ {
-			if newTables[j] != nil {
-				_ = newTables[j].DecrRef()
-			}
-		}
-		errorReturn := errors.Wrapf(firstErr, "While running compaction for: %+v", cd)
-		return nil, nil, errorReturn
-	}
-
-	sort.Slice(newTables, func(i, j int) bool {
-		return y.CompareKeys(newTables[i].Biggest(), newTables[j].Biggest()) < 0
-	})
-	s.kv.vlog.updateDiscardStats(discardStats)
-	s.kv.opt.Debugf("Discard stats: %v", discardStats)
-	return newTables, func() error { return decrRefs(newTables) }, nil
-}
-
-func buildChangeSet(cd *compactDef, newTables []*table.Table) pb.ManifestChangeSet {
-	changes := []*pb.ManifestChange{}
-	for _, table := range newTables {
-		changes = append(changes,
-			newCreateChange(table.ID(), cd.nextLevel.level, table.Checksum))
-	}
-	for _, table := range cd.top {
-		changes = append(changes, newDeleteChange(table.ID()))
-	}
-	for _, table := range cd.bot {
-		changes = append(changes, newDeleteChange(table.ID()))
-	}
-	return pb.ManifestChangeSet{Changes: changes}
-}
-
-func hasAnyPrefixes(s []byte, listOfPrefixes [][]byte) bool {
-	for _, prefix := range listOfPrefixes {
-		if bytes.HasPrefix(s, prefix) {
-			return true
-		}
-	}
-
-	return false
-}
-
-func containsPrefix(smallValue, largeValue, prefix []byte) bool {
-	if bytes.HasPrefix(smallValue, prefix) {
-		return true
-	}
-	if bytes.HasPrefix(largeValue, prefix) {
-		return true
-	}
-	if bytes.Compare(prefix, smallValue) > 0 &&
-		bytes.Compare(prefix, largeValue) < 0 {
-		return true
-	}
-
-	return false
-}
-
-func containsAnyPrefixes(smallValue, largeValue []byte, listOfPrefixes [][]byte) bool {
-	for _, prefix := range listOfPrefixes {
-		if containsPrefix(smallValue, largeValue, prefix) {
-			return true
-		}
-	}
-
-	return false
-}
-
-type compactDef struct {
-	elog trace.Trace
-
-	thisLevel *levelHandler
-	nextLevel *levelHandler
-
-	top []*table.Table
-	bot []*table.Table
-
-	thisRange keyRange
-	nextRange keyRange
-
-	thisSize int64
-
-	dropPrefixes [][]byte
-}
-
-func (cd *compactDef) lockLevels() {
-	cd.thisLevel.RLock()
-	cd.nextLevel.RLock()
-}
-
-func (cd *compactDef) unlockLevels() {
-	cd.nextLevel.RUnlock()
-	cd.thisLevel.RUnlock()
-}
-
-func (cd *compactDef) allTables() []*table.Table {
-	ret := make([]*table.Table, 0, len(cd.top)+len(cd.bot))
-	ret = append(ret, cd.top...)
-	ret = append(ret, cd.bot...)
-	return ret
-}
-
-func (s *levelsController) fillTablesL0(cd *compactDef) bool {
-	cd.lockLevels()
-	defer cd.unlockLevels()
-
-	cd.top = make([]*table.Table, len(cd.thisLevel.tables))
-	copy(cd.top, cd.thisLevel.tables)
-	if len(cd.top) == 0 {
-		return false
-	}
-	cd.thisRange = infRange
-
-	kr := getKeyRange(cd.top...)
-	left, right := cd.nextLevel.overlappingTables(levelHandlerRLocked{}, kr)
-	cd.bot = make([]*table.Table, right-left)
-	copy(cd.bot, cd.nextLevel.tables[left:right])
-
-	if len(cd.bot) == 0 {
-		cd.nextRange = kr
-	} else {
-		cd.nextRange = getKeyRange(cd.bot...)
-	}
-
-	if !s.cstatus.compareAndAdd(thisAndNextLevelRLocked{}, *cd) {
-		return false
-	}
-
-	return true
-}
-
-// sortByOverlap sorts tables in increasing order of overlap with next level.
-func (s *levelsController) sortByOverlap(tables []*table.Table, cd *compactDef) {
-	if len(tables) == 0 || cd.nextLevel == nil {
-		return
-	}
-
-	tableOverlap := make([]int, len(tables))
-	for i := range tables {
-		// get key range for table
-		tableRange := getKeyRange(tables[i])
-		// get overlap with next level
-		left, right := cd.nextLevel.overlappingTables(levelHandlerRLocked{}, tableRange)
-		tableOverlap[i] = right - left
-	}
-
-	sort.Slice(tables, func(i, j int) bool {
-		return tableOverlap[i] < tableOverlap[j]
-	})
-}
-
-func (s *levelsController) fillTables(cd *compactDef) bool {
-	cd.lockLevels()
-	defer cd.unlockLevels()
-
-	tables := make([]*table.Table, len(cd.thisLevel.tables))
-	copy(tables, cd.thisLevel.tables)
-	if len(tables) == 0 {
-		return false
-	}
-
-	// We want to pick files from the current level in order of increasing overlap with next-level
-	// tables. The idea is to first compact the file from the current level which has the least
-	// overlap with the next level, as this gives better write amplification.
-	s.sortByOverlap(tables, cd)
-
-	for _, t := range tables {
-		cd.thisSize = t.Size()
-		cd.thisRange = getKeyRange(t)
-		if s.cstatus.overlapsWith(cd.thisLevel.level, cd.thisRange) {
-			continue
-		}
-		cd.top = []*table.Table{t}
-		left, right := cd.nextLevel.overlappingTables(levelHandlerRLocked{}, cd.thisRange)
-
-		cd.bot = make([]*table.Table, right-left)
-		copy(cd.bot, cd.nextLevel.tables[left:right])
-
-		if len(cd.bot) == 0 {
-			cd.bot = []*table.Table{}
-			cd.nextRange = cd.thisRange
-			if !s.cstatus.compareAndAdd(thisAndNextLevelRLocked{}, *cd) {
-				continue
-			}
-			return true
-		}
-		cd.nextRange = getKeyRange(cd.bot...)
-
-		if s.cstatus.overlapsWith(cd.nextLevel.level, cd.nextRange) {
-			continue
-		}
-		if !s.cstatus.compareAndAdd(thisAndNextLevelRLocked{}, *cd) {
-			continue
-		}
-		return true
-	}
-	return false
-}
-
-func (s *levelsController) runCompactDef(l int, cd compactDef) (err error) {
-	timeStart := time.Now()
-
-	thisLevel := cd.thisLevel
-	nextLevel := cd.nextLevel
-
-	// Table should never be moved directly between levels, always be rewritten to allow discarding
-	// invalid versions.
-
-	newTables, decr, err := s.compactBuildTables(l, cd)
-	if err != nil {
-		return err
-	}
-	defer func() {
-		// Only assign to err, if it's not already nil.
-		if decErr := decr(); err == nil {
-			err = decErr
-		}
-	}()
-	changeSet := buildChangeSet(&cd, newTables)
-
-	// We write to the manifest _before_ we delete files (and after we created files)
-	if err := s.kv.manifest.addChanges(changeSet.Changes); err != nil {
-		return err
-	}
-
-	// See comment earlier in this function about the ordering of these ops, and the order in which
-	// we access levels when reading.
-	if err := nextLevel.replaceTables(cd.bot, newTables); err != nil {
-		return err
-	}
-	if err := thisLevel.deleteTables(cd.top); err != nil {
-		return err
-	}
-
-	// Note: For level 0, while doCompact is running, it is possible that new tables are added.
-	// However, the tables are added only to the end, so it is ok to just delete the first table.
-
-	s.kv.opt.Infof("LOG Compact %d->%d, del %d tables, add %d tables, took %v\n",
-		thisLevel.level, nextLevel.level, len(cd.top)+len(cd.bot),
-		len(newTables), time.Since(timeStart))
-	return nil
-}
-
-var errFillTables = errors.New("Unable to fill tables")
-
-// doCompact picks some table on level l and compacts it away to the next level.
-func (s *levelsController) doCompact(p compactionPriority) error {
-	l := p.level
-	y.AssertTrue(l+1 < s.kv.opt.MaxLevels) // Sanity check.
-
-	cd := compactDef{
-		elog:         trace.New(fmt.Sprintf("Badger.L%d", l), "Compact"),
-		thisLevel:    s.levels[l],
-		nextLevel:    s.levels[l+1],
-		dropPrefixes: p.dropPrefixes,
-	}
-	cd.elog.SetMaxEvents(100)
-	defer cd.elog.Finish()
-
-	s.kv.opt.Infof("Got compaction priority: %+v", p)
-
-	// While picking tables to be compacted, both levels' tables are expected to
-	// remain unchanged.
-	if l == 0 {
-		if !s.fillTablesL0(&cd) {
-			return errFillTables
-		}
-
-	} else {
-		if !s.fillTables(&cd) {
-			return errFillTables
-		}
-	}
-	defer s.cstatus.delete(cd) // Remove the ranges from compaction status.
-
-	s.kv.opt.Infof("Running for level: %d\n", cd.thisLevel.level)
-	s.cstatus.toLog(cd.elog)
-	if err := s.runCompactDef(l, cd); err != nil {
-		// This compaction couldn't be done successfully.
-		s.kv.opt.Warningf("LOG Compact FAILED with error: %+v: %+v", err, cd)
-		return err
-	}
-
-	s.cstatus.toLog(cd.elog)
-	s.kv.opt.Infof("Compaction for level: %d DONE", cd.thisLevel.level)
-	return nil
-}
-
-func (s *levelsController) addLevel0Table(t *table.Table) error {
-	// We update the manifest _before_ the table becomes part of a levelHandler, because at that
-	// point it could get used in some compaction. This ensures the manifest file gets updated in
-	// the proper order. (That means this update happens before that of some compaction which
-	// deletes the table.)
-	err := s.kv.manifest.addChanges([]*pb.ManifestChange{
-		newCreateChange(t.ID(), 0, t.Checksum),
-	})
-	if err != nil {
-		return err
-	}
-
-	for !s.levels[0].tryAddLevel0Table(t) {
-		// Stall. Make sure all levels are healthy before we unstall.
-		var timeStart time.Time
-		{
-			s.elog.Printf("STALLED STALLED STALLED: %v\n", time.Since(lastUnstalled))
-			s.cstatus.RLock()
-			for i := 0; i < s.kv.opt.MaxLevels; i++ {
-				s.elog.Printf("level=%d. Status=%s Size=%d\n",
-					i, s.cstatus.levels[i].debug(), s.levels[i].getTotalSize())
-			}
-			s.cstatus.RUnlock()
-			timeStart = time.Now()
-		}
-		// Before we unstall, we need to make sure that level 0 and 1 are healthy. Otherwise, we
-		// will very quickly fill up level 0 again and if the compaction strategy favors level 0,
-		// then level 1 is going to be super full.
- for i := 0; ; i++ { - // Passing 0 for delSize to compactable means we're treating incomplete compactions as - // not having finished -- we wait for them to finish. Also, it's crucial this behavior - // replicates pickCompactLevels' behavior in computing compactability in order to - // guarantee progress. - if !s.isLevel0Compactable() && !s.levels[1].isCompactable(0) { - break - } - time.Sleep(10 * time.Millisecond) - if i%100 == 0 { - prios := s.pickCompactLevels() - s.elog.Printf("Waiting to add level 0 table. Compaction priorities: %+v\n", prios) - i = 0 - } - } - { - s.elog.Printf("UNSTALLED UNSTALLED UNSTALLED: %v\n", time.Since(timeStart)) - lastUnstalled = time.Now() - } - } - - return nil -} - -func (s *levelsController) close() error { - err := s.cleanupLevels() - return errors.Wrap(err, "levelsController.Close") -} - -// get returns the found value if any. If not found, we return nil. -func (s *levelsController) get(key []byte, maxVs *y.ValueStruct) (y.ValueStruct, error) { - // It's important that we iterate the levels from 0 on upward. The reason is, if we iterated - // in opposite order, or in parallel (naively calling all the h.RLock() in some order) we could - // read level L's tables post-compaction and level L+1's tables pre-compaction. (If we do - // parallelize this, we will need to call the h.RLock() function by increasing order of level - // number.) - version := y.ParseTs(key) - for _, h := range s.levels { - vs, err := h.get(key) // Calls h.RLock() and h.RUnlock(). - if err != nil { - return y.ValueStruct{}, errors.Wrapf(err, "get key: %q", key) - } - if vs.Value == nil && vs.Meta == 0 { - continue - } - if maxVs == nil || vs.Version == version { - return vs, nil - } - if maxVs.Version < vs.Version { - *maxVs = vs - } - } - if maxVs != nil { - return *maxVs, nil - } - return y.ValueStruct{}, nil -} - -func appendIteratorsReversed(out []y.Iterator, th []*table.Table, reversed bool) []y.Iterator { - for i := len(th) - 1; i >= 0; i-- { - // This will increment the reference of the table handler. - out = append(out, th[i].NewIterator(reversed)) - } - return out -} - -// appendIterators appends iterators to an array of iterators, for merging. -// Note: This obtains references for the table handlers. Remember to close these iterators. -func (s *levelsController) appendIterators( - iters []y.Iterator, opt *IteratorOptions) []y.Iterator { - // Just like with get, it's important we iterate the levels from 0 on upward, to avoid missing - // data when there's a compaction. - for _, level := range s.levels { - iters = level.appendIterators(iters, opt) - } - return iters -} - -// TableInfo represents the information about a table. 
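The probe order in levelsController.get above is load-bearing: levels are consulted from 0 downward, so a newer version in a shallow level shadows any older copy below it. A toy illustration of that shadowing:

package main

import "fmt"

func main() {
	// levels[0] is newest; a hit in a shallower level wins.
	levels := []map[string]string{
		{"a": "a-v2"},              // L0: fresh rewrite of "a"
		{"a": "a-v1", "b": "b-v1"}, // L1: older data
	}
	get := func(key string) (string, bool) {
		for _, lvl := range levels {
			if v, ok := lvl[key]; ok {
				return v, true
			}
		}
		return "", false
	}
	fmt.Println(get("a")) // a-v2 true: L0 shadows L1
	fmt.Println(get("b")) // b-v1 true: falls through to L1
}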
-type TableInfo struct { - ID uint64 - Level int - Left []byte - Right []byte - KeyCount uint64 // Number of keys in the table -} - -func (s *levelsController) getTableInfo(withKeysCount bool) (result []TableInfo) { - for _, l := range s.levels { - l.RLock() - for _, t := range l.tables { - var count uint64 - if withKeysCount { - it := t.NewIterator(false) - for it.Rewind(); it.Valid(); it.Next() { - count++ - } - it.Close() - } - - info := TableInfo{ - ID: t.ID(), - Level: l.level, - Left: t.Smallest(), - Right: t.Biggest(), - KeyCount: count, - } - result = append(result, info) - } - l.RUnlock() - } - sort.Slice(result, func(i, j int) bool { - if result[i].Level != result[j].Level { - return result[i].Level < result[j].Level - } - return result[i].ID < result[j].ID - }) - return -} diff --git a/vendor/github.com/dgraph-io/badger/logger.go b/vendor/github.com/dgraph-io/badger/logger.go deleted file mode 100644 index 3a9b8a33..00000000 --- a/vendor/github.com/dgraph-io/badger/logger.go +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Copyright 2018 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package badger - -import ( - "log" - "os" -) - -// Logger is implemented by any logging system that is used for standard logs. -type Logger interface { - Errorf(string, ...interface{}) - Warningf(string, ...interface{}) - Infof(string, ...interface{}) - Debugf(string, ...interface{}) -} - -// Errorf logs an ERROR log message to the logger specified in opts or to the -// global logger if no logger is specified in opts. -func (opt *Options) Errorf(format string, v ...interface{}) { - if opt.Logger == nil { - return - } - opt.Logger.Errorf(format, v...) -} - -// Infof logs an INFO message to the logger specified in opts. -func (opt *Options) Infof(format string, v ...interface{}) { - if opt.Logger == nil { - return - } - opt.Logger.Infof(format, v...) -} - -// Warningf logs a WARNING message to the logger specified in opts. -func (opt *Options) Warningf(format string, v ...interface{}) { - if opt.Logger == nil { - return - } - opt.Logger.Warningf(format, v...) -} - -// Debugf logs a DEBUG message to the logger specified in opts. -func (opt *Options) Debugf(format string, v ...interface{}) { - if opt.Logger == nil { - return - } - opt.Logger.Debugf(format, v...) -} - -type defaultLog struct { - *log.Logger -} - -var defaultLogger = &defaultLog{Logger: log.New(os.Stderr, "badger ", log.LstdFlags)} - -func (l *defaultLog) Errorf(f string, v ...interface{}) { - l.Printf("ERROR: "+f, v...) -} - -func (l *defaultLog) Warningf(f string, v ...interface{}) { - l.Printf("WARNING: "+f, v...) -} - -func (l *defaultLog) Infof(f string, v ...interface{}) { - l.Printf("INFO: "+f, v...) -} - -func (l *defaultLog) Debugf(f string, v ...interface{}) { - l.Printf("DEBUG: "+f, v...) 
-}
diff --git a/vendor/github.com/dgraph-io/badger/managed_db.go b/vendor/github.com/dgraph-io/badger/managed_db.go
deleted file mode 100644
index 61e6b3cc..00000000
--- a/vendor/github.com/dgraph-io/badger/managed_db.go
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-// OpenManaged returns a new DB, which allows more control over setting
-// transaction timestamps, aka managed mode.
-//
-// This is only useful for databases built on top of Badger (like Dgraph), and
-// can be ignored by most users.
-func OpenManaged(opts Options) (*DB, error) {
-	opts.managedTxns = true
-	return Open(opts)
-}
-
-// NewTransactionAt follows the same logic as DB.NewTransaction(), but uses the
-// provided read timestamp.
-//
-// This is only useful for databases built on top of Badger (like Dgraph), and
-// can be ignored by most users.
-func (db *DB) NewTransactionAt(readTs uint64, update bool) *Txn {
-	if !db.opt.managedTxns {
-		panic("Cannot use NewTransactionAt with managedDB=false. Use NewTransaction instead.")
-	}
-	txn := db.newTransaction(update, true)
-	txn.readTs = readTs
-	return txn
-}
-
-// NewWriteBatchAt is similar to NewWriteBatch but it allows the user to set the commit timestamp.
-// NewWriteBatchAt is supposed to be used only in the managed mode.
-func (db *DB) NewWriteBatchAt(commitTs uint64) *WriteBatch {
-	if !db.opt.managedTxns {
-		panic("cannot use NewWriteBatchAt with managedDB=false. Use NewWriteBatch instead")
-	}
-
-	wb := db.newWriteBatch()
-	wb.commitTs = commitTs
-	wb.txn.commitTs = commitTs
-	return wb
-}
-
-// CommitAt commits the transaction, following the same logic as Commit(), but
-// at the given commit timestamp. This will panic if not used with managed transactions.
-//
-// This is only useful for databases built on top of Badger (like Dgraph), and
-// can be ignored by most users.
-func (txn *Txn) CommitAt(commitTs uint64, callback func(error)) error {
-	if !txn.db.opt.managedTxns {
-		panic("Cannot use CommitAt with managedDB=false. Use Commit instead.")
-	}
-	txn.commitTs = commitTs
-	if callback == nil {
-		return txn.Commit()
-	}
-	txn.CommitWith(callback)
-	return nil
-}
-
-// SetDiscardTs sets a timestamp at or below which any invalid or deleted
-// versions can be discarded from the LSM tree, and thence from the value log to
-// reclaim disk space. Can only be used with managed transactions.
-func (db *DB) SetDiscardTs(ts uint64) {
-	if !db.opt.managedTxns {
-		panic("Cannot use SetDiscardTs with managedDB=false.")
-	}
-	db.orc.setDiscardTs(ts)
-}
diff --git a/vendor/github.com/dgraph-io/badger/manifest.go b/vendor/github.com/dgraph-io/badger/manifest.go
deleted file mode 100644
index 5a2e837e..00000000
--- a/vendor/github.com/dgraph-io/badger/manifest.go
+++ /dev/null
@@ -1,456 +0,0 @@
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-import (
-	"bufio"
-	"bytes"
-	"encoding/binary"
-	"fmt"
-	"hash/crc32"
-	"io"
-	"os"
-	"path/filepath"
-	"sync"
-
-	"github.com/dgraph-io/badger/pb"
-	"github.com/dgraph-io/badger/y"
-	"github.com/golang/protobuf/proto"
-	"github.com/pkg/errors"
-)
-
-// Manifest represents the contents of the MANIFEST file in a Badger store.
-//
-// The MANIFEST file describes the startup state of the db -- all LSM files and what level they're
-// at.
-//
-// It consists of a sequence of ManifestChangeSet objects. Each of these is treated atomically,
-// and contains a sequence of ManifestChange's (file creations/deletions) which we use to
-// reconstruct the manifest at startup.
-type Manifest struct {
-	Levels []levelManifest
-	Tables map[uint64]TableManifest
-
-	// Contains total number of creation and deletion changes in the manifest -- used to compute
-	// whether it'd be useful to rewrite the manifest.
-	Creations int
-	Deletions int
-}
-
-func createManifest() Manifest {
-	levels := make([]levelManifest, 0)
-	return Manifest{
-		Levels: levels,
-		Tables: make(map[uint64]TableManifest),
-	}
-}
-
-// levelManifest contains information about LSM tree levels
-// in the MANIFEST file.
-type levelManifest struct {
-	Tables map[uint64]struct{} // Set of table id's
-}
-
-// TableManifest contains information about a specific table
-// in the LSM tree.
-type TableManifest struct {
-	Level    uint8
-	Checksum []byte
-}
-
-// manifestFile holds the file pointer (and other info) about the manifest file, which is a log
-// file we append to.
-type manifestFile struct {
-	fp        *os.File
-	directory string
-	// We make this configurable so that unit tests can hit rewrite() code quickly
-	deletionsRewriteThreshold int
-
-	// Guards appends, which includes access to the manifest field.
-	appendLock sync.Mutex
-
-	// Used to track the current state of the manifest, used when rewriting.
-	manifest Manifest
-}
-
-const (
-	// ManifestFilename is the filename for the manifest file.
-	ManifestFilename                  = "MANIFEST"
-	manifestRewriteFilename           = "MANIFEST-REWRITE"
-	manifestDeletionsRewriteThreshold = 10000
-	manifestDeletionsRatio            = 10
-)
-
-// asChanges returns a sequence of changes that could be used to recreate the Manifest in its
-// present state.
-func (m *Manifest) asChanges() []*pb.ManifestChange {
-	changes := make([]*pb.ManifestChange, 0, len(m.Tables))
-	for id, tm := range m.Tables {
-		changes = append(changes, newCreateChange(id, int(tm.Level), tm.Checksum))
-	}
-	return changes
-}
-
-func (m *Manifest) clone() Manifest {
-	changeSet := pb.ManifestChangeSet{Changes: m.asChanges()}
-	ret := createManifest()
-	y.Check(applyChangeSet(&ret, &changeSet))
-	return ret
-}
-
-// openOrCreateManifestFile opens a Badger manifest file if it exists, or creates one if
-// one doesn't.
-func openOrCreateManifestFile(dir string, readOnly bool) ( - ret *manifestFile, result Manifest, err error) { - return helpOpenOrCreateManifestFile(dir, readOnly, manifestDeletionsRewriteThreshold) -} - -func helpOpenOrCreateManifestFile(dir string, readOnly bool, deletionsThreshold int) ( - *manifestFile, Manifest, error) { - - path := filepath.Join(dir, ManifestFilename) - var flags uint32 - if readOnly { - flags |= y.ReadOnly - } - fp, err := y.OpenExistingFile(path, flags) // We explicitly sync in addChanges, outside the lock. - if err != nil { - if !os.IsNotExist(err) { - return nil, Manifest{}, err - } - if readOnly { - return nil, Manifest{}, fmt.Errorf("no manifest found, required for read-only db") - } - m := createManifest() - fp, netCreations, err := helpRewrite(dir, &m) - if err != nil { - return nil, Manifest{}, err - } - y.AssertTrue(netCreations == 0) - mf := &manifestFile{ - fp: fp, - directory: dir, - manifest: m.clone(), - deletionsRewriteThreshold: deletionsThreshold, - } - return mf, m, nil - } - - manifest, truncOffset, err := ReplayManifestFile(fp) - if err != nil { - _ = fp.Close() - return nil, Manifest{}, err - } - - if !readOnly { - // Truncate file so we don't have a half-written entry at the end. - if err := fp.Truncate(truncOffset); err != nil { - _ = fp.Close() - return nil, Manifest{}, err - } - } - if _, err = fp.Seek(0, io.SeekEnd); err != nil { - _ = fp.Close() - return nil, Manifest{}, err - } - - mf := &manifestFile{ - fp: fp, - directory: dir, - manifest: manifest.clone(), - deletionsRewriteThreshold: deletionsThreshold, - } - return mf, manifest, nil -} - -func (mf *manifestFile) close() error { - return mf.fp.Close() -} - -// addChanges writes a batch of changes, atomically, to the file. By "atomically" that means when -// we replay the MANIFEST file, we'll either replay all the changes or none of them. (The truth of -// this depends on the filesystem -- some might append garbage data if a system crash happens at -// the wrong time.) -func (mf *manifestFile) addChanges(changesParam []*pb.ManifestChange) error { - changes := pb.ManifestChangeSet{Changes: changesParam} - buf, err := proto.Marshal(&changes) - if err != nil { - return err - } - - // Maybe we could use O_APPEND instead (on certain file systems) - mf.appendLock.Lock() - if err := applyChangeSet(&mf.manifest, &changes); err != nil { - mf.appendLock.Unlock() - return err - } - // Rewrite manifest if it'd shrink by 1/10 and it's big enough to care - if mf.manifest.Deletions > mf.deletionsRewriteThreshold && - mf.manifest.Deletions > manifestDeletionsRatio*(mf.manifest.Creations-mf.manifest.Deletions) { - if err := mf.rewrite(); err != nil { - mf.appendLock.Unlock() - return err - } - } else { - var lenCrcBuf [8]byte - binary.BigEndian.PutUint32(lenCrcBuf[0:4], uint32(len(buf))) - binary.BigEndian.PutUint32(lenCrcBuf[4:8], crc32.Checksum(buf, y.CastagnoliCrcTable)) - buf = append(lenCrcBuf[:], buf...) - if _, err := mf.fp.Write(buf); err != nil { - mf.appendLock.Unlock() - return err - } - } - - mf.appendLock.Unlock() - return y.FileSync(mf.fp) -} - -// Has to be 4 bytes. The value can never change, ever, anyway. -var magicText = [4]byte{'B', 'd', 'g', 'r'} - -// The magic version number. -const magicVersion = 4 - -func helpRewrite(dir string, m *Manifest) (*os.File, int, error) { - rewritePath := filepath.Join(dir, manifestRewriteFilename) - // We explicitly sync. 
- fp, err := y.OpenTruncFile(rewritePath, false) - if err != nil { - return nil, 0, err - } - - buf := make([]byte, 8) - copy(buf[0:4], magicText[:]) - binary.BigEndian.PutUint32(buf[4:8], magicVersion) - - netCreations := len(m.Tables) - changes := m.asChanges() - set := pb.ManifestChangeSet{Changes: changes} - - changeBuf, err := proto.Marshal(&set) - if err != nil { - fp.Close() - return nil, 0, err - } - var lenCrcBuf [8]byte - binary.BigEndian.PutUint32(lenCrcBuf[0:4], uint32(len(changeBuf))) - binary.BigEndian.PutUint32(lenCrcBuf[4:8], crc32.Checksum(changeBuf, y.CastagnoliCrcTable)) - buf = append(buf, lenCrcBuf[:]...) - buf = append(buf, changeBuf...) - if _, err := fp.Write(buf); err != nil { - fp.Close() - return nil, 0, err - } - if err := y.FileSync(fp); err != nil { - fp.Close() - return nil, 0, err - } - - // In Windows the files should be closed before doing a Rename. - if err = fp.Close(); err != nil { - return nil, 0, err - } - manifestPath := filepath.Join(dir, ManifestFilename) - if err := os.Rename(rewritePath, manifestPath); err != nil { - return nil, 0, err - } - fp, err = y.OpenExistingFile(manifestPath, 0) - if err != nil { - return nil, 0, err - } - if _, err := fp.Seek(0, io.SeekEnd); err != nil { - fp.Close() - return nil, 0, err - } - if err := syncDir(dir); err != nil { - fp.Close() - return nil, 0, err - } - - return fp, netCreations, nil -} - -// Must be called while appendLock is held. -func (mf *manifestFile) rewrite() error { - // In Windows the files should be closed before doing a Rename. - if err := mf.fp.Close(); err != nil { - return err - } - fp, netCreations, err := helpRewrite(mf.directory, &mf.manifest) - if err != nil { - return err - } - mf.fp = fp - mf.manifest.Creations = netCreations - mf.manifest.Deletions = 0 - - return nil -} - -type countingReader struct { - wrapped *bufio.Reader - count int64 -} - -func (r *countingReader) Read(p []byte) (n int, err error) { - n, err = r.wrapped.Read(p) - r.count += int64(n) - return -} - -func (r *countingReader) ReadByte() (b byte, err error) { - b, err = r.wrapped.ReadByte() - if err == nil { - r.count++ - } - return -} - -var ( - errBadMagic = errors.New("manifest has bad magic") - errBadChecksum = errors.New("manifest has checksum mismatch") -) - -// ReplayManifestFile reads the manifest file and constructs two manifest objects. (We need one -// immutable copy and one mutable copy of the manifest. Easiest way is to construct two of them.) -// Also, returns the last offset after a completely read manifest entry -- the file must be -// truncated at that point before further appends are made (if there is a partial entry after -// that). In normal conditions, truncOffset is the file size. 
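Every record appended by addChanges is framed by the same 8-byte header that ReplayManifestFile parses below: a big-endian payload length followed by a Castagnoli CRC of the payload. A self-contained sketch of that framing:

package main

import (
	"encoding/binary"
	"errors"
	"fmt"
	"hash/crc32"
)

var castagnoli = crc32.MakeTable(crc32.Castagnoli)

// frame prepends the manifest record header: 4-byte big-endian length,
// then a 4-byte CRC32-Castagnoli of the payload.
func frame(payload []byte) []byte {
	var hdr [8]byte
	binary.BigEndian.PutUint32(hdr[0:4], uint32(len(payload)))
	binary.BigEndian.PutUint32(hdr[4:8], crc32.Checksum(payload, castagnoli))
	return append(hdr[:], payload...)
}

// unframe reverses frame, rejecting short or corrupt records.
func unframe(rec []byte) ([]byte, error) {
	if len(rec) < 8 {
		return nil, errors.New("short record")
	}
	n := binary.BigEndian.Uint32(rec[0:4])
	if uint32(len(rec)-8) < n {
		return nil, errors.New("truncated payload")
	}
	payload := rec[8 : 8+n]
	if crc32.Checksum(payload, castagnoli) != binary.BigEndian.Uint32(rec[4:8]) {
		return nil, errors.New("checksum mismatch")
	}
	return payload, nil
}

func main() {
	payload, err := unframe(frame([]byte("changeset")))
	fmt.Printf("%s %v\n", payload, err) // changeset <nil>
}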
-func ReplayManifestFile(fp *os.File) (Manifest, int64, error) { - r := countingReader{wrapped: bufio.NewReader(fp)} - - var magicBuf [8]byte - if _, err := io.ReadFull(&r, magicBuf[:]); err != nil { - return Manifest{}, 0, errBadMagic - } - if !bytes.Equal(magicBuf[0:4], magicText[:]) { - return Manifest{}, 0, errBadMagic - } - version := binary.BigEndian.Uint32(magicBuf[4:8]) - if version != magicVersion { - return Manifest{}, 0, - //nolint:lll - fmt.Errorf("manifest has unsupported version: %d (we support %d).\n"+ - "Please see https://github.com/dgraph-io/badger/blob/master/README.md#i-see-manifest-has-unsupported-version-x-we-support-y-error"+ - " on how to fix this.", - version, magicVersion) - } - - stat, err := fp.Stat() - if err != nil { - return Manifest{}, 0, err - } - - build := createManifest() - var offset int64 - for { - offset = r.count - var lenCrcBuf [8]byte - _, err := io.ReadFull(&r, lenCrcBuf[:]) - if err != nil { - if err == io.EOF || err == io.ErrUnexpectedEOF { - break - } - return Manifest{}, 0, err - } - length := binary.BigEndian.Uint32(lenCrcBuf[0:4]) - // Sanity check to ensure we don't over-allocate memory. - if length > uint32(stat.Size()) { - return Manifest{}, 0, errors.Errorf( - "Buffer length: %d greater than file size: %d. Manifest file might be corrupted", - length, stat.Size()) - } - var buf = make([]byte, length) - if _, err := io.ReadFull(&r, buf); err != nil { - if err == io.EOF || err == io.ErrUnexpectedEOF { - break - } - return Manifest{}, 0, err - } - if crc32.Checksum(buf, y.CastagnoliCrcTable) != binary.BigEndian.Uint32(lenCrcBuf[4:8]) { - return Manifest{}, 0, errBadChecksum - } - - var changeSet pb.ManifestChangeSet - if err := proto.Unmarshal(buf, &changeSet); err != nil { - return Manifest{}, 0, err - } - - if err := applyChangeSet(&build, &changeSet); err != nil { - return Manifest{}, 0, err - } - } - - return build, offset, nil -} - -func applyManifestChange(build *Manifest, tc *pb.ManifestChange) error { - switch tc.Op { - case pb.ManifestChange_CREATE: - if _, ok := build.Tables[tc.Id]; ok { - return fmt.Errorf("MANIFEST invalid, table %d exists", tc.Id) - } - build.Tables[tc.Id] = TableManifest{ - Level: uint8(tc.Level), - Checksum: append([]byte{}, tc.Checksum...), - } - for len(build.Levels) <= int(tc.Level) { - build.Levels = append(build.Levels, levelManifest{make(map[uint64]struct{})}) - } - build.Levels[tc.Level].Tables[tc.Id] = struct{}{} - build.Creations++ - case pb.ManifestChange_DELETE: - tm, ok := build.Tables[tc.Id] - if !ok { - return fmt.Errorf("MANIFEST removes non-existing table %d", tc.Id) - } - delete(build.Levels[tm.Level].Tables, tc.Id) - delete(build.Tables, tc.Id) - build.Deletions++ - default: - return fmt.Errorf("MANIFEST file has invalid manifestChange op") - } - return nil -} - -// This is not a "recoverable" error -- opening the KV store fails because the MANIFEST file is -// just plain broken. 
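applyManifestChange, which follows, is the entire state machine: CREATE inserts a table at a level, DELETE removes it, and anything else means corruption. A stripped-down replay over toy change records standing in for the pb messages:

package main

import "fmt"

type op int

const (
	opCreate op = iota
	opDelete
)

type change struct {
	op    op
	id    uint64
	level uint8
}

// replay folds a sequence of changes into a table-id -> level map,
// failing loudly on impossible transitions, like the real code.
func replay(changes []change) (map[uint64]uint8, error) {
	tables := make(map[uint64]uint8)
	for _, c := range changes {
		switch c.op {
		case opCreate:
			if _, ok := tables[c.id]; ok {
				return nil, fmt.Errorf("table %d already exists", c.id)
			}
			tables[c.id] = c.level
		case opDelete:
			if _, ok := tables[c.id]; !ok {
				return nil, fmt.Errorf("table %d does not exist", c.id)
			}
			delete(tables, c.id)
		default:
			return nil, fmt.Errorf("invalid op %d", c.op)
		}
	}
	return tables, nil
}

func main() {
	state, err := replay([]change{{opCreate, 1, 0}, {opCreate, 2, 1}, {opDelete, 1, 0}})
	fmt.Println(state, err) // map[2:1] <nil>
}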
-func applyChangeSet(build *Manifest, changeSet *pb.ManifestChangeSet) error { - for _, change := range changeSet.Changes { - if err := applyManifestChange(build, change); err != nil { - return err - } - } - return nil -} - -func newCreateChange(id uint64, level int, checksum []byte) *pb.ManifestChange { - return &pb.ManifestChange{ - Id: id, - Op: pb.ManifestChange_CREATE, - Level: uint32(level), - Checksum: checksum, - } -} - -func newDeleteChange(id uint64) *pb.ManifestChange { - return &pb.ManifestChange{ - Id: id, - Op: pb.ManifestChange_DELETE, - } -} diff --git a/vendor/github.com/dgraph-io/badger/merge.go b/vendor/github.com/dgraph-io/badger/merge.go deleted file mode 100644 index 02ad4bcd..00000000 --- a/vendor/github.com/dgraph-io/badger/merge.go +++ /dev/null @@ -1,177 +0,0 @@ -/* - * Copyright 2017 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package badger - -import ( - "sync" - "time" - - "github.com/dgraph-io/badger/y" - "github.com/pkg/errors" -) - -// MergeOperator represents a Badger merge operator. -type MergeOperator struct { - sync.RWMutex - f MergeFunc - db *DB - key []byte - closer *y.Closer -} - -// MergeFunc accepts two byte slices, one representing an existing value, and -// another representing a new value that needs to be ‘merged’ into it. MergeFunc -// contains the logic to perform the ‘merge’ and return an updated value. -// MergeFunc could perform operations like integer addition, list appends etc. -// Note that the ordering of the operands is maintained. -type MergeFunc func(existingVal, newVal []byte) []byte - -// GetMergeOperator creates a new MergeOperator for a given key and returns a -// pointer to it. It also fires off a goroutine that performs a compaction using -// the merge function that runs periodically, as specified by dur. -func (db *DB) GetMergeOperator(key []byte, - f MergeFunc, dur time.Duration) *MergeOperator { - op := &MergeOperator{ - f: f, - db: db, - key: key, - closer: y.NewCloser(1), - } - - go op.runCompactions(dur) - return op -} - -var errNoMerge = errors.New("No need for merge") - -func (op *MergeOperator) iterateAndMerge() (newVal []byte, latest uint64, err error) { - txn := op.db.NewTransaction(false) - defer txn.Discard() - opt := DefaultIteratorOptions - opt.AllVersions = true - it := txn.NewKeyIterator(op.key, opt) - defer it.Close() - - var numVersions int - for it.Rewind(); it.Valid(); it.Next() { - item := it.Item() - numVersions++ - if numVersions == 1 { - // This should be the newVal, considering this is the latest version. - newVal, err = item.ValueCopy(newVal) - if err != nil { - return nil, 0, err - } - latest = item.Version() - } else { - if err := item.Value(func(oldVal []byte) error { - // The merge should always be on the newVal considering it has the merge result of - // the latest version. The value read should be the oldVal. 
- newVal = op.f(oldVal, newVal) - return nil - }); err != nil { - return nil, 0, err - } - } - if item.DiscardEarlierVersions() { - break - } - } - if numVersions == 0 { - return nil, latest, ErrKeyNotFound - } else if numVersions == 1 { - return newVal, latest, errNoMerge - } - return newVal, latest, nil -} - -func (op *MergeOperator) compact() error { - op.Lock() - defer op.Unlock() - val, version, err := op.iterateAndMerge() - if err == ErrKeyNotFound || err == errNoMerge { - return nil - } else if err != nil { - return err - } - entries := []*Entry{ - { - Key: y.KeyWithTs(op.key, version), - Value: val, - meta: bitDiscardEarlierVersions, - }, - } - // Write value back to the DB. It is important that we do not set the bitMergeEntry bit - // here. When compaction happens, all the older merged entries will be removed. - return op.db.batchSetAsync(entries, func(err error) { - if err != nil { - op.db.opt.Errorf("failed to insert the result of merge compaction: %s", err) - } - }) -} - -func (op *MergeOperator) runCompactions(dur time.Duration) { - ticker := time.NewTicker(dur) - defer op.closer.Done() - var stop bool - for { - select { - case <-op.closer.HasBeenClosed(): - stop = true - case <-ticker.C: // wait for tick - } - if err := op.compact(); err != nil { - op.db.opt.Errorf("failure while running merge operation: %s", err) - } - if stop { - ticker.Stop() - break - } - } -} - -// Add records a value in Badger which will eventually be merged by a background -// routine into the values that were recorded by previous invocations to Add(). -func (op *MergeOperator) Add(val []byte) error { - return op.db.Update(func(txn *Txn) error { - return txn.SetEntry(NewEntry(op.key, val).withMergeBit()) - }) -} - -// Get returns the latest value for the merge operator, which is derived by -// applying the merge function to all the values added so far. -// -// If Add has not been called even once, Get will return ErrKeyNotFound. -func (op *MergeOperator) Get() ([]byte, error) { - op.RLock() - defer op.RUnlock() - var existing []byte - err := op.db.View(func(txn *Txn) (err error) { - existing, _, err = op.iterateAndMerge() - return err - }) - if err == errNoMerge { - return existing, nil - } - return existing, err -} - -// Stop waits for any pending merge to complete and then stops the background -// goroutine. -func (op *MergeOperator) Stop() { - op.closer.SignalAndWait() -} diff --git a/vendor/github.com/dgraph-io/badger/options.go b/vendor/github.com/dgraph-io/badger/options.go deleted file mode 100644 index f396c7ea..00000000 --- a/vendor/github.com/dgraph-io/badger/options.go +++ /dev/null @@ -1,420 +0,0 @@ -/* - * Copyright 2017 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package badger - -import ( - "github.com/dgraph-io/badger/options" -) - -// Note: If you add a new option X make sure you also add a WithX method on Options. - -// Options are params for creating DB object. 
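
Before the options listing resumes: the merge.go deletion above removes badger's MergeOperator, which merged values newest-to-oldest in a background compaction. For reference, typical usage of the removed v1 API looked like the following hedged sketch (the path and tick duration are illustrative):

```go
package main

import (
	"fmt"
	"time"

	badger "github.com/dgraph-io/badger"
)

// concat merges by appending the newer value after the existing one,
// matching the documented operand order of MergeFunc.
func concat(existingVal, newVal []byte) []byte {
	return append(existingVal, newVal...)
}

func main() {
	db, err := badger.Open(badger.DefaultOptions("/tmp/badger-merge-demo"))
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// Background merge compactions run every 200ms until Stop is called.
	m := db.GetMergeOperator([]byte("merge-key"), concat, 200*time.Millisecond)
	defer m.Stop()

	_ = m.Add([]byte("A"))
	_ = m.Add([]byte("B"))

	val, err := m.Get() // "AB" once both versions are merged
	fmt.Println(string(val), err)
}
```
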
-//
-// This package provides DefaultOptions which contains options that should
-// work for most applications. Consider using that as a starting point before
-// customizing it for your own needs.
-//
-// Each option X is documented on the WithX method.
-type Options struct {
-	// Required options.
-
-	Dir      string
-	ValueDir string
-
-	// Usually modified options.
-
-	SyncWrites          bool
-	TableLoadingMode    options.FileLoadingMode
-	ValueLogLoadingMode options.FileLoadingMode
-	NumVersionsToKeep   int
-	ReadOnly            bool
-	Truncate            bool
-	Logger              Logger
-	EventLogging        bool
-
-	// Fine tuning options.
-
-	MaxTableSize        int64
-	LevelSizeMultiplier int
-	MaxLevels           int
-	ValueThreshold      int
-	NumMemtables        int
-
-	NumLevelZeroTables      int
-	NumLevelZeroTablesStall int
-
-	LevelOneSize       int64
-	ValueLogFileSize   int64
-	ValueLogMaxEntries uint32
-
-	NumCompactors     int
-	CompactL0OnClose  bool
-	LogRotatesToFlush int32
-	// When set, checksum will be validated for each entry read from the value log file.
-	VerifyValueChecksum bool
-
-	// BypassLockGuard will bypass the lock guard on badger. Bypassing lock
-	// guard can cause data corruption if multiple badger instances are using
-	// the same directory. Use this option with caution.
-	BypassLockGuard bool
-
-	// Transaction start and commit timestamps are managed by the end user.
-	// This is only useful for databases built on top of Badger (like Dgraph).
-	// Not recommended for most users.
-	managedTxns bool
-
-	// 4. Flags for testing purposes
-	// ------------------------------
-	maxBatchCount int64 // max entries in batch
-	maxBatchSize  int64 // max batch size in bytes
-
-}
-
-// DefaultOptions sets a list of recommended options for good performance.
-// Feel free to modify these to suit your needs with the WithX methods.
-func DefaultOptions(path string) Options {
-	return Options{
-		Dir:                 path,
-		ValueDir:            path,
-		LevelOneSize:        256 << 20,
-		LevelSizeMultiplier: 10,
-		TableLoadingMode:    options.MemoryMap,
-		ValueLogLoadingMode: options.MemoryMap,
-		// table.MemoryMap to mmap() the tables.
-		// table.Nothing to not preload the tables.
-		MaxLevels:               7,
-		MaxTableSize:            64 << 20,
-		NumCompactors:           2, // Compactions can be expensive. Only run 2.
-		NumLevelZeroTables:      5,
-		NumLevelZeroTablesStall: 10,
-		NumMemtables:            5,
-		SyncWrites:              true,
-		NumVersionsToKeep:       1,
-		CompactL0OnClose:        true,
-		VerifyValueChecksum:     false,
-		// Nothing to read/write value log using standard File I/O
-		// MemoryMap to mmap() the value log files
-		// (2^30 - 1)*2 when mmapping < 2^31 - 1, max int32.
-		// -1 so 2*ValueLogFileSize won't overflow on 32-bit systems.
-		ValueLogFileSize: 1<<30 - 1,
-
-		ValueLogMaxEntries: 1000000,
-		ValueThreshold:     32,
-		Truncate:           false,
-		Logger:             defaultLogger,
-		EventLogging:       true,
-		LogRotatesToFlush:  2,
-	}
-}
-
-// LSMOnlyOptions follows from DefaultOptions, but sets a higher ValueThreshold
-// so values would be colocated with the LSM tree, with value log largely acting
-// as a write-ahead log only. These options would reduce the disk usage of value
-// log, and make Badger act more like a typical LSM tree.
-func LSMOnlyOptions(path string) Options {
-	// Max value length which fits in uint16.
-	// Let's not set any other options, because they can cause issues with the
-	// size of key-value a user can pass to Badger. For example, if we set
-	// ValueLogFileSize to 64MB, a user can't pass a value more than that.
-	// Setting ValueLogMaxEntries to 1000 can generate too many files.
-	// These options are better configured on a usage basis than broadly here.
- // The ValueThreshold is the most important setting a user needs to do to - // achieve a heavier usage of LSM tree. - // NOTE: If a user does not want to set 64KB as the ValueThreshold because - // of performance reasons, 1KB would be a good option too, allowing - // values smaller than 1KB to be colocated with the keys in the LSM tree. - return DefaultOptions(path).WithValueThreshold(65500) -} - -// WithDir returns a new Options value with Dir set to the given value. -// -// Dir is the path of the directory where key data will be stored in. -// If it doesn't exist, Badger will try to create it for you. -// This is set automatically to be the path given to `DefaultOptions`. -func (opt Options) WithDir(val string) Options { - opt.Dir = val - return opt -} - -// WithValueDir returns a new Options value with ValueDir set to the given value. -// -// ValueDir is the path of the directory where value data will be stored in. -// If it doesn't exist, Badger will try to create it for you. -// This is set automatically to be the path given to `DefaultOptions`. -func (opt Options) WithValueDir(val string) Options { - opt.ValueDir = val - return opt -} - -// WithSyncWrites returns a new Options value with SyncWrites set to the given value. -// -// When SyncWrites is true all writes are synced to disk. Setting this to false would achieve better -// performance, but may cause data loss in case of crash. -// -// The default value of SyncWrites is true. -func (opt Options) WithSyncWrites(val bool) Options { - opt.SyncWrites = val - return opt -} - -// WithTableLoadingMode returns a new Options value with TableLoadingMode set to the given value. -// -// TableLoadingMode indicates which file loading mode should be used for the LSM tree data files. -// -// The default value of TableLoadingMode is options.MemoryMap. -func (opt Options) WithTableLoadingMode(val options.FileLoadingMode) Options { - opt.TableLoadingMode = val - return opt -} - -// WithValueLogLoadingMode returns a new Options value with ValueLogLoadingMode set to the given -// value. -// -// ValueLogLoadingMode indicates which file loading mode should be used for the value log data -// files. -// -// The default value of ValueLogLoadingMode is options.MemoryMap. -func (opt Options) WithValueLogLoadingMode(val options.FileLoadingMode) Options { - opt.ValueLogLoadingMode = val - return opt -} - -// WithNumVersionsToKeep returns a new Options value with NumVersionsToKeep set to the given value. -// -// NumVersionsToKeep sets how many versions to keep per key at most. -// -// The default value of NumVersionsToKeep is 1. -func (opt Options) WithNumVersionsToKeep(val int) Options { - opt.NumVersionsToKeep = val - return opt -} - -// WithReadOnly returns a new Options value with ReadOnly set to the given value. -// -// When ReadOnly is true the DB will be opened on read-only mode. -// Multiple processes can open the same Badger DB. -// Note: if the DB being opened had crashed before and has vlog data to be replayed, -// ReadOnly will cause Open to fail with an appropriate message. -// -// The default value of ReadOnly is false. -func (opt Options) WithReadOnly(val bool) Options { - opt.ReadOnly = val - return opt -} - -// WithTruncate returns a new Options value with Truncate set to the given value. -// -// Truncate indicates whether value log files should be truncated to delete corrupt data, if any. -// This option is ignored when ReadOnly is true. -// -// The default value of Truncate is false. 
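
The WithX builders that follow all share the value-receiver pattern: each call returns a modified copy of Options, so configuration chains without mutating the base value. An illustrative sketch against the removed v1 API (the path is hypothetical):

```go
package main

import badger "github.com/dgraph-io/badger"

func main() {
	// Each WithX returns a copy, so the chain leaves DefaultOptions untouched.
	opts := badger.DefaultOptions("/tmp/badger-demo").
		WithSyncWrites(false).
		WithNumVersionsToKeep(3).
		WithTruncate(true)

	db, err := badger.Open(opts)
	if err != nil {
		panic(err)
	}
	defer db.Close()
}
```
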
-func (opt Options) WithTruncate(val bool) Options {
-	opt.Truncate = val
-	return opt
-}
-
-// WithLogger returns a new Options value with Logger set to the given value.
-//
-// Logger provides a way to configure what logger each value of badger.DB uses.
-//
-// The default value of Logger writes to stderr using the log package from the Go standard library.
-func (opt Options) WithLogger(val Logger) Options {
-	opt.Logger = val
-	return opt
-}
-
-// WithEventLogging returns a new Options value with EventLogging set to the given value.
-//
-// EventLogging provides a way to enable or disable trace.EventLog logging.
-//
-// The default value of EventLogging is true.
-func (opt Options) WithEventLogging(enabled bool) Options {
-	opt.EventLogging = enabled
-	return opt
-}
-
-// WithMaxTableSize returns a new Options value with MaxTableSize set to the given value.
-//
-// MaxTableSize sets the maximum size in bytes for each LSM table or file.
-//
-// The default value of MaxTableSize is 64MB.
-func (opt Options) WithMaxTableSize(val int64) Options {
-	opt.MaxTableSize = val
-	return opt
-}
-
-// WithLevelSizeMultiplier returns a new Options value with LevelSizeMultiplier set to the given
-// value.
-//
-// LevelSizeMultiplier sets the ratio between the maximum sizes of contiguous levels in the LSM.
-// Once a level grows larger than this ratio allows, the compaction process is triggered.
-//
-// The default value of LevelSizeMultiplier is 10.
-func (opt Options) WithLevelSizeMultiplier(val int) Options {
-	opt.LevelSizeMultiplier = val
-	return opt
-}
-
-// WithMaxLevels returns a new Options value with MaxLevels set to the given value.
-//
-// MaxLevels sets the maximum number of levels of compaction allowed in the LSM.
-//
-// The default value of MaxLevels is 7.
-func (opt Options) WithMaxLevels(val int) Options {
-	opt.MaxLevels = val
-	return opt
-}
-
-// WithValueThreshold returns a new Options value with ValueThreshold set to the given value.
-//
-// ValueThreshold sets the threshold used to decide whether a value is stored directly in the LSM
-// tree or separately in the value log files.
-//
-// The default value of ValueThreshold is 32, but LSMOnlyOptions sets it to 65500.
-func (opt Options) WithValueThreshold(val int) Options {
-	opt.ValueThreshold = val
-	return opt
-}
-
-// WithNumMemtables returns a new Options value with NumMemtables set to the given value.
-//
-// NumMemtables sets the maximum number of tables to keep in memory before stalling.
-//
-// The default value of NumMemtables is 5.
-func (opt Options) WithNumMemtables(val int) Options {
-	opt.NumMemtables = val
-	return opt
-}
-
-// WithNumLevelZeroTables returns a new Options value with NumLevelZeroTables set to the given
-// value.
-//
-// NumLevelZeroTables sets the maximum number of Level 0 tables before compaction starts.
-//
-// The default value of NumLevelZeroTables is 5.
-func (opt Options) WithNumLevelZeroTables(val int) Options {
-	opt.NumLevelZeroTables = val
-	return opt
-}
-
-// WithNumLevelZeroTablesStall returns a new Options value with NumLevelZeroTablesStall set to the
-// given value.
-//
-// NumLevelZeroTablesStall sets the number of Level 0 tables that, once reached, causes the DB to
-// stall until compaction succeeds.
-//
-// The default value of NumLevelZeroTablesStall is 10.
-func (opt Options) WithNumLevelZeroTablesStall(val int) Options {
-	opt.NumLevelZeroTablesStall = val
-	return opt
-}
-
-// WithLevelOneSize returns a new Options value with LevelOneSize set to the given value.
-//
-// LevelOneSize sets the maximum total size for Level 1.
-//
-// The default value of LevelOneSize is 256MB, matching DefaultOptions.
-func (opt Options) WithLevelOneSize(val int64) Options {
-	opt.LevelOneSize = val
-	return opt
-}
-
-// WithValueLogFileSize returns a new Options value with ValueLogFileSize set to the given value.
-//
-// ValueLogFileSize sets the maximum size of a single value log file.
-//
-// The default value of ValueLogFileSize is 1GB.
-func (opt Options) WithValueLogFileSize(val int64) Options {
-	opt.ValueLogFileSize = val
-	return opt
-}
-
-// WithValueLogMaxEntries returns a new Options value with ValueLogMaxEntries set to the given
-// value.
-//
-// ValueLogMaxEntries sets the maximum number of entries a value log file can hold approximately.
-// The actual limit of a value log file is whichever of ValueLogFileSize and
-// ValueLogMaxEntries is reached first.
-//
-// The default value of ValueLogMaxEntries is one million (1000000).
-func (opt Options) WithValueLogMaxEntries(val uint32) Options {
-	opt.ValueLogMaxEntries = val
-	return opt
-}
-
-// WithNumCompactors returns a new Options value with NumCompactors set to the given value.
-//
-// NumCompactors sets the number of compaction workers to run concurrently.
-// Setting this to zero stops compactions, which could eventually cause writes to block forever.
-//
-// The default value of NumCompactors is 2.
-func (opt Options) WithNumCompactors(val int) Options {
-	opt.NumCompactors = val
-	return opt
-}
-
-// WithCompactL0OnClose returns a new Options value with CompactL0OnClose set to the given value.
-//
-// CompactL0OnClose determines whether Level 0 should be compacted before closing the DB.
-// This ensures that both reads and writes are efficient when the DB is opened later.
-//
-// The default value of CompactL0OnClose is true.
-func (opt Options) WithCompactL0OnClose(val bool) Options {
-	opt.CompactL0OnClose = val
-	return opt
-}
-
-// WithLogRotatesToFlush returns a new Options value with LogRotatesToFlush set to the given value.
-//
-// LogRotatesToFlush sets the number of value log file rotates after which the Memtables are
-// flushed to disk. This is useful in write loads with fewer keys and larger values. This workload
-// would fill up the value logs quickly, while not filling up the Memtables. Thus, on a crash
-// and restart, the value log head could cause the replay of a good number of value log files,
-// which can slow things down on start.
-//
-// The default value of LogRotatesToFlush is 2.
-func (opt Options) WithLogRotatesToFlush(val int32) Options {
-	opt.LogRotatesToFlush = val
-	return opt
-}
-
-// WithVerifyValueChecksum returns a new Options value with VerifyValueChecksum set to
-// the given value.
-//
-// When VerifyValueChecksum is set to true, the checksum will be verified for every entry read
-// from the value log. If the value is stored in the SST (value size less than the value
-// threshold), the checksum validation will not be done.
-//
-// The default value of VerifyValueChecksum is false.
-func (opt Options) WithVerifyValueChecksum(val bool) Options {
-	opt.VerifyValueChecksum = val
-	return opt
-}
-
-// WithBypassLockGuard returns a new Options value with BypassLockGuard
-// set to the given value.
-//
-// When the BypassLockGuard option is set, badger will not acquire a lock on the
-// directory. This could lead to data corruption if multiple badger instances
-// write to the same data directory. Use this option with caution.
-//
-// The default value of BypassLockGuard is false.
-func (opt Options) WithBypassLockGuard(b bool) Options {
-	opt.BypassLockGuard = b
-	return opt
-}
diff --git a/vendor/github.com/dgraph-io/badger/options/options.go b/vendor/github.com/dgraph-io/badger/options/options.go
deleted file mode 100644
index 06c8b1b7..00000000
--- a/vendor/github.com/dgraph-io/badger/options/options.go
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package options
-
-// FileLoadingMode specifies how data in LSM table files and value log files should
-// be loaded.
-type FileLoadingMode int
-
-const (
-	// FileIO indicates that files must be loaded using standard I/O
-	FileIO FileLoadingMode = iota
-	// LoadToRAM indicates that the file must be loaded into RAM
-	LoadToRAM
-	// MemoryMap indicates that the file must be memory-mapped
-	MemoryMap
-)
diff --git a/vendor/github.com/dgraph-io/badger/pb/gen.sh b/vendor/github.com/dgraph-io/badger/pb/gen.sh
deleted file mode 100644
index 49b44ff4..00000000
--- a/vendor/github.com/dgraph-io/badger/pb/gen.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/bash
-
-# You might need to go get -v github.com/gogo/protobuf/...
-
-protos=${GOPATH-$HOME/go}/src/github.com/dgraph-io/badger/pb
-pushd $protos > /dev/null
-protoc --gofast_out=plugins=grpc:. -I=. pb.proto
diff --git a/vendor/github.com/dgraph-io/badger/pb/pb.pb.go b/vendor/github.com/dgraph-io/badger/pb/pb.pb.go
deleted file mode 100644
index 6fd3d07c..00000000
--- a/vendor/github.com/dgraph-io/badger/pb/pb.pb.go
+++ /dev/null
@@ -1,1359 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: pb.proto
-
-package pb
-
-import (
-	fmt "fmt"
-	proto "github.com/golang/protobuf/proto"
-	io "io"
-	math "math"
-	math_bits "math/bits"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -type ManifestChange_Operation int32 - -const ( - ManifestChange_CREATE ManifestChange_Operation = 0 - ManifestChange_DELETE ManifestChange_Operation = 1 -) - -var ManifestChange_Operation_name = map[int32]string{ - 0: "CREATE", - 1: "DELETE", -} - -var ManifestChange_Operation_value = map[string]int32{ - "CREATE": 0, - "DELETE": 1, -} - -func (x ManifestChange_Operation) String() string { - return proto.EnumName(ManifestChange_Operation_name, int32(x)) -} - -func (ManifestChange_Operation) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_f80abaa17e25ccc8, []int{3, 0} -} - -type KV struct { - Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - UserMeta []byte `protobuf:"bytes,3,opt,name=user_meta,json=userMeta,proto3" json:"user_meta,omitempty"` - Version uint64 `protobuf:"varint,4,opt,name=version,proto3" json:"version,omitempty"` - ExpiresAt uint64 `protobuf:"varint,5,opt,name=expires_at,json=expiresAt,proto3" json:"expires_at,omitempty"` - Meta []byte `protobuf:"bytes,6,opt,name=meta,proto3" json:"meta,omitempty"` - // Stream id is used to identify which stream the KV came from. - StreamId uint32 `protobuf:"varint,10,opt,name=stream_id,json=streamId,proto3" json:"stream_id,omitempty"` - // Stream done is used to indicate end of stream. - StreamDone bool `protobuf:"varint,11,opt,name=stream_done,json=streamDone,proto3" json:"stream_done,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *KV) Reset() { *m = KV{} } -func (m *KV) String() string { return proto.CompactTextString(m) } -func (*KV) ProtoMessage() {} -func (*KV) Descriptor() ([]byte, []int) { - return fileDescriptor_f80abaa17e25ccc8, []int{0} -} -func (m *KV) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *KV) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_KV.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *KV) XXX_Merge(src proto.Message) { - xxx_messageInfo_KV.Merge(m, src) -} -func (m *KV) XXX_Size() int { - return m.Size() -} -func (m *KV) XXX_DiscardUnknown() { - xxx_messageInfo_KV.DiscardUnknown(m) -} - -var xxx_messageInfo_KV proto.InternalMessageInfo - -func (m *KV) GetKey() []byte { - if m != nil { - return m.Key - } - return nil -} - -func (m *KV) GetValue() []byte { - if m != nil { - return m.Value - } - return nil -} - -func (m *KV) GetUserMeta() []byte { - if m != nil { - return m.UserMeta - } - return nil -} - -func (m *KV) GetVersion() uint64 { - if m != nil { - return m.Version - } - return 0 -} - -func (m *KV) GetExpiresAt() uint64 { - if m != nil { - return m.ExpiresAt - } - return 0 -} - -func (m *KV) GetMeta() []byte { - if m != nil { - return m.Meta - } - return nil -} - -func (m *KV) GetStreamId() uint32 { - if m != nil { - return m.StreamId - } - return 0 -} - -func (m *KV) GetStreamDone() bool { - if m != nil { - return m.StreamDone - } - return false -} - -type KVList struct { - Kv []*KV `protobuf:"bytes,1,rep,name=kv,proto3" json:"kv,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *KVList) Reset() { *m = KVList{} } -func (m *KVList) 
String() string { return proto.CompactTextString(m) } -func (*KVList) ProtoMessage() {} -func (*KVList) Descriptor() ([]byte, []int) { - return fileDescriptor_f80abaa17e25ccc8, []int{1} -} -func (m *KVList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *KVList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_KVList.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *KVList) XXX_Merge(src proto.Message) { - xxx_messageInfo_KVList.Merge(m, src) -} -func (m *KVList) XXX_Size() int { - return m.Size() -} -func (m *KVList) XXX_DiscardUnknown() { - xxx_messageInfo_KVList.DiscardUnknown(m) -} - -var xxx_messageInfo_KVList proto.InternalMessageInfo - -func (m *KVList) GetKv() []*KV { - if m != nil { - return m.Kv - } - return nil -} - -type ManifestChangeSet struct { - // A set of changes that are applied atomically. - Changes []*ManifestChange `protobuf:"bytes,1,rep,name=changes,proto3" json:"changes,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ManifestChangeSet) Reset() { *m = ManifestChangeSet{} } -func (m *ManifestChangeSet) String() string { return proto.CompactTextString(m) } -func (*ManifestChangeSet) ProtoMessage() {} -func (*ManifestChangeSet) Descriptor() ([]byte, []int) { - return fileDescriptor_f80abaa17e25ccc8, []int{2} -} -func (m *ManifestChangeSet) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ManifestChangeSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ManifestChangeSet.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ManifestChangeSet) XXX_Merge(src proto.Message) { - xxx_messageInfo_ManifestChangeSet.Merge(m, src) -} -func (m *ManifestChangeSet) XXX_Size() int { - return m.Size() -} -func (m *ManifestChangeSet) XXX_DiscardUnknown() { - xxx_messageInfo_ManifestChangeSet.DiscardUnknown(m) -} - -var xxx_messageInfo_ManifestChangeSet proto.InternalMessageInfo - -func (m *ManifestChangeSet) GetChanges() []*ManifestChange { - if m != nil { - return m.Changes - } - return nil -} - -type ManifestChange struct { - Id uint64 `protobuf:"varint,1,opt,name=Id,proto3" json:"Id,omitempty"` - Op ManifestChange_Operation `protobuf:"varint,2,opt,name=Op,proto3,enum=pb.ManifestChange_Operation" json:"Op,omitempty"` - Level uint32 `protobuf:"varint,3,opt,name=Level,proto3" json:"Level,omitempty"` - Checksum []byte `protobuf:"bytes,4,opt,name=Checksum,proto3" json:"Checksum,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ManifestChange) Reset() { *m = ManifestChange{} } -func (m *ManifestChange) String() string { return proto.CompactTextString(m) } -func (*ManifestChange) ProtoMessage() {} -func (*ManifestChange) Descriptor() ([]byte, []int) { - return fileDescriptor_f80abaa17e25ccc8, []int{3} -} -func (m *ManifestChange) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ManifestChange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ManifestChange.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := 
m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ManifestChange) XXX_Merge(src proto.Message) { - xxx_messageInfo_ManifestChange.Merge(m, src) -} -func (m *ManifestChange) XXX_Size() int { - return m.Size() -} -func (m *ManifestChange) XXX_DiscardUnknown() { - xxx_messageInfo_ManifestChange.DiscardUnknown(m) -} - -var xxx_messageInfo_ManifestChange proto.InternalMessageInfo - -func (m *ManifestChange) GetId() uint64 { - if m != nil { - return m.Id - } - return 0 -} - -func (m *ManifestChange) GetOp() ManifestChange_Operation { - if m != nil { - return m.Op - } - return ManifestChange_CREATE -} - -func (m *ManifestChange) GetLevel() uint32 { - if m != nil { - return m.Level - } - return 0 -} - -func (m *ManifestChange) GetChecksum() []byte { - if m != nil { - return m.Checksum - } - return nil -} - -func init() { - proto.RegisterEnum("pb.ManifestChange_Operation", ManifestChange_Operation_name, ManifestChange_Operation_value) - proto.RegisterType((*KV)(nil), "pb.KV") - proto.RegisterType((*KVList)(nil), "pb.KVList") - proto.RegisterType((*ManifestChangeSet)(nil), "pb.ManifestChangeSet") - proto.RegisterType((*ManifestChange)(nil), "pb.ManifestChange") -} - -func init() { proto.RegisterFile("pb.proto", fileDescriptor_f80abaa17e25ccc8) } - -var fileDescriptor_f80abaa17e25ccc8 = []byte{ - // 384 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x92, 0x4f, 0x6a, 0xdb, 0x40, - 0x14, 0xc6, 0x33, 0x63, 0x47, 0x91, 0x9f, 0x13, 0xe3, 0x3e, 0x4a, 0x19, 0xfa, 0xc7, 0x15, 0xea, - 0x46, 0x8b, 0xa0, 0x45, 0x7a, 0x02, 0xd7, 0xd1, 0xc2, 0xd8, 0xc1, 0x30, 0x0d, 0xde, 0x9a, 0x51, - 0xf4, 0xda, 0x08, 0xc5, 0xd2, 0xa0, 0x19, 0x8b, 0xf6, 0x26, 0xbd, 0x40, 0xef, 0xd2, 0x65, 0x17, - 0x3d, 0x40, 0x71, 0x2f, 0x52, 0x34, 0x72, 0x02, 0x26, 0xbb, 0xf7, 0x7d, 0xdf, 0x7b, 0xdf, 0xc0, - 0x8f, 0x01, 0x5f, 0xa7, 0xb1, 0xae, 0x2b, 0x5b, 0x21, 0xd7, 0x69, 0xf8, 0x87, 0x01, 0x5f, 0xac, - 0x71, 0x0c, 0xbd, 0x82, 0xbe, 0x0b, 0x16, 0xb0, 0xe8, 0x5c, 0xb6, 0x23, 0xbe, 0x84, 0xd3, 0x46, - 0x3d, 0xec, 0x48, 0x70, 0xe7, 0x75, 0x02, 0xdf, 0xc0, 0x60, 0x67, 0xa8, 0xde, 0x6c, 0xc9, 0x2a, - 0xd1, 0x73, 0x89, 0xdf, 0x1a, 0x37, 0x64, 0x15, 0x0a, 0x38, 0x6b, 0xa8, 0x36, 0x79, 0x55, 0x8a, - 0x7e, 0xc0, 0xa2, 0xbe, 0x7c, 0x94, 0xf8, 0x0e, 0x80, 0xbe, 0xe9, 0xbc, 0x26, 0xb3, 0x51, 0x56, - 0x9c, 0xba, 0x70, 0x70, 0x70, 0xa6, 0x16, 0x11, 0xfa, 0xae, 0xd0, 0x73, 0x85, 0x6e, 0x6e, 0x5f, - 0x32, 0xb6, 0x26, 0xb5, 0xdd, 0xe4, 0x99, 0x80, 0x80, 0x45, 0x17, 0xd2, 0xef, 0x8c, 0x79, 0x86, - 0xef, 0x61, 0x78, 0x08, 0xb3, 0xaa, 0x24, 0x31, 0x0c, 0x58, 0xe4, 0x4b, 0xe8, 0xac, 0xeb, 0xaa, - 0xa4, 0x30, 0x00, 0x6f, 0xb1, 0x5e, 0xe6, 0xc6, 0xe2, 0x2b, 0xe0, 0x45, 0x23, 0x58, 0xd0, 0x8b, - 0x86, 0x57, 0x5e, 0xac, 0xd3, 0x78, 0xb1, 0x96, 0xbc, 0x68, 0xc2, 0x29, 0xbc, 0xb8, 0x51, 0x65, - 0xfe, 0x85, 0x8c, 0x9d, 0xdd, 0xab, 0xf2, 0x2b, 0x7d, 0x26, 0x8b, 0x97, 0x70, 0x76, 0xe7, 0x84, - 0x39, 0x5c, 0x60, 0x7b, 0x71, 0xbc, 0x27, 0x1f, 0x57, 0xc2, 0x9f, 0x0c, 0x46, 0xc7, 0x19, 0x8e, - 0x80, 0xcf, 0x33, 0x87, 0xb1, 0x2f, 0xf9, 0x3c, 0xc3, 0x4b, 0xe0, 0x2b, 0xed, 0x10, 0x8e, 0xae, - 0xde, 0x3e, 0xef, 0x8a, 0x57, 0x9a, 0x6a, 0x65, 0xf3, 0xaa, 0x94, 0x7c, 0xa5, 0x5b, 0xe6, 0x4b, - 0x6a, 0xe8, 0xc1, 0x91, 0xbd, 0x90, 0x9d, 0xc0, 0xd7, 0xe0, 0xcf, 0xee, 0xe9, 0xae, 0x30, 0xbb, - 0xad, 0xe3, 0x7a, 0x2e, 0x9f, 0x74, 0xf8, 0x01, 0x06, 0x4f, 0x15, 0x08, 0xe0, 0xcd, 0x64, 0x32, - 0xbd, 0x4d, 0xc6, 0x27, 0xed, 0x7c, 0x9d, 0x2c, 0x93, 0xdb, 0x64, 0xcc, 0x3e, 0x8d, 
0x7f, 0xed, - 0x27, 0xec, 0xf7, 0x7e, 0xc2, 0xfe, 0xee, 0x27, 0xec, 0xc7, 0xbf, 0xc9, 0x49, 0xea, 0xb9, 0x0f, - 0xf0, 0xf1, 0x7f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x50, 0x3d, 0x49, 0xb9, 0x0c, 0x02, 0x00, 0x00, -} - -func (m *KV) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *KV) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *KV) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.StreamDone { - i-- - if m.StreamDone { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x58 - } - if m.StreamId != 0 { - i = encodeVarintPb(dAtA, i, uint64(m.StreamId)) - i-- - dAtA[i] = 0x50 - } - if len(m.Meta) > 0 { - i -= len(m.Meta) - copy(dAtA[i:], m.Meta) - i = encodeVarintPb(dAtA, i, uint64(len(m.Meta))) - i-- - dAtA[i] = 0x32 - } - if m.ExpiresAt != 0 { - i = encodeVarintPb(dAtA, i, uint64(m.ExpiresAt)) - i-- - dAtA[i] = 0x28 - } - if m.Version != 0 { - i = encodeVarintPb(dAtA, i, uint64(m.Version)) - i-- - dAtA[i] = 0x20 - } - if len(m.UserMeta) > 0 { - i -= len(m.UserMeta) - copy(dAtA[i:], m.UserMeta) - i = encodeVarintPb(dAtA, i, uint64(len(m.UserMeta))) - i-- - dAtA[i] = 0x1a - } - if len(m.Value) > 0 { - i -= len(m.Value) - copy(dAtA[i:], m.Value) - i = encodeVarintPb(dAtA, i, uint64(len(m.Value))) - i-- - dAtA[i] = 0x12 - } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintPb(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *KVList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *KVList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *KVList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Kv) > 0 { - for iNdEx := len(m.Kv) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Kv[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintPb(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *ManifestChangeSet) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ManifestChangeSet) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ManifestChangeSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Changes) > 0 { - for iNdEx := len(m.Changes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Changes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintPb(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - 
i, nil -} - -func (m *ManifestChange) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ManifestChange) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ManifestChange) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Checksum) > 0 { - i -= len(m.Checksum) - copy(dAtA[i:], m.Checksum) - i = encodeVarintPb(dAtA, i, uint64(len(m.Checksum))) - i-- - dAtA[i] = 0x22 - } - if m.Level != 0 { - i = encodeVarintPb(dAtA, i, uint64(m.Level)) - i-- - dAtA[i] = 0x18 - } - if m.Op != 0 { - i = encodeVarintPb(dAtA, i, uint64(m.Op)) - i-- - dAtA[i] = 0x10 - } - if m.Id != 0 { - i = encodeVarintPb(dAtA, i, uint64(m.Id)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func encodeVarintPb(dAtA []byte, offset int, v uint64) int { - offset -= sovPb(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *KV) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Key) - if l > 0 { - n += 1 + l + sovPb(uint64(l)) - } - l = len(m.Value) - if l > 0 { - n += 1 + l + sovPb(uint64(l)) - } - l = len(m.UserMeta) - if l > 0 { - n += 1 + l + sovPb(uint64(l)) - } - if m.Version != 0 { - n += 1 + sovPb(uint64(m.Version)) - } - if m.ExpiresAt != 0 { - n += 1 + sovPb(uint64(m.ExpiresAt)) - } - l = len(m.Meta) - if l > 0 { - n += 1 + l + sovPb(uint64(l)) - } - if m.StreamId != 0 { - n += 1 + sovPb(uint64(m.StreamId)) - } - if m.StreamDone { - n += 2 - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *KVList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Kv) > 0 { - for _, e := range m.Kv { - l = e.Size() - n += 1 + l + sovPb(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ManifestChangeSet) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Changes) > 0 { - for _, e := range m.Changes { - l = e.Size() - n += 1 + l + sovPb(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ManifestChange) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Id != 0 { - n += 1 + sovPb(uint64(m.Id)) - } - if m.Op != 0 { - n += 1 + sovPb(uint64(m.Op)) - } - if m.Level != 0 { - n += 1 + sovPb(uint64(m.Level)) - } - l = len(m.Checksum) - if l > 0 { - n += 1 + l + sovPb(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovPb(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozPb(x uint64) (n int) { - return sovPb(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *KV) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPb - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return 
fmt.Errorf("proto: KV: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: KV: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPb - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthPb - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthPb - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) - if m.Key == nil { - m.Key = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPb - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthPb - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthPb - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) - if m.Value == nil { - m.Value = []byte{} - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UserMeta", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPb - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthPb - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthPb - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.UserMeta = append(m.UserMeta[:0], dAtA[iNdEx:postIndex]...) 
- if m.UserMeta == nil { - m.UserMeta = []byte{} - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) - } - m.Version = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPb - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Version |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ExpiresAt", wireType) - } - m.ExpiresAt = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPb - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ExpiresAt |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPb - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthPb - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthPb - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Meta = append(m.Meta[:0], dAtA[iNdEx:postIndex]...) - if m.Meta == nil { - m.Meta = []byte{} - } - iNdEx = postIndex - case 10: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StreamId", wireType) - } - m.StreamId = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPb - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.StreamId |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 11: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StreamDone", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPb - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.StreamDone = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipPb(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthPb - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthPb - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *KVList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPb - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: KVList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: KVList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kv", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPb - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthPb - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthPb - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Kv = append(m.Kv, &KV{}) - if err := m.Kv[len(m.Kv)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipPb(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthPb - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthPb - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ManifestChangeSet) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPb - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ManifestChangeSet: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ManifestChangeSet: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Changes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPb - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthPb - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthPb - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Changes = append(m.Changes, &ManifestChange{}) - if err := m.Changes[len(m.Changes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipPb(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthPb - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthPb - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ManifestChange) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPb - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ManifestChange: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ManifestChange: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) - } - m.Id = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPb - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Id |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Op", wireType) - } - m.Op = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPb - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Op |= ManifestChange_Operation(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Level", wireType) - } - m.Level = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPb - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Level |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Checksum", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPb - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthPb - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthPb - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Checksum = append(m.Checksum[:0], dAtA[iNdEx:postIndex]...) - if m.Checksum == nil { - m.Checksum = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipPb(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthPb - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthPb - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipPb(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPb - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPb - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPb - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthPb - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupPb - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthPb - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthPb = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowPb = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupPb = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/github.com/dgraph-io/badger/pb/pb.proto b/vendor/github.com/dgraph-io/badger/pb/pb.proto deleted file mode 100644 index faf0b65c..00000000 --- a/vendor/github.com/dgraph-io/badger/pb/pb.proto +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright (C) 2017 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// Use protos/gen.sh to generate .pb.go files. -syntax = "proto3"; - -package pb; - -message KV { - bytes key = 1; - bytes value = 2; - bytes user_meta = 3; - uint64 version = 4; - uint64 expires_at = 5; - bytes meta = 6; - - // Stream id is used to identify which stream the KV came from. - uint32 stream_id = 10; - // Stream done is used to indicate end of stream. - bool stream_done = 11; -} - -message KVList { - repeated KV kv = 1; -} - -message ManifestChangeSet { - // A set of changes that are applied atomically. 
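
All of the generated marshalling above rests on protobuf's base-128 varint wire format (see encodeVarintPb and sovPb): seven payload bits per byte, with the high bit set on every byte except the last. A self-contained sketch of the scheme; the standard library ships the same encoding as encoding/binary's PutUvarint and Uvarint:

```go
package main

import "fmt"

// putUvarint writes v in base-128 varint form; buf needs up to 10 bytes.
func putUvarint(buf []byte, v uint64) int {
	i := 0
	for v >= 0x80 {
		buf[i] = byte(v) | 0x80 // low 7 bits, continuation bit set
		v >>= 7
		i++
	}
	buf[i] = byte(v) // final byte has the high bit clear
	return i + 1
}

// uvarint decodes one varint, returning the value and bytes consumed
// (0 if the input was truncated).
func uvarint(buf []byte) (uint64, int) {
	var v uint64
	for i, b := range buf {
		v |= uint64(b&0x7F) << (7 * uint(i))
		if b < 0x80 {
			return v, i + 1
		}
	}
	return 0, 0
}

func main() {
	var buf [10]byte
	n := putUvarint(buf[:], 300)
	fmt.Printf("% x\n", buf[:n]) // ac 02
	v, _ := uvarint(buf[:n])
	fmt.Println(v) // 300
}
```
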
- repeated ManifestChange changes = 1; -} - -message ManifestChange { - uint64 Id = 1; - enum Operation { - CREATE = 0; - DELETE = 1; - } - Operation Op = 2; - uint32 Level = 3; // Only used for CREATE - bytes Checksum = 4; // Only used for CREATE -} diff --git a/vendor/github.com/dgraph-io/badger/publisher.go b/vendor/github.com/dgraph-io/badger/publisher.go deleted file mode 100644 index 7458b0d9..00000000 --- a/vendor/github.com/dgraph-io/badger/publisher.go +++ /dev/null @@ -1,164 +0,0 @@ -/* - * Copyright 2019 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package badger - -import ( - "sync" - - "github.com/dgraph-io/badger/pb" - "github.com/dgraph-io/badger/trie" - "github.com/dgraph-io/badger/y" -) - -type subscriber struct { - prefixes [][]byte - sendCh chan<- *pb.KVList - subCloser *y.Closer -} - -type publisher struct { - sync.Mutex - pubCh chan requests - subscribers map[uint64]subscriber - nextID uint64 - indexer *trie.Trie -} - -func newPublisher() *publisher { - return &publisher{ - pubCh: make(chan requests, 1000), - subscribers: make(map[uint64]subscriber), - nextID: 0, - indexer: trie.NewTrie(), - } -} - -func (p *publisher) listenForUpdates(c *y.Closer) { - defer func() { - p.cleanSubscribers() - c.Done() - }() - slurp := func(batch requests) { - for { - select { - case reqs := <-p.pubCh: - batch = append(batch, reqs...) - default: - p.publishUpdates(batch) - return - } - } - } - for { - select { - case <-c.HasBeenClosed(): - return - case reqs := <-p.pubCh: - slurp(reqs) - } - } -} - -func (p *publisher) publishUpdates(reqs requests) { - p.Lock() - defer func() { - p.Unlock() - // Release all the request. - reqs.DecrRef() - }() - batchedUpdates := make(map[uint64]*pb.KVList) - for _, req := range reqs { - for _, e := range req.Entries { - ids := p.indexer.Get(e.Key) - if len(ids) > 0 { - k := y.SafeCopy(nil, e.Key) - kv := &pb.KV{ - Key: y.ParseKey(k), - Value: y.SafeCopy(nil, e.Value), - Meta: []byte{e.UserMeta}, - ExpiresAt: e.ExpiresAt, - Version: y.ParseTs(k), - } - for id := range ids { - if _, ok := batchedUpdates[id]; !ok { - batchedUpdates[id] = &pb.KVList{} - } - batchedUpdates[id].Kv = append(batchedUpdates[id].Kv, kv) - } - } - } - } - - for id, kvs := range batchedUpdates { - p.subscribers[id].sendCh <- kvs - } -} - -func (p *publisher) newSubscriber(c *y.Closer, prefixes ...[]byte) (<-chan *pb.KVList, uint64) { - p.Lock() - defer p.Unlock() - ch := make(chan *pb.KVList, 1000) - id := p.nextID - // Increment next ID. - p.nextID++ - p.subscribers[id] = subscriber{ - prefixes: prefixes, - sendCh: ch, - subCloser: c, - } - for _, prefix := range prefixes { - p.indexer.Add(prefix, id) - } - return ch, id -} - -// cleanSubscribers stops all the subscribers. Ideally, It should be called while closing DB. 
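
The publisher above batches write requests and fans them out to subscribers that registered byte prefixes (badger indexes the prefixes with a trie). A toy, stdlib-only sketch of the same fan-out shape; all types here are hypothetical, and a real implementation must also handle slow or departing subscribers:

```go
package main

import (
	"bytes"
	"fmt"
	"sync"
)

type update struct{ key, value []byte }

type sub struct {
	prefix []byte
	ch     chan []update
}

type publisher struct {
	mu     sync.Mutex
	subs   map[uint64]sub
	nextID uint64
}

func newPublisher() *publisher {
	return &publisher{subs: make(map[uint64]sub)}
}

// subscribe registers a prefix and returns the subscriber id and channel.
func (p *publisher) subscribe(prefix []byte) (uint64, <-chan []update) {
	p.mu.Lock()
	defer p.mu.Unlock()
	id := p.nextID
	p.nextID++
	ch := make(chan []update, 16)
	p.subs[id] = sub{prefix: prefix, ch: ch}
	return id, ch
}

// publish delivers the matching slice of a batch to every subscriber.
func (p *publisher) publish(batch []update) {
	p.mu.Lock()
	defer p.mu.Unlock()
	for _, s := range p.subs {
		var matched []update
		for _, u := range batch {
			if bytes.HasPrefix(u.key, s.prefix) {
				matched = append(matched, u)
			}
		}
		if len(matched) > 0 {
			s.ch <- matched // buffered; real code must not block on slow readers
		}
	}
}

func main() {
	p := newPublisher()
	_, ch := p.subscribe([]byte("user/"))
	p.publish([]update{
		{key: []byte("user/1"), value: []byte("a")},
		{key: []byte("job/7"), value: []byte("b")},
	})
	fmt.Println(len(<-ch)) // 1
}
```
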
-func (p *publisher) cleanSubscribers() { - p.Lock() - defer p.Unlock() - for id, s := range p.subscribers { - for _, prefix := range s.prefixes { - p.indexer.Delete(prefix, id) - } - delete(p.subscribers, id) - s.subCloser.SignalAndWait() - } -} - -func (p *publisher) deleteSubscriber(id uint64) { - p.Lock() - defer p.Unlock() - if s, ok := p.subscribers[id]; ok { - for _, prefix := range s.prefixes { - p.indexer.Delete(prefix, id) - } - } - delete(p.subscribers, id) -} - -func (p *publisher) sendUpdates(reqs requests) { - if p.noOfSubscribers() != 0 { - reqs.IncrRef() - p.pubCh <- reqs - } -} - -func (p *publisher) noOfSubscribers() int { - p.Lock() - defer p.Unlock() - return len(p.subscribers) -} diff --git a/vendor/github.com/dgraph-io/badger/skl/README.md b/vendor/github.com/dgraph-io/badger/skl/README.md deleted file mode 100644 index e22e4590..00000000 --- a/vendor/github.com/dgraph-io/badger/skl/README.md +++ /dev/null @@ -1,113 +0,0 @@ -This is much better than `skiplist` and `slist`. - -``` -BenchmarkReadWrite/frac_0-8 3000000 537 ns/op -BenchmarkReadWrite/frac_1-8 3000000 503 ns/op -BenchmarkReadWrite/frac_2-8 3000000 492 ns/op -BenchmarkReadWrite/frac_3-8 3000000 475 ns/op -BenchmarkReadWrite/frac_4-8 3000000 440 ns/op -BenchmarkReadWrite/frac_5-8 5000000 442 ns/op -BenchmarkReadWrite/frac_6-8 5000000 380 ns/op -BenchmarkReadWrite/frac_7-8 5000000 338 ns/op -BenchmarkReadWrite/frac_8-8 5000000 294 ns/op -BenchmarkReadWrite/frac_9-8 10000000 268 ns/op -BenchmarkReadWrite/frac_10-8 100000000 26.3 ns/op -``` - -And even better than a simple map with read-write lock: - -``` -BenchmarkReadWriteMap/frac_0-8 2000000 774 ns/op -BenchmarkReadWriteMap/frac_1-8 2000000 647 ns/op -BenchmarkReadWriteMap/frac_2-8 3000000 605 ns/op -BenchmarkReadWriteMap/frac_3-8 3000000 603 ns/op -BenchmarkReadWriteMap/frac_4-8 3000000 556 ns/op -BenchmarkReadWriteMap/frac_5-8 3000000 472 ns/op -BenchmarkReadWriteMap/frac_6-8 3000000 476 ns/op -BenchmarkReadWriteMap/frac_7-8 3000000 457 ns/op -BenchmarkReadWriteMap/frac_8-8 5000000 444 ns/op -BenchmarkReadWriteMap/frac_9-8 5000000 361 ns/op -BenchmarkReadWriteMap/frac_10-8 10000000 212 ns/op -``` - -# Node Pooling - -Command used - -``` -rm -Rf tmp && /usr/bin/time -l ./populate -keys_mil 10 -``` - -For pprof results, we run without using /usr/bin/time. There are four runs below. - -Results seem to vary quite a bit between runs. 
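The `frac_N` suffix in these benchmarks means N/10 of the operations are reads and the rest are writes. A harness of roughly that shape for the map-with-RWMutex baseline (a sketch; the real benchmark lives in the package's tests):

```go
func BenchmarkReadWriteMap(b *testing.B) {
	for i := 0; i <= 10; i++ {
		readFrac := float32(i) / 10.0
		b.Run(fmt.Sprintf("frac_%d", i), func(b *testing.B) {
			m := make(map[uint64][]byte)
			var mu sync.RWMutex
			rng := rand.New(rand.NewSource(1))
			b.ResetTimer()
			for n := 0; n < b.N; n++ {
				key := uint64(rng.Int63())
				if rng.Float32() < readFrac {
					mu.RLock()
					_ = m[key]
					mu.RUnlock()
				} else {
					mu.Lock()
					m[key] = []byte("value")
					mu.Unlock()
				}
			}
		})
	}
}
```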
- -## Before node pooling - -``` -1311.53MB of 1338.69MB total (97.97%) -Dropped 30 nodes (cum <= 6.69MB) -Showing top 10 nodes out of 37 (cum >= 12.50MB) - flat flat% sum% cum cum% - 523.04MB 39.07% 39.07% 523.04MB 39.07% github.com/dgraph-io/badger/skl.(*Skiplist).Put - 184.51MB 13.78% 52.85% 184.51MB 13.78% runtime.stringtoslicebyte - 166.01MB 12.40% 65.25% 689.04MB 51.47% github.com/dgraph-io/badger/mem.(*Table).Put - 165MB 12.33% 77.58% 165MB 12.33% runtime.convT2E - 116.92MB 8.73% 86.31% 116.92MB 8.73% bytes.makeSlice - 62.50MB 4.67% 90.98% 62.50MB 4.67% main.newValue - 34.50MB 2.58% 93.56% 34.50MB 2.58% github.com/dgraph-io/badger/table.(*BlockIterator).parseKV - 25.50MB 1.90% 95.46% 100.06MB 7.47% github.com/dgraph-io/badger/y.(*MergeIterator).Next - 21.06MB 1.57% 97.04% 21.06MB 1.57% github.com/dgraph-io/badger/table.(*Table).read - 12.50MB 0.93% 97.97% 12.50MB 0.93% github.com/dgraph-io/badger/table.header.Encode - - 128.31 real 329.37 user 17.11 sys -3355660288 maximum resident set size - 0 average shared memory size - 0 average unshared data size - 0 average unshared stack size - 2203080 page reclaims - 764 page faults - 0 swaps - 275 block input operations - 76 block output operations - 0 messages sent - 0 messages received - 0 signals received - 49173 voluntary context switches - 599922 involuntary context switches -``` - -## After node pooling - -``` -1963.13MB of 2026.09MB total (96.89%) -Dropped 29 nodes (cum <= 10.13MB) -Showing top 10 nodes out of 41 (cum >= 185.62MB) - flat flat% sum% cum cum% - 658.05MB 32.48% 32.48% 658.05MB 32.48% github.com/dgraph-io/badger/skl.glob..func1 - 297.51MB 14.68% 47.16% 297.51MB 14.68% runtime.convT2E - 257.51MB 12.71% 59.87% 257.51MB 12.71% runtime.stringtoslicebyte - 249.01MB 12.29% 72.16% 1007.06MB 49.70% github.com/dgraph-io/badger/mem.(*Table).Put - 142.43MB 7.03% 79.19% 142.43MB 7.03% bytes.makeSlice - 100MB 4.94% 84.13% 758.05MB 37.41% github.com/dgraph-io/badger/skl.newNode - 99.50MB 4.91% 89.04% 99.50MB 4.91% main.newValue - 75MB 3.70% 92.74% 75MB 3.70% github.com/dgraph-io/badger/table.(*BlockIterator).parseKV - 44.62MB 2.20% 94.94% 44.62MB 2.20% github.com/dgraph-io/badger/table.(*Table).read - 39.50MB 1.95% 96.89% 185.62MB 9.16% github.com/dgraph-io/badger/y.(*MergeIterator).Next - - 135.58 real 374.29 user 17.65 sys -3740614656 maximum resident set size - 0 average shared memory size - 0 average unshared data size - 0 average unshared stack size - 2276566 page reclaims - 770 page faults - 0 swaps - 128 block input operations - 90 block output operations - 0 messages sent - 0 messages received - 0 signals received - 46434 voluntary context switches - 597049 involuntary context switches -``` diff --git a/vendor/github.com/dgraph-io/badger/skl/arena.go b/vendor/github.com/dgraph-io/badger/skl/arena.go deleted file mode 100644 index def55071..00000000 --- a/vendor/github.com/dgraph-io/badger/skl/arena.go +++ /dev/null @@ -1,136 +0,0 @@ -/* - * Copyright 2017 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package skl - -import ( - "sync/atomic" - "unsafe" - - "github.com/dgraph-io/badger/y" -) - -const ( - offsetSize = int(unsafe.Sizeof(uint32(0))) - - // Always align nodes on 64-bit boundaries, even on 32-bit architectures, - // so that the node.value field is 64-bit aligned. This is necessary because - // node.getValueOffset uses atomic.LoadUint64, which expects its input - // pointer to be 64-bit aligned. - nodeAlign = int(unsafe.Sizeof(uint64(0))) - 1 -) - -// Arena should be lock-free. -type Arena struct { - n uint32 - buf []byte -} - -// newArena returns a new arena. -func newArena(n int64) *Arena { - // Don't store data at position 0 in order to reserve offset=0 as a kind - // of nil pointer. - out := &Arena{ - n: 1, - buf: make([]byte, n), - } - return out -} - -func (s *Arena) size() int64 { - return int64(atomic.LoadUint32(&s.n)) -} - -func (s *Arena) reset() { - atomic.StoreUint32(&s.n, 0) -} - -// putNode allocates a node in the arena. The node is aligned on a pointer-sized -// boundary. The arena offset of the node is returned. -func (s *Arena) putNode(height int) uint32 { - // Compute the amount of the tower that will never be used, since the height - // is less than maxHeight. - unusedSize := (maxHeight - height) * offsetSize - - // Pad the allocation with enough bytes to ensure pointer alignment. - l := uint32(MaxNodeSize - unusedSize + nodeAlign) - n := atomic.AddUint32(&s.n, l) - y.AssertTruef(int(n) <= len(s.buf), - "Arena too small, toWrite:%d newTotal:%d limit:%d", - l, n, len(s.buf)) - - // Return the aligned offset. - m := (n - l + uint32(nodeAlign)) & ^uint32(nodeAlign) - return m -} - -// Put will *copy* val into arena. To make better use of this, reuse your input -// val buffer. Returns an offset into buf. User is responsible for remembering -// size of val. We could also store this size inside arena but the encoding and -// decoding will incur some overhead. -func (s *Arena) putVal(v y.ValueStruct) uint32 { - l := uint32(v.EncodedSize()) - n := atomic.AddUint32(&s.n, l) - y.AssertTruef(int(n) <= len(s.buf), - "Arena too small, toWrite:%d newTotal:%d limit:%d", - l, n, len(s.buf)) - m := n - l - v.Encode(s.buf[m:]) - return m -} - -func (s *Arena) putKey(key []byte) uint32 { - l := uint32(len(key)) - n := atomic.AddUint32(&s.n, l) - y.AssertTruef(int(n) <= len(s.buf), - "Arena too small, toWrite:%d newTotal:%d limit:%d", - l, n, len(s.buf)) - m := n - l - y.AssertTrue(len(key) == copy(s.buf[m:n], key)) - return m -} - -// getNode returns a pointer to the node located at offset. If the offset is -// zero, then the nil node pointer is returned. -func (s *Arena) getNode(offset uint32) *node { - if offset == 0 { - return nil - } - - return (*node)(unsafe.Pointer(&s.buf[offset])) -} - -// getKey returns byte slice at offset. -func (s *Arena) getKey(offset uint32, size uint16) []byte { - return s.buf[offset : offset+uint32(size)] -} - -// getVal returns byte slice at offset. The given size should be just the value -// size and should NOT include the meta bytes. -func (s *Arena) getVal(offset uint32, size uint16) (ret y.ValueStruct) { - ret.Decode(s.buf[offset : offset+uint32(size)]) - return -} - -// getNodeOffset returns the offset of node in the arena. If the node pointer is -// nil, then the zero offset is returned. 
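`putNode` above over-allocates by `nodeAlign` bytes and rounds the start offset up, which is what keeps `node.value` 64-bit aligned even on 32-bit platforms. The arithmetic on its own, as a sketch of the same bump-allocation step (`allocAligned` is a hypothetical name):

```go
import "sync/atomic"

// allocAligned reserves size bytes in a bump arena whose high-water
// mark is *n, padding so the returned offset is a multiple of align
// (a power of two). Mirrors Arena.putNode, where align-1 is nodeAlign.
func allocAligned(n *uint32, bufLen int, size, align uint32) (uint32, bool) {
	padded := size + align - 1
	end := atomic.AddUint32(n, padded)
	if int(end) > bufLen {
		return 0, false // arena too small
	}
	// Round the allocation's start up to the next aligned offset.
	start := (end - padded + align - 1) &^ (align - 1)
	return start, true
}
```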
-func (s *Arena) getNodeOffset(nd *node) uint32 { - if nd == nil { - return 0 - } - - return uint32(uintptr(unsafe.Pointer(nd)) - uintptr(unsafe.Pointer(&s.buf[0]))) -} diff --git a/vendor/github.com/dgraph-io/badger/skl/skl.go b/vendor/github.com/dgraph-io/badger/skl/skl.go deleted file mode 100644 index 65647ff5..00000000 --- a/vendor/github.com/dgraph-io/badger/skl/skl.go +++ /dev/null @@ -1,517 +0,0 @@ -/* - * Copyright 2017 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* -Adapted from RocksDB inline skiplist. - -Key differences: -- No optimization for sequential inserts (no "prev"). -- No custom comparator. -- Support overwrites. This requires care when we see the same key when inserting. - For RocksDB or LevelDB, overwrites are implemented as a newer sequence number in the key, so - there is no need for values. We don't intend to support versioning. In-place updates of values - would be more efficient. -- We discard all non-concurrent code. -- We do not support Splices. This simplifies the code a lot. -- No AllocateNode or other pointer arithmetic. -- We combine the findLessThan, findGreaterOrEqual, etc into one function. -*/ - -package skl - -import ( - "math" - "sync/atomic" - "unsafe" - - "github.com/dgraph-io/badger/y" - "github.com/dgraph-io/ristretto/z" -) - -const ( - maxHeight = 20 - heightIncrease = math.MaxUint32 / 3 -) - -// MaxNodeSize is the memory footprint of a node of maximum height. -const MaxNodeSize = int(unsafe.Sizeof(node{})) - -type node struct { - // Multiple parts of the value are encoded as a single uint64 so that it - // can be atomically loaded and stored: - // value offset: uint32 (bits 0-31) - // value size : uint16 (bits 32-47) - value uint64 - - // A byte slice is 24 bytes. We are trying to save space here. - keyOffset uint32 // Immutable. No need to lock to access key. - keySize uint16 // Immutable. No need to lock to access key. - - // Height of the tower. - height uint16 - - // Most nodes do not need to use the full height of the tower, since the - // probability of each successive level decreases exponentially. Because - // these elements are never accessed, they do not need to be allocated. - // Therefore, when a node is allocated in the arena, its memory footprint - // is deliberately truncated to not include unneeded tower elements. - // - // All accesses to elements should use CAS operations, with no need to lock. - tower [maxHeight]uint32 -} - -// Skiplist maps keys to values (in memory) -type Skiplist struct { - height int32 // Current height. 1 <= height <= kMaxHeight. CAS. - head *node - ref int32 - arena *Arena -} - -// IncrRef increases the refcount -func (s *Skiplist) IncrRef() { - atomic.AddInt32(&s.ref, 1) -} - -// DecrRef decrements the refcount, deallocating the Skiplist when done using it -func (s *Skiplist) DecrRef() { - newRef := atomic.AddInt32(&s.ref, -1) - if newRef > 0 { - return - } - - s.arena.reset() - // Indicate we are closed. Good for testing. 
Also, lets GC reclaim memory. Race condition - // here would suggest we are accessing skiplist when we are supposed to have no reference! - s.arena = nil - // Since the head references the arena's buf, as long as the head is kept around - // GC can't release the buf. - s.head = nil -} - -func newNode(arena *Arena, key []byte, v y.ValueStruct, height int) *node { - // The base level is already allocated in the node struct. - offset := arena.putNode(height) - node := arena.getNode(offset) - node.keyOffset = arena.putKey(key) - node.keySize = uint16(len(key)) - node.height = uint16(height) - node.value = encodeValue(arena.putVal(v), v.EncodedSize()) - return node -} - -func encodeValue(valOffset uint32, valSize uint16) uint64 { - return uint64(valSize)<<32 | uint64(valOffset) -} - -func decodeValue(value uint64) (valOffset uint32, valSize uint16) { - valOffset = uint32(value) - valSize = uint16(value >> 32) - return -} - -// NewSkiplist makes a new empty skiplist, with a given arena size -func NewSkiplist(arenaSize int64) *Skiplist { - arena := newArena(arenaSize) - head := newNode(arena, nil, y.ValueStruct{}, maxHeight) - return &Skiplist{ - height: 1, - head: head, - arena: arena, - ref: 1, - } -} - -func (s *node) getValueOffset() (uint32, uint16) { - value := atomic.LoadUint64(&s.value) - return decodeValue(value) -} - -func (s *node) key(arena *Arena) []byte { - return arena.getKey(s.keyOffset, s.keySize) -} - -func (s *node) setValue(arena *Arena, v y.ValueStruct) { - valOffset := arena.putVal(v) - value := encodeValue(valOffset, v.EncodedSize()) - atomic.StoreUint64(&s.value, value) -} - -func (s *node) getNextOffset(h int) uint32 { - return atomic.LoadUint32(&s.tower[h]) -} - -func (s *node) casNextOffset(h int, old, val uint32) bool { - return atomic.CompareAndSwapUint32(&s.tower[h], old, val) -} - -// Returns true if key is strictly > n.key. -// If n is nil, this is an "end" marker and we return false. -//func (s *Skiplist) keyIsAfterNode(key []byte, n *node) bool { -// y.AssertTrue(n != s.head) -// return n != nil && y.CompareKeys(key, n.key) > 0 -//} - -func (s *Skiplist) randomHeight() int { - h := 1 - for h < maxHeight && z.FastRand() <= heightIncrease { - h++ - } - return h -} - -func (s *Skiplist) getNext(nd *node, height int) *node { - return s.arena.getNode(nd.getNextOffset(height)) -} - -// findNear finds the node near to key. -// If less=true, it finds rightmost node such that node.key < key (if allowEqual=false) or -// node.key <= key (if allowEqual=true). -// If less=false, it finds leftmost node such that node.key > key (if allowEqual=false) or -// node.key >= key (if allowEqual=true). -// Returns the node found. The bool returned is true if the node has key equal to given key. -func (s *Skiplist) findNear(key []byte, less bool, allowEqual bool) (*node, bool) { - x := s.head - level := int(s.getHeight() - 1) - for { - // Assume x.key < key. - next := s.getNext(x, level) - if next == nil { - // x.key < key < END OF LIST - if level > 0 { - // Can descend further to iterate closer to the end. - level-- - continue - } - // Level=0. Cannot descend further. Let's return something that makes sense. - if !less { - return nil, false - } - // Try to return x. Make sure it is not a head node. - if x == s.head { - return nil, false - } - return x, false - } - - nextKey := next.key(s.arena) - cmp := y.CompareKeys(key, nextKey) - if cmp > 0 { - // x.key < next.key < key. We can continue to move right. - x = next - continue - } - if cmp == 0 { - // x.key < key == next.key. 
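Because offset and size are packed into one `uint64` (see `encodeValue`/`decodeValue` above), `setValue` can replace a node's value with a single atomic store and readers decode it from a single atomic load; no reader ever observes a mixed offset/size pair. The pattern in isolation:

```go
import "sync/atomic"

// The offset lives in the low 32 bits, the size in bits 32..47.
func pack(off uint32, size uint16) uint64 { return uint64(size)<<32 | uint64(off) }

func unpack(v uint64) (off uint32, size uint16) { return uint32(v), uint16(v >> 32) }

// overwrite publishes a new (offset, size) pair in one atomic word.
func overwrite(word *uint64, off uint32, size uint16) {
	atomic.StoreUint64(word, pack(off, size))
}

func read(word *uint64) (uint32, uint16) {
	return unpack(atomic.LoadUint64(word))
}
```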
- if allowEqual { - return next, true - } - if !less { - // We want >, so go to base level to grab the next bigger note. - return s.getNext(next, 0), false - } - // We want <. If not base level, we should go closer in the next level. - if level > 0 { - level-- - continue - } - // On base level. Return x. - if x == s.head { - return nil, false - } - return x, false - } - // cmp < 0. In other words, x.key < key < next. - if level > 0 { - level-- - continue - } - // At base level. Need to return something. - if !less { - return next, false - } - // Try to return x. Make sure it is not a head node. - if x == s.head { - return nil, false - } - return x, false - } -} - -// findSpliceForLevel returns (outBefore, outAfter) with outBefore.key <= key <= outAfter.key. -// The input "before" tells us where to start looking. -// If we found a node with the same key, then we return outBefore = outAfter. -// Otherwise, outBefore.key < key < outAfter.key. -func (s *Skiplist) findSpliceForLevel(key []byte, before *node, level int) (*node, *node) { - for { - // Assume before.key < key. - next := s.getNext(before, level) - if next == nil { - return before, next - } - nextKey := next.key(s.arena) - cmp := y.CompareKeys(key, nextKey) - if cmp == 0 { - // Equality case. - return next, next - } - if cmp < 0 { - // before.key < key < next.key. We are done for this level. - return before, next - } - before = next // Keep moving right on this level. - } -} - -func (s *Skiplist) getHeight() int32 { - return atomic.LoadInt32(&s.height) -} - -// Put inserts the key-value pair. -func (s *Skiplist) Put(key []byte, v y.ValueStruct) { - // Since we allow overwrite, we may not need to create a new node. We might not even need to - // increase the height. Let's defer these actions. - - listHeight := s.getHeight() - var prev [maxHeight + 1]*node - var next [maxHeight + 1]*node - prev[listHeight] = s.head - next[listHeight] = nil - for i := int(listHeight) - 1; i >= 0; i-- { - // Use higher level to speed up for current level. - prev[i], next[i] = s.findSpliceForLevel(key, prev[i+1], i) - if prev[i] == next[i] { - prev[i].setValue(s.arena, v) - return - } - } - - // We do need to create a new node. - height := s.randomHeight() - x := newNode(s.arena, key, v, height) - - // Try to increase s.height via CAS. - listHeight = s.getHeight() - for height > int(listHeight) { - if atomic.CompareAndSwapInt32(&s.height, listHeight, int32(height)) { - // Successfully increased skiplist.height. - break - } - listHeight = s.getHeight() - } - - // We always insert from the base level and up. After you add a node in base level, we cannot - // create a node in the level above because it would have discovered the node in the base level. - for i := 0; i < height; i++ { - for { - if prev[i] == nil { - y.AssertTrue(i > 1) // This cannot happen in base level. - // We haven't computed prev, next for this level because height exceeds old listHeight. - // For these levels, we expect the lists to be sparse, so we can just search from head. - prev[i], next[i] = s.findSpliceForLevel(key, s.head, i) - // Someone adds the exact same key before we are able to do so. This can only happen on - // the base level. But we know we are not on the base level. - y.AssertTrue(prev[i] != next[i]) - } - nextOffset := s.arena.getNodeOffset(next[i]) - x.tower[i] = nextOffset - if prev[i].casNextOffset(i, nextOffset, s.arena.getNodeOffset(x)) { - // Managed to insert x between prev[i] and next[i]. Go to the next level. - break - } - // CAS failed. 
We need to recompute prev and next. - // It is unlikely to be helpful to try to use a different level as we redo the search, - // because it is unlikely that lots of nodes are inserted between prev[i] and next[i]. - prev[i], next[i] = s.findSpliceForLevel(key, prev[i], i) - if prev[i] == next[i] { - y.AssertTruef(i == 0, "Equality can happen only on base level: %d", i) - prev[i].setValue(s.arena, v) - return - } - } - } -} - -// Empty returns if the Skiplist is empty. -func (s *Skiplist) Empty() bool { - return s.findLast() == nil -} - -// findLast returns the last element. If head (empty list), we return nil. All the find functions -// will NEVER return the head nodes. -func (s *Skiplist) findLast() *node { - n := s.head - level := int(s.getHeight()) - 1 - for { - next := s.getNext(n, level) - if next != nil { - n = next - continue - } - if level == 0 { - if n == s.head { - return nil - } - return n - } - level-- - } -} - -// Get gets the value associated with the key. It returns a valid value if it finds equal or earlier -// version of the same key. -func (s *Skiplist) Get(key []byte) y.ValueStruct { - n, _ := s.findNear(key, false, true) // findGreaterOrEqual. - if n == nil { - return y.ValueStruct{} - } - - nextKey := s.arena.getKey(n.keyOffset, n.keySize) - if !y.SameKey(key, nextKey) { - return y.ValueStruct{} - } - - valOffset, valSize := n.getValueOffset() - vs := s.arena.getVal(valOffset, valSize) - vs.Version = y.ParseTs(nextKey) - return vs -} - -// NewIterator returns a skiplist iterator. You have to Close() the iterator. -func (s *Skiplist) NewIterator() *Iterator { - s.IncrRef() - return &Iterator{list: s} -} - -// MemSize returns the size of the Skiplist in terms of how much memory is used within its internal -// arena. -func (s *Skiplist) MemSize() int64 { return s.arena.size() } - -// Iterator is an iterator over skiplist object. For new objects, you just -// need to initialize Iterator.list. -type Iterator struct { - list *Skiplist - n *node -} - -// Close frees the resources held by the iterator -func (s *Iterator) Close() error { - s.list.DecrRef() - return nil -} - -// Valid returns true iff the iterator is positioned at a valid node. -func (s *Iterator) Valid() bool { return s.n != nil } - -// Key returns the key at the current position. -func (s *Iterator) Key() []byte { - return s.list.arena.getKey(s.n.keyOffset, s.n.keySize) -} - -// Value returns value. -func (s *Iterator) Value() y.ValueStruct { - valOffset, valSize := s.n.getValueOffset() - return s.list.arena.getVal(valOffset, valSize) -} - -// Next advances to the next position. -func (s *Iterator) Next() { - y.AssertTrue(s.Valid()) - s.n = s.list.getNext(s.n, 0) -} - -// Prev advances to the previous position. -func (s *Iterator) Prev() { - y.AssertTrue(s.Valid()) - s.n, _ = s.list.findNear(s.Key(), true, false) // find <. No equality allowed. -} - -// Seek advances to the first entry with a key >= target. -func (s *Iterator) Seek(target []byte) { - s.n, _ = s.list.findNear(target, false, true) // find >=. -} - -// SeekForPrev finds an entry with key <= target. -func (s *Iterator) SeekForPrev(target []byte) { - s.n, _ = s.list.findNear(target, true, true) // find <=. -} - -// SeekToFirst seeks position at the first entry in list. -// Final state of iterator is Valid() iff list is not empty. -func (s *Iterator) SeekToFirst() { - s.n = s.list.getNext(s.list.head, 0) -} - -// SeekToLast seeks position at the last entry in list. -// Final state of iterator is Valid() iff list is not empty. 
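Putting the removed API together, a caller used the skiplist roughly like this (a hypothetical sketch; keys carry a timestamp suffix via `y.KeyWithTs`, as elsewhere in badger):

```go
s := skl.NewSkiplist(1 << 20) // 1 MB arena
defer s.DecrRef()             // frees the arena once the refcount hits zero

s.Put(y.KeyWithTs([]byte("a"), 1), y.ValueStruct{Value: []byte("va")})
s.Put(y.KeyWithTs([]byte("b"), 1), y.ValueStruct{Value: []byte("vb")})

vs := s.Get(y.KeyWithTs([]byte("a"), 1))
fmt.Printf("a => %s\n", vs.Value)

it := s.NewIterator()
defer it.Close()
for it.SeekToFirst(); it.Valid(); it.Next() {
	fmt.Printf("%s => %s\n", y.ParseKey(it.Key()), it.Value().Value)
}
```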
-func (s *Iterator) SeekToLast() { - s.n = s.list.findLast() -} - -// UniIterator is a unidirectional memtable iterator. It is a thin wrapper around -// Iterator. We like to keep Iterator as before, because it is more powerful and -// we might support bidirectional iterators in the future. -type UniIterator struct { - iter *Iterator - reversed bool -} - -// NewUniIterator returns a UniIterator. -func (s *Skiplist) NewUniIterator(reversed bool) *UniIterator { - return &UniIterator{ - iter: s.NewIterator(), - reversed: reversed, - } -} - -// Next implements y.Interface -func (s *UniIterator) Next() { - if !s.reversed { - s.iter.Next() - } else { - s.iter.Prev() - } -} - -// Rewind implements y.Interface -func (s *UniIterator) Rewind() { - if !s.reversed { - s.iter.SeekToFirst() - } else { - s.iter.SeekToLast() - } -} - -// Seek implements y.Interface -func (s *UniIterator) Seek(key []byte) { - if !s.reversed { - s.iter.Seek(key) - } else { - s.iter.SeekForPrev(key) - } -} - -// Key implements y.Interface -func (s *UniIterator) Key() []byte { return s.iter.Key() } - -// Value implements y.Interface -func (s *UniIterator) Value() y.ValueStruct { return s.iter.Value() } - -// Valid implements y.Interface -func (s *UniIterator) Valid() bool { return s.iter.Valid() } - -// Close implements y.Interface (and frees up the iter's resources) -func (s *UniIterator) Close() error { return s.iter.Close() } diff --git a/vendor/github.com/dgraph-io/badger/stream.go b/vendor/github.com/dgraph-io/badger/stream.go deleted file mode 100644 index d89a4af8..00000000 --- a/vendor/github.com/dgraph-io/badger/stream.go +++ /dev/null @@ -1,386 +0,0 @@ -/* - * Copyright 2018 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package badger - -import ( - "bytes" - "context" - "math" - "sync" - "sync/atomic" - "time" - - "github.com/dgraph-io/badger/pb" - "github.com/dgraph-io/badger/y" - humanize "github.com/dustin/go-humanize" - "github.com/golang/protobuf/proto" -) - -const pageSize = 4 << 20 // 4MB - -// Stream provides a framework to concurrently iterate over a snapshot of Badger, pick up -// key-values, batch them up and call Send. Stream does concurrent iteration over many smaller key -// ranges. It does NOT send keys in lexicographical sorted order. To get keys in sorted -// order, use Iterator. -type Stream struct { - // Prefix to only iterate over certain range of keys. If set to nil (default), Stream would - // iterate over the entire DB. - Prefix []byte - - // Number of goroutines to use for iterating over key ranges. Defaults to 16. - NumGo int - - // Badger would produce log entries in Infof to indicate the progress of Stream. LogPrefix can - // be used to help differentiate them from other activities. Default is "Badger.Stream". - LogPrefix string - - // ChooseKey is invoked each time a new key is encountered. Note that this is not called - // on every version of the value, only the first encountered version (i.e. 
the highest version - // of the value a key has). ChooseKey can be left nil to select all keys. - // - // Note: Calls to ChooseKey are concurrent. - ChooseKey func(item *Item) bool - - // KeyToList, similar to ChooseKey, is only invoked on the highest version of the value. It - // is upto the caller to iterate over the versions and generate zero, one or more KVs. It - // is expected that the user would advance the iterator to go through the versions of the - // values. However, the user MUST immediately return from this function on the first encounter - // with a mismatching key. See example usage in ToList function. Can be left nil to use ToList - // function by default. - // - // Note: Calls to KeyToList are concurrent. - KeyToList func(key []byte, itr *Iterator) (*pb.KVList, error) - - // This is the method where Stream sends the final output. All calls to Send are done by a - // single goroutine, i.e. logic within Send method can expect single threaded execution. - Send func(*pb.KVList) error - - readTs uint64 - db *DB - rangeCh chan keyRange - kvChan chan *pb.KVList - nextStreamId uint32 -} - -// ToList is a default implementation of KeyToList. It picks up all valid versions of the key, -// skipping over deleted or expired keys. -func (st *Stream) ToList(key []byte, itr *Iterator) (*pb.KVList, error) { - list := &pb.KVList{} - for ; itr.Valid(); itr.Next() { - item := itr.Item() - if item.IsDeletedOrExpired() { - break - } - if !bytes.Equal(key, item.Key()) { - // Break out on the first encounter with another key. - break - } - - valCopy, err := item.ValueCopy(nil) - if err != nil { - return nil, err - } - kv := &pb.KV{ - Key: item.KeyCopy(nil), - Value: valCopy, - UserMeta: []byte{item.UserMeta()}, - Version: item.Version(), - ExpiresAt: item.ExpiresAt(), - } - list.Kv = append(list.Kv, kv) - if st.db.opt.NumVersionsToKeep == 1 { - break - } - - if item.DiscardEarlierVersions() { - break - } - } - return list, nil -} - -// keyRange is [start, end), including start, excluding end. Do ensure that the start, -// end byte slices are owned by keyRange struct. -func (st *Stream) produceRanges(ctx context.Context) { - splits := st.db.KeySplits(st.Prefix) - - // We don't need to create more key ranges than NumGo goroutines. This way, we will have limited - // number of "streams" coming out, which then helps limit the memory used by SSWriter. - { - pickEvery := int(math.Floor(float64(len(splits)) / float64(st.NumGo))) - if pickEvery < 1 { - pickEvery = 1 - } - filtered := splits[:0] - for i, split := range splits { - if (i+1)%pickEvery == 0 { - filtered = append(filtered, split) - } - } - splits = filtered - } - - start := y.SafeCopy(nil, st.Prefix) - for _, key := range splits { - st.rangeCh <- keyRange{left: start, right: y.SafeCopy(nil, []byte(key))} - start = y.SafeCopy(nil, []byte(key)) - } - // Edge case: prefix is empty and no splits exist. In that case, we should have at least one - // keyRange output. - st.rangeCh <- keyRange{left: start} - close(st.rangeCh) -} - -// produceKVs picks up ranges from rangeCh, generates KV lists and sends them to kvChan. 
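Driving a `Stream` looked roughly like this before the removal (a sketch built only from the fields documented above; `Send` is always invoked from a single goroutine):

```go
stream := db.NewStream()
stream.NumGo = 8                   // defaults to 16
stream.Prefix = []byte("account/") // restrict iteration to one key prefix
stream.LogPrefix = "Export.Stream"
stream.Send = func(list *pb.KVList) error {
	for _, kv := range list.Kv {
		fmt.Printf("%s (stream %d)\n", kv.Key, kv.StreamId)
	}
	return nil
}
if err := stream.Orchestrate(context.Background()); err != nil {
	return err
}
```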
-func (st *Stream) produceKVs(ctx context.Context) error { - var size int - var txn *Txn - if st.readTs > 0 { - txn = st.db.NewTransactionAt(st.readTs, false) - } else { - txn = st.db.NewTransaction(false) - } - defer txn.Discard() - - iterate := func(kr keyRange) error { - iterOpts := DefaultIteratorOptions - iterOpts.AllVersions = true - iterOpts.Prefix = st.Prefix - iterOpts.PrefetchValues = false - itr := txn.NewIterator(iterOpts) - defer itr.Close() - - // This unique stream id is used to identify all the keys from this iteration. - streamId := atomic.AddUint32(&st.nextStreamId, 1) - - outList := new(pb.KVList) - var prevKey []byte - for itr.Seek(kr.left); itr.Valid(); { - // it.Valid would only return true for keys with the provided Prefix in iterOpts. - item := itr.Item() - if bytes.Equal(item.Key(), prevKey) { - itr.Next() - continue - } - prevKey = append(prevKey[:0], item.Key()...) - - // Check if we reached the end of the key range. - if len(kr.right) > 0 && bytes.Compare(item.Key(), kr.right) >= 0 { - break - } - // Check if we should pick this key. - if st.ChooseKey != nil && !st.ChooseKey(item) { - continue - } - - // Now convert to key value. - list, err := st.KeyToList(item.KeyCopy(nil), itr) - if err != nil { - return err - } - if list == nil || len(list.Kv) == 0 { - continue - } - outList.Kv = append(outList.Kv, list.Kv...) - size += proto.Size(list) - if size >= pageSize { - for _, kv := range outList.Kv { - kv.StreamId = streamId - } - select { - case st.kvChan <- outList: - case <-ctx.Done(): - return ctx.Err() - } - outList = new(pb.KVList) - size = 0 - } - } - if len(outList.Kv) > 0 { - for _, kv := range outList.Kv { - kv.StreamId = streamId - } - // TODO: Think of a way to indicate that a stream is over. - select { - case st.kvChan <- outList: - case <-ctx.Done(): - return ctx.Err() - } - } - return nil - } - - for { - select { - case kr, ok := <-st.rangeCh: - if !ok { - // Done with the keys. - return nil - } - if err := iterate(kr); err != nil { - return err - } - case <-ctx.Done(): - return ctx.Err() - } - } -} - -func (st *Stream) streamKVs(ctx context.Context) error { - var count int - var bytesSent uint64 - t := time.NewTicker(time.Second) - defer t.Stop() - now := time.Now() - - slurp := func(batch *pb.KVList) error { - loop: - for { - select { - case kvs, ok := <-st.kvChan: - if !ok { - break loop - } - y.AssertTrue(kvs != nil) - batch.Kv = append(batch.Kv, kvs.Kv...) - default: - break loop - } - } - sz := uint64(proto.Size(batch)) - bytesSent += sz - count += len(batch.Kv) - t := time.Now() - if err := st.Send(batch); err != nil { - return err - } - st.db.opt.Infof("%s Created batch of size: %s in %s.\n", - st.LogPrefix, humanize.Bytes(sz), time.Since(t)) - return nil - } - -outer: - for { - var batch *pb.KVList - select { - case <-ctx.Done(): - return ctx.Err() - - case <-t.C: - dur := time.Since(now) - durSec := uint64(dur.Seconds()) - if durSec == 0 { - continue - } - speed := bytesSent / durSec - st.db.opt.Infof("%s Time elapsed: %s, bytes sent: %s, speed: %s/sec\n", st.LogPrefix, - y.FixedDuration(dur), humanize.Bytes(bytesSent), humanize.Bytes(speed)) - - case kvs, ok := <-st.kvChan: - if !ok { - break outer - } - y.AssertTrue(kvs != nil) - batch = kvs - if err := slurp(batch); err != nil { - return err - } - } - } - - st.db.opt.Infof("%s Sent %d keys\n", st.LogPrefix, count) - return nil -} - -// Orchestrate runs Stream. 
It picks up ranges from the SSTables, then runs NumGo number of -// goroutines to iterate over these ranges and batch up KVs in lists. It concurrently runs a single -// goroutine to pick these lists, batch them up further and send to Output.Send. Orchestrate also -// spits logs out to Infof, using provided LogPrefix. Note that all calls to Output.Send -// are serial. In case any of these steps encounter an error, Orchestrate would stop execution and -// return that error. Orchestrate can be called multiple times, but in serial order. -func (st *Stream) Orchestrate(ctx context.Context) error { - st.rangeCh = make(chan keyRange, 3) // Contains keys for posting lists. - - // kvChan should only have a small capacity to ensure that we don't buffer up too much data if - // sending is slow. Page size is set to 4MB, which is used to lazily cap the size of each - // KVList. To get 128MB buffer, we can set the channel size to 32. - st.kvChan = make(chan *pb.KVList, 32) - - if st.KeyToList == nil { - st.KeyToList = st.ToList - } - - // Picks up ranges from Badger, and sends them to rangeCh. - go st.produceRanges(ctx) - - errCh := make(chan error, 1) // Stores error by consumeKeys. - var wg sync.WaitGroup - for i := 0; i < st.NumGo; i++ { - wg.Add(1) - go func() { - defer wg.Done() - // Picks up ranges from rangeCh, generates KV lists, and sends them to kvChan. - if err := st.produceKVs(ctx); err != nil { - select { - case errCh <- err: - default: - } - } - }() - } - - // Pick up key-values from kvChan and send to stream. - kvErr := make(chan error, 1) - go func() { - // Picks up KV lists from kvChan, and sends them to Output. - kvErr <- st.streamKVs(ctx) - }() - wg.Wait() // Wait for produceKVs to be over. - close(st.kvChan) // Now we can close kvChan. - - select { - case err := <-errCh: // Check error from produceKVs. - return err - default: - } - - // Wait for key streaming to be over. - err := <-kvErr - return err -} - -func (db *DB) newStream() *Stream { - return &Stream{db: db, NumGo: 16, LogPrefix: "Badger.Stream"} -} - -// NewStream creates a new Stream. -func (db *DB) NewStream() *Stream { - if db.opt.managedTxns { - panic("This API can not be called in managed mode.") - } - return db.newStream() -} - -// NewStreamAt creates a new Stream at a particular timestamp. Should only be used with managed DB. -func (db *DB) NewStreamAt(readTs uint64) *Stream { - if !db.opt.managedTxns { - panic("This API can only be called in managed mode.") - } - stream := db.newStream() - stream.readTs = readTs - return stream -} diff --git a/vendor/github.com/dgraph-io/badger/stream_writer.go b/vendor/github.com/dgraph-io/badger/stream_writer.go deleted file mode 100644 index 46dd3805..00000000 --- a/vendor/github.com/dgraph-io/badger/stream_writer.go +++ /dev/null @@ -1,439 +0,0 @@ -/* - * Copyright 2019 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package badger - -import ( - "fmt" - "math" - "sync" - - "github.com/dgraph-io/badger/pb" - "github.com/dgraph-io/badger/table" - "github.com/dgraph-io/badger/y" - humanize "github.com/dustin/go-humanize" - "github.com/pkg/errors" -) - -const headStreamId uint32 = math.MaxUint32 - -// StreamWriter is used to write data coming from multiple streams. The streams must not have any -// overlapping key ranges. Within each stream, the keys must be sorted. Badger Stream framework is -// capable of generating such an output. So, this StreamWriter can be used at the other end to build -// BadgerDB at a much faster pace by writing SSTables (and value logs) directly to LSM tree levels -// without causing any compactions at all. This is way faster than using batched writer or using -// transactions, but only applicable in situations where the keys are pre-sorted and the DB is being -// bootstrapped. Existing data would get deleted when using this writer. So, this is only useful -// when restoring from backup or replicating DB across servers. -// -// StreamWriter should not be called on in-use DB instances. It is designed only to bootstrap new -// DBs. -type StreamWriter struct { - writeLock sync.Mutex - db *DB - done func() - throttle *y.Throttle - maxVersion uint64 - writers map[uint32]*sortedWriter - maxHead valuePointer -} - -// NewStreamWriter creates a StreamWriter. Right after creating StreamWriter, Prepare must be -// called. The memory usage of a StreamWriter is directly proportional to the number of streams -// possible. So, efforts must be made to keep the number of streams low. Stream framework would -// typically use 16 goroutines and hence create 16 streams. -func (db *DB) NewStreamWriter() *StreamWriter { - return &StreamWriter{ - db: db, - // throttle shouldn't make much difference. Memory consumption is based on the number of - // concurrent streams being processed. - throttle: y.NewThrottle(16), - writers: make(map[uint32]*sortedWriter), - } -} - -// Prepare should be called before writing any entry to StreamWriter. It deletes all data present in -// existing DB, stops compactions and any writes being done by other means. Be very careful when -// calling Prepare, because it could result in permanent data loss. Not calling Prepare would result -// in a corrupt Badger instance. -func (sw *StreamWriter) Prepare() error { - sw.writeLock.Lock() - defer sw.writeLock.Unlock() - - var err error - sw.done, err = sw.db.dropAll() - return err -} - -// Write writes KVList to DB. Each KV within the list contains the stream id which StreamWriter -// would use to demux the writes. Write is thread safe and can be called concurrently by mulitple -// goroutines. -func (sw *StreamWriter) Write(kvs *pb.KVList) error { - if len(kvs.GetKv()) == 0 { - return nil - } - - // closedStreams keeps track of all streams which are going to be marked as done. We are - // keeping track of all streams so that we can close them at the end, after inserting all - // the valid kvs. - closedStreams := make(map[uint32]struct{}) - streamReqs := make(map[uint32]*request) - for _, kv := range kvs.Kv { - if kv.StreamDone { - closedStreams[kv.StreamId] = struct{}{} - continue - } - - // Panic if some kv comes after stream has been marked as closed. 
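Per the comments above, the intended lifecycle is `Prepare` once, any number of `Write` calls, then `Flush`. A hypothetical restore loop (`incoming` stands in for a channel of decoded backup batches):

```go
sw := db.NewStreamWriter()
if err := sw.Prepare(); err != nil { // destructive: drops all existing data
	return err
}
for list := range incoming { // chan *pb.KVList, e.g. decoded from a backup
	if err := sw.Write(list); err != nil {
		return err
	}
}
if err := sw.Flush(); err != nil { // syncs dirs and updates the Oracle
	return err
}
```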
- if _, ok := closedStreams[kv.StreamId]; ok { - panic(fmt.Sprintf("write performed on closed stream: %d", kv.StreamId)) - } - - var meta, userMeta byte - if len(kv.Meta) > 0 { - meta = kv.Meta[0] - } - if len(kv.UserMeta) > 0 { - userMeta = kv.UserMeta[0] - } - if sw.maxVersion < kv.Version { - sw.maxVersion = kv.Version - } - e := &Entry{ - Key: y.KeyWithTs(kv.Key, kv.Version), - Value: kv.Value, - UserMeta: userMeta, - ExpiresAt: kv.ExpiresAt, - meta: meta, - } - // If the value can be collocated with the key in LSM tree, we can skip - // writing the value to value log. - e.skipVlog = sw.db.shouldWriteValueToLSM(*e) - req := streamReqs[kv.StreamId] - if req == nil { - req = &request{} - streamReqs[kv.StreamId] = req - } - req.Entries = append(req.Entries, e) - } - all := make([]*request, 0, len(streamReqs)) - for _, req := range streamReqs { - all = append(all, req) - } - - sw.writeLock.Lock() - defer sw.writeLock.Unlock() - - // We are writing all requests to vlog even if some request belongs to already closed stream. - // It is safe to do because we are panicking while writing to sorted writer, which will be nil - // for closed stream. At restart, stream writer will drop all the data in Prepare function. - if err := sw.db.vlog.write(all); err != nil { - return err - } - - for streamId, req := range streamReqs { - writer, ok := sw.writers[streamId] - if !ok { - writer = sw.newWriter(streamId) - sw.writers[streamId] = writer - } - - if writer == nil { - panic(fmt.Sprintf("write performed on closed stream: %d", streamId)) - } - - writer.reqCh <- req - } - - // Now we can close any streams if required. We will make writer for - // the closed streams as nil. - for streamId := range closedStreams { - writer, ok := sw.writers[streamId] - if !ok { - sw.db.opt.Logger.Warningf("Trying to close stream: %d, but no sorted "+ - "writer found for it", streamId) - continue - } - - writer.closer.SignalAndWait() - if err := writer.Done(); err != nil { - return err - } - - if sw.maxHead.Less(writer.head) { - sw.maxHead = writer.head - } - - sw.writers[streamId] = nil - } - return nil -} - -// Flush is called once we are done writing all the entries. It syncs DB directories. It also -// updates Oracle with maxVersion found in all entries (if DB is not managed). -func (sw *StreamWriter) Flush() error { - sw.writeLock.Lock() - defer sw.writeLock.Unlock() - - defer sw.done() - - for _, writer := range sw.writers { - if writer != nil { - writer.closer.SignalAndWait() - } - } - - for _, writer := range sw.writers { - if writer == nil { - continue - } - if err := writer.Done(); err != nil { - return err - } - if sw.maxHead.Less(writer.head) { - sw.maxHead = writer.head - } - } - - // Encode and write the value log head into a new table. - data := make([]byte, vptrSize) - data = sw.maxHead.Encode(data) - headWriter := sw.newWriter(headStreamId) - if err := headWriter.Add( - y.KeyWithTs(head, sw.maxVersion), - y.ValueStruct{Value: data}); err != nil { - return err - } - if err := headWriter.Done(); err != nil { - return err - } - - if !sw.db.opt.managedTxns { - if sw.db.orc != nil { - sw.db.orc.Stop() - } - sw.db.orc = newOracle(sw.db.opt) - sw.db.orc.nextTxnTs = sw.maxVersion - sw.db.orc.txnMark.Done(sw.maxVersion) - sw.db.orc.readMark.Done(sw.maxVersion) - sw.db.orc.incrementNextTs() - } - - // Wait for all files to be written. - if err := sw.throttle.Finish(); err != nil { - return err - } - - // Sort tables at the end. 
- for _, l := range sw.db.lc.levels { - l.sortTables() - } - - // Now sync the directories, so all the files are registered. - if sw.db.opt.ValueDir != sw.db.opt.Dir { - if err := syncDir(sw.db.opt.ValueDir); err != nil { - return err - } - } - if err := syncDir(sw.db.opt.Dir); err != nil { - return err - } - return sw.db.lc.validate() -} - -type sortedWriter struct { - db *DB - throttle *y.Throttle - - builder *table.Builder - lastKey []byte - streamId uint32 - reqCh chan *request - head valuePointer - // Have separate closer for each writer, as it can be closed at any time. - closer *y.Closer -} - -func (sw *StreamWriter) newWriter(streamId uint32) *sortedWriter { - w := &sortedWriter{ - db: sw.db, - streamId: streamId, - throttle: sw.throttle, - builder: table.NewTableBuilder(), - reqCh: make(chan *request, 3), - closer: y.NewCloser(1), - } - - go w.handleRequests() - return w -} - -// ErrUnsortedKey is returned when any out of order key arrives at sortedWriter during call to Add. -var ErrUnsortedKey = errors.New("Keys not in sorted order") - -func (w *sortedWriter) handleRequests() { - defer w.closer.Done() - - process := func(req *request) { - for i, e := range req.Entries { - vptr := req.Ptrs[i] - if !vptr.IsZero() { - y.AssertTrue(w.head.Less(vptr)) - w.head = vptr - } - - var vs y.ValueStruct - if e.skipVlog { - vs = y.ValueStruct{ - Value: e.Value, - Meta: e.meta, - UserMeta: e.UserMeta, - ExpiresAt: e.ExpiresAt, - } - } else { - vbuf := make([]byte, vptrSize) - vs = y.ValueStruct{ - Value: vptr.Encode(vbuf), - Meta: e.meta | bitValuePointer, - UserMeta: e.UserMeta, - ExpiresAt: e.ExpiresAt, - } - } - if err := w.Add(e.Key, vs); err != nil { - panic(err) - } - } - } - - for { - select { - case req := <-w.reqCh: - process(req) - case <-w.closer.HasBeenClosed(): - close(w.reqCh) - for req := range w.reqCh { - process(req) - } - return - } - } -} - -// Add adds key and vs to sortedWriter. -func (w *sortedWriter) Add(key []byte, vs y.ValueStruct) error { - if len(w.lastKey) > 0 && y.CompareKeys(key, w.lastKey) <= 0 { - return ErrUnsortedKey - } - - sameKey := y.SameKey(key, w.lastKey) - // Same keys should go into the same SSTable. - if !sameKey && w.builder.ReachedCapacity(w.db.opt.MaxTableSize) { - if err := w.send(false); err != nil { - return err - } - } - - w.lastKey = y.SafeCopy(w.lastKey, key) - w.builder.Add(key, vs) - return nil -} - -func (w *sortedWriter) send(done bool) error { - if err := w.throttle.Do(); err != nil { - return err - } - go func(builder *table.Builder) { - data := builder.Finish() - err := w.createTable(data) - w.throttle.Done(err) - }(w.builder) - w.builder = table.NewTableBuilder() - return nil -} - -// Done is called once we are done writing all keys and valueStructs -// to sortedWriter. It completes writing current SST to disk. -func (w *sortedWriter) Done() error { - if w.builder.Empty() { - // Assign builder as nil, so that underlying memory can be garbage collected. 
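`sortedWriter.send` builds each finished table on its own goroutine but bounds how many are in flight through `y.Throttle`: `Do` before spawning, `Done(err)` when the table is written, `Finish` at `Flush` to wait and collect the first error. A minimal stand-in with the same three-call shape (a sketch, not the vendored implementation):

```go
import "sync"

type throttle struct {
	slots chan struct{}
	wg    sync.WaitGroup
	once  sync.Once
	err   error
}

func newThrottle(max int) *throttle {
	return &throttle{slots: make(chan struct{}, max)}
}

// Do blocks until a worker slot is free; call it before starting work.
func (t *throttle) Do() {
	t.slots <- struct{}{}
	t.wg.Add(1)
}

// Done releases the slot and records the first error seen.
func (t *throttle) Done(err error) {
	if err != nil {
		t.once.Do(func() { t.err = err })
	}
	<-t.slots
	t.wg.Done()
}

// Finish waits for all outstanding workers, then reports the first error.
func (t *throttle) Finish() error {
	t.wg.Wait()
	return t.err
}
```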
- w.builder = nil - return nil - } - - return w.send(true) -} - -func (w *sortedWriter) createTable(data []byte) error { - if len(data) == 0 { - return nil - } - fileID := w.db.lc.reserveFileID() - fd, err := y.CreateSyncedFile(table.NewFilename(fileID, w.db.opt.Dir), true) - if err != nil { - return err - } - if _, err := fd.Write(data); err != nil { - return err - } - tbl, err := table.OpenTable(fd, w.db.opt.TableLoadingMode, nil) - if err != nil { - return err - } - lc := w.db.lc - - var lhandler *levelHandler - // We should start the levels from 1, because we need level 0 to set the !badger!head key. We - // cannot mix up this key with other keys from the DB, otherwise we would introduce a range - // overlap violation. - y.AssertTrue(len(lc.levels) > 1) - for _, l := range lc.levels[1:] { - ratio := float64(l.getTotalSize()) / float64(l.maxTotalSize) - if ratio < 1.0 { - lhandler = l - break - } - } - if lhandler == nil { - // If we're exceeding the size of the lowest level, shove it in the lowest level. Can't do - // better than that. - lhandler = lc.levels[len(lc.levels)-1] - } - if w.streamId == headStreamId { - // This is a special !badger!head key. We should store it at level 0, separate from all the - // other keys to avoid an overlap. - lhandler = lc.levels[0] - } - // Now that table can be opened successfully, let's add this to the MANIFEST. - change := &pb.ManifestChange{ - Id: tbl.ID(), - Op: pb.ManifestChange_CREATE, - Level: uint32(lhandler.level), - Checksum: tbl.Checksum, - } - if err := w.db.manifest.addChanges([]*pb.ManifestChange{change}); err != nil { - return err - } - - // We are not calling lhandler.replaceTables() here, as it sorts tables on every addition. - // We can sort all tables only once during Flush() call. - lhandler.addTable(tbl) - - // Release the ref held by OpenTable. - _ = tbl.DecrRef() - w.db.opt.Infof("Table created: %d at level: %d for stream: %d. Size: %s\n", - fileID, lhandler.level, w.streamId, humanize.Bytes(uint64(tbl.Size()))) - return nil -} diff --git a/vendor/github.com/dgraph-io/badger/structs.go b/vendor/github.com/dgraph-io/badger/structs.go deleted file mode 100644 index 51d16cdb..00000000 --- a/vendor/github.com/dgraph-io/badger/structs.go +++ /dev/null @@ -1,186 +0,0 @@ -package badger - -import ( - "bytes" - "encoding/binary" - "fmt" - "hash/crc32" - "time" - - "github.com/dgraph-io/badger/y" -) - -type valuePointer struct { - Fid uint32 - Len uint32 - Offset uint32 -} - -func (p valuePointer) Less(o valuePointer) bool { - if p.Fid != o.Fid { - return p.Fid < o.Fid - } - if p.Offset != o.Offset { - return p.Offset < o.Offset - } - return p.Len < o.Len -} - -func (p valuePointer) IsZero() bool { - return p.Fid == 0 && p.Offset == 0 && p.Len == 0 -} - -const vptrSize = 12 - -// Encode encodes Pointer into byte buffer. -func (p valuePointer) Encode(b []byte) []byte { - binary.BigEndian.PutUint32(b[:4], p.Fid) - binary.BigEndian.PutUint32(b[4:8], p.Len) - binary.BigEndian.PutUint32(b[8:12], p.Offset) - return b[:vptrSize] -} - -func (p *valuePointer) Decode(b []byte) { - p.Fid = binary.BigEndian.Uint32(b[:4]) - p.Len = binary.BigEndian.Uint32(b[4:8]) - p.Offset = binary.BigEndian.Uint32(b[8:12]) -} - -// header is used in value log as a header before Entry. 
-type header struct { - klen uint32 - vlen uint32 - expiresAt uint64 - meta byte - userMeta byte -} - -const ( - headerBufSize = 18 -) - -func (h header) Encode(out []byte) { - y.AssertTrue(len(out) >= headerBufSize) - binary.BigEndian.PutUint32(out[0:4], h.klen) - binary.BigEndian.PutUint32(out[4:8], h.vlen) - binary.BigEndian.PutUint64(out[8:16], h.expiresAt) - out[16] = h.meta - out[17] = h.userMeta -} - -// Decodes h from buf. -func (h *header) Decode(buf []byte) { - h.klen = binary.BigEndian.Uint32(buf[0:4]) - h.vlen = binary.BigEndian.Uint32(buf[4:8]) - h.expiresAt = binary.BigEndian.Uint64(buf[8:16]) - h.meta = buf[16] - h.userMeta = buf[17] -} - -// Entry provides Key, Value, UserMeta and ExpiresAt. This struct can be used by -// the user to set data. -type Entry struct { - Key []byte - Value []byte - UserMeta byte - ExpiresAt uint64 // time.Unix - meta byte - - // Fields maintained internally. - offset uint32 - skipVlog bool -} - -func (e *Entry) estimateSize(threshold int) int { - if len(e.Value) < threshold { - return len(e.Key) + len(e.Value) + 2 // Meta, UserMeta - } - return len(e.Key) + 12 + 2 // 12 for ValuePointer, 2 for metas. -} - -// Encodes e to buf. Returns number of bytes written. -func encodeEntry(e *Entry, buf *bytes.Buffer) (int, error) { - h := header{ - klen: uint32(len(e.Key)), - vlen: uint32(len(e.Value)), - expiresAt: e.ExpiresAt, - meta: e.meta, - userMeta: e.UserMeta, - } - - var headerEnc [headerBufSize]byte - h.Encode(headerEnc[:]) - - hash := crc32.New(y.CastagnoliCrcTable) - - buf.Write(headerEnc[:]) - if _, err := hash.Write(headerEnc[:]); err != nil { - return 0, err - } - - buf.Write(e.Key) - if _, err := hash.Write(e.Key); err != nil { - return 0, err - } - - buf.Write(e.Value) - if _, err := hash.Write(e.Value); err != nil { - return 0, err - } - - var crcBuf [crc32.Size]byte - binary.BigEndian.PutUint32(crcBuf[:], hash.Sum32()) - buf.Write(crcBuf[:]) - - return len(headerEnc) + len(e.Key) + len(e.Value) + len(crcBuf), nil -} - -func (e Entry) print(prefix string) { - fmt.Printf("%s Key: %s Meta: %d UserMeta: %d Offset: %d len(val)=%d", - prefix, e.Key, e.meta, e.UserMeta, e.offset, len(e.Value)) -} - -// NewEntry creates a new entry with key and value passed in args. This newly created entry can be -// set in a transaction by calling txn.SetEntry(). All other properties of Entry can be set by -// calling WithMeta, WithDiscard, WithTTL methods on it. -// This function uses key and value reference, hence users must -// not modify key and value until the end of transaction. -func NewEntry(key, value []byte) *Entry { - return &Entry{ - Key: key, - Value: value, - } -} - -// WithMeta adds meta data to Entry e. This byte is stored alongside the key -// and can be used as an aid to interpret the value or store other contextual -// bits corresponding to the key-value pair of entry. -func (e *Entry) WithMeta(meta byte) *Entry { - e.UserMeta = meta - return e -} - -// WithDiscard adds a marker to Entry e. This means all the previous versions of the key (of the -// Entry) will be eligible for garbage collection. -// This method is only useful if you have set a higher limit for options.NumVersionsToKeep. The -// default setting is 1, in which case, this function doesn't add any more benefit. If however, you -// have a higher setting for NumVersionsToKeep (in Dgraph, we set it to infinity), you can use this -// method to indicate that all the older versions can be discarded and removed during compactions. 
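These builder-style helpers compose on a single write; a hypothetical transaction using them (`SetEntry` is the transaction-side API in this badger version, and `payload` is assumed to be a `[]byte`):

```go
err := db.Update(func(txn *badger.Txn) error {
	e := badger.NewEntry([]byte("session/42"), payload).
		WithMeta(0x01).           // one user byte stored beside the key
		WithTTL(30 * time.Minute) // expires, then becomes GC-eligible
	return txn.SetEntry(e)
})
```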
-func (e *Entry) WithDiscard() *Entry { - e.meta = bitDiscardEarlierVersions - return e -} - -// WithTTL adds time to live duration to Entry e. Entry stored with a TTL would automatically expire -// after the time has elapsed, and will be eligible for garbage collection. -func (e *Entry) WithTTL(dur time.Duration) *Entry { - e.ExpiresAt = uint64(time.Now().Add(dur).Unix()) - return e -} - -// withMergeBit sets merge bit in entry's metadata. This -// function is called by MergeOperator's Add method. -func (e *Entry) withMergeBit() *Entry { - e.meta = bitMergeEntry - return e -} diff --git a/vendor/github.com/dgraph-io/badger/table/README.md b/vendor/github.com/dgraph-io/badger/table/README.md deleted file mode 100644 index a784f126..00000000 --- a/vendor/github.com/dgraph-io/badger/table/README.md +++ /dev/null @@ -1,69 +0,0 @@ -Size of table is 122,173,606 bytes for all benchmarks. - -# BenchmarkRead -``` -$ go test -bench ^BenchmarkRead$ -run ^$ -count 3 -goos: linux -goarch: amd64 -pkg: github.com/dgraph-io/badger/table -BenchmarkRead-16 10 153281932 ns/op -BenchmarkRead-16 10 153454443 ns/op -BenchmarkRead-16 10 155349696 ns/op -PASS -ok github.com/dgraph-io/badger/table 23.549s -``` - -Size of table is 122,173,606 bytes, which is ~117MB. - -The rate is ~750MB/s using LoadToRAM (when table is in RAM). - -To read a 64MB table, this would take ~0.0853s, which is negligible. - -# BenchmarkReadAndBuild -```go -$ go test -bench BenchmarkReadAndBuild -run ^$ -count 3 -goos: linux -goarch: amd64 -pkg: github.com/dgraph-io/badger/table -BenchmarkReadAndBuild-16 2 945041628 ns/op -BenchmarkReadAndBuild-16 2 947120893 ns/op -BenchmarkReadAndBuild-16 2 954909506 ns/op -PASS -ok github.com/dgraph-io/badger/table 26.856s -``` - -The rate is ~122MB/s. To build a 64MB table, this would take ~0.52s. Note that this -does NOT include the flushing of the table to disk. All we are doing above is -reading one table (which is in RAM) and write one table in memory. - -The table building takes 0.52-0.0853s ~ 0.4347s. - -# BenchmarkReadMerged -Below, we merge 5 tables. The total size remains unchanged at ~122M. - -```go -$ go test -bench ReadMerged -run ^$ -count 3 -BenchmarkReadMerged-16 2 954475788 ns/op -BenchmarkReadMerged-16 2 955252462 ns/op -BenchmarkReadMerged-16 2 956857353 ns/op -PASS -ok github.com/dgraph-io/badger/table 33.327s -``` - -The rate is ~122MB/s. To read a 64MB table using merge iterator, this would take ~0.52s. - -# BenchmarkRandomRead - -```go -go test -bench BenchmarkRandomRead$ -run ^$ -count 3 -goos: linux -goarch: amd64 -pkg: github.com/dgraph-io/badger/table -BenchmarkRandomRead-16 300000 3596 ns/op -BenchmarkRandomRead-16 300000 3621 ns/op -BenchmarkRandomRead-16 300000 3596 ns/op -PASS -ok github.com/dgraph-io/badger/table 44.727s -``` - -For random read benchmarking, we are randomly reading a key and verifying its value. diff --git a/vendor/github.com/dgraph-io/badger/table/builder.go b/vendor/github.com/dgraph-io/badger/table/builder.go deleted file mode 100644 index f9773bab..00000000 --- a/vendor/github.com/dgraph-io/badger/table/builder.go +++ /dev/null @@ -1,236 +0,0 @@ -/* - * Copyright 2017 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package table - -import ( - "bytes" - "encoding/binary" - "io" - "math" - - "github.com/AndreasBriese/bbloom" - "github.com/dgraph-io/badger/y" -) - -var ( - restartInterval = 100 // Might want to change this to be based on total size instead of numKeys. -) - -func newBuffer(sz int) *bytes.Buffer { - b := new(bytes.Buffer) - b.Grow(sz) - return b -} - -type header struct { - plen uint16 // Overlap with base key. - klen uint16 // Length of the diff. - vlen uint16 // Length of value. - prev uint32 // Offset for the previous key-value pair. The offset is relative to block base offset. -} - -// Encode encodes the header. -func (h header) Encode(b []byte) { - binary.BigEndian.PutUint16(b[0:2], h.plen) - binary.BigEndian.PutUint16(b[2:4], h.klen) - binary.BigEndian.PutUint16(b[4:6], h.vlen) - binary.BigEndian.PutUint32(b[6:10], h.prev) -} - -// Decode decodes the header. -func (h *header) Decode(buf []byte) int { - h.plen = binary.BigEndian.Uint16(buf[0:2]) - h.klen = binary.BigEndian.Uint16(buf[2:4]) - h.vlen = binary.BigEndian.Uint16(buf[4:6]) - h.prev = binary.BigEndian.Uint32(buf[6:10]) - return h.Size() -} - -// Size returns size of the header. Currently it's just a constant. -func (h header) Size() int { return 10 } - -// Builder is used in building a table. -type Builder struct { - counter int // Number of keys written for the current block. - - // Typically tens or hundreds of meg. This is for one single file. - buf *bytes.Buffer - - baseKey []byte // Base key for the current block. - baseOffset uint32 // Offset for the current block. - - restarts []uint32 // Base offsets of every block. - - // Tracks offset for the previous key-value pair. Offset is relative to block base offset. - prevOffset uint32 - - keyBuf *bytes.Buffer - keyCount int -} - -// NewTableBuilder makes a new TableBuilder. -func NewTableBuilder() *Builder { - return &Builder{ - keyBuf: newBuffer(1 << 20), - buf: newBuffer(1 << 20), - prevOffset: math.MaxUint32, // Used for the first element! - } -} - -// Close closes the TableBuilder. -func (b *Builder) Close() {} - -// Empty returns whether it's empty. -func (b *Builder) Empty() bool { return b.buf.Len() == 0 } - -// keyDiff returns a suffix of newKey that is different from b.baseKey. -func (b Builder) keyDiff(newKey []byte) []byte { - var i int - for i = 0; i < len(newKey) && i < len(b.baseKey); i++ { - if newKey[i] != b.baseKey[i] { - break - } - } - return newKey[i:] -} - -func (b *Builder) addHelper(key []byte, v y.ValueStruct) { - // Add key to bloom filter. - if len(key) > 0 { - var klen [2]byte - keyNoTs := y.ParseKey(key) - binary.BigEndian.PutUint16(klen[:], uint16(len(keyNoTs))) - b.keyBuf.Write(klen[:]) - b.keyBuf.Write(keyNoTs) - b.keyCount++ - } - - // diffKey stores the difference of key with baseKey. - var diffKey []byte - if len(b.baseKey) == 0 { - // Make a copy. Builder should not keep references. Otherwise, caller has to be very careful - // and will have to make copies of keys every time they add to builder, which is even worse. - b.baseKey = append(b.baseKey[:0], key...) 
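The `plen`/`klen` pair in the header above exists for prefix compression: within a block, each key stores only its suffix relative to the block's base key. The core of `keyDiff`, isolated with a worked example:

```go
// sharedPrefix reports how many leading bytes two keys have in common;
// a block entry is then stored as plen=shared, diff=newKey[shared:].
func sharedPrefix(baseKey, newKey []byte) int {
	i := 0
	for i < len(newKey) && i < len(baseKey) && newKey[i] == baseKey[i] {
		i++
	}
	return i
}

// Example: against base "app/users/1", the key "app/users/2" shares 10
// bytes, so it is written as plen=10, klen=1, diff="2".
```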
-		diffKey = key
-	} else {
-		diffKey = b.keyDiff(key)
-	}
-
-	h := header{
-		plen: uint16(len(key) - len(diffKey)),
-		klen: uint16(len(diffKey)),
-		vlen: uint16(v.EncodedSize()),
-		prev: b.prevOffset, // prevOffset is the location of the last key-value added.
-	}
-	b.prevOffset = uint32(b.buf.Len()) - b.baseOffset // Remember current offset for the next Add call.
-
-	// Layout: header, diffKey, value.
-	var hbuf [10]byte
-	h.Encode(hbuf[:])
-	b.buf.Write(hbuf[:])
-	b.buf.Write(diffKey) // We only need to store the key difference.
-
-	v.EncodeTo(b.buf)
-	b.counter++ // Increment number of keys added for this current block.
-}
-
-func (b *Builder) finishBlock() {
-	// When we are at the end of the block and Valid=false, and the user wants to do a Prev,
-	// we need a dummy header to tell us the offset of the previous key-value pair.
-	b.addHelper([]byte{}, y.ValueStruct{})
-}
-
-// Add adds a key-value pair to the block.
-// A new block is started whenever b.counter reaches restartInterval.
-func (b *Builder) Add(key []byte, value y.ValueStruct) {
-	if b.counter >= restartInterval {
-		b.finishBlock()
-		// Start a new block. Initialize the block.
-		b.restarts = append(b.restarts, uint32(b.buf.Len()))
-		b.counter = 0
-		b.baseKey = []byte{}
-		b.baseOffset = uint32(b.buf.Len())
-		b.prevOffset = math.MaxUint32 // First key-value pair of block has header.prev=MaxUint32.
-	}
-	b.addHelper(key, value)
-}
-
-// TODO: vvv this was the comment on ReachedCapacity.
-// FinalSize returns the *rough* final size of the array, counting the header which is
-// not yet written.
-// TODO: Look into why there is a discrepancy. I suspect it is because of Write(empty, empty)
-// at the end. The diff can vary.
-
-// ReachedCapacity reports whether the builder's estimated final table size exceeds cap.
-func (b *Builder) ReachedCapacity(cap int64) bool {
-	estimateSz := b.buf.Len() + 8 /* empty header */ + 4*len(b.restarts) +
-		8 /* 8 = end of buf offset + len(restarts) */
-	return int64(estimateSz) > cap
-}
-
-// blockIndex generates the block index for the table.
-// It is mainly a list of all the block base offsets.
-func (b *Builder) blockIndex() []byte {
-	// Store the end offset, so we know the length of the final block.
-	b.restarts = append(b.restarts, uint32(b.buf.Len()))
-
-	// Add 4 because we want to write out number of restarts at the end.
-	sz := 4*len(b.restarts) + 4
-	out := make([]byte, sz)
-	buf := out
-	for _, r := range b.restarts {
-		binary.BigEndian.PutUint32(buf[:4], r)
-		buf = buf[4:]
-	}
-	binary.BigEndian.PutUint32(buf[:4], uint32(len(b.restarts)))
-	return out
-}
-
-// Finish finishes the table by appending the index.
-func (b *Builder) Finish() []byte {
-	bf := bbloom.New(float64(b.keyCount), 0.01)
-	var klen [2]byte
-	key := make([]byte, 1024)
-	for {
-		if _, err := b.keyBuf.Read(klen[:]); err == io.EOF {
-			break
-		} else if err != nil {
-			y.Check(err)
-		}
-		kl := int(binary.BigEndian.Uint16(klen[:]))
-		if cap(key) < kl {
-			key = make([]byte, 2*int(kl)) // Grow as int, so doubling a uint16 length cannot overflow.
-		}
-		key = key[:kl]
-		y.Check2(b.keyBuf.Read(key))
-		bf.Add(key)
-	}
-
-	b.finishBlock() // This will never start a new block.
-	index := b.blockIndex()
-	b.buf.Write(index)
-
-	// Write bloom filter.
- bdata := bf.JSONMarshal() - n, err := b.buf.Write(bdata) - y.Check(err) - var buf [4]byte - binary.BigEndian.PutUint32(buf[:], uint32(n)) - b.buf.Write(buf[:]) - - return b.buf.Bytes() -} diff --git a/vendor/github.com/dgraph-io/badger/table/iterator.go b/vendor/github.com/dgraph-io/badger/table/iterator.go deleted file mode 100644 index c928540e..00000000 --- a/vendor/github.com/dgraph-io/badger/table/iterator.go +++ /dev/null @@ -1,557 +0,0 @@ -/* - * Copyright 2017 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package table - -import ( - "bytes" - "io" - "math" - "sort" - - "github.com/dgraph-io/badger/y" - "github.com/pkg/errors" -) - -type blockIterator struct { - data []byte - pos uint32 - err error - baseKey []byte - - key []byte - val []byte - init bool - - last header // The last header we saw. -} - -func (itr *blockIterator) Reset() { - itr.pos = 0 - itr.err = nil - itr.baseKey = []byte{} - itr.key = []byte{} - itr.val = []byte{} - itr.init = false - itr.last = header{} -} - -func (itr *blockIterator) Init() { - if !itr.init { - itr.Next() - } -} - -func (itr *blockIterator) Valid() bool { - return itr != nil && itr.err == nil -} - -func (itr *blockIterator) Error() error { - return itr.err -} - -func (itr *blockIterator) Close() {} - -var ( - origin = 0 - current = 1 -) - -// Seek brings us to the first block element that is >= input key. -func (itr *blockIterator) Seek(key []byte, whence int) { - itr.err = nil - - switch whence { - case origin: - itr.Reset() - case current: - } - - var done bool - for itr.Init(); itr.Valid(); itr.Next() { - k := itr.Key() - if y.CompareKeys(k, key) >= 0 { - // We are done as k is >= key. - done = true - break - } - } - if !done { - itr.err = io.EOF - } -} - -func (itr *blockIterator) SeekToFirst() { - itr.err = nil - itr.Init() -} - -// SeekToLast brings us to the last element. Valid should return true. -func (itr *blockIterator) SeekToLast() { - itr.err = nil - for itr.Init(); itr.Valid(); itr.Next() { - } - itr.Prev() -} - -// parseKV would allocate a new byte slice for key and for value. -func (itr *blockIterator) parseKV(h header) { - if cap(itr.key) < int(h.plen+h.klen) { - sz := int(h.plen) + int(h.klen) // Convert to int before adding to avoid uint16 overflow. 
- itr.key = make([]byte, 2*sz) - } - itr.key = itr.key[:h.plen+h.klen] - copy(itr.key, itr.baseKey[:h.plen]) - copy(itr.key[h.plen:], itr.data[itr.pos:itr.pos+uint32(h.klen)]) - itr.pos += uint32(h.klen) - - if itr.pos+uint32(h.vlen) > uint32(len(itr.data)) { - itr.err = errors.Errorf("Value exceeded size of block: %d %d %d %d %v", - itr.pos, h.klen, h.vlen, len(itr.data), h) - return - } - itr.val = y.SafeCopy(itr.val, itr.data[itr.pos:itr.pos+uint32(h.vlen)]) - itr.pos += uint32(h.vlen) -} - -func (itr *blockIterator) Next() { - itr.init = true - itr.err = nil - if itr.pos >= uint32(len(itr.data)) { - itr.err = io.EOF - return - } - - var h header - itr.pos += uint32(h.Decode(itr.data[itr.pos:])) - itr.last = h // Store the last header. - - if h.klen == 0 && h.plen == 0 { - // Last entry in the table. - itr.err = io.EOF - return - } - - // Populate baseKey if it isn't set yet. This would only happen for the first Next. - if len(itr.baseKey) == 0 { - // This should be the first Next() for this block. Hence, prefix length should be zero. - y.AssertTrue(h.plen == 0) - itr.baseKey = itr.data[itr.pos : itr.pos+uint32(h.klen)] - } - itr.parseKV(h) -} - -func (itr *blockIterator) Prev() { - if !itr.init { - return - } - itr.err = nil - if itr.last.prev == math.MaxUint32 { - // This is the first element of the block! - itr.err = io.EOF - itr.pos = 0 - return - } - - // Move back using current header's prev. - itr.pos = itr.last.prev - - var h header - y.AssertTruef(itr.pos < uint32(len(itr.data)), "%d %d", itr.pos, len(itr.data)) - itr.pos += uint32(h.Decode(itr.data[itr.pos:])) - itr.parseKV(h) - itr.last = h -} - -func (itr *blockIterator) Key() []byte { - if itr.err != nil { - return nil - } - return itr.key -} - -func (itr *blockIterator) Value() []byte { - if itr.err != nil { - return nil - } - return itr.val -} - -// Iterator is an iterator for a Table. -type Iterator struct { - t *Table - bpos int - bi *blockIterator - err error - - // Internally, Iterator is bidirectional. However, we only expose the - // unidirectional functionality for now. - reversed bool -} - -// NewIterator returns a new iterator of the Table -func (t *Table) NewIterator(reversed bool) *Iterator { - t.IncrRef() // Important. - ti := &Iterator{t: t, reversed: reversed} - ti.next() - return ti -} - -// Close closes the iterator (and it must be called). 
-func (itr *Iterator) Close() error { - return itr.t.DecrRef() -} - -func (itr *Iterator) reset() { - itr.bpos = 0 - itr.err = nil -} - -// Valid follows the y.Iterator interface -func (itr *Iterator) Valid() bool { - return itr.err == nil -} - -func (itr *Iterator) seekToFirst() { - numBlocks := len(itr.t.blockIndex) - if numBlocks == 0 { - itr.err = io.EOF - return - } - itr.bpos = 0 - block, err := itr.t.block(itr.bpos) - if err != nil { - itr.err = err - return - } - itr.bi = block.NewIterator() - itr.bi.SeekToFirst() - itr.err = itr.bi.Error() -} - -func (itr *Iterator) seekToLast() { - numBlocks := len(itr.t.blockIndex) - if numBlocks == 0 { - itr.err = io.EOF - return - } - itr.bpos = numBlocks - 1 - block, err := itr.t.block(itr.bpos) - if err != nil { - itr.err = err - return - } - itr.bi = block.NewIterator() - itr.bi.SeekToLast() - itr.err = itr.bi.Error() -} - -func (itr *Iterator) seekHelper(blockIdx int, key []byte) { - itr.bpos = blockIdx - block, err := itr.t.block(blockIdx) - if err != nil { - itr.err = err - return - } - itr.bi = block.NewIterator() - itr.bi.Seek(key, origin) - itr.err = itr.bi.Error() -} - -// seekFrom brings us to a key that is >= input key. -func (itr *Iterator) seekFrom(key []byte, whence int) { - itr.err = nil - switch whence { - case origin: - itr.reset() - case current: - } - - idx := sort.Search(len(itr.t.blockIndex), func(idx int) bool { - ko := itr.t.blockIndex[idx] - return y.CompareKeys(ko.key, key) > 0 - }) - if idx == 0 { - // The smallest key in our table is already strictly > key. We can return that. - // This is like a SeekToFirst. - itr.seekHelper(0, key) - return - } - - // block[idx].smallest is > key. - // Since idx>0, we know block[idx-1].smallest is <= key. - // There are two cases. - // 1) Everything in block[idx-1] is strictly < key. In this case, we should go to the first - // element of block[idx]. - // 2) Some element in block[idx-1] is >= key. We should go to that element. - itr.seekHelper(idx-1, key) - if itr.err == io.EOF { - // Case 1. Need to visit block[idx]. - if idx == len(itr.t.blockIndex) { - // If idx == len(itr.t.blockIndex), then input key is greater than ANY element of table. - // There's nothing we can do. Valid() should return false as we seek to end of table. - return - } - // Since block[idx].smallest is > key. This is essentially a block[idx].SeekToFirst. - itr.seekHelper(idx, key) - } - // Case 2: No need to do anything. We already did the seek in block[idx-1]. -} - -// seek will reset iterator and seek to >= key. -func (itr *Iterator) seek(key []byte) { - itr.seekFrom(key, origin) -} - -// seekForPrev will reset iterator and seek to <= key. -func (itr *Iterator) seekForPrev(key []byte) { - // TODO: Optimize this. We shouldn't have to take a Prev step. 
- itr.seekFrom(key, origin) - if !bytes.Equal(itr.Key(), key) { - itr.prev() - } -} - -func (itr *Iterator) next() { - itr.err = nil - - if itr.bpos >= len(itr.t.blockIndex) { - itr.err = io.EOF - return - } - - if itr.bi == nil { - block, err := itr.t.block(itr.bpos) - if err != nil { - itr.err = err - return - } - itr.bi = block.NewIterator() - itr.bi.SeekToFirst() - itr.err = itr.bi.Error() - return - } - - itr.bi.Next() - if !itr.bi.Valid() { - itr.bpos++ - itr.bi = nil - itr.next() - return - } -} - -func (itr *Iterator) prev() { - itr.err = nil - if itr.bpos < 0 { - itr.err = io.EOF - return - } - - if itr.bi == nil { - block, err := itr.t.block(itr.bpos) - if err != nil { - itr.err = err - return - } - itr.bi = block.NewIterator() - itr.bi.SeekToLast() - itr.err = itr.bi.Error() - return - } - - itr.bi.Prev() - if !itr.bi.Valid() { - itr.bpos-- - itr.bi = nil - itr.prev() - return - } -} - -// Key follows the y.Iterator interface. -// Returns the key with timestamp. -func (itr *Iterator) Key() []byte { - return itr.bi.Key() -} - -// Value follows the y.Iterator interface -func (itr *Iterator) Value() (ret y.ValueStruct) { - ret.Decode(itr.bi.Value()) - return -} - -// Next follows the y.Iterator interface -func (itr *Iterator) Next() { - if !itr.reversed { - itr.next() - } else { - itr.prev() - } -} - -// Rewind follows the y.Iterator interface -func (itr *Iterator) Rewind() { - if !itr.reversed { - itr.seekToFirst() - } else { - itr.seekToLast() - } -} - -// Seek follows the y.Iterator interface -func (itr *Iterator) Seek(key []byte) { - if !itr.reversed { - itr.seek(key) - } else { - itr.seekForPrev(key) - } -} - -// ConcatIterator concatenates the sequences defined by several iterators. (It only works with -// TableIterators, probably just because it's faster to not be so generic.) -type ConcatIterator struct { - idx int // Which iterator is active now. - cur *Iterator - iters []*Iterator // Corresponds to tables. - tables []*Table // Disregarding reversed, this is in ascending order. - reversed bool -} - -// NewConcatIterator creates a new concatenated iterator -func NewConcatIterator(tbls []*Table, reversed bool) *ConcatIterator { - iters := make([]*Iterator, len(tbls)) - for i := 0; i < len(tbls); i++ { - // Increment the reference count. Since, we're not creating the iterator right now. - // Here, We'll hold the reference of the tables, till the lifecycle of the iterator. - tbls[i].IncrRef() - - // Save cycles by not initializing the iterators until needed. - // iters[i] = tbls[i].NewIterator(reversed) - } - return &ConcatIterator{ - reversed: reversed, - iters: iters, - tables: tbls, - idx: -1, // Not really necessary because s.it.Valid()=false, but good to have. 
- } -} - -func (s *ConcatIterator) setIdx(idx int) { - s.idx = idx - if idx < 0 || idx >= len(s.iters) { - s.cur = nil - return - } - if s.iters[idx] == nil { - s.iters[idx] = s.tables[idx].NewIterator(s.reversed) - } - s.cur = s.iters[s.idx] -} - -// Rewind implements y.Interface -func (s *ConcatIterator) Rewind() { - if len(s.iters) == 0 { - return - } - if !s.reversed { - s.setIdx(0) - } else { - s.setIdx(len(s.iters) - 1) - } - s.cur.Rewind() -} - -// Valid implements y.Interface -func (s *ConcatIterator) Valid() bool { - return s.cur != nil && s.cur.Valid() -} - -// Key implements y.Interface -func (s *ConcatIterator) Key() []byte { - return s.cur.Key() -} - -// Value implements y.Interface -func (s *ConcatIterator) Value() y.ValueStruct { - return s.cur.Value() -} - -// Seek brings us to element >= key if reversed is false. Otherwise, <= key. -func (s *ConcatIterator) Seek(key []byte) { - var idx int - if !s.reversed { - idx = sort.Search(len(s.tables), func(i int) bool { - return y.CompareKeys(s.tables[i].Biggest(), key) >= 0 - }) - } else { - n := len(s.tables) - idx = n - 1 - sort.Search(n, func(i int) bool { - return y.CompareKeys(s.tables[n-1-i].Smallest(), key) <= 0 - }) - } - if idx >= len(s.tables) || idx < 0 { - s.setIdx(-1) - return - } - // For reversed=false, we know s.tables[i-1].Biggest() < key. Thus, the - // previous table cannot possibly contain key. - s.setIdx(idx) - s.cur.Seek(key) -} - -// Next advances our concat iterator. -func (s *ConcatIterator) Next() { - s.cur.Next() - if s.cur.Valid() { - // Nothing to do. Just stay with the current table. - return - } - for { // In case there are empty tables. - if !s.reversed { - s.setIdx(s.idx + 1) - } else { - s.setIdx(s.idx - 1) - } - if s.cur == nil { - // End of list. Valid will become false. - return - } - s.cur.Rewind() - if s.cur.Valid() { - break - } - } -} - -// Close implements y.Interface. -func (s *ConcatIterator) Close() error { - for _, t := range s.tables { - // DeReference the tables while closing the iterator. - if err := t.DecrRef(); err != nil { - return err - } - } - for _, it := range s.iters { - if it == nil { - continue - } - if err := it.Close(); err != nil { - return errors.Wrap(err, "ConcatIterator") - } - } - return nil -} diff --git a/vendor/github.com/dgraph-io/badger/table/merge_iterator.go b/vendor/github.com/dgraph-io/badger/table/merge_iterator.go deleted file mode 100644 index cbecd849..00000000 --- a/vendor/github.com/dgraph-io/badger/table/merge_iterator.go +++ /dev/null @@ -1,229 +0,0 @@ -/* - * Copyright 2019 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package table - -import ( - "bytes" - - "github.com/dgraph-io/badger/y" - "github.com/pkg/errors" -) - -// MergeIterator merges multiple iterators. -// NOTE: MergeIterator owns the array of iterators and is responsible for closing them. 
-type MergeIterator struct { - left node - right node - small *node - - curKey []byte - reverse bool -} - -type node struct { - valid bool - key []byte - iter y.Iterator - - // The two iterators are type asserted from `y.Iterator`, used to inline more function calls. - // Calling functions on concrete types is much faster (about 25-30%) than calling the - // interface's function. - merge *MergeIterator - concat *ConcatIterator -} - -func (n *node) setIterator(iter y.Iterator) { - n.iter = iter - // It's okay if the type assertion below fails and n.merge/n.concat are set to nil. - // We handle the nil values of merge and concat in all the methods. - n.merge, _ = iter.(*MergeIterator) - n.concat, _ = iter.(*ConcatIterator) -} - -func (n *node) setKey() { - if n.merge != nil { - n.valid = n.merge.small.valid - if n.valid { - n.key = n.merge.small.key - } - } else if n.concat != nil { - n.valid = n.concat.Valid() - if n.valid { - n.key = n.concat.Key() - } - } else { - n.valid = n.iter.Valid() - if n.valid { - n.key = n.iter.Key() - } - } -} - -func (n *node) next() { - if n.merge != nil { - n.merge.Next() - } else if n.concat != nil { - n.concat.Next() - } else { - n.iter.Next() - } - n.setKey() -} - -func (n *node) rewind() { - n.iter.Rewind() - n.setKey() -} - -func (n *node) seek(key []byte) { - n.iter.Seek(key) - n.setKey() -} - -func (mi *MergeIterator) fix() { - if !mi.bigger().valid { - return - } - if !mi.small.valid { - mi.swapSmall() - return - } - cmp := y.CompareKeys(mi.small.key, mi.bigger().key) - // Both the keys are equal. - if cmp == 0 { - // In case of same keys, move the right iterator ahead. - mi.right.next() - if &mi.right == mi.small { - mi.swapSmall() - } - return - } else if cmp < 0 { // Small is less than bigger(). - if mi.reverse { - mi.swapSmall() - } else { - // we don't need to do anything. Small already points to the smallest. - } - return - } else { // bigger() is less than small. - if mi.reverse { - // Do nothing since we're iterating in reverse. Small currently points to - // the bigger key and that's okay in reverse iteration. - } else { - mi.swapSmall() - } - return - } -} - -func (mi *MergeIterator) bigger() *node { - if mi.small == &mi.left { - return &mi.right - } - return &mi.left -} - -func (mi *MergeIterator) swapSmall() { - if mi.small == &mi.left { - mi.small = &mi.right - return - } - if mi.small == &mi.right { - mi.small = &mi.left - return - } -} - -// Next returns the next element. If it is the same as the current key, ignore it. -func (mi *MergeIterator) Next() { - for mi.Valid() { - if !bytes.Equal(mi.small.key, mi.curKey) { - break - } - mi.small.next() - mi.fix() - } - mi.setCurrent() -} - -func (mi *MergeIterator) setCurrent() { - mi.curKey = append(mi.curKey[:0], mi.small.key...) -} - -// Rewind seeks to first element (or last element for reverse iterator). -func (mi *MergeIterator) Rewind() { - mi.left.rewind() - mi.right.rewind() - mi.fix() - mi.setCurrent() -} - -// Seek brings us to element with key >= given key. -func (mi *MergeIterator) Seek(key []byte) { - mi.left.seek(key) - mi.right.seek(key) - mi.fix() - mi.setCurrent() -} - -// Valid returns whether the MergeIterator is at a valid element. -func (mi *MergeIterator) Valid() bool { - return mi.small.valid -} - -// Key returns the key associated with the current iterator. -func (mi *MergeIterator) Key() []byte { - return mi.small.key -} - -// Value returns the value associated with the iterator. 
-func (mi *MergeIterator) Value() y.ValueStruct { - return mi.small.iter.Value() -} - -// Close implements y.Iterator. -func (mi *MergeIterator) Close() error { - err1 := mi.left.iter.Close() - err2 := mi.right.iter.Close() - if err1 != nil { - return errors.Wrap(err1, "MergeIterator") - } - return errors.Wrap(err2, "MergeIterator") -} - -// NewMergeIterator creates a merge iterator. -func NewMergeIterator(iters []y.Iterator, reverse bool) y.Iterator { - if len(iters) == 0 { - return nil - } else if len(iters) == 1 { - return iters[0] - } else if len(iters) == 2 { - mi := &MergeIterator{ - reverse: reverse, - } - mi.left.setIterator(iters[0]) - mi.right.setIterator(iters[1]) - // Assign left iterator randomly. This will be fixed when user calls rewind/seek. - mi.small = &mi.left - return mi - } - mid := len(iters) / 2 - return NewMergeIterator( - []y.Iterator{ - NewMergeIterator(iters[:mid], reverse), - NewMergeIterator(iters[mid:], reverse), - }, reverse) -} diff --git a/vendor/github.com/dgraph-io/badger/table/table.go b/vendor/github.com/dgraph-io/badger/table/table.go deleted file mode 100644 index 9bc41787..00000000 --- a/vendor/github.com/dgraph-io/badger/table/table.go +++ /dev/null @@ -1,362 +0,0 @@ -/* - * Copyright 2017 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package table - -import ( - "bytes" - "crypto/sha256" - "encoding/binary" - "fmt" - "io" - "os" - "path" - "path/filepath" - "strconv" - "strings" - "sync" - "sync/atomic" - - "github.com/AndreasBriese/bbloom" - "github.com/dgraph-io/badger/options" - "github.com/dgraph-io/badger/y" - "github.com/pkg/errors" -) - -const fileSuffix = ".sst" - -type keyOffset struct { - key []byte - offset int - len int -} - -// TableInterface is useful for testing. -type TableInterface interface { - Smallest() []byte - Biggest() []byte - DoesNotHave(key []byte) bool -} - -// Table represents a loaded table file with the info we have about it -type Table struct { - sync.Mutex - - fd *os.File // Own fd. - tableSize int // Initialized in OpenTable, using fd.Stat(). - - blockIndex []keyOffset - ref int32 // For file garbage collection. Atomic. - - loadingMode options.FileLoadingMode - mmap []byte // Memory mapped. - - // The following are initialized once and const. - smallest, biggest []byte // Smallest and largest keys (with timestamps). - id uint64 // file id, part of filename - - bf bbloom.Bloom - - Checksum []byte -} - -// IncrRef increments the refcount (having to do with whether the file should be deleted) -func (t *Table) IncrRef() { - atomic.AddInt32(&t.ref, 1) -} - -// DecrRef decrements the refcount and possibly deletes the table -func (t *Table) DecrRef() error { - newRef := atomic.AddInt32(&t.ref, -1) - if newRef == 0 { - // We can safely delete this file, because for all the current files, we always have - // at least one reference pointing to them. 
- - // It's necessary to delete windows files - if t.loadingMode == options.MemoryMap { - if err := y.Munmap(t.mmap); err != nil { - return err - } - t.mmap = nil - } - if err := t.fd.Truncate(0); err != nil { - // This is very important to let the FS know that the file is deleted. - return err - } - filename := t.fd.Name() - if err := t.fd.Close(); err != nil { - return err - } - if err := os.Remove(filename); err != nil { - return err - } - } - return nil -} - -type block struct { - offset int - data []byte -} - -func (b block) NewIterator() *blockIterator { - return &blockIterator{data: b.data} -} - -// OpenTable assumes file has only one table and opens it. Takes ownership of fd upon function -// entry. Returns a table with one reference count on it (decrementing which may delete the file! -// -- consider t.Close() instead). The fd has to writeable because we call Truncate on it before -// deleting. -func OpenTable(fd *os.File, mode options.FileLoadingMode, cksum []byte) (*Table, error) { - fileInfo, err := fd.Stat() - if err != nil { - // It's OK to ignore fd.Close() errs in this function because we have only read - // from the file. - _ = fd.Close() - return nil, y.Wrap(err) - } - - filename := fileInfo.Name() - id, ok := ParseFileID(filename) - if !ok { - _ = fd.Close() - return nil, errors.Errorf("Invalid filename: %s", filename) - } - t := &Table{ - fd: fd, - ref: 1, // Caller is given one reference. - id: id, - loadingMode: mode, - } - - t.tableSize = int(fileInfo.Size()) - - // We first load to RAM, so we can read the index and do checksum. - if err := t.loadToRAM(); err != nil { - return nil, err - } - // Enforce checksum before we read index. Otherwise, if the file was - // truncated, we'd end up with panics in readIndex. - if len(cksum) > 0 && !bytes.Equal(t.Checksum, cksum) { - return nil, fmt.Errorf( - "CHECKSUM_MISMATCH: Table checksum does not match checksum in MANIFEST."+ - " NOT including table %s. This would lead to missing data."+ - "\n sha256 %x Expected\n sha256 %x Found\n", filename, cksum, t.Checksum) - } - if err := t.readIndex(); err != nil { - return nil, y.Wrap(err) - } - - it := t.NewIterator(false) - defer it.Close() - it.Rewind() - if it.Valid() { - t.smallest = it.Key() - } - - it2 := t.NewIterator(true) - defer it2.Close() - it2.Rewind() - if it2.Valid() { - t.biggest = it2.Key() - } - - switch mode { - case options.LoadToRAM: - // No need to do anything. t.mmap is already filled. - case options.MemoryMap: - t.mmap, err = y.Mmap(fd, false, fileInfo.Size()) - if err != nil { - _ = fd.Close() - return nil, y.Wrapf(err, "Unable to map file: %q", fileInfo.Name()) - } - case options.FileIO: - t.mmap = nil - default: - panic(fmt.Sprintf("Invalid loading mode: %v", mode)) - } - return t, nil -} - -// Close closes the open table. (Releases resources back to the OS.) 
-func (t *Table) Close() error { - if t.loadingMode == options.MemoryMap { - if err := y.Munmap(t.mmap); err != nil { - return err - } - t.mmap = nil - } - - return t.fd.Close() -} - -func (t *Table) read(off, sz int) ([]byte, error) { - if len(t.mmap) > 0 { - if len(t.mmap[off:]) < sz { - return nil, y.ErrEOF - } - return t.mmap[off : off+sz], nil - } - - res := make([]byte, sz) - nbr, err := t.fd.ReadAt(res, int64(off)) - y.NumReads.Add(1) - y.NumBytesRead.Add(int64(nbr)) - return res, err -} - -func (t *Table) readNoFail(off, sz int) []byte { - res, err := t.read(off, sz) - y.Check(err) - return res -} - -func (t *Table) readIndex() error { - if len(t.mmap) != t.tableSize { - panic("Table size does not match the read bytes") - } - readPos := t.tableSize - - // Read bloom filter. - readPos -= 4 - buf := t.readNoFail(readPos, 4) - bloomLen := int(binary.BigEndian.Uint32(buf)) - readPos -= bloomLen - data := t.readNoFail(readPos, bloomLen) - t.bf = bbloom.JSONUnmarshal(data) - - readPos -= 4 - buf = t.readNoFail(readPos, 4) - restartsLen := int(binary.BigEndian.Uint32(buf)) - - readPos -= 4 * restartsLen - buf = t.readNoFail(readPos, 4*restartsLen) - - offsets := make([]int, restartsLen) - for i := 0; i < restartsLen; i++ { - offsets[i] = int(binary.BigEndian.Uint32(buf[:4])) - buf = buf[4:] - } - - // The last offset stores the end of the last block. - for i := 0; i < len(offsets); i++ { - var o int - if i == 0 { - o = 0 - } else { - o = offsets[i-1] - } - - ko := keyOffset{ - offset: o, - len: offsets[i] - o, - } - t.blockIndex = append(t.blockIndex, ko) - } - - // Execute this index read serially, because we already have table data in memory. - var h header - for idx := range t.blockIndex { - ko := &t.blockIndex[idx] - - hbuf := t.readNoFail(ko.offset, h.Size()) - h.Decode(hbuf) - y.AssertTrue(h.plen == 0) - - key := t.readNoFail(ko.offset+len(hbuf), int(h.klen)) - ko.key = append([]byte{}, key...) - } - - return nil -} - -func (t *Table) block(idx int) (block, error) { - y.AssertTruef(idx >= 0, "idx=%d", idx) - if idx >= len(t.blockIndex) { - return block{}, errors.New("block out of index") - } - - ko := t.blockIndex[idx] - blk := block{ - offset: ko.offset, - } - var err error - blk.data, err = t.read(blk.offset, ko.len) - return blk, err -} - -// Size is its file size in bytes -func (t *Table) Size() int64 { return int64(t.tableSize) } - -// Smallest is its smallest key, or nil if there are none -func (t *Table) Smallest() []byte { return t.smallest } - -// Biggest is its biggest key, or nil if there are none -func (t *Table) Biggest() []byte { return t.biggest } - -// Filename is NOT the file name. Just kidding, it is. -func (t *Table) Filename() string { return t.fd.Name() } - -// ID is the table's ID number (used to make the file name). -func (t *Table) ID() uint64 { return t.id } - -// DoesNotHave returns true if (but not "only if") the table does not have the key. It does a -// bloom filter lookup. -func (t *Table) DoesNotHave(key []byte) bool { return !t.bf.Has(key) } - -// ParseFileID reads the file id out of a filename. 
-func ParseFileID(name string) (uint64, bool) { - name = path.Base(name) - if !strings.HasSuffix(name, fileSuffix) { - return 0, false - } - // suffix := name[len(fileSuffix):] - name = strings.TrimSuffix(name, fileSuffix) - id, err := strconv.Atoi(name) - if err != nil { - return 0, false - } - y.AssertTrue(id >= 0) - return uint64(id), true -} - -// IDToFilename does the inverse of ParseFileID -func IDToFilename(id uint64) string { - return fmt.Sprintf("%06d", id) + fileSuffix -} - -// NewFilename should be named TableFilepath -- it combines the dir with the ID to make a table -// filepath. -func NewFilename(id uint64, dir string) string { - return filepath.Join(dir, IDToFilename(id)) -} - -func (t *Table) loadToRAM() error { - if _, err := t.fd.Seek(0, io.SeekStart); err != nil { - return err - } - t.mmap = make([]byte, t.tableSize) - sum := sha256.New() - tee := io.TeeReader(t.fd, sum) - read, err := tee.Read(t.mmap) - if err != nil || read != t.tableSize { - return y.Wrapf(err, "Unable to load file in memory. Table file: %s", t.Filename()) - } - t.Checksum = sum.Sum(nil) - y.NumReads.Add(1) - y.NumBytesRead.Add(int64(read)) - return nil -} diff --git a/vendor/github.com/dgraph-io/badger/test.sh b/vendor/github.com/dgraph-io/badger/test.sh deleted file mode 100644 index 6a68553e..00000000 --- a/vendor/github.com/dgraph-io/badger/test.sh +++ /dev/null @@ -1,46 +0,0 @@ -#!/bin/bash - -set -e - -go version - -packages=$(go list ./... | grep github.com/dgraph-io/badger/) - -if [[ ! -z "$TEAMCITY_VERSION" ]]; then - export GOFLAGS="-json" -fi - -# Ensure that we can compile the binary. -pushd badger -go build -v . -popd - -# Run the memory intensive tests first. -go test -v -run='TestBigKeyValuePairs$' --manual=true -go test -v -run='TestPushValueLogLimit' --manual=true - -# Run the special Truncate test. -rm -rf p -go test -v -run='TestTruncateVlogNoClose$' --manual=true -truncate --size=4096 p/000000.vlog -go test -v -run='TestTruncateVlogNoClose2$' --manual=true -go test -v -run='TestTruncateVlogNoClose3$' --manual=true -rm -rf p - -# Then the normal tests. -echo -echo "==> Starting test for table, skl and y package" -go test -v -race github.com/dgraph-io/badger/skl -# Run test for all package except the top level package. The top level package support the -# `vlog_mmap` flag which rest of the packages don't support. -go test -v -race $packages - -echo -echo "==> Starting tests with value log mmapped..." -# Run top level package tests with mmap flag. -go test -v -race github.com/dgraph-io/badger --vlog_mmap=true - -echo -echo "==> Starting tests with value log not mmapped..." -go test -v -race github.com/dgraph-io/badger --vlog_mmap=false - diff --git a/vendor/github.com/dgraph-io/badger/trie/trie.go b/vendor/github.com/dgraph-io/badger/trie/trie.go deleted file mode 100644 index 98e4a9dc..00000000 --- a/vendor/github.com/dgraph-io/badger/trie/trie.go +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Copyright 2019 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package trie - -type node struct { - children map[byte]*node - ids []uint64 -} - -func newNode() *node { - return &node{ - children: make(map[byte]*node), - ids: []uint64{}, - } -} - -// Trie datastructure. -type Trie struct { - root *node -} - -// NewTrie returns Trie. -func NewTrie() *Trie { - return &Trie{ - root: newNode(), - } -} - -// Add adds the id in the trie for the given prefix path. -func (t *Trie) Add(prefix []byte, id uint64) { - node := t.root - for _, val := range prefix { - child, ok := node.children[val] - if !ok { - child = newNode() - node.children[val] = child - } - node = child - } - // We only need to add the id to the last node of the given prefix. - node.ids = append(node.ids, id) -} - -// Get returns prefix matched ids for the given key. -func (t *Trie) Get(key []byte) map[uint64]struct{} { - out := make(map[uint64]struct{}) - node := t.root - // If root has ids that means we have subscribers for "nil/[]byte{}" - // prefix. Add them to the list. - if len(node.ids) > 0 { - for _, i := range node.ids { - out[i] = struct{}{} - } - } - for _, val := range key { - child, ok := node.children[val] - if !ok { - break - } - // We need ids of the all the node in the matching key path. - for _, id := range child.ids { - out[id] = struct{}{} - } - node = child - } - return out -} - -// Delete will delete the id if the id exist in the given index path. -func (t *Trie) Delete(index []byte, id uint64) { - node := t.root - for _, val := range index { - child, ok := node.children[val] - if !ok { - return - } - node = child - } - // We're just removing the id not the hanging path. - out := node.ids[:0] - for _, val := range node.ids { - if val != id { - out = append(out, val) - } - } - for i := len(out); i < len(node.ids); i++ { - node.ids[i] = 0 // garbage collecting - } - node.ids = out -} diff --git a/vendor/github.com/dgraph-io/badger/txn.go b/vendor/github.com/dgraph-io/badger/txn.go deleted file mode 100644 index 1c635386..00000000 --- a/vendor/github.com/dgraph-io/badger/txn.go +++ /dev/null @@ -1,701 +0,0 @@ -/* - * Copyright 2017 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package badger - -import ( - "bytes" - "context" - "encoding/hex" - "math" - "sort" - "strconv" - "sync" - "sync/atomic" - - "github.com/dgraph-io/badger/y" - "github.com/dgraph-io/ristretto/z" - "github.com/pkg/errors" -) - -type oracle struct { - // A 64-bit integer must be at the top for memory alignment. See issue #311. - refCount int64 - isManaged bool // Does not change value, so no locking required. - - sync.Mutex // For nextTxnTs and commits. - // writeChLock lock is for ensuring that transactions go to the write - // channel in the same order as their commit timestamps. - writeChLock sync.Mutex - nextTxnTs uint64 - - // Used to block NewTransaction, so all previous commits are visible to a new read. 
- txnMark *y.WaterMark - - // Either of these is used to determine which versions can be permanently - // discarded during compaction. - discardTs uint64 // Used by ManagedDB. - readMark *y.WaterMark // Used by DB. - - // commits stores a key fingerprint and latest commit counter for it. - // refCount is used to clear out commits map to avoid a memory blowup. - commits map[uint64]uint64 - - // closer is used to stop watermarks. - closer *y.Closer -} - -func newOracle(opt Options) *oracle { - orc := &oracle{ - isManaged: opt.managedTxns, - commits: make(map[uint64]uint64), - // We're not initializing nextTxnTs and readOnlyTs. It would be done after replay in Open. - // - // WaterMarks must be 64-bit aligned for atomic package, hence we must use pointers here. - // See https://golang.org/pkg/sync/atomic/#pkg-note-BUG. - readMark: &y.WaterMark{Name: "badger.PendingReads"}, - txnMark: &y.WaterMark{Name: "badger.TxnTimestamp"}, - closer: y.NewCloser(2), - } - orc.readMark.Init(orc.closer, opt.EventLogging) - orc.txnMark.Init(orc.closer, opt.EventLogging) - return orc -} - -func (o *oracle) Stop() { - o.closer.SignalAndWait() -} - -func (o *oracle) addRef() { - atomic.AddInt64(&o.refCount, 1) -} - -func (o *oracle) decrRef() { - if atomic.AddInt64(&o.refCount, -1) != 0 { - return - } - - // Clear out commits maps to release memory. - o.Lock() - defer o.Unlock() - // Avoids the race where something new is added to commitsMap - // after we check refCount and before we take Lock. - if atomic.LoadInt64(&o.refCount) != 0 { - return - } - if len(o.commits) >= 1000 { // If the map is still small, let it slide. - o.commits = make(map[uint64]uint64) - } -} - -func (o *oracle) readTs() uint64 { - if o.isManaged { - panic("ReadTs should not be retrieved for managed DB") - } - - var readTs uint64 - o.Lock() - readTs = o.nextTxnTs - 1 - o.readMark.Begin(readTs) - o.Unlock() - - // Wait for all txns which have no conflicts, have been assigned a commit - // timestamp and are going through the write to value log and LSM tree - // process. Not waiting here could mean that some txns which have been - // committed would not be read. - y.Check(o.txnMark.WaitForMark(context.Background(), readTs)) - return readTs -} - -func (o *oracle) nextTs() uint64 { - o.Lock() - defer o.Unlock() - return o.nextTxnTs -} - -func (o *oracle) incrementNextTs() { - o.Lock() - defer o.Unlock() - o.nextTxnTs++ -} - -// Any deleted or invalid versions at or below ts would be discarded during -// compaction to reclaim disk space in LSM tree and thence value log. -func (o *oracle) setDiscardTs(ts uint64) { - o.Lock() - defer o.Unlock() - o.discardTs = ts -} - -func (o *oracle) discardAtOrBelow() uint64 { - if o.isManaged { - o.Lock() - defer o.Unlock() - return o.discardTs - } - return o.readMark.DoneUntil() -} - -// hasConflict must be called while having a lock. -func (o *oracle) hasConflict(txn *Txn) bool { - if len(txn.reads) == 0 { - return false - } - for _, ro := range txn.reads { - // A commit at the read timestamp is expected. - // But, any commit after the read timestamp should cause a conflict. - if ts, has := o.commits[ro]; has && ts > txn.readTs { - return true - } - } - return false -} - -func (o *oracle) newCommitTs(txn *Txn) uint64 { - o.Lock() - defer o.Unlock() - - if o.hasConflict(txn) { - return 0 - } - - var ts uint64 - if !o.isManaged { - // This is the general case, when user doesn't specify the read and commit ts. 
- ts = o.nextTxnTs - o.nextTxnTs++ - o.txnMark.Begin(ts) - - } else { - // If commitTs is set, use it instead. - ts = txn.commitTs - } - - for _, w := range txn.writes { - o.commits[w] = ts // Update the commitTs. - } - return ts -} - -func (o *oracle) doneCommit(cts uint64) { - if o.isManaged { - // No need to update anything. - return - } - o.txnMark.Done(cts) -} - -// Txn represents a Badger transaction. -type Txn struct { - readTs uint64 - commitTs uint64 - - update bool // update is used to conditionally keep track of reads. - reads []uint64 // contains fingerprints of keys read. - writes []uint64 // contains fingerprints of keys written. - - pendingWrites map[string]*Entry // cache stores any writes done by txn. - - db *DB - discarded bool - - size int64 - count int64 - numIterators int32 -} - -type pendingWritesIterator struct { - entries []*Entry - nextIdx int - readTs uint64 - reversed bool -} - -func (pi *pendingWritesIterator) Next() { - pi.nextIdx++ -} - -func (pi *pendingWritesIterator) Rewind() { - pi.nextIdx = 0 -} - -func (pi *pendingWritesIterator) Seek(key []byte) { - key = y.ParseKey(key) - pi.nextIdx = sort.Search(len(pi.entries), func(idx int) bool { - cmp := bytes.Compare(pi.entries[idx].Key, key) - if !pi.reversed { - return cmp >= 0 - } - return cmp <= 0 - }) -} - -func (pi *pendingWritesIterator) Key() []byte { - y.AssertTrue(pi.Valid()) - entry := pi.entries[pi.nextIdx] - return y.KeyWithTs(entry.Key, pi.readTs) -} - -func (pi *pendingWritesIterator) Value() y.ValueStruct { - y.AssertTrue(pi.Valid()) - entry := pi.entries[pi.nextIdx] - return y.ValueStruct{ - Value: entry.Value, - Meta: entry.meta, - UserMeta: entry.UserMeta, - ExpiresAt: entry.ExpiresAt, - Version: pi.readTs, - } -} - -func (pi *pendingWritesIterator) Valid() bool { - return pi.nextIdx < len(pi.entries) -} - -func (pi *pendingWritesIterator) Close() error { - return nil -} - -func (txn *Txn) newPendingWritesIterator(reversed bool) *pendingWritesIterator { - if !txn.update || len(txn.pendingWrites) == 0 { - return nil - } - entries := make([]*Entry, 0, len(txn.pendingWrites)) - for _, e := range txn.pendingWrites { - entries = append(entries, e) - } - // Number of pending writes per transaction shouldn't be too big in general. - sort.Slice(entries, func(i, j int) bool { - cmp := bytes.Compare(entries[i].Key, entries[j].Key) - if !reversed { - return cmp < 0 - } - return cmp > 0 - }) - return &pendingWritesIterator{ - readTs: txn.readTs, - entries: entries, - reversed: reversed, - } -} - -func (txn *Txn) checkSize(e *Entry) error { - count := txn.count + 1 - // Extra bytes for version in key. - size := txn.size + int64(e.estimateSize(txn.db.opt.ValueThreshold)) + 10 - if count >= txn.db.opt.maxBatchCount || size >= txn.db.opt.maxBatchSize { - return ErrTxnTooBig - } - txn.count, txn.size = count, size - return nil -} - -func exceedsSize(prefix string, max int64, key []byte) error { - return errors.Errorf("%s with size %d exceeded %d limit. %s:\n%s", - prefix, len(key), max, prefix, hex.Dump(key[:1<<10])) -} - -func (txn *Txn) modify(e *Entry) error { - const maxKeySize = 65000 - - switch { - case !txn.update: - return ErrReadOnlyTxn - case txn.discarded: - return ErrDiscardedTxn - case len(e.Key) == 0: - return ErrEmptyKey - case bytes.HasPrefix(e.Key, badgerPrefix): - return ErrInvalidKey - case len(e.Key) > maxKeySize: - // Key length can't be more than uint16, as determined by table::header. 
To - // keep things safe and allow badger move prefix and a timestamp suffix, let's - // cut it down to 65000, instead of using 65536. - return exceedsSize("Key", maxKeySize, e.Key) - case int64(len(e.Value)) > txn.db.opt.ValueLogFileSize: - return exceedsSize("Value", txn.db.opt.ValueLogFileSize, e.Value) - } - - if err := txn.checkSize(e); err != nil { - return err - } - fp := z.MemHash(e.Key) // Avoid dealing with byte arrays. - txn.writes = append(txn.writes, fp) - txn.pendingWrites[string(e.Key)] = e - return nil -} - -// Set adds a key-value pair to the database. -// It will return ErrReadOnlyTxn if update flag was set to false when creating the transaction. -// -// The current transaction keeps a reference to the key and val byte slice -// arguments. Users must not modify key and val until the end of the transaction. -func (txn *Txn) Set(key, val []byte) error { - return txn.SetEntry(NewEntry(key, val)) -} - -// SetEntry takes an Entry struct and adds the key-value pair in the struct, -// along with other metadata to the database. -// -// The current transaction keeps a reference to the entry passed in argument. -// Users must not modify the entry until the end of the transaction. -func (txn *Txn) SetEntry(e *Entry) error { - return txn.modify(e) -} - -// Delete deletes a key. -// -// This is done by adding a delete marker for the key at commit timestamp. Any -// reads happening before this timestamp would be unaffected. Any reads after -// this commit would see the deletion. -// -// The current transaction keeps a reference to the key byte slice argument. -// Users must not modify the key until the end of the transaction. -func (txn *Txn) Delete(key []byte) error { - e := &Entry{ - Key: key, - meta: bitDelete, - } - return txn.modify(e) -} - -// Get looks for key and returns corresponding Item. -// If key is not found, ErrKeyNotFound is returned. -func (txn *Txn) Get(key []byte) (item *Item, rerr error) { - if len(key) == 0 { - return nil, ErrEmptyKey - } else if txn.discarded { - return nil, ErrDiscardedTxn - } - - item = new(Item) - if txn.update { - if e, has := txn.pendingWrites[string(key)]; has && bytes.Equal(key, e.Key) { - if isDeletedOrExpired(e.meta, e.ExpiresAt) { - return nil, ErrKeyNotFound - } - // Fulfill from cache. - item.meta = e.meta - item.val = e.Value - item.userMeta = e.UserMeta - item.key = key - item.status = prefetched - item.version = txn.readTs - item.expiresAt = e.ExpiresAt - // We probably don't need to set db on item here. - return item, nil - } - // Only track reads if this is update txn. No need to track read if txn serviced it - // internally. - txn.addReadKey(key) - } - - seek := y.KeyWithTs(key, txn.readTs) - vs, err := txn.db.get(seek) - if err != nil { - return nil, errors.Wrapf(err, "DB::Get key: %q", key) - } - if vs.Value == nil && vs.Meta == 0 { - return nil, ErrKeyNotFound - } - if isDeletedOrExpired(vs.Meta, vs.ExpiresAt) { - return nil, ErrKeyNotFound - } - - item.key = key - item.version = vs.Version - item.meta = vs.Meta - item.userMeta = vs.UserMeta - item.db = txn.db - item.vptr = vs.Value // TODO: Do we need to copy this over? - item.txn = txn - item.expiresAt = vs.ExpiresAt - return item, nil -} - -func (txn *Txn) addReadKey(key []byte) { - if txn.update { - fp := z.MemHash(key) - txn.reads = append(txn.reads, fp) - } -} - -// Discard discards a created transaction. This method is very important and must be called. Commit -// method calls this internally, however, calling this multiple times doesn't cause any issues. 
So, -// this can safely be called via a defer right when transaction is created. -// -// NOTE: If any operations are run on a discarded transaction, ErrDiscardedTxn is returned. -func (txn *Txn) Discard() { - if txn.discarded { // Avoid a re-run. - return - } - if atomic.LoadInt32(&txn.numIterators) > 0 { - panic("Unclosed iterator at time of Txn.Discard.") - } - txn.discarded = true - if !txn.db.orc.isManaged { - txn.db.orc.readMark.Done(txn.readTs) - } - if txn.update { - txn.db.orc.decrRef() - } -} - -func (txn *Txn) commitAndSend() (func() error, error) { - orc := txn.db.orc - // Ensure that the order in which we get the commit timestamp is the same as - // the order in which we push these updates to the write channel. So, we - // acquire a writeChLock before getting a commit timestamp, and only release - // it after pushing the entries to it. - orc.writeChLock.Lock() - defer orc.writeChLock.Unlock() - - commitTs := orc.newCommitTs(txn) - if commitTs == 0 { - return nil, ErrConflict - } - - // The following debug information is what led to determining the cause of - // bank txn violation bug, and it took a whole bunch of effort to narrow it - // down to here. So, keep this around for at least a couple of months. - // var b strings.Builder - // fmt.Fprintf(&b, "Read: %d. Commit: %d. reads: %v. writes: %v. Keys: ", - // txn.readTs, commitTs, txn.reads, txn.writes) - entries := make([]*Entry, 0, len(txn.pendingWrites)+1) - for _, e := range txn.pendingWrites { - // fmt.Fprintf(&b, "[%q : %q], ", e.Key, e.Value) - - // Suffix the keys with commit ts, so the key versions are sorted in - // descending order of commit timestamp. - e.Key = y.KeyWithTs(e.Key, commitTs) - e.meta |= bitTxn - entries = append(entries, e) - } - // log.Printf("%s\n", b.String()) - e := &Entry{ - Key: y.KeyWithTs(txnKey, commitTs), - Value: []byte(strconv.FormatUint(commitTs, 10)), - meta: bitFinTxn, - } - entries = append(entries, e) - - req, err := txn.db.sendToWriteCh(entries) - if err != nil { - orc.doneCommit(commitTs) - return nil, err - } - ret := func() error { - err := req.Wait() - // Wait before marking commitTs as done. - // We can't defer doneCommit above, because it is being called from a - // callback here. - orc.doneCommit(commitTs) - return err - } - return ret, nil -} - -func (txn *Txn) commitPrecheck() { - if txn.commitTs == 0 && txn.db.opt.managedTxns { - panic("Commit cannot be called with managedDB=true. Use CommitAt.") - } - if txn.discarded { - panic("Trying to commit a discarded txn") - } -} - -// Commit commits the transaction, following these steps: -// -// 1. If there are no writes, return immediately. -// -// 2. Check if read rows were updated since txn started. If so, return ErrConflict. -// -// 3. If no conflict, generate a commit timestamp and update written rows' commit ts. -// -// 4. Batch up all writes, write them to value log and LSM tree. -// -// 5. If callback is provided, Badger will return immediately after checking -// for conflicts. Writes to the database will happen in the background. If -// there is a conflict, an error will be returned and the callback will not -// run. If there are no conflicts, the callback will be called in the -// background upon successful completion of writes or any error during write. -// -// If error is nil, the transaction is successfully committed. In case of a non-nil error, the LSM -// tree won't be updated, so there's no need for any rollback. -func (txn *Txn) Commit() error { - txn.commitPrecheck() // Precheck before discarding txn. 
- defer txn.Discard() - - if len(txn.writes) == 0 { - return nil // Nothing to do. - } - - txnCb, err := txn.commitAndSend() - if err != nil { - return err - } - // If batchSet failed, LSM would not have been updated. So, no need to rollback anything. - - // TODO: What if some of the txns successfully make it to value log, but others fail. - // Nothing gets updated to LSM, until a restart happens. - return txnCb() -} - -type txnCb struct { - commit func() error - user func(error) - err error -} - -func runTxnCallback(cb *txnCb) { - switch { - case cb == nil: - panic("txn callback is nil") - case cb.user == nil: - panic("Must have caught a nil callback for txn.CommitWith") - case cb.err != nil: - cb.user(cb.err) - case cb.commit != nil: - err := cb.commit() - cb.user(err) - default: - cb.user(nil) - } -} - -// CommitWith acts like Commit, but takes a callback, which gets run via a -// goroutine to avoid blocking this function. The callback is guaranteed to run, -// so it is safe to increment sync.WaitGroup before calling CommitWith, and -// decrementing it in the callback; to block until all callbacks are run. -func (txn *Txn) CommitWith(cb func(error)) { - txn.commitPrecheck() // Precheck before discarding txn. - defer txn.Discard() - - if cb == nil { - panic("Nil callback provided to CommitWith") - } - - if len(txn.writes) == 0 { - // Do not run these callbacks from here, because the CommitWith and the - // callback might be acquiring the same locks. Instead run the callback - // from another goroutine. - go runTxnCallback(&txnCb{user: cb, err: nil}) - return - } - - commitCb, err := txn.commitAndSend() - if err != nil { - go runTxnCallback(&txnCb{user: cb, err: err}) - return - } - - go runTxnCallback(&txnCb{user: cb, commit: commitCb}) -} - -// ReadTs returns the read timestamp of the transaction. -func (txn *Txn) ReadTs() uint64 { - return txn.readTs -} - -// NewTransaction creates a new transaction. Badger supports concurrent execution of transactions, -// providing serializable snapshot isolation, avoiding write skews. Badger achieves this by tracking -// the keys read and at Commit time, ensuring that these read keys weren't concurrently modified by -// another transaction. -// -// For read-only transactions, set update to false. In this mode, we don't track the rows read for -// any changes. Thus, any long running iterations done in this mode wouldn't pay this overhead. -// -// Running transactions concurrently is OK. However, a transaction itself isn't thread safe, and -// should only be run serially. It doesn't matter if a transaction is created by one goroutine and -// passed down to other, as long as the Txn APIs are called serially. -// -// When you create a new transaction, it is absolutely essential to call -// Discard(). This should be done irrespective of what the update param is set -// to. Commit API internally runs Discard, but running it twice wouldn't cause -// any issues. -// -// txn := db.NewTransaction(false) -// defer txn.Discard() -// // Call various APIs. -func (db *DB) NewTransaction(update bool) *Txn { - return db.newTransaction(update, false) -} - -func (db *DB) newTransaction(update, isManaged bool) *Txn { - if db.opt.ReadOnly && update { - // DB is read-only, force read-only transaction. - update = false - } - - txn := &Txn{ - update: update, - db: db, - count: 1, // One extra entry for BitFin. - size: int64(len(txnKey) + 10), // Some buffer for the extra entry. 
- } - if update { - txn.pendingWrites = make(map[string]*Entry) - txn.db.orc.addRef() - } - // It is important that the oracle addRef happens BEFORE we retrieve a read - // timestamp. Otherwise, it is possible that the oracle commit map would - // become nil after we get the read timestamp. - // The sequence of events can be: - // 1. This txn gets a read timestamp. - // 2. Another txn working on the same keyset commits them, and decrements - // the reference to oracle. - // 3. Oracle ref reaches zero, resetting commit map. - // 4. This txn increments the oracle reference. - // 5. Now this txn would go on to commit the keyset, and no conflicts - // would be detected. - // See issue: https://github.com/dgraph-io/badger/issues/574 - if !isManaged { - txn.readTs = db.orc.readTs() - } - return txn -} - -// View executes a function creating and managing a read-only transaction for the user. Error -// returned by the function is relayed by the View method. -// If View is used with managed transactions, it would assume a read timestamp of MaxUint64. -func (db *DB) View(fn func(txn *Txn) error) error { - var txn *Txn - if db.opt.managedTxns { - txn = db.NewTransactionAt(math.MaxUint64, false) - } else { - txn = db.NewTransaction(false) - } - defer txn.Discard() - - return fn(txn) -} - -// Update executes a function, creating and managing a read-write transaction -// for the user. Error returned by the function is relayed by the Update method. -// Update cannot be used with managed transactions. -func (db *DB) Update(fn func(txn *Txn) error) error { - if db.opt.managedTxns { - panic("Update can only be used with managedDB=false.") - } - txn := db.NewTransaction(true) - defer txn.Discard() - - if err := fn(txn); err != nil { - return err - } - - return txn.Commit() -} diff --git a/vendor/github.com/dgraph-io/badger/util.go b/vendor/github.com/dgraph-io/badger/util.go deleted file mode 100644 index 2726b7ad..00000000 --- a/vendor/github.com/dgraph-io/badger/util.go +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Copyright 2017 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package badger - -import ( - "encoding/hex" - "io/ioutil" - "math/rand" - "sync/atomic" - "time" - - "github.com/dgraph-io/badger/table" - "github.com/dgraph-io/badger/y" - "github.com/pkg/errors" -) - -func (s *levelsController) validate() error { - for _, l := range s.levels { - if err := l.validate(); err != nil { - return errors.Wrap(err, "Levels Controller") - } - } - return nil -} - -// Check does some sanity check on one level of data or in-memory index. 
-func (s *levelHandler) validate() error { - if s.level == 0 { - return nil - } - - s.RLock() - defer s.RUnlock() - numTables := len(s.tables) - for j := 1; j < numTables; j++ { - if j >= len(s.tables) { - return errors.Errorf("Level %d, j=%d numTables=%d", s.level, j, numTables) - } - - if y.CompareKeys(s.tables[j-1].Biggest(), s.tables[j].Smallest()) >= 0 { - return errors.Errorf( - "Inter: Biggest(j-1) \n%s\n vs Smallest(j): \n%s\n: level=%d j=%d numTables=%d", - hex.Dump(s.tables[j-1].Biggest()), hex.Dump(s.tables[j].Smallest()), - s.level, j, numTables) - } - - if y.CompareKeys(s.tables[j].Smallest(), s.tables[j].Biggest()) > 0 { - return errors.Errorf( - "Intra: \n%s\n vs \n%s\n: level=%d j=%d numTables=%d", - hex.Dump(s.tables[j].Smallest()), hex.Dump(s.tables[j].Biggest()), s.level, j, numTables) - } - } - return nil -} - -// func (s *KV) debugPrintMore() { s.lc.debugPrintMore() } - -// // debugPrintMore shows key ranges of each level. -// func (s *levelsController) debugPrintMore() { -// s.Lock() -// defer s.Unlock() -// for i := 0; i < s.kv.opt.MaxLevels; i++ { -// s.levels[i].debugPrintMore() -// } -// } - -// func (s *levelHandler) debugPrintMore() { -// s.RLock() -// defer s.RUnlock() -// s.elog.Printf("Level %d:", s.level) -// for _, t := range s.tables { -// y.Printf(" [%s, %s]", t.Smallest(), t.Biggest()) -// } -// y.Printf("\n") -// } - -// reserveFileID reserves a unique file id. -func (s *levelsController) reserveFileID() uint64 { - id := atomic.AddUint64(&s.nextFileID, 1) - return id - 1 -} - -func getIDMap(dir string) map[uint64]struct{} { - fileInfos, err := ioutil.ReadDir(dir) - y.Check(err) - idMap := make(map[uint64]struct{}) - for _, info := range fileInfos { - if info.IsDir() { - continue - } - fileID, ok := table.ParseFileID(info.Name()) - if !ok { - continue - } - idMap[fileID] = struct{}{} - } - return idMap -} - -func init() { - rand.Seed(time.Now().UnixNano()) -} diff --git a/vendor/github.com/dgraph-io/badger/v2/.deepsource.toml b/vendor/github.com/dgraph-io/badger/v2/.deepsource.toml deleted file mode 100644 index 266045f0..00000000 --- a/vendor/github.com/dgraph-io/badger/v2/.deepsource.toml +++ /dev/null @@ -1,18 +0,0 @@ -version = 1 - -test_patterns = [ - 'integration/testgc/**', - '**/*_test.go' -] - -exclude_patterns = [ - -] - -[[analyzers]] -name = 'go' -enabled = true - - - [analyzers.meta] - import_path = 'github.com/dgraph-io/badger' diff --git a/vendor/github.com/dgraph-io/badger/v2/.gitignore b/vendor/github.com/dgraph-io/badger/v2/.gitignore deleted file mode 100644 index e3efdf58..00000000 --- a/vendor/github.com/dgraph-io/badger/v2/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -p/ -badger-test*/ diff --git a/vendor/github.com/dgraph-io/badger/v2/.golangci.yml b/vendor/github.com/dgraph-io/badger/v2/.golangci.yml deleted file mode 100644 index fecb8644..00000000 --- a/vendor/github.com/dgraph-io/badger/v2/.golangci.yml +++ /dev/null @@ -1,27 +0,0 @@ -run: - tests: false - -linters-settings: - lll: - line-length: 100 - -linters: - disable-all: true - enable: - - errcheck - - ineffassign - - gas - - gofmt - - golint - - gosimple - - govet - - lll - - varcheck - - unused - -issues: - exclude-rules: - - linters: - - gosec - text: "G404: " - \ No newline at end of file diff --git a/vendor/github.com/dgraph-io/badger/v2/.travis.yml b/vendor/github.com/dgraph-io/badger/v2/.travis.yml deleted file mode 100644 index fbcefbae..00000000 --- a/vendor/github.com/dgraph-io/badger/v2/.travis.yml +++ /dev/null @@ -1,52 +0,0 @@ -language: go - -go: - - "1.12" - - 
"1.13" - - tip -os: - - osx -env: - jobs: - - GOARCH=386 - - GOARCH=amd64 - global: - - secure: CRkV2+/jlO0gXzzS50XGxfMS117FNwiVjxNY/LeWq06RKD+dDCPxTJl3JCNe3l0cYEPAglV2uMMYukDiTqJ7e+HI4nh4N4mv6lwx39N8dAvJe1x5ITS2T4qk4kTjuQb1Q1vw/ZOxoQqmvNKj2uRmBdJ/HHmysbRJ1OzCWML3OXdUwJf0AYlJzTjpMfkOKr7sTtE4rwyyQtd4tKH1fGdurgI9ZuFd9qvYxK2qcJhsQ6CNqMXt+7FkVkN1rIPmofjjBTNryzUr4COFXuWH95aDAif19DeBW4lbNgo1+FpDsrgmqtuhl6NAuptI8q/imow2KXBYJ8JPXsxW8DVFj0IIp0RCd3GjaEnwBEbxAyiIHLfW7AudyTS/dJOvZffPqXnuJ8xj3OPIdNe4xY0hWl8Ju2HhKfLOAHq7VadHZWd3IHLil70EiL4/JLD1rNbMImUZisFaA8pyrcIvYYebjOnk4TscwKFLedClRSX1XsMjWWd0oykQtrdkHM2IxknnBpaLu7mFnfE07f6dkG0nlpyu4SCLey7hr5FdcEmljA0nIxTSYDg6035fQkBEAbe7hlESOekkVNT9IZPwG+lmt3vU4ofi6NqNbJecOuSB+h36IiZ9s4YQtxYNnLgW14zjuFGGyT5smc3IjBT7qngDjKIgyrSVoRkY/8udy9qbUgvBeW8= - - -jobs: - allow_failures: - - go: tip - exclude: - # Exclude builds for 386 architecture on go 1.12 and tip - # Since we don't want it to run for 32 bit - - go: "1.12" - env: GOARCH=386 - - go: tip - env: GOARCH=386 - include: - # Define one extra linux build, which we use to run cross - # compiled 32 bit tests - - os: linux - arch: arm64 - go: "1.14" - env: go_32=yes - -notifications: - email: false - slack: - secure: X7uBLWYbuUhf8QFE16CoS5z7WvFR8EN9j6cEectMW6mKZ3vwXGwVXRIPsgUq/606DsQdCCx34MR8MRWYGlu6TBolbSe9y0EP0i46yipPz22YtuT7umcVUbGEyx8MZKgG0v1u/zA0O4aCsOBpGAA3gxz8h3JlEHDt+hv6U8xRsSllVLzLSNb5lwxDtcfEDxVVqP47GMEgjLPM28Pyt5qwjk7o5a4YSVzkfdxBXxd3gWzFUWzJ5E3cTacli50dK4GVfiLcQY2aQYoYO7AAvDnvP+TPfjDkBlUEE4MUz5CDIN51Xb+WW33sX7g+r3Bj7V5IRcF973RiYkpEh+3eoiPnyWyxhDZBYilty3b+Hysp6d4Ov/3I3ll7Bcny5+cYjakjkMH3l9w3gs6Y82GlpSLSJshKWS8vPRsxFe0Pstj6QSJXTd9EBaFr+l1ScXjJv/Sya9j8N9FfTuOTESWuaL1auX4Y7zEEVHlA8SCNOO8K0eTfxGZnC/YcIHsR8rePEAcFxfOYQppkyLF/XvAtnb/LMUuu0g4y2qNdme6Oelvyar1tFEMRtbl4mRCdu/krXBFtkrsfUaVY6WTPdvXAGotsFJ0wuA53zGVhlcd3+xAlSlR3c1QX95HIMeivJKb5L4nTjP+xnrmQNtnVk+tG4LSH2ltuwcZSSczModtcBmRefrk= - -script: >- - if [ $TRAVIS_OS_NAME = "linux" ] && [ $go_32 ]; then - uname -a - GOOS=linux GOARCH=arm go test -v ./... - # Another round of tests after turning off mmap. - GOOS=linux GOARCH=arm go test -v -vlog_mmap=false github.com/dgraph-io/badger - else - go test -v ./... - # Another round of tests after turning off mmap. - go test -v -vlog_mmap=false github.com/dgraph-io/badger - # Cross-compile for Plan 9 - GOOS=plan9 go build ./... - fi diff --git a/vendor/github.com/dgraph-io/badger/v2/CHANGELOG.md b/vendor/github.com/dgraph-io/badger/v2/CHANGELOG.md deleted file mode 100644 index 2cc490e4..00000000 --- a/vendor/github.com/dgraph-io/badger/v2/CHANGELOG.md +++ /dev/null @@ -1,482 +0,0 @@ -# Changelog -All notable changes to this project will be documented in this file. - -The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/). - -## [2.2007.4] - 2021-08-25 - -### Fixed - - Fix build on Plan 9 (#1451) (#1508) (#1738) - -### Features - - feat(zstd): backport replacement of DataDog's zstd with Klauspost's zstd (#1736) - -## [2.2007.3] - 2021-07-21 - -### Fixed - - fix(maxVersion): Use choosekey instead of KeyToList (#1532) #1533 - - fix(flatten): Add --num_versions flag (#1518) #1520 - - fix(build): Fix integer overflow on 32-bit architectures #1558 - - fix(pb): avoid protobuf warning due to common filename (#1519) - -### Features - - Add command to stream contents of DB into another DB. 
(#1486) - -### New APIs - - DB.StreamDB - - DB.MaxVersion - -## [2.2007.2] - 2020-08-31 - -### Fixed - - Compaction: Use separate compactors for L0, L1 (#1466) - - Rework Block and Index cache (#1473) - - Add IsClosed method (#1478) - - Cleanup: Avoid truncating in vlog.Open on error (#1465) - - Cleanup: Do not close cache before compactions (#1464) - -### New APIs -- Badger.DB - - BlockCacheMetrics (#1473) - - IndexCacheMetrics (#1473) -- Badger.Option - - WithBlockCacheSize (#1473) - - WithIndexCacheSize (#1473) - -### Removed APIs [Breaking Changes] -- Badger.DB - - DataCacheMetrics (#1473) - - BfCacheMetrics (#1473) -- Badger.Option - - WithMaxCacheSize (#1473) - - WithMaxBfCacheSize (#1473) - - WithKeepBlockIndicesInCache (#1473) - - WithKeepBlocksInCache (#1473) - -## [2.2007.1] - 2020-08-19 - -### Fixed - - Remove vlog file if bootstrap, syncDir or mmap fails (#1434) - - levels: Compaction incorrectly drops some delete markers (#1422) - - Replay: Update head for LSM entires also (#1456) - -## [2.2007.0] - 2020-08-10 - -### Fixed - - Add a limit to the size of the batches sent over a stream. (#1412) - - Fix Sequence generates duplicate values (#1281) - - Fix race condition in DoesNotHave (#1287) - - Fail fast if cgo is disabled and compression is ZSTD (#1284) - - Proto: make badger/v2 compatible with v1 (#1293) - - Proto: Rename dgraph.badger.v2.pb to badgerpb2 (#1314) - - Handle duplicates in ManagedWriteBatch (#1315) - - Ensure `bitValuePointer` flag is cleared for LSM entry values written to LSM (#1313) - - DropPrefix: Return error on blocked writes (#1329) - - Confirm `badgerMove` entry required before rewrite (#1302) - - Drop move keys when its key prefix is dropped (#1331) - - Iterator: Always add key to txn.reads (#1328) - - Restore: Account for value size as well (#1358) - - Compaction: Expired keys and delete markers are never purged (#1354) - - GC: Consider size of value while rewriting (#1357) - - Force KeepL0InMemory to be true when InMemory is true (#1375) - - Rework DB.DropPrefix (#1381) - - Update head while replaying value log (#1372) - - Avoid panic on multiple closer.Signal calls (#1401) - - Return error if the vlog writes exceeds more than 4GB (#1400) - -### Performance - - Clean up transaction oracle as we go (#1275) - - Use cache for storing block offsets (#1336) - -### Features - - Support disabling conflict detection (#1344) - - Add leveled logging (#1249) - - Support entry version in Write batch (#1310) - - Add Write method to batch write (#1321) - - Support multiple iterators in read-write transactions (#1286) - -### New APIs -- Badger.DB - - NewManagedWriteBatch (#1310) - - DropPrefix (#1381) -- Badger.Option - - WithDetectConflicts (#1344) - - WithKeepBlockIndicesInCache (#1336) - - WithKeepBlocksInCache (#1336) -- Badger.WriteBatch - - DeleteAt (#1310) - - SetEntryAt (#1310) - - Write (#1321) - -### Changes to Default Options - - DefaultOptions: Set KeepL0InMemory to false (#1345) - - Increase default valueThreshold from 32B to 1KB (#1346) - -### Deprecated -- Badger.Option - - WithEventLogging (#1203) - -### Reverts -This sections lists the changes which were reverted because of non-reproducible crashes. 
-- Compress/Encrypt Blocks in the background (#1227) - - -## [2.0.3] - 2020-03-24 - -### Fixed - -- Add support for watching nil prefix in subscribe API (#1246) - -### Performance - -- Compress/Encrypt Blocks in the background (#1227) -- Disable cache by default (#1257) - -### Features - -- Add BypassDirLock option (#1243) -- Add separate cache for bloomfilters (#1260) - -### New APIs -- badger.DB - - BfCacheMetrics (#1260) - - DataCacheMetrics (#1260) -- badger.Options - - WithBypassLockGuard (#1243) - - WithLoadBloomsOnOpen (#1260) - - WithMaxBfCacheSize (#1260) - -## [2.0.3] - 2020-03-24 - -### Fixed - -- Add support for watching nil prefix in subscribe API (#1246) - -### Performance - -- Compress/Encrypt Blocks in the background (#1227) -- Disable cache by default (#1257) - -### Features - -- Add BypassDirLock option (#1243) -- Add separate cache for bloomfilters (#1260) - -### New APIs -- badger.DB - - BfCacheMetrics (#1260) - - DataCacheMetrics (#1260) -- badger.Options - - WithBypassLockGuard (#1243) - - WithLoadBloomsOnOpen (#1260) - - WithMaxBfCacheSize (#1260) - -## [2.0.2] - 2020-03-02 - -### Fixed - -- Cast sz to uint32 to fix compilation on 32 bit. (#1175) -- Fix checkOverlap in compaction. (#1166) -- Avoid sync in inmemory mode. (#1190) -- Support disabling the cache completely. (#1185) -- Add support for caching bloomfilters. (#1204) -- Fix int overflow for 32bit. (#1216) -- Remove the 'this entry should've caught' log from value.go. (#1170) -- Rework concurrency semantics of valueLog.maxFid. (#1187) - -### Performance - -- Use fastRand instead of locked-rand in skiplist. (#1173) -- Improve write stalling on level 0 and 1. (#1186) -- Disable compression and set ZSTD Compression Level to 1. (#1191) - -## [2.0.1] - 2020-01-02 - -### New APIs - -- badger.Options - - WithInMemory (f5b6321) - - WithZSTDCompressionLevel (3eb4e72) - -- Badger.TableInfo - - EstimatedSz (f46f8ea) - -### Features - -- Introduce in-memory mode in badger. (#1113) - -### Fixed - -- Limit manifest's change set size. (#1119) -- Cast idx to uint32 to fix compilation on i386. (#1118) -- Fix request increment ref bug. (#1121) -- Fix windows dataloss issue. (#1134) -- Fix VerifyValueChecksum checks. (#1138) -- Fix encryption in stream writer. (#1146) -- Fix segmentation fault in vlog.Read. (header.Decode) (#1150) -- Fix merge iterator duplicates issue. (#1157) - -### Performance - -- Set level 15 as default compression level in Zstd. (#1111) -- Optimize createTable in stream_writer.go. (#1132) - -## [2.0.0] - 2019-11-12 - -### New APIs - -- badger.DB - - NewWriteBatchAt (7f43769) - - CacheMetrics (b9056f1) - -- badger.Options - - WithMaxCacheSize (b9056f1) - - WithEventLogging (75c6a44) - - WithBlockSize (1439463) - - WithBloomFalsePositive (1439463) - - WithKeepL0InMemory (ee70ff2) - - WithVerifyValueChecksum (ee70ff2) - - WithCompression (5f3b061) - - WithEncryptionKey (a425b0e) - - WithEncryptionKeyRotationDuration (a425b0e) - - WithChecksumVerificationMode (7b4083d) - -### Features - -- Data cache to speed up lookups and iterations. (#1066) -- Data compression. (#1013) -- Data encryption-at-rest. (#1042) - -### Fixed - -- Fix deadlock when flushing discard stats. (#976) -- Set move key's expiresAt for keys with TTL. (#1006) -- Fix unsafe usage in Decode. (#1097) -- Fix race condition on db.orc.nextTxnTs. (#1101) -- Fix level 0 GC dataloss bug. (#1090) -- Fix deadlock in discard stats. (#1070) -- Support checksum verification for values read from vlog. (#1052) -- Store entire L0 in memory. 
(#963) -- Fix table.Smallest/Biggest and iterator Prefix bug. (#997) -- Use standard proto functions for Marshal/Unmarshal and Size. (#994) -- Fix boundaries on GC batch size. (#987) -- VlogSize to store correct directory name to expvar.Map. (#956) -- Fix transaction too big issue in restore. (#957) -- Fix race condition in updateDiscardStats. (#973) -- Cast results of len to uint32 to fix compilation in i386 arch. (#961) -- Making the stream writer APIs goroutine-safe. (#959) -- Fix prefix bug in key iterator and allow all versions. (#950) -- Drop discard stats if we can't unmarshal it. (#936) -- Fix race condition in flushDiscardStats function. (#921) -- Ensure rewrite in vlog is within transactional limits. (#911) -- Fix discard stats moved by GC bug. (#929) -- Fix busy-wait loop in Watermark. (#920) - -### Performance - -- Introduce fast merge iterator. (#1080) -- Binary search based table picker. (#983) -- Flush vlog buffer if it grows beyond threshold. (#1067) -- Introduce StreamDone in Stream Writer. (#1061) -- Performance Improvements to block iterator. (#977) -- Prevent unnecessary safecopy in iterator parseKV. (#971) -- Use pointers instead of binary encoding. (#965) -- Reuse block iterator inside table iterator. (#972) -- [breaking/format] Remove vlen from entry header. (#945) -- Replace FarmHash with AESHash for Oracle conflicts. (#952) -- [breaking/format] Optimize Bloom filters. (#940) -- [breaking/format] Use varint for header encoding (without header length). (#935) -- Change file picking strategy in compaction. (#894) -- [breaking/format] Block level changes. (#880) -- [breaking/format] Add key-offset index to the end of SST table. (#881) - - -## [1.6.0] - 2019-07-01 - -This is a release including almost 200 commits, so expect many changes - some of them -not backward compatible. - -Regarding backward compatibility in Badger versions, you might be interested on reading -[VERSIONING.md](VERSIONING.md). - -_Note_: The hashes in parentheses correspond to the commits that impacted the given feature. 
- -### New APIs - -- badger.DB - - DropPrefix (291295e) - - Flatten (7e41bba) - - KeySplits (4751ef1) - - MaxBatchCount (b65e2a3) - - MaxBatchSize (b65e2a3) - - PrintKeyValueHistogram (fd59907) - - Subscribe (26128a7) - - Sync (851e462) - -- badger.DefaultOptions() and badger.LSMOnlyOptions() (91ce687) - - badger.Options.WithX methods - -- badger.Entry (e9447c9) - - NewEntry - - WithMeta - - WithDiscard - - WithTTL - -- badger.Item - - KeySize (fd59907) - - ValueSize (5242a99) - -- badger.IteratorOptions - - PickTable (7d46029, 49a49e3) - - Prefix (7d46029) - -- badger.Logger (fbb2778) - -- badger.Options - - CompactL0OnClose (7e41bba) - - Logger (3f66663) - - LogRotatesToFlush (2237832) - -- badger.Stream (14cbd89, 3258067) -- badger.StreamWriter (7116e16) -- badger.TableInfo.KeyCount (fd59907) -- badger.TableManifest (2017987) -- badger.Tx.NewKeyIterator (49a49e3) -- badger.WriteBatch (6daccf9, 7e78e80) - -### Modified APIs - -#### Breaking changes: - -- badger.DefaultOptions and badger.LSMOnlyOptions are now functions rather than variables (91ce687) -- badger.Item.Value now receives a function that returns an error (439fd46) -- badger.Txn.Commit doesn't receive any params now (6daccf9) -- badger.DB.Tables now receives a boolean (76b5341) - -#### Not breaking changes: - -- badger.LSMOptions changed values (799c33f) -- badger.DB.NewIterator now allows multiple iterators per RO txn (41d9656) -- badger.Options.TableLoadingMode's new default is options.MemoryMap (6b97bac) - -### Removed APIs - -- badger.ManagedDB (d22c0e8) -- badger.Options.DoNotCompact (7e41bba) -- badger.Txn.SetWithX (e9447c9) - -### Tools: - -- badger bank disect (13db058) -- badger bank test (13db058) --mmap (03870e3) -- badger fill (7e41bba) -- badger flatten (7e41bba) -- badger info --histogram (fd59907) --history --lookup --show-keys --show-meta --with-prefix (09e9b63) --show-internal (fb2eed9) -- badger benchmark read (239041e) -- badger benchmark write (6d3b67d) - -## [1.5.5] - 2019-06-20 - -* Introduce support for Go Modules - -## [1.5.3] - 2018-07-11 -Bug Fixes: -* Fix a panic caused due to item.vptr not copying over vs.Value, when looking - for a move key. - -## [1.5.2] - 2018-06-19 -Bug Fixes: -* Fix the way move key gets generated. -* If a transaction has unclosed, or multiple iterators running simultaneously, - throw a panic. Every iterator must be properly closed. At any point in time, - only one iterator per transaction can be running. This is to avoid bugs in a - transaction data structure which is thread unsafe. - -* *Warning: This change might cause panics in user code. Fix is to properly - close your iterators, and only have one running at a time per transaction.* - -## [1.5.1] - 2018-06-04 -Bug Fixes: -* Fix for infinite yieldItemValue recursion. #503 -* Fix recursive addition of `badgerMove` prefix. https://github.com/dgraph-io/badger/commit/2e3a32f0ccac3066fb4206b28deb39c210c5266f -* Use file size based window size for sampling, instead of fixing it to 10MB. #501 - -Cleanup: -* Clarify comments and documentation. -* Move badger tool one directory level up. - -## [1.5.0] - 2018-05-08 -* Introduce `NumVersionsToKeep` option. This option is used to discard many - versions of the same key, which saves space. -* Add a new `SetWithDiscard` method, which would indicate that all the older - versions of the key are now invalid. Those versions would be discarded during - compactions. -* Value log GC moves are now bound to another keyspace to ensure latest versions - of data are always at the top in LSM tree. 
-* Introduce `ValueLogMaxEntries` to restrict the number of key-value pairs per - value log file. This helps bound the time it takes to garbage collect one - file. - -## [1.4.0] - 2018-05-04 -* Make mmap-ing of value log optional. -* Run GC multiple times, based on recorded discard statistics. -* Add MergeOperator. -* Force compact L0 on clsoe (#439). -* Add truncate option to warn about data loss (#452). -* Discard key versions during compaction (#464). -* Introduce new `LSMOnlyOptions`, to make Badger act like a typical LSM based DB. - -Bug fix: -* (Temporary) Check max version across all tables in Get (removed in next - release). -* Update commit and read ts while loading from backup. -* Ensure all transaction entries are part of the same value log file. -* On commit, run unlock callbacks before doing writes (#413). -* Wait for goroutines to finish before closing iterators (#421). - -## [1.3.0] - 2017-12-12 -* Add `DB.NextSequence()` method to generate monotonically increasing integer - sequences. -* Add `DB.Size()` method to return the size of LSM and value log files. -* Tweaked mmap code to make Windows 32-bit builds work. -* Tweaked build tags on some files to make iOS builds work. -* Fix `DB.PurgeOlderVersions()` to not violate some constraints. - -## [1.2.0] - 2017-11-30 -* Expose a `Txn.SetEntry()` method to allow setting the key-value pair - and all the metadata at the same time. - -## [1.1.1] - 2017-11-28 -* Fix bug where txn.Get was returing key deleted in same transaction. -* Fix race condition while decrementing reference in oracle. -* Update doneCommit in the callback for CommitAsync. -* Iterator see writes of current txn. - -## [1.1.0] - 2017-11-13 -* Create Badger directory if it does not exist when `badger.Open` is called. -* Added `Item.ValueCopy()` to avoid deadlocks in long-running iterations -* Fixed 64-bit alignment issues to make Badger run on Arm v7 - -## [1.0.1] - 2017-11-06 -* Fix an uint16 overflow when resizing key slice - -[Unreleased]: https://github.com/dgraph-io/badger/compare/v2.2007.2...HEAD -[2.2007.2]: https://github.com/dgraph-io/badger/compare/v2.2007.1...v2.2007.2 -[2.2007.1]: https://github.com/dgraph-io/badger/compare/v2.2007.0...v2.2007.1 -[2.2007.0]: https://github.com/dgraph-io/badger/compare/v2.0.3...v2.2007.0 -[2.0.3]: https://github.com/dgraph-io/badger/compare/v2.0.2...v2.0.3 -[2.0.2]: https://github.com/dgraph-io/badger/compare/v2.0.1...v2.0.2 -[2.0.1]: https://github.com/dgraph-io/badger/compare/v2.0.0...v2.0.1 -[2.0.0]: https://github.com/dgraph-io/badger/compare/v1.6.0...v2.0.0 -[1.6.0]: https://github.com/dgraph-io/badger/compare/v1.5.5...v1.6.0 -[1.5.5]: https://github.com/dgraph-io/badger/compare/v1.5.3...v1.5.5 -[1.5.3]: https://github.com/dgraph-io/badger/compare/v1.5.2...v1.5.3 -[1.5.2]: https://github.com/dgraph-io/badger/compare/v1.5.1...v1.5.2 -[1.5.1]: https://github.com/dgraph-io/badger/compare/v1.5.0...v1.5.1 -[1.5.0]: https://github.com/dgraph-io/badger/compare/v1.4.0...v1.5.0 -[1.4.0]: https://github.com/dgraph-io/badger/compare/v1.3.0...v1.4.0 -[1.3.0]: https://github.com/dgraph-io/badger/compare/v1.2.0...v1.3.0 -[1.2.0]: https://github.com/dgraph-io/badger/compare/v1.1.1...v1.2.0 -[1.1.1]: https://github.com/dgraph-io/badger/compare/v1.1.0...v1.1.1 -[1.1.0]: https://github.com/dgraph-io/badger/compare/v1.0.1...v1.1.0 -[1.0.1]: https://github.com/dgraph-io/badger/compare/v1.0.0...v1.0.1 diff --git a/vendor/github.com/dgraph-io/badger/v2/CODE_OF_CONDUCT.md b/vendor/github.com/dgraph-io/badger/v2/CODE_OF_CONDUCT.md deleted 
file mode 100644 index bf7bbc29..00000000 --- a/vendor/github.com/dgraph-io/badger/v2/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,5 +0,0 @@ -# Code of Conduct - -Our Code of Conduct can be found here: - -https://dgraph.io/conduct diff --git a/vendor/github.com/dgraph-io/badger/v2/CONTRIBUTING.md b/vendor/github.com/dgraph-io/badger/v2/CONTRIBUTING.md deleted file mode 100644 index 30512e9d..00000000 --- a/vendor/github.com/dgraph-io/badger/v2/CONTRIBUTING.md +++ /dev/null @@ -1,107 +0,0 @@ -# Contribution Guide - -* [Before you get started](#before-you-get-started) - * [Code of Conduct](#code-of-conduct) -* [Your First Contribution](#your-first-contribution) - * [Find a good first topic](#find-a-good-first-topic) -* [Setting up your development environment](#setting-up-your-development-environment) - * [Fork the project](#fork-the-project) - * [Clone the project](#clone-the-project) - * [New branch for a new code](#new-branch-for-a-new-code) - * [Test](#test) - * [Commit and push](#commit-and-push) - * [Create a Pull Request](#create-a-pull-request) - * [Sign the CLA](#sign-the-cla) - * [Get a code review](#get-a-code-review) - -## Before you get started - -### Code of Conduct - -Please make sure to read and observe our [Code of Conduct](./CODE_OF_CONDUCT.md). - -## Your First Contribution - -### Find a good first topic - -You can start by finding an existing issue with the -[good first issue](https://github.com/dgraph-io/badger/labels/good%20first%20issue) or [help wanted](https://github.com/dgraph-io/badger/labels/help%20wanted) labels. These issues are well suited for new contributors. - - -## Setting up your development environment - -Badger uses [`Go Modules`](https://github.com/golang/go/wiki/Modules) -to manage dependencies. The version of Go should be **1.12** or above. - -### Fork the project - -- Visit https://github.com/dgraph-io/badger -- Click the `Fork` button (top right) to create a fork of the repository - -### Clone the project - -```sh -$ git clone https://github.com/$GITHUB_USER/badger -$ cd badger -$ git remote add upstream git@github.com:dgraph-io/badger.git - -# Never push to the upstream master -git remote set-url --push upstream no_push -``` - -### New branch for a new code - -Get your local master up to date: - -```sh -$ git fetch upstream -$ git checkout master -$ git rebase upstream/master -``` - -Create a new branch from the master: - -```sh -$ git checkout -b my_new_feature -``` - -And now you can finally add your changes to project. - -### Test - -Build and run all tests: - -```sh -$ ./test.sh -``` - -### Commit and push - -Commit your changes: - -```sh -$ git commit -``` - -When the changes are ready to review: - -```sh -$ git push origin my_new_feature -``` - -### Create a Pull Request - -Just open `https://github.com/$GITHUB_USER/badger/pull/new/my_new_feature` and -fill the PR description. - -### Sign the CLA - -Click the **Sign in with Github to agree** button to sign the CLA. [An example](https://cla-assistant.io/dgraph-io/badger?pullRequest=1377). - -### Get a code review - -If your pull request (PR) is opened, it will be assigned to one or more -reviewers. Those reviewers will do a code review. - -To address review comments, you should commit the changes to the same branch of -the PR on your fork. 
diff --git a/vendor/github.com/dgraph-io/badger/v2/LICENSE b/vendor/github.com/dgraph-io/badger/v2/LICENSE deleted file mode 100644 index d9a10c0d..00000000 --- a/vendor/github.com/dgraph-io/badger/v2/LICENSE +++ /dev/null @@ -1,176 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. 
Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS diff --git a/vendor/github.com/dgraph-io/badger/v2/README.md b/vendor/github.com/dgraph-io/badger/v2/README.md deleted file mode 100644 index f92b82a2..00000000 --- a/vendor/github.com/dgraph-io/badger/v2/README.md +++ /dev/null @@ -1,928 +0,0 @@ -# BadgerDB [![GoDoc](https://godoc.org/github.com/dgraph-io/badger?status.svg)](https://godoc.org/github.com/dgraph-io/badger) [![Go Report Card](https://goreportcard.com/badge/github.com/dgraph-io/badger)](https://goreportcard.com/report/github.com/dgraph-io/badger) [![Sourcegraph](https://sourcegraph.com/github.com/dgraph-io/badger/-/badge.svg)](https://sourcegraph.com/github.com/dgraph-io/badger?badge) [![Build Status](https://teamcity.dgraph.io/guestAuth/app/rest/builds/buildType:(id:Badger_UnitTests)/statusIcon.svg)](https://teamcity.dgraph.io/viewLog.html?buildTypeId=Badger_UnitTests&buildId=lastFinished&guest=1) ![Appveyor](https://ci.appveyor.com/api/projects/status/github/dgraph-io/badger?branch=master&svg=true) [![Coverage Status](https://coveralls.io/repos/github/dgraph-io/badger/badge.svg?branch=master)](https://coveralls.io/github/dgraph-io/badger?branch=master) - -![Badger mascot](images/diggy-shadow.png) - -BadgerDB is an embeddable, persistent and fast key-value (KV) database written -in pure Go. It is the underlying database for [Dgraph](https://dgraph.io), a -fast, distributed graph database. It's meant to be a performant alternative to -non-Go-based key-value stores like RocksDB. - -## Project Status [March 24, 2020] - -Badger is stable and is being used to serve data sets worth hundreds of -terabytes. Badger supports concurrent ACID transactions with serializable -snapshot isolation (SSI) guarantees. A Jepsen-style bank test runs nightly for -8h, with `--race` flag and ensures the maintenance of transactional guarantees. -Badger has also been tested to work with filesystem level anomalies, to ensure -persistence and consistency. Badger is being used by a number of projects which -includes Dgraph, Jaeger Tracing, UsenetExpress, and many more. - -The list of projects using Badger can be found [here](#projects-using-badger). - -Badger v1.0 was released in Nov 2017, and the latest version that is data-compatible -with v1.0 is v1.6.0. - -Badger v2.0 was released in Nov 2019 with a new storage format which won't -be compatible with all of the v1.x. Badger v2.0 supports compression, encryption and uses a cache to speed up lookup. - -The [Changelog] is kept fairly up-to-date. - -For more details on our version naming schema please read [Choosing a version](#choosing-a-version). 
- -[Changelog]:https://github.com/dgraph-io/badger/blob/master/CHANGELOG.md - -## Table of Contents - * [Getting Started](#getting-started) - + [Installing](#installing) - - [Choosing a version](#choosing-a-version) - + [Opening a database](#opening-a-database) - + [Transactions](#transactions) - - [Read-only transactions](#read-only-transactions) - - [Read-write transactions](#read-write-transactions) - - [Managing transactions manually](#managing-transactions-manually) - + [Using key/value pairs](#using-keyvalue-pairs) - + [Monotonically increasing integers](#monotonically-increasing-integers) - * [Merge Operations](#merge-operations) - + [Setting Time To Live(TTL) and User Metadata on Keys](#setting-time-to-livettl-and-user-metadata-on-keys) - + [Iterating over keys](#iterating-over-keys) - - [Prefix scans](#prefix-scans) - - [Key-only iteration](#key-only-iteration) - + [Stream](#stream) - + [Garbage Collection](#garbage-collection) - + [Database backup](#database-backup) - + [Memory usage](#memory-usage) - + [Statistics](#statistics) - * [Resources](#resources) - + [Blog Posts](#blog-posts) - * [Contact](#contact) - * [Design](#design) - + [Comparisons](#comparisons) - + [Benchmarks](#benchmarks) - * [Projects Using Badger](#projects-using-badger) - * [Contributing](#contributing) - * [Frequently Asked Questions](#frequently-asked-questions) - -## Getting Started - -### Installing -To start using Badger, install Go 1.12 or above and run `go get`: - -```sh -$ go get github.com/dgraph-io/badger/v2 -``` - -This will retrieve the library and install the `badger` command line -utility into your `$GOBIN` path. - - -#### Choosing a version - -BadgerDB is a pretty special package from the point of view that the most important change we can -make to it is not on its API but rather on how data is stored on disk. - -This is why we follow a version naming schema that differs from Semantic Versioning. - -- New major versions are released when the data format on disk changes in an incompatible way. -- New minor versions are released whenever the API changes but data compatibility is maintained. - Note that the changes on the API could be backward-incompatible - unlike Semantic Versioning. -- New patch versions are released when there's no changes to the data format nor the API. - -Following these rules: - -- v1.5.0 and v1.6.0 can be used on top of the same files without any concerns, as their major - version is the same, therefore the data format on disk is compatible. -- v1.6.0 and v2.0.0 are data incompatible as their major version implies, so files created with - v1.6.0 will need to be converted into the new format before they can be used by v2.0.0. - -For a longer explanation on the reasons behind using a new versioning naming schema, you can read -[VERSIONING.md](VERSIONING.md). - -### Opening a database -The top-level object in Badger is a `DB`. It represents multiple files on disk -in specific directories, which contain the data for a single database. - -To open your database, use the `badger.Open()` function, with the appropriate -options. The `Dir` and `ValueDir` options are mandatory and must be -specified by the client. They can be set to the same value to simplify things. - -```go -package main - -import ( - "log" - - badger "github.com/dgraph-io/badger/v2" -) - -func main() { - // Open the Badger database located in the /tmp/badger directory. - // It will be created if it doesn't exist. 
- db, err := badger.Open(badger.DefaultOptions("/tmp/badger")) - if err != nil { - log.Fatal(err) - } - defer db.Close() -  // Your code here… -} -``` - -Please note that Badger obtains a lock on the directories so multiple processes -cannot open the same database at the same time. - -#### In-Memory Mode/Diskless Mode -By default, Badger ensures all the data is persisted to the disk. It also supports a pure -in-memory mode. When Badger is running in in-memory mode, all the data is stored in the memory. -Reads and writes are much faster in in-memory mode, but all the data stored in Badger will be lost -in case of a crash or close. To open badger in in-memory mode, set the `InMemory` option. - -``` -opt := badger.DefaultOptions("").WithInMemory(true) -``` - -### Transactions - -#### Read-only transactions -To start a read-only transaction, you can use the `DB.View()` method: - -```go -err := db.View(func(txn *badger.Txn) error { -  // Your code here… -  return nil -}) -``` - -You cannot perform any writes or deletes within this transaction. Badger -ensures that you get a consistent view of the database within this closure. Any -writes that happen elsewhere after the transaction has started, will not be -seen by calls made within the closure. - -#### Read-write transactions -To start a read-write transaction, you can use the `DB.Update()` method: - -```go -err := db.Update(func(txn *badger.Txn) error { -  // Your code here… -  return nil -}) -``` - -All database operations are allowed inside a read-write transaction. - -Always check the returned error value. If you return an error -within your closure it will be passed through. - -An `ErrConflict` error will be reported in case of a conflict. Depending on the state -of your application, you have the option to retry the operation if you receive -this error. - -An `ErrTxnTooBig` will be reported in case the number of pending writes/deletes in -the transaction exceeds a certain limit. In that case, it is best to commit the -transaction and start a new transaction immediately. Here is an example (we are -not checking for errors in some places for simplicity): - -```go -updates := make(map[string]string) -txn := db.NewTransaction(true) -for k,v := range updates { - if err := txn.Set([]byte(k),[]byte(v)); err == badger.ErrTxnTooBig { - _ = txn.Commit() - txn = db.NewTransaction(true) - _ = txn.Set([]byte(k),[]byte(v)) - } -} -_ = txn.Commit() -``` - -#### Managing transactions manually -The `DB.View()` and `DB.Update()` methods are wrappers around the -`DB.NewTransaction()` and `Txn.Commit()` methods (or `Txn.Discard()` in case of -read-only transactions). These helper methods will start the transaction, -execute a function, and then safely discard your transaction if an error is -returned. This is the recommended way to use Badger transactions. - -However, sometimes you may want to manually create and commit your -transactions. You can use the `DB.NewTransaction()` function directly, which -takes in a boolean argument to specify whether a read-write transaction is -required. For read-write transactions, it is necessary to call `Txn.Commit()` -to ensure the transaction is committed. For read-only transactions, calling -`Txn.Discard()` is sufficient. `Txn.Commit()` also calls `Txn.Discard()` -internally to cleanup the transaction, so just calling `Txn.Commit()` is -sufficient for read-write transaction. 
However, if your code doesn’t call -`Txn.Commit()` for some reason (for e.g it returns prematurely with an error), -then please make sure you call `Txn.Discard()` in a `defer` block. Refer to the -code below. - -```go -// Start a writable transaction. -txn := db.NewTransaction(true) -defer txn.Discard() - -// Use the transaction... -err := txn.Set([]byte("answer"), []byte("42")) -if err != nil { - return err -} - -// Commit the transaction and check for error. -if err := txn.Commit(); err != nil { - return err -} -``` - -The first argument to `DB.NewTransaction()` is a boolean stating if the transaction -should be writable. - -Badger allows an optional callback to the `Txn.Commit()` method. Normally, the -callback can be set to `nil`, and the method will return after all the writes -have succeeded. However, if this callback is provided, the `Txn.Commit()` -method returns as soon as it has checked for any conflicts. The actual writing -to the disk happens asynchronously, and the callback is invoked once the -writing has finished, or an error has occurred. This can improve the throughput -of the application in some cases. But it also means that a transaction is not -durable until the callback has been invoked with a `nil` error value. - -### Using key/value pairs -To save a key/value pair, use the `Txn.Set()` method: - -```go -err := db.Update(func(txn *badger.Txn) error { - err := txn.Set([]byte("answer"), []byte("42")) - return err -}) -``` - -Key/Value pair can also be saved by first creating `Entry`, then setting this -`Entry` using `Txn.SetEntry()`. `Entry` also exposes methods to set properties -on it. - -```go -err := db.Update(func(txn *badger.Txn) error { - e := badger.NewEntry([]byte("answer"), []byte("42")) - err := txn.SetEntry(e) - return err -}) -``` - -This will set the value of the `"answer"` key to `"42"`. To retrieve this -value, we can use the `Txn.Get()` method: - -```go -err := db.View(func(txn *badger.Txn) error { - item, err := txn.Get([]byte("answer")) - handle(err) - - var valNot, valCopy []byte - err := item.Value(func(val []byte) error { - // This func with val would only be called if item.Value encounters no error. - - // Accessing val here is valid. - fmt.Printf("The answer is: %s\n", val) - - // Copying or parsing val is valid. - valCopy = append([]byte{}, val...) - - // Assigning val slice to another variable is NOT OK. - valNot = val // Do not do this. - return nil - }) - handle(err) - - // DO NOT access val here. It is the most common cause of bugs. - fmt.Printf("NEVER do this. %s\n", valNot) - - // You must copy it to use it outside item.Value(...). - fmt.Printf("The answer is: %s\n", valCopy) - - // Alternatively, you could also use item.ValueCopy(). - valCopy, err = item.ValueCopy(nil) - handle(err) - fmt.Printf("The answer is: %s\n", valCopy) - - return nil -}) -``` - -`Txn.Get()` returns `ErrKeyNotFound` if the value is not found. - -Please note that values returned from `Get()` are only valid while the -transaction is open. If you need to use a value outside of the transaction -then you must use `copy()` to copy it to another byte slice. - -Use the `Txn.Delete()` method to delete a key. - -### Monotonically increasing integers - -To get unique monotonically increasing integers with strong durability, you can -use the `DB.GetSequence` method. This method returns a `Sequence` object, which -is thread-safe and can be used concurrently via various goroutines. 
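The asynchronous-commit paragraph above pairs with the `CommitWith` doc comment in the vendored `txn.go` removed earlier in this diff: the callback is guaranteed to run, so a `sync.WaitGroup` incremented before `CommitWith` and decremented inside the callback can block until every pending commit has finished. A minimal sketch of that pattern, using the same `badger/v2` import as the README examples (the directory path and key names are illustrative):

```go
// Sketch: asynchronous commits via Txn.CommitWith. Because the callback
// always runs, the WaitGroup below reliably tracks outstanding commits.
package main

import (
	"fmt"
	"log"
	"sync"

	badger "github.com/dgraph-io/badger/v2"
)

func main() {
	db, err := badger.Open(badger.DefaultOptions("/tmp/badger-async"))
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		txn := db.NewTransaction(true)
		key := []byte(fmt.Sprintf("key-%03d", i))
		if err := txn.Set(key, []byte("value")); err != nil {
			txn.Discard()
			log.Fatal(err)
		}
		wg.Add(1) // increment BEFORE CommitWith; the callback is guaranteed to run
		txn.CommitWith(func(err error) {
			defer wg.Done()
			if err != nil {
				log.Printf("async commit failed: %v", err)
			}
		})
	}
	wg.Wait() // all callbacks, and hence all commits, have completed
}
```

The throughput win comes from `CommitWith` returning after conflict detection while the disk write proceeds in the background; the trade-off, as the paragraph above notes, is that a transaction is not durable until its callback fires with a `nil` error.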
- -Badger would lease a range of integers to hand out from memory, with the -bandwidth provided to `DB.GetSequence`. The frequency at which disk writes are -done is determined by this lease bandwidth and the frequency of `Next` -invocations. Setting a bandwidth too low would do more disk writes, setting it -too high would result in wasted integers if Badger is closed or crashes. -To avoid wasted integers, call `Release` before closing Badger. - -```go -seq, err := db.GetSequence(key, 1000) -defer seq.Release() -for { - num, err := seq.Next() -} -``` - -### Merge Operations -Badger provides support for ordered merge operations. You can define a func -of type `MergeFunc` which takes in an existing value, and a value to be -_merged_ with it. It returns a new value which is the result of the _merge_ -operation. All values are specified in byte arrays. For e.g., here is a merge -function (`add`) which appends a `[]byte` value to an existing `[]byte` value. - -```Go -// Merge function to append one byte slice to another -func add(originalValue, newValue []byte) []byte { - return append(originalValue, newValue...) -} -``` - -This function can then be passed to the `DB.GetMergeOperator()` method, along -with a key, and a duration value. The duration specifies how often the merge -function is run on values that have been added using the `MergeOperator.Add()` -method. - -`MergeOperator.Get()` method can be used to retrieve the cumulative value of the key -associated with the merge operation. - -```Go -key := []byte("merge") - -m := db.GetMergeOperator(key, add, 200*time.Millisecond) -defer m.Stop() - -m.Add([]byte("A")) -m.Add([]byte("B")) -m.Add([]byte("C")) - -res, _ := m.Get() // res should have value ABC encoded -``` - -Example: Merge operator which increments a counter - -```Go -func uint64ToBytes(i uint64) []byte { - var buf [8]byte - binary.BigEndian.PutUint64(buf[:], i) - return buf[:] -} - -func bytesToUint64(b []byte) uint64 { - return binary.BigEndian.Uint64(b) -} - -// Merge function to add two uint64 numbers -func add(existing, new []byte) []byte { - return uint64ToBytes(bytesToUint64(existing) + bytesToUint64(new)) -} -``` -It can be used as -```Go -key := []byte("merge") - -m := db.GetMergeOperator(key, add, 200*time.Millisecond) -defer m.Stop() - -m.Add(uint64ToBytes(1)) -m.Add(uint64ToBytes(2)) -m.Add(uint64ToBytes(3)) - -res, _ := m.Get() // res should have value 6 encoded -``` - -### Setting Time To Live(TTL) and User Metadata on Keys -Badger allows setting an optional Time to Live (TTL) value on keys. Once the TTL has -elapsed, the key will no longer be retrievable and will be eligible for garbage -collection. A TTL can be set as a `time.Duration` value using the `Entry.WithTTL()` -and `Txn.SetEntry()` API methods. - -```go -err := db.Update(func(txn *badger.Txn) error { - e := badger.NewEntry([]byte("answer"), []byte("42")).WithTTL(time.Hour) - err := txn.SetEntry(e) - return err -}) -``` - -An optional user metadata value can be set on each key. A user metadata value -is represented by a single byte. It can be used to set certain bits along -with the key to aid in interpreting or decoding the key-value pair. User -metadata can be set using `Entry.WithMeta()` and `Txn.SetEntry()` API methods. - -```go -err := db.Update(func(txn *badger.Txn) error { - e := badger.NewEntry([]byte("answer"), []byte("42")).WithMeta(byte(1)) - err := txn.SetEntry(e) - return err -}) -``` - -`Entry` APIs can be used to add the user metadata and TTL for same key. 
This `Entry` -then can be set using `Txn.SetEntry()`. - -```go -err := db.Update(func(txn *badger.Txn) error { - e := badger.NewEntry([]byte("answer"), []byte("42")).WithMeta(byte(1)).WithTTL(time.Hour) - err := txn.SetEntry(e) - return err -}) -``` - -### Iterating over keys -To iterate over keys, we can use an `Iterator`, which can be obtained using the -`Txn.NewIterator()` method. Iteration happens in byte-wise lexicographical sorting -order. - - -```go -err := db.View(func(txn *badger.Txn) error { - opts := badger.DefaultIteratorOptions - opts.PrefetchSize = 10 - it := txn.NewIterator(opts) - defer it.Close() - for it.Rewind(); it.Valid(); it.Next() { - item := it.Item() - k := item.Key() - err := item.Value(func(v []byte) error { - fmt.Printf("key=%s, value=%s\n", k, v) - return nil - }) - if err != nil { - return err - } - } - return nil -}) -``` - -The iterator allows you to move to a specific point in the list of keys and move -forward or backward through the keys one at a time. - -By default, Badger prefetches the values of the next 100 items. You can adjust -that with the `IteratorOptions.PrefetchSize` field. However, setting it to -a value higher than `GOMAXPROCS` (which we recommend to be 128 or higher) -shouldn’t give any additional benefits. You can also turn off the fetching of -values altogether. See section below on key-only iteration. - -#### Prefix scans -To iterate over a key prefix, you can combine `Seek()` and `ValidForPrefix()`: - -```go -db.View(func(txn *badger.Txn) error { - it := txn.NewIterator(badger.DefaultIteratorOptions) - defer it.Close() - prefix := []byte("1234") - for it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() { - item := it.Item() - k := item.Key() - err := item.Value(func(v []byte) error { - fmt.Printf("key=%s, value=%s\n", k, v) - return nil - }) - if err != nil { - return err - } - } - return nil -}) -``` - -#### Key-only iteration -Badger supports a unique mode of iteration called _key-only_ iteration. It is -several order of magnitudes faster than regular iteration, because it involves -access to the LSM-tree only, which is usually resident entirely in RAM. To -enable key-only iteration, you need to set the `IteratorOptions.PrefetchValues` -field to `false`. This can also be used to do sparse reads for selected keys -during an iteration, by calling `item.Value()` only when required. - -```go -err := db.View(func(txn *badger.Txn) error { - opts := badger.DefaultIteratorOptions - opts.PrefetchValues = false - it := txn.NewIterator(opts) - defer it.Close() - for it.Rewind(); it.Valid(); it.Next() { - item := it.Item() - k := item.Key() - fmt.Printf("key=%s\n", k) - } - return nil -}) -``` - -### Stream -Badger provides a Stream framework, which concurrently iterates over all or a -portion of the DB, converting data into custom key-values, and streams it out -serially to be sent over network, written to disk, or even written back to -Badger. This is a lot faster way to iterate over Badger than using a single -Iterator. Stream supports Badger in both managed and normal mode. - -Stream uses the natural boundaries created by SSTables within the LSM tree, to -quickly generate key ranges. Each goroutine then picks a range and runs an -iterator to iterate over it. Each iterator iterates over all versions of values -and is created from the same transaction, thus working over a snapshot of the -DB. Every time a new key is encountered, it calls `ChooseKey(item)`, followed -by `KeyToList(key, itr)`. 
This allows a user to select or reject that key, and -if selected, convert the value versions into custom key-values. The goroutine -batches up 4MB worth of key-values, before sending it over to a channel. -Another goroutine further batches up data from this channel using *smart -batching* algorithm and calls `Send` serially. - -This framework is designed for high throughput key-value iteration, spreading -the work of iteration across many goroutines. `DB.Backup` uses this framework to -provide full and incremental backups quickly. Dgraph is a heavy user of this -framework. In fact, this framework was developed and used within Dgraph, before -getting ported over to Badger. - -```go -stream := db.NewStream() -// db.NewStreamAt(readTs) for managed mode. - -// -- Optional settings -stream.NumGo = 16 // Set number of goroutines to use for iteration. -stream.Prefix = []byte("some-prefix") // Leave nil for iteration over the whole DB. -stream.LogPrefix = "Badger.Streaming" // For identifying stream logs. Outputs to Logger. - -// ChooseKey is called concurrently for every key. If left nil, assumes true by default. -stream.ChooseKey = func(item *badger.Item) bool { - return bytes.HasSuffix(item.Key(), []byte("er")) -} - -// KeyToList is called concurrently for chosen keys. This can be used to convert -// Badger data into custom key-values. If nil, uses stream.ToList, a default -// implementation, which picks all valid key-values. -stream.KeyToList = nil - -// -- End of optional settings. - -// Send is called serially, while Stream.Orchestrate is running. -stream.Send = func(list *pb.KVList) error { - return proto.MarshalText(w, list) // Write to w. -} - -// Run the stream -if err := stream.Orchestrate(context.Background()); err != nil { - return err -} -// Done. -``` - -### Garbage Collection -Badger values need to be garbage collected, because of two reasons: - -* Badger keeps values separately from the LSM tree. This means that the compaction operations -that clean up the LSM tree do not touch the values at all. Values need to be cleaned up -separately. - -* Concurrent read/write transactions could leave behind multiple values for a single key, because they -are stored with different versions. These could accumulate, and take up unneeded space beyond the -time these older versions are needed. - -Badger relies on the client to perform garbage collection at a time of their choosing. It provides -the following method, which can be invoked at an appropriate time: - -* `DB.RunValueLogGC()`: This method is designed to do garbage collection while - Badger is online. Along with randomly picking a file, it uses statistics generated by the - LSM-tree compactions to pick files that are likely to lead to maximum space - reclamation. It is recommended to be called during periods of low activity in - your system, or periodically. One call would only result in removal of at max - one log file. As an optimization, you could also immediately re-run it whenever - it returns nil error (indicating a successful value log GC), as shown below. - - ```go - ticker := time.NewTicker(5 * time.Minute) - defer ticker.Stop() - for range ticker.C { - again: - err := db.RunValueLogGC(0.7) - if err == nil { - goto again - } - } - ``` - -* `DB.PurgeOlderVersions()`: This method is **DEPRECATED** since v1.5.0. Now, Badger's LSM tree automatically discards older/invalid versions of keys. 
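One detail the ticker loop above glosses over: `RunValueLogGC` reports the common "nothing worth collecting" case through its error value (in the v2 API this is the `badger.ErrNoRewrite` sentinel), which callers usually want to treat differently from a real failure. A hedged variant of that loop, assuming an open `*badger.DB` and the same `badger/v2` import as the other examples:

```go
// Sketch: periodic value-log GC that distinguishes "no rewrite happened"
// from genuine errors. One successful call removes at most one log file,
// so it re-runs immediately after each success, as suggested above.
package main

import (
	"log"
	"time"

	badger "github.com/dgraph-io/badger/v2"
)

func runValueLogGC(db *badger.DB, stop <-chan struct{}) {
	ticker := time.NewTicker(5 * time.Minute)
	defer ticker.Stop()
	for {
		select {
		case <-stop:
			return
		case <-ticker.C:
			for {
				err := db.RunValueLogGC(0.7)
				if err == nil {
					continue // a log file was rewritten; try again right away
				}
				if err != badger.ErrNoRewrite {
					log.Printf("value log GC: %v", err)
				}
				break // nothing to collect, or a real error was logged
			}
		}
	}
}

func main() {
	db, err := badger.Open(badger.DefaultOptions("/tmp/badger-gc"))
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	stop := make(chan struct{})
	go runValueLogGC(db, stop)
	time.Sleep(time.Minute) // stand-in for real application work
	close(stop)
}
```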
-
-**Note: The `RunValueLogGC` method does not garbage collect the latest value log.**
-
-### Database backup
-There are two public API methods, `DB.Backup()` and `DB.Load()`, which can be
-used to do online backups and restores. Badger v0.9 provides a CLI tool
-`badger`, which can do offline backup/restore. Make sure you have `$GOPATH/bin`
-in your PATH to use this tool.
-
-The command below will create a version-agnostic backup of the database, to a
-file `badger.bak` in the current working directory:
-
-```
-badger backup --dir <path/to/badgerdb>
-```
-
-To restore `badger.bak` in the current working directory to a new database:
-
-```
-badger restore --dir <path/to/badgerdb>
-```
-
-See `badger --help` for more details.
-
-If you have a Badger database that was created using v0.8 (or below), you can
-use the `badger_backup` tool provided in v0.8.1, and then restore it using the
-command above to upgrade your database to work with the latest version.
-
-```
-badger_backup --dir <path/to/badgerdb> --backup-file badger.bak
-```
-
-We recommend that all users use the `Backup` and `Restore` APIs and tools. However,
-Badger is also rsync-friendly because all files are immutable, barring the
-latest value log, which is append-only. So, rsync can be used as a rudimentary way
-to perform a backup. In the following script, we repeat rsync to ensure that the
-LSM tree remains consistent with the MANIFEST file while doing a full backup.
-
-```
-#!/bin/bash
-set -o history
-set -o histexpand
-# Makes a complete copy of a Badger database directory.
-# Repeat rsync if the MANIFEST and SSTables are updated.
-rsync -avz --delete db/ dst
-while !! | grep -q "(MANIFEST\|\.sst)$"; do :; done
-```
-
-### Memory usage
-Badger's memory usage can be managed by tweaking several options available in
-the `Options` struct that is passed in when opening the database using
-`DB.Open`.
-
-- `Options.ValueLogLoadingMode` can be set to `options.FileIO` (instead of the
-  default `options.MemoryMap`) to avoid memory-mapping log files. This can be
-  useful in environments with low RAM.
-- Number of memtables (`Options.NumMemtables`)
-  - If you modify `Options.NumMemtables`, also adjust `Options.NumLevelZeroTables` and
-    `Options.NumLevelZeroTablesStall` accordingly.
-- Number of concurrent compactions (`Options.NumCompactors`)
-- Mode in which the LSM tree is loaded (`Options.TableLoadingMode`)
-- Size of table (`Options.MaxTableSize`)
-- Size of value log file (`Options.ValueLogFileSize`)
-
-If you want to decrease the memory usage of a Badger instance, tweak these
-options (ideally one at a time) until you achieve the desired memory usage.
-
-### Statistics
-Badger records metrics using the [expvar] package, which is included in the Go
-standard library. All the metrics are documented in the [y/metrics.go][metrics]
-file.
-
-The `expvar` package adds a handler to the default HTTP server (which has to be
-started explicitly), and serves up the metrics at the `/debug/vars` endpoint.
-These metrics can then be collected by a system like [Prometheus], to get
-better visibility into what Badger is doing.
-
-[expvar]: https://golang.org/pkg/expvar/
-[metrics]: https://github.com/dgraph-io/badger/blob/master/y/metrics.go
-[Prometheus]: https://prometheus.io/
-
-## Resources
-
-### Blog Posts
-1. [Introducing Badger: A fast key-value store written natively in
-Go](https://open.dgraph.io/post/badger/)
-2. [Make Badger crash resilient with ALICE](https://blog.dgraph.io/post/alice/)
-3.
[Badger vs LMDB vs BoltDB: Benchmarking key-value databases in Go](https://blog.dgraph.io/post/badger-lmdb-boltdb/)
-4. [Concurrent ACID Transactions in Badger](https://blog.dgraph.io/post/badger-txn/)
-
-## Design
-Badger was written with these design goals in mind:
-
-- Write a key-value database in pure Go.
-- Use the latest research to build the fastest KV database for data sets spanning terabytes.
-- Optimize for SSDs.
-
-Badger's design is based on a paper titled _[WiscKey: Separating Keys from
-Values in SSD-conscious Storage][wisckey]_.
-
-[wisckey]: https://www.usenix.org/system/files/conference/fast16/fast16-papers-lu.pdf
-
-### Comparisons
-| Feature                       | Badger                             | RocksDB              | BoltDB    |
-| ----------------------------- | ---------------------------------- | -------------------- | --------- |
-| Design                        | LSM tree with value log            | LSM tree only        | B+ tree   |
-| High Read throughput          | Yes                                | No                   | Yes       |
-| High Write throughput         | Yes                                | Yes                  | No        |
-| Designed for SSDs             | Yes (with latest research [1])     | Not specifically [2] | No        |
-| Embeddable                    | Yes                                | Yes                  | Yes       |
-| Sorted KV access              | Yes                                | Yes                  | Yes       |
-| Pure Go (no Cgo)              | Yes                                | No                   | Yes       |
-| Transactions                  | Yes, ACID, concurrent with SSI [3] | Yes (but non-ACID)   | Yes, ACID |
-| Snapshots                     | Yes                                | Yes                  | Yes       |
-| TTL support                   | Yes                                | Yes                  | No        |
-| 3D access (key-value-version) | Yes [4]                            | No                   | No        |
-
-[1] The [WISCKEY paper][wisckey] (on which Badger is based) saw big
-wins with separating values from keys, significantly reducing the write
-amplification compared to a typical LSM tree.
-
-[2] RocksDB is an SSD-optimized version of LevelDB, which was designed specifically for rotating disks.
-As such, RocksDB's design isn't aimed at SSDs.
-
-[3] SSI: Serializable Snapshot Isolation. For more details, see the blog post [Concurrent ACID Transactions in Badger](https://blog.dgraph.io/post/badger-txn/)
-
-[4] Badger provides direct access to value versions via its Iterator API.
-Users can also specify how many versions to keep per key via Options.
-
-### Benchmarks
-We have run comprehensive benchmarks against RocksDB, Bolt and LMDB. The
-benchmarking code, and the detailed logs for the benchmarks, can be found in the
-[badger-bench] repo. More explanation, including graphs, can be found in the blog
-posts (linked above).
-
-[badger-bench]: https://github.com/dgraph-io/badger-bench
-
-## Projects Using Badger
-Below is a list of known projects that use Badger:
-
-* [Dgraph](https://github.com/dgraph-io/dgraph) - Distributed graph database.
-* [Jaeger](https://github.com/jaegertracing/jaeger) - Distributed tracing platform.
-* [go-ipfs](https://github.com/ipfs/go-ipfs) - Go client for the InterPlanetary File System (IPFS), a new hypermedia distribution protocol.
-* [Riot](https://github.com/go-ego/riot) - An open-source, distributed search engine.
-* [emitter](https://github.com/emitter-io/emitter) - Scalable, low latency, distributed pub/sub broker with message storage, uses MQTT, gossip and badger.
-* [OctoSQL](https://github.com/cube2222/octosql) - Query tool that allows you to join, analyse and transform data from multiple databases using SQL.
-* [Dkron](https://dkron.io/) - Distributed, fault tolerant job scheduling system.
-* [Sandglass](https://github.com/celrenheit/sandglass) - Distributed, horizontally scalable, persistent, time-sorted message queue.
-* [TalariaDB](https://github.com/grab/talaria) - Grab's distributed, low-latency time-series database.
-* [Sloop](https://github.com/salesforce/sloop) - Salesforce's Kubernetes History Visualization Project.
-* [Immudb](https://github.com/codenotary/immudb) - Lightweight, high-speed immutable database for systems and applications.
-* [Usenet Express](https://usenetexpress.com/) - Serving over 300TB of data with Badger.
-* [gorush](https://github.com/appleboy/gorush) - A push notification server written in Go.
-* [0-stor](https://github.com/zero-os/0-stor) - Single device object store.
-* [Dispatch Protocol](https://github.com/dispatchlabs/disgo) - Blockchain protocol for distributed application data analytics.
-* [GarageMQ](https://github.com/valinurovam/garagemq) - AMQP server written in Go.
-* [RedixDB](https://alash3al.github.io/redix/) - A real-time persistent key-value store with the same redis protocol.
-* [BBVA](https://github.com/BBVA/raft-badger) - Raft backend implementation using BadgerDB for Hashicorp raft.
-* [Fantom](https://github.com/Fantom-foundation/go-lachesis) - aBFT consensus platform for distributed applications.
-* [decred](https://github.com/decred/dcrdata) - An open, progressive, and self-funding cryptocurrency with a system of community-based governance integrated into its blockchain.
-* [OpenNetSys](https://github.com/opennetsys/c3-go) - Create useful dApps in any software language.
-* [HoneyTrap](https://github.com/honeytrap/honeytrap) - An extensible and open-source system for running, monitoring and managing honeypots.
-* [Insolar](https://github.com/insolar/insolar) - Enterprise-ready blockchain platform.
-* [IoTeX](https://github.com/iotexproject/iotex-core) - The next generation of the decentralized network for IoT, powered by scalability- and privacy-centric blockchains.
-* [go-sessions](https://github.com/kataras/go-sessions) - The sessions manager for Go net/http and fasthttp.
-* [Babble](https://github.com/mosaicnetworks/babble) - BFT consensus platform for distributed applications.
-* [Tormenta](https://github.com/jpincas/tormenta) - Embedded object-persistence layer / simple JSON database for Go projects.
-* [BadgerHold](https://github.com/timshannon/badgerhold) - An embeddable NoSQL store for querying Go types, built on Badger.
-* [Goblero](https://github.com/didil/goblero) - Pure Go embedded persistent job queue backed by BadgerDB.
-* [Surfline](https://www.surfline.com) - Serving global wave and weather forecast data with Badger.
-* [Cete](https://github.com/mosuka/cete) - Simple and highly available distributed key-value store built on Badger. Makes it easy to bring up a cluster of Badger with the Raft consensus algorithm by hashicorp/raft.
-* [Volument](https://volument.com/) - A new take on website analytics backed by Badger.
-* [KVdb](https://kvdb.io/) - Hosted key-value store and serverless platform built on top of Badger.
-
-If you are using Badger in a project, please send a pull request to add it to the list.
-
-## Contributing
-
-If you're interested in contributing to Badger see [CONTRIBUTING.md](./CONTRIBUTING.md).
-
-## Frequently Asked Questions
-### My writes are getting stuck. Why?
-
-**Update: With the new `Value(func(v []byte))` API, this deadlock can no longer
-happen.**
-
-The following is true for users on Badger v1.x.
-
-This can happen if a long-running iteration has `Prefetch` set to false, but
-an `Item::Value` call is made internally in the loop. That causes Badger to
-acquire read locks over the value log files, to prevent value log GC from
-removing a file from underneath. As a side effect, this also blocks a new
-value log GC file from being created when the value log file boundary is hit.
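-
-For clarity, here is a minimal sketch of the first workaround listed below
-(assuming the v1.x API): copying the value out with `ValueCopy` so that no
-long-lived lock is held on a value log file during iteration:
-
-```go
-err := db.View(func(txn *badger.Txn) error {
-  opts := badger.DefaultIteratorOptions
-  opts.PrefetchValues = false
-  it := txn.NewIterator(opts)
-  defer it.Close()
-  for it.Rewind(); it.Valid(); it.Next() {
-    // ValueCopy copies the value into a fresh buffer and releases the
-    // underlying value log file immediately, so value log GC is not blocked.
-    v, err := it.Item().ValueCopy(nil)
-    if err != nil {
-      return err
-    }
-    _ = v // use the copied value
-  }
-  return nil
-})
-```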
-
-Please see Github issues [#293](https://github.com/dgraph-io/badger/issues/293)
-and [#315](https://github.com/dgraph-io/badger/issues/315).
-
-There are multiple workarounds during iteration:
-
-1. Use `Item::ValueCopy` instead of `Item::Value` when retrieving a value.
-1. Set `Prefetch` to true. Badger would then copy over the value and release the
-   file lock immediately.
-1. When `Prefetch` is false, don't call `Item::Value` and do a pure key-only
-   iteration. This might be useful if you just want to delete a lot of keys.
-1. Do the writes in a separate transaction after the reads.
-
-### My writes are really slow. Why?
-
-Are you creating a new transaction for every single key update, and waiting for
-it to `Commit` fully before creating a new one? This will lead to very low
-throughput.
-
-We have created the `WriteBatch` API, which provides a way to batch up
-many updates into a single transaction and `Commit` that transaction using
-callbacks to avoid blocking. This amortizes the cost of a transaction really
-well, and provides the most efficient way to do bulk writes.
-
-```go
-wb := db.NewWriteBatch()
-defer wb.Cancel()
-
-for i := 0; i < N; i++ {
-  err := wb.Set(key(i), value(i)) // Will create txns as needed.
-  handle(err)
-}
-handle(wb.Flush()) // Wait for all txns to finish.
-```
-
-Note that the `WriteBatch` API does not allow any reads. For read-modify-write
-workloads, you should be using the `Transaction` API.
-
-### I don't see any disk writes. Why?
-
-If you're using Badger with `SyncWrites=false`, then your writes might not be written to the value log
-and won't get synced to disk immediately. Writes to the LSM tree are done in memory first, before they
-get compacted to disk. The compaction would only happen once `MaxTableSize` has been reached. So, if
-you're doing a few writes and then checking, you might not see anything on disk. Once you `Close`
-the database, you'll see these writes on disk.
-
-### Reverse iteration doesn't give me the right results.
-
-Just like forward iteration goes to the first key which is equal to or greater than the SEEK key, reverse iteration goes to the first key which is equal to or less than the SEEK key. Therefore, the SEEK key would not be part of the results. You can typically add a `0xff` byte as a suffix to the SEEK key to include it in the results. See the following issues: [#436](https://github.com/dgraph-io/badger/issues/436) and [#347](https://github.com/dgraph-io/badger/issues/347).
-
-### Which instances should I use for Badger?
-
-We recommend using instances which provide local SSD storage, without any limit
-on the maximum IOPS. In AWS, these are storage-optimized instances like i3. They
-provide local SSDs which easily clock 100K IOPS over 4KB blocks.
-
-### I'm getting a closed channel error. Why?
-
-```
-panic: close of closed channel
-panic: send on closed channel
-```
-
-If you're seeing panics like the above, it is because you're operating on a closed DB. This can happen if you call `Close()` before sending a write, or if you call it multiple times. You should ensure that you only call `Close()` once, and that all your read/write operations finish before closing.
-
-### Are there any Go specific settings that I should use?
-
-We *highly* recommend setting a high number for `GOMAXPROCS`, which allows Go to
-observe the full IOPS throughput provided by modern SSDs. In Dgraph, we have set
-it to 128. For more details, [see this
-thread](https://groups.google.com/d/topic/golang-nuts/jPb_h3TvlKE/discussion).
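-
-As a concrete illustration, here is a minimal sketch of one way to apply this
-setting at process start-up (the value 128 simply mirrors the Dgraph setting
-mentioned above):
-
-```go
-import "runtime"
-
-func init() {
-  // Raise GOMAXPROCS so Go can keep enough work in flight to saturate
-  // the IOPS that modern SSDs provide.
-  runtime.GOMAXPROCS(128)
-}
-```
-
-Exporting `GOMAXPROCS=128` in the environment before launching the process has
-the same effect.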
-
-### Are there any Linux specific settings that I should use?
-
-We recommend setting `max file descriptors` to a high number, depending upon the expected size of
-your data. On Linux and Mac, you can check the file descriptor limit with `ulimit -n -H` for the
-hard limit and `ulimit -n -S` for the soft limit. A soft limit of `65535` is a good lower bound.
-You can adjust the limit as needed.
-
-### I see "manifest has unsupported version: X (we support Y)" error.
-
-This error means you have a badger directory which was created by an older version of badger and
-you're trying to open it with a newer version of badger. The underlying data format can change across
-badger versions and users will have to migrate their data directory.
-Badger data can be migrated from version X of badger to version Y of badger by following the steps
-listed below.
-Assume you were on badger v1.6.0 and you wish to migrate to v2.0.0.
-1. Install badger version v1.6.0
-   - `cd $GOPATH/src/github.com/dgraph-io/badger`
-   - `git checkout v1.6.0`
-   - `cd badger && go install`
-   - This should install the old badger binary in your $GOBIN.
-2. Create Backup
-   - `badger backup --dir path/to/badger/directory -f badger.backup`
-3. Install badger version v2.0.0
-   - `cd $GOPATH/src/github.com/dgraph-io/badger`
-   - `git checkout v2.0.0`
-   - `cd badger && go install`
-   - This should install the new badger binary in your $GOBIN.
-4. Restore from the backup
-   - `badger restore --dir path/to/new/badger/directory -f badger.backup`
-   - This will create a new directory at `path/to/new/badger/directory` and add badger data in the
-     newer format to it.
-
-NOTE - The above steps shouldn't cause any data loss but please ensure the new data is valid before
-deleting the old badger directory.
-
-### Why do I need gcc to build badger? Does badger need CGO?
-
-Badger does not directly use CGO, but it relies on the https://github.com/DataDog/zstd library for
-zstd compression, and that library requires `gcc/cgo`. You can build badger without cgo by running
-`CGO_ENABLED=0 go build`. This will build badger without support for the ZSTD compression algorithm.
-
-## Contact
-- Please use [discuss.dgraph.io](https://discuss.dgraph.io) for questions, feature requests and discussions.
-- Please use the [Github issue tracker](https://github.com/dgraph-io/badger/issues) for filing bugs or feature requests.
-- Join [![Slack Status](http://slack.dgraph.io/badge.svg)](http://slack.dgraph.io).
-- Follow us on Twitter [@dgraphlabs](https://twitter.com/dgraphlabs).
-
diff --git a/vendor/github.com/dgraph-io/badger/v2/VERSIONING.md b/vendor/github.com/dgraph-io/badger/v2/VERSIONING.md
deleted file mode 100644
index a890a36f..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/VERSIONING.md
+++ /dev/null
@@ -1,47 +0,0 @@
-# Serialization Versioning: Semantic Versioning for databases
-
-Semantic Versioning, commonly known as SemVer, is a great idea that has been very widely adopted as
-a way to decide how to name software versions. The whole concept is very well summarized on
-semver.org with the following lines:
-
-> Given a version number MAJOR.MINOR.PATCH, increment the:
->
-> 1. MAJOR version when you make incompatible API changes,
-> 2. MINOR version when you add functionality in a backwards-compatible manner, and
-> 3. PATCH version when you make backwards-compatible bug fixes.
->
-> Additional labels for pre-release and build metadata are available as extensions to the
-> MAJOR.MINOR.PATCH format.
-
-Unfortunately, API changes are not the most important changes for libraries that serialize data for
-later consumption. For these libraries, such as BadgerDB, changes to the API are much easier to
-handle than changes to the data format used to store data on disk.
-
-## Serialization Version specification
-
-Serialization Versioning, like Semantic Versioning, uses 3 numbers and also calls them
-MAJOR.MINOR.PATCH, but the semantics of the numbers are slightly modified:
-
-Given a version number MAJOR.MINOR.PATCH, increment the:
-
-- MAJOR version when you make changes that require a transformation of the dataset before it can be
-used again.
-- MINOR version when old datasets are still readable but the API might have changed in
-backwards-compatible or incompatible ways.
-- PATCH version when you make backwards-compatible bug fixes.
-
-Additional labels for pre-release and build metadata are available as extensions to the
-MAJOR.MINOR.PATCH format.
-
-Following this naming strategy, migration from v1.x to v2.x requires a migration strategy for your
-existing dataset, and as such has to be carefully planned. Migrations between different minor
-versions (e.g. v1.5.x and v1.6.x) might break your build, as the API *might* have changed, but once
-your code compiles there's no need for any data migration. Lastly, changes between two different
-patch versions should never break your build or dataset.
-
-For more background on our decision to adopt Serialization Versioning, read the blog post
-[Semantic Versioning, Go Modules, and Databases][blog] and the original proposal on
-[this comment on Dgraph's Discuss forum][discuss].
-
-[blog]: https://blog.dgraph.io/post/serialization-versioning/
-[discuss]: https://discuss.dgraph.io/t/go-modules-on-badger-and-dgraph/4662/7
\ No newline at end of file
diff --git a/vendor/github.com/dgraph-io/badger/v2/appveyor.yml b/vendor/github.com/dgraph-io/badger/v2/appveyor.yml
deleted file mode 100644
index ac3a9505..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/appveyor.yml
+++ /dev/null
@@ -1,50 +0,0 @@
-# version format
-version: "{build}"
-
-# Operating system (build VM template)
-os: Windows Server 2012 R2
-
-# Platform.
-platform: x64
-
-clone_folder: c:\gopath\src\github.com\dgraph-io\badger
-
-# Environment variables
-environment:
-  GOVERSION: 1.12
-  GOPATH: c:\gopath
-  GO111MODULE: on
-
-# scripts that run after cloning repository
-install:
-  - set PATH=%GOPATH%\bin;c:\go\bin;c:\msys64\mingw64\bin;%PATH%
-  - go version
-  - go env
-  - python --version
-  - gcc --version
-
-# To run your custom scripts instead of automatic MSBuild
-build_script:
-  # We need to disable firewall - https://github.com/appveyor/ci/issues/1579#issuecomment-309830648
-  - ps: Disable-NetFirewallRule -DisplayName 'File and Printer Sharing (SMB-Out)'
-  - cd c:\gopath\src\github.com\dgraph-io\badger
-  - git branch
-  - go get -t ./...
-
-# To run your custom scripts instead of automatic tests
-test_script:
-  # Unit tests
-  - ps: Add-AppveyorTest "Unit Tests" -Outcome Running
-  - go test -v github.com/dgraph-io/badger/...
-  - go test -v -vlog_mmap=false github.com/dgraph-io/badger/...
- - ps: Update-AppveyorTest "Unit Tests" -Outcome Passed - -notifications: - - provider: Email - to: - - pawan@dgraph.io - on_build_failure: true - on_build_status_changed: true -# to disable deployment -deploy: off - diff --git a/vendor/github.com/dgraph-io/badger/v2/backup.go b/vendor/github.com/dgraph-io/badger/v2/backup.go deleted file mode 100644 index 3c1b7592..00000000 --- a/vendor/github.com/dgraph-io/badger/v2/backup.go +++ /dev/null @@ -1,264 +0,0 @@ -/* - * Copyright 2017 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package badger - -import ( - "bufio" - "bytes" - "context" - "encoding/binary" - "io" - - "github.com/dgraph-io/badger/v2/pb" - "github.com/dgraph-io/badger/v2/y" - "github.com/golang/protobuf/proto" -) - -// flushThreshold determines when a buffer will be flushed. When performing a -// backup/restore, the entries will be batched up until the total size of batch -// is more than flushThreshold or entry size (without the value size) is more -// than the maxBatchSize. -const flushThreshold = 100 << 20 - -// Backup is a wrapper function over Stream.Backup to generate full and incremental backups of the -// DB. For more control over how many goroutines are used to generate the backup, or if you wish to -// backup only a certain range of keys, use Stream.Backup directly. -func (db *DB) Backup(w io.Writer, since uint64) (uint64, error) { - stream := db.NewStream() - stream.LogPrefix = "DB.Backup" - return stream.Backup(w, since) -} - -// Backup dumps a protobuf-encoded list of all entries in the database into the -// given writer, that are newer than the specified version. It returns a -// timestamp indicating when the entries were dumped which can be passed into a -// later invocation to generate an incremental dump, of entries that have been -// added/modified since the last invocation of Stream.Backup(). -// -// This can be used to backup the data in a database at a given point in time. -func (stream *Stream) Backup(w io.Writer, since uint64) (uint64, error) { - stream.KeyToList = func(key []byte, itr *Iterator) (*pb.KVList, error) { - list := &pb.KVList{} - for ; itr.Valid(); itr.Next() { - item := itr.Item() - if !bytes.Equal(item.Key(), key) { - return list, nil - } - if item.Version() < since { - // Ignore versions less than given timestamp, or skip older - // versions of the given key. - return list, nil - } - - var valCopy []byte - if !item.IsDeletedOrExpired() { - // No need to copy value, if item is deleted or expired. - var err error - valCopy, err = item.ValueCopy(nil) - if err != nil { - stream.db.opt.Errorf("Key [%x, %d]. 
Error while fetching value [%v]\n", - item.Key(), item.Version(), err) - return nil, err - } - } - - // clear txn bits - meta := item.meta &^ (bitTxn | bitFinTxn) - kv := &pb.KV{ - Key: item.KeyCopy(nil), - Value: valCopy, - UserMeta: []byte{item.UserMeta()}, - Version: item.Version(), - ExpiresAt: item.ExpiresAt(), - Meta: []byte{meta}, - } - list.Kv = append(list.Kv, kv) - - switch { - case item.DiscardEarlierVersions(): - // If we need to discard earlier versions of this item, add a delete - // marker just below the current version. - list.Kv = append(list.Kv, &pb.KV{ - Key: item.KeyCopy(nil), - Version: item.Version() - 1, - Meta: []byte{bitDelete}, - }) - return list, nil - - case item.IsDeletedOrExpired(): - return list, nil - } - } - return list, nil - } - - var maxVersion uint64 - stream.Send = func(list *pb.KVList) error { - for _, kv := range list.Kv { - if maxVersion < kv.Version { - maxVersion = kv.Version - } - } - return writeTo(list, w) - } - - if err := stream.Orchestrate(context.Background()); err != nil { - return 0, err - } - return maxVersion, nil -} - -func writeTo(list *pb.KVList, w io.Writer) error { - if err := binary.Write(w, binary.LittleEndian, uint64(proto.Size(list))); err != nil { - return err - } - buf, err := proto.Marshal(list) - if err != nil { - return err - } - _, err = w.Write(buf) - return err -} - -// KVLoader is used to write KVList objects in to badger. It can be used to restore a backup. -type KVLoader struct { - db *DB - throttle *y.Throttle - entries []*Entry - entriesSize int64 - totalSize int64 -} - -// NewKVLoader returns a new instance of KVLoader. -func (db *DB) NewKVLoader(maxPendingWrites int) *KVLoader { - return &KVLoader{ - db: db, - throttle: y.NewThrottle(maxPendingWrites), - entries: make([]*Entry, 0, db.opt.maxBatchCount), - } -} - -// Set writes the key-value pair to the database. -func (l *KVLoader) Set(kv *pb.KV) error { - var userMeta, meta byte - if len(kv.UserMeta) > 0 { - userMeta = kv.UserMeta[0] - } - if len(kv.Meta) > 0 { - meta = kv.Meta[0] - } - e := &Entry{ - Key: y.KeyWithTs(kv.Key, kv.Version), - Value: kv.Value, - UserMeta: userMeta, - ExpiresAt: kv.ExpiresAt, - meta: meta, - } - estimatedSize := int64(e.estimateSize(l.db.opt.ValueThreshold)) - // Flush entries if inserting the next entry would overflow the transactional limits. - if int64(len(l.entries))+1 >= l.db.opt.maxBatchCount || - l.entriesSize+estimatedSize >= l.db.opt.maxBatchSize || - l.totalSize >= flushThreshold { - if err := l.send(); err != nil { - return err - } - } - l.entries = append(l.entries, e) - l.entriesSize += estimatedSize - l.totalSize += estimatedSize + int64(len(e.Value)) - return nil -} - -func (l *KVLoader) send() error { - if err := l.throttle.Do(); err != nil { - return err - } - if err := l.db.batchSetAsync(l.entries, func(err error) { - l.throttle.Done(err) - }); err != nil { - return err - } - - l.entries = make([]*Entry, 0, l.db.opt.maxBatchCount) - l.entriesSize = 0 - l.totalSize = 0 - return nil -} - -// Finish is meant to be called after all the key-value pairs have been loaded. -func (l *KVLoader) Finish() error { - if len(l.entries) > 0 { - if err := l.send(); err != nil { - return err - } - } - return l.throttle.Finish() -} - -// Load reads a protobuf-encoded list of all entries from a reader and writes -// them to the database. This can be used to restore the database from a backup -// made by calling DB.Backup(). If more complex logic is needed to restore a badger -// backup, the KVLoader interface should be used instead. 
-// -// DB.Load() should be called on a database that is not running any other -// concurrent transactions while it is running. -func (db *DB) Load(r io.Reader, maxPendingWrites int) error { - br := bufio.NewReaderSize(r, 16<<10) - unmarshalBuf := make([]byte, 1<<10) - - ldr := db.NewKVLoader(maxPendingWrites) - for { - var sz uint64 - err := binary.Read(br, binary.LittleEndian, &sz) - if err == io.EOF { - break - } else if err != nil { - return err - } - - if cap(unmarshalBuf) < int(sz) { - unmarshalBuf = make([]byte, sz) - } - - if _, err = io.ReadFull(br, unmarshalBuf[:sz]); err != nil { - return err - } - - list := &pb.KVList{} - if err := proto.Unmarshal(unmarshalBuf[:sz], list); err != nil { - return err - } - - for _, kv := range list.Kv { - if err := ldr.Set(kv); err != nil { - return err - } - - // Update nextTxnTs, memtable stores this - // timestamp in badger head when flushed. - if kv.Version >= db.orc.nextTxnTs { - db.orc.nextTxnTs = kv.Version + 1 - } - } - } - - if err := ldr.Finish(); err != nil { - return err - } - db.orc.txnMark.Done(db.orc.nextTxnTs - 1) - return nil -} diff --git a/vendor/github.com/dgraph-io/badger/v2/batch.go b/vendor/github.com/dgraph-io/badger/v2/batch.go deleted file mode 100644 index ff94e861..00000000 --- a/vendor/github.com/dgraph-io/badger/v2/batch.go +++ /dev/null @@ -1,212 +0,0 @@ -/* - * Copyright 2018 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package badger - -import ( - "sync" - - "github.com/dgraph-io/badger/v2/pb" - "github.com/dgraph-io/badger/v2/y" - "github.com/pkg/errors" -) - -// WriteBatch holds the necessary info to perform batched writes. -type WriteBatch struct { - sync.Mutex - txn *Txn - db *DB - throttle *y.Throttle - err error - - isManaged bool - commitTs uint64 -} - -// NewWriteBatch creates a new WriteBatch. This provides a way to conveniently do a lot of writes, -// batching them up as tightly as possible in a single transaction and using callbacks to avoid -// waiting for them to commit, thus achieving good performance. This API hides away the logic of -// creating and committing transactions. Due to the nature of SSI guaratees provided by Badger, -// blind writes can never encounter transaction conflicts (ErrConflict). -func (db *DB) NewWriteBatch() *WriteBatch { - if db.opt.managedTxns { - panic("cannot use NewWriteBatch in managed mode. Use NewWriteBatchAt instead") - } - return db.newWriteBatch(false) -} - -func (db *DB) newWriteBatch(isManaged bool) *WriteBatch { - return &WriteBatch{ - db: db, - isManaged: isManaged, - txn: db.newTransaction(true, isManaged), - throttle: y.NewThrottle(16), - } -} - -// SetMaxPendingTxns sets a limit on maximum number of pending transactions while writing batches. -// This function should be called before using WriteBatch. Default value of MaxPendingTxns is -// 16 to minimise memory usage. 
-func (wb *WriteBatch) SetMaxPendingTxns(max int) { - wb.throttle = y.NewThrottle(max) -} - -// Cancel function must be called if there's a chance that Flush might not get -// called. If neither Flush or Cancel is called, the transaction oracle would -// never get a chance to clear out the row commit timestamp map, thus causing an -// unbounded memory consumption. Typically, you can call Cancel as a defer -// statement right after NewWriteBatch is called. -// -// Note that any committed writes would still go through despite calling Cancel. -func (wb *WriteBatch) Cancel() { - if err := wb.throttle.Finish(); err != nil { - wb.db.opt.Errorf("WatchBatch.Cancel error while finishing: %v", err) - } - wb.txn.Discard() -} - -func (wb *WriteBatch) callback(err error) { - // sync.WaitGroup is thread-safe, so it doesn't need to be run inside wb.Lock. - defer wb.throttle.Done(err) - if err == nil { - return - } - - wb.Lock() - defer wb.Unlock() - if wb.err != nil { - return - } - wb.err = err -} - -func (wb *WriteBatch) Write(kvList *pb.KVList) error { - wb.Lock() - defer wb.Unlock() - for _, kv := range kvList.Kv { - e := Entry{Key: kv.Key, Value: kv.Value} - if len(kv.UserMeta) > 0 { - e.UserMeta = kv.UserMeta[0] - } - y.AssertTrue(kv.Version != 0) - e.version = kv.Version - if err := wb.handleEntry(&e); err != nil { - return err - } - } - return nil -} - -// SetEntryAt is the equivalent of Txn.SetEntry but it also allows setting version for the entry. -// SetEntryAt can be used only in managed mode. -func (wb *WriteBatch) SetEntryAt(e *Entry, ts uint64) error { - if !wb.db.opt.managedTxns { - return errors.New("SetEntryAt can only be used in managed mode. Use SetEntry instead") - } - e.version = ts - return wb.SetEntry(e) -} - -// Should be called with lock acquired. -func (wb *WriteBatch) handleEntry(e *Entry) error { - if err := wb.txn.SetEntry(e); err != ErrTxnTooBig { - return err - } - // Txn has reached it's zenith. Commit now. - if cerr := wb.commit(); cerr != nil { - return cerr - } - // This time the error must not be ErrTxnTooBig, otherwise, we make the - // error permanent. - if err := wb.txn.SetEntry(e); err != nil { - wb.err = err - return err - } - return nil -} - -// SetEntry is the equivalent of Txn.SetEntry. -func (wb *WriteBatch) SetEntry(e *Entry) error { - wb.Lock() - defer wb.Unlock() - return wb.handleEntry(e) -} - -// Set is equivalent of Txn.Set(). -func (wb *WriteBatch) Set(k, v []byte) error { - e := &Entry{Key: k, Value: v} - return wb.SetEntry(e) -} - -// DeleteAt is equivalent of Txn.Delete but accepts a delete timestamp. -func (wb *WriteBatch) DeleteAt(k []byte, ts uint64) error { - e := Entry{Key: k, meta: bitDelete, version: ts} - return wb.SetEntry(&e) -} - -// Delete is equivalent of Txn.Delete. -func (wb *WriteBatch) Delete(k []byte) error { - wb.Lock() - defer wb.Unlock() - - if err := wb.txn.Delete(k); err != ErrTxnTooBig { - return err - } - if err := wb.commit(); err != nil { - return err - } - if err := wb.txn.Delete(k); err != nil { - wb.err = err - return err - } - return nil -} - -// Caller to commit must hold a write lock. -func (wb *WriteBatch) commit() error { - if wb.err != nil { - return wb.err - } - if err := wb.throttle.Do(); err != nil { - return err - } - wb.txn.CommitWith(wb.callback) - wb.txn = wb.db.newTransaction(true, wb.isManaged) - wb.txn.commitTs = wb.commitTs - return wb.err -} - -// Flush must be called at the end to ensure that any pending writes get committed to Badger. Flush -// returns any error stored by WriteBatch. 
-func (wb *WriteBatch) Flush() error { - wb.Lock() - _ = wb.commit() - wb.txn.Discard() - wb.Unlock() - - if err := wb.throttle.Finish(); err != nil { - return err - } - - return wb.err -} - -// Error returns any errors encountered so far. No commits would be run once an error is detected. -func (wb *WriteBatch) Error() error { - wb.Lock() - defer wb.Unlock() - return wb.err -} diff --git a/vendor/github.com/dgraph-io/badger/v2/compaction.go b/vendor/github.com/dgraph-io/badger/v2/compaction.go deleted file mode 100644 index 0372b8b7..00000000 --- a/vendor/github.com/dgraph-io/badger/v2/compaction.go +++ /dev/null @@ -1,213 +0,0 @@ -/* - * Copyright 2017 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package badger - -import ( - "bytes" - "fmt" - "log" - "math" - "sync" - - "golang.org/x/net/trace" - - "github.com/dgraph-io/badger/v2/table" - "github.com/dgraph-io/badger/v2/y" -) - -type keyRange struct { - left []byte - right []byte - inf bool -} - -var infRange = keyRange{inf: true} - -func (r keyRange) String() string { - return fmt.Sprintf("[left=%x, right=%x, inf=%v]", r.left, r.right, r.inf) -} - -func (r keyRange) equals(dst keyRange) bool { - return bytes.Equal(r.left, dst.left) && - bytes.Equal(r.right, dst.right) && - r.inf == dst.inf -} - -func (r keyRange) overlapsWith(dst keyRange) bool { - if r.inf || dst.inf { - return true - } - - // If my left is greater than dst right, we have no overlap. - if y.CompareKeys(r.left, dst.right) > 0 { - return false - } - // If my right is less than dst left, we have no overlap. - if y.CompareKeys(r.right, dst.left) < 0 { - return false - } - // We have overlap. - return true -} - -func getKeyRange(tables ...*table.Table) keyRange { - if len(tables) == 0 { - return keyRange{} - } - smallest := tables[0].Smallest() - biggest := tables[0].Biggest() - for i := 1; i < len(tables); i++ { - if y.CompareKeys(tables[i].Smallest(), smallest) < 0 { - smallest = tables[i].Smallest() - } - if y.CompareKeys(tables[i].Biggest(), biggest) > 0 { - biggest = tables[i].Biggest() - } - } - - // We pick all the versions of the smallest and the biggest key. Note that version zero would - // be the rightmost key, considering versions are default sorted in descending order. 
- return keyRange{ - left: y.KeyWithTs(y.ParseKey(smallest), math.MaxUint64), - right: y.KeyWithTs(y.ParseKey(biggest), 0), - } -} - -type levelCompactStatus struct { - ranges []keyRange - delSize int64 -} - -func (lcs *levelCompactStatus) debug() string { - var b bytes.Buffer - for _, r := range lcs.ranges { - b.WriteString(r.String()) - } - return b.String() -} - -func (lcs *levelCompactStatus) overlapsWith(dst keyRange) bool { - for _, r := range lcs.ranges { - if r.overlapsWith(dst) { - return true - } - } - return false -} - -func (lcs *levelCompactStatus) remove(dst keyRange) bool { - final := lcs.ranges[:0] - var found bool - for _, r := range lcs.ranges { - if !r.equals(dst) { - final = append(final, r) - } else { - found = true - } - } - lcs.ranges = final - return found -} - -type compactStatus struct { - sync.RWMutex - levels []*levelCompactStatus -} - -func (cs *compactStatus) toLog(tr trace.Trace) { - cs.RLock() - defer cs.RUnlock() - - tr.LazyPrintf("Compaction status:") - for i, l := range cs.levels { - if l.debug() == "" { - continue - } - tr.LazyPrintf("[%d] %s", i, l.debug()) - } -} - -func (cs *compactStatus) overlapsWith(level int, this keyRange) bool { - cs.RLock() - defer cs.RUnlock() - - thisLevel := cs.levels[level] - return thisLevel.overlapsWith(this) -} - -func (cs *compactStatus) delSize(l int) int64 { - cs.RLock() - defer cs.RUnlock() - return cs.levels[l].delSize -} - -type thisAndNextLevelRLocked struct{} - -// compareAndAdd will check whether we can run this compactDef. That it doesn't overlap with any -// other running compaction. If it can be run, it would store this run in the compactStatus state. -func (cs *compactStatus) compareAndAdd(_ thisAndNextLevelRLocked, cd compactDef) bool { - cs.Lock() - defer cs.Unlock() - - level := cd.thisLevel.level - - y.AssertTruef(level < len(cs.levels)-1, "Got level %d. Max levels: %d", level, len(cs.levels)) - thisLevel := cs.levels[level] - nextLevel := cs.levels[level+1] - - if thisLevel.overlapsWith(cd.thisRange) { - return false - } - if nextLevel.overlapsWith(cd.nextRange) { - return false - } - // Check whether this level really needs compaction or not. Otherwise, we'll end up - // running parallel compactions for the same level. - // Update: We should not be checking size here. Compaction priority already did the size checks. - // Here we should just be executing the wish of others. - - thisLevel.ranges = append(thisLevel.ranges, cd.thisRange) - nextLevel.ranges = append(nextLevel.ranges, cd.nextRange) - thisLevel.delSize += cd.thisSize - return true -} - -func (cs *compactStatus) delete(cd compactDef) { - cs.Lock() - defer cs.Unlock() - - level := cd.thisLevel.level - y.AssertTruef(level < len(cs.levels)-1, "Got level %d. 
Max levels: %d", level, len(cs.levels)) - - thisLevel := cs.levels[level] - nextLevel := cs.levels[level+1] - - thisLevel.delSize -= cd.thisSize - found := thisLevel.remove(cd.thisRange) - found = nextLevel.remove(cd.nextRange) && found - - if !found { - this := cd.thisRange - next := cd.nextRange - fmt.Printf("Looking for: [%q, %q, %v] in this level.\n", this.left, this.right, this.inf) - fmt.Printf("This Level:\n%s\n", thisLevel.debug()) - fmt.Println() - fmt.Printf("Looking for: [%q, %q, %v] in next level.\n", next.left, next.right, next.inf) - fmt.Printf("Next Level:\n%s\n", nextLevel.debug()) - log.Fatal("keyRange not found") - } -} diff --git a/vendor/github.com/dgraph-io/badger/v2/db.go b/vendor/github.com/dgraph-io/badger/v2/db.go deleted file mode 100644 index cdb1f490..00000000 --- a/vendor/github.com/dgraph-io/badger/v2/db.go +++ /dev/null @@ -1,1845 +0,0 @@ -/* - * Copyright 2017 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package badger - -import ( - "bytes" - "context" - "encoding/binary" - "expvar" - "fmt" - "math" - "os" - "path/filepath" - "sort" - "strconv" - "sync" - "sync/atomic" - "time" - - "github.com/dgraph-io/badger/v2/options" - "github.com/dgraph-io/badger/v2/pb" - "github.com/dgraph-io/badger/v2/skl" - "github.com/dgraph-io/badger/v2/table" - "github.com/dgraph-io/badger/v2/y" - "github.com/dgraph-io/ristretto" - humanize "github.com/dustin/go-humanize" - "github.com/pkg/errors" -) - -var ( - badgerPrefix = []byte("!badger!") // Prefix for internal keys used by badger. - head = []byte("!badger!head") // For storing value offset for replay. - txnKey = []byte("!badger!txn") // For indicating end of entries in txn. - badgerMove = []byte("!badger!move") // For key-value pairs which got moved during GC. - lfDiscardStatsKey = []byte("!badger!discard") // For storing lfDiscardStats -) - -type closers struct { - updateSize *y.Closer - compactors *y.Closer - memtable *y.Closer - writes *y.Closer - valueGC *y.Closer - pub *y.Closer -} - -// DB provides the various functions required to interact with Badger. -// DB is thread-safe. -type DB struct { - sync.RWMutex // Guards list of inmemory tables, not individual reads and writes. - - dirLockGuard *directoryLockGuard - // nil if Dir and ValueDir are the same - valueDirGuard *directoryLockGuard - - closers closers - mt *skl.Skiplist // Our latest (actively written) in-memory table - imm []*skl.Skiplist // Add here only AFTER pushing to flushChan. - opt Options - manifest *manifestFile - lc *levelsController - vlog valueLog - vhead valuePointer // less than or equal to a pointer to the last vlog value put into mt - writeCh chan *request - flushChan chan flushTask // For flushing memtables. - closeOnce sync.Once // For closing DB only once. - - // Number of log rotates since the last memtable flush. We will access this field via atomic - // functions. 
Since we are not going to use any 64bit atomic functions, there is no need for - // 64 bit alignment of this struct(see #311). - logRotates int32 - - blockWrites int32 - isClosed uint32 - - orc *oracle - - pub *publisher - registry *KeyRegistry - blockCache *ristretto.Cache - indexCache *ristretto.Cache -} - -const ( - kvWriteChCapacity = 1000 -) - -func (db *DB) replayFunction() func(Entry, valuePointer) error { - type txnEntry struct { - nk []byte - v y.ValueStruct - } - - var txn []txnEntry - var lastCommit uint64 - - toLSM := func(nk []byte, vs y.ValueStruct) { - for err := db.ensureRoomForWrite(); err != nil; err = db.ensureRoomForWrite() { - db.opt.Debugf("Replay: Making room for writes") - time.Sleep(10 * time.Millisecond) - } - db.mt.Put(nk, vs) - } - - first := true - return func(e Entry, vp valuePointer) error { // Function for replaying. - if first { - db.opt.Debugf("First key=%q\n", e.Key) - } - first = false - db.orc.Lock() - if db.orc.nextTxnTs < y.ParseTs(e.Key) { - db.orc.nextTxnTs = y.ParseTs(e.Key) - } - db.orc.Unlock() - - nk := make([]byte, len(e.Key)) - copy(nk, e.Key) - var nv []byte - meta := e.meta - if db.shouldWriteValueToLSM(e) { - nv = make([]byte, len(e.Value)) - copy(nv, e.Value) - } else { - nv = vp.Encode() - meta = meta | bitValuePointer - } - // Update vhead. If the crash happens while replay was in progess - // and the head is not updated, we will end up replaying all the - // files starting from file zero, again. - db.updateHead([]valuePointer{vp}) - - v := y.ValueStruct{ - Value: nv, - Meta: meta, - UserMeta: e.UserMeta, - ExpiresAt: e.ExpiresAt, - } - - switch { - case e.meta&bitFinTxn > 0: - txnTs, err := strconv.ParseUint(string(e.Value), 10, 64) - if err != nil { - return errors.Wrapf(err, "Unable to parse txn fin: %q", e.Value) - } - y.AssertTrue(lastCommit == txnTs) - y.AssertTrue(len(txn) > 0) - // Got the end of txn. Now we can store them. - for _, t := range txn { - toLSM(t.nk, t.v) - } - txn = txn[:0] - lastCommit = 0 - - case e.meta&bitTxn > 0: - txnTs := y.ParseTs(nk) - if lastCommit == 0 { - lastCommit = txnTs - } - if lastCommit != txnTs { - db.opt.Warningf("Found an incomplete txn at timestamp %d. Discarding it.\n", - lastCommit) - txn = txn[:0] - lastCommit = txnTs - } - te := txnEntry{nk: nk, v: v} - txn = append(txn, te) - - default: - // This entry is from a rewrite or via SetEntryAt(..). - toLSM(nk, v) - - // We shouldn't get this entry in the middle of a transaction. - y.AssertTrue(lastCommit == 0) - y.AssertTrue(len(txn) == 0) - } - return nil - } -} - -// Open returns a new DB object. -func Open(opt Options) (db *DB, err error) { - // It's okay to have zero compactors which will disable all compactions but - // we cannot have just one compactor otherwise we will end up with all data - // one level 2. - if opt.NumCompactors == 1 { - return nil, errors.New("Cannot have 1 compactor. Need at least 2") - } - if opt.InMemory && (opt.Dir != "" || opt.ValueDir != "") { - return nil, errors.New("Cannot use badger in Disk-less mode with Dir or ValueDir set") - } - opt.maxBatchSize = (15 * opt.MaxTableSize) / 100 - opt.maxBatchCount = opt.maxBatchSize / int64(skl.MaxNodeSize) - - // We are limiting opt.ValueThreshold to maxValueThreshold for now. - if opt.ValueThreshold > maxValueThreshold { - return nil, errors.Errorf("Invalid ValueThreshold, must be less or equal to %d", - maxValueThreshold) - } - - // If ValueThreshold is greater than opt.maxBatchSize, we won't be able to push any data using - // the transaction APIs. 
Transaction batches entries into batches of size opt.maxBatchSize. - if int64(opt.ValueThreshold) > opt.maxBatchSize { - return nil, errors.Errorf("Valuethreshold greater than max batch size of %d. Either "+ - "reduce opt.ValueThreshold or increase opt.MaxTableSize.", opt.maxBatchSize) - } - if !(opt.ValueLogFileSize <= 2<<30 && opt.ValueLogFileSize >= 1<<20) { - return nil, ErrValueLogSize - } - if !(opt.ValueLogLoadingMode == options.FileIO || - opt.ValueLogLoadingMode == options.MemoryMap) { - return nil, ErrInvalidLoadingMode - } - - // Keep L0 in memory if either KeepL0InMemory is set or if InMemory is set. - opt.KeepL0InMemory = opt.KeepL0InMemory || opt.InMemory - - // Compact L0 on close if either it is set or if KeepL0InMemory is set. When - // keepL0InMemory is set we need to compact L0 on close otherwise we might lose data. - opt.CompactL0OnClose = opt.CompactL0OnClose || opt.KeepL0InMemory - - if opt.ReadOnly { - // Can't truncate if the DB is read only. - opt.Truncate = false - // Do not perform compaction in read only mode. - opt.CompactL0OnClose = false - } - var dirLockGuard, valueDirLockGuard *directoryLockGuard - - // Create directories and acquire lock on it only if badger is not running in InMemory mode. - // We don't have any directories/files in InMemory mode so we don't need to acquire - // any locks on them. - if !opt.InMemory { - if err := createDirs(opt); err != nil { - return nil, err - } - if !opt.BypassLockGuard { - dirLockGuard, err = acquireDirectoryLock(opt.Dir, lockFile, opt.ReadOnly) - if err != nil { - return nil, err - } - defer func() { - if dirLockGuard != nil { - _ = dirLockGuard.release() - } - }() - absDir, err := filepath.Abs(opt.Dir) - if err != nil { - return nil, err - } - absValueDir, err := filepath.Abs(opt.ValueDir) - if err != nil { - return nil, err - } - if absValueDir != absDir { - valueDirLockGuard, err = acquireDirectoryLock(opt.ValueDir, lockFile, opt.ReadOnly) - if err != nil { - return nil, err - } - defer func() { - if valueDirLockGuard != nil { - _ = valueDirLockGuard.release() - } - }() - } - } - } - - manifestFile, manifest, err := openOrCreateManifestFile(opt) - if err != nil { - return nil, err - } - defer func() { - if manifestFile != nil { - _ = manifestFile.close() - } - }() - - db = &DB{ - imm: make([]*skl.Skiplist, 0, opt.NumMemtables), - flushChan: make(chan flushTask, opt.NumMemtables), - writeCh: make(chan *request, kvWriteChCapacity), - opt: opt, - manifest: manifestFile, - dirLockGuard: dirLockGuard, - valueDirGuard: valueDirLockGuard, - orc: newOracle(opt), - pub: newPublisher(), - } - // Cleanup all the goroutines started by badger in case of an error. - defer func() { - if err != nil { - db.cleanup() - db = nil - } - }() - - if opt.BlockCacheSize > 0 { - config := ristretto.Config{ - // Use 5% of cache memory for storing counters. - NumCounters: int64(float64(opt.BlockCacheSize) * 0.05 * 2), - MaxCost: int64(float64(opt.BlockCacheSize) * 0.95), - BufferItems: 64, - Metrics: true, - } - db.blockCache, err = ristretto.NewCache(&config) - if err != nil { - return nil, errors.Wrap(err, "failed to create data cache") - } - } - - if opt.IndexCacheSize > 0 { - config := ristretto.Config{ - // Use 5% of cache memory for storing counters. 
- NumCounters: int64(float64(opt.IndexCacheSize) * 0.05 * 2), - MaxCost: int64(float64(opt.IndexCacheSize) * 0.95), - BufferItems: 64, - Metrics: true, - } - db.indexCache, err = ristretto.NewCache(&config) - if err != nil { - return nil, errors.Wrap(err, "failed to create bf cache") - } - } - if db.opt.InMemory { - db.opt.SyncWrites = false - // If badger is running in memory mode, push everything into the LSM Tree. - db.opt.ValueThreshold = math.MaxInt32 - } - krOpt := KeyRegistryOptions{ - ReadOnly: opt.ReadOnly, - Dir: opt.Dir, - EncryptionKey: opt.EncryptionKey, - EncryptionKeyRotationDuration: opt.EncryptionKeyRotationDuration, - InMemory: opt.InMemory, - } - - if db.registry, err = OpenKeyRegistry(krOpt); err != nil { - return db, err - } - db.calculateSize() - db.closers.updateSize = y.NewCloser(1) - go db.updateSize(db.closers.updateSize) - db.mt = skl.NewSkiplist(arenaSize(opt)) - - // newLevelsController potentially loads files in directory. - if db.lc, err = newLevelsController(db, &manifest); err != nil { - return db, err - } - - // Initialize vlog struct. - db.vlog.init(db) - - if !opt.ReadOnly { - db.closers.compactors = y.NewCloser(1) - db.lc.startCompact(db.closers.compactors) - - db.closers.memtable = y.NewCloser(1) - go func() { - _ = db.flushMemtable(db.closers.memtable) // Need levels controller to be up. - }() - } - - headKey := y.KeyWithTs(head, math.MaxUint64) - // Need to pass with timestamp, lsm get removes the last 8 bytes and compares key - vs, err := db.get(headKey) - if err != nil { - return db, errors.Wrap(err, "Retrieving head") - } - db.orc.nextTxnTs = vs.Version - var vptr valuePointer - if len(vs.Value) > 0 { - vptr.Decode(vs.Value) - } - - replayCloser := y.NewCloser(1) - go db.doWrites(replayCloser) - - if err = db.vlog.open(db, vptr, db.replayFunction()); err != nil { - replayCloser.SignalAndWait() - return db, y.Wrapf(err, "During db.vlog.open") - } - replayCloser.SignalAndWait() // Wait for replay to be applied first. - - // Let's advance nextTxnTs to one more than whatever we observed via - // replaying the logs. - db.orc.txnMark.Done(db.orc.nextTxnTs) - // In normal mode, we must update readMark so older versions of keys can be removed during - // compaction when run in offline mode via the flatten tool. - db.orc.readMark.Done(db.orc.nextTxnTs) - db.orc.incrementNextTs() - - db.closers.writes = y.NewCloser(1) - go db.doWrites(db.closers.writes) - - if !db.opt.InMemory { - db.closers.valueGC = y.NewCloser(1) - go db.vlog.waitOnGC(db.closers.valueGC) - } - - db.closers.pub = y.NewCloser(1) - go db.pub.listenForUpdates(db.closers.pub) - - valueDirLockGuard = nil - dirLockGuard = nil - manifestFile = nil - return db, nil -} - -// cleanup stops all the goroutines started by badger. This is used in open to -// cleanup goroutines in case of an error. -func (db *DB) cleanup() { - db.stopMemoryFlush() - db.stopCompactions() - - db.blockCache.Close() - db.indexCache.Close() - if db.closers.updateSize != nil { - db.closers.updateSize.Signal() - } - if db.closers.valueGC != nil { - db.closers.valueGC.Signal() - } - if db.closers.writes != nil { - db.closers.writes.Signal() - } - if db.closers.pub != nil { - db.closers.pub.Signal() - } - - db.orc.Stop() - - // Do not use vlog.Close() here. vlog.Close truncates the files. We don't - // want to truncate files unless the user has specified the truncate flag. - db.vlog.stopFlushDiscardStats() -} - -// BlockCacheMetrics returns the metrics for the underlying block cache. 
-func (db *DB) BlockCacheMetrics() *ristretto.Metrics { - if db.blockCache != nil { - return db.blockCache.Metrics - } - return nil -} - -// IndexCacheMetrics returns the metrics for the underlying index cache. -func (db *DB) IndexCacheMetrics() *ristretto.Metrics { - if db.indexCache != nil { - return db.indexCache.Metrics - } - return nil -} - -// Close closes a DB. It's crucial to call it to ensure all the pending updates make their way to -// disk. Calling DB.Close() multiple times would still only close the DB once. -func (db *DB) Close() error { - var err error - db.closeOnce.Do(func() { - err = db.close() - }) - return err -} - -// IsClosed denotes if the badger DB is closed or not. A DB instance should not -// be used after closing it. -func (db *DB) IsClosed() bool { - return atomic.LoadUint32(&db.isClosed) == 1 -} - -func (db *DB) close() (err error) { - db.opt.Debugf("Closing database") - - atomic.StoreInt32(&db.blockWrites, 1) - - if !db.opt.InMemory { - // Stop value GC first. - db.closers.valueGC.SignalAndWait() - } - - // Stop writes next. - db.closers.writes.SignalAndWait() - - // Don't accept any more write. - close(db.writeCh) - - db.closers.pub.SignalAndWait() - - // Now close the value log. - if vlogErr := db.vlog.Close(); vlogErr != nil { - err = errors.Wrap(vlogErr, "DB.Close") - } - - // Make sure that block writer is done pushing stuff into memtable! - // Otherwise, you will have a race condition: we are trying to flush memtables - // and remove them completely, while the block / memtable writer is still - // trying to push stuff into the memtable. This will also resolve the value - // offset problem: as we push into memtable, we update value offsets there. - if !db.mt.Empty() { - db.opt.Debugf("Flushing memtable") - for { - pushedFlushTask := func() bool { - db.Lock() - defer db.Unlock() - y.AssertTrue(db.mt != nil) - select { - case db.flushChan <- flushTask{mt: db.mt, vptr: db.vhead}: - db.imm = append(db.imm, db.mt) // Flusher will attempt to remove this from s.imm. - db.mt = nil // Will segfault if we try writing! - db.opt.Debugf("pushed to flush chan\n") - return true - default: - // If we fail to push, we need to unlock and wait for a short while. - // The flushing operation needs to update s.imm. Otherwise, we have a deadlock. - // TODO: Think about how to do this more cleanly, maybe without any locks. - } - return false - }() - if pushedFlushTask { - break - } - time.Sleep(10 * time.Millisecond) - } - } - db.stopMemoryFlush() - db.stopCompactions() - - // Force Compact L0 - // We don't need to care about cstatus since no parallel compaction is running. - if db.opt.CompactL0OnClose { - err := db.lc.doCompact(173, compactionPriority{level: 0, score: 1.73}) - switch err { - case errFillTables: - // This error only means that there might be enough tables to do a compaction. So, we - // should not report it to the end user to avoid confusing them. 
- case nil: - db.opt.Infof("Force compaction on level 0 done") - default: - db.opt.Warningf("While forcing compaction on level 0: %v", err) - } - } - - if lcErr := db.lc.close(); err == nil { - err = errors.Wrap(lcErr, "DB.Close") - } - db.opt.Debugf("Waiting for closer") - db.closers.updateSize.SignalAndWait() - db.orc.Stop() - db.blockCache.Close() - db.indexCache.Close() - - atomic.StoreUint32(&db.isClosed, 1) - - if db.opt.InMemory { - return - } - - if db.dirLockGuard != nil { - if guardErr := db.dirLockGuard.release(); err == nil { - err = errors.Wrap(guardErr, "DB.Close") - } - } - if db.valueDirGuard != nil { - if guardErr := db.valueDirGuard.release(); err == nil { - err = errors.Wrap(guardErr, "DB.Close") - } - } - if manifestErr := db.manifest.close(); err == nil { - err = errors.Wrap(manifestErr, "DB.Close") - } - if registryErr := db.registry.Close(); err == nil { - err = errors.Wrap(registryErr, "DB.Close") - } - - // Fsync directories to ensure that lock file, and any other removed files whose directory - // we haven't specifically fsynced, are guaranteed to have their directory entry removal - // persisted to disk. - if syncErr := db.syncDir(db.opt.Dir); err == nil { - err = errors.Wrap(syncErr, "DB.Close") - } - if syncErr := db.syncDir(db.opt.ValueDir); err == nil { - err = errors.Wrap(syncErr, "DB.Close") - } - - return err -} - -// VerifyChecksum verifies checksum for all tables on all levels. -// This method can be used to verify checksum, if opt.ChecksumVerificationMode is NoVerification. -func (db *DB) VerifyChecksum() error { - return db.lc.verifyChecksum() -} - -const ( - lockFile = "LOCK" -) - -// Sync syncs database content to disk. This function provides -// more control to user to sync data whenever required. -func (db *DB) Sync() error { - return db.vlog.sync(math.MaxUint32) -} - -// getMemtables returns the current memtables and get references. -func (db *DB) getMemTables() ([]*skl.Skiplist, func()) { - db.RLock() - defer db.RUnlock() - - tables := make([]*skl.Skiplist, len(db.imm)+1) - - // Get mutable memtable. - tables[0] = db.mt - tables[0].IncrRef() - - // Get immutable memtables. - last := len(db.imm) - 1 - for i := range db.imm { - tables[i+1] = db.imm[last-i] - tables[i+1].IncrRef() - } - return tables, func() { - for _, tbl := range tables { - tbl.DecrRef() - } - } -} - -// get returns the value in memtable or disk for given key. -// Note that value will include meta byte. -// -// IMPORTANT: We should never write an entry with an older timestamp for the same key, We need to -// maintain this invariant to search for the latest value of a key, or else we need to search in all -// tables and find the max version among them. To maintain this invariant, we also need to ensure -// that all versions of a key are always present in the same table from level 1, because compaction -// can push any table down. -// -// Update (Sep 22, 2018): To maintain the above invariant, and to allow keys to be moved from one -// value log to another (while reclaiming space during value log GC), we have logically moved this -// need to write "old versions after new versions" to the badgerMove keyspace. Thus, for normal -// gets, we can stop going down the LSM tree once we find any version of the key (note however that -// we will ALWAYS skip versions with ts greater than the key version). 
-// been moved, then for the corresponding movekey, we'll look through all the levels of the tree
-// to ensure that we pick the highest version of the movekey present.
-func (db *DB) get(key []byte) (y.ValueStruct, error) {
-	if db.IsClosed() {
-		return y.ValueStruct{}, ErrDBClosed
-	}
-	tables, decr := db.getMemTables() // Lock should be released.
-	defer decr()
-
-	var maxVs *y.ValueStruct
-	var version uint64
-	if bytes.HasPrefix(key, badgerMove) {
-		// If we are checking badgerMove key, we should look into all the
-		// levels, so we can pick up the newer versions, which might have been
-		// compacted down the tree.
-		maxVs = &y.ValueStruct{}
-		version = y.ParseTs(key)
-	}
-
-	y.NumGets.Add(1)
-	for i := 0; i < len(tables); i++ {
-		vs := tables[i].Get(key)
-		y.NumMemtableGets.Add(1)
-		if vs.Meta == 0 && vs.Value == nil {
-			continue
-		}
-		// Found a version of the key. For user keyspace, return immediately. For move keyspace,
-		// continue iterating, unless we found a version == given key version.
-		if maxVs == nil || vs.Version == version {
-			return vs, nil
-		}
-		if maxVs.Version < vs.Version {
-			*maxVs = vs
-		}
-	}
-	return db.lc.get(key, maxVs, 0)
-}
-
-// updateHead should not be called without the db.Lock() since db.vhead is used
-// by the writer goroutines and the memtable flushing goroutine.
-func (db *DB) updateHead(ptrs []valuePointer) {
-	var ptr valuePointer
-	for i := len(ptrs) - 1; i >= 0; i-- {
-		p := ptrs[i]
-		if !p.IsZero() {
-			ptr = p
-			break
-		}
-	}
-	if ptr.IsZero() {
-		return
-	}
-
-	y.AssertTrue(!ptr.Less(db.vhead))
-	db.vhead = ptr
-}
-
-var requestPool = sync.Pool{
-	New: func() interface{} {
-		return new(request)
-	},
-}
-
-func (db *DB) shouldWriteValueToLSM(e Entry) bool {
-	return len(e.Value) < db.opt.ValueThreshold
-}
-
-func (db *DB) writeToLSM(b *request) error {
-	// We should check the length of b.Ptrs and b.Entries only when badger is not
-	// running in InMemory mode. In InMemory mode, we don't write anything to the
-	// value log and that's why the length of b.Ptrs will always be zero.
-	if !db.opt.InMemory && len(b.Ptrs) != len(b.Entries) {
-		return errors.Errorf("Ptrs and Entries don't match: %+v", b)
-	}
-
-	for i, entry := range b.Entries {
-		if entry.meta&bitFinTxn != 0 {
-			continue
-		}
-		if db.shouldWriteValueToLSM(*entry) { // Will include deletion / tombstone case.
-			db.mt.Put(entry.Key,
-				y.ValueStruct{
-					Value: entry.Value,
-					// Ensure value pointer flag is removed. Otherwise, the value will fail
-					// to be retrieved during iterator prefetch. `bitValuePointer` is only
-					// known to be set in write to LSM when the entry is loaded from a backup
-					// with lower ValueThreshold and its value was stored in the value log.
-					Meta:      entry.meta &^ bitValuePointer,
-					UserMeta:  entry.UserMeta,
-					ExpiresAt: entry.ExpiresAt,
-				})
-		} else {
-			db.mt.Put(entry.Key,
-				y.ValueStruct{
-					Value:     b.Ptrs[i].Encode(),
-					Meta:      entry.meta | bitValuePointer,
-					UserMeta:  entry.UserMeta,
-					ExpiresAt: entry.ExpiresAt,
-				})
-		}
-	}
-	return nil
-}
-
-// writeRequests is called serially by only one goroutine.
-func (db *DB) writeRequests(reqs []*request) error {
-	if len(reqs) == 0 {
-		return nil
-	}
-
-	done := func(err error) {
-		for _, r := range reqs {
-			r.Err = err
-			r.Wg.Done()
-		}
-	}
-	db.opt.Debugf("writeRequests called. Writing to value log")
-	err := db.vlog.write(reqs)
-	if err != nil {
-		done(err)
-		return err
-	}
-
-	db.opt.Debugf("Sending updates to subscribers")
-	db.pub.sendUpdates(reqs)
-	db.opt.Debugf("Writing to memtable")
-	var count int
-	for _, b := range reqs {
-		if len(b.Entries) == 0 {
-			continue
-		}
-		count += len(b.Entries)
-		var i uint64
-		for err = db.ensureRoomForWrite(); err == errNoRoom; err = db.ensureRoomForWrite() {
-			i++
-			if i%100 == 0 {
-				db.opt.Debugf("Making room for writes")
-			}
-			// We need to poll a bit because both hasRoomForWrite and the flusher need access to s.imm.
-			// When flushChan is full and you are blocked there, and the flusher is trying to update s.imm,
-			// you will get a deadlock.
-			time.Sleep(10 * time.Millisecond)
-		}
-		if err != nil {
-			done(err)
-			return errors.Wrap(err, "writeRequests")
-		}
-		if err := db.writeToLSM(b); err != nil {
-			done(err)
-			return errors.Wrap(err, "writeRequests")
-		}
-		db.Lock()
-		db.updateHead(b.Ptrs)
-		db.Unlock()
-	}
-	done(nil)
-	db.opt.Debugf("%d entries written", count)
-	return nil
-}
-
-func (db *DB) sendToWriteCh(entries []*Entry) (*request, error) {
-	if atomic.LoadInt32(&db.blockWrites) == 1 {
-		return nil, ErrBlockedWrites
-	}
-	var count, size int64
-	for _, e := range entries {
-		size += int64(e.estimateSize(db.opt.ValueThreshold))
-		count++
-	}
-	if count >= db.opt.maxBatchCount || size >= db.opt.maxBatchSize {
-		return nil, ErrTxnTooBig
-	}
-
-	// We can only service one request because we need each txn to be stored in a contiguous section.
-	// Txns should not interleave among other txns or rewrites.
-	req := requestPool.Get().(*request)
-	req.reset()
-	req.Entries = entries
-	req.Wg.Add(1)
-	req.IncrRef()     // for db write
-	db.writeCh <- req // Handled in doWrites.
-	y.NumPuts.Add(int64(len(entries)))
-
-	return req, nil
-}
-
-func (db *DB) doWrites(lc *y.Closer) {
-	defer lc.Done()
-	pendingCh := make(chan struct{}, 1)
-
-	writeRequests := func(reqs []*request) {
-		if err := db.writeRequests(reqs); err != nil {
-			db.opt.Errorf("writeRequests: %v", err)
-		}
-		<-pendingCh
-	}
-
-	// This variable tracks the number of pending writes.
-	reqLen := new(expvar.Int)
-	y.PendingWrites.Set(db.opt.Dir, reqLen)
-
-	reqs := make([]*request, 0, 10)
-	for {
-		var r *request
-		select {
-		case r = <-db.writeCh:
-		case <-lc.HasBeenClosed():
-			goto closedCase
-		}
-
-		for {
-			reqs = append(reqs, r)
-			reqLen.Set(int64(len(reqs)))
-
-			if len(reqs) >= 3*kvWriteChCapacity {
-				pendingCh <- struct{}{} // blocking.
-				goto writeCase
-			}
-
-			select {
-			// Either push to pending, or continue to pick from writeCh.
-			case r = <-db.writeCh:
-			case pendingCh <- struct{}{}:
-				goto writeCase
-			case <-lc.HasBeenClosed():
-				goto closedCase
-			}
-		}
-
-	closedCase:
-		// All the pending requests are drained.
-		// Don't close the writeCh, because it has to be used in several places.
-		for {
-			select {
-			case r = <-db.writeCh:
-				reqs = append(reqs, r)
-			default:
-				pendingCh <- struct{}{} // Push to pending before doing a write.
-				writeRequests(reqs)
-				return
-			}
-		}
-
-	writeCase:
-		go writeRequests(reqs)
-		reqs = make([]*request, 0, 10)
-		reqLen.Set(0)
-	}
-}
-
-// batchSet applies a list of badger.Entry. If a request level error occurs, it
-// will be returned.
-//   Check(kv.BatchSet(entries))
-func (db *DB) batchSet(entries []*Entry) error {
-	req, err := db.sendToWriteCh(entries)
-	if err != nil {
-		return err
-	}
-
-	return req.Wait()
-}
-
-// batchSetAsync is the asynchronous version of batchSet. It accepts a callback
-// function which is called when all the sets are complete. If a request level
-// error occurs, it will be passed back via the callback.
-//   err := kv.BatchSetAsync(entries, func(err error) {
-//      Check(err)
-//   })
-func (db *DB) batchSetAsync(entries []*Entry, f func(error)) error {
-	req, err := db.sendToWriteCh(entries)
-	if err != nil {
-		return err
-	}
-	go func() {
-		err := req.Wait()
-		// Write is complete. Let's call the callback function now.
-		f(err)
-	}()
-	return nil
-}
-
-var errNoRoom = errors.New("No room for write")
-
-// ensureRoomForWrite is always called serially.
-func (db *DB) ensureRoomForWrite() error {
-	var err error
-	db.Lock()
-	defer db.Unlock()
-
-	// Here we determine if we need to force flush the memtable. Given we rotated the log file, it
-	// would make sense to force flush a memtable, so the updated value head would have a chance to
-	// be pushed to L0. Otherwise, it would not go to L0 until the memtable has been fully filled,
-	// which can take a lot longer if the write load has fewer keys and larger values. This force
-	// flush thus avoids the need to read through a lot of log files on a crash and restart.
-	// The above approach is quite simple, with a small drawback: we call ensureRoomForWrite before
-	// inserting every entry in the memtable, but we only get the latest db.head after all entries
-	// for a request are inserted. If we have done >= db.logRotates rotations, then while inserting
-	// the first entry into the memtable, the condition below will be true and we will end up
-	// flushing the old value of db.head. Hence, we limit the number of value log files to be read
-	// on restart to db.logRotates only.
-	forceFlush := atomic.LoadInt32(&db.logRotates) >= db.opt.LogRotatesToFlush
-
-	if !forceFlush && db.mt.MemSize() < db.opt.MaxTableSize {
-		return nil
-	}
-
-	y.AssertTrue(db.mt != nil) // A nil mt indicates that DB is being closed.
-	select {
-	case db.flushChan <- flushTask{mt: db.mt, vptr: db.vhead}:
-		// After every memtable flush, let's reset the counter.
-		atomic.StoreInt32(&db.logRotates, 0)
-
-		// Ensure value log is synced to disk so this memtable's contents wouldn't be lost.
-		err = db.vlog.sync(db.vhead.Fid)
-		if err != nil {
-			return err
-		}
-
-		db.opt.Debugf("Flushing memtable, mt.size=%d size of flushChan: %d\n",
-			db.mt.MemSize(), len(db.flushChan))
-		// We managed to push this task. Let's modify imm.
-		db.imm = append(db.imm, db.mt)
-		db.mt = skl.NewSkiplist(arenaSize(db.opt))
-		// New memtable is empty. We certainly have room.
-		return nil
-	default:
-		// We need to do this to unlock and allow the flusher to modify imm.
-		return errNoRoom
-	}
-}
-
-func arenaSize(opt Options) int64 {
-	return opt.MaxTableSize + opt.maxBatchSize + opt.maxBatchCount*int64(skl.MaxNodeSize)
-}
-
-// buildL0Table builds a new table from the memtable.
-func buildL0Table(ft flushTask, bopts table.Options) []byte {
-	iter := ft.mt.NewIterator()
-	defer iter.Close()
-	b := table.NewTableBuilder(bopts)
-	defer b.Close()
-	var vp valuePointer
-	for iter.SeekToFirst(); iter.Valid(); iter.Next() {
-		if len(ft.dropPrefixes) > 0 && hasAnyPrefixes(iter.Key(), ft.dropPrefixes) {
-			continue
-		}
-		vs := iter.Value()
-		if vs.Meta&bitValuePointer > 0 {
-			vp.Decode(vs.Value)
-		}
-		b.Add(iter.Key(), iter.Value(), vp.Len)
-	}
-	return b.Finish()
-}
-
-type flushTask struct {
-	mt           *skl.Skiplist
-	vptr         valuePointer
-	dropPrefixes [][]byte
-}
-
-func (db *DB) pushHead(ft flushTask) error {
-	// We don't need to store the head pointer in in-memory mode, since we will
-	// never replay anything.
-	if db.opt.InMemory {
-		return nil
-	}
-	// Ensure we never push a zero-valued head pointer.
-	if ft.vptr.IsZero() {
-		return errors.New("Head should not be zero")
-	}
-
-	// Store the badger head pointer; we need it for the read ts.
-	db.opt.Infof("Storing value log head: %+v\n", ft.vptr)
-	val := ft.vptr.Encode()
-
-	// Pick the max commit ts, so in case of crash, our read ts would be higher than all the
-	// commits.
-	headTs := y.KeyWithTs(head, db.orc.nextTs())
-	ft.mt.Put(headTs, y.ValueStruct{Value: val})
-
-	return nil
-}
-
-// handleFlushTask must be run serially.
-func (db *DB) handleFlushTask(ft flushTask) error {
-	// There can be a scenario where an empty memtable is flushed. For example, the memtable is
-	// empty and, after writing a request to the value log, the rotation count exceeds
-	// db.LogRotatesToFlush.
-	if ft.mt.Empty() {
-		return nil
-	}
-
-	if err := db.pushHead(ft); err != nil {
-		return err
-	}
-
-	dk, err := db.registry.latestDataKey()
-	if err != nil {
-		return y.Wrapf(err, "failed to get datakey in db.handleFlushTask")
-	}
-	bopts := buildTableOptions(db.opt)
-	bopts.DataKey = dk
-	// Builder does not need cache but the same options are used for opening table.
-	bopts.BlockCache = db.blockCache
-	bopts.IndexCache = db.indexCache
-	tableData := buildL0Table(ft, bopts)
-
-	fileID := db.lc.reserveFileID()
-	if db.opt.KeepL0InMemory {
-		tbl, err := table.OpenInMemoryTable(tableData, fileID, &bopts)
-		if err != nil {
-			return errors.Wrapf(err, "failed to open table in memory")
-		}
-		return db.lc.addLevel0Table(tbl)
-	}
-
-	fd, err := y.CreateSyncedFile(table.NewFilename(fileID, db.opt.Dir), true)
-	if err != nil {
-		return y.Wrap(err)
-	}
-
-	// Don't block just to sync the directory entry.
-	dirSyncCh := make(chan error, 1)
-	go func() { dirSyncCh <- db.syncDir(db.opt.Dir) }()
-
-	if _, err = fd.Write(tableData); err != nil {
-		db.opt.Errorf("ERROR while writing to level 0: %v", err)
-		return err
-	}
-
-	if dirSyncErr := <-dirSyncCh; dirSyncErr != nil {
-		// Do dir sync as best effort. No need to return due to an error there.
-		db.opt.Errorf("ERROR while syncing level directory: %v", dirSyncErr)
-	}
-	tbl, err := table.OpenTable(fd, bopts)
-	if err != nil {
-		db.opt.Debugf("ERROR while opening table: %v", err)
-		return err
-	}
-	// We own a ref on tbl.
-	err = db.lc.addLevel0Table(tbl) // This will incrRef
-	_ = tbl.DecrRef()               // Releases our ref.
-	return err
-}
-
-// flushMemtable must keep running until db.flushChan is closed. If there
-// are errors while handling a flush task, we'll retry indefinitely.
-func (db *DB) flushMemtable(lc *y.Closer) error {
-	defer lc.Done()
-
-	for ft := range db.flushChan {
-		if ft.mt == nil {
-			// We close db.flushChan now, instead of sending a nil ft.mt.
-			continue
-		}
-		for {
-			err := db.handleFlushTask(ft)
-			if err == nil {
-				// Update s.imm. Need a lock.
-				db.Lock()
-				// This is a single-threaded operation. ft.mt corresponds to the head of
-				// db.imm list. Once we flush it, we advance db.imm. The next ft.mt
-				// which would arrive here would match db.imm[0], because we acquire a
-				// lock over DB when pushing to flushChan.
-				// TODO: This logic is dirty AF. Any change and this could easily break.
-				y.AssertTrue(ft.mt == db.imm[0])
-				db.imm = db.imm[1:]
-				ft.mt.DecrRef() // Return memory.
-				db.Unlock()
-
-				break
-			}
-			// Encountered error. Retry indefinitely.
-			db.opt.Errorf("Failure while flushing memtable to disk: %v. Retrying...\n", err)
-			time.Sleep(time.Second)
-		}
-	}
-	return nil
-}
-
-func exists(path string) (bool, error) {
-	_, err := os.Stat(path)
-	if err == nil {
-		return true, nil
-	}
-	if os.IsNotExist(err) {
-		return false, nil
-	}
-	return true, err
-}
-
-// This function does a filewalk, calculates the size of vlog and sst files and stores it in
-// y.LSMSize and y.VlogSize.
-func (db *DB) calculateSize() {
-	if db.opt.InMemory {
-		return
-	}
-	newInt := func(val int64) *expvar.Int {
-		v := new(expvar.Int)
-		v.Add(val)
-		return v
-	}
-
-	totalSize := func(dir string) (int64, int64) {
-		var lsmSize, vlogSize int64
-		err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
-			if err != nil {
-				return err
-			}
-			ext := filepath.Ext(path)
-			switch ext {
-			case ".sst":
-				lsmSize += info.Size()
-			case ".vlog":
-				vlogSize += info.Size()
-			}
-			return nil
-		})
-		if err != nil {
-			db.opt.Debugf("Got error while calculating total size of directory: %s", dir)
-		}
-		return lsmSize, vlogSize
-	}
-
-	lsmSize, vlogSize := totalSize(db.opt.Dir)
-	y.LSMSize.Set(db.opt.Dir, newInt(lsmSize))
-	// If valueDir is different from dir, we'd have to do another walk.
-	if db.opt.ValueDir != db.opt.Dir {
-		_, vlogSize = totalSize(db.opt.ValueDir)
-	}
-	y.VlogSize.Set(db.opt.ValueDir, newInt(vlogSize))
-}
-
-func (db *DB) updateSize(lc *y.Closer) {
-	defer lc.Done()
-	if db.opt.InMemory {
-		return
-	}
-
-	metricsTicker := time.NewTicker(time.Minute)
-	defer metricsTicker.Stop()
-
-	for {
-		select {
-		case <-metricsTicker.C:
-			db.calculateSize()
-		case <-lc.HasBeenClosed():
-			return
-		}
-	}
-}
-
-// RunValueLogGC triggers a value log garbage collection.
-//
-// It picks value log files to perform GC based on statistics that are collected
-// during compactions. If no such statistics are available, then log files are
-// picked in random order. The process stops as soon as the first log file is
-// encountered which does not result in garbage collection.
-//
-// When a log file is picked, it is first sampled. If the sample shows that we
-// can discard at least discardRatio space of that file, it would be rewritten.
-//
-// If a call to RunValueLogGC results in no rewrites, then ErrNoRewrite is
-// returned, indicating that the call resulted in no file rewrites.
-//
-// We recommend setting discardRatio to 0.5, thus indicating that a file be
-// rewritten if half the space can be discarded. This results in a lifetime
-// value log write amplification of 2 (1 from original write + 0.5 rewrite +
-// 0.25 + 0.125 + ... = 2). Setting it to a higher value would result in fewer
-// space reclaims, while setting it to a lower value would result in more space
-// reclaims at the cost of increased activity on the LSM tree. discardRatio
-// must be in the range (0.0, 1.0), both endpoints excluded, otherwise an
-// ErrInvalidRequest is returned.
-//
-// Only one GC is allowed at a time. If another value log GC is running, or DB
-// has been closed, this would return an ErrRejected.
-//
-// Note: Every time GC is run, it would produce a spike of activity on the LSM
-// tree.
-func (db *DB) RunValueLogGC(discardRatio float64) error {
-	if db.opt.InMemory {
-		return ErrGCInMemoryMode
-	}
-	if discardRatio >= 1.0 || discardRatio <= 0.0 {
-		return ErrInvalidRequest
-	}
-
-	// startLevel is the level from which we should search for the head key. When badger is running
-	// with KeepL0InMemory flag, all tables on L0 are kept in memory. This means we should pick head
-	// key from Level 1 onwards because if we pick the headkey from Level 0 we might end up losing
-	// data. See test TestL0GCBug.
-	startLevel := 0
-	if db.opt.KeepL0InMemory {
-		startLevel = 1
-	}
-	// Find head on disk.
-	headKey := y.KeyWithTs(head, math.MaxUint64)
-	// Need to pass with timestamp, lsm get removes the last 8 bytes and compares key.
-	val, err := db.lc.get(headKey, nil, startLevel)
-	if err != nil {
-		return errors.Wrap(err, "Retrieving head from on-disk LSM")
-	}
-
-	var head valuePointer
-	if len(val.Value) > 0 {
-		head.Decode(val.Value)
-	}
-
-	// Pick a log file and run GC.
-	return db.vlog.runGC(discardRatio, head)
-}
-
-// Size returns the size of lsm and value log files in bytes. It can be used to decide how often to
-// call RunValueLogGC.
-func (db *DB) Size() (lsm, vlog int64) {
-	if y.LSMSize.Get(db.opt.Dir) == nil {
-		lsm, vlog = 0, 0
-		return
-	}
-	lsm = y.LSMSize.Get(db.opt.Dir).(*expvar.Int).Value()
-	vlog = y.VlogSize.Get(db.opt.ValueDir).(*expvar.Int).Value()
-	return
-}
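
An illustrative sketch of the maintenance loop the RunValueLogGC contract above implies (editor's example in Go, not part of the deleted file; assumes the usual v2 `badger` import). Each successful call rewrites at most one value log file, so callers typically loop until ErrNoRewrite:

	func runValueLogGC(db *badger.DB) error {
		for {
			switch err := db.RunValueLogGC(0.5); err { // 0.5 is the recommended discardRatio.
			case nil:
				// A file was rewritten; there may be more space to reclaim, so try again.
			case badger.ErrNoRewrite:
				return nil // No file could be rewritten; GC is done for now.
			default:
				return err // e.g. ErrRejected, ErrGCInMemoryMode, ErrInvalidRequest.
			}
		}
	}
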
-
-// Sequence represents a Badger sequence.
-type Sequence struct {
-	sync.Mutex
-	db        *DB
-	key       []byte
-	next      uint64
-	leased    uint64
-	bandwidth uint64
-}
-
-// Next would return the next integer in the sequence, updating the lease by running a transaction
-// if needed.
-func (seq *Sequence) Next() (uint64, error) {
-	seq.Lock()
-	defer seq.Unlock()
-	if seq.next >= seq.leased {
-		if err := seq.updateLease(); err != nil {
-			return 0, err
-		}
-	}
-	val := seq.next
-	seq.next++
-	return val, nil
-}
-
-// Release the leased sequence to avoid wasted integers. This should be done right
-// before closing the associated DB. However it is valid to use the sequence after
-// it was released, causing a new lease with full bandwidth.
-func (seq *Sequence) Release() error {
-	seq.Lock()
-	defer seq.Unlock()
-	err := seq.db.Update(func(txn *Txn) error {
-		item, err := txn.Get(seq.key)
-		if err != nil {
-			return err
-		}
-
-		var num uint64
-		if err := item.Value(func(v []byte) error {
-			num = binary.BigEndian.Uint64(v)
-			return nil
-		}); err != nil {
-			return err
-		}
-
-		if num == seq.leased {
-			var buf [8]byte
-			binary.BigEndian.PutUint64(buf[:], seq.next)
-			return txn.SetEntry(NewEntry(seq.key, buf[:]))
-		}
-
-		return nil
-	})
-	if err != nil {
-		return err
-	}
-	seq.leased = seq.next
-	return nil
-}
-
-func (seq *Sequence) updateLease() error {
-	return seq.db.Update(func(txn *Txn) error {
-		item, err := txn.Get(seq.key)
-		switch {
-		case err == ErrKeyNotFound:
-			seq.next = 0
-		case err != nil:
-			return err
-		default:
-			var num uint64
-			if err := item.Value(func(v []byte) error {
-				num = binary.BigEndian.Uint64(v)
-				return nil
-			}); err != nil {
-				return err
-			}
-			seq.next = num
-		}
-
-		lease := seq.next + seq.bandwidth
-		var buf [8]byte
-		binary.BigEndian.PutUint64(buf[:], lease)
-		if err = txn.SetEntry(NewEntry(seq.key, buf[:])); err != nil {
-			return err
-		}
-		seq.leased = lease
-		return nil
-	})
-}
-
-// GetSequence would initiate a new sequence object, generating it from the stored lease, if
-// available, in the database. Sequence can be used to get a list of monotonically increasing
-// integers. Multiple sequences can be created by providing different keys. Bandwidth sets the
-// size of the lease, determining how many Next() requests can be served from memory.
-//
-// GetSequence is not supported on ManagedDB. Calling this would result in a panic.
-func (db *DB) GetSequence(key []byte, bandwidth uint64) (*Sequence, error) {
-	if db.opt.managedTxns {
-		panic("Cannot use GetSequence with managedDB=true.")
-	}
-
-	switch {
-	case len(key) == 0:
-		return nil, ErrEmptyKey
-	case bandwidth == 0:
-		return nil, ErrZeroBandwidth
-	}
-	seq := &Sequence{
-		db:        db,
-		key:       key,
-		next:      0,
-		leased:    0,
-		bandwidth: bandwidth,
-	}
-	err := seq.updateLease()
-	return seq, err
-}
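
A minimal usage sketch for the Sequence API above (editor's illustration, not from the original source; the key name and bandwidth are hypothetical):

	seq, err := db.GetSequence([]byte("seq/ids"), 1000) // lease 1000 integers at a time
	if err != nil {
		return err
	}
	defer func() { _ = seq.Release() }() // return unused integers right before closing the DB

	id, err := seq.Next() // served from the in-memory lease; runs a txn only when exhausted
	if err != nil {
		return err
	}
	fmt.Println("allocated id:", id)
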
-
-// Tables gets the TableInfo objects from the level controller. If withKeysCount
-// is true, TableInfo objects also contain counts of keys for the tables.
-func (db *DB) Tables(withKeysCount bool) []TableInfo {
-	return db.lc.getTableInfo(withKeysCount)
-}
-
-// KeySplits can be used to get rough key ranges to divide up iteration over
-// the DB.
-func (db *DB) KeySplits(prefix []byte) []string {
-	var splits []string
-	// We just want table ranges here and not keys count.
-	for _, ti := range db.Tables(false) {
-		// We don't use ti.Left, because that has a tendency to store !badger
-		// keys.
-		if bytes.HasPrefix(ti.Right, prefix) {
-			splits = append(splits, string(ti.Right))
-		}
-	}
-	sort.Strings(splits)
-	return splits
-}
-
-// MaxBatchCount returns the maximum number of entries possible in a batch.
-func (db *DB) MaxBatchCount() int64 {
-	return db.opt.maxBatchCount
-}
-
-// MaxBatchSize returns the maximum possible batch size.
-func (db *DB) MaxBatchSize() int64 {
-	return db.opt.maxBatchSize
-}
-
-func (db *DB) stopMemoryFlush() {
-	// Stop memtable flushes.
-	if db.closers.memtable != nil {
-		close(db.flushChan)
-		db.closers.memtable.SignalAndWait()
-	}
-}
-
-func (db *DB) stopCompactions() {
-	// Stop compactions.
-	if db.closers.compactors != nil {
-		db.closers.compactors.SignalAndWait()
-	}
-}
-
-func (db *DB) startCompactions() {
-	// Resume compactions.
-	if db.closers.compactors != nil {
-		db.closers.compactors = y.NewCloser(1)
-		db.lc.startCompact(db.closers.compactors)
-	}
-}
-
-func (db *DB) startMemoryFlush() {
-	// Start the memory flusher.
-	if db.closers.memtable != nil {
-		db.flushChan = make(chan flushTask, db.opt.NumMemtables)
-		db.closers.memtable = y.NewCloser(1)
-		go func() {
-			_ = db.flushMemtable(db.closers.memtable)
-		}()
-	}
-}
-
-// Flatten can be used to force compactions on the LSM tree so all the tables fall on the same
-// level. This ensures that all the versions of keys are colocated and not split across multiple
-// levels, which is necessary after a restore from backup. During Flatten, live compactions are
-// stopped. Ideally, no writes are going on during Flatten. Otherwise, it would create competition
-// between flattening the tree and new tables being created at level zero.
-func (db *DB) Flatten(workers int) error {
-	db.stopCompactions()
-	defer db.startCompactions()
-
-	compactAway := func(cp compactionPriority) error {
-		db.opt.Infof("Attempting to compact with %+v\n", cp)
-		errCh := make(chan error, 1)
-		for i := 0; i < workers; i++ {
-			go func() {
-				errCh <- db.lc.doCompact(175, cp)
-			}()
-		}
-		var success int
-		var rerr error
-		for i := 0; i < workers; i++ {
-			err := <-errCh
-			if err != nil {
-				rerr = err
-				db.opt.Warningf("While running doCompact with %+v. Error: %v\n", cp, err)
-			} else {
-				success++
-			}
-		}
-		if success == 0 {
-			return rerr
-		}
-		// We could do at least one successful compaction. So, we'll consider this a success.
-		db.opt.Infof("%d compactor(s) succeeded. One or more tables from level %d compacted.\n",
-			success, cp.level)
-		return nil
-	}
-
-	hbytes := func(sz int64) string {
-		return humanize.Bytes(uint64(sz))
-	}
-
-	for {
-		db.opt.Infof("\n")
-		var levels []int
-		for i, l := range db.lc.levels {
-			sz := l.getTotalSize()
-			db.opt.Infof("Level: %d. %8s Size. %8s Max.\n",
-				i, hbytes(l.getTotalSize()), hbytes(l.maxTotalSize))
-			if sz > 0 {
-				levels = append(levels, i)
-			}
-		}
-		if len(levels) <= 1 {
-			prios := db.lc.pickCompactLevels()
-			if len(prios) == 0 || prios[0].score <= 1.0 {
-				db.opt.Infof("All tables consolidated into one level. Flattening done.\n")
-				return nil
-			}
-			if err := compactAway(prios[0]); err != nil {
-				return err
-			}
-			continue
-		}
-		// Create an artificial compaction priority, to ensure that we compact the level.
-		cp := compactionPriority{level: levels[0], score: 1.71}
-		if err := compactAway(cp); err != nil {
-			return err
-		}
-	}
-}
-
-func (db *DB) blockWrite() error {
-	// Stop accepting new writes.
-	if !atomic.CompareAndSwapInt32(&db.blockWrites, 0, 1) {
-		return ErrBlockedWrites
-	}
-
-	// Make all pending writes finish. The following will also close writeCh.
-	db.closers.writes.SignalAndWait()
-	db.opt.Infof("Writes flushed. Stopping compactions now...")
-	return nil
-}
-
-func (db *DB) unblockWrite() {
-	db.closers.writes = y.NewCloser(1)
-	go db.doWrites(db.closers.writes)
-
-	// Resume writes.
-	atomic.StoreInt32(&db.blockWrites, 0)
-}
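
Per the Flatten doc comment above, the call is typically made right after a restore from backup, while the DB is otherwise idle. A hedged sketch (editor's example; the worker count is arbitrary):

	// Move all tables onto a single level so every version of a key is colocated.
	if err := db.Flatten(3); err != nil {
		return err
	}
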
-
-func (db *DB) prepareToDrop() (func(), error) {
-	if db.opt.ReadOnly {
-		panic("Attempting to drop data in read-only mode.")
-	}
-	// In order to prepare for a drop, we need to block the incoming writes and
-	// write them to the db. Then, flush all the pending flush tasks, so that we
-	// don't miss any entries.
-	if err := db.blockWrite(); err != nil {
-		return nil, err
-	}
-	reqs := make([]*request, 0, 10)
-	for {
-		select {
-		case r := <-db.writeCh:
-			reqs = append(reqs, r)
-		default:
-			if err := db.writeRequests(reqs); err != nil {
-				db.opt.Errorf("writeRequests: %v", err)
-			}
-			db.stopMemoryFlush()
-			return func() {
-				db.opt.Infof("Resuming writes")
-				db.startMemoryFlush()
-				db.unblockWrite()
-			}, nil
-		}
-	}
-}
-
-// DropAll would drop all the data stored in Badger. It does this in the following way.
-// - Stop accepting new writes.
-// - Pause memtable flushes and compactions.
-// - Pick all tables from all levels, create a changeset to delete all these
-//   tables and apply it to manifest.
-// - Pick all log files from value log, and delete all of them. Restart value log files from zero.
-// - Resume memtable flushes and compactions.
-//
-// NOTE: DropAll is resilient to concurrent writes, but not to reads. It is up to the user to not do
-// any reads while DropAll is going on, otherwise they may result in panics. Ideally, both reads and
-// writes are paused before running DropAll, and resumed after it is finished.
-func (db *DB) DropAll() error {
-	f, err := db.dropAll()
-	if f != nil {
-		f()
-	}
-	return err
-}
-
-func (db *DB) dropAll() (func(), error) {
-	db.opt.Infof("DropAll called. Blocking writes...")
-	f, err := db.prepareToDrop()
-	if err != nil {
-		return f, err
-	}
-	// prepareToDrop will stop all the incoming writes and flush any pending flush tasks.
-	// Before we drop, we'll stop the compaction because anyway all the data is going to
-	// be deleted.
-	db.stopCompactions()
-	resume := func() {
-		db.startCompactions()
-		f()
-	}
-	// Block all foreign interactions with memory tables.
-	db.Lock()
-	defer db.Unlock()
-
-	// Remove in-memory tables. Calling DecrRef for safety. Not sure if they're absolutely needed.
-	db.mt.DecrRef()
-	for _, mt := range db.imm {
-		mt.DecrRef()
-	}
-	db.imm = db.imm[:0]
-	db.mt = skl.NewSkiplist(arenaSize(db.opt)) // Set it up for future writes.
-
-	num, err := db.lc.dropTree()
-	if err != nil {
-		return resume, err
-	}
-	db.opt.Infof("Deleted %d SSTables. Now deleting value logs...\n", num)
-
-	num, err = db.vlog.dropAll()
-	if err != nil {
-		return resume, err
-	}
-	db.vhead = valuePointer{} // Zero it out.
-	db.lc.nextFileID = 1
-	db.opt.Infof("Deleted %d value log files. DropAll done.\n", num)
-	db.blockCache.Clear()
-	db.indexCache.Clear()
-
-	return resume, nil
-}
-
-// DropPrefix would drop all the keys with the provided prefix. It does this in the following way:
-// - Stop accepting new writes.
-// - Stop memtable flushes before acquiring the lock. Because we're acquiring the lock here,
-//   and memtable flushes stall on the lock, which leads to deadlock.
-// - Flush out all memtables, skipping over keys with the given prefix, Kp.
-// - Write out the value log header to memtables when flushing, so we don't accidentally bring Kp
-//   back after a restart.
-// - Stop compaction.
-// - Compact L0->L1, skipping over Kp.
-// - Compact rest of the levels, Li->Li, picking tables which have Kp.
-// - Resume memtable flushes, compactions and writes.
-func (db *DB) DropPrefix(prefixes ...[]byte) error {
-	db.opt.Infof("DropPrefix Called")
-	f, err := db.prepareToDrop()
-	if err != nil {
-		return err
-	}
-	defer f()
-	// Block all foreign interactions with memory tables.
-	db.Lock()
-	defer db.Unlock()
-
-	db.imm = append(db.imm, db.mt)
-	for _, memtable := range db.imm {
-		if memtable.Empty() {
-			memtable.DecrRef()
-			continue
-		}
-		task := flushTask{
-			mt: memtable,
-			// Ensure that the head of value log gets persisted to disk.
-			vptr:         db.vhead,
-			dropPrefixes: prefixes,
-		}
-		db.opt.Debugf("Flushing memtable")
-		if err := db.handleFlushTask(task); err != nil {
-			db.opt.Errorf("While trying to flush memtable: %v", err)
-			return err
-		}
-		memtable.DecrRef()
-	}
-	db.stopCompactions()
-	defer db.startCompactions()
-	db.imm = db.imm[:0]
-	db.mt = skl.NewSkiplist(arenaSize(db.opt))
-
-	// Drop prefixes from the levels.
-	if err := db.lc.dropPrefixes(prefixes); err != nil {
-		return err
-	}
-	db.opt.Infof("DropPrefix done")
-	return nil
-}
-
-// KVList contains a list of key-value pairs.
-type KVList = pb.KVList
-
-// Subscribe can be used to watch key changes for the given key prefixes.
-// At least one prefix should be passed, or an error will be returned.
-// You can use an empty prefix to monitor all changes to the DB.
-// This function blocks until the given context is done or an error occurs.
-// The given function will be called with a new KVList containing the modified keys and the
-// corresponding values.
-func (db *DB) Subscribe(ctx context.Context, cb func(kv *KVList) error, prefixes ...[]byte) error {
-	if cb == nil {
-		return ErrNilCallback
-	}
-
-	c := y.NewCloser(1)
-	recvCh, id := db.pub.newSubscriber(c, prefixes...)
-	slurp := func(batch *pb.KVList) error {
-		for {
-			select {
-			case kvs := <-recvCh:
-				batch.Kv = append(batch.Kv, kvs.Kv...)
-			default:
-				if len(batch.GetKv()) > 0 {
-					return cb(batch)
-				}
-				return nil
-			}
-		}
-	}
-	for {
-		select {
-		case <-c.HasBeenClosed():
-			// No need to delete here. Closer will be called only while
-			// closing DB. Subscriber will be deleted by cleanSubscribers.
-			// Drain any pending updates.
-			err := slurp(new(pb.KVList))
-			c.Done()
-			return err
-		case <-ctx.Done():
-			c.Done()
-			// Delete the subscriber to avoid further updates.
-			db.pub.deleteSubscriber(id)
-			return ctx.Err()
-		case batch := <-recvCh:
-			err := slurp(batch)
-			if err != nil {
-				c.Done()
-				// Delete the subscriber if there is an error from the callback.
-				db.pub.deleteSubscriber(id)
-				return err
-			}
-		}
-	}
-}
-
-// shouldEncrypt tells whether data should be encrypted or not.
-func (db *DB) shouldEncrypt() bool {
-	return len(db.opt.EncryptionKey) > 0
-}
-
-func (db *DB) syncDir(dir string) error {
-	if db.opt.InMemory {
-		return nil
-	}
-	return syncDir(dir)
-}
-
-func createDirs(opt Options) error {
-	for _, path := range []string{opt.Dir, opt.ValueDir} {
-		dirExists, err := exists(path)
-		if err != nil {
-			return y.Wrapf(err, "Invalid Dir: %q", path)
-		}
-		if !dirExists {
-			if opt.ReadOnly {
-				return errors.Errorf("Cannot find directory %q for read-only open", path)
-			}
-			// Try to create the directory.
-			err = os.Mkdir(path, 0700)
-			if err != nil {
-				return y.Wrapf(err, "Error Creating Dir: %q", path)
-			}
-		}
-	}
-	return nil
-}
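
An illustrative use of the Subscribe API above (editor's sketch, not from the original source; the "user:" prefix is hypothetical):

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Blocks until ctx is done, invoking the callback with batches of matching updates.
	err := db.Subscribe(ctx, func(kvs *badger.KVList) error {
		for _, kv := range kvs.Kv {
			fmt.Printf("changed: %s => %s\n", kv.Key, kv.Value)
		}
		return nil
	}, []byte("user:"))
	if err != nil && err != context.Canceled {
		return err
	}
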
-
-// StreamDB streams the contents of this DB to a new DB with options outOptions that will be
-// created in outDir.
-func (db *DB) StreamDB(outOptions Options) error {
-	outDir := outOptions.Dir
-
-	// Open output DB.
-	outDB, err := OpenManaged(outOptions)
-	if err != nil {
-		return errors.Wrapf(err, "cannot open out DB at %s", outDir)
-	}
-	defer outDB.Close()
-	writer := outDB.NewStreamWriter()
-	if err := writer.Prepare(); err != nil {
-		return errors.Wrapf(err, "cannot create stream writer in out DB at %s", outDir)
-	}
-
-	// Stream contents of DB to the output DB.
-	stream := db.NewStreamAt(math.MaxUint64)
-	stream.LogPrefix = fmt.Sprintf("Streaming DB to new DB at %s", outDir)
-	stream.Send = func(kvs *pb.KVList) error {
-		return writer.Write(kvs)
-	}
-	if err := stream.Orchestrate(context.Background()); err != nil {
-		return errors.Wrapf(err, "cannot stream DB to out DB at %s", outDir)
-	}
-	if err := writer.Flush(); err != nil {
-		return errors.Wrapf(err, "cannot flush writer")
-	}
-	return nil
-}
-
-// MaxVersion returns the maximum committed version across all keys in the DB. It
-// uses the stream framework to find the maximum version.
-func (db *DB) MaxVersion() (uint64, error) {
-	maxVersion := uint64(0)
-	var mu sync.Mutex
-	var stream *Stream
-	if db.opt.managedTxns {
-		stream = db.NewStreamAt(math.MaxUint64)
-	} else {
-		stream = db.NewStream()
-	}
-
-	stream.ChooseKey = func(item *Item) bool {
-		mu.Lock()
-		if item.Version() > maxVersion {
-			maxVersion = item.Version()
-		}
-		mu.Unlock()
-		return false
-	}
-	stream.KeyToList = nil
-	stream.Send = nil
-	if err := stream.Orchestrate(context.Background()); err != nil {
-		return 0, err
-	}
-	return maxVersion, nil
-}
diff --git a/vendor/github.com/dgraph-io/badger/v2/dir_plan9.go b/vendor/github.com/dgraph-io/badger/v2/dir_plan9.go
deleted file mode 100644
index ad323d70..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/dir_plan9.go
+++ /dev/null
@@ -1,150 +0,0 @@
-/*
- * Copyright 2020 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-import (
-	"fmt"
-	"os"
-	"path/filepath"
-	"strings"
-
-	"github.com/pkg/errors"
-)
-
-// directoryLockGuard holds a lock on a directory and a pid file inside. The pid file isn't part
-// of the locking mechanism, it's just advisory.
-type directoryLockGuard struct {
-	// File handle on the directory, which we've locked.
-	f *os.File
-	// The absolute path to our pid file.
-	path string
-}
-
-// acquireDirectoryLock gets a lock on the directory.
-// It will also write our pid to dirPath/pidFileName for convenience.
-// readOnly is not supported on Plan 9.
-func acquireDirectoryLock(dirPath string, pidFileName string, readOnly bool) (
-	*directoryLockGuard, error) {
-	if readOnly {
-		return nil, ErrPlan9NotSupported
-	}
-
-	// Convert to absolute path so that Release still works even if we do an unbalanced
-	// chdir in the meantime.
-	absPidFilePath, err := filepath.Abs(filepath.Join(dirPath, pidFileName))
-	if err != nil {
-		return nil, errors.Wrap(err, "cannot get absolute path for pid lock file")
-	}
-
-	// If the file was unpacked or created by some other program, it might not
-	// have the ModeExclusive bit set. Set it before we call OpenFile, so that we
-	// can be confident that a successful OpenFile implies exclusive use.
-	//
-	// OpenFile fails if the file ModeExclusive bit set *and* the file is already open.
-	// So, if the file is closed when the DB crashed, we're fine. When the process
-	// that was managing the DB crashes, the OS will close the file for us.
-	//
-	// This bit of code is copied from Go's lockedfile internal package:
-	// https://github.com/golang/go/blob/go1.15rc1/src/cmd/go/internal/lockedfile/lockedfile_plan9.go#L58
-	if fi, err := os.Stat(absPidFilePath); err == nil {
-		if fi.Mode()&os.ModeExclusive == 0 {
-			if err := os.Chmod(absPidFilePath, fi.Mode()|os.ModeExclusive); err != nil {
-				return nil, errors.Wrapf(err, "could not set exclusive mode bit")
-			}
-		}
-	} else if !os.IsNotExist(err) {
-		return nil, err
-	}
-	f, err := os.OpenFile(absPidFilePath, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0666|os.ModeExclusive)
-	if err != nil {
-		if isLocked(err) {
-			return nil, errors.Wrapf(err,
-				"Cannot open pid lock file %q. Another process is using this Badger database",
-				absPidFilePath)
-		}
-		return nil, errors.Wrapf(err, "Cannot open pid lock file %q", absPidFilePath)
-	}
-
-	if _, err = fmt.Fprintf(f, "%d\n", os.Getpid()); err != nil {
-		f.Close()
-		return nil, errors.Wrapf(err, "could not write pid")
-	}
-	return &directoryLockGuard{f, absPidFilePath}, nil
-}
-
-// Release deletes the pid file and releases our lock on the directory.
-func (guard *directoryLockGuard) release() error {
-	// It's important that we remove the pid file first.
-	err := os.Remove(guard.path)
-
-	if closeErr := guard.f.Close(); err == nil {
-		err = closeErr
-	}
-	guard.path = ""
-	guard.f = nil
-
-	return err
-}
-
-// openDir opens a directory for syncing.
-func openDir(path string) (*os.File, error) { return os.Open(path) }
-
-// When you create or delete a file, you have to ensure the directory entry for the file is synced
-// in order to guarantee the file is visible (if the system crashes). (See the man page for fsync,
-// or see https://github.com/coreos/etcd/issues/6368 for an example.)
-func syncDir(dir string) error {
-	f, err := openDir(dir)
-	if err != nil {
-		return errors.Wrapf(err, "While opening directory: %s.", dir)
-	}
-
-	err = f.Sync()
-	closeErr := f.Close()
-	if err != nil {
-		return errors.Wrapf(err, "While syncing directory: %s.", dir)
-	}
-	return errors.Wrapf(closeErr, "While closing directory: %s.", dir)
-}
-
-// Opening an exclusive-use file returns an error.
-// The expected error strings are:
-//
-//  - "open/create -- file is locked" (cwfs, kfs)
-//  - "exclusive lock" (fossil)
-//  - "exclusive use file already open" (ramfs)
-//
-// See https://github.com/golang/go/blob/go1.15rc1/src/cmd/go/internal/lockedfile/lockedfile_plan9.go#L16
-var lockedErrStrings = [...]string{
-	"file is locked",
-	"exclusive lock",
-	"exclusive use file already open",
-}
-
-// Even though plan9 doesn't support the Lock/RLock/Unlock functions to
-// manipulate already-open files, IsLocked is still meaningful: os.OpenFile
-// itself may return errors that indicate that a file with the ModeExclusive bit
-// set is already open.
-func isLocked(err error) bool {
-	s := err.Error()
-
-	for _, frag := range lockedErrStrings {
-		if strings.Contains(s, frag) {
-			return true
-		}
-	}
-	return false
-}
diff --git a/vendor/github.com/dgraph-io/badger/v2/dir_unix.go b/vendor/github.com/dgraph-io/badger/v2/dir_unix.go
deleted file mode 100644
index f8457b0b..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/dir_unix.go
+++ /dev/null
@@ -1,118 +0,0 @@
-// +build !windows,!plan9
-
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-import (
-	"fmt"
-	"io/ioutil"
-	"os"
-	"path/filepath"
-
-	"github.com/pkg/errors"
-	"golang.org/x/sys/unix"
-)
-
-// directoryLockGuard holds a lock on a directory and a pid file inside. The pid file isn't part
-// of the locking mechanism, it's just advisory.
-type directoryLockGuard struct {
-	// File handle on the directory, which we've flocked.
-	f *os.File
-	// The absolute path to our pid file.
-	path string
-	// Was this a shared lock for a read-only database?
-	readOnly bool
-}
-
-// acquireDirectoryLock gets a lock on the directory (using flock). If
-// this is not read-only, it will also write our pid to
-// dirPath/pidFileName for convenience.
-func acquireDirectoryLock(dirPath string, pidFileName string, readOnly bool) (
-	*directoryLockGuard, error) {
-	// Convert to absolute path so that Release still works even if we do an unbalanced
-	// chdir in the meantime.
-	absPidFilePath, err := filepath.Abs(filepath.Join(dirPath, pidFileName))
-	if err != nil {
-		return nil, errors.Wrap(err, "cannot get absolute path for pid lock file")
-	}
-	f, err := os.Open(dirPath)
-	if err != nil {
-		return nil, errors.Wrapf(err, "cannot open directory %q", dirPath)
-	}
-	opts := unix.LOCK_EX | unix.LOCK_NB
-	if readOnly {
-		opts = unix.LOCK_SH | unix.LOCK_NB
-	}
-
-	err = unix.Flock(int(f.Fd()), opts)
-	if err != nil {
-		f.Close()
-		return nil, errors.Wrapf(err,
-			"Cannot acquire directory lock on %q. Another process is using this Badger database.",
-			dirPath)
-	}
-
-	if !readOnly {
-		// Yes, we happily overwrite a pre-existing pid file. We're the
-		// only read-write badger process using this directory.
-		err = ioutil.WriteFile(absPidFilePath, []byte(fmt.Sprintf("%d\n", os.Getpid())), 0666)
-		if err != nil {
-			f.Close()
-			return nil, errors.Wrapf(err,
-				"Cannot write pid file %q", absPidFilePath)
-		}
-	}
-	return &directoryLockGuard{f, absPidFilePath, readOnly}, nil
-}
-
-// Release deletes the pid file and releases our lock on the directory.
-func (guard *directoryLockGuard) release() error {
-	var err error
-	if !guard.readOnly {
-		// It's important that we remove the pid file first.
-		err = os.Remove(guard.path)
-	}
-
-	if closeErr := guard.f.Close(); err == nil {
-		err = closeErr
-	}
-	guard.path = ""
-	guard.f = nil
-
-	return err
-}
-
-// openDir opens a directory for syncing.
-func openDir(path string) (*os.File, error) { return os.Open(path) }
-
-// When you create or delete a file, you have to ensure the directory entry for the file is synced
-// in order to guarantee the file is visible (if the system crashes). (See the man page for fsync,
-// or see https://github.com/coreos/etcd/issues/6368 for an example.)
-func syncDir(dir string) error {
-	f, err := openDir(dir)
-	if err != nil {
-		return errors.Wrapf(err, "While opening directory: %s.", dir)
-	}
-
-	err = f.Sync()
-	closeErr := f.Close()
-	if err != nil {
-		return errors.Wrapf(err, "While syncing directory: %s.", dir)
-	}
-	return errors.Wrapf(closeErr, "While closing directory: %s.", dir)
-}
diff --git a/vendor/github.com/dgraph-io/badger/v2/dir_windows.go b/vendor/github.com/dgraph-io/badger/v2/dir_windows.go
deleted file mode 100644
index 60f982e2..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/dir_windows.go
+++ /dev/null
@@ -1,110 +0,0 @@
-// +build windows
-
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-// OpenDir opens a directory in windows with write access for syncing.
-import (
-	"os"
-	"path/filepath"
-	"syscall"
-
-	"github.com/pkg/errors"
-)
-
-// FILE_ATTRIBUTE_TEMPORARY - A file that is being used for temporary storage.
-// FILE_FLAG_DELETE_ON_CLOSE - The file is to be deleted immediately after all of its handles are
-// closed, which includes the specified handle and any other open or duplicated handles.
-// See: https://docs.microsoft.com/en-us/windows/desktop/FileIO/file-attribute-constants
-// NOTE: Added here to avoid importing golang.org/x/sys/windows
-const (
-	FILE_ATTRIBUTE_TEMPORARY  = 0x00000100
-	FILE_FLAG_DELETE_ON_CLOSE = 0x04000000
-)
-
-func openDir(path string) (*os.File, error) {
-	fd, err := openDirWin(path)
-	if err != nil {
-		return nil, err
-	}
-	return os.NewFile(uintptr(fd), path), nil
-}
-
-func openDirWin(path string) (fd syscall.Handle, err error) {
-	if len(path) == 0 {
-		return syscall.InvalidHandle, syscall.ERROR_FILE_NOT_FOUND
-	}
-	pathp, err := syscall.UTF16PtrFromString(path)
-	if err != nil {
-		return syscall.InvalidHandle, err
-	}
-	access := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE)
-	sharemode := uint32(syscall.FILE_SHARE_READ | syscall.FILE_SHARE_WRITE)
-	createmode := uint32(syscall.OPEN_EXISTING)
-	fl := uint32(syscall.FILE_FLAG_BACKUP_SEMANTICS)
-	return syscall.CreateFile(pathp, access, sharemode, nil, createmode, fl, 0)
-}
-
-// DirectoryLockGuard holds a lock on the directory.
-type directoryLockGuard struct {
-	h    syscall.Handle
-	path string
-}
-
-// AcquireDirectoryLock acquires exclusive access to a directory.
-func acquireDirectoryLock(dirPath string, pidFileName string, readOnly bool) (*directoryLockGuard, error) {
-	if readOnly {
-		return nil, ErrWindowsNotSupported
-	}
-
-	// Convert to absolute path so that Release still works even if we do an unbalanced
-	// chdir in the meantime.
-	absLockFilePath, err := filepath.Abs(filepath.Join(dirPath, pidFileName))
-	if err != nil {
-		return nil, errors.Wrap(err, "Cannot get absolute path for pid lock file")
-	}
-
-	// This call creates a file handler in memory that only one process can use at a time. When
-	// that process ends, the file is deleted by the system.
-	// FILE_ATTRIBUTE_TEMPORARY is used to tell Windows to try to create the handle in memory.
-	// FILE_FLAG_DELETE_ON_CLOSE is not specified in syscall_windows.go but tells Windows to delete
-	// the file when all processes holding the handler are closed.
-	// XXX: this works but it's a bit klunky. i'd prefer to use LockFileEx but it needs unsafe pkg.
-	h, err := syscall.CreateFile(
-		syscall.StringToUTF16Ptr(absLockFilePath), 0, 0, nil,
-		syscall.OPEN_ALWAYS,
-		uint32(FILE_ATTRIBUTE_TEMPORARY|FILE_FLAG_DELETE_ON_CLOSE),
-		0)
-	if err != nil {
-		return nil, errors.Wrapf(err,
-			"Cannot create lock file %q. Another process is using this Badger database",
-			absLockFilePath)
-	}
-
-	return &directoryLockGuard{h: h, path: absLockFilePath}, nil
-}
-
-// Release removes the directory lock.
-func (g *directoryLockGuard) release() error {
-	g.path = ""
-	return syscall.CloseHandle(g.h)
-}
-
-// Windows doesn't support syncing directories to the file system. See
-// https://github.com/dgraph-io/badger/issues/699#issuecomment-504133587 for more details.
-func syncDir(dir string) error { return nil }
diff --git a/vendor/github.com/dgraph-io/badger/v2/doc.go b/vendor/github.com/dgraph-io/badger/v2/doc.go
deleted file mode 100644
index 83dc9a28..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/doc.go
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
-Package badger implements an embeddable, simple and fast key-value database,
-written in pure Go. It is designed to be highly performant for both reads and
-writes simultaneously. Badger uses Multi-Version Concurrency Control (MVCC), and
-supports transactions. It runs transactions concurrently, with serializable
-snapshot isolation guarantees.
-
-Badger uses an LSM tree along with a value log to separate keys from values,
-hence reducing both write amplification and the size of the LSM tree. This
-allows LSM tree to be served entirely from RAM, while the values are served
-from SSD.
-
-
-Usage
-
-Badger has the following main types: DB, Txn, Item and Iterator. DB contains
-keys that are associated with values. It must be opened with the appropriate
-options before it can be accessed.
-
-All operations happen inside a Txn. Txn represents a transaction, which can
-be read-only or read-write. Read-only transactions can read values for a
-given key (which are returned inside an Item), or iterate over a set of
-key-value pairs using an Iterator (which are returned as Item type values as
-well). Read-write transactions can also update and delete keys from the DB.
-
-See the examples for more usage details.
-*/
-package badger
diff --git a/vendor/github.com/dgraph-io/badger/v2/errors.go b/vendor/github.com/dgraph-io/badger/v2/errors.go
deleted file mode 100644
index fed827ab..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/errors.go
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-import (
-	"math"
-
-	"github.com/pkg/errors"
-)
-
-const (
-	// ValueThresholdLimit is the maximum permissible value of opt.ValueThreshold.
-	ValueThresholdLimit = math.MaxUint16 - 16 + 1
-)
-
-var (
-	// ErrValueLogSize is returned when opt.ValueLogFileSize option is not within the valid
-	// range.
-	ErrValueLogSize = errors.New("Invalid ValueLogFileSize, must be between 1MB and 2GB")
-
-	// ErrKeyNotFound is returned when key isn't found on a txn.Get.
-	ErrKeyNotFound = errors.New("Key not found")
-
-	// ErrTxnTooBig is returned if too many writes fit into a single transaction.
-	ErrTxnTooBig = errors.New("Txn is too big to fit into one request")
-
-	// ErrConflict is returned when a transaction conflicts with another transaction. This can
-	// happen if the read rows had been updated concurrently by another transaction.
-	ErrConflict = errors.New("Transaction Conflict. Please retry")
-
-	// ErrReadOnlyTxn is returned if an update function is called on a read-only transaction.
-	ErrReadOnlyTxn = errors.New("No sets or deletes are allowed in a read-only transaction")
-
-	// ErrDiscardedTxn is returned if a previously discarded transaction is re-used.
-	ErrDiscardedTxn = errors.New("This transaction has been discarded. Create a new one")
-
-	// ErrEmptyKey is returned if an empty key is passed on an update function.
-	ErrEmptyKey = errors.New("Key cannot be empty")
-
-	// ErrInvalidKey is returned if the key has a special !badger! prefix,
-	// reserved for internal usage.
-	ErrInvalidKey = errors.New("Key is using a reserved !badger! prefix")
-
-	// ErrRetry is returned when a log file containing the value is not found.
-	// This usually indicates that it may have been garbage collected, and the
-	// operation needs to be retried.
-	ErrRetry = errors.New("Unable to find log file. Please retry")
-
-	// ErrThresholdZero is returned if threshold is set to zero, and value log GC is called.
-	// In such a case, GC can't be run.
-	ErrThresholdZero = errors.New(
-		"Value log GC can't run because threshold is set to zero")
-
-	// ErrNoRewrite is returned if a call for value log GC doesn't result in a log file rewrite.
-	ErrNoRewrite = errors.New(
-		"Value log GC attempt didn't result in any cleanup")
-
-	// ErrRejected is returned if a value log GC is called either while another GC is running, or
-	// after DB::Close has been called.
-	ErrRejected = errors.New("Value log GC request rejected")
-
-	// ErrInvalidRequest is returned if the user request is invalid.
-	ErrInvalidRequest = errors.New("Invalid request")
-
-	// ErrManagedTxn is returned if the user tries to use an API which isn't
-	// allowed due to external management of transactions, when using ManagedDB.
-	ErrManagedTxn = errors.New(
-		"Invalid API request. Not allowed to perform this action using ManagedDB")
-
-	// ErrInvalidDump is returned if a data dump made previously cannot be loaded into the database.
-	ErrInvalidDump = errors.New("Data dump cannot be read")
-
-	// ErrZeroBandwidth is returned if the user passes in zero bandwidth for sequence.
-	ErrZeroBandwidth = errors.New("Bandwidth must be greater than zero")
-
-	// ErrInvalidLoadingMode is returned when opt.ValueLogLoadingMode option is not
-	// within the valid range.
-	ErrInvalidLoadingMode = errors.New("Invalid ValueLogLoadingMode, must be FileIO or MemoryMap")
-
-	// ErrReplayNeeded is returned when opt.ReadOnly is set but the
-	// database requires a value log replay.
-	ErrReplayNeeded = errors.New("Database was not properly closed, cannot open read-only")
-
-	// ErrWindowsNotSupported is returned when opt.ReadOnly is used on Windows.
-	ErrWindowsNotSupported = errors.New("Read-only mode is not supported on Windows")
-
-	// ErrPlan9NotSupported is returned when opt.ReadOnly is used on Plan 9.
-	ErrPlan9NotSupported = errors.New("Read-only mode is not supported on Plan 9")
-
-	// ErrTruncateNeeded is returned when the value log gets corrupt, and requires truncation of
-	// corrupt data to allow Badger to run properly.
-	ErrTruncateNeeded = errors.New(
-		"Value log truncate required to run DB. This might result in data loss")
-
-	// ErrBlockedWrites is returned if the user called DropAll. During the process of dropping all
-	// data from Badger, we stop accepting new writes, by returning this error.
-	ErrBlockedWrites = errors.New("Writes are blocked, possibly due to DropAll or Close")
-
-	// ErrNilCallback is returned when subscriber's callback is nil.
-	ErrNilCallback = errors.New("Callback cannot be nil")
-
-	// ErrEncryptionKeyMismatch is returned when the storage key is not
-	// matched with the key previously given.
-	ErrEncryptionKeyMismatch = errors.New("Encryption key mismatch")
-
-	// ErrInvalidDataKeyID is returned if the datakey id is invalid.
-	ErrInvalidDataKeyID = errors.New("Invalid datakey id")
-
-	// ErrInvalidEncryptionKey is returned if length of encryption keys is invalid.
-	ErrInvalidEncryptionKey = errors.New("Encryption key's length should be " +
-		"either 16, 24, or 32 bytes")
-
-	// ErrGCInMemoryMode is returned when db.RunValueLogGC is called in in-memory mode.
-	ErrGCInMemoryMode = errors.New("Cannot run value log GC when DB is opened in InMemory mode")
-
-	// ErrDBClosed is returned when a get operation is performed after closing the DB.
-	ErrDBClosed = errors.New("DB Closed")
-)
diff --git a/vendor/github.com/dgraph-io/badger/v2/histogram.go b/vendor/github.com/dgraph-io/badger/v2/histogram.go
deleted file mode 100644
index d8c94bb7..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/histogram.go
+++ /dev/null
@@ -1,169 +0,0 @@
-/*
- * Copyright 2019 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package badger
-
-import (
-	"fmt"
-	"math"
-)
-
-// PrintHistogram builds and displays the key-value size histogram.
-// When keyPrefix is set, only the keys that have prefix "keyPrefix" are
-// considered for creating the histogram.
-func (db *DB) PrintHistogram(keyPrefix []byte) {
-	if db == nil {
-		fmt.Println("\nCannot build histogram: DB is nil.")
-		return
-	}
-	histogram := db.buildHistogram(keyPrefix)
-	fmt.Printf("Histogram of key sizes (in bytes)\n")
-	histogram.keySizeHistogram.printHistogram()
-	fmt.Printf("Histogram of value sizes (in bytes)\n")
-	histogram.valueSizeHistogram.printHistogram()
-}
-
-// histogramData stores information about a histogram.
-type histogramData struct {
-	bins        []int64
-	countPerBin []int64
-	totalCount  int64
-	min         int64
-	max         int64
-	sum         int64
-}
-
-// sizeHistogram contains keySize histogram and valueSize histogram.
-type sizeHistogram struct {
-	keySizeHistogram, valueSizeHistogram histogramData
-}
-
-// newSizeHistogram returns a new instance of keyValueSizeHistogram with
-// properly initialized fields.
-func newSizeHistogram() *sizeHistogram {
-	// TODO(ibrahim): find appropriate bin size.
-	keyBins := createHistogramBins(1, 16)
-	valueBins := createHistogramBins(1, 30)
-	return &sizeHistogram{
-		keySizeHistogram: histogramData{
-			bins:        keyBins,
-			countPerBin: make([]int64, len(keyBins)+1),
-			max:         math.MinInt64,
-			min:         math.MaxInt64,
-			sum:         0,
-		},
-		valueSizeHistogram: histogramData{
-			bins:        valueBins,
-			countPerBin: make([]int64, len(valueBins)+1),
-			max:         math.MinInt64,
-			min:         math.MaxInt64,
-			sum:         0,
-		},
-	}
-}
-
-// createHistogramBins creates bins for an histogram. The bin sizes are powers
-// of two of the form [2^min_exponent, ..., 2^max_exponent].
-func createHistogramBins(minExponent, maxExponent uint32) []int64 {
-	var bins []int64
-	for i := minExponent; i <= maxExponent; i++ {
-		bins = append(bins, int64(1)<<i)
-	}
-	return bins
-}
-
-// Update the min and max fields if value is less than or greater than the
-// current min/max value.
-func (histogram *histogramData) Update(value int64) {
-	if value > histogram.max {
-		histogram.max = value
-	}
-	if value < histogram.min {
-		histogram.min = value
-	}
-
-	histogram.sum += value
-	histogram.totalCount++
-
-	for index := 0; index <= len(histogram.bins); index++ {
-		// Allocate value in the last buckets if we reached the end of the Bounds array.
-		if index == len(histogram.bins) {
-			histogram.countPerBin[index]++
-			break
-		}
-
-		// Check if the value should be added to the "index" bin
-		if value < int64(histogram.bins[index]) {
-			histogram.countPerBin[index]++
-			break
-		}
-	}
-}
-
-// buildHistogram builds the key-value size histogram.
-// When keyPrefix is set, only the keys that have prefix "keyPrefix" are
-// considered for creating the histogram.
-func (db *DB) buildHistogram(keyPrefix []byte) *sizeHistogram {
-	txn := db.NewTransaction(false)
-	defer txn.Discard()
-
-	itr := txn.NewIterator(DefaultIteratorOptions)
-	defer itr.Close()
-
-	badgerHistogram := newSizeHistogram()
-
-	// Collect key and value sizes.
-	for itr.Seek(keyPrefix); itr.ValidForPrefix(keyPrefix); itr.Next() {
-		item := itr.Item()
-		badgerHistogram.keySizeHistogram.Update(item.KeySize())
-		badgerHistogram.valueSizeHistogram.Update(item.ValueSize())
-	}
-	return badgerHistogram
-}
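
Usage of the histogram API above is a one-liner; a hedged example (the prefix is hypothetical, and per buildHistogram a nil prefix covers every key):

	db.PrintHistogram([]byte("user:")) // or db.PrintHistogram(nil) for all keys
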
-func (histogram histogramData) printHistogram() { - fmt.Printf("Total count: %d\n", histogram.totalCount) - fmt.Printf("Min value: %d\n", histogram.min) - fmt.Printf("Max value: %d\n", histogram.max) - fmt.Printf("Mean: %.2f\n", float64(histogram.sum)/float64(histogram.totalCount)) - fmt.Printf("%24s %9s\n", "Range", "Count") - - numBins := len(histogram.bins) - for index, count := range histogram.countPerBin { - if count == 0 { - continue - } - - // The last bin represents the bin that contains the range from - // the last bin up to infinity so it's processed differently than the - // other bins. - if index == len(histogram.countPerBin)-1 { - lowerBound := int(histogram.bins[numBins-1]) - fmt.Printf("[%10d, %10s) %9d\n", lowerBound, "infinity", count) - continue - } - - upperBound := int(histogram.bins[index]) - lowerBound := 0 - if index > 0 { - lowerBound = int(histogram.bins[index-1]) - } - - fmt.Printf("[%10d, %10d) %9d\n", lowerBound, upperBound, count) - } - fmt.Println() -} diff --git a/vendor/github.com/dgraph-io/badger/v2/iterator.go b/vendor/github.com/dgraph-io/badger/v2/iterator.go deleted file mode 100644 index 11d0c27a..00000000 --- a/vendor/github.com/dgraph-io/badger/v2/iterator.go +++ /dev/null @@ -1,756 +0,0 @@ -/* - * Copyright 2017 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package badger - -import ( - "bytes" - "fmt" - "hash/crc32" - "sort" - "sync" - "sync/atomic" - "time" - - "github.com/dgraph-io/badger/v2/options" - "github.com/dgraph-io/badger/v2/table" - "github.com/dgryski/go-farm" - - "github.com/dgraph-io/badger/v2/y" -) - -type prefetchStatus uint8 - -const ( - prefetched prefetchStatus = iota + 1 -) - -// Item is returned during iteration. Both the Key() and Value() output is only valid until -// iterator.Next() is called. -type Item struct { - status prefetchStatus - err error - wg sync.WaitGroup - db *DB - key []byte - vptr []byte - meta byte // We need to store meta to know about bitValuePointer. - userMeta byte - expiresAt uint64 - val []byte - slice *y.Slice // Used only during prefetching. - next *Item - version uint64 - txn *Txn -} - -// String returns a string representation of Item -func (item *Item) String() string { - return fmt.Sprintf("key=%q, version=%d, meta=%x", item.Key(), item.Version(), item.meta) -} - -// Key returns the key. -// -// Key is only valid as long as item is valid, or transaction is valid. If you need to use it -// outside its validity, please use KeyCopy. -func (item *Item) Key() []byte { - return item.key -} - -// KeyCopy returns a copy of the key of the item, writing it to dst slice. -// If nil is passed, or capacity of dst isn't sufficient, a new slice would be allocated and -// returned. -func (item *Item) KeyCopy(dst []byte) []byte { - return y.SafeCopy(dst, item.key) -} - -// Version returns the commit timestamp of the item. -func (item *Item) Version() uint64 { - return item.version -} - -// Value retrieves the value of the item from the value log. 
-// -// This method must be called within a transaction. Calling it outside a -// transaction is considered undefined behavior. If an iterator is being used, -// then Item.Value() is defined in the current iteration only, because items are -// reused. -// -// If you need to use a value outside a transaction, please use Item.ValueCopy -// instead, or copy it yourself. Value might change once discard or commit is called. -// Use ValueCopy if you want to do a Set after Get. -func (item *Item) Value(fn func(val []byte) error) error { - item.wg.Wait() - if item.status == prefetched { - if item.err == nil && fn != nil { - if err := fn(item.val); err != nil { - return err - } - } - return item.err - } - buf, cb, err := item.yieldItemValue() - defer runCallback(cb) - if err != nil { - return err - } - if fn != nil { - return fn(buf) - } - return nil -} - -// ValueCopy returns a copy of the value of the item from the value log, writing it to dst slice. -// If nil is passed, or capacity of dst isn't sufficient, a new slice would be allocated and -// returned. Tip: It might make sense to reuse the returned slice as dst argument for the next call. -// -// This function is useful in long running iterate/update transactions to avoid a write deadlock. -// See Github issue: https://github.com/dgraph-io/badger/issues/315 -func (item *Item) ValueCopy(dst []byte) ([]byte, error) { - item.wg.Wait() - if item.status == prefetched { - return y.SafeCopy(dst, item.val), item.err - } - buf, cb, err := item.yieldItemValue() - defer runCallback(cb) - return y.SafeCopy(dst, buf), err -} - -func (item *Item) hasValue() bool { - if item.meta == 0 && item.vptr == nil { - // key not found - return false - } - return true -} - -// IsDeletedOrExpired returns true if item contains deleted or expired value. -func (item *Item) IsDeletedOrExpired() bool { - return isDeletedOrExpired(item.meta, item.expiresAt) -} - -// DiscardEarlierVersions returns whether the item was created with the -// option to discard earlier versions of a key when multiple are available. -func (item *Item) DiscardEarlierVersions() bool { - return item.meta&bitDiscardEarlierVersions > 0 -} - -func (item *Item) yieldItemValue() ([]byte, func(), error) { - key := item.Key() // No need to copy. - for { - if !item.hasValue() { - return nil, nil, nil - } - - if item.slice == nil { - item.slice = new(y.Slice) - } - - if (item.meta & bitValuePointer) == 0 { - val := item.slice.Resize(len(item.vptr)) - copy(val, item.vptr) - return val, nil, nil - } - - var vp valuePointer - vp.Decode(item.vptr) - result, cb, err := item.db.vlog.Read(vp, item.slice) - if err != ErrRetry { - if err != nil { - item.db.opt.Logger.Errorf(`Unable to read: Key: %v, Version : %v, - meta: %v, userMeta: %v`, key, item.version, item.meta, item.userMeta) - } - return result, cb, err - } - if bytes.HasPrefix(key, badgerMove) { - // err == ErrRetry - // Error is retry even after checking the move keyspace. So, let's - // just assume that value is not present. - return nil, cb, nil - } - - // The value pointer is pointing to a deleted value log. Look for the - // move key and read that instead. - runCallback(cb) - // Do not put badgerMove on the left in append. It seems to cause some sort of manipulation. - keyTs := y.KeyWithTs(item.Key(), item.Version()) - key = make([]byte, len(badgerMove)+len(keyTs)) - n := copy(key, badgerMove) - copy(key[n:], keyTs) - // Note that we can't set item.key to move key, because that would - // change the key user sees before and after this call. 
Also, this move - // logic is internal logic and should not impact the external behavior - // of the retrieval. - vs, err := item.db.get(key) - if err != nil { - return nil, nil, err - } - if vs.Version != item.Version() { - return nil, nil, nil - } - // Bug fix: Always copy the vs.Value into vptr here. Otherwise, when item is reused this - // slice gets overwritten. - item.vptr = y.SafeCopy(item.vptr, vs.Value) - item.meta &^= bitValuePointer // Clear the value pointer bit. - if vs.Meta&bitValuePointer > 0 { - item.meta |= bitValuePointer // This meta would only be about value pointer. - } - } -} - -func runCallback(cb func()) { - if cb != nil { - cb() - } -} - -func (item *Item) prefetchValue() { - val, cb, err := item.yieldItemValue() - defer runCallback(cb) - - item.err = err - item.status = prefetched - if val == nil { - return - } - if item.db.opt.ValueLogLoadingMode == options.MemoryMap { - buf := item.slice.Resize(len(val)) - copy(buf, val) - item.val = buf - } else { - item.val = val - } -} - -// EstimatedSize returns the approximate size of the key-value pair. -// -// This can be called while iterating through a store to quickly estimate the -// size of a range of key-value pairs (without fetching the corresponding -// values). -func (item *Item) EstimatedSize() int64 { - if !item.hasValue() { - return 0 - } - if (item.meta & bitValuePointer) == 0 { - return int64(len(item.key) + len(item.vptr)) - } - var vp valuePointer - vp.Decode(item.vptr) - return int64(vp.Len) // includes key length. -} - -// KeySize returns the size of the key. -// Exact size of the key is key + 8 bytes of timestamp -func (item *Item) KeySize() int64 { - return int64(len(item.key)) -} - -// ValueSize returns the approximate size of the value. -// -// This can be called to quickly estimate the size of a value without fetching -// it. -func (item *Item) ValueSize() int64 { - if !item.hasValue() { - return 0 - } - if (item.meta & bitValuePointer) == 0 { - return int64(len(item.vptr)) - } - var vp valuePointer - vp.Decode(item.vptr) - - klen := int64(len(item.key) + 8) // 8 bytes for timestamp. - // 6 bytes are for the approximate length of the header. Since header is encoded in varint, we - // cannot find the exact length of header without fetching it. - return int64(vp.Len) - klen - 6 - crc32.Size -} - -// UserMeta returns the userMeta set by the user. Typically, this byte, optionally set by the user -// is used to interpret the value. -func (item *Item) UserMeta() byte { - return item.userMeta -} - -// ExpiresAt returns a Unix time value indicating when the item will be -// considered expired. 0 indicates that the item will never expire. -func (item *Item) ExpiresAt() uint64 { - return item.expiresAt -} - -// TODO: Switch this to use linked list container in Go. -type list struct { - head *Item - tail *Item -} - -func (l *list) push(i *Item) { - i.next = nil - if l.tail == nil { - l.head = i - l.tail = i - return - } - l.tail.next = i - l.tail = i -} - -func (l *list) pop() *Item { - if l.head == nil { - return nil - } - i := l.head - if l.head == l.tail { - l.tail = nil - l.head = nil - } else { - l.head = i.next - } - i.next = nil - return i -} - -// IteratorOptions is used to set options when iterating over Badger key-value -// stores. -// -// This package provides DefaultIteratorOptions which contains options that -// should work for most applications. Consider using that as a starting point -// before customizing it for your own needs. 
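The doc comment above suggests starting from DefaultIteratorOptions and customizing it. A small sketch of that pattern against the Badger v2 API as vendored here; the in-memory DB is only to keep the example self-contained. Disabling PrefetchValues gives a cheap key-only scan, since no value log reads are scheduled:

package main

import (
	"fmt"
	"log"

	badger "github.com/dgraph-io/badger/v2"
)

func main() {
	db, err := badger.Open(badger.DefaultOptions("").WithInMemory(true))
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	opt := badger.DefaultIteratorOptions
	opt.PrefetchValues = false // key-only scan: skip value log reads

	err = db.View(func(txn *badger.Txn) error {
		it := txn.NewIterator(opt)
		defer it.Close()
		for it.Rewind(); it.Valid(); it.Next() {
			fmt.Printf("key=%s\n", it.Item().Key())
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}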
-type IteratorOptions struct { - // Indicates whether we should prefetch values during iteration and store them. - PrefetchValues bool - // How many KV pairs to prefetch while iterating. Valid only if PrefetchValues is true. - PrefetchSize int - Reverse bool // Direction of iteration. False is forward, true is backward. - AllVersions bool // Fetch all valid versions of the same key. - - // The following option is used to narrow down the SSTables that iterator picks up. If - // Prefix is specified, only tables which could have this prefix are picked based on their range - // of keys. - Prefix []byte // Only iterate over this given prefix. - prefixIsKey bool // If set, use the prefix for bloom filter lookup. - - InternalAccess bool // Used to allow internal access to badger keys. -} - -func (opt *IteratorOptions) compareToPrefix(key []byte) int { - // We should compare key without timestamp. For example key - a[TS] might be > "aa" prefix. - key = y.ParseKey(key) - if len(key) > len(opt.Prefix) { - key = key[:len(opt.Prefix)] - } - return bytes.Compare(key, opt.Prefix) -} - -func (opt *IteratorOptions) pickTable(t table.TableInterface) bool { - if len(opt.Prefix) == 0 { - return true - } - if opt.compareToPrefix(t.Smallest()) > 0 { - return false - } - if opt.compareToPrefix(t.Biggest()) < 0 { - return false - } - // Bloom filter lookup would only work if opt.Prefix does NOT have the read - // timestamp as part of the key. - if opt.prefixIsKey && t.DoesNotHave(farm.Fingerprint64(opt.Prefix)) { - return false - } - return true -} - -// pickTables picks the necessary table for the iterator. This function also assumes -// that the tables are sorted in the right order. -func (opt *IteratorOptions) pickTables(all []*table.Table) []*table.Table { - if len(opt.Prefix) == 0 { - out := make([]*table.Table, len(all)) - copy(out, all) - return out - } - sIdx := sort.Search(len(all), func(i int) bool { - return opt.compareToPrefix(all[i].Biggest()) >= 0 - }) - if sIdx == len(all) { - // Not found. - return []*table.Table{} - } - - filtered := all[sIdx:] - if !opt.prefixIsKey { - eIdx := sort.Search(len(filtered), func(i int) bool { - return opt.compareToPrefix(filtered[i].Smallest()) > 0 - }) - out := make([]*table.Table, len(filtered[:eIdx])) - copy(out, filtered[:eIdx]) - return out - } - - var out []*table.Table - hash := farm.Fingerprint64(opt.Prefix) - for _, t := range filtered { - // When we encounter the first table whose smallest key is higher than - // opt.Prefix, we can stop. - if opt.compareToPrefix(t.Smallest()) > 0 { - return out - } - // opt.Prefix is actually the key. So, we can run bloom filter checks - // as well. - if t.DoesNotHave(hash) { - continue - } - out = append(out, t) - } - return out -} - -// DefaultIteratorOptions contains default options when iterating over Badger key-value stores. -var DefaultIteratorOptions = IteratorOptions{ - PrefetchValues: true, - PrefetchSize: 100, - Reverse: false, - AllVersions: false, -} - -// Iterator helps iterating over the KV pairs in a lexicographically sorted order. -type Iterator struct { - iitr y.Iterator - txn *Txn - readTs uint64 - - opt IteratorOptions - item *Item - data list - waste list - - lastKey []byte // Used to skip over multiple versions of the same key. - - closed bool - - // ThreadId is an optional value that can be set to identify which goroutine created - // the iterator. 
It can be used, for example, to uniquely identify each of the - // iterators created by the stream interface - ThreadId int -} - -// NewIterator returns a new iterator. Depending upon the options, either only keys, or both -// key-value pairs would be fetched. The keys are returned in lexicographically sorted order. -// Using prefetch is recommended if you're doing a long running iteration, for performance. -// -// Multiple Iterators: -// For a read-only txn, multiple iterators can be running simultaneously. However, for a read-write -// txn, iterators have the nuance of being a snapshot of the writes for the transaction at the time -// iterator was created. If writes are performed after an iterator is created, then that iterator -// will not be able to see those writes. Only writes performed before an iterator was created can be -// viewed. -func (txn *Txn) NewIterator(opt IteratorOptions) *Iterator { - if txn.discarded { - panic("Transaction has already been discarded") - } - if txn.db.IsClosed() { - panic(ErrDBClosed.Error()) - } - - // Keep track of the number of active iterators. - atomic.AddInt32(&txn.numIterators, 1) - - // TODO: If Prefix is set, only pick those memtables which have keys with - // the prefix. - tables, decr := txn.db.getMemTables() - defer decr() - txn.db.vlog.incrIteratorCount() - var iters []y.Iterator - if itr := txn.newPendingWritesIterator(opt.Reverse); itr != nil { - iters = append(iters, itr) - } - for i := 0; i < len(tables); i++ { - iters = append(iters, tables[i].NewUniIterator(opt.Reverse)) - } - iters = txn.db.lc.appendIterators(iters, &opt) // This will increment references. - - res := &Iterator{ - txn: txn, - iitr: table.NewMergeIterator(iters, opt.Reverse), - opt: opt, - readTs: txn.readTs, - } - return res -} - -// NewKeyIterator is just like NewIterator, but allows the user to iterate over all versions of a -// single key. Internally, it sets the Prefix option in provided opt, and uses that prefix to -// additionally run bloom filter lookups before picking tables from the LSM tree. -func (txn *Txn) NewKeyIterator(key []byte, opt IteratorOptions) *Iterator { - if len(opt.Prefix) > 0 { - panic("opt.Prefix should be nil for NewKeyIterator.") - } - opt.Prefix = key // This key must be without the timestamp. - opt.prefixIsKey = true - opt.AllVersions = true - return txn.NewIterator(opt) -} - -func (it *Iterator) newItem() *Item { - item := it.waste.pop() - if item == nil { - item = &Item{slice: new(y.Slice), db: it.txn.db, txn: it.txn} - } - return item -} - -// Item returns pointer to the current key-value pair. -// This item is only valid until it.Next() gets called. -func (it *Iterator) Item() *Item { - tx := it.txn - tx.addReadKey(it.item.Key()) - return it.item -} - -// Valid returns false when iteration is done. -func (it *Iterator) Valid() bool { - if it.item == nil { - return false - } - if it.opt.prefixIsKey { - return bytes.Equal(it.item.key, it.opt.Prefix) - } - return bytes.HasPrefix(it.item.key, it.opt.Prefix) -} - -// ValidForPrefix returns false when iteration is done -// or when the current key is not prefixed by the specified prefix. -func (it *Iterator) ValidForPrefix(prefix []byte) bool { - return it.Valid() && bytes.HasPrefix(it.item.key, prefix) -} - -// Close would close the iterator. It is important to call this when you're done with iteration. -func (it *Iterator) Close() { - if it.closed { - return - } - it.closed = true - - it.iitr.Close() - // It is important to wait for the fill goroutines to finish. 
Otherwise, we might leave zombie - // goroutines behind, which are waiting to acquire file read locks after DB has been closed. - waitFor := func(l list) { - item := l.pop() - for item != nil { - item.wg.Wait() - item = l.pop() - } - } - waitFor(it.waste) - waitFor(it.data) - - // TODO: We could handle this error. - _ = it.txn.db.vlog.decrIteratorCount() - atomic.AddInt32(&it.txn.numIterators, -1) -} - -// Next would advance the iterator by one. Always check it.Valid() after a Next() -// to ensure you have access to a valid it.Item(). -func (it *Iterator) Next() { - // Reuse current item - it.item.wg.Wait() // Just cleaner to wait before pushing to avoid doing ref counting. - it.waste.push(it.item) - - // Set next item to current - it.item = it.data.pop() - - for it.iitr.Valid() { - if it.parseItem() { - // parseItem calls one extra next. - // This is used to deal with the complexity of reverse iteration. - break - } - } -} - -func isDeletedOrExpired(meta byte, expiresAt uint64) bool { - if meta&bitDelete > 0 { - return true - } - if expiresAt == 0 { - return false - } - return expiresAt <= uint64(time.Now().Unix()) -} - -// parseItem is a complex function because it needs to handle both forward and reverse iteration -// implementation. We store keys such that their versions are sorted in descending order. This makes -// forward iteration efficient, but revese iteration complicated. This tradeoff is better because -// forward iteration is more common than reverse. -// -// This function advances the iterator. -func (it *Iterator) parseItem() bool { - mi := it.iitr - key := mi.Key() - - setItem := func(item *Item) { - if it.item == nil { - it.item = item - } else { - it.data.push(item) - } - } - - // Skip badger keys. - if !it.opt.InternalAccess && bytes.HasPrefix(key, badgerPrefix) { - mi.Next() - return false - } - - // Skip any versions which are beyond the readTs. - version := y.ParseTs(key) - if version > it.readTs { - mi.Next() - return false - } - - if it.opt.AllVersions { - // Return deleted or expired values also, otherwise user can't figure out - // whether the key was deleted. - item := it.newItem() - it.fill(item) - setItem(item) - mi.Next() - return true - } - - // If iterating in forward direction, then just checking the last key against current key would - // be sufficient. - if !it.opt.Reverse { - if y.SameKey(it.lastKey, key) { - mi.Next() - return false - } - // Only track in forward direction. - // We should update lastKey as soon as we find a different key in our snapshot. - // Consider keys: a 5, b 7 (del), b 5. When iterating, lastKey = a. - // Then we see b 7, which is deleted. If we don't store lastKey = b, we'll then return b 5, - // which is wrong. Therefore, update lastKey here. - it.lastKey = y.SafeCopy(it.lastKey, mi.Key()) - } - -FILL: - // If deleted, advance and return. - vs := mi.Value() - if isDeletedOrExpired(vs.Meta, vs.ExpiresAt) { - mi.Next() - return false - } - - item := it.newItem() - it.fill(item) - // fill item based on current cursor position. All Next calls have returned, so reaching here - // means no Next was called. - - mi.Next() // Advance but no fill item yet. - if !it.opt.Reverse || !mi.Valid() { // Forward direction, or invalid. - setItem(item) - return true - } - - // Reverse direction. - nextTs := y.ParseTs(mi.Key()) - mik := y.ParseKey(mi.Key()) - if nextTs <= it.readTs && bytes.Equal(mik, item.key) { - // This is a valid potential candidate. - goto FILL - } - // Ignore the next candidate. Return the current one. 
- setItem(item) - return true -} - -func (it *Iterator) fill(item *Item) { - vs := it.iitr.Value() - item.meta = vs.Meta - item.userMeta = vs.UserMeta - item.expiresAt = vs.ExpiresAt - - item.version = y.ParseTs(it.iitr.Key()) - item.key = y.SafeCopy(item.key, y.ParseKey(it.iitr.Key())) - - item.vptr = y.SafeCopy(item.vptr, vs.Value) - item.val = nil - if it.opt.PrefetchValues { - item.wg.Add(1) - go func() { - // FIXME we are not handling errors here. - item.prefetchValue() - item.wg.Done() - }() - } -} - -func (it *Iterator) prefetch() { - prefetchSize := 2 - if it.opt.PrefetchValues && it.opt.PrefetchSize > 1 { - prefetchSize = it.opt.PrefetchSize - } - - i := it.iitr - var count int - it.item = nil - for i.Valid() { - if !it.parseItem() { - continue - } - count++ - if count == prefetchSize { - break - } - } -} - -// Seek would seek to the provided key if present. If absent, it would seek to the next -// smallest key greater than the provided key if iterating in the forward direction. -// Behavior would be reversed if iterating backwards. -func (it *Iterator) Seek(key []byte) { - if len(key) > 0 { - it.txn.addReadKey(key) - } - for i := it.data.pop(); i != nil; i = it.data.pop() { - i.wg.Wait() - it.waste.push(i) - } - - it.lastKey = it.lastKey[:0] - if len(key) == 0 { - key = it.opt.Prefix - } - if len(key) == 0 { - it.iitr.Rewind() - it.prefetch() - return - } - - if !it.opt.Reverse { - key = y.KeyWithTs(key, it.txn.readTs) - } else { - key = y.KeyWithTs(key, 0) - } - it.iitr.Seek(key) - it.prefetch() -} - -// Rewind would rewind the iterator cursor all the way to zero-th position, which would be the -// smallest key if iterating forward, and largest if iterating backward. It does not keep track of -// whether the cursor started with a Seek(). -func (it *Iterator) Rewind() { - it.Seek(nil) -} diff --git a/vendor/github.com/dgraph-io/badger/v2/key_registry.go b/vendor/github.com/dgraph-io/badger/v2/key_registry.go deleted file mode 100644 index db32acd1..00000000 --- a/vendor/github.com/dgraph-io/badger/v2/key_registry.go +++ /dev/null @@ -1,424 +0,0 @@ -/* - * Copyright 2019 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package badger - -import ( - "bytes" - "crypto/aes" - "crypto/rand" - "encoding/binary" - "hash/crc32" - "io" - "os" - "path/filepath" - "sync" - "time" - - "github.com/dgraph-io/badger/v2/pb" - "github.com/dgraph-io/badger/v2/y" -) - -const ( - // KeyRegistryFileName is the file name for the key registry file. - KeyRegistryFileName = "KEYREGISTRY" - // KeyRegistryRewriteFileName is the file name for the rewrite key registry file. - KeyRegistryRewriteFileName = "REWRITE-KEYREGISTRY" -) - -// SanityText is used to check whether the given user provided storage key is valid or not -var sanityText = []byte("Hello Badger") - -// KeyRegistry used to maintain all the data keys. 
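The iterator docs removed above (NewIterator, Seek, ValidForPrefix, and the Value/ValueCopy contract) describe the canonical iteration pattern. A self-contained sketch of a prefix scan that copies values out with ValueCopy, which is required here because the values must outlive the transaction; the `user/` keys are made up for illustration:

package main

import (
	"fmt"
	"log"

	badger "github.com/dgraph-io/badger/v2"
)

func main() {
	db, err := badger.Open(badger.DefaultOptions("").WithInMemory(true))
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Seed a few keys under a common prefix.
	err = db.Update(func(txn *badger.Txn) error {
		for i := 0; i < 3; i++ {
			k := fmt.Sprintf("user/%d", i)
			if err := txn.Set([]byte(k), []byte("v")); err != nil {
				return err
			}
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}

	var vals [][]byte
	err = db.View(func(txn *badger.Txn) error {
		it := txn.NewIterator(badger.DefaultIteratorOptions)
		defer it.Close()
		prefix := []byte("user/")
		for it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() {
			// ValueCopy, because the slice must remain valid after View returns.
			v, err := it.Item().ValueCopy(nil)
			if err != nil {
				return err
			}
			vals = append(vals, v)
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(vals), "values copied")
}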
-type KeyRegistry struct { - sync.RWMutex - dataKeys map[uint64]*pb.DataKey - lastCreated int64 //lastCreated is the timestamp(seconds) of the last data key generated. - nextKeyID uint64 - fp *os.File - opt KeyRegistryOptions -} - -type KeyRegistryOptions struct { - Dir string - ReadOnly bool - EncryptionKey []byte - EncryptionKeyRotationDuration time.Duration - InMemory bool -} - -// newKeyRegistry returns KeyRegistry. -func newKeyRegistry(opt KeyRegistryOptions) *KeyRegistry { - return &KeyRegistry{ - dataKeys: make(map[uint64]*pb.DataKey), - nextKeyID: 0, - opt: opt, - } -} - -// OpenKeyRegistry opens key registry if it exists, otherwise it'll create key registry -// and returns key registry. -func OpenKeyRegistry(opt KeyRegistryOptions) (*KeyRegistry, error) { - // sanity check the encryption key length. - if len(opt.EncryptionKey) > 0 { - switch len(opt.EncryptionKey) { - default: - return nil, y.Wrapf(ErrInvalidEncryptionKey, "During OpenKeyRegistry") - case 16, 24, 32: - break - } - } - // If db is opened in InMemory mode, we don't need to write key registry to the disk. - if opt.InMemory { - return newKeyRegistry(opt), nil - } - path := filepath.Join(opt.Dir, KeyRegistryFileName) - var flags uint32 - if opt.ReadOnly { - flags |= y.ReadOnly - } else { - flags |= y.Sync - } - fp, err := y.OpenExistingFile(path, flags) - // OpenExistingFile just open file. - // So checking whether the file exist or not. If not - // We'll create new keyregistry. - if os.IsNotExist(err) { - // Creating new registry file if not exist. - kr := newKeyRegistry(opt) - if opt.ReadOnly { - return kr, nil - } - // Writing the key registry to the file. - if err := WriteKeyRegistry(kr, opt); err != nil { - return nil, y.Wrapf(err, "Error while writing key registry.") - } - fp, err = y.OpenExistingFile(path, flags) - if err != nil { - return nil, y.Wrapf(err, "Error while opening newly created key registry.") - } - } else if err != nil { - return nil, y.Wrapf(err, "Error while opening key registry.") - } - kr, err := readKeyRegistry(fp, opt) - if err != nil { - // This case happens only if the file is opened properly and - // not able to read. - fp.Close() - return nil, err - } - if opt.ReadOnly { - // We'll close the file in readonly mode. - return kr, fp.Close() - } - kr.fp = fp - return kr, nil -} - -// keyRegistryIterator reads all the datakey from the key registry -type keyRegistryIterator struct { - encryptionKey []byte - fp *os.File - // lenCrcBuf contains crc buf and data length to move forward. - lenCrcBuf [8]byte -} - -// newKeyRegistryIterator returns iterator which will allow you to iterate -// over the data key of the key registry. -func newKeyRegistryIterator(fp *os.File, encryptionKey []byte) (*keyRegistryIterator, error) { - return &keyRegistryIterator{ - encryptionKey: encryptionKey, - fp: fp, - lenCrcBuf: [8]byte{}, - }, validRegistry(fp, encryptionKey) -} - -// validRegistry checks that given encryption key is valid or not. -func validRegistry(fp *os.File, encryptionKey []byte) error { - iv := make([]byte, aes.BlockSize) - var err error - if _, err = fp.Read(iv); err != nil { - return y.Wrapf(err, "Error while reading IV for key registry.") - } - eSanityText := make([]byte, len(sanityText)) - if _, err = fp.Read(eSanityText); err != nil { - return y.Wrapf(err, "Error while reading sanity text.") - } - if len(encryptionKey) > 0 { - // Decrypting sanity text. 
- if eSanityText, err = y.XORBlock(eSanityText, encryptionKey, iv); err != nil { - return y.Wrapf(err, "During validRegistry") - } - } - // Check the given key is valid or not. - if !bytes.Equal(eSanityText, sanityText) { - return ErrEncryptionKeyMismatch - } - return nil -} - -func (kri *keyRegistryIterator) next() (*pb.DataKey, error) { - var err error - // Read crc buf and data length. - if _, err = kri.fp.Read(kri.lenCrcBuf[:]); err != nil { - // EOF means end of the iteration. - if err != io.EOF { - return nil, y.Wrapf(err, "While reading crc in keyRegistryIterator.next") - } - return nil, err - } - l := int64(binary.BigEndian.Uint32(kri.lenCrcBuf[0:4])) - // Read protobuf data. - data := make([]byte, l) - if _, err = kri.fp.Read(data); err != nil { - // EOF means end of the iteration. - if err != io.EOF { - return nil, y.Wrapf(err, "While reading protobuf in keyRegistryIterator.next") - } - return nil, err - } - // Check checksum. - if crc32.Checksum(data, y.CastagnoliCrcTable) != binary.BigEndian.Uint32(kri.lenCrcBuf[4:]) { - return nil, y.Wrapf(y.ErrChecksumMismatch, "Error while checking checksum for data key.") - } - dataKey := &pb.DataKey{} - if err = dataKey.Unmarshal(data); err != nil { - return nil, y.Wrapf(err, "While unmarshal of datakey in keyRegistryIterator.next") - } - if len(kri.encryptionKey) > 0 { - // Decrypt the key if the storage key exists. - if dataKey.Data, err = y.XORBlock(dataKey.Data, kri.encryptionKey, dataKey.Iv); err != nil { - return nil, y.Wrapf(err, "While decrypting datakey in keyRegistryIterator.next") - } - } - return dataKey, nil -} - -// readKeyRegistry will read the key registry file and build the key registry struct. -func readKeyRegistry(fp *os.File, opt KeyRegistryOptions) (*KeyRegistry, error) { - itr, err := newKeyRegistryIterator(fp, opt.EncryptionKey) - if err != nil { - return nil, err - } - kr := newKeyRegistry(opt) - var dk *pb.DataKey - dk, err = itr.next() - for err == nil && dk != nil { - if dk.KeyId > kr.nextKeyID { - // Set the maximum key ID for next key ID generation. - kr.nextKeyID = dk.KeyId - } - if dk.CreatedAt > kr.lastCreated { - // Set the last generated key timestamp. - kr.lastCreated = dk.CreatedAt - } - // No need to lock since we are building the initial state. - kr.dataKeys[dk.KeyId] = dk - // Forward the iterator. - dk, err = itr.next() - } - // We read all the key. So, Ignoring this error. - if err == io.EOF { - err = nil - } - return kr, err -} - -/* -Structure of Key Registry. -+-------------------+---------------------+--------------------+--------------+------------------+ -| IV | Sanity Text | DataKey1 | DataKey2 | ... | -+-------------------+---------------------+--------------------+--------------+------------------+ -*/ - -// WriteKeyRegistry will rewrite the existing key registry file with new one. -// It is okay to give closed key registry. Since, it's using only the datakey. -func WriteKeyRegistry(reg *KeyRegistry, opt KeyRegistryOptions) error { - buf := &bytes.Buffer{} - iv, err := y.GenerateIV() - y.Check(err) - // Encrypt sanity text if the encryption key is presents. - eSanity := sanityText - if len(opt.EncryptionKey) > 0 { - var err error - eSanity, err = y.XORBlock(eSanity, opt.EncryptionKey, iv) - if err != nil { - return y.Wrapf(err, "Error while encrpting sanity text in WriteKeyRegistry") - } - } - y.Check2(buf.Write(iv)) - y.Check2(buf.Write(eSanity)) - // Write all the datakeys to the buf. - for _, k := range reg.dataKeys { - // Writing the datakey to the given buffer. 
- if err := storeDataKey(buf, opt.EncryptionKey, k); err != nil { - return y.Wrapf(err, "Error while storing datakey in WriteKeyRegistry") - } - } - tmpPath := filepath.Join(opt.Dir, KeyRegistryRewriteFileName) - // Open temporary file to write the data and do atomic rename. - fp, err := y.OpenTruncFile(tmpPath, true) - if err != nil { - return y.Wrapf(err, "Error while opening tmp file in WriteKeyRegistry") - } - // Write buf to the disk. - if _, err = fp.Write(buf.Bytes()); err != nil { - // close the fd before returning error. We're not using defer - // because, for windows we need to close the fd explicitly before - // renaming. - fp.Close() - return y.Wrapf(err, "Error while writing buf in WriteKeyRegistry") - } - // In Windows the files should be closed before doing a Rename. - if err = fp.Close(); err != nil { - return y.Wrapf(err, "Error while closing tmp file in WriteKeyRegistry") - } - // Rename to the original file. - if err = os.Rename(tmpPath, filepath.Join(opt.Dir, KeyRegistryFileName)); err != nil { - return y.Wrapf(err, "Error while renaming file in WriteKeyRegistry") - } - // Sync Dir. - return syncDir(opt.Dir) -} - -// dataKey returns datakey of the given key id. -func (kr *KeyRegistry) dataKey(id uint64) (*pb.DataKey, error) { - kr.RLock() - defer kr.RUnlock() - if id == 0 { - // nil represent plain text. - return nil, nil - } - dk, ok := kr.dataKeys[id] - if !ok { - return nil, y.Wrapf(ErrInvalidDataKeyID, "Error for the KEY ID %d", id) - } - return dk, nil -} - -// latestDataKey will give you the latest generated datakey based on the rotation -// period. If the last generated datakey lifetime exceeds the rotation period. -// It'll create new datakey. -func (kr *KeyRegistry) latestDataKey() (*pb.DataKey, error) { - if len(kr.opt.EncryptionKey) == 0 { - // nil is for no encryption. - return nil, nil - } - // validKey return datakey if the last generated key duration less than - // rotation duration. - validKey := func() (*pb.DataKey, bool) { - // Time diffrence from the last generated time. - diff := time.Since(time.Unix(kr.lastCreated, 0)) - if diff < kr.opt.EncryptionKeyRotationDuration { - return kr.dataKeys[kr.nextKeyID], true - } - return nil, false - } - kr.RLock() - key, valid := validKey() - kr.RUnlock() - if valid { - // If less than EncryptionKeyRotationDuration, returns the last generated key. - return key, nil - } - kr.Lock() - defer kr.Unlock() - // Key might have generated by another go routine. So, - // checking once again. - key, valid = validKey() - if valid { - return key, nil - } - k := make([]byte, len(kr.opt.EncryptionKey)) - iv, err := y.GenerateIV() - if err != nil { - return nil, err - } - _, err = rand.Read(k) - if err != nil { - return nil, err - } - // Otherwise Increment the KeyID and generate new datakey. - kr.nextKeyID++ - dk := &pb.DataKey{ - KeyId: kr.nextKeyID, - Data: k, - CreatedAt: time.Now().Unix(), - Iv: iv, - } - // Don't store the datakey on file if badger is running in InMemory mode. - if !kr.opt.InMemory { - // Store the datekey. - buf := &bytes.Buffer{} - if err = storeDataKey(buf, kr.opt.EncryptionKey, dk); err != nil { - return nil, err - } - // Persist the datakey to the disk - if _, err = kr.fp.Write(buf.Bytes()); err != nil { - return nil, err - } - } - // storeDatakey encrypts the datakey So, placing un-encrypted key in the memory. - dk.Data = k - kr.lastCreated = dk.CreatedAt - kr.dataKeys[kr.nextKeyID] = dk - return dk, nil -} - -// Close closes the key registry. 
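WriteKeyRegistry above follows the classic crash-safe rewrite recipe: write to a temporary file, close it before renaming (required on Windows), rename over the target, then sync the directory so the rename itself is durable. A generic sketch of that recipe with a hypothetical atomicWrite helper, not Badger's code; directory Sync is POSIX-oriented and may fail on other platforms:

package main

import (
	"log"
	"os"
	"path/filepath"
)

// atomicWrite writes data to name inside dir via a temp file plus rename.
func atomicWrite(dir, name string, data []byte) error {
	tmp := filepath.Join(dir, name+".rewrite")
	f, err := os.OpenFile(tmp, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0o600)
	if err != nil {
		return err
	}
	if _, err := f.Write(data); err != nil {
		f.Close()
		return err
	}
	// Close before rename; deferring would break on Windows.
	if err := f.Close(); err != nil {
		return err
	}
	if err := os.Rename(tmp, filepath.Join(dir, name)); err != nil {
		return err
	}
	// Fsync the directory so the rename survives a crash (the syncDir step).
	d, err := os.Open(dir)
	if err != nil {
		return err
	}
	defer d.Close()
	return d.Sync()
}

func main() {
	if err := atomicWrite(os.TempDir(), "REGISTRY-EXAMPLE", []byte("payload")); err != nil {
		log.Fatal(err)
	}
}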
-func (kr *KeyRegistry) Close() error { - if !(kr.opt.ReadOnly || kr.opt.InMemory) { - return kr.fp.Close() - } - return nil -} - -// storeDataKey stores datakey in an encrypted format in the given buffer. If storage key preset. -func storeDataKey(buf *bytes.Buffer, storageKey []byte, k *pb.DataKey) error { - // xor will encrypt the IV and xor with the given data. - // It'll used for both encryption and decryption. - xor := func() error { - if len(storageKey) == 0 { - return nil - } - var err error - k.Data, err = y.XORBlock(k.Data, storageKey, k.Iv) - return err - } - // In memory datakey will be plain text so encrypting before storing to the disk. - var err error - if err = xor(); err != nil { - return y.Wrapf(err, "Error while encrypting datakey in storeDataKey") - } - var data []byte - if data, err = k.Marshal(); err != nil { - err = y.Wrapf(err, "Error while marshaling datakey in storeDataKey") - var err2 error - // decrypting the datakey back. - if err2 = xor(); err2 != nil { - return y.Wrapf(err, - y.Wrapf(err2, "Error while decrypting datakey in storeDataKey").Error()) - } - return err - } - var lenCrcBuf [8]byte - binary.BigEndian.PutUint32(lenCrcBuf[0:4], uint32(len(data))) - binary.BigEndian.PutUint32(lenCrcBuf[4:8], crc32.Checksum(data, y.CastagnoliCrcTable)) - y.Check2(buf.Write(lenCrcBuf[:])) - y.Check2(buf.Write(data)) - // Decrypting the datakey back since we're using the pointer. - return xor() -} diff --git a/vendor/github.com/dgraph-io/badger/v2/level_handler.go b/vendor/github.com/dgraph-io/badger/v2/level_handler.go deleted file mode 100644 index ce48965f..00000000 --- a/vendor/github.com/dgraph-io/badger/v2/level_handler.go +++ /dev/null @@ -1,330 +0,0 @@ -/* - * Copyright 2017 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package badger - -import ( - "fmt" - "sort" - "sync" - - "github.com/dgryski/go-farm" - - "github.com/dgraph-io/badger/v2/table" - "github.com/dgraph-io/badger/v2/y" - "github.com/pkg/errors" -) - -type levelHandler struct { - // Guards tables, totalSize. - sync.RWMutex - - // For level >= 1, tables are sorted by key ranges, which do not overlap. - // For level 0, tables are sorted by time. - // For level 0, newest table are at the back. Compact the oldest one first, which is at the front. - tables []*table.Table - totalSize int64 - - // The following are initialized once and const. - level int - strLevel string - maxTotalSize int64 - db *DB -} - -func (s *levelHandler) getTotalSize() int64 { - s.RLock() - defer s.RUnlock() - return s.totalSize -} - -// initTables replaces s.tables with given tables. This is done during loading. -func (s *levelHandler) initTables(tables []*table.Table) { - s.Lock() - defer s.Unlock() - - s.tables = tables - s.totalSize = 0 - for _, t := range tables { - s.totalSize += t.Size() - } - - if s.level == 0 { - // Key range will overlap. Just sort by fileID in ascending order - // because newer tables are at the end of level 0. 
- sort.Slice(s.tables, func(i, j int) bool { - return s.tables[i].ID() < s.tables[j].ID() - }) - } else { - // Sort tables by keys. - sort.Slice(s.tables, func(i, j int) bool { - return y.CompareKeys(s.tables[i].Smallest(), s.tables[j].Smallest()) < 0 - }) - } -} - -// deleteTables remove tables idx0, ..., idx1-1. -func (s *levelHandler) deleteTables(toDel []*table.Table) error { - s.Lock() // s.Unlock() below - - toDelMap := make(map[uint64]struct{}) - for _, t := range toDel { - toDelMap[t.ID()] = struct{}{} - } - - // Make a copy as iterators might be keeping a slice of tables. - var newTables []*table.Table - for _, t := range s.tables { - _, found := toDelMap[t.ID()] - if !found { - newTables = append(newTables, t) - continue - } - s.totalSize -= t.Size() - } - s.tables = newTables - - s.Unlock() // Unlock s _before_ we DecrRef our tables, which can be slow. - - return decrRefs(toDel) -} - -// replaceTables will replace tables[left:right] with newTables. Note this EXCLUDES tables[right]. -// You must call decr() to delete the old tables _after_ writing the update to the manifest. -func (s *levelHandler) replaceTables(toDel, toAdd []*table.Table) error { - // Need to re-search the range of tables in this level to be replaced as other goroutines might - // be changing it as well. (They can't touch our tables, but if they add/remove other tables, - // the indices get shifted around.) - s.Lock() // We s.Unlock() below. - - toDelMap := make(map[uint64]struct{}) - for _, t := range toDel { - toDelMap[t.ID()] = struct{}{} - } - var newTables []*table.Table - for _, t := range s.tables { - _, found := toDelMap[t.ID()] - if !found { - newTables = append(newTables, t) - continue - } - s.totalSize -= t.Size() - } - - // Increase totalSize first. - for _, t := range toAdd { - s.totalSize += t.Size() - t.IncrRef() - newTables = append(newTables, t) - } - - // Assign tables. - s.tables = newTables - sort.Slice(s.tables, func(i, j int) bool { - return y.CompareKeys(s.tables[i].Smallest(), s.tables[j].Smallest()) < 0 - }) - s.Unlock() // s.Unlock before we DecrRef tables -- that can be slow. - return decrRefs(toDel) -} - -// addTable adds toAdd table to levelHandler. Normally when we add tables to levelHandler, we sort -// tables based on table.Smallest. This is required for correctness of the system. But in case of -// stream writer this can be avoided. We can just add tables to levelHandler's table list -// and after all addTable calls, we can sort table list(check sortTable method). -// NOTE: levelHandler.sortTables() should be called after call addTable calls are done. -func (s *levelHandler) addTable(t *table.Table) { - s.Lock() - defer s.Unlock() - - s.totalSize += t.Size() // Increase totalSize first. - t.IncrRef() - s.tables = append(s.tables, t) -} - -// sortTables sorts tables of levelHandler based on table.Smallest. -// Normally it should be called after all addTable calls. -func (s *levelHandler) sortTables() { - s.RLock() - defer s.RUnlock() - - sort.Slice(s.tables, func(i, j int) bool { - return y.CompareKeys(s.tables[i].Smallest(), s.tables[j].Smallest()) < 0 - }) -} - -func decrRefs(tables []*table.Table) error { - for _, table := range tables { - if err := table.DecrRef(); err != nil { - return err - } - } - return nil -} - -func newLevelHandler(db *DB, level int) *levelHandler { - return &levelHandler{ - level: level, - strLevel: fmt.Sprintf("l%d", level), - db: db, - } -} - -// tryAddLevel0Table returns true if ok and no stalling. 
-func (s *levelHandler) tryAddLevel0Table(t *table.Table) bool { - y.AssertTrue(s.level == 0) - // Need lock as we may be deleting the first table during a level 0 compaction. - s.Lock() - defer s.Unlock() - // Stall (by returning false) if we are above the specified stall setting for L0. - if len(s.tables) >= s.db.opt.NumLevelZeroTablesStall { - return false - } - - s.tables = append(s.tables, t) - t.IncrRef() - s.totalSize += t.Size() - - return true -} - -func (s *levelHandler) numTables() int { - s.RLock() - defer s.RUnlock() - return len(s.tables) -} - -func (s *levelHandler) close() error { - s.RLock() - defer s.RUnlock() - var err error - for _, t := range s.tables { - if closeErr := t.Close(); closeErr != nil && err == nil { - err = closeErr - } - } - return errors.Wrap(err, "levelHandler.close") -} - -// getTableForKey acquires a read-lock to access s.tables. It returns a list of tableHandlers. -func (s *levelHandler) getTableForKey(key []byte) ([]*table.Table, func() error) { - s.RLock() - defer s.RUnlock() - - if s.level == 0 { - // For level 0, we need to check every table. Remember to make a copy as s.tables may change - // once we exit this function, and we don't want to lock s.tables while seeking in tables. - // CAUTION: Reverse the tables. - out := make([]*table.Table, 0, len(s.tables)) - for i := len(s.tables) - 1; i >= 0; i-- { - out = append(out, s.tables[i]) - s.tables[i].IncrRef() - } - return out, func() error { - for _, t := range out { - if err := t.DecrRef(); err != nil { - return err - } - } - return nil - } - } - // For level >= 1, we can do a binary search as key range does not overlap. - idx := sort.Search(len(s.tables), func(i int) bool { - return y.CompareKeys(s.tables[i].Biggest(), key) >= 0 - }) - if idx >= len(s.tables) { - // Given key is strictly > than every element we have. - return nil, func() error { return nil } - } - tbl := s.tables[idx] - tbl.IncrRef() - return []*table.Table{tbl}, tbl.DecrRef -} - -// get returns value for a given key or the key after that. If not found, return nil. -func (s *levelHandler) get(key []byte) (y.ValueStruct, error) { - tables, decr := s.getTableForKey(key) - keyNoTs := y.ParseKey(key) - - hash := farm.Fingerprint64(keyNoTs) - var maxVs y.ValueStruct - for _, th := range tables { - if th.DoesNotHave(hash) { - y.NumLSMBloomHits.Add(s.strLevel, 1) - continue - } - - it := th.NewIterator(false) - defer it.Close() - - y.NumLSMGets.Add(s.strLevel, 1) - it.Seek(key) - if !it.Valid() { - continue - } - if y.SameKey(key, it.Key()) { - if version := y.ParseTs(it.Key()); maxVs.Version < version { - maxVs = it.ValueCopy() - maxVs.Version = version - } - } - } - return maxVs, decr() -} - -// appendIterators appends iterators to an array of iterators, for merging. -// Note: This obtains references for the table handlers. Remember to close these iterators. -func (s *levelHandler) appendIterators(iters []y.Iterator, opt *IteratorOptions) []y.Iterator { - s.RLock() - defer s.RUnlock() - - if s.level == 0 { - // Remember to add in reverse order! - // The newer table at the end of s.tables should be added first as it takes precedence. - // Level 0 tables are not in key sorted order, so we need to consider them one by one. 
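getTableForKey above relies on the invariant that tables on levels >= 1 hold sorted, non-overlapping key ranges, so a single sort.Search over each table's biggest key finds the only candidate. A toy sketch of the same lookup over string ranges; tableRange is illustrative, not Badger's type:

package main

import (
	"fmt"
	"sort"
)

// tableRange stands in for a table's [smallest, biggest] key range.
type tableRange struct{ smallest, biggest string }

// findTable returns the index of the one table that could contain key.
func findTable(tables []tableRange, key string) (int, bool) {
	idx := sort.Search(len(tables), func(i int) bool {
		return tables[i].biggest >= key
	})
	if idx == len(tables) {
		return 0, false // key is greater than every table's biggest key
	}
	return idx, true
}

func main() {
	tables := []tableRange{{"a", "f"}, {"g", "m"}, {"n", "z"}}
	idx, ok := findTable(tables, "k")
	fmt.Println(idx, ok) // 1 true: "k" can only live in ["g", "m"]
}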
- var out []*table.Table - for _, t := range s.tables { - if opt.pickTable(t) { - out = append(out, t) - } - } - return appendIteratorsReversed(iters, out, opt.Reverse) - } - - tables := opt.pickTables(s.tables) - if len(tables) == 0 { - return iters - } - return append(iters, table.NewConcatIterator(tables, opt.Reverse)) -} - -type levelHandlerRLocked struct{} - -// overlappingTables returns the tables that intersect with key range. Returns a half-interval. -// This function should already have acquired a read lock, and this is so important the caller must -// pass an empty parameter declaring such. -func (s *levelHandler) overlappingTables(_ levelHandlerRLocked, kr keyRange) (int, int) { - if len(kr.left) == 0 || len(kr.right) == 0 { - return 0, 0 - } - left := sort.Search(len(s.tables), func(i int) bool { - return y.CompareKeys(kr.left, s.tables[i].Biggest()) <= 0 - }) - right := sort.Search(len(s.tables), func(i int) bool { - return y.CompareKeys(kr.right, s.tables[i].Smallest()) < 0 - }) - return left, right -} diff --git a/vendor/github.com/dgraph-io/badger/v2/levels.go b/vendor/github.com/dgraph-io/badger/v2/levels.go deleted file mode 100644 index f99ba0a2..00000000 --- a/vendor/github.com/dgraph-io/badger/v2/levels.go +++ /dev/null @@ -1,1200 +0,0 @@ -/* - * Copyright 2017 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package badger - -import ( - "bytes" - "fmt" - "math/rand" - "os" - "sort" - "strings" - "sync" - "sync/atomic" - "time" - - "golang.org/x/net/trace" - - "github.com/dgraph-io/badger/v2/pb" - "github.com/dgraph-io/badger/v2/table" - "github.com/dgraph-io/badger/v2/y" - "github.com/pkg/errors" -) - -type levelsController struct { - nextFileID uint64 // Atomic - - // The following are initialized once and const. - levels []*levelHandler - kv *DB - - cstatus compactStatus - // This is for getting timings between stalls. - lastUnstalled time.Time -} - -// revertToManifest checks that all necessary table files exist and removes all table files not -// referenced by the manifest. idMap is a set of table file id's that were read from the directory -// listing. -func revertToManifest(kv *DB, mf *Manifest, idMap map[uint64]struct{}) error { - // 1. Check all files in manifest exist. - for id := range mf.Tables { - if _, ok := idMap[id]; !ok { - return fmt.Errorf("file does not exist for table %d", id) - } - } - - // 2. Delete files that shouldn't exist. 
- for id := range idMap { - if _, ok := mf.Tables[id]; !ok { - kv.opt.Debugf("Table file %d not referenced in MANIFEST\n", id) - filename := table.NewFilename(id, kv.opt.Dir) - if err := os.Remove(filename); err != nil { - return y.Wrapf(err, "While removing table %d", id) - } - } - } - - return nil -} - -func newLevelsController(db *DB, mf *Manifest) (*levelsController, error) { - y.AssertTrue(db.opt.NumLevelZeroTablesStall > db.opt.NumLevelZeroTables) - s := &levelsController{ - kv: db, - levels: make([]*levelHandler, db.opt.MaxLevels), - } - s.cstatus.levels = make([]*levelCompactStatus, db.opt.MaxLevels) - - for i := 0; i < db.opt.MaxLevels; i++ { - s.levels[i] = newLevelHandler(db, i) - switch i { - case 0: - // Do nothing. - case 1: - // Level 1 probably shouldn't be too much bigger than level 0. - s.levels[i].maxTotalSize = db.opt.LevelOneSize - default: - s.levels[i].maxTotalSize = s.levels[i-1].maxTotalSize * int64(db.opt.LevelSizeMultiplier) - } - s.cstatus.levels[i] = new(levelCompactStatus) - } - - if db.opt.InMemory { - return s, nil - } - // Compare manifest against directory, check for existent/non-existent files, and remove. - if err := revertToManifest(db, mf, getIDMap(db.opt.Dir)); err != nil { - return nil, err - } - - // Some files may be deleted. Let's reload. - var flags uint32 = y.Sync - if db.opt.ReadOnly { - flags |= y.ReadOnly - } - - var mu sync.Mutex - tables := make([][]*table.Table, db.opt.MaxLevels) - var maxFileID uint64 - - // We found that using 3 goroutines allows disk throughput to be utilized to its max. - // Disk utilization is the main thing we should focus on, while trying to read the data. That's - // the one factor that remains constant between HDD and SSD. - throttle := y.NewThrottle(3) - - start := time.Now() - var numOpened int32 - tick := time.NewTicker(3 * time.Second) - defer tick.Stop() - - for fileID, tf := range mf.Tables { - fname := table.NewFilename(fileID, db.opt.Dir) - select { - case <-tick.C: - db.opt.Infof("%d tables out of %d opened in %s\n", atomic.LoadInt32(&numOpened), - len(mf.Tables), time.Since(start).Round(time.Millisecond)) - default: - } - if err := throttle.Do(); err != nil { - closeAllTables(tables) - return nil, err - } - if fileID > maxFileID { - maxFileID = fileID - } - go func(fname string, tf TableManifest) { - var rerr error - defer func() { - throttle.Done(rerr) - atomic.AddInt32(&numOpened, 1) - }() - fd, err := y.OpenExistingFile(fname, flags) - if err != nil { - rerr = errors.Wrapf(err, "Opening file: %q", fname) - return - } - dk, err := db.registry.dataKey(tf.KeyID) - if err != nil { - rerr = errors.Wrapf(err, "Error while reading datakey") - return - } - topt := buildTableOptions(db.opt) - // Set compression from table manifest. - topt.Compression = tf.Compression - topt.DataKey = dk - topt.BlockCache = db.blockCache - topt.IndexCache = db.indexCache - t, err := table.OpenTable(fd, topt) - if err != nil { - if strings.HasPrefix(err.Error(), "CHECKSUM_MISMATCH:") { - db.opt.Errorf(err.Error()) - db.opt.Errorf("Ignoring table %s", fd.Name()) - // Do not set rerr. We will continue without this table. 
- } else { - rerr = errors.Wrapf(err, "Opening table: %q", fname) - } - return - } - - mu.Lock() - tables[tf.Level] = append(tables[tf.Level], t) - mu.Unlock() - }(fname, tf) - } - if err := throttle.Finish(); err != nil { - closeAllTables(tables) - return nil, err - } - db.opt.Infof("All %d tables opened in %s\n", atomic.LoadInt32(&numOpened), - time.Since(start).Round(time.Millisecond)) - s.nextFileID = maxFileID + 1 - for i, tbls := range tables { - s.levels[i].initTables(tbls) - } - - // Make sure key ranges do not overlap etc. - if err := s.validate(); err != nil { - _ = s.cleanupLevels() - return nil, errors.Wrap(err, "Level validation") - } - - // Sync directory (because we have at least removed some files, or previously created the - // manifest file). - if err := syncDir(db.opt.Dir); err != nil { - _ = s.close() - return nil, err - } - - return s, nil -} - -// Closes the tables, for cleanup in newLevelsController. (We Close() instead of using DecrRef() -// because that would delete the underlying files.) We ignore errors, which is OK because tables -// are read-only. -func closeAllTables(tables [][]*table.Table) { - for _, tableSlice := range tables { - for _, table := range tableSlice { - _ = table.Close() - } - } -} - -func (s *levelsController) cleanupLevels() error { - var firstErr error - for _, l := range s.levels { - if err := l.close(); err != nil && firstErr == nil { - firstErr = err - } - } - return firstErr -} - -// dropTree picks all tables from all levels, creates a manifest changeset, -// applies it, and then decrements the refs of these tables, which would result -// in their deletion. -func (s *levelsController) dropTree() (int, error) { - // First pick all tables, so we can create a manifest changelog. - var all []*table.Table - for _, l := range s.levels { - l.RLock() - all = append(all, l.tables...) - l.RUnlock() - } - if len(all) == 0 { - return 0, nil - } - - // Generate the manifest changes. - changes := []*pb.ManifestChange{} - for _, table := range all { - // Add a delete change only if the table is not in memory. - if !table.IsInmemory { - changes = append(changes, newDeleteChange(table.ID())) - } - } - changeSet := pb.ManifestChangeSet{Changes: changes} - if err := s.kv.manifest.addChanges(changeSet.Changes); err != nil { - return 0, err - } - - // Now that manifest has been successfully written, we can delete the tables. - for _, l := range s.levels { - l.Lock() - l.totalSize = 0 - l.tables = l.tables[:0] - l.Unlock() - } - for _, table := range all { - if err := table.DecrRef(); err != nil { - return 0, err - } - } - return len(all), nil -} - -// dropPrefix runs a L0->L1 compaction, and then runs same level compaction on the rest of the -// levels. For L0->L1 compaction, it runs compactions normally, but skips over all the keys with the -// provided prefix and also the internal move keys for the same prefix. -// For Li->Li compactions, it picks up the tables which would have the prefix. The -// tables who only have keys with this prefix are quickly dropped. The ones which have other keys -// are run through MergeIterator and compacted to create new tables. All the mechanisms of -// compactions apply, i.e. level sizes and MANIFEST are updated as in the normal flow. -func (s *levelsController) dropPrefixes(prefixes [][]byte) error { - // Internal move keys related to the given prefix should also be skipped. - for _, prefix := range prefixes { - key := make([]byte, 0, len(badgerMove)+len(prefix)) - key = append(key, badgerMove...) 
- key = append(key, prefix...) - prefixes = append(prefixes, key) - } - - opt := s.kv.opt - // Iterate levels in the reverse order because if we were to iterate from - // lower level (say level 0) to a higher level (say level 3) we could have - // a state in which level 0 is compacted and an older version of a key exists in lower level. - // At this point, if someone creates an iterator, they would see an old - // value for a key from lower levels. Iterating in reverse order ensures we - // drop the oldest data first so that lookups never return stale data. - for i := len(s.levels) - 1; i >= 0; i-- { - l := s.levels[i] - - l.RLock() - if l.level == 0 { - size := len(l.tables) - l.RUnlock() - - if size > 0 { - cp := compactionPriority{ - level: 0, - score: 1.74, - // A unique number greater than 1.0 does two things. Helps identify this - // function in logs, and forces a compaction. - dropPrefixes: prefixes, - } - if err := s.doCompact(174, cp); err != nil { - opt.Warningf("While compacting level 0: %v", err) - return nil - } - } - continue - } - - // Build a list of compaction tableGroups affecting all the prefixes we - // need to drop. We need to build tableGroups that satisfy the invariant that - // bottom tables are consecutive. - // tableGroup contains groups of consecutive tables. - var tableGroups [][]*table.Table - var tableGroup []*table.Table - - finishGroup := func() { - if len(tableGroup) > 0 { - tableGroups = append(tableGroups, tableGroup) - tableGroup = nil - } - } - - for _, table := range l.tables { - if containsAnyPrefixes(table.Smallest(), table.Biggest(), prefixes) { - tableGroup = append(tableGroup, table) - } else { - finishGroup() - } - } - finishGroup() - - l.RUnlock() - - if len(tableGroups) == 0 { - continue - } - - opt.Infof("Dropping prefix at level %d (%d tableGroups)", l.level, len(tableGroups)) - for _, operation := range tableGroups { - cd := compactDef{ - elog: trace.New(fmt.Sprintf("Badger.L%d", l.level), "Compact"), - thisLevel: l, - nextLevel: l, - top: nil, - bot: operation, - dropPrefixes: prefixes, - } - if err := s.runCompactDef(l.level, cd); err != nil { - opt.Warningf("While running compact def: %+v. Error: %v", cd, err) - return err - } - } - } - return nil -} - -func (s *levelsController) startCompact(lc *y.Closer) { - n := s.kv.opt.NumCompactors - lc.AddRunning(n - 1) - for i := 0; i < n; i++ { - // The worker with id=0 is dedicated to L0 and L1. This is not counted - // towards the user specified NumCompactors. - go s.runCompactor(i, lc) - } -} - -func (s *levelsController) runCompactor(id int, lc *y.Closer) { - defer lc.Done() - - randomDelay := time.NewTimer(time.Duration(rand.Int31n(1000)) * time.Millisecond) - select { - case <-randomDelay.C: - case <-lc.HasBeenClosed(): - randomDelay.Stop() - return - } - - ticker := time.NewTicker(100 * time.Millisecond) - defer ticker.Stop() - - for { - select { - // Can add a done channel or other stuff. - case <-ticker.C: - prios := s.pickCompactLevels() - loop: - for _, p := range prios { - if id == 0 && p.level > 1 { - // If I'm ID zero, I only compact L0 and L1. - continue - } - if id != 0 && p.level <= 1 { - // If I'm ID non-zero, I do NOT compact L0 and L1. 
- continue - } - err := s.doCompact(id, p) - switch err { - case nil: - break loop - case errFillTables: - // pass - default: - s.kv.opt.Warningf("While running doCompact: %v\n", err) - } - } - case <-lc.HasBeenClosed(): - return - } - } -} - -// Returns true if level zero may be compacted, without accounting for compactions that already -// might be happening. -func (s *levelsController) isLevel0Compactable() bool { - return s.levels[0].numTables() >= s.kv.opt.NumLevelZeroTables -} - -// Returns true if the non-zero level may be compacted. delSize provides the size of the tables -// which are currently being compacted so that we treat them as already having started being -// compacted (because they have been, yet their size is already counted in getTotalSize). -func (l *levelHandler) isCompactable(delSize int64) bool { - return l.getTotalSize()-delSize >= l.maxTotalSize -} - -type compactionPriority struct { - level int - score float64 - dropPrefixes [][]byte -} - -// pickCompactLevel determines which level to compact. -// Based on: https://github.com/facebook/rocksdb/wiki/Leveled-Compaction -func (s *levelsController) pickCompactLevels() (prios []compactionPriority) { - // This function must use identical criteria for guaranteeing compaction's progress that - // addLevel0Table uses. - - // cstatus is checked to see if level 0's tables are already being compacted - if !s.cstatus.overlapsWith(0, infRange) && s.isLevel0Compactable() { - pri := compactionPriority{ - level: 0, - score: float64(s.levels[0].numTables()) / float64(s.kv.opt.NumLevelZeroTables), - } - prios = append(prios, pri) - } - - for i, l := range s.levels[1:] { - // Don't consider those tables that are already being compacted right now. - delSize := s.cstatus.delSize(i + 1) - - if l.isCompactable(delSize) { - pri := compactionPriority{ - level: i + 1, - score: float64(l.getTotalSize()-delSize) / float64(l.maxTotalSize), - } - prios = append(prios, pri) - } - } - // We should continue to sort the compaction priorities by score. Now that we have a dedicated - // compactor for L0 and L1, we don't need to sort by level here. - sort.Slice(prios, func(i, j int) bool { - return prios[i].score > prios[j].score - }) - return prios -} - -// checkOverlap checks if the given tables overlap with any level from the given "lev" onwards. -func (s *levelsController) checkOverlap(tables []*table.Table, lev int) bool { - kr := getKeyRange(tables...) - for i, lh := range s.levels { - if i < lev { // Skip upper levels. - continue - } - lh.RLock() - left, right := lh.overlappingTables(levelHandlerRLocked{}, kr) - lh.RUnlock() - if right-left > 0 { - return true - } - } - return false -} - -// compactBuildTables merges topTables and botTables to form a list of new tables. -func (s *levelsController) compactBuildTables( - lev int, cd compactDef) ([]*table.Table, func() error, error) { - topTables := cd.top - botTables := cd.bot - - // Check overlap of the top level with the levels which are not being - // compacted in this compaction. - hasOverlap := s.checkOverlap(cd.allTables(), cd.nextLevel.level+1) - - // Try to collect stats so that we can inform value log about GC. That would help us find which - // value log file should be GCed. - discardStats := make(map[uint32]int64) - updateStats := func(vs y.ValueStruct) { - // We don't need to store/update discard stats when badger is running in Disk-less mode. 
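
The scoring logic in pickCompactLevels above can be condensed into a standalone sketch (illustrative names and types, and it ignores the in-flight delSize adjustment the real code applies): L0 is scored by table count against its threshold, every other level by its size against its budget, and the result is sorted by descending score:

import "sort"

type levelPriority struct {
	level int
	score float64
}

// pickPriorities returns priorities for levels that are over budget,
// highest score first.
func pickPriorities(numL0Tables, l0Threshold int, sizes, budgets []int64) []levelPriority {
	var prios []levelPriority
	if numL0Tables >= l0Threshold {
		prios = append(prios, levelPriority{0, float64(numL0Tables) / float64(l0Threshold)})
	}
	for i := 1; i < len(sizes); i++ {
		if sizes[i] >= budgets[i] {
			prios = append(prios, levelPriority{i, float64(sizes[i]) / float64(budgets[i])})
		}
	}
	sort.Slice(prios, func(i, j int) bool { return prios[i].score > prios[j].score })
	return prios
}
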
- if s.kv.opt.InMemory { - return - } - if vs.Meta&bitValuePointer > 0 { - var vp valuePointer - vp.Decode(vs.Value) - discardStats[vp.Fid] += int64(vp.Len) - } - } - - // Create iterators across all the tables involved first. - var iters []y.Iterator - switch { - case lev == 0: - iters = appendIteratorsReversed(iters, topTables, false) - case len(topTables) > 0: - y.AssertTrue(len(topTables) == 1) - iters = []y.Iterator{topTables[0].NewIterator(false)} - } - - // Next level has level>=1 and we can use ConcatIterator as key ranges do not overlap. - var valid []*table.Table - -nextTable: - for _, table := range botTables { - if len(cd.dropPrefixes) > 0 { - for _, prefix := range cd.dropPrefixes { - if bytes.HasPrefix(table.Smallest(), prefix) && - bytes.HasPrefix(table.Biggest(), prefix) { - // All the keys in this table have the dropPrefix. So, this - // table does not need to be in the iterator and can be - // dropped immediately. - continue nextTable - } - } - } - valid = append(valid, table) - } - iters = append(iters, table.NewConcatIterator(valid, false)) - it := table.NewMergeIterator(iters, false) - defer it.Close() // Important to close the iterator to do ref counting. - - it.Rewind() - - // Pick a discard ts, so we can discard versions below this ts. We should - // never discard any versions starting from above this timestamp, because - // that would affect the snapshot view guarantee provided by transactions. - discardTs := s.kv.orc.discardAtOrBelow() - - var numBuilds, numVersions int - var lastKey, skipKey []byte - var vp valuePointer - var newTables []*table.Table - mu := new(sync.Mutex) // Guards newTables - - inflightBuilders := y.NewThrottle(5) - for it.Valid() { - timeStart := time.Now() - dk, err := s.kv.registry.latestDataKey() - if err != nil { - return nil, nil, - y.Wrapf(err, "Error while retrieving datakey in levelsController.compactBuildTables") - } - bopts := buildTableOptions(s.kv.opt) - bopts.DataKey = dk - // Builder does not need cache but the same options are used for opening table. - bopts.BlockCache = s.kv.blockCache - bopts.IndexCache = s.kv.indexCache - builder := table.NewTableBuilder(bopts) - var numKeys, numSkips uint64 - for ; it.Valid(); it.Next() { - // See if we need to skip the prefix. - if len(cd.dropPrefixes) > 0 && hasAnyPrefixes(it.Key(), cd.dropPrefixes) { - numSkips++ - updateStats(it.Value()) - continue - } - - // See if we need to skip this key. - if len(skipKey) > 0 { - if y.SameKey(it.Key(), skipKey) { - numSkips++ - updateStats(it.Value()) - continue - } else { - skipKey = skipKey[:0] - } - } - - if !y.SameKey(it.Key(), lastKey) { - if builder.ReachedCapacity(s.kv.opt.MaxTableSize) { - // Only break if we are on a different key, and have reached capacity. We want - // to ensure that all versions of the key are stored in the same sstable, and - // not divided across multiple tables at the same level. - break - } - lastKey = y.SafeCopy(lastKey, it.Key()) - numVersions = 0 - } - - vs := it.Value() - version := y.ParseTs(it.Key()) - // Do not discard entries inserted by merge operator. These entries will be - // discarded once they're merged - if version <= discardTs && vs.Meta&bitMergeEntry == 0 { - // Keep track of the number of versions encountered for this key. Only consider the - // versions which are below the minReadTs, otherwise, we might end up discarding the - // only valid version for a running transaction. 
- numVersions++ - - // Keep the current version and discard all the next versions if - // - The `discardEarlierVersions` bit is set OR - // - We've already processed `NumVersionsToKeep` number of versions - // (including the current item being processed) - lastValidVersion := vs.Meta&bitDiscardEarlierVersions > 0 || - numVersions == s.kv.opt.NumVersionsToKeep - - isExpired := isDeletedOrExpired(vs.Meta, vs.ExpiresAt) - - if isExpired || lastValidVersion { - // If this version of the key is deleted or expired, skip all the rest of the - // versions. Ensure that we're only removing versions below readTs. - skipKey = y.SafeCopy(skipKey, it.Key()) - - switch { - // Add the key to the table only if it has not expired. - // We don't want to add the deleted/expired keys. - case !isExpired && lastValidVersion: - // Add this key. We have set skipKey, so the following key versions - // would be skipped. - case hasOverlap: - // If this key range has overlap with lower levels, then keep the deletion - // marker with the latest version, discarding the rest. We have set skipKey, - // so the following key versions would be skipped. - default: - // If no overlap, we can skip all the versions, by continuing here. - numSkips++ - updateStats(vs) - continue // Skip adding this key. - } - } - } - numKeys++ - if vs.Meta&bitValuePointer > 0 { - vp.Decode(vs.Value) - } - builder.Add(it.Key(), vs, vp.Len) - } - // It was true that it.Valid() at least once in the loop above, which means we - // called Add() at least once, and builder is not Empty(). - s.kv.opt.Debugf("LOG Compact. Added %d keys. Skipped %d keys. Iteration took: %v", - numKeys, numSkips, time.Since(timeStart)) - if builder.Empty() { - continue - } - numBuilds++ - fileID := s.reserveFileID() - if err := inflightBuilders.Do(); err != nil { - // Can't return from here, until I decrRef all the tables that I built so far. - break - } - go func(builder *table.Builder) { - defer builder.Close() - defer inflightBuilders.Done(err) - - build := func(fileID uint64) (*table.Table, error) { - fd, err := y.CreateSyncedFile(table.NewFilename(fileID, s.kv.opt.Dir), true) - if err != nil { - return nil, errors.Wrapf(err, "While opening new table: %d", fileID) - } - - if _, err := fd.Write(builder.Finish()); err != nil { - return nil, errors.Wrapf(err, "Unable to write to file: %d", fileID) - } - tbl, err := table.OpenTable(fd, bopts) - // decrRef is added below. - return tbl, errors.Wrapf(err, "Unable to open table: %q", fd.Name()) - } - - var tbl *table.Table - var err error - if s.kv.opt.InMemory { - tbl, err = table.OpenInMemoryTable(builder.Finish(), fileID, &bopts) - } else { - tbl, err = build(fileID) - } - - // If we couldn't build the table, return fast. - if err != nil { - return - } - - mu.Lock() - newTables = append(newTables, tbl) - mu.Unlock() - }(builder) - } - - // Wait for all table builders to finish and also for newTables accumulator to finish. - err := inflightBuilders.Finish() - if err == nil { - // Ensure created files' directory entries are visible. We don't mind the extra latency - // from not doing this ASAP after all file creation has finished because this is a - // background operation. - err = s.kv.syncDir(s.kv.opt.Dir) - } - - if err != nil { - // An error happened. Delete all the newly created table files (by calling DecrRef - // -- we're the only holders of a ref). 
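
The retention rules in the loop above can be distilled into a pure decision function. This is our own restatement for clarity, not badger code: for a version at or below discardTs, it reports whether the version is written to the new table and whether all older versions of the key may now be skipped.

// decideVersion restates the compaction retention rule. numVersions counts the
// versions of this key seen so far (at or below discardTs); hasOverlap means
// lower levels may still hold older versions of the key.
func decideVersion(numVersions, numToKeep int, discardEarlier, expired, hasOverlap bool) (keep, skipOlder bool) {
	lastValid := discardEarlier || numVersions == numToKeep
	switch {
	case !expired && !lastValid:
		return true, false // still within the versions we keep
	case !expired && lastValid:
		return true, true // keep this version, drop everything older
	case hasOverlap:
		return true, true // keep the tombstone so lower levels stay shadowed
	default:
		return false, true // no overlap below: drop the tombstone and older versions
	}
}
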
- _ = decrRefs(newTables) - return nil, nil, errors.Wrapf(err, "while running compactions for: %+v", cd) - } - - sort.Slice(newTables, func(i, j int) bool { - return y.CompareKeys(newTables[i].Biggest(), newTables[j].Biggest()) < 0 - }) - s.kv.vlog.updateDiscardStats(discardStats) - s.kv.opt.Debugf("Discard stats: %v", discardStats) - return newTables, func() error { return decrRefs(newTables) }, nil -} - -func buildChangeSet(cd *compactDef, newTables []*table.Table) pb.ManifestChangeSet { - changes := []*pb.ManifestChange{} - for _, table := range newTables { - changes = append(changes, - newCreateChange(table.ID(), cd.nextLevel.level, table.KeyID(), table.CompressionType())) - } - for _, table := range cd.top { - // Add a delete change only if the table is not in memory. - if !table.IsInmemory { - changes = append(changes, newDeleteChange(table.ID())) - } - } - for _, table := range cd.bot { - changes = append(changes, newDeleteChange(table.ID())) - } - return pb.ManifestChangeSet{Changes: changes} -} - -func hasAnyPrefixes(s []byte, listOfPrefixes [][]byte) bool { - for _, prefix := range listOfPrefixes { - if bytes.HasPrefix(s, prefix) { - return true - } - } - - return false -} - -func containsPrefix(smallValue, largeValue, prefix []byte) bool { - if bytes.HasPrefix(smallValue, prefix) { - return true - } - if bytes.HasPrefix(largeValue, prefix) { - return true - } - if bytes.Compare(prefix, smallValue) > 0 && - bytes.Compare(prefix, largeValue) < 0 { - return true - } - - return false -} - -func containsAnyPrefixes(smallValue, largeValue []byte, listOfPrefixes [][]byte) bool { - for _, prefix := range listOfPrefixes { - if containsPrefix(smallValue, largeValue, prefix) { - return true - } - } - - return false -} - -type compactDef struct { - elog trace.Trace - - thisLevel *levelHandler - nextLevel *levelHandler - - top []*table.Table - bot []*table.Table - - thisRange keyRange - nextRange keyRange - - thisSize int64 - - dropPrefixes [][]byte -} - -func (cd *compactDef) lockLevels() { - cd.thisLevel.RLock() - cd.nextLevel.RLock() -} - -func (cd *compactDef) unlockLevels() { - cd.nextLevel.RUnlock() - cd.thisLevel.RUnlock() -} - -func (cd *compactDef) allTables() []*table.Table { - ret := make([]*table.Table, 0, len(cd.top)+len(cd.bot)) - ret = append(ret, cd.top...) - ret = append(ret, cd.bot...) - return ret -} - -func (s *levelsController) fillTablesL0(cd *compactDef) bool { - cd.lockLevels() - defer cd.unlockLevels() - - cd.top = make([]*table.Table, len(cd.thisLevel.tables)) - copy(cd.top, cd.thisLevel.tables) - if len(cd.top) == 0 { - return false - } - cd.thisRange = infRange - - kr := getKeyRange(cd.top...) - left, right := cd.nextLevel.overlappingTables(levelHandlerRLocked{}, kr) - cd.bot = make([]*table.Table, right-left) - copy(cd.bot, cd.nextLevel.tables[left:right]) - - if len(cd.bot) == 0 { - cd.nextRange = kr - } else { - cd.nextRange = getKeyRange(cd.bot...) - } - - if !s.cstatus.compareAndAdd(thisAndNextLevelRLocked{}, *cd) { - return false - } - - return true -} - -// sortByOverlap sorts tables in increasing order of overlap with next level. 
-func (s *levelsController) sortByOverlap(tables []*table.Table, cd *compactDef) {
- if len(tables) == 0 || cd.nextLevel == nil {
- return
- }
-
- tableOverlap := make([]int, len(tables))
- for i := range tables {
- // Get the key range for the table.
- tableRange := getKeyRange(tables[i])
- // Get its overlap with the next level.
- left, right := cd.nextLevel.overlappingTables(levelHandlerRLocked{}, tableRange)
- tableOverlap[i] = right - left
- }
-
- sort.Slice(tables, func(i, j int) bool {
- return tableOverlap[i] < tableOverlap[j]
- })
-}
-
-func (s *levelsController) fillTables(cd *compactDef) bool {
- cd.lockLevels()
- defer cd.unlockLevels()
-
- tables := make([]*table.Table, len(cd.thisLevel.tables))
- copy(tables, cd.thisLevel.tables)
- if len(tables) == 0 {
- return false
- }
-
- // We want to pick files from the current level in order of increasing overlap with the next
- // level's tables. The idea is to first compact the file from the current level that has the
- // least overlap with the next level, which gives us lower write amplification.
- s.sortByOverlap(tables, cd)
-
- for _, t := range tables {
- cd.thisSize = t.Size()
- cd.thisRange = getKeyRange(t)
- if s.cstatus.overlapsWith(cd.thisLevel.level, cd.thisRange) {
- continue
- }
- cd.top = []*table.Table{t}
- left, right := cd.nextLevel.overlappingTables(levelHandlerRLocked{}, cd.thisRange)
-
- // Sometimes the line below, make([]*table.Table, right-left), panics with
- // (runtime error: makeslice: len out of range). One reason this can happen is when
- // right < left. We don't know how to reproduce it as of now, so we log it here to
- // get more context.
- if right < left {
- s.kv.opt.Errorf("right: %d is less than left: %d in overlappingTables for current "+
- "level: %d, next level: %d, key range(%s, %s)", right, left, cd.thisLevel.level,
- cd.nextLevel.level, cd.thisRange.left, cd.thisRange.right)
-
- continue
- }
-
- cd.bot = make([]*table.Table, right-left)
- copy(cd.bot, cd.nextLevel.tables[left:right])
-
- if len(cd.bot) == 0 {
- cd.bot = []*table.Table{}
- cd.nextRange = cd.thisRange
- if !s.cstatus.compareAndAdd(thisAndNextLevelRLocked{}, *cd) {
- continue
- }
- return true
- }
- cd.nextRange = getKeyRange(cd.bot...)
-
- if s.cstatus.overlapsWith(cd.nextLevel.level, cd.nextRange) {
- continue
- }
- if !s.cstatus.compareAndAdd(thisAndNextLevelRLocked{}, *cd) {
- continue
- }
- return true
- }
- return false
-}
-
-func (s *levelsController) runCompactDef(l int, cd compactDef) (err error) {
- timeStart := time.Now()
-
- thisLevel := cd.thisLevel
- nextLevel := cd.nextLevel
-
- // Tables should never be moved directly between levels; they should always be rewritten to
- // allow discarding invalid versions.
-
- newTables, decr, err := s.compactBuildTables(l, cd)
- if err != nil {
- return err
- }
- defer func() {
- // Only assign decr()'s error to err if err is not already set.
- if decErr := decr(); err == nil {
- err = decErr
- }
- }()
- changeSet := buildChangeSet(&cd, newTables)
-
- // We write to the manifest _before_ we delete files (and after we created files).
- if err := s.kv.manifest.addChanges(changeSet.Changes); err != nil {
- return err
- }
-
- // See the comment earlier in this function about the ordering of these ops, and the order in
- // which we access levels when reading.
- if err := nextLevel.replaceTables(cd.bot, newTables); err != nil {
- return err
- }
- if err := thisLevel.deleteTables(cd.top); err != nil {
- return err
- }
-
- // Note: For level 0, while doCompact is running, it is possible that new tables are added.
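
fillTables above leans on levelHandler.overlappingTables to get a half-open window [left, right) of next-level tables intersecting a key range. One plausible implementation, shown purely as a sketch (the real helper lives elsewhere in badger and may differ), uses two binary searches over the sorted, non-overlapping tables of a level >= 1:

import (
	"bytes"
	"sort"
)

// overlappingRange returns indices [lo, hi) of tables whose key ranges
// intersect [left, right]. smallest[i] and biggest[i] bound table i; tables
// are assumed sorted and non-overlapping, as on any level >= 1.
func overlappingRange(smallest, biggest [][]byte, left, right []byte) (int, int) {
	lo := sort.Search(len(biggest), func(i int) bool {
		return bytes.Compare(left, biggest[i]) <= 0 // first table ending at or after left
	})
	hi := sort.Search(len(smallest), func(i int) bool {
		return bytes.Compare(right, smallest[i]) < 0 // first table starting after right
	})
	return lo, hi
}
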
- // However, the tables are added only to the end, so it is ok to just delete the first table. - - s.kv.opt.Infof("LOG Compact %d->%d, del %d tables, add %d tables, took %v\n", - thisLevel.level, nextLevel.level, len(cd.top)+len(cd.bot), - len(newTables), time.Since(timeStart)) - return nil -} - -var errFillTables = errors.New("Unable to fill tables") - -// doCompact picks some table on level l and compacts it away to the next level. -func (s *levelsController) doCompact(id int, p compactionPriority) error { - l := p.level - y.AssertTrue(l+1 < s.kv.opt.MaxLevels) // Sanity check. - - cd := compactDef{ - elog: trace.New(fmt.Sprintf("Badger.L%d", l), "Compact"), - thisLevel: s.levels[l], - nextLevel: s.levels[l+1], - dropPrefixes: p.dropPrefixes, - } - cd.elog.SetMaxEvents(100) - defer cd.elog.Finish() - - s.kv.opt.Debugf("[Compactor: %d] Attempting to run compaction: %+v", id, p) - - // While picking tables to be compacted, both levels' tables are expected to - // remain unchanged. - if l == 0 { - if !s.fillTablesL0(&cd) { - return errFillTables - } - - } else { - if !s.fillTables(&cd) { - return errFillTables - } - } - defer s.cstatus.delete(cd) // Remove the ranges from compaction status. - - s.kv.opt.Infof("[Compactor: %d] Running compaction: %+v for level: %d\n", - id, p, cd.thisLevel.level) - s.cstatus.toLog(cd.elog) - if err := s.runCompactDef(l, cd); err != nil { - // This compaction couldn't be done successfully. - s.kv.opt.Warningf("[Compactor: %d] LOG Compact FAILED with error: %+v: %+v", id, err, cd) - return err - } - - s.cstatus.toLog(cd.elog) - s.kv.opt.Infof("[Compactor: %d] Compaction for level: %d DONE", id, cd.thisLevel.level) - return nil -} - -func (s *levelsController) addLevel0Table(t *table.Table) error { - // Add table to manifest file only if it is not opened in memory. We don't want to add a table - // to the manifest file if it exists only in memory. - if !t.IsInmemory { - // We update the manifest _before_ the table becomes part of a levelHandler, because at that - // point it could get used in some compaction. This ensures the manifest file gets updated in - // the proper order. (That means this update happens before that of some compaction which - // deletes the table.) - err := s.kv.manifest.addChanges([]*pb.ManifestChange{ - newCreateChange(t.ID(), 0, t.KeyID(), t.CompressionType()), - }) - if err != nil { - return err - } - } - - for !s.levels[0].tryAddLevel0Table(t) { - // Stall. Make sure all levels are healthy before we unstall. - var timeStart time.Time - { - s.kv.opt.Infof("STALLED STALLED STALLED: %v\n", time.Since(s.lastUnstalled)) - s.cstatus.RLock() - for i := 0; i < s.kv.opt.MaxLevels; i++ { - s.kv.opt.Debugf("level=%d. Status=%s Size=%d\n", - i, s.cstatus.levels[i].debug(), s.levels[i].getTotalSize()) - } - s.cstatus.RUnlock() - timeStart = time.Now() - } - // Before we unstall, we need to make sure that level 0 is healthy. Otherwise, we - // will very quickly fill up level 0 again. - for i := 0; ; i++ { - // It's crucial that this behavior replicates pickCompactLevels' behavior in - // computing compactability in order to guarantee progress. - // Break the loop once L0 has enough space to accommodate new tables. - if !s.isLevel0Compactable() { - break - } - time.Sleep(10 * time.Millisecond) - if i%100 == 0 { - prios := s.pickCompactLevels() - s.kv.opt.Debugf("Waiting to add level 0 table. 
Compaction priorities: %+v\n", prios) - i = 0 - } - } - { - s.kv.opt.Debugf("UNSTALLED UNSTALLED UNSTALLED: %v\n", time.Since(timeStart)) - s.lastUnstalled = time.Now() - } - } - - return nil -} - -func (s *levelsController) close() error { - err := s.cleanupLevels() - return errors.Wrap(err, "levelsController.Close") -} - -// get returns the found value if any. If not found, we return nil. -func (s *levelsController) get(key []byte, maxVs *y.ValueStruct, startLevel int) ( - y.ValueStruct, error) { - if s.kv.IsClosed() { - return y.ValueStruct{}, ErrDBClosed - } - // It's important that we iterate the levels from 0 on upward. The reason is, if we iterated - // in opposite order, or in parallel (naively calling all the h.RLock() in some order) we could - // read level L's tables post-compaction and level L+1's tables pre-compaction. (If we do - // parallelize this, we will need to call the h.RLock() function by increasing order of level - // number.) - version := y.ParseTs(key) - for _, h := range s.levels { - // Ignore all levels below startLevel. This is useful for GC when L0 is kept in memory. - if h.level < startLevel { - continue - } - vs, err := h.get(key) // Calls h.RLock() and h.RUnlock(). - if err != nil { - return y.ValueStruct{}, errors.Wrapf(err, "get key: %q", key) - } - if vs.Value == nil && vs.Meta == 0 { - continue - } - if maxVs == nil || vs.Version == version { - return vs, nil - } - if maxVs.Version < vs.Version { - *maxVs = vs - } - } - if maxVs != nil { - return *maxVs, nil - } - return y.ValueStruct{}, nil -} - -func appendIteratorsReversed(out []y.Iterator, th []*table.Table, reversed bool) []y.Iterator { - for i := len(th) - 1; i >= 0; i-- { - // This will increment the reference of the table handler. - out = append(out, th[i].NewIterator(reversed)) - } - return out -} - -// appendIterators appends iterators to an array of iterators, for merging. -// Note: This obtains references for the table handlers. Remember to close these iterators. -func (s *levelsController) appendIterators( - iters []y.Iterator, opt *IteratorOptions) []y.Iterator { - // Just like with get, it's important we iterate the levels from 0 on upward, to avoid missing - // data when there's a compaction. - for _, level := range s.levels { - iters = level.appendIterators(iters, opt) - } - return iters -} - -// TableInfo represents the information about a table. -type TableInfo struct { - ID uint64 - Level int - Left []byte - Right []byte - KeyCount uint64 // Number of keys in the table - EstimatedSz uint64 -} - -func (s *levelsController) getTableInfo(withKeysCount bool) (result []TableInfo) { - for _, l := range s.levels { - l.RLock() - for _, t := range l.tables { - var count uint64 - if withKeysCount { - it := t.NewIterator(false) - for it.Rewind(); it.Valid(); it.Next() { - count++ - } - it.Close() - } - - info := TableInfo{ - ID: t.ID(), - Level: l.level, - Left: t.Smallest(), - Right: t.Biggest(), - KeyCount: count, - EstimatedSz: t.EstimatedSize(), - } - result = append(result, info) - } - l.RUnlock() - } - sort.Slice(result, func(i, j int) bool { - if result[i].Level != result[j].Level { - return result[i].Level < result[j].Level - } - return result[i].ID < result[j].ID - }) - return -} - -// verifyChecksum verifies checksum for all tables on all levels. 
-func (s *levelsController) verifyChecksum() error { - var tables []*table.Table - for _, l := range s.levels { - l.RLock() - tables = tables[:0] - for _, t := range l.tables { - tables = append(tables, t) - t.IncrRef() - } - l.RUnlock() - - for _, t := range tables { - errChkVerify := t.VerifyChecksum() - if err := t.DecrRef(); err != nil { - s.kv.opt.Errorf("unable to decrease reference of table: %s while "+ - "verifying checksum with error: %s", t.Filename(), err) - } - - if errChkVerify != nil { - return errChkVerify - } - } - } - - return nil -} diff --git a/vendor/github.com/dgraph-io/badger/v2/logger.go b/vendor/github.com/dgraph-io/badger/v2/logger.go deleted file mode 100644 index c7b4cd6c..00000000 --- a/vendor/github.com/dgraph-io/badger/v2/logger.go +++ /dev/null @@ -1,105 +0,0 @@ -/* - * Copyright 2018 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package badger - -import ( - "log" - "os" -) - -// Logger is implemented by any logging system that is used for standard logs. -type Logger interface { - Errorf(string, ...interface{}) - Warningf(string, ...interface{}) - Infof(string, ...interface{}) - Debugf(string, ...interface{}) -} - -// Errorf logs an ERROR log message to the logger specified in opts or to the -// global logger if no logger is specified in opts. -func (opt *Options) Errorf(format string, v ...interface{}) { - if opt.Logger == nil { - return - } - opt.Logger.Errorf(format, v...) -} - -// Infof logs an INFO message to the logger specified in opts. -func (opt *Options) Infof(format string, v ...interface{}) { - if opt.Logger == nil { - return - } - opt.Logger.Infof(format, v...) -} - -// Warningf logs a WARNING message to the logger specified in opts. -func (opt *Options) Warningf(format string, v ...interface{}) { - if opt.Logger == nil { - return - } - opt.Logger.Warningf(format, v...) -} - -// Debugf logs a DEBUG message to the logger specified in opts. -func (opt *Options) Debugf(format string, v ...interface{}) { - if opt.Logger == nil { - return - } - opt.Logger.Debugf(format, v...) -} - -type loggingLevel int - -const ( - DEBUG loggingLevel = iota - INFO - WARNING - ERROR -) - -type defaultLog struct { - *log.Logger - level loggingLevel -} - -func defaultLogger(level loggingLevel) *defaultLog { - return &defaultLog{Logger: log.New(os.Stderr, "badger ", log.LstdFlags), level: level} -} - -func (l *defaultLog) Errorf(f string, v ...interface{}) { - if l.level <= ERROR { - l.Printf("ERROR: "+f, v...) - } -} - -func (l *defaultLog) Warningf(f string, v ...interface{}) { - if l.level <= WARNING { - l.Printf("WARNING: "+f, v...) - } -} - -func (l *defaultLog) Infof(f string, v ...interface{}) { - if l.level <= INFO { - l.Printf("INFO: "+f, v...) - } -} - -func (l *defaultLog) Debugf(f string, v ...interface{}) { - if l.level <= DEBUG { - l.Printf("DEBUG: "+f, v...) 
- } -} diff --git a/vendor/github.com/dgraph-io/badger/v2/managed_db.go b/vendor/github.com/dgraph-io/badger/v2/managed_db.go deleted file mode 100644 index 23c79884..00000000 --- a/vendor/github.com/dgraph-io/badger/v2/managed_db.go +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Copyright 2017 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package badger - -// OpenManaged returns a new DB, which allows more control over setting -// transaction timestamps, aka managed mode. -// -// This is only useful for databases built on top of Badger (like Dgraph), and -// can be ignored by most users. -func OpenManaged(opts Options) (*DB, error) { - opts.managedTxns = true - return Open(opts) -} - -// NewTransactionAt follows the same logic as DB.NewTransaction(), but uses the -// provided read timestamp. -// -// This is only useful for databases built on top of Badger (like Dgraph), and -// can be ignored by most users. -func (db *DB) NewTransactionAt(readTs uint64, update bool) *Txn { - if !db.opt.managedTxns { - panic("Cannot use NewTransactionAt with managedDB=false. Use NewTransaction instead.") - } - txn := db.newTransaction(update, true) - txn.readTs = readTs - return txn -} - -// NewWriteBatchAt is similar to NewWriteBatch but it allows user to set the commit timestamp. -// NewWriteBatchAt is supposed to be used only in the managed mode. -func (db *DB) NewWriteBatchAt(commitTs uint64) *WriteBatch { - if !db.opt.managedTxns { - panic("cannot use NewWriteBatchAt with managedDB=false. Use NewWriteBatch instead") - } - - wb := db.newWriteBatch(true) - wb.commitTs = commitTs - wb.txn.commitTs = commitTs - return wb -} -func (db *DB) NewManagedWriteBatch() *WriteBatch { - if !db.opt.managedTxns { - panic("cannot use NewManagedWriteBatch with managedDB=false. Use NewWriteBatch instead") - } - - wb := db.newWriteBatch(true) - return wb -} - -// CommitAt commits the transaction, following the same logic as Commit(), but -// at the given commit timestamp. This will panic if not used with managed transactions. -// -// This is only useful for databases built on top of Badger (like Dgraph), and -// can be ignored by most users. -func (txn *Txn) CommitAt(commitTs uint64, callback func(error)) error { - if !txn.db.opt.managedTxns { - panic("Cannot use CommitAt with managedDB=false. Use Commit instead.") - } - txn.commitTs = commitTs - if callback == nil { - return txn.Commit() - } - txn.CommitWith(callback) - return nil -} - -// SetDiscardTs sets a timestamp at or below which, any invalid or deleted -// versions can be discarded from the LSM tree, and thence from the value log to -// reclaim disk space. Can only be used with managed transactions. 
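
A short usage sketch of the managed-mode API above (the path and timestamps are illustrative; in managed mode the caller is responsible for choosing monotonically increasing read and commit timestamps):

import badger "github.com/dgraph-io/badger/v2"

func managedExample() error {
	db, err := badger.OpenManaged(badger.DefaultOptions("/tmp/managed-db"))
	if err != nil {
		return err
	}
	defer db.Close()

	// Read at ts=5, write, then commit at ts=6. Both timestamps are ours to manage.
	txn := db.NewTransactionAt(5, true) // update=true
	defer txn.Discard()
	if err := txn.SetEntry(badger.NewEntry([]byte("k"), []byte("v"))); err != nil {
		return err
	}
	return txn.CommitAt(6, nil) // nil callback: commit synchronously
}
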
-func (db *DB) SetDiscardTs(ts uint64) { - if !db.opt.managedTxns { - panic("Cannot use SetDiscardTs with managedDB=false.") - } - db.orc.setDiscardTs(ts) -} diff --git a/vendor/github.com/dgraph-io/badger/v2/manifest.go b/vendor/github.com/dgraph-io/badger/v2/manifest.go deleted file mode 100644 index e987c12c..00000000 --- a/vendor/github.com/dgraph-io/badger/v2/manifest.go +++ /dev/null @@ -1,475 +0,0 @@ -/* - * Copyright 2017 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package badger - -import ( - "bufio" - "bytes" - "encoding/binary" - "fmt" - "hash/crc32" - "io" - "os" - "path/filepath" - "sync" - - "github.com/dgraph-io/badger/v2/options" - "github.com/dgraph-io/badger/v2/pb" - "github.com/dgraph-io/badger/v2/y" - "github.com/golang/protobuf/proto" - "github.com/pkg/errors" -) - -// Manifest represents the contents of the MANIFEST file in a Badger store. -// -// The MANIFEST file describes the startup state of the db -- all LSM files and what level they're -// at. -// -// It consists of a sequence of ManifestChangeSet objects. Each of these is treated atomically, -// and contains a sequence of ManifestChange's (file creations/deletions) which we use to -// reconstruct the manifest at startup. -type Manifest struct { - Levels []levelManifest - Tables map[uint64]TableManifest - - // Contains total number of creation and deletion changes in the manifest -- used to compute - // whether it'd be useful to rewrite the manifest. - Creations int - Deletions int -} - -func createManifest() Manifest { - levels := make([]levelManifest, 0) - return Manifest{ - Levels: levels, - Tables: make(map[uint64]TableManifest), - } -} - -// levelManifest contains information about LSM tree levels -// in the MANIFEST file. -type levelManifest struct { - Tables map[uint64]struct{} // Set of table id's -} - -// TableManifest contains information about a specific table -// in the LSM tree. -type TableManifest struct { - Level uint8 - KeyID uint64 - Compression options.CompressionType -} - -// manifestFile holds the file pointer (and other info) about the manifest file, which is a log -// file we append to. -type manifestFile struct { - fp *os.File - directory string - // We make this configurable so that unit tests can hit rewrite() code quickly - deletionsRewriteThreshold int - - // Guards appends, which includes access to the manifest field. - appendLock sync.Mutex - - // Used to track the current state of the manifest, used when rewriting. - manifest Manifest - - // Used to indicate if badger was opened in InMemory mode. - inMemory bool -} - -const ( - // ManifestFilename is the filename for the manifest file. - ManifestFilename = "MANIFEST" - manifestRewriteFilename = "MANIFEST-REWRITE" - manifestDeletionsRewriteThreshold = 10000 - manifestDeletionsRatio = 10 -) - -// asChanges returns a sequence of changes that could be used to recreate the Manifest in its -// present state. 
-func (m *Manifest) asChanges() []*pb.ManifestChange {
- changes := make([]*pb.ManifestChange, 0, len(m.Tables))
- for id, tm := range m.Tables {
- changes = append(changes, newCreateChange(id, int(tm.Level), tm.KeyID, tm.Compression))
- }
- return changes
-}
-
-func (m *Manifest) clone() Manifest {
- changeSet := pb.ManifestChangeSet{Changes: m.asChanges()}
- ret := createManifest()
- y.Check(applyChangeSet(&ret, &changeSet))
- return ret
-}
-
-// openOrCreateManifestFile opens a Badger manifest file if it exists, or creates one if it
-// doesn't exist.
-func openOrCreateManifestFile(opt Options) (
- ret *manifestFile, result Manifest, err error) {
- if opt.InMemory {
- return &manifestFile{inMemory: true}, Manifest{}, nil
- }
- return helpOpenOrCreateManifestFile(opt.Dir, opt.ReadOnly, manifestDeletionsRewriteThreshold)
-}
-
-func helpOpenOrCreateManifestFile(dir string, readOnly bool, deletionsThreshold int) (
- *manifestFile, Manifest, error) {
-
- path := filepath.Join(dir, ManifestFilename)
- var flags uint32
- if readOnly {
- flags |= y.ReadOnly
- }
- fp, err := y.OpenExistingFile(path, flags) // We explicitly sync in addChanges, outside the lock.
- if err != nil {
- if !os.IsNotExist(err) {
- return nil, Manifest{}, err
- }
- if readOnly {
- return nil, Manifest{}, fmt.Errorf("no manifest found, required for read-only db")
- }
- m := createManifest()
- fp, netCreations, err := helpRewrite(dir, &m)
- if err != nil {
- return nil, Manifest{}, err
- }
- y.AssertTrue(netCreations == 0)
- mf := &manifestFile{
- fp: fp,
- directory: dir,
- manifest: m.clone(),
- deletionsRewriteThreshold: deletionsThreshold,
- }
- return mf, m, nil
- }
-
- manifest, truncOffset, err := ReplayManifestFile(fp)
- if err != nil {
- _ = fp.Close()
- return nil, Manifest{}, err
- }
-
- if !readOnly {
- // Truncate the file so we don't have a half-written entry at the end.
- if err := fp.Truncate(truncOffset); err != nil {
- _ = fp.Close()
- return nil, Manifest{}, err
- }
- }
- if _, err = fp.Seek(0, io.SeekEnd); err != nil {
- _ = fp.Close()
- return nil, Manifest{}, err
- }
-
- mf := &manifestFile{
- fp: fp,
- directory: dir,
- manifest: manifest.clone(),
- deletionsRewriteThreshold: deletionsThreshold,
- }
- return mf, manifest, nil
-}
-
-func (mf *manifestFile) close() error {
- if mf.inMemory {
- return nil
- }
- return mf.fp.Close()
-}
-
-// addChanges writes a batch of changes, atomically, to the file. By "atomically" we mean that
-// when we replay the MANIFEST file, we'll either replay all the changes or none of them. (The
-// truth of this depends on the filesystem -- some might append garbage data if a system crash
-// happens at the wrong time.)
-func (mf *manifestFile) addChanges(changesParam []*pb.ManifestChange) error { - if mf.inMemory { - return nil - } - changes := pb.ManifestChangeSet{Changes: changesParam} - buf, err := proto.Marshal(&changes) - if err != nil { - return err - } - - // Maybe we could use O_APPEND instead (on certain file systems) - mf.appendLock.Lock() - if err := applyChangeSet(&mf.manifest, &changes); err != nil { - mf.appendLock.Unlock() - return err - } - // Rewrite manifest if it'd shrink by 1/10 and it's big enough to care - if mf.manifest.Deletions > mf.deletionsRewriteThreshold && - mf.manifest.Deletions > manifestDeletionsRatio*(mf.manifest.Creations-mf.manifest.Deletions) { - if err := mf.rewrite(); err != nil { - mf.appendLock.Unlock() - return err - } - } else { - var lenCrcBuf [8]byte - binary.BigEndian.PutUint32(lenCrcBuf[0:4], uint32(len(buf))) - binary.BigEndian.PutUint32(lenCrcBuf[4:8], crc32.Checksum(buf, y.CastagnoliCrcTable)) - buf = append(lenCrcBuf[:], buf...) - if _, err := mf.fp.Write(buf); err != nil { - mf.appendLock.Unlock() - return err - } - } - - mf.appendLock.Unlock() - return mf.fp.Sync() -} - -// Has to be 4 bytes. The value can never change, ever, anyway. -var magicText = [4]byte{'B', 'd', 'g', 'r'} - -// The magic version number. -const magicVersion = 7 - -func helpRewrite(dir string, m *Manifest) (*os.File, int, error) { - rewritePath := filepath.Join(dir, manifestRewriteFilename) - // We explicitly sync. - fp, err := y.OpenTruncFile(rewritePath, false) - if err != nil { - return nil, 0, err - } - - buf := make([]byte, 8) - copy(buf[0:4], magicText[:]) - binary.BigEndian.PutUint32(buf[4:8], magicVersion) - - netCreations := len(m.Tables) - changes := m.asChanges() - set := pb.ManifestChangeSet{Changes: changes} - - changeBuf, err := proto.Marshal(&set) - if err != nil { - fp.Close() - return nil, 0, err - } - var lenCrcBuf [8]byte - binary.BigEndian.PutUint32(lenCrcBuf[0:4], uint32(len(changeBuf))) - binary.BigEndian.PutUint32(lenCrcBuf[4:8], crc32.Checksum(changeBuf, y.CastagnoliCrcTable)) - buf = append(buf, lenCrcBuf[:]...) - buf = append(buf, changeBuf...) - if _, err := fp.Write(buf); err != nil { - fp.Close() - return nil, 0, err - } - if err := fp.Sync(); err != nil { - fp.Close() - return nil, 0, err - } - - // In Windows the files should be closed before doing a Rename. - if err = fp.Close(); err != nil { - return nil, 0, err - } - manifestPath := filepath.Join(dir, ManifestFilename) - if err := os.Rename(rewritePath, manifestPath); err != nil { - return nil, 0, err - } - fp, err = y.OpenExistingFile(manifestPath, 0) - if err != nil { - return nil, 0, err - } - if _, err := fp.Seek(0, io.SeekEnd); err != nil { - fp.Close() - return nil, 0, err - } - if err := syncDir(dir); err != nil { - fp.Close() - return nil, 0, err - } - - return fp, netCreations, nil -} - -// Must be called while appendLock is held. -func (mf *manifestFile) rewrite() error { - // In Windows the files should be closed before doing a Rename. 
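
The framing that addChanges appends above is small enough to show in isolation. A sketch of the record layout under the same assumptions as the code, namely a 4-byte big-endian length, a CRC32-Castagnoli checksum of the payload, then the marshalled changeset:

import (
	"encoding/binary"
	"hash/crc32"
)

// castagnoli is presumably what badger's y.CastagnoliCrcTable holds.
var castagnoli = crc32.MakeTable(crc32.Castagnoli)

// frameRecord prefixes a marshalled ManifestChangeSet with its length and checksum.
func frameRecord(payload []byte) []byte {
	var hdr [8]byte
	binary.BigEndian.PutUint32(hdr[0:4], uint32(len(payload)))
	binary.BigEndian.PutUint32(hdr[4:8], crc32.Checksum(payload, castagnoli))
	return append(hdr[:], payload...)
}
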
- if err := mf.fp.Close(); err != nil { - return err - } - fp, netCreations, err := helpRewrite(mf.directory, &mf.manifest) - if err != nil { - return err - } - mf.fp = fp - mf.manifest.Creations = netCreations - mf.manifest.Deletions = 0 - - return nil -} - -type countingReader struct { - wrapped *bufio.Reader - count int64 -} - -func (r *countingReader) Read(p []byte) (n int, err error) { - n, err = r.wrapped.Read(p) - r.count += int64(n) - return -} - -func (r *countingReader) ReadByte() (b byte, err error) { - b, err = r.wrapped.ReadByte() - if err == nil { - r.count++ - } - return -} - -var ( - errBadMagic = errors.New("manifest has bad magic") - errBadChecksum = errors.New("manifest has checksum mismatch") -) - -// ReplayManifestFile reads the manifest file and constructs two manifest objects. (We need one -// immutable copy and one mutable copy of the manifest. Easiest way is to construct two of them.) -// Also, returns the last offset after a completely read manifest entry -- the file must be -// truncated at that point before further appends are made (if there is a partial entry after -// that). In normal conditions, truncOffset is the file size. -func ReplayManifestFile(fp *os.File) (Manifest, int64, error) { - r := countingReader{wrapped: bufio.NewReader(fp)} - - var magicBuf [8]byte - if _, err := io.ReadFull(&r, magicBuf[:]); err != nil { - return Manifest{}, 0, errBadMagic - } - if !bytes.Equal(magicBuf[0:4], magicText[:]) { - return Manifest{}, 0, errBadMagic - } - version := y.BytesToU32(magicBuf[4:8]) - if version != magicVersion { - return Manifest{}, 0, - //nolint:lll - fmt.Errorf("manifest has unsupported version: %d (we support %d).\n"+ - "Please see https://github.com/dgraph-io/badger/blob/master/README.md#i-see-manifest-has-unsupported-version-x-we-support-y-error"+ - " on how to fix this.", - version, magicVersion) - } - - stat, err := fp.Stat() - if err != nil { - return Manifest{}, 0, err - } - - build := createManifest() - var offset int64 - for { - offset = r.count - var lenCrcBuf [8]byte - _, err := io.ReadFull(&r, lenCrcBuf[:]) - if err != nil { - if err == io.EOF || err == io.ErrUnexpectedEOF { - break - } - return Manifest{}, 0, err - } - length := y.BytesToU32(lenCrcBuf[0:4]) - // Sanity check to ensure we don't over-allocate memory. - if length > uint32(stat.Size()) { - return Manifest{}, 0, errors.Errorf( - "Buffer length: %d greater than file size: %d. 
Manifest file might be corrupted", - length, stat.Size()) - } - var buf = make([]byte, length) - if _, err := io.ReadFull(&r, buf); err != nil { - if err == io.EOF || err == io.ErrUnexpectedEOF { - break - } - return Manifest{}, 0, err - } - if crc32.Checksum(buf, y.CastagnoliCrcTable) != y.BytesToU32(lenCrcBuf[4:8]) { - return Manifest{}, 0, errBadChecksum - } - - var changeSet pb.ManifestChangeSet - if err := proto.Unmarshal(buf, &changeSet); err != nil { - return Manifest{}, 0, err - } - - if err := applyChangeSet(&build, &changeSet); err != nil { - return Manifest{}, 0, err - } - } - - return build, offset, nil -} - -func applyManifestChange(build *Manifest, tc *pb.ManifestChange) error { - switch tc.Op { - case pb.ManifestChange_CREATE: - if _, ok := build.Tables[tc.Id]; ok { - return fmt.Errorf("MANIFEST invalid, table %d exists", tc.Id) - } - build.Tables[tc.Id] = TableManifest{ - Level: uint8(tc.Level), - KeyID: tc.KeyId, - Compression: options.CompressionType(tc.Compression), - } - for len(build.Levels) <= int(tc.Level) { - build.Levels = append(build.Levels, levelManifest{make(map[uint64]struct{})}) - } - build.Levels[tc.Level].Tables[tc.Id] = struct{}{} - build.Creations++ - case pb.ManifestChange_DELETE: - tm, ok := build.Tables[tc.Id] - if !ok { - return fmt.Errorf("MANIFEST removes non-existing table %d", tc.Id) - } - delete(build.Levels[tm.Level].Tables, tc.Id) - delete(build.Tables, tc.Id) - build.Deletions++ - default: - return fmt.Errorf("MANIFEST file has invalid manifestChange op") - } - return nil -} - -// This is not a "recoverable" error -- opening the KV store fails because the MANIFEST file is -// just plain broken. -func applyChangeSet(build *Manifest, changeSet *pb.ManifestChangeSet) error { - for _, change := range changeSet.Changes { - if err := applyManifestChange(build, change); err != nil { - return err - } - } - return nil -} - -func newCreateChange( - id uint64, level int, keyID uint64, c options.CompressionType) *pb.ManifestChange { - return &pb.ManifestChange{ - Id: id, - Op: pb.ManifestChange_CREATE, - Level: uint32(level), - KeyId: keyID, - // Hard coding it, since we're supporting only AES for now. - EncryptionAlgo: pb.EncryptionAlgo_aes, - Compression: uint32(c), - } -} - -func newDeleteChange(id uint64) *pb.ManifestChange { - return &pb.ManifestChange{ - Id: id, - Op: pb.ManifestChange_DELETE, - } -} diff --git a/vendor/github.com/dgraph-io/badger/v2/merge.go b/vendor/github.com/dgraph-io/badger/v2/merge.go deleted file mode 100644 index 569b297d..00000000 --- a/vendor/github.com/dgraph-io/badger/v2/merge.go +++ /dev/null @@ -1,177 +0,0 @@ -/* - * Copyright 2017 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package badger - -import ( - "sync" - "time" - - "github.com/dgraph-io/badger/v2/y" - "github.com/pkg/errors" -) - -// MergeOperator represents a Badger merge operator. 
-type MergeOperator struct { - sync.RWMutex - f MergeFunc - db *DB - key []byte - closer *y.Closer -} - -// MergeFunc accepts two byte slices, one representing an existing value, and -// another representing a new value that needs to be ‘merged’ into it. MergeFunc -// contains the logic to perform the ‘merge’ and return an updated value. -// MergeFunc could perform operations like integer addition, list appends etc. -// Note that the ordering of the operands is maintained. -type MergeFunc func(existingVal, newVal []byte) []byte - -// GetMergeOperator creates a new MergeOperator for a given key and returns a -// pointer to it. It also fires off a goroutine that performs a compaction using -// the merge function that runs periodically, as specified by dur. -func (db *DB) GetMergeOperator(key []byte, - f MergeFunc, dur time.Duration) *MergeOperator { - op := &MergeOperator{ - f: f, - db: db, - key: key, - closer: y.NewCloser(1), - } - - go op.runCompactions(dur) - return op -} - -var errNoMerge = errors.New("No need for merge") - -func (op *MergeOperator) iterateAndMerge() (newVal []byte, latest uint64, err error) { - txn := op.db.NewTransaction(false) - defer txn.Discard() - opt := DefaultIteratorOptions - opt.AllVersions = true - it := txn.NewKeyIterator(op.key, opt) - defer it.Close() - - var numVersions int - for it.Rewind(); it.Valid(); it.Next() { - item := it.Item() - numVersions++ - if numVersions == 1 { - // This should be the newVal, considering this is the latest version. - newVal, err = item.ValueCopy(newVal) - if err != nil { - return nil, 0, err - } - latest = item.Version() - } else { - if err := item.Value(func(oldVal []byte) error { - // The merge should always be on the newVal considering it has the merge result of - // the latest version. The value read should be the oldVal. - newVal = op.f(oldVal, newVal) - return nil - }); err != nil { - return nil, 0, err - } - } - if item.DiscardEarlierVersions() { - break - } - } - if numVersions == 0 { - return nil, latest, ErrKeyNotFound - } else if numVersions == 1 { - return newVal, latest, errNoMerge - } - return newVal, latest, nil -} - -func (op *MergeOperator) compact() error { - op.Lock() - defer op.Unlock() - val, version, err := op.iterateAndMerge() - if err == ErrKeyNotFound || err == errNoMerge { - return nil - } else if err != nil { - return err - } - entries := []*Entry{ - { - Key: y.KeyWithTs(op.key, version), - Value: val, - meta: bitDiscardEarlierVersions, - }, - } - // Write value back to the DB. It is important that we do not set the bitMergeEntry bit - // here. When compaction happens, all the older merged entries will be removed. - return op.db.batchSetAsync(entries, func(err error) { - if err != nil { - op.db.opt.Errorf("failed to insert the result of merge compaction: %s", err) - } - }) -} - -func (op *MergeOperator) runCompactions(dur time.Duration) { - ticker := time.NewTicker(dur) - defer op.closer.Done() - var stop bool - for { - select { - case <-op.closer.HasBeenClosed(): - stop = true - case <-ticker.C: // wait for tick - } - if err := op.compact(); err != nil { - op.db.opt.Errorf("failure while running merge operation: %s", err) - } - if stop { - ticker.Stop() - break - } - } -} - -// Add records a value in Badger which will eventually be merged by a background -// routine into the values that were recorded by previous invocations to Add(). 
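
A usage sketch for the MergeOperator API above, using the integer-addition example the MergeFunc comment suggests (the encoding helpers and names are ours, and values are assumed to be 8-byte big-endian counters):

import (
	"encoding/binary"
	"time"

	badger "github.com/dgraph-io/badger/v2"
)

func u64ToBytes(v uint64) []byte {
	var b [8]byte
	binary.BigEndian.PutUint64(b[:], v)
	return b[:]
}

// add merges two values by treating them as big-endian uint64 counters.
func add(existing, latest []byte) []byte {
	return u64ToBytes(binary.BigEndian.Uint64(existing) + binary.BigEndian.Uint64(latest))
}

func counterExample(db *badger.DB) (uint64, error) {
	// Run the background merge compaction every 200ms.
	m := db.GetMergeOperator([]byte("counter"), add, 200*time.Millisecond)
	defer m.Stop() // waits for any pending merge to complete

	if err := m.Add(u64ToBytes(1)); err != nil {
		return 0, err
	}
	if err := m.Add(u64ToBytes(2)); err != nil {
		return 0, err
	}
	res, err := m.Get() // merged result: 3
	if err != nil {
		return 0, err
	}
	return binary.BigEndian.Uint64(res), nil
}
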
-func (op *MergeOperator) Add(val []byte) error { - return op.db.Update(func(txn *Txn) error { - return txn.SetEntry(NewEntry(op.key, val).withMergeBit()) - }) -} - -// Get returns the latest value for the merge operator, which is derived by -// applying the merge function to all the values added so far. -// -// If Add has not been called even once, Get will return ErrKeyNotFound. -func (op *MergeOperator) Get() ([]byte, error) { - op.RLock() - defer op.RUnlock() - var existing []byte - err := op.db.View(func(txn *Txn) (err error) { - existing, _, err = op.iterateAndMerge() - return err - }) - if err == errNoMerge { - return existing, nil - } - return existing, err -} - -// Stop waits for any pending merge to complete and then stops the background -// goroutine. -func (op *MergeOperator) Stop() { - op.closer.SignalAndWait() -} diff --git a/vendor/github.com/dgraph-io/badger/v2/options.go b/vendor/github.com/dgraph-io/badger/v2/options.go deleted file mode 100644 index 700c8747..00000000 --- a/vendor/github.com/dgraph-io/badger/v2/options.go +++ /dev/null @@ -1,657 +0,0 @@ -/* - * Copyright 2017 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package badger - -import ( - "time" - - "github.com/dgraph-io/badger/v2/options" - "github.com/dgraph-io/badger/v2/table" -) - -// Note: If you add a new option X make sure you also add a WithX method on Options. - -// Options are params for creating DB object. -// -// This package provides DefaultOptions which contains options that should -// work for most applications. Consider using that as a starting point before -// customizing it for your own needs. -// -// Each option X is documented on the WithX method. -type Options struct { - // Required options. - - Dir string - ValueDir string - - // Usually modified options. - - SyncWrites bool - TableLoadingMode options.FileLoadingMode - ValueLogLoadingMode options.FileLoadingMode - NumVersionsToKeep int - ReadOnly bool - Truncate bool - Logger Logger - Compression options.CompressionType - InMemory bool - - // Fine tuning options. - - MaxTableSize int64 - LevelSizeMultiplier int - MaxLevels int - ValueThreshold int - NumMemtables int - // Changing BlockSize across DB runs will not break badger. The block size is - // read from the block index stored at the end of the table. - BlockSize int - BloomFalsePositive float64 - KeepL0InMemory bool - BlockCacheSize int64 - IndexCacheSize int64 - LoadBloomsOnOpen bool - - NumLevelZeroTables int - NumLevelZeroTablesStall int - - LevelOneSize int64 - ValueLogFileSize int64 - ValueLogMaxEntries uint32 - - NumCompactors int - CompactL0OnClose bool - LogRotatesToFlush int32 - ZSTDCompressionLevel int - - // When set, checksum will be validated for each entry read from the value log file. - VerifyValueChecksum bool - - // Encryption related options. - EncryptionKey []byte // encryption key - EncryptionKeyRotationDuration time.Duration // key rotation duration - - // BypassLockGaurd will bypass the lock guard on badger. 
Bypassing lock - // guard can cause data corruption if multiple badger instances are using - // the same directory. Use this options with caution. - BypassLockGuard bool - - // ChecksumVerificationMode decides when db should verify checksums for SSTable blocks. - ChecksumVerificationMode options.ChecksumVerificationMode - - // DetectConflicts determines whether the transactions would be checked for - // conflicts. The transactions can be processed at a higher rate when - // conflict detection is disabled. - DetectConflicts bool - - // Transaction start and commit timestamps are managed by end-user. - // This is only useful for databases built on top of Badger (like Dgraph). - // Not recommended for most users. - managedTxns bool - - // 4. Flags for testing purposes - // ------------------------------ - maxBatchCount int64 // max entries in batch - maxBatchSize int64 // max batch size in bytes -} - -// DefaultOptions sets a list of recommended options for good performance. -// Feel free to modify these to suit your needs with the WithX methods. -func DefaultOptions(path string) Options { - return Options{ - Dir: path, - ValueDir: path, - LevelOneSize: 256 << 20, - LevelSizeMultiplier: 10, - TableLoadingMode: options.MemoryMap, - ValueLogLoadingMode: options.MemoryMap, - // table.MemoryMap to mmap() the tables. - // table.Nothing to not preload the tables. - MaxLevels: 7, - MaxTableSize: 64 << 20, - NumCompactors: 2, // Run at least 2 compactors. One is dedicated for L0. - NumLevelZeroTables: 5, - NumLevelZeroTablesStall: 15, - NumMemtables: 5, - BloomFalsePositive: 0.01, - BlockSize: 4 * 1024, - SyncWrites: true, - NumVersionsToKeep: 1, - CompactL0OnClose: true, - KeepL0InMemory: false, - VerifyValueChecksum: false, - Compression: options.None, - BlockCacheSize: 0, - IndexCacheSize: 0, - LoadBloomsOnOpen: true, - - // The following benchmarks were done on a 4 KB block size (default block size). The - // compression is ratio supposed to increase with increasing compression level but since the - // input for compression algorithm is small (4 KB), we don't get significant benefit at - // level 3. - // NOTE: The benchmarks are with DataDog ZSTD that requires CGO. Hence, no longer valid. - // no_compression-16 10 502848865 ns/op 165.46 MB/s - - // zstd_compression/level_1-16 7 739037966 ns/op 112.58 MB/s 2.93 - // zstd_compression/level_3-16 7 756950250 ns/op 109.91 MB/s 2.72 - // zstd_compression/level_15-16 1 11135686219 ns/op 7.47 MB/s 4.38 - // Benchmark code can be found in table/builder_test.go file - ZSTDCompressionLevel: 1, - - // Nothing to read/write value log using standard File I/O - // MemoryMap to mmap() the value log files - // (2^30 - 1)*2 when mmapping < 2^31 - 1, max int32. - // -1 so 2*ValueLogFileSize won't overflow on 32-bit systems. - ValueLogFileSize: 1<<30 - 1, - - ValueLogMaxEntries: 1000000, - ValueThreshold: 1 << 10, // 1 KB. - Truncate: false, - Logger: defaultLogger(INFO), - LogRotatesToFlush: 2, - EncryptionKey: []byte{}, - EncryptionKeyRotationDuration: 10 * 24 * time.Hour, // Default 10 days. 
- DetectConflicts: true, - } -} - -func buildTableOptions(opt Options) table.Options { - return table.Options{ - BlockSize: opt.BlockSize, - BloomFalsePositive: opt.BloomFalsePositive, - LoadBloomsOnOpen: opt.LoadBloomsOnOpen, - LoadingMode: opt.TableLoadingMode, - ChkMode: opt.ChecksumVerificationMode, - Compression: opt.Compression, - ZSTDCompressionLevel: opt.ZSTDCompressionLevel, - } -} - -const ( - maxValueThreshold = (1 << 20) // 1 MB -) - -// LSMOnlyOptions follows from DefaultOptions, but sets a higher ValueThreshold -// so values would be collocated with the LSM tree, with value log largely acting -// as a write-ahead log only. These options would reduce the disk usage of value -// log, and make Badger act more like a typical LSM tree. -func LSMOnlyOptions(path string) Options { - // Let's not set any other options, because they can cause issues with the - // size of key-value a user can pass to Badger. For e.g., if we set - // ValueLogFileSize to 64MB, a user can't pass a value more than that. - // Setting it to ValueLogMaxEntries to 1000, can generate too many files. - // These options are better configured on a usage basis, than broadly here. - // The ValueThreshold is the most important setting a user needs to do to - // achieve a heavier usage of LSM tree. - // NOTE: If a user does not want to set 64KB as the ValueThreshold because - // of performance reasons, 1KB would be a good option too, allowing - // values smaller than 1KB to be collocated with the keys in the LSM tree. - return DefaultOptions(path).WithValueThreshold(maxValueThreshold /* 1 MB */) -} - -// WithDir returns a new Options value with Dir set to the given value. -// -// Dir is the path of the directory where key data will be stored in. -// If it doesn't exist, Badger will try to create it for you. -// This is set automatically to be the path given to `DefaultOptions`. -func (opt Options) WithDir(val string) Options { - opt.Dir = val - return opt -} - -// WithValueDir returns a new Options value with ValueDir set to the given value. -// -// ValueDir is the path of the directory where value data will be stored in. -// If it doesn't exist, Badger will try to create it for you. -// This is set automatically to be the path given to `DefaultOptions`. -func (opt Options) WithValueDir(val string) Options { - opt.ValueDir = val - return opt -} - -// WithLoggingLevel returns a new Options value with logging level of the -// default logger set to the given value. -// LoggingLevel sets the level of logging. It should be one of DEBUG, INFO, -// WARNING or ERROR levels. -// -// The default value of LoggingLevel is INFO. -func (opt Options) WithLoggingLevel(val loggingLevel) Options { - opt.Logger = defaultLogger(val) - return opt -} - -// WithSyncWrites returns a new Options value with SyncWrites set to the given value. -// -// When SyncWrites is true all writes are synced to disk. Setting this to false would achieve better -// performance, but may cause data loss in case of crash. -// -// The default value of SyncWrites is true. -func (opt Options) WithSyncWrites(val bool) Options { - opt.SyncWrites = val - return opt -} - -// WithTableLoadingMode returns a new Options value with TableLoadingMode set to the given value. -// -// TableLoadingMode indicates which file loading mode should be used for the LSM tree data files. -// -// The default value of TableLoadingMode is options.MemoryMap. 
-// WithTableLoadingMode returns a new Options value with TableLoadingMode set to the given value.
-//
-// TableLoadingMode indicates which file loading mode should be used for the LSM tree data files.
-//
-// The default value of TableLoadingMode is options.MemoryMap.
-func (opt Options) WithTableLoadingMode(val options.FileLoadingMode) Options {
-	opt.TableLoadingMode = val
-	return opt
-}
-
-// WithValueLogLoadingMode returns a new Options value with ValueLogLoadingMode set to the given
-// value.
-//
-// ValueLogLoadingMode indicates which file loading mode should be used for the value log data
-// files.
-//
-// The default value of ValueLogLoadingMode is options.MemoryMap.
-func (opt Options) WithValueLogLoadingMode(val options.FileLoadingMode) Options {
-	opt.ValueLogLoadingMode = val
-	return opt
-}
-
-// WithNumVersionsToKeep returns a new Options value with NumVersionsToKeep set to the given value.
-//
-// NumVersionsToKeep sets how many versions to keep per key at most.
-//
-// The default value of NumVersionsToKeep is 1.
-func (opt Options) WithNumVersionsToKeep(val int) Options {
-	opt.NumVersionsToKeep = val
-	return opt
-}
-
-// WithReadOnly returns a new Options value with ReadOnly set to the given value.
-//
-// When ReadOnly is true the DB will be opened in read-only mode.
-// Multiple processes can open the same Badger DB.
-// Note: if the DB being opened had crashed before and has vlog data to be replayed,
-// ReadOnly will cause Open to fail with an appropriate message.
-//
-// The default value of ReadOnly is false.
-func (opt Options) WithReadOnly(val bool) Options {
-	opt.ReadOnly = val
-	return opt
-}
-
-// WithTruncate returns a new Options value with Truncate set to the given value.
-//
-// Truncate indicates whether value log files should be truncated to delete corrupt data, if any.
-// This option is ignored when ReadOnly is true.
-//
-// The default value of Truncate is false.
-func (opt Options) WithTruncate(val bool) Options {
-	opt.Truncate = val
-	return opt
-}
-
-// WithLogger returns a new Options value with Logger set to the given value.
-//
-// Logger provides a way to configure what logger each value of badger.DB uses.
-//
-// The default value of Logger writes to stderr using the log package from the Go standard library.
-func (opt Options) WithLogger(val Logger) Options {
-	opt.Logger = val
-	return opt
-}
-
-// WithMaxTableSize returns a new Options value with MaxTableSize set to the given value.
-//
-// MaxTableSize sets the maximum size in bytes for each LSM table or file.
-//
-// The default value of MaxTableSize is 64MB.
-func (opt Options) WithMaxTableSize(val int64) Options {
-	opt.MaxTableSize = val
-	return opt
-}
-
-// WithLevelSizeMultiplier returns a new Options value with LevelSizeMultiplier set to the given
-// value.
-//
-// LevelSizeMultiplier sets the ratio between the maximum sizes of contiguous levels in the LSM.
-// Once a level grows larger than this ratio allows, the compaction process is triggered.
-//
-// The default value of LevelSizeMultiplier is 10.
-func (opt Options) WithLevelSizeMultiplier(val int) Options {
-	opt.LevelSizeMultiplier = val
-	return opt
-}
-
-// WithMaxLevels returns a new Options value with MaxLevels set to the given value.
-//
-// MaxLevels is the maximum number of levels of compaction allowed in the LSM.
-//
-// The default value of MaxLevels is 7.
-func (opt Options) WithMaxLevels(val int) Options {
-	opt.MaxLevels = val
-	return opt
-}
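
The two loading-mode options pair with the options.FileIO / options.MemoryMap constants defined later in this diff. A sketch for a memory-constrained host; the helper name and sizes are illustrative assumptions, not badger recommendations:

package dbtune

import (
	badger "github.com/dgraph-io/badger/v2"
	"github.com/dgraph-io/badger/v2/options"
)

// lowMemoryOptions is a hypothetical helper: standard file I/O avoids the
// virtual-memory cost of mmap at the price of slower reads.
func lowMemoryOptions(path string) badger.Options {
	return badger.DefaultOptions(path).
		WithTableLoadingMode(options.FileIO).
		WithValueLogLoadingMode(options.FileIO).
		WithMaxTableSize(16 << 20) // 16 MB SSTables instead of 64 MB; illustrative
}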
-// WithValueThreshold returns a new Options value with ValueThreshold set to the given value.
-//
-// ValueThreshold sets the threshold used to decide whether a value is stored directly in the LSM
-// tree or separately in the value log files.
-//
-// The default value of ValueThreshold is 1 KB, but LSMOnlyOptions sets it to maxValueThreshold.
-func (opt Options) WithValueThreshold(val int) Options {
-	opt.ValueThreshold = val
-	return opt
-}
-
-// WithNumMemtables returns a new Options value with NumMemtables set to the given value.
-//
-// NumMemtables sets the maximum number of tables to keep in memory before stalling.
-//
-// The default value of NumMemtables is 5.
-func (opt Options) WithNumMemtables(val int) Options {
-	opt.NumMemtables = val
-	return opt
-}
-
-// WithBloomFalsePositive returns a new Options value with BloomFalsePositive set
-// to the given value.
-//
-// BloomFalsePositive sets the false-positive probability of the bloom filter in any SSTable.
-// Before reading a key from a table, the bloom filter is checked for the key's existence.
-// BloomFalsePositive might impact the read performance of the DB. A lower BloomFalsePositive
-// value might consume more memory.
-//
-// The default value of BloomFalsePositive is 0.01.
-func (opt Options) WithBloomFalsePositive(val float64) Options {
-	opt.BloomFalsePositive = val
-	return opt
-}
-
-// WithBlockSize returns a new Options value with BlockSize set to the given value.
-//
-// BlockSize sets the size of any block in an SSTable. An SSTable is divided into multiple blocks
-// internally. Each block is compressed using prefix diff encoding.
-//
-// The default value of BlockSize is 4KB.
-func (opt Options) WithBlockSize(val int) Options {
-	opt.BlockSize = val
-	return opt
-}
-
-// WithNumLevelZeroTables returns a new Options value with NumLevelZeroTables set to the given
-// value.
-//
-// NumLevelZeroTables sets the maximum number of Level 0 tables before compaction starts.
-//
-// The default value of NumLevelZeroTables is 5.
-func (opt Options) WithNumLevelZeroTables(val int) Options {
-	opt.NumLevelZeroTables = val
-	return opt
-}
-
-// WithNumLevelZeroTablesStall returns a new Options value with NumLevelZeroTablesStall set to the
-// given value.
-//
-// NumLevelZeroTablesStall sets the number of Level 0 tables that, once reached, causes the DB to
-// stall until compaction succeeds.
-//
-// The default value of NumLevelZeroTablesStall is 15 (see DefaultOptions above).
-func (opt Options) WithNumLevelZeroTablesStall(val int) Options {
-	opt.NumLevelZeroTablesStall = val
-	return opt
-}
-
-// WithLevelOneSize returns a new Options value with LevelOneSize set to the given value.
-//
-// LevelOneSize sets the maximum total size for Level 1.
-//
-// The default value of LevelOneSize is 256MB (see DefaultOptions above).
-func (opt Options) WithLevelOneSize(val int64) Options {
-	opt.LevelOneSize = val
-	return opt
-}
-
-// WithValueLogFileSize returns a new Options value with ValueLogFileSize set to the given value.
-//
-// ValueLogFileSize sets the maximum size of a single value log file.
-//
-// The default value of ValueLogFileSize is 1GB.
-func (opt Options) WithValueLogFileSize(val int64) Options {
-	opt.ValueLogFileSize = val
-	return opt
-}
-
-// WithValueLogMaxEntries returns a new Options value with ValueLogMaxEntries set to the given
-// value.
-//
-// ValueLogMaxEntries sets the maximum number of entries a value log file can hold, approximately.
-// The actual size limit of a value log file is the minimum of ValueLogFileSize and
-// ValueLogMaxEntries.
-//
-// The default value of ValueLogMaxEntries is one million (1000000).
-func (opt Options) WithValueLogMaxEntries(val uint32) Options {
-	opt.ValueLogMaxEntries = val
-	return opt
-}
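
To make the threshold's effect concrete: per the comments above (and the VerifyValueChecksum note further down), values smaller than ValueThreshold are inlined next to their keys in the LSM tree, while larger ones go to the value log with only a pointer in the tree. An illustrative predicate, not badger's internal code:

package main

import (
	"fmt"

	badger "github.com/dgraph-io/badger/v2"
)

// goesToValueLog is a toy model of the placement decision: values at or
// above ValueThreshold live in the value log; smaller values are inlined.
func goesToValueLog(opts badger.Options, value []byte) bool {
	return len(value) >= opts.ValueThreshold
}

func main() {
	opts := badger.DefaultOptions("/tmp/badger")          // ValueThreshold is 1 KB here
	fmt.Println(goesToValueLog(opts, make([]byte, 512)))  // false: inlined in the LSM tree
	fmt.Println(goesToValueLog(opts, make([]byte, 4096))) // true: stored in the value log
}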
-// WithNumCompactors returns a new Options value with NumCompactors set to the given value.
-//
-// NumCompactors sets the number of compaction workers to run concurrently.
-// Setting this to zero stops compactions, which could eventually cause writes to block forever.
-//
-// The default value of NumCompactors is 2. One is dedicated just for L0.
-func (opt Options) WithNumCompactors(val int) Options {
-	opt.NumCompactors = val
-	return opt
-}
-
-// WithCompactL0OnClose returns a new Options value with CompactL0OnClose set to the given value.
-//
-// CompactL0OnClose determines whether Level 0 should be compacted before closing the DB.
-// This ensures that both reads and writes are efficient when the DB is opened later.
-// CompactL0OnClose is set to true if KeepL0InMemory is set to true.
-//
-// The default value of CompactL0OnClose is true.
-func (opt Options) WithCompactL0OnClose(val bool) Options {
-	opt.CompactL0OnClose = val
-	return opt
-}
-
-// WithLogRotatesToFlush returns a new Options value with LogRotatesToFlush set to the given value.
-//
-// LogRotatesToFlush sets the number of value log file rotations after which the memtables are
-// flushed to disk. This is useful in write loads with fewer keys and larger values. Such a
-// workload would fill up the value logs quickly while not filling up the memtables. Thus, on a
-// crash and restart, the value log head could cause the replay of a good number of value log
-// files, which can slow things down on start.
-//
-// The default value of LogRotatesToFlush is 2.
-func (opt Options) WithLogRotatesToFlush(val int32) Options {
-	opt.LogRotatesToFlush = val
-	return opt
-}
-
-// WithEncryptionKey returns a new Options value with EncryptionKey set to the given value.
-//
-// EncryptionKey is used to encrypt the data with AES. The AES variant is chosen based on the key
-// size: 16 bytes will use AES-128, 24 bytes will use AES-192, and 32 bytes will use AES-256.
-func (opt Options) WithEncryptionKey(key []byte) Options {
-	opt.EncryptionKey = key
-	return opt
-}
-
-// WithEncryptionKeyRotationDuration returns a new Options value with the duration set to
-// the given value.
-//
-// The key registry uses this duration to create new keys: if the previously generated
-// key has exceeded the given duration, the key registry creates a new key.
-func (opt Options) WithEncryptionKeyRotationDuration(d time.Duration) Options {
-	opt.EncryptionKeyRotationDuration = d
-	return opt
-}
-
-// WithKeepL0InMemory returns a new Options value with KeepL0InMemory set to the given value.
-//
-// When KeepL0InMemory is set to true we will keep all Level 0 tables in memory. This leads to
-// better performance in writes as well as compactions. In case of a DB crash, the value log
-// replay will take longer to complete, since memtables and all Level 0 tables will have to be
-// recreated. This option also sets CompactL0OnClose to true.
-//
-// The default value of KeepL0InMemory is false.
-func (opt Options) WithKeepL0InMemory(val bool) Options {
-	opt.KeepL0InMemory = val
-	return opt
-}
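
Putting the two encryption options together, a sketch of supplying an AES-256 key; the helper name and rotation period are assumptions, while crypto/rand and the two WithX calls are as documented above:

package dbtune

import (
	"crypto/rand"
	"fmt"
	"time"

	badger "github.com/dgraph-io/badger/v2"
)

// encryptedOptions is an illustrative helper, not badger API.
func encryptedOptions(path string) (badger.Options, error) {
	key := make([]byte, 32) // 32 bytes selects AES-256 per the comment above
	if _, err := rand.Read(key); err != nil {
		return badger.Options{}, fmt.Errorf("generating key: %w", err)
	}
	// A real deployment must persist this key (e.g. in a secrets manager);
	// regenerating it on every start would make existing data unreadable.
	return badger.DefaultOptions(path).
		WithEncryptionKey(key).
		WithEncryptionKeyRotationDuration(7 * 24 * time.Hour), nil // illustrative period
}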
-// WithCompression returns a new Options value with Compression set to the given value.
-//
-// When compression is enabled, every block will be compressed using the specified algorithm.
-// This option doesn't affect existing tables; only newly created tables will be compressed.
-//
-// ZSTD is the preferred algorithm when built with Cgo; without Cgo, Snappy. Note that
-// DefaultOptions above sets Compression to options.None, so compression is disabled unless
-// explicitly enabled.
-func (opt Options) WithCompression(cType options.CompressionType) Options {
-	opt.Compression = cType
-	return opt
-}
-
-// WithVerifyValueChecksum returns a new Options value with VerifyValueChecksum set to
-// the given value.
-//
-// When VerifyValueChecksum is set to true, the checksum will be verified for every entry read
-// from the value log. If the value is stored in an SST (value size less than ValueThreshold),
-// the checksum validation will not be done.
-//
-// The default value of VerifyValueChecksum is false.
-func (opt Options) WithVerifyValueChecksum(val bool) Options {
-	opt.VerifyValueChecksum = val
-	return opt
-}
-
-// WithChecksumVerificationMode returns a new Options value with ChecksumVerificationMode set to
-// the given value.
-//
-// ChecksumVerificationMode indicates when the db should verify checksums for SSTable blocks.
-//
-// The default value of ChecksumVerificationMode is options.NoVerification.
-func (opt Options) WithChecksumVerificationMode(cvMode options.ChecksumVerificationMode) Options {
-	opt.ChecksumVerificationMode = cvMode
-	return opt
-}
-
-// WithBlockCacheSize returns a new Options value with BlockCacheSize set to the given value.
-//
-// This value specifies how much data the block cache should hold in memory. A smaller cache
-// means lower memory consumption, but lookups/iterations take longer. A cache is recommended
-// if you're using compression or encryption; if both are disabled, a cache only adds
-// unnecessary overhead that affects read performance. Setting the size to zero disables the
-// cache altogether.
-//
-// The default value of BlockCacheSize is zero.
-func (opt Options) WithBlockCacheSize(size int64) Options {
-	opt.BlockCacheSize = size
-	return opt
-}
-
-// WithInMemory returns a new Options value with the InMemory mode set to the given value.
-//
-// When badger is running in InMemory mode, everything is stored in memory. No value/sst files are
-// created. In case of a crash all data will be lost.
-func (opt Options) WithInMemory(b bool) Options {
-	opt.InMemory = b
-	return opt
-}
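
The compression, cache, and ZSTD-level options interact: the comments above recommend a block cache whenever compression (or encryption) is on, and the benchmarks below recommend ZSTD level 1. A hedged sketch combining them; the cache size is an illustrative value:

package dbtune

import (
	badger "github.com/dgraph-io/badger/v2"
	"github.com/dgraph-io/badger/v2/options"
)

// compressedOptions enables ZSTD at level 1 and, because blocks must be
// decompressed on every read, gives badger a block cache so hot blocks
// are decompressed only once.
func compressedOptions(path string) badger.Options {
	return badger.DefaultOptions(path).
		WithCompression(options.ZSTD).
		WithZSTDCompressionLevel(1).
		WithBlockCacheSize(256 << 20) // 256 MB; tune to the workload
}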
-// WithZSTDCompressionLevel returns a new Options value with ZSTDCompressionLevel set
-// to the given value.
-//
-// The ZSTD compression algorithm supports 20 compression levels. The higher the compression
-// level, the better the compression ratio, but the lower the performance. We recommend using
-// ZSTD compression level 1; any level higher than 1 seems to deteriorate badger's performance.
-// The following benchmarks were done on a 4 KB block size (the default block size). The
-// compression ratio is supposed to increase with increasing compression level, but since the
-// input for the compression algorithm is small (4 KB), we don't get a significant benefit at
-// level 3. It is advised to write your own benchmarks before choosing a compression algorithm
-// or level.
-//
-// NOTE: The benchmarks are with DataDog ZSTD that requires CGO. Hence, no longer valid.
-// no_compression-16             10   502848865 ns/op  165.46 MB/s  -
-// zstd_compression/level_1-16    7   739037966 ns/op  112.58 MB/s  2.93
-// zstd_compression/level_3-16    7   756950250 ns/op  109.91 MB/s  2.72
-// zstd_compression/level_15-16   1 11135686219 ns/op    7.47 MB/s  4.38
-// Benchmark code can be found in the table/builder_test.go file.
-func (opt Options) WithZSTDCompressionLevel(cLevel int) Options {
-	opt.ZSTDCompressionLevel = cLevel
-	return opt
-}
-
-// WithBypassLockGuard returns a new Options value with BypassLockGuard
-// set to the given value.
-//
-// When the BypassLockGuard option is set, badger will not acquire a lock on the
-// directory. This could lead to data corruption if multiple badger instances
-// write to the same data directory. Use this option with caution.
-//
-// The default value of BypassLockGuard is false.
-func (opt Options) WithBypassLockGuard(b bool) Options {
-	opt.BypassLockGuard = b
-	return opt
-}
-
-// WithLoadBloomsOnOpen returns a new Options value with LoadBloomsOnOpen set to the given value.
-//
-// Badger uses bloom filters to speed up key lookups. When LoadBloomsOnOpen is set
-// to false, bloom filters will be loaded lazily and not on DB open. Set this
-// option to false to reduce the time taken to open the DB.
-//
-// The default value of LoadBloomsOnOpen is true.
-func (opt Options) WithLoadBloomsOnOpen(b bool) Options {
-	opt.LoadBloomsOnOpen = b
-	return opt
-}
-
-// WithIndexCacheSize returns a new Options value with IndexCacheSize set to
-// the given value.
-//
-// This value specifies how much memory should be used by the table indices. These
-// indices include the block offsets and the bloom filters. Badger uses bloom
-// filters to speed up lookups. Each table has its own bloom filter, and each
-// bloom filter is approximately 5 MB.
-//
-// The default value of IndexCacheSize is 0, which means all indices are kept in
-// memory and the cache is disabled.
-func (opt Options) WithIndexCacheSize(size int64) Options {
-	opt.IndexCacheSize = size
-	return opt
-}
-
-// WithDetectConflicts returns a new Options value with DetectConflicts set to the given value.
-//
-// DetectConflicts determines whether transactions are checked for conflicts before
-// committing them. When this option is set to false, badger can process transactions
-// at a higher rate. Setting it to false might be useful when the user application
-// deals with conflict detection and resolution itself.
-//
-// The default value of DetectConflicts is true.
-func (opt Options) WithDetectConflicts(b bool) Options {
-	opt.DetectConflicts = b
-	return opt
-}
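
What conflict detection buys in practice: with DetectConflicts on (the default), a commit fails when another transaction wrote to a key this one read, and the caller retries. A sketch assuming badger v2's exported ErrConflict sentinel; the helper itself is illustrative:

package dbtune

import (
	"errors"

	badger "github.com/dgraph-io/badger/v2"
)

// commitWithRetry re-runs a read-modify-write transaction whenever badger's
// conflict detection rejects the commit. With WithDetectConflicts(false)
// this check disappears entirely, and the caller becomes responsible for
// serializing conflicting writers itself.
func commitWithRetry(db *badger.DB, update func(txn *badger.Txn) error) error {
	for {
		err := db.Update(update)
		if !errors.Is(err, badger.ErrConflict) {
			return err
		}
		// Another transaction modified our read set; re-run the closure.
	}
}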
diff --git a/vendor/github.com/dgraph-io/badger/v2/options/options.go b/vendor/github.com/dgraph-io/badger/v2/options/options.go
deleted file mode 100644
index 564f780f..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/options/options.go
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Copyright 2017 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package options
-
-// FileLoadingMode specifies how data in LSM table files and value log files should
-// be loaded.
-type FileLoadingMode int
-
-const (
-	// FileIO indicates that files must be loaded using standard I/O.
-	FileIO FileLoadingMode = iota
-	// LoadToRAM indicates that the file must be loaded into RAM.
-	LoadToRAM
-	// MemoryMap indicates that the file must be memory-mapped.
-	MemoryMap
-)
-
-// ChecksumVerificationMode tells when the DB should verify checksums for SSTable blocks.
-type ChecksumVerificationMode int
-
-const (
-	// NoVerification indicates the DB should not verify checksums for SSTable blocks.
-	NoVerification ChecksumVerificationMode = iota
-	// OnTableRead indicates the checksum should be verified while opening an SSTable.
-	OnTableRead
-	// OnBlockRead indicates the checksum should be verified on every SSTable block read.
-	OnBlockRead
-	// OnTableAndBlockRead indicates the checksum should be verified
-	// on SSTable opening and on every block read.
-	OnTableAndBlockRead
-)
-
-// CompressionType specifies how a block should be compressed.
-type CompressionType uint32
-
-const (
-	// None mode indicates that a block is not compressed.
-	None CompressionType = 0
-	// Snappy mode indicates that a block is compressed using the Snappy algorithm.
-	Snappy CompressionType = 1
-	// ZSTD mode indicates that a block is compressed using the ZSTD algorithm.
-	ZSTD CompressionType = 2
-)
diff --git a/vendor/github.com/dgraph-io/badger/v2/pb/badgerpb2.pb.go b/vendor/github.com/dgraph-io/badger/v2/pb/badgerpb2.pb.go
deleted file mode 100644
index edacec75..00000000
--- a/vendor/github.com/dgraph-io/badger/v2/pb/badgerpb2.pb.go
+++ /dev/null
@@ -1,2531 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: badgerpb2.proto
-
-package pb
-
-import (
-	fmt "fmt"
-	proto "github.com/golang/protobuf/proto"
-	io "io"
-	math "math"
-	math_bits "math/bits"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -type EncryptionAlgo int32 - -const ( - EncryptionAlgo_aes EncryptionAlgo = 0 -) - -var EncryptionAlgo_name = map[int32]string{ - 0: "aes", -} - -var EncryptionAlgo_value = map[string]int32{ - "aes": 0, -} - -func (x EncryptionAlgo) String() string { - return proto.EnumName(EncryptionAlgo_name, int32(x)) -} - -func (EncryptionAlgo) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_e63e84f9f0d3998c, []int{0} -} - -type ManifestChange_Operation int32 - -const ( - ManifestChange_CREATE ManifestChange_Operation = 0 - ManifestChange_DELETE ManifestChange_Operation = 1 -) - -var ManifestChange_Operation_name = map[int32]string{ - 0: "CREATE", - 1: "DELETE", -} - -var ManifestChange_Operation_value = map[string]int32{ - "CREATE": 0, - "DELETE": 1, -} - -func (x ManifestChange_Operation) String() string { - return proto.EnumName(ManifestChange_Operation_name, int32(x)) -} - -func (ManifestChange_Operation) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_e63e84f9f0d3998c, []int{3, 0} -} - -type Checksum_Algorithm int32 - -const ( - Checksum_CRC32C Checksum_Algorithm = 0 - Checksum_XXHash64 Checksum_Algorithm = 1 -) - -var Checksum_Algorithm_name = map[int32]string{ - 0: "CRC32C", - 1: "XXHash64", -} - -var Checksum_Algorithm_value = map[string]int32{ - "CRC32C": 0, - "XXHash64": 1, -} - -func (x Checksum_Algorithm) String() string { - return proto.EnumName(Checksum_Algorithm_name, int32(x)) -} - -func (Checksum_Algorithm) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_e63e84f9f0d3998c, []int{6, 0} -} - -type KV struct { - Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - UserMeta []byte `protobuf:"bytes,3,opt,name=user_meta,json=userMeta,proto3" json:"user_meta,omitempty"` - Version uint64 `protobuf:"varint,4,opt,name=version,proto3" json:"version,omitempty"` - ExpiresAt uint64 `protobuf:"varint,5,opt,name=expires_at,json=expiresAt,proto3" json:"expires_at,omitempty"` - Meta []byte `protobuf:"bytes,6,opt,name=meta,proto3" json:"meta,omitempty"` - // Stream id is used to identify which stream the KV came from. - StreamId uint32 `protobuf:"varint,10,opt,name=stream_id,json=streamId,proto3" json:"stream_id,omitempty"` - // Stream done is used to indicate end of stream. 
- StreamDone bool `protobuf:"varint,11,opt,name=stream_done,json=streamDone,proto3" json:"stream_done,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *KV) Reset() { *m = KV{} } -func (m *KV) String() string { return proto.CompactTextString(m) } -func (*KV) ProtoMessage() {} -func (*KV) Descriptor() ([]byte, []int) { - return fileDescriptor_e63e84f9f0d3998c, []int{0} -} -func (m *KV) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *KV) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_KV.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *KV) XXX_Merge(src proto.Message) { - xxx_messageInfo_KV.Merge(m, src) -} -func (m *KV) XXX_Size() int { - return m.Size() -} -func (m *KV) XXX_DiscardUnknown() { - xxx_messageInfo_KV.DiscardUnknown(m) -} - -var xxx_messageInfo_KV proto.InternalMessageInfo - -func (m *KV) GetKey() []byte { - if m != nil { - return m.Key - } - return nil -} - -func (m *KV) GetValue() []byte { - if m != nil { - return m.Value - } - return nil -} - -func (m *KV) GetUserMeta() []byte { - if m != nil { - return m.UserMeta - } - return nil -} - -func (m *KV) GetVersion() uint64 { - if m != nil { - return m.Version - } - return 0 -} - -func (m *KV) GetExpiresAt() uint64 { - if m != nil { - return m.ExpiresAt - } - return 0 -} - -func (m *KV) GetMeta() []byte { - if m != nil { - return m.Meta - } - return nil -} - -func (m *KV) GetStreamId() uint32 { - if m != nil { - return m.StreamId - } - return 0 -} - -func (m *KV) GetStreamDone() bool { - if m != nil { - return m.StreamDone - } - return false -} - -type KVList struct { - Kv []*KV `protobuf:"bytes,1,rep,name=kv,proto3" json:"kv,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *KVList) Reset() { *m = KVList{} } -func (m *KVList) String() string { return proto.CompactTextString(m) } -func (*KVList) ProtoMessage() {} -func (*KVList) Descriptor() ([]byte, []int) { - return fileDescriptor_e63e84f9f0d3998c, []int{1} -} -func (m *KVList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *KVList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_KVList.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *KVList) XXX_Merge(src proto.Message) { - xxx_messageInfo_KVList.Merge(m, src) -} -func (m *KVList) XXX_Size() int { - return m.Size() -} -func (m *KVList) XXX_DiscardUnknown() { - xxx_messageInfo_KVList.DiscardUnknown(m) -} - -var xxx_messageInfo_KVList proto.InternalMessageInfo - -func (m *KVList) GetKv() []*KV { - if m != nil { - return m.Kv - } - return nil -} - -type ManifestChangeSet struct { - // A set of changes that are applied atomically. 
- Changes []*ManifestChange `protobuf:"bytes,1,rep,name=changes,proto3" json:"changes,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ManifestChangeSet) Reset() { *m = ManifestChangeSet{} } -func (m *ManifestChangeSet) String() string { return proto.CompactTextString(m) } -func (*ManifestChangeSet) ProtoMessage() {} -func (*ManifestChangeSet) Descriptor() ([]byte, []int) { - return fileDescriptor_e63e84f9f0d3998c, []int{2} -} -func (m *ManifestChangeSet) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ManifestChangeSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ManifestChangeSet.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ManifestChangeSet) XXX_Merge(src proto.Message) { - xxx_messageInfo_ManifestChangeSet.Merge(m, src) -} -func (m *ManifestChangeSet) XXX_Size() int { - return m.Size() -} -func (m *ManifestChangeSet) XXX_DiscardUnknown() { - xxx_messageInfo_ManifestChangeSet.DiscardUnknown(m) -} - -var xxx_messageInfo_ManifestChangeSet proto.InternalMessageInfo - -func (m *ManifestChangeSet) GetChanges() []*ManifestChange { - if m != nil { - return m.Changes - } - return nil -} - -type ManifestChange struct { - Id uint64 `protobuf:"varint,1,opt,name=Id,proto3" json:"Id,omitempty"` - Op ManifestChange_Operation `protobuf:"varint,2,opt,name=Op,proto3,enum=badgerpb2.ManifestChange_Operation" json:"Op,omitempty"` - Level uint32 `protobuf:"varint,3,opt,name=Level,proto3" json:"Level,omitempty"` - KeyId uint64 `protobuf:"varint,4,opt,name=key_id,json=keyId,proto3" json:"key_id,omitempty"` - EncryptionAlgo EncryptionAlgo `protobuf:"varint,5,opt,name=encryption_algo,json=encryptionAlgo,proto3,enum=badgerpb2.EncryptionAlgo" json:"encryption_algo,omitempty"` - Compression uint32 `protobuf:"varint,6,opt,name=compression,proto3" json:"compression,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ManifestChange) Reset() { *m = ManifestChange{} } -func (m *ManifestChange) String() string { return proto.CompactTextString(m) } -func (*ManifestChange) ProtoMessage() {} -func (*ManifestChange) Descriptor() ([]byte, []int) { - return fileDescriptor_e63e84f9f0d3998c, []int{3} -} -func (m *ManifestChange) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ManifestChange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ManifestChange.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ManifestChange) XXX_Merge(src proto.Message) { - xxx_messageInfo_ManifestChange.Merge(m, src) -} -func (m *ManifestChange) XXX_Size() int { - return m.Size() -} -func (m *ManifestChange) XXX_DiscardUnknown() { - xxx_messageInfo_ManifestChange.DiscardUnknown(m) -} - -var xxx_messageInfo_ManifestChange proto.InternalMessageInfo - -func (m *ManifestChange) GetId() uint64 { - if m != nil { - return m.Id - } - return 0 -} - -func (m *ManifestChange) GetOp() ManifestChange_Operation { - if m != nil { - return m.Op - } - return ManifestChange_CREATE -} - -func (m *ManifestChange) GetLevel() uint32 { - if m != nil { - return m.Level - } - return 0 -} - -func 
(m *ManifestChange) GetKeyId() uint64 { - if m != nil { - return m.KeyId - } - return 0 -} - -func (m *ManifestChange) GetEncryptionAlgo() EncryptionAlgo { - if m != nil { - return m.EncryptionAlgo - } - return EncryptionAlgo_aes -} - -func (m *ManifestChange) GetCompression() uint32 { - if m != nil { - return m.Compression - } - return 0 -} - -type BlockOffset struct { - Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - Offset uint32 `protobuf:"varint,2,opt,name=offset,proto3" json:"offset,omitempty"` - Len uint32 `protobuf:"varint,3,opt,name=len,proto3" json:"len,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *BlockOffset) Reset() { *m = BlockOffset{} } -func (m *BlockOffset) String() string { return proto.CompactTextString(m) } -func (*BlockOffset) ProtoMessage() {} -func (*BlockOffset) Descriptor() ([]byte, []int) { - return fileDescriptor_e63e84f9f0d3998c, []int{4} -} -func (m *BlockOffset) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *BlockOffset) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_BlockOffset.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *BlockOffset) XXX_Merge(src proto.Message) { - xxx_messageInfo_BlockOffset.Merge(m, src) -} -func (m *BlockOffset) XXX_Size() int { - return m.Size() -} -func (m *BlockOffset) XXX_DiscardUnknown() { - xxx_messageInfo_BlockOffset.DiscardUnknown(m) -} - -var xxx_messageInfo_BlockOffset proto.InternalMessageInfo - -func (m *BlockOffset) GetKey() []byte { - if m != nil { - return m.Key - } - return nil -} - -func (m *BlockOffset) GetOffset() uint32 { - if m != nil { - return m.Offset - } - return 0 -} - -func (m *BlockOffset) GetLen() uint32 { - if m != nil { - return m.Len - } - return 0 -} - -type TableIndex struct { - Offsets []*BlockOffset `protobuf:"bytes,1,rep,name=offsets,proto3" json:"offsets,omitempty"` - BloomFilter []byte `protobuf:"bytes,2,opt,name=bloom_filter,json=bloomFilter,proto3" json:"bloom_filter,omitempty"` - EstimatedSize uint64 `protobuf:"varint,3,opt,name=estimated_size,json=estimatedSize,proto3" json:"estimated_size,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *TableIndex) Reset() { *m = TableIndex{} } -func (m *TableIndex) String() string { return proto.CompactTextString(m) } -func (*TableIndex) ProtoMessage() {} -func (*TableIndex) Descriptor() ([]byte, []int) { - return fileDescriptor_e63e84f9f0d3998c, []int{5} -} -func (m *TableIndex) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TableIndex) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_TableIndex.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *TableIndex) XXX_Merge(src proto.Message) { - xxx_messageInfo_TableIndex.Merge(m, src) -} -func (m *TableIndex) XXX_Size() int { - return m.Size() -} -func (m *TableIndex) XXX_DiscardUnknown() { - xxx_messageInfo_TableIndex.DiscardUnknown(m) -} - -var xxx_messageInfo_TableIndex proto.InternalMessageInfo - -func (m *TableIndex) GetOffsets() []*BlockOffset { - if m != nil { - return m.Offsets - } - return nil -} 
- -func (m *TableIndex) GetBloomFilter() []byte { - if m != nil { - return m.BloomFilter - } - return nil -} - -func (m *TableIndex) GetEstimatedSize() uint64 { - if m != nil { - return m.EstimatedSize - } - return 0 -} - -type Checksum struct { - Algo Checksum_Algorithm `protobuf:"varint,1,opt,name=algo,proto3,enum=badgerpb2.Checksum_Algorithm" json:"algo,omitempty"` - Sum uint64 `protobuf:"varint,2,opt,name=sum,proto3" json:"sum,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Checksum) Reset() { *m = Checksum{} } -func (m *Checksum) String() string { return proto.CompactTextString(m) } -func (*Checksum) ProtoMessage() {} -func (*Checksum) Descriptor() ([]byte, []int) { - return fileDescriptor_e63e84f9f0d3998c, []int{6} -} -func (m *Checksum) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Checksum) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Checksum.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Checksum) XXX_Merge(src proto.Message) { - xxx_messageInfo_Checksum.Merge(m, src) -} -func (m *Checksum) XXX_Size() int { - return m.Size() -} -func (m *Checksum) XXX_DiscardUnknown() { - xxx_messageInfo_Checksum.DiscardUnknown(m) -} - -var xxx_messageInfo_Checksum proto.InternalMessageInfo - -func (m *Checksum) GetAlgo() Checksum_Algorithm { - if m != nil { - return m.Algo - } - return Checksum_CRC32C -} - -func (m *Checksum) GetSum() uint64 { - if m != nil { - return m.Sum - } - return 0 -} - -type DataKey struct { - KeyId uint64 `protobuf:"varint,1,opt,name=key_id,json=keyId,proto3" json:"key_id,omitempty"` - Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` - Iv []byte `protobuf:"bytes,3,opt,name=iv,proto3" json:"iv,omitempty"` - CreatedAt int64 `protobuf:"varint,4,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DataKey) Reset() { *m = DataKey{} } -func (m *DataKey) String() string { return proto.CompactTextString(m) } -func (*DataKey) ProtoMessage() {} -func (*DataKey) Descriptor() ([]byte, []int) { - return fileDescriptor_e63e84f9f0d3998c, []int{7} -} -func (m *DataKey) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DataKey) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DataKey.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *DataKey) XXX_Merge(src proto.Message) { - xxx_messageInfo_DataKey.Merge(m, src) -} -func (m *DataKey) XXX_Size() int { - return m.Size() -} -func (m *DataKey) XXX_DiscardUnknown() { - xxx_messageInfo_DataKey.DiscardUnknown(m) -} - -var xxx_messageInfo_DataKey proto.InternalMessageInfo - -func (m *DataKey) GetKeyId() uint64 { - if m != nil { - return m.KeyId - } - return 0 -} - -func (m *DataKey) GetData() []byte { - if m != nil { - return m.Data - } - return nil -} - -func (m *DataKey) GetIv() []byte { - if m != nil { - return m.Iv - } - return nil -} - -func (m *DataKey) GetCreatedAt() int64 { - if m != nil { - return m.CreatedAt - } - return 0 -} - -func init() { - 
proto.RegisterEnum("badgerpb2.EncryptionAlgo", EncryptionAlgo_name, EncryptionAlgo_value) - proto.RegisterEnum("badgerpb2.ManifestChange_Operation", ManifestChange_Operation_name, ManifestChange_Operation_value) - proto.RegisterEnum("badgerpb2.Checksum_Algorithm", Checksum_Algorithm_name, Checksum_Algorithm_value) - proto.RegisterType((*KV)(nil), "badgerpb2.KV") - proto.RegisterType((*KVList)(nil), "badgerpb2.KVList") - proto.RegisterType((*ManifestChangeSet)(nil), "badgerpb2.ManifestChangeSet") - proto.RegisterType((*ManifestChange)(nil), "badgerpb2.ManifestChange") - proto.RegisterType((*BlockOffset)(nil), "badgerpb2.BlockOffset") - proto.RegisterType((*TableIndex)(nil), "badgerpb2.TableIndex") - proto.RegisterType((*Checksum)(nil), "badgerpb2.Checksum") - proto.RegisterType((*DataKey)(nil), "badgerpb2.DataKey") -} - -func init() { proto.RegisterFile("badgerpb2.proto", fileDescriptor_e63e84f9f0d3998c) } - -var fileDescriptor_e63e84f9f0d3998c = []byte{ - // 689 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x54, 0xcd, 0x6e, 0xda, 0x40, - 0x10, 0xc6, 0xc6, 0xe1, 0x67, 0x08, 0x84, 0xae, 0xda, 0xc8, 0x51, 0x15, 0x4a, 0x1c, 0x45, 0x45, - 0x95, 0x0a, 0x2d, 0x54, 0xbd, 0x13, 0x42, 0x15, 0x44, 0x22, 0xa4, 0x4d, 0x14, 0x45, 0xbd, 0xa0, - 0xc5, 0x1e, 0xc0, 0xc2, 0x7f, 0xf2, 0x2e, 0x56, 0xc8, 0x13, 0xf4, 0xd2, 0x7b, 0x1f, 0xa9, 0xc7, - 0x1e, 0xfa, 0x00, 0x55, 0xfa, 0x22, 0x95, 0xd7, 0x86, 0x82, 0xd4, 0xde, 0x66, 0xbe, 0xf9, 0x76, - 0x67, 0xbf, 0x6f, 0xc6, 0x86, 0x83, 0x09, 0xb3, 0x66, 0x18, 0x06, 0x93, 0x76, 0x33, 0x08, 0x7d, - 0xe1, 0x93, 0xe2, 0x06, 0x30, 0x7e, 0x2a, 0xa0, 0x0e, 0xef, 0x48, 0x15, 0xb2, 0x0b, 0x5c, 0xe9, - 0x4a, 0x5d, 0x69, 0xec, 0xd3, 0x38, 0x24, 0xcf, 0x61, 0x2f, 0x62, 0xce, 0x12, 0x75, 0x55, 0x62, - 0x49, 0x42, 0x5e, 0x42, 0x71, 0xc9, 0x31, 0x1c, 0xbb, 0x28, 0x98, 0x9e, 0x95, 0x95, 0x42, 0x0c, - 0x5c, 0xa3, 0x60, 0x44, 0x87, 0x7c, 0x84, 0x21, 0xb7, 0x7d, 0x4f, 0xd7, 0xea, 0x4a, 0x43, 0xa3, - 0xeb, 0x94, 0x1c, 0x03, 0xe0, 0x43, 0x60, 0x87, 0xc8, 0xc7, 0x4c, 0xe8, 0x7b, 0xb2, 0x58, 0x4c, - 0x91, 0xae, 0x20, 0x04, 0x34, 0x79, 0x61, 0x4e, 0x5e, 0x28, 0xe3, 0xb8, 0x13, 0x17, 0x21, 0x32, - 0x77, 0x6c, 0x5b, 0x3a, 0xd4, 0x95, 0x46, 0x99, 0x16, 0x12, 0x60, 0x60, 0x91, 0x57, 0x50, 0x4a, - 0x8b, 0x96, 0xef, 0xa1, 0x5e, 0xaa, 0x2b, 0x8d, 0x02, 0x85, 0x04, 0xba, 0xf0, 0x3d, 0x34, 0x5e, - 0x43, 0x6e, 0x78, 0x77, 0x65, 0x73, 0x41, 0x8e, 0x41, 0x5d, 0x44, 0xba, 0x52, 0xcf, 0x36, 0x4a, - 0xed, 0x72, 0xf3, 0xaf, 0x13, 0xc3, 0x3b, 0xaa, 0x2e, 0x22, 0xe3, 0x12, 0x9e, 0x5d, 0x33, 0xcf, - 0x9e, 0x22, 0x17, 0xbd, 0x39, 0xf3, 0x66, 0x78, 0x83, 0x82, 0x74, 0x20, 0x6f, 0xca, 0x84, 0xa7, - 0x07, 0x8f, 0xb6, 0x0e, 0xee, 0xd2, 0xe9, 0x9a, 0x69, 0x7c, 0x55, 0xa1, 0xb2, 0x5b, 0x23, 0x15, - 0x50, 0x07, 0x96, 0x34, 0x55, 0xa3, 0xea, 0xc0, 0x22, 0x1d, 0x50, 0x47, 0x81, 0x34, 0xb4, 0xd2, - 0x3e, 0xfd, 0xef, 0x95, 0xcd, 0x51, 0x80, 0x21, 0x13, 0xb6, 0xef, 0x51, 0x75, 0x14, 0xc4, 0x83, - 0xb8, 0xc2, 0x08, 0x1d, 0x69, 0x77, 0x99, 0x26, 0x09, 0x79, 0x01, 0xb9, 0x05, 0xae, 0x62, 0x6f, - 0x12, 0xab, 0xf7, 0x16, 0xb8, 0x1a, 0x58, 0xe4, 0x1c, 0x0e, 0xd0, 0x33, 0xc3, 0x55, 0x10, 0x1f, - 0x1f, 0x33, 0x67, 0xe6, 0x4b, 0xb7, 0x2b, 0x3b, 0x0a, 0xfa, 0x1b, 0x46, 0xd7, 0x99, 0xf9, 0xb4, - 0x82, 0x3b, 0x39, 0xa9, 0x43, 0xc9, 0xf4, 0xdd, 0x20, 0x44, 0x2e, 0x47, 0x99, 0x93, 0x6d, 0xb7, - 0x21, 0xe3, 0x14, 0x8a, 0x9b, 0x37, 0x12, 0x80, 0x5c, 0x8f, 0xf6, 0xbb, 0xb7, 0xfd, 0x6a, 0x26, - 0x8e, 0x2f, 0xfa, 0x57, 0xfd, 0xdb, 0x7e, 0x55, 0x31, 0x06, 0x50, 0x3a, 0x77, 0x7c, 
0x73, 0x31, - 0x9a, 0x4e, 0x39, 0x8a, 0x7f, 0x6c, 0xd8, 0x21, 0xe4, 0x7c, 0x59, 0x93, 0x8e, 0x94, 0x69, 0x9a, - 0xc5, 0x4c, 0x07, 0xbd, 0x54, 0x6e, 0x1c, 0x1a, 0x5f, 0x14, 0x80, 0x5b, 0x36, 0x71, 0x70, 0xe0, - 0x59, 0xf8, 0x40, 0xde, 0x41, 0x3e, 0xa1, 0xae, 0xc7, 0x73, 0xb8, 0x25, 0x6e, 0xab, 0x27, 0x5d, - 0xd3, 0xc8, 0x09, 0xec, 0x4f, 0x1c, 0xdf, 0x77, 0xc7, 0x53, 0xdb, 0x11, 0x18, 0xa6, 0x3b, 0x5d, - 0x92, 0xd8, 0x27, 0x09, 0x91, 0x33, 0xa8, 0x20, 0x17, 0xb6, 0xcb, 0x04, 0x5a, 0x63, 0x6e, 0x3f, - 0xa2, 0x7c, 0x80, 0x46, 0xcb, 0x1b, 0xf4, 0xc6, 0x7e, 0x44, 0x23, 0x82, 0x42, 0x6f, 0x8e, 0xe6, - 0x82, 0x2f, 0x5d, 0xf2, 0x1e, 0x34, 0xe9, 0xb0, 0x22, 0x1d, 0x3e, 0xde, 0x7a, 0xc4, 0x9a, 0xd2, - 0x8c, 0x0d, 0x0d, 0x6d, 0x31, 0x77, 0xa9, 0xa4, 0xc6, 0xda, 0xf8, 0xd2, 0x95, 0xfd, 0x35, 0x1a, - 0x87, 0xc6, 0x19, 0x14, 0x37, 0xa4, 0xc4, 0xcb, 0x5e, 0xa7, 0xdd, 0xab, 0x66, 0xc8, 0x3e, 0x14, - 0xee, 0xef, 0x2f, 0x19, 0x9f, 0x7f, 0xfc, 0x50, 0x55, 0x0c, 0x13, 0xf2, 0x17, 0x4c, 0xb0, 0x21, - 0xae, 0xb6, 0x46, 0xaf, 0x6c, 0x8f, 0x9e, 0x80, 0x66, 0x31, 0xc1, 0x52, 0x6d, 0x32, 0x8e, 0x17, - 0xd0, 0x8e, 0xd2, 0xef, 0x54, 0xb5, 0xa3, 0xf8, 0x3b, 0x34, 0x43, 0x94, 0x12, 0x99, 0x90, 0x9b, - 0x93, 0xa5, 0xc5, 0x14, 0xe9, 0x8a, 0x37, 0x47, 0x50, 0xd9, 0xdd, 0x0d, 0x92, 0x87, 0x2c, 0x43, - 0x5e, 0xcd, 0x9c, 0x77, 0xbe, 0x3f, 0xd5, 0x94, 0x1f, 0x4f, 0x35, 0xe5, 0xd7, 0x53, 0x4d, 0xf9, - 0xf6, 0xbb, 0x96, 0xf9, 0x7c, 0x32, 0xb3, 0xc5, 0x7c, 0x39, 0x69, 0x9a, 0xbe, 0xdb, 0xb2, 0x66, - 0x21, 0x0b, 0xe6, 0x6f, 0x6d, 0xbf, 0x95, 0x78, 0xd0, 0x8a, 0xda, 0xad, 0x60, 0x32, 0xc9, 0xc9, - 0xdf, 0x4d, 0xe7, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x88, 0x22, 0x82, 0x98, 0x81, 0x04, 0x00, - 0x00, -} - -func (m *KV) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *KV) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *KV) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.StreamDone { - i-- - if m.StreamDone { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x58 - } - if m.StreamId != 0 { - i = encodeVarintBadgerpb2(dAtA, i, uint64(m.StreamId)) - i-- - dAtA[i] = 0x50 - } - if len(m.Meta) > 0 { - i -= len(m.Meta) - copy(dAtA[i:], m.Meta) - i = encodeVarintBadgerpb2(dAtA, i, uint64(len(m.Meta))) - i-- - dAtA[i] = 0x32 - } - if m.ExpiresAt != 0 { - i = encodeVarintBadgerpb2(dAtA, i, uint64(m.ExpiresAt)) - i-- - dAtA[i] = 0x28 - } - if m.Version != 0 { - i = encodeVarintBadgerpb2(dAtA, i, uint64(m.Version)) - i-- - dAtA[i] = 0x20 - } - if len(m.UserMeta) > 0 { - i -= len(m.UserMeta) - copy(dAtA[i:], m.UserMeta) - i = encodeVarintBadgerpb2(dAtA, i, uint64(len(m.UserMeta))) - i-- - dAtA[i] = 0x1a - } - if len(m.Value) > 0 { - i -= len(m.Value) - copy(dAtA[i:], m.Value) - i = encodeVarintBadgerpb2(dAtA, i, uint64(len(m.Value))) - i-- - dAtA[i] = 0x12 - } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintBadgerpb2(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *KVList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - 
-func (m *KVList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *KVList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Kv) > 0 { - for iNdEx := len(m.Kv) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Kv[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintBadgerpb2(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *ManifestChangeSet) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ManifestChangeSet) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ManifestChangeSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Changes) > 0 { - for iNdEx := len(m.Changes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Changes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintBadgerpb2(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *ManifestChange) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ManifestChange) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ManifestChange) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Compression != 0 { - i = encodeVarintBadgerpb2(dAtA, i, uint64(m.Compression)) - i-- - dAtA[i] = 0x30 - } - if m.EncryptionAlgo != 0 { - i = encodeVarintBadgerpb2(dAtA, i, uint64(m.EncryptionAlgo)) - i-- - dAtA[i] = 0x28 - } - if m.KeyId != 0 { - i = encodeVarintBadgerpb2(dAtA, i, uint64(m.KeyId)) - i-- - dAtA[i] = 0x20 - } - if m.Level != 0 { - i = encodeVarintBadgerpb2(dAtA, i, uint64(m.Level)) - i-- - dAtA[i] = 0x18 - } - if m.Op != 0 { - i = encodeVarintBadgerpb2(dAtA, i, uint64(m.Op)) - i-- - dAtA[i] = 0x10 - } - if m.Id != 0 { - i = encodeVarintBadgerpb2(dAtA, i, uint64(m.Id)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *BlockOffset) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *BlockOffset) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *BlockOffset) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Len != 0 { - i = encodeVarintBadgerpb2(dAtA, i, uint64(m.Len)) - i-- - dAtA[i] = 0x18 - } - if m.Offset != 0 { - i = encodeVarintBadgerpb2(dAtA, i, uint64(m.Offset)) - i-- - 
dAtA[i] = 0x10 - } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintBadgerpb2(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *TableIndex) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TableIndex) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *TableIndex) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.EstimatedSize != 0 { - i = encodeVarintBadgerpb2(dAtA, i, uint64(m.EstimatedSize)) - i-- - dAtA[i] = 0x18 - } - if len(m.BloomFilter) > 0 { - i -= len(m.BloomFilter) - copy(dAtA[i:], m.BloomFilter) - i = encodeVarintBadgerpb2(dAtA, i, uint64(len(m.BloomFilter))) - i-- - dAtA[i] = 0x12 - } - if len(m.Offsets) > 0 { - for iNdEx := len(m.Offsets) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Offsets[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintBadgerpb2(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *Checksum) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Checksum) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Checksum) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Sum != 0 { - i = encodeVarintBadgerpb2(dAtA, i, uint64(m.Sum)) - i-- - dAtA[i] = 0x10 - } - if m.Algo != 0 { - i = encodeVarintBadgerpb2(dAtA, i, uint64(m.Algo)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *DataKey) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DataKey) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DataKey) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.CreatedAt != 0 { - i = encodeVarintBadgerpb2(dAtA, i, uint64(m.CreatedAt)) - i-- - dAtA[i] = 0x20 - } - if len(m.Iv) > 0 { - i -= len(m.Iv) - copy(dAtA[i:], m.Iv) - i = encodeVarintBadgerpb2(dAtA, i, uint64(len(m.Iv))) - i-- - dAtA[i] = 0x1a - } - if len(m.Data) > 0 { - i -= len(m.Data) - copy(dAtA[i:], m.Data) - i = encodeVarintBadgerpb2(dAtA, i, uint64(len(m.Data))) - i-- - dAtA[i] = 0x12 - } - if m.KeyId != 0 { - i = encodeVarintBadgerpb2(dAtA, i, uint64(m.KeyId)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func encodeVarintBadgerpb2(dAtA []byte, offset int, v uint64) int { - offset -= sovBadgerpb2(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *KV) Size() (n int) { - if 
m == nil { - return 0 - } - var l int - _ = l - l = len(m.Key) - if l > 0 { - n += 1 + l + sovBadgerpb2(uint64(l)) - } - l = len(m.Value) - if l > 0 { - n += 1 + l + sovBadgerpb2(uint64(l)) - } - l = len(m.UserMeta) - if l > 0 { - n += 1 + l + sovBadgerpb2(uint64(l)) - } - if m.Version != 0 { - n += 1 + sovBadgerpb2(uint64(m.Version)) - } - if m.ExpiresAt != 0 { - n += 1 + sovBadgerpb2(uint64(m.ExpiresAt)) - } - l = len(m.Meta) - if l > 0 { - n += 1 + l + sovBadgerpb2(uint64(l)) - } - if m.StreamId != 0 { - n += 1 + sovBadgerpb2(uint64(m.StreamId)) - } - if m.StreamDone { - n += 2 - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *KVList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Kv) > 0 { - for _, e := range m.Kv { - l = e.Size() - n += 1 + l + sovBadgerpb2(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ManifestChangeSet) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Changes) > 0 { - for _, e := range m.Changes { - l = e.Size() - n += 1 + l + sovBadgerpb2(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ManifestChange) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Id != 0 { - n += 1 + sovBadgerpb2(uint64(m.Id)) - } - if m.Op != 0 { - n += 1 + sovBadgerpb2(uint64(m.Op)) - } - if m.Level != 0 { - n += 1 + sovBadgerpb2(uint64(m.Level)) - } - if m.KeyId != 0 { - n += 1 + sovBadgerpb2(uint64(m.KeyId)) - } - if m.EncryptionAlgo != 0 { - n += 1 + sovBadgerpb2(uint64(m.EncryptionAlgo)) - } - if m.Compression != 0 { - n += 1 + sovBadgerpb2(uint64(m.Compression)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *BlockOffset) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Key) - if l > 0 { - n += 1 + l + sovBadgerpb2(uint64(l)) - } - if m.Offset != 0 { - n += 1 + sovBadgerpb2(uint64(m.Offset)) - } - if m.Len != 0 { - n += 1 + sovBadgerpb2(uint64(m.Len)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *TableIndex) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Offsets) > 0 { - for _, e := range m.Offsets { - l = e.Size() - n += 1 + l + sovBadgerpb2(uint64(l)) - } - } - l = len(m.BloomFilter) - if l > 0 { - n += 1 + l + sovBadgerpb2(uint64(l)) - } - if m.EstimatedSize != 0 { - n += 1 + sovBadgerpb2(uint64(m.EstimatedSize)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *Checksum) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Algo != 0 { - n += 1 + sovBadgerpb2(uint64(m.Algo)) - } - if m.Sum != 0 { - n += 1 + sovBadgerpb2(uint64(m.Sum)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *DataKey) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.KeyId != 0 { - n += 1 + sovBadgerpb2(uint64(m.KeyId)) - } - l = len(m.Data) - if l > 0 { - n += 1 + l + sovBadgerpb2(uint64(l)) - } - l = len(m.Iv) - if l > 0 { - n += 1 + l + sovBadgerpb2(uint64(l)) - } - if m.CreatedAt != 0 { - n += 1 + sovBadgerpb2(uint64(m.CreatedAt)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovBadgerpb2(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozBadgerpb2(x uint64) (n int) { - 
return sovBadgerpb2(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *KV) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBadgerpb2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: KV: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: KV: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBadgerpb2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthBadgerpb2 - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthBadgerpb2 - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) - if m.Key == nil { - m.Key = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBadgerpb2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthBadgerpb2 - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthBadgerpb2 - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) - if m.Value == nil { - m.Value = []byte{} - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UserMeta", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBadgerpb2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthBadgerpb2 - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthBadgerpb2 - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.UserMeta = append(m.UserMeta[:0], dAtA[iNdEx:postIndex]...) 
- if m.UserMeta == nil { - m.UserMeta = []byte{} - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) - } - m.Version = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBadgerpb2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Version |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ExpiresAt", wireType) - } - m.ExpiresAt = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBadgerpb2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ExpiresAt |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBadgerpb2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthBadgerpb2 - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthBadgerpb2 - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Meta = append(m.Meta[:0], dAtA[iNdEx:postIndex]...) - if m.Meta == nil { - m.Meta = []byte{} - } - iNdEx = postIndex - case 10: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StreamId", wireType) - } - m.StreamId = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBadgerpb2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.StreamId |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 11: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StreamDone", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBadgerpb2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.StreamDone = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipBadgerpb2(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthBadgerpb2 - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthBadgerpb2 - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *KVList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBadgerpb2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: KVList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: KVList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kv", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBadgerpb2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthBadgerpb2 - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthBadgerpb2 - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Kv = append(m.Kv, &KV{}) - if err := m.Kv[len(m.Kv)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipBadgerpb2(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthBadgerpb2 - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthBadgerpb2 - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
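Editor's note: the generated Unmarshal methods above inline the same base-128 varint loop for every field — each byte contributes its low seven bits, a clear high bit terminates the value, and the decoded key splits into a field number (high bits) and wire type (low three bits). A minimal standalone sketch of that step; the function and example values are illustrative, not part of the generated code:

```go
package main

import (
	"errors"
	"fmt"
)

// decodeVarint mirrors the shift loop the generated code inlines per field:
// each byte carries 7 payload bits, and a clear high bit ends the value.
func decodeVarint(b []byte) (v uint64, n int, err error) {
	for shift := uint(0); shift < 64; shift += 7 {
		if n >= len(b) {
			return 0, 0, errors.New("unexpected EOF") // io.ErrUnexpectedEOF above
		}
		c := b[n]
		n++
		v |= uint64(c&0x7F) << shift
		if c < 0x80 {
			return v, n, nil
		}
	}
	return 0, 0, errors.New("integer overflow") // ErrIntOverflowBadgerpb2 above
}

func main() {
	// 0x0A is the key for field 1 with wire type 2: KV.key, length-delimited.
	key, n, _ := decodeVarint([]byte{0x0A})
	fmt.Println(key>>3, key&0x7, n) // 1 2 1
}
```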
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ManifestChangeSet) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBadgerpb2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ManifestChangeSet: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ManifestChangeSet: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Changes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBadgerpb2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthBadgerpb2 - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthBadgerpb2 - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Changes = append(m.Changes, &ManifestChange{}) - if err := m.Changes[len(m.Changes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipBadgerpb2(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthBadgerpb2 - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthBadgerpb2 - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ManifestChange) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBadgerpb2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ManifestChange: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ManifestChange: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) - } - m.Id = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBadgerpb2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Id |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Op", wireType) - } - m.Op = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBadgerpb2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Op |= ManifestChange_Operation(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Level", wireType) - } - m.Level = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBadgerpb2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Level |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field KeyId", wireType) - } - m.KeyId = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBadgerpb2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.KeyId |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field EncryptionAlgo", wireType) - } - m.EncryptionAlgo = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBadgerpb2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.EncryptionAlgo |= EncryptionAlgo(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Compression", wireType) - } - m.Compression = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBadgerpb2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Compression |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipBadgerpb2(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthBadgerpb2 - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthBadgerpb2 - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
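Editor's note: ManifestChange.Unmarshal above shows that enum-typed fields travel as plain varints — the decoded integer is shifted straight into the named type, with no range check. A small self-contained sketch of what that cast means for the Operation enum declared in the .proto file later in this diff (constants restated here for illustration):

```go
package main

import "fmt"

// Generated enums are named int32 types; on the wire they are ordinary
// varints, which Unmarshal casts into the type exactly as the Op case does.
type ManifestChange_Operation int32

const (
	ManifestChange_CREATE ManifestChange_Operation = 0
	ManifestChange_DELETE ManifestChange_Operation = 1
)

func main() {
	op := ManifestChange_Operation(1)        // raw decoded varint, now typed
	fmt.Println(op == ManifestChange_DELETE) // true
}
```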
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *BlockOffset) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBadgerpb2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: BlockOffset: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: BlockOffset: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBadgerpb2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthBadgerpb2 - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthBadgerpb2 - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) - if m.Key == nil { - m.Key = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType) - } - m.Offset = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBadgerpb2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Offset |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Len", wireType) - } - m.Len = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBadgerpb2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Len |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipBadgerpb2(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthBadgerpb2 - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthBadgerpb2 - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TableIndex) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBadgerpb2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TableIndex: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TableIndex: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Offsets", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBadgerpb2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthBadgerpb2 - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthBadgerpb2 - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Offsets = append(m.Offsets, &BlockOffset{}) - if err := m.Offsets[len(m.Offsets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BloomFilter", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBadgerpb2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthBadgerpb2 - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthBadgerpb2 - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.BloomFilter = append(m.BloomFilter[:0], dAtA[iNdEx:postIndex]...) - if m.BloomFilter == nil { - m.BloomFilter = []byte{} - } - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field EstimatedSize", wireType) - } - m.EstimatedSize = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBadgerpb2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.EstimatedSize |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipBadgerpb2(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthBadgerpb2 - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthBadgerpb2 - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Checksum) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBadgerpb2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Checksum: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Checksum: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Algo", wireType) - } - m.Algo = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBadgerpb2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Algo |= Checksum_Algorithm(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType) - } - m.Sum = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBadgerpb2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Sum |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipBadgerpb2(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthBadgerpb2 - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthBadgerpb2 - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DataKey) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBadgerpb2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DataKey: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DataKey: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field KeyId", wireType) - } - m.KeyId = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBadgerpb2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.KeyId |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBadgerpb2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthBadgerpb2 - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthBadgerpb2 - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) - if m.Data == nil { - m.Data = []byte{} - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Iv", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBadgerpb2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthBadgerpb2 - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthBadgerpb2 - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Iv = append(m.Iv[:0], dAtA[iNdEx:postIndex]...) - if m.Iv == nil { - m.Iv = []byte{} - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType) - } - m.CreatedAt = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBadgerpb2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.CreatedAt |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipBadgerpb2(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthBadgerpb2 - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthBadgerpb2 - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipBadgerpb2(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowBadgerpb2 - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowBadgerpb2 - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowBadgerpb2 - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthBadgerpb2 - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupBadgerpb2 - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthBadgerpb2 - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthBadgerpb2 = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowBadgerpb2 = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupBadgerpb2 = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/github.com/dgraph-io/badger/v2/pb/badgerpb2.proto b/vendor/github.com/dgraph-io/badger/v2/pb/badgerpb2.proto deleted file mode 100644 index 00fad8c3..00000000 --- a/vendor/github.com/dgraph-io/badger/v2/pb/badgerpb2.proto +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Copyright (C) 2017 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// Use protos/gen.sh to generate .pb.go files. -syntax = "proto3"; - -package badgerpb2; - -option go_package = "github.com/dgraph-io/badger/v2/pb"; - -message KV { - bytes key = 1; - bytes value = 2; - bytes user_meta = 3; - uint64 version = 4; - uint64 expires_at = 5; - bytes meta = 6; - - // Stream id is used to identify which stream the KV came from. - uint32 stream_id = 10; - // Stream done is used to indicate end of stream. - bool stream_done = 11; -} - -message KVList { - repeated KV kv = 1; -} - -message ManifestChangeSet { - // A set of changes that are applied atomically. - repeated ManifestChange changes = 1; -} - -enum EncryptionAlgo { - aes = 0; -} - -message ManifestChange { - uint64 Id = 1; // Table ID. - enum Operation { - CREATE = 0; - DELETE = 1; - } - Operation Op = 2; - uint32 Level = 3; // Only used for CREATE. 
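Editor's note: skipBadgerpb2 above walks past unknown fields by dispatching on the wire type packed into the low three bits of each key — varints are scanned to their terminating byte, fixed64/fixed32 skip 8 or 4 bytes, length-delimited fields skip a varint-declared length, and group markers only adjust a depth counter. A simplified sketch of that dispatch, reusing the decodeVarint helper from the earlier note (group handling and overflow checks elided):

```go
// skipField returns the index just past one unknown field starting at i,
// a stripped-down version of skipBadgerpb2's wire-type switch.
func skipField(b []byte, i int) (int, error) {
	key, n, err := decodeVarint(b[i:])
	if err != nil {
		return 0, err
	}
	i += n
	switch key & 0x7 {
	case 0: // varint payload: scan to its terminating byte
		_, n, err := decodeVarint(b[i:])
		return i + n, err
	case 1: // fixed64
		return i + 8, nil
	case 2: // length-delimited: varint length prefix, then that many bytes
		l, n, err := decodeVarint(b[i:])
		return i + n + int(l), err
	case 5: // fixed32
		return i + 4, nil
	default: // group wire types (3, 4) omitted from this sketch
		return 0, fmt.Errorf("proto: illegal wireType %d", key&0x7)
	}
}
```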
- uint64 key_id = 4; - EncryptionAlgo encryption_algo = 5; - uint32 compression = 6; // Only used for CREATE Op. -} - -message BlockOffset { - bytes key = 1; - uint32 offset = 2; - uint32 len = 3; -} - -message TableIndex { - repeated BlockOffset offsets = 1; - bytes bloom_filter = 2; - uint64 estimated_size = 3; -} - -message Checksum { - enum Algorithm { - CRC32C = 0; - XXHash64 = 1; - } - Algorithm algo = 1; // For storing type of Checksum algorithm used - uint64 sum = 2; -} - -message DataKey { - uint64 key_id = 1; - bytes data = 2; - bytes iv = 3; - int64 created_at = 4; -} diff --git a/vendor/github.com/dgraph-io/badger/v2/pb/gen.sh b/vendor/github.com/dgraph-io/badger/v2/pb/gen.sh deleted file mode 100644 index 0b017692..00000000 --- a/vendor/github.com/dgraph-io/badger/v2/pb/gen.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash - -# Run this script from its directory, so that badgerpb2.proto is where it's expected to -# be. - -# You might need to go get -v github.com/gogo/protobuf/... - -protoc --gofast_out=plugins=grpc:. --gofast_opt=paths=source_relative -I=. badgerpb2.proto diff --git a/vendor/github.com/dgraph-io/badger/v2/publisher.go b/vendor/github.com/dgraph-io/badger/v2/publisher.go deleted file mode 100644 index bc5c6e8c..00000000 --- a/vendor/github.com/dgraph-io/badger/v2/publisher.go +++ /dev/null @@ -1,164 +0,0 @@ -/* - * Copyright 2019 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package badger - -import ( - "sync" - - "github.com/dgraph-io/badger/v2/pb" - "github.com/dgraph-io/badger/v2/trie" - "github.com/dgraph-io/badger/v2/y" -) - -type subscriber struct { - prefixes [][]byte - sendCh chan<- *pb.KVList - subCloser *y.Closer -} - -type publisher struct { - sync.Mutex - pubCh chan requests - subscribers map[uint64]subscriber - nextID uint64 - indexer *trie.Trie -} - -func newPublisher() *publisher { - return &publisher{ - pubCh: make(chan requests, 1000), - subscribers: make(map[uint64]subscriber), - nextID: 0, - indexer: trie.NewTrie(), - } -} - -func (p *publisher) listenForUpdates(c *y.Closer) { - defer func() { - p.cleanSubscribers() - c.Done() - }() - slurp := func(batch requests) { - for { - select { - case reqs := <-p.pubCh: - batch = append(batch, reqs...) - default: - p.publishUpdates(batch) - return - } - } - } - for { - select { - case <-c.HasBeenClosed(): - return - case reqs := <-p.pubCh: - slurp(reqs) - } - } -} - -func (p *publisher) publishUpdates(reqs requests) { - p.Lock() - defer func() { - p.Unlock() - // Release all the request. 
- reqs.DecrRef() - }() - batchedUpdates := make(map[uint64]*pb.KVList) - for _, req := range reqs { - for _, e := range req.Entries { - ids := p.indexer.Get(e.Key) - if len(ids) > 0 { - k := y.SafeCopy(nil, e.Key) - kv := &pb.KV{ - Key: y.ParseKey(k), - Value: y.SafeCopy(nil, e.Value), - Meta: []byte{e.UserMeta}, - ExpiresAt: e.ExpiresAt, - Version: y.ParseTs(k), - } - for id := range ids { - if _, ok := batchedUpdates[id]; !ok { - batchedUpdates[id] = &pb.KVList{} - } - batchedUpdates[id].Kv = append(batchedUpdates[id].Kv, kv) - } - } - } - } - - for id, kvs := range batchedUpdates { - p.subscribers[id].sendCh <- kvs - } -} - -func (p *publisher) newSubscriber(c *y.Closer, prefixes ...[]byte) (<-chan *pb.KVList, uint64) { - p.Lock() - defer p.Unlock() - ch := make(chan *pb.KVList, 1000) - id := p.nextID - // Increment next ID. - p.nextID++ - p.subscribers[id] = subscriber{ - prefixes: prefixes, - sendCh: ch, - subCloser: c, - } - for _, prefix := range prefixes { - p.indexer.Add(prefix, id) - } - return ch, id -} - -// cleanSubscribers stops all the subscribers. Ideally, It should be called while closing DB. -func (p *publisher) cleanSubscribers() { - p.Lock() - defer p.Unlock() - for id, s := range p.subscribers { - for _, prefix := range s.prefixes { - p.indexer.Delete(prefix, id) - } - delete(p.subscribers, id) - s.subCloser.SignalAndWait() - } -} - -func (p *publisher) deleteSubscriber(id uint64) { - p.Lock() - defer p.Unlock() - if s, ok := p.subscribers[id]; ok { - for _, prefix := range s.prefixes { - p.indexer.Delete(prefix, id) - } - } - delete(p.subscribers, id) -} - -func (p *publisher) sendUpdates(reqs requests) { - if p.noOfSubscribers() != 0 { - reqs.IncrRef() - p.pubCh <- reqs - } -} - -func (p *publisher) noOfSubscribers() int { - p.Lock() - defer p.Unlock() - return len(p.subscribers) -} diff --git a/vendor/github.com/dgraph-io/badger/v2/skl/README.md b/vendor/github.com/dgraph-io/badger/v2/skl/README.md deleted file mode 100644 index e22e4590..00000000 --- a/vendor/github.com/dgraph-io/badger/v2/skl/README.md +++ /dev/null @@ -1,113 +0,0 @@ -This is much better than `skiplist` and `slist`. - -``` -BenchmarkReadWrite/frac_0-8 3000000 537 ns/op -BenchmarkReadWrite/frac_1-8 3000000 503 ns/op -BenchmarkReadWrite/frac_2-8 3000000 492 ns/op -BenchmarkReadWrite/frac_3-8 3000000 475 ns/op -BenchmarkReadWrite/frac_4-8 3000000 440 ns/op -BenchmarkReadWrite/frac_5-8 5000000 442 ns/op -BenchmarkReadWrite/frac_6-8 5000000 380 ns/op -BenchmarkReadWrite/frac_7-8 5000000 338 ns/op -BenchmarkReadWrite/frac_8-8 5000000 294 ns/op -BenchmarkReadWrite/frac_9-8 10000000 268 ns/op -BenchmarkReadWrite/frac_10-8 100000000 26.3 ns/op -``` - -And even better than a simple map with read-write lock: - -``` -BenchmarkReadWriteMap/frac_0-8 2000000 774 ns/op -BenchmarkReadWriteMap/frac_1-8 2000000 647 ns/op -BenchmarkReadWriteMap/frac_2-8 3000000 605 ns/op -BenchmarkReadWriteMap/frac_3-8 3000000 603 ns/op -BenchmarkReadWriteMap/frac_4-8 3000000 556 ns/op -BenchmarkReadWriteMap/frac_5-8 3000000 472 ns/op -BenchmarkReadWriteMap/frac_6-8 3000000 476 ns/op -BenchmarkReadWriteMap/frac_7-8 3000000 457 ns/op -BenchmarkReadWriteMap/frac_8-8 5000000 444 ns/op -BenchmarkReadWriteMap/frac_9-8 5000000 361 ns/op -BenchmarkReadWriteMap/frac_10-8 10000000 212 ns/op -``` - -# Node Pooling - -Command used - -``` -rm -Rf tmp && /usr/bin/time -l ./populate -keys_mil 10 -``` - -For pprof results, we run without using /usr/bin/time. There are four runs below. - -Results seem to vary quite a bit between runs. 
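Editor's note: the publisher removed above batches pending writes and fans each key out to every subscriber whose registered prefix covers it, with a trie serving as the prefix index. A self-contained sketch of that delivery step, substituting a linear scan for the trie (all names here are illustrative):

```go
package main

import (
	"bytes"
	"fmt"
)

// sub pairs a registered prefix with a delivery channel, echoing the
// subscriber struct in the deleted publisher.go.
type sub struct {
	prefix []byte
	ch     chan []byte
}

// publish sends key to every matching subscriber; the real publisher
// answers "which subscribers match?" with a trie instead of this scan.
func publish(subs []sub, key []byte) {
	for _, s := range subs {
		if bytes.HasPrefix(key, s.prefix) {
			s.ch <- key
		}
	}
}

func main() {
	ch := make(chan []byte, 1)
	publish([]sub{{prefix: []byte("user:"), ch: ch}}, []byte("user:42"))
	fmt.Printf("%s\n", <-ch) // user:42
}
```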
- -## Before node pooling - -``` -1311.53MB of 1338.69MB total (97.97%) -Dropped 30 nodes (cum <= 6.69MB) -Showing top 10 nodes out of 37 (cum >= 12.50MB) - flat flat% sum% cum cum% - 523.04MB 39.07% 39.07% 523.04MB 39.07% github.com/dgraph-io/badger/skl.(*Skiplist).Put - 184.51MB 13.78% 52.85% 184.51MB 13.78% runtime.stringtoslicebyte - 166.01MB 12.40% 65.25% 689.04MB 51.47% github.com/dgraph-io/badger/mem.(*Table).Put - 165MB 12.33% 77.58% 165MB 12.33% runtime.convT2E - 116.92MB 8.73% 86.31% 116.92MB 8.73% bytes.makeSlice - 62.50MB 4.67% 90.98% 62.50MB 4.67% main.newValue - 34.50MB 2.58% 93.56% 34.50MB 2.58% github.com/dgraph-io/badger/table.(*BlockIterator).parseKV - 25.50MB 1.90% 95.46% 100.06MB 7.47% github.com/dgraph-io/badger/y.(*MergeIterator).Next - 21.06MB 1.57% 97.04% 21.06MB 1.57% github.com/dgraph-io/badger/table.(*Table).read - 12.50MB 0.93% 97.97% 12.50MB 0.93% github.com/dgraph-io/badger/table.header.Encode - - 128.31 real 329.37 user 17.11 sys -3355660288 maximum resident set size - 0 average shared memory size - 0 average unshared data size - 0 average unshared stack size - 2203080 page reclaims - 764 page faults - 0 swaps - 275 block input operations - 76 block output operations - 0 messages sent - 0 messages received - 0 signals received - 49173 voluntary context switches - 599922 involuntary context switches -``` - -## After node pooling - -``` -1963.13MB of 2026.09MB total (96.89%) -Dropped 29 nodes (cum <= 10.13MB) -Showing top 10 nodes out of 41 (cum >= 185.62MB) - flat flat% sum% cum cum% - 658.05MB 32.48% 32.48% 658.05MB 32.48% github.com/dgraph-io/badger/skl.glob..func1 - 297.51MB 14.68% 47.16% 297.51MB 14.68% runtime.convT2E - 257.51MB 12.71% 59.87% 257.51MB 12.71% runtime.stringtoslicebyte - 249.01MB 12.29% 72.16% 1007.06MB 49.70% github.com/dgraph-io/badger/mem.(*Table).Put - 142.43MB 7.03% 79.19% 142.43MB 7.03% bytes.makeSlice - 100MB 4.94% 84.13% 758.05MB 37.41% github.com/dgraph-io/badger/skl.newNode - 99.50MB 4.91% 89.04% 99.50MB 4.91% main.newValue - 75MB 3.70% 92.74% 75MB 3.70% github.com/dgraph-io/badger/table.(*BlockIterator).parseKV - 44.62MB 2.20% 94.94% 44.62MB 2.20% github.com/dgraph-io/badger/table.(*Table).read - 39.50MB 1.95% 96.89% 185.62MB 9.16% github.com/dgraph-io/badger/y.(*MergeIterator).Next - - 135.58 real 374.29 user 17.65 sys -3740614656 maximum resident set size - 0 average shared memory size - 0 average unshared data size - 0 average unshared stack size - 2276566 page reclaims - 770 page faults - 0 swaps - 128 block input operations - 90 block output operations - 0 messages sent - 0 messages received - 0 signals received - 46434 voluntary context switches - 597049 involuntary context switches -``` diff --git a/vendor/github.com/dgraph-io/badger/v2/skl/arena.go b/vendor/github.com/dgraph-io/badger/v2/skl/arena.go deleted file mode 100644 index 9267b158..00000000 --- a/vendor/github.com/dgraph-io/badger/v2/skl/arena.go +++ /dev/null @@ -1,136 +0,0 @@ -/* - * Copyright 2017 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package skl - -import ( - "sync/atomic" - "unsafe" - - "github.com/dgraph-io/badger/v2/y" -) - -const ( - offsetSize = int(unsafe.Sizeof(uint32(0))) - - // Always align nodes on 64-bit boundaries, even on 32-bit architectures, - // so that the node.value field is 64-bit aligned. This is necessary because - // node.getValueOffset uses atomic.LoadUint64, which expects its input - // pointer to be 64-bit aligned. - nodeAlign = int(unsafe.Sizeof(uint64(0))) - 1 -) - -// Arena should be lock-free. -type Arena struct { - n uint32 - buf []byte -} - -// newArena returns a new arena. -func newArena(n int64) *Arena { - // Don't store data at position 0 in order to reserve offset=0 as a kind - // of nil pointer. - out := &Arena{ - n: 1, - buf: make([]byte, n), - } - return out -} - -func (s *Arena) size() int64 { - return int64(atomic.LoadUint32(&s.n)) -} - -func (s *Arena) reset() { - atomic.StoreUint32(&s.n, 0) -} - -// putNode allocates a node in the arena. The node is aligned on a pointer-sized -// boundary. The arena offset of the node is returned. -func (s *Arena) putNode(height int) uint32 { - // Compute the amount of the tower that will never be used, since the height - // is less than maxHeight. - unusedSize := (maxHeight - height) * offsetSize - - // Pad the allocation with enough bytes to ensure pointer alignment. - l := uint32(MaxNodeSize - unusedSize + nodeAlign) - n := atomic.AddUint32(&s.n, l) - y.AssertTruef(int(n) <= len(s.buf), - "Arena too small, toWrite:%d newTotal:%d limit:%d", - l, n, len(s.buf)) - - // Return the aligned offset. - m := (n - l + uint32(nodeAlign)) & ^uint32(nodeAlign) - return m -} - -// Put will *copy* val into arena. To make better use of this, reuse your input -// val buffer. Returns an offset into buf. User is responsible for remembering -// size of val. We could also store this size inside arena but the encoding and -// decoding will incur some overhead. -func (s *Arena) putVal(v y.ValueStruct) uint32 { - l := uint32(v.EncodedSize()) - n := atomic.AddUint32(&s.n, l) - y.AssertTruef(int(n) <= len(s.buf), - "Arena too small, toWrite:%d newTotal:%d limit:%d", - l, n, len(s.buf)) - m := n - l - v.Encode(s.buf[m:]) - return m -} - -func (s *Arena) putKey(key []byte) uint32 { - l := uint32(len(key)) - n := atomic.AddUint32(&s.n, l) - y.AssertTruef(int(n) <= len(s.buf), - "Arena too small, toWrite:%d newTotal:%d limit:%d", - l, n, len(s.buf)) - m := n - l - y.AssertTrue(len(key) == copy(s.buf[m:n], key)) - return m -} - -// getNode returns a pointer to the node located at offset. If the offset is -// zero, then the nil node pointer is returned. -func (s *Arena) getNode(offset uint32) *node { - if offset == 0 { - return nil - } - - return (*node)(unsafe.Pointer(&s.buf[offset])) -} - -// getKey returns byte slice at offset. -func (s *Arena) getKey(offset uint32, size uint16) []byte { - return s.buf[offset : offset+uint32(size)] -} - -// getVal returns byte slice at offset. The given size should be just the value -// size and should NOT include the meta bytes. -func (s *Arena) getVal(offset uint32, size uint32) (ret y.ValueStruct) { - ret.Decode(s.buf[offset : offset+size]) - return -} - -// getNodeOffset returns the offset of node in the arena. If the node pointer is -// nil, then the zero offset is returned. 
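Editor's note: the arena being deleted here is one pre-allocated buffer plus a single atomically bumped counter; putNode additionally rounds each node's offset up to an 8-byte boundary so the node's uint64 value word stays safe for atomic loads. A standalone sketch of that bump-and-align allocation (buffer size and call sites are illustrative):

```go
package main

import (
	"fmt"
	"sync/atomic"
)

const nodeAlign = 8 - 1 // align to 64-bit boundaries, as the arena does

// arena hands out offsets into a fixed buffer with an atomic bump counter.
// Offset 0 is reserved to act as a nil pointer, as in newArena.
type arena struct {
	n   uint32
	buf []byte
}

func (a *arena) putAligned(size uint32) uint32 {
	l := size + nodeAlign // pad so the result can be rounded up safely
	n := atomic.AddUint32(&a.n, l)
	if int(n) > len(a.buf) {
		panic("arena too small") // the AssertTruef failure above
	}
	// Round the allocation's start up to the next 8-byte boundary.
	return (n - l + nodeAlign) & ^uint32(nodeAlign)
}

func main() {
	a := &arena{n: 1, buf: make([]byte, 1024)}
	fmt.Println(a.putAligned(10), a.putAligned(3)) // 8 24: both 8-byte aligned
}
```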
-func (s *Arena) getNodeOffset(nd *node) uint32 { - if nd == nil { - return 0 - } - - return uint32(uintptr(unsafe.Pointer(nd)) - uintptr(unsafe.Pointer(&s.buf[0]))) -} diff --git a/vendor/github.com/dgraph-io/badger/v2/skl/skl.go b/vendor/github.com/dgraph-io/badger/v2/skl/skl.go deleted file mode 100644 index 43694f14..00000000 --- a/vendor/github.com/dgraph-io/badger/v2/skl/skl.go +++ /dev/null @@ -1,517 +0,0 @@ -/* - * Copyright 2017 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* -Adapted from RocksDB inline skiplist. - -Key differences: -- No optimization for sequential inserts (no "prev"). -- No custom comparator. -- Support overwrites. This requires care when we see the same key when inserting. - For RocksDB or LevelDB, overwrites are implemented as a newer sequence number in the key, so - there is no need for values. We don't intend to support versioning. In-place updates of values - would be more efficient. -- We discard all non-concurrent code. -- We do not support Splices. This simplifies the code a lot. -- No AllocateNode or other pointer arithmetic. -- We combine the findLessThan, findGreaterOrEqual, etc into one function. -*/ - -package skl - -import ( - "math" - "sync/atomic" - "unsafe" - - "github.com/dgraph-io/badger/v2/y" - "github.com/dgraph-io/ristretto/z" -) - -const ( - maxHeight = 20 - heightIncrease = math.MaxUint32 / 3 -) - -// MaxNodeSize is the memory footprint of a node of maximum height. -const MaxNodeSize = int(unsafe.Sizeof(node{})) - -type node struct { - // Multiple parts of the value are encoded as a single uint64 so that it - // can be atomically loaded and stored: - // value offset: uint32 (bits 0-31) - // value size : uint16 (bits 32-63) - value uint64 - - // A byte slice is 24 bytes. We are trying to save space here. - keyOffset uint32 // Immutable. No need to lock to access key. - keySize uint16 // Immutable. No need to lock to access key. - - // Height of the tower. - height uint16 - - // Most nodes do not need to use the full height of the tower, since the - // probability of each successive level decreases exponentially. Because - // these elements are never accessed, they do not need to be allocated. - // Therefore, when a node is allocated in the arena, its memory footprint - // is deliberately truncated to not include unneeded tower elements. - // - // All accesses to elements should use CAS operations, with no need to lock. - tower [maxHeight]uint32 -} - -// Skiplist maps keys to values (in memory) -type Skiplist struct { - height int32 // Current height. 1 <= height <= kMaxHeight. CAS. - head *node - ref int32 - arena *Arena -} - -// IncrRef increases the refcount -func (s *Skiplist) IncrRef() { - atomic.AddInt32(&s.ref, 1) -} - -// DecrRef decrements the refcount, deallocating the Skiplist when done using it -func (s *Skiplist) DecrRef() { - newRef := atomic.AddInt32(&s.ref, -1) - if newRef > 0 { - return - } - - s.arena.reset() - // Indicate we are closed. Good for testing. 
Also, lets GC reclaim memory. Race condition - // here would suggest we are accessing skiplist when we are supposed to have no reference! - s.arena = nil - // Since the head references the arena's buf, as long as the head is kept around - // GC can't release the buf. - s.head = nil -} - -func newNode(arena *Arena, key []byte, v y.ValueStruct, height int) *node { - // The base level is already allocated in the node struct. - offset := arena.putNode(height) - node := arena.getNode(offset) - node.keyOffset = arena.putKey(key) - node.keySize = uint16(len(key)) - node.height = uint16(height) - node.value = encodeValue(arena.putVal(v), v.EncodedSize()) - return node -} - -func encodeValue(valOffset uint32, valSize uint32) uint64 { - return uint64(valSize)<<32 | uint64(valOffset) -} - -func decodeValue(value uint64) (valOffset uint32, valSize uint32) { - valOffset = uint32(value) - valSize = uint32(value >> 32) - return -} - -// NewSkiplist makes a new empty skiplist, with a given arena size -func NewSkiplist(arenaSize int64) *Skiplist { - arena := newArena(arenaSize) - head := newNode(arena, nil, y.ValueStruct{}, maxHeight) - return &Skiplist{ - height: 1, - head: head, - arena: arena, - ref: 1, - } -} - -func (s *node) getValueOffset() (uint32, uint32) { - value := atomic.LoadUint64(&s.value) - return decodeValue(value) -} - -func (s *node) key(arena *Arena) []byte { - return arena.getKey(s.keyOffset, s.keySize) -} - -func (s *node) setValue(arena *Arena, v y.ValueStruct) { - valOffset := arena.putVal(v) - value := encodeValue(valOffset, v.EncodedSize()) - atomic.StoreUint64(&s.value, value) -} - -func (s *node) getNextOffset(h int) uint32 { - return atomic.LoadUint32(&s.tower[h]) -} - -func (s *node) casNextOffset(h int, old, val uint32) bool { - return atomic.CompareAndSwapUint32(&s.tower[h], old, val) -} - -// Returns true if key is strictly > n.key. -// If n is nil, this is an "end" marker and we return false. -//func (s *Skiplist) keyIsAfterNode(key []byte, n *node) bool { -// y.AssertTrue(n != s.head) -// return n != nil && y.CompareKeys(key, n.key) > 0 -//} - -func (s *Skiplist) randomHeight() int { - h := 1 - for h < maxHeight && z.FastRand() <= heightIncrease { - h++ - } - return h -} - -func (s *Skiplist) getNext(nd *node, height int) *node { - return s.arena.getNode(nd.getNextOffset(height)) -} - -// findNear finds the node near to key. -// If less=true, it finds rightmost node such that node.key < key (if allowEqual=false) or -// node.key <= key (if allowEqual=true). -// If less=false, it finds leftmost node such that node.key > key (if allowEqual=false) or -// node.key >= key (if allowEqual=true). -// Returns the node found. The bool returned is true if the node has key equal to given key. -func (s *Skiplist) findNear(key []byte, less bool, allowEqual bool) (*node, bool) { - x := s.head - level := int(s.getHeight() - 1) - for { - // Assume x.key < key. - next := s.getNext(x, level) - if next == nil { - // x.key < key < END OF LIST - if level > 0 { - // Can descend further to iterate closer to the end. - level-- - continue - } - // Level=0. Cannot descend further. Let's return something that makes sense. - if !less { - return nil, false - } - // Try to return x. Make sure it is not a head node. - if x == s.head { - return nil, false - } - return x, false - } - - nextKey := next.key(s.arena) - cmp := y.CompareKeys(key, nextKey) - if cmp > 0 { - // x.key < next.key < key. We can continue to move right. - x = next - continue - } - if cmp == 0 { - // x.key < key == next.key. 
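Editor's note: encodeValue and decodeValue above pack the value's arena offset and its size into one uint64 precisely so that setValue and getValueOffset can swap both halves with a single atomic store or load — no lock, no torn reads. A round-trip sketch of the encoding:

```go
package main

import "fmt"

// Size lives in the high 32 bits, offset in the low 32, as in the skiplist.
func encodeValue(valOffset, valSize uint32) uint64 {
	return uint64(valSize)<<32 | uint64(valOffset)
}

func decodeValue(v uint64) (valOffset, valSize uint32) {
	return uint32(v), uint32(v >> 32)
}

func main() {
	off, size := decodeValue(encodeValue(4096, 12))
	fmt.Println(off, size) // 4096 12
}
```

Because both fields live in one machine word, a concurrent reader can never observe the offset of one write paired with the size of another.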
- if allowEqual { - return next, true - } - if !less { - // We want >, so go to base level to grab the next bigger note. - return s.getNext(next, 0), false - } - // We want <. If not base level, we should go closer in the next level. - if level > 0 { - level-- - continue - } - // On base level. Return x. - if x == s.head { - return nil, false - } - return x, false - } - // cmp < 0. In other words, x.key < key < next. - if level > 0 { - level-- - continue - } - // At base level. Need to return something. - if !less { - return next, false - } - // Try to return x. Make sure it is not a head node. - if x == s.head { - return nil, false - } - return x, false - } -} - -// findSpliceForLevel returns (outBefore, outAfter) with outBefore.key <= key <= outAfter.key. -// The input "before" tells us where to start looking. -// If we found a node with the same key, then we return outBefore = outAfter. -// Otherwise, outBefore.key < key < outAfter.key. -func (s *Skiplist) findSpliceForLevel(key []byte, before *node, level int) (*node, *node) { - for { - // Assume before.key < key. - next := s.getNext(before, level) - if next == nil { - return before, next - } - nextKey := next.key(s.arena) - cmp := y.CompareKeys(key, nextKey) - if cmp == 0 { - // Equality case. - return next, next - } - if cmp < 0 { - // before.key < key < next.key. We are done for this level. - return before, next - } - before = next // Keep moving right on this level. - } -} - -func (s *Skiplist) getHeight() int32 { - return atomic.LoadInt32(&s.height) -} - -// Put inserts the key-value pair. -func (s *Skiplist) Put(key []byte, v y.ValueStruct) { - // Since we allow overwrite, we may not need to create a new node. We might not even need to - // increase the height. Let's defer these actions. - - listHeight := s.getHeight() - var prev [maxHeight + 1]*node - var next [maxHeight + 1]*node - prev[listHeight] = s.head - next[listHeight] = nil - for i := int(listHeight) - 1; i >= 0; i-- { - // Use higher level to speed up for current level. - prev[i], next[i] = s.findSpliceForLevel(key, prev[i+1], i) - if prev[i] == next[i] { - prev[i].setValue(s.arena, v) - return - } - } - - // We do need to create a new node. - height := s.randomHeight() - x := newNode(s.arena, key, v, height) - - // Try to increase s.height via CAS. - listHeight = s.getHeight() - for height > int(listHeight) { - if atomic.CompareAndSwapInt32(&s.height, listHeight, int32(height)) { - // Successfully increased skiplist.height. - break - } - listHeight = s.getHeight() - } - - // We always insert from the base level and up. After you add a node in base level, we cannot - // create a node in the level above because it would have discovered the node in the base level. - for i := 0; i < height; i++ { - for { - if prev[i] == nil { - y.AssertTrue(i > 1) // This cannot happen in base level. - // We haven't computed prev, next for this level because height exceeds old listHeight. - // For these levels, we expect the lists to be sparse, so we can just search from head. - prev[i], next[i] = s.findSpliceForLevel(key, s.head, i) - // Someone adds the exact same key before we are able to do so. This can only happen on - // the base level. But we know we are not on the base level. - y.AssertTrue(prev[i] != next[i]) - } - nextOffset := s.arena.getNodeOffset(next[i]) - x.tower[i] = nextOffset - if prev[i].casNextOffset(i, nextOffset, s.arena.getNodeOffset(x)) { - // Managed to insert x between prev[i] and next[i]. Go to the next level. - break - } - // CAS failed. 
We need to recompute prev and next. - // It is unlikely to be helpful to try to use a different level as we redo the search, - // because it is unlikely that lots of nodes are inserted between prev[i] and next[i]. - prev[i], next[i] = s.findSpliceForLevel(key, prev[i], i) - if prev[i] == next[i] { - y.AssertTruef(i == 0, "Equality can happen only on base level: %d", i) - prev[i].setValue(s.arena, v) - return - } - } - } -} - -// Empty returns if the Skiplist is empty. -func (s *Skiplist) Empty() bool { - return s.findLast() == nil -} - -// findLast returns the last element. If head (empty list), we return nil. All the find functions -// will NEVER return the head nodes. -func (s *Skiplist) findLast() *node { - n := s.head - level := int(s.getHeight()) - 1 - for { - next := s.getNext(n, level) - if next != nil { - n = next - continue - } - if level == 0 { - if n == s.head { - return nil - } - return n - } - level-- - } -} - -// Get gets the value associated with the key. It returns a valid value if it finds equal or earlier -// version of the same key. -func (s *Skiplist) Get(key []byte) y.ValueStruct { - n, _ := s.findNear(key, false, true) // findGreaterOrEqual. - if n == nil { - return y.ValueStruct{} - } - - nextKey := s.arena.getKey(n.keyOffset, n.keySize) - if !y.SameKey(key, nextKey) { - return y.ValueStruct{} - } - - valOffset, valSize := n.getValueOffset() - vs := s.arena.getVal(valOffset, valSize) - vs.Version = y.ParseTs(nextKey) - return vs -} - -// NewIterator returns a skiplist iterator. You have to Close() the iterator. -func (s *Skiplist) NewIterator() *Iterator { - s.IncrRef() - return &Iterator{list: s} -} - -// MemSize returns the size of the Skiplist in terms of how much memory is used within its internal -// arena. -func (s *Skiplist) MemSize() int64 { return s.arena.size() } - -// Iterator is an iterator over skiplist object. For new objects, you just -// need to initialize Iterator.list. -type Iterator struct { - list *Skiplist - n *node -} - -// Close frees the resources held by the iterator -func (s *Iterator) Close() error { - s.list.DecrRef() - return nil -} - -// Valid returns true iff the iterator is positioned at a valid node. -func (s *Iterator) Valid() bool { return s.n != nil } - -// Key returns the key at the current position. -func (s *Iterator) Key() []byte { - return s.list.arena.getKey(s.n.keyOffset, s.n.keySize) -} - -// Value returns value. -func (s *Iterator) Value() y.ValueStruct { - valOffset, valSize := s.n.getValueOffset() - return s.list.arena.getVal(valOffset, valSize) -} - -// Next advances to the next position. -func (s *Iterator) Next() { - y.AssertTrue(s.Valid()) - s.n = s.list.getNext(s.n, 0) -} - -// Prev advances to the previous position. -func (s *Iterator) Prev() { - y.AssertTrue(s.Valid()) - s.n, _ = s.list.findNear(s.Key(), true, false) // find <. No equality allowed. -} - -// Seek advances to the first entry with a key >= target. -func (s *Iterator) Seek(target []byte) { - s.n, _ = s.list.findNear(target, false, true) // find >=. -} - -// SeekForPrev finds an entry with key <= target. -func (s *Iterator) SeekForPrev(target []byte) { - s.n, _ = s.list.findNear(target, true, true) // find <=. -} - -// SeekToFirst seeks position at the first entry in list. -// Final state of iterator is Valid() iff list is not empty. -func (s *Iterator) SeekToFirst() { - s.n = s.list.getNext(s.list.head, 0) -} - -// SeekToLast seeks position at the last entry in list. -// Final state of iterator is Valid() iff list is not empty. 
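Editor's note: the Iterator above follows the familiar Seek/Valid/Next contract, and NewIterator takes a list reference that Close releases. A sketch of a forward scan, assuming `list` is a *Skiplist populated via Put; the start key is illustrative:

```go
it := list.NewIterator()
defer it.Close() // releases the reference NewIterator took

for it.Seek([]byte("k2")); it.Valid(); it.Next() {
	fmt.Printf("%s => %v\n", it.Key(), it.Value())
}
```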
-func (s *Iterator) SeekToLast() { - s.n = s.list.findLast() -} - -// UniIterator is a unidirectional memtable iterator. It is a thin wrapper around -// Iterator. We like to keep Iterator as before, because it is more powerful and -// we might support bidirectional iterators in the future. -type UniIterator struct { - iter *Iterator - reversed bool -} - -// NewUniIterator returns a UniIterator. -func (s *Skiplist) NewUniIterator(reversed bool) *UniIterator { - return &UniIterator{ - iter: s.NewIterator(), - reversed: reversed, - } -} - -// Next implements y.Interface -func (s *UniIterator) Next() { - if !s.reversed { - s.iter.Next() - } else { - s.iter.Prev() - } -} - -// Rewind implements y.Interface -func (s *UniIterator) Rewind() { - if !s.reversed { - s.iter.SeekToFirst() - } else { - s.iter.SeekToLast() - } -} - -// Seek implements y.Interface -func (s *UniIterator) Seek(key []byte) { - if !s.reversed { - s.iter.Seek(key) - } else { - s.iter.SeekForPrev(key) - } -} - -// Key implements y.Interface -func (s *UniIterator) Key() []byte { return s.iter.Key() } - -// Value implements y.Interface -func (s *UniIterator) Value() y.ValueStruct { return s.iter.Value() } - -// Valid implements y.Interface -func (s *UniIterator) Valid() bool { return s.iter.Valid() } - -// Close implements y.Interface (and frees up the iter's resources) -func (s *UniIterator) Close() error { return s.iter.Close() } diff --git a/vendor/github.com/dgraph-io/badger/v2/stream.go b/vendor/github.com/dgraph-io/badger/v2/stream.go deleted file mode 100644 index e238bcd3..00000000 --- a/vendor/github.com/dgraph-io/badger/v2/stream.go +++ /dev/null @@ -1,411 +0,0 @@ -/* - * Copyright 2018 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package badger - -import ( - "bytes" - "context" - "math" - "sync" - "sync/atomic" - "time" - - "github.com/dgraph-io/badger/v2/pb" - "github.com/dgraph-io/badger/v2/y" - humanize "github.com/dustin/go-humanize" - "github.com/golang/protobuf/proto" -) - -const pageSize = 4 << 20 // 4MB - -// maxStreamSize is the maximum allowed size of a stream batch. This is a soft limit -// as a single list that is still over the limit will have to be sent as is since it -// cannot be split further. This limit prevents the framework from creating batches -// so big that sending them causes issues (e.g running into the max size gRPC limit). -var maxStreamSize = uint64(100 << 20) // 100MB - -// Stream provides a framework to concurrently iterate over a snapshot of Badger, pick up -// key-values, batch them up and call Send. Stream does concurrent iteration over many smaller key -// ranges. It does NOT send keys in lexicographical sorted order. To get keys in sorted -// order, use Iterator. -type Stream struct { - // Prefix to only iterate over certain range of keys. If set to nil (default), Stream would - // iterate over the entire DB. - Prefix []byte - - // Number of goroutines to use for iterating over key ranges. Defaults to 16. 
- NumGo int - - // Badger would produce log entries in Infof to indicate the progress of Stream. LogPrefix can - // be used to help differentiate them from other activities. Default is "Badger.Stream". - LogPrefix string - - // ChooseKey is invoked each time a new key is encountered. Note that this is not called - // on every version of the value, only the first encountered version (i.e. the highest version - // of the value a key has). ChooseKey can be left nil to select all keys. - // - // Note: Calls to ChooseKey are concurrent. - ChooseKey func(item *Item) bool - - // KeyToList, similar to ChooseKey, is only invoked on the highest version of the value. It - // is upto the caller to iterate over the versions and generate zero, one or more KVs. It - // is expected that the user would advance the iterator to go through the versions of the - // values. However, the user MUST immediately return from this function on the first encounter - // with a mismatching key. See example usage in ToList function. Can be left nil to use ToList - // function by default. - // - // Note: Calls to KeyToList are concurrent. - KeyToList func(key []byte, itr *Iterator) (*pb.KVList, error) - - // This is the method where Stream sends the final output. All calls to Send are done by a - // single goroutine, i.e. logic within Send method can expect single threaded execution. - Send func(*pb.KVList) error - - readTs uint64 - db *DB - rangeCh chan keyRange - kvChan chan *pb.KVList - nextStreamId uint32 -} - -// ToList is a default implementation of KeyToList. It picks up all valid versions of the key, -// skipping over deleted or expired keys. -func (st *Stream) ToList(key []byte, itr *Iterator) (*pb.KVList, error) { - list := &pb.KVList{} - for ; itr.Valid(); itr.Next() { - item := itr.Item() - if item.IsDeletedOrExpired() { - break - } - if !bytes.Equal(key, item.Key()) { - // Break out on the first encounter with another key. - break - } - - valCopy, err := item.ValueCopy(nil) - if err != nil { - return nil, err - } - kv := &pb.KV{ - Key: item.KeyCopy(nil), - Value: valCopy, - UserMeta: []byte{item.UserMeta()}, - Version: item.Version(), - ExpiresAt: item.ExpiresAt(), - } - list.Kv = append(list.Kv, kv) - if st.db.opt.NumVersionsToKeep == 1 { - break - } - - if item.DiscardEarlierVersions() { - break - } - } - return list, nil -} - -// keyRange is [start, end), including start, excluding end. Do ensure that the start, -// end byte slices are owned by keyRange struct. -func (st *Stream) produceRanges(ctx context.Context) { - splits := st.db.KeySplits(st.Prefix) - - // We don't need to create more key ranges than NumGo goroutines. This way, we will have limited - // number of "streams" coming out, which then helps limit the memory used by SSWriter. - { - pickEvery := int(math.Floor(float64(len(splits)) / float64(st.NumGo))) - if pickEvery < 1 { - pickEvery = 1 - } - filtered := splits[:0] - for i, split := range splits { - if (i+1)%pickEvery == 0 { - filtered = append(filtered, split) - } - } - splits = filtered - } - - start := y.SafeCopy(nil, st.Prefix) - for _, key := range splits { - st.rangeCh <- keyRange{left: start, right: y.SafeCopy(nil, []byte(key))} - start = y.SafeCopy(nil, []byte(key)) - } - // Edge case: prefix is empty and no splits exist. In that case, we should have at least one - // keyRange output. - st.rangeCh <- keyRange{left: start} - close(st.rangeCh) -} - -// produceKVs picks up ranges from rangeCh, generates KV lists and sends them to kvChan. 
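Editor's note: ChooseKey and KeyToList above are the two per-key hooks — the first cheaply filters keys (and runs concurrently), the second expands a chosen key's versions into output KVs, defaulting to ToList. A sketch of configuring them from calling code; `db` is assumed, and the prefix and predicate are illustrative:

```go
stream := db.NewStream()
stream.Prefix = []byte("account:") // iterate only this keyspace
stream.ChooseKey = func(item *badger.Item) bool {
	// Called concurrently, and only on the highest version of each key.
	return !item.IsDeletedOrExpired()
}
// KeyToList stays nil, so ToList above gathers the surviving versions.
```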
-func (st *Stream) produceKVs(ctx context.Context, threadId int) error { - var size int - var txn *Txn - if st.readTs > 0 { - txn = st.db.NewTransactionAt(st.readTs, false) - } else { - txn = st.db.NewTransaction(false) - } - defer txn.Discard() - - iterate := func(kr keyRange) error { - iterOpts := DefaultIteratorOptions - iterOpts.AllVersions = true - iterOpts.Prefix = st.Prefix - iterOpts.PrefetchValues = false - itr := txn.NewIterator(iterOpts) - itr.ThreadId = threadId - defer itr.Close() - - // This unique stream id is used to identify all the keys from this iteration. - streamId := atomic.AddUint32(&st.nextStreamId, 1) - - outList := new(pb.KVList) - - sendIt := func() error { - select { - case st.kvChan <- outList: - case <-ctx.Done(): - return ctx.Err() - } - outList = new(pb.KVList) - size = 0 - return nil - } - var prevKey []byte - for itr.Seek(kr.left); itr.Valid(); { - // it.Valid would only return true for keys with the provided Prefix in iterOpts. - item := itr.Item() - if bytes.Equal(item.Key(), prevKey) { - itr.Next() - continue - } - prevKey = append(prevKey[:0], item.Key()...) - - // Check if we reached the end of the key range. - if len(kr.right) > 0 && bytes.Compare(item.Key(), kr.right) >= 0 { - break - } - // Check if we should pick this key. - if st.ChooseKey != nil && !st.ChooseKey(item) { - continue - } - - // Now convert to key value. - list, err := st.KeyToList(item.KeyCopy(nil), itr) - if err != nil { - return err - } - if list == nil || len(list.Kv) == 0 { - continue - } - for _, kv := range list.Kv { - size += proto.Size(kv) - kv.StreamId = streamId - outList.Kv = append(outList.Kv, kv) - - if size < pageSize { - continue - } - if err := sendIt(); err != nil { - return err - } - } - } - if len(outList.Kv) > 0 { - // TODO: Think of a way to indicate that a stream is over. - if err := sendIt(); err != nil { - return err - } - } - return nil - } - - for { - select { - case kr, ok := <-st.rangeCh: - if !ok { - // Done with the keys. - return nil - } - if err := iterate(kr); err != nil { - return err - } - case <-ctx.Done(): - return ctx.Err() - } - } -} - -func (st *Stream) streamKVs(ctx context.Context) error { - var count int - var bytesSent uint64 - t := time.NewTicker(time.Second) - defer t.Stop() - now := time.Now() - - sendBatch := func(batch *pb.KVList) error { - sz := uint64(proto.Size(batch)) - bytesSent += sz - count += len(batch.Kv) - t := time.Now() - if err := st.Send(batch); err != nil { - return err - } - st.db.opt.Infof("%s Created batch of size: %s in %s.\n", - st.LogPrefix, humanize.Bytes(sz), time.Since(t)) - return nil - } - - slurp := func(batch *pb.KVList) error { - loop: - for { - // Send the batch immediately if it already exceeds the maximum allowed size. - // If the size of the batch exceeds maxStreamSize, break from the loop to - // avoid creating a batch that is so big that certain limits are reached. - sz := uint64(proto.Size(batch)) - if sz > maxStreamSize { - break loop - } - select { - case kvs, ok := <-st.kvChan: - if !ok { - break loop - } - y.AssertTrue(kvs != nil) - batch.Kv = append(batch.Kv, kvs.Kv...) 
- default: - break loop - } - } - return sendBatch(batch) - } - -outer: - for { - var batch *pb.KVList - select { - case <-ctx.Done(): - return ctx.Err() - - case <-t.C: - dur := time.Since(now) - durSec := uint64(dur.Seconds()) - if durSec == 0 { - continue - } - speed := bytesSent / durSec - st.db.opt.Infof("%s Time elapsed: %s, bytes sent: %s, speed: %s/sec\n", st.LogPrefix, - y.FixedDuration(dur), humanize.Bytes(bytesSent), humanize.Bytes(speed)) - - case kvs, ok := <-st.kvChan: - if !ok { - break outer - } - y.AssertTrue(kvs != nil) - batch = kvs - - // Otherwise, slurp more keys into this batch. - if err := slurp(batch); err != nil { - return err - } - } - } - - st.db.opt.Infof("%s Sent %d keys\n", st.LogPrefix, count) - return nil -} - -// Orchestrate runs Stream. It picks up ranges from the SSTables, then runs NumGo number of -// goroutines to iterate over these ranges and batch up KVs in lists. It concurrently runs a single -// goroutine to pick these lists, batch them up further and send to Output.Send. Orchestrate also -// spits logs out to Infof, using provided LogPrefix. Note that all calls to Output.Send -// are serial. In case any of these steps encounter an error, Orchestrate would stop execution and -// return that error. Orchestrate can be called multiple times, but in serial order. -func (st *Stream) Orchestrate(ctx context.Context) error { - st.rangeCh = make(chan keyRange, 3) // Contains keys for posting lists. - - // kvChan should only have a small capacity to ensure that we don't buffer up too much data if - // sending is slow. Page size is set to 4MB, which is used to lazily cap the size of each - // KVList. To get 128MB buffer, we can set the channel size to 32. - st.kvChan = make(chan *pb.KVList, 32) - - if st.KeyToList == nil { - st.KeyToList = st.ToList - } - - // Picks up ranges from Badger, and sends them to rangeCh. - go st.produceRanges(ctx) - - errCh := make(chan error, 1) // Stores error by consumeKeys. - var wg sync.WaitGroup - for i := 0; i < st.NumGo; i++ { - wg.Add(1) - - go func(threadId int) { - defer wg.Done() - // Picks up ranges from rangeCh, generates KV lists, and sends them to kvChan. - if err := st.produceKVs(ctx, threadId); err != nil { - select { - case errCh <- err: - default: - } - } - }(i) - } - - // Pick up key-values from kvChan and send to stream. - kvErr := make(chan error, 1) - go func() { - // Picks up KV lists from kvChan, and sends them to Output. - kvErr <- st.streamKVs(ctx) - }() - wg.Wait() // Wait for produceKVs to be over. - close(st.kvChan) // Now we can close kvChan. - - select { - case err := <-errCh: // Check error from produceKVs. - return err - default: - } - - // Wait for key streaming to be over. - err := <-kvErr - return err -} - -func (db *DB) newStream() *Stream { - return &Stream{db: db, NumGo: 16, LogPrefix: "Badger.Stream"} -} - -// NewStream creates a new Stream. -func (db *DB) NewStream() *Stream { - if db.opt.managedTxns { - panic("This API can not be called in managed mode.") - } - return db.newStream() -} - -// NewStreamAt creates a new Stream at a particular timestamp. Should only be used with managed DB. 
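As the two constructors here enforce, `NewStream` and `NewStreamAt` are mutually exclusive: each panics when used with the wrong transaction mode. A small sketch of the choice, with `managed`, `readTs`, `send`, and `ctx` standing in for caller state:

```go
// Managed DBs must pin the stream to an explicit timestamp; unmanaged
// DBs take their read timestamp from a normal read-only transaction.
var stream *badger.Stream
if managed {
	stream = db.NewStreamAt(readTs)
} else {
	stream = db.NewStream()
}
stream.Send = send
return stream.Orchestrate(ctx)
```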
-func (db *DB) NewStreamAt(readTs uint64) *Stream { - if !db.opt.managedTxns { - panic("This API can only be called in managed mode.") - } - stream := db.newStream() - stream.readTs = readTs - return stream -} diff --git a/vendor/github.com/dgraph-io/badger/v2/stream_writer.go b/vendor/github.com/dgraph-io/badger/v2/stream_writer.go deleted file mode 100644 index 38ffba22..00000000 --- a/vendor/github.com/dgraph-io/badger/v2/stream_writer.go +++ /dev/null @@ -1,485 +0,0 @@ -/* - * Copyright 2019 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package badger - -import ( - "encoding/hex" - "fmt" - "math" - "sync" - - "github.com/dgraph-io/badger/v2/pb" - "github.com/dgraph-io/badger/v2/table" - "github.com/dgraph-io/badger/v2/y" - humanize "github.com/dustin/go-humanize" - "github.com/pkg/errors" -) - -const headStreamId uint32 = math.MaxUint32 - -// StreamWriter is used to write data coming from multiple streams. The streams must not have any -// overlapping key ranges. Within each stream, the keys must be sorted. Badger Stream framework is -// capable of generating such an output. So, this StreamWriter can be used at the other end to build -// BadgerDB at a much faster pace by writing SSTables (and value logs) directly to LSM tree levels -// without causing any compactions at all. This is way faster than using batched writer or using -// transactions, but only applicable in situations where the keys are pre-sorted and the DB is being -// bootstrapped. Existing data would get deleted when using this writer. So, this is only useful -// when restoring from backup or replicating DB across servers. -// -// StreamWriter should not be called on in-use DB instances. It is designed only to bootstrap new -// DBs. -type StreamWriter struct { - writeLock sync.Mutex - db *DB - done func() - throttle *y.Throttle - maxVersion uint64 - writers map[uint32]*sortedWriter - maxHead valuePointer -} - -// NewStreamWriter creates a StreamWriter. Right after creating StreamWriter, Prepare must be -// called. The memory usage of a StreamWriter is directly proportional to the number of streams -// possible. So, efforts must be made to keep the number of streams low. Stream framework would -// typically use 16 goroutines and hence create 16 streams. -func (db *DB) NewStreamWriter() *StreamWriter { - return &StreamWriter{ - db: db, - // throttle shouldn't make much difference. Memory consumption is based on the number of - // concurrent streams being processed. - throttle: y.NewThrottle(16), - writers: make(map[uint32]*sortedWriter), - } -} - -// Prepare should be called before writing any entry to StreamWriter. It deletes all data present in -// existing DB, stops compactions and any writes being done by other means. Be very careful when -// calling Prepare, because it could result in permanent data loss. Not calling Prepare would result -// in a corrupt Badger instance. 
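Taken together with `Write` and `Flush` below, the lifecycle is strictly Prepare, then Write calls, then Flush. A sketch of a bulk load, assuming the KV lists arrive on a hypothetical channel `incoming`:

```go
sw := db.NewStreamWriter()
if err := sw.Prepare(); err != nil { // destructive: drops all existing data
	return err
}
for kvs := range incoming { // chan *pb.KVList, e.g. fed by Stream.Send
	if err := sw.Write(kvs); err != nil { // Write is safe to call concurrently
		return err
	}
}
return sw.Flush() // finalizes SSTables and syncs directories
```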
-func (sw *StreamWriter) Prepare() error { - sw.writeLock.Lock() - defer sw.writeLock.Unlock() - - var err error - sw.done, err = sw.db.dropAll() - return err -} - -// Write writes KVList to DB. Each KV within the list contains the stream id which StreamWriter -// would use to demux the writes. Write is thread safe and can be called concurrently by multiple -// goroutines. -func (sw *StreamWriter) Write(kvs *pb.KVList) error { - if len(kvs.GetKv()) == 0 { - return nil - } - - // closedStreams keeps track of all streams which are going to be marked as done. We are - // keeping track of all streams so that we can close them at the end, after inserting all - // the valid kvs. - closedStreams := make(map[uint32]struct{}) - streamReqs := make(map[uint32]*request) - for _, kv := range kvs.Kv { - if kv.StreamDone { - closedStreams[kv.StreamId] = struct{}{} - continue - } - - // Panic if some kv comes after stream has been marked as closed. - if _, ok := closedStreams[kv.StreamId]; ok { - panic(fmt.Sprintf("write performed on closed stream: %d", kv.StreamId)) - } - - var meta, userMeta byte - if len(kv.Meta) > 0 { - meta = kv.Meta[0] - } - if len(kv.UserMeta) > 0 { - userMeta = kv.UserMeta[0] - } - if sw.maxVersion < kv.Version { - sw.maxVersion = kv.Version - } - e := &Entry{ - Key: y.KeyWithTs(kv.Key, kv.Version), - Value: kv.Value, - UserMeta: userMeta, - ExpiresAt: kv.ExpiresAt, - meta: meta, - } - // If the value can be collocated with the key in LSM tree, we can skip - // writing the value to value log. - e.skipVlog = sw.db.shouldWriteValueToLSM(*e) - req := streamReqs[kv.StreamId] - if req == nil { - req = &request{} - streamReqs[kv.StreamId] = req - } - req.Entries = append(req.Entries, e) - } - all := make([]*request, 0, len(streamReqs)) - for _, req := range streamReqs { - all = append(all, req) - } - - sw.writeLock.Lock() - defer sw.writeLock.Unlock() - - // We are writing all requests to vlog even if some request belongs to already closed stream. - // It is safe to do because we are panicking while writing to sorted writer, which will be nil - // for closed stream. At restart, stream writer will drop all the data in Prepare function. - if err := sw.db.vlog.write(all); err != nil { - return err - } - - for streamID, req := range streamReqs { - writer, ok := sw.writers[streamID] - if !ok { - var err error - writer, err = sw.newWriter(streamID) - if err != nil { - return errors.Wrapf(err, "failed to create writer with ID %d", streamID) - } - sw.writers[streamID] = writer - } - - if writer == nil { - panic(fmt.Sprintf("write performed on closed stream: %d", streamID)) - } - - writer.reqCh <- req - } - - // Now we can close any streams if required. We will make writer for - // the closed streams as nil. - for streamId := range closedStreams { - writer, ok := sw.writers[streamId] - if !ok { - sw.db.opt.Logger.Warningf("Trying to close stream: %d, but no sorted "+ - "writer found for it", streamId) - continue - } - - writer.closer.SignalAndWait() - if err := writer.Done(); err != nil { - return err - } - - if sw.maxHead.Less(writer.head) { - sw.maxHead = writer.head - } - - sw.writers[streamId] = nil - } - return nil -} - -// Flush is called once we are done writing all the entries. It syncs DB directories. It also -// updates Oracle with maxVersion found in all entries (if DB is not managed). 
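`Write` demultiplexes entries on `StreamId`, and a KV carrying `StreamDone` acts as an end-of-stream marker: any later write to that id panics. A sketch of closing one stream early, before the final `Flush`:

```go
// After this call, the sorted writer for stream 7 is marked nil and
// further KVs with StreamId 7 would panic inside Write.
done := &pb.KVList{Kv: []*pb.KV{{
	StreamId:   7,
	StreamDone: true,
}}}
if err := sw.Write(done); err != nil {
	return err
}
```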
-func (sw *StreamWriter) Flush() error { - sw.writeLock.Lock() - defer sw.writeLock.Unlock() - - defer sw.done() - - for _, writer := range sw.writers { - if writer != nil { - writer.closer.SignalAndWait() - } - } - - for _, writer := range sw.writers { - if writer == nil { - continue - } - if err := writer.Done(); err != nil { - return err - } - if sw.maxHead.Less(writer.head) { - sw.maxHead = writer.head - } - } - - // Encode and write the value log head into a new table. - data := sw.maxHead.Encode() - headWriter, err := sw.newWriter(headStreamId) - if err != nil { - return errors.Wrap(err, "failed to create head writer") - } - if err := headWriter.Add( - y.KeyWithTs(head, sw.maxVersion), - y.ValueStruct{Value: data}); err != nil { - return err - } - - headWriter.closer.SignalAndWait() - - if err := headWriter.Done(); err != nil { - return err - } - - if !sw.db.opt.managedTxns { - if sw.db.orc != nil { - sw.db.orc.Stop() - } - sw.db.orc = newOracle(sw.db.opt) - sw.db.orc.nextTxnTs = sw.maxVersion - sw.db.orc.txnMark.Done(sw.maxVersion) - sw.db.orc.readMark.Done(sw.maxVersion) - sw.db.orc.incrementNextTs() - } - - // Wait for all files to be written. - if err := sw.throttle.Finish(); err != nil { - return err - } - - // Sort tables at the end. - for _, l := range sw.db.lc.levels { - l.sortTables() - } - - // Now sync the directories, so all the files are registered. - if sw.db.opt.ValueDir != sw.db.opt.Dir { - if err := sw.db.syncDir(sw.db.opt.ValueDir); err != nil { - return err - } - } - if err := sw.db.syncDir(sw.db.opt.Dir); err != nil { - return err - } - return sw.db.lc.validate() -} - -type sortedWriter struct { - db *DB - throttle *y.Throttle - - builder *table.Builder - lastKey []byte - streamID uint32 - reqCh chan *request - head valuePointer - // Have separate closer for each writer, as it can be closed at any time. - closer *y.Closer -} - -func (sw *StreamWriter) newWriter(streamID uint32) (*sortedWriter, error) { - dk, err := sw.db.registry.latestDataKey() - if err != nil { - return nil, err - } - - bopts := buildTableOptions(sw.db.opt) - bopts.DataKey = dk - w := &sortedWriter{ - db: sw.db, - streamID: streamID, - throttle: sw.throttle, - builder: table.NewTableBuilder(bopts), - reqCh: make(chan *request, 3), - closer: y.NewCloser(1), - } - - go w.handleRequests() - return w, nil -} - -func (w *sortedWriter) handleRequests() { - defer w.closer.Done() - - process := func(req *request) { - for i, e := range req.Entries { - // If badger is running in InMemory mode, len(req.Ptrs) == 0. - if i < len(req.Ptrs) { - vptr := req.Ptrs[i] - if !vptr.IsZero() { - y.AssertTrue(w.head.Less(vptr)) - w.head = vptr - } - } - var vs y.ValueStruct - if e.skipVlog { - vs = y.ValueStruct{ - Value: e.Value, - Meta: e.meta, - UserMeta: e.UserMeta, - ExpiresAt: e.ExpiresAt, - } - } else { - vptr := req.Ptrs[i] - vs = y.ValueStruct{ - Value: vptr.Encode(), - Meta: e.meta | bitValuePointer, - UserMeta: e.UserMeta, - ExpiresAt: e.ExpiresAt, - } - } - if err := w.Add(e.Key, vs); err != nil { - panic(err) - } - } - } - - for { - select { - case req := <-w.reqCh: - process(req) - case <-w.closer.HasBeenClosed(): - close(w.reqCh) - for req := range w.reqCh { - process(req) - } - return - } - } -} - -// Add adds key and vs to sortedWriter. 
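`Add` below rejects keys that do not arrive in strictly increasing `y.CompareKeys` order, where keys carry an 8-byte version suffix. The sketch below mirrors what `y.KeyWithTs` is assumed to do internally: append `MaxUint64 - ts` big-endian, so that newer versions of the same user key sort first.

```go
import (
	"encoding/binary"
	"math"
)

// keyWithTs mirrors y.KeyWithTs (assumed implementation). The inverted,
// big-endian timestamp makes version 9 of a key sort before version 3.
func keyWithTs(key []byte, ts uint64) []byte {
	out := make([]byte, len(key)+8)
	copy(out, key)
	binary.BigEndian.PutUint64(out[len(key):], math.MaxUint64-ts)
	return out
}
```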
-func (w *sortedWriter) Add(key []byte, vs y.ValueStruct) error { - if len(w.lastKey) > 0 && y.CompareKeys(key, w.lastKey) <= 0 { - return errors.Errorf("keys not in sorted order (last key: %s, key: %s)", - hex.Dump(w.lastKey), hex.Dump(key)) - } - - sameKey := y.SameKey(key, w.lastKey) - // Same keys should go into the same SSTable. - if !sameKey && w.builder.ReachedCapacity(w.db.opt.MaxTableSize) { - if err := w.send(false); err != nil { - return err - } - } - - w.lastKey = y.SafeCopy(w.lastKey, key) - var vp valuePointer - if vs.Meta&bitValuePointer > 0 { - vp.Decode(vs.Value) - } - w.builder.Add(key, vs, vp.Len) - return nil -} - -func (w *sortedWriter) send(done bool) error { - if err := w.throttle.Do(); err != nil { - return err - } - go func(builder *table.Builder) { - err := w.createTable(builder) - w.throttle.Done(err) - }(w.builder) - // If done is true, this indicates we can close the writer. - // No need to allocate underlying TableBuilder now. - if done { - w.builder = nil - return nil - } - - dk, err := w.db.registry.latestDataKey() - if err != nil { - return y.Wrapf(err, "Error while retriving datakey in sortedWriter.send") - } - bopts := buildTableOptions(w.db.opt) - bopts.DataKey = dk - w.builder = table.NewTableBuilder(bopts) - return nil -} - -// Done is called once we are done writing all keys and valueStructs -// to sortedWriter. It completes writing current SST to disk. -func (w *sortedWriter) Done() error { - if w.builder.Empty() { - // Assign builder as nil, so that underlying memory can be garbage collected. - w.builder = nil - return nil - } - - return w.send(true) -} - -func (w *sortedWriter) createTable(builder *table.Builder) error { - data := builder.Finish() - if len(data) == 0 { - return nil - } - fileID := w.db.lc.reserveFileID() - opts := buildTableOptions(w.db.opt) - opts.DataKey = builder.DataKey() - opts.BlockCache = w.db.blockCache - opts.IndexCache = w.db.indexCache - var tbl *table.Table - if w.db.opt.InMemory { - var err error - if tbl, err = table.OpenInMemoryTable(data, fileID, &opts); err != nil { - return err - } - } else { - fd, err := y.CreateSyncedFile(table.NewFilename(fileID, w.db.opt.Dir), true) - if err != nil { - return err - } - if _, err := fd.Write(data); err != nil { - return err - } - if tbl, err = table.OpenTable(fd, opts); err != nil { - return err - } - } - lc := w.db.lc - - var lhandler *levelHandler - // We should start the levels from 1, because we need level 0 to set the !badger!head key. We - // cannot mix up this key with other keys from the DB, otherwise we would introduce a range - // overlap violation. - y.AssertTrue(len(lc.levels) > 1) - for _, l := range lc.levels[1:] { - ratio := float64(l.getTotalSize()) / float64(l.maxTotalSize) - if ratio < 1.0 { - lhandler = l - break - } - } - if lhandler == nil { - // If we're exceeding the size of the lowest level, shove it in the lowest level. Can't do - // better than that. - lhandler = lc.levels[len(lc.levels)-1] - } - if w.streamID == headStreamId { - // This is a special !badger!head key. We should store it at level 0, separate from all the - // other keys to avoid an overlap. - lhandler = lc.levels[0] - } - // Now that table can be opened successfully, let's add this to the MANIFEST. 
- change := &pb.ManifestChange{ - Id: tbl.ID(), - KeyId: tbl.KeyID(), - Op: pb.ManifestChange_CREATE, - Level: uint32(lhandler.level), - Compression: uint32(tbl.CompressionType()), - } - if err := w.db.manifest.addChanges([]*pb.ManifestChange{change}); err != nil { - return err - } - - // We are not calling lhandler.replaceTables() here, as it sorts tables on every addition. - // We can sort all tables only once during Flush() call. - lhandler.addTable(tbl) - - // Release the ref held by OpenTable. - _ = tbl.DecrRef() - w.db.opt.Infof("Table created: %d at level: %d for stream: %d. Size: %s\n", - fileID, lhandler.level, w.streamID, humanize.Bytes(uint64(tbl.Size()))) - return nil -} diff --git a/vendor/github.com/dgraph-io/badger/v2/structs.go b/vendor/github.com/dgraph-io/badger/v2/structs.go deleted file mode 100644 index 469cdc48..00000000 --- a/vendor/github.com/dgraph-io/badger/v2/structs.go +++ /dev/null @@ -1,209 +0,0 @@ -/* - * Copyright 2019 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package badger - -import ( - "encoding/binary" - "fmt" - "time" - "unsafe" -) - -type valuePointer struct { - Fid uint32 - Len uint32 - Offset uint32 -} - -const vptrSize = unsafe.Sizeof(valuePointer{}) - -func (p valuePointer) Less(o valuePointer) bool { - if p.Fid != o.Fid { - return p.Fid < o.Fid - } - if p.Offset != o.Offset { - return p.Offset < o.Offset - } - return p.Len < o.Len -} - -func (p valuePointer) IsZero() bool { - return p.Fid == 0 && p.Offset == 0 && p.Len == 0 -} - -// Encode encodes Pointer into byte buffer. -func (p valuePointer) Encode() []byte { - b := make([]byte, vptrSize) - // Copy over the content from p to b. - *(*valuePointer)(unsafe.Pointer(&b[0])) = p - return b -} - -// Decode decodes the value pointer into the provided byte buffer. -func (p *valuePointer) Decode(b []byte) { - // Copy over data from b into p. Using *p=unsafe.pointer(...) leads to - // pointer alignment issues. See https://github.com/dgraph-io/badger/issues/1096 - // and comment https://github.com/dgraph-io/badger/pull/1097#pullrequestreview-307361714 - copy(((*[vptrSize]byte)(unsafe.Pointer(p))[:]), b[:vptrSize]) -} - -// header is used in value log as a header before Entry. -type header struct { - klen uint32 - vlen uint32 - expiresAt uint64 - meta byte - userMeta byte -} - -const ( - // Maximum possible size of the header. The maximum size of header struct will be 18 but the - // maximum size of varint encoded header will be 21. - maxHeaderSize = 21 -) - -// Encode encodes the header into []byte. The provided []byte should be atleast 5 bytes. The -// function will panic if out []byte isn't large enough to hold all the values. 
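A note on the `valuePointer` codec above: the `unsafe` copy writes the struct's raw in-memory bytes, which is fast but ties the on-disk format to the platform's struct layout and byte order. An explicit equivalent for comparison; the field order follows the struct declaration, and little-endian matches the native order on amd64 (both are assumptions about the layout, not part of the original code):

```go
// encodePortable spells out what the unsafe copy does implicitly.
func (p valuePointer) encodePortable() []byte {
	b := make([]byte, 12)
	binary.LittleEndian.PutUint32(b[0:4], p.Fid)
	binary.LittleEndian.PutUint32(b[4:8], p.Len)
	binary.LittleEndian.PutUint32(b[8:12], p.Offset)
	return b
}
```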
-// The encoded header looks like -// +------+----------+------------+--------------+-----------+ -// | Meta | UserMeta | Key Length | Value Length | ExpiresAt | -// +------+----------+------------+--------------+-----------+ -func (h header) Encode(out []byte) int { - out[0], out[1] = h.meta, h.userMeta - index := 2 - index += binary.PutUvarint(out[index:], uint64(h.klen)) - index += binary.PutUvarint(out[index:], uint64(h.vlen)) - index += binary.PutUvarint(out[index:], h.expiresAt) - return index -} - -// Decode decodes the given header from the provided byte slice. -// Returns the number of bytes read. -func (h *header) Decode(buf []byte) int { - h.meta, h.userMeta = buf[0], buf[1] - index := 2 - klen, count := binary.Uvarint(buf[index:]) - h.klen = uint32(klen) - index += count - vlen, count := binary.Uvarint(buf[index:]) - h.vlen = uint32(vlen) - index += count - h.expiresAt, count = binary.Uvarint(buf[index:]) - return index + count -} - -// DecodeFrom reads the header from the hashReader. -// Returns the number of bytes read. -func (h *header) DecodeFrom(reader *hashReader) (int, error) { - var err error - h.meta, err = reader.ReadByte() - if err != nil { - return 0, err - } - h.userMeta, err = reader.ReadByte() - if err != nil { - return 0, err - } - klen, err := binary.ReadUvarint(reader) - if err != nil { - return 0, err - } - h.klen = uint32(klen) - vlen, err := binary.ReadUvarint(reader) - if err != nil { - return 0, err - } - h.vlen = uint32(vlen) - h.expiresAt, err = binary.ReadUvarint(reader) - if err != nil { - return 0, err - } - return reader.bytesRead, nil -} - -// Entry provides Key, Value, UserMeta and ExpiresAt. This struct can be used by -// the user to set data. -type Entry struct { - Key []byte - Value []byte - UserMeta byte - ExpiresAt uint64 // time.Unix - meta byte - version uint64 - - // Fields maintained internally. - offset uint32 - skipVlog bool - hlen int // Length of the header. -} - -func (e *Entry) estimateSize(threshold int) int { - if len(e.Value) < threshold { - return len(e.Key) + len(e.Value) + 2 // Meta, UserMeta - } - return len(e.Key) + 12 + 2 // 12 for ValuePointer, 2 for metas. -} - -func (e Entry) print(prefix string) { - fmt.Printf("%s Key: %s Meta: %d UserMeta: %d Offset: %d len(val)=%d", - prefix, e.Key, e.meta, e.UserMeta, e.offset, len(e.Value)) -} - -// NewEntry creates a new entry with key and value passed in args. This newly created entry can be -// set in a transaction by calling txn.SetEntry(). All other properties of Entry can be set by -// calling WithMeta, WithDiscard, WithTTL methods on it. -// This function uses key and value reference, hence users must -// not modify key and value until the end of transaction. -func NewEntry(key, value []byte) *Entry { - return &Entry{ - Key: key, - Value: value, - } -} - -// WithMeta adds meta data to Entry e. This byte is stored alongside the key -// and can be used as an aid to interpret the value or store other contextual -// bits corresponding to the key-value pair of entry. -func (e *Entry) WithMeta(meta byte) *Entry { - e.UserMeta = meta - return e -} - -// WithDiscard adds a marker to Entry e. This means all the previous versions of the key (of the -// Entry) will be eligible for garbage collection. -// This method is only useful if you have set a higher limit for options.NumVersionsToKeep. The -// default setting is 1, in which case, this function doesn't add any more benefit. 
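The `Encode`/`Decode` pair above is a straightforward varint codec; a minimal round-trip sketch, assuming the in-package `header` and `maxHeaderSize` shown here:

```go
var buf [maxHeaderSize]byte
h := header{klen: 3, vlen: 10, expiresAt: 1700000000, meta: 0x01, userMeta: 0x02}
n := h.Encode(buf[:]) // bytes written: 2 fixed + 3 uvarints

var got header
m := got.Decode(buf[:n])
// m == n, and got now holds the same field values as h.
```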
If however, you -// have a higher setting for NumVersionsToKeep (in Dgraph, we set it to infinity), you can use this -// method to indicate that all the older versions can be discarded and removed during compactions. -func (e *Entry) WithDiscard() *Entry { - e.meta = bitDiscardEarlierVersions - return e -} - -// WithTTL adds time to live duration to Entry e. Entry stored with a TTL would automatically expire -// after the time has elapsed, and will be eligible for garbage collection. -func (e *Entry) WithTTL(dur time.Duration) *Entry { - e.ExpiresAt = uint64(time.Now().Add(dur).Unix()) - return e -} - -// withMergeBit sets merge bit in entry's metadata. This -// function is called by MergeOperator's Add method. -func (e *Entry) withMergeBit() *Entry { - e.meta = bitMergeEntry - return e -} diff --git a/vendor/github.com/dgraph-io/badger/v2/table/README.md b/vendor/github.com/dgraph-io/badger/v2/table/README.md deleted file mode 100644 index 19276079..00000000 --- a/vendor/github.com/dgraph-io/badger/v2/table/README.md +++ /dev/null @@ -1,108 +0,0 @@ -Size of table is 123,217,667 bytes for all benchmarks. - -# BenchmarkRead -``` -$ go test -bench ^BenchmarkRead$ -run ^$ -count 3 -goos: linux -goarch: amd64 -pkg: github.com/dgraph-io/badger/table -BenchmarkRead-16 10 154074944 ns/op -BenchmarkRead-16 10 154340411 ns/op -BenchmarkRead-16 10 151914489 ns/op -PASS -ok github.com/dgraph-io/badger/table 22.467s -``` - -Size of table is 123,217,667 bytes, which is ~118MB. - -The rate is ~762MB/s using LoadToRAM (when table is in RAM). - -To read a 64MB table, this would take ~0.084s, which is negligible. - -# BenchmarkReadAndBuild -```go -$ go test -bench BenchmarkReadAndBuild -run ^$ -count 3 -goos: linux -goarch: amd64 -pkg: github.com/dgraph-io/badger/table -BenchmarkReadAndBuild-16 1 1026755231 ns/op -BenchmarkReadAndBuild-16 1 1009543316 ns/op -BenchmarkReadAndBuild-16 1 1039920546 ns/op -PASS -ok github.com/dgraph-io/badger/table 12.081s -``` - -The rate is ~123MB/s. To build a 64MB table, this would take ~0.56s. Note that this -does NOT include the flushing of the table to disk. All we are doing above is -reading one table (which is in RAM) and write one table in memory. - -The table building takes 0.56-0.084s ~ 0.4823s. - -# BenchmarkReadMerged -Below, we merge 5 tables. The total size remains unchanged at ~122M. - -```go -$ go test -bench ReadMerged -run ^$ -count 3 -goos: linux -goarch: amd64 -pkg: github.com/dgraph-io/badger/table -BenchmarkReadMerged-16 2 977588975 ns/op -BenchmarkReadMerged-16 2 982140738 ns/op -BenchmarkReadMerged-16 2 962046017 ns/op -PASS -ok github.com/dgraph-io/badger/table 27.433s -``` - -The rate is ~120MB/s. To read a 64MB table using merge iterator, this would take ~0.53s. - -# BenchmarkRandomRead - -```go -go test -bench BenchmarkRandomRead$ -run ^$ -count 3 -goos: linux -goarch: amd64 -pkg: github.com/dgraph-io/badger/table -BenchmarkRandomRead-16 500000 2645 ns/op -BenchmarkRandomRead-16 500000 2648 ns/op -BenchmarkRandomRead-16 500000 2614 ns/op -PASS -ok github.com/dgraph-io/badger/table 50.850s -``` -For random read benchmarking, we are randomly reading a key and verifying its value. - -# DB Open benchmark -1. Create badger DB with 2 billion key-value pairs (about 380GB of data) -``` -badger fill -m 2000 --dir="/tmp/data" --sorted -``` -2. 
Clear buffers and swap memory -``` -free -mh && sync && echo 3 | sudo tee /proc/sys/vm/drop_caches && sudo swapoff -a && sudo swapon -a && free -mh -``` -Also flush disk buffers -``` -blockdev --flushbufs /dev/nvme0n1p4 -``` -3. Run the benchmark -``` -go test -run=^$ github.com/dgraph-io/badger -bench ^BenchmarkDBOpen$ -benchdir="/tmp/data" -v - -badger 2019/06/04 17:15:56 INFO: 126 tables out of 1028 opened in 3.017s -badger 2019/06/04 17:15:59 INFO: 257 tables out of 1028 opened in 6.014s -badger 2019/06/04 17:16:02 INFO: 387 tables out of 1028 opened in 9.017s -badger 2019/06/04 17:16:05 INFO: 516 tables out of 1028 opened in 12.025s -badger 2019/06/04 17:16:08 INFO: 645 tables out of 1028 opened in 15.013s -badger 2019/06/04 17:16:11 INFO: 775 tables out of 1028 opened in 18.008s -badger 2019/06/04 17:16:14 INFO: 906 tables out of 1028 opened in 21.003s -badger 2019/06/04 17:16:17 INFO: All 1028 tables opened in 23.851s -badger 2019/06/04 17:16:17 INFO: Replaying file id: 1998 at offset: 332000 -badger 2019/06/04 17:16:17 INFO: Replay took: 9.81µs -goos: linux -goarch: amd64 -pkg: github.com/dgraph-io/badger -BenchmarkDBOpen-16 1 23930082140 ns/op -PASS -ok github.com/dgraph-io/badger 24.076s - -``` -It takes about 23.851s to open a DB with 2 billion sorted key-value entries. diff --git a/vendor/github.com/dgraph-io/badger/v2/table/builder.go b/vendor/github.com/dgraph-io/badger/v2/table/builder.go deleted file mode 100644 index 0e22ee1e..00000000 --- a/vendor/github.com/dgraph-io/badger/v2/table/builder.go +++ /dev/null @@ -1,356 +0,0 @@ -/* - * Copyright 2017 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package table - -import ( - "bytes" - "crypto/aes" - "math" - "unsafe" - - "github.com/dgryski/go-farm" - "github.com/golang/protobuf/proto" - "github.com/golang/snappy" - "github.com/pkg/errors" - - "github.com/dgraph-io/badger/v2/options" - "github.com/dgraph-io/badger/v2/pb" - "github.com/dgraph-io/badger/v2/y" - "github.com/dgraph-io/ristretto/z" -) - -func newBuffer(sz int) *bytes.Buffer { - b := new(bytes.Buffer) - b.Grow(sz) - return b -} - -type header struct { - overlap uint16 // Overlap with base key. - diff uint16 // Length of the diff. -} - -const headerSize = uint16(unsafe.Sizeof(header{})) - -// Encode encodes the header. -func (h header) Encode() []byte { - var b [4]byte - *(*header)(unsafe.Pointer(&b[0])) = h - return b[:] -} - -// Decode decodes the header. -func (h *header) Decode(buf []byte) { - // Copy over data from buf into h. Using *h=unsafe.pointer(...) leads to - // pointer alignment issues. See https://github.com/dgraph-io/badger/issues/1096 - // and comment https://github.com/dgraph-io/badger/pull/1097#pullrequestreview-307361714 - copy(((*[headerSize]byte)(unsafe.Pointer(h))[:]), buf[:headerSize]) -} - -// Builder is used in building a table. -type Builder struct { - // Typically tens or hundreds of meg. This is for one single file. 
- buf *bytes.Buffer - - baseKey []byte // Base key for the current block. - baseOffset uint32 // Offset for the current block. - entryOffsets []uint32 // Offsets of entries present in current block. - tableIndex *pb.TableIndex - keyHashes []uint64 // Used for building the bloomfilter. - opt *Options -} - -// NewTableBuilder makes a new TableBuilder. -func NewTableBuilder(opts Options) *Builder { - return &Builder{ - buf: newBuffer(1 << 20), - tableIndex: &pb.TableIndex{}, - keyHashes: make([]uint64, 0, 1024), // Avoid some malloc calls. - opt: &opts, - } -} - -// Close closes the TableBuilder. -func (b *Builder) Close() {} - -// Empty returns whether it's empty. -func (b *Builder) Empty() bool { return b.buf.Len() == 0 } - -// keyDiff returns a suffix of newKey that is different from b.baseKey. -func (b *Builder) keyDiff(newKey []byte) []byte { - var i int - for i = 0; i < len(newKey) && i < len(b.baseKey); i++ { - if newKey[i] != b.baseKey[i] { - break - } - } - return newKey[i:] -} - -func (b *Builder) addHelper(key []byte, v y.ValueStruct, vpLen uint64) { - b.keyHashes = append(b.keyHashes, farm.Fingerprint64(y.ParseKey(key))) - - // diffKey stores the difference of key with baseKey. - var diffKey []byte - if len(b.baseKey) == 0 { - // Make a copy. Builder should not keep references. Otherwise, caller has to be very careful - // and will have to make copies of keys every time they add to builder, which is even worse. - b.baseKey = append(b.baseKey[:0], key...) - diffKey = key - } else { - diffKey = b.keyDiff(key) - } - - y.AssertTrue(len(key)-len(diffKey) <= math.MaxUint16) - y.AssertTrue(len(diffKey) <= math.MaxUint16) - - h := header{ - overlap: uint16(len(key) - len(diffKey)), - diff: uint16(len(diffKey)), - } - - // store current entry's offset - y.AssertTrue(uint32(b.buf.Len()) < math.MaxUint32) - b.entryOffsets = append(b.entryOffsets, uint32(b.buf.Len())-b.baseOffset) - - // Layout: header, diffKey, value. - b.buf.Write(h.Encode()) - b.buf.Write(diffKey) // We only need to store the key difference. - - v.EncodeTo(b.buf) - // Size of KV on SST. - sstSz := uint64(uint32(headerSize) + uint32(len(diffKey)) + v.EncodedSize()) - // Total estimated size = size on SST + size on vlog (length of value pointer). - b.tableIndex.EstimatedSize += (sstSz + vpLen) -} - -/* -Structure of Block. -+-------------------+---------------------+--------------------+--------------+------------------+ -| Entry1 | Entry2 | Entry3 | Entry4 | Entry5 | -+-------------------+---------------------+--------------------+--------------+------------------+ -| Entry6 | ... | ... | ... | EntryN | -+-------------------+---------------------+--------------------+--------------+------------------+ -| Block Meta(contains list of offsets used| Block Meta Size | Block | Checksum Size | -| to perform binary search in the block) | (4 Bytes) | Checksum | (4 Bytes) | -+-----------------------------------------+--------------------+--------------+------------------+ -*/ -// In case the data is encrypted, the "IV" is added to the end of the block. -func (b *Builder) finishBlock() { - b.buf.Write(y.U32SliceToBytes(b.entryOffsets)) - b.buf.Write(y.U32ToBytes(uint32(len(b.entryOffsets)))) - - blockBuf := b.buf.Bytes()[b.baseOffset:] // Store checksum for current block. - b.writeChecksum(blockBuf) - - // Compress the block. - if b.opt.Compression != options.None { - var err error - // TODO: Find a way to reuse buffers. Current implementation creates a - // new buffer for each compressData call. 
- blockBuf, err = b.compressData(b.buf.Bytes()[b.baseOffset:]) - y.Check(err) - // Truncate already written data. - b.buf.Truncate(int(b.baseOffset)) - // Write compressed data. - b.buf.Write(blockBuf) - } - if b.shouldEncrypt() { - block := b.buf.Bytes()[b.baseOffset:] - eBlock, err := b.encrypt(block) - y.Check(y.Wrapf(err, "Error while encrypting block in table builder.")) - // We're rewriting the block, after encrypting. - b.buf.Truncate(int(b.baseOffset)) - b.buf.Write(eBlock) - } - - // TODO(Ashish):Add padding: If we want to make block as multiple of OS pages, we can - // implement padding. This might be useful while using direct I/O. - - // Add key to the block index - bo := &pb.BlockOffset{ - Key: y.Copy(b.baseKey), - Offset: b.baseOffset, - Len: uint32(b.buf.Len()) - b.baseOffset, - } - b.tableIndex.Offsets = append(b.tableIndex.Offsets, bo) -} - -func (b *Builder) shouldFinishBlock(key []byte, value y.ValueStruct) bool { - // If there is no entry till now, we will return false. - if len(b.entryOffsets) <= 0 { - return false - } - - // Integer overflow check for statements below. - y.AssertTrue((uint32(len(b.entryOffsets))+1)*4+4+8+4 < math.MaxUint32) - // We should include current entry also in size, that's why +1 to len(b.entryOffsets). - entriesOffsetsSize := uint32((len(b.entryOffsets)+1)*4 + - 4 + // size of list - 8 + // Sum64 in checksum proto - 4) // checksum length - estimatedSize := uint32(b.buf.Len()) - b.baseOffset + uint32(6 /*header size for entry*/) + - uint32(len(key)) + uint32(value.EncodedSize()) + entriesOffsetsSize - - if b.shouldEncrypt() { - // IV is added at the end of the block, while encrypting. - // So, size of IV is added to estimatedSize. - estimatedSize += aes.BlockSize - } - return estimatedSize > uint32(b.opt.BlockSize) -} - -// Add adds a key-value pair to the block. -func (b *Builder) Add(key []byte, value y.ValueStruct, valueLen uint32) { - if b.shouldFinishBlock(key, value) { - b.finishBlock() - // Start a new block. Initialize the block. - b.baseKey = []byte{} - y.AssertTrue(uint32(b.buf.Len()) < math.MaxUint32) - b.baseOffset = uint32(b.buf.Len()) - b.entryOffsets = b.entryOffsets[:0] - } - b.addHelper(key, value, uint64(valueLen)) -} - -// TODO: vvv this was the comment on ReachedCapacity. -// FinalSize returns the *rough* final size of the array, counting the header which is -// not yet written. -// TODO: Look into why there is a discrepancy. I suspect it is because of Write(empty, empty) -// at the end. The diff can vary. - -// ReachedCapacity returns true if we... roughly (?) reached capacity? -func (b *Builder) ReachedCapacity(cap int64) bool { - blocksSize := b.buf.Len() + // length of current buffer - len(b.entryOffsets)*4 + // all entry offsets size - 4 + // count of all entry offsets - 8 + // checksum bytes - 4 // checksum length - estimateSz := blocksSize + - 4 + // Index length - 5*(len(b.tableIndex.Offsets)) // approximate index size - - return int64(estimateSz) > cap -} - -// Finish finishes the table by appending the index. -/* -The table structure looks like -+---------+------------+-----------+---------------+ -| Block 1 | Block 2 | Block 3 | Block 4 | -+---------+------------+-----------+---------------+ -| Block 5 | Block 6 | Block ... | Block N | -+---------+------------+-----------+---------------+ -| Index | Index Size | Checksum | Checksum Size | -+---------+------------+-----------+---------------+ -*/ -// In case the data is encrypted, the "IV" is added to the end of the index. 
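Given the table layout diagram above, the index can be located by walking the file tail backwards. A sketch over the `[]byte` returned by `Finish`, assuming `y.U32ToBytes` writes big-endian (which is how the sizes would be read back when the table is opened):

```go
readU32 := func(b []byte) uint32 { return binary.BigEndian.Uint32(b) }

end := len(data)
checksumLen := int(readU32(data[end-4 : end]))
end -= 4 + checksumLen // skip checksum length field + checksum
indexLen := int(readU32(data[end-4 : end]))
index := data[end-4-indexLen : end-4]
_ = index // proto-encoded pb.TableIndex (possibly encrypted, IV at the end)
```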
-func (b *Builder) Finish() []byte { - bf := z.NewBloomFilter(float64(len(b.keyHashes)), b.opt.BloomFalsePositive) - for _, h := range b.keyHashes { - bf.Add(h) - } - // Add bloom filter to the index. - b.tableIndex.BloomFilter = bf.JSONMarshal() - - b.finishBlock() // This will never start a new block. - - index, err := proto.Marshal(b.tableIndex) - y.Check(err) - - if b.shouldEncrypt() { - index, err = b.encrypt(index) - y.Check(err) - } - // Write index the file. - n, err := b.buf.Write(index) - y.Check(err) - - y.AssertTrue(uint32(n) < math.MaxUint32) - // Write index size. - _, err = b.buf.Write(y.U32ToBytes(uint32(n))) - y.Check(err) - - b.writeChecksum(index) - return b.buf.Bytes() -} - -func (b *Builder) writeChecksum(data []byte) { - // Build checksum for the index. - checksum := pb.Checksum{ - // TODO: The checksum type should be configurable from the - // options. - // We chose to use CRC32 as the default option because - // it performed better compared to xxHash64. - // See the BenchmarkChecksum in table_test.go file - // Size => 1024 B 2048 B - // CRC32 => 63.7 ns/op 112 ns/op - // xxHash64 => 87.5 ns/op 158 ns/op - Sum: y.CalculateChecksum(data, pb.Checksum_CRC32C), - Algo: pb.Checksum_CRC32C, - } - - // Write checksum to the file. - chksum, err := proto.Marshal(&checksum) - y.Check(err) - n, err := b.buf.Write(chksum) - y.Check(err) - - y.AssertTrue(uint32(n) < math.MaxUint32) - // Write checksum size. - _, err = b.buf.Write(y.U32ToBytes(uint32(n))) - y.Check(err) -} - -// DataKey returns datakey of the builder. -func (b *Builder) DataKey() *pb.DataKey { - return b.opt.DataKey -} - -// encrypt will encrypt the given data and appends IV to the end of the encrypted data. -// This should be only called only after checking shouldEncrypt method. -func (b *Builder) encrypt(data []byte) ([]byte, error) { - iv, err := y.GenerateIV() - if err != nil { - return data, y.Wrapf(err, "Error while generating IV in Builder.encrypt") - } - data, err = y.XORBlock(data, b.DataKey().Data, iv) - if err != nil { - return data, y.Wrapf(err, "Error while encrypting in Builder.encrypt") - } - data = append(data, iv...) - return data, nil -} - -// shouldEncrypt tells us whether to encrypt the data or not. -// We encrypt only if the data key exist. Otherwise, not. -func (b *Builder) shouldEncrypt() bool { - return b.opt.DataKey != nil -} - -// compressData compresses the given data. -func (b *Builder) compressData(data []byte) ([]byte, error) { - switch b.opt.Compression { - case options.None: - return data, nil - case options.Snappy: - return snappy.Encode(nil, data), nil - case options.ZSTD: - return y.ZSTDCompress(nil, data, b.opt.ZSTDCompressionLevel) - } - return nil, errors.New("Unsupported compression type") -} diff --git a/vendor/github.com/dgraph-io/badger/v2/table/iterator.go b/vendor/github.com/dgraph-io/badger/v2/table/iterator.go deleted file mode 100644 index 8f46fe1b..00000000 --- a/vendor/github.com/dgraph-io/badger/v2/table/iterator.go +++ /dev/null @@ -1,524 +0,0 @@ -/* - * Copyright 2017 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package table - -import ( - "bytes" - "io" - "sort" - - "github.com/dgraph-io/badger/v2/y" - "github.com/pkg/errors" -) - -type blockIterator struct { - data []byte - idx int // Idx of the entry inside a block - err error - baseKey []byte - key []byte - val []byte - entryOffsets []uint32 - - // prevOverlap stores the overlap of the previous key with the base key. - // This avoids unnecessary copy of base key when the overlap is same for multiple keys. - prevOverlap uint16 -} - -func (itr *blockIterator) setBlock(b *block) { - itr.err = nil - itr.idx = 0 - itr.baseKey = itr.baseKey[:0] - itr.prevOverlap = 0 - itr.key = itr.key[:0] - itr.val = itr.val[:0] - // Drop the index from the block. We don't need it anymore. - itr.data = b.data[:b.entriesIndexStart] - itr.entryOffsets = b.entryOffsets -} - -// setIdx sets the iterator to the entry at index i and set it's key and value. -func (itr *blockIterator) setIdx(i int) { - itr.idx = i - if i >= len(itr.entryOffsets) || i < 0 { - itr.err = io.EOF - return - } - itr.err = nil - startOffset := int(itr.entryOffsets[i]) - - // Set base key. - if len(itr.baseKey) == 0 { - var baseHeader header - baseHeader.Decode(itr.data) - itr.baseKey = itr.data[headerSize : headerSize+baseHeader.diff] - } - var endOffset int - // idx points to the last entry in the block. - if itr.idx+1 == len(itr.entryOffsets) { - endOffset = len(itr.data) - } else { - // idx point to some entry other than the last one in the block. - // EndOffset of the current entry is the start offset of the next entry. - endOffset = int(itr.entryOffsets[itr.idx+1]) - } - - entryData := itr.data[startOffset:endOffset] - var h header - h.Decode(entryData) - // Header contains the length of key overlap and difference compared to the base key. If the key - // before this one had the same or better key overlap, we can avoid copying that part into - // itr.key. But, if the overlap was lesser, we could copy over just that portion. - if h.overlap > itr.prevOverlap { - itr.key = append(itr.key[:itr.prevOverlap], itr.baseKey[itr.prevOverlap:h.overlap]...) - } - itr.prevOverlap = h.overlap - valueOff := headerSize + h.diff - diffKey := entryData[headerSize:valueOff] - itr.key = append(itr.key[:h.overlap], diffKey...) - itr.val = entryData[valueOff:] -} - -func (itr *blockIterator) Valid() bool { - return itr != nil && itr.err == nil -} - -func (itr *blockIterator) Error() error { - return itr.err -} - -func (itr *blockIterator) Close() {} - -var ( - origin = 0 - current = 1 -) - -// seek brings us to the first block element that is >= input key. -func (itr *blockIterator) seek(key []byte, whence int) { - itr.err = nil - startIndex := 0 // This tells from which index we should start binary search. - - switch whence { - case origin: - // We don't need to do anything. startIndex is already at 0 - case current: - startIndex = itr.idx - } - - foundEntryIdx := sort.Search(len(itr.entryOffsets), func(idx int) bool { - // If idx is less than start index then just return false. - if idx < startIndex { - return false - } - itr.setIdx(idx) - return y.CompareKeys(itr.key, key) >= 0 - }) - itr.setIdx(foundEntryIdx) -} - -// seekToFirst brings us to the first element. -func (itr *blockIterator) seekToFirst() { - itr.setIdx(0) -} - -// seekToLast brings us to the last element. 
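The key reconstruction done by `setIdx` above is the reader-side half of the builder's prefix compression: each entry stores only `(overlap, diff)`, where the first `overlap` bytes are shared with the block's base key. As a standalone sketch:

```go
// reconstruct rebuilds a full key from a block's base key plus one
// entry's (overlap, diff) header, as setIdx does incrementally.
func reconstruct(baseKey, diff []byte, overlap uint16) []byte {
	key := make([]byte, int(overlap)+len(diff))
	copy(key, baseKey[:overlap])
	copy(key[overlap:], diff)
	return key
}
```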
-func (itr *blockIterator) seekToLast() { - itr.setIdx(len(itr.entryOffsets) - 1) -} - -func (itr *blockIterator) next() { - itr.setIdx(itr.idx + 1) -} - -func (itr *blockIterator) prev() { - itr.setIdx(itr.idx - 1) -} - -// Iterator is an iterator for a Table. -type Iterator struct { - t *Table - bpos int - bi blockIterator - err error - - // Internally, Iterator is bidirectional. However, we only expose the - // unidirectional functionality for now. - reversed bool -} - -// NewIterator returns a new iterator of the Table -func (t *Table) NewIterator(reversed bool) *Iterator { - t.IncrRef() // Important. - ti := &Iterator{t: t, reversed: reversed} - ti.next() - return ti -} - -// Close closes the iterator (and it must be called). -func (itr *Iterator) Close() error { - return itr.t.DecrRef() -} - -func (itr *Iterator) reset() { - itr.bpos = 0 - itr.err = nil -} - -// Valid follows the y.Iterator interface -func (itr *Iterator) Valid() bool { - return itr.err == nil -} - -func (itr *Iterator) seekToFirst() { - numBlocks := itr.t.noOfBlocks - if numBlocks == 0 { - itr.err = io.EOF - return - } - itr.bpos = 0 - block, err := itr.t.block(itr.bpos) - if err != nil { - itr.err = err - return - } - itr.bi.setBlock(block) - itr.bi.seekToFirst() - itr.err = itr.bi.Error() -} - -func (itr *Iterator) seekToLast() { - numBlocks := itr.t.noOfBlocks - if numBlocks == 0 { - itr.err = io.EOF - return - } - itr.bpos = numBlocks - 1 - block, err := itr.t.block(itr.bpos) - if err != nil { - itr.err = err - return - } - itr.bi.setBlock(block) - itr.bi.seekToLast() - itr.err = itr.bi.Error() -} - -func (itr *Iterator) seekHelper(blockIdx int, key []byte) { - itr.bpos = blockIdx - block, err := itr.t.block(blockIdx) - if err != nil { - itr.err = err - return - } - itr.bi.setBlock(block) - itr.bi.seek(key, origin) - itr.err = itr.bi.Error() -} - -// seekFrom brings us to a key that is >= input key. -func (itr *Iterator) seekFrom(key []byte, whence int) { - itr.err = nil - switch whence { - case origin: - itr.reset() - case current: - } - - idx := sort.Search(itr.t.noOfBlocks, func(idx int) bool { - ko := itr.t.blockOffsets()[idx] - return y.CompareKeys(ko.Key, key) > 0 - }) - if idx == 0 { - // The smallest key in our table is already strictly > key. We can return that. - // This is like a SeekToFirst. - itr.seekHelper(0, key) - return - } - - // block[idx].smallest is > key. - // Since idx>0, we know block[idx-1].smallest is <= key. - // There are two cases. - // 1) Everything in block[idx-1] is strictly < key. In this case, we should go to the first - // element of block[idx]. - // 2) Some element in block[idx-1] is >= key. We should go to that element. - itr.seekHelper(idx-1, key) - if itr.err == io.EOF { - // Case 1. Need to visit block[idx]. - if idx == itr.t.noOfBlocks { - // If idx == len(itr.t.blockIndex), then input key is greater than ANY element of table. - // There's nothing we can do. Valid() should return false as we seek to end of table. - return - } - // Since block[idx].smallest is > key. This is essentially a block[idx].SeekToFirst. - itr.seekHelper(idx, key) - } - // Case 2: No need to do anything. We already did the seek in block[idx-1]. -} - -// seek will reset iterator and seek to >= key. -func (itr *Iterator) seek(key []byte) { - itr.seekFrom(key, origin) -} - -// seekForPrev will reset iterator and seek to <= key. -func (itr *Iterator) seekForPrev(key []byte) { - // TODO: Optimize this. We shouldn't have to take a Prev step. 
- itr.seekFrom(key, origin) - if !bytes.Equal(itr.Key(), key) { - itr.prev() - } -} - -func (itr *Iterator) next() { - itr.err = nil - - if itr.bpos >= itr.t.noOfBlocks { - itr.err = io.EOF - return - } - - if len(itr.bi.data) == 0 { - block, err := itr.t.block(itr.bpos) - if err != nil { - itr.err = err - return - } - itr.bi.setBlock(block) - itr.bi.seekToFirst() - itr.err = itr.bi.Error() - return - } - - itr.bi.next() - if !itr.bi.Valid() { - itr.bpos++ - itr.bi.data = nil - itr.next() - return - } -} - -func (itr *Iterator) prev() { - itr.err = nil - if itr.bpos < 0 { - itr.err = io.EOF - return - } - - if len(itr.bi.data) == 0 { - block, err := itr.t.block(itr.bpos) - if err != nil { - itr.err = err - return - } - itr.bi.setBlock(block) - itr.bi.seekToLast() - itr.err = itr.bi.Error() - return - } - - itr.bi.prev() - if !itr.bi.Valid() { - itr.bpos-- - itr.bi.data = nil - itr.prev() - return - } -} - -// Key follows the y.Iterator interface. -// Returns the key with timestamp. -func (itr *Iterator) Key() []byte { - return itr.bi.key -} - -// Value follows the y.Iterator interface -func (itr *Iterator) Value() (ret y.ValueStruct) { - ret.Decode(itr.bi.val) - return -} - -// ValueCopy copies the current value and returns it as decoded -// ValueStruct. -func (itr *Iterator) ValueCopy() (ret y.ValueStruct) { - dst := y.Copy(itr.bi.val) - ret.Decode(dst) - return -} - -// Next follows the y.Iterator interface -func (itr *Iterator) Next() { - if !itr.reversed { - itr.next() - } else { - itr.prev() - } -} - -// Rewind follows the y.Iterator interface -func (itr *Iterator) Rewind() { - if !itr.reversed { - itr.seekToFirst() - } else { - itr.seekToLast() - } -} - -// Seek follows the y.Iterator interface -func (itr *Iterator) Seek(key []byte) { - if !itr.reversed { - itr.seek(key) - } else { - itr.seekForPrev(key) - } -} - -// ConcatIterator concatenates the sequences defined by several iterators. (It only works with -// TableIterators, probably just because it's faster to not be so generic.) -type ConcatIterator struct { - idx int // Which iterator is active now. - cur *Iterator - iters []*Iterator // Corresponds to tables. - tables []*Table // Disregarding reversed, this is in ascending order. - reversed bool -} - -// NewConcatIterator creates a new concatenated iterator -func NewConcatIterator(tbls []*Table, reversed bool) *ConcatIterator { - iters := make([]*Iterator, len(tbls)) - for i := 0; i < len(tbls); i++ { - // Increment the reference count. Since, we're not creating the iterator right now. - // Here, We'll hold the reference of the tables, till the lifecycle of the iterator. - tbls[i].IncrRef() - - // Save cycles by not initializing the iterators until needed. - // iters[i] = tbls[i].NewIterator(reversed) - } - return &ConcatIterator{ - reversed: reversed, - iters: iters, - tables: tbls, - idx: -1, // Not really necessary because s.it.Valid()=false, but good to have. 
- } -} - -func (s *ConcatIterator) setIdx(idx int) { - s.idx = idx - if idx < 0 || idx >= len(s.iters) { - s.cur = nil - return - } - if s.iters[idx] == nil { - s.iters[idx] = s.tables[idx].NewIterator(s.reversed) - } - s.cur = s.iters[s.idx] -} - -// Rewind implements y.Interface -func (s *ConcatIterator) Rewind() { - if len(s.iters) == 0 { - return - } - if !s.reversed { - s.setIdx(0) - } else { - s.setIdx(len(s.iters) - 1) - } - s.cur.Rewind() -} - -// Valid implements y.Interface -func (s *ConcatIterator) Valid() bool { - return s.cur != nil && s.cur.Valid() -} - -// Key implements y.Interface -func (s *ConcatIterator) Key() []byte { - return s.cur.Key() -} - -// Value implements y.Interface -func (s *ConcatIterator) Value() y.ValueStruct { - return s.cur.Value() -} - -// Seek brings us to element >= key if reversed is false. Otherwise, <= key. -func (s *ConcatIterator) Seek(key []byte) { - var idx int - if !s.reversed { - idx = sort.Search(len(s.tables), func(i int) bool { - return y.CompareKeys(s.tables[i].Biggest(), key) >= 0 - }) - } else { - n := len(s.tables) - idx = n - 1 - sort.Search(n, func(i int) bool { - return y.CompareKeys(s.tables[n-1-i].Smallest(), key) <= 0 - }) - } - if idx >= len(s.tables) || idx < 0 { - s.setIdx(-1) - return - } - // For reversed=false, we know s.tables[i-1].Biggest() < key. Thus, the - // previous table cannot possibly contain key. - s.setIdx(idx) - s.cur.Seek(key) -} - -// Next advances our concat iterator. -func (s *ConcatIterator) Next() { - s.cur.Next() - if s.cur.Valid() { - // Nothing to do. Just stay with the current table. - return - } - for { // In case there are empty tables. - if !s.reversed { - s.setIdx(s.idx + 1) - } else { - s.setIdx(s.idx - 1) - } - if s.cur == nil { - // End of list. Valid will become false. - return - } - s.cur.Rewind() - if s.cur.Valid() { - break - } - } -} - -// Close implements y.Interface. -func (s *ConcatIterator) Close() error { - for _, t := range s.tables { - // DeReference the tables while closing the iterator. - if err := t.DecrRef(); err != nil { - return err - } - } - for _, it := range s.iters { - if it == nil { - continue - } - if err := it.Close(); err != nil { - return errors.Wrap(err, "ConcatIterator") - } - } - return nil -} diff --git a/vendor/github.com/dgraph-io/badger/v2/table/merge_iterator.go b/vendor/github.com/dgraph-io/badger/v2/table/merge_iterator.go deleted file mode 100644 index e1809e02..00000000 --- a/vendor/github.com/dgraph-io/badger/v2/table/merge_iterator.go +++ /dev/null @@ -1,232 +0,0 @@ -/* - * Copyright 2019 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package table - -import ( - "bytes" - - "github.com/dgraph-io/badger/v2/y" - "github.com/pkg/errors" -) - -// MergeIterator merges multiple iterators. -// NOTE: MergeIterator owns the array of iterators and is responsible for closing them. 
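The `MergeIterator` defined just below is a two-way merge; `NewMergeIterator` later in this hunk composes them into a balanced binary tree, so `Next` costs O(log n) key comparisons for n source iterators. A sketch of driving it from outside the package, with `tables` and `handle` as hypothetical caller state:

```go
its := make([]y.Iterator, 0, len(tables))
for _, t := range tables {
	its = append(its, t.NewIterator(false)) // forward iteration
}
mit := table.NewMergeIterator(its, false) // takes ownership; Close closes all
defer func() { _ = mit.Close() }()
for mit.Rewind(); mit.Valid(); mit.Next() {
	handle(mit.Key(), mit.Value()) // duplicate keys are skipped by Next
}
```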
-type MergeIterator struct { - left node - right node - small *node - - curKey []byte - reverse bool -} - -type node struct { - valid bool - key []byte - iter y.Iterator - - // The two iterators are type asserted from `y.Iterator`, used to inline more function calls. - // Calling functions on concrete types is much faster (about 25-30%) than calling the - // interface's function. - merge *MergeIterator - concat *ConcatIterator -} - -func (n *node) setIterator(iter y.Iterator) { - n.iter = iter - // It's okay if the type assertion below fails and n.merge/n.concat are set to nil. - // We handle the nil values of merge and concat in all the methods. - n.merge, _ = iter.(*MergeIterator) - n.concat, _ = iter.(*ConcatIterator) -} - -func (n *node) setKey() { - switch { - case n.merge != nil: - n.valid = n.merge.small.valid - if n.valid { - n.key = n.merge.small.key - } - case n.concat != nil: - n.valid = n.concat.Valid() - if n.valid { - n.key = n.concat.Key() - } - default: - n.valid = n.iter.Valid() - if n.valid { - n.key = n.iter.Key() - } - } -} - -func (n *node) next() { - switch { - case n.merge != nil: - n.merge.Next() - case n.concat != nil: - n.concat.Next() - default: - n.iter.Next() - } - n.setKey() -} - -func (n *node) rewind() { - n.iter.Rewind() - n.setKey() -} - -func (n *node) seek(key []byte) { - n.iter.Seek(key) - n.setKey() -} - -func (mi *MergeIterator) fix() { - if !mi.bigger().valid { - return - } - if !mi.small.valid { - mi.swapSmall() - return - } - cmp := y.CompareKeys(mi.small.key, mi.bigger().key) - switch { - case cmp == 0: // Both the keys are equal. - // In case of same keys, move the right iterator ahead. - mi.right.next() - if &mi.right == mi.small { - mi.swapSmall() - } - return - case cmp < 0: // Small is less than bigger(). - if mi.reverse { - mi.swapSmall() - } else { - // we don't need to do anything. Small already points to the smallest. - } - return - default: // bigger() is less than small. - if mi.reverse { - // Do nothing since we're iterating in reverse. Small currently points to - // the bigger key and that's okay in reverse iteration. - } else { - mi.swapSmall() - } - return - } -} - -func (mi *MergeIterator) bigger() *node { - if mi.small == &mi.left { - return &mi.right - } - return &mi.left -} - -func (mi *MergeIterator) swapSmall() { - if mi.small == &mi.left { - mi.small = &mi.right - return - } - if mi.small == &mi.right { - mi.small = &mi.left - return - } -} - -// Next returns the next element. If it is the same as the current key, ignore it. -func (mi *MergeIterator) Next() { - for mi.Valid() { - if !bytes.Equal(mi.small.key, mi.curKey) { - break - } - mi.small.next() - mi.fix() - } - mi.setCurrent() -} - -func (mi *MergeIterator) setCurrent() { - mi.curKey = append(mi.curKey[:0], mi.small.key...) -} - -// Rewind seeks to first element (or last element for reverse iterator). -func (mi *MergeIterator) Rewind() { - mi.left.rewind() - mi.right.rewind() - mi.fix() - mi.setCurrent() -} - -// Seek brings us to element with key >= given key. -func (mi *MergeIterator) Seek(key []byte) { - mi.left.seek(key) - mi.right.seek(key) - mi.fix() - mi.setCurrent() -} - -// Valid returns whether the MergeIterator is at a valid element. -func (mi *MergeIterator) Valid() bool { - return mi.small.valid -} - -// Key returns the key associated with the current iterator. -func (mi *MergeIterator) Key() []byte { - return mi.small.key -} - -// Value returns the value associated with the iterator. 
-func (mi *MergeIterator) Value() y.ValueStruct { - return mi.small.iter.Value() -} - -// Close implements y.Iterator. -func (mi *MergeIterator) Close() error { - err1 := mi.left.iter.Close() - err2 := mi.right.iter.Close() - if err1 != nil { - return errors.Wrap(err1, "MergeIterator") - } - return errors.Wrap(err2, "MergeIterator") -} - -// NewMergeIterator creates a merge iterator. -func NewMergeIterator(iters []y.Iterator, reverse bool) y.Iterator { - switch len(iters) { - case 0: - return nil - case 1: - return iters[0] - case 2: - mi := &MergeIterator{ - reverse: reverse, - } - mi.left.setIterator(iters[0]) - mi.right.setIterator(iters[1]) - // Assign left iterator randomly. This will be fixed when user calls rewind/seek. - mi.small = &mi.left - return mi - } - mid := len(iters) / 2 - return NewMergeIterator( - []y.Iterator{ - NewMergeIterator(iters[:mid], reverse), - NewMergeIterator(iters[mid:], reverse), - }, reverse) -} diff --git a/vendor/github.com/dgraph-io/badger/v2/table/table.go b/vendor/github.com/dgraph-io/badger/v2/table/table.go deleted file mode 100644 index f8841b63..00000000 --- a/vendor/github.com/dgraph-io/badger/v2/table/table.go +++ /dev/null @@ -1,716 +0,0 @@ -/* - * Copyright 2017 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package table - -import ( - "crypto/aes" - "encoding/binary" - "fmt" - "io" - "math" - "os" - "path" - "path/filepath" - "strconv" - "strings" - "sync" - "sync/atomic" - "unsafe" - - "github.com/golang/protobuf/proto" - "github.com/golang/snappy" - "github.com/pkg/errors" - - "github.com/dgraph-io/badger/v2/options" - "github.com/dgraph-io/badger/v2/pb" - "github.com/dgraph-io/badger/v2/y" - "github.com/dgraph-io/ristretto" - "github.com/dgraph-io/ristretto/z" -) - -const fileSuffix = ".sst" -const intSize = int(unsafe.Sizeof(int(0))) - -// 1 word = 8 bytes -// sizeOfOffsetStruct is the size of pb.BlockOffset -const sizeOfOffsetStruct int64 = 3*8 + // key array take 3 words - 1*8 + // offset and len takes 1 word - 3*8 + // XXX_unrecognized array takes 3 word. - 1*8 // so far 7 words, in order to round the slab we're adding one more word. - -// Options contains configurable options for Table/Builder. -type Options struct { - // Options for Opening/Building Table. - - // ChkMode is the checksum verification mode for Table. - ChkMode options.ChecksumVerificationMode - - // LoadingMode is the mode to be used for loading Table. - LoadingMode options.FileLoadingMode - - // Options for Table builder. - - // BloomFalsePositive is the false positive probabiltiy of bloom filter. - BloomFalsePositive float64 - - // BlockSize is the size of each block inside SSTable in bytes. - BlockSize int - - // DataKey is the key used to decrypt the encrypted text. - DataKey *pb.DataKey - - // Compression indicates the compression algorithm used for block compression. 
- Compression options.CompressionType - - BlockCache *ristretto.Cache - IndexCache *ristretto.Cache - - // ZSTDCompressionLevel is the ZSTD compression level used for compressing blocks. - ZSTDCompressionLevel int - - // When LoadBloomsOnOpen is set, bloom filters will be loaded while opening - // the table. Otherwise, they will be loaded lazily when they're accessed. - LoadBloomsOnOpen bool -} - -// TableInterface is useful for testing. -type TableInterface interface { - Smallest() []byte - Biggest() []byte - DoesNotHave(hash uint64) bool -} - -// Table represents a loaded table file with the info we have about it. -type Table struct { - sync.Mutex - - fd *os.File // Own fd. - tableSize int // Initialized in OpenTable, using fd.Stat(). - bfLock sync.Mutex - - blockOffset []*pb.BlockOffset - ref int32 // For file garbage collection. Atomic. - bf *z.Bloom // Nil if index cache in enabled. - - mmap []byte // Memory mapped. - - // The following are initialized once and const. - smallest, biggest []byte // Smallest and largest keys (with timestamps). - id uint64 // file id, part of filename - - Checksum []byte - // Stores the total size of key-values stored in this table (including the size on vlog). - estimatedSize uint64 - indexStart int - indexLen int - - IsInmemory bool // Set to true if the table is on level 0 and opened in memory. - opt *Options - - noOfBlocks int // Total number of blocks. -} - -// CompressionType returns the compression algorithm used for block compression. -func (t *Table) CompressionType() options.CompressionType { - return t.opt.Compression -} - -// IncrRef increments the refcount (having to do with whether the file should be deleted) -func (t *Table) IncrRef() { - atomic.AddInt32(&t.ref, 1) -} - -// DecrRef decrements the refcount and possibly deletes the table -func (t *Table) DecrRef() error { - newRef := atomic.AddInt32(&t.ref, -1) - if newRef == 0 { - // We can safely delete this file, because for all the current files, we always have - // at least one reference pointing to them. - - // It's necessary to delete windows files. - if t.opt.LoadingMode == options.MemoryMap { - if err := y.Munmap(t.mmap); err != nil { - return err - } - t.mmap = nil - } - // fd can be nil if the table belongs to L0 and it is opened in memory. See - // OpenTableInMemory method. - if t.fd == nil { - return nil - } - if err := t.fd.Truncate(0); err != nil { - // This is very important to let the FS know that the file is deleted. - return err - } - filename := t.fd.Name() - if err := t.fd.Close(); err != nil { - return err - } - if err := os.Remove(filename); err != nil { - return err - } - // Delete all blocks from the cache. - for i := 0; i < t.noOfBlocks; i++ { - t.opt.BlockCache.Del(t.blockCacheKey(i)) - } - // Delete bloom filter and indices from the cache. 
- t.opt.IndexCache.Del(t.blockOffsetsCacheKey()) - t.opt.IndexCache.Del(t.bfCacheKey()) - } - return nil -} - -type block struct { - offset int - data []byte - checksum []byte - entriesIndexStart int // start index of entryOffsets list - entryOffsets []uint32 - chkLen int // checksum length -} - -func (b *block) size() int64 { - return int64(3*intSize /* Size of the offset, entriesIndexStart and chkLen */ + - cap(b.data) + cap(b.checksum) + cap(b.entryOffsets)*4) -} - -func (b block) verifyCheckSum() error { - cs := &pb.Checksum{} - if err := proto.Unmarshal(b.checksum, cs); err != nil { - return y.Wrapf(err, "unable to unmarshal checksum for block") - } - return y.VerifyChecksum(b.data, cs) -} - -// OpenTable assumes file has only one table and opens it. Takes ownership of fd upon function -// entry. Returns a table with one reference count on it (decrementing which may delete the file! -// -- consider t.Close() instead). The fd has to writeable because we call Truncate on it before -// deleting. Checksum for all blocks of table is verified based on value of chkMode. -func OpenTable(fd *os.File, opts Options) (*Table, error) { - fileInfo, err := fd.Stat() - if err != nil { - // It's OK to ignore fd.Close() errs in this function because we have only read - // from the file. - _ = fd.Close() - return nil, y.Wrap(err) - } - - filename := fileInfo.Name() - id, ok := ParseFileID(filename) - if !ok { - _ = fd.Close() - return nil, errors.Errorf("Invalid filename: %s", filename) - } - t := &Table{ - fd: fd, - ref: 1, // Caller is given one reference. - id: id, - opt: &opts, - IsInmemory: false, - } - - t.tableSize = int(fileInfo.Size()) - - switch opts.LoadingMode { - case options.LoadToRAM: - if _, err := t.fd.Seek(0, io.SeekStart); err != nil { - return nil, err - } - t.mmap = make([]byte, t.tableSize) - n, err := t.fd.Read(t.mmap) - if err != nil { - // It's OK to ignore fd.Close() error because we have only read from the file. - _ = t.fd.Close() - return nil, y.Wrapf(err, "Failed to load file into RAM") - } - if n != t.tableSize { - return nil, errors.Errorf("Failed to read all bytes from the file."+ - "Bytes in file: %d Bytes actually Read: %d", t.tableSize, n) - } - case options.MemoryMap: - t.mmap, err = y.Mmap(fd, false, fileInfo.Size()) - if err != nil { - _ = fd.Close() - return nil, y.Wrapf(err, "Unable to map file: %q", fileInfo.Name()) - } - case options.FileIO: - t.mmap = nil - default: - panic(fmt.Sprintf("Invalid loading mode: %v", opts.LoadingMode)) - } - - if err := t.initBiggestAndSmallest(); err != nil { - return nil, errors.Wrapf(err, "failed to initialize table") - } - - if opts.ChkMode == options.OnTableRead || opts.ChkMode == options.OnTableAndBlockRead { - if err := t.VerifyChecksum(); err != nil { - _ = fd.Close() - return nil, errors.Wrapf(err, "failed to verify checksum") - } - } - - return t, nil -} - -// OpenInMemoryTable is similar to OpenTable but it opens a new table from the provided data. -// OpenInMemoryTable is used for L0 tables. -func OpenInMemoryTable(data []byte, id uint64, opt *Options) (*Table, error) { - opt.LoadingMode = options.LoadToRAM - t := &Table{ - ref: 1, // Caller is given one reference. - opt: opt, - mmap: data, - tableSize: len(data), - IsInmemory: true, - id: id, // It is important that each table gets a unique ID. 
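IncrRef and DecrRef above are a textbook atomic reference count: whichever caller drops the count to zero performs the cleanup (munmap, truncate, close, remove, cache eviction). The pattern in isolation, with a stand-in resource type:

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    // resource frees itself when the last reference is dropped, the same
    // atomic-counter pattern as Table.IncrRef/DecrRef above.
    type resource struct {
        ref     int32
        cleanup func()
    }

    func (r *resource) incrRef() { atomic.AddInt32(&r.ref, 1) }

    func (r *resource) decrRef() {
        if atomic.AddInt32(&r.ref, -1) == 0 {
            r.cleanup() // exactly one caller observes zero and cleans up
        }
    }

    func main() {
        r := &resource{ref: 1, cleanup: func() { fmt.Println("released") }}
        r.incrRef() // an iterator pins the table
        r.decrRef() // the iterator closes
        r.decrRef() // the owner closes; prints "released"
    }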
- } - - if err := t.initBiggestAndSmallest(); err != nil { - return nil, err - } - return t, nil -} - -func (t *Table) initBiggestAndSmallest() error { - var err error - var ko *pb.BlockOffset - if ko, err = t.initIndex(); err != nil { - return errors.Wrapf(err, "failed to read index.") - } - - t.smallest = ko.Key - - it2 := t.NewIterator(true) - defer it2.Close() - it2.Rewind() - if !it2.Valid() { - return errors.Wrapf(it2.err, "failed to initialize biggest for table %s", t.Filename()) - } - t.biggest = it2.Key() - return nil -} - -// Close closes the open table. (Releases resources back to the OS.) -func (t *Table) Close() error { - if t.opt.LoadingMode == options.MemoryMap { - if err := y.Munmap(t.mmap); err != nil { - return err - } - t.mmap = nil - } - if t.fd == nil { - return nil - } - return t.fd.Close() -} - -func (t *Table) read(off, sz int) ([]byte, error) { - if len(t.mmap) > 0 { - if len(t.mmap[off:]) < sz { - return nil, y.ErrEOF - } - return t.mmap[off : off+sz], nil - } - - res := make([]byte, sz) - nbr, err := t.fd.ReadAt(res, int64(off)) - y.NumReads.Add(1) - y.NumBytesRead.Add(int64(nbr)) - return res, err -} - -func (t *Table) readNoFail(off, sz int) []byte { - res, err := t.read(off, sz) - y.Check(err) - return res -} - -// initIndex reads the index and populate the necessary table fields and returns -// first block offset -func (t *Table) initIndex() (*pb.BlockOffset, error) { - readPos := t.tableSize - - // Read checksum len from the last 4 bytes. - readPos -= 4 - buf := t.readNoFail(readPos, 4) - checksumLen := int(y.BytesToU32(buf)) - if checksumLen < 0 { - return nil, errors.New("checksum length less than zero. Data corrupted") - } - - // Read checksum. - expectedChk := &pb.Checksum{} - readPos -= checksumLen - buf = t.readNoFail(readPos, checksumLen) - if err := proto.Unmarshal(buf, expectedChk); err != nil { - return nil, err - } - - // Read index size from the footer. - readPos -= 4 - buf = t.readNoFail(readPos, 4) - t.indexLen = int(y.BytesToU32(buf)) - - // Read index. - readPos -= t.indexLen - t.indexStart = readPos - data := t.readNoFail(readPos, t.indexLen) - - if err := y.VerifyChecksum(data, expectedChk); err != nil { - return nil, y.Wrapf(err, "failed to verify checksum for table: %s", t.Filename()) - } - - index, err := t.readTableIndex() - if err != nil { - return nil, err - } - - if t.opt.Compression == options.None { - t.estimatedSize = index.EstimatedSize - } else { - // Due to compression the real size on disk is much - // smaller than what we estimate from index.EstimatedSize. - t.estimatedSize = uint64(t.tableSize) - } - t.noOfBlocks = len(index.Offsets) - - // No cache - if t.opt.IndexCache == nil { - if t.opt.LoadBloomsOnOpen { - bf, err := z.JSONUnmarshal(index.BloomFilter) - if err != nil { - return nil, - errors.Wrapf(err, "failed to unmarshal bloomfilter for table:%d", t.id) - } - // Keep blooms in memory. - t.bfLock.Lock() - t.bf = bf - t.bfLock.Unlock() - } - // Keep block offsets in memory since there is no cache. - t.blockOffset = index.Offsets - } - - // We don't need to put anything in the indexCache here. Table.Open will - // create an iterator and that iterator will push the indices in cache. - return index.Offsets[0], nil -} - -// blockOffsets returns block offsets of this table. 
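initIndex above reads the table's metadata back to front from a fixed-layout footer. The same walk, self-contained and stripped of proto decoding and checksum verification; the field order follows the code above, everything else (names, sample data) is illustrative:

    package main

    import (
        "encoding/binary"
        "errors"
        "fmt"
    )

    // parseFooter walks an SST-style footer backwards, as initIndex does:
    // the last 4 bytes hold the checksum length, the checksum sits before
    // them, and the 4 bytes before that hold the index length.
    func parseFooter(buf []byte) (index, checksum []byte, err error) {
        pos := len(buf)
        if pos < 8 {
            return nil, nil, errors.New("buffer too small for a footer")
        }

        pos -= 4
        chkLen := int(binary.BigEndian.Uint32(buf[pos:]))
        if chkLen+8 > len(buf) {
            return nil, nil, errors.New("corrupt checksum length")
        }
        pos -= chkLen
        checksum = buf[pos : pos+chkLen]

        pos -= 4
        idxLen := int(binary.BigEndian.Uint32(buf[pos:]))
        if idxLen > pos {
            return nil, nil, errors.New("corrupt index length")
        }
        pos -= idxLen
        index = buf[pos : pos+idxLen]

        return index, checksum, nil
    }

    func main() {
        var u32 [4]byte
        buf := []byte("...blocks...INDEX")
        binary.BigEndian.PutUint32(u32[:], 5) // len("INDEX")
        buf = append(buf, u32[:]...)
        buf = append(buf, "CKSM"...)
        binary.BigEndian.PutUint32(u32[:], 4) // len("CKSM")
        buf = append(buf, u32[:]...)

        idx, chk, err := parseFooter(buf)
        fmt.Printf("index=%q checksum=%q err=%v\n", idx, chk, err)
    }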
-func (t *Table) blockOffsets() []*pb.BlockOffset { - if t.opt.IndexCache == nil { - return t.blockOffset - } - - if val, ok := t.opt.IndexCache.Get(t.blockOffsetsCacheKey()); ok && val != nil { - return val.([]*pb.BlockOffset) - } - - index, err := t.readTableIndex() - y.Check(err) - t.opt.IndexCache.Set( - t.blockOffsetsCacheKey(), - index.Offsets, - calculateOffsetsSize(index.Offsets)) - - return index.Offsets -} - -// calculateOffsetsSize returns the size of *pb.BlockOffset array -func calculateOffsetsSize(offsets []*pb.BlockOffset) int64 { - totalSize := sizeOfOffsetStruct * int64(len(offsets)) - - for _, ko := range offsets { - // add key size. - totalSize += int64(cap(ko.Key)) - // add XXX_unrecognized size. - totalSize += int64(cap(ko.XXX_unrecognized)) - } - // Add three words for array size. - return totalSize + 3*8 -} - -func (t *Table) block(idx int) (*block, error) { - y.AssertTruef(idx >= 0, "idx=%d", idx) - if idx >= t.noOfBlocks { - return nil, errors.New("block out of index") - } - if t.opt.BlockCache != nil { - key := t.blockCacheKey(idx) - blk, ok := t.opt.BlockCache.Get(key) - if ok && blk != nil { - return blk.(*block), nil - } - } - - // Read the block index if it's nil - ko := t.blockOffsets()[idx] - blk := &block{ - offset: int(ko.Offset), - } - var err error - if blk.data, err = t.read(blk.offset, int(ko.Len)); err != nil { - return nil, errors.Wrapf(err, - "failed to read from file: %s at offset: %d, len: %d", t.fd.Name(), blk.offset, ko.Len) - } - - if t.shouldDecrypt() { - // Decrypt the block if it is encrypted. - if blk.data, err = t.decrypt(blk.data); err != nil { - return nil, err - } - } - - blk.data, err = t.decompressData(blk.data) - if err != nil { - return nil, errors.Wrapf(err, - "failed to decode compressed data in file: %s at offset: %d, len: %d", - t.fd.Name(), blk.offset, ko.Len) - } - - // Read meta data related to block. - readPos := len(blk.data) - 4 // First read checksum length. - blk.chkLen = int(y.BytesToU32(blk.data[readPos : readPos+4])) - - // Checksum length greater than block size could happen if the table was compressed and - // it was opened with an incorrect compression algorithm (or the data was corrupted). - if blk.chkLen > len(blk.data) { - return nil, errors.New("invalid checksum length. Either the data is" + - "corrupted or the table options are incorrectly set") - } - - // Read checksum and store it - readPos -= blk.chkLen - blk.checksum = blk.data[readPos : readPos+blk.chkLen] - // Move back and read numEntries in the block. - readPos -= 4 - numEntries := int(y.BytesToU32(blk.data[readPos : readPos+4])) - entriesIndexStart := readPos - (numEntries * 4) - entriesIndexEnd := entriesIndexStart + numEntries*4 - - blk.entryOffsets = y.BytesToU32Slice(blk.data[entriesIndexStart:entriesIndexEnd]) - - blk.entriesIndexStart = entriesIndexStart - - // Drop checksum and checksum length. - // The checksum is calculated for actual data + entry index + index length - blk.data = blk.data[:readPos+4] - - // Verify checksum on if checksum verification mode is OnRead on OnStartAndRead. - if t.opt.ChkMode == options.OnBlockRead || t.opt.ChkMode == options.OnTableAndBlockRead { - if err = blk.verifyCheckSum(); err != nil { - return nil, err - } - } - if t.opt.BlockCache != nil { - key := t.blockCacheKey(idx) - t.opt.BlockCache.Set(key, blk, blk.size()) - } - return blk, nil -} - -// bfCacheKey returns the cache key for bloom filter. Bloom filters are stored in index cache. 
-func (t *Table) bfCacheKey() []byte { - y.AssertTrue(t.id < math.MaxUint32) - buf := make([]byte, 6) - // Without the "bf" prefix, we will have conflict with the blockCacheKey. - buf[0] = 'b' - buf[1] = 'f' - - binary.BigEndian.PutUint32(buf[2:], uint32(t.id)) - return buf -} - -// blockCacheKey is used to store blocks in the block cache. -func (t *Table) blockCacheKey(idx int) []byte { - y.AssertTrue(t.id < math.MaxUint32) - y.AssertTrue(uint32(idx) < math.MaxUint32) - - buf := make([]byte, 8) - // Assume t.ID does not overflow uint32. - binary.BigEndian.PutUint32(buf[:4], uint32(t.ID())) - binary.BigEndian.PutUint32(buf[4:], uint32(idx)) - return buf -} - -// blockOffsetsCacheKey returns the cache key for block offsets. blockOffsets -// are stored in the index cache. -func (t *Table) blockOffsetsCacheKey() uint64 { - return t.id -} - -// EstimatedSize returns the total size of key-values stored in this table (including the -// disk space occupied on the value log). -func (t *Table) EstimatedSize() uint64 { return t.estimatedSize } - -// Size is its file size in bytes -func (t *Table) Size() int64 { return int64(t.tableSize) } - -// Smallest is its smallest key, or nil if there are none -func (t *Table) Smallest() []byte { return t.smallest } - -// Biggest is its biggest key, or nil if there are none -func (t *Table) Biggest() []byte { return t.biggest } - -// Filename is NOT the file name. Just kidding, it is. -func (t *Table) Filename() string { return t.fd.Name() } - -// ID is the table's ID number (used to make the file name). -func (t *Table) ID() uint64 { return t.id } - -// DoesNotHave returns true if (but not "only if") the table does not have the key hash. -// It does a bloom filter lookup. -func (t *Table) DoesNotHave(hash uint64) bool { - // Return fast if the cache is absent. - if t.opt.IndexCache == nil { - t.bfLock.Lock() - if t.bf == nil { - y.AssertTrue(!t.opt.LoadBloomsOnOpen) - // Load bloomfilter into memory since the cache is absent. - t.bf, _ = t.readBloomFilter() - } - t.bfLock.Unlock() - return !t.bf.Has(hash) - } - - // Check if the bloom filter exists in the cache. - if bf, ok := t.opt.IndexCache.Get(t.bfCacheKey()); bf != nil && ok { - return !bf.(*z.Bloom).Has(hash) - } - - bf, sz := t.readBloomFilter() - t.opt.IndexCache.Set(t.bfCacheKey(), bf, int64(sz)) - return !bf.Has(hash) -} - -// readBloomFilter reads the bloom filter from the SST and returns its length -// along with the bloom filter. -func (t *Table) readBloomFilter() (*z.Bloom, int) { - // Read bloom filter from the SST. - index, err := t.readTableIndex() - y.Check(err) - - bf, err := z.JSONUnmarshal(index.BloomFilter) - y.Check(err) - return bf, len(index.BloomFilter) -} - -// readTableIndex reads table index from the sst and returns its pb format. -func (t *Table) readTableIndex() (*pb.TableIndex, error) { - data := t.readNoFail(t.indexStart, t.indexLen) - index := pb.TableIndex{} - var err error - // Decrypt the table index if it is encrypted. - if t.shouldDecrypt() { - if data, err = t.decrypt(data); err != nil { - return nil, y.Wrapf(err, - "Error while decrypting table index for the table %d in readTableIndex", t.id) - } - } - y.Check(proto.Unmarshal(data, &index)) - return &index, nil -} - -// VerifyChecksum verifies checksum for all blocks of table. This function is called by -// OpenTable() function. This function is also called inside levelsController.VerifyChecksum(). 
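blockCacheKey above packs two 32-bit values into one 8-byte key; bfCacheKey does the same with a two-byte "bf" prefix which, per its own comment, keeps bloom-filter keys from colliding with these packed block keys. The packing in isolation (names illustrative):

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    // blockKey packs a table ID and a block index into a single 8-byte cache
    // key, as blockCacheKey does; both values are assumed to fit in uint32.
    func blockKey(tableID uint64, idx int) []byte {
        buf := make([]byte, 8)
        binary.BigEndian.PutUint32(buf[:4], uint32(tableID))
        binary.BigEndian.PutUint32(buf[4:], uint32(idx))
        return buf
    }

    func main() {
        fmt.Printf("%x\n", blockKey(7, 3)) // 0000000700000003
    }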
-func (t *Table) VerifyChecksum() error { - for i, os := range t.blockOffsets() { - b, err := t.block(i) - if err != nil { - return y.Wrapf(err, "checksum validation failed for table: %s, block: %d, offset:%d", - t.Filename(), i, os.Offset) - } - - // OnBlockRead or OnTableAndBlockRead, we don't need to call verify checksum - // on block, verification would be done while reading block itself. - if !(t.opt.ChkMode == options.OnBlockRead || t.opt.ChkMode == options.OnTableAndBlockRead) { - if err = b.verifyCheckSum(); err != nil { - return y.Wrapf(err, - "checksum validation failed for table: %s, block: %d, offset:%d", - t.Filename(), i, os.Offset) - } - } - } - - return nil -} - -// shouldDecrypt tells whether to decrypt or not. We decrypt only if the datakey exist -// for the table. -func (t *Table) shouldDecrypt() bool { - return t.opt.DataKey != nil -} - -// KeyID returns data key id. -func (t *Table) KeyID() uint64 { - if t.opt.DataKey != nil { - return t.opt.DataKey.KeyId - } - // By default it's 0, if it is plain text. - return 0 -} - -// decrypt decrypts the given data. It should be called only after checking shouldDecrypt. -func (t *Table) decrypt(data []byte) ([]byte, error) { - // Last BlockSize bytes of the data is the IV. - iv := data[len(data)-aes.BlockSize:] - // Rest all bytes are data. - data = data[:len(data)-aes.BlockSize] - return y.XORBlock(data, t.opt.DataKey.Data, iv) -} - -// ParseFileID reads the file id out of a filename. -func ParseFileID(name string) (uint64, bool) { - name = path.Base(name) - if !strings.HasSuffix(name, fileSuffix) { - return 0, false - } - // suffix := name[len(fileSuffix):] - name = strings.TrimSuffix(name, fileSuffix) - id, err := strconv.Atoi(name) - if err != nil { - return 0, false - } - y.AssertTrue(id >= 0) - return uint64(id), true -} - -// IDToFilename does the inverse of ParseFileID -func IDToFilename(id uint64) string { - return fmt.Sprintf("%06d", id) + fileSuffix -} - -// NewFilename should be named TableFilepath -- it combines the dir with the ID to make a table -// filepath. -func NewFilename(id uint64, dir string) string { - return filepath.Join(dir, IDToFilename(id)) -} - -// decompressData decompresses the given data. -func (t *Table) decompressData(data []byte) ([]byte, error) { - switch t.opt.Compression { - case options.None: - return data, nil - case options.Snappy: - return snappy.Decode(nil, data) - case options.ZSTD: - return y.ZSTDDecompress(nil, data) - } - return nil, errors.New("Unsupported compression type") -} diff --git a/vendor/github.com/dgraph-io/badger/v2/test.sh b/vendor/github.com/dgraph-io/badger/v2/test.sh deleted file mode 100644 index 06c9396c..00000000 --- a/vendor/github.com/dgraph-io/badger/v2/test.sh +++ /dev/null @@ -1,46 +0,0 @@ -#!/bin/bash - -set -e - -go version - -packages=$(go list ./... | grep github.com/dgraph-io/badger/v2/) - -if [[ ! -z "$TEAMCITY_VERSION" ]]; then - export GOFLAGS="-json" -fi - -# Ensure that we can compile the binary. -pushd badger -go build -v . -popd - -# Run the memory intensive tests first. -go test -v -run='TestBigKeyValuePairs$' --manual=true -go test -v -run='TestPushValueLogLimit' --manual=true - -# Run the special Truncate test. -rm -rf p -go test -v -run='TestTruncateVlogNoClose$' --manual=true -truncate --size=4096 p/000000.vlog -go test -v -run='TestTruncateVlogNoClose2$' --manual=true -go test -v -run='TestTruncateVlogNoClose3$' --manual=true -rm -rf p - -# Then the normal tests. 
echo -echo "==> Starting test for table, skl and y package" -go test -v -race github.com/dgraph-io/badger/v2/skl -# Run tests for all packages except the top level package. The top level package supports the -# `vlog_mmap` flag, which the rest of the packages don't support. -go test -v -race $packages - -echo -echo "==> Starting tests with value log mmapped..." -# Run top level package tests with the mmap flag. -go test -timeout=25m -v -race github.com/dgraph-io/badger/v2 --vlog_mmap=true - -echo -echo "==> Starting tests with value log not mmapped..." -go test -timeout=25m -v -race github.com/dgraph-io/badger/v2 --vlog_mmap=false - diff --git a/vendor/github.com/dgraph-io/badger/v2/trie/trie.go b/vendor/github.com/dgraph-io/badger/v2/trie/trie.go deleted file mode 100644 index 98e4a9dc..00000000 --- a/vendor/github.com/dgraph-io/badger/v2/trie/trie.go +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Copyright 2019 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package trie - -type node struct { - children map[byte]*node - ids []uint64 -} - -func newNode() *node { - return &node{ - children: make(map[byte]*node), - ids: []uint64{}, - } -} - -// Trie is a prefix-matching data structure. -type Trie struct { - root *node -} - -// NewTrie returns a new Trie. -func NewTrie() *Trie { - return &Trie{ - root: newNode(), - } -} - -// Add adds the id to the trie for the given prefix path. -func (t *Trie) Add(prefix []byte, id uint64) { - node := t.root - for _, val := range prefix { - child, ok := node.children[val] - if !ok { - child = newNode() - node.children[val] = child - } - node = child - } - // We only need to add the id to the last node of the given prefix. - node.ids = append(node.ids, id) -} - -// Get returns the prefix-matched ids for the given key. -func (t *Trie) Get(key []byte) map[uint64]struct{} { - out := make(map[uint64]struct{}) - node := t.root - // If the root has ids, that means we have subscribers for the "nil/[]byte{}" - // prefix. Add them to the list. - if len(node.ids) > 0 { - for _, i := range node.ids { - out[i] = struct{}{} - } - } - for _, val := range key { - child, ok := node.children[val] - if !ok { - break - } - // We need the ids of all the nodes along the matching key path. - for _, id := range child.ids { - out[id] = struct{}{} - } - node = child - } - return out -} - -// Delete deletes the id if it exists in the given index path. -func (t *Trie) Delete(index []byte, id uint64) { - node := t.root - for _, val := range index { - child, ok := node.children[val] - if !ok { - return - } - node = child - } - // We're just removing the id, not the hanging path.
- out := node.ids[:0] - for _, val := range node.ids { - if val != id { - out = append(out, val) - } - } - for i := len(out); i < len(node.ids); i++ { - node.ids[i] = 0 // garbage collecting - } - node.ids = out -} diff --git a/vendor/github.com/dgraph-io/badger/v2/txn.go b/vendor/github.com/dgraph-io/badger/v2/txn.go deleted file mode 100644 index 8fc5381e..00000000 --- a/vendor/github.com/dgraph-io/badger/v2/txn.go +++ /dev/null @@ -1,823 +0,0 @@ -/* - * Copyright 2017 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package badger - -import ( - "bytes" - "context" - "encoding/hex" - "math" - "sort" - "strconv" - "sync" - "sync/atomic" - - "github.com/dgraph-io/badger/v2/y" - "github.com/dgraph-io/ristretto/z" - "github.com/pkg/errors" -) - -type oracle struct { - isManaged bool // Does not change value, so no locking required. - detectConflicts bool // Determines if the txns should be checked for conflicts. - - sync.Mutex // For nextTxnTs and commits. - // writeChLock lock is for ensuring that transactions go to the write - // channel in the same order as their commit timestamps. - writeChLock sync.Mutex - nextTxnTs uint64 - - // Used to block NewTransaction, so all previous commits are visible to a new read. - txnMark *y.WaterMark - - // Either of these is used to determine which versions can be permanently - // discarded during compaction. - discardTs uint64 // Used by ManagedDB. - readMark *y.WaterMark // Used by DB. - - // committedTxns contains all committed writes (contains fingerprints - // of keys written and their latest commit counter). - committedTxns []committedTxn - lastCleanupTs uint64 - - // closer is used to stop watermarks. - closer *y.Closer -} - -type committedTxn struct { - ts uint64 - // ConflictKeys Keeps track of the entries written at timestamp ts. - conflictKeys map[uint64]struct{} -} - -func newOracle(opt Options) *oracle { - orc := &oracle{ - isManaged: opt.managedTxns, - detectConflicts: opt.DetectConflicts, - // We're not initializing nextTxnTs and readOnlyTs. It would be done after replay in Open. - // - // WaterMarks must be 64-bit aligned for atomic package, hence we must use pointers here. - // See https://golang.org/pkg/sync/atomic/#pkg-note-BUG. - readMark: &y.WaterMark{Name: "badger.PendingReads"}, - txnMark: &y.WaterMark{Name: "badger.TxnTimestamp"}, - closer: y.NewCloser(2), - } - orc.readMark.Init(orc.closer) - orc.txnMark.Init(orc.closer) - return orc -} - -func (o *oracle) Stop() { - o.closer.SignalAndWait() -} - -func (o *oracle) readTs() uint64 { - if o.isManaged { - panic("ReadTs should not be retrieved for managed DB") - } - - var readTs uint64 - o.Lock() - readTs = o.nextTxnTs - 1 - o.readMark.Begin(readTs) - o.Unlock() - - // Wait for all txns which have no conflicts, have been assigned a commit - // timestamp and are going through the write to value log and LSM tree - // process. Not waiting here could mean that some txns which have been - // committed would not be read. 
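Trie.Delete, completed just above the txn.go hunk, relies on a Go idiom worth noting: filter a slice in place by re-slicing it to zero length, then zero the abandoned tail so stale values don't linger in the backing array (for pointer element types this also releases references for the GC). Standalone:

    package main

    import "fmt"

    // removeID filters one value out of a slice in place, reusing the backing
    // array and zeroing the abandoned tail, the same idiom Trie.Delete uses
    // on node.ids.
    func removeID(ids []uint64, id uint64) []uint64 {
        out := ids[:0]
        for _, v := range ids {
            if v != id {
                out = append(out, v)
            }
        }
        for i := len(out); i < len(ids); i++ {
            ids[i] = 0 // clear the slots the filtered slice no longer covers
        }
        return out
    }

    func main() {
        ids := []uint64{4, 7, 4, 9}
        fmt.Println(removeID(ids, 4)) // [7 9]
    }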
- y.Check(o.txnMark.WaitForMark(context.Background(), readTs)) - return readTs -} - -func (o *oracle) nextTs() uint64 { - o.Lock() - defer o.Unlock() - return o.nextTxnTs -} - -func (o *oracle) incrementNextTs() { - o.Lock() - defer o.Unlock() - o.nextTxnTs++ -} - -// Any deleted or invalid versions at or below ts would be discarded during -// compaction to reclaim disk space in LSM tree and thence value log. -func (o *oracle) setDiscardTs(ts uint64) { - o.Lock() - defer o.Unlock() - o.discardTs = ts - o.cleanupCommittedTransactions() -} - -func (o *oracle) discardAtOrBelow() uint64 { - if o.isManaged { - o.Lock() - defer o.Unlock() - return o.discardTs - } - return o.readMark.DoneUntil() -} - -// hasConflict must be called while having a lock. -func (o *oracle) hasConflict(txn *Txn) bool { - if len(txn.reads) == 0 { - return false - } - for _, committedTxn := range o.committedTxns { - // If the committedTxn.ts is less than txn.readTs that implies that the - // committedTxn finished before the current transaction started. - // We don't need to check for conflict in that case. - // This change assumes linearizability. Lack of linearizability could - // cause the read ts of a new txn to be lower than the commit ts of - // a txn before it (@mrjn). - if committedTxn.ts <= txn.readTs { - continue - } - - for _, ro := range txn.reads { - if _, has := committedTxn.conflictKeys[ro]; has { - return true - } - } - } - - return false -} - -func (o *oracle) newCommitTs(txn *Txn) uint64 { - o.Lock() - defer o.Unlock() - - if o.hasConflict(txn) { - return 0 - } - - var ts uint64 - if !o.isManaged { - o.doneRead(txn) - o.cleanupCommittedTransactions() - - // This is the general case, when user doesn't specify the read and commit ts. - ts = o.nextTxnTs - o.nextTxnTs++ - o.txnMark.Begin(ts) - - } else { - // If commitTs is set, use it instead. - ts = txn.commitTs - } - - y.AssertTrue(ts >= o.lastCleanupTs) - - if o.detectConflicts { - // We should ensure that txns are not added to o.committedTxns slice when - // conflict detection is disabled otherwise this slice would keep growing. - o.committedTxns = append(o.committedTxns, committedTxn{ - ts: ts, - conflictKeys: txn.conflictKeys, - }) - } - - return ts -} - -func (o *oracle) doneRead(txn *Txn) { - if !txn.doneRead { - txn.doneRead = true - o.readMark.Done(txn.readTs) - } -} - -func (o *oracle) cleanupCommittedTransactions() { // Must be called under o.Lock - if !o.detectConflicts { - // When detectConflicts is set to false, we do not store any - // committedTxns and so there's nothing to clean up. - return - } - // Same logic as discardAtOrBelow but unlocked - var maxReadTs uint64 - if o.isManaged { - maxReadTs = o.discardTs - } else { - maxReadTs = o.readMark.DoneUntil() - } - - y.AssertTrue(maxReadTs >= o.lastCleanupTs) - - // do not run clean up if the maxReadTs (read timestamp of the - // oldest transaction that is still in flight) has not increased - if maxReadTs == o.lastCleanupTs { - return - } - o.lastCleanupTs = maxReadTs - - tmp := o.committedTxns[:0] - for _, txn := range o.committedTxns { - if txn.ts <= maxReadTs { - continue - } - tmp = append(tmp, txn) - } - o.committedTxns = tmp -} - -func (o *oracle) doneCommit(cts uint64) { - if o.isManaged { - // No need to update anything. - return - } - o.txnMark.Done(cts) -} - -// Txn represents a Badger transaction. -type Txn struct { - readTs uint64 - commitTs uint64 - - update bool // update is used to conditionally keep track of reads. - reads []uint64 // contains fingerprints of keys read. 
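hasConflict above is the core of the snapshot-isolation check: a transaction must abort if any transaction that committed after its readTs wrote a key it read. The same test reduced to fingerprints and a slice of committed write sets; the committed type and sample timestamps are illustrative:

    package main

    import "fmt"

    type committed struct {
        ts     uint64
        writes map[uint64]struct{} // fingerprints of keys written at ts
    }

    // hasConflict reports whether a transaction that started reading at readTs
    // and read the given key fingerprints overlaps a later committer's write
    // set, the check oracle.hasConflict performs above.
    func hasConflict(readTs uint64, reads []uint64, log []committed) bool {
        for _, c := range log {
            if c.ts <= readTs {
                continue // finished before we started; cannot conflict
            }
            for _, r := range reads {
                if _, ok := c.writes[r]; ok {
                    return true
                }
            }
        }
        return false
    }

    func main() {
        log := []committed{{ts: 5, writes: map[uint64]struct{}{42: {}}}}
        fmt.Println(hasConflict(4, []uint64{42}, log)) // true: committed at 5 > readTs 4
        fmt.Println(hasConflict(5, []uint64{42}, log)) // false: already visible to us
    }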
- // contains fingerprints of keys written. This is used for conflict detection. - conflictKeys map[uint64]struct{} - readsLock sync.Mutex // guards the reads slice. See addReadKey. - - pendingWrites map[string]*Entry // cache stores any writes done by txn. - duplicateWrites []*Entry // Used in managed mode to store duplicate entries. - - db *DB - discarded bool - doneRead bool - - size int64 - count int64 - numIterators int32 -} - -type pendingWritesIterator struct { - entries []*Entry - nextIdx int - readTs uint64 - reversed bool -} - -func (pi *pendingWritesIterator) Next() { - pi.nextIdx++ -} - -func (pi *pendingWritesIterator) Rewind() { - pi.nextIdx = 0 -} - -func (pi *pendingWritesIterator) Seek(key []byte) { - key = y.ParseKey(key) - pi.nextIdx = sort.Search(len(pi.entries), func(idx int) bool { - cmp := bytes.Compare(pi.entries[idx].Key, key) - if !pi.reversed { - return cmp >= 0 - } - return cmp <= 0 - }) -} - -func (pi *pendingWritesIterator) Key() []byte { - y.AssertTrue(pi.Valid()) - entry := pi.entries[pi.nextIdx] - return y.KeyWithTs(entry.Key, pi.readTs) -} - -func (pi *pendingWritesIterator) Value() y.ValueStruct { - y.AssertTrue(pi.Valid()) - entry := pi.entries[pi.nextIdx] - return y.ValueStruct{ - Value: entry.Value, - Meta: entry.meta, - UserMeta: entry.UserMeta, - ExpiresAt: entry.ExpiresAt, - Version: pi.readTs, - } -} - -func (pi *pendingWritesIterator) Valid() bool { - return pi.nextIdx < len(pi.entries) -} - -func (pi *pendingWritesIterator) Close() error { - return nil -} - -func (txn *Txn) newPendingWritesIterator(reversed bool) *pendingWritesIterator { - if !txn.update || len(txn.pendingWrites) == 0 { - return nil - } - entries := make([]*Entry, 0, len(txn.pendingWrites)) - for _, e := range txn.pendingWrites { - entries = append(entries, e) - } - // Number of pending writes per transaction shouldn't be too big in general. - sort.Slice(entries, func(i, j int) bool { - cmp := bytes.Compare(entries[i].Key, entries[j].Key) - if !reversed { - return cmp < 0 - } - return cmp > 0 - }) - return &pendingWritesIterator{ - readTs: txn.readTs, - entries: entries, - reversed: reversed, - } -} - -func (txn *Txn) checkSize(e *Entry) error { - count := txn.count + 1 - // Extra bytes for the version in key. - size := txn.size + int64(e.estimateSize(txn.db.opt.ValueThreshold)) + 10 - if count >= txn.db.opt.maxBatchCount || size >= txn.db.opt.maxBatchSize { - return ErrTxnTooBig - } - txn.count, txn.size = count, size - return nil -} - -func exceedsSize(prefix string, max int64, key []byte) error { - return errors.Errorf("%s with size %d exceeded %d limit. %s:\n%s", - prefix, len(key), max, prefix, hex.Dump(key[:1<<10])) -} - -func (txn *Txn) modify(e *Entry) error { - const maxKeySize = 65000 - - switch { - case !txn.update: - return ErrReadOnlyTxn - case txn.discarded: - return ErrDiscardedTxn - case len(e.Key) == 0: - return ErrEmptyKey - case bytes.HasPrefix(e.Key, badgerPrefix): - return ErrInvalidKey - case len(e.Key) > maxKeySize: - // Key length can't be more than uint16, as determined by table::header. To - // keep things safe and allow badger move prefix and a timestamp suffix, let's - // cut it down to 65000, instead of using 65536. 
- return exceedsSize("Key", maxKeySize, e.Key) - case int64(len(e.Value)) > txn.db.opt.ValueLogFileSize: - return exceedsSize("Value", txn.db.opt.ValueLogFileSize, e.Value) - case txn.db.opt.InMemory && len(e.Value) > txn.db.opt.ValueThreshold: - return exceedsSize("Value", int64(txn.db.opt.ValueThreshold), e.Value) - } - - if err := txn.checkSize(e); err != nil { - return err - } - - // The txn.conflictKeys is used for conflict detection. If conflict detection - // is disabled, we don't need to store key hashes in this map. - if txn.db.opt.DetectConflicts { - fp := z.MemHash(e.Key) // Avoid dealing with byte arrays. - txn.conflictKeys[fp] = struct{}{} - } - // If a duplicate entry was inserted in managed mode, move it to the duplicate writes slice. - // Add the entry to duplicateWrites only if both the entries have different versions. For - // same versions, we will overwrite the existing entry. - if oldEntry, ok := txn.pendingWrites[string(e.Key)]; ok && oldEntry.version != e.version { - txn.duplicateWrites = append(txn.duplicateWrites, oldEntry) - } - txn.pendingWrites[string(e.Key)] = e - return nil -} - -// Set adds a key-value pair to the database. -// It will return ErrReadOnlyTxn if update flag was set to false when creating the transaction. -// -// The current transaction keeps a reference to the key and val byte slice -// arguments. Users must not modify key and val until the end of the transaction. -func (txn *Txn) Set(key, val []byte) error { - return txn.SetEntry(NewEntry(key, val)) -} - -// SetEntry takes an Entry struct and adds the key-value pair in the struct, -// along with other metadata to the database. -// -// The current transaction keeps a reference to the entry passed in argument. -// Users must not modify the entry until the end of the transaction. -func (txn *Txn) SetEntry(e *Entry) error { - return txn.modify(e) -} - -// Delete deletes a key. -// -// This is done by adding a delete marker for the key at commit timestamp. Any -// reads happening before this timestamp would be unaffected. Any reads after -// this commit would see the deletion. -// -// The current transaction keeps a reference to the key byte slice argument. -// Users must not modify the key until the end of the transaction. -func (txn *Txn) Delete(key []byte) error { - e := &Entry{ - Key: key, - meta: bitDelete, - } - return txn.modify(e) -} - -// Get looks for key and returns corresponding Item. -// If key is not found, ErrKeyNotFound is returned. -func (txn *Txn) Get(key []byte) (item *Item, rerr error) { - if len(key) == 0 { - return nil, ErrEmptyKey - } else if txn.discarded { - return nil, ErrDiscardedTxn - } - - item = new(Item) - if txn.update { - if e, has := txn.pendingWrites[string(key)]; has && bytes.Equal(key, e.Key) { - if isDeletedOrExpired(e.meta, e.ExpiresAt) { - return nil, ErrKeyNotFound - } - // Fulfill from cache. - item.meta = e.meta - item.val = e.Value - item.userMeta = e.UserMeta - item.key = key - item.status = prefetched - item.version = txn.readTs - item.expiresAt = e.ExpiresAt - // We probably don't need to set db on item here. - return item, nil - } - // Only track reads if this is update txn. No need to track read if txn serviced it - // internally. 
- txn.addReadKey(key) - } - - seek := y.KeyWithTs(key, txn.readTs) - vs, err := txn.db.get(seek) - if err != nil { - return nil, errors.Wrapf(err, "DB::Get key: %q", key) - } - if vs.Value == nil && vs.Meta == 0 { - return nil, ErrKeyNotFound - } - if isDeletedOrExpired(vs.Meta, vs.ExpiresAt) { - return nil, ErrKeyNotFound - } - - item.key = key - item.version = vs.Version - item.meta = vs.Meta - item.userMeta = vs.UserMeta - item.db = txn.db - item.vptr = y.SafeCopy(item.vptr, vs.Value) - item.txn = txn - item.expiresAt = vs.ExpiresAt - return item, nil -} - -func (txn *Txn) addReadKey(key []byte) { - if txn.update { - fp := z.MemHash(key) - - // Because of the possibility of multiple iterators it is now possible - // for multiple threads within a read-write transaction to read keys at - // the same time. The reads slice is not currently thread-safe and - // needs to be locked whenever we mark a key as read. - txn.readsLock.Lock() - txn.reads = append(txn.reads, fp) - txn.readsLock.Unlock() - } -} - -// Discard discards a created transaction. This method is very important and must be called. Commit -// method calls this internally, however, calling this multiple times doesn't cause any issues. So, -// this can safely be called via a defer right when transaction is created. -// -// NOTE: If any operations are run on a discarded transaction, ErrDiscardedTxn is returned. -func (txn *Txn) Discard() { - if txn.discarded { // Avoid a re-run. - return - } - if atomic.LoadInt32(&txn.numIterators) > 0 { - panic("Unclosed iterator at time of Txn.Discard.") - } - txn.discarded = true - if !txn.db.orc.isManaged { - txn.db.orc.doneRead(txn) - } -} - -func (txn *Txn) commitAndSend() (func() error, error) { - orc := txn.db.orc - // Ensure that the order in which we get the commit timestamp is the same as - // the order in which we push these updates to the write channel. So, we - // acquire a writeChLock before getting a commit timestamp, and only release - // it after pushing the entries to it. - orc.writeChLock.Lock() - defer orc.writeChLock.Unlock() - - commitTs := orc.newCommitTs(txn) - // The commitTs can be zero if the transaction is running in managed mode. - // Individual entries might have their own timestamps. - if commitTs == 0 && !txn.db.opt.managedTxns { - return nil, ErrConflict - } - - keepTogether := true - setVersion := func(e *Entry) { - if e.version == 0 { - e.version = commitTs - } else { - keepTogether = false - } - } - for _, e := range txn.pendingWrites { - setVersion(e) - } - // The duplicateWrites slice will be non-empty only if there are duplicate - // entries with different versions. - for _, e := range txn.duplicateWrites { - setVersion(e) - } - - entries := make([]*Entry, 0, len(txn.pendingWrites)+len(txn.duplicateWrites)+1) - - processEntry := func(e *Entry) { - // Suffix the keys with commit ts, so the key versions are sorted in - // descending order of commit timestamp. - e.Key = y.KeyWithTs(e.Key, e.version) - // Add bitTxn only if these entries are part of a transaction. We - // support SetEntryAt(..) in managed mode which means a single - // transaction can have entries with different timestamps. If entries - // in a single transaction have different timestamps, we don't add the - // transaction markers. - if keepTogether { - e.meta |= bitTxn - } - entries = append(entries, e) - } - - // The following debug information is what led to determining the cause of - // bank txn violation bug, and it took a whole bunch of effort to narrow it - // down to here. 
So, keep this around for at least a couple of months. - // var b strings.Builder - // fmt.Fprintf(&b, "Read: %d. Commit: %d. reads: %v. writes: %v. Keys: ", - // txn.readTs, commitTs, txn.reads, txn.conflictKeys) - for _, e := range txn.pendingWrites { - processEntry(e) - } - for _, e := range txn.duplicateWrites { - processEntry(e) - } - - if keepTogether { - // CommitTs should not be zero if we're inserting transaction markers. - y.AssertTrue(commitTs != 0) - e := &Entry{ - Key: y.KeyWithTs(txnKey, commitTs), - Value: []byte(strconv.FormatUint(commitTs, 10)), - meta: bitFinTxn, - } - entries = append(entries, e) - } - - req, err := txn.db.sendToWriteCh(entries) - if err != nil { - orc.doneCommit(commitTs) - return nil, err - } - ret := func() error { - err := req.Wait() - // Wait before marking commitTs as done. - // We can't defer doneCommit above, because it is being called from a - // callback here. - orc.doneCommit(commitTs) - return err - } - return ret, nil -} - -func (txn *Txn) commitPrecheck() error { - if txn.discarded { - return errors.New("Trying to commit a discarded txn") - } - keepTogether := true - for _, e := range txn.pendingWrites { - if e.version != 0 { - keepTogether = false - } - } - - // If keepTogether is True, it implies transaction markers will be added. - // In that case, commitTs should not be never be zero. This might happen if - // someone uses txn.Commit instead of txn.CommitAt in managed mode. This - // should happen only in managed mode. In normal mode, keepTogether will - // always be true. - if keepTogether && txn.db.opt.managedTxns && txn.commitTs == 0 { - return errors.New("CommitTs cannot be zero. Please use commitAt instead") - } - return nil -} - -// Commit commits the transaction, following these steps: -// -// 1. If there are no writes, return immediately. -// -// 2. Check if read rows were updated since txn started. If so, return ErrConflict. -// -// 3. If no conflict, generate a commit timestamp and update written rows' commit ts. -// -// 4. Batch up all writes, write them to value log and LSM tree. -// -// 5. If callback is provided, Badger will return immediately after checking -// for conflicts. Writes to the database will happen in the background. If -// there is a conflict, an error will be returned and the callback will not -// run. If there are no conflicts, the callback will be called in the -// background upon successful completion of writes or any error during write. -// -// If error is nil, the transaction is successfully committed. In case of a non-nil error, the LSM -// tree won't be updated, so there's no need for any rollback. -func (txn *Txn) Commit() error { - // txn.conflictKeys can be zero if conflict detection is turned off. So we - // should check txn.pendingWrites. - if len(txn.pendingWrites) == 0 { - return nil // Nothing to do. - } - // Precheck before discarding txn. - if err := txn.commitPrecheck(); err != nil { - return err - } - defer txn.Discard() - - txnCb, err := txn.commitAndSend() - if err != nil { - return err - } - // If batchSet failed, LSM would not have been updated. So, no need to rollback anything. - - // TODO: What if some of the txns successfully make it to value log, but others fail. - // Nothing gets updated to LSM, until a restart happens. 
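commitAndSend suffixes each key with its commit timestamp so that all versions of a key sort together, newest first. One way to get that ordering out of plain bytewise comparison is to append the timestamp inverted. This sketch demonstrates the property only; it does not claim to reproduce y.KeyWithTs byte for byte:

    package main

    import (
        "bytes"
        "encoding/binary"
        "fmt"
        "math"
        "sort"
    )

    // keyWithTs appends an inverted big-endian timestamp so that, under plain
    // bytewise comparison, newer versions of a key sort before older ones.
    func keyWithTs(key []byte, ts uint64) []byte {
        out := make([]byte, len(key)+8)
        copy(out, key)
        binary.BigEndian.PutUint64(out[len(key):], math.MaxUint64-ts)
        return out
    }

    func main() {
        keys := [][]byte{
            keyWithTs([]byte("k"), 3),
            keyWithTs([]byte("k"), 7),
            keyWithTs([]byte("k"), 5),
        }
        sort.Slice(keys, func(i, j int) bool { return bytes.Compare(keys[i], keys[j]) < 0 })
        for _, k := range keys {
            fmt.Println(math.MaxUint64 - binary.BigEndian.Uint64(k[1:])) // 7, 5, 3
        }
    }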
- return txnCb() -} - -type txnCb struct { - commit func() error - user func(error) - err error -} - -func runTxnCallback(cb *txnCb) { - switch { - case cb == nil: - panic("txn callback is nil") - case cb.user == nil: - panic("Must have caught a nil callback for txn.CommitWith") - case cb.err != nil: - cb.user(cb.err) - case cb.commit != nil: - err := cb.commit() - cb.user(err) - default: - cb.user(nil) - } -} - -// CommitWith acts like Commit, but takes a callback, which gets run via a -// goroutine to avoid blocking this function. The callback is guaranteed to run, -// so it is safe to increment sync.WaitGroup before calling CommitWith, and -// decrementing it in the callback; to block until all callbacks are run. -func (txn *Txn) CommitWith(cb func(error)) { - if cb == nil { - panic("Nil callback provided to CommitWith") - } - - if len(txn.pendingWrites) == 0 { - // Do not run these callbacks from here, because the CommitWith and the - // callback might be acquiring the same locks. Instead run the callback - // from another goroutine. - go runTxnCallback(&txnCb{user: cb, err: nil}) - return - } - - // Precheck before discarding txn. - if err := txn.commitPrecheck(); err != nil { - cb(err) - return - } - - defer txn.Discard() - - commitCb, err := txn.commitAndSend() - if err != nil { - go runTxnCallback(&txnCb{user: cb, err: err}) - return - } - - go runTxnCallback(&txnCb{user: cb, commit: commitCb}) -} - -// ReadTs returns the read timestamp of the transaction. -func (txn *Txn) ReadTs() uint64 { - return txn.readTs -} - -// NewTransaction creates a new transaction. Badger supports concurrent execution of transactions, -// providing serializable snapshot isolation, avoiding write skews. Badger achieves this by tracking -// the keys read and at Commit time, ensuring that these read keys weren't concurrently modified by -// another transaction. -// -// For read-only transactions, set update to false. In this mode, we don't track the rows read for -// any changes. Thus, any long running iterations done in this mode wouldn't pay this overhead. -// -// Running transactions concurrently is OK. However, a transaction itself isn't thread safe, and -// should only be run serially. It doesn't matter if a transaction is created by one goroutine and -// passed down to other, as long as the Txn APIs are called serially. -// -// When you create a new transaction, it is absolutely essential to call -// Discard(). This should be done irrespective of what the update param is set -// to. Commit API internally runs Discard, but running it twice wouldn't cause -// any issues. -// -// txn := db.NewTransaction(false) -// defer txn.Discard() -// // Call various APIs. -func (db *DB) NewTransaction(update bool) *Txn { - return db.newTransaction(update, false) -} - -func (db *DB) newTransaction(update, isManaged bool) *Txn { - if db.opt.ReadOnly && update { - // DB is read-only, force read-only transaction. - update = false - } - - txn := &Txn{ - update: update, - db: db, - count: 1, // One extra entry for BitFin. - size: int64(len(txnKey) + 10), // Some buffer for the extra entry. - } - if update { - if db.opt.DetectConflicts { - txn.conflictKeys = make(map[uint64]struct{}) - } - txn.pendingWrites = make(map[string]*Entry) - } - if !isManaged { - txn.readTs = db.orc.readTs() - } - return txn -} - -// View executes a function creating and managing a read-only transaction for the user. Error -// returned by the function is relayed by the View method. 
-// If View is used with managed transactions, it would assume a read timestamp of MaxUint64. -func (db *DB) View(fn func(txn *Txn) error) error { - if db.IsClosed() { - return ErrDBClosed - } - var txn *Txn - if db.opt.managedTxns { - txn = db.NewTransactionAt(math.MaxUint64, false) - } else { - txn = db.NewTransaction(false) - } - defer txn.Discard() - - return fn(txn) -} - -// Update executes a function, creating and managing a read-write transaction -// for the user. Error returned by the function is relayed by the Update method. -// Update cannot be used with managed transactions. -func (db *DB) Update(fn func(txn *Txn) error) error { - if db.IsClosed() { - return ErrDBClosed - } - if db.opt.managedTxns { - panic("Update can only be used with managedDB=false.") - } - txn := db.NewTransaction(true) - defer txn.Discard() - - if err := fn(txn); err != nil { - return err - } - - return txn.Commit() -} diff --git a/vendor/github.com/dgraph-io/badger/v2/util.go b/vendor/github.com/dgraph-io/badger/v2/util.go deleted file mode 100644 index ccf7939f..00000000 --- a/vendor/github.com/dgraph-io/badger/v2/util.go +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Copyright 2017 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package badger - -import ( - "encoding/hex" - "io/ioutil" - "math/rand" - "sync/atomic" - "time" - - "github.com/dgraph-io/badger/v2/table" - "github.com/dgraph-io/badger/v2/y" - "github.com/pkg/errors" -) - -func (s *levelsController) validate() error { - for _, l := range s.levels { - if err := l.validate(); err != nil { - return errors.Wrap(err, "Levels Controller") - } - } - return nil -} - -// Check does some sanity check on one level of data or in-memory index. -func (s *levelHandler) validate() error { - if s.level == 0 { - return nil - } - - s.RLock() - defer s.RUnlock() - numTables := len(s.tables) - for j := 1; j < numTables; j++ { - if j >= len(s.tables) { - return errors.Errorf("Level %d, j=%d numTables=%d", s.level, j, numTables) - } - - if y.CompareKeys(s.tables[j-1].Biggest(), s.tables[j].Smallest()) >= 0 { - return errors.Errorf( - "Inter: Biggest(j-1) \n%s\n vs Smallest(j): \n%s\n: level=%d j=%d numTables=%d", - hex.Dump(s.tables[j-1].Biggest()), hex.Dump(s.tables[j].Smallest()), - s.level, j, numTables) - } - - if y.CompareKeys(s.tables[j].Smallest(), s.tables[j].Biggest()) > 0 { - return errors.Errorf( - "Intra: \n%s\n vs \n%s\n: level=%d j=%d numTables=%d", - hex.Dump(s.tables[j].Smallest()), hex.Dump(s.tables[j].Biggest()), s.level, j, numTables) - } - } - return nil -} - -// func (s *KV) debugPrintMore() { s.lc.debugPrintMore() } - -// // debugPrintMore shows key ranges of each level. 
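For orientation, View and Update above are the two public entry points most callers use. A typical round trip through the v2 API looks roughly like this; the path and key are illustrative:

    package main

    import (
        "fmt"
        "log"

        badger "github.com/dgraph-io/badger/v2"
    )

    func main() {
        db, err := badger.Open(badger.DefaultOptions("/tmp/badger-demo"))
        if err != nil {
            log.Fatal(err)
        }
        defer db.Close()

        // Update wraps a read-write transaction and commits it on success.
        if err := db.Update(func(txn *badger.Txn) error {
            return txn.Set([]byte("answer"), []byte("42"))
        }); err != nil {
            log.Fatal(err)
        }

        // View wraps a read-only transaction; no commit, only Discard.
        if err := db.View(func(txn *badger.Txn) error {
            item, err := txn.Get([]byte("answer"))
            if err != nil {
                return err
            }
            return item.Value(func(val []byte) error {
                fmt.Printf("answer = %s\n", val)
                return nil
            })
        }); err != nil {
            log.Fatal(err)
        }
    }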
-// func (s *levelsController) debugPrintMore() { -// s.Lock() -// defer s.Unlock() -// for i := 0; i < s.kv.opt.MaxLevels; i++ { -// s.levels[i].debugPrintMore() -// } -// } - -// func (s *levelHandler) debugPrintMore() { -// s.RLock() -// defer s.RUnlock() -// s.elog.Printf("Level %d:", s.level) -// for _, t := range s.tables { -// y.Printf(" [%s, %s]", t.Smallest(), t.Biggest()) -// } -// y.Printf("\n") -// } - -// reserveFileID reserves a unique file id. -func (s *levelsController) reserveFileID() uint64 { - id := atomic.AddUint64(&s.nextFileID, 1) - return id - 1 -} - -func getIDMap(dir string) map[uint64]struct{} { - fileInfos, err := ioutil.ReadDir(dir) - y.Check(err) - idMap := make(map[uint64]struct{}) - for _, info := range fileInfos { - if info.IsDir() { - continue - } - fileID, ok := table.ParseFileID(info.Name()) - if !ok { - continue - } - idMap[fileID] = struct{}{} - } - return idMap -} - -func init() { - rand.Seed(time.Now().UnixNano()) -} diff --git a/vendor/github.com/dgraph-io/badger/v2/value.go b/vendor/github.com/dgraph-io/badger/v2/value.go deleted file mode 100644 index 08653e1c..00000000 --- a/vendor/github.com/dgraph-io/badger/v2/value.go +++ /dev/null @@ -1,2022 +0,0 @@ -/* - * Copyright 2017 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package badger - -import ( - "bufio" - "bytes" - "crypto/aes" - cryptorand "crypto/rand" - "encoding/binary" - "encoding/json" - "fmt" - "hash" - "hash/crc32" - "io" - "io/ioutil" - "math" - "math/rand" - "os" - "sort" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" - - "github.com/dgraph-io/badger/v2/options" - "github.com/dgraph-io/badger/v2/pb" - "github.com/dgraph-io/badger/v2/y" - "github.com/pkg/errors" - "golang.org/x/net/trace" -) - -// maxVlogFileSize is the maximum size of the vlog file which can be created. Vlog Offset is of -// uint32, so limiting at max uint32. -var maxVlogFileSize uint32 = math.MaxUint32 - -// Values have their first byte being byteData or byteDelete. This helps us distinguish between -// a key that has never been seen and a key that has been explicitly deleted. -const ( - bitDelete byte = 1 << 0 // Set if the key has been deleted. - bitValuePointer byte = 1 << 1 // Set if the value is NOT stored directly next to key. - bitDiscardEarlierVersions byte = 1 << 2 // Set if earlier versions can be discarded. - // Set if item shouldn't be discarded via compactions (used by merge operator) - bitMergeEntry byte = 1 << 3 - // The MSB 2 bits are for transactions. - bitTxn byte = 1 << 6 // Set if the entry is part of a txn. - bitFinTxn byte = 1 << 7 // Set if the entry is to indicate end of txn in value log. - - mi int64 = 1 << 20 - - // The number of updates after which discard map should be flushed into badger. - discardStatsFlushThreshold = 100 - - // size of vlog header. 
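The const block above packs per-entry state into single-bit flags on a meta byte. Checking and combining them is plain masking; a trimmed sketch with three of the flags:

    package main

    import "fmt"

    // Single-bit flags on an entry's meta byte, as in the const block above.
    const (
        bitDelete       byte = 1 << 0
        bitValuePointer byte = 1 << 1
        bitTxn          byte = 1 << 6
    )

    func main() {
        meta := bitDelete | bitTxn // a deletion written inside a txn

        fmt.Println(meta&bitDelete != 0)       // true
        fmt.Println(meta&bitValuePointer != 0) // false
    }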
- // +----------------+------------------+ - // | keyID(8 bytes) | baseIV(12 bytes)| - // +----------------+------------------+ - vlogHeaderSize = 20 -) - -type logFile struct { - path string - // This is a lock on the log file. It guards the fd’s value, the file’s - // existence and the file’s memory map. - // - // Use shared ownership when reading/writing the file or memory map, use - // exclusive ownership to open/close the descriptor, unmap or remove the file. - lock sync.RWMutex - fd *os.File - fid uint32 - fmap []byte - size uint32 - loadingMode options.FileLoadingMode - dataKey *pb.DataKey - baseIV []byte - registry *KeyRegistry -} - -// encodeEntry will encode entry to the buf -// layout of entry -// +--------+-----+-------+-------+ -// | header | key | value | crc32 | -// +--------+-----+-------+-------+ -func (lf *logFile) encodeEntry(e *Entry, buf *bytes.Buffer, offset uint32) (int, error) { - h := header{ - klen: uint32(len(e.Key)), - vlen: uint32(len(e.Value)), - expiresAt: e.ExpiresAt, - meta: e.meta, - userMeta: e.UserMeta, - } - - // encode header. - var headerEnc [maxHeaderSize]byte - sz := h.Encode(headerEnc[:]) - y.Check2(buf.Write(headerEnc[:sz])) - // write hash. - hash := crc32.New(y.CastagnoliCrcTable) - y.Check2(hash.Write(headerEnc[:sz])) - // we'll encrypt only key and value. - if lf.encryptionEnabled() { - // TODO: no need to allocate the bytes. we can calculate the encrypted buf one by one - // since we're using ctr mode of AES encryption. Ordering won't changed. Need some - // refactoring in XORBlock which will work like stream cipher. - eBuf := make([]byte, 0, len(e.Key)+len(e.Value)) - eBuf = append(eBuf, e.Key...) - eBuf = append(eBuf, e.Value...) - var err error - eBuf, err = y.XORBlock(eBuf, lf.dataKey.Data, lf.generateIV(offset)) - if err != nil { - return 0, y.Wrapf(err, "Error while encoding entry for vlog.") - } - // write encrypted buf. - y.Check2(buf.Write(eBuf)) - // write the hash. - y.Check2(hash.Write(eBuf)) - } else { - // Encryption is disabled so writing directly to the buffer. - // write key. - y.Check2(buf.Write(e.Key)) - // write key hash. - y.Check2(hash.Write(e.Key)) - // write value. - y.Check2(buf.Write(e.Value)) - // write value hash. - y.Check2(hash.Write(e.Value)) - } - // write crc32 hash. - var crcBuf [crc32.Size]byte - binary.BigEndian.PutUint32(crcBuf[:], hash.Sum32()) - y.Check2(buf.Write(crcBuf[:])) - // return encoded length. - return len(headerEnc[:sz]) + len(e.Key) + len(e.Value) + len(crcBuf), nil -} - -func (lf *logFile) decodeEntry(buf []byte, offset uint32) (*Entry, error) { - var h header - hlen := h.Decode(buf) - kv := buf[hlen:] - if lf.encryptionEnabled() { - var err error - // No need to worry about mmap. because, XORBlock allocates a byte array to do the - // xor. So, the given slice is not being mutated. - if kv, err = lf.decryptKV(kv, offset); err != nil { - return nil, err - } - } - e := &Entry{ - meta: h.meta, - UserMeta: h.userMeta, - ExpiresAt: h.expiresAt, - offset: offset, - Key: kv[:h.klen], - Value: kv[h.klen : h.klen+h.vlen], - } - return e, nil -} - -func (lf *logFile) decryptKV(buf []byte, offset uint32) ([]byte, error) { - return y.XORBlock(buf, lf.dataKey.Data, lf.generateIV(offset)) -} - -// KeyID returns datakey's ID. -func (lf *logFile) keyID() uint64 { - if lf.dataKey == nil { - // If there is no datakey, then we'll return 0. Which means no encryption. 
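encodeEntry above frames each record as header | key | value | crc32, feeding every section through a Castagnoli CRC as it is written. The same tee-into-a-hash framing with a simplified fixed-width header, no encryption, and illustrative names:

    package main

    import (
        "bytes"
        "encoding/binary"
        "fmt"
        "hash/crc32"
        "io"
    )

    var castagnoli = crc32.MakeTable(crc32.Castagnoli)

    // encodeFrame appends a simplified vlog-style record to buf: an 8-byte
    // header carrying the key and value lengths, then the key, the value,
    // and a CRC32 of everything before the checksum.
    func encodeFrame(buf *bytes.Buffer, key, val []byte) {
        h := crc32.New(castagnoli)
        w := io.MultiWriter(buf, h) // every byte written is also hashed

        var hdr [8]byte
        binary.BigEndian.PutUint32(hdr[:4], uint32(len(key)))
        binary.BigEndian.PutUint32(hdr[4:], uint32(len(val)))
        w.Write(hdr[:])
        w.Write(key)
        w.Write(val)

        var crc [crc32.Size]byte
        binary.BigEndian.PutUint32(crc[:], h.Sum32())
        buf.Write(crc[:]) // the checksum itself is not hashed
    }

    func main() {
        var buf bytes.Buffer
        encodeFrame(&buf, []byte("k"), []byte("v"))
        fmt.Printf("% x\n", buf.Bytes())
    }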
- return 0 - } - return lf.dataKey.KeyId -} - -func (lf *logFile) mmap(size int64) (err error) { - if lf.loadingMode != options.MemoryMap { - // Nothing to do - return nil - } - lf.fmap, err = y.Mmap(lf.fd, false, size) - if err == nil { - err = y.Madvise(lf.fmap, false) // Disable readahead - } - return err -} - -func (lf *logFile) encryptionEnabled() bool { - return lf.dataKey != nil -} - -func (lf *logFile) munmap() (err error) { - if lf.loadingMode != options.MemoryMap || len(lf.fmap) == 0 { - // Nothing to do - return nil - } - - if err := y.Munmap(lf.fmap); err != nil { - return errors.Wrapf(err, "Unable to munmap value log: %q", lf.path) - } - // This is important. We should set the map to nil because ummap - // system call doesn't change the length or capacity of the fmap slice. - lf.fmap = nil - return nil -} - -// Acquire lock on mmap/file if you are calling this -func (lf *logFile) read(p valuePointer, s *y.Slice) (buf []byte, err error) { - var nbr int64 - offset := p.Offset - if lf.loadingMode == options.FileIO { - buf = s.Resize(int(p.Len)) - var n int - n, err = lf.fd.ReadAt(buf, int64(offset)) - nbr = int64(n) - } else { - // Do not convert size to uint32, because the lf.fmap can be of size - // 4GB, which overflows the uint32 during conversion to make the size 0, - // causing the read to fail with ErrEOF. See issue #585. - size := int64(len(lf.fmap)) - valsz := p.Len - lfsz := atomic.LoadUint32(&lf.size) - if int64(offset) >= size || int64(offset+valsz) > size || - // Ensure that the read is within the file's actual size. It might be possible that - // the offset+valsz length is beyond the file's actual size. This could happen when - // dropAll and iterations are running simultaneously. - int64(offset+valsz) > int64(lfsz) { - err = y.ErrEOF - } else { - buf = lf.fmap[offset : offset+valsz] - nbr = int64(valsz) - } - } - y.NumReads.Add(1) - y.NumBytesRead.Add(nbr) - return buf, err -} - -// generateIV will generate IV by appending given offset with the base IV. -func (lf *logFile) generateIV(offset uint32) []byte { - iv := make([]byte, aes.BlockSize) - // baseIV is of 12 bytes. - y.AssertTrue(12 == copy(iv[:12], lf.baseIV)) - // remaining 4 bytes is obtained from offset. - binary.BigEndian.PutUint32(iv[12:], offset) - return iv -} - -func (lf *logFile) doneWriting(offset uint32) error { - // Sync before acquiring lock. (We call this from write() and thus know we have shared access - // to the fd.) - if err := lf.fd.Sync(); err != nil { - return errors.Wrapf(err, "Unable to sync value log: %q", lf.path) - } - - // Before we were acquiring a lock here on lf.lock, because we were invalidating the file - // descriptor due to reopening it as read-only. Now, we don't invalidate the fd, but unmap it, - // truncate it and remap it. That creates a window where we have segfaults because the mmap is - // no longer valid, while someone might be reading it. Therefore, we need a lock here again. - lf.lock.Lock() - defer lf.lock.Unlock() - - // Unmap file before we truncate it. Windows cannot truncate a file that is mmapped. - if err := lf.munmap(); err != nil { - return errors.Wrapf(err, "failed to munmap vlog file %s", lf.fd.Name()) - } - - // TODO: Confirm if we need to run a file sync after truncation. - // Truncation must run after unmapping, otherwise Windows would crap itself. - if err := lf.fd.Truncate(int64(offset)); err != nil { - return errors.Wrapf(err, "Unable to truncate file: %q", lf.path) - } - - // Reinitialize the log file. This will mmap the entire file. 
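The `generateIV` helper above derives a unique 16-byte AES-CTR IV per entry from the file's 12-byte base IV plus the entry's 4-byte offset, so no per-entry IV has to be stored. A runnable sketch pairing that derivation with the CTR XOR used for both encryption and decryption (along the lines of `y.XORBlock`); the key and base IV here are made up for illustration:

```go
package main

import (
	"bytes"
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"encoding/binary"
	"fmt"
)

// ivForOffset mirrors logFile.generateIV: 12 bytes of base IV plus the
// entry's vlog offset in the last 4 bytes, giving a per-entry CTR IV.
func ivForOffset(baseIV []byte, offset uint32) []byte {
	iv := make([]byte, aes.BlockSize) // 16 bytes
	copy(iv[:12], baseIV)
	binary.BigEndian.PutUint32(iv[12:], offset)
	return iv
}

// xorBlock is the symmetric CTR-mode XOR, in the spirit of y.XORBlock
// from the deleted y/encrypt.go.
func xorBlock(src, key, iv []byte) ([]byte, error) {
	block, err := aes.NewCipher(key)
	if err != nil {
		return nil, err
	}
	dst := make([]byte, len(src))
	cipher.NewCTR(block, iv).XORKeyStream(dst, src)
	return dst, nil
}

func main() {
	key := make([]byte, 32) // hypothetical data key
	baseIV := make([]byte, 12)
	rand.Read(key)
	rand.Read(baseIV)

	plain := []byte("key+value bytes of one entry")
	enc, _ := xorBlock(plain, key, ivForOffset(baseIV, 4096))
	dec, _ := xorBlock(enc, key, ivForOffset(baseIV, 4096))
	fmt.Println(bytes.Equal(plain, dec)) // true: CTR XOR is symmetric
}
```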
- if err := lf.init(); err != nil { - return errors.Wrapf(err, "failed to initialize file %s", lf.fd.Name()) - } - - // Previously we used to close the file after it was written and reopen it in read-only mode. - // We no longer open files in read-only mode. We keep all vlog files open in read-write mode. - return nil -} - -// You must hold lf.lock to sync() -func (lf *logFile) sync() error { - return lf.fd.Sync() -} - -var errStop = errors.New("Stop iteration") -var errTruncate = errors.New("Do truncate") -var errDeleteVlogFile = errors.New("Delete vlog file") - -type logEntry func(e Entry, vp valuePointer) error - -type safeRead struct { - k []byte - v []byte - - recordOffset uint32 - lf *logFile -} - -// hashReader implements io.Reader, io.ByteReader interfaces. It also keeps track of the number -// bytes read. The hashReader writes to h (hash) what it reads from r. -type hashReader struct { - r io.Reader - h hash.Hash32 - bytesRead int // Number of bytes read. -} - -func newHashReader(r io.Reader) *hashReader { - hash := crc32.New(y.CastagnoliCrcTable) - return &hashReader{ - r: r, - h: hash, - } -} - -// Read reads len(p) bytes from the reader. Returns the number of bytes read, error on failure. -func (t *hashReader) Read(p []byte) (int, error) { - n, err := t.r.Read(p) - if err != nil { - return n, err - } - t.bytesRead += n - return t.h.Write(p[:n]) -} - -// ReadByte reads exactly one byte from the reader. Returns error on failure. -func (t *hashReader) ReadByte() (byte, error) { - b := make([]byte, 1) - _, err := t.Read(b) - return b[0], err -} - -// Sum32 returns the sum32 of the underlying hash. -func (t *hashReader) Sum32() uint32 { - return t.h.Sum32() -} - -// Entry reads an entry from the provided reader. It also validates the checksum for every entry -// read. Returns error on failure. -func (r *safeRead) Entry(reader io.Reader) (*Entry, error) { - tee := newHashReader(reader) - var h header - hlen, err := h.DecodeFrom(tee) - if err != nil { - return nil, err - } - if h.klen > uint32(1<<16) { // Key length must be below uint16. - return nil, errTruncate - } - kl := int(h.klen) - if cap(r.k) < kl { - r.k = make([]byte, 2*kl) - } - vl := int(h.vlen) - if cap(r.v) < vl { - r.v = make([]byte, 2*vl) - } - - e := &Entry{} - e.offset = r.recordOffset - e.hlen = hlen - buf := make([]byte, h.klen+h.vlen) - if _, err := io.ReadFull(tee, buf[:]); err != nil { - if err == io.EOF { - err = errTruncate - } - return nil, err - } - if r.lf.encryptionEnabled() { - if buf, err = r.lf.decryptKV(buf[:], r.recordOffset); err != nil { - return nil, err - } - } - e.Key = buf[:h.klen] - e.Value = buf[h.klen:] - var crcBuf [crc32.Size]byte - if _, err := io.ReadFull(reader, crcBuf[:]); err != nil { - if err == io.EOF { - err = errTruncate - } - return nil, err - } - crc := y.BytesToU32(crcBuf[:]) - if crc != tee.Sum32() { - return nil, errTruncate - } - e.meta = h.meta - e.UserMeta = h.userMeta - e.ExpiresAt = h.expiresAt - return e, nil -} - -// iterate iterates over log file. It doesn't not allocate new memory for every kv pair. -// Therefore, the kv pair is only valid for the duration of fn call. -func (vlog *valueLog) iterate(lf *logFile, offset uint32, fn logEntry) (uint32, error) { - fi, err := lf.fd.Stat() - if err != nil { - return 0, err - } - if offset == 0 { - // If offset is set to zero, let's advance past the encryption key header. - offset = vlogHeaderSize - } - if int64(offset) == fi.Size() { - // We're at the end of the file already. No need to do anything. 
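The `hashReader` above tees every byte it returns into a CRC-32C (and counts bytes read), letting `safeRead.Entry` validate the stored checksum in a single pass. The standard library's `io.TeeReader` gives the same behavior, as this minimal sketch shows:

```go
package main

import (
	"bytes"
	"fmt"
	"hash/crc32"
	"io"
)

func main() {
	payload := []byte("header|key|value bytes")
	src := bytes.NewReader(payload)

	// io.TeeReader behaves like the hashReader above: everything
	// read from src is also written into the CRC state.
	h := crc32.New(crc32.MakeTable(crc32.Castagnoli))
	tee := io.TeeReader(src, h)

	buf := make([]byte, len(payload))
	if _, err := io.ReadFull(tee, buf); err != nil {
		panic(err)
	}
	fmt.Printf("running crc32c: %08x\n", h.Sum32())
}
```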
- return offset, nil - } - if vlog.opt.ReadOnly { - // We're not at the end of the file. We'd need to replay the entries, or - // possibly truncate the file. - return 0, ErrReplayNeeded - } - - // We're not at the end of the file. Let's Seek to the offset and start reading. - if _, err := lf.fd.Seek(int64(offset), io.SeekStart); err != nil { - return 0, errFile(err, lf.path, "Unable to seek") - } - - reader := bufio.NewReader(lf.fd) - read := &safeRead{ - k: make([]byte, 10), - v: make([]byte, 10), - recordOffset: offset, - lf: lf, - } - - var lastCommit uint64 - var validEndOffset uint32 = offset - -loop: - for { - e, err := read.Entry(reader) - switch { - case err == io.EOF: - break loop - case err == io.ErrUnexpectedEOF || err == errTruncate: - break loop - case err != nil: - return 0, err - case e == nil: - continue - } - - var vp valuePointer - vp.Len = uint32(int(e.hlen) + len(e.Key) + len(e.Value) + crc32.Size) - read.recordOffset += vp.Len - - vp.Offset = e.offset - vp.Fid = lf.fid - - switch { - case e.meta&bitTxn > 0: - txnTs := y.ParseTs(e.Key) - if lastCommit == 0 { - lastCommit = txnTs - } - if lastCommit != txnTs { - break loop - } - - case e.meta&bitFinTxn > 0: - txnTs, err := strconv.ParseUint(string(e.Value), 10, 64) - if err != nil || lastCommit != txnTs { - break loop - } - // Got the end of txn. Now we can store them. - lastCommit = 0 - validEndOffset = read.recordOffset - - default: - if lastCommit != 0 { - // This is most likely an entry which was moved as part of GC. - // We shouldn't get this entry in the middle of a transaction. - break loop - } - validEndOffset = read.recordOffset - } - - if err := fn(*e, vp); err != nil { - if err == errStop { - break - } - return 0, errFile(err, lf.path, "Iteration function") - } - } - return validEndOffset, nil -} - -func (vlog *valueLog) rewrite(f *logFile, tr trace.Trace) error { - vlog.filesLock.RLock() - maxFid := vlog.maxFid - vlog.filesLock.RUnlock() - y.AssertTruef(uint32(f.fid) < maxFid, "fid to move: %d. Current max fid: %d", f.fid, maxFid) - tr.LazyPrintf("Rewriting fid: %d", f.fid) - - wb := make([]*Entry, 0, 1000) - var size int64 - - y.AssertTrue(vlog.db != nil) - var count, moved int - fe := func(e Entry) error { - count++ - if count%100000 == 0 { - tr.LazyPrintf("Processing entry %d", count) - } - - vs, err := vlog.db.get(e.Key) - if err != nil { - return err - } - if discardEntry(e, vs, vlog.db) { - return nil - } - - // Value is still present in value log. - if len(vs.Value) == 0 { - return errors.Errorf("Empty value: %+v", vs) - } - var vp valuePointer - vp.Decode(vs.Value) - - // If the entry found from the LSM Tree points to a newer vlog file, don't do anything. - if vp.Fid > f.fid { - return nil - } - // If the entry found from the LSM Tree points to an offset greater than the one - // read from vlog, don't do anything. - if vp.Offset > e.offset { - return nil - } - // If the entry read from LSM Tree and vlog file point to the same vlog file and offset, - // insert them back into the DB. - // NOTE: It might be possible that the entry read from the LSM Tree points to - // an older vlog file. See the comments in the else part. - if vp.Fid == f.fid && vp.Offset == e.offset { - moved++ - // This new entry only contains the key, and a pointer to the value. - ne := new(Entry) - ne.meta = 0 // Remove all bits. Different keyspace doesn't need these bits. - ne.UserMeta = e.UserMeta - ne.ExpiresAt = e.ExpiresAt - - // Create a new key in a separate keyspace, prefixed by moveKey. 
We are not - // allowed to rewrite an older version of key in the LSM tree, because then this older - // version would be at the top of the LSM tree. To work correctly, reads expect the - // latest versions to be at the top, and the older versions at the bottom. - if bytes.HasPrefix(e.Key, badgerMove) { - ne.Key = append([]byte{}, e.Key...) - } else { - ne.Key = make([]byte, len(badgerMove)+len(e.Key)) - n := copy(ne.Key, badgerMove) - copy(ne.Key[n:], e.Key) - } - - ne.Value = append([]byte{}, e.Value...) - es := int64(ne.estimateSize(vlog.opt.ValueThreshold)) - // Consider size of value as well while considering the total size - // of the batch. There have been reports of high memory usage in - // rewrite because we don't consider the value size. See #1292. - es += int64(len(e.Value)) - - // Ensure length and size of wb is within transaction limits. - if int64(len(wb)+1) >= vlog.opt.maxBatchCount || - size+es >= vlog.opt.maxBatchSize { - tr.LazyPrintf("request has %d entries, size %d", len(wb), size) - if err := vlog.db.batchSet(wb); err != nil { - return err - } - size = 0 - wb = wb[:0] - } - wb = append(wb, ne) - size += es - } else { - // It might be possible that the entry read from LSM Tree points to an older vlog file. - // This can happen in the following situation. Assume DB is opened with - // numberOfVersionsToKeep=1 - // - // Now, if we have ONLY one key in the system "FOO" which has been updated 3 times and - // the same key has been garbage collected 3 times, we'll have 3 versions of the movekey - // for the same key "FOO". - // NOTE: moveKeyi is the moveKey with version i - // Assume we have 3 move keys in L0. - // - moveKey1 (points to vlog file 10), - // - moveKey2 (points to vlog file 14) and - // - moveKey3 (points to vlog file 15). - - // Also, assume there is another move key "moveKey1" (points to vlog file 6) (this is - // also a move Key for key "FOO" ) on upper levels (let's say 3). The move key - // "moveKey1" on level 0 was inserted because vlog file 6 was GCed. - // - // Here's what the arrangement looks like - // L0 => (moveKey1 => vlog10), (moveKey2 => vlog14), (moveKey3 => vlog15) - // L1 => .... - // L2 => .... - // L3 => (moveKey1 => vlog6) - // - // When L0 compaction runs, it keeps only moveKey3 because the number of versions - // to keep is set to 1. (we've dropped moveKey1's latest version) - // - // The new arrangement of keys is - // L0 => .... - // L1 => (moveKey3 => vlog15) - // L2 => .... - // L3 => (moveKey1 => vlog6) - // - // Now if we try to GC vlog file 10, the entry read from vlog file will point to vlog10 - // but the entry read from LSM Tree will point to vlog6. The move key read from LSM tree - // will point to vlog6 because we've asked for version 1 of the move key. - // - // This might seem like an issue but it's not really an issue because the user has set - // the number of versions to keep to 1 and the latest version of moveKey points to the - // correct vlog file and offset. The stale move key on L3 will be eventually dropped by - // compaction because there is a newer versions in the upper levels. 
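Stepping back to the `iterate` loop shown earlier: it only advances `validEndOffset` at transaction boundaries. Entries tagged `bitTxn` must share one commit timestamp, and only a matching `bitFinTxn` record commits them; anything else stops replay so a torn transaction tail gets truncated. A toy sketch of that state machine over simplified entries (the `entry` type and timestamp handling here are stand-ins for the real key parsing):

```go
package main

import "fmt"

const (
	bitTxn    byte = 1 << 6
	bitFinTxn byte = 1 << 7
)

// entry is a stripped-down stand-in for a decoded vlog record.
type entry struct {
	meta byte
	ts   uint64 // commit timestamp (parsed from key/value in the real code)
	size uint32
}

// validEnd walks entries the way iterate does: the watermark only moves
// past a complete transaction, so a torn tail is never counted as valid.
func validEnd(entries []entry) (valid uint32) {
	var off uint32
	var lastCommit uint64
	for _, e := range entries {
		off += e.size
		switch {
		case e.meta&bitTxn > 0:
			if lastCommit == 0 {
				lastCommit = e.ts
			} else if lastCommit != e.ts {
				return valid // mixed timestamps: stop replay here
			}
		case e.meta&bitFinTxn > 0:
			if lastCommit != e.ts {
				return valid // fin record doesn't match: stop
			}
			lastCommit = 0
			valid = off // the whole txn is durable
		default:
			if lastCommit != 0 {
				return valid // plain entry inside a txn: corrupt tail
			}
			valid = off
		}
	}
	return valid
}

func main() {
	log := []entry{
		{meta: bitTxn, ts: 7, size: 10},
		{meta: bitTxn, ts: 7, size: 10},
		{meta: bitFinTxn, ts: 7, size: 5}, // commit: valid = 25
		{meta: bitTxn, ts: 9, size: 10},   // torn txn, never finished
	}
	fmt.Println(validEnd(log)) // 25
}
```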
- } - return nil - } - - _, err := vlog.iterate(f, 0, func(e Entry, vp valuePointer) error { - return fe(e) - }) - if err != nil { - return err - } - - tr.LazyPrintf("request has %d entries, size %d", len(wb), size) - batchSize := 1024 - var loops int - for i := 0; i < len(wb); { - loops++ - if batchSize == 0 { - vlog.db.opt.Warningf("We shouldn't reach batch size of zero.") - return ErrNoRewrite - } - end := i + batchSize - if end > len(wb) { - end = len(wb) - } - if err := vlog.db.batchSet(wb[i:end]); err != nil { - if err == ErrTxnTooBig { - // Decrease the batch size to half. - batchSize = batchSize / 2 - tr.LazyPrintf("Dropped batch size to %d", batchSize) - continue - } - return err - } - i += batchSize - } - tr.LazyPrintf("Processed %d entries in %d loops", len(wb), loops) - tr.LazyPrintf("Total entries: %d. Moved: %d", count, moved) - tr.LazyPrintf("Removing fid: %d", f.fid) - var deleteFileNow bool - // Entries written to LSM. Remove the older file now. - { - vlog.filesLock.Lock() - // Just a sanity-check. - if _, ok := vlog.filesMap[f.fid]; !ok { - vlog.filesLock.Unlock() - return errors.Errorf("Unable to find fid: %d", f.fid) - } - if vlog.iteratorCount() == 0 { - delete(vlog.filesMap, f.fid) - deleteFileNow = true - } else { - vlog.filesToBeDeleted = append(vlog.filesToBeDeleted, f.fid) - } - vlog.filesLock.Unlock() - } - - if deleteFileNow { - if err := vlog.deleteLogFile(f); err != nil { - return err - } - } - - return nil -} - -func (vlog *valueLog) deleteMoveKeysFor(fid uint32, tr trace.Trace) error { - db := vlog.db - var result []*Entry - var count, pointers uint64 - tr.LazyPrintf("Iterating over move keys to find invalids for fid: %d", fid) - err := db.View(func(txn *Txn) error { - opt := DefaultIteratorOptions - opt.InternalAccess = true - opt.PrefetchValues = false - itr := txn.NewIterator(opt) - defer itr.Close() - - for itr.Seek(badgerMove); itr.ValidForPrefix(badgerMove); itr.Next() { - count++ - item := itr.Item() - if item.meta&bitValuePointer == 0 { - continue - } - pointers++ - var vp valuePointer - vp.Decode(item.vptr) - if vp.Fid == fid { - e := &Entry{Key: y.KeyWithTs(item.Key(), item.Version()), meta: bitDelete} - result = append(result, e) - } - } - return nil - }) - if err != nil { - tr.LazyPrintf("Got error while iterating move keys: %v", err) - tr.SetError() - return err - } - tr.LazyPrintf("Num total move keys: %d. 
Num pointers: %d", count, pointers) - tr.LazyPrintf("Number of invalid move keys found: %d", len(result)) - batchSize := 10240 - for i := 0; i < len(result); { - end := i + batchSize - if end > len(result) { - end = len(result) - } - if err := db.batchSet(result[i:end]); err != nil { - if err == ErrTxnTooBig { - batchSize /= 2 - tr.LazyPrintf("Dropped batch size to %d", batchSize) - continue - } - tr.LazyPrintf("Error while doing batchSet: %v", err) - tr.SetError() - return err - } - i += batchSize - } - tr.LazyPrintf("Move keys deletion done.") - return nil -} - -func (vlog *valueLog) incrIteratorCount() { - atomic.AddInt32(&vlog.numActiveIterators, 1) -} - -func (vlog *valueLog) iteratorCount() int { - return int(atomic.LoadInt32(&vlog.numActiveIterators)) -} - -func (vlog *valueLog) decrIteratorCount() error { - num := atomic.AddInt32(&vlog.numActiveIterators, -1) - if num != 0 { - return nil - } - - vlog.filesLock.Lock() - lfs := make([]*logFile, 0, len(vlog.filesToBeDeleted)) - for _, id := range vlog.filesToBeDeleted { - lfs = append(lfs, vlog.filesMap[id]) - delete(vlog.filesMap, id) - } - vlog.filesToBeDeleted = nil - vlog.filesLock.Unlock() - - for _, lf := range lfs { - if err := vlog.deleteLogFile(lf); err != nil { - return err - } - } - return nil -} - -func (vlog *valueLog) deleteLogFile(lf *logFile) error { - if lf == nil { - return nil - } - lf.lock.Lock() - defer lf.lock.Unlock() - - path := vlog.fpath(lf.fid) - if err := lf.munmap(); err != nil { - _ = lf.fd.Close() - return err - } - lf.fmap = nil - if err := lf.fd.Close(); err != nil { - return err - } - return os.Remove(path) -} - -func (vlog *valueLog) dropAll() (int, error) { - // If db is opened in InMemory mode, we don't need to do anything since there are no vlog files. - if vlog.db.opt.InMemory { - return 0, nil - } - // We don't want to block dropAll on any pending transactions. So, don't worry about iterator - // count. - var count int - deleteAll := func() error { - vlog.filesLock.Lock() - defer vlog.filesLock.Unlock() - for _, lf := range vlog.filesMap { - if err := vlog.deleteLogFile(lf); err != nil { - return err - } - count++ - } - vlog.filesMap = make(map[uint32]*logFile) - return nil - } - if err := deleteAll(); err != nil { - return count, err - } - - vlog.db.opt.Infof("Value logs deleted. Creating value log file: 0") - if _, err := vlog.createVlogFile(0); err != nil { // Called while writes are stopped. - return count, err - } - return count, nil -} - -// lfDiscardStats keeps track of the amount of data that could be discarded for -// a given logfile. -type lfDiscardStats struct { - sync.RWMutex - m map[uint32]int64 - flushChan chan map[uint32]int64 - closer *y.Closer - updatesSinceFlush int -} - -type valueLog struct { - dirPath string - - // guards our view of which files exist, which to be deleted, how many active iterators - filesLock sync.RWMutex - filesMap map[uint32]*logFile - maxFid uint32 - filesToBeDeleted []uint32 - // A refcount of iterators -- when this hits zero, we can delete the filesToBeDeleted. - numActiveIterators int32 - - db *DB - writableLogOffset uint32 // read by read, written by write. Must access via atomics. 
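Both `rewrite` and `deleteMoveKeysFor` above push entries back through `db.batchSet` and halve the batch size whenever the write returns `ErrTxnTooBig`, retrying the same window until it fits. A generic sketch of that retry pattern; `errTooBig` and the `set` callback are stand-ins for the real error and writer:

```go
package main

import (
	"errors"
	"fmt"
)

var errTooBig = errors.New("txn too big") // stand-in for badger.ErrTxnTooBig

// writeInBatches mirrors the retry loop in rewrite/deleteMoveKeysFor:
// on a too-big error the batch is halved and the same window retried,
// so progress is made without ever exceeding transaction limits.
func writeInBatches(items []int, batchSize int, set func([]int) error) error {
	for i := 0; i < len(items); {
		if batchSize == 0 {
			return errors.New("batch size reached zero")
		}
		end := i + batchSize
		if end > len(items) {
			end = len(items)
		}
		if err := set(items[i:end]); err != nil {
			if errors.Is(err, errTooBig) {
				batchSize /= 2 // shrink and retry the same window
				continue
			}
			return err
		}
		i += batchSize
	}
	return nil
}

func main() {
	items := make([]int, 100)
	set := func(b []int) error {
		if len(b) > 16 { // pretend the txn limit allows 16 items
			return errTooBig
		}
		return nil
	}
	fmt.Println(writeInBatches(items, 1024, set)) // <nil>
}
```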
- numEntriesWritten uint32 - opt Options - - garbageCh chan struct{} - lfDiscardStats *lfDiscardStats -} - -func vlogFilePath(dirPath string, fid uint32) string { - return fmt.Sprintf("%s%s%06d.vlog", dirPath, string(os.PathSeparator), fid) -} - -func (vlog *valueLog) fpath(fid uint32) string { - return vlogFilePath(vlog.dirPath, fid) -} - -func (vlog *valueLog) populateFilesMap() error { - vlog.filesMap = make(map[uint32]*logFile) - - files, err := ioutil.ReadDir(vlog.dirPath) - if err != nil { - return errFile(err, vlog.dirPath, "Unable to open log dir.") - } - - found := make(map[uint64]struct{}) - for _, file := range files { - if !strings.HasSuffix(file.Name(), ".vlog") { - continue - } - fsz := len(file.Name()) - fid, err := strconv.ParseUint(file.Name()[:fsz-5], 10, 32) - if err != nil { - return errFile(err, file.Name(), "Unable to parse log id.") - } - if _, ok := found[fid]; ok { - return errFile(err, file.Name(), "Duplicate file found. Please delete one.") - } - found[fid] = struct{}{} - - lf := &logFile{ - fid: uint32(fid), - path: vlog.fpath(uint32(fid)), - loadingMode: vlog.opt.ValueLogLoadingMode, - registry: vlog.db.registry, - } - vlog.filesMap[uint32(fid)] = lf - if vlog.maxFid < uint32(fid) { - vlog.maxFid = uint32(fid) - } - } - return nil -} - -func (lf *logFile) open(path string, flags uint32) error { - var err error - if lf.fd, err = y.OpenExistingFile(path, flags); err != nil { - return y.Wrapf(err, "Error while opening file in logfile %s", path) - } - - fi, err := lf.fd.Stat() - if err != nil { - return errFile(err, lf.path, "Unable to run file.Stat") - } - sz := fi.Size() - y.AssertTruef( - sz <= math.MaxUint32, - "file size: %d greater than %d", - uint32(sz), uint32(math.MaxUint32), - ) - lf.size = uint32(sz) - if sz < vlogHeaderSize { - // Every vlog file should have at least vlogHeaderSize. If it is less than vlogHeaderSize - // then it must have been corrupted. But no need to handle here. log replayer will truncate - // and bootstrap the logfile. So ignoring here. - return nil - } - buf := make([]byte, vlogHeaderSize) - if _, err = lf.fd.Read(buf); err != nil { - return y.Wrapf(err, "Error while reading vlog file %d", lf.fid) - } - keyID := binary.BigEndian.Uint64(buf[:8]) - var dk *pb.DataKey - // retrieve datakey. - if dk, err = lf.registry.dataKey(keyID); err != nil { - return y.Wrapf(err, "While opening vlog file %d", lf.fid) - } - lf.dataKey = dk - lf.baseIV = buf[8:] - y.AssertTrue(len(lf.baseIV) == 12) - return nil -} - -// bootstrap will initialize the log file with key id and baseIV. -// The below figure shows the layout of log file. -// +----------------+------------------+------------------+ -// | keyID(8 bytes) | baseIV(12 bytes)| entry... | -// +----------------+------------------+------------------+ -func (lf *logFile) bootstrap() error { - var err error - // delete all the data. because bootstrap is been called while creating vlog and as well - // as replaying log. While replaying log, there may be any data left. So we need to truncate - // everything. - if err = lf.fd.Truncate(0); err != nil { - return y.Wrapf(err, "Error while bootstraping.") - } - - if _, err = lf.fd.Seek(0, io.SeekStart); err != nil { - return y.Wrapf(err, "Error while SeekStart for the logfile %d in logFile.bootstarp", lf.fid) - } - // generate data key for the log file. 
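The `bootstrap` routine (continued below) truncates the file and writes the 20-byte vlog header: the data-key ID in the first 8 bytes and a random 12-byte base IV after it, with a key ID of 0 meaning plain text. A small sketch building and reading back that header:

```go
package main

import (
	cryptorand "crypto/rand"
	"encoding/binary"
	"fmt"
)

const vlogHeaderSize = 20 // keyID (8 bytes) + baseIV (12 bytes)

// buildHeader lays out the header exactly as bootstrap does:
// keyID first (0 means the file is plain text), then a random base IV.
func buildHeader(keyID uint64) ([]byte, error) {
	buf := make([]byte, vlogHeaderSize)
	binary.BigEndian.PutUint64(buf[:8], keyID)
	if _, err := cryptorand.Read(buf[8:]); err != nil {
		return nil, err
	}
	return buf, nil
}

func main() {
	hdr, err := buildHeader(42)
	if err != nil {
		panic(err)
	}
	// open() reads these 20 bytes back to look up the data key in the
	// registry and to seed generateIV.
	fmt.Println("keyID:", binary.BigEndian.Uint64(hdr[:8]))
	fmt.Println("baseIV len:", len(hdr[8:])) // 12
}
```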
- var dk *pb.DataKey - if dk, err = lf.registry.latestDataKey(); err != nil { - return y.Wrapf(err, "Error while retrieving datakey in logFile.bootstarp") - } - lf.dataKey = dk - // We'll always preserve vlogHeaderSize for key id and baseIV. - buf := make([]byte, vlogHeaderSize) - // write key id to the buf. - // key id will be zero if the logfile is in plain text. - binary.BigEndian.PutUint64(buf[:8], lf.keyID()) - // generate base IV. It'll be used with offset of the vptr to encrypt the entry. - if _, err := cryptorand.Read(buf[8:]); err != nil { - return y.Wrapf(err, "Error while creating base IV, while creating logfile") - } - // Initialize base IV. - lf.baseIV = buf[8:] - y.AssertTrue(len(lf.baseIV) == 12) - // write the key id and base IV to the file. - _, err = lf.fd.Write(buf) - return err -} - -func (vlog *valueLog) createVlogFile(fid uint32) (*logFile, error) { - path := vlog.fpath(fid) - - lf := &logFile{ - fid: fid, - path: path, - loadingMode: vlog.opt.ValueLogLoadingMode, - registry: vlog.db.registry, - } - // writableLogOffset is only written by write func, by read by Read func. - // To avoid a race condition, all reads and updates to this variable must be - // done via atomics. - var err error - if lf.fd, err = y.CreateSyncedFile(path, vlog.opt.SyncWrites); err != nil { - return nil, errFile(err, lf.path, "Create value log file") - } - - removeFile := func() { - // Remove the file so that we don't get an error when createVlogFile is - // called for the same fid, again. This could happen if there is an - // transient error because of which we couldn't create a new file - // and the second attempt to create the file succeeds. - y.Check(os.Remove(lf.fd.Name())) - } - - if err = lf.bootstrap(); err != nil { - removeFile() - return nil, err - } - - if err = syncDir(vlog.dirPath); err != nil { - removeFile() - return nil, errFile(err, vlog.dirPath, "Sync value log dir") - } - - if err = lf.mmap(2 * vlog.opt.ValueLogFileSize); err != nil { - removeFile() - return nil, errFile(err, lf.path, "Mmap value log file") - } - - vlog.filesLock.Lock() - vlog.filesMap[fid] = lf - vlog.maxFid = fid - // writableLogOffset is only written by write func, by read by Read func. - // To avoid a race condition, all reads and updates to this variable must be - // done via atomics. - atomic.StoreUint32(&vlog.writableLogOffset, vlogHeaderSize) - vlog.numEntriesWritten = 0 - vlog.filesLock.Unlock() - - return lf, nil -} - -func errFile(err error, path string, msg string) error { - return fmt.Errorf("%s. Path=%s. Error=%v", msg, path, err) -} - -func (vlog *valueLog) replayLog(lf *logFile, offset uint32, replayFn logEntry) error { - fi, err := lf.fd.Stat() - if err != nil { - return errFile(err, lf.path, "Unable to run file.Stat") - } - - // Alright, let's iterate now. - endOffset, err := vlog.iterate(lf, offset, replayFn) - if err != nil { - return errFile(err, lf.path, "Unable to replay logfile") - } - if int64(endOffset) == fi.Size() { - return nil - } - - // End offset is different from file size. So, we should truncate the file - // to that size. - if !vlog.opt.Truncate { - vlog.db.opt.Warningf("Truncate Needed. File %s size: %d Endoffset: %d", - lf.fd.Name(), fi.Size(), endOffset) - return ErrTruncateNeeded - } - - // The entire file should be truncated (i.e. it should be deleted). - // If fid == maxFid then it's okay to truncate the entire file since it will be - // used for future additions. Also, it's okay if the last file has size zero. - // We mmap 2*opt.ValueLogSize for the last file. 
See vlog.Open() function - // if endOffset <= vlogHeaderSize && lf.fid != vlog.maxFid { - - if endOffset <= vlogHeaderSize { - if lf.fid != vlog.maxFid { - return errDeleteVlogFile - } - return lf.bootstrap() - } - - vlog.db.opt.Infof("Truncating vlog file %s to offset: %d", lf.fd.Name(), endOffset) - if err := lf.fd.Truncate(int64(endOffset)); err != nil { - return errFile(err, lf.path, fmt.Sprintf( - "Truncation needed at offset %d. Can be done manually as well.", endOffset)) - } - return nil -} - -// init initializes the value log struct. This initialization needs to happen -// before compactions start. -func (vlog *valueLog) init(db *DB) { - vlog.opt = db.opt - vlog.db = db - // We don't need to open any vlog files or collect stats for GC if DB is opened - // in InMemory mode. InMemory mode doesn't create any files/directories on disk. - if vlog.opt.InMemory { - return - } - vlog.dirPath = vlog.opt.ValueDir - - vlog.garbageCh = make(chan struct{}, 1) // Only allow one GC at a time. - vlog.lfDiscardStats = &lfDiscardStats{ - m: make(map[uint32]int64), - closer: y.NewCloser(1), - flushChan: make(chan map[uint32]int64, 16), - } -} - -func (vlog *valueLog) open(db *DB, ptr valuePointer, replayFn logEntry) error { - // We don't need to open any vlog files or collect stats for GC if DB is opened - // in InMemory mode. InMemory mode doesn't create any files/directories on disk. - if db.opt.InMemory { - return nil - } - - go vlog.flushDiscardStats() - if err := vlog.populateFilesMap(); err != nil { - return err - } - // If no files are found, then create a new file. - if len(vlog.filesMap) == 0 { - _, err := vlog.createVlogFile(0) - return y.Wrapf(err, "Error while creating log file in valueLog.open") - } - fids := vlog.sortedFids() - for _, fid := range fids { - lf, ok := vlog.filesMap[fid] - y.AssertTrue(ok) - var flags uint32 - switch { - case vlog.opt.ReadOnly: - // If we have read only, we don't need SyncWrites. - flags |= y.ReadOnly - // Set sync flag. - case vlog.opt.SyncWrites: - flags |= y.Sync - } - - // We cannot mmap the files upfront here. Windows does not like mmapped files to be - // truncated. We might need to truncate files during a replay. - var err error - if err = lf.open(vlog.fpath(fid), flags); err != nil { - return errors.Wrapf(err, "Open existing file: %q", lf.path) - } - - // This file is before the value head pointer. So, we don't need to - // replay it, and can just open it in readonly mode. - if fid < ptr.Fid { - // Mmap the file here, we don't need to replay it. - if err := lf.init(); err != nil { - return err - } - continue - } - - var offset uint32 - if fid == ptr.Fid { - offset = ptr.Offset + ptr.Len - } - vlog.db.opt.Infof("Replaying file id: %d at offset: %d\n", fid, offset) - now := time.Now() - // Replay and possible truncation done. Now we can open the file as per - // user specified options. - if err := vlog.replayLog(lf, offset, replayFn); err != nil { - // Log file is corrupted. Delete it. - if err == errDeleteVlogFile { - delete(vlog.filesMap, fid) - // Close the fd of the file before deleting the file otherwise windows complaints. - if err := lf.fd.Close(); err != nil { - return errors.Wrapf(err, "failed to close vlog file %s", lf.fd.Name()) - } - path := vlog.fpath(lf.fid) - if err := os.Remove(path); err != nil { - return y.Wrapf(err, "failed to delete empty value log file: %q", path) - } - continue - } - return err - } - vlog.db.opt.Infof("Replay took: %s\n", time.Since(now)) - - if fid < vlog.maxFid { - // This file has been replayed. 
It can now be mmapped. - // For maxFid, the mmap would be done by the specially written code below. - if err := lf.init(); err != nil { - return err - } - } - } - // Seek to the end to start writing. - last, ok := vlog.filesMap[vlog.maxFid] - y.AssertTrue(ok) - // We'll create a new vlog if the last vlog is encrypted and db is opened in - // plain text mode or vice versa. A single vlog file can't have both - // encrypted entries and plain text entries. - if last.encryptionEnabled() != vlog.db.shouldEncrypt() { - newid := vlog.maxFid + 1 - _, err := vlog.createVlogFile(newid) - if err != nil { - return y.Wrapf(err, "Error while creating log file %d in valueLog.open", newid) - } - last, ok = vlog.filesMap[newid] - y.AssertTrue(ok) - } - lastOffset, err := last.fd.Seek(0, io.SeekEnd) - if err != nil { - return errFile(err, last.path, "file.Seek to end") - } - vlog.writableLogOffset = uint32(lastOffset) - - // Update the head to point to the updated tail. Otherwise, even after doing a successful - // replay and closing the DB, the value log head does not get updated, which causes the replay - // to happen repeatedly. - vlog.db.vhead = valuePointer{Fid: vlog.maxFid, Offset: uint32(lastOffset)} - - // Map the file if needed. When we create a file, it is automatically mapped. - if err = last.mmap(2 * vlog.opt.ValueLogFileSize); err != nil { - return errFile(err, last.path, "Map log file") - } - if err := vlog.populateDiscardStats(); err != nil { - // Print the error and continue. We don't want to prevent value log open if there's an error - // with the fetching discards stats. - db.opt.Errorf("Failed to populate discard stats: %s", err) - } - return nil -} - -func (lf *logFile) init() error { - fstat, err := lf.fd.Stat() - if err != nil { - return errors.Wrapf(err, "Unable to check stat for %q", lf.path) - } - sz := fstat.Size() - if sz == 0 { - // File is empty. We don't need to mmap it. Return. - return nil - } - y.AssertTrue(sz <= math.MaxUint32) - lf.size = uint32(sz) - if err = lf.mmap(sz); err != nil { - _ = lf.fd.Close() - return errors.Wrapf(err, "Unable to map file: %q", fstat.Name()) - } - return nil -} - -func (vlog *valueLog) stopFlushDiscardStats() { - if vlog.lfDiscardStats != nil { - vlog.lfDiscardStats.closer.Signal() - } -} - -func (vlog *valueLog) Close() error { - if vlog == nil || vlog.db == nil || vlog.db.opt.InMemory { - return nil - } - // close flushDiscardStats. - vlog.lfDiscardStats.closer.SignalAndWait() - - vlog.opt.Debugf("Stopping garbage collection of values.") - - var err error - for id, f := range vlog.filesMap { - f.lock.Lock() // We won’t release the lock. - if munmapErr := f.munmap(); munmapErr != nil && err == nil { - err = munmapErr - } - - maxFid := vlog.maxFid - // TODO(ibrahim) - Do we need the following truncations on non-windows - // platforms? We expand the file only on windows and the vlog.woffset() - // should point to end of file on all other platforms. - if !vlog.opt.ReadOnly && id == maxFid { - // truncate writable log file to correct offset. - if truncErr := f.fd.Truncate( - int64(vlog.woffset())); truncErr != nil && err == nil { - err = truncErr - } - } - - if closeErr := f.fd.Close(); closeErr != nil && err == nil { - err = closeErr - } - } - return err -} - -// sortedFids returns the file id's not pending deletion, sorted. Assumes we have shared access to -// filesMap. 
-func (vlog *valueLog) sortedFids() []uint32 { - toBeDeleted := make(map[uint32]struct{}) - for _, fid := range vlog.filesToBeDeleted { - toBeDeleted[fid] = struct{}{} - } - ret := make([]uint32, 0, len(vlog.filesMap)) - for fid := range vlog.filesMap { - if _, ok := toBeDeleted[fid]; !ok { - ret = append(ret, fid) - } - } - sort.Slice(ret, func(i, j int) bool { - return ret[i] < ret[j] - }) - return ret -} - -type request struct { - // Input values - Entries []*Entry - // Output values and wait group stuff below - Ptrs []valuePointer - Wg sync.WaitGroup - Err error - ref int32 -} - -func (req *request) reset() { - req.Entries = req.Entries[:0] - req.Ptrs = req.Ptrs[:0] - req.Wg = sync.WaitGroup{} - req.Err = nil - req.ref = 0 -} - -func (req *request) IncrRef() { - atomic.AddInt32(&req.ref, 1) -} - -func (req *request) DecrRef() { - nRef := atomic.AddInt32(&req.ref, -1) - if nRef > 0 { - return - } - req.Entries = nil - requestPool.Put(req) -} - -func (req *request) Wait() error { - req.Wg.Wait() - err := req.Err - req.DecrRef() // DecrRef after writing to DB. - return err -} - -type requests []*request - -func (reqs requests) DecrRef() { - for _, req := range reqs { - req.DecrRef() - } -} - -func (reqs requests) IncrRef() { - for _, req := range reqs { - req.IncrRef() - } -} - -// sync function syncs content of latest value log file to disk. Syncing of value log directory is -// not required here as it happens every time a value log file rotation happens(check createVlogFile -// function). During rotation, previous value log file also gets synced to disk. It only syncs file -// if fid >= vlog.maxFid. In some cases such as replay(while opening db), it might be called with -// fid < vlog.maxFid. To sync irrespective of file id just call it with math.MaxUint32. -func (vlog *valueLog) sync(fid uint32) error { - if vlog.opt.SyncWrites || vlog.opt.InMemory { - return nil - } - - vlog.filesLock.RLock() - maxFid := vlog.maxFid - // During replay it is possible to get sync call with fid less than maxFid. - // Because older file has already been synced, we can return from here. - if fid < maxFid || len(vlog.filesMap) == 0 { - vlog.filesLock.RUnlock() - return nil - } - curlf := vlog.filesMap[maxFid] - // Sometimes it is possible that vlog.maxFid has been increased but file creation - // with same id is still in progress and this function is called. In those cases - // entry for the file might not be present in vlog.filesMap. - if curlf == nil { - vlog.filesLock.RUnlock() - return nil - } - curlf.lock.RLock() - vlog.filesLock.RUnlock() - - err := curlf.sync() - curlf.lock.RUnlock() - return err -} - -func (vlog *valueLog) woffset() uint32 { - return atomic.LoadUint32(&vlog.writableLogOffset) -} - -// validateWrites will check whether the given requests can fit into 4GB vlog file. -// NOTE: 4GB is the maximum size we can create for vlog because value pointer offset is of type -// uint32. If we create more than 4GB, it will overflow uint32. So, limiting the size to 4GB. -func (vlog *valueLog) validateWrites(reqs []*request) error { - vlogOffset := uint64(vlog.woffset()) - for _, req := range reqs { - // calculate size of the request. 
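The per-request estimate used here, `estimateRequestSize` (just below), bounds each entry by `maxHeaderSize + key + value + crc32`. A worked sketch of that arithmetic; `maxHeaderSize` is assumed to be 22 here (1 byte meta + 1 byte userMeta plus worst-case varints), which is an assumption of this example rather than a quoted constant:

```go
package main

import (
	"fmt"
	"hash/crc32"
)

// maxHeaderSize is assumed here: 2 fixed bytes plus worst-case varints
// for klen, vlen and expiresAt.
const maxHeaderSize = 22

type entry struct{ key, value []byte }

// estimateSize mirrors estimateRequestSize: a worst-case bound, since
// the real varint header is usually smaller than maxHeaderSize.
func estimateSize(entries []entry) uint64 {
	var size uint64
	for _, e := range entries {
		size += uint64(maxHeaderSize + len(e.key) + len(e.value) + crc32.Size)
	}
	return size
}

func main() {
	req := []entry{
		{key: []byte("user/1"), value: []byte("alice")},
		{key: []byte("user/2"), value: []byte("bob")},
	}
	// validateWrites adds this to the current write offset and rejects
	// the batch if it would push the file past the 4GB uint32 limit.
	fmt.Println("estimated bytes:", estimateSize(req))
}
```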
- size := estimateRequestSize(req) - estimatedVlogOffset := vlogOffset + size - if estimatedVlogOffset > uint64(maxVlogFileSize) { - return errors.Errorf("Request size offset %d is bigger than maximum offset %d", - estimatedVlogOffset, maxVlogFileSize) - } - - if estimatedVlogOffset >= uint64(vlog.opt.ValueLogFileSize) { - // We'll create a new vlog file if the estimated offset is greater or equal to - // max vlog size. So, resetting the vlogOffset. - vlogOffset = 0 - continue - } - // Estimated vlog offset will become current vlog offset if the vlog is not rotated. - vlogOffset = estimatedVlogOffset - } - return nil -} - -// estimateRequestSize returns the size that needed to be written for the given request. -func estimateRequestSize(req *request) uint64 { - size := uint64(0) - for _, e := range req.Entries { - size += uint64(maxHeaderSize + len(e.Key) + len(e.Value) + crc32.Size) - } - return size -} - -// write is thread-unsafe by design and should not be called concurrently. -func (vlog *valueLog) write(reqs []*request) error { - if vlog.db.opt.InMemory { - return nil - } - // Validate writes before writing to vlog. Because, we don't want to partially write and return - // an error. - if err := vlog.validateWrites(reqs); err != nil { - return err - } - - vlog.filesLock.RLock() - maxFid := vlog.maxFid - curlf, ok := vlog.filesMap[maxFid] - if !ok { - var fids []uint32 - for fid := range vlog.filesMap { - fids = append(fids, fid) - } - return errors.Errorf("Cannot find MaxFid: %d in filesMap: %+v", maxFid, fids) - } - vlog.filesLock.RUnlock() - - var buf bytes.Buffer - flushWrites := func() error { - if buf.Len() == 0 { - return nil - } - vlog.opt.Debugf("Flushing buffer of size %d to vlog", buf.Len()) - n, err := curlf.fd.Write(buf.Bytes()) - if err != nil { - return errors.Wrapf(err, "Unable to write to value log file: %q", curlf.path) - } - buf.Reset() - y.NumWrites.Add(1) - y.NumBytesWritten.Add(int64(n)) - vlog.opt.Debugf("Done") - atomic.AddUint32(&vlog.writableLogOffset, uint32(n)) - atomic.StoreUint32(&curlf.size, vlog.writableLogOffset) - return nil - } - toDisk := func() error { - if err := flushWrites(); err != nil { - return err - } - if vlog.woffset() > uint32(vlog.opt.ValueLogFileSize) || - vlog.numEntriesWritten > vlog.opt.ValueLogMaxEntries { - if err := curlf.doneWriting(vlog.woffset()); err != nil { - return err - } - - newid := vlog.maxFid + 1 - y.AssertTruef(newid > 0, "newid has overflown uint32: %v", newid) - newlf, err := vlog.createVlogFile(newid) - if err != nil { - return err - } - curlf = newlf - atomic.AddInt32(&vlog.db.logRotates, 1) - } - return nil - } - for i := range reqs { - b := reqs[i] - b.Ptrs = b.Ptrs[:0] - var written int - for j := range b.Entries { - e := b.Entries[j] - if e.skipVlog { - b.Ptrs = append(b.Ptrs, valuePointer{}) - continue - } - var p valuePointer - - p.Fid = curlf.fid - // Use the offset including buffer length so far. - p.Offset = vlog.woffset() + uint32(buf.Len()) - plen, err := curlf.encodeEntry(e, &buf, p.Offset) // Now encode the entry into buffer. - if err != nil { - return err - } - p.Len = uint32(plen) - b.Ptrs = append(b.Ptrs, p) - written++ - - // It is possible that the size of the buffer grows beyond the max size of the value - // log (this happens when a transaction contains entries with large value sizes) and - // badger might run into out of memory errors. We flush the buffer here if it's size - // grows beyond the max value log size. 
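The `write` path batches encoded entries in an in-memory `bytes.Buffer` and flushes once per request batch, but, as the check just below shows, it also flushes early whenever the pending buffer alone outgrows the file size cap, guarding against out-of-memory on huge transactions. In isolation, that size-gated flush pattern looks roughly like this sketch:

```go
package main

import (
	"bytes"
	"fmt"
)

// sizeGatedWriter accumulates writes and flushes whenever the pending
// buffer crosses a threshold, the same guard write() applies before
// rotating to a fresh vlog file.
type sizeGatedWriter struct {
	buf     bytes.Buffer
	limit   int
	flushed int // bytes pushed out so far, for the demo
}

func (w *sizeGatedWriter) add(p []byte) {
	w.buf.Write(p)
	if w.buf.Len() > w.limit {
		w.flush()
	}
}

func (w *sizeGatedWriter) flush() {
	if w.buf.Len() == 0 {
		return
	}
	w.flushed += w.buf.Len() // a real writer would fd.Write(buf.Bytes())
	w.buf.Reset()
}

func main() {
	w := &sizeGatedWriter{limit: 64}
	for i := 0; i < 10; i++ {
		w.add(make([]byte, 20)) // entries arrive in arbitrary sizes
	}
	w.flush()
	fmt.Println("bytes flushed:", w.flushed) // 200
}
```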
- if int64(buf.Len()) > vlog.db.opt.ValueLogFileSize { - if err := flushWrites(); err != nil { - return err - } - } - } - vlog.numEntriesWritten += uint32(written) - // We write to disk here so that all entries that are part of the same transaction are - // written to the same vlog file. - writeNow := - vlog.woffset()+uint32(buf.Len()) > uint32(vlog.opt.ValueLogFileSize) || - vlog.numEntriesWritten > uint32(vlog.opt.ValueLogMaxEntries) - if writeNow { - if err := toDisk(); err != nil { - return err - } - } - } - return toDisk() -} - -// Gets the logFile and acquires and RLock() for the mmap. You must call RUnlock on the file -// (if non-nil) -func (vlog *valueLog) getFileRLocked(vp valuePointer) (*logFile, error) { - vlog.filesLock.RLock() - defer vlog.filesLock.RUnlock() - ret, ok := vlog.filesMap[vp.Fid] - if !ok { - // log file has gone away, will need to retry the operation. - return nil, ErrRetry - } - - // Check for valid offset if we are reading from writable log. - maxFid := vlog.maxFid - if vp.Fid == maxFid { - currentOffset := vlog.woffset() - if vp.Offset >= currentOffset { - return nil, errors.Errorf( - "Invalid value pointer offset: %d greater than current offset: %d", - vp.Offset, currentOffset) - } - } - - ret.lock.RLock() - return ret, nil -} - -// Read reads the value log at a given location. -// TODO: Make this read private. -func (vlog *valueLog) Read(vp valuePointer, s *y.Slice) ([]byte, func(), error) { - buf, lf, err := vlog.readValueBytes(vp, s) - // log file is locked so, decide whether to lock immediately or let the caller to - // unlock it, after caller uses it. - cb := vlog.getUnlockCallback(lf) - if err != nil { - return nil, cb, err - } - - if vlog.opt.VerifyValueChecksum { - hash := crc32.New(y.CastagnoliCrcTable) - if _, err := hash.Write(buf[:len(buf)-crc32.Size]); err != nil { - runCallback(cb) - return nil, nil, errors.Wrapf(err, "failed to write hash for vp %+v", vp) - } - // Fetch checksum from the end of the buffer. - checksum := buf[len(buf)-crc32.Size:] - if hash.Sum32() != y.BytesToU32(checksum) { - runCallback(cb) - return nil, nil, errors.Wrapf(y.ErrChecksumMismatch, "value corrupted for vp: %+v", vp) - } - } - var h header - headerLen := h.Decode(buf) - kv := buf[headerLen:] - if lf.encryptionEnabled() { - kv, err = lf.decryptKV(kv, vp.Offset) - if err != nil { - return nil, cb, err - } - } - if uint32(len(kv)) < h.klen+h.vlen { - vlog.db.opt.Logger.Errorf("Invalid read: vp: %+v", vp) - return nil, nil, errors.Errorf("Invalid read: Len: %d read at:[%d:%d]", - len(kv), h.klen, h.klen+h.vlen) - } - return kv[h.klen : h.klen+h.vlen], cb, nil -} - -// getUnlockCallback will returns a function which unlock the logfile if the logfile is mmaped. -// otherwise, it unlock the logfile and return nil. -func (vlog *valueLog) getUnlockCallback(lf *logFile) func() { - if lf == nil { - return nil - } - if vlog.opt.ValueLogLoadingMode == options.MemoryMap { - return lf.lock.RUnlock - } - lf.lock.RUnlock() - return nil -} - -// readValueBytes return vlog entry slice and read locked log file. Caller should take care of -// logFile unlocking. 
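`Read` above pairs the returned bytes with an unlock callback: in memory-map mode the slice aliases the mmap, so the read lock must outlive the caller's use of the bytes and is released via the callback, while in FileIO mode the lock can drop immediately. A minimal sketch of handing out `RUnlock` as a callback, with a toy `file` type standing in for `logFile`:

```go
package main

import (
	"fmt"
	"sync"
)

type file struct {
	mu   sync.RWMutex
	data []byte
}

// readLocked returns the bytes plus an unlock callback, the pattern
// getUnlockCallback implements: when the slice aliases an mmap, the
// read lock must be held until the caller is done with the bytes.
func (f *file) readLocked() ([]byte, func()) {
	f.mu.RLock()
	return f.data, f.mu.RUnlock
}

func main() {
	f := &file{data: []byte("value bytes backed by an mmap")}
	buf, done := f.readLocked()
	fmt.Println(string(buf)) // safe: lock is still held
	done()                   // release only after the last use
}
```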
-func (vlog *valueLog) readValueBytes(vp valuePointer, s *y.Slice) ([]byte, *logFile, error) { - lf, err := vlog.getFileRLocked(vp) - if err != nil { - return nil, nil, err - } - - buf, err := lf.read(vp, s) - return buf, lf, err -} - -func (vlog *valueLog) pickLog(head valuePointer, tr trace.Trace) (files []*logFile) { - vlog.filesLock.RLock() - defer vlog.filesLock.RUnlock() - fids := vlog.sortedFids() - switch { - case len(fids) <= 1: - tr.LazyPrintf("Only one or less value log file.") - return nil - case head.Fid == 0: - tr.LazyPrintf("Head pointer is at zero.") - return nil - } - - // Pick a candidate that contains the largest amount of discardable data - candidate := struct { - fid uint32 - discard int64 - }{math.MaxUint32, 0} - vlog.lfDiscardStats.RLock() - for _, fid := range fids { - if fid >= head.Fid { - break - } - if vlog.lfDiscardStats.m[fid] > candidate.discard { - candidate.fid = fid - candidate.discard = vlog.lfDiscardStats.m[fid] - } - } - vlog.lfDiscardStats.RUnlock() - - if candidate.fid != math.MaxUint32 { // Found a candidate - tr.LazyPrintf("Found candidate via discard stats: %v", candidate) - files = append(files, vlog.filesMap[candidate.fid]) - } else { - tr.LazyPrintf("Could not find candidate via discard stats. Randomly picking one.") - } - - // Fallback to randomly picking a log file - var idxHead int - for i, fid := range fids { - if fid == head.Fid { - idxHead = i - break - } - } - if idxHead == 0 { // Not found or first file - tr.LazyPrintf("Could not find any file.") - return nil - } - idx := rand.Intn(idxHead) // Don’t include head.Fid. We pick a random file before it. - if idx > 0 { - idx = rand.Intn(idx + 1) // Another level of rand to favor smaller fids. - } - tr.LazyPrintf("Randomly chose fid: %d", fids[idx]) - files = append(files, vlog.filesMap[fids[idx]]) - return files -} - -func discardEntry(e Entry, vs y.ValueStruct, db *DB) bool { - if vs.Version != y.ParseTs(e.Key) { - // Version not found. Discard. - return true - } - if isDeletedOrExpired(vs.Meta, vs.ExpiresAt) { - return true - } - if (vs.Meta & bitValuePointer) == 0 { - // Key also stores the value in LSM. Discard. - return true - } - if (vs.Meta & bitFinTxn) > 0 { - // Just a txn finish entry. Discard. - return true - } - if bytes.HasPrefix(e.Key, badgerMove) { - // Verify the actual key entry without the badgerPrefix has not been deleted. - // If this is not done the badgerMove entry will be kept forever moving from - // vlog to vlog during rewrites. - avs, err := db.get(e.Key[len(badgerMove):]) - if err != nil { - return false - } - return avs.Version == 0 - } - return false -} - -func (vlog *valueLog) doRunGC(lf *logFile, discardRatio float64, tr trace.Trace) (err error) { - // Update stats before exiting - defer func() { - if err == nil { - vlog.lfDiscardStats.Lock() - delete(vlog.lfDiscardStats.m, lf.fid) - vlog.lfDiscardStats.Unlock() - } - }() - - type reason struct { - total float64 - discard float64 - count int - } - - fi, err := lf.fd.Stat() - if err != nil { - tr.LazyPrintf("Error while finding file size: %v", err) - tr.SetError() - return err - } - - // Set up the sampling window sizes. - sizeWindow := float64(fi.Size()) * 0.1 // 10% of the file as window. - sizeWindowM := sizeWindow / (1 << 20) // in MBs. - countWindow := int(float64(vlog.opt.ValueLogMaxEntries) * 0.01) // 1% of num entries. - tr.LazyPrintf("Size window: %5.2f. Count window: %d.", sizeWindow, countWindow) - - // Pick a random start point for the log. 
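Before the sampling below begins, `pickLog` (earlier in this hunk) consults `lfDiscardStats` and takes the file, strictly older than the head pointer, with the most discardable bytes, falling back to a random pick otherwise. A minimal sketch of the stats-driven selection:

```go
package main

import (
	"fmt"
	"math"
)

// pickByDiscard scans per-file discard stats, as pickLog does, and
// returns the fid (strictly below headFid) with the most dead bytes,
// or false if no file has any recorded discard.
func pickByDiscard(stats map[uint32]int64, fids []uint32, headFid uint32) (uint32, bool) {
	best := uint32(math.MaxUint32)
	var bestDiscard int64
	for _, fid := range fids { // fids are sorted ascending
		if fid >= headFid {
			break // never GC at or past the head pointer
		}
		if stats[fid] > bestDiscard {
			best, bestDiscard = fid, stats[fid]
		}
	}
	return best, best != math.MaxUint32
}

func main() {
	stats := map[uint32]int64{1: 4 << 20, 2: 64 << 20, 3: 1 << 20}
	fid, ok := pickByDiscard(stats, []uint32{1, 2, 3, 4}, 4)
	fmt.Println(fid, ok) // 2 true: the file with ~64MB discardable
}
```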
- skipFirstM := float64(rand.Int63n(fi.Size())) // Pick a random starting location. - skipFirstM -= sizeWindow // Avoid hitting EOF by moving back by window. - skipFirstM /= float64(mi) // Convert to MBs. - tr.LazyPrintf("Skip first %5.2f MB of file of size: %d MB", skipFirstM, fi.Size()/mi) - var skipped float64 - - var r reason - start := time.Now() - y.AssertTrue(vlog.db != nil) - s := new(y.Slice) - var numIterations int - _, err = vlog.iterate(lf, 0, func(e Entry, vp valuePointer) error { - numIterations++ - esz := float64(vp.Len) / (1 << 20) // in MBs. - if skipped < skipFirstM { - skipped += esz - return nil - } - - // Sample until we reach the window sizes or exceed 10 seconds. - if r.count > countWindow { - tr.LazyPrintf("Stopping sampling after %d entries.", countWindow) - return errStop - } - if r.total > sizeWindowM { - tr.LazyPrintf("Stopping sampling after reaching window size.") - return errStop - } - if time.Since(start) > 10*time.Second { - tr.LazyPrintf("Stopping sampling after 10 seconds.") - return errStop - } - r.total += esz - r.count++ - - vs, err := vlog.db.get(e.Key) - if err != nil { - return err - } - if discardEntry(e, vs, vlog.db) { - r.discard += esz - return nil - } - - // Value is still present in value log. - y.AssertTrue(len(vs.Value) > 0) - vp.Decode(vs.Value) - - if vp.Fid > lf.fid { - // Value is present in a later log. Discard. - r.discard += esz - return nil - } - if vp.Offset > e.offset { - // Value is present in a later offset, but in the same log. - r.discard += esz - return nil - } - if vp.Fid == lf.fid && vp.Offset == e.offset { - // This is still the active entry. This would need to be rewritten. - - } else { - vlog.opt.Debugf("Reason=%+v\n", r) - buf, lf, err := vlog.readValueBytes(vp, s) - // we need to decide, whether to unlock the lock file immediately based on the - // loading mode. getUnlockCallback will take care of it. - cb := vlog.getUnlockCallback(lf) - if err != nil { - runCallback(cb) - return errStop - } - ne, err := lf.decodeEntry(buf, vp.Offset) - if err != nil { - runCallback(cb) - return errStop - } - ne.print("Latest Entry Header in LSM") - e.print("Latest Entry in Log") - runCallback(cb) - return errors.Errorf("This shouldn't happen. Latest Pointer:%+v. Meta:%v.", - vp, vs.Meta) - } - return nil - }) - - if err != nil { - tr.LazyPrintf("Error while iterating for RunGC: %v", err) - tr.SetError() - return err - } - tr.LazyPrintf("Fid: %d. Skipped: %5.2fMB Num iterations: %d. Data status=%+v\n", - lf.fid, skipped, numIterations, r) - - // If we couldn't sample at least a 1000 KV pairs or at least 75% of the window size, - // and what we can discard is below the threshold, we should skip the rewrite. - if (r.count < countWindow && r.total < sizeWindowM*0.75) || r.discard < discardRatio*r.total { - tr.LazyPrintf("Skipping GC on fid: %d", lf.fid) - return ErrNoRewrite - } - if err = vlog.rewrite(lf, tr); err != nil { - return err - } - tr.LazyPrintf("Done rewriting.") - return nil -} - -func (vlog *valueLog) waitOnGC(lc *y.Closer) { - defer lc.Done() - - <-lc.HasBeenClosed() // Wait for lc to be closed. - - // Block any GC in progress to finish, and don't allow any more writes to runGC by filling up - // the channel of size 1. - vlog.garbageCh <- struct{}{} -} - -func (vlog *valueLog) runGC(discardRatio float64, head valuePointer) error { - select { - case vlog.garbageCh <- struct{}{}: - // Pick a log file for GC. 
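`doRunGC` above samples roughly 10% of the file by size and 1% of max entries by count (capped at 10 seconds) and only rewrites when the sample is big enough to trust and the discardable fraction clears `discardRatio`. The final gate, restated as a small sketch:

```go
package main

import "fmt"

type sample struct {
	total   float64 // MB sampled
	discard float64 // MB discardable
	count   int     // entries sampled
}

// shouldRewrite applies doRunGC's gate: skip unless we sampled enough
// (either the count window or 75% of the size window) and the dead
// fraction clears the configured discard ratio.
func shouldRewrite(r sample, countWindow int, sizeWindowMB, discardRatio float64) bool {
	if r.count < countWindow && r.total < sizeWindowMB*0.75 {
		return false // sample too small to trust
	}
	return r.discard >= discardRatio*r.total
}

func main() {
	r := sample{total: 100, discard: 61, count: 12000}
	fmt.Println(shouldRewrite(r, 10000, 102.4, 0.5)) // true: 61% dead
}
```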
- tr := trace.New("Badger.ValueLog", "GC") - tr.SetMaxEvents(100) - defer func() { - tr.Finish() - <-vlog.garbageCh - }() - - var err error - files := vlog.pickLog(head, tr) - if len(files) == 0 { - tr.LazyPrintf("PickLog returned zero results.") - return ErrNoRewrite - } - tried := make(map[uint32]bool) - for _, lf := range files { - if _, done := tried[lf.fid]; done { - continue - } - tried[lf.fid] = true - err = vlog.doRunGC(lf, discardRatio, tr) - if err == nil { - return vlog.deleteMoveKeysFor(lf.fid, tr) - } - } - return err - default: - return ErrRejected - } -} - -func (vlog *valueLog) updateDiscardStats(stats map[uint32]int64) { - if vlog.opt.InMemory { - return - } - - select { - case vlog.lfDiscardStats.flushChan <- stats: - default: - vlog.opt.Warningf("updateDiscardStats called: discard stats flushChan full, " + - "returning without pushing to flushChan") - } -} - -func (vlog *valueLog) flushDiscardStats() { - defer vlog.lfDiscardStats.closer.Done() - - mergeStats := func(stats map[uint32]int64) ([]byte, error) { - vlog.lfDiscardStats.Lock() - defer vlog.lfDiscardStats.Unlock() - for fid, count := range stats { - vlog.lfDiscardStats.m[fid] += count - vlog.lfDiscardStats.updatesSinceFlush++ - } - - if vlog.lfDiscardStats.updatesSinceFlush > discardStatsFlushThreshold { - encodedDS, err := json.Marshal(vlog.lfDiscardStats.m) - if err != nil { - return nil, err - } - vlog.lfDiscardStats.updatesSinceFlush = 0 - return encodedDS, nil - } - return nil, nil - } - - process := func(stats map[uint32]int64) error { - encodedDS, err := mergeStats(stats) - if err != nil || encodedDS == nil { - return err - } - - entries := []*Entry{{ - Key: y.KeyWithTs(lfDiscardStatsKey, 1), - Value: encodedDS, - }} - req, err := vlog.db.sendToWriteCh(entries) - // No special handling of ErrBlockedWrites is required as err is just logged in - // for loop below. - if err != nil { - return errors.Wrapf(err, "failed to push discard stats to write channel") - } - return req.Wait() - } - - closer := vlog.lfDiscardStats.closer - for { - select { - case <-closer.HasBeenClosed(): - // For simplicity just return without processing already present in stats in flushChan. - return - case stats := <-vlog.lfDiscardStats.flushChan: - if err := process(stats); err != nil { - vlog.opt.Errorf("unable to process discardstats with error: %s", err) - } - } - } -} - -// populateDiscardStats populates vlog.lfDiscardStats. -// This function will be called while initializing valueLog. -func (vlog *valueLog) populateDiscardStats() error { - key := y.KeyWithTs(lfDiscardStatsKey, math.MaxUint64) - var statsMap map[uint32]int64 - var val []byte - var vp valuePointer - for { - vs, err := vlog.db.get(key) - if err != nil { - return err - } - // Value doesn't exist. - if vs.Meta == 0 && len(vs.Value) == 0 { - vlog.opt.Debugf("Value log discard stats empty") - return nil - } - vp.Decode(vs.Value) - // Entry stored in LSM tree. - if vs.Meta&bitValuePointer == 0 { - val = y.SafeCopy(val, vs.Value) - break - } - // Read entry from value log. - result, cb, err := vlog.Read(vp, new(y.Slice)) - runCallback(cb) - val = y.SafeCopy(val, result) - // The result is stored in val. We can break the loop from here. - if err == nil { - break - } - if err != ErrRetry { - return err - } - // If we're at this point it means we haven't found the value yet and if the current key has - // badger move prefix, we should break from here since we've already tried the original key - // and the key with move prefix. 
"val" would be empty since we haven't found the value yet. - if bytes.HasPrefix(key, badgerMove) { - break - } - // If we're at this point it means the discard stats key was moved by the GC and the actual - // entry is the one prefixed by badger move key. - // Prepend existing key with badger move and search for the key. - key = append(badgerMove, key...) - } - - if len(val) == 0 { - return nil - } - if err := json.Unmarshal(val, &statsMap); err != nil { - return errors.Wrapf(err, "failed to unmarshal discard stats") - } - vlog.opt.Debugf("Value Log Discard stats: %v", statsMap) - vlog.lfDiscardStats.flushChan <- statsMap - return nil -} diff --git a/vendor/github.com/dgraph-io/badger/v2/y/checksum.go b/vendor/github.com/dgraph-io/badger/v2/y/checksum.go deleted file mode 100644 index ab202484..00000000 --- a/vendor/github.com/dgraph-io/badger/v2/y/checksum.go +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright 2019 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package y - -import ( - "hash/crc32" - - "github.com/dgraph-io/badger/v2/pb" - - "github.com/cespare/xxhash" - "github.com/pkg/errors" -) - -// ErrChecksumMismatch is returned at checksum mismatch. -var ErrChecksumMismatch = errors.New("checksum mismatch") - -// CalculateChecksum calculates checksum for data using ct checksum type. -func CalculateChecksum(data []byte, ct pb.Checksum_Algorithm) uint64 { - switch ct { - case pb.Checksum_CRC32C: - return uint64(crc32.Checksum(data, CastagnoliCrcTable)) - case pb.Checksum_XXHash64: - return xxhash.Sum64(data) - default: - panic("checksum type not supported") - } -} - -// VerifyChecksum validates the checksum for the data against the given expected checksum. -func VerifyChecksum(data []byte, expected *pb.Checksum) error { - actual := CalculateChecksum(data, expected.Algo) - if actual != expected.Sum { - return Wrapf(ErrChecksumMismatch, "actual: %d, expected: %d", actual, expected.Sum) - } - return nil -} diff --git a/vendor/github.com/dgraph-io/badger/v2/y/encrypt.go b/vendor/github.com/dgraph-io/badger/v2/y/encrypt.go deleted file mode 100644 index dbfe019f..00000000 --- a/vendor/github.com/dgraph-io/badger/v2/y/encrypt.go +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright 2019 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package y - -import ( - "crypto/aes" - "crypto/cipher" - "crypto/rand" -) - -// XORBlock encrypts the given data with AES and XOR's with IV. 
-// Can be used for both encryption and decryption. IV is of -// AES block size. -func XORBlock(src, key, iv []byte) ([]byte, error) { - block, err := aes.NewCipher(key) - if err != nil { - return nil, err - } - stream := cipher.NewCTR(block, iv) - dst := make([]byte, len(src)) - stream.XORKeyStream(dst, src) - return dst, nil -} - -// GenerateIV generates IV. -func GenerateIV() ([]byte, error) { - iv := make([]byte, aes.BlockSize) - _, err := rand.Read(iv) - return iv, err -} diff --git a/vendor/github.com/dgraph-io/badger/v2/y/error.go b/vendor/github.com/dgraph-io/badger/v2/y/error.go deleted file mode 100644 index 59bb2835..00000000 --- a/vendor/github.com/dgraph-io/badger/v2/y/error.go +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Copyright 2017 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package y - -// This file contains some functions for error handling. Note that we are moving -// towards using x.Trace, i.e., rpc tracing using net/tracer. But for now, these -// functions are useful for simple checks logged on one machine. -// Some common use cases are: -// (1) You receive an error from external lib, and would like to check/log fatal. -// For this, use x.Check, x.Checkf. These will check for err != nil, which is -// more common in Go. If you want to check for boolean being true, use -// x.Assert, x.Assertf. -// (2) You receive an error from external lib, and would like to pass on with some -// stack trace information. In this case, use x.Wrap or x.Wrapf. -// (3) You want to generate a new error with stack trace info. Use x.Errorf. - -import ( - "fmt" - "log" - - "github.com/pkg/errors" -) - -var debugMode = true - -// Check logs fatal if err != nil. -func Check(err error) { - if err != nil { - log.Fatalf("%+v", Wrap(err)) - } -} - -// Check2 acts as convenience wrapper around Check, using the 2nd argument as error. -func Check2(_ interface{}, err error) { - Check(err) -} - -// AssertTrue asserts that b is true. Otherwise, it would log fatal. -func AssertTrue(b bool) { - if !b { - log.Fatalf("%+v", errors.Errorf("Assert failed")) - } -} - -// AssertTruef is AssertTrue with extra info. -func AssertTruef(b bool, format string, args ...interface{}) { - if !b { - log.Fatalf("%+v", errors.Errorf(format, args...)) - } -} - -// Wrap wraps errors from external lib. -func Wrap(err error) error { - if !debugMode { - return err - } - return errors.Wrap(err, "") -} - -// Wrapf is Wrap with extra info. -func Wrapf(err error, format string, args ...interface{}) error { - if !debugMode { - if err == nil { - return nil - } - return fmt.Errorf(format+" error: %+v", append(args, err)...) - } - return errors.Wrapf(err, format, args...) -} diff --git a/vendor/github.com/dgraph-io/badger/v2/y/event_log.go b/vendor/github.com/dgraph-io/badger/v2/y/event_log.go deleted file mode 100644 index ba9dcb1f..00000000 --- a/vendor/github.com/dgraph-io/badger/v2/y/event_log.go +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright 2019 Dgraph Labs, Inc. 
and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package y - -import "golang.org/x/net/trace" - -var ( - NoEventLog trace.EventLog = nilEventLog{} -) - -type nilEventLog struct{} - -func (nel nilEventLog) Printf(format string, a ...interface{}) {} - -func (nel nilEventLog) Errorf(format string, a ...interface{}) {} - -func (nel nilEventLog) Finish() {} diff --git a/vendor/github.com/dgraph-io/badger/v2/y/file_dsync.go b/vendor/github.com/dgraph-io/badger/v2/y/file_dsync.go deleted file mode 100644 index ea4d9ab2..00000000 --- a/vendor/github.com/dgraph-io/badger/v2/y/file_dsync.go +++ /dev/null @@ -1,25 +0,0 @@ -// +build !dragonfly,!freebsd,!windows,!plan9 - -/* - * Copyright 2017 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package y - -import "golang.org/x/sys/unix" - -func init() { - datasyncFileFlag = unix.O_DSYNC -} diff --git a/vendor/github.com/dgraph-io/badger/v2/y/file_nodsync.go b/vendor/github.com/dgraph-io/badger/v2/y/file_nodsync.go deleted file mode 100644 index 54a2184e..00000000 --- a/vendor/github.com/dgraph-io/badger/v2/y/file_nodsync.go +++ /dev/null @@ -1,25 +0,0 @@ -// +build dragonfly freebsd windows plan9 - -/* - * Copyright 2017 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package y - -import "syscall" - -func init() { - datasyncFileFlag = syscall.O_SYNC -} diff --git a/vendor/github.com/dgraph-io/badger/v2/y/iterator.go b/vendor/github.com/dgraph-io/badger/v2/y/iterator.go deleted file mode 100644 index 6d0f677c..00000000 --- a/vendor/github.com/dgraph-io/badger/v2/y/iterator.go +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Copyright 2017 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package y - -import ( - "bytes" - "encoding/binary" -) - -// ValueStruct represents the value info that can be associated with a key, but also the internal -// Meta field. -type ValueStruct struct { - Meta byte - UserMeta byte - ExpiresAt uint64 - Value []byte - - Version uint64 // This field is not serialized. Only for internal usage. -} - -func sizeVarint(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} - -// EncodedSize is the size of the ValueStruct when encoded -func (v *ValueStruct) EncodedSize() uint32 { - sz := len(v.Value) + 2 // meta, usermeta. - if v.ExpiresAt == 0 { - return uint32(sz + 1) - } - - enc := sizeVarint(v.ExpiresAt) - return uint32(sz + enc) -} - -// Decode uses the length of the slice to infer the length of the Value field. -func (v *ValueStruct) Decode(b []byte) { - v.Meta = b[0] - v.UserMeta = b[1] - var sz int - v.ExpiresAt, sz = binary.Uvarint(b[2:]) - v.Value = b[2+sz:] -} - -// Encode expects a slice of length at least v.EncodedSize(). -func (v *ValueStruct) Encode(b []byte) { - b[0] = v.Meta - b[1] = v.UserMeta - sz := binary.PutUvarint(b[2:], v.ExpiresAt) - copy(b[2+sz:], v.Value) -} - -// EncodeTo should be kept in sync with the Encode function above. The reason -// this function exists is to avoid creating byte arrays per key-value pair in -// table/builder.go. -func (v *ValueStruct) EncodeTo(buf *bytes.Buffer) { - buf.WriteByte(v.Meta) - buf.WriteByte(v.UserMeta) - var enc [binary.MaxVarintLen64]byte - sz := binary.PutUvarint(enc[:], v.ExpiresAt) - buf.Write(enc[:sz]) - buf.Write(v.Value) -} - -// Iterator is an interface for a basic iterator. -type Iterator interface { - Next() - Rewind() - Seek(key []byte) - Key() []byte - Value() ValueStruct - Valid() bool - - // All iterators should be closed so that file garbage collection works. - Close() error -} diff --git a/vendor/github.com/dgraph-io/badger/v2/y/metrics.go b/vendor/github.com/dgraph-io/badger/v2/y/metrics.go deleted file mode 100644 index 742e1aea..00000000 --- a/vendor/github.com/dgraph-io/badger/v2/y/metrics.go +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Copyright (C) 2017 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package y - -import "expvar" - -var ( - // LSMSize has size of the LSM in bytes - LSMSize *expvar.Map - // VlogSize has size of the value log in bytes - VlogSize *expvar.Map - // PendingWrites tracks the number of pending writes. 
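// Editor's aside (illustrative sketch, not part of the vendored file): since these
// counters are standard expvar variables, they can be read back by the registered
// name once the init below has run, e.g. (assumes "expvar" and "fmt" are imported):
//
//	if v := expvar.Get("badger_v2_gets_total"); v != nil {
//		fmt.Println("gets so far:", v.String())
//	}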
- PendingWrites *expvar.Map - - // These are cumulative - - // NumReads has cumulative number of reads - NumReads *expvar.Int - // NumWrites has cumulative number of writes - NumWrites *expvar.Int - // NumBytesRead has cumulative number of bytes read - NumBytesRead *expvar.Int - // NumBytesWritten has cumulative number of bytes written - NumBytesWritten *expvar.Int - // NumLSMGets is number of LSM gets - NumLSMGets *expvar.Map - // NumLSMBloomHits is number of LSM bloom hits - NumLSMBloomHits *expvar.Map - // NumGets is number of gets - NumGets *expvar.Int - // NumPuts is number of puts - NumPuts *expvar.Int - // NumBlockedPuts is number of blocked puts - NumBlockedPuts *expvar.Int - // NumMemtableGets is number of memtable gets - NumMemtableGets *expvar.Int -) - -// These variables are global and have cumulative values for all kv stores. -func init() { - NumReads = expvar.NewInt("badger_v2_disk_reads_total") - NumWrites = expvar.NewInt("badger_v2_disk_writes_total") - NumBytesRead = expvar.NewInt("badger_v2_read_bytes") - NumBytesWritten = expvar.NewInt("badger_v2_written_bytes") - NumLSMGets = expvar.NewMap("badger_v2_lsm_level_gets_total") - NumLSMBloomHits = expvar.NewMap("badger_v2_lsm_bloom_hits_total") - NumGets = expvar.NewInt("badger_v2_gets_total") - NumPuts = expvar.NewInt("badger_v2_puts_total") - NumBlockedPuts = expvar.NewInt("badger_v2_blocked_puts_total") - NumMemtableGets = expvar.NewInt("badger_v2_memtable_gets_total") - LSMSize = expvar.NewMap("badger_v2_lsm_size_bytes") - VlogSize = expvar.NewMap("badger_v2_vlog_size_bytes") - PendingWrites = expvar.NewMap("badger_v2_pending_writes_total") -} diff --git a/vendor/github.com/dgraph-io/badger/v2/y/mmap.go b/vendor/github.com/dgraph-io/badger/v2/y/mmap.go deleted file mode 100644 index 4a477af3..00000000 --- a/vendor/github.com/dgraph-io/badger/v2/y/mmap.go +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright 2019 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package y - -import ( - "os" -) - -// Mmap uses the mmap system call to memory-map a file. If writable is true, -// memory protection of the pages is set so that they may be written to as well. -func Mmap(fd *os.File, writable bool, size int64) ([]byte, error) { - return mmap(fd, writable, size) -} - -// Munmap unmaps a previously mapped slice. -func Munmap(b []byte) error { - return munmap(b) -} - -// Madvise uses the madvise system call to give advice about the use of memory -// when using a slice that is memory-mapped to a file. Set the readahead flag to -// false if page references are expected in random order. -func Madvise(b []byte, readahead bool) error { - return madvise(b, readahead) -} diff --git a/vendor/github.com/dgraph-io/badger/v2/y/mmap_darwin.go b/vendor/github.com/dgraph-io/badger/v2/y/mmap_darwin.go deleted file mode 100644 index 10b756ba..00000000 --- a/vendor/github.com/dgraph-io/badger/v2/y/mmap_darwin.go +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Copyright 2019 Dgraph Labs, Inc.
and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package y - -import ( - "os" - "syscall" - "unsafe" - - "golang.org/x/sys/unix" -) - -// Mmap uses the mmap system call to memory-map a file. If writable is true, -// memory protection of the pages is set so that they may be written to as well. -func mmap(fd *os.File, writable bool, size int64) ([]byte, error) { - mtype := unix.PROT_READ - if writable { - mtype |= unix.PROT_WRITE - } - return unix.Mmap(int(fd.Fd()), 0, int(size), mtype, unix.MAP_SHARED) -} - -// Munmap unmaps a previously mapped slice. -func munmap(b []byte) error { - return unix.Munmap(b) -} - -// This is required because the unix package does not support the madvise system call on OS X. -func madvise(b []byte, readahead bool) error { - advice := unix.MADV_NORMAL - if !readahead { - advice = unix.MADV_RANDOM - } - - _, _, e1 := syscall.Syscall(syscall.SYS_MADVISE, uintptr(unsafe.Pointer(&b[0])), - uintptr(len(b)), uintptr(advice)) - if e1 != 0 { - return e1 - } - return nil -} diff --git a/vendor/github.com/dgraph-io/badger/v2/y/mmap_plan9.go b/vendor/github.com/dgraph-io/badger/v2/y/mmap_plan9.go deleted file mode 100644 index 21db76bf..00000000 --- a/vendor/github.com/dgraph-io/badger/v2/y/mmap_plan9.go +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright 2020 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package y - -import ( - "os" - "syscall" -) - -// Mmap uses the mmap system call to memory-map a file. If writable is true, -// memory protection of the pages is set so that they may be written to as well. -func mmap(fd *os.File, writable bool, size int64) ([]byte, error) { - return nil, syscall.EPLAN9 -} - -// Munmap unmaps a previously mapped slice. -func munmap(b []byte) error { - return syscall.EPLAN9 -} - -// Madvise uses the madvise system call to give advice about the use of memory -// when using a slice that is memory-mapped to a file. Set the readahead flag to -// false if page references are expected in random order. -func madvise(b []byte, readahead bool) error { - return syscall.EPLAN9 -} diff --git a/vendor/github.com/dgraph-io/badger/v2/y/mmap_unix.go b/vendor/github.com/dgraph-io/badger/v2/y/mmap_unix.go deleted file mode 100644 index 003f5972..00000000 --- a/vendor/github.com/dgraph-io/badger/v2/y/mmap_unix.go +++ /dev/null @@ -1,51 +0,0 @@ -// +build !windows,!darwin,!plan9 - -/* - * Copyright 2019 Dgraph Labs, Inc.
and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package y - -import ( - "os" - - "golang.org/x/sys/unix" -) - -// Mmap uses the mmap system call to memory-map a file. If writable is true, -// memory protection of the pages is set so that they may be written to as well. -func mmap(fd *os.File, writable bool, size int64) ([]byte, error) { - mtype := unix.PROT_READ - if writable { - mtype |= unix.PROT_WRITE - } - return unix.Mmap(int(fd.Fd()), 0, int(size), mtype, unix.MAP_SHARED) -} - -// Munmap unmaps a previously mapped slice. -func munmap(b []byte) error { - return unix.Munmap(b) -} - -// Madvise uses the madvise system call to give advice about the use of memory -// when using a slice that is memory-mapped to a file. Set the readahead flag to -// false if page references are expected in random order. -func madvise(b []byte, readahead bool) error { - flags := unix.MADV_NORMAL - if !readahead { - flags = unix.MADV_RANDOM - } - return unix.Madvise(b, flags) -} diff --git a/vendor/github.com/dgraph-io/badger/v2/y/mmap_windows.go b/vendor/github.com/dgraph-io/badger/v2/y/mmap_windows.go deleted file mode 100644 index b2419af9..00000000 --- a/vendor/github.com/dgraph-io/badger/v2/y/mmap_windows.go +++ /dev/null @@ -1,91 +0,0 @@ -// +build windows - -/* - * Copyright 2019 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package y - -import ( - "fmt" - "os" - "syscall" - "unsafe" -) - -func mmap(fd *os.File, write bool, size int64) ([]byte, error) { - protect := syscall.PAGE_READONLY - access := syscall.FILE_MAP_READ - - if write { - protect = syscall.PAGE_READWRITE - access = syscall.FILE_MAP_WRITE - } - fi, err := fd.Stat() - if err != nil { - return nil, err - } - - // On Windows, we cannot mmap a file more than its actual size. - // So truncate the file to the size of the mmap. - if fi.Size() < size { - if err := fd.Truncate(size); err != nil { - return nil, fmt.Errorf("truncate: %s", err) - } - } - - // Open a file mapping handle. - sizelo := uint32(size >> 32) - sizehi := uint32(size) & 0xffffffff - - handler, err := syscall.CreateFileMapping(syscall.Handle(fd.Fd()), nil, - uint32(protect), sizelo, sizehi, nil) - if err != nil { - return nil, os.NewSyscallError("CreateFileMapping", err) - } - - // Create the memory map. - addr, err := syscall.MapViewOfFile(handler, uint32(access), 0, 0, uintptr(size)) - if addr == 0 { - return nil, os.NewSyscallError("MapViewOfFile", err) - } - - // Close mapping handle.
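// Editor's note (added comment, based on documented Win32 semantics): a mapped
// view holds its own reference to the file-mapping object, so the handle can be
// closed here without invalidating the view created above.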
- if err := syscall.CloseHandle(syscall.Handle(handler)); err != nil { - return nil, os.NewSyscallError("CloseHandle", err) - } - - // Slice memory layout - // Copied this snippet from golang/sys package - var sl = struct { - addr uintptr - len int - cap int - }{addr, int(size), int(size)} - - // Use unsafe to turn sl into a []byte. - data := *(*[]byte)(unsafe.Pointer(&sl)) - - return data, nil -} - -func munmap(b []byte) error { - return syscall.UnmapViewOfFile(uintptr(unsafe.Pointer(&b[0]))) -} - -func madvise(b []byte, readahead bool) error { - // Do Nothing. We don’t care about this setting on Windows - return nil -} diff --git a/vendor/github.com/dgraph-io/badger/v2/y/watermark.go b/vendor/github.com/dgraph-io/badger/v2/y/watermark.go deleted file mode 100644 index 1462cb73..00000000 --- a/vendor/github.com/dgraph-io/badger/v2/y/watermark.go +++ /dev/null @@ -1,238 +0,0 @@ -/* - * Copyright 2016-2018 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package y - -import ( - "container/heap" - "context" - "sync/atomic" -) - -type uint64Heap []uint64 - -func (u uint64Heap) Len() int { return len(u) } -func (u uint64Heap) Less(i, j int) bool { return u[i] < u[j] } -func (u uint64Heap) Swap(i, j int) { u[i], u[j] = u[j], u[i] } -func (u *uint64Heap) Push(x interface{}) { *u = append(*u, x.(uint64)) } -func (u *uint64Heap) Pop() interface{} { - old := *u - n := len(old) - x := old[n-1] - *u = old[0 : n-1] - return x -} - -// mark contains one or more indices, along with a done boolean to indicate the -// status of the index: begin or done. It also contains waiters, who could be -// waiting for the watermark to reach >= a certain index. -type mark struct { - // Either this is an (index, waiter) pair or (index, done) or (indices, done). - index uint64 - waiter chan struct{} - indices []uint64 - done bool // Set to true if the index is done. -} - -// WaterMark is used to keep track of the minimum un-finished index. Typically, an index k becomes -// finished or "done" according to a WaterMark once Done(k) has been called -// 1. as many times as Begin(k) has, AND -// 2. a positive number of times. -// -// An index may also become "done" by calling SetDoneUntil at a time such that it is not -// inter-mingled with Begin/Done calls. -// -// Since doneUntil and lastIndex addresses are passed to sync/atomic packages, we ensure that they -// are 64-bit aligned by putting them at the beginning of the structure. -type WaterMark struct { - doneUntil uint64 - lastIndex uint64 - Name string - markCh chan mark -} - -// Init initializes a WaterMark struct. MUST be called before using it. -func (w *WaterMark) Init(closer *Closer) { - w.markCh = make(chan mark, 100) - go w.process(closer) -} - -// Begin sets the last index to the given value. -func (w *WaterMark) Begin(index uint64) { - atomic.StoreUint64(&w.lastIndex, index) - w.markCh <- mark{index: index, done: false} -} - -// BeginMany works like Begin but accepts multiple indices.
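// Editor's sketch (illustrative only, not from the original file): the typical
// pairing of the calls defined around here, assuming a Closer with an initial
// count of 1 for the process goroutine started by Init, and "context" imported:
//
//	var w WaterMark
//	w.Init(NewCloser(1))
//	w.Begin(7)                // index 7 is now pending
//	go func() { w.Done(7) }() // balance the Begin elsewhere
//	// Blocks until index 7 is done, or ctx is cancelled:
//	_ = w.WaitForMark(context.Background(), 7)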
-func (w *WaterMark) BeginMany(indices []uint64) { - atomic.StoreUint64(&w.lastIndex, indices[len(indices)-1]) - w.markCh <- mark{index: 0, indices: indices, done: false} -} - -// Done sets a single index as done. -func (w *WaterMark) Done(index uint64) { - w.markCh <- mark{index: index, done: true} -} - -// DoneMany works like Done but accepts multiple indices. -func (w *WaterMark) DoneMany(indices []uint64) { - w.markCh <- mark{index: 0, indices: indices, done: true} -} - -// DoneUntil returns the maximum index that has the property that all indices -// less than or equal to it are done. -func (w *WaterMark) DoneUntil() uint64 { - return atomic.LoadUint64(&w.doneUntil) -} - -// SetDoneUntil sets the maximum index that has the property that all indices -// less than or equal to it are done. -func (w *WaterMark) SetDoneUntil(val uint64) { - atomic.StoreUint64(&w.doneUntil, val) -} - -// LastIndex returns the last index for which Begin has been called. -func (w *WaterMark) LastIndex() uint64 { - return atomic.LoadUint64(&w.lastIndex) -} - -// WaitForMark waits until the given index is marked as done. -func (w *WaterMark) WaitForMark(ctx context.Context, index uint64) error { - if w.DoneUntil() >= index { - return nil - } - waitCh := make(chan struct{}) - w.markCh <- mark{index: index, waiter: waitCh} - - select { - case <-ctx.Done(): - return ctx.Err() - case <-waitCh: - return nil - } -} - -// process is used to process the Mark channel. This is not thread-safe, -// so only run one goroutine for process. One is sufficient, because -// all goroutine ops use purely memory and CPU. -// Each index has to emit at least one begin watermark in serial order, otherwise waiters -// can get blocked indefinitely. Example: we had a watermark at 100 and a waiter at 101; -// if no watermark is emitted at index 101, the waiter would get stuck indefinitely, as it -// can't decide whether the task at 101 has decided not to emit a watermark or simply didn't get -// scheduled yet. -func (w *WaterMark) process(closer *Closer) { - defer closer.Done() - - var indices uint64Heap - // pending maps raft proposal index to the number of pending mutations for this proposal. - pending := make(map[uint64]int) - waiters := make(map[uint64][]chan struct{}) - - heap.Init(&indices) - - processOne := func(index uint64, done bool) { - // If not already done, then set. Otherwise, don't undo a done entry. - prev, present := pending[index] - if !present { - heap.Push(&indices, index) - } - - delta := 1 - if done { - delta = -1 - } - pending[index] = prev + delta - - // Update mark by going through all indices in order and checking if they have - // been done. Stop at the first index that isn't done. - doneUntil := w.DoneUntil() - if doneUntil > index { - AssertTruef(false, "Name: %s doneUntil: %d. Index: %d", w.Name, doneUntil, index) - } - - until := doneUntil - loops := 0 - - for len(indices) > 0 { - min := indices[0] - if done := pending[min]; done > 0 { - break // len(indices) will be > 0. - } - // Even if done is called multiple times causing it to become - // negative, we should still pop the index. - heap.Pop(&indices) - delete(pending, min) - until = min - loops++ - } - - if until != doneUntil { - AssertTrue(atomic.CompareAndSwapUint64(&w.doneUntil, doneUntil, until)) - } - - notifyAndRemove := func(idx uint64, toNotify []chan struct{}) { - for _, ch := range toNotify { - close(ch) - } - delete(waiters, idx) // Release the memory back.
- } - - if until-doneUntil <= uint64(len(waiters)) { - // Issue #908 showed that if doneUntil is close to 2^60, while until is zero, this loop - // can hog up CPU just iterating over integers creating a busy-wait loop. So, only do - // this path if until - doneUntil is less than the number of waiters. - for idx := doneUntil + 1; idx <= until; idx++ { - if toNotify, ok := waiters[idx]; ok { - notifyAndRemove(idx, toNotify) - } - } - } else { - for idx, toNotify := range waiters { - if idx <= until { - notifyAndRemove(idx, toNotify) - } - } - } // end of notifying waiters. - } - - for { - select { - case <-closer.HasBeenClosed(): - return - case mark := <-w.markCh: - if mark.waiter != nil { - doneUntil := atomic.LoadUint64(&w.doneUntil) - if doneUntil >= mark.index { - close(mark.waiter) - } else { - ws, ok := waiters[mark.index] - if !ok { - waiters[mark.index] = []chan struct{}{mark.waiter} - } else { - waiters[mark.index] = append(ws, mark.waiter) - } - } - } else { - if mark.index > 0 { - processOne(mark.index, mark.done) - } - for _, index := range mark.indices { - processOne(index, mark.done) - } - } - } - } -} diff --git a/vendor/github.com/dgraph-io/badger/v2/y/y.go b/vendor/github.com/dgraph-io/badger/v2/y/y.go deleted file mode 100644 index 554a413e..00000000 --- a/vendor/github.com/dgraph-io/badger/v2/y/y.go +++ /dev/null @@ -1,516 +0,0 @@ -/* - * Copyright 2017 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package y - -import ( - "bytes" - "encoding/binary" - "fmt" - "hash/crc32" - "io" - "math" - "os" - "reflect" - "sync" - "time" - "unsafe" - - "github.com/pkg/errors" -) - -var ( - // ErrEOF indicates an end of file when trying to read from a memory mapped file - // and encountering the end of slice. - ErrEOF = errors.New("End of mapped region") -) - -const ( - // Sync indicates that O_DSYNC should be set on the underlying file, - // ensuring that data writes do not return until the data is flushed - // to disk. - Sync = 1 << iota - // ReadOnly opens the underlying file on a read-only basis. - ReadOnly -) - -var ( - // This is O_DSYNC (datasync) on platforms that support it -- see file_unix.go - datasyncFileFlag = 0x0 - - // CastagnoliCrcTable is a CRC32 polynomial table - CastagnoliCrcTable = crc32.MakeTable(crc32.Castagnoli) - - // Dummy channel for nil closers. - dummyCloserChan = make(chan struct{}) -) - -// OpenExistingFile opens an existing file, errors if it doesn't exist. -func OpenExistingFile(filename string, flags uint32) (*os.File, error) { - openFlags := os.O_RDWR - if flags&ReadOnly != 0 { - openFlags = os.O_RDONLY - } - - if flags&Sync != 0 { - openFlags |= datasyncFileFlag - } - return os.OpenFile(filename, openFlags, 0) -} - -// CreateSyncedFile creates a new file (using O_EXCL), errors if it already existed. 
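// Editor's sketch (illustrative only): the Sync/ReadOnly bits defined above
// combine into the flags mask taken by OpenExistingFile, e.g. opening an
// existing log read-only with O_DSYNC semantics (the path is hypothetical):
//
//	fd, err := OpenExistingFile("/tmp/000001.vlog", Sync|ReadOnly)
//	if err != nil {
//		// handle error
//	}
//	defer fd.Close()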
-func CreateSyncedFile(filename string, sync bool) (*os.File, error) { - flags := os.O_RDWR | os.O_CREATE | os.O_EXCL - if sync { - flags |= datasyncFileFlag - } - return os.OpenFile(filename, flags, 0600) -} - -// OpenSyncedFile creates the file if one doesn't exist. -func OpenSyncedFile(filename string, sync bool) (*os.File, error) { - flags := os.O_RDWR | os.O_CREATE - if sync { - flags |= datasyncFileFlag - } - return os.OpenFile(filename, flags, 0600) -} - -// OpenTruncFile opens the file with O_RDWR | O_CREATE | O_TRUNC -func OpenTruncFile(filename string, sync bool) (*os.File, error) { - flags := os.O_RDWR | os.O_CREATE | os.O_TRUNC - if sync { - flags |= datasyncFileFlag - } - return os.OpenFile(filename, flags, 0600) -} - -// SafeCopy does append(a[:0], src...). -func SafeCopy(a, src []byte) []byte { - return append(a[:0], src...) -} - -// Copy copies a byte slice and returns the copied slice. -func Copy(a []byte) []byte { - b := make([]byte, len(a)) - copy(b, a) - return b -} - -// KeyWithTs generates a new key by appending ts to key. -func KeyWithTs(key []byte, ts uint64) []byte { - out := make([]byte, len(key)+8) - copy(out, key) - binary.BigEndian.PutUint64(out[len(key):], math.MaxUint64-ts) - return out -} - -// ParseTs parses the timestamp from the key bytes. -func ParseTs(key []byte) uint64 { - if len(key) <= 8 { - return 0 - } - return math.MaxUint64 - binary.BigEndian.Uint64(key[len(key)-8:]) -} - -// CompareKeys checks the key without timestamp and checks the timestamp if keyNoTs -// is same. -// a would be sorted higher than aa if we use bytes.compare -// All keys should have timestamp. -func CompareKeys(key1, key2 []byte) int { - if cmp := bytes.Compare(key1[:len(key1)-8], key2[:len(key2)-8]); cmp != 0 { - return cmp - } - return bytes.Compare(key1[len(key1)-8:], key2[len(key2)-8:]) -} - -// ParseKey parses the actual key from the key bytes. -func ParseKey(key []byte) []byte { - if key == nil { - return nil - } - - return key[:len(key)-8] -} - -// SameKey checks for key equality ignoring the version timestamp suffix. -func SameKey(src, dst []byte) bool { - if len(src) != len(dst) { - return false - } - return bytes.Equal(ParseKey(src), ParseKey(dst)) -} - -// Slice holds a reusable buf, will reallocate if you request a larger size than ever before. -// One problem is with n distinct sizes in random order it'll reallocate log(n) times. -type Slice struct { - buf []byte -} - -// Resize reuses the Slice's buffer (or makes a new one) and returns a slice in that buffer of -// length sz. -func (s *Slice) Resize(sz int) []byte { - if cap(s.buf) < sz { - s.buf = make([]byte, sz) - } - return s.buf[0:sz] -} - -// FixedDuration returns a string representation of the given duration with the -// hours, minutes, and seconds. -func FixedDuration(d time.Duration) string { - str := fmt.Sprintf("%02ds", int(d.Seconds())%60) - if d >= time.Minute { - str = fmt.Sprintf("%02dm", int(d.Minutes())%60) + str - } - if d >= time.Hour { - str = fmt.Sprintf("%02dh", int(d.Hours())) + str - } - return str -} - -// Closer holds the two things we need to close a goroutine and wait for it to finish: a chan -// to tell the goroutine to shut down, and a WaitGroup with which to wait for it to finish shutting -// down. -type Closer struct { - closed chan struct{} - waiting sync.WaitGroup - closeOnce sync.Once -} - -// NewCloser constructs a new Closer, with an initial count on the WaitGroup. 
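// Editor's sketch (illustrative only): the intended Closer lifecycle, with one
// worker goroutine signalled to stop and then waited on:
//
//	c := NewCloser(1)
//	go func() {
//		defer c.Done()
//		<-c.HasBeenClosed() // block until Signal() is called
//	}()
//	c.SignalAndWait() // tell the worker to stop, then wait for its Done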
-func NewCloser(initial int) *Closer { - ret := &Closer{closed: make(chan struct{})} - ret.waiting.Add(initial) - return ret -} - -// AddRunning Add()'s delta to the WaitGroup. -func (lc *Closer) AddRunning(delta int) { - lc.waiting.Add(delta) -} - -// Signal signals the HasBeenClosed signal. -func (lc *Closer) Signal() { - // Todo(ibrahim): Change Signal to return error on next badger breaking change. - lc.closeOnce.Do(func() { - close(lc.closed) - }) -} - -// HasBeenClosed gets signaled when Signal() is called. -func (lc *Closer) HasBeenClosed() <-chan struct{} { - if lc == nil { - return dummyCloserChan - } - return lc.closed -} - -// Done calls Done() on the WaitGroup. -func (lc *Closer) Done() { - if lc == nil { - return - } - lc.waiting.Done() -} - -// Wait waits on the WaitGroup. (It waits for NewCloser's initial value, AddRunning, and Done -// calls to balance out.) -func (lc *Closer) Wait() { - lc.waiting.Wait() -} - -// SignalAndWait calls Signal(), then Wait(). -func (lc *Closer) SignalAndWait() { - lc.Signal() - lc.Wait() -} - -// Throttle allows a limited number of workers to run at a time. It also -// provides a mechanism to check for errors encountered by workers and wait for -// them to finish. -type Throttle struct { - once sync.Once - wg sync.WaitGroup - ch chan struct{} - errCh chan error - finishErr error -} - -// NewThrottle creates a new throttle with a max number of workers. -func NewThrottle(max int) *Throttle { - return &Throttle{ - ch: make(chan struct{}, max), - errCh: make(chan error, max), - } -} - -// Do should be called by workers before they start working. It blocks if the -// maximum number of workers are already working. If it detects an error from -// previously Done workers, it returns it. -func (t *Throttle) Do() error { - for { - select { - case t.ch <- struct{}{}: - t.wg.Add(1) - return nil - case err := <-t.errCh: - if err != nil { - return err - } - } - } -} - -// Done should be called by workers when they finish working. They can also -// pass the error status of work done. -func (t *Throttle) Done(err error) { - if err != nil { - t.errCh <- err - } - select { - case <-t.ch: - default: - panic("Throttle Do Done mismatch") - } - t.wg.Done() -} - -// Finish waits until all workers have finished working. It returns any error passed by Done. -// If Finish is called multiple times, it will wait for workers to finish only once (the first -// time). Subsequent calls return the same error as found on the first call.
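// Editor's sketch (illustrative only): the Do/Done/Finish pattern described in
// the comments above, with at most 4 workers in flight:
//
//	t := NewThrottle(4)
//	for i := 0; i < 16; i++ {
//		if err := t.Do(); err != nil {
//			break // a previous worker reported an error
//		}
//		go func() {
//			// real work goes here
//			t.Done(nil)
//		}()
//	}
//	err := t.Finish() // waits for all workers, returns the first error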
-func (t *Throttle) Finish() error { - t.once.Do(func() { - t.wg.Wait() - close(t.ch) - close(t.errCh) - for err := range t.errCh { - if err != nil { - t.finishErr = err - return - } - } - }) - - return t.finishErr -} - -// U32ToBytes converts the given Uint32 to bytes -func U32ToBytes(v uint32) []byte { - var uBuf [4]byte - binary.BigEndian.PutUint32(uBuf[:], v) - return uBuf[:] -} - -// BytesToU32 converts the given byte slice to uint32 -func BytesToU32(b []byte) uint32 { - return binary.BigEndian.Uint32(b) -} - -// U32SliceToBytes converts the given Uint32 slice to byte slice -func U32SliceToBytes(u32s []uint32) []byte { - if len(u32s) == 0 { - return nil - } - var b []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - hdr.Len = len(u32s) * 4 - hdr.Cap = hdr.Len - hdr.Data = uintptr(unsafe.Pointer(&u32s[0])) - return b -} - -// BytesToU32Slice converts the given byte slice to uint32 slice -func BytesToU32Slice(b []byte) []uint32 { - if len(b) == 0 { - return nil - } - var u32s []uint32 - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&u32s)) - hdr.Len = len(b) / 4 - hdr.Cap = hdr.Len - hdr.Data = uintptr(unsafe.Pointer(&b[0])) - return u32s -} - -// page struct contains one underlying buffer. -type page struct { - buf []byte -} - -// PageBuffer consists of many pages. A page is a wrapper over []byte. PageBuffer can act as a -// replacement for bytes.Buffer. Instead of having a single underlying buffer, it has multiple -// underlying buffers. Hence it avoids any copy during relocation (as happens in bytes.Buffer). -// PageBuffer allocates memory in pages. Once a page is full, it will allocate a page with double -// the size of the previous page. Its functions are not thread-safe. -type PageBuffer struct { - pages []*page - - length int // Length of PageBuffer. - nextPageSize int // Size of next page to be allocated. -} - -// NewPageBuffer returns a new PageBuffer with first page having size pageSize. -func NewPageBuffer(pageSize int) *PageBuffer { - b := &PageBuffer{} - b.pages = append(b.pages, &page{buf: make([]byte, 0, pageSize)}) - b.nextPageSize = pageSize * 2 - return b -} - -// Write writes data to PageBuffer b. It returns number of bytes written and any error encountered. -func (b *PageBuffer) Write(data []byte) (int, error) { - dataLen := len(data) - for { - cp := b.pages[len(b.pages)-1] // Current page. - - n := copy(cp.buf[len(cp.buf):cap(cp.buf)], data) - cp.buf = cp.buf[:len(cp.buf)+n] - b.length += n - - if len(data) == n { - break - } - data = data[n:] - - b.pages = append(b.pages, &page{buf: make([]byte, 0, b.nextPageSize)}) - b.nextPageSize *= 2 - } - - return dataLen, nil -} - -// WriteByte writes data byte to PageBuffer and returns any encountered error. -func (b *PageBuffer) WriteByte(data byte) error { - _, err := b.Write([]byte{data}) - return err -} - -// Len returns length of PageBuffer. -func (b *PageBuffer) Len() int { - return b.length -} - -// pageForOffset returns pageIdx and startIdx for the offset. -func (b *PageBuffer) pageForOffset(offset int) (int, int) { - AssertTrue(offset < b.length) - - var pageIdx, startIdx, sizeNow int - for i := 0; i < len(b.pages); i++ { - cp := b.pages[i] - - if sizeNow+len(cp.buf)-1 < offset { - sizeNow += len(cp.buf) - } else { - pageIdx = i - startIdx = offset - sizeNow - break - } - } - - return pageIdx, startIdx -} - -// Truncate truncates PageBuffer to length n. -func (b *PageBuffer) Truncate(n int) { - pageIdx, startIdx := b.pageForOffset(n) - // For simplicity of the code, reject extra pages. These pages can be kept.
- b.pages = b.pages[:pageIdx+1] - cp := b.pages[len(b.pages)-1] - cp.buf = cp.buf[:startIdx] - b.length = n -} - -// Bytes returns the whole buffer data as a single []byte. -func (b *PageBuffer) Bytes() []byte { - buf := make([]byte, b.length) - written := 0 - for i := 0; i < len(b.pages); i++ { - written += copy(buf[written:], b.pages[i].buf) - } - - return buf -} - -// WriteTo writes the whole buffer to w. It returns number of bytes written and any error encountered. -func (b *PageBuffer) WriteTo(w io.Writer) (int64, error) { - written := int64(0) - for i := 0; i < len(b.pages); i++ { - n, err := w.Write(b.pages[i].buf) - written += int64(n) - if err != nil { - return written, err - } - } - - return written, nil -} - -// NewReaderAt returns a reader which starts reading from offset in the page buffer. -func (b *PageBuffer) NewReaderAt(offset int) *PageBufferReader { - pageIdx, startIdx := b.pageForOffset(offset) - - return &PageBufferReader{ - buf: b, - pageIdx: pageIdx, - startIdx: startIdx, - } -} - -// PageBufferReader is a reader for PageBuffer. -type PageBufferReader struct { - buf *PageBuffer // Underlying page buffer. - pageIdx int // Idx of page from where it will start reading. - startIdx int // Idx inside page - buf.pages[pageIdx] from where it will start reading. -} - -// Read reads up to len(p) bytes. It returns the number of bytes read and any error encountered. -func (r *PageBufferReader) Read(p []byte) (int, error) { - // Check if there is enough to Read. - pc := len(r.buf.pages) - - read := 0 - for r.pageIdx < pc && read < len(p) { - cp := r.buf.pages[r.pageIdx] // Current Page. - endIdx := len(cp.buf) // Last Idx up to which we can read from this page. - - n := copy(p[read:], cp.buf[r.startIdx:endIdx]) - read += n - r.startIdx += n - - // Instead of len(cp.buf), we compare with cap(cp.buf). This ensures that we move to the next - // page only when we have read all the data. Reading from the last page is an edge case. We don't - // want to move to the next page until the last page is full to its capacity. - if r.startIdx >= cap(cp.buf) { - // We should move to the next page. - r.pageIdx++ - r.startIdx = 0 - continue - } - - // When the last page is not full to its capacity and we have read all data up to its - // length, just break out of the loop. - if r.pageIdx == pc-1 { - break - } - } - - if read == 0 { - return read, io.EOF - } - - return read, nil -} diff --git a/vendor/github.com/dgraph-io/badger/v2/y/zstd.go b/vendor/github.com/dgraph-io/badger/v2/y/zstd.go deleted file mode 100644 index 57018680..00000000 --- a/vendor/github.com/dgraph-io/badger/v2/y/zstd.go +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Copyright 2019 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package y - -import ( - "sync" - - "github.com/klauspost/compress/zstd" -) - -var ( - decoder *zstd.Decoder - encoder *zstd.Encoder - - encOnce, decOnce sync.Once -) - -// ZSTDDecompress decompresses a block using ZSTD algorithm.
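// Editor's sketch (illustrative only): a round trip through the two wrappers
// below, sizing the destination with ZSTDCompressBound (level 3 is zstd's
// default level):
//
//	src := []byte("some compressible payload")
//	dst := make([]byte, 0, ZSTDCompressBound(len(src)))
//	cmp, err := ZSTDCompress(dst, src, 3)
//	if err == nil {
//		out, _ := ZSTDDecompress(nil, cmp) // out now equals src
//		_ = out
//	}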
-func ZSTDDecompress(dst, src []byte) ([]byte, error) { - decOnce.Do(func() { - var err error - decoder, err = zstd.NewReader(nil) - Check(err) - }) - return decoder.DecodeAll(src, dst[:0]) -} - -// ZSTDCompress compresses a block using ZSTD algorithm. -func ZSTDCompress(dst, src []byte, compressionLevel int) ([]byte, error) { - encOnce.Do(func() { - var err error - level := zstd.EncoderLevelFromZstd(compressionLevel) - encoder, err = zstd.NewWriter(nil, zstd.WithEncoderLevel(level)) - Check(err) - }) - return encoder.EncodeAll(src, dst[:0]), nil -} - -// ZSTDCompressBound returns the worst case size needed for a destination buffer. -// Klauspost ZSTD library does not provide any API for Compression Bound. This -// calculation is based on the DataDog ZSTD library. -// See https://pkg.go.dev/github.com/DataDog/zstd#CompressBound -func ZSTDCompressBound(srcSize int) int { - lowLimit := 128 << 10 // 128 kB - var margin int - if srcSize < lowLimit { - margin = (lowLimit - srcSize) >> 11 - } - return srcSize + (srcSize >> 8) + margin -} diff --git a/vendor/github.com/dgraph-io/badger/value.go b/vendor/github.com/dgraph-io/badger/value.go deleted file mode 100644 index 53d60e0a..00000000 --- a/vendor/github.com/dgraph-io/badger/value.go +++ /dev/null @@ -1,1661 +0,0 @@ -/* - * Copyright 2017 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package badger - -import ( - "bufio" - "bytes" - "encoding/binary" - "encoding/json" - "fmt" - "hash/crc32" - "io" - "io/ioutil" - "math" - "math/rand" - "os" - "sort" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" - - "github.com/dgraph-io/badger/options" - "github.com/dgraph-io/badger/y" - "github.com/pkg/errors" - "golang.org/x/net/trace" -) - -// Values have their first byte being byteData or byteDelete. This helps us distinguish between -// a key that has never been seen and a key that has been explicitly deleted. -const ( - bitDelete byte = 1 << 0 // Set if the key has been deleted. - bitValuePointer byte = 1 << 1 // Set if the value is NOT stored directly next to key. - bitDiscardEarlierVersions byte = 1 << 2 // Set if earlier versions can be discarded. - // Set if item shouldn't be discarded via compactions (used by merge operator) - bitMergeEntry byte = 1 << 3 - // The MSB 2 bits are for transactions. - bitTxn byte = 1 << 6 // Set if the entry is part of a txn. - bitFinTxn byte = 1 << 7 // Set if the entry is to indicate end of txn in value log. - - mi int64 = 1 << 20 - - // The number of updates after which discard map should be flushed into badger. - discardStatsFlushThreshold = 100 -) - -type logFile struct { - path string - // This is a lock on the log file. It guards the fd’s value, the file’s - // existence and the file’s memory map. - // - // Use shared ownership when reading/writing the file or memory map, use - // exclusive ownership to open/close the descriptor, unmap or remove the file. 
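// Editor's note (added comment): in practice that means lf.lock.RLock() around
// reads through fd/fmap, and lf.lock.Lock() around munmap, truncate, close and
// remove, as the methods below do.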
- lock sync.RWMutex - fd *os.File - fid uint32 - fmap []byte - size uint32 - loadingMode options.FileLoadingMode -} - -func (lf *logFile) mmap(size int64) (err error) { - if lf.loadingMode != options.MemoryMap { - // Nothing to do - return nil - } - lf.fmap, err = y.Mmap(lf.fd, false, size) - if err == nil { - err = y.Madvise(lf.fmap, false) // Disable readahead - } - return err -} - -func (lf *logFile) munmap() (err error) { - if lf.loadingMode != options.MemoryMap || len(lf.fmap) == 0 { - // Nothing to do - return nil - } - - if err := y.Munmap(lf.fmap); err != nil { - return errors.Wrapf(err, "Unable to munmap value log: %q", lf.path) - } - // This is important. We should set the map to nil because the munmap - // system call doesn't change the length or capacity of the fmap slice. - lf.fmap = nil - return nil -} - -// Acquire lock on mmap/file if you are calling this -func (lf *logFile) read(p valuePointer, s *y.Slice) (buf []byte, err error) { - var nbr int64 - offset := p.Offset - if lf.loadingMode == options.FileIO { - buf = s.Resize(int(p.Len)) - var n int - n, err = lf.fd.ReadAt(buf, int64(offset)) - nbr = int64(n) - } else { - // Do not convert size to uint32, because the lf.fmap can be of size - // 4GB, which overflows the uint32 during conversion to make the size 0, - // causing the read to fail with ErrEOF. See issue #585. - size := int64(len(lf.fmap)) - valsz := p.Len - lfsz := atomic.LoadUint32(&lf.size) - if int64(offset) >= size || int64(offset+valsz) > size || - // Ensure that the read is within the file's actual size. It might be possible that - // the offset+valsz length is beyond the file's actual size. This could happen when - // dropAll and iterations are running simultaneously. - int64(offset+valsz) > int64(lfsz) { - err = y.ErrEOF - } else { - buf = lf.fmap[offset : offset+valsz] - nbr = int64(valsz) - } - } - y.NumReads.Add(1) - y.NumBytesRead.Add(nbr) - return buf, err -} - -func (lf *logFile) doneWriting(offset uint32) error { - // Sync before acquiring lock. (We call this from write() and thus know we have shared access - // to the fd.) - if err := y.FileSync(lf.fd); err != nil { - return errors.Wrapf(err, "Unable to sync value log: %q", lf.path) - } - - // Previously, we acquired a lock here on lf.lock because we were invalidating the file - // descriptor due to reopening it as read-only. Now, we don't invalidate the fd, but unmap it, - // truncate it and remap it. That creates a window where we have segfaults because the mmap is - // no longer valid, while someone might be reading it. Therefore, we need a lock here again. - lf.lock.Lock() - defer lf.lock.Unlock() - - // Unmap file before we truncate it. Windows cannot truncate a file that is mmapped. - if err := lf.munmap(); err != nil { - return errors.Wrapf(err, "failed to munmap vlog file %s", lf.fd.Name()) - } - - // TODO: Confirm if we need to run a file sync after truncation. - // Truncation must run after unmapping, otherwise Windows would crap itself. - if err := lf.fd.Truncate(int64(offset)); err != nil { - return errors.Wrapf(err, "Unable to truncate file: %q", lf.path) - } - - fstat, err := lf.fd.Stat() - if err != nil { - return errors.Wrapf(err, "Unable to check stat for %q", lf.path) - } - sz := fstat.Size() - if sz == 0 { - // File is empty. We don't need to mmap it. Return.
- return nil - } - y.AssertTrue(sz <= math.MaxUint32) - lf.size = uint32(sz) - if err = lf.mmap(sz); err != nil { - _ = lf.fd.Close() - return errors.Wrapf(err, "Unable to map file: %q", fstat.Name()) - } - // Previously we used to close the file after it was written and reopen it in read-only mode. - // We no longer open files in read-only mode. We keep all vlog files open in read-write mode. - return nil -} - -// You must hold lf.lock to sync() -func (lf *logFile) sync() error { - return y.FileSync(lf.fd) -} - -var errStop = errors.New("Stop iteration") -var errTruncate = errors.New("Do truncate") -var errDeleteVlogFile = errors.New("Delete vlog file") - -type logEntry func(e Entry, vp valuePointer) error - -type safeRead struct { - k []byte - v []byte - - recordOffset uint32 -} - -func (r *safeRead) Entry(reader *bufio.Reader) (*Entry, error) { - var hbuf [headerBufSize]byte - var err error - - hash := crc32.New(y.CastagnoliCrcTable) - tee := io.TeeReader(reader, hash) - if _, err = io.ReadFull(tee, hbuf[:]); err != nil { - return nil, err - } - - var h header - h.Decode(hbuf[:]) - if h.klen > uint32(1<<16) { // Key length must be below uint16. - return nil, errTruncate - } - kl := int(h.klen) - if cap(r.k) < kl { - r.k = make([]byte, 2*kl) - } - vl := int(h.vlen) - if cap(r.v) < vl { - r.v = make([]byte, 2*vl) - } - - e := &Entry{} - e.offset = r.recordOffset - e.Key = r.k[:kl] - e.Value = r.v[:vl] - - if _, err = io.ReadFull(tee, e.Key); err != nil { - if err == io.EOF { - err = errTruncate - } - return nil, err - } - if _, err = io.ReadFull(tee, e.Value); err != nil { - if err == io.EOF { - err = errTruncate - } - return nil, err - } - var crcBuf [4]byte - if _, err = io.ReadFull(reader, crcBuf[:]); err != nil { - if err == io.EOF { - err = errTruncate - } - return nil, err - } - crc := binary.BigEndian.Uint32(crcBuf[:]) - if crc != hash.Sum32() { - return nil, errTruncate - } - e.meta = h.meta - e.UserMeta = h.userMeta - e.ExpiresAt = h.expiresAt - return e, nil -} - -// iterate iterates over the log file. It does not allocate new memory for every kv pair. -// Therefore, the kv pair is only valid for the duration of the fn call. -func (vlog *valueLog) iterate(lf *logFile, offset uint32, fn logEntry) (uint32, error) { - fi, err := lf.fd.Stat() - if err != nil { - return 0, err - } - if int64(offset) == fi.Size() { - // We're at the end of the file already. No need to do anything. - return offset, nil - } - if vlog.opt.ReadOnly { - // We're not at the end of the file. We'd need to replay the entries, or - // possibly truncate the file. - return 0, ErrReplayNeeded - } - if int64(offset) > fi.Size() { - // Return 0 which would truncate the entire file. This was the original behavior before - // commit 7539f0a: Fix windows dataloss issue (#1134) was merged. - return 0, nil - } - // We're not at the end of the file. Let's Seek to the offset and start reading.
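// Editor's note (added comment): each record the loop below decodes is laid out
// as | header | key | value | crc32 |, which is exactly how vp.Len is computed
// further down (headerBufSize + len(Key) + len(Value) + crc32.Size).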
- if _, err := lf.fd.Seek(int64(offset), io.SeekStart); err != nil { - return 0, errFile(err, lf.path, "Unable to seek") - } - - reader := bufio.NewReader(lf.fd) - read := &safeRead{ - k: make([]byte, 10), - v: make([]byte, 10), - recordOffset: offset, - } - - var lastCommit uint64 - var validEndOffset uint32 = offset - for { - e, err := read.Entry(reader) - if err == io.EOF { - break - } else if err == io.ErrUnexpectedEOF || err == errTruncate { - break - } else if err != nil { - return 0, err - } else if e == nil { - continue - } - - var vp valuePointer - vp.Len = uint32(headerBufSize + len(e.Key) + len(e.Value) + crc32.Size) - read.recordOffset += vp.Len - - vp.Offset = e.offset - vp.Fid = lf.fid - - if e.meta&bitTxn > 0 { - txnTs := y.ParseTs(e.Key) - if lastCommit == 0 { - lastCommit = txnTs - } - if lastCommit != txnTs { - break - } - - } else if e.meta&bitFinTxn > 0 { - txnTs, err := strconv.ParseUint(string(e.Value), 10, 64) - if err != nil || lastCommit != txnTs { - break - } - // Got the end of txn. Now we can store them. - lastCommit = 0 - validEndOffset = read.recordOffset - - } else { - if lastCommit != 0 { - // This is most likely an entry which was moved as part of GC. - // We shouldn't get this entry in the middle of a transaction. - break - } - validEndOffset = read.recordOffset - } - - if err := fn(*e, vp); err != nil { - if err == errStop { - break - } - return 0, errFile(err, lf.path, "Iteration function") - } - } - return validEndOffset, nil -} - -func (vlog *valueLog) rewrite(f *logFile, tr trace.Trace) error { - maxFid := atomic.LoadUint32(&vlog.maxFid) - y.AssertTruef(uint32(f.fid) < maxFid, "fid to move: %d. Current max fid: %d", f.fid, maxFid) - tr.LazyPrintf("Rewriting fid: %d", f.fid) - - wb := make([]*Entry, 0, 1000) - var size int64 - - y.AssertTrue(vlog.db != nil) - var count, moved int - fe := func(e Entry) error { - count++ - if count%100000 == 0 { - tr.LazyPrintf("Processing entry %d", count) - } - - vs, err := vlog.db.get(e.Key) - if err != nil { - return err - } - if discardEntry(e, vs, vlog.db) { - return nil - } - - // Value is still present in value log. - if len(vs.Value) == 0 { - return errors.Errorf("Empty value: %+v", vs) - } - var vp valuePointer - vp.Decode(vs.Value) - - // If the entry found from the LSM Tree points to a newer vlog file, don't do anything. - if vp.Fid > f.fid { - return nil - } - // If the entry found from the LSM Tree points to an offset greater than the one - // read from vlog, don't do anything. - if vp.Offset > e.offset { - return nil - } - // If the entry read from LSM Tree and vlog file point to the same vlog file and offset, - // insert them back into the DB. - // NOTE: It might be possible that the entry read from the LSM Tree points to - // an older vlog file. See the comments in the else part. - if vp.Fid == f.fid && vp.Offset == e.offset { - moved++ - // This new entry only contains the key, and a pointer to the value. - ne := new(Entry) - ne.meta = 0 // Remove all bits. Different keyspace doesn't need these bits. - ne.UserMeta = e.UserMeta - ne.ExpiresAt = e.ExpiresAt - - // Create a new key in a separate keyspace, prefixed by moveKey. We are not - // allowed to rewrite an older version of key in the LSM tree, because then this older - // version would be at the top of the LSM tree. To work correctly, reads expect the - // latest versions to be at the top, and the older versions at the bottom. - if bytes.HasPrefix(e.Key, badgerMove) { - ne.Key = append([]byte{}, e.Key...) 
- } else { - ne.Key = make([]byte, len(badgerMove)+len(e.Key)) - n := copy(ne.Key, badgerMove) - copy(ne.Key[n:], e.Key) - } - - ne.Value = append([]byte{}, e.Value...) - es := int64(ne.estimateSize(vlog.opt.ValueThreshold)) - // Consider size of value as well while considering the total size - // of the batch. There have been reports of high memory usage in - // rewrite because we don't consider the value size. See #1292. - es += int64(len(e.Value)) - - // Ensure length and size of wb are within transaction limits. - if int64(len(wb)+1) >= vlog.opt.maxBatchCount || - size+es >= vlog.opt.maxBatchSize { - tr.LazyPrintf("request has %d entries, size %d", len(wb), size) - if err := vlog.db.batchSet(wb); err != nil { - return err - } - size = 0 - wb = wb[:0] - } - wb = append(wb, ne) - size += es - } else { - // It might be possible that the entry read from LSM Tree points to an older vlog file. - // This can happen in the following situation. Assume DB is opened with - // numberOfVersionsToKeep=1 - // - // Now, if we have ONLY one key in the system "FOO" which has been updated 3 times and - // the same key has been garbage collected 3 times, we'll have 3 versions of the movekey - // for the same key "FOO". - // NOTE: moveKeyi is the moveKey with version i - // Assume we have 3 move keys in L0. - // - moveKey1 (points to vlog file 10), - // - moveKey2 (points to vlog file 14) and - // - moveKey3 (points to vlog file 15). - - // Also, assume there is another move key "moveKey1" (points to vlog file 6) (this is - // also a move key for key "FOO") on upper levels (let's say 3). The move key - // "moveKey1" on level 0 was inserted because vlog file 6 was GCed. - // - // Here's what the arrangement looks like - // L0 => (moveKey1 => vlog10), (moveKey2 => vlog14), (moveKey3 => vlog15) - // L1 => .... - // L2 => .... - // L3 => (moveKey1 => vlog6) - // - // When L0 compaction runs, it keeps only moveKey3 because the number of versions - // to keep is set to 1. (we've dropped moveKey1's latest version) - // - // The new arrangement of keys is - // L0 => .... - // L1 => (moveKey3 => vlog15) - // L2 => .... - // L3 => (moveKey1 => vlog6) - // - // Now if we try to GC vlog file 10, the entry read from vlog file will point to vlog10 - // but the entry read from LSM Tree will point to vlog6. The move key read from LSM tree - // will point to vlog6 because we've asked for version 1 of the move key. - // - // This might seem like an issue but it's not really an issue because the user has set - // the number of versions to keep to 1 and the latest version of moveKey points to the - // correct vlog file and offset. The stale move key on L3 will be eventually dropped by - // compaction because there are newer versions in the upper levels. - } - return nil - } - - _, err := vlog.iterate(f, 0, func(e Entry, vp valuePointer) error { - return fe(e) - }) - if err != nil { - return err - } - - tr.LazyPrintf("request has %d entries, size %d", len(wb), size) - batchSize := 1024 - var loops int - for i := 0; i < len(wb); { - loops++ - if batchSize == 0 { - vlog.db.opt.Warningf("We shouldn't reach a batch size of zero.") - return ErrNoRewrite - } - end := i + batchSize - if end > len(wb) { - end = len(wb) - } - if err := vlog.db.batchSet(wb[i:end]); err != nil { - if err == ErrTxnTooBig { - // Decrease the batch size to half.
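// Editor's note (added comment): e.g. 1024 -> 512 -> 256 -> ...; if the batch
// size ever reaches zero, the check at the top of this loop returns
// ErrNoRewrite instead of looping forever.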
- batchSize = batchSize / 2 - tr.LazyPrintf("Dropped batch size to %d", batchSize) - continue - } - return err - } - i += batchSize - } - tr.LazyPrintf("Processed %d entries in %d loops", len(wb), loops) - tr.LazyPrintf("Total entries: %d. Moved: %d", count, moved) - tr.LazyPrintf("Removing fid: %d", f.fid) - var deleteFileNow bool - // Entries written to LSM. Remove the older file now. - { - vlog.filesLock.Lock() - // Just a sanity-check. - if _, ok := vlog.filesMap[f.fid]; !ok { - vlog.filesLock.Unlock() - return errors.Errorf("Unable to find fid: %d", f.fid) - } - if vlog.iteratorCount() == 0 { - delete(vlog.filesMap, f.fid) - deleteFileNow = true - } else { - vlog.filesToBeDeleted = append(vlog.filesToBeDeleted, f.fid) - } - vlog.filesLock.Unlock() - } - - if deleteFileNow { - if err := vlog.deleteLogFile(f); err != nil { - return err - } - } - - return nil -} - -func (vlog *valueLog) deleteMoveKeysFor(fid uint32, tr trace.Trace) error { - db := vlog.db - var result []*Entry - var count, pointers uint64 - tr.LazyPrintf("Iterating over move keys to find invalids for fid: %d", fid) - err := db.View(func(txn *Txn) error { - opt := DefaultIteratorOptions - opt.InternalAccess = true - opt.PrefetchValues = false - itr := txn.NewIterator(opt) - defer itr.Close() - - for itr.Seek(badgerMove); itr.ValidForPrefix(badgerMove); itr.Next() { - count++ - item := itr.Item() - if item.meta&bitValuePointer == 0 { - continue - } - pointers++ - var vp valuePointer - vp.Decode(item.vptr) - if vp.Fid == fid { - e := &Entry{Key: y.KeyWithTs(item.Key(), item.Version()), meta: bitDelete} - result = append(result, e) - } - } - return nil - }) - if err != nil { - tr.LazyPrintf("Got error while iterating move keys: %v", err) - tr.SetError() - return err - } - tr.LazyPrintf("Num total move keys: %d. Num pointers: %d", count, pointers) - tr.LazyPrintf("Number of invalid move keys found: %d", len(result)) - batchSize := 10240 - for i := 0; i < len(result); { - end := i + batchSize - if end > len(result) { - end = len(result) - } - if err := db.batchSet(result[i:end]); err != nil { - if err == ErrTxnTooBig { - batchSize /= 2 - tr.LazyPrintf("Dropped batch size to %d", batchSize) - continue - } - tr.LazyPrintf("Error while doing batchSet: %v", err) - tr.SetError() - return err - } - i += batchSize - } - tr.LazyPrintf("Move keys deletion done.") - return nil -} - -func (vlog *valueLog) incrIteratorCount() { - atomic.AddInt32(&vlog.numActiveIterators, 1) -} - -func (vlog *valueLog) iteratorCount() int { - return int(atomic.LoadInt32(&vlog.numActiveIterators)) -} - -func (vlog *valueLog) decrIteratorCount() error { - num := atomic.AddInt32(&vlog.numActiveIterators, -1) - if num != 0 { - return nil - } - - vlog.filesLock.Lock() - lfs := make([]*logFile, 0, len(vlog.filesToBeDeleted)) - for _, id := range vlog.filesToBeDeleted { - lfs = append(lfs, vlog.filesMap[id]) - delete(vlog.filesMap, id) - } - vlog.filesToBeDeleted = nil - vlog.filesLock.Unlock() - - for _, lf := range lfs { - if err := vlog.deleteLogFile(lf); err != nil { - return err - } - } - return nil -} - -func (vlog *valueLog) deleteLogFile(lf *logFile) error { - if lf == nil { - return nil - } - lf.lock.Lock() - defer lf.lock.Unlock() - - path := vlog.fpath(lf.fid) - if err := lf.munmap(); err != nil { - _ = lf.fd.Close() - return err - } - lf.fmap = nil - if err := lf.fd.Close(); err != nil { - return err - } - return os.Remove(path) -} - -func (vlog *valueLog) dropAll() (int, error) { - // We don't want to block dropAll on any pending transactions. 
So, don't worry about iterator - // count. - var count int - deleteAll := func() error { - vlog.filesLock.Lock() - defer vlog.filesLock.Unlock() - for _, lf := range vlog.filesMap { - if err := vlog.deleteLogFile(lf); err != nil { - return err - } - count++ - } - vlog.filesMap = make(map[uint32]*logFile) - return nil - } - if err := deleteAll(); err != nil { - return count, err - } - - vlog.db.opt.Infof("Value logs deleted. Creating value log file: 0") - if _, err := vlog.createVlogFile(0); err != nil { - return count, err - } - atomic.StoreUint32(&vlog.maxFid, 0) - return count, nil -} - -// lfDiscardStats keeps track of the amount of data that could be discarded for -// a given logfile. -type lfDiscardStats struct { - sync.RWMutex - m map[uint32]int64 - flushChan chan map[uint32]int64 - closer *y.Closer - updatesSinceFlush int -} - -type valueLog struct { - dirPath string - elog trace.EventLog - - // guards our view of which files exist, which to be deleted, how many active iterators - filesLock sync.RWMutex - filesMap map[uint32]*logFile - filesToBeDeleted []uint32 - // A refcount of iterators -- when this hits zero, we can delete the filesToBeDeleted. - numActiveIterators int32 - - db *DB - maxFid uint32 // accessed via atomics. - writableLogOffset uint32 // read by read, written by write. Must access via atomics. - numEntriesWritten uint32 - opt Options - - garbageCh chan struct{} - lfDiscardStats *lfDiscardStats -} - -func vlogFilePath(dirPath string, fid uint32) string { - return fmt.Sprintf("%s%s%06d.vlog", dirPath, string(os.PathSeparator), fid) -} - -func (vlog *valueLog) fpath(fid uint32) string { - return vlogFilePath(vlog.dirPath, fid) -} - -func (vlog *valueLog) populateFilesMap() error { - vlog.filesMap = make(map[uint32]*logFile) - - files, err := ioutil.ReadDir(vlog.dirPath) - if err != nil { - return errFile(err, vlog.dirPath, "Unable to open log dir.") - } - - found := make(map[uint64]struct{}) - for _, file := range files { - if !strings.HasSuffix(file.Name(), ".vlog") { - continue - } - fsz := len(file.Name()) - fid, err := strconv.ParseUint(file.Name()[:fsz-5], 10, 32) - if err != nil { - return errFile(err, file.Name(), "Unable to parse log id.") - } - if _, ok := found[fid]; ok { - return errFile(err, file.Name(), "Duplicate file found. Please delete one.") - } - found[fid] = struct{}{} - - lf := &logFile{ - fid: uint32(fid), - path: vlog.fpath(uint32(fid)), - loadingMode: vlog.opt.ValueLogLoadingMode, - } - vlog.filesMap[uint32(fid)] = lf - if vlog.maxFid < uint32(fid) { - vlog.maxFid = uint32(fid) - } - } - return nil -} - -func (vlog *valueLog) createVlogFile(fid uint32) (*logFile, error) { - path := vlog.fpath(fid) - lf := &logFile{ - fid: fid, - path: path, - loadingMode: vlog.opt.ValueLogLoadingMode, - } - // writableLogOffset is only written by write func, by read by Read func. - // To avoid a race condition, all reads and updates to this variable must be - // done via atomics. - atomic.StoreUint32(&vlog.writableLogOffset, 0) - vlog.numEntriesWritten = 0 - - var err error - if lf.fd, err = y.CreateSyncedFile(path, vlog.opt.SyncWrites); err != nil { - return nil, errFile(err, lf.path, "Create value log file") - } - - removeFile := func() { - // Remove the file so that we don't get an error when createVlogFile is - // called for the same fid, again. This could happen if there is an - // transient error because of which we couldn't create a new file - // and the second attempt to create the file succeeds. 
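
// Editor's aside: populateFilesMap above derives each file id from its
// "%06d.vlog" name, and createVlogFile builds paths the same way. A small
// sketch of that round trip; vlogPath and parseFid are hypothetical helper
// names of mine, assuming names are zero-padded digits plus a ".vlog" suffix.
package main

import (
	"fmt"
	"path/filepath"
	"strconv"
	"strings"
)

func vlogPath(dir string, fid uint32) string {
	return filepath.Join(dir, fmt.Sprintf("%06d.vlog", fid))
}

// parseFid reverses vlogPath for a bare file name, mirroring how
// populateFilesMap trims the ".vlog" suffix before parsing the id.
func parseFid(name string) (uint32, error) {
	if !strings.HasSuffix(name, ".vlog") {
		return 0, fmt.Errorf("not a vlog file: %q", name)
	}
	fid, err := strconv.ParseUint(strings.TrimSuffix(name, ".vlog"), 10, 32)
	if err != nil {
		return 0, err
	}
	return uint32(fid), nil
}

func main() {
	p := vlogPath("/tmp/badger", 42)
	fid, err := parseFid(filepath.Base(p))
	fmt.Println(p, fid, err) // /tmp/badger/000042.vlog 42 <nil>
}
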
- y.Check(os.Remove(lf.fd.Name())) - } - - if err = syncDir(vlog.dirPath); err != nil { - removeFile() - return nil, errFile(err, vlog.dirPath, "Sync value log dir") - } - - if err = lf.mmap(2 * vlog.opt.ValueLogFileSize); err != nil { - removeFile() - return nil, errFile(err, lf.path, "Mmap value log file") - } - - vlog.filesLock.Lock() - vlog.filesMap[fid] = lf - vlog.filesLock.Unlock() - - return lf, nil -} - -func errFile(err error, path string, msg string) error { - return fmt.Errorf("%s. Path=%s. Error=%v", msg, path, err) -} - -func (vlog *valueLog) replayLog(lf *logFile, offset uint32, replayFn logEntry) error { - fi, err := lf.fd.Stat() - if err != nil { - return errFile(err, lf.path, "Unable to run file.Stat") - } - - // Alright, let's iterate now. - endOffset, err := vlog.iterate(lf, offset, replayFn) - if err != nil { - return errFile(err, lf.path, "Unable to replay logfile") - } - if int64(endOffset) == fi.Size() { - return nil - } - - // End offset is different from file size. So, we should truncate the file - // to that size. - y.AssertTrue(int64(endOffset) <= fi.Size()) - if !vlog.opt.Truncate { - return ErrTruncateNeeded - } - - // The entire file should be truncated (i.e. it should be deleted). - // If fid == maxFid then it's okay to truncate the entire file since it will be - // used for future additions. Also, it's okay if the last file has size zero. - // We mmap 2*opt.ValueLogSize for the last file. See vlog.Open() function - if endOffset == 0 && lf.fid != vlog.maxFid { - return errDeleteVlogFile - } - if err := lf.fd.Truncate(int64(endOffset)); err != nil { - return errFile(err, lf.path, fmt.Sprintf( - "Truncation needed at offset %d. Can be done manually as well.", endOffset)) - } - return nil -} - -// init initializes the value log struct. This initialization needs to happen -// before compactions start. -func (vlog *valueLog) init(db *DB) { - vlog.opt = db.opt - vlog.db = db - vlog.dirPath = vlog.opt.ValueDir - vlog.elog = y.NoEventLog - if db.opt.EventLogging { - vlog.elog = trace.NewEventLog("Badger", "Valuelog") - } - vlog.garbageCh = make(chan struct{}, 1) // Only allow one GC at a time. - vlog.lfDiscardStats = &lfDiscardStats{ - m: make(map[uint32]int64), - closer: y.NewCloser(1), - flushChan: make(chan map[uint32]int64, 16), - } -} - -func (vlog *valueLog) open(db *DB, ptr valuePointer, replayFn logEntry) error { - go vlog.flushDiscardStats() - if err := vlog.populateFilesMap(); err != nil { - return err - } - // If no files are found, then create a new file. - if len(vlog.filesMap) == 0 { - _, err := vlog.createVlogFile(0) - return err - } - - fids := vlog.sortedFids() - for _, fid := range fids { - lf, ok := vlog.filesMap[fid] - y.AssertTrue(ok) - var flags uint32 - switch { - case vlog.opt.ReadOnly: - // If we have read only, we don't need SyncWrites. - flags |= y.ReadOnly - // Set sync flag. - case vlog.opt.SyncWrites: - flags |= y.Sync - } - - // We cannot mmap the files upfront here. Windows does not like mmapped files to be - // truncated. We might need to truncate files during a replay. - if err := lf.open(vlog.fpath(fid), flags); err != nil { - return err - } - // This file is before the value head pointer. So, we don't need to - // replay it, and can just open it in readonly mode. - if fid < ptr.Fid { - // Mmap the file here, we don't need to replay it. 
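
// Editor's aside: replayLog above compares the last valid offset against the
// file size and either truncates the tail or refuses, depending on the
// Truncate option. A hedged sketch of that decision in isolation; repairTail
// and errTruncateNeeded are illustrative names, not Badger's.
package main

import (
	"errors"
	"fmt"
	"os"
)

var errTruncateNeeded = errors.New("log truncate required but not allowed")

// repairTail mimics replayLog's tail handling: if the last valid offset falls
// short of the file size, truncate to that offset or refuse, depending on
// whether truncation was opted into.
func repairTail(f *os.File, validEnd int64, allowTruncate bool) error {
	fi, err := f.Stat()
	if err != nil {
		return err
	}
	if validEnd == fi.Size() {
		return nil // nothing after the last valid entry
	}
	if !allowTruncate {
		return errTruncateNeeded
	}
	return f.Truncate(validEnd)
}

func main() {
	f, _ := os.CreateTemp("", "vlog")
	defer os.Remove(f.Name())
	f.WriteString("valid-entries....garbage-tail")
	fmt.Println(repairTail(f, int64(len("valid-entries....")), true)) // <nil>
}
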
- if err := lf.mmap(int64(lf.size)); err != nil { - return err - } - continue - } - - var offset uint32 - if fid == ptr.Fid { - offset = ptr.Offset + ptr.Len - } - vlog.db.opt.Infof("Replaying file id: %d at offset: %d\n", fid, offset) - now := time.Now() - // Replay and possible truncation done. Now we can open the file as per - // user specified options. - if err := vlog.replayLog(lf, offset, replayFn); err != nil { - // Log file is corrupted. Delete it. - if err == errDeleteVlogFile { - delete(vlog.filesMap, fid) - // Close the fd of the file before deleting the file otherwise windows complaints. - if err := lf.fd.Close(); err != nil { - return errors.Wrapf(err, "failed to close vlog file %s", lf.fd.Name()) - } - path := vlog.fpath(lf.fid) - if err := os.Remove(path); err != nil { - return y.Wrapf(err, "failed to delete empty value log file: %q", path) - } - continue - } - return err - } - vlog.db.opt.Infof("Replay took: %s\n", time.Since(now)) - if fid < vlog.maxFid { - // This file has been replayed. It can now be mmapped. - // For maxFid, the mmap would be done by the specially written code below. - if err := lf.mmap(int64(lf.size)); err != nil { - return err - } - } - } - - // Seek to the end to start writing. - last, ok := vlog.filesMap[vlog.maxFid] - y.AssertTrue(ok) - lastOffset, err := last.fd.Seek(0, io.SeekEnd) - if err != nil { - return errFile(err, last.path, "file.Seek to end") - } - vlog.writableLogOffset = uint32(lastOffset) - - // Update the head to point to the updated tail. Otherwise, even after doing a successful - // replay and closing the DB, the value log head does not get updated, which causes the replay - // to happen repeatedly. - vlog.db.vhead = valuePointer{Fid: vlog.maxFid, Offset: uint32(lastOffset)} - - // Map the file if needed. When we create a file, it is automatically mapped. - if err = last.mmap(2 * db.opt.ValueLogFileSize); err != nil { - return errFile(err, last.path, "Map log file") - } - if err := vlog.populateDiscardStats(); err != nil { - // Print the error and continue. We don't want to prevent value log open if there's an error - // with the fetching discards stats. - db.opt.Errorf("Failed to populate discard stats: %s", err) - } - return nil -} - -func (lf *logFile) open(path string, flags uint32) error { - var err error - if lf.fd, err = y.OpenExistingFile(path, flags); err != nil { - return y.Wrapf(err, "Error while opening file in logfile %s", path) - } - - fi, err := lf.fd.Stat() - if err != nil { - return errFile(err, lf.path, "Unable to run file.Stat") - } - sz := fi.Size() - y.AssertTruef( - sz <= math.MaxUint32, - "file size: %d greater than %d", - uint32(sz), uint32(math.MaxUint32), - ) - lf.size = uint32(sz) - return nil -} - -func (vlog *valueLog) Close() error { - // close flushDiscardStats. - vlog.lfDiscardStats.closer.SignalAndWait() - - vlog.elog.Printf("Stopping garbage collection of values.") - defer vlog.elog.Finish() - - var err error - for id, f := range vlog.filesMap { - f.lock.Lock() // We won’t release the lock. - if munmapErr := f.munmap(); munmapErr != nil && err == nil { - err = munmapErr - } - - maxFid := atomic.LoadUint32(&vlog.maxFid) - if !vlog.opt.ReadOnly && id == maxFid { - // truncate writable log file to correct offset. - if truncErr := f.fd.Truncate( - int64(vlog.woffset())); truncErr != nil && err == nil { - err = truncErr - } - } - - if closeErr := f.fd.Close(); closeErr != nil && err == nil { - err = closeErr - } - } - return err -} - -// sortedFids returns the file id's not pending deletion, sorted. 
Assumes we have shared access to -// filesMap. -func (vlog *valueLog) sortedFids() []uint32 { - toBeDeleted := make(map[uint32]struct{}) - for _, fid := range vlog.filesToBeDeleted { - toBeDeleted[fid] = struct{}{} - } - ret := make([]uint32, 0, len(vlog.filesMap)) - for fid := range vlog.filesMap { - if _, ok := toBeDeleted[fid]; !ok { - ret = append(ret, fid) - } - } - sort.Slice(ret, func(i, j int) bool { - return ret[i] < ret[j] - }) - return ret -} - -type request struct { - // Input values - Entries []*Entry - // Output values and wait group stuff below - Ptrs []valuePointer - Wg sync.WaitGroup - Err error - ref int32 -} - -func (req *request) reset() { - req.Entries = req.Entries[:0] - req.Ptrs = req.Ptrs[:0] - req.Wg = sync.WaitGroup{} - req.Err = nil - req.ref = 0 -} - -func (req *request) IncrRef() { - atomic.AddInt32(&req.ref, 1) -} - -func (req *request) DecrRef() { - nRef := atomic.AddInt32(&req.ref, -1) - if nRef > 0 { - return - } - req.Entries = nil - requestPool.Put(req) -} - -func (req *request) Wait() error { - req.Wg.Wait() - err := req.Err - req.DecrRef() // DecrRef after writing to DB. - return err -} - -type requests []*request - -func (reqs requests) DecrRef() { - for _, req := range reqs { - req.DecrRef() - } -} - -func (reqs requests) IncrRef() { - for _, req := range reqs { - req.IncrRef() - } -} - -// sync function syncs content of latest value log file to disk. Syncing of value log directory is -// not required here as it happens every time a value log file rotation happens(check createVlogFile -// function). During rotation, previous value log file also gets synced to disk. It only syncs file -// if fid >= vlog.maxFid. In some cases such as replay(while opening db), it might be called with -// fid < vlog.maxFid. To sync irrespective of file id just call it with math.MaxUint32. -func (vlog *valueLog) sync(fid uint32) error { - if vlog.opt.SyncWrites { - return nil - } - - vlog.filesLock.RLock() - maxFid := atomic.LoadUint32(&vlog.maxFid) - // During replay it is possible to get sync call with fid less than maxFid. - // Because older file has already been synced, we can return from here. - if fid < maxFid || len(vlog.filesMap) == 0 { - vlog.filesLock.RUnlock() - return nil - } - curlf := vlog.filesMap[maxFid] - // Sometimes it is possible that vlog.maxFid has been increased but file creation - // with same id is still in progress and this function is called. In those cases - // entry for the file might not be present in vlog.filesMap. - if curlf == nil { - vlog.filesLock.RUnlock() - return nil - } - curlf.lock.RLock() - vlog.filesLock.RUnlock() - - err := curlf.sync() - curlf.lock.RUnlock() - return err -} - -func (vlog *valueLog) woffset() uint32 { - return atomic.LoadUint32(&vlog.writableLogOffset) -} - -// write is thread-unsafe by design and should not be called concurrently. 
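
// Editor's aside: the request type above is pooled and reference-counted so
// that the last DecrRef returns it to requestPool. A toy version of that
// pattern, assuming a sync.Pool-backed pool as in the original; req and
// reqPool are my names.
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type req struct {
	payload []byte
	ref     int32
}

var reqPool = sync.Pool{New: func() interface{} { return new(req) }}

func (r *req) IncrRef() { atomic.AddInt32(&r.ref, 1) }

// DecrRef releases one reference; the final holder clears the payload and
// hands the object back to the pool, like request.DecrRef above.
func (r *req) DecrRef() {
	if atomic.AddInt32(&r.ref, -1) > 0 {
		return
	}
	r.payload = nil // drop references before pooling
	reqPool.Put(r)
}

func main() {
	r := reqPool.Get().(*req)
	r.ref = 1
	r.IncrRef() // a second holder, e.g. a pending write
	r.DecrRef() // first holder done
	r.DecrRef() // last holder done; r goes back to the pool
	fmt.Println("released")
}
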
-func (vlog *valueLog) write(reqs []*request) error { - vlog.filesLock.RLock() - maxFid := atomic.LoadUint32(&vlog.maxFid) - curlf := vlog.filesMap[maxFid] - vlog.filesLock.RUnlock() - - var buf bytes.Buffer - flushWrites := func() error { - if buf.Len() == 0 { - return nil - } - vlog.elog.Printf("Flushing buffer of size %d to vlog", buf.Len()) - n, err := curlf.fd.Write(buf.Bytes()) - if err != nil { - return errors.Wrapf(err, "Unable to write to value log file: %q", curlf.path) - } - buf.Reset() - y.NumWrites.Add(1) - y.NumBytesWritten.Add(int64(n)) - vlog.elog.Printf("Done") - atomic.AddUint32(&vlog.writableLogOffset, uint32(n)) - atomic.StoreUint32(&curlf.size, vlog.writableLogOffset) - return nil - } - toDisk := func() error { - if err := flushWrites(); err != nil { - return err - } - if vlog.woffset() > uint32(vlog.opt.ValueLogFileSize) || - vlog.numEntriesWritten > vlog.opt.ValueLogMaxEntries { - if err := curlf.doneWriting(vlog.woffset()); err != nil { - return err - } - - newid := atomic.AddUint32(&vlog.maxFid, 1) - y.AssertTruef(newid > 0, "newid has overflown uint32: %v", newid) - newlf, err := vlog.createVlogFile(newid) - if err != nil { - return err - } - curlf = newlf - atomic.AddInt32(&vlog.db.logRotates, 1) - } - return nil - } - - for i := range reqs { - b := reqs[i] - b.Ptrs = b.Ptrs[:0] - var written int - for j := range b.Entries { - e := b.Entries[j] - if e.skipVlog { - b.Ptrs = append(b.Ptrs, valuePointer{}) - continue - } - var p valuePointer - - p.Fid = curlf.fid - // Use the offset including buffer length so far. - p.Offset = vlog.woffset() + uint32(buf.Len()) - plen, err := encodeEntry(e, &buf) // Now encode the entry into buffer. - if err != nil { - return err - } - p.Len = uint32(plen) - b.Ptrs = append(b.Ptrs, p) - written++ - - // It is possible that the size of the buffer grows beyond the max size of the value - // log (this happens when a transaction contains entries with large value sizes) and - // badger might run into out of memory errors. We flush the buffer here if it's size - // grows beyond the max value log size. - if int64(buf.Len()) > vlog.db.opt.ValueLogFileSize { - if err := flushWrites(); err != nil { - return err - } - } - } - vlog.numEntriesWritten += uint32(written) - // We write to disk here so that all entries that are part of the same transaction are - // written to the same vlog file. - writeNow := - vlog.woffset()+uint32(buf.Len()) > uint32(vlog.opt.ValueLogFileSize) || - vlog.numEntriesWritten > uint32(vlog.opt.ValueLogMaxEntries) - if writeNow { - if err := toDisk(); err != nil { - return err - } - } - } - return toDisk() -} - -// Gets the logFile and acquires and RLock() for the mmap. You must call RUnlock on the file -// (if non-nil) -func (vlog *valueLog) getFileRLocked(fid uint32) (*logFile, error) { - vlog.filesLock.RLock() - defer vlog.filesLock.RUnlock() - ret, ok := vlog.filesMap[fid] - if !ok { - // log file has gone away, will need to retry the operation. - return nil, ErrRetry - } - ret.lock.RLock() - return ret, nil -} - -// Read reads the value log at a given location. -// TODO: Make this read private. -func (vlog *valueLog) Read(vp valuePointer, s *y.Slice) ([]byte, func(), error) { - // Check for valid offset if we are reading from writable log. 
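
// Editor's aside: write() above computes each entry's on-disk offset as the
// current writable offset plus however many bytes are already buffered but
// not yet flushed. A standalone sketch of that bookkeeping; pointer and
// stage are illustrative names of mine.
package main

import (
	"bytes"
	"fmt"
)

type pointer struct {
	Offset uint32
	Len    uint32
}

// stage appends an encoded entry to buf and returns a pointer to where it
// will live on disk once buf is flushed at fileOffset, mirroring how write()
// adds buf.Len() to the writable offset before encoding each entry.
func stage(buf *bytes.Buffer, fileOffset uint32, encoded []byte) pointer {
	p := pointer{Offset: fileOffset + uint32(buf.Len()), Len: uint32(len(encoded))}
	buf.Write(encoded)
	return p
}

func main() {
	var buf bytes.Buffer
	base := uint32(4096) // pretend the log is already 4 KiB long
	p1 := stage(&buf, base, []byte("entry-one"))
	p2 := stage(&buf, base, []byte("entry-two"))
	fmt.Println(p1, p2) // {4096 9} {4105 9}
}
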
- maxFid := atomic.LoadUint32(&vlog.maxFid) - if vp.Fid == maxFid && vp.Offset >= vlog.woffset() { - return nil, nil, errors.Errorf( - "Invalid value pointer offset: %d greater than current offset: %d", - vp.Offset, vlog.woffset()) - } - - buf, cb, err := vlog.readValueBytes(vp, s) - if err != nil { - return nil, cb, err - } - - if vlog.opt.VerifyValueChecksum { - hash := crc32.New(y.CastagnoliCrcTable) - if _, err := hash.Write(buf[:len(buf)-crc32.Size]); err != nil { - runCallback(cb) - return nil, nil, errors.Wrapf(err, "failed to write hash for vp %+v", vp) - } - // Fetch checksum from the end of the buffer. - checksum := buf[len(buf)-crc32.Size:] - res := binary.BigEndian.Uint32(checksum) - if hash.Sum32() != res { - runCallback(cb) - return nil, nil, errors.Errorf("checksum mismatch Error: value corrupted for vp: %+v", vp) - } - } - var h header - h.Decode(buf) - n := uint32(headerBufSize) + h.klen - return buf[n : n+h.vlen], cb, nil -} - -func (vlog *valueLog) readValueBytes(vp valuePointer, s *y.Slice) ([]byte, func(), error) { - lf, err := vlog.getFileRLocked(vp.Fid) - if err != nil { - return nil, nil, err - } - - buf, err := lf.read(vp, s) - if vlog.opt.ValueLogLoadingMode == options.MemoryMap { - return buf, lf.lock.RUnlock, err - } - // If we are using File I/O we unlock the file immediately - // and return an empty function as callback. - lf.lock.RUnlock() - return buf, nil, err -} - -// Test helper -func valueBytesToEntry(buf []byte) (e Entry) { - var h header - h.Decode(buf) - n := uint32(headerBufSize) - - e.Key = buf[n : n+h.klen] - n += h.klen - e.meta = h.meta - e.UserMeta = h.userMeta - e.Value = buf[n : n+h.vlen] - return -} - -func (vlog *valueLog) pickLog(head valuePointer, tr trace.Trace) (files []*logFile) { - vlog.filesLock.RLock() - defer vlog.filesLock.RUnlock() - fids := vlog.sortedFids() - if len(fids) <= 1 { - tr.LazyPrintf("Only one or less value log file.") - return nil - } else if head.Fid == 0 { - tr.LazyPrintf("Head pointer is at zero.") - return nil - } - - // Pick a candidate that contains the largest amount of discardable data - candidate := struct { - fid uint32 - discard int64 - }{math.MaxUint32, 0} - vlog.lfDiscardStats.RLock() - for _, fid := range fids { - if fid >= head.Fid { - break - } - if vlog.lfDiscardStats.m[fid] > candidate.discard { - candidate.fid = fid - candidate.discard = vlog.lfDiscardStats.m[fid] - } - } - vlog.lfDiscardStats.RUnlock() - - if candidate.fid != math.MaxUint32 { // Found a candidate - tr.LazyPrintf("Found candidate via discard stats: %v", candidate) - files = append(files, vlog.filesMap[candidate.fid]) - } else { - tr.LazyPrintf("Could not find candidate via discard stats. Randomly picking one.") - } - - // Fallback to randomly picking a log file - var idxHead int - for i, fid := range fids { - if fid == head.Fid { - idxHead = i - break - } - } - if idxHead == 0 { // Not found or first file - tr.LazyPrintf("Could not find any file.") - return nil - } - idx := rand.Intn(idxHead) // Don’t include head.Fid. We pick a random file before it. - if idx > 0 { - idx = rand.Intn(idx + 1) // Another level of rand to favor smaller fids. - } - tr.LazyPrintf("Randomly chose fid: %d", fids[idx]) - files = append(files, vlog.filesMap[fids[idx]]) - return files -} - -func discardEntry(e Entry, vs y.ValueStruct, db *DB) bool { - if vs.Version != y.ParseTs(e.Key) { - // Version not found. Discard. 
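
// Editor's aside: pickLog above first tries the file with the most
// discardable bytes below the head pointer, falling back to a random pick.
// A sketch of just the discard-stats pass, under the assumption that fids
// arrive sorted; pickCandidate is my name for it.
package main

import "fmt"

// pickCandidate scans fids in ascending order and returns the one (below the
// head fid) with the most discardable bytes, or ok=false if none has any.
func pickCandidate(fids []uint32, headFid uint32, discard map[uint32]int64) (uint32, bool) {
	var best uint32
	var bestDiscard int64
	for _, fid := range fids {
		if fid >= headFid {
			break // never GC at or past the head pointer
		}
		if discard[fid] > bestDiscard {
			best, bestDiscard = fid, discard[fid]
		}
	}
	return best, bestDiscard > 0
}

func main() {
	fids := []uint32{1, 2, 3, 4}
	discard := map[uint32]int64{1: 100, 2: 900, 3: 50}
	fmt.Println(pickCandidate(fids, 4, discard)) // 2 true
}
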
- return true - } - if isDeletedOrExpired(vs.Meta, vs.ExpiresAt) { - return true - } - if (vs.Meta & bitValuePointer) == 0 { - // Key also stores the value in LSM. Discard. - return true - } - if (vs.Meta & bitFinTxn) > 0 { - // Just a txn finish entry. Discard. - return true - } - if bytes.HasPrefix(e.Key, badgerMove) { - // Verify the actual key entry without the badgerPrefix has not been deleted. - // If this is not done the badgerMove entry will be kept forever moving from - // vlog to vlog during rewrites. - avs, err := db.get(e.Key[len(badgerMove):]) - if err != nil { - return false - } - return avs.Version == 0 - } - return false -} - -func (vlog *valueLog) doRunGC(lf *logFile, discardRatio float64, tr trace.Trace) (err error) { - // Update stats before exiting - defer func() { - if err == nil { - vlog.lfDiscardStats.Lock() - delete(vlog.lfDiscardStats.m, lf.fid) - vlog.lfDiscardStats.Unlock() - } - }() - - type reason struct { - total float64 - discard float64 - count int - } - - fi, err := lf.fd.Stat() - if err != nil { - tr.LazyPrintf("Error while finding file size: %v", err) - tr.SetError() - return err - } - - // Set up the sampling window sizes. - sizeWindow := float64(fi.Size()) * 0.1 // 10% of the file as window. - sizeWindowM := sizeWindow / (1 << 20) // in MBs. - countWindow := int(float64(vlog.opt.ValueLogMaxEntries) * 0.01) // 1% of num entries. - tr.LazyPrintf("Size window: %5.2f. Count window: %d.", sizeWindow, countWindow) - - // Pick a random start point for the log. - skipFirstM := float64(rand.Int63n(fi.Size())) // Pick a random starting location. - skipFirstM -= sizeWindow // Avoid hitting EOF by moving back by window. - skipFirstM /= float64(mi) // Convert to MBs. - tr.LazyPrintf("Skip first %5.2f MB of file of size: %d MB", skipFirstM, fi.Size()/mi) - var skipped float64 - - var r reason - start := time.Now() - y.AssertTrue(vlog.db != nil) - s := new(y.Slice) - var numIterations int - _, err = vlog.iterate(lf, 0, func(e Entry, vp valuePointer) error { - numIterations++ - esz := float64(vp.Len) / (1 << 20) // in MBs. - if skipped < skipFirstM { - skipped += esz - return nil - } - - // Sample until we reach the window sizes or exceed 10 seconds. - if r.count > countWindow { - tr.LazyPrintf("Stopping sampling after %d entries.", countWindow) - return errStop - } - if r.total > sizeWindowM { - tr.LazyPrintf("Stopping sampling after reaching window size.") - return errStop - } - if time.Since(start) > 10*time.Second { - tr.LazyPrintf("Stopping sampling after 10 seconds.") - return errStop - } - r.total += esz - r.count++ - - vs, err := vlog.db.get(e.Key) - if err != nil { - return err - } - if discardEntry(e, vs, vlog.db) { - r.discard += esz - return nil - } - - // Value is still present in value log. - y.AssertTrue(len(vs.Value) > 0) - vp.Decode(vs.Value) - - if vp.Fid > lf.fid { - // Value is present in a later log. Discard. - r.discard += esz - return nil - } - if vp.Offset > e.offset { - // Value is present in a later offset, but in the same log. - r.discard += esz - return nil - } - if vp.Fid == lf.fid && vp.Offset == e.offset { - // This is still the active entry. This would need to be rewritten. - - } else { - vlog.elog.Printf("Reason=%+v\n", r) - - buf, cb, err := vlog.readValueBytes(vp, s) - if err != nil { - return errStop - } - ne := valueBytesToEntry(buf) - ne.offset = vp.Offset - ne.print("Latest Entry Header in LSM") - e.print("Latest Entry in Log") - runCallback(cb) - return errors.Errorf("This shouldn't happen. Latest Pointer:%+v. 
Meta:%v.", - vp, vs.Meta) - } - return nil - }) - - if err != nil { - tr.LazyPrintf("Error while iterating for RunGC: %v", err) - tr.SetError() - return err - } - tr.LazyPrintf("Fid: %d. Skipped: %5.2fMB Num iterations: %d. Data status=%+v\n", - lf.fid, skipped, numIterations, r) - - // If we couldn't sample at least a 1000 KV pairs or at least 75% of the window size, - // and what we can discard is below the threshold, we should skip the rewrite. - if (r.count < countWindow && r.total < sizeWindowM*0.75) || r.discard < discardRatio*r.total { - tr.LazyPrintf("Skipping GC on fid: %d", lf.fid) - return ErrNoRewrite - } - if err = vlog.rewrite(lf, tr); err != nil { - return err - } - tr.LazyPrintf("Done rewriting.") - return nil -} - -func (vlog *valueLog) waitOnGC(lc *y.Closer) { - defer lc.Done() - - <-lc.HasBeenClosed() // Wait for lc to be closed. - - // Block any GC in progress to finish, and don't allow any more writes to runGC by filling up - // the channel of size 1. - vlog.garbageCh <- struct{}{} -} - -func (vlog *valueLog) runGC(discardRatio float64, head valuePointer) error { - select { - case vlog.garbageCh <- struct{}{}: - // Pick a log file for GC. - tr := trace.New("Badger.ValueLog", "GC") - tr.SetMaxEvents(100) - defer func() { - tr.Finish() - <-vlog.garbageCh - }() - - var err error - files := vlog.pickLog(head, tr) - if len(files) == 0 { - tr.LazyPrintf("PickLog returned zero results.") - return ErrNoRewrite - } - tried := make(map[uint32]bool) - for _, lf := range files { - if _, done := tried[lf.fid]; done { - continue - } - tried[lf.fid] = true - err = vlog.doRunGC(lf, discardRatio, tr) - if err == nil { - return vlog.deleteMoveKeysFor(lf.fid, tr) - } - } - return err - default: - return ErrRejected - } -} - -func (vlog *valueLog) updateDiscardStats(stats map[uint32]int64) { - select { - case vlog.lfDiscardStats.flushChan <- stats: - default: - vlog.opt.Warningf("updateDiscardStats called: discard stats flushChan full, " + - "returning without pushing to flushChan") - } -} - -func (vlog *valueLog) flushDiscardStats() { - defer vlog.lfDiscardStats.closer.Done() - - mergeStats := func(stats map[uint32]int64) ([]byte, error) { - vlog.lfDiscardStats.Lock() - defer vlog.lfDiscardStats.Unlock() - for fid, count := range stats { - vlog.lfDiscardStats.m[fid] += count - vlog.lfDiscardStats.updatesSinceFlush++ - } - - if vlog.lfDiscardStats.updatesSinceFlush > discardStatsFlushThreshold { - encodedDS, err := json.Marshal(vlog.lfDiscardStats.m) - if err != nil { - return nil, err - } - vlog.lfDiscardStats.updatesSinceFlush = 0 - return encodedDS, nil - } - return nil, nil - } - - process := func(stats map[uint32]int64) error { - encodedDS, err := mergeStats(stats) - if err != nil || encodedDS == nil { - return err - } - - entries := []*Entry{{ - Key: y.KeyWithTs(lfDiscardStatsKey, 1), - Value: encodedDS, - }} - req, err := vlog.db.sendToWriteCh(entries) - // No special handling of ErrBlockedWrites is required as err is just logged in - // for loop below. - if err != nil { - return errors.Wrapf(err, "failed to push discard stats to write channel") - } - return req.Wait() - } - - closer := vlog.lfDiscardStats.closer - for { - select { - case <-closer.HasBeenClosed(): - // For simplicity just return without processing already present in stats in flushChan. 
- return - case stats := <-vlog.lfDiscardStats.flushChan: - if err := process(stats); err != nil { - vlog.opt.Errorf("unable to process discardstats with error: %s", err) - } - } - } -} - -// populateDiscardStats populates vlog.lfDiscardStats. -// This function will be called while initializing valueLog. -func (vlog *valueLog) populateDiscardStats() error { - key := y.KeyWithTs(lfDiscardStatsKey, math.MaxUint64) - var statsMap map[uint32]int64 - var val []byte - var vp valuePointer - for { - vs, err := vlog.db.get(key) - if err != nil { - return err - } - // Value doesn't exist. - if vs.Meta == 0 && len(vs.Value) == 0 { - vlog.opt.Debugf("Value log discard stats empty") - return nil - } - vp.Decode(vs.Value) - // Entry stored in LSM tree. - if vs.Meta&bitValuePointer == 0 { - val = y.SafeCopy(val, vs.Value) - break - } - // Read entry from value log. - result, cb, err := vlog.Read(vp, new(y.Slice)) - runCallback(cb) - val = y.SafeCopy(val, result) - // The result is stored in val. We can break the loop from here. - if err == nil { - break - } - if err != ErrRetry { - return err - } - // If we're at this point it means we haven't found the value yet and if the current key has - // badger move prefix, we should break from here since we've already tried the original key - // and the key with move prefix. "val" would be empty since we haven't found the value yet. - if bytes.HasPrefix(key, badgerMove) { - break - } - // If we're at this point it means the discard stats key was moved by the GC and the actual - // entry is the one prefixed by badger move key. - // Prepend existing key with badger move and search for the key. - key = append(badgerMove, key...) - } - - if len(val) == 0 { - return nil - } - if err := json.Unmarshal(val, &statsMap); err != nil { - return errors.Wrapf(err, "failed to unmarshal discard stats") - } - vlog.opt.Debugf("Value Log Discard stats: %v", statsMap) - vlog.lfDiscardStats.flushChan <- statsMap - return nil -} diff --git a/vendor/github.com/dgraph-io/badger/y/error.go b/vendor/github.com/dgraph-io/badger/y/error.go deleted file mode 100644 index 59bb2835..00000000 --- a/vendor/github.com/dgraph-io/badger/y/error.go +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Copyright 2017 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package y - -// This file contains some functions for error handling. Note that we are moving -// towards using x.Trace, i.e., rpc tracing using net/tracer. But for now, these -// functions are useful for simple checks logged on one machine. -// Some common use cases are: -// (1) You receive an error from external lib, and would like to check/log fatal. -// For this, use x.Check, x.Checkf. These will check for err != nil, which is -// more common in Go. If you want to check for boolean being true, use -// x.Assert, x.Assertf. -// (2) You receive an error from external lib, and would like to pass on with some -// stack trace information. In this case, use x.Wrap or x.Wrapf. 
-// (3) You want to generate a new error with stack trace info. Use x.Errorf. - -import ( - "fmt" - "log" - - "github.com/pkg/errors" -) - -var debugMode = true - -// Check logs fatal if err != nil. -func Check(err error) { - if err != nil { - log.Fatalf("%+v", Wrap(err)) - } -} - -// Check2 acts as convenience wrapper around Check, using the 2nd argument as error. -func Check2(_ interface{}, err error) { - Check(err) -} - -// AssertTrue asserts that b is true. Otherwise, it would log fatal. -func AssertTrue(b bool) { - if !b { - log.Fatalf("%+v", errors.Errorf("Assert failed")) - } -} - -// AssertTruef is AssertTrue with extra info. -func AssertTruef(b bool, format string, args ...interface{}) { - if !b { - log.Fatalf("%+v", errors.Errorf(format, args...)) - } -} - -// Wrap wraps errors from external lib. -func Wrap(err error) error { - if !debugMode { - return err - } - return errors.Wrap(err, "") -} - -// Wrapf is Wrap with extra info. -func Wrapf(err error, format string, args ...interface{}) error { - if !debugMode { - if err == nil { - return nil - } - return fmt.Errorf(format+" error: %+v", append(args, err)...) - } - return errors.Wrapf(err, format, args...) -} diff --git a/vendor/github.com/dgraph-io/badger/y/event_log.go b/vendor/github.com/dgraph-io/badger/y/event_log.go deleted file mode 100644 index ba9dcb1f..00000000 --- a/vendor/github.com/dgraph-io/badger/y/event_log.go +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright 2019 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package y - -import "golang.org/x/net/trace" - -var ( - NoEventLog trace.EventLog = nilEventLog{} -) - -type nilEventLog struct{} - -func (nel nilEventLog) Printf(format string, a ...interface{}) {} - -func (nel nilEventLog) Errorf(format string, a ...interface{}) {} - -func (nel nilEventLog) Finish() {} diff --git a/vendor/github.com/dgraph-io/badger/y/file_dsync.go b/vendor/github.com/dgraph-io/badger/y/file_dsync.go deleted file mode 100644 index ea4d9ab2..00000000 --- a/vendor/github.com/dgraph-io/badger/y/file_dsync.go +++ /dev/null @@ -1,25 +0,0 @@ -// +build !dragonfly,!freebsd,!windows,!plan9 - -/* - * Copyright 2017 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package y - -import "golang.org/x/sys/unix" - -func init() { - datasyncFileFlag = unix.O_DSYNC -} diff --git a/vendor/github.com/dgraph-io/badger/y/file_nodsync.go b/vendor/github.com/dgraph-io/badger/y/file_nodsync.go deleted file mode 100644 index 54a2184e..00000000 --- a/vendor/github.com/dgraph-io/badger/y/file_nodsync.go +++ /dev/null @@ -1,25 +0,0 @@ -// +build dragonfly freebsd windows plan9 - -/* - * Copyright 2017 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package y - -import "syscall" - -func init() { - datasyncFileFlag = syscall.O_SYNC -} diff --git a/vendor/github.com/dgraph-io/badger/y/file_sync.go b/vendor/github.com/dgraph-io/badger/y/file_sync.go deleted file mode 100644 index 19016ef6..00000000 --- a/vendor/github.com/dgraph-io/badger/y/file_sync.go +++ /dev/null @@ -1,28 +0,0 @@ -// +build !darwin go1.12 - -/* - * Copyright 2019 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package y - -import "os" - -// FileSync calls os.File.Sync with the right parameters. -// This function can be removed once we stop supporting Go 1.11 -// on MacOS. -// -// More info: https://golang.org/issue/26650. -func FileSync(f *os.File) error { return f.Sync() } diff --git a/vendor/github.com/dgraph-io/badger/y/file_sync_darwin.go b/vendor/github.com/dgraph-io/badger/y/file_sync_darwin.go deleted file mode 100644 index 01c79f23..00000000 --- a/vendor/github.com/dgraph-io/badger/y/file_sync_darwin.go +++ /dev/null @@ -1,37 +0,0 @@ -// +build darwin,!go1.12 - -/* - * Copyright 2019 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package y - -import ( - "os" - "syscall" -) - -// FileSync calls os.File.Sync with the right parameters. -// This function can be removed once we stop supporting Go 1.11 -// on MacOS. -// -// More info: https://golang.org/issue/26650. 
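
// Editor's aside: on macOS a plain fsync(2) may be satisfied from the drive
// cache, which is why the Darwin variant below issues fcntl(F_FULLFSYNC)
// instead. A hedged sketch of the same idea via the golang.org/x/sys/unix
// wrapper rather than a raw syscall; fullSync is my name, and this only
// builds on Darwin.
//go:build darwin

package main

import (
	"os"

	"golang.org/x/sys/unix"
)

// fullSync asks the drive itself to flush its cache, which f.Sync() alone
// did not guarantee on macOS before Go 1.12.
func fullSync(f *os.File) error {
	_, err := unix.FcntlInt(f.Fd(), unix.F_FULLFSYNC, 0)
	return err
}

func main() {
	f, err := os.CreateTemp("", "sync")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	f.WriteString("data")
	if err := fullSync(f); err != nil {
		panic(err)
	}
}
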
-func FileSync(f *os.File) error { - _, _, err := syscall.Syscall(syscall.SYS_FCNTL, f.Fd(), syscall.F_FULLFSYNC, 0) - if err == 0 { - return nil - } - return err -} diff --git a/vendor/github.com/dgraph-io/badger/y/iterator.go b/vendor/github.com/dgraph-io/badger/y/iterator.go deleted file mode 100644 index d3142c05..00000000 --- a/vendor/github.com/dgraph-io/badger/y/iterator.go +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Copyright 2017 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package y - -import ( - "bytes" - "encoding/binary" -) - -// ValueStruct represents the value info that can be associated with a key, but also the internal -// Meta field. -type ValueStruct struct { - Meta byte - UserMeta byte - ExpiresAt uint64 - Value []byte - - Version uint64 // This field is not serialized. Only for internal usage. -} - -func sizeVarint(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} - -// EncodedSize is the size of the ValueStruct when encoded -func (v *ValueStruct) EncodedSize() uint16 { - sz := len(v.Value) + 2 // meta, usermeta. - if v.ExpiresAt == 0 { - return uint16(sz + 1) - } - - enc := sizeVarint(v.ExpiresAt) - return uint16(sz + enc) -} - -// Decode uses the length of the slice to infer the length of the Value field. -func (v *ValueStruct) Decode(b []byte) { - v.Meta = b[0] - v.UserMeta = b[1] - var sz int - v.ExpiresAt, sz = binary.Uvarint(b[2:]) - v.Value = b[2+sz:] -} - -// Encode expects a slice of length at least v.EncodedSize(). -func (v *ValueStruct) Encode(b []byte) { - b[0] = v.Meta - b[1] = v.UserMeta - sz := binary.PutUvarint(b[2:], v.ExpiresAt) - copy(b[2+sz:], v.Value) -} - -// EncodeTo should be kept in sync with the Encode function above. The reason -// this function exists is to avoid creating byte arrays per key-value pair in -// table/builder.go. -func (v *ValueStruct) EncodeTo(buf *bytes.Buffer) { - buf.WriteByte(v.Meta) - buf.WriteByte(v.UserMeta) - var enc [binary.MaxVarintLen64]byte - sz := binary.PutUvarint(enc[:], v.ExpiresAt) - buf.Write(enc[:sz]) - buf.Write(v.Value) -} - -// Iterator is an interface for a basic iterator. -type Iterator interface { - Next() - Rewind() - Seek(key []byte) - Key() []byte - Value() ValueStruct - Valid() bool - - // All iterators should be closed so that file garbage collection works. - Close() error -} diff --git a/vendor/github.com/dgraph-io/badger/y/metrics.go b/vendor/github.com/dgraph-io/badger/y/metrics.go deleted file mode 100644 index 2de17d10..00000000 --- a/vendor/github.com/dgraph-io/badger/y/metrics.go +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Copyright (C) 2017 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package y - -import "expvar" - -var ( - // LSMSize has size of the LSM in bytes - LSMSize *expvar.Map - // VlogSize has size of the value log in bytes - VlogSize *expvar.Map - // PendingWrites tracks the number of pending writes. - PendingWrites *expvar.Map - - // These are cumulative - - // NumReads has cumulative number of reads - NumReads *expvar.Int - // NumWrites has cumulative number of writes - NumWrites *expvar.Int - // NumBytesRead has cumulative number of bytes read - NumBytesRead *expvar.Int - // NumBytesWritten has cumulative number of bytes written - NumBytesWritten *expvar.Int - // NumLSMGets is number of LMS gets - NumLSMGets *expvar.Map - // NumLSMBloomHits is number of LMS bloom hits - NumLSMBloomHits *expvar.Map - // NumGets is number of gets - NumGets *expvar.Int - // NumPuts is number of puts - NumPuts *expvar.Int - // NumBlockedPuts is number of blocked puts - NumBlockedPuts *expvar.Int - // NumMemtableGets is number of memtable gets - NumMemtableGets *expvar.Int -) - -// These variables are global and have cumulative values for all kv stores. -func init() { - NumReads = expvar.NewInt("badger_disk_reads_total") - NumWrites = expvar.NewInt("badger_disk_writes_total") - NumBytesRead = expvar.NewInt("badger_read_bytes") - NumBytesWritten = expvar.NewInt("badger_written_bytes") - NumLSMGets = expvar.NewMap("badger_lsm_level_gets_total") - NumLSMBloomHits = expvar.NewMap("badger_lsm_bloom_hits_total") - NumGets = expvar.NewInt("badger_gets_total") - NumPuts = expvar.NewInt("badger_puts_total") - NumBlockedPuts = expvar.NewInt("badger_blocked_puts_total") - NumMemtableGets = expvar.NewInt("badger_memtable_gets_total") - LSMSize = expvar.NewMap("badger_lsm_size_bytes") - VlogSize = expvar.NewMap("badger_vlog_size_bytes") - PendingWrites = expvar.NewMap("badger_pending_writes_total") -} diff --git a/vendor/github.com/dgraph-io/badger/y/mmap.go b/vendor/github.com/dgraph-io/badger/y/mmap.go deleted file mode 100644 index 4a477af3..00000000 --- a/vendor/github.com/dgraph-io/badger/y/mmap.go +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright 2019 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package y - -import ( - "os" -) - -// Mmap uses the mmap system call to memory-map a file. If writable is true, -// memory protection of the pages is set so that they may be written to as well. -func Mmap(fd *os.File, writable bool, size int64) ([]byte, error) { - return mmap(fd, writable, size) -} - -// Munmap unmaps a previously mapped slice. 
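
// Editor's aside: the metrics file above relies on expvar, which registers
// every published variable on the default HTTP mux under /debug/vars with no
// extra wiring. A minimal sketch; the metric names here are mine, not
// Badger's.
package main

import (
	"expvar"
	"fmt"
	"net/http"
)

var (
	numReads = expvar.NewInt("demo_disk_reads_total")
	lsmGets  = expvar.NewMap("demo_lsm_level_gets_total")
)

func main() {
	numReads.Add(1)
	lsmGets.Add("level0", 3)
	fmt.Println("metrics served as JSON at http://localhost:8080/debug/vars")
	http.ListenAndServe(":8080", nil)
}
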
-func Munmap(b []byte) error { - return munmap(b) -} - -// Madvise uses the madvise system call to give advise about the use of memory -// when using a slice that is memory-mapped to a file. Set the readahead flag to -// false if page references are expected in random order. -func Madvise(b []byte, readahead bool) error { - return madvise(b, readahead) -} diff --git a/vendor/github.com/dgraph-io/badger/y/mmap_darwin.go b/vendor/github.com/dgraph-io/badger/y/mmap_darwin.go deleted file mode 100644 index 10b756ba..00000000 --- a/vendor/github.com/dgraph-io/badger/y/mmap_darwin.go +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Copyright 2019 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package y - -import ( - "os" - "syscall" - "unsafe" - - "golang.org/x/sys/unix" -) - -// Mmap uses the mmap system call to memory-map a file. If writable is true, -// memory protection of the pages is set so that they may be written to as well. -func mmap(fd *os.File, writable bool, size int64) ([]byte, error) { - mtype := unix.PROT_READ - if writable { - mtype |= unix.PROT_WRITE - } - return unix.Mmap(int(fd.Fd()), 0, int(size), mtype, unix.MAP_SHARED) -} - -// Munmap unmaps a previously mapped slice. -func munmap(b []byte) error { - return unix.Munmap(b) -} - -// This is required because the unix package does not support the madvise system call on OS X. -func madvise(b []byte, readahead bool) error { - advice := unix.MADV_NORMAL - if !readahead { - advice = unix.MADV_RANDOM - } - - _, _, e1 := syscall.Syscall(syscall.SYS_MADVISE, uintptr(unsafe.Pointer(&b[0])), - uintptr(len(b)), uintptr(advice)) - if e1 != 0 { - return e1 - } - return nil -} diff --git a/vendor/github.com/dgraph-io/badger/y/mmap_plan9.go b/vendor/github.com/dgraph-io/badger/y/mmap_plan9.go deleted file mode 100644 index 21db76bf..00000000 --- a/vendor/github.com/dgraph-io/badger/y/mmap_plan9.go +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright 2020 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package y - -import ( - "os" - "syscall" -) - -// Mmap uses the mmap system call to memory-map a file. If writable is true, -// memory protection of the pages is set so that they may be written to as well. -func mmap(fd *os.File, writable bool, size int64) ([]byte, error) { - return nil, syscall.EPLAN9 -} - -// Munmap unmaps a previously mapped slice. 
-func munmap(b []byte) error {
-	return syscall.EPLAN9
-}
-
-// Madvise uses the madvise system call to give advice about the use of memory
-// when using a slice that is memory-mapped to a file. Set the readahead flag to
-// false if page references are expected in random order.
-func madvise(b []byte, readahead bool) error {
-	return syscall.EPLAN9
-}
diff --git a/vendor/github.com/dgraph-io/badger/y/mmap_unix.go b/vendor/github.com/dgraph-io/badger/y/mmap_unix.go
deleted file mode 100644
index 003f5972..00000000
--- a/vendor/github.com/dgraph-io/badger/y/mmap_unix.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// +build !windows,!darwin,!plan9
-
-/*
- * Copyright 2019 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package y
-
-import (
-	"os"
-
-	"golang.org/x/sys/unix"
-)
-
-// Mmap uses the mmap system call to memory-map a file. If writable is true,
-// memory protection of the pages is set so that they may be written to as well.
-func mmap(fd *os.File, writable bool, size int64) ([]byte, error) {
-	mtype := unix.PROT_READ
-	if writable {
-		mtype |= unix.PROT_WRITE
-	}
-	return unix.Mmap(int(fd.Fd()), 0, int(size), mtype, unix.MAP_SHARED)
-}
-
-// Munmap unmaps a previously mapped slice.
-func munmap(b []byte) error {
-	return unix.Munmap(b)
-}
-
-// Madvise uses the madvise system call to give advice about the use of memory
-// when using a slice that is memory-mapped to a file. Set the readahead flag to
-// false if page references are expected in random order.
-func madvise(b []byte, readahead bool) error {
-	flags := unix.MADV_NORMAL
-	if !readahead {
-		flags = unix.MADV_RANDOM
-	}
-	return unix.Madvise(b, flags)
-}
diff --git a/vendor/github.com/dgraph-io/badger/y/mmap_windows.go b/vendor/github.com/dgraph-io/badger/y/mmap_windows.go
deleted file mode 100644
index b2419af9..00000000
--- a/vendor/github.com/dgraph-io/badger/y/mmap_windows.go
+++ /dev/null
@@ -1,91 +0,0 @@
-// +build windows
-
-/*
- * Copyright 2019 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package y
-
-import (
-	"fmt"
-	"os"
-	"syscall"
-	"unsafe"
-)
-
-func mmap(fd *os.File, write bool, size int64) ([]byte, error) {
-	protect := syscall.PAGE_READONLY
-	access := syscall.FILE_MAP_READ
-
-	if write {
-		protect = syscall.PAGE_READWRITE
-		access = syscall.FILE_MAP_WRITE
-	}
-	fi, err := fd.Stat()
-	if err != nil {
-		return nil, err
-	}
-
-	// On Windows, we cannot mmap a file beyond its actual size.
- // So truncate the file to the size of the mmap. - if fi.Size() < size { - if err := fd.Truncate(size); err != nil { - return nil, fmt.Errorf("truncate: %s", err) - } - } - - // Open a file mapping handle. - sizelo := uint32(size >> 32) - sizehi := uint32(size) & 0xffffffff - - handler, err := syscall.CreateFileMapping(syscall.Handle(fd.Fd()), nil, - uint32(protect), sizelo, sizehi, nil) - if err != nil { - return nil, os.NewSyscallError("CreateFileMapping", err) - } - - // Create the memory map. - addr, err := syscall.MapViewOfFile(handler, uint32(access), 0, 0, uintptr(size)) - if addr == 0 { - return nil, os.NewSyscallError("MapViewOfFile", err) - } - - // Close mapping handle. - if err := syscall.CloseHandle(syscall.Handle(handler)); err != nil { - return nil, os.NewSyscallError("CloseHandle", err) - } - - // Slice memory layout - // Copied this snippet from golang/sys package - var sl = struct { - addr uintptr - len int - cap int - }{addr, int(size), int(size)} - - // Use unsafe to turn sl into a []byte. - data := *(*[]byte)(unsafe.Pointer(&sl)) - - return data, nil -} - -func munmap(b []byte) error { - return syscall.UnmapViewOfFile(uintptr(unsafe.Pointer(&b[0]))) -} - -func madvise(b []byte, readahead bool) error { - // Do Nothing. We don’t care about this setting on Windows - return nil -} diff --git a/vendor/github.com/dgraph-io/badger/y/watermark.go b/vendor/github.com/dgraph-io/badger/y/watermark.go deleted file mode 100644 index 2ff70b38..00000000 --- a/vendor/github.com/dgraph-io/badger/y/watermark.go +++ /dev/null @@ -1,255 +0,0 @@ -/* - * Copyright 2016-2018 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package y - -import ( - "container/heap" - "context" - "sync/atomic" - - "golang.org/x/net/trace" -) - -type uint64Heap []uint64 - -func (u uint64Heap) Len() int { return len(u) } -func (u uint64Heap) Less(i, j int) bool { return u[i] < u[j] } -func (u uint64Heap) Swap(i, j int) { u[i], u[j] = u[j], u[i] } -func (u *uint64Heap) Push(x interface{}) { *u = append(*u, x.(uint64)) } -func (u *uint64Heap) Pop() interface{} { - old := *u - n := len(old) - x := old[n-1] - *u = old[0 : n-1] - return x -} - -// mark contains one of more indices, along with a done boolean to indicate the -// status of the index: begin or done. It also contains waiters, who could be -// waiting for the watermark to reach >= a certain index. -type mark struct { - // Either this is an (index, waiter) pair or (index, done) or (indices, done). - index uint64 - waiter chan struct{} - indices []uint64 - done bool // Set to true if the index is done. -} - -// WaterMark is used to keep track of the minimum un-finished index. Typically, an index k becomes -// finished or "done" according to a WaterMark once Done(k) has been called -// 1. as many times as Begin(k) has, AND -// 2. a positive number of times. -// -// An index may also become "done" by calling SetDoneUntil at a time such that it is not -// inter-mingled with Begin/Done calls. 
-//
-// Since doneUntil and lastIndex addresses are passed to sync/atomic packages, we ensure that they
-// are 64-bit aligned by putting them at the beginning of the structure.
-type WaterMark struct {
-	doneUntil uint64
-	lastIndex uint64
-	Name      string
-	markCh    chan mark
-	elog      trace.EventLog
-}
-
-// Init initializes a WaterMark struct. MUST be called before using it.
-func (w *WaterMark) Init(closer *Closer, eventLogging bool) {
-	w.markCh = make(chan mark, 100)
-	if eventLogging {
-		w.elog = trace.NewEventLog("Watermark", w.Name)
-	} else {
-		w.elog = NoEventLog
-	}
-	go w.process(closer)
-}
-
-// Begin sets the last index to the given value.
-func (w *WaterMark) Begin(index uint64) {
-	atomic.StoreUint64(&w.lastIndex, index)
-	w.markCh <- mark{index: index, done: false}
-}
-
-// BeginMany works like Begin but accepts multiple indices.
-func (w *WaterMark) BeginMany(indices []uint64) {
-	atomic.StoreUint64(&w.lastIndex, indices[len(indices)-1])
-	w.markCh <- mark{index: 0, indices: indices, done: false}
-}
-
-// Done sets a single index as done.
-func (w *WaterMark) Done(index uint64) {
-	w.markCh <- mark{index: index, done: true}
-}
-
-// DoneMany works like Done but accepts multiple indices.
-func (w *WaterMark) DoneMany(indices []uint64) {
-	w.markCh <- mark{index: 0, indices: indices, done: true}
-}
-
-// DoneUntil returns the maximum index that has the property that all indices
-// less than or equal to it are done.
-func (w *WaterMark) DoneUntil() uint64 {
-	return atomic.LoadUint64(&w.doneUntil)
-}
-
-// SetDoneUntil sets the maximum index that has the property that all indices
-// less than or equal to it are done.
-func (w *WaterMark) SetDoneUntil(val uint64) {
-	atomic.StoreUint64(&w.doneUntil, val)
-}
-
-// LastIndex returns the last index for which Begin has been called.
-func (w *WaterMark) LastIndex() uint64 {
-	return atomic.LoadUint64(&w.lastIndex)
-}
-
-// WaitForMark waits until the given index is marked as done.
-func (w *WaterMark) WaitForMark(ctx context.Context, index uint64) error {
-	if w.DoneUntil() >= index {
-		return nil
-	}
-	waitCh := make(chan struct{})
-	w.markCh <- mark{index: index, waiter: waitCh}
-
-	select {
-	case <-ctx.Done():
-		return ctx.Err()
-	case <-waitCh:
-		return nil
-	}
-}
-
-// process processes the Mark channel. It is not thread-safe, so run only one
-// process goroutine; one is sufficient, because all of its operations use only
-// memory and CPU.
-// Each index has to emit at least one begin watermark, in serial order; otherwise
-// waiters can get blocked indefinitely. Example: with a watermark at 100 and a
-// waiter at 101, if no watermark is ever emitted at index 101, the waiter is stuck
-// indefinitely, since it cannot tell whether the task at 101 chose not to emit a
-// watermark or simply has not been scheduled yet.
-func (w *WaterMark) process(closer *Closer) {
-	defer closer.Done()
-
-	var indices uint64Heap
-	// pending maps raft proposal index to the number of pending mutations for this proposal.
-	pending := make(map[uint64]int)
-	waiters := make(map[uint64][]chan struct{})
-
-	heap.Init(&indices)
-	var loop uint64
-
-	processOne := func(index uint64, done bool) {
-		// If not already done, then set. Otherwise, don't undo a done entry.
-		prev, present := pending[index]
-		if !present {
-			heap.Push(&indices, index)
-		}
-
-		delta := 1
-		if done {
-			delta = -1
-		}
-		pending[index] = prev + delta
-
-		loop++
-		if len(indices) > 0 && loop%10000 == 0 {
-			min := indices[0]
-			w.elog.Printf("WaterMark %s: Done entry %4d. 
Size: %4d Watermark: %-4d Looking for: "+ - "%-4d. Value: %d\n", w.Name, index, len(indices), w.DoneUntil(), min, pending[min]) - } - - // Update mark by going through all indices in order; and checking if they have - // been done. Stop at the first index, which isn't done. - doneUntil := w.DoneUntil() - if doneUntil > index { - AssertTruef(false, "Name: %s doneUntil: %d. Index: %d", w.Name, doneUntil, index) - } - - until := doneUntil - loops := 0 - - for len(indices) > 0 { - min := indices[0] - if done := pending[min]; done > 0 { - break // len(indices) will be > 0. - } - // Even if done is called multiple times causing it to become - // negative, we should still pop the index. - heap.Pop(&indices) - delete(pending, min) - until = min - loops++ - } - - if until != doneUntil { - AssertTrue(atomic.CompareAndSwapUint64(&w.doneUntil, doneUntil, until)) - w.elog.Printf("%s: Done until %d. Loops: %d\n", w.Name, until, loops) - } - - notifyAndRemove := func(idx uint64, toNotify []chan struct{}) { - for _, ch := range toNotify { - close(ch) - } - delete(waiters, idx) // Release the memory back. - } - - if until-doneUntil <= uint64(len(waiters)) { - // Issue #908 showed that if doneUntil is close to 2^60, while until is zero, this loop - // can hog up CPU just iterating over integers creating a busy-wait loop. So, only do - // this path if until - doneUntil is less than the number of waiters. - for idx := doneUntil + 1; idx <= until; idx++ { - if toNotify, ok := waiters[idx]; ok { - notifyAndRemove(idx, toNotify) - } - } - } else { - for idx, toNotify := range waiters { - if idx <= until { - notifyAndRemove(idx, toNotify) - } - } - } // end of notifying waiters. - } - - for { - select { - case <-closer.HasBeenClosed(): - return - case mark := <-w.markCh: - if mark.waiter != nil { - doneUntil := atomic.LoadUint64(&w.doneUntil) - if doneUntil >= mark.index { - close(mark.waiter) - } else { - ws, ok := waiters[mark.index] - if !ok { - waiters[mark.index] = []chan struct{}{mark.waiter} - } else { - waiters[mark.index] = append(ws, mark.waiter) - } - } - } else { - if mark.index > 0 { - processOne(mark.index, mark.done) - } - for _, index := range mark.indices { - processOne(index, mark.done) - } - } - } - } -} diff --git a/vendor/github.com/dgraph-io/badger/y/y.go b/vendor/github.com/dgraph-io/badger/y/y.go deleted file mode 100644 index e594b708..00000000 --- a/vendor/github.com/dgraph-io/badger/y/y.go +++ /dev/null @@ -1,302 +0,0 @@ -/* - * Copyright 2017 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package y - -import ( - "bytes" - "encoding/binary" - "fmt" - "hash/crc32" - "math" - "os" - "sync" - "time" - - "github.com/pkg/errors" -) - -// ErrEOF indicates an end of file when trying to read from a memory mapped file -// and encountering the end of slice. 
-var ErrEOF = errors.New("End of mapped region") - -const ( - // Sync indicates that O_DSYNC should be set on the underlying file, - // ensuring that data writes do not return until the data is flushed - // to disk. - Sync = 1 << iota - // ReadOnly opens the underlying file on a read-only basis. - ReadOnly -) - -var ( - // This is O_DSYNC (datasync) on platforms that support it -- see file_unix.go - datasyncFileFlag = 0x0 - - // CastagnoliCrcTable is a CRC32 polynomial table - CastagnoliCrcTable = crc32.MakeTable(crc32.Castagnoli) - - // Dummy channel for nil closers. - dummyCloserChan = make(chan struct{}) -) - -// OpenExistingFile opens an existing file, errors if it doesn't exist. -func OpenExistingFile(filename string, flags uint32) (*os.File, error) { - openFlags := os.O_RDWR - if flags&ReadOnly != 0 { - openFlags = os.O_RDONLY - } - - if flags&Sync != 0 { - openFlags |= datasyncFileFlag - } - return os.OpenFile(filename, openFlags, 0) -} - -// CreateSyncedFile creates a new file (using O_EXCL), errors if it already existed. -func CreateSyncedFile(filename string, sync bool) (*os.File, error) { - flags := os.O_RDWR | os.O_CREATE | os.O_EXCL - if sync { - flags |= datasyncFileFlag - } - return os.OpenFile(filename, flags, 0600) -} - -// OpenSyncedFile creates the file if one doesn't exist. -func OpenSyncedFile(filename string, sync bool) (*os.File, error) { - flags := os.O_RDWR | os.O_CREATE - if sync { - flags |= datasyncFileFlag - } - return os.OpenFile(filename, flags, 0600) -} - -// OpenTruncFile opens the file with O_RDWR | O_CREATE | O_TRUNC -func OpenTruncFile(filename string, sync bool) (*os.File, error) { - flags := os.O_RDWR | os.O_CREATE | os.O_TRUNC - if sync { - flags |= datasyncFileFlag - } - return os.OpenFile(filename, flags, 0600) -} - -// SafeCopy does append(a[:0], src...). -func SafeCopy(a, src []byte) []byte { - return append(a[:0], src...) -} - -// Copy copies a byte slice and returns the copied slice. -func Copy(a []byte) []byte { - b := make([]byte, len(a)) - copy(b, a) - return b -} - -// KeyWithTs generates a new key by appending ts to key. -func KeyWithTs(key []byte, ts uint64) []byte { - out := make([]byte, len(key)+8) - copy(out, key) - binary.BigEndian.PutUint64(out[len(key):], math.MaxUint64-ts) - return out -} - -// ParseTs parses the timestamp from the key bytes. -func ParseTs(key []byte) uint64 { - if len(key) <= 8 { - return 0 - } - return math.MaxUint64 - binary.BigEndian.Uint64(key[len(key)-8:]) -} - -// CompareKeys checks the key without timestamp and checks the timestamp if keyNoTs -// is same. -// a would be sorted higher than aa if we use bytes.compare -// All keys should have timestamp. -func CompareKeys(key1, key2 []byte) int { - AssertTrue(len(key1) > 8 && len(key2) > 8) - if cmp := bytes.Compare(key1[:len(key1)-8], key2[:len(key2)-8]); cmp != 0 { - return cmp - } - return bytes.Compare(key1[len(key1)-8:], key2[len(key2)-8:]) -} - -// ParseKey parses the actual key from the key bytes. -func ParseKey(key []byte) []byte { - if key == nil { - return nil - } - - AssertTrue(len(key) > 8) - return key[:len(key)-8] -} - -// SameKey checks for key equality ignoring the version timestamp suffix. -func SameKey(src, dst []byte) bool { - if len(src) != len(dst) { - return false - } - return bytes.Equal(ParseKey(src), ParseKey(dst)) -} - -// Slice holds a reusable buf, will reallocate if you request a larger size than ever before. -// One problem is with n distinct sizes in random order it'll reallocate log(n) times. 
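Editor's note: the key helpers above store the version timestamp as `math.MaxUint64 - ts` in the last eight bytes, so a plain byte-wise comparison orders newer versions of a key first. A small sketch of that property, under the same import-path assumption as above (the `Slice` type the preceding comment documents follows below):

```go
package main

import (
	"fmt"

	"github.com/dgraph-io/badger/y"
)

func main() {
	v1 := y.KeyWithTs([]byte("alpha"), 1) // older version
	v9 := y.KeyWithTs([]byte("alpha"), 9) // newer version

	// The suffix holds math.MaxUint64-ts, so the newer version
	// compares lower and therefore sorts first.
	fmt.Println(y.CompareKeys(v9, v1) < 0) // true

	fmt.Println(y.ParseTs(v9))          // 9
	fmt.Println(string(y.ParseKey(v9))) // alpha
	fmt.Println(y.SameKey(v1, v9))      // true
}
```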
-type Slice struct {
-	buf []byte
-}
-
-// Resize reuses the Slice's buffer (or makes a new one) and returns a slice in that buffer of
-// length sz.
-func (s *Slice) Resize(sz int) []byte {
-	if cap(s.buf) < sz {
-		s.buf = make([]byte, sz)
-	}
-	return s.buf[0:sz]
-}
-
-// FixedDuration returns a string representation of the given duration with the
-// hours, minutes, and seconds.
-func FixedDuration(d time.Duration) string {
-	str := fmt.Sprintf("%02ds", int(d.Seconds())%60)
-	if d >= time.Minute {
-		str = fmt.Sprintf("%02dm", int(d.Minutes())%60) + str
-	}
-	if d >= time.Hour {
-		str = fmt.Sprintf("%02dh", int(d.Hours())) + str
-	}
-	return str
-}
-
-// Closer holds the two things we need to close a goroutine and wait for it to finish: a chan
-// to tell the goroutine to shut down, and a WaitGroup with which to wait for it to finish shutting
-// down.
-type Closer struct {
-	closed  chan struct{}
-	waiting sync.WaitGroup
-}
-
-// NewCloser constructs a new Closer, with an initial count on the WaitGroup.
-func NewCloser(initial int) *Closer {
-	ret := &Closer{closed: make(chan struct{})}
-	ret.waiting.Add(initial)
-	return ret
-}
-
-// AddRunning Add()'s delta to the WaitGroup.
-func (lc *Closer) AddRunning(delta int) {
-	lc.waiting.Add(delta)
-}
-
-// Signal closes the channel returned by HasBeenClosed.
-func (lc *Closer) Signal() {
-	close(lc.closed)
-}
-
-// HasBeenClosed gets signaled when Signal() is called.
-func (lc *Closer) HasBeenClosed() <-chan struct{} {
-	if lc == nil {
-		return dummyCloserChan
-	}
-	return lc.closed
-}
-
-// Done calls Done() on the WaitGroup.
-func (lc *Closer) Done() {
-	if lc == nil {
-		return
-	}
-	lc.waiting.Done()
-}
-
-// Wait waits on the WaitGroup. (It waits for NewCloser's initial value, AddRunning, and Done
-// calls to balance out.)
-func (lc *Closer) Wait() {
-	lc.waiting.Wait()
-}
-
-// SignalAndWait calls Signal(), then Wait().
-func (lc *Closer) SignalAndWait() {
-	lc.Signal()
-	lc.Wait()
-}
-
-// Throttle allows a limited number of workers to run at a time. It also
-// provides a mechanism to check for errors encountered by workers and wait for
-// them to finish.
-type Throttle struct {
-	once      sync.Once
-	wg        sync.WaitGroup
-	ch        chan struct{}
-	errCh     chan error
-	finishErr error
-}
-
-// NewThrottle creates a new throttle with a max number of workers.
-func NewThrottle(max int) *Throttle {
-	return &Throttle{
-		ch:    make(chan struct{}, max),
-		errCh: make(chan error, max),
-	}
-}
-
-// Do should be called by workers before they start working. It blocks if the
-// maximum number of workers are already working. If it detects an error from a
-// previously Done worker, it returns that error.
-func (t *Throttle) Do() error {
-	for {
-		select {
-		case t.ch <- struct{}{}:
-			t.wg.Add(1)
-			return nil
-		case err := <-t.errCh:
-			if err != nil {
-				return err
-			}
-		}
-	}
-}
-
-// Done should be called by workers when they finish working. They can also
-// pass the error status of work done.
-func (t *Throttle) Done(err error) {
-	if err != nil {
-		t.errCh <- err
-	}
-	select {
-	case <-t.ch:
-	default:
-		panic("Throttle Do Done mismatch")
-	}
-	t.wg.Done()
-}
-
-// Finish waits until all workers have finished working. It returns any error passed by Done.
-// If Finish is called multiple times, it waits for the workers only once (on the first call);
-// subsequent calls return the same error found by the first call.
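Editor's note: the `Do`/`Done`/`Finish` trio above forms a small bounded-worker pattern (the body of `Finish` follows below). A hedged usage sketch, again assuming the vendored package is importable; the worker payload is a placeholder:

```go
package main

import (
	"fmt"

	"github.com/dgraph-io/badger/y"
)

func main() {
	th := y.NewThrottle(3) // at most three workers in flight

	for i := 0; i < 10; i++ {
		// Do blocks while three workers are running, and surfaces any
		// error an earlier worker passed to Done.
		if err := th.Do(); err != nil {
			break
		}
		go func(n int) {
			// ... do the real work for n here ...
			th.Done(nil) // report success (or the work's error)
		}(i)
	}

	// Finish waits for all workers and returns the first reported error.
	if err := th.Finish(); err != nil {
		fmt.Println("worker failed:", err)
	}
}
```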
-func (t *Throttle) Finish() error { - t.once.Do(func() { - t.wg.Wait() - close(t.ch) - close(t.errCh) - for err := range t.errCh { - if err != nil { - t.finishErr = err - return - } - } - }) - - return t.finishErr -} diff --git a/vendor/github.com/dgraph-io/ristretto/.deepsource.toml b/vendor/github.com/dgraph-io/ristretto/.deepsource.toml deleted file mode 100644 index 40609eff..00000000 --- a/vendor/github.com/dgraph-io/ristretto/.deepsource.toml +++ /dev/null @@ -1,17 +0,0 @@ -version = 1 - -test_patterns = [ - '**/*_test.go' -] - -exclude_patterns = [ - -] - -[[analyzers]] -name = 'go' -enabled = true - - - [analyzers.meta] - import_path = 'github.com/dgraph-io/ristretto' diff --git a/vendor/github.com/dgraph-io/ristretto/CHANGELOG.md b/vendor/github.com/dgraph-io/ristretto/CHANGELOG.md deleted file mode 100644 index 2c985510..00000000 --- a/vendor/github.com/dgraph-io/ristretto/CHANGELOG.md +++ /dev/null @@ -1,54 +0,0 @@ -# Changelog -All notable changes to this project will be documented in this file. - -The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) -and this project will adhere to [Semantic Versioning](http://semver.org/spec/v2.0.0.html) starting v1.0.0. - -## Unreleased - -### Changed - -### Added - -### Fixed - -## [0.0.3] - 2020-07-06 - -[0.0.3]: https://github.com/dgraph-io/ristretto/compare/v0.0.2..v0.0.3 - -### Changed - -### Added - -### Fixed - -- z: use MemHashString and xxhash.Sum64String ([#153][]) -- Check conflict key before updating expiration map. ([#154][]) -- Fix race condition in Cache.Clear ([#133][]) -- Improve handling of updated items ([#168][]) -- Fix droppedSets count while updating the item ([#171][]) - -## [0.0.2] - 2020-02-24 - -[0.0.2]: https://github.com/dgraph-io/ristretto/compare/v0.0.1..v0.0.2 - -### Added - -- Sets with TTL. ([#122][]) - -### Fixed - -- Fix the way metrics are handled for deletions. ([#111][]) -- Support nil `*Cache` values in `Clear` and `Close`. ([#119][]) -- Delete item immediately. ([#113][]) -- Remove key from policy after TTL eviction. ([#130][]) - -[#111]: https://github.com/dgraph-io/ristretto/issues/111 -[#113]: https://github.com/dgraph-io/ristretto/issues/113 -[#119]: https://github.com/dgraph-io/ristretto/issues/119 -[#122]: https://github.com/dgraph-io/ristretto/issues/122 -[#130]: https://github.com/dgraph-io/ristretto/issues/130 - -## 0.0.1 - -First release. Basic cache functionality based on a LFU policy. diff --git a/vendor/github.com/dgraph-io/ristretto/LICENSE b/vendor/github.com/dgraph-io/ristretto/LICENSE deleted file mode 100644 index d9a10c0d..00000000 --- a/vendor/github.com/dgraph-io/ristretto/LICENSE +++ /dev/null @@ -1,176 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS diff --git a/vendor/github.com/dgraph-io/ristretto/README.md b/vendor/github.com/dgraph-io/ristretto/README.md deleted file mode 100644 index 7a97f289..00000000 --- a/vendor/github.com/dgraph-io/ristretto/README.md +++ /dev/null @@ -1,211 +0,0 @@ -# Ristretto -[![Go Doc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/dgraph-io/ristretto) -[![Go Report Card](https://img.shields.io/badge/go%20report-A%2B-brightgreen)](https://goreportcard.com/report/github.com/dgraph-io/ristretto) -[![Coverage](https://img.shields.io/badge/coverage-100%25-brightgreen)](https://gocover.io/github.com/dgraph-io/ristretto) -![Tests](https://github.com/dgraph-io/ristretto/workflows/tests/badge.svg) - -Ristretto is a fast, concurrent cache library built with a focus on performance and correctness. - -The motivation to build Ristretto comes from the need for a contention-free -cache in [Dgraph][]. - -**Use [Discuss Issues](https://discuss.dgraph.io/tags/c/issues/35/ristretto/40) for reporting issues about this repository.** - -[Dgraph]: https://github.com/dgraph-io/dgraph - -## Features - -* **High Hit Ratios** - with our unique admission/eviction policy pairing, Ristretto's performance is best in class. 
- * **Eviction: SampledLFU** - on par with exact LRU and better performance on Search and Database traces. - * **Admission: TinyLFU** - extra performance with little memory overhead (12 bits per counter). -* **Fast Throughput** - we use a variety of techniques for managing contention and the result is excellent throughput. -* **Cost-Based Eviction** - any large new item deemed valuable can evict multiple smaller items (cost could be anything). -* **Fully Concurrent** - you can use as many goroutines as you want with little throughput degradation. -* **Metrics** - optional performance metrics for throughput, hit ratios, and other stats. -* **Simple API** - just figure out your ideal `Config` values and you're off and running. - -## Status - -Ristretto is usable but still under active development. We expect it to be production ready in the near future. - -## Table of Contents - -* [Usage](#Usage) - * [Example](#Example) - * [Config](#Config) - * [NumCounters](#Config) - * [MaxCost](#Config) - * [BufferItems](#Config) - * [Metrics](#Config) - * [OnEvict](#Config) - * [KeyToHash](#Config) - * [Cost](#Config) -* [Benchmarks](#Benchmarks) - * [Hit Ratios](#Hit-Ratios) - * [Search](#Search) - * [Database](#Database) - * [Looping](#Looping) - * [CODASYL](#CODASYL) - * [Throughput](#Throughput) - * [Mixed](#Mixed) - * [Read](#Read) - * [Write](#Write) -* [FAQ](#FAQ) - -## Usage - -### Example - -```go -func main() { - cache, err := ristretto.NewCache(&ristretto.Config{ - NumCounters: 1e7, // number of keys to track frequency of (10M). - MaxCost: 1 << 30, // maximum cost of cache (1GB). - BufferItems: 64, // number of keys per Get buffer. - }) - if err != nil { - panic(err) - } - - // set a value with a cost of 1 - cache.Set("key", "value", 1) - - // wait for value to pass through buffers - time.Sleep(10 * time.Millisecond) - - value, found := cache.Get("key") - if !found { - panic("missing value") - } - fmt.Println(value) - cache.Del("key") -} -``` - -### Config - -The `Config` struct is passed to `NewCache` when creating Ristretto instances (see the example above). - -**NumCounters** `int64` - -NumCounters is the number of 4-bit access counters to keep for admission and eviction. We've seen good performance in setting this to 10x the number of items you expect to keep in the cache when full. - -For example, if you expect each item to have a cost of 1 and MaxCost is 100, set NumCounters to 1,000. Or, if you use variable cost values but expect the cache to hold around 10,000 items when full, set NumCounters to 100,000. The important thing is the *number of unique items* in the full cache, not necessarily the MaxCost value. - -**MaxCost** `int64` - -MaxCost is how eviction decisions are made. For example, if MaxCost is 100 and a new item with a cost of 1 increases total cache cost to 101, 1 item will be evicted. - -MaxCost can also be used to denote the max size in bytes. For example, if MaxCost is 1,000,000 (1MB) and the cache is full with 1,000 1KB items, a new item (that's accepted) would cause 5 1KB items to be evicted. - -MaxCost could be anything as long as it matches how you're using the cost values when calling Set. - -**BufferItems** `int64` - -BufferItems is the size of the Get buffers. The best value we've found for this is 64. - -If for some reason you see Get performance decreasing with lots of contention (you shouldn't), try increasing this value in increments of 64. This is a fine-tuning mechanism and you probably won't have to touch this. 
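Editor's note: the quick-start example earlier in this README sleeps to let Sets drain through the internal buffers; the vendored version being removed here also has a `Cache.Wait()` that blocks until the set buffer has been applied, which makes small demos deterministic. A sketch tying that to the cost-function mechanism described in the `Cost` entry below (the cost function itself is illustrative):

```go
package main

import (
	"fmt"

	"github.com/dgraph-io/ristretto"
)

func main() {
	cache, err := ristretto.NewCache(&ristretto.Config{
		NumCounters: 1e7,     // ~10x the number of items expected when full
		MaxCost:     1 << 30, // total cost budget
		BufferItems: 64,      // recommended Get-buffer size
		Cost: func(value interface{}) int64 {
			// Illustrative: charge strings by their length.
			if s, ok := value.(string); ok {
				return int64(len(s))
			}
			return 1
		},
	})
	if err != nil {
		panic(err)
	}

	// A cost of 0 defers to the Cost function above.
	cache.Set("greeting", "hello world", 0)
	cache.Wait() // deterministic alternative to time.Sleep

	if v, ok := cache.Get("greeting"); ok {
		fmt.Println(v)
	}
}
```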
-
-**Metrics** `bool`
-
-Metrics is true when you want real-time logging of a variety of stats. The
-reason this is a Config flag is that there's a 10% throughput performance
-overhead.
-
-**OnEvict** `func(hashes [2]uint64, value interface{}, cost int64)`
-
-OnEvict is called for every eviction.
-
-**KeyToHash** `func(key interface{}) [2]uint64`
-
-KeyToHash is the hashing algorithm used for every key. If this is nil, Ristretto has a variety of [defaults depending on the underlying interface type](https://github.com/dgraph-io/ristretto/blob/master/z/z.go#L19-L41).
-
-Note that if you want 128-bit hashes you should use the full `[2]uint64`,
-otherwise just fill the `uint64` at the `0` position and it will behave like
-any 64-bit hash.
-
-**Cost** `func(value interface{}) int64`
-
-Cost is an optional function you can pass to the Config in order to evaluate
-item cost at runtime, and only for the Set calls that aren't dropped (this is
-useful if calculating item cost is particularly expensive and you don't want to
-waste time on items that will be dropped anyway).
-
-To signal to Ristretto that you'd like to use this Cost function:
-
-1. Set the Cost field to a non-nil function.
-2. When calling Set for new items or item updates, use a `cost` of 0.
-
-## Benchmarks
-
-The benchmarks can be found in https://github.com/dgraph-io/benchmarks/tree/master/cachebench/ristretto.
-
-### Hit Ratios
-
-#### Search
-
-This trace is described as "disk read accesses initiated by a large commercial
-search engine in response to various web search requests."
-
-*(hit-ratio graphs omitted)*
-
-#### Database
-
-This trace is described as "a database server running at a commercial site
-running an ERP application on top of a commercial database."
-
-*(hit-ratio graphs omitted)*
-
-#### Looping
-
-This trace demonstrates a looping access pattern.
-
-*(hit-ratio graphs omitted)*
-
-#### CODASYL
-
-This trace is described as "references to a CODASYL database for a one hour
-period."
-
-*(hit-ratio graphs omitted)*
-
-### Throughput
-
-All throughput benchmarks were run on an Intel Core i7-8700K (3.7GHz) with 16 GB
-of RAM.
-
-#### Mixed
-
-*(throughput graph omitted)*
-
-#### Read
-
-*(throughput graph omitted)*
-
-#### Write
-
-*(throughput graph omitted)*
- -## FAQ - -### How are you achieving this performance? What shortcuts are you taking? - -We go into detail in the [Ristretto blog post](https://blog.dgraph.io/post/introducing-ristretto-high-perf-go-cache/), but in short: our throughput performance can be attributed to a mix of batching and eventual consistency. Our hit ratio performance is mostly due to an excellent [admission policy](https://arxiv.org/abs/1512.00727) and SampledLFU eviction policy. - -As for "shortcuts," the only thing Ristretto does that could be construed as one is dropping some Set calls. That means a Set call for a new item (updates are guaranteed) isn't guaranteed to make it into the cache. The new item could be dropped at two points: when passing through the Set buffer or when passing through the admission policy. However, this doesn't affect hit ratios much at all as we expect the most popular items to be Set multiple times and eventually make it in the cache. - -### Is Ristretto distributed? - -No, it's just like any other Go library that you can import into your project and use in a single process. diff --git a/vendor/github.com/dgraph-io/ristretto/cache.go b/vendor/github.com/dgraph-io/ristretto/cache.go deleted file mode 100644 index 10ff7a8a..00000000 --- a/vendor/github.com/dgraph-io/ristretto/cache.go +++ /dev/null @@ -1,648 +0,0 @@ -/* - * Copyright 2019 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// Ristretto is a fast, fixed size, in-memory cache with a dual focus on -// throughput and hit ratio performance. You can easily add Ristretto to an -// existing system and keep the most valuable data where you need it. -package ristretto - -import ( - "bytes" - "errors" - "fmt" - "sync" - "sync/atomic" - "time" - - "github.com/dgraph-io/ristretto/z" -) - -var ( - // TODO: find the optimal value for this or make it configurable - setBufSize = 32 * 1024 -) - -type itemCallback func(*Item) - -// Cache is a thread-safe implementation of a hashmap with a TinyLFU admission -// policy and a Sampled LFU eviction policy. You can use the same Cache instance -// from as many goroutines as you want. -type Cache struct { - // store is the central concurrent hashmap where key-value items are stored. - store store - // policy determines what gets let in to the cache and what gets kicked out. - policy policy - // getBuf is a custom ring buffer implementation that gets pushed to when - // keys are read. - getBuf *ringBuffer - // setBuf is a buffer allowing us to batch/drop Sets during times of high - // contention. - setBuf chan *Item - // onEvict is called for item evictions. - onEvict itemCallback - // onReject is called when an item is rejected via admission policy. - onReject itemCallback - // onExit is called whenever a value goes out of scope from the cache. - onExit (func(interface{})) - // KeyToHash function is used to customize the key hashing algorithm. - // Each key will be hashed using the provided function. 
If keyToHash value - // is not set, the default keyToHash function is used. - keyToHash func(interface{}) (uint64, uint64) - // stop is used to stop the processItems goroutine. - stop chan struct{} - // cost calculates cost from a value. - cost func(value interface{}) int64 - // cleanupTicker is used to periodically check for entries whose TTL has passed. - cleanupTicker *time.Ticker - // Metrics contains a running log of important statistics like hits, misses, - // and dropped items. - Metrics *Metrics -} - -// Config is passed to NewCache for creating new Cache instances. -type Config struct { - // NumCounters determines the number of counters (keys) to keep that hold - // access frequency information. It's generally a good idea to have more - // counters than the max cache capacity, as this will improve eviction - // accuracy and subsequent hit ratios. - // - // For example, if you expect your cache to hold 1,000,000 items when full, - // NumCounters should be 10,000,000 (10x). Each counter takes up 4 bits, so - // keeping 10,000,000 counters would require 5MB of memory. - NumCounters int64 - // MaxCost can be considered as the cache capacity, in whatever units you - // choose to use. - // - // For example, if you want the cache to have a max capacity of 100MB, you - // would set MaxCost to 100,000,000 and pass an item's number of bytes as - // the `cost` parameter for calls to Set. If new items are accepted, the - // eviction process will take care of making room for the new item and not - // overflowing the MaxCost value. - MaxCost int64 - // BufferItems determines the size of Get buffers. - // - // Unless you have a rare use case, using `64` as the BufferItems value - // results in good performance. - BufferItems int64 - // Metrics determines whether cache statistics are kept during the cache's - // lifetime. There *is* some overhead to keeping statistics, so you should - // only set this flag to true when testing or throughput performance isn't a - // major factor. - Metrics bool - // OnEvict is called for every eviction and passes the hashed key, value, - // and cost to the function. - OnEvict func(item *Item) - // OnReject is called for every rejection done via the policy. - OnReject func(item *Item) - // OnExit is called whenever a value is removed from cache. This can be - // used to do manual memory deallocation. Would also be called on eviction - // and rejection of the value. - OnExit func(val interface{}) - // KeyToHash function is used to customize the key hashing algorithm. - // Each key will be hashed using the provided function. If keyToHash value - // is not set, the default keyToHash function is used. - KeyToHash func(key interface{}) (uint64, uint64) - // Cost evaluates a value and outputs a corresponding cost. This function - // is ran after Set is called for a new item or an item update with a cost - // param of 0. - Cost func(value interface{}) int64 -} - -type itemFlag byte - -const ( - itemNew itemFlag = iota - itemDelete - itemUpdate -) - -// Item is passed to setBuf so items can eventually be added to the cache. -type Item struct { - flag itemFlag - Key uint64 - Conflict uint64 - Value interface{} - Cost int64 - Expiration time.Time - wg *sync.WaitGroup -} - -// NewCache returns a new Cache instance and any configuration errors, if any. 
-func NewCache(config *Config) (*Cache, error) {
-	switch {
-	case config.NumCounters == 0:
-		return nil, errors.New("NumCounters can't be zero")
-	case config.MaxCost == 0:
-		return nil, errors.New("MaxCost can't be zero")
-	case config.BufferItems == 0:
-		return nil, errors.New("BufferItems can't be zero")
-	}
-	policy := newPolicy(config.NumCounters, config.MaxCost)
-	cache := &Cache{
-		store:         newStore(),
-		policy:        policy,
-		getBuf:        newRingBuffer(policy, config.BufferItems),
-		setBuf:        make(chan *Item, setBufSize),
-		keyToHash:     config.KeyToHash,
-		stop:          make(chan struct{}),
-		cost:          config.Cost,
-		cleanupTicker: time.NewTicker(time.Duration(bucketDurationSecs) * time.Second / 2),
-	}
-	cache.onExit = func(val interface{}) {
-		if config.OnExit != nil && val != nil {
-			config.OnExit(val)
-		}
-	}
-	cache.onEvict = func(item *Item) {
-		if config.OnEvict != nil {
-			config.OnEvict(item)
-		}
-		cache.onExit(item.Value)
-	}
-	cache.onReject = func(item *Item) {
-		if config.OnReject != nil {
-			config.OnReject(item)
-		}
-		cache.onExit(item.Value)
-	}
-	if cache.keyToHash == nil {
-		cache.keyToHash = z.KeyToHash
-	}
-	if config.Metrics {
-		cache.collectMetrics()
-	}
-	// NOTE: benchmarks seem to show that performance decreases the more
-	// goroutines we have running cache.processItems(), so 1 should
-	// usually be sufficient
-	go cache.processItems()
-	return cache, nil
-}
-
-func (c *Cache) Wait() {
-	if c == nil {
-		return
-	}
-	wg := &sync.WaitGroup{}
-	wg.Add(1)
-	c.setBuf <- &Item{wg: wg}
-	wg.Wait()
-}
-
-// Get returns the value (if any) and a boolean representing whether the
-// value was found or not. The value can be nil and the boolean can be true at
-// the same time.
-func (c *Cache) Get(key interface{}) (interface{}, bool) {
-	if c == nil || key == nil {
-		return nil, false
-	}
-	keyHash, conflictHash := c.keyToHash(key)
-	c.getBuf.Push(keyHash)
-	value, ok := c.store.Get(keyHash, conflictHash)
-	if ok {
-		c.Metrics.add(hit, keyHash, 1)
-	} else {
-		c.Metrics.add(miss, keyHash, 1)
-	}
-	return value, ok
-}
-
-// Set attempts to add the key-value item to the cache. If it returns false,
-// then the Set was dropped and the key-value item isn't added to the cache. If
-// it returns true, there's still a chance it could be dropped by the policy if
-// it's determined that the key-value item isn't worth keeping, but otherwise the
-// item will be added and other items will be evicted in order to make room.
-//
-// To dynamically evaluate an item's cost using the Config.Cost function, set
-// the cost parameter to 0; Cost will be run when needed to find the item's
-// true cost.
-func (c *Cache) Set(key, value interface{}, cost int64) bool {
-	return c.SetWithTTL(key, value, cost, 0*time.Second)
-}
-
-// SetWithTTL works like Set but adds a key-value pair to the cache that will expire
-// after the specified TTL (time to live) has passed. A zero value means the value never
-// expires, which is identical to calling Set. A negative value is a no-op and the value
-// is discarded.
-func (c *Cache) SetWithTTL(key, value interface{}, cost int64, ttl time.Duration) bool {
-	if c == nil || key == nil {
-		return false
-	}
-
-	var expiration time.Time
-	switch {
-	case ttl == 0:
-		// No expiration.
-		break
-	case ttl < 0:
-		// Treat this as a no-op.
- return false - default: - expiration = time.Now().Add(ttl) - } - - keyHash, conflictHash := c.keyToHash(key) - i := &Item{ - flag: itemNew, - Key: keyHash, - Conflict: conflictHash, - Value: value, - Cost: cost, - Expiration: expiration, - } - // cost is eventually updated. The expiration must also be immediately updated - // to prevent items from being prematurely removed from the map. - if prev, ok := c.store.Update(i); ok { - c.onExit(prev) - i.flag = itemUpdate - } - // Attempt to send item to policy. - select { - case c.setBuf <- i: - return true - default: - if i.flag == itemUpdate { - // Return true if this was an update operation since we've already - // updated the store. For all the other operations (set/delete), we - // return false which means the item was not inserted. - return true - } - c.Metrics.add(dropSets, keyHash, 1) - return false - } -} - -// Del deletes the key-value item from the cache if it exists. -func (c *Cache) Del(key interface{}) { - if c == nil || key == nil { - return - } - keyHash, conflictHash := c.keyToHash(key) - // Delete immediately. - _, prev := c.store.Del(keyHash, conflictHash) - c.onExit(prev) - // If we've set an item, it would be applied slightly later. - // So we must push the same item to `setBuf` with the deletion flag. - // This ensures that if a set is followed by a delete, it will be - // applied in the correct order. - c.setBuf <- &Item{ - flag: itemDelete, - Key: keyHash, - Conflict: conflictHash, - } -} - -// Close stops all goroutines and closes all channels. -func (c *Cache) Close() { - if c == nil || c.stop == nil { - return - } - // Block until processItems goroutine is returned. - c.stop <- struct{}{} - close(c.stop) - c.stop = nil - close(c.setBuf) - c.policy.Close() -} - -// Clear empties the hashmap and zeroes all policy counters. Note that this is -// not an atomic operation (but that shouldn't be a problem as it's assumed that -// Set/Get calls won't be occurring until after this). -func (c *Cache) Clear() { - if c == nil { - return - } - // Block until processItems goroutine is returned. - c.stop <- struct{}{} - - // Clear out the setBuf channel. -loop: - for { - select { - case i := <-c.setBuf: - if i.flag != itemUpdate { - // In itemUpdate, the value is already set in the store. So, no need to call - // onEvict here. - c.onEvict(i) - } - default: - break loop - } - } - - // Clear value hashmap and policy data. - c.policy.Clear() - c.store.Clear(c.onEvict) - // Only reset metrics if they're enabled. - if c.Metrics != nil { - c.Metrics.Clear() - } - // Restart processItems goroutine. - go c.processItems() -} - -// processItems is ran by goroutines processing the Set buffer. -func (c *Cache) processItems() { - startTs := make(map[uint64]time.Time) - numToKeep := 100000 // TODO: Make this configurable via options. - - trackAdmission := func(key uint64) { - if c.Metrics == nil { - return - } - startTs[key] = time.Now() - if len(startTs) > numToKeep { - for k := range startTs { - if len(startTs) <= numToKeep { - break - } - delete(startTs, k) - } - } - } - onEvict := func(i *Item) { - if ts, has := startTs[i.Key]; has { - c.Metrics.trackEviction(int64(time.Since(ts) / time.Second)) - delete(startTs, i.Key) - } - if c.onEvict != nil { - c.onEvict(i) - } - } - - for { - select { - case i := <-c.setBuf: - if i.wg != nil { - i.wg.Done() - continue - } - // Calculate item cost value if new or update. 
- if i.Cost == 0 && c.cost != nil && i.flag != itemDelete { - i.Cost = c.cost(i.Value) - } - switch i.flag { - case itemNew: - victims, added := c.policy.Add(i.Key, i.Cost) - if added { - c.store.Set(i) - c.Metrics.add(keyAdd, i.Key, 1) - trackAdmission(i.Key) - } else { - c.onReject(i) - } - for _, victim := range victims { - victim.Conflict, victim.Value = c.store.Del(victim.Key, 0) - onEvict(victim) - } - - case itemUpdate: - c.policy.Update(i.Key, i.Cost) - - case itemDelete: - c.policy.Del(i.Key) // Deals with metrics updates. - _, val := c.store.Del(i.Key, i.Conflict) - c.onExit(val) - } - case <-c.cleanupTicker.C: - c.store.Cleanup(c.policy, onEvict) - case <-c.stop: - return - } - } -} - -// collectMetrics just creates a new *Metrics instance and adds the pointers -// to the cache and policy instances. -func (c *Cache) collectMetrics() { - c.Metrics = newMetrics() - c.policy.CollectMetrics(c.Metrics) -} - -type metricType int - -const ( - // The following 2 keep track of hits and misses. - hit = iota - miss - // The following 3 keep track of number of keys added, updated and evicted. - keyAdd - keyUpdate - keyEvict - // The following 2 keep track of cost of keys added and evicted. - costAdd - costEvict - // The following keep track of how many sets were dropped or rejected later. - dropSets - rejectSets - // The following 2 keep track of how many gets were kept and dropped on the - // floor. - dropGets - keepGets - // This should be the final enum. Other enums should be set before this. - doNotUse -) - -func stringFor(t metricType) string { - switch t { - case hit: - return "hit" - case miss: - return "miss" - case keyAdd: - return "keys-added" - case keyUpdate: - return "keys-updated" - case keyEvict: - return "keys-evicted" - case costAdd: - return "cost-added" - case costEvict: - return "cost-evicted" - case dropSets: - return "sets-dropped" - case rejectSets: - return "sets-rejected" // by policy. - case dropGets: - return "gets-dropped" - case keepGets: - return "gets-kept" - default: - return "unidentified" - } -} - -// Metrics is a snapshot of performance statistics for the lifetime of a cache instance. -type Metrics struct { - all [doNotUse][]*uint64 - - mu sync.RWMutex - life *z.HistogramData // Tracks the life expectancy of a key. -} - -func newMetrics() *Metrics { - s := &Metrics{ - life: z.NewHistogramData(z.HistogramBounds(1, 16)), - } - for i := 0; i < doNotUse; i++ { - s.all[i] = make([]*uint64, 256) - slice := s.all[i] - for j := range slice { - slice[j] = new(uint64) - } - } - return s -} - -func (p *Metrics) add(t metricType, hash, delta uint64) { - if p == nil { - return - } - valp := p.all[t] - // Avoid false sharing by padding at least 64 bytes of space between two - // atomic counters which would be incremented. - idx := (hash % 25) * 10 - atomic.AddUint64(valp[idx], delta) -} - -func (p *Metrics) get(t metricType) uint64 { - if p == nil { - return 0 - } - valp := p.all[t] - var total uint64 - for i := range valp { - total += atomic.LoadUint64(valp[i]) - } - return total -} - -// Hits is the number of Get calls where a value was found for the corresponding key. -func (p *Metrics) Hits() uint64 { - return p.get(hit) -} - -// Misses is the number of Get calls where a value was not found for the corresponding key. -func (p *Metrics) Misses() uint64 { - return p.get(miss) -} - -// KeysAdded is the total number of Set calls where a new key-value item was added. 
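Editor's note: ahead of the accessor methods declared below, a sketch of reading these counters through the public API. The traffic numbers are arbitrary; `Metrics: true` is required, otherwise `cache.Metrics` stays nil (its methods are nil-safe and return zero):

```go
package main

import (
	"fmt"

	"github.com/dgraph-io/ristretto"
)

func main() {
	cache, err := ristretto.NewCache(&ristretto.Config{
		NumCounters: 1000,
		MaxCost:     100,
		BufferItems: 64,
		Metrics:     true,
	})
	if err != nil {
		panic(err)
	}

	for i := 0; i < 100; i++ {
		cache.Set(i, i, 1)
	}
	cache.Wait()
	for i := 0; i < 200; i++ {
		cache.Get(i % 150) // a mix of hits and misses
	}

	m := cache.Metrics
	fmt.Println("hits:", m.Hits(), "misses:", m.Misses())
	fmt.Printf("hit ratio: %.2f\n", m.Ratio())
	fmt.Println(m.String()) // one-line dump of every counter
}
```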
-func (p *Metrics) KeysAdded() uint64 { - return p.get(keyAdd) -} - -// KeysUpdated is the total number of Set calls where the value was updated. -func (p *Metrics) KeysUpdated() uint64 { - return p.get(keyUpdate) -} - -// KeysEvicted is the total number of keys evicted. -func (p *Metrics) KeysEvicted() uint64 { - return p.get(keyEvict) -} - -// CostAdded is the sum of costs that have been added (successful Set calls). -func (p *Metrics) CostAdded() uint64 { - return p.get(costAdd) -} - -// CostEvicted is the sum of all costs that have been evicted. -func (p *Metrics) CostEvicted() uint64 { - return p.get(costEvict) -} - -// SetsDropped is the number of Set calls that don't make it into internal -// buffers (due to contention or some other reason). -func (p *Metrics) SetsDropped() uint64 { - return p.get(dropSets) -} - -// SetsRejected is the number of Set calls rejected by the policy (TinyLFU). -func (p *Metrics) SetsRejected() uint64 { - return p.get(rejectSets) -} - -// GetsDropped is the number of Get counter increments that are dropped -// internally. -func (p *Metrics) GetsDropped() uint64 { - return p.get(dropGets) -} - -// GetsKept is the number of Get counter increments that are kept. -func (p *Metrics) GetsKept() uint64 { - return p.get(keepGets) -} - -// Ratio is the number of Hits over all accesses (Hits + Misses). This is the -// percentage of successful Get calls. -func (p *Metrics) Ratio() float64 { - if p == nil { - return 0.0 - } - hits, misses := p.get(hit), p.get(miss) - if hits == 0 && misses == 0 { - return 0.0 - } - return float64(hits) / float64(hits+misses) -} - -func (p *Metrics) trackEviction(numSeconds int64) { - if p == nil { - return - } - p.mu.Lock() - defer p.mu.Unlock() - p.life.Update(numSeconds) -} - -func (p *Metrics) LifeExpectancySeconds() *z.HistogramData { - if p == nil { - return nil - } - p.mu.RLock() - defer p.mu.RUnlock() - return p.life.Copy() -} - -// Clear resets all the metrics. -func (p *Metrics) Clear() { - if p == nil { - return - } - for i := 0; i < doNotUse; i++ { - for j := range p.all[i] { - atomic.StoreUint64(p.all[i][j], 0) - } - } - p.mu.Lock() - p.life = z.NewHistogramData(z.HistogramBounds(1, 16)) - p.mu.Unlock() -} - -// String returns a string representation of the metrics. -func (p *Metrics) String() string { - if p == nil { - return "" - } - var buf bytes.Buffer - for i := 0; i < doNotUse; i++ { - t := metricType(i) - fmt.Fprintf(&buf, "%s: %d ", stringFor(t), p.get(t)) - } - fmt.Fprintf(&buf, "gets-total: %d ", p.get(hit)+p.get(miss)) - fmt.Fprintf(&buf, "hit-ratio: %.2f", p.Ratio()) - return buf.String() -} diff --git a/vendor/github.com/dgraph-io/ristretto/policy.go b/vendor/github.com/dgraph-io/ristretto/policy.go deleted file mode 100644 index 065118d3..00000000 --- a/vendor/github.com/dgraph-io/ristretto/policy.go +++ /dev/null @@ -1,379 +0,0 @@ -/* - * Copyright 2020 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package ristretto - -import ( - "math" - "sync" - - "github.com/dgraph-io/ristretto/z" -) - -const ( - // lfuSample is the number of items to sample when looking at eviction - // candidates. 5 seems to be the most optimal number [citation needed]. - lfuSample = 5 -) - -// policy is the interface encapsulating eviction/admission behavior. -// -// TODO: remove this interface and just rename defaultPolicy to policy, as we -// are probably only going to use/implement/maintain one policy. -type policy interface { - ringConsumer - // Add attempts to Add the key-cost pair to the Policy. It returns a slice - // of evicted keys and a bool denoting whether or not the key-cost pair - // was added. If it returns true, the key should be stored in cache. - Add(uint64, int64) ([]*Item, bool) - // Has returns true if the key exists in the Policy. - Has(uint64) bool - // Del deletes the key from the Policy. - Del(uint64) - // Cap returns the available capacity. - Cap() int64 - // Close stops all goroutines and closes all channels. - Close() - // Update updates the cost value for the key. - Update(uint64, int64) - // Cost returns the cost value of a key or -1 if missing. - Cost(uint64) int64 - // Optionally, set stats object to track how policy is performing. - CollectMetrics(*Metrics) - // Clear zeroes out all counters and clears hashmaps. - Clear() -} - -func newPolicy(numCounters, maxCost int64) policy { - return newDefaultPolicy(numCounters, maxCost) -} - -type defaultPolicy struct { - sync.Mutex - admit *tinyLFU - evict *sampledLFU - itemsCh chan []uint64 - stop chan struct{} - metrics *Metrics -} - -func newDefaultPolicy(numCounters, maxCost int64) *defaultPolicy { - p := &defaultPolicy{ - admit: newTinyLFU(numCounters), - evict: newSampledLFU(maxCost), - itemsCh: make(chan []uint64, 3), - stop: make(chan struct{}), - } - go p.processItems() - return p -} - -func (p *defaultPolicy) CollectMetrics(metrics *Metrics) { - p.metrics = metrics - p.evict.metrics = metrics -} - -type policyPair struct { - key uint64 - cost int64 -} - -func (p *defaultPolicy) processItems() { - for { - select { - case items := <-p.itemsCh: - p.Lock() - p.admit.Push(items) - p.Unlock() - case <-p.stop: - return - } - } -} - -func (p *defaultPolicy) Push(keys []uint64) bool { - if len(keys) == 0 { - return true - } - select { - case p.itemsCh <- keys: - p.metrics.add(keepGets, keys[0], uint64(len(keys))) - return true - default: - p.metrics.add(dropGets, keys[0], uint64(len(keys))) - return false - } -} - -// Add decides whether the item with the given key and cost should be accepted by -// the policy. It returns the list of victims that have been evicted and a boolean -// indicating whether the incoming item should be accepted. -func (p *defaultPolicy) Add(key uint64, cost int64) ([]*Item, bool) { - p.Lock() - defer p.Unlock() - - // Cannot add an item bigger than entire cache. - if cost > p.evict.maxCost { - return nil, false - } - - // No need to go any further if the item is already in the cache. - if has := p.evict.updateIfHas(key, cost); has { - // An update does not count as an addition, so return false. - return nil, false - } - - // If the execution reaches this point, the key doesn't exist in the cache. - // Calculate the remaining room in the cache (usually bytes). - room := p.evict.roomLeft(cost) - if room >= 0 { - // There's enough room in the cache to store the new item without - // overflowing. Do that now and stop here. 
- p.evict.add(key, cost) - p.metrics.add(costAdd, key, uint64(cost)) - return nil, true - } - - // incHits is the hit count for the incoming item. - incHits := p.admit.Estimate(key) - // sample is the eviction candidate pool to be filled via random sampling. - // TODO: perhaps we should use a min heap here. Right now our time - // complexity is N for finding the min. Min heap should bring it down to - // O(lg N). - sample := make([]*policyPair, 0, lfuSample) - // As items are evicted they will be appended to victims. - victims := make([]*Item, 0) - - // Delete victims until there's enough space or a minKey is found that has - // more hits than incoming item. - for ; room < 0; room = p.evict.roomLeft(cost) { - // Fill up empty slots in sample. - sample = p.evict.fillSample(sample) - - // Find minimally used item in sample. - minKey, minHits, minId, minCost := uint64(0), int64(math.MaxInt64), 0, int64(0) - for i, pair := range sample { - // Look up hit count for sample key. - if hits := p.admit.Estimate(pair.key); hits < minHits { - minKey, minHits, minId, minCost = pair.key, hits, i, pair.cost - } - } - - // If the incoming item isn't worth keeping in the policy, reject. - if incHits < minHits { - p.metrics.add(rejectSets, key, 1) - return victims, false - } - - // Delete the victim from metadata. - p.evict.del(minKey) - - // Delete the victim from sample. - sample[minId] = sample[len(sample)-1] - sample = sample[:len(sample)-1] - // Store victim in evicted victims slice. - victims = append(victims, &Item{ - Key: minKey, - Conflict: 0, - Cost: minCost, - }) - } - - p.evict.add(key, cost) - p.metrics.add(costAdd, key, uint64(cost)) - return victims, true -} - -func (p *defaultPolicy) Has(key uint64) bool { - p.Lock() - _, exists := p.evict.keyCosts[key] - p.Unlock() - return exists -} - -func (p *defaultPolicy) Del(key uint64) { - p.Lock() - p.evict.del(key) - p.Unlock() -} - -func (p *defaultPolicy) Cap() int64 { - p.Lock() - capacity := int64(p.evict.maxCost - p.evict.used) - p.Unlock() - return capacity -} - -func (p *defaultPolicy) Update(key uint64, cost int64) { - p.Lock() - p.evict.updateIfHas(key, cost) - p.Unlock() -} - -func (p *defaultPolicy) Cost(key uint64) int64 { - p.Lock() - if cost, found := p.evict.keyCosts[key]; found { - p.Unlock() - return cost - } - p.Unlock() - return -1 -} - -func (p *defaultPolicy) Clear() { - p.Lock() - p.admit.clear() - p.evict.clear() - p.Unlock() -} - -func (p *defaultPolicy) Close() { - // Block until the p.processItems goroutine returns. - p.stop <- struct{}{} - close(p.stop) - close(p.itemsCh) -} - -// sampledLFU is an eviction helper storing key-cost pairs. 
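Editor's note: the `Add` loop above is the core of the eviction side: sample a handful of keys, evict the coldest until the newcomer fits, and reject the newcomer if it is colder than everything sampled. Below is a self-contained toy of the sampling loop only, not the vendored implementation; `freq` stands in for the TinyLFU estimate, and the real `sampledLFU` type follows after it.

```go
package main

import "fmt"

const sampleSize = 5 // mirrors lfuSample above

// evictUntilRoom frees space for an incoming item of the given cost by
// repeatedly evicting the least-frequently-used key of a small sample.
// Go's randomized map iteration gives us cheap pseudo-random sampling.
func evictUntilRoom(keyCosts, freq map[string]int64, used *int64, maxCost, cost int64) []string {
	var victims []string
	for *used+cost > maxCost && len(keyCosts) > 0 {
		minKey, minHits := "", int64(1)<<62
		n := 0
		for k := range keyCosts { // sample up to sampleSize keys
			if freq[k] < minHits {
				minKey, minHits = k, freq[k]
			}
			if n++; n >= sampleSize {
				break
			}
		}
		*used -= keyCosts[minKey]
		delete(keyCosts, minKey)
		victims = append(victims, minKey)
	}
	return victims
}

func main() {
	keyCosts := map[string]int64{"a": 40, "b": 40, "c": 15}
	freq := map[string]int64{"a": 9, "b": 1, "c": 5}
	used := int64(95)
	// Make room for an incoming item of cost 30 under a budget of 100.
	fmt.Println(evictUntilRoom(keyCosts, freq, &used, 100, 30)) // [b]
}
```

The real policy additionally rejects the incoming key when its estimated frequency is below the sampled minimum, which is what keeps one-hit wonders from churning the cache.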
-type sampledLFU struct { - keyCosts map[uint64]int64 - maxCost int64 - used int64 - metrics *Metrics -} - -func newSampledLFU(maxCost int64) *sampledLFU { - return &sampledLFU{ - keyCosts: make(map[uint64]int64), - maxCost: maxCost, - } -} - -func (p *sampledLFU) roomLeft(cost int64) int64 { - return p.maxCost - (p.used + cost) -} - -func (p *sampledLFU) fillSample(in []*policyPair) []*policyPair { - if len(in) >= lfuSample { - return in - } - for key, cost := range p.keyCosts { - in = append(in, &policyPair{key, cost}) - if len(in) >= lfuSample { - return in - } - } - return in -} - -func (p *sampledLFU) del(key uint64) { - cost, ok := p.keyCosts[key] - if !ok { - return - } - p.used -= cost - delete(p.keyCosts, key) - p.metrics.add(costEvict, key, uint64(cost)) - p.metrics.add(keyEvict, key, 1) -} - -func (p *sampledLFU) add(key uint64, cost int64) { - p.keyCosts[key] = cost - p.used += cost -} - -func (p *sampledLFU) updateIfHas(key uint64, cost int64) bool { - if prev, found := p.keyCosts[key]; found { - // Update the cost of an existing key, but don't worry about evicting. - // Evictions will be handled the next time a new item is added. - p.metrics.add(keyUpdate, key, 1) - if prev > cost { - diff := prev - cost - p.metrics.add(costAdd, key, ^uint64(uint64(diff)-1)) - } else if cost > prev { - diff := cost - prev - p.metrics.add(costAdd, key, uint64(diff)) - } - p.used += cost - prev - p.keyCosts[key] = cost - return true - } - return false -} - -func (p *sampledLFU) clear() { - p.used = 0 - p.keyCosts = make(map[uint64]int64) -} - -// tinyLFU is an admission helper that keeps track of access frequency using -// tiny (4-bit) counters in the form of a count-min sketch. -// tinyLFU is NOT thread safe. -type tinyLFU struct { - freq *cmSketch - door *z.Bloom - incrs int64 - resetAt int64 -} - -func newTinyLFU(numCounters int64) *tinyLFU { - return &tinyLFU{ - freq: newCmSketch(numCounters), - door: z.NewBloomFilter(float64(numCounters), 0.01), - resetAt: numCounters, - } -} - -func (p *tinyLFU) Push(keys []uint64) { - for _, key := range keys { - p.Increment(key) - } -} - -func (p *tinyLFU) Estimate(key uint64) int64 { - hits := p.freq.Estimate(key) - if p.door.Has(key) { - hits++ - } - return hits -} - -func (p *tinyLFU) Increment(key uint64) { - // Flip doorkeeper bit if not already done. - if added := p.door.AddIfNotHas(key); !added { - // Increment count-min counter if doorkeeper bit is already set. - p.freq.Increment(key) - } - p.incrs++ - if p.incrs >= p.resetAt { - p.reset() - } -} - -func (p *tinyLFU) reset() { - // Zero out incrs. - p.incrs = 0 - // clears doorkeeper bits - p.door.Clear() - // halves count-min counters - p.freq.Reset() -} - -func (p *tinyLFU) clear() { - p.incrs = 0 - p.door.Clear() - p.freq.Clear() -} diff --git a/vendor/github.com/dgraph-io/ristretto/ring.go b/vendor/github.com/dgraph-io/ristretto/ring.go deleted file mode 100644 index 5dbed4cc..00000000 --- a/vendor/github.com/dgraph-io/ristretto/ring.go +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Copyright 2019 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package ristretto - -import ( - "sync" -) - -// ringConsumer is the user-defined object responsible for receiving and -// processing items in batches when buffers are drained. -type ringConsumer interface { - Push([]uint64) bool -} - -// ringStripe is a singular ring buffer that is not concurrent safe. -type ringStripe struct { - cons ringConsumer - data []uint64 - capa int -} - -func newRingStripe(cons ringConsumer, capa int64) *ringStripe { - return &ringStripe{ - cons: cons, - data: make([]uint64, 0, capa), - capa: int(capa), - } -} - -// Push appends an item in the ring buffer and drains (copies items and -// sends to Consumer) if full. -func (s *ringStripe) Push(item uint64) { - s.data = append(s.data, item) - // Decide if the ring buffer should be drained. - if len(s.data) >= s.capa { - // Send elements to consumer and create a new ring stripe. - if s.cons.Push(s.data) { - s.data = make([]uint64, 0, s.capa) - } else { - s.data = s.data[:0] - } - } -} - -// ringBuffer stores multiple buffers (stripes) and distributes Pushed items -// between them to lower contention. -// -// This implements the "batching" process described in the BP-Wrapper paper -// (section III part A). -type ringBuffer struct { - pool *sync.Pool -} - -// newRingBuffer returns a striped ring buffer. The Consumer in ringConfig will -// be called when individual stripes are full and need to drain their elements. -func newRingBuffer(cons ringConsumer, capa int64) *ringBuffer { - // LOSSY buffers use a very simple sync.Pool for concurrently reusing - // stripes. We do lose some stripes due to GC (unheld items in sync.Pool - // are cleared), but the performance gains generally outweigh the small - // percentage of elements lost. The performance primarily comes from - // low-level runtime functions used in the standard library that aren't - // available to us (such as runtime_procPin()). - return &ringBuffer{ - pool: &sync.Pool{ - New: func() interface{} { return newRingStripe(cons, capa) }, - }, - } -} - -// Push adds an element to one of the internal stripes and possibly drains if -// the stripe becomes full. -func (b *ringBuffer) Push(item uint64) { - // Reuse or create a new stripe. - stripe := b.pool.Get().(*ringStripe) - stripe.Push(item) - b.pool.Put(stripe) -} diff --git a/vendor/github.com/dgraph-io/ristretto/sketch.go b/vendor/github.com/dgraph-io/ristretto/sketch.go deleted file mode 100644 index 10f41468..00000000 --- a/vendor/github.com/dgraph-io/ristretto/sketch.go +++ /dev/null @@ -1,155 +0,0 @@ -/* - * Copyright 2019 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// This package includes multiple probabalistic data structures needed for -// admission/eviction metadata. 
Most are Counting Bloom Filter variations, but -// a caching-specific feature that is also required is a "freshness" mechanism, -// which basically serves as a "lifetime" process. This freshness mechanism -// was described in the original TinyLFU paper [1], but other mechanisms may -// be better suited for certain data distributions. -// -// [1]: https://arxiv.org/abs/1512.00727 -package ristretto - -import ( - "fmt" - "math/rand" - "time" -) - -// cmSketch is a Count-Min sketch implementation with 4-bit counters, heavily -// based on Damian Gryski's CM4 [1]. -// -// [1]: https://github.com/dgryski/go-tinylfu/blob/master/cm4.go -type cmSketch struct { - rows [cmDepth]cmRow - seed [cmDepth]uint64 - mask uint64 -} - -const ( - // cmDepth is the number of counter copies to store (think of it as rows). - cmDepth = 4 -) - -func newCmSketch(numCounters int64) *cmSketch { - if numCounters == 0 { - panic("cmSketch: bad numCounters") - } - // Get the next power of 2 for better cache performance. - numCounters = next2Power(numCounters) - sketch := &cmSketch{mask: uint64(numCounters - 1)} - // Initialize rows of counters and seeds. - source := rand.New(rand.NewSource(time.Now().UnixNano())) - for i := 0; i < cmDepth; i++ { - sketch.seed[i] = source.Uint64() - sketch.rows[i] = newCmRow(numCounters) - } - return sketch -} - -// Increment increments the count(ers) for the specified key. -func (s *cmSketch) Increment(hashed uint64) { - for i := range s.rows { - s.rows[i].increment((hashed ^ s.seed[i]) & s.mask) - } -} - -// Estimate returns the value of the specified key. -func (s *cmSketch) Estimate(hashed uint64) int64 { - min := byte(255) - for i := range s.rows { - val := s.rows[i].get((hashed ^ s.seed[i]) & s.mask) - if val < min { - min = val - } - } - return int64(min) -} - -// Reset halves all counter values. -func (s *cmSketch) Reset() { - for _, r := range s.rows { - r.reset() - } -} - -// Clear zeroes all counters. -func (s *cmSketch) Clear() { - for _, r := range s.rows { - r.clear() - } -} - -// cmRow is a row of bytes, with each byte holding two counters. -type cmRow []byte - -func newCmRow(numCounters int64) cmRow { - return make(cmRow, numCounters/2) -} - -func (r cmRow) get(n uint64) byte { - return byte(r[n/2]>>((n&1)*4)) & 0x0f -} - -func (r cmRow) increment(n uint64) { - // Index of the counter. - i := n / 2 - // Shift distance (even 0, odd 4). - s := (n & 1) * 4 - // Counter value. - v := (r[i] >> s) & 0x0f - // Only increment if not max value (overflow wrap is bad for LFU). - if v < 15 { - r[i] += 1 << s - } -} - -func (r cmRow) reset() { - // Halve each counter. - for i := range r { - r[i] = (r[i] >> 1) & 0x77 - } -} - -func (r cmRow) clear() { - // Zero each counter. - for i := range r { - r[i] = 0 - } -} - -func (r cmRow) string() string { - s := "" - for i := uint64(0); i < uint64(len(r)*2); i++ { - s += fmt.Sprintf("%02d ", (r[(i/2)]>>((i&1)*4))&0x0f) - } - s = s[:len(s)-1] - return s -} - -// next2Power rounds x up to the next power of 2, if it's not already one. -func next2Power(x int64) int64 { - x-- - x |= x >> 1 - x |= x >> 2 - x |= x >> 4 - x |= x >> 8 - x |= x >> 16 - x |= x >> 32 - x++ - return x -} diff --git a/vendor/github.com/dgraph-io/ristretto/store.go b/vendor/github.com/dgraph-io/ristretto/store.go deleted file mode 100644 index e42a98b7..00000000 --- a/vendor/github.com/dgraph-io/ristretto/store.go +++ /dev/null @@ -1,242 +0,0 @@ -/* - * Copyright 2019 Dgraph Labs, Inc. 
and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package ristretto - -import ( - "sync" - "time" -) - -// TODO: Do we need this to be a separate struct from Item? -type storeItem struct { - key uint64 - conflict uint64 - value interface{} - expiration time.Time -} - -// store is the interface fulfilled by all hash map implementations in this -// file. Some hash map implementations are better suited for certain data -// distributions than others, so this allows us to abstract that out for use -// in Ristretto. -// -// Every store is safe for concurrent usage. -type store interface { - // Get returns the value associated with the key parameter. - Get(uint64, uint64) (interface{}, bool) - // Expiration returns the expiration time for this key. - Expiration(uint64) time.Time - // Set adds the key-value pair to the Map or updates the value if it's - // already present. The key-value pair is passed as a pointer to an - // item object. - Set(*Item) - // Del deletes the key-value pair from the Map. - Del(uint64, uint64) (uint64, interface{}) - // Update attempts to update the key with a new value and returns true if - // successful. - Update(*Item) (interface{}, bool) - // Cleanup removes items that have an expired TTL. - Cleanup(policy policy, onEvict itemCallback) - // Clear clears all contents of the store. - Clear(onEvict itemCallback) -} - -// newStore returns the default store implementation. -func newStore() store { - return newShardedMap() -} - -const numShards uint64 = 256 - -type shardedMap struct { - shards []*lockedMap - expiryMap *expirationMap -} - -func newShardedMap() *shardedMap { - sm := &shardedMap{ - shards: make([]*lockedMap, int(numShards)), - expiryMap: newExpirationMap(), - } - for i := range sm.shards { - sm.shards[i] = newLockedMap(sm.expiryMap) - } - return sm -} - -func (sm *shardedMap) Get(key, conflict uint64) (interface{}, bool) { - return sm.shards[key%numShards].get(key, conflict) -} - -func (sm *shardedMap) Expiration(key uint64) time.Time { - return sm.shards[key%numShards].Expiration(key) -} - -func (sm *shardedMap) Set(i *Item) { - if i == nil { - // If item is nil make this Set a no-op. 
- return - } - - sm.shards[i.Key%numShards].Set(i) -} - -func (sm *shardedMap) Del(key, conflict uint64) (uint64, interface{}) { - return sm.shards[key%numShards].Del(key, conflict) -} - -func (sm *shardedMap) Update(newItem *Item) (interface{}, bool) { - return sm.shards[newItem.Key%numShards].Update(newItem) -} - -func (sm *shardedMap) Cleanup(policy policy, onEvict itemCallback) { - sm.expiryMap.cleanup(sm, policy, onEvict) -} - -func (sm *shardedMap) Clear(onEvict itemCallback) { - for i := uint64(0); i < numShards; i++ { - sm.shards[i].Clear(onEvict) - } -} - -type lockedMap struct { - sync.RWMutex - data map[uint64]storeItem - em *expirationMap -} - -func newLockedMap(em *expirationMap) *lockedMap { - return &lockedMap{ - data: make(map[uint64]storeItem), - em: em, - } -} - -func (m *lockedMap) get(key, conflict uint64) (interface{}, bool) { - m.RLock() - item, ok := m.data[key] - m.RUnlock() - if !ok { - return nil, false - } - if conflict != 0 && (conflict != item.conflict) { - return nil, false - } - - // Handle expired items. - if !item.expiration.IsZero() && time.Now().After(item.expiration) { - return nil, false - } - return item.value, true -} - -func (m *lockedMap) Expiration(key uint64) time.Time { - m.RLock() - defer m.RUnlock() - return m.data[key].expiration -} - -func (m *lockedMap) Set(i *Item) { - if i == nil { - // If the item is nil make this Set a no-op. - return - } - - m.Lock() - defer m.Unlock() - item, ok := m.data[i.Key] - - if ok { - // The item existed already. We need to check the conflict key and reject the - // update if they do not match. Only after that the expiration map is updated. - if i.Conflict != 0 && (i.Conflict != item.conflict) { - return - } - m.em.update(i.Key, i.Conflict, item.expiration, i.Expiration) - } else { - // The value is not in the map already. There's no need to return anything. - // Simply add the expiration map. - m.em.add(i.Key, i.Conflict, i.Expiration) - } - - m.data[i.Key] = storeItem{ - key: i.Key, - conflict: i.Conflict, - value: i.Value, - expiration: i.Expiration, - } -} - -func (m *lockedMap) Del(key, conflict uint64) (uint64, interface{}) { - m.Lock() - item, ok := m.data[key] - if !ok { - m.Unlock() - return 0, nil - } - if conflict != 0 && (conflict != item.conflict) { - m.Unlock() - return 0, nil - } - - if !item.expiration.IsZero() { - m.em.del(key, item.expiration) - } - - delete(m.data, key) - m.Unlock() - return item.conflict, item.value -} - -func (m *lockedMap) Update(newItem *Item) (interface{}, bool) { - m.Lock() - item, ok := m.data[newItem.Key] - if !ok { - m.Unlock() - return nil, false - } - if newItem.Conflict != 0 && (newItem.Conflict != item.conflict) { - m.Unlock() - return nil, false - } - - m.em.update(newItem.Key, newItem.Conflict, item.expiration, newItem.Expiration) - m.data[newItem.Key] = storeItem{ - key: newItem.Key, - conflict: newItem.Conflict, - value: newItem.Value, - expiration: newItem.Expiration, - } - - m.Unlock() - return item.value, true -} - -func (m *lockedMap) Clear(onEvict itemCallback) { - m.Lock() - i := &Item{} - if onEvict != nil { - for _, si := range m.data { - i.Key = si.key - i.Conflict = si.conflict - i.Value = si.value - onEvict(i) - } - } - m.data = make(map[uint64]storeItem) - m.Unlock() -} diff --git a/vendor/github.com/dgraph-io/ristretto/test.sh b/vendor/github.com/dgraph-io/ristretto/test.sh deleted file mode 100644 index 2bdcc250..00000000 --- a/vendor/github.com/dgraph-io/ristretto/test.sh +++ /dev/null @@ -1,20 +0,0 @@ -#! 
/bin/sh
-
-starttest() {
-  set -e
-  GO111MODULE=on go test -race ./...
-}
-
-if [ -z "${TEAMCITY_VERSION}" ]; then
-  # running locally, so start test in a container
-  # TEAMCITY_VERSION=local will avoid recursive calls, when it would be running in container
-  docker run --rm --name ristretto-test -ti \
-    -v `pwd`:/go/src/github.com/dgraph-io/ristretto \
-    --workdir /go/src/github.com/dgraph-io/ristretto \
-    --env TEAMCITY_VERSION=local \
-    golang:1.13 \
-    sh test.sh
-else
-  # running in teamcity, since teamcity itself run this in container, let's simply run this
-  starttest
-fi
diff --git a/vendor/github.com/dgraph-io/ristretto/ttl.go b/vendor/github.com/dgraph-io/ristretto/ttl.go
deleted file mode 100644
index 337976ad..00000000
--- a/vendor/github.com/dgraph-io/ristretto/ttl.go
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
- * Copyright 2020 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package ristretto
-
-import (
-	"sync"
-	"time"
-)
-
-var (
-	// TODO: find the optimal value or make it configurable.
-	bucketDurationSecs = int64(5)
-)
-
-func storageBucket(t time.Time) int64 {
-	return (t.Unix() / bucketDurationSecs) + 1
-}
-
-func cleanupBucket(t time.Time) int64 {
-	// The bucket to cleanup is always behind the storage bucket by one so that
-	// no elements in that bucket (which might not have expired yet) are deleted.
-	return storageBucket(t) - 1
-}
-
-// bucket type is a map of key to conflict.
-type bucket map[uint64]uint64
-
-// expirationMap is a map of bucket number to the corresponding bucket.
-type expirationMap struct {
-	sync.RWMutex
-	buckets map[int64]bucket
-}
-
-func newExpirationMap() *expirationMap {
-	return &expirationMap{
-		buckets: make(map[int64]bucket),
-	}
-}
-
-func (m *expirationMap) add(key, conflict uint64, expiration time.Time) {
-	if m == nil {
-		return
-	}
-
-	// Items that don't expire don't need to be in the expiration map.
-	if expiration.IsZero() {
-		return
-	}
-
-	bucketNum := storageBucket(expiration)
-	m.Lock()
-	defer m.Unlock()
-
-	b, ok := m.buckets[bucketNum]
-	if !ok {
-		b = make(bucket)
-		m.buckets[bucketNum] = b
-	}
-	b[key] = conflict
-}
-
-func (m *expirationMap) update(key, conflict uint64, oldExpTime, newExpTime time.Time) {
-	if m == nil {
-		return
-	}
-
-	m.Lock()
-	defer m.Unlock()
-
-	oldBucketNum := storageBucket(oldExpTime)
-	oldBucket, ok := m.buckets[oldBucketNum]
-	if ok {
-		delete(oldBucket, key)
-	}
-
-	newBucketNum := storageBucket(newExpTime)
-	newBucket, ok := m.buckets[newBucketNum]
-	if !ok {
-		newBucket = make(bucket)
-		m.buckets[newBucketNum] = newBucket
-	}
-	newBucket[key] = conflict
-}
-
-func (m *expirationMap) del(key uint64, expiration time.Time) {
-	if m == nil {
-		return
-	}
-
-	bucketNum := storageBucket(expiration)
-	m.Lock()
-	defer m.Unlock()
-	_, ok := m.buckets[bucketNum]
-	if !ok {
-		return
-	}
-	delete(m.buckets[bucketNum], key)
-}
-
-// cleanup removes all the items in the bucket that was just completed.
It deletes -// those items from the store, and calls the onEvict function on those items. -// This function is meant to be called periodically. -func (m *expirationMap) cleanup(store store, policy policy, onEvict itemCallback) { - if m == nil { - return - } - - m.Lock() - now := time.Now() - bucketNum := cleanupBucket(now) - keys := m.buckets[bucketNum] - delete(m.buckets, bucketNum) - m.Unlock() - - for key, conflict := range keys { - // Sanity check. Verify that the store agrees that this key is expired. - if store.Expiration(key).After(now) { - continue - } - - cost := policy.Cost(key) - policy.Del(key) - _, value := store.Del(key, conflict) - - if onEvict != nil { - onEvict(&Item{Key: key, - Conflict: conflict, - Value: value, - Cost: cost, - }) - } - } -} diff --git a/vendor/github.com/dgraph-io/ristretto/z/LICENSE b/vendor/github.com/dgraph-io/ristretto/z/LICENSE deleted file mode 100644 index 0860cbfe..00000000 --- a/vendor/github.com/dgraph-io/ristretto/z/LICENSE +++ /dev/null @@ -1,64 +0,0 @@ -bbloom.go - -// The MIT License (MIT) -// Copyright (c) 2014 Andreas Briese, eduToolbox@Bri-C GmbH, Sarstedt - -// Permission is hereby granted, free of charge, to any person obtaining a copy of -// this software and associated documentation files (the "Software"), to deal in -// the Software without restriction, including without limitation the rights to -// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software is furnished to do so, -// subject to the following conditions: - -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. - -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -rtutil.go - -// MIT License - -// Copyright (c) 2019 Ewan Chou - -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: - -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. - -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE. - -Modifications: - -/* - * Copyright 2019 Dgraph Labs, Inc. 
and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - diff --git a/vendor/github.com/dgraph-io/ristretto/z/README.md b/vendor/github.com/dgraph-io/ristretto/z/README.md deleted file mode 100644 index 6d77e146..00000000 --- a/vendor/github.com/dgraph-io/ristretto/z/README.md +++ /dev/null @@ -1,129 +0,0 @@ -## bbloom: a bitset Bloom filter for go/golang -=== - -package implements a fast bloom filter with real 'bitset' and JSONMarshal/JSONUnmarshal to store/reload the Bloom filter. - -NOTE: the package uses unsafe.Pointer to set and read the bits from the bitset. If you're uncomfortable with using the unsafe package, please consider using my bloom filter package at github.com/AndreasBriese/bloom - -=== - -changelog 11/2015: new thread safe methods AddTS(), HasTS(), AddIfNotHasTS() following a suggestion from Srdjan Marinovic (github @a-little-srdjan), who used this to code a bloomfilter cache. - -This bloom filter was developed to strengthen a website-log database and was tested and optimized for this log-entry mask: "2014/%02i/%02i %02i:%02i:%02i /info.html". -Nonetheless bbloom should work with any other form of entries. - -~~Hash function is a modified Berkeley DB sdbm hash (to optimize for smaller strings). sdbm http://www.cse.yorku.ca/~oz/hash.html~~ - -Found sipHash (SipHash-2-4, a fast short-input PRF created by Jean-Philippe Aumasson and Daniel J. Bernstein.) to be about as fast. sipHash had been ported by Dimtry Chestnyk to Go (github.com/dchest/siphash ) - -Minimum hashset size is: 512 ([4]uint64; will be set automatically). - -###install - -```sh -go get github.com/AndreasBriese/bbloom -``` - -###test -+ change to folder ../bbloom -+ create wordlist in file "words.txt" (you might use `python permut.py`) -+ run 'go test -bench=.' within the folder - -```go -go test -bench=. -``` - -~~If you've installed the GOCONVEY TDD-framework http://goconvey.co/ you can run the tests automatically.~~ - -using go's testing framework now (have in mind that the op timing is related to 65536 operations of Add, Has, AddIfNotHas respectively) - -### usage - -after installation add - -```go -import ( - ... - "github.com/AndreasBriese/bbloom" - ... - ) -``` - -at your header. 
In the program use - -```go -// create a bloom filter for 65536 items and 1 % wrong-positive ratio -bf := bbloom.New(float64(1<<16), float64(0.01)) - -// or -// create a bloom filter with 650000 for 65536 items and 7 locs per hash explicitly -// bf = bbloom.New(float64(650000), float64(7)) -// or -bf = bbloom.New(650000.0, 7.0) - -// add one item -bf.Add([]byte("butter")) - -// Number of elements added is exposed now -// Note: ElemNum will not be included in JSON export (for compatability to older version) -nOfElementsInFilter := bf.ElemNum - -// check if item is in the filter -isIn := bf.Has([]byte("butter")) // should be true -isNotIn := bf.Has([]byte("Butter")) // should be false - -// 'add only if item is new' to the bloomfilter -added := bf.AddIfNotHas([]byte("butter")) // should be false because 'butter' is already in the set -added = bf.AddIfNotHas([]byte("buTTer")) // should be true because 'buTTer' is new - -// thread safe versions for concurrent use: AddTS, HasTS, AddIfNotHasTS -// add one item -bf.AddTS([]byte("peanutbutter")) -// check if item is in the filter -isIn = bf.HasTS([]byte("peanutbutter")) // should be true -isNotIn = bf.HasTS([]byte("peanutButter")) // should be false -// 'add only if item is new' to the bloomfilter -added = bf.AddIfNotHasTS([]byte("butter")) // should be false because 'peanutbutter' is already in the set -added = bf.AddIfNotHasTS([]byte("peanutbuTTer")) // should be true because 'penutbuTTer' is new - -// convert to JSON ([]byte) -Json := bf.JSONMarshal() - -// bloomfilters Mutex is exposed for external un-/locking -// i.e. mutex lock while doing JSON conversion -bf.Mtx.Lock() -Json = bf.JSONMarshal() -bf.Mtx.Unlock() - -// restore a bloom filter from storage -bfNew := bbloom.JSONUnmarshal(Json) - -isInNew := bfNew.Has([]byte("butter")) // should be true -isNotInNew := bfNew.Has([]byte("Butter")) // should be false - -``` - -to work with the bloom filter. - -### why 'fast'? - -It's about 3 times faster than William Fitzgeralds bitset bloom filter https://github.com/willf/bloom . And it is about so fast as my []bool set variant for Boom filters (see https://github.com/AndreasBriese/bloom ) but having a 8times smaller memory footprint: - - - Bloom filter (filter size 524288, 7 hashlocs) - github.com/AndreasBriese/bbloom 'Add' 65536 items (10 repetitions): 6595800 ns (100 ns/op) - github.com/AndreasBriese/bbloom 'Has' 65536 items (10 repetitions): 5986600 ns (91 ns/op) - github.com/AndreasBriese/bloom 'Add' 65536 items (10 repetitions): 6304684 ns (96 ns/op) - github.com/AndreasBriese/bloom 'Has' 65536 items (10 repetitions): 6568663 ns (100 ns/op) - - github.com/willf/bloom 'Add' 65536 items (10 repetitions): 24367224 ns (371 ns/op) - github.com/willf/bloom 'Test' 65536 items (10 repetitions): 21881142 ns (333 ns/op) - github.com/dataence/bloom/standard 'Add' 65536 items (10 repetitions): 23041644 ns (351 ns/op) - github.com/dataence/bloom/standard 'Check' 65536 items (10 repetitions): 19153133 ns (292 ns/op) - github.com/cabello/bloom 'Add' 65536 items (10 repetitions): 131921507 ns (2012 ns/op) - github.com/cabello/bloom 'Contains' 65536 items (10 repetitions): 131108962 ns (2000 ns/op) - -(on MBPro15 OSX10.8.5 i7 4Core 2.4Ghz) - - -With 32bit bloom filters (bloom32) using modified sdbm, bloom32 does hashing with only 2 bit shifts, one xor and one substraction per byte. smdb is about as fast as fnv64a but gives less collisions with the dataset (see mask above). 
bloom.New(float64(10 * 1<<16),float64(7)) populated with 1<<16 random items from the dataset (see above) and tested against the rest results in less than 0.05% collisions. diff --git a/vendor/github.com/dgraph-io/ristretto/z/allocator.go b/vendor/github.com/dgraph-io/ristretto/z/allocator.go deleted file mode 100644 index af486a8d..00000000 --- a/vendor/github.com/dgraph-io/ristretto/z/allocator.go +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Copyright 2020 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package z - -import "fmt" - -// Allocator amortizes the cost of small allocations by allocating memory in bigger chunks. -// Internally it uses z.Calloc to allocate memory. Once allocated, the memory is not moved, -// so it is safe to use the allocated bytes to unsafe cast them to Go struct pointers. -type Allocator struct { - pageSize int - curBuf int - curIdx int - buffers [][]byte - size uint64 -} - -// NewAllocator creates an allocator starting with the given size. -func NewAllocator(sz int) *Allocator { - return &Allocator{pageSize: sz} -} - -// Size returns the size of the allocations so far. -func (a *Allocator) Size() uint64 { - return a.size -} - -// Release would release the memory back. Remember to make this call to avoid memory leaks. -func (a *Allocator) Release() { - for _, b := range a.buffers { - Free(b) - } -} - -const maxAlloc = 1 << 30 - -func (a *Allocator) MaxAlloc() int { - return maxAlloc -} - -// Allocate would allocate a byte slice of length sz. It is safe to use this memory to unsafe cast -// to Go structs. -func (a *Allocator) Allocate(sz int) []byte { - if len(a.buffers) == 0 { - buf := Calloc(a.pageSize) - a.buffers = append(a.buffers, buf) - } - - if sz >= maxAlloc { - panic(fmt.Sprintf("Allocate call exceeds max allocation possible."+ - " Requested: %d. Max Allowed: %d\n", sz, maxAlloc)) - } - cb := a.buffers[a.curBuf] - if len(cb) < a.curIdx+sz { - for { - a.pageSize *= 2 // Do multiply by 2 here. 
- if a.pageSize >= sz { - break - } - } - if a.pageSize > maxAlloc { - a.pageSize = maxAlloc - } - - buf := Calloc(a.pageSize) - a.buffers = append(a.buffers, buf) - a.curBuf++ - a.curIdx = 0 - cb = a.buffers[a.curBuf] - } - - slice := cb[a.curIdx : a.curIdx+sz] - a.curIdx += sz - a.size += uint64(sz) - return slice -} diff --git a/vendor/github.com/dgraph-io/ristretto/z/bbloom.go b/vendor/github.com/dgraph-io/ristretto/z/bbloom.go deleted file mode 100644 index c80559d2..00000000 --- a/vendor/github.com/dgraph-io/ristretto/z/bbloom.go +++ /dev/null @@ -1,203 +0,0 @@ -// The MIT License (MIT) -// Copyright (c) 2014 Andreas Briese, eduToolbox@Bri-C GmbH, Sarstedt - -// Permission is hereby granted, free of charge, to any person obtaining a copy of -// this software and associated documentation files (the "Software"), to deal in -// the Software without restriction, including without limitation the rights to -// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software is furnished to do so, -// subject to the following conditions: - -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. - -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -package z - -import ( - "bytes" - "encoding/json" - "log" - "math" - "unsafe" -) - -// helper -var mask = []uint8{1, 2, 4, 8, 16, 32, 64, 128} - -func getSize(ui64 uint64) (size uint64, exponent uint64) { - if ui64 < uint64(512) { - ui64 = uint64(512) - } - size = uint64(1) - for size < ui64 { - size <<= 1 - exponent++ - } - return size, exponent -} - -func calcSizeByWrongPositives(numEntries, wrongs float64) (uint64, uint64) { - size := -1 * numEntries * math.Log(wrongs) / math.Pow(float64(0.69314718056), 2) - locs := math.Ceil(float64(0.69314718056) * size / numEntries) - return uint64(size), uint64(locs) -} - -// NewBloomFilter returns a new bloomfilter. -func NewBloomFilter(params ...float64) (bloomfilter *Bloom) { - var entries, locs uint64 - if len(params) == 2 { - if params[1] < 1 { - entries, locs = calcSizeByWrongPositives(params[0], params[1]) - } else { - entries, locs = uint64(params[0]), uint64(params[1]) - } - } else { - log.Fatal("usage: New(float64(number_of_entries), float64(number_of_hashlocations))" + - " i.e. New(float64(1000), float64(3)) or New(float64(number_of_entries)," + - " float64(number_of_hashlocations)) i.e. 
New(float64(1000), float64(0.03))") - } - size, exponent := getSize(entries) - bloomfilter = &Bloom{ - sizeExp: exponent, - size: size - 1, - setLocs: locs, - shift: 64 - exponent, - } - bloomfilter.Size(size) - return bloomfilter -} - -// Bloom filter -type Bloom struct { - bitset []uint64 - ElemNum uint64 - sizeExp uint64 - size uint64 - setLocs uint64 - shift uint64 -} - -// <--- http://www.cse.yorku.ca/~oz/hash.html -// modified Berkeley DB Hash (32bit) -// hash is casted to l, h = 16bit fragments -// func (bl Bloom) absdbm(b *[]byte) (l, h uint64) { -// hash := uint64(len(*b)) -// for _, c := range *b { -// hash = uint64(c) + (hash << 6) + (hash << bl.sizeExp) - hash -// } -// h = hash >> bl.shift -// l = hash << bl.shift >> bl.shift -// return l, h -// } - -// Add adds hash of a key to the bloomfilter. -func (bl *Bloom) Add(hash uint64) { - h := hash >> bl.shift - l := hash << bl.shift >> bl.shift - for i := uint64(0); i < bl.setLocs; i++ { - bl.Set((h + i*l) & bl.size) - bl.ElemNum++ - } -} - -// Has checks if bit(s) for entry hash is/are set, -// returns true if the hash was added to the Bloom Filter. -func (bl Bloom) Has(hash uint64) bool { - h := hash >> bl.shift - l := hash << bl.shift >> bl.shift - for i := uint64(0); i < bl.setLocs; i++ { - if !bl.IsSet((h + i*l) & bl.size) { - return false - } - } - return true -} - -// AddIfNotHas only Adds hash, if it's not present in the bloomfilter. -// Returns true if hash was added. -// Returns false if hash was already registered in the bloomfilter. -func (bl *Bloom) AddIfNotHas(hash uint64) bool { - if bl.Has(hash) { - return false - } - bl.Add(hash) - return true -} - -// Size makes Bloom filter with as bitset of size sz. -func (bl *Bloom) Size(sz uint64) { - bl.bitset = make([]uint64, sz>>6) -} - -// Clear resets the Bloom filter. -func (bl *Bloom) Clear() { - for i := range bl.bitset { - bl.bitset[i] = 0 - } -} - -// Set sets the bit[idx] of bitset. -func (bl *Bloom) Set(idx uint64) { - ptr := unsafe.Pointer(uintptr(unsafe.Pointer(&bl.bitset[idx>>6])) + uintptr((idx%64)>>3)) - *(*uint8)(ptr) |= mask[idx%8] -} - -// IsSet checks if bit[idx] of bitset is set, returns true/false. -func (bl *Bloom) IsSet(idx uint64) bool { - ptr := unsafe.Pointer(uintptr(unsafe.Pointer(&bl.bitset[idx>>6])) + uintptr((idx%64)>>3)) - r := ((*(*uint8)(ptr)) >> (idx % 8)) & 1 - return r == 1 -} - -// bloomJSONImExport -// Im/Export structure used by JSONMarshal / JSONUnmarshal -type bloomJSONImExport struct { - FilterSet []byte - SetLocs uint64 -} - -// NewWithBoolset takes a []byte slice and number of locs per entry, -// returns the bloomfilter with a bitset populated according to the input []byte. -func newWithBoolset(bs *[]byte, locs uint64) *Bloom { - bloomfilter := NewBloomFilter(float64(len(*bs)<<3), float64(locs)) - for i, b := range *bs { - *(*uint8)(unsafe.Pointer(uintptr(unsafe.Pointer(&bloomfilter.bitset[0])) + uintptr(i))) = b - } - return bloomfilter -} - -// JSONUnmarshal takes JSON-Object (type bloomJSONImExport) as []bytes -// returns bloom32 / bloom64 object. -func JSONUnmarshal(dbData []byte) (*Bloom, error) { - bloomImEx := bloomJSONImExport{} - if err := json.Unmarshal(dbData, &bloomImEx); err != nil { - return nil, err - } - buf := bytes.NewBuffer(bloomImEx.FilterSet) - bs := buf.Bytes() - bf := newWithBoolset(&bs, bloomImEx.SetLocs) - return bf, nil -} - -// JSONMarshal returns JSON-object (type bloomJSONImExport) as []byte. 
-func (bl Bloom) JSONMarshal() []byte { - bloomImEx := bloomJSONImExport{} - bloomImEx.SetLocs = bl.setLocs - bloomImEx.FilterSet = make([]byte, len(bl.bitset)<<3) - for i := range bloomImEx.FilterSet { - bloomImEx.FilterSet[i] = *(*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(&bl.bitset[0])) + - uintptr(i))) - } - data, err := json.Marshal(bloomImEx) - if err != nil { - log.Fatal("json.Marshal failed: ", err) - } - return data -} diff --git a/vendor/github.com/dgraph-io/ristretto/z/buffer.go b/vendor/github.com/dgraph-io/ristretto/z/buffer.go deleted file mode 100644 index c63b1ec1..00000000 --- a/vendor/github.com/dgraph-io/ristretto/z/buffer.go +++ /dev/null @@ -1,136 +0,0 @@ -/* - * Copyright 2020 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package z - -import ( - "encoding/binary" -) - -// Buffer is equivalent of bytes.Buffer without the ability to read. It uses z.Calloc to allocate -// memory, which depending upon how the code is compiled could use jemalloc for allocations. -type Buffer struct { - buf []byte - offset int -} - -// NewBuffer would allocate a buffer of size sz upfront. -func NewBuffer(sz int) *Buffer { - return &Buffer{ - buf: Calloc(sz), - offset: 0, - } -} - -// Len would return the number of bytes written to the buffer so far. -func (b *Buffer) Len() int { - return b.offset -} - -// Bytes would return all the written bytes as a slice. -func (b *Buffer) Bytes() []byte { - return b.buf[0:b.offset] -} - -// smallBufferSize is an initial allocation minimal capacity. -const smallBufferSize = 64 - -// Grow would grow the buffer to have at least n more bytes. In case the buffer is at capacity, it -// would reallocate twice the size of current capacity + n, to ensure n bytes can be written to the -// buffer without further allocation. -func (b *Buffer) Grow(n int) { - // In this case, len and cap are the same. - if len(b.buf) == 0 && n <= smallBufferSize { - b.buf = Calloc(smallBufferSize) - return - } else if b.buf == nil { - b.buf = Calloc(n) - return - } - if b.offset+n < len(b.buf) { - return - } - - sz := 2*len(b.buf) + n - newBuf := Calloc(sz) - copy(newBuf, b.buf[:b.offset]) - Free(b.buf) - b.buf = newBuf -} - -// Allocate is a way to get a slice of size n back from the buffer. This slice can be directly -// written to. Warning: Allocate is not thread-safe. The byte slice returned MUST be used before -// further calls to Buffer. -func (b *Buffer) Allocate(n int) []byte { - b.Grow(n) - off := b.offset - b.offset += n - return b.buf[off:b.offset] -} - -func (b *Buffer) writeLen(sz int) { - buf := b.Allocate(4) - binary.BigEndian.PutUint32(buf, uint32(sz)) -} - -// SliceAllocate would encode the size provided into the buffer, followed by a call to Allocate, -// hence returning the slice of size sz. This can be used to allocate a lot of small buffers into -// this big buffer. -// Note that SliceAllocate should NOT be mixed with normal calls to Write. Otherwise, SliceOffsets -// won't work. 
-func (b *Buffer) SliceAllocate(sz int) []byte { - b.Grow(4 + sz) - b.writeLen(sz) - return b.Allocate(sz) -} - -// SliceOffsets would return the offsets of all slices written to the buffer. -// TODO: Perhaps keep the offsets separate in another buffer, and allow access to slices via index. -func (b *Buffer) SliceOffsets(offsets []int) []int { - start := 0 - for start < b.offset { - offsets = append(offsets, start) - sz := binary.BigEndian.Uint32(b.buf[start:]) - start += 4 + int(sz) - } - return offsets -} - -// Slice would return the slice written at offset. -func (b *Buffer) Slice(offset int) []byte { - sz := binary.BigEndian.Uint32(b.buf[offset:]) - start := offset + 4 - return b.buf[start : start+int(sz)] -} - -// Write would write p bytes to the buffer. -func (b *Buffer) Write(p []byte) (n int, err error) { - b.Grow(len(p)) - n = copy(b.buf[b.offset:], p) - b.offset += n - return n, nil -} - -// Reset would reset the buffer to be reused. -func (b *Buffer) Reset() { - b.offset = 0 -} - -// Release would free up the memory allocated by the buffer. Once the usage of buffer is done, it is -// important to call Release, otherwise a memory leak can happen. -func (b *Buffer) Release() { - Free(b.buf) -} diff --git a/vendor/github.com/dgraph-io/ristretto/z/calloc.go b/vendor/github.com/dgraph-io/ristretto/z/calloc.go deleted file mode 100644 index 71980f76..00000000 --- a/vendor/github.com/dgraph-io/ristretto/z/calloc.go +++ /dev/null @@ -1,11 +0,0 @@ -package z - -import "sync/atomic" - -var numBytes int64 - -// NumAllocBytes returns the number of bytes allocated using calls to z.Calloc. The allocations -// could be happening via either Go or jemalloc, depending upon the build flags. -func NumAllocBytes() int64 { - return atomic.LoadInt64(&numBytes) -} diff --git a/vendor/github.com/dgraph-io/ristretto/z/calloc_32bit.go b/vendor/github.com/dgraph-io/ristretto/z/calloc_32bit.go deleted file mode 100644 index db36d985..00000000 --- a/vendor/github.com/dgraph-io/ristretto/z/calloc_32bit.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2020 The LevelDB-Go and Pebble Authors. All rights reserved. Use -// of this source code is governed by a BSD-style license that can be found in -// the LICENSE file. - -// +build 386 amd64p32 arm armbe mips mipsle mips64p32 mips64p32le ppc sparc - -package z - -const ( - // MaxArrayLen is a safe maximum length for slices on this architecture. - MaxArrayLen = 1<<31 - 1 -) diff --git a/vendor/github.com/dgraph-io/ristretto/z/calloc_64bit.go b/vendor/github.com/dgraph-io/ristretto/z/calloc_64bit.go deleted file mode 100644 index 7e2c5da7..00000000 --- a/vendor/github.com/dgraph-io/ristretto/z/calloc_64bit.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2020 The LevelDB-Go and Pebble Authors. All rights reserved. Use -// of this source code is governed by a BSD-style license that can be found in -// the LICENSE file. - -// +build amd64 arm64 arm64be ppc64 ppc64le mips64 mips64le s390x sparc64 - -package z - -const ( - // MaxArrayLen is a safe maximum length for slices on this architecture. - MaxArrayLen = 1<<50 - 1 -) diff --git a/vendor/github.com/dgraph-io/ristretto/z/calloc_jemalloc.go b/vendor/github.com/dgraph-io/ristretto/z/calloc_jemalloc.go deleted file mode 100644 index 2a10683a..00000000 --- a/vendor/github.com/dgraph-io/ristretto/z/calloc_jemalloc.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright 2020 The LevelDB-Go and Pebble Authors. All rights reserved. 
Use
-// of this source code is governed by a BSD-style license that can be found in
-// the LICENSE file.
-
-// +build jemalloc
-
-package z
-
-/*
-#cgo LDFLAGS: -L/usr/local/lib -Wl,-rpath,/usr/local/lib -ljemalloc -lm -lstdc++ -pthread -ldl
-#include <stdlib.h>
-#include <jemalloc/jemalloc.h>
-*/
-import "C"
-import (
-	"sync/atomic"
-	"unsafe"
-)
-
-// The go:linkname directives provides backdoor access to private functions in
-// the runtime. Below we're accessing the throw function.
-
-//go:linkname throw runtime.throw
-func throw(s string)
-
-// New allocates a slice of size n. The returned slice is from manually managed
-// memory and MUST be released by calling Free. Failure to do so will result in
-// a memory leak.
-//
-// Compile jemalloc with ./configure --with-jemalloc-prefix="je_"
-// https://android.googlesource.com/platform/external/jemalloc_new/+/6840b22e8e11cb68b493297a5cd757d6eaa0b406/TUNING.md
-// These two config options seems useful for frequent allocations and deallocations in
-// multi-threaded programs (like we have).
-// JE_MALLOC_CONF="background_thread:true,metadata_thp:auto"
-//
-// Compile Go program with `go build -tags=jemalloc` to enable this.
-func Calloc(n int) []byte {
-	if n == 0 {
-		return make([]byte, 0)
-	}
-	// We need to be conscious of the Cgo pointer passing rules:
-	//
-	// https://golang.org/cmd/cgo/#hdr-Passing_pointers
-	//
-	//   ...
-	//   Note: the current implementation has a bug. While Go code is permitted
-	//   to write nil or a C pointer (but not a Go pointer) to C memory, the
-	//   current implementation may sometimes cause a runtime error if the
-	//   contents of the C memory appear to be a Go pointer. Therefore, avoid
-	//   passing uninitialized C memory to Go code if the Go code is going to
-	//   store pointer values in it. Zero out the memory in C before passing it
-	//   to Go.
-
-	ptr := C.je_calloc(C.size_t(n), 1)
-	if ptr == nil {
-		// NB: throw is like panic, except it guarantees the process will be
-		// terminated. The call below is exactly what the Go runtime invokes when
-		// it cannot allocate memory.
-		throw("out of memory")
-	}
-	atomic.AddInt64(&numBytes, int64(n))
-	// Interpret the C pointer as a pointer to a Go array, then slice.
-	return (*[MaxArrayLen]byte)(unsafe.Pointer(ptr))[:n:n]
-}
-
-// CallocNoRef does the exact same thing as Calloc with jemalloc enabled.
-func CallocNoRef(n int) []byte {
-	return Calloc(n)
-}
-
-// Free frees the specified slice.
-func Free(b []byte) {
-	if sz := cap(b); sz != 0 {
-		if len(b) == 0 {
-			b = b[:cap(b)]
-		}
-		ptr := unsafe.Pointer(&b[0])
-		C.je_free(ptr)
-		atomic.AddInt64(&numBytes, -int64(sz))
-	}
-}
-
-func StatsPrint() {
-	opts := C.CString("mdablxe")
-	C.je_malloc_stats_print(nil, nil, opts)
-	C.free(unsafe.Pointer(opts))
-}
diff --git a/vendor/github.com/dgraph-io/ristretto/z/calloc_nojemalloc.go b/vendor/github.com/dgraph-io/ristretto/z/calloc_nojemalloc.go
deleted file mode 100644
index 25bfaa1e..00000000
--- a/vendor/github.com/dgraph-io/ristretto/z/calloc_nojemalloc.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2020 The LevelDB-Go and Pebble Authors. All rights reserved. Use
-// of this source code is governed by a BSD-style license that can be found in
-// the LICENSE file.
-
-// +build !jemalloc
-
-package z
-
-import (
-	"fmt"
-)
-
-// Provides versions of New and Free when cgo is not available (e.g. cross
-// compilation).
-
-// Calloc allocates a slice of size n.
-func Calloc(n int) []byte {
-	return make([]byte, n)
-}
-
-// CallocNoRef will not give you memory back without jemalloc.
-func CallocNoRef(n int) []byte {
-	// We do the add here just to stay compatible with a corresponding Free call.
-	return nil
-}
-
-// Free does not do anything in this mode.
-func Free(b []byte) {}
-
-func StatsPrint() {
-	fmt.Println("Using Go memory")
-}
diff --git a/vendor/github.com/dgraph-io/ristretto/z/histogram.go b/vendor/github.com/dgraph-io/ristretto/z/histogram.go
deleted file mode 100644
index 119aea5f..00000000
--- a/vendor/github.com/dgraph-io/ristretto/z/histogram.go
+++ /dev/null
@@ -1,150 +0,0 @@
-/*
- * Copyright 2020 Dgraph Labs, Inc. and Contributors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package z
-
-import (
-	"fmt"
-	"math"
-	"strings"
-
-	"github.com/dustin/go-humanize"
-)
-
-// Creates bounds for an histogram. The bounds are powers of two of the form
-// [2^min_exponent, ..., 2^max_exponent].
-func HistogramBounds(minExponent, maxExponent uint32) []float64 {
-	var bounds []float64
-	for i := minExponent; i <= maxExponent; i++ {
-		bounds = append(bounds, float64(int(1)<<i))
-	}
-	return bounds
-}
-
-// HistogramData stores the information needed to represent the sizes of the keys and values
-// as a histogram.
-type HistogramData struct {
-	Bounds         []float64
-	Count          int64
-	CountPerBucket []int64
-	Min            int64
-	Max            int64
-	Sum            int64
-}
-
-// NewHistogramData returns a new instance of HistogramData with properly initialized fields.
-func NewHistogramData(bounds []float64) *HistogramData {
-	return &HistogramData{
-		Bounds:         bounds,
-		CountPerBucket: make([]int64, len(bounds)+1),
-		Max:            0,
-		Min:            math.MaxInt64,
-	}
-}
-
-// Update changes the Min and Max fields if value is less than or greater than the current values.
-func (histogram *HistogramData) Update(value int64) {
-	if value > histogram.Max {
-		histogram.Max = value
-	}
-	if value < histogram.Min {
-		histogram.Min = value
-	}
-
-	histogram.Sum += value
-	histogram.Count++
-
-	for index := 0; index <= len(histogram.Bounds); index++ {
-		// Allocate value in the last buckets if we reached the end of the Bounds array.
-		if index == len(histogram.Bounds) {
-			histogram.CountPerBucket[index]++
-			break
-		}
-
-		if value < int64(histogram.Bounds[index]) {
-			histogram.CountPerBucket[index]++
-			break
-		}
-	}
-}
-
-// Mean returns the mean value for the histogram.
-func (histogram *HistogramData) Mean() float64 {
-	if histogram.Count == 0 {
-		return 0
-	}
-	return float64(histogram.Sum) / float64(histogram.Count)
-}
-
-// String converts the histogram data into human-readable string.
-func (histogram *HistogramData) String() string {
-	if histogram == nil {
-		return ""
-	}
-	var b strings.Builder
-
-	b.WriteString("\n -- Histogram: \n")
-	b.WriteString(fmt.Sprintf("Min value: %d \n", histogram.Min))
-	b.WriteString(fmt.Sprintf("Max value: %d \n", histogram.Max))
-	b.WriteString(fmt.Sprintf("Mean: %.2f \n", histogram.Mean()))
-
-	numBounds := len(histogram.Bounds)
-	for index, count := range histogram.CountPerBucket {
-		if count == 0 {
-			continue
-		}
-
-		// The last bucket represents the bucket that contains the range from
-		// the last bound up to infinity so it's processed differently than the
-		// other buckets.
- if index == len(histogram.CountPerBucket)-1 { - lowerBound := uint64(histogram.Bounds[numBounds-1]) - page := float64(count*100) / float64(histogram.Count) - b.WriteString(fmt.Sprintf("[%s, %s) %d %.2f%% \n", - humanize.IBytes(lowerBound), "infinity", count, page)) - continue - } - - upperBound := uint64(histogram.Bounds[index]) - lowerBound := uint64(0) - if index > 0 { - lowerBound = uint64(histogram.Bounds[index-1]) - } - - page := float64(count*100) / float64(histogram.Count) - b.WriteString(fmt.Sprintf("[%s, %s) %d %.2f%% \n", - humanize.IBytes(lowerBound), humanize.IBytes(upperBound), count, page)) - } - b.WriteString(" --\n") - return b.String() -} diff --git a/vendor/github.com/dgraph-io/ristretto/z/rtutil.go b/vendor/github.com/dgraph-io/ristretto/z/rtutil.go deleted file mode 100644 index 16aff0c9..00000000 --- a/vendor/github.com/dgraph-io/ristretto/z/rtutil.go +++ /dev/null @@ -1,64 +0,0 @@ -// MIT License - -// Copyright (c) 2019 Ewan Chou - -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: - -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. - -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE. - -package z - -import ( - "unsafe" -) - -// NanoTime returns the current time in nanoseconds from a monotonic clock. -//go:linkname NanoTime runtime.nanotime -func NanoTime() int64 - -// CPUTicks is a faster alternative to NanoTime to measure time duration. -//go:linkname CPUTicks runtime.cputicks -func CPUTicks() int64 - -type stringStruct struct { - str unsafe.Pointer - len int -} - -//go:noescape -//go:linkname memhash runtime.memhash -func memhash(p unsafe.Pointer, h, s uintptr) uintptr - -// MemHash is the hash function used by go map, it utilizes available hardware instructions(behaves -// as aeshash if aes instruction is available). -// NOTE: The hash seed changes for every process. So, this cannot be used as a persistent hash. -func MemHash(data []byte) uint64 { - ss := (*stringStruct)(unsafe.Pointer(&data)) - return uint64(memhash(ss.str, 0, uintptr(ss.len))) -} - -// MemHashString is the hash function used by go map, it utilizes available hardware instructions -// (behaves as aeshash if aes instruction is available). -// NOTE: The hash seed changes for every process. So, this cannot be used as a persistent hash. -func MemHashString(str string) uint64 { - ss := (*stringStruct)(unsafe.Pointer(&str)) - return uint64(memhash(ss.str, 0, uintptr(ss.len))) -} - -// FastRand is a fast thread local random function. 
-//go:linkname FastRand runtime.fastrand -func FastRand() uint32 diff --git a/vendor/github.com/dgraph-io/ristretto/z/rtutil.s b/vendor/github.com/dgraph-io/ristretto/z/rtutil.s deleted file mode 100644 index e69de29b..00000000 diff --git a/vendor/github.com/dgraph-io/ristretto/z/z.go b/vendor/github.com/dgraph-io/ristretto/z/z.go deleted file mode 100644 index a25e10d1..00000000 --- a/vendor/github.com/dgraph-io/ristretto/z/z.go +++ /dev/null @@ -1,127 +0,0 @@ -/* - * Copyright 2019 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package z - -import ( - "context" - "sync" - - "github.com/cespare/xxhash" -) - -// TODO: Figure out a way to re-use memhash for the second uint64 hash, we -// already know that appending bytes isn't reliable for generating a -// second hash (see Ristretto PR #88). -// -// We also know that while the Go runtime has a runtime memhash128 -// function, it's not possible to use it to generate [2]uint64 or -// anything resembling a 128bit hash, even though that's exactly what -// we need in this situation. -func KeyToHash(key interface{}) (uint64, uint64) { - if key == nil { - return 0, 0 - } - switch k := key.(type) { - case uint64: - return k, 0 - case string: - return MemHashString(k), xxhash.Sum64String(k) - case []byte: - return MemHash(k), xxhash.Sum64(k) - case byte: - return uint64(k), 0 - case int: - return uint64(k), 0 - case int32: - return uint64(k), 0 - case uint32: - return uint64(k), 0 - case int64: - return uint64(k), 0 - default: - panic("Key type not supported") - } -} - -var ( - dummyCloserChan <-chan struct{} -) - -// Closer holds the two things we need to close a goroutine and wait for it to -// finish: a chan to tell the goroutine to shut down, and a WaitGroup with -// which to wait for it to finish shutting down. -type Closer struct { - waiting sync.WaitGroup - - ctx context.Context - cancel context.CancelFunc -} - -// NewCloser constructs a new Closer, with an initial count on the WaitGroup. -func NewCloser(initial int) *Closer { - ret := &Closer{} - ret.ctx, ret.cancel = context.WithCancel(context.Background()) - ret.waiting.Add(initial) - return ret -} - -// AddRunning Add()'s delta to the WaitGroup. -func (lc *Closer) AddRunning(delta int) { - lc.waiting.Add(delta) -} - -// Ctx can be used to get a context, which would automatically get cancelled when Signal is called. -func (lc *Closer) Ctx() context.Context { - if lc == nil { - return context.Background() - } - return lc.ctx -} - -// Signal signals the HasBeenClosed signal. -func (lc *Closer) Signal() { - // Todo(ibrahim): Change Signal to return error on next badger breaking change. - lc.cancel() -} - -// HasBeenClosed gets signaled when Signal() is called. -func (lc *Closer) HasBeenClosed() <-chan struct{} { - if lc == nil { - return dummyCloserChan - } - return lc.ctx.Done() -} - -// Done calls Done() on the WaitGroup. -func (lc *Closer) Done() { - if lc == nil { - return - } - lc.waiting.Done() -} - -// Wait waits on the WaitGroup. 
(It waits for NewCloser's initial value, AddRunning, and Done -// calls to balance out.) -func (lc *Closer) Wait() { - lc.waiting.Wait() -} - -// SignalAndWait calls Signal(), then Wait(). -func (lc *Closer) SignalAndWait() { - lc.Signal() - lc.Wait() -} diff --git a/vendor/github.com/dgryski/go-farm/.gitignore b/vendor/github.com/dgryski/go-farm/.gitignore deleted file mode 100644 index 36029ab5..00000000 --- a/vendor/github.com/dgryski/go-farm/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -*.exe -*.test -*.prof - -target diff --git a/vendor/github.com/dgryski/go-farm/.travis.yml b/vendor/github.com/dgryski/go-farm/.travis.yml deleted file mode 100644 index a6422d3f..00000000 --- a/vendor/github.com/dgryski/go-farm/.travis.yml +++ /dev/null @@ -1,39 +0,0 @@ -language: go - -sudo: false - -branches: - except: - - release - -branches: - only: - - master - - develop - - travis - -go: - - 1.12.x - - 1.13.x - - tip - -matrix: - allow_failures: - - go: tip - -before_install: - - if [ -n "$GH_USER" ]; then git config --global github.user ${GH_USER}; fi; - - if [ -n "$GH_TOKEN" ]; then git config --global github.token ${GH_TOKEN}; fi; - - go get github.com/mattn/goveralls - -before_script: - - make deps - -script: - - make qa - -after_failure: - - cat ./target/test/report.xml - -after_success: - - if [ "$TRAVIS_GO_VERSION" = "1.9" ]; then $HOME/gopath/bin/goveralls -covermode=count -coverprofile=target/report/coverage.out -service=travis-ci; fi; diff --git a/vendor/github.com/dgryski/go-farm/LICENSE b/vendor/github.com/dgryski/go-farm/LICENSE deleted file mode 100644 index 0f188485..00000000 --- a/vendor/github.com/dgryski/go-farm/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -Copyright (c) 2014-2017 Damian Gryski -Copyright (c) 2016-2017 Nicola Asuni - Tecnick.com - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - diff --git a/vendor/github.com/dgryski/go-farm/Makefile b/vendor/github.com/dgryski/go-farm/Makefile deleted file mode 100644 index 7592736e..00000000 --- a/vendor/github.com/dgryski/go-farm/Makefile +++ /dev/null @@ -1,187 +0,0 @@ -# MAKEFILE -# -# @author Nicola Asuni -# @link https://github.com/dgryski/go-farm -# -# This file is intended to be executed in a Linux-compatible system. 
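Review note on the z.go deletion above: Closer is just a context.CancelFunc paired with a sync.WaitGroup, so any caller that used it can fall back to the standard library. A sketch of the equivalent pattern, with a hypothetical worker goroutine that is not from this repo:

```go
package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	var wg sync.WaitGroup

	wg.Add(1) // like z.NewCloser(1)
	go func() {
		defer wg.Done() // like closer.Done()
		for {
			select {
			case <-ctx.Done(): // like <-closer.HasBeenClosed()
				fmt.Println("worker: shutting down")
				return
			case <-time.After(50 * time.Millisecond):
				fmt.Println("worker: tick")
			}
		}
	}()

	time.Sleep(200 * time.Millisecond)
	cancel()  // like closer.Signal()
	wg.Wait() // like closer.Wait(); the pair is closer.SignalAndWait()
}
```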
-# It also assumes that the project has been cloned in the right path under GOPATH:
-# $GOPATH/src/github.com/dgryski/go-farm
-#
-# ------------------------------------------------------------------------------
-
-# List special make targets that are not associated with files
-.PHONY: help all test format fmtcheck vet lint coverage cyclo misspell errcheck staticcheck astscan qa deps clean nuke
-
-# Use bash as shell (Note: Ubuntu now uses dash which doesn't support PIPESTATUS).
-SHELL=/bin/bash
-
-# CVS path (path to the parent dir containing the project)
-CVSPATH=github.com/dgryski
-
-# Project owner
-OWNER=dgryski
-
-# Project vendor
-VENDOR=dgryski
-
-# Project name
-PROJECT=go-farm
-
-# Project version
-VERSION=$(shell cat VERSION)
-
-# Name of RPM or DEB package
-PKGNAME=${VENDOR}-${PROJECT}
-
-# Current directory
-CURRENTDIR=$(shell pwd)
-
-# GO lang path
-ifneq ($(GOPATH),)
-	ifeq ($(findstring $(GOPATH),$(CURRENTDIR)),)
-		# the defined GOPATH is not valid
-		GOPATH=
-	endif
-endif
-ifeq ($(GOPATH),)
-	# extract the GOPATH
-	GOPATH=$(firstword $(subst /src/, ,$(CURRENTDIR)))
-endif
-
-# --- MAKE TARGETS ---
-
-# Display general help about this command
-help:
-	@echo ""
-	@echo "$(PROJECT) Makefile."
-	@echo "GOPATH=$(GOPATH)"
-	@echo "The following commands are available:"
-	@echo ""
-	@echo " make qa : Run all the tests"
-	@echo " make test : Run the unit tests"
-	@echo ""
-	@echo " make format : Format the source code"
-	@echo " make fmtcheck : Check if the source code has been formatted"
-	@echo " make vet : Check for suspicious constructs"
-	@echo " make lint : Check for style errors"
-	@echo " make coverage : Generate the coverage report"
-	@echo " make cyclo : Generate the cyclomatic complexity report"
-	@echo " make misspell : Detect commonly misspelled words in source files"
-	@echo " make staticcheck : Run staticcheck"
-	@echo " make errcheck : Check that error return values are used"
-	@echo " make astscan : GO AST scanner"
-	@echo ""
-	@echo " make docs : Generate source code documentation"
-	@echo ""
-	@echo " make deps : Get the dependencies"
-	@echo " make clean : Remove any build artifact"
-	@echo " make nuke : Deletes any intermediate file"
-	@echo ""
-
-
-# Alias for help target
-all: help
-
-# Run the unit tests
-test:
-	@mkdir -p target/test
-	@mkdir -p target/report
-	GOPATH=$(GOPATH) \
-	go test \
-	-covermode=atomic \
-	-bench=. \
-	-race \
-	-cpuprofile=target/report/cpu.out \
-	-memprofile=target/report/mem.out \
-	-mutexprofile=target/report/mutex.out \
-	-coverprofile=target/report/coverage.out \
-	-v ./... | \
-	tee >(PATH=$(GOPATH)/bin:$(PATH) go-junit-report > target/test/report.xml); \
-	test $${PIPESTATUS[0]} -eq 0
-
-# Format the source code
-format:
-	@find . -type f -name "*.go" -exec gofmt -s -w {} \;
-
-# Check if the source code has been formatted
-fmtcheck:
-	@mkdir -p target
-	@find . -type f -name "*.go" -exec gofmt -s -d {} \; | tee target/format.diff
-	@test ! -s target/format.diff || { echo "ERROR: the source code has not been formatted - please use 'make format' or 'gofmt'"; exit 1; }
-
-# Check for syntax errors
-vet:
-	GOPATH=$(GOPATH) go vet .
-
-# Check for style errors
-lint:
-	GOPATH=$(GOPATH) PATH=$(GOPATH)/bin:$(PATH) golint .
- -# Generate the coverage report -coverage: - @mkdir -p target/report - GOPATH=$(GOPATH) \ - go tool cover -html=target/report/coverage.out -o target/report/coverage.html - -# Report cyclomatic complexity -cyclo: - @mkdir -p target/report - GOPATH=$(GOPATH) gocyclo -avg ./ | tee target/report/cyclo.txt ; test $${PIPESTATUS[0]} -eq 0 - -# Detect commonly misspelled words in source files -misspell: - @mkdir -p target/report - GOPATH=$(GOPATH) misspell -error ./ | tee target/report/misspell.txt ; test $${PIPESTATUS[0]} -eq 0 - -# Check that error return values are used -errcheck: - @mkdir -p target/report - GOPATH=$(GOPATH) errcheck ./ | tee target/report/errcheck.txt - - -# staticcheck -staticcheck: - @mkdir -p target/report - GOPATH=$(GOPATH) staticcheck ./... | tee target/report/staticcheck.txt - - -# AST scanner -astscan: - @mkdir -p target/report - GOPATH=$(GOPATH) gas .//*.go | tee target/report/astscan.txt - -# Generate source docs -docs: - @mkdir -p target/docs - nohup sh -c 'GOPATH=$(GOPATH) godoc -http=127.0.0.1:6060' > target/godoc_server.log 2>&1 & - wget --directory-prefix=target/docs/ --execute robots=off --retry-connrefused --recursive --no-parent --adjust-extension --page-requisites --convert-links http://127.0.0.1:6060/pkg/github.com/${VENDOR}/${PROJECT}/ ; kill -9 `lsof -ti :6060` - @echo ''${PKGNAME}' Documentation ...' > target/docs/index.html - -# Alias to run all quality-assurance checks -qa: fmtcheck test vet lint coverage cyclo misspell errcheck astscan - -# --- INSTALL --- - -# Get the dependencies -deps: - GOPATH=$(GOPATH) go get ./... - GOPATH=$(GOPATH) go get golang.org/x/lint/golint - GOPATH=$(GOPATH) go get github.com/jstemmer/go-junit-report - GOPATH=$(GOPATH) go get github.com/axw/gocov/gocov - GOPATH=$(GOPATH) go get github.com/fzipp/gocyclo - GOPATH=$(GOPATH) go get github.com/gordonklaus/ineffassign - GOPATH=$(GOPATH) go get github.com/client9/misspell/cmd/misspell - GOPATH=$(GOPATH) go get github.com/opennota/check/cmd/structcheck - GOPATH=$(GOPATH) go get github.com/opennota/check/cmd/varcheck - GOPATH=$(GOPATH) go get github.com/kisielk/errcheck - GOPATH=$(GOPATH) go get honnef.co/go/tools/cmd/staticcheck - GOPATH=$(GOPATH) go get github.com/GoASTScanner/gas - -# Remove any build artifact -clean: - GOPATH=$(GOPATH) go clean ./... - -# Deletes any intermediate file -nuke: - rm -rf ./target - GOPATH=$(GOPATH) go clean -i ./... diff --git a/vendor/github.com/dgryski/go-farm/README.md b/vendor/github.com/dgryski/go-farm/README.md deleted file mode 100644 index 0784f90f..00000000 --- a/vendor/github.com/dgryski/go-farm/README.md +++ /dev/null @@ -1,46 +0,0 @@ -# go-farm - -*Google's FarmHash hash functions implemented in Go* - -[![Master Branch](https://img.shields.io/badge/-master:-gray.svg)](https://github.com/dgryski/go-farm/tree/master) -[![Master Build Status](https://secure.travis-ci.org/dgryski/go-farm.png?branch=master)](https://travis-ci.org/dgryski/go-farm?branch=master) -[![Master Coverage Status](https://coveralls.io/repos/dgryski/go-farm/badge.svg?branch=master&service=github)](https://coveralls.io/github/dgryski/go-farm?branch=master) -[![Go Report Card](https://goreportcard.com/badge/github.com/dgryski/go-farm)](https://goreportcard.com/report/github.com/dgryski/go-farm) -[![GoDoc](https://godoc.org/github.com/dgryski/go-farm?status.svg)](http://godoc.org/github.com/dgryski/go-farm) - -## Description - -FarmHash, a family of hash functions. 
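For review context: the public surface being deleted here is small. A hypothetical caller looks like this, assuming the module path stays github.com/dgryski/go-farm as vendored above:

```go
package main

import (
	"fmt"

	farm "github.com/dgryski/go-farm"
)

func main() {
	data := []byte("hello, farmhash")

	fmt.Println(farm.Hash32(data)) // 32-bit hash
	fmt.Println(farm.Hash64(data)) // 64-bit hash

	// Per the upstream FarmHash docs, Fingerprint* values are intended to
	// stay stable across releases, unlike Hash*, so they can be persisted.
	fmt.Println(farm.Fingerprint64(data))
	lo, hi := farm.Fingerprint128(data)
	fmt.Println(lo, hi)
}
```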
-
-This is a (mechanical) translation of the non-SSE4/non-AESNI hash functions from Google's FarmHash (https://github.com/google/farmhash).
-
-
-FarmHash provides hash functions for strings and other data.
-The functions mix the input bits thoroughly but are not suitable for cryptography.
-
-All members of the FarmHash family were designed with heavy reliance on previous work by Jyrki Alakuijala, Austin Appleby, Bob Jenkins, and others.
-
-For more information, please consult https://github.com/google/farmhash
-
-
-## Getting started
-
-This application is written in the Go language; please refer to the guides in https://golang.org for getting started.
-
-This project includes a Makefile that allows you to test and build the project with simple commands.
-To see all available options:
-```bash
-make help
-```
-
-## Running all tests
-
-Before committing the code, please check if it passes all tests using
-```bash
-make qa
-```
-
-## License
-
-As this is a highly derivative work, I have placed it under the same license as the original implementation. See the
-LICENSE file for details.
diff --git a/vendor/github.com/dgryski/go-farm/VERSION b/vendor/github.com/dgryski/go-farm/VERSION
deleted file mode 100644
index 38f77a65..00000000
--- a/vendor/github.com/dgryski/go-farm/VERSION
+++ /dev/null
@@ -1 +0,0 @@
-2.0.1
diff --git a/vendor/github.com/dgryski/go-farm/basics.go b/vendor/github.com/dgryski/go-farm/basics.go
deleted file mode 100644
index ec7076c0..00000000
--- a/vendor/github.com/dgryski/go-farm/basics.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package farm
-
-import "math/bits"
-
-// Some primes between 2^63 and 2^64 for various uses.
-const k0 uint64 = 0xc3a5c85c97cb3127
-const k1 uint64 = 0xb492b66fbe98f273
-const k2 uint64 = 0x9ae16a3b2f90404f
-
-// Magic numbers for 32-bit hashing. Copied from Murmur3.
-const c1 uint32 = 0xcc9e2d51
-const c2 uint32 = 0x1b873593
-
-// A 32-bit to 32-bit integer hash copied from Murmur3.
-func fmix(h uint32) uint32 {
-	h ^= h >> 16
-	h *= 0x85ebca6b
-	h ^= h >> 13
-	h *= 0xc2b2ae35
-	h ^= h >> 16
-	return h
-}
-
-func mur(a, h uint32) uint32 {
-	// Helper from Murmur3 for combining two 32-bit values.
-	a *= c1
-	a = bits.RotateLeft32(a, -17)
-	a *= c2
-	h ^= a
-	h = bits.RotateLeft32(h, -19)
-	return h*5 + 0xe6546b64
-}
diff --git a/vendor/github.com/dgryski/go-farm/farmhashcc.go b/vendor/github.com/dgryski/go-farm/farmhashcc.go
deleted file mode 100644
index 3e68ae3a..00000000
--- a/vendor/github.com/dgryski/go-farm/farmhashcc.go
+++ /dev/null
@@ -1,194 +0,0 @@
-package farm
-
-import (
-	"encoding/binary"
-	"math/bits"
-)
-
-// This file provides a 32-bit hash equivalent to CityHash32 (v1.1.1)
-// and a 128-bit hash equivalent to CityHash128 (v1.1.1). It also provides
-// a seeded 32-bit hash function similar to CityHash32.
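basics.go above carries the Murmur3 building blocks (`fmix`, `mur`, and the k0/k1/k2 primes) that every variant below leans on. A standalone sketch of the finalizer, lifted from the deleted file, shows the avalanche step in isolation:

```go
package main

import "fmt"

// fmix is Murmur3's 32-bit finalizer, as defined in basics.go: each
// xor-shift/multiply round spreads input bits across the whole word.
func fmix(h uint32) uint32 {
	h ^= h >> 16
	h *= 0x85ebca6b
	h ^= h >> 13
	h *= 0xc2b2ae35
	h ^= h >> 16
	return h
}

func main() {
	// Nearby inputs land far apart after finalization; output is
	// deterministic across runs since no seed is involved.
	fmt.Printf("%#08x\n", fmix(42))
	fmt.Printf("%#08x\n", fmix(43))
}
```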
- -func hash32Len13to24Seed(s []byte, seed uint32) uint32 { - slen := len(s) - a := binary.LittleEndian.Uint32(s[-4+(slen>>1) : -4+(slen>>1)+4]) - b := binary.LittleEndian.Uint32(s[4 : 4+4]) - c := binary.LittleEndian.Uint32(s[slen-8 : slen-8+4]) - d := binary.LittleEndian.Uint32(s[(slen >> 1) : (slen>>1)+4]) - e := binary.LittleEndian.Uint32(s[0 : 0+4]) - f := binary.LittleEndian.Uint32(s[slen-4 : slen-4+4]) - h := d*c1 + uint32(slen) + seed - a = bits.RotateLeft32(a, -12) + f - h = mur(c, h) + a - a = bits.RotateLeft32(a, -3) + c - h = mur(e, h) + a - a = bits.RotateLeft32(a+f, -12) + d - h = mur(b^seed, h) + a - return fmix(h) -} - -func hash32Len0to4(s []byte, seed uint32) uint32 { - slen := len(s) - b := seed - c := uint32(9) - for i := 0; i < slen; i++ { - v := int8(s[i]) - b = (b * c1) + uint32(v) - c ^= b - } - return fmix(mur(b, mur(uint32(slen), c))) -} - -func hash128to64(x uint128) uint64 { - // Murmur-inspired hashing. - const mul uint64 = 0x9ddfea08eb382d69 - a := (x.lo ^ x.hi) * mul - a ^= (a >> 47) - b := (x.hi ^ a) * mul - b ^= (b >> 47) - b *= mul - return b -} - -type uint128 struct { - lo uint64 - hi uint64 -} - -// A subroutine for CityHash128(). Returns a decent 128-bit hash for strings -// of any length representable in signed long. Based on City and Murmur. -func cityMurmur(s []byte, seed uint128) uint128 { - slen := len(s) - a := seed.lo - b := seed.hi - var c uint64 - var d uint64 - l := slen - 16 - if l <= 0 { // len <= 16 - a = shiftMix(a*k1) * k1 - c = b*k1 + hashLen0to16(s) - if slen >= 8 { - d = shiftMix(a + binary.LittleEndian.Uint64(s[0:0+8])) - } else { - d = shiftMix(a + c) - } - } else { // len > 16 - c = hashLen16(binary.LittleEndian.Uint64(s[slen-8:slen-8+8])+k1, a) - d = hashLen16(b+uint64(slen), c+binary.LittleEndian.Uint64(s[slen-16:slen-16+8])) - a += d - for { - a ^= shiftMix(binary.LittleEndian.Uint64(s[0:0+8])*k1) * k1 - a *= k1 - b ^= a - c ^= shiftMix(binary.LittleEndian.Uint64(s[8:8+8])*k1) * k1 - c *= k1 - d ^= c - s = s[16:] - l -= 16 - if l <= 0 { - break - } - } - } - a = hashLen16(a, c) - b = hashLen16(d, b) - return uint128{a ^ b, hashLen16(b, a)} -} - -func cityHash128WithSeed(s []byte, seed uint128) uint128 { - slen := len(s) - if slen < 128 { - return cityMurmur(s, seed) - } - - endIdx := ((slen - 1) / 128) * 128 - lastBlockIdx := endIdx + ((slen - 1) & 127) - 127 - last := s[lastBlockIdx:] - - // We expect len >= 128 to be the common case. Keep 56 bytes of state: - // v, w, x, y, and z. - var v1, v2 uint64 - var w1, w2 uint64 - x := seed.lo - y := seed.hi - z := uint64(slen) * k1 - v1 = bits.RotateLeft64(y^k1, -49)*k1 + binary.LittleEndian.Uint64(s[0:0+8]) - v2 = bits.RotateLeft64(v1, -42)*k1 + binary.LittleEndian.Uint64(s[8:8+8]) - w1 = bits.RotateLeft64(y+z, -35)*k1 + x - w2 = bits.RotateLeft64(x+binary.LittleEndian.Uint64(s[88:88+8]), -53) * k1 - - // This is the same inner loop as CityHash64(), manually unrolled. 
- for { - x = bits.RotateLeft64(x+y+v1+binary.LittleEndian.Uint64(s[8:8+8]), -37) * k1 - y = bits.RotateLeft64(y+v2+binary.LittleEndian.Uint64(s[48:48+8]), -42) * k1 - x ^= w2 - y += v1 + binary.LittleEndian.Uint64(s[40:40+8]) - z = bits.RotateLeft64(z+w1, -33) * k1 - v1, v2 = weakHashLen32WithSeeds(s, v2*k1, x+w1) - w1, w2 = weakHashLen32WithSeeds(s[32:], z+w2, y+binary.LittleEndian.Uint64(s[16:16+8])) - z, x = x, z - s = s[64:] - x = bits.RotateLeft64(x+y+v1+binary.LittleEndian.Uint64(s[8:8+8]), -37) * k1 - y = bits.RotateLeft64(y+v2+binary.LittleEndian.Uint64(s[48:48+8]), -42) * k1 - x ^= w2 - y += v1 + binary.LittleEndian.Uint64(s[40:40+8]) - z = bits.RotateLeft64(z+w1, -33) * k1 - v1, v2 = weakHashLen32WithSeeds(s, v2*k1, x+w1) - w1, w2 = weakHashLen32WithSeeds(s[32:], z+w2, y+binary.LittleEndian.Uint64(s[16:16+8])) - z, x = x, z - s = s[64:] - slen -= 128 - if slen < 128 { - break - } - } - x += bits.RotateLeft64(v1+z, -49) * k0 - y = y*k0 + bits.RotateLeft64(w2, -37) - z = z*k0 + bits.RotateLeft64(w1, -27) - w1 *= 9 - v1 *= k0 - // If 0 < len < 128, hash up to 4 chunks of 32 bytes each from the end of s. - for tailDone := 0; tailDone < slen; { - tailDone += 32 - y = bits.RotateLeft64(x+y, -42)*k0 + v2 - w1 += binary.LittleEndian.Uint64(last[128-tailDone+16 : 128-tailDone+16+8]) - x = x*k0 + w1 - z += w2 + binary.LittleEndian.Uint64(last[128-tailDone:128-tailDone+8]) - w2 += v1 - v1, v2 = weakHashLen32WithSeeds(last[128-tailDone:], v1+z, v2) - v1 *= k0 - } - - // At this point our 56 bytes of state should contain more than - // enough information for a strong 128-bit hash. We use two - // different 56-byte-to-8-byte hashes to get a 16-byte final result. - x = hashLen16(x, v1) - y = hashLen16(y+z, w1) - return uint128{hashLen16(x+v2, w2) + y, - hashLen16(x+w2, y+v2)} -} - -func cityHash128(s []byte) uint128 { - slen := len(s) - if slen >= 16 { - return cityHash128WithSeed(s[16:], uint128{binary.LittleEndian.Uint64(s[0 : 0+8]), binary.LittleEndian.Uint64(s[8:8+8]) + k0}) - } - return cityHash128WithSeed(s, uint128{k0, k1}) -} - -// Fingerprint128 is a 128-bit fingerprint function for byte-slices -func Fingerprint128(s []byte) (lo, hi uint64) { - h := cityHash128(s) - return h.lo, h.hi -} - -// Hash128 is a 128-bit hash function for byte-slices -func Hash128(s []byte) (lo, hi uint64) { - return Fingerprint128(s) -} - -// Hash128WithSeed is a 128-bit hash function for byte-slices and a 128-bit seed -func Hash128WithSeed(s []byte, seed0, seed1 uint64) (lo, hi uint64) { - h := cityHash128WithSeed(s, uint128{seed0, seed1}) - return h.lo, h.hi -} diff --git a/vendor/github.com/dgryski/go-farm/farmhashmk.go b/vendor/github.com/dgryski/go-farm/farmhashmk.go deleted file mode 100644 index 8e4c7428..00000000 --- a/vendor/github.com/dgryski/go-farm/farmhashmk.go +++ /dev/null @@ -1,102 +0,0 @@ -package farm - -import ( - "encoding/binary" - "math/bits" -) - -func hash32Len5to12(s []byte, seed uint32) uint32 { - slen := len(s) - a := uint32(len(s)) - b := uint32(len(s) * 5) - c := uint32(9) - d := b + seed - a += binary.LittleEndian.Uint32(s[0 : 0+4]) - b += binary.LittleEndian.Uint32(s[slen-4 : slen-4+4]) - c += binary.LittleEndian.Uint32(s[((slen >> 1) & 4) : ((slen>>1)&4)+4]) - return fmix(seed ^ mur(c, mur(b, mur(a, d)))) -} - -// Hash32 hashes a byte slice and returns a uint32 hash value -func Hash32(s []byte) uint32 { - - slen := len(s) - - if slen <= 24 { - if slen <= 12 { - if slen <= 4 { - return hash32Len0to4(s, 0) - } - return hash32Len5to12(s, 0) - } - return hash32Len13to24Seed(s, 0) 
- } - - // len > 24 - h := uint32(slen) - g := c1 * uint32(slen) - f := g - a0 := bits.RotateLeft32(binary.LittleEndian.Uint32(s[slen-4:slen-4+4])*c1, -17) * c2 - a1 := bits.RotateLeft32(binary.LittleEndian.Uint32(s[slen-8:slen-8+4])*c1, -17) * c2 - a2 := bits.RotateLeft32(binary.LittleEndian.Uint32(s[slen-16:slen-16+4])*c1, -17) * c2 - a3 := bits.RotateLeft32(binary.LittleEndian.Uint32(s[slen-12:slen-12+4])*c1, -17) * c2 - a4 := bits.RotateLeft32(binary.LittleEndian.Uint32(s[slen-20:slen-20+4])*c1, -17) * c2 - h ^= a0 - h = bits.RotateLeft32(h, -19) - h = h*5 + 0xe6546b64 - h ^= a2 - h = bits.RotateLeft32(h, -19) - h = h*5 + 0xe6546b64 - g ^= a1 - g = bits.RotateLeft32(g, -19) - g = g*5 + 0xe6546b64 - g ^= a3 - g = bits.RotateLeft32(g, -19) - g = g*5 + 0xe6546b64 - f += a4 - f = bits.RotateLeft32(f, -19) + 113 - for len(s) > 20 { - a := binary.LittleEndian.Uint32(s[0 : 0+4]) - b := binary.LittleEndian.Uint32(s[4 : 4+4]) - c := binary.LittleEndian.Uint32(s[8 : 8+4]) - d := binary.LittleEndian.Uint32(s[12 : 12+4]) - e := binary.LittleEndian.Uint32(s[16 : 16+4]) - h += a - g += b - f += c - h = mur(d, h) + e - g = mur(c, g) + a - f = mur(b+e*c1, f) + d - f += g - g += f - s = s[20:] - } - g = bits.RotateLeft32(g, -11) * c1 - g = bits.RotateLeft32(g, -17) * c1 - f = bits.RotateLeft32(f, -11) * c1 - f = bits.RotateLeft32(f, -17) * c1 - h = bits.RotateLeft32(h+g, -19) - h = h*5 + 0xe6546b64 - h = bits.RotateLeft32(h, -17) * c1 - h = bits.RotateLeft32(h+f, -19) - h = h*5 + 0xe6546b64 - h = bits.RotateLeft32(h, -17) * c1 - return h -} - -// Hash32WithSeed hashes a byte slice and a uint32 seed and returns a uint32 hash value -func Hash32WithSeed(s []byte, seed uint32) uint32 { - slen := len(s) - - if slen <= 24 { - if slen >= 13 { - return hash32Len13to24Seed(s, seed*c1) - } - if slen >= 5 { - return hash32Len5to12(s, seed) - } - return hash32Len0to4(s, seed) - } - h := hash32Len13to24Seed(s[:24], seed^uint32(slen)) - return mur(Hash32(s[24:])+seed, h) -} diff --git a/vendor/github.com/dgryski/go-farm/farmhashna.go b/vendor/github.com/dgryski/go-farm/farmhashna.go deleted file mode 100644 index ac62edd3..00000000 --- a/vendor/github.com/dgryski/go-farm/farmhashna.go +++ /dev/null @@ -1,161 +0,0 @@ -package farm - -import ( - "encoding/binary" - "math/bits" -) - -func shiftMix(val uint64) uint64 { - return val ^ (val >> 47) -} - -func hashLen16(u, v uint64) uint64 { - return hash128to64(uint128{u, v}) -} - -func hashLen16Mul(u, v, mul uint64) uint64 { - // Murmur-inspired hashing. - a := (u ^ v) * mul - a ^= (a >> 47) - b := (v ^ a) * mul - b ^= (b >> 47) - b *= mul - return b -} - -func hashLen0to16(s []byte) uint64 { - slen := uint64(len(s)) - if slen >= 8 { - mul := k2 + slen*2 - a := binary.LittleEndian.Uint64(s[0:0+8]) + k2 - b := binary.LittleEndian.Uint64(s[int(slen-8) : int(slen-8)+8]) - c := bits.RotateLeft64(b, -37)*mul + a - d := (bits.RotateLeft64(a, -25) + b) * mul - return hashLen16Mul(c, d, mul) - } - - if slen >= 4 { - mul := k2 + slen*2 - a := binary.LittleEndian.Uint32(s[0 : 0+4]) - return hashLen16Mul(slen+(uint64(a)<<3), uint64(binary.LittleEndian.Uint32(s[int(slen-4):int(slen-4)+4])), mul) - } - if slen > 0 { - a := s[0] - b := s[slen>>1] - c := s[slen-1] - y := uint32(a) + (uint32(b) << 8) - z := uint32(slen) + (uint32(c) << 2) - return shiftMix(uint64(y)*k2^uint64(z)*k0) * k2 - } - return k2 -} - -// This probably works well for 16-byte strings as well, but it may be overkill -// in that case. 
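The farmhashna.go helpers above (shiftMix, hashLen16Mul, hashLen0to16) all funnel through the same Murmur-inspired two-word mix. Extracted with the k2 prime from basics.go, it runs standalone:

```go
package main

import "fmt"

const k2 uint64 = 0x9ae16a3b2f90404f // prime from basics.go

// hashLen16Mul mixes two 64-bit words, lifted from farmhashna.go above.
func hashLen16Mul(u, v, mul uint64) uint64 {
	a := (u ^ v) * mul
	a ^= a >> 47
	b := (v ^ a) * mul
	b ^= b >> 47
	b *= mul
	return b
}

func main() {
	// A one-bit change in either input flips roughly half the output bits.
	fmt.Printf("%#016x\n", hashLen16Mul(1, 2, k2))
	fmt.Printf("%#016x\n", hashLen16Mul(1, 3, k2))
}
```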
-func hashLen17to32(s []byte) uint64 { - slen := len(s) - mul := k2 + uint64(slen*2) - a := binary.LittleEndian.Uint64(s[0:0+8]) * k1 - b := binary.LittleEndian.Uint64(s[8 : 8+8]) - c := binary.LittleEndian.Uint64(s[slen-8:slen-8+8]) * mul - d := binary.LittleEndian.Uint64(s[slen-16:slen-16+8]) * k2 - return hashLen16Mul(bits.RotateLeft64(a+b, -43)+bits.RotateLeft64(c, -30)+d, a+bits.RotateLeft64(b+k2, -18)+c, mul) -} - -// Return a 16-byte hash for 48 bytes. Quick and dirty. -// Callers do best to use "random-looking" values for a and b. -func weakHashLen32WithSeedsWords(w, x, y, z, a, b uint64) (uint64, uint64) { - a += w - b = bits.RotateLeft64(b+a+z, -21) - c := a - a += x - a += y - b += bits.RotateLeft64(a, -44) - return a + z, b + c -} - -// Return a 16-byte hash for s[0] ... s[31], a, and b. Quick and dirty. -func weakHashLen32WithSeeds(s []byte, a, b uint64) (uint64, uint64) { - return weakHashLen32WithSeedsWords(binary.LittleEndian.Uint64(s[0:0+8]), - binary.LittleEndian.Uint64(s[8:8+8]), - binary.LittleEndian.Uint64(s[16:16+8]), - binary.LittleEndian.Uint64(s[24:24+8]), - a, - b) -} - -// Return an 8-byte hash for 33 to 64 bytes. -func hashLen33to64(s []byte) uint64 { - slen := len(s) - mul := k2 + uint64(slen)*2 - a := binary.LittleEndian.Uint64(s[0:0+8]) * k2 - b := binary.LittleEndian.Uint64(s[8 : 8+8]) - c := binary.LittleEndian.Uint64(s[slen-8:slen-8+8]) * mul - d := binary.LittleEndian.Uint64(s[slen-16:slen-16+8]) * k2 - y := bits.RotateLeft64(a+b, -43) + bits.RotateLeft64(c, -30) + d - z := hashLen16Mul(y, a+bits.RotateLeft64(b+k2, -18)+c, mul) - e := binary.LittleEndian.Uint64(s[16:16+8]) * mul - f := binary.LittleEndian.Uint64(s[24 : 24+8]) - g := (y + binary.LittleEndian.Uint64(s[slen-32:slen-32+8])) * mul - h := (z + binary.LittleEndian.Uint64(s[slen-24:slen-24+8])) * mul - return hashLen16Mul(bits.RotateLeft64(e+f, -43)+bits.RotateLeft64(g, -30)+h, e+bits.RotateLeft64(f+a, -18)+g, mul) -} - -func naHash64(s []byte) uint64 { - slen := len(s) - var seed uint64 = 81 - if slen <= 32 { - if slen <= 16 { - return hashLen0to16(s) - } - return hashLen17to32(s) - } - if slen <= 64 { - return hashLen33to64(s) - } - // For strings over 64 bytes we loop. - // Internal state consists of 56 bytes: v, w, x, y, and z. - v := uint128{0, 0} - w := uint128{0, 0} - x := seed*k2 + binary.LittleEndian.Uint64(s[0:0+8]) - y := seed*k1 + 113 - z := shiftMix(y*k2+113) * k2 - // Set end so that after the loop we have 1 to 64 bytes left to process. - endIdx := ((slen - 1) / 64) * 64 - last64Idx := endIdx + ((slen - 1) & 63) - 63 - last64 := s[last64Idx:] - for len(s) > 64 { - x = bits.RotateLeft64(x+y+v.lo+binary.LittleEndian.Uint64(s[8:8+8]), -37) * k1 - y = bits.RotateLeft64(y+v.hi+binary.LittleEndian.Uint64(s[48:48+8]), -42) * k1 - x ^= w.hi - y += v.lo + binary.LittleEndian.Uint64(s[40:40+8]) - z = bits.RotateLeft64(z+w.lo, -33) * k1 - v.lo, v.hi = weakHashLen32WithSeeds(s, v.hi*k1, x+w.lo) - w.lo, w.hi = weakHashLen32WithSeeds(s[32:], z+w.hi, y+binary.LittleEndian.Uint64(s[16:16+8])) - x, z = z, x - s = s[64:] - } - mul := k1 + ((z & 0xff) << 1) - // Make s point to the last 64 bytes of input. 
- s = last64 - w.lo += (uint64(slen-1) & 63) - v.lo += w.lo - w.lo += v.lo - x = bits.RotateLeft64(x+y+v.lo+binary.LittleEndian.Uint64(s[8:8+8]), -37) * mul - y = bits.RotateLeft64(y+v.hi+binary.LittleEndian.Uint64(s[48:48+8]), -42) * mul - x ^= w.hi * 9 - y += v.lo*9 + binary.LittleEndian.Uint64(s[40:40+8]) - z = bits.RotateLeft64(z+w.lo, -33) * mul - v.lo, v.hi = weakHashLen32WithSeeds(s, v.hi*mul, x+w.lo) - w.lo, w.hi = weakHashLen32WithSeeds(s[32:], z+w.hi, y+binary.LittleEndian.Uint64(s[16:16+8])) - x, z = z, x - return hashLen16Mul(hashLen16Mul(v.lo, w.lo, mul)+shiftMix(y)*k0+z, hashLen16Mul(v.hi, w.hi, mul)+x, mul) -} - -func naHash64WithSeed(s []byte, seed uint64) uint64 { - return naHash64WithSeeds(s, k2, seed) -} - -func naHash64WithSeeds(s []byte, seed0, seed1 uint64) uint64 { - return hashLen16(naHash64(s)-seed0, seed1) -} diff --git a/vendor/github.com/dgryski/go-farm/farmhashuo.go b/vendor/github.com/dgryski/go-farm/farmhashuo.go deleted file mode 100644 index 7328fc70..00000000 --- a/vendor/github.com/dgryski/go-farm/farmhashuo.go +++ /dev/null @@ -1,122 +0,0 @@ -package farm - -import ( - "encoding/binary" - "math/bits" -) - -func uoH(x, y, mul uint64, r uint) uint64 { - a := (x ^ y) * mul - a ^= (a >> 47) - b := (y ^ a) * mul - return bits.RotateLeft64(b, -int(r)) * mul -} - -// Hash64WithSeeds hashes a byte slice and two uint64 seeds and returns a uint64 hash value -func Hash64WithSeeds(s []byte, seed0, seed1 uint64) uint64 { - slen := len(s) - if slen <= 64 { - return naHash64WithSeeds(s, seed0, seed1) - } - - // For strings over 64 bytes we loop. - // Internal state consists of 64 bytes: u, v, w, x, y, and z. - x := seed0 - y := seed1*k2 + 113 - z := shiftMix(y*k2) * k2 - v := uint128{seed0, seed1} - var w uint128 - u := x - z - x *= k2 - mul := k2 + (u & 0x82) - - // Set end so that after the loop we have 1 to 64 bytes left to process. - endIdx := ((slen - 1) / 64) * 64 - last64Idx := endIdx + ((slen - 1) & 63) - 63 - last64 := s[last64Idx:] - - for len(s) > 64 { - a0 := binary.LittleEndian.Uint64(s[0 : 0+8]) - a1 := binary.LittleEndian.Uint64(s[8 : 8+8]) - a2 := binary.LittleEndian.Uint64(s[16 : 16+8]) - a3 := binary.LittleEndian.Uint64(s[24 : 24+8]) - a4 := binary.LittleEndian.Uint64(s[32 : 32+8]) - a5 := binary.LittleEndian.Uint64(s[40 : 40+8]) - a6 := binary.LittleEndian.Uint64(s[48 : 48+8]) - a7 := binary.LittleEndian.Uint64(s[56 : 56+8]) - x += a0 + a1 - y += a2 - z += a3 - v.lo += a4 - v.hi += a5 + a1 - w.lo += a6 - w.hi += a7 - - x = bits.RotateLeft64(x, -26) - x *= 9 - y = bits.RotateLeft64(y, -29) - z *= mul - v.lo = bits.RotateLeft64(v.lo, -33) - v.hi = bits.RotateLeft64(v.hi, -30) - w.lo ^= x - w.lo *= 9 - z = bits.RotateLeft64(z, -32) - z += w.hi - w.hi += z - z *= 9 - u, y = y, u - - z += a0 + a6 - v.lo += a2 - v.hi += a3 - w.lo += a4 - w.hi += a5 + a6 - x += a1 - y += a7 - - y += v.lo - v.lo += x - y - v.hi += w.lo - w.lo += v.hi - w.hi += x - y - x += w.hi - w.hi = bits.RotateLeft64(w.hi, -34) - u, z = z, u - s = s[64:] - } - // Make s point to the last 64 bytes of input. 
- s = last64 - u *= 9 - v.hi = bits.RotateLeft64(v.hi, -28) - v.lo = bits.RotateLeft64(v.lo, -20) - w.lo += (uint64(slen-1) & 63) - u += y - y += u - x = bits.RotateLeft64(y-x+v.lo+binary.LittleEndian.Uint64(s[8:8+8]), -37) * mul - y = bits.RotateLeft64(y^v.hi^binary.LittleEndian.Uint64(s[48:48+8]), -42) * mul - x ^= w.hi * 9 - y += v.lo + binary.LittleEndian.Uint64(s[40:40+8]) - z = bits.RotateLeft64(z+w.lo, -33) * mul - v.lo, v.hi = weakHashLen32WithSeeds(s, v.hi*mul, x+w.lo) - w.lo, w.hi = weakHashLen32WithSeeds(s[32:], z+w.hi, y+binary.LittleEndian.Uint64(s[16:16+8])) - return uoH(hashLen16Mul(v.lo+x, w.lo^y, mul)+z-u, - uoH(v.hi+y, w.hi+z, k2, 30)^x, - k2, - 31) -} - -// Hash64WithSeed hashes a byte slice and a uint64 seed and returns a uint64 hash value -func Hash64WithSeed(s []byte, seed uint64) uint64 { - if len(s) <= 64 { - return naHash64WithSeed(s, seed) - } - return Hash64WithSeeds(s, 0, seed) -} - -// Hash64 hashes a byte slice and returns a uint64 hash value -func uoHash64(s []byte) uint64 { - if len(s) <= 64 { - return naHash64(s) - } - return Hash64WithSeeds(s, 81, 0) -} diff --git a/vendor/github.com/dgryski/go-farm/farmhashxo.go b/vendor/github.com/dgryski/go-farm/farmhashxo.go deleted file mode 100644 index 9234212a..00000000 --- a/vendor/github.com/dgryski/go-farm/farmhashxo.go +++ /dev/null @@ -1,104 +0,0 @@ -package farm - -import ( - "encoding/binary" - "math/bits" -) - -func h32(s []byte, mul uint64) uint64 { - slen := len(s) - a := binary.LittleEndian.Uint64(s[0:0+8]) * k1 - b := binary.LittleEndian.Uint64(s[8 : 8+8]) - c := binary.LittleEndian.Uint64(s[slen-8:slen-8+8]) * mul - d := binary.LittleEndian.Uint64(s[slen-16:slen-16+8]) * k2 - u := bits.RotateLeft64(a+b, -43) + bits.RotateLeft64(c, -30) + d - v := a + bits.RotateLeft64(b+k2, -18) + c - a = shiftMix((u ^ v) * mul) - b = shiftMix((v ^ a) * mul) - return b -} - -func h32Seeds(s []byte, mul, seed0, seed1 uint64) uint64 { - slen := len(s) - a := binary.LittleEndian.Uint64(s[0:0+8]) * k1 - b := binary.LittleEndian.Uint64(s[8 : 8+8]) - c := binary.LittleEndian.Uint64(s[slen-8:slen-8+8]) * mul - d := binary.LittleEndian.Uint64(s[slen-16:slen-16+8]) * k2 - u := bits.RotateLeft64(a+b, -43) + bits.RotateLeft64(c, -30) + d + seed0 - v := a + bits.RotateLeft64(b+k2, -18) + c + seed1 - a = shiftMix((u ^ v) * mul) - b = shiftMix((v ^ a) * mul) - return b -} - -func xohashLen33to64(s []byte) uint64 { - slen := len(s) - mul0 := k2 - 30 - mul1 := k2 - 30 + 2*uint64(slen) - - var h0 uint64 - { - s := s[0:32] - mul := mul0 - slen := len(s) - a := binary.LittleEndian.Uint64(s[0:0+8]) * k1 - b := binary.LittleEndian.Uint64(s[8 : 8+8]) - c := binary.LittleEndian.Uint64(s[slen-8:slen-8+8]) * mul - d := binary.LittleEndian.Uint64(s[slen-16:slen-16+8]) * k2 - u := bits.RotateLeft64(a+b, -43) + bits.RotateLeft64(c, -30) + d - v := a + bits.RotateLeft64(b+k2, -18) + c - a = shiftMix((u ^ v) * mul) - b = shiftMix((v ^ a) * mul) - h0 = b - } - - var h1 uint64 - { - s := s[slen-32:] - mul := mul1 - slen := len(s) - a := binary.LittleEndian.Uint64(s[0:0+8]) * k1 - b := binary.LittleEndian.Uint64(s[8 : 8+8]) - c := binary.LittleEndian.Uint64(s[slen-8:slen-8+8]) * mul - d := binary.LittleEndian.Uint64(s[slen-16:slen-16+8]) * k2 - u := bits.RotateLeft64(a+b, -43) + bits.RotateLeft64(c, -30) + d - v := a + bits.RotateLeft64(b+k2, -18) + c - a = shiftMix((u ^ v) * mul) - b = shiftMix((v ^ a) * mul) - h1 = b - } - - r := ((h1 * mul1) + h0) * mul1 - return r -} - -func xohashLen65to96(s []byte) uint64 { - slen := len(s) - - mul0 := k2 - 114 
- mul1 := k2 - 114 + 2*uint64(slen) - h0 := h32(s[:32], mul0) - h1 := h32(s[32:64], mul1) - h2 := h32Seeds(s[slen-32:], mul1, h0, h1) - return (h2*9 + (h0 >> 17) + (h1 >> 21)) * mul1 -} - -func Hash64(s []byte) uint64 { - slen := len(s) - - if slen <= 32 { - if slen <= 16 { - return hashLen0to16(s) - } else { - return hashLen17to32(s) - } - } else if slen <= 64 { - return xohashLen33to64(s) - } else if slen <= 96 { - return xohashLen65to96(s) - } else if slen <= 256 { - return naHash64(s) - } else { - return uoHash64(s) - } -} diff --git a/vendor/github.com/dgryski/go-farm/fp_amd64.s b/vendor/github.com/dgryski/go-farm/fp_amd64.s deleted file mode 100644 index 2b8fa324..00000000 --- a/vendor/github.com/dgryski/go-farm/fp_amd64.s +++ /dev/null @@ -1,951 +0,0 @@ -// Code generated by command: go run asm.go -out=fp_amd64.s -go111=false. DO NOT EDIT. - -// +build amd64,!purego - -#include "textflag.h" - -// func Fingerprint64(s []byte) uint64 -TEXT ·Fingerprint64(SB), NOSPLIT, $0-32 - MOVQ s_base+0(FP), CX - MOVQ s_len+8(FP), AX - CMPQ AX, $0x10 - JG check32 - CMPQ AX, $0x08 - JL check4 - MOVQ (CX), DX - MOVQ AX, BX - SUBQ $0x08, BX - ADDQ CX, BX - MOVQ (BX), BX - MOVQ $0x9ae16a3b2f90404f, BP - ADDQ BP, DX - SHLQ $0x01, AX - ADDQ BP, AX - MOVQ BX, BP - RORQ $0x25, BP - IMULQ AX, BP - ADDQ DX, BP - RORQ $0x19, DX - ADDQ BX, DX - IMULQ AX, DX - XORQ DX, BP - IMULQ AX, BP - MOVQ BP, BX - SHRQ $0x2f, BX - XORQ BP, BX - XORQ BX, DX - IMULQ AX, DX - MOVQ DX, BX - SHRQ $0x2f, BX - XORQ DX, BX - IMULQ AX, BX - MOVQ BX, ret+24(FP) - RET - -check4: - CMPQ AX, $0x04 - JL check0 - MOVQ $0x9ae16a3b2f90404f, DX - MOVQ AX, BX - SHLQ $0x01, BX - ADDQ DX, BX - MOVL (CX), SI - SHLQ $0x03, SI - ADDQ AX, SI - SUBQ $0x04, AX - ADDQ AX, CX - MOVL (CX), DI - XORQ DI, SI - IMULQ BX, SI - MOVQ SI, DX - SHRQ $0x2f, DX - XORQ SI, DX - XORQ DX, DI - IMULQ BX, DI - MOVQ DI, DX - SHRQ $0x2f, DX - XORQ DI, DX - IMULQ BX, DX - MOVQ DX, ret+24(FP) - RET - -check0: - TESTQ AX, AX - JZ empty - MOVBQZX (CX), DX - MOVQ AX, BX - SHRQ $0x01, BX - ADDQ CX, BX - MOVBQZX (BX), BP - MOVQ AX, BX - SUBQ $0x01, BX - ADDQ CX, BX - MOVBQZX (BX), BX - SHLQ $0x08, BP - ADDQ BP, DX - SHLQ $0x02, BX - ADDQ BX, AX - MOVQ $0xc3a5c85c97cb3127, BX - IMULQ BX, AX - MOVQ $0x9ae16a3b2f90404f, BX - IMULQ BX, DX - XORQ DX, AX - MOVQ AX, DX - SHRQ $0x2f, DX - XORQ AX, DX - IMULQ BX, DX - MOVQ DX, ret+24(FP) - RET - -empty: - MOVQ $0x9ae16a3b2f90404f, DX - MOVQ DX, ret+24(FP) - RET - -check32: - CMPQ AX, $0x20 - JG check64 - MOVQ AX, DX - SHLQ $0x01, DX - MOVQ $0x9ae16a3b2f90404f, BX - ADDQ BX, DX - MOVQ (CX), BP - MOVQ $0xb492b66fbe98f273, SI - IMULQ SI, BP - MOVQ 8(CX), SI - MOVQ AX, DI - SUBQ $0x10, DI - ADDQ CX, DI - MOVQ 8(DI), R12 - IMULQ DX, R12 - MOVQ (DI), DI - IMULQ BX, DI - MOVQ BP, R13 - ADDQ SI, R13 - RORQ $0x2b, R13 - ADDQ DI, R13 - MOVQ R12, DI - RORQ $0x1e, DI - ADDQ DI, R13 - ADDQ R12, BP - ADDQ BX, SI - RORQ $0x12, SI - ADDQ SI, BP - XORQ BP, R13 - IMULQ DX, R13 - MOVQ R13, BX - SHRQ $0x2f, BX - XORQ R13, BX - XORQ BX, BP - IMULQ DX, BP - MOVQ BP, BX - SHRQ $0x2f, BX - XORQ BP, BX - IMULQ DX, BX - MOVQ BX, ret+24(FP) - RET - -check64: - CMPQ AX, $0x40 - JG long - MOVQ AX, DX - SHLQ $0x01, DX - MOVQ $0x9ae16a3b2f90404f, BX - ADDQ BX, DX - MOVQ (CX), BP - IMULQ BX, BP - MOVQ 8(CX), SI - MOVQ AX, DI - SUBQ $0x10, DI - ADDQ CX, DI - MOVQ 8(DI), R12 - IMULQ DX, R12 - MOVQ (DI), DI - IMULQ BX, DI - MOVQ BP, R13 - ADDQ SI, R13 - RORQ $0x2b, R13 - ADDQ DI, R13 - MOVQ R12, DI - RORQ $0x1e, DI - ADDQ DI, R13 - ADDQ BP, R12 - ADDQ BX, SI - RORQ 
$0x12, SI - ADDQ SI, R12 - MOVQ R13, BX - XORQ R12, BX - IMULQ DX, BX - MOVQ BX, SI - SHRQ $0x2f, SI - XORQ BX, SI - XORQ SI, R12 - IMULQ DX, R12 - MOVQ R12, BX - SHRQ $0x2f, BX - XORQ R12, BX - IMULQ DX, BX - MOVQ 16(CX), SI - IMULQ DX, SI - MOVQ 24(CX), DI - MOVQ AX, R12 - SUBQ $0x20, R12 - ADDQ CX, R12 - MOVQ (R12), R14 - ADDQ R13, R14 - IMULQ DX, R14 - MOVQ 8(R12), R12 - ADDQ BX, R12 - IMULQ DX, R12 - MOVQ SI, BX - ADDQ DI, BX - RORQ $0x2b, BX - ADDQ R12, BX - MOVQ R14, R12 - RORQ $0x1e, R12 - ADDQ R12, BX - ADDQ R14, SI - ADDQ BP, DI - RORQ $0x12, DI - ADDQ DI, SI - XORQ SI, BX - IMULQ DX, BX - MOVQ BX, BP - SHRQ $0x2f, BP - XORQ BX, BP - XORQ BP, SI - IMULQ DX, SI - MOVQ SI, BX - SHRQ $0x2f, BX - XORQ SI, BX - IMULQ DX, BX - MOVQ BX, ret+24(FP) - RET - -long: - XORQ R8, R8 - XORQ R9, R9 - XORQ R10, R10 - XORQ R11, R11 - MOVQ $0x01529cba0ca458ff, DX - ADDQ (CX), DX - MOVQ $0x226bb95b4e64b6d4, BX - MOVQ $0x134a747f856d0526, BP - MOVQ AX, SI - SUBQ $0x01, SI - MOVQ $0xffffffffffffffc0, DI - ANDQ DI, SI - MOVQ AX, DI - SUBQ $0x01, DI - ANDQ $0x3f, DI - SUBQ $0x3f, DI - ADDQ SI, DI - MOVQ DI, SI - ADDQ CX, SI - MOVQ AX, DI - -loop: - MOVQ $0xb492b66fbe98f273, R12 - ADDQ BX, DX - ADDQ R8, DX - ADDQ 8(CX), DX - RORQ $0x25, DX - IMULQ R12, DX - ADDQ R9, BX - ADDQ 48(CX), BX - RORQ $0x2a, BX - IMULQ R12, BX - XORQ R11, DX - ADDQ R8, BX - ADDQ 40(CX), BX - ADDQ R10, BP - RORQ $0x21, BP - IMULQ R12, BP - IMULQ R12, R9 - MOVQ DX, R8 - ADDQ R10, R8 - ADDQ (CX), R9 - ADDQ R9, R8 - ADDQ 24(CX), R8 - RORQ $0x15, R8 - MOVQ R9, R10 - ADDQ 8(CX), R9 - ADDQ 16(CX), R9 - MOVQ R9, R13 - RORQ $0x2c, R13 - ADDQ R13, R8 - ADDQ 24(CX), R9 - ADDQ R10, R8 - XCHGQ R9, R8 - ADDQ BP, R11 - MOVQ BX, R10 - ADDQ 16(CX), R10 - ADDQ 32(CX), R11 - ADDQ R11, R10 - ADDQ 56(CX), R10 - RORQ $0x15, R10 - MOVQ R11, R13 - ADDQ 40(CX), R11 - ADDQ 48(CX), R11 - MOVQ R11, R14 - RORQ $0x2c, R14 - ADDQ R14, R10 - ADDQ 56(CX), R11 - ADDQ R13, R10 - XCHGQ R11, R10 - XCHGQ BP, DX - ADDQ $0x40, CX - SUBQ $0x40, DI - CMPQ DI, $0x40 - JG loop - MOVQ SI, CX - MOVQ BP, DI - ANDQ $0xff, DI - SHLQ $0x01, DI - ADDQ R12, DI - MOVQ SI, CX - SUBQ $0x01, AX - ANDQ $0x3f, AX - ADDQ AX, R10 - ADDQ R10, R8 - ADDQ R8, R10 - ADDQ BX, DX - ADDQ R8, DX - ADDQ 8(CX), DX - RORQ $0x25, DX - IMULQ DI, DX - ADDQ R9, BX - ADDQ 48(CX), BX - RORQ $0x2a, BX - IMULQ DI, BX - MOVQ $0x00000009, AX - IMULQ R11, AX - XORQ AX, DX - MOVQ $0x00000009, AX - IMULQ R8, AX - ADDQ AX, BX - ADDQ 40(CX), BX - ADDQ R10, BP - RORQ $0x21, BP - IMULQ DI, BP - IMULQ DI, R9 - MOVQ DX, R8 - ADDQ R10, R8 - ADDQ (CX), R9 - ADDQ R9, R8 - ADDQ 24(CX), R8 - RORQ $0x15, R8 - MOVQ R9, AX - ADDQ 8(CX), R9 - ADDQ 16(CX), R9 - MOVQ R9, SI - RORQ $0x2c, SI - ADDQ SI, R8 - ADDQ 24(CX), R9 - ADDQ AX, R8 - XCHGQ R9, R8 - ADDQ BP, R11 - MOVQ BX, R10 - ADDQ 16(CX), R10 - ADDQ 32(CX), R11 - ADDQ R11, R10 - ADDQ 56(CX), R10 - RORQ $0x15, R10 - MOVQ R11, AX - ADDQ 40(CX), R11 - ADDQ 48(CX), R11 - MOVQ R11, SI - RORQ $0x2c, SI - ADDQ SI, R10 - ADDQ 56(CX), R11 - ADDQ AX, R10 - XCHGQ R11, R10 - XCHGQ BP, DX - XORQ R10, R8 - IMULQ DI, R8 - MOVQ R8, AX - SHRQ $0x2f, AX - XORQ R8, AX - XORQ AX, R10 - IMULQ DI, R10 - MOVQ R10, AX - SHRQ $0x2f, AX - XORQ R10, AX - IMULQ DI, AX - ADDQ BP, AX - MOVQ BX, CX - SHRQ $0x2f, CX - XORQ BX, CX - MOVQ $0xc3a5c85c97cb3127, BX - IMULQ BX, CX - ADDQ CX, AX - XORQ R11, R9 - IMULQ DI, R9 - MOVQ R9, CX - SHRQ $0x2f, CX - XORQ R9, CX - XORQ CX, R11 - IMULQ DI, R11 - MOVQ R11, CX - SHRQ $0x2f, CX - XORQ R11, CX - IMULQ DI, CX - ADDQ DX, CX - XORQ CX, AX - IMULQ DI, AX - MOVQ 
AX, DX - SHRQ $0x2f, DX - XORQ AX, DX - XORQ DX, CX - IMULQ DI, CX - MOVQ CX, AX - SHRQ $0x2f, AX - XORQ CX, AX - IMULQ DI, AX - MOVQ AX, ret+24(FP) - RET - -// func Fingerprint32(s []byte) uint32 -TEXT ·Fingerprint32(SB), NOSPLIT, $0-28 - MOVQ s_base+0(FP), AX - MOVQ s_len+8(FP), CX - CMPQ CX, $0x18 - JG long - CMPQ CX, $0x0c - JG hash_13_24 - CMPQ CX, $0x04 - JG hash_5_12 - XORL DX, DX - MOVL $0x00000009, BX - TESTQ CX, CX - JZ done - MOVQ CX, BP - MOVL $0xcc9e2d51, DI - IMULL DI, DX - MOVBLSX (AX), SI - ADDL SI, DX - XORL DX, BX - SUBQ $0x01, BP - TESTQ BP, BP - JZ done - IMULL DI, DX - MOVBLSX 1(AX), SI - ADDL SI, DX - XORL DX, BX - SUBQ $0x01, BP - TESTQ BP, BP - JZ done - IMULL DI, DX - MOVBLSX 2(AX), SI - ADDL SI, DX - XORL DX, BX - SUBQ $0x01, BP - TESTQ BP, BP - JZ done - IMULL DI, DX - MOVBLSX 3(AX), SI - ADDL SI, DX - XORL DX, BX - SUBQ $0x01, BP - TESTQ BP, BP - JZ done - -done: - MOVL CX, BP - MOVL $0xcc9e2d51, SI - IMULL SI, BP - RORL $0x11, BP - MOVL $0x1b873593, SI - IMULL SI, BP - XORL BP, BX - RORL $0x13, BX - LEAL (BX)(BX*4), BP - LEAL 3864292196(BP), BX - MOVL $0xcc9e2d51, BP - IMULL BP, DX - RORL $0x11, DX - MOVL $0x1b873593, BP - IMULL BP, DX - XORL DX, BX - RORL $0x13, BX - LEAL (BX)(BX*4), DX - LEAL 3864292196(DX), BX - MOVL BX, DX - SHRL $0x10, DX - XORL DX, BX - MOVL $0x85ebca6b, DX - IMULL DX, BX - MOVL BX, DX - SHRL $0x0d, DX - XORL DX, BX - MOVL $0xc2b2ae35, DX - IMULL DX, BX - MOVL BX, DX - SHRL $0x10, DX - XORL DX, BX - MOVL BX, ret+24(FP) - RET - -hash_5_12: - MOVL CX, DX - MOVL DX, BX - SHLL $0x02, BX - ADDL DX, BX - MOVL $0x00000009, BP - MOVL BX, SI - ADDL (AX), DX - MOVQ CX, DI - SUBQ $0x04, DI - ADDQ AX, DI - ADDL (DI), BX - MOVQ CX, DI - SHRQ $0x01, DI - ANDQ $0x04, DI - ADDQ AX, DI - ADDL (DI), BP - MOVL $0xcc9e2d51, DI - IMULL DI, DX - RORL $0x11, DX - MOVL $0x1b873593, DI - IMULL DI, DX - XORL DX, SI - RORL $0x13, SI - LEAL (SI)(SI*4), DX - LEAL 3864292196(DX), SI - MOVL $0xcc9e2d51, DX - IMULL DX, BX - RORL $0x11, BX - MOVL $0x1b873593, DX - IMULL DX, BX - XORL BX, SI - RORL $0x13, SI - LEAL (SI)(SI*4), BX - LEAL 3864292196(BX), SI - MOVL $0xcc9e2d51, DX - IMULL DX, BP - RORL $0x11, BP - MOVL $0x1b873593, DX - IMULL DX, BP - XORL BP, SI - RORL $0x13, SI - LEAL (SI)(SI*4), BP - LEAL 3864292196(BP), SI - MOVL SI, DX - SHRL $0x10, DX - XORL DX, SI - MOVL $0x85ebca6b, DX - IMULL DX, SI - MOVL SI, DX - SHRL $0x0d, DX - XORL DX, SI - MOVL $0xc2b2ae35, DX - IMULL DX, SI - MOVL SI, DX - SHRL $0x10, DX - XORL DX, SI - MOVL SI, ret+24(FP) - RET - -hash_13_24: - MOVQ CX, DX - SHRQ $0x01, DX - ADDQ AX, DX - MOVL -4(DX), BX - MOVL 4(AX), BP - MOVQ CX, SI - ADDQ AX, SI - MOVL -8(SI), DI - MOVL (DX), DX - MOVL (AX), R8 - MOVL -4(SI), SI - MOVL $0xcc9e2d51, R9 - IMULL DX, R9 - ADDL CX, R9 - RORL $0x0c, BX - ADDL SI, BX - MOVL DI, R10 - MOVL $0xcc9e2d51, R11 - IMULL R11, R10 - RORL $0x11, R10 - MOVL $0x1b873593, R11 - IMULL R11, R10 - XORL R10, R9 - RORL $0x13, R9 - LEAL (R9)(R9*4), R10 - LEAL 3864292196(R10), R9 - ADDL BX, R9 - RORL $0x03, BX - ADDL DI, BX - MOVL $0xcc9e2d51, DI - IMULL DI, R8 - RORL $0x11, R8 - MOVL $0x1b873593, DI - IMULL DI, R8 - XORL R8, R9 - RORL $0x13, R9 - LEAL (R9)(R9*4), R8 - LEAL 3864292196(R8), R9 - ADDL BX, R9 - ADDL SI, BX - RORL $0x0c, BX - ADDL DX, BX - MOVL $0xcc9e2d51, DX - IMULL DX, BP - RORL $0x11, BP - MOVL $0x1b873593, DX - IMULL DX, BP - XORL BP, R9 - RORL $0x13, R9 - LEAL (R9)(R9*4), BP - LEAL 3864292196(BP), R9 - ADDL BX, R9 - MOVL R9, DX - SHRL $0x10, DX - XORL DX, R9 - MOVL $0x85ebca6b, DX - IMULL DX, R9 - MOVL R9, DX - 
SHRL $0x0d, DX - XORL DX, R9 - MOVL $0xc2b2ae35, DX - IMULL DX, R9 - MOVL R9, DX - SHRL $0x10, DX - XORL DX, R9 - MOVL R9, ret+24(FP) - RET - -long: - MOVL CX, DX - MOVL $0xcc9e2d51, BX - IMULL DX, BX - MOVL BX, BP - MOVQ CX, SI - ADDQ AX, SI - MOVL $0xcc9e2d51, DI - MOVL $0x1b873593, R8 - MOVL -4(SI), R9 - IMULL DI, R9 - RORL $0x11, R9 - IMULL R8, R9 - XORL R9, DX - RORL $0x13, DX - MOVL DX, R9 - SHLL $0x02, R9 - ADDL R9, DX - ADDL $0xe6546b64, DX - MOVL -8(SI), R9 - IMULL DI, R9 - RORL $0x11, R9 - IMULL R8, R9 - XORL R9, BX - RORL $0x13, BX - MOVL BX, R9 - SHLL $0x02, R9 - ADDL R9, BX - ADDL $0xe6546b64, BX - MOVL -16(SI), R9 - IMULL DI, R9 - RORL $0x11, R9 - IMULL R8, R9 - XORL R9, DX - RORL $0x13, DX - MOVL DX, R9 - SHLL $0x02, R9 - ADDL R9, DX - ADDL $0xe6546b64, DX - MOVL -12(SI), R9 - IMULL DI, R9 - RORL $0x11, R9 - IMULL R8, R9 - XORL R9, BX - RORL $0x13, BX - MOVL BX, R9 - SHLL $0x02, R9 - ADDL R9, BX - ADDL $0xe6546b64, BX - PREFETCHT0 (AX) - MOVL -20(SI), SI - IMULL DI, SI - RORL $0x11, SI - IMULL R8, SI - ADDL SI, BP - RORL $0x13, BP - ADDL $0x71, BP - -loop80: - CMPQ CX, $0x64 - JL loop20 - PREFETCHT0 20(AX) - MOVL (AX), SI - ADDL SI, DX - MOVL 4(AX), DI - ADDL DI, BX - MOVL 8(AX), R8 - ADDL R8, BP - MOVL 12(AX), R9 - MOVL R9, R11 - MOVL $0xcc9e2d51, R10 - IMULL R10, R11 - RORL $0x11, R11 - MOVL $0x1b873593, R10 - IMULL R10, R11 - XORL R11, DX - RORL $0x13, DX - LEAL (DX)(DX*4), R11 - LEAL 3864292196(R11), DX - MOVL 16(AX), R10 - ADDL R10, DX - MOVL R8, R11 - MOVL $0xcc9e2d51, R8 - IMULL R8, R11 - RORL $0x11, R11 - MOVL $0x1b873593, R8 - IMULL R8, R11 - XORL R11, BX - RORL $0x13, BX - LEAL (BX)(BX*4), R11 - LEAL 3864292196(R11), BX - ADDL SI, BX - MOVL $0xcc9e2d51, SI - IMULL SI, R10 - MOVL R10, R11 - ADDL DI, R11 - MOVL $0xcc9e2d51, SI - IMULL SI, R11 - RORL $0x11, R11 - MOVL $0x1b873593, SI - IMULL SI, R11 - XORL R11, BP - RORL $0x13, BP - LEAL (BP)(BP*4), R11 - LEAL 3864292196(R11), BP - ADDL R9, BP - ADDL BX, BP - ADDL BP, BX - PREFETCHT0 40(AX) - MOVL 20(AX), SI - ADDL SI, DX - MOVL 24(AX), DI - ADDL DI, BX - MOVL 28(AX), R8 - ADDL R8, BP - MOVL 32(AX), R9 - MOVL R9, R11 - MOVL $0xcc9e2d51, R10 - IMULL R10, R11 - RORL $0x11, R11 - MOVL $0x1b873593, R10 - IMULL R10, R11 - XORL R11, DX - RORL $0x13, DX - LEAL (DX)(DX*4), R11 - LEAL 3864292196(R11), DX - MOVL 36(AX), R10 - ADDL R10, DX - MOVL R8, R11 - MOVL $0xcc9e2d51, R8 - IMULL R8, R11 - RORL $0x11, R11 - MOVL $0x1b873593, R8 - IMULL R8, R11 - XORL R11, BX - RORL $0x13, BX - LEAL (BX)(BX*4), R11 - LEAL 3864292196(R11), BX - ADDL SI, BX - MOVL $0xcc9e2d51, SI - IMULL SI, R10 - MOVL R10, R11 - ADDL DI, R11 - MOVL $0xcc9e2d51, SI - IMULL SI, R11 - RORL $0x11, R11 - MOVL $0x1b873593, SI - IMULL SI, R11 - XORL R11, BP - RORL $0x13, BP - LEAL (BP)(BP*4), R11 - LEAL 3864292196(R11), BP - ADDL R9, BP - ADDL BX, BP - ADDL BP, BX - PREFETCHT0 60(AX) - MOVL 40(AX), SI - ADDL SI, DX - MOVL 44(AX), DI - ADDL DI, BX - MOVL 48(AX), R8 - ADDL R8, BP - MOVL 52(AX), R9 - MOVL R9, R11 - MOVL $0xcc9e2d51, R10 - IMULL R10, R11 - RORL $0x11, R11 - MOVL $0x1b873593, R10 - IMULL R10, R11 - XORL R11, DX - RORL $0x13, DX - LEAL (DX)(DX*4), R11 - LEAL 3864292196(R11), DX - MOVL 56(AX), R10 - ADDL R10, DX - MOVL R8, R11 - MOVL $0xcc9e2d51, R8 - IMULL R8, R11 - RORL $0x11, R11 - MOVL $0x1b873593, R8 - IMULL R8, R11 - XORL R11, BX - RORL $0x13, BX - LEAL (BX)(BX*4), R11 - LEAL 3864292196(R11), BX - ADDL SI, BX - MOVL $0xcc9e2d51, SI - IMULL SI, R10 - MOVL R10, R11 - ADDL DI, R11 - MOVL $0xcc9e2d51, SI - IMULL SI, R11 - RORL $0x11, R11 - MOVL 
$0x1b873593, SI - IMULL SI, R11 - XORL R11, BP - RORL $0x13, BP - LEAL (BP)(BP*4), R11 - LEAL 3864292196(R11), BP - ADDL R9, BP - ADDL BX, BP - ADDL BP, BX - PREFETCHT0 80(AX) - MOVL 60(AX), SI - ADDL SI, DX - MOVL 64(AX), DI - ADDL DI, BX - MOVL 68(AX), R8 - ADDL R8, BP - MOVL 72(AX), R9 - MOVL R9, R11 - MOVL $0xcc9e2d51, R10 - IMULL R10, R11 - RORL $0x11, R11 - MOVL $0x1b873593, R10 - IMULL R10, R11 - XORL R11, DX - RORL $0x13, DX - LEAL (DX)(DX*4), R11 - LEAL 3864292196(R11), DX - MOVL 76(AX), R10 - ADDL R10, DX - MOVL R8, R11 - MOVL $0xcc9e2d51, R8 - IMULL R8, R11 - RORL $0x11, R11 - MOVL $0x1b873593, R8 - IMULL R8, R11 - XORL R11, BX - RORL $0x13, BX - LEAL (BX)(BX*4), R11 - LEAL 3864292196(R11), BX - ADDL SI, BX - MOVL $0xcc9e2d51, SI - IMULL SI, R10 - MOVL R10, R11 - ADDL DI, R11 - MOVL $0xcc9e2d51, SI - IMULL SI, R11 - RORL $0x11, R11 - MOVL $0x1b873593, SI - IMULL SI, R11 - XORL R11, BP - RORL $0x13, BP - LEAL (BP)(BP*4), R11 - LEAL 3864292196(R11), BP - ADDL R9, BP - ADDL BX, BP - ADDL BP, BX - ADDQ $0x50, AX - SUBQ $0x50, CX - JMP loop80 - -loop20: - CMPQ CX, $0x14 - JLE after - MOVL (AX), SI - ADDL SI, DX - MOVL 4(AX), DI - ADDL DI, BX - MOVL 8(AX), R8 - ADDL R8, BP - MOVL 12(AX), R9 - MOVL R9, R11 - MOVL $0xcc9e2d51, R10 - IMULL R10, R11 - RORL $0x11, R11 - MOVL $0x1b873593, R10 - IMULL R10, R11 - XORL R11, DX - RORL $0x13, DX - LEAL (DX)(DX*4), R11 - LEAL 3864292196(R11), DX - MOVL 16(AX), R10 - ADDL R10, DX - MOVL R8, R11 - MOVL $0xcc9e2d51, R8 - IMULL R8, R11 - RORL $0x11, R11 - MOVL $0x1b873593, R8 - IMULL R8, R11 - XORL R11, BX - RORL $0x13, BX - LEAL (BX)(BX*4), R11 - LEAL 3864292196(R11), BX - ADDL SI, BX - MOVL $0xcc9e2d51, SI - IMULL SI, R10 - MOVL R10, R11 - ADDL DI, R11 - MOVL $0xcc9e2d51, SI - IMULL SI, R11 - RORL $0x11, R11 - MOVL $0x1b873593, SI - IMULL SI, R11 - XORL R11, BP - RORL $0x13, BP - LEAL (BP)(BP*4), R11 - LEAL 3864292196(R11), BP - ADDL R9, BP - ADDL BX, BP - ADDL BP, BX - ADDQ $0x14, AX - SUBQ $0x14, CX - JMP loop20 - -after: - MOVL $0xcc9e2d51, AX - RORL $0x0b, BX - IMULL AX, BX - RORL $0x11, BX - IMULL AX, BX - RORL $0x0b, BP - IMULL AX, BP - RORL $0x11, BP - IMULL AX, BP - ADDL BX, DX - RORL $0x13, DX - MOVL DX, CX - SHLL $0x02, CX - ADDL CX, DX - ADDL $0xe6546b64, DX - RORL $0x11, DX - IMULL AX, DX - ADDL BP, DX - RORL $0x13, DX - MOVL DX, CX - SHLL $0x02, CX - ADDL CX, DX - ADDL $0xe6546b64, DX - RORL $0x11, DX - IMULL AX, DX - MOVL DX, ret+24(FP) - RET diff --git a/vendor/github.com/dgryski/go-farm/fp_generic.go b/vendor/github.com/dgryski/go-farm/fp_generic.go deleted file mode 100644 index 2cfa1b9d..00000000 --- a/vendor/github.com/dgryski/go-farm/fp_generic.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build !amd64 purego - -package farm - -// Fingerprint64 is a 64-bit fingerprint function for byte-slices -func Fingerprint64(s []byte) uint64 { - return naHash64(s) -} - -// Fingerprint32 is a 32-bit fingerprint function for byte-slices -func Fingerprint32(s []byte) uint32 { - return Hash32(s) -} diff --git a/vendor/github.com/dgryski/go-farm/fp_stub.go b/vendor/github.com/dgryski/go-farm/fp_stub.go deleted file mode 100644 index 94fff8de..00000000 --- a/vendor/github.com/dgryski/go-farm/fp_stub.go +++ /dev/null @@ -1,9 +0,0 @@ -// Code generated by command: go run asm.go -out=fp_amd64.s -stubs=fp_stub.go. DO NOT EDIT. 
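fp_generic.go (above) and fp_stub.go (below) are the two halves of a standard pure-Go/assembly split: the stub declares the functions that fp_amd64.s implements under `amd64,!purego`, while the generic file supplies Go fallbacks for everything else. The old `// +build !amd64 purego` constraint reads as "not amd64, or purego". In the Go 1.17+ syntax the same fallback file would start like this; a sketch, not part of the diff:

```go
//go:build !amd64 || purego

package farm

// Portable fallbacks, built on non-amd64 targets or when the purego
// tag disables the assembly fast path.
func Fingerprint64(s []byte) uint64 { return naHash64(s) }

func Fingerprint32(s []byte) uint32 { return Hash32(s) }
```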
- -// +build amd64,!purego - -package farm - -func Fingerprint64(s []byte) uint64 - -func Fingerprint32(s []byte) uint32 diff --git a/vendor/github.com/dustin/go-humanize/.travis.yml b/vendor/github.com/dustin/go-humanize/.travis.yml deleted file mode 100644 index ba95cdd1..00000000 --- a/vendor/github.com/dustin/go-humanize/.travis.yml +++ /dev/null @@ -1,21 +0,0 @@ -sudo: false -language: go -go: - - 1.3.x - - 1.5.x - - 1.6.x - - 1.7.x - - 1.8.x - - 1.9.x - - master -matrix: - allow_failures: - - go: master - fast_finish: true -install: - - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step). -script: - - go get -t -v ./... - - diff -u <(echo -n) <(gofmt -d -s .) - - go tool vet . - - go test -v -race ./... diff --git a/vendor/github.com/dustin/go-humanize/LICENSE b/vendor/github.com/dustin/go-humanize/LICENSE deleted file mode 100644 index 8d9a94a9..00000000 --- a/vendor/github.com/dustin/go-humanize/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -Copyright (c) 2005-2008 Dustin Sallings - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - diff --git a/vendor/github.com/dustin/go-humanize/README.markdown b/vendor/github.com/dustin/go-humanize/README.markdown deleted file mode 100644 index 7d0b16b3..00000000 --- a/vendor/github.com/dustin/go-humanize/README.markdown +++ /dev/null @@ -1,124 +0,0 @@ -# Humane Units [![Build Status](https://travis-ci.org/dustin/go-humanize.svg?branch=master)](https://travis-ci.org/dustin/go-humanize) [![GoDoc](https://godoc.org/github.com/dustin/go-humanize?status.svg)](https://godoc.org/github.com/dustin/go-humanize) - -Just a few functions for helping humanize times and sizes. - -`go get` it as `github.com/dustin/go-humanize`, import it as -`"github.com/dustin/go-humanize"`, use it as `humanize`. - -See [godoc](https://pkg.go.dev/github.com/dustin/go-humanize) for -complete documentation. - -## Sizes - -This lets you take numbers like `82854982` and convert them to useful -strings like, `83 MB` or `79 MiB` (whichever you prefer). - -Example: - -```go -fmt.Printf("That file is %s.", humanize.Bytes(82854982)) // That file is 83 MB. -``` - -## Times - -This lets you take a `time.Time` and spit it out in relative terms. -For example, `12 seconds ago` or `3 days from now`. - -Example: - -```go -fmt.Printf("This was touched %s.", humanize.Time(someTimeInstance)) // This was touched 7 hours ago. -``` - -Thanks to Kyle Lemons for the time implementation from an IRC -conversation one day. It's pretty neat. 
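The README's one-liners above, assembled into a runnable program (expected output per the README's own examples):

```go
package main

import (
	"fmt"
	"time"

	"github.com/dustin/go-humanize"
)

func main() {
	fmt.Printf("That file is %s.\n", humanize.Bytes(82854982))  // 83 MB
	fmt.Printf("That file is %s.\n", humanize.IBytes(82854982)) // 79 MiB
	fmt.Printf("This was touched %s.\n", humanize.Time(time.Now().Add(-7*time.Hour))) // 7 hours ago
}
```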
-
-## Ordinals
-
-From a [mailing list discussion][odisc] where a user wanted to be able
-to label ordinals.
-
-    0 -> 0th
-    1 -> 1st
-    2 -> 2nd
-    3 -> 3rd
-    4 -> 4th
-    [...]
-
-Example:
-
-```go
-fmt.Printf("You're my %s best friend.", humanize.Ordinal(193)) // You're my 193rd best friend.
-```
-
-## Commas
-
-Want to shove commas into numbers? Be my guest.
-
-    0 -> 0
-    100 -> 100
-    1000 -> 1,000
-    1000000000 -> 1,000,000,000
-    -100000 -> -100,000
-
-Example:
-
-```go
-fmt.Printf("You owe $%s.\n", humanize.Comma(6582491)) // You owe $6,582,491.
-```
-
-## Ftoa
-
-Nicer float64 formatter that removes trailing zeros.
-
-```go
-fmt.Printf("%f", 2.24) // 2.240000
-fmt.Printf("%s", humanize.Ftoa(2.24)) // 2.24
-fmt.Printf("%f", 2.0) // 2.000000
-fmt.Printf("%s", humanize.Ftoa(2.0)) // 2
-```
-
-## SI notation
-
-Format numbers with [SI notation][sinotation].
-
-Example:
-
-```go
-humanize.SI(0.00000000223, "M") // 2.23 nM
-```
-
-## English-specific functions
-
-The following functions are in the `humanize/english` subpackage.
-
-### Plurals
-
-Simple English pluralization
-
-```go
-english.PluralWord(1, "object", "") // object
-english.PluralWord(42, "object", "") // objects
-english.PluralWord(2, "bus", "") // buses
-english.PluralWord(99, "locus", "loci") // loci
-
-english.Plural(1, "object", "") // 1 object
-english.Plural(42, "object", "") // 42 objects
-english.Plural(2, "bus", "") // 2 buses
-english.Plural(99, "locus", "loci") // 99 loci
-```
-
-### Word series
-
-Format comma-separated word lists with conjunctions:
-
-```go
-english.WordSeries([]string{"foo"}, "and") // foo
-english.WordSeries([]string{"foo", "bar"}, "and") // foo and bar
-english.WordSeries([]string{"foo", "bar", "baz"}, "and") // foo, bar and baz
-
-english.OxfordWordSeries([]string{"foo", "bar", "baz"}, "and") // foo, bar, and baz
-```
-
-[odisc]: https://groups.google.com/d/topic/golang-nuts/l8NhI74jl-4/discussion
-[sinotation]: http://en.wikipedia.org/wiki/Metric_prefix
diff --git a/vendor/github.com/dustin/go-humanize/big.go b/vendor/github.com/dustin/go-humanize/big.go
deleted file mode 100644
index f49dc337..00000000
--- a/vendor/github.com/dustin/go-humanize/big.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package humanize
-
-import (
-	"math/big"
-)
-
-// order of magnitude (to a max order)
-func oomm(n, b *big.Int, maxmag int) (float64, int) {
-	mag := 0
-	m := &big.Int{}
-	for n.Cmp(b) >= 0 {
-		n.DivMod(n, b, m)
-		mag++
-		if mag == maxmag && maxmag >= 0 {
-			break
-		}
-	}
-	return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag
-}
-
-// total order of magnitude
-// (same as above, but with no upper limit)
-func oom(n, b *big.Int) (float64, int) {
-	mag := 0
-	m := &big.Int{}
-	for n.Cmp(b) >= 0 {
-		n.DivMod(n, b, m)
-		mag++
-	}
-	return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag
-}
diff --git a/vendor/github.com/dustin/go-humanize/bigbytes.go b/vendor/github.com/dustin/go-humanize/bigbytes.go
deleted file mode 100644
index 1a2bf617..00000000
--- a/vendor/github.com/dustin/go-humanize/bigbytes.go
+++ /dev/null
@@ -1,173 +0,0 @@
-package humanize
-
-import (
-	"fmt"
-	"math/big"
-	"strings"
-	"unicode"
-)
-
-var (
-	bigIECExp = big.NewInt(1024)
-
-	// BigByte is one byte in big.Ints
-	BigByte = big.NewInt(1)
-	// BigKiByte is 1,024 bytes in big.Ints
-	BigKiByte = (&big.Int{}).Mul(BigByte, bigIECExp)
-	// BigMiByte is 1,024 k bytes in big.Ints
-	BigMiByte = (&big.Int{}).Mul(BigKiByte, bigIECExp)
-	// BigGiByte is 1,024 m bytes in big.Ints
-	BigGiByte =
(&big.Int{}).Mul(BigMiByte, bigIECExp) - // BigTiByte is 1,024 g bytes in bit.Ints - BigTiByte = (&big.Int{}).Mul(BigGiByte, bigIECExp) - // BigPiByte is 1,024 t bytes in bit.Ints - BigPiByte = (&big.Int{}).Mul(BigTiByte, bigIECExp) - // BigEiByte is 1,024 p bytes in bit.Ints - BigEiByte = (&big.Int{}).Mul(BigPiByte, bigIECExp) - // BigZiByte is 1,024 e bytes in bit.Ints - BigZiByte = (&big.Int{}).Mul(BigEiByte, bigIECExp) - // BigYiByte is 1,024 z bytes in bit.Ints - BigYiByte = (&big.Int{}).Mul(BigZiByte, bigIECExp) -) - -var ( - bigSIExp = big.NewInt(1000) - - // BigSIByte is one SI byte in big.Ints - BigSIByte = big.NewInt(1) - // BigKByte is 1,000 SI bytes in big.Ints - BigKByte = (&big.Int{}).Mul(BigSIByte, bigSIExp) - // BigMByte is 1,000 SI k bytes in big.Ints - BigMByte = (&big.Int{}).Mul(BigKByte, bigSIExp) - // BigGByte is 1,000 SI m bytes in big.Ints - BigGByte = (&big.Int{}).Mul(BigMByte, bigSIExp) - // BigTByte is 1,000 SI g bytes in big.Ints - BigTByte = (&big.Int{}).Mul(BigGByte, bigSIExp) - // BigPByte is 1,000 SI t bytes in big.Ints - BigPByte = (&big.Int{}).Mul(BigTByte, bigSIExp) - // BigEByte is 1,000 SI p bytes in big.Ints - BigEByte = (&big.Int{}).Mul(BigPByte, bigSIExp) - // BigZByte is 1,000 SI e bytes in big.Ints - BigZByte = (&big.Int{}).Mul(BigEByte, bigSIExp) - // BigYByte is 1,000 SI z bytes in big.Ints - BigYByte = (&big.Int{}).Mul(BigZByte, bigSIExp) -) - -var bigBytesSizeTable = map[string]*big.Int{ - "b": BigByte, - "kib": BigKiByte, - "kb": BigKByte, - "mib": BigMiByte, - "mb": BigMByte, - "gib": BigGiByte, - "gb": BigGByte, - "tib": BigTiByte, - "tb": BigTByte, - "pib": BigPiByte, - "pb": BigPByte, - "eib": BigEiByte, - "eb": BigEByte, - "zib": BigZiByte, - "zb": BigZByte, - "yib": BigYiByte, - "yb": BigYByte, - // Without suffix - "": BigByte, - "ki": BigKiByte, - "k": BigKByte, - "mi": BigMiByte, - "m": BigMByte, - "gi": BigGiByte, - "g": BigGByte, - "ti": BigTiByte, - "t": BigTByte, - "pi": BigPiByte, - "p": BigPByte, - "ei": BigEiByte, - "e": BigEByte, - "z": BigZByte, - "zi": BigZiByte, - "y": BigYByte, - "yi": BigYiByte, -} - -var ten = big.NewInt(10) - -func humanateBigBytes(s, base *big.Int, sizes []string) string { - if s.Cmp(ten) < 0 { - return fmt.Sprintf("%d B", s) - } - c := (&big.Int{}).Set(s) - val, mag := oomm(c, base, len(sizes)-1) - suffix := sizes[mag] - f := "%.0f %s" - if val < 10 { - f = "%.1f %s" - } - - return fmt.Sprintf(f, val, suffix) - -} - -// BigBytes produces a human readable representation of an SI size. -// -// See also: ParseBigBytes. -// -// BigBytes(82854982) -> 83 MB -func BigBytes(s *big.Int) string { - sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} - return humanateBigBytes(s, bigSIExp, sizes) -} - -// BigIBytes produces a human readable representation of an IEC size. -// -// See also: ParseBigBytes. -// -// BigIBytes(82854982) -> 79 MiB -func BigIBytes(s *big.Int) string { - sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"} - return humanateBigBytes(s, bigIECExp, sizes) -} - -// ParseBigBytes parses a string representation of bytes into the number -// of bytes it represents. -// -// See also: BigBytes, BigIBytes. -// -// ParseBigBytes("42 MB") -> 42000000, nil -// ParseBigBytes("42 mib") -> 44040192, nil -func ParseBigBytes(s string) (*big.Int, error) { - lastDigit := 0 - hasComma := false - for _, r := range s { - if !(unicode.IsDigit(r) || r == '.' 
|| r == ',') { - break - } - if r == ',' { - hasComma = true - } - lastDigit++ - } - - num := s[:lastDigit] - if hasComma { - num = strings.Replace(num, ",", "", -1) - } - - val := &big.Rat{} - _, err := fmt.Sscanf(num, "%f", val) - if err != nil { - return nil, err - } - - extra := strings.ToLower(strings.TrimSpace(s[lastDigit:])) - if m, ok := bigBytesSizeTable[extra]; ok { - mv := (&big.Rat{}).SetInt(m) - val.Mul(val, mv) - rv := &big.Int{} - rv.Div(val.Num(), val.Denom()) - return rv, nil - } - - return nil, fmt.Errorf("unhandled size name: %v", extra) -} diff --git a/vendor/github.com/dustin/go-humanize/bytes.go b/vendor/github.com/dustin/go-humanize/bytes.go deleted file mode 100644 index 0b498f48..00000000 --- a/vendor/github.com/dustin/go-humanize/bytes.go +++ /dev/null @@ -1,143 +0,0 @@ -package humanize - -import ( - "fmt" - "math" - "strconv" - "strings" - "unicode" -) - -// IEC Sizes. -// kibis of bits -const ( - Byte = 1 << (iota * 10) - KiByte - MiByte - GiByte - TiByte - PiByte - EiByte -) - -// SI Sizes. -const ( - IByte = 1 - KByte = IByte * 1000 - MByte = KByte * 1000 - GByte = MByte * 1000 - TByte = GByte * 1000 - PByte = TByte * 1000 - EByte = PByte * 1000 -) - -var bytesSizeTable = map[string]uint64{ - "b": Byte, - "kib": KiByte, - "kb": KByte, - "mib": MiByte, - "mb": MByte, - "gib": GiByte, - "gb": GByte, - "tib": TiByte, - "tb": TByte, - "pib": PiByte, - "pb": PByte, - "eib": EiByte, - "eb": EByte, - // Without suffix - "": Byte, - "ki": KiByte, - "k": KByte, - "mi": MiByte, - "m": MByte, - "gi": GiByte, - "g": GByte, - "ti": TiByte, - "t": TByte, - "pi": PiByte, - "p": PByte, - "ei": EiByte, - "e": EByte, -} - -func logn(n, b float64) float64 { - return math.Log(n) / math.Log(b) -} - -func humanateBytes(s uint64, base float64, sizes []string) string { - if s < 10 { - return fmt.Sprintf("%d B", s) - } - e := math.Floor(logn(float64(s), base)) - suffix := sizes[int(e)] - val := math.Floor(float64(s)/math.Pow(base, e)*10+0.5) / 10 - f := "%.0f %s" - if val < 10 { - f = "%.1f %s" - } - - return fmt.Sprintf(f, val, suffix) -} - -// Bytes produces a human readable representation of an SI size. -// -// See also: ParseBytes. -// -// Bytes(82854982) -> 83 MB -func Bytes(s uint64) string { - sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB"} - return humanateBytes(s, 1000, sizes) -} - -// IBytes produces a human readable representation of an IEC size. -// -// See also: ParseBytes. -// -// IBytes(82854982) -> 79 MiB -func IBytes(s uint64) string { - sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB"} - return humanateBytes(s, 1024, sizes) -} - -// ParseBytes parses a string representation of bytes into the number -// of bytes it represents. -// -// See Also: Bytes, IBytes. -// -// ParseBytes("42 MB") -> 42000000, nil -// ParseBytes("42 mib") -> 44040192, nil -func ParseBytes(s string) (uint64, error) { - lastDigit := 0 - hasComma := false - for _, r := range s { - if !(unicode.IsDigit(r) || r == '.' 
|| r == ',') { - break - } - if r == ',' { - hasComma = true - } - lastDigit++ - } - - num := s[:lastDigit] - if hasComma { - num = strings.Replace(num, ",", "", -1) - } - - f, err := strconv.ParseFloat(num, 64) - if err != nil { - return 0, err - } - - extra := strings.ToLower(strings.TrimSpace(s[lastDigit:])) - if m, ok := bytesSizeTable[extra]; ok { - f *= float64(m) - if f >= math.MaxUint64 { - return 0, fmt.Errorf("too large: %v", s) - } - return uint64(f), nil - } - - return 0, fmt.Errorf("unhandled size name: %v", extra) -} diff --git a/vendor/github.com/dustin/go-humanize/comma.go b/vendor/github.com/dustin/go-humanize/comma.go deleted file mode 100644 index 520ae3e5..00000000 --- a/vendor/github.com/dustin/go-humanize/comma.go +++ /dev/null @@ -1,116 +0,0 @@ -package humanize - -import ( - "bytes" - "math" - "math/big" - "strconv" - "strings" -) - -// Comma produces a string form of the given number in base 10 with -// commas after every three orders of magnitude. -// -// e.g. Comma(834142) -> 834,142 -func Comma(v int64) string { - sign := "" - - // Min int64 can't be negated to a usable value, so it has to be special cased. - if v == math.MinInt64 { - return "-9,223,372,036,854,775,808" - } - - if v < 0 { - sign = "-" - v = 0 - v - } - - parts := []string{"", "", "", "", "", "", ""} - j := len(parts) - 1 - - for v > 999 { - parts[j] = strconv.FormatInt(v%1000, 10) - switch len(parts[j]) { - case 2: - parts[j] = "0" + parts[j] - case 1: - parts[j] = "00" + parts[j] - } - v = v / 1000 - j-- - } - parts[j] = strconv.Itoa(int(v)) - return sign + strings.Join(parts[j:], ",") -} - -// Commaf produces a string form of the given number in base 10 with -// commas after every three orders of magnitude. -// -// e.g. Commaf(834142.32) -> 834,142.32 -func Commaf(v float64) string { - buf := &bytes.Buffer{} - if v < 0 { - buf.Write([]byte{'-'}) - v = 0 - v - } - - comma := []byte{','} - - parts := strings.Split(strconv.FormatFloat(v, 'f', -1, 64), ".") - pos := 0 - if len(parts[0])%3 != 0 { - pos += len(parts[0]) % 3 - buf.WriteString(parts[0][:pos]) - buf.Write(comma) - } - for ; pos < len(parts[0]); pos += 3 { - buf.WriteString(parts[0][pos : pos+3]) - buf.Write(comma) - } - buf.Truncate(buf.Len() - 1) - - if len(parts) > 1 { - buf.Write([]byte{'.'}) - buf.WriteString(parts[1]) - } - return buf.String() -} - -// CommafWithDigits works like the Commaf but limits the resulting -// string to the given number of decimal places. -// -// e.g. CommafWithDigits(834142.32, 1) -> 834,142.3 -func CommafWithDigits(f float64, decimals int) string { - return stripTrailingDigits(Commaf(f), decimals) -} - -// BigComma produces a string form of the given big.Int in base 10 -// with commas after every three orders of magnitude. 
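-//
-// An illustrative call (a sketch mirroring the Comma example above, not part
-// of the original docs):
-//
-// e.g. BigComma(big.NewInt(834142)) -> 834,142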
-func BigComma(b *big.Int) string { - sign := "" - if b.Sign() < 0 { - sign = "-" - b.Abs(b) - } - - athousand := big.NewInt(1000) - c := (&big.Int{}).Set(b) - _, m := oom(c, athousand) - parts := make([]string, m+1) - j := len(parts) - 1 - - mod := &big.Int{} - for b.Cmp(athousand) >= 0 { - b.DivMod(b, athousand, mod) - parts[j] = strconv.FormatInt(mod.Int64(), 10) - switch len(parts[j]) { - case 2: - parts[j] = "0" + parts[j] - case 1: - parts[j] = "00" + parts[j] - } - j-- - } - parts[j] = strconv.Itoa(int(b.Int64())) - return sign + strings.Join(parts[j:], ",") -} diff --git a/vendor/github.com/dustin/go-humanize/commaf.go b/vendor/github.com/dustin/go-humanize/commaf.go deleted file mode 100644 index 620690de..00000000 --- a/vendor/github.com/dustin/go-humanize/commaf.go +++ /dev/null @@ -1,40 +0,0 @@ -// +build go1.6 - -package humanize - -import ( - "bytes" - "math/big" - "strings" -) - -// BigCommaf produces a string form of the given big.Float in base 10 -// with commas after every three orders of magnitude. -func BigCommaf(v *big.Float) string { - buf := &bytes.Buffer{} - if v.Sign() < 0 { - buf.Write([]byte{'-'}) - v.Abs(v) - } - - comma := []byte{','} - - parts := strings.Split(v.Text('f', -1), ".") - pos := 0 - if len(parts[0])%3 != 0 { - pos += len(parts[0]) % 3 - buf.WriteString(parts[0][:pos]) - buf.Write(comma) - } - for ; pos < len(parts[0]); pos += 3 { - buf.WriteString(parts[0][pos : pos+3]) - buf.Write(comma) - } - buf.Truncate(buf.Len() - 1) - - if len(parts) > 1 { - buf.Write([]byte{'.'}) - buf.WriteString(parts[1]) - } - return buf.String() -} diff --git a/vendor/github.com/dustin/go-humanize/ftoa.go b/vendor/github.com/dustin/go-humanize/ftoa.go deleted file mode 100644 index 1c62b640..00000000 --- a/vendor/github.com/dustin/go-humanize/ftoa.go +++ /dev/null @@ -1,46 +0,0 @@ -package humanize - -import ( - "strconv" - "strings" -) - -func stripTrailingZeros(s string) string { - offset := len(s) - 1 - for offset > 0 { - if s[offset] == '.' { - offset-- - break - } - if s[offset] != '0' { - break - } - offset-- - } - return s[:offset+1] -} - -func stripTrailingDigits(s string, digits int) string { - if i := strings.Index(s, "."); i >= 0 { - if digits <= 0 { - return s[:i] - } - i++ - if i+digits >= len(s) { - return s - } - return s[:i+digits] - } - return s -} - -// Ftoa converts a float to a string with no trailing zeros. -func Ftoa(num float64) string { - return stripTrailingZeros(strconv.FormatFloat(num, 'f', 6, 64)) -} - -// FtoaWithDigits converts a float to a string but limits the resulting string -// to the given number of decimal places, and no trailing zeros. -func FtoaWithDigits(num float64, digits int) string { - return stripTrailingZeros(stripTrailingDigits(strconv.FormatFloat(num, 'f', 6, 64), digits)) -} diff --git a/vendor/github.com/dustin/go-humanize/humanize.go b/vendor/github.com/dustin/go-humanize/humanize.go deleted file mode 100644 index a2c2da31..00000000 --- a/vendor/github.com/dustin/go-humanize/humanize.go +++ /dev/null @@ -1,8 +0,0 @@ -/* -Package humanize converts boring ugly numbers to human-friendly strings and back. - -Durations can be turned into strings such as "3 days ago", numbers -representing sizes like 82854982 into useful strings like, "83 MB" or -"79 MiB" (whichever you prefer). 
-*/ -package humanize diff --git a/vendor/github.com/dustin/go-humanize/number.go b/vendor/github.com/dustin/go-humanize/number.go deleted file mode 100644 index dec61865..00000000 --- a/vendor/github.com/dustin/go-humanize/number.go +++ /dev/null @@ -1,192 +0,0 @@ -package humanize - -/* -Slightly adapted from the source to fit go-humanize. - -Author: https://github.com/gorhill -Source: https://gist.github.com/gorhill/5285193 - -*/ - -import ( - "math" - "strconv" -) - -var ( - renderFloatPrecisionMultipliers = [...]float64{ - 1, - 10, - 100, - 1000, - 10000, - 100000, - 1000000, - 10000000, - 100000000, - 1000000000, - } - - renderFloatPrecisionRounders = [...]float64{ - 0.5, - 0.05, - 0.005, - 0.0005, - 0.00005, - 0.000005, - 0.0000005, - 0.00000005, - 0.000000005, - 0.0000000005, - } -) - -// FormatFloat produces a formatted number as string based on the following user-specified criteria: -// * thousands separator -// * decimal separator -// * decimal precision -// -// Usage: s := RenderFloat(format, n) -// The format parameter tells how to render the number n. -// -// See examples: http://play.golang.org/p/LXc1Ddm1lJ -// -// Examples of format strings, given n = 12345.6789: -// "#,###.##" => "12,345.67" -// "#,###." => "12,345" -// "#,###" => "12345,678" -// "#\u202F###,##" => "12 345,68" -// "#.###,###### => 12.345,678900 -// "" (aka default format) => 12,345.67 -// -// The highest precision allowed is 9 digits after the decimal symbol. -// There is also a version for integer number, FormatInteger(), -// which is convenient for calls within template. -func FormatFloat(format string, n float64) string { - // Special cases: - // NaN = "NaN" - // +Inf = "+Infinity" - // -Inf = "-Infinity" - if math.IsNaN(n) { - return "NaN" - } - if n > math.MaxFloat64 { - return "Infinity" - } - if n < -math.MaxFloat64 { - return "-Infinity" - } - - // default format - precision := 2 - decimalStr := "." 
- thousandStr := "," - positiveStr := "" - negativeStr := "-" - - if len(format) > 0 { - format := []rune(format) - - // If there is an explicit format directive, - // then default values are these: - precision = 9 - thousandStr = "" - - // collect indices of meaningful formatting directives - formatIndx := []int{} - for i, char := range format { - if char != '#' && char != '0' { - formatIndx = append(formatIndx, i) - } - } - - if len(formatIndx) > 0 { - // Directive at index 0: - // Must be a '+' - // Raise an error if not the case - // index: 0123456789 - // +0.000,000 - // +000,000.0 - // +0000.00 - // +0000 - if formatIndx[0] == 0 { - if format[formatIndx[0]] != '+' { - panic("RenderFloat(): invalid positive sign directive") - } - positiveStr = "+" - formatIndx = formatIndx[1:] - } - - // Two directives: - // First is thousands separator - // Raise an error if not followed by 3-digit - // 0123456789 - // 0.000,000 - // 000,000.00 - if len(formatIndx) == 2 { - if (formatIndx[1] - formatIndx[0]) != 4 { - panic("RenderFloat(): thousands separator directive must be followed by 3 digit-specifiers") - } - thousandStr = string(format[formatIndx[0]]) - formatIndx = formatIndx[1:] - } - - // One directive: - // Directive is decimal separator - // The number of digit-specifier following the separator indicates wanted precision - // 0123456789 - // 0.00 - // 000,0000 - if len(formatIndx) == 1 { - decimalStr = string(format[formatIndx[0]]) - precision = len(format) - formatIndx[0] - 1 - } - } - } - - // generate sign part - var signStr string - if n >= 0.000000001 { - signStr = positiveStr - } else if n <= -0.000000001 { - signStr = negativeStr - n = -n - } else { - signStr = "" - n = 0.0 - } - - // split number into integer and fractional parts - intf, fracf := math.Modf(n + renderFloatPrecisionRounders[precision]) - - // generate integer part string - intStr := strconv.FormatInt(int64(intf), 10) - - // add thousand separator if required - if len(thousandStr) > 0 { - for i := len(intStr); i > 3; { - i -= 3 - intStr = intStr[:i] + thousandStr + intStr[i:] - } - } - - // no fractional part, we can leave now - if precision == 0 { - return signStr + intStr - } - - // generate fractional part - fracStr := strconv.Itoa(int(fracf * renderFloatPrecisionMultipliers[precision])) - // may need padding - if len(fracStr) < precision { - fracStr = "000000000000000"[:precision-len(fracStr)] + fracStr - } - - return signStr + intStr + decimalStr + fracStr -} - -// FormatInteger produces a formatted number as string. -// See FormatFloat. -func FormatInteger(format string, n int) string { - return FormatFloat(format, float64(n)) -} diff --git a/vendor/github.com/dustin/go-humanize/ordinals.go b/vendor/github.com/dustin/go-humanize/ordinals.go deleted file mode 100644 index 43d88a86..00000000 --- a/vendor/github.com/dustin/go-humanize/ordinals.go +++ /dev/null @@ -1,25 +0,0 @@ -package humanize - -import "strconv" - -// Ordinal gives you the input number in a rank/ordinal format. 
-// -// Ordinal(3) -> 3rd -func Ordinal(x int) string { - suffix := "th" - switch x % 10 { - case 1: - if x%100 != 11 { - suffix = "st" - } - case 2: - if x%100 != 12 { - suffix = "nd" - } - case 3: - if x%100 != 13 { - suffix = "rd" - } - } - return strconv.Itoa(x) + suffix -} diff --git a/vendor/github.com/dustin/go-humanize/si.go b/vendor/github.com/dustin/go-humanize/si.go deleted file mode 100644 index ae659e0e..00000000 --- a/vendor/github.com/dustin/go-humanize/si.go +++ /dev/null @@ -1,123 +0,0 @@ -package humanize - -import ( - "errors" - "math" - "regexp" - "strconv" -) - -var siPrefixTable = map[float64]string{ - -24: "y", // yocto - -21: "z", // zepto - -18: "a", // atto - -15: "f", // femto - -12: "p", // pico - -9: "n", // nano - -6: "µ", // micro - -3: "m", // milli - 0: "", - 3: "k", // kilo - 6: "M", // mega - 9: "G", // giga - 12: "T", // tera - 15: "P", // peta - 18: "E", // exa - 21: "Z", // zetta - 24: "Y", // yotta -} - -var revSIPrefixTable = revfmap(siPrefixTable) - -// revfmap reverses the map and precomputes the power multiplier -func revfmap(in map[float64]string) map[string]float64 { - rv := map[string]float64{} - for k, v := range in { - rv[v] = math.Pow(10, k) - } - return rv -} - -var riParseRegex *regexp.Regexp - -func init() { - ri := `^([\-0-9.]+)\s?([` - for _, v := range siPrefixTable { - ri += v - } - ri += `]?)(.*)` - - riParseRegex = regexp.MustCompile(ri) -} - -// ComputeSI finds the most appropriate SI prefix for the given number -// and returns the prefix along with the value adjusted to be within -// that prefix. -// -// See also: SI, ParseSI. -// -// e.g. ComputeSI(2.2345e-12) -> (2.2345, "p") -func ComputeSI(input float64) (float64, string) { - if input == 0 { - return 0, "" - } - mag := math.Abs(input) - exponent := math.Floor(logn(mag, 10)) - exponent = math.Floor(exponent/3) * 3 - - value := mag / math.Pow(10, exponent) - - // Handle special case where value is exactly 1000.0 - // Should return 1 M instead of 1000 k - if value == 1000.0 { - exponent += 3 - value = mag / math.Pow(10, exponent) - } - - value = math.Copysign(value, input) - - prefix := siPrefixTable[exponent] - return value, prefix -} - -// SI returns a string with default formatting. -// -// SI uses Ftoa to format float value, removing trailing zeros. -// -// See also: ComputeSI, ParseSI. -// -// e.g. SI(1000000, "B") -> 1 MB -// e.g. SI(2.2345e-12, "F") -> 2.2345 pF -func SI(input float64, unit string) string { - value, prefix := ComputeSI(input) - return Ftoa(value) + " " + prefix + unit -} - -// SIWithDigits works like SI but limits the resulting string to the -// given number of decimal places. -// -// e.g. SIWithDigits(1000000, 0, "B") -> 1 MB -// e.g. SIWithDigits(2.2345e-12, 2, "F") -> 2.23 pF -func SIWithDigits(input float64, decimals int, unit string) string { - value, prefix := ComputeSI(input) - return FtoaWithDigits(value, decimals) + " " + prefix + unit -} - -var errInvalid = errors.New("invalid input") - -// ParseSI parses an SI string back into the number and unit. -// -// See also: SI, ComputeSI. -// -// e.g. 
ParseSI("2.2345 pF") -> (2.2345e-12, "F", nil) -func ParseSI(input string) (float64, string, error) { - found := riParseRegex.FindStringSubmatch(input) - if len(found) != 4 { - return 0, "", errInvalid - } - mag := revSIPrefixTable[found[2]] - unit := found[3] - - base, err := strconv.ParseFloat(found[1], 64) - return base * mag, unit, err -} diff --git a/vendor/github.com/dustin/go-humanize/times.go b/vendor/github.com/dustin/go-humanize/times.go deleted file mode 100644 index dd3fbf5e..00000000 --- a/vendor/github.com/dustin/go-humanize/times.go +++ /dev/null @@ -1,117 +0,0 @@ -package humanize - -import ( - "fmt" - "math" - "sort" - "time" -) - -// Seconds-based time units -const ( - Day = 24 * time.Hour - Week = 7 * Day - Month = 30 * Day - Year = 12 * Month - LongTime = 37 * Year -) - -// Time formats a time into a relative string. -// -// Time(someT) -> "3 weeks ago" -func Time(then time.Time) string { - return RelTime(then, time.Now(), "ago", "from now") -} - -// A RelTimeMagnitude struct contains a relative time point at which -// the relative format of time will switch to a new format string. A -// slice of these in ascending order by their "D" field is passed to -// CustomRelTime to format durations. -// -// The Format field is a string that may contain a "%s" which will be -// replaced with the appropriate signed label (e.g. "ago" or "from -// now") and a "%d" that will be replaced by the quantity. -// -// The DivBy field is the amount of time the time difference must be -// divided by in order to display correctly. -// -// e.g. if D is 2*time.Minute and you want to display "%d minutes %s" -// DivBy should be time.Minute so whatever the duration is will be -// expressed in minutes. -type RelTimeMagnitude struct { - D time.Duration - Format string - DivBy time.Duration -} - -var defaultMagnitudes = []RelTimeMagnitude{ - {time.Second, "now", time.Second}, - {2 * time.Second, "1 second %s", 1}, - {time.Minute, "%d seconds %s", time.Second}, - {2 * time.Minute, "1 minute %s", 1}, - {time.Hour, "%d minutes %s", time.Minute}, - {2 * time.Hour, "1 hour %s", 1}, - {Day, "%d hours %s", time.Hour}, - {2 * Day, "1 day %s", 1}, - {Week, "%d days %s", Day}, - {2 * Week, "1 week %s", 1}, - {Month, "%d weeks %s", Week}, - {2 * Month, "1 month %s", 1}, - {Year, "%d months %s", Month}, - {18 * Month, "1 year %s", 1}, - {2 * Year, "2 years %s", 1}, - {LongTime, "%d years %s", Year}, - {math.MaxInt64, "a long while %s", 1}, -} - -// RelTime formats a time into a relative string. -// -// It takes two times and two labels. In addition to the generic time -// delta string (e.g. 5 minutes), the labels are used applied so that -// the label corresponding to the smaller time is applied. -// -// RelTime(timeInPast, timeInFuture, "earlier", "later") -> "3 weeks earlier" -func RelTime(a, b time.Time, albl, blbl string) string { - return CustomRelTime(a, b, albl, blbl, defaultMagnitudes) -} - -// CustomRelTime formats a time into a relative string. -// -// It takes two times two labels and a table of relative time formats. -// In addition to the generic time delta string (e.g. 5 minutes), the -// labels are used applied so that the label corresponding to the -// smaller time is applied. 
-func CustomRelTime(a, b time.Time, albl, blbl string, magnitudes []RelTimeMagnitude) string { - lbl := albl - diff := b.Sub(a) - - if a.After(b) { - lbl = blbl - diff = a.Sub(b) - } - - n := sort.Search(len(magnitudes), func(i int) bool { - return magnitudes[i].D > diff - }) - - if n >= len(magnitudes) { - n = len(magnitudes) - 1 - } - mag := magnitudes[n] - args := []interface{}{} - escaped := false - for _, ch := range mag.Format { - if escaped { - switch ch { - case 's': - args = append(args, lbl) - case 'd': - args = append(args, diff/mag.DivBy) - } - escaped = false - } else { - escaped = ch == '%' - } - } - return fmt.Sprintf(mag.Format, args...) -} diff --git a/vendor/github.com/fsnotify/fsnotify/.editorconfig b/vendor/github.com/fsnotify/fsnotify/.editorconfig deleted file mode 100644 index fad89585..00000000 --- a/vendor/github.com/fsnotify/fsnotify/.editorconfig +++ /dev/null @@ -1,12 +0,0 @@ -root = true - -[*.go] -indent_style = tab -indent_size = 4 -insert_final_newline = true - -[*.{yml,yaml}] -indent_style = space -indent_size = 2 -insert_final_newline = true -trim_trailing_whitespace = true diff --git a/vendor/github.com/fsnotify/fsnotify/.gitattributes b/vendor/github.com/fsnotify/fsnotify/.gitattributes deleted file mode 100644 index 32f1001b..00000000 --- a/vendor/github.com/fsnotify/fsnotify/.gitattributes +++ /dev/null @@ -1 +0,0 @@ -go.sum linguist-generated diff --git a/vendor/github.com/fsnotify/fsnotify/.gitignore b/vendor/github.com/fsnotify/fsnotify/.gitignore deleted file mode 100644 index 4cd0cbaf..00000000 --- a/vendor/github.com/fsnotify/fsnotify/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ -# Setup a Global .gitignore for OS and editor generated files: -# https://help.github.com/articles/ignoring-files -# git config --global core.excludesfile ~/.gitignore_global - -.vagrant -*.sublime-project diff --git a/vendor/github.com/fsnotify/fsnotify/.mailmap b/vendor/github.com/fsnotify/fsnotify/.mailmap deleted file mode 100644 index a04f2907..00000000 --- a/vendor/github.com/fsnotify/fsnotify/.mailmap +++ /dev/null @@ -1,2 +0,0 @@ -Chris Howey -Nathan Youngman <4566+nathany@users.noreply.github.com> diff --git a/vendor/github.com/fsnotify/fsnotify/AUTHORS b/vendor/github.com/fsnotify/fsnotify/AUTHORS deleted file mode 100644 index 6cbabe5e..00000000 --- a/vendor/github.com/fsnotify/fsnotify/AUTHORS +++ /dev/null @@ -1,62 +0,0 @@ -# Names should be added to this file as -# Name or Organization -# The email address is not required for organizations. - -# You can update this list using the following command: -# -# $ (head -n10 AUTHORS && git shortlog -se | sed -E 's/^\s+[0-9]+\t//') | tee AUTHORS - -# Please keep the list sorted. 
- -Aaron L -Adrien Bustany -Alexey Kazakov -Amit Krishnan -Anmol Sethi -Bjørn Erik Pedersen -Brian Goff -Bruno Bigras -Caleb Spare -Case Nelson -Chris Howey -Christoffer Buchholz -Daniel Wagner-Hall -Dave Cheney -Eric Lin -Evan Phoenix -Francisco Souza -Gautam Dey -Hari haran -Ichinose Shogo -Johannes Ebke -John C Barstow -Kelvin Fo -Ken-ichirou MATSUZAWA -Matt Layher -Matthias Stone -Nathan Youngman -Nickolai Zeldovich -Oliver Bristow -Patrick -Paul Hammond -Pawel Knap -Pieter Droogendijk -Pratik Shinde -Pursuit92 -Riku Voipio -Rob Figueiredo -Rodrigo Chiossi -Slawek Ligus -Soge Zhang -Tiffany Jernigan -Tilak Sharma -Tobias Klauser -Tom Payne -Travis Cline -Tudor Golubenco -Vahe Khachikyan -Yukang -bronze1man -debrando -henrikedwards -铁哥 diff --git a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md deleted file mode 100644 index a438fe4b..00000000 --- a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md +++ /dev/null @@ -1,339 +0,0 @@ -# Changelog - -All notable changes to this project will be documented in this file. - -The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), -and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). - -## [Unreleased] - -## [1.5.1] - 2021-08-24 - -* Revert Add AddRaw to not follow symlinks - -## [1.5.0] - 2021-08-20 - -* Go: Increase minimum required version to Go 1.12 [#381](https://github.com/fsnotify/fsnotify/pull/381) -* Feature: Add AddRaw method which does not follow symlinks when adding a watch [#289](https://github.com/fsnotify/fsnotify/pull/298) -* Windows: Follow symlinks by default like on all other systems [#289](https://github.com/fsnotify/fsnotify/pull/289) -* CI: Use GitHub Actions for CI and cover go 1.12-1.17 - [#378](https://github.com/fsnotify/fsnotify/pull/378) - [#381](https://github.com/fsnotify/fsnotify/pull/381) - [#385](https://github.com/fsnotify/fsnotify/pull/385) -* Go 1.14+: Fix unsafe pointer conversion [#325](https://github.com/fsnotify/fsnotify/pull/325) - -## [1.4.7] - 2018-01-09 - -* BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine) -* Tests: Fix missing verb on format string (thanks @rchiossi) -* Linux: Fix deadlock in Remove (thanks @aarondl) -* Linux: Watch.Add improvements (avoid race, fix consistency, reduce garbage) (thanks @twpayne) -* Docs: Moved FAQ into the README (thanks @vahe) -* Linux: Properly handle inotify's IN_Q_OVERFLOW event (thanks @zeldovich) -* Docs: replace references to OS X with macOS - -## [1.4.2] - 2016-10-10 - -* Linux: use InotifyInit1 with IN_CLOEXEC to stop leaking a file descriptor to a child process when using fork/exec [#178](https://github.com/fsnotify/fsnotify/pull/178) (thanks @pattyshack) - -## [1.4.1] - 2016-10-04 - -* Fix flaky inotify stress test on Linux [#177](https://github.com/fsnotify/fsnotify/pull/177) (thanks @pattyshack) - -## [1.4.0] - 2016-10-01 - -* add a String() method to Event.Op [#165](https://github.com/fsnotify/fsnotify/pull/165) (thanks @oozie) - -## [1.3.1] - 2016-06-28 - -* Windows: fix for double backslash when watching the root of a drive [#151](https://github.com/fsnotify/fsnotify/issues/151) (thanks @brunoqc) - -## [1.3.0] - 2016-04-19 - -* Support linux/arm64 by [patching](https://go-review.googlesource.com/#/c/21971/) x/sys/unix and switching to to it from syscall (thanks @suihkulokki) [#135](https://github.com/fsnotify/fsnotify/pull/135) - -## [1.2.10] - 2016-03-02 - -* Fix golint errors in windows.go
[#121](https://github.com/fsnotify/fsnotify/pull/121) (thanks @tiffanyfj) - -## [1.2.9] - 2016-01-13 - -kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsnotify/pull/111) (thanks @bep) - -## [1.2.8] - 2015-12-17 - -* kqueue: fix race condition in Close [#105](https://github.com/fsnotify/fsnotify/pull/105) (thanks @djui for reporting the issue and @ppknap for writing a failing test) -* inotify: fix race in test -* enable race detection for continuous integration (Linux, Mac, Windows) - -## [1.2.5] - 2015-10-17 - -* inotify: use epoll_create1 for arm64 support (requires Linux 2.6.27 or later) [#100](https://github.com/fsnotify/fsnotify/pull/100) (thanks @suihkulokki) -* inotify: fix path leaks [#73](https://github.com/fsnotify/fsnotify/pull/73) (thanks @chamaken) -* kqueue: watch for rename events on subdirectories [#83](https://github.com/fsnotify/fsnotify/pull/83) (thanks @guotie) -* kqueue: avoid infinite loops from symlinks cycles [#101](https://github.com/fsnotify/fsnotify/pull/101) (thanks @illicitonion) - -## [1.2.1] - 2015-10-14 - -* kqueue: don't watch named pipes [#98](https://github.com/fsnotify/fsnotify/pull/98) (thanks @evanphx) - -## [1.2.0] - 2015-02-08 - -* inotify: use epoll to wake up readEvents [#66](https://github.com/fsnotify/fsnotify/pull/66) (thanks @PieterD) -* inotify: closing watcher should now always shut down goroutine [#63](https://github.com/fsnotify/fsnotify/pull/63) (thanks @PieterD) -* kqueue: close kqueue after removing watches, fixes [#59](https://github.com/fsnotify/fsnotify/issues/59) - -## [1.1.1] - 2015-02-05 - -* inotify: Retry read on EINTR [#61](https://github.com/fsnotify/fsnotify/issues/61) (thanks @PieterD) - -## [1.1.0] - 2014-12-12 - -* kqueue: rework internals [#43](https://github.com/fsnotify/fsnotify/pull/43) - * add low-level functions - * only need to store flags on directories - * less mutexes [#13](https://github.com/fsnotify/fsnotify/issues/13) - * done can be an unbuffered channel - * remove calls to os.NewSyscallError -* More efficient string concatenation for Event.String() [#52](https://github.com/fsnotify/fsnotify/pull/52) (thanks @mdlayher) -* kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/fsnotify/fsnotify/issues/48) -* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51) - -## [1.0.4] - 2014-09-07 - -* kqueue: add dragonfly to the build tags. -* Rename source code files, rearrange code so exported APIs are at the top. -* Add done channel to example code. [#37](https://github.com/fsnotify/fsnotify/pull/37) (thanks @chenyukang) - -## [1.0.3] - 2014-08-19 - -* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/fsnotify/fsnotify/issues/36) - -## [1.0.2] - 2014-08-17 - -* [Fix] Missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso) -* [Fix] Make ./path and path equivalent. (thanks @zhsso) - -## [1.0.0] - 2014-08-15 - -* [API] Remove AddWatch on Windows, use Add. -* Improve documentation for exported identifiers. [#30](https://github.com/fsnotify/fsnotify/issues/30) -* Minor updates based on feedback from golint. - -## dev / 2014-07-09 - -* Moved to [github.com/fsnotify/fsnotify](https://github.com/fsnotify/fsnotify). -* Use os.NewSyscallError instead of returning errno (thanks @hariharan-uno) - -## dev / 2014-07-04 - -* kqueue: fix incorrect mutex used in Close() -* Update example to demonstrate usage of Op. 
- -## dev / 2014-06-28 - -* [API] Don't set the Write Op for attribute notifications [#4](https://github.com/fsnotify/fsnotify/issues/4) -* Fix for String() method on Event (thanks Alex Brainman) -* Don't build on Plan 9 or Solaris (thanks @4ad) - -## dev / 2014-06-21 - -* Events channel of type Event rather than *Event. -* [internal] use syscall constants directly for inotify and kqueue. -* [internal] kqueue: rename events to kevents and fileEvent to event. - -## dev / 2014-06-19 - -* Go 1.3+ required on Windows (uses syscall.ERROR_MORE_DATA internally). -* [internal] remove cookie from Event struct (unused). -* [internal] Event struct has the same definition across every OS. -* [internal] remove internal watch and removeWatch methods. - -## dev / 2014-06-12 - -* [API] Renamed Watch() to Add() and RemoveWatch() to Remove(). -* [API] Pluralized channel names: Events and Errors. -* [API] Renamed FileEvent struct to Event. -* [API] Op constants replace methods like IsCreate(). - -## dev / 2014-06-12 - -* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98) - -## dev / 2014-05-23 - -* [API] Remove current implementation of WatchFlags. - * current implementation doesn't take advantage of OS for efficiency - * provides little benefit over filtering events as they are received, but has extra bookkeeping and mutexes - * no tests for the current implementation - * not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195) - -## [0.9.3] - 2014-12-31 - -* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51) - -## [0.9.2] - 2014-08-17 - -* [Backport] Fix missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso) - -## [0.9.1] - 2014-06-12 - -* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98) - -## [0.9.0] - 2014-01-17 - -* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany) -* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare) -* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library. 
- -## [0.8.12] - 2013-11-13 - -* [API] Remove FD_SET and friends from Linux adapter - -## [0.8.11] - 2013-11-02 - -* [Doc] Add Changelog [#72][] (thanks @nathany) -* [Doc] Spotlight and double modify events on macOS [#62][] (reported by @paulhammond) - -## [0.8.10] - 2013-10-19 - -* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott) -* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer) -* [Doc] specify OS-specific limits in README (thanks @debrando) - -## [0.8.9] - 2013-09-08 - -* [Doc] Contributing (thanks @nathany) -* [Doc] update package path in example code [#63][] (thanks @paulhammond) -* [Doc] GoCI badge in README (Linux only) [#60][] -* [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany) - -## [0.8.8] - 2013-06-17 - -* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie) - -## [0.8.7] - 2013-06-03 - -* [API] Make syscall flags internal -* [Fix] inotify: ignore event changes -* [Fix] race in symlink test [#45][] (reported by @srid) -* [Fix] tests on Windows -* lower case error messages - -## [0.8.6] - 2013-05-23 - -* kqueue: Use EVT_ONLY flag on Darwin -* [Doc] Update README with full example - -## [0.8.5] - 2013-05-09 - -* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg) - -## [0.8.4] - 2013-04-07 - -* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz) - -## [0.8.3] - 2013-03-13 - -* [Fix] inoitfy/kqueue memory leak [#36][] (reported by @nbkolchin) -* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin) - -## [0.8.2] - 2013-02-07 - -* [Doc] add Authors -* [Fix] fix data races for map access [#29][] (thanks @fsouza) - -## [0.8.1] - 2013-01-09 - -* [Fix] Windows path separators -* [Doc] BSD License - -## [0.8.0] - 2012-11-09 - -* kqueue: directory watching improvements (thanks @vmirage) -* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto) -* [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr) - -## [0.7.4] - 2012-10-09 - -* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji) -* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig) -* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig) -* [Fix] kqueue: modify after recreation of file - -## [0.7.3] - 2012-09-27 - -* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage) -* [Fix] kqueue: no longer get duplicate CREATE events - -## [0.7.2] - 2012-09-01 - -* kqueue: events for created directories - -## [0.7.1] - 2012-07-14 - -* [Fix] for renaming files - -## [0.7.0] - 2012-07-02 - -* [Feature] FSNotify flags -* [Fix] inotify: Added file name back to event path - -## [0.6.0] - 2012-06-06 - -* kqueue: watch files after directory created (thanks @tmc) - -## [0.5.1] - 2012-05-22 - -* [Fix] inotify: remove all watches before Close() - -## [0.5.0] - 2012-05-03 - -* [API] kqueue: return errors during watch instead of sending over channel -* kqueue: match symlink behavior on Linux -* inotify: add `DELETE_SELF` (requested by @taralx) -* [Fix] kqueue: handle EINTR (reported by @robfig) -* [Doc] Godoc example [#1][] (thanks @davecheney) - -## [0.4.0] - 2012-03-30 - -* Go 1 released: build with go tool -* [Feature] Windows support using winfsnotify -* Windows does not have attribute change notifications -* Roll attribute notifications into IsModify - -## [0.3.0] - 2012-02-19 - -* kqueue: add files when watch directory 
- -## [0.2.0] - 2011-12-30 - -* update to latest Go weekly code - -## [0.1.0] - 2011-10-19 - -* kqueue: add watch on file creation to match inotify -* kqueue: create file event -* inotify: ignore `IN_IGNORED` events -* event String() -* linux: common FileEvent functions -* initial commit - -[#79]: https://github.com/howeyc/fsnotify/pull/79 -[#77]: https://github.com/howeyc/fsnotify/pull/77 -[#72]: https://github.com/howeyc/fsnotify/issues/72 -[#71]: https://github.com/howeyc/fsnotify/issues/71 -[#70]: https://github.com/howeyc/fsnotify/issues/70 -[#63]: https://github.com/howeyc/fsnotify/issues/63 -[#62]: https://github.com/howeyc/fsnotify/issues/62 -[#60]: https://github.com/howeyc/fsnotify/issues/60 -[#59]: https://github.com/howeyc/fsnotify/issues/59 -[#49]: https://github.com/howeyc/fsnotify/issues/49 -[#45]: https://github.com/howeyc/fsnotify/issues/45 -[#40]: https://github.com/howeyc/fsnotify/issues/40 -[#36]: https://github.com/howeyc/fsnotify/issues/36 -[#33]: https://github.com/howeyc/fsnotify/issues/33 -[#29]: https://github.com/howeyc/fsnotify/issues/29 -[#25]: https://github.com/howeyc/fsnotify/issues/25 -[#24]: https://github.com/howeyc/fsnotify/issues/24 -[#21]: https://github.com/howeyc/fsnotify/issues/21 diff --git a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md deleted file mode 100644 index 828a60b2..00000000 --- a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md +++ /dev/null @@ -1,77 +0,0 @@ -# Contributing - -## Issues - -* Request features and report bugs using the [GitHub Issue Tracker](https://github.com/fsnotify/fsnotify/issues). -* Please indicate the platform you are using fsnotify on. -* A code example to reproduce the problem is appreciated. - -## Pull Requests - -### Contributor License Agreement - -fsnotify is derived from code in the [golang.org/x/exp](https://godoc.org/golang.org/x/exp) package and it may be included [in the standard library](https://github.com/fsnotify/fsnotify/issues/1) in the future. Therefore fsnotify carries the same [LICENSE](https://github.com/fsnotify/fsnotify/blob/master/LICENSE) as Go. Contributors retain their copyright, so you need to fill out a short form before we can accept your contribution: [Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual). - -Please indicate that you have signed the CLA in your pull request. - -### How fsnotify is Developed - -* Development is done on feature branches. -* Tests are run on BSD, Linux, macOS and Windows. -* Pull requests are reviewed and [applied to master][am] using [hub][]. - * Maintainers may modify or squash commits rather than asking contributors to. -* To issue a new release, the maintainers will: - * Update the CHANGELOG - * Tag a version, which will become available through gopkg.in. - -### How to Fork - -For smooth sailing, always use the original import path. Installing with `go get` makes this easy. - -1. Install from GitHub (`go get -u github.com/fsnotify/fsnotify`) -2. Create your feature branch (`git checkout -b my-new-feature`) -3. Ensure everything works and the tests pass (see below) -4. Commit your changes (`git commit -am 'Add some feature'`) - -Contribute upstream: - -1. Fork fsnotify on GitHub -2. Add your remote (`git remote add fork git@github.com:mycompany/repo.git`) -3. Push to the branch (`git push fork my-new-feature`) -4. 
Create a new Pull Request on GitHub - -This workflow is [thoroughly explained by Katrina Owen](https://splice.com/blog/contributing-open-source-git-repositories-go/). - -### Testing - -fsnotify uses build tags to compile different code on Linux, BSD, macOS, and Windows. - -Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on. - -To aid in cross-platform testing there is a Vagrantfile for Linux and BSD. - -* Install [Vagrant](http://www.vagrantup.com/) and [VirtualBox](https://www.virtualbox.org/) -* Setup [Vagrant Gopher](https://github.com/nathany/vagrant-gopher) in your `src` folder. -* Run `vagrant up` from the project folder. You can also setup just one box with `vagrant up linux` or `vagrant up bsd` (note: the BSD box doesn't support Windows hosts at this time, and NFS may prompt for your host OS password) -* Once setup, you can run the test suite on a given OS with a single command `vagrant ssh linux -c 'cd fsnotify/fsnotify; go test'`. -* When you're done, you will want to halt or destroy the Vagrant boxes. - -Notice: fsnotify file system events won't trigger in shared folders. The tests get around this limitation by using the /tmp directory. - -Right now there is no equivalent solution for Windows and macOS, but there are Windows VMs [freely available from Microsoft](http://www.modern.ie/en-us/virtualization-tools#downloads). - -### Maintainers - -Help maintaining fsnotify is welcome. To be a maintainer: - -* Submit a pull request and sign the CLA as above. -* You must be able to run the test suite on Mac, Windows, Linux and BSD. - -To keep master clean, the fsnotify project uses the "apply mail" workflow outlined in Nathaniel Talbott's post ["Merge pull request" Considered Harmful][am]. This requires installing [hub][]. - -All code changes should be internal pull requests. - -Releases are tagged using [Semantic Versioning](http://semver.org/). - -[hub]: https://github.com/github/hub -[am]: http://blog.spreedly.com/2014/06/24/merge-pull-request-considered-harmful/#.VGa5yZPF_Zs diff --git a/vendor/github.com/fsnotify/fsnotify/LICENSE b/vendor/github.com/fsnotify/fsnotify/LICENSE deleted file mode 100644 index e180c8fb..00000000 --- a/vendor/github.com/fsnotify/fsnotify/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. -Copyright (c) 2012-2019 fsnotify Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/fsnotify/fsnotify/README.md b/vendor/github.com/fsnotify/fsnotify/README.md deleted file mode 100644 index df57b1b2..00000000 --- a/vendor/github.com/fsnotify/fsnotify/README.md +++ /dev/null @@ -1,130 +0,0 @@ -# File system notifications for Go - -[![GoDoc](https://godoc.org/github.com/fsnotify/fsnotify?status.svg)](https://godoc.org/github.com/fsnotify/fsnotify) [![Go Report Card](https://goreportcard.com/badge/github.com/fsnotify/fsnotify)](https://goreportcard.com/report/github.com/fsnotify/fsnotify) - -fsnotify utilizes [golang.org/x/sys](https://godoc.org/golang.org/x/sys) rather than `syscall` from the standard library. Ensure you have the latest version installed by running: - -```console -go get -u golang.org/x/sys/... -``` - -Cross platform: Windows, Linux, BSD and macOS. - -| Adapter | OS | Status | -| --------------------- | -------------------------------- | ------------------------------------------------------------------------------------------------------------------------------- | -| inotify | Linux 2.6.27 or later, Android\* | Supported | -| kqueue | BSD, macOS, iOS\* | Supported | -| ReadDirectoryChangesW | Windows | Supported | -| FSEvents | macOS | [Planned](https://github.com/fsnotify/fsnotify/issues/11) | -| FEN | Solaris 11 | [In Progress](https://github.com/fsnotify/fsnotify/issues/12) | -| fanotify | Linux 2.6.37+ | [Planned](https://github.com/fsnotify/fsnotify/issues/114) | -| USN Journals | Windows | [Maybe](https://github.com/fsnotify/fsnotify/issues/53) | -| Polling | *All* | [Maybe](https://github.com/fsnotify/fsnotify/issues/9) | - -\* Android and iOS are untested. - -Please see [the documentation](https://godoc.org/github.com/fsnotify/fsnotify) and consult the [FAQ](#faq) for usage information. - -## API stability - -fsnotify is a fork of [howeyc/fsnotify](https://godoc.org/github.com/howeyc/fsnotify) with a new API as of v1.0. The API is based on [this design document](http://goo.gl/MrYxyA). - -All [releases](https://github.com/fsnotify/fsnotify/releases) are tagged based on [Semantic Versioning](http://semver.org/). Further API changes are [planned](https://github.com/fsnotify/fsnotify/milestones), and will be tagged with a new major revision number. - -Go 1.6 supports dependencies located in the `vendor/` folder. Unless you are creating a library, it is recommended that you copy fsnotify into `vendor/github.com/fsnotify/fsnotify` within your project, and likewise for `golang.org/x/sys`. 
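-
-A minimal sketch of that layout (illustrative only; assumes a GOPATH-style
-checkout, adjust the paths to your project):
-
-```console
-mkdir -p vendor/github.com/fsnotify
-cp -R "$GOPATH/src/github.com/fsnotify/fsnotify" vendor/github.com/fsnotify/
-```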
- -## Usage - -```go -package main - -import ( - "log" - - "github.com/fsnotify/fsnotify" -) - -func main() { - watcher, err := fsnotify.NewWatcher() - if err != nil { - log.Fatal(err) - } - defer watcher.Close() - - done := make(chan bool) - go func() { - for { - select { - case event, ok := <-watcher.Events: - if !ok { - return - } - log.Println("event:", event) - if event.Op&fsnotify.Write == fsnotify.Write { - log.Println("modified file:", event.Name) - } - case err, ok := <-watcher.Errors: - if !ok { - return - } - log.Println("error:", err) - } - } - }() - - err = watcher.Add("/tmp/foo") - if err != nil { - log.Fatal(err) - } - <-done -} -``` - -## Contributing - -Please refer to [CONTRIBUTING][] before opening an issue or pull request. - -## Example - -See [example_test.go](https://github.com/fsnotify/fsnotify/blob/master/example_test.go). - -## FAQ - -**When a file is moved to another directory is it still being watched?** - -No (it shouldn't be, unless you are watching where it was moved to). - -**When I watch a directory, are all subdirectories watched as well?** - -No, you must add watches for any directory you want to watch (a recursive watcher is on the roadmap [#18][]). - -**Do I have to watch the Error and Event channels in a separate goroutine?** - -As of now, yes. Looking into making this single-thread friendly (see [howeyc #7][#7]) - -**Why am I receiving multiple events for the same file on OS X?** - -Spotlight indexing on OS X can result in multiple events (see [howeyc #62][#62]). A temporary workaround is to add your folder(s) to the *Spotlight Privacy settings* until we have a native FSEvents implementation (see [#11][]). - -**How many files can be watched at once?** - -There are OS-specific limits as to how many watches can be created: -* Linux: /proc/sys/fs/inotify/max_user_watches contains the limit, reaching this limit results in a "no space left on device" error. -* BSD / OSX: sysctl variables "kern.maxfiles" and "kern.maxfilesperproc", reaching these limits results in a "too many open files" error. - -**Why don't notifications work with NFS filesystems or filesystem in userspace (FUSE)?** - -fsnotify requires support from underlying OS to work. The current NFS protocol does not provide network level support for file notifications. - -[#62]: https://github.com/howeyc/fsnotify/issues/62 -[#18]: https://github.com/fsnotify/fsnotify/issues/18 -[#11]: https://github.com/fsnotify/fsnotify/issues/11 -[#7]: https://github.com/howeyc/fsnotify/issues/7 - -[contributing]: https://github.com/fsnotify/fsnotify/blob/master/CONTRIBUTING.md - -## Related Projects - -* [notify](https://github.com/rjeczalik/notify) -* [fsevents](https://github.com/fsnotify/fsevents) - diff --git a/vendor/github.com/fsnotify/fsnotify/fen.go b/vendor/github.com/fsnotify/fsnotify/fen.go deleted file mode 100644 index b3ac3d8f..00000000 --- a/vendor/github.com/fsnotify/fsnotify/fen.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build solaris -// +build solaris - -package fsnotify - -import ( - "errors" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct { - Events chan Event - Errors chan error -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. 
-func NewWatcher() (*Watcher, error) { - return nil, errors.New("FEN based watcher not yet supported for fsnotify\n") -} - -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - return nil -} - -// Add starts watching the named file or directory (non-recursively). -func (w *Watcher) Add(name string) error { - return nil -} - -// Remove stops watching the the named file or directory (non-recursively). -func (w *Watcher) Remove(name string) error { - return nil -} diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go deleted file mode 100644 index 0f4ee52e..00000000 --- a/vendor/github.com/fsnotify/fsnotify/fsnotify.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !plan9 -// +build !plan9 - -// Package fsnotify provides a platform-independent interface for file system notifications. -package fsnotify - -import ( - "bytes" - "errors" - "fmt" -) - -// Event represents a single file system notification. -type Event struct { - Name string // Relative path to the file or directory. - Op Op // File operation that triggered the event. -} - -// Op describes a set of file operations. -type Op uint32 - -// These are the generalized file operations that can trigger a notification. -const ( - Create Op = 1 << iota - Write - Remove - Rename - Chmod -) - -func (op Op) String() string { - // Use a buffer for efficient string concatenation - var buffer bytes.Buffer - - if op&Create == Create { - buffer.WriteString("|CREATE") - } - if op&Remove == Remove { - buffer.WriteString("|REMOVE") - } - if op&Write == Write { - buffer.WriteString("|WRITE") - } - if op&Rename == Rename { - buffer.WriteString("|RENAME") - } - if op&Chmod == Chmod { - buffer.WriteString("|CHMOD") - } - if buffer.Len() == 0 { - return "" - } - return buffer.String()[1:] // Strip leading pipe -} - -// String returns a string representation of the event in the form -// "file: REMOVE|WRITE|..." -func (e Event) String() string { - return fmt.Sprintf("%q: %s", e.Name, e.Op.String()) -} - -// Common errors that can be reported by a watcher -var ( - ErrEventOverflow = errors.New("fsnotify queue overflow") -) diff --git a/vendor/github.com/fsnotify/fsnotify/inotify.go b/vendor/github.com/fsnotify/fsnotify/inotify.go deleted file mode 100644 index eb87699b..00000000 --- a/vendor/github.com/fsnotify/fsnotify/inotify.go +++ /dev/null @@ -1,338 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build linux -// +build linux - -package fsnotify - -import ( - "errors" - "fmt" - "io" - "os" - "path/filepath" - "strings" - "sync" - "unsafe" - - "golang.org/x/sys/unix" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct { - Events chan Event - Errors chan error - mu sync.Mutex // Map access - fd int - poller *fdPoller - watches map[string]*watch // Map of inotify watches (key: path) - paths map[int]string // Map of watched paths (key: watch descriptor) - done chan struct{} // Channel for sending a "quit message" to the reader goroutine - doneResp chan struct{} // Channel to respond to Close -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. 
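-//
-// A minimal usage sketch (mirrors the README example above; error handling
-// elided for brevity, and "/tmp" is a placeholder path):
-//
-//	w, _ := NewWatcher()
-//	defer w.Close()
-//	_ = w.Add("/tmp")
-//	event := <-w.Events // delivered by the readEvents goroutine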
-func NewWatcher() (*Watcher, error) { - // Create inotify fd - fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC) - if fd == -1 { - return nil, errno - } - // Create epoll - poller, err := newFdPoller(fd) - if err != nil { - unix.Close(fd) - return nil, err - } - w := &Watcher{ - fd: fd, - poller: poller, - watches: make(map[string]*watch), - paths: make(map[int]string), - Events: make(chan Event), - Errors: make(chan error), - done: make(chan struct{}), - doneResp: make(chan struct{}), - } - - go w.readEvents() - return w, nil -} - -func (w *Watcher) isClosed() bool { - select { - case <-w.done: - return true - default: - return false - } -} - -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - if w.isClosed() { - return nil - } - - // Send 'close' signal to goroutine, and set the Watcher to closed. - close(w.done) - - // Wake up goroutine - w.poller.wake() - - // Wait for goroutine to close - <-w.doneResp - - return nil -} - -// Add starts watching the named file or directory (non-recursively). -func (w *Watcher) Add(name string) error { - name = filepath.Clean(name) - if w.isClosed() { - return errors.New("inotify instance already closed") - } - - const agnosticEvents = unix.IN_MOVED_TO | unix.IN_MOVED_FROM | - unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY | - unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF - - var flags uint32 = agnosticEvents - - w.mu.Lock() - defer w.mu.Unlock() - watchEntry := w.watches[name] - if watchEntry != nil { - flags |= watchEntry.flags | unix.IN_MASK_ADD - } - wd, errno := unix.InotifyAddWatch(w.fd, name, flags) - if wd == -1 { - return errno - } - - if watchEntry == nil { - w.watches[name] = &watch{wd: uint32(wd), flags: flags} - w.paths[wd] = name - } else { - watchEntry.wd = uint32(wd) - watchEntry.flags = flags - } - - return nil -} - -// Remove stops watching the named file or directory (non-recursively). -func (w *Watcher) Remove(name string) error { - name = filepath.Clean(name) - - // Fetch the watch. - w.mu.Lock() - defer w.mu.Unlock() - watch, ok := w.watches[name] - - // Remove it from inotify. - if !ok { - return fmt.Errorf("can't remove non-existent inotify watch for: %s", name) - } - - // We successfully removed the watch if InotifyRmWatch doesn't return an - // error, we need to clean up our internal state to ensure it matches - // inotify's kernel state. - delete(w.paths, int(watch.wd)) - delete(w.watches, name) - - // inotify_rm_watch will return EINVAL if the file has been deleted; - // the inotify will already have been removed. - // watches and pathes are deleted in ignoreLinux() implicitly and asynchronously - // by calling inotify_rm_watch() below. e.g. readEvents() goroutine receives IN_IGNORE - // so that EINVAL means that the wd is being rm_watch()ed or its file removed - // by another thread and we have not received IN_IGNORE event. - success, errno := unix.InotifyRmWatch(w.fd, watch.wd) - if success == -1 { - // TODO: Perhaps it's not helpful to return an error here in every case. - // the only two possible errors are: - // EBADF, which happens when w.fd is not a valid file descriptor of any kind. - // EINVAL, which is when fd is not an inotify descriptor or wd is not a valid watch descriptor. - // Watch descriptors are invalidated when they are removed explicitly or implicitly; - // explicitly by inotify_rm_watch, implicitly when the file they are watching is deleted. 
- return errno - } - - return nil -} - -type watch struct { - wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) - flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) -} - -// readEvents reads from the inotify file descriptor, converts the -// received events into Event objects and sends them via the Events channel -func (w *Watcher) readEvents() { - var ( - buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events - n int // Number of bytes read with read() - errno error // Syscall errno - ok bool // For poller.wait - ) - - defer close(w.doneResp) - defer close(w.Errors) - defer close(w.Events) - defer unix.Close(w.fd) - defer w.poller.close() - - for { - // See if we have been closed. - if w.isClosed() { - return - } - - ok, errno = w.poller.wait() - if errno != nil { - select { - case w.Errors <- errno: - case <-w.done: - return - } - continue - } - - if !ok { - continue - } - - n, errno = unix.Read(w.fd, buf[:]) - // If a signal interrupted execution, see if we've been asked to close, and try again. - // http://man7.org/linux/man-pages/man7/signal.7.html : - // "Before Linux 3.8, reads from an inotify(7) file descriptor were not restartable" - if errno == unix.EINTR { - continue - } - - // unix.Read might have been woken up by Close. If so, we're done. - if w.isClosed() { - return - } - - if n < unix.SizeofInotifyEvent { - var err error - if n == 0 { - // If EOF is received. This should really never happen. - err = io.EOF - } else if n < 0 { - // If an error occurred while reading. - err = errno - } else { - // Read was too short. - err = errors.New("notify: short read in readEvents()") - } - select { - case w.Errors <- err: - case <-w.done: - return - } - continue - } - - var offset uint32 - // We don't know how many events we just read into the buffer - // While the offset points to at least one whole event... - for offset <= uint32(n-unix.SizeofInotifyEvent) { - // Point "raw" to the event in the buffer - raw := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset])) - - mask := uint32(raw.Mask) - nameLen := uint32(raw.Len) - - if mask&unix.IN_Q_OVERFLOW != 0 { - select { - case w.Errors <- ErrEventOverflow: - case <-w.done: - return - } - } - - // If the event happened to the watched directory or the watched file, the kernel - // doesn't append the filename to the event, but we would like to always fill the - // the "Name" field with a valid filename. We retrieve the path of the watch from - // the "paths" map. - w.mu.Lock() - name, ok := w.paths[int(raw.Wd)] - // IN_DELETE_SELF occurs when the file/directory being watched is removed. - // This is a sign to clean up the maps, otherwise we are no longer in sync - // with the inotify kernel state which has already deleted the watch - // automatically. - if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { - delete(w.paths, int(raw.Wd)) - delete(w.watches, name) - } - w.mu.Unlock() - - if nameLen > 0 { - // Point "bytes" at the first byte of the filename - bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen] - // The filename is padded with NULL bytes. TrimRight() gets rid of those. 
- name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") - } - - event := newEvent(name, mask) - - // Send the events that are not ignored on the events channel - if !event.ignoreLinux(mask) { - select { - case w.Events <- event: - case <-w.done: - return - } - } - - // Move to the next event in the buffer - offset += unix.SizeofInotifyEvent + nameLen - } - } -} - -// Certain types of events can be "ignored" and not sent over the Events -// channel. Such as events marked ignore by the kernel, or MODIFY events -// against files that do not exist. -func (e *Event) ignoreLinux(mask uint32) bool { - // Ignore anything the inotify API says to ignore - if mask&unix.IN_IGNORED == unix.IN_IGNORED { - return true - } - - // If the event is not a DELETE or RENAME, the file must exist. - // Otherwise the event is ignored. - // *Note*: this was put in place because it was seen that a MODIFY - // event was sent after the DELETE. This ignores that MODIFY and - // assumes a DELETE will come or has come if the file doesn't exist. - if !(e.Op&Remove == Remove || e.Op&Rename == Rename) { - _, statErr := os.Lstat(e.Name) - return os.IsNotExist(statErr) - } - return false -} - -// newEvent returns an platform-independent Event based on an inotify mask. -func newEvent(name string, mask uint32) Event { - e := Event{Name: name} - if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { - e.Op |= Create - } - if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE { - e.Op |= Remove - } - if mask&unix.IN_MODIFY == unix.IN_MODIFY { - e.Op |= Write - } - if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { - e.Op |= Rename - } - if mask&unix.IN_ATTRIB == unix.IN_ATTRIB { - e.Op |= Chmod - } - return e -} diff --git a/vendor/github.com/fsnotify/fsnotify/inotify_poller.go b/vendor/github.com/fsnotify/fsnotify/inotify_poller.go deleted file mode 100644 index e9ff9439..00000000 --- a/vendor/github.com/fsnotify/fsnotify/inotify_poller.go +++ /dev/null @@ -1,188 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build linux -// +build linux - -package fsnotify - -import ( - "errors" - - "golang.org/x/sys/unix" -) - -type fdPoller struct { - fd int // File descriptor (as returned by the inotify_init() syscall) - epfd int // Epoll file descriptor - pipe [2]int // Pipe for waking up -} - -func emptyPoller(fd int) *fdPoller { - poller := new(fdPoller) - poller.fd = fd - poller.epfd = -1 - poller.pipe[0] = -1 - poller.pipe[1] = -1 - return poller -} - -// Create a new inotify poller. -// This creates an inotify handler, and an epoll handler. -func newFdPoller(fd int) (*fdPoller, error) { - var errno error - poller := emptyPoller(fd) - defer func() { - if errno != nil { - poller.close() - } - }() - poller.fd = fd - - // Create epoll fd - poller.epfd, errno = unix.EpollCreate1(unix.EPOLL_CLOEXEC) - if poller.epfd == -1 { - return nil, errno - } - // Create pipe; pipe[0] is the read end, pipe[1] the write end. 
- errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK|unix.O_CLOEXEC) - if errno != nil { - return nil, errno - } - - // Register inotify fd with epoll - event := unix.EpollEvent{ - Fd: int32(poller.fd), - Events: unix.EPOLLIN, - } - errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.fd, &event) - if errno != nil { - return nil, errno - } - - // Register pipe fd with epoll - event = unix.EpollEvent{ - Fd: int32(poller.pipe[0]), - Events: unix.EPOLLIN, - } - errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.pipe[0], &event) - if errno != nil { - return nil, errno - } - - return poller, nil -} - -// Wait using epoll. -// Returns true if something is ready to be read, -// false if there is not. -func (poller *fdPoller) wait() (bool, error) { - // 3 possible events per fd, and 2 fds, makes a maximum of 6 events. - // I don't know whether epoll_wait returns the number of events returned, - // or the total number of events ready. - // I decided to catch both by making the buffer one larger than the maximum. - events := make([]unix.EpollEvent, 7) - for { - n, errno := unix.EpollWait(poller.epfd, events, -1) - if n == -1 { - if errno == unix.EINTR { - continue - } - return false, errno - } - if n == 0 { - // If there are no events, try again. - continue - } - if n > 6 { - // This should never happen. More events were returned than should be possible. - return false, errors.New("epoll_wait returned more events than I know what to do with") - } - ready := events[:n] - epollhup := false - epollerr := false - epollin := false - for _, event := range ready { - if event.Fd == int32(poller.fd) { - if event.Events&unix.EPOLLHUP != 0 { - // This should not happen, but if it does, treat it as a wakeup. - epollhup = true - } - if event.Events&unix.EPOLLERR != 0 { - // If an error is waiting on the file descriptor, we should pretend - // something is ready to read, and let unix.Read pick up the error. - epollerr = true - } - if event.Events&unix.EPOLLIN != 0 { - // There is data to read. - epollin = true - } - } - if event.Fd == int32(poller.pipe[0]) { - if event.Events&unix.EPOLLHUP != 0 { - // Write pipe descriptor was closed, by us. This means we're closing down the - // watcher, and we should wake up. - } - if event.Events&unix.EPOLLERR != 0 { - // If an error is waiting on the pipe file descriptor. - // This is an absolute mystery, and should never ever happen. - return false, errors.New("Error on the pipe descriptor.") - } - if event.Events&unix.EPOLLIN != 0 { - // This is a regular wakeup, so we have to clear the buffer. - err := poller.clearWake() - if err != nil { - return false, err - } - } - } - } - - if epollhup || epollerr || epollin { - return true, nil - } - return false, nil - } -} - -// Close the write end of the poller. -func (poller *fdPoller) wake() error { - buf := make([]byte, 1) - n, errno := unix.Write(poller.pipe[1], buf) - if n == -1 { - if errno == unix.EAGAIN { - // Buffer is full, poller will wake. - return nil - } - return errno - } - return nil -} - -func (poller *fdPoller) clearWake() error { - // You have to be woken up a LOT in order to get to 100! - buf := make([]byte, 100) - n, errno := unix.Read(poller.pipe[0], buf) - if n == -1 { - if errno == unix.EAGAIN { - // Buffer is empty, someone else cleared our wake. - return nil - } - return errno - } - return nil -} - -// Close all poller file descriptors, but not the one passed to it. 
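The wake/clearWake pair above is the classic self-pipe trick: epoll waits on the inotify fd plus the read end of a pipe, and whoever needs to interrupt the wait writes a byte to the write end. Stripped to its essentials (hypothetical helpers, Linux-only, tolerating the same EAGAIN cases the real code does):

```go
//go:build linux

package sketch

import "golang.org/x/sys/unix"

// newWakePipe returns a non-blocking pipe whose read end can be
// registered with epoll alongside the fd being watched. Writing a
// byte to w interrupts an epoll_wait blocked on r.
func newWakePipe() (r, w int, err error) {
	var p [2]int
	if err := unix.Pipe2(p[:], unix.O_NONBLOCK|unix.O_CLOEXEC); err != nil {
		return -1, -1, err
	}
	return p[0], p[1], nil
}

// wake pokes the pipe; a full pipe (EAGAIN) already guarantees a
// pending wakeup, so that error can be swallowed.
func wake(w int) error {
	if _, err := unix.Write(w, []byte{0}); err != nil && err != unix.EAGAIN {
		return err
	}
	return nil
}
```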
-func (poller *fdPoller) close() { - if poller.pipe[1] != -1 { - unix.Close(poller.pipe[1]) - } - if poller.pipe[0] != -1 { - unix.Close(poller.pipe[0]) - } - if poller.epfd != -1 { - unix.Close(poller.epfd) - } -} diff --git a/vendor/github.com/fsnotify/fsnotify/kqueue.go b/vendor/github.com/fsnotify/fsnotify/kqueue.go deleted file mode 100644 index 368f5b79..00000000 --- a/vendor/github.com/fsnotify/fsnotify/kqueue.go +++ /dev/null @@ -1,522 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build freebsd || openbsd || netbsd || dragonfly || darwin -// +build freebsd openbsd netbsd dragonfly darwin - -package fsnotify - -import ( - "errors" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "sync" - "time" - - "golang.org/x/sys/unix" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct { - Events chan Event - Errors chan error - done chan struct{} // Channel for sending a "quit message" to the reader goroutine - - kq int // File descriptor (as returned by the kqueue() syscall). - - mu sync.Mutex // Protects access to watcher data - watches map[string]int // Map of watched file descriptors (key: path). - externalWatches map[string]bool // Map of watches added by user of the library. - dirFlags map[string]uint32 // Map of watched directories to fflags used in kqueue. - paths map[int]pathInfo // Map file descriptors to path names for processing kqueue events. - fileExists map[string]bool // Keep track of if we know this file exists (to stop duplicate create events). - isClosed bool // Set to true when Close() is first called -} - -type pathInfo struct { - name string - isDir bool -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - kq, err := kqueue() - if err != nil { - return nil, err - } - - w := &Watcher{ - kq: kq, - watches: make(map[string]int), - dirFlags: make(map[string]uint32), - paths: make(map[int]pathInfo), - fileExists: make(map[string]bool), - externalWatches: make(map[string]bool), - Events: make(chan Event), - Errors: make(chan error), - done: make(chan struct{}), - } - - go w.readEvents() - return w, nil -} - -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() - return nil - } - w.isClosed = true - - // copy paths to remove while locked - var pathsToRemove = make([]string, 0, len(w.watches)) - for name := range w.watches { - pathsToRemove = append(pathsToRemove, name) - } - w.mu.Unlock() - // unlock before calling Remove, which also locks - - for _, name := range pathsToRemove { - w.Remove(name) - } - - // send a "quit" message to the reader goroutine - close(w.done) - - return nil -} - -// Add starts watching the named file or directory (non-recursively). -func (w *Watcher) Add(name string) error { - w.mu.Lock() - w.externalWatches[name] = true - w.mu.Unlock() - _, err := w.addWatch(name, noteAllEvents) - return err -} - -// Remove stops watching the the named file or directory (non-recursively). 
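kqueue inverts inotify's model: it watches file descriptors rather than paths, which is why the maps above key each watch by an open fd that must stay open for the watch's lifetime. Adding a single watch reduces to roughly the following sketch (Darwin-only; O_EVTONLY matches the openMode constant deleted further down):

```go
//go:build darwin

package sketch

import "golang.org/x/sys/unix"

// addVnodeWatch opens path and registers a vnode filter for it on kq.
// The returned fd must be kept open (and closed on Remove), mirroring
// the bookkeeping in the watches/paths maps above.
func addVnodeWatch(kq int, path string) (int, error) {
	fd, err := unix.Open(path, unix.O_EVTONLY|unix.O_CLOEXEC, 0700)
	if err != nil {
		return -1, err
	}
	var ev unix.Kevent_t
	unix.SetKevent(&ev, fd, unix.EVFILT_VNODE, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE)
	ev.Fflags = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME
	if _, err := unix.Kevent(kq, []unix.Kevent_t{ev}, nil, nil); err != nil {
		unix.Close(fd)
		return -1, err
	}
	return fd, nil
}
```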
-func (w *Watcher) Remove(name string) error { - name = filepath.Clean(name) - w.mu.Lock() - watchfd, ok := w.watches[name] - w.mu.Unlock() - if !ok { - return fmt.Errorf("can't remove non-existent kevent watch for: %s", name) - } - - const registerRemove = unix.EV_DELETE - if err := register(w.kq, []int{watchfd}, registerRemove, 0); err != nil { - return err - } - - unix.Close(watchfd) - - w.mu.Lock() - isDir := w.paths[watchfd].isDir - delete(w.watches, name) - delete(w.paths, watchfd) - delete(w.dirFlags, name) - w.mu.Unlock() - - // Find all watched paths that are in this directory that are not external. - if isDir { - var pathsToRemove []string - w.mu.Lock() - for _, path := range w.paths { - wdir, _ := filepath.Split(path.name) - if filepath.Clean(wdir) == name { - if !w.externalWatches[path.name] { - pathsToRemove = append(pathsToRemove, path.name) - } - } - } - w.mu.Unlock() - for _, name := range pathsToRemove { - // Since these are internal, not much sense in propagating error - // to the user, as that will just confuse them with an error about - // a path they did not explicitly watch themselves. - w.Remove(name) - } - } - - return nil -} - -// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) -const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME - -// keventWaitTime to block on each read from kevent -var keventWaitTime = durationToTimespec(100 * time.Millisecond) - -// addWatch adds name to the watched file set. -// The flags are interpreted as described in kevent(2). -// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks. -func (w *Watcher) addWatch(name string, flags uint32) (string, error) { - var isDir bool - // Make ./name and name equivalent - name = filepath.Clean(name) - - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() - return "", errors.New("kevent instance already closed") - } - watchfd, alreadyWatching := w.watches[name] - // We already have a watch, but we can still override flags. - if alreadyWatching { - isDir = w.paths[watchfd].isDir - } - w.mu.Unlock() - - if !alreadyWatching { - fi, err := os.Lstat(name) - if err != nil { - return "", err - } - - // Don't watch sockets. - if fi.Mode()&os.ModeSocket == os.ModeSocket { - return "", nil - } - - // Don't watch named pipes. - if fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe { - return "", nil - } - - // Follow Symlinks - // Unfortunately, Linux can add bogus symlinks to watch list without - // issue, and Windows can't do symlinks period (AFAIK). To maintain - // consistency, we will act like everything is fine. There will simply - // be no file events for broken symlinks. - // Hence the returns of nil on errors. 
- if fi.Mode()&os.ModeSymlink == os.ModeSymlink { - name, err = filepath.EvalSymlinks(name) - if err != nil { - return "", nil - } - - w.mu.Lock() - _, alreadyWatching = w.watches[name] - w.mu.Unlock() - - if alreadyWatching { - return name, nil - } - - fi, err = os.Lstat(name) - if err != nil { - return "", nil - } - } - - watchfd, err = unix.Open(name, openMode, 0700) - if watchfd == -1 { - return "", err - } - - isDir = fi.IsDir() - } - - const registerAdd = unix.EV_ADD | unix.EV_CLEAR | unix.EV_ENABLE - if err := register(w.kq, []int{watchfd}, registerAdd, flags); err != nil { - unix.Close(watchfd) - return "", err - } - - if !alreadyWatching { - w.mu.Lock() - w.watches[name] = watchfd - w.paths[watchfd] = pathInfo{name: name, isDir: isDir} - w.mu.Unlock() - } - - if isDir { - // Watch the directory if it has not been watched before, - // or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) - w.mu.Lock() - - watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE && - (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE) - // Store flags so this watch can be updated later - w.dirFlags[name] = flags - w.mu.Unlock() - - if watchDir { - if err := w.watchDirectoryFiles(name); err != nil { - return "", err - } - } - } - return name, nil -} - -// readEvents reads from kqueue and converts the received kevents into -// Event values that it sends down the Events channel. -func (w *Watcher) readEvents() { - eventBuffer := make([]unix.Kevent_t, 10) - -loop: - for { - // See if there is a message on the "done" channel - select { - case <-w.done: - break loop - default: - } - - // Get new events - kevents, err := read(w.kq, eventBuffer, &keventWaitTime) - // EINTR is okay, the syscall was interrupted before timeout expired. - if err != nil && err != unix.EINTR { - select { - case w.Errors <- err: - case <-w.done: - break loop - } - continue - } - - // Flush the events we received to the Events channel - for len(kevents) > 0 { - kevent := &kevents[0] - watchfd := int(kevent.Ident) - mask := uint32(kevent.Fflags) - w.mu.Lock() - path := w.paths[watchfd] - w.mu.Unlock() - event := newEvent(path.name, mask) - - if path.isDir && !(event.Op&Remove == Remove) { - // Double check to make sure the directory exists. This can happen when - // we do a rm -fr on a recursively watched folders and we receive a - // modification event first but the folder has been deleted and later - // receive the delete event - if _, err := os.Lstat(event.Name); os.IsNotExist(err) { - // mark is as delete event - event.Op |= Remove - } - } - - if event.Op&Rename == Rename || event.Op&Remove == Remove { - w.Remove(event.Name) - w.mu.Lock() - delete(w.fileExists, event.Name) - w.mu.Unlock() - } - - if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) { - w.sendDirectoryChangeEvents(event.Name) - } else { - // Send the event on the Events channel. - select { - case w.Events <- event: - case <-w.done: - break loop - } - } - - if event.Op&Remove == Remove { - // Look for a file that may have overwritten this. - // For example, mv f1 f2 will delete f2, then create f2. - if path.isDir { - fileDir := filepath.Clean(event.Name) - w.mu.Lock() - _, found := w.watches[fileDir] - w.mu.Unlock() - if found { - // make sure the directory exists before we watch for changes. 
When we - // do a recursive watch and perform rm -fr, the parent directory might - // have gone missing, ignore the missing directory and let the - // upcoming delete event remove the watch from the parent directory. - if _, err := os.Lstat(fileDir); err == nil { - w.sendDirectoryChangeEvents(fileDir) - } - } - } else { - filePath := filepath.Clean(event.Name) - if fileInfo, err := os.Lstat(filePath); err == nil { - w.sendFileCreatedEventIfNew(filePath, fileInfo) - } - } - } - - // Move to next event - kevents = kevents[1:] - } - } - - // cleanup - err := unix.Close(w.kq) - if err != nil { - // only way the previous loop breaks is if w.done was closed so we need to async send to w.Errors. - select { - case w.Errors <- err: - default: - } - } - close(w.Events) - close(w.Errors) -} - -// newEvent returns an platform-independent Event based on kqueue Fflags. -func newEvent(name string, mask uint32) Event { - e := Event{Name: name} - if mask&unix.NOTE_DELETE == unix.NOTE_DELETE { - e.Op |= Remove - } - if mask&unix.NOTE_WRITE == unix.NOTE_WRITE { - e.Op |= Write - } - if mask&unix.NOTE_RENAME == unix.NOTE_RENAME { - e.Op |= Rename - } - if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB { - e.Op |= Chmod - } - return e -} - -func newCreateEvent(name string) Event { - return Event{Name: name, Op: Create} -} - -// watchDirectoryFiles to mimic inotify when adding a watch on a directory -func (w *Watcher) watchDirectoryFiles(dirPath string) error { - // Get all files - files, err := ioutil.ReadDir(dirPath) - if err != nil { - return err - } - - for _, fileInfo := range files { - filePath := filepath.Join(dirPath, fileInfo.Name()) - filePath, err = w.internalWatch(filePath, fileInfo) - if err != nil { - return err - } - - w.mu.Lock() - w.fileExists[filePath] = true - w.mu.Unlock() - } - - return nil -} - -// sendDirectoryEvents searches the directory for newly created files -// and sends them over the event channel. This functionality is to have -// the BSD version of fsnotify match Linux inotify which provides a -// create event for files created in a watched directory. -func (w *Watcher) sendDirectoryChangeEvents(dirPath string) { - // Get all files - files, err := ioutil.ReadDir(dirPath) - if err != nil { - select { - case w.Errors <- err: - case <-w.done: - return - } - } - - // Search for new files - for _, fileInfo := range files { - filePath := filepath.Join(dirPath, fileInfo.Name()) - err := w.sendFileCreatedEventIfNew(filePath, fileInfo) - - if err != nil { - return - } - } -} - -// sendFileCreatedEvent sends a create event if the file isn't already being tracked. 
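A directory NOTE_WRITE only says that something in the directory changed, so the create events inotify gives for free are synthesized here by re-listing the directory and diffing against the fileExists set. The heart of that strategy in isolation (a sketch using os.ReadDir, where this deleted code still used ioutil.ReadDir):

```go
package sketch

import (
	"os"
	"path/filepath"
)

// newPaths re-lists dir and returns entries not yet in known, marking
// them as seen. This is the diff at the heart of
// sendDirectoryChangeEvents/sendFileCreatedEventIfNew above.
func newPaths(dir string, known map[string]bool) ([]string, error) {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return nil, err
	}
	var created []string
	for _, e := range entries {
		p := filepath.Join(dir, e.Name())
		if !known[p] {
			known[p] = true
			created = append(created, p)
		}
	}
	return created, nil
}
```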
-func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) { - w.mu.Lock() - _, doesExist := w.fileExists[filePath] - w.mu.Unlock() - if !doesExist { - // Send create event - select { - case w.Events <- newCreateEvent(filePath): - case <-w.done: - return - } - } - - // like watchDirectoryFiles (but without doing another ReadDir) - filePath, err = w.internalWatch(filePath, fileInfo) - if err != nil { - return err - } - - w.mu.Lock() - w.fileExists[filePath] = true - w.mu.Unlock() - - return nil -} - -func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) { - if fileInfo.IsDir() { - // mimic Linux providing delete events for subdirectories - // but preserve the flags used if currently watching subdirectory - w.mu.Lock() - flags := w.dirFlags[name] - w.mu.Unlock() - - flags |= unix.NOTE_DELETE | unix.NOTE_RENAME - return w.addWatch(name, flags) - } - - // watch file to mimic Linux inotify - return w.addWatch(name, noteAllEvents) -} - -// kqueue creates a new kernel event queue and returns a descriptor. -func kqueue() (kq int, err error) { - kq, err = unix.Kqueue() - if kq == -1 { - return kq, err - } - return kq, nil -} - -// register events with the queue -func register(kq int, fds []int, flags int, fflags uint32) error { - changes := make([]unix.Kevent_t, len(fds)) - - for i, fd := range fds { - // SetKevent converts int to the platform-specific types: - unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags) - changes[i].Fflags = fflags - } - - // register the events - success, err := unix.Kevent(kq, changes, nil, nil) - if success == -1 { - return err - } - return nil -} - -// read retrieves pending events, or waits until an event occurs. -// A timeout of nil blocks indefinitely, while 0 polls the queue. -func read(kq int, events []unix.Kevent_t, timeout *unix.Timespec) ([]unix.Kevent_t, error) { - n, err := unix.Kevent(kq, nil, events, timeout) - if err != nil { - return nil, err - } - return events[0:n], nil -} - -// durationToTimespec prepares a timeout value -func durationToTimespec(d time.Duration) unix.Timespec { - return unix.NsecToTimespec(d.Nanoseconds()) -} diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go b/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go deleted file mode 100644 index 36cc3845..00000000 --- a/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build freebsd || openbsd || netbsd || dragonfly -// +build freebsd openbsd netbsd dragonfly - -package fsnotify - -import "golang.org/x/sys/unix" - -const openMode = unix.O_NONBLOCK | unix.O_RDONLY | unix.O_CLOEXEC diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go b/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go deleted file mode 100644 index 98cd8476..00000000 --- a/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build darwin -// +build darwin - -package fsnotify - -import "golang.org/x/sys/unix" - -// note: this constant is not defined on BSD -const openMode = unix.O_EVTONLY | unix.O_CLOEXEC diff --git a/vendor/github.com/fsnotify/fsnotify/windows.go b/vendor/github.com/fsnotify/fsnotify/windows.go deleted file mode 100644 index c02b75f7..00000000 --- a/vendor/github.com/fsnotify/fsnotify/windows.go +++ /dev/null @@ -1,562 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build windows -// +build windows - -package fsnotify - -import ( - "errors" - "fmt" - "os" - "path/filepath" - "runtime" - "sync" - "syscall" - "unsafe" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct { - Events chan Event - Errors chan error - isClosed bool // Set to true when Close() is first called - mu sync.Mutex // Map access - port syscall.Handle // Handle to completion port - watches watchMap // Map of watches (key: i-number) - input chan *input // Inputs to the reader are sent on this channel - quit chan chan<- error -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0) - if e != nil { - return nil, os.NewSyscallError("CreateIoCompletionPort", e) - } - w := &Watcher{ - port: port, - watches: make(watchMap), - input: make(chan *input, 1), - Events: make(chan Event, 50), - Errors: make(chan error), - quit: make(chan chan<- error, 1), - } - go w.readEvents() - return w, nil -} - -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - if w.isClosed { - return nil - } - w.isClosed = true - - // Send "quit" message to the reader goroutine - ch := make(chan error) - w.quit <- ch - if err := w.wakeupReader(); err != nil { - return err - } - return <-ch -} - -// Add starts watching the named file or directory (non-recursively). -func (w *Watcher) Add(name string) error { - if w.isClosed { - return errors.New("watcher already closed") - } - in := &input{ - op: opAddWatch, - path: filepath.Clean(name), - flags: sysFSALLEVENTS, - reply: make(chan error), - } - w.input <- in - if err := w.wakeupReader(); err != nil { - return err - } - return <-in.reply -} - -// Remove stops watching the the named file or directory (non-recursively). 
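The Windows backend funnels all watch bookkeeping through a single I/O thread: Add above (and Remove just below) only marshal a request onto the input channel, kick GetQueuedCompletionStatus awake via PostQueuedCompletionStatus, and block on a per-request reply channel. The shape of that pattern, with hypothetical types standing in for the real ones:

```go
package sketch

// request is what Add/Remove enqueue for the I/O thread.
type request struct {
	path  string
	reply chan error
}

type ioThread struct {
	input  chan *request
	wakeup func() error // e.g. PostQueuedCompletionStatus in the real code
}

// submit hands a request to the I/O thread and blocks for its answer,
// mirroring how Watcher.Add and Watcher.Remove behave above.
func (t *ioThread) submit(path string) error {
	req := &request{path: path, reply: make(chan error)}
	t.input <- req
	if err := t.wakeup(); err != nil {
		return err
	}
	return <-req.reply
}
```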
-func (w *Watcher) Remove(name string) error { - in := &input{ - op: opRemoveWatch, - path: filepath.Clean(name), - reply: make(chan error), - } - w.input <- in - if err := w.wakeupReader(); err != nil { - return err - } - return <-in.reply -} - -const ( - // Options for AddWatch - sysFSONESHOT = 0x80000000 - sysFSONLYDIR = 0x1000000 - - // Events - sysFSACCESS = 0x1 - sysFSALLEVENTS = 0xfff - sysFSATTRIB = 0x4 - sysFSCLOSE = 0x18 - sysFSCREATE = 0x100 - sysFSDELETE = 0x200 - sysFSDELETESELF = 0x400 - sysFSMODIFY = 0x2 - sysFSMOVE = 0xc0 - sysFSMOVEDFROM = 0x40 - sysFSMOVEDTO = 0x80 - sysFSMOVESELF = 0x800 - - // Special events - sysFSIGNORED = 0x8000 - sysFSQOVERFLOW = 0x4000 -) - -func newEvent(name string, mask uint32) Event { - e := Event{Name: name} - if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO { - e.Op |= Create - } - if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF { - e.Op |= Remove - } - if mask&sysFSMODIFY == sysFSMODIFY { - e.Op |= Write - } - if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM { - e.Op |= Rename - } - if mask&sysFSATTRIB == sysFSATTRIB { - e.Op |= Chmod - } - return e -} - -const ( - opAddWatch = iota - opRemoveWatch -) - -const ( - provisional uint64 = 1 << (32 + iota) -) - -type input struct { - op int - path string - flags uint32 - reply chan error -} - -type inode struct { - handle syscall.Handle - volume uint32 - index uint64 -} - -type watch struct { - ov syscall.Overlapped - ino *inode // i-number - path string // Directory path - mask uint64 // Directory itself is being watched with these notify flags - names map[string]uint64 // Map of names being watched and their notify flags - rename string // Remembers the old name while renaming a file - buf [4096]byte -} - -type indexMap map[uint64]*watch -type watchMap map[uint32]indexMap - -func (w *Watcher) wakeupReader() error { - e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil) - if e != nil { - return os.NewSyscallError("PostQueuedCompletionStatus", e) - } - return nil -} - -func getDir(pathname string) (dir string, err error) { - attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname)) - if e != nil { - return "", os.NewSyscallError("GetFileAttributes", e) - } - if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 { - dir = pathname - } else { - dir, _ = filepath.Split(pathname) - dir = filepath.Clean(dir) - } - return -} - -func getIno(path string) (ino *inode, err error) { - h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path), - syscall.FILE_LIST_DIRECTORY, - syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, - nil, syscall.OPEN_EXISTING, - syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0) - if e != nil { - return nil, os.NewSyscallError("CreateFile", e) - } - var fi syscall.ByHandleFileInformation - if e = syscall.GetFileInformationByHandle(h, &fi); e != nil { - syscall.CloseHandle(h) - return nil, os.NewSyscallError("GetFileInformationByHandle", e) - } - ino = &inode{ - handle: h, - volume: fi.VolumeSerialNumber, - index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow), - } - return ino, nil -} - -// Must run within the I/O thread. -func (m watchMap) get(ino *inode) *watch { - if i := m[ino.volume]; i != nil { - return i[ino.index] - } - return nil -} - -// Must run within the I/O thread. 
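getIno above is the Windows analogue of reading a (device, inode) pair: watchMap is keyed first by volume serial number, then by the 64-bit file index reassembled from its two halves. That reassembly in isolation (Windows-only sketch):

```go
//go:build windows

package sketch

import "syscall"

// fileIndex rebuilds the 64-bit NTFS file index from the two 32-bit
// halves returned by GetFileInformationByHandle, as getIno does above.
func fileIndex(fi *syscall.ByHandleFileInformation) uint64 {
	return uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow)
}
```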
-func (m watchMap) set(ino *inode, watch *watch) { - i := m[ino.volume] - if i == nil { - i = make(indexMap) - m[ino.volume] = i - } - i[ino.index] = watch -} - -// Must run within the I/O thread. -func (w *Watcher) addWatch(pathname string, flags uint64) error { - dir, err := getDir(pathname) - if err != nil { - return err - } - if flags&sysFSONLYDIR != 0 && pathname != dir { - return nil - } - ino, err := getIno(dir) - if err != nil { - return err - } - w.mu.Lock() - watchEntry := w.watches.get(ino) - w.mu.Unlock() - if watchEntry == nil { - if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil { - syscall.CloseHandle(ino.handle) - return os.NewSyscallError("CreateIoCompletionPort", e) - } - watchEntry = &watch{ - ino: ino, - path: dir, - names: make(map[string]uint64), - } - w.mu.Lock() - w.watches.set(ino, watchEntry) - w.mu.Unlock() - flags |= provisional - } else { - syscall.CloseHandle(ino.handle) - } - if pathname == dir { - watchEntry.mask |= flags - } else { - watchEntry.names[filepath.Base(pathname)] |= flags - } - if err = w.startRead(watchEntry); err != nil { - return err - } - if pathname == dir { - watchEntry.mask &= ^provisional - } else { - watchEntry.names[filepath.Base(pathname)] &= ^provisional - } - return nil -} - -// Must run within the I/O thread. -func (w *Watcher) remWatch(pathname string) error { - dir, err := getDir(pathname) - if err != nil { - return err - } - ino, err := getIno(dir) - if err != nil { - return err - } - w.mu.Lock() - watch := w.watches.get(ino) - w.mu.Unlock() - if watch == nil { - return fmt.Errorf("can't remove non-existent watch for: %s", pathname) - } - if pathname == dir { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) - watch.mask = 0 - } else { - name := filepath.Base(pathname) - w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED) - delete(watch.names, name) - } - return w.startRead(watch) -} - -// Must run within the I/O thread. -func (w *Watcher) deleteWatch(watch *watch) { - for name, mask := range watch.names { - if mask&provisional == 0 { - w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED) - } - delete(watch.names, name) - } - if watch.mask != 0 { - if watch.mask&provisional == 0 { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) - } - watch.mask = 0 - } -} - -// Must run within the I/O thread. 
-func (w *Watcher) startRead(watch *watch) error { - if e := syscall.CancelIo(watch.ino.handle); e != nil { - w.Errors <- os.NewSyscallError("CancelIo", e) - w.deleteWatch(watch) - } - mask := toWindowsFlags(watch.mask) - for _, m := range watch.names { - mask |= toWindowsFlags(m) - } - if mask == 0 { - if e := syscall.CloseHandle(watch.ino.handle); e != nil { - w.Errors <- os.NewSyscallError("CloseHandle", e) - } - w.mu.Lock() - delete(w.watches[watch.ino.volume], watch.ino.index) - w.mu.Unlock() - return nil - } - e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0], - uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0) - if e != nil { - err := os.NewSyscallError("ReadDirectoryChanges", e) - if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { - // Watched directory was probably removed - if w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) { - if watch.mask&sysFSONESHOT != 0 { - watch.mask = 0 - } - } - err = nil - } - w.deleteWatch(watch) - w.startRead(watch) - return err - } - return nil -} - -// readEvents reads from the I/O completion port, converts the -// received events into Event objects and sends them via the Events channel. -// Entry point to the I/O thread. -func (w *Watcher) readEvents() { - var ( - n, key uint32 - ov *syscall.Overlapped - ) - runtime.LockOSThread() - - for { - e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE) - watch := (*watch)(unsafe.Pointer(ov)) - - if watch == nil { - select { - case ch := <-w.quit: - w.mu.Lock() - var indexes []indexMap - for _, index := range w.watches { - indexes = append(indexes, index) - } - w.mu.Unlock() - for _, index := range indexes { - for _, watch := range index { - w.deleteWatch(watch) - w.startRead(watch) - } - } - var err error - if e := syscall.CloseHandle(w.port); e != nil { - err = os.NewSyscallError("CloseHandle", e) - } - close(w.Events) - close(w.Errors) - ch <- err - return - case in := <-w.input: - switch in.op { - case opAddWatch: - in.reply <- w.addWatch(in.path, uint64(in.flags)) - case opRemoveWatch: - in.reply <- w.remWatch(in.path) - } - default: - } - continue - } - - switch e { - case syscall.ERROR_MORE_DATA: - if watch == nil { - w.Errors <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer") - } else { - // The i/o succeeded but the buffer is full. - // In theory we should be building up a full packet. - // In practice we can get away with just carrying on. 
- n = uint32(unsafe.Sizeof(watch.buf)) - } - case syscall.ERROR_ACCESS_DENIED: - // Watched directory was probably removed - w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) - w.deleteWatch(watch) - w.startRead(watch) - continue - case syscall.ERROR_OPERATION_ABORTED: - // CancelIo was called on this handle - continue - default: - w.Errors <- os.NewSyscallError("GetQueuedCompletionPort", e) - continue - case nil: - } - - var offset uint32 - for { - if n == 0 { - w.Events <- newEvent("", sysFSQOVERFLOW) - w.Errors <- errors.New("short read in readEvents()") - break - } - - // Point "raw" to the event in the buffer - raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset])) - buf := (*[syscall.MAX_PATH]uint16)(unsafe.Pointer(&raw.FileName)) - name := syscall.UTF16ToString(buf[:raw.FileNameLength/2]) - fullname := filepath.Join(watch.path, name) - - var mask uint64 - switch raw.Action { - case syscall.FILE_ACTION_REMOVED: - mask = sysFSDELETESELF - case syscall.FILE_ACTION_MODIFIED: - mask = sysFSMODIFY - case syscall.FILE_ACTION_RENAMED_OLD_NAME: - watch.rename = name - case syscall.FILE_ACTION_RENAMED_NEW_NAME: - if watch.names[watch.rename] != 0 { - watch.names[name] |= watch.names[watch.rename] - delete(watch.names, watch.rename) - mask = sysFSMOVESELF - } - } - - sendNameEvent := func() { - if w.sendEvent(fullname, watch.names[name]&mask) { - if watch.names[name]&sysFSONESHOT != 0 { - delete(watch.names, name) - } - } - } - if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME { - sendNameEvent() - } - if raw.Action == syscall.FILE_ACTION_REMOVED { - w.sendEvent(fullname, watch.names[name]&sysFSIGNORED) - delete(watch.names, name) - } - if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) { - if watch.mask&sysFSONESHOT != 0 { - watch.mask = 0 - } - } - if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME { - fullname = filepath.Join(watch.path, watch.rename) - sendNameEvent() - } - - // Move to the next event in the buffer - if raw.NextEntryOffset == 0 { - break - } - offset += raw.NextEntryOffset - - // Error! 
- if offset >= n { - w.Errors <- errors.New("Windows system assumed buffer larger than it is, events have likely been missed.") - break - } - } - - if err := w.startRead(watch); err != nil { - w.Errors <- err - } - } -} - -func (w *Watcher) sendEvent(name string, mask uint64) bool { - if mask == 0 { - return false - } - event := newEvent(name, uint32(mask)) - select { - case ch := <-w.quit: - w.quit <- ch - case w.Events <- event: - } - return true -} - -func toWindowsFlags(mask uint64) uint32 { - var m uint32 - if mask&sysFSACCESS != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS - } - if mask&sysFSMODIFY != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE - } - if mask&sysFSATTRIB != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES - } - if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME - } - return m -} - -func toFSnotifyFlags(action uint32) uint64 { - switch action { - case syscall.FILE_ACTION_ADDED: - return sysFSCREATE - case syscall.FILE_ACTION_REMOVED: - return sysFSDELETE - case syscall.FILE_ACTION_MODIFIED: - return sysFSMODIFY - case syscall.FILE_ACTION_RENAMED_OLD_NAME: - return sysFSMOVEDFROM - case syscall.FILE_ACTION_RENAMED_NEW_NAME: - return sysFSMOVEDTO - } - return 0 -} diff --git a/vendor/github.com/go-kit/kit/LICENSE b/vendor/github.com/go-kit/kit/LICENSE deleted file mode 100644 index 9d83342a..00000000 --- a/vendor/github.com/go-kit/kit/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Peter Bourgon - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - diff --git a/vendor/github.com/go-kit/kit/log/README.md b/vendor/github.com/go-kit/kit/log/README.md deleted file mode 100644 index a201a3d9..00000000 --- a/vendor/github.com/go-kit/kit/log/README.md +++ /dev/null @@ -1,151 +0,0 @@ -# package log - -`package log` provides a minimal interface for structured logging in services. -It may be wrapped to encode conventions, enforce type-safety, provide leveled -logging, and so on. It can be used for both typical application log events, -and log-structured data streams. - -## Structured logging - -Structured logging is, basically, conceding to the reality that logs are -_data_, and warrant some level of schematic rigor. Using a stricter, -key/value-oriented message format for our logs, containing contextual and -semantic information, makes it much easier to get insight into the -operational activity of the systems we build. 
Consequently, `package log` is -of the strong belief that "[the benefits of structured logging outweigh the -minimal effort involved](https://www.thoughtworks.com/radar/techniques/structured-logging)". - -Migrating from unstructured to structured logging is probably a lot easier -than you'd expect. - -```go -// Unstructured -log.Printf("HTTP server listening on %s", addr) - -// Structured -logger.Log("transport", "HTTP", "addr", addr, "msg", "listening") -``` - -## Usage - -### Typical application logging - -```go -w := log.NewSyncWriter(os.Stderr) -logger := log.NewLogfmtLogger(w) -logger.Log("question", "what is the meaning of life?", "answer", 42) - -// Output: -// question="what is the meaning of life?" answer=42 -``` - -### Contextual Loggers - -```go -func main() { - var logger log.Logger - logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) - logger = log.With(logger, "instance_id", 123) - - logger.Log("msg", "starting") - NewWorker(log.With(logger, "component", "worker")).Run() - NewSlacker(log.With(logger, "component", "slacker")).Run() -} - -// Output: -// instance_id=123 msg=starting -// instance_id=123 component=worker msg=running -// instance_id=123 component=slacker msg=running -``` - -### Interact with stdlib logger - -Redirect stdlib logger to Go kit logger. - -```go -import ( - "os" - stdlog "log" - kitlog "github.com/go-kit/kit/log" -) - -func main() { - logger := kitlog.NewJSONLogger(kitlog.NewSyncWriter(os.Stdout)) - stdlog.SetOutput(kitlog.NewStdlibAdapter(logger)) - stdlog.Print("I sure like pie") -} - -// Output: -// {"msg":"I sure like pie","ts":"2016/01/01 12:34:56"} -``` - -Or, if, for legacy reasons, you need to pipe all of your logging through the -stdlib log package, you can redirect Go kit logger to the stdlib logger. - -```go -logger := kitlog.NewLogfmtLogger(kitlog.StdlibWriter{}) -logger.Log("legacy", true, "msg", "at least it's something") - -// Output: -// 2016/01/01 12:34:56 legacy=true msg="at least it's something" -``` - -### Timestamps and callers - -```go -var logger log.Logger -logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) -logger = log.With(logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller) - -logger.Log("msg", "hello") - -// Output: -// ts=2016-01-01T12:34:56Z caller=main.go:15 msg=hello -``` - -## Levels - -Log levels are supported via the [level package](https://godoc.org/github.com/go-kit/kit/log/level). - -## Supported output formats - -- [Logfmt](https://brandur.org/logfmt) ([see also](https://blog.codeship.com/logfmt-a-log-format-thats-easy-to-read-and-write)) -- JSON - -## Enhancements - -`package log` is centered on the one-method Logger interface. - -```go -type Logger interface { - Log(keyvals ...interface{}) error -} -``` - -This interface, and its supporting code like is the product of much iteration -and evaluation. For more details on the evolution of the Logger interface, -see [The Hunt for a Logger Interface](http://go-talks.appspot.com/github.com/ChrisHines/talks/structured-logging/structured-logging.slide#1), -a talk by [Chris Hines](https://github.com/ChrisHines). -Also, please see -[#63](https://github.com/go-kit/kit/issues/63), -[#76](https://github.com/go-kit/kit/pull/76), -[#131](https://github.com/go-kit/kit/issues/131), -[#157](https://github.com/go-kit/kit/pull/157), -[#164](https://github.com/go-kit/kit/issues/164), and -[#252](https://github.com/go-kit/kit/pull/252) -to review historical conversations about package log and the Logger interface. 
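Because Logger is a one-method interface, a decorator costs one closure; that is what makes the composability asked of proposals below cheap in practice. An illustrative wrapper that stamps every event with an application name, using the package's own LoggerFunc adapter:

```go
package sketch

import (
	"os"

	"github.com/go-kit/kit/log"
)

// withApp prepends an "app" key to every event. Because Logger is a
// one-method interface, the decorator is a closure via log.LoggerFunc.
func withApp(next log.Logger, app string) log.Logger {
	return log.LoggerFunc(func(keyvals ...interface{}) error {
		return next.Log(append([]interface{}{"app", app}, keyvals...)...)
	})
}

func Example() {
	logger := withApp(log.NewLogfmtLogger(os.Stdout), "candy")
	logger.Log("msg", "hello")
	// Output: app=candy msg=hello
}
```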
- -Value-add packages and suggestions, -like improvements to [the leveled logger](https://godoc.org/github.com/go-kit/kit/log/level), -are of course welcome. Good proposals should - -- Be composable with [contextual loggers](https://godoc.org/github.com/go-kit/kit/log#With), -- Not break the behavior of [log.Caller](https://godoc.org/github.com/go-kit/kit/log#Caller) in any wrapped contextual loggers, and -- Be friendly to packages that accept only an unadorned log.Logger. - -## Benchmarks & comparisons - -There are a few Go logging benchmarks and comparisons that include Go kit's package log. - -- [imkira/go-loggers-bench](https://github.com/imkira/go-loggers-bench) includes kit/log -- [uber-common/zap](https://github.com/uber-common/zap), a zero-alloc logging library, includes a comparison with kit/log diff --git a/vendor/github.com/go-kit/kit/log/doc.go b/vendor/github.com/go-kit/kit/log/doc.go deleted file mode 100644 index 918c0af4..00000000 --- a/vendor/github.com/go-kit/kit/log/doc.go +++ /dev/null @@ -1,116 +0,0 @@ -// Package log provides a structured logger. -// -// Structured logging produces logs easily consumed later by humans or -// machines. Humans might be interested in debugging errors, or tracing -// specific requests. Machines might be interested in counting interesting -// events, or aggregating information for off-line processing. In both cases, -// it is important that the log messages are structured and actionable. -// Package log is designed to encourage both of these best practices. -// -// Basic Usage -// -// The fundamental interface is Logger. Loggers create log events from -// key/value data. The Logger interface has a single method, Log, which -// accepts a sequence of alternating key/value pairs, which this package names -// keyvals. -// -// type Logger interface { -// Log(keyvals ...interface{}) error -// } -// -// Here is an example of a function using a Logger to create log events. -// -// func RunTask(task Task, logger log.Logger) string { -// logger.Log("taskID", task.ID, "event", "starting task") -// ... -// logger.Log("taskID", task.ID, "event", "task complete") -// } -// -// The keys in the above example are "taskID" and "event". The values are -// task.ID, "starting task", and "task complete". Every key is followed -// immediately by its value. -// -// Keys are usually plain strings. Values may be any type that has a sensible -// encoding in the chosen log format. With structured logging it is a good -// idea to log simple values without formatting them. This practice allows -// the chosen logger to encode values in the most appropriate way. -// -// Contextual Loggers -// -// A contextual logger stores keyvals that it includes in all log events. -// Building appropriate contextual loggers reduces repetition and aids -// consistency in the resulting log output. With and WithPrefix add context to -// a logger. We can use With to improve the RunTask example. -// -// func RunTask(task Task, logger log.Logger) string { -// logger = log.With(logger, "taskID", task.ID) -// logger.Log("event", "starting task") -// ... -// taskHelper(task.Cmd, logger) -// ... -// logger.Log("event", "task complete") -// } -// -// The improved version emits the same log events as the original for the -// first and last calls to Log. Passing the contextual logger to taskHelper -// enables each log event created by taskHelper to include the task.ID even -// though taskHelper does not have access to that value. 
Using contextual -// loggers this way simplifies producing log output that enables tracing the -// life cycle of individual tasks. (See the Contextual example for the full -// code of the above snippet.) -// -// Dynamic Contextual Values -// -// A Valuer function stored in a contextual logger generates a new value each -// time an event is logged. The Valuer example demonstrates how this feature -// works. -// -// Valuers provide the basis for consistently logging timestamps and source -// code location. The log package defines several valuers for that purpose. -// See Timestamp, DefaultTimestamp, DefaultTimestampUTC, Caller, and -// DefaultCaller. A common logger initialization sequence that ensures all log -// entries contain a timestamp and source location looks like this: -// -// logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout)) -// logger = log.With(logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller) -// -// Concurrent Safety -// -// Applications with multiple goroutines want each log event written to the -// same logger to remain separate from other log events. Package log provides -// two simple solutions for concurrent safe logging. -// -// NewSyncWriter wraps an io.Writer and serializes each call to its Write -// method. Using a SyncWriter has the benefit that the smallest practical -// portion of the logging logic is performed within a mutex, but it requires -// the formatting Logger to make only one call to Write per log event. -// -// NewSyncLogger wraps any Logger and serializes each call to its Log method. -// Using a SyncLogger has the benefit that it guarantees each log event is -// handled atomically within the wrapped logger, but it typically serializes -// both the formatting and output logic. Use a SyncLogger if the formatting -// logger may perform multiple writes per log event. -// -// Error Handling -// -// This package relies on the practice of wrapping or decorating loggers with -// other loggers to provide composable pieces of functionality. It also means -// that Logger.Log must return an error because some -// implementations—especially those that output log data to an io.Writer—may -// encounter errors that cannot be handled locally. This in turn means that -// Loggers that wrap other loggers should return errors from the wrapped -// logger up the stack. -// -// Fortunately, the decorator pattern also provides a way to avoid the -// necessity to check for errors every time an application calls Logger.Log. -// An application required to panic whenever its Logger encounters -// an error could initialize its logger as follows. -// -// fmtlogger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout)) -// logger := log.LoggerFunc(func(keyvals ...interface{}) error { -// if err := fmtlogger.Log(keyvals...); err != nil { -// panic(err) -// } -// return nil -// }) -package log diff --git a/vendor/github.com/go-kit/kit/log/json_logger.go b/vendor/github.com/go-kit/kit/log/json_logger.go deleted file mode 100644 index 0cedbf82..00000000 --- a/vendor/github.com/go-kit/kit/log/json_logger.go +++ /dev/null @@ -1,91 +0,0 @@ -package log - -import ( - "encoding" - "encoding/json" - "fmt" - "io" - "reflect" -) - -type jsonLogger struct { - io.Writer -} - -// NewJSONLogger returns a Logger that encodes keyvals to the Writer as a -// single JSON object. Each log event produces no more than one call to -// w.Write. The passed Writer must be safe for concurrent use by multiple -// goroutines if the returned Logger will be used concurrently. 
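A quick usage sketch for the JSON logger defined just below, for comparison with the logfmt examples earlier in the README; keys come out in encoding/json's sorted-map-key order:

```go
package sketch

import (
	"os"

	"github.com/go-kit/kit/log"
)

func Example() {
	// One Log call produces exactly one JSON object on one line.
	logger := log.NewJSONLogger(log.NewSyncWriter(os.Stdout))
	logger.Log("transport", "HTTP", "addr", ":8080", "msg", "listening")
	// Output: {"addr":":8080","msg":"listening","transport":"HTTP"}
}
```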
-func NewJSONLogger(w io.Writer) Logger { - return &jsonLogger{w} -} - -func (l *jsonLogger) Log(keyvals ...interface{}) error { - n := (len(keyvals) + 1) / 2 // +1 to handle case when len is odd - m := make(map[string]interface{}, n) - for i := 0; i < len(keyvals); i += 2 { - k := keyvals[i] - var v interface{} = ErrMissingValue - if i+1 < len(keyvals) { - v = keyvals[i+1] - } - merge(m, k, v) - } - enc := json.NewEncoder(l.Writer) - enc.SetEscapeHTML(false) - return enc.Encode(m) -} - -func merge(dst map[string]interface{}, k, v interface{}) { - var key string - switch x := k.(type) { - case string: - key = x - case fmt.Stringer: - key = safeString(x) - default: - key = fmt.Sprint(x) - } - - // We want json.Marshaler and encoding.TextMarshaller to take priority over - // err.Error() and v.String(). But json.Marshall (called later) does that by - // default so we force a no-op if it's one of those 2 case. - switch x := v.(type) { - case json.Marshaler: - case encoding.TextMarshaler: - case error: - v = safeError(x) - case fmt.Stringer: - v = safeString(x) - } - - dst[key] = v -} - -func safeString(str fmt.Stringer) (s string) { - defer func() { - if panicVal := recover(); panicVal != nil { - if v := reflect.ValueOf(str); v.Kind() == reflect.Ptr && v.IsNil() { - s = "NULL" - } else { - panic(panicVal) - } - } - }() - s = str.String() - return -} - -func safeError(err error) (s interface{}) { - defer func() { - if panicVal := recover(); panicVal != nil { - if v := reflect.ValueOf(err); v.Kind() == reflect.Ptr && v.IsNil() { - s = nil - } else { - panic(panicVal) - } - } - }() - s = err.Error() - return -} diff --git a/vendor/github.com/go-kit/kit/log/level/doc.go b/vendor/github.com/go-kit/kit/log/level/doc.go deleted file mode 100644 index 505d307b..00000000 --- a/vendor/github.com/go-kit/kit/log/level/doc.go +++ /dev/null @@ -1,22 +0,0 @@ -// Package level implements leveled logging on top of Go kit's log package. To -// use the level package, create a logger as per normal in your func main, and -// wrap it with level.NewFilter. -// -// var logger log.Logger -// logger = log.NewLogfmtLogger(os.Stderr) -// logger = level.NewFilter(logger, level.AllowInfo()) // <-- -// logger = log.With(logger, "ts", log.DefaultTimestampUTC) -// -// Then, at the callsites, use one of the level.Debug, Info, Warn, or Error -// helper methods to emit leveled log events. -// -// logger.Log("foo", "bar") // as normal, no level -// level.Debug(logger).Log("request_id", reqID, "trace_data", trace.Get()) -// if value > 100 { -// level.Error(logger).Log("value", value) -// } -// -// NewFilter allows precise control over what happens when a log event is -// emitted without a level key, or if a squelched level is used. Check the -// Option functions for details. -package level diff --git a/vendor/github.com/go-kit/kit/log/level/level.go b/vendor/github.com/go-kit/kit/log/level/level.go deleted file mode 100644 index fceafc45..00000000 --- a/vendor/github.com/go-kit/kit/log/level/level.go +++ /dev/null @@ -1,205 +0,0 @@ -package level - -import "github.com/go-kit/kit/log" - -// Error returns a logger that includes a Key/ErrorValue pair. -func Error(logger log.Logger) log.Logger { - return log.WithPrefix(logger, Key(), ErrorValue()) -} - -// Warn returns a logger that includes a Key/WarnValue pair. -func Warn(logger log.Logger) log.Logger { - return log.WithPrefix(logger, Key(), WarnValue()) -} - -// Info returns a logger that includes a Key/InfoValue pair. 
-func Info(logger log.Logger) log.Logger { - return log.WithPrefix(logger, Key(), InfoValue()) -} - -// Debug returns a logger that includes a Key/DebugValue pair. -func Debug(logger log.Logger) log.Logger { - return log.WithPrefix(logger, Key(), DebugValue()) -} - -// NewFilter wraps next and implements level filtering. See the commentary on -// the Option functions for a detailed description of how to configure levels. -// If no options are provided, all leveled log events created with Debug, -// Info, Warn or Error helper methods are squelched and non-leveled log -// events are passed to next unmodified. -func NewFilter(next log.Logger, options ...Option) log.Logger { - l := &logger{ - next: next, - } - for _, option := range options { - option(l) - } - return l -} - -type logger struct { - next log.Logger - allowed level - squelchNoLevel bool - errNotAllowed error - errNoLevel error -} - -func (l *logger) Log(keyvals ...interface{}) error { - var hasLevel, levelAllowed bool - for i := 1; i < len(keyvals); i += 2 { - if v, ok := keyvals[i].(*levelValue); ok { - hasLevel = true - levelAllowed = l.allowed&v.level != 0 - break - } - } - if !hasLevel && l.squelchNoLevel { - return l.errNoLevel - } - if hasLevel && !levelAllowed { - return l.errNotAllowed - } - return l.next.Log(keyvals...) -} - -// Option sets a parameter for the leveled logger. -type Option func(*logger) - -// AllowAll is an alias for AllowDebug. -func AllowAll() Option { - return AllowDebug() -} - -// AllowDebug allows error, warn, info and debug level log events to pass. -func AllowDebug() Option { - return allowed(levelError | levelWarn | levelInfo | levelDebug) -} - -// AllowInfo allows error, warn and info level log events to pass. -func AllowInfo() Option { - return allowed(levelError | levelWarn | levelInfo) -} - -// AllowWarn allows error and warn level log events to pass. -func AllowWarn() Option { - return allowed(levelError | levelWarn) -} - -// AllowError allows only error level log events to pass. -func AllowError() Option { - return allowed(levelError) -} - -// AllowNone allows no leveled log events to pass. -func AllowNone() Option { - return allowed(0) -} - -func allowed(allowed level) Option { - return func(l *logger) { l.allowed = allowed } -} - -// ErrNotAllowed sets the error to return from Log when it squelches a log -// event disallowed by the configured Allow[Level] option. By default, -// ErrNotAllowed is nil; in this case the log event is squelched with no -// error. -func ErrNotAllowed(err error) Option { - return func(l *logger) { l.errNotAllowed = err } -} - -// SquelchNoLevel instructs Log to squelch log events with no level, so that -// they don't proceed through to the wrapped logger. If SquelchNoLevel is set -// to true and a log event is squelched in this way, the error value -// configured with ErrNoLevel is returned to the caller. -func SquelchNoLevel(squelch bool) Option { - return func(l *logger) { l.squelchNoLevel = squelch } -} - -// ErrNoLevel sets the error to return from Log when it squelches a log event -// with no level. By default, ErrNoLevel is nil; in this case the log event is -// squelched with no error. -func ErrNoLevel(err error) Option { - return func(l *logger) { l.errNoLevel = err } -} - -// NewInjector wraps next and returns a logger that adds a Key/level pair to -// the beginning of log events that don't already contain a level. In effect, -// this gives a default level to logs without a level. 
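NewInjector, defined just below, is the counterpart to SquelchNoLevel above: instead of dropping unleveled events it assigns them a default level. A usage sketch:

```go
package sketch

import (
	"os"

	"github.com/go-kit/kit/log"
	"github.com/go-kit/kit/log/level"
)

func Example() {
	var logger log.Logger
	logger = log.NewLogfmtLogger(os.Stdout)
	// Events logged without a level key get level=info prepended.
	logger = level.NewInjector(logger, level.InfoValue())
	logger.Log("msg", "no explicit level")
	// Output: level=info msg="no explicit level"
}
```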
-func NewInjector(next log.Logger, level Value) log.Logger { - return &injector{ - next: next, - level: level, - } -} - -type injector struct { - next log.Logger - level interface{} -} - -func (l *injector) Log(keyvals ...interface{}) error { - for i := 1; i < len(keyvals); i += 2 { - if _, ok := keyvals[i].(*levelValue); ok { - return l.next.Log(keyvals...) - } - } - kvs := make([]interface{}, len(keyvals)+2) - kvs[0], kvs[1] = key, l.level - copy(kvs[2:], keyvals) - return l.next.Log(kvs...) -} - -// Value is the interface that each of the canonical level values implement. -// It contains unexported methods that prevent types from other packages from -// implementing it, guaranteeing that NewFilter can distinguish the levels -// defined in this package from all other values. -type Value interface { - String() string - levelVal() -} - -// Key returns the unique key added to log events by the loggers in this -// package. -func Key() interface{} { return key } - -// ErrorValue returns the unique value added to log events by Error. -func ErrorValue() Value { return errorValue } - -// WarnValue returns the unique value added to log events by Warn. -func WarnValue() Value { return warnValue } - -// InfoValue returns the unique value added to log events by Info. -func InfoValue() Value { return infoValue } - -// DebugValue returns the unique value added to log events by Debug. -func DebugValue() Value { return debugValue } - -var ( - // key is of type interface{} so that it allocates once during package - // initialization and avoids allocating every time the value is added to a - // []interface{} later. - key interface{} = "level" - - errorValue = &levelValue{level: levelError, name: "error"} - warnValue = &levelValue{level: levelWarn, name: "warn"} - infoValue = &levelValue{level: levelInfo, name: "info"} - debugValue = &levelValue{level: levelDebug, name: "debug"} -) - -type level byte - -const ( - levelDebug level = 1 << iota - levelInfo - levelWarn - levelError -) - -type levelValue struct { - name string - level -} - -func (v *levelValue) String() string { return v.name } -func (v *levelValue) levelVal() {} diff --git a/vendor/github.com/go-kit/kit/log/log.go b/vendor/github.com/go-kit/kit/log/log.go deleted file mode 100644 index 66a9e2fd..00000000 --- a/vendor/github.com/go-kit/kit/log/log.go +++ /dev/null @@ -1,135 +0,0 @@ -package log - -import "errors" - -// Logger is the fundamental interface for all log operations. Log creates a -// log event from keyvals, a variadic sequence of alternating keys and values. -// Implementations must be safe for concurrent use by multiple goroutines. In -// particular, any implementation of Logger that appends to keyvals or -// modifies or retains any of its elements must make a copy first. -type Logger interface { - Log(keyvals ...interface{}) error -} - -// ErrMissingValue is appended to keyvals slices with odd length to substitute -// the missing value. -var ErrMissingValue = errors.New("(MISSING)") - -// With returns a new contextual logger with keyvals prepended to those passed -// to calls to Log. If logger is also a contextual logger created by With or -// WithPrefix, keyvals is appended to the existing context. -// -// The returned Logger replaces all value elements (odd indexes) containing a -// Valuer with their generated value for each call to its Log method. -func With(logger Logger, keyvals ...interface{}) Logger { - if len(keyvals) == 0 { - return logger - } - l := newContext(logger) - kvs := append(l.keyvals, keyvals...)
- if len(kvs)%2 != 0 { - kvs = append(kvs, ErrMissingValue) - } - return &context{ - logger: l.logger, - // Limiting the capacity of the stored keyvals ensures that a new - // backing array is created if the slice must grow in Log or With. - // Using the extra capacity without copying risks a data race that - // would violate the Logger interface contract. - keyvals: kvs[:len(kvs):len(kvs)], - hasValuer: l.hasValuer || containsValuer(keyvals), - } -} - -// WithPrefix returns a new contextual logger with keyvals prepended to those -// passed to calls to Log. If logger is also a contextual logger created by -// With or WithPrefix, keyvals is prepended to the existing context. -// -// The returned Logger replaces all value elements (odd indexes) containing a -// Valuer with their generated value for each call to its Log method. -func WithPrefix(logger Logger, keyvals ...interface{}) Logger { - if len(keyvals) == 0 { - return logger - } - l := newContext(logger) - // Limiting the capacity of the stored keyvals ensures that a new - // backing array is created if the slice must grow in Log or With. - // Using the extra capacity without copying risks a data race that - // would violate the Logger interface contract. - n := len(l.keyvals) + len(keyvals) - if len(keyvals)%2 != 0 { - n++ - } - kvs := make([]interface{}, 0, n) - kvs = append(kvs, keyvals...) - if len(kvs)%2 != 0 { - kvs = append(kvs, ErrMissingValue) - } - kvs = append(kvs, l.keyvals...) - return &context{ - logger: l.logger, - keyvals: kvs, - hasValuer: l.hasValuer || containsValuer(keyvals), - } -} - -// context is the Logger implementation returned by With and WithPrefix. It -// wraps a Logger and holds keyvals that it includes in all log events. Its -// Log method calls bindValues to generate values for each Valuer in the -// context keyvals. -// -// A context must always have the same number of stack frames between calls to -// its Log method and the eventual binding of Valuers to their value. This -// requirement comes from the functional requirement to allow a context to -// resolve application call site information for a Caller stored in the -// context. To do this we must be able to predict the number of logging -// functions on the stack when bindValues is called. -// -// Two implementation details provide the needed stack depth consistency. -// -// 1. newContext avoids introducing an additional layer when asked to -// wrap another context. -// 2. With and WithPrefix avoid introducing an additional layer by -// returning a newly constructed context with a merged keyvals rather -// than simply wrapping the existing context. -type context struct { - logger Logger - keyvals []interface{} - hasValuer bool -} - -func newContext(logger Logger) *context { - if c, ok := logger.(*context); ok { - return c - } - return &context{logger: logger} -} - -// Log replaces all value elements (odd indexes) containing a Valuer in the -// stored context with their generated value, appends keyvals, and passes the -// result to the wrapped Logger. -func (l *context) Log(keyvals ...interface{}) error { - kvs := append(l.keyvals, keyvals...) - if len(kvs)%2 != 0 { - kvs = append(kvs, ErrMissingValue) - } - if l.hasValuer { - // If no keyvals were appended above then we must copy l.keyvals so - // that future log events will reevaluate the stored Valuers. - if len(keyvals) == 0 { - kvs = append([]interface{}{}, l.keyvals...) - } - bindValues(kvs[:len(l.keyvals)]) - } - return l.logger.Log(kvs...) 
-} - -// LoggerFunc is an adapter to allow use of ordinary functions as Loggers. If -// f is a function with the appropriate signature, LoggerFunc(f) is a Logger -// object that calls f. -type LoggerFunc func(...interface{}) error - -// Log implements Logger by calling f(keyvals...). -func (f LoggerFunc) Log(keyvals ...interface{}) error { - return f(keyvals...) -} diff --git a/vendor/github.com/go-kit/kit/log/logfmt_logger.go b/vendor/github.com/go-kit/kit/log/logfmt_logger.go deleted file mode 100644 index a0030529..00000000 --- a/vendor/github.com/go-kit/kit/log/logfmt_logger.go +++ /dev/null @@ -1,62 +0,0 @@ -package log - -import ( - "bytes" - "io" - "sync" - - "github.com/go-logfmt/logfmt" -) - -type logfmtEncoder struct { - *logfmt.Encoder - buf bytes.Buffer -} - -func (l *logfmtEncoder) Reset() { - l.Encoder.Reset() - l.buf.Reset() -} - -var logfmtEncoderPool = sync.Pool{ - New: func() interface{} { - var enc logfmtEncoder - enc.Encoder = logfmt.NewEncoder(&enc.buf) - return &enc - }, -} - -type logfmtLogger struct { - w io.Writer -} - -// NewLogfmtLogger returns a logger that encodes keyvals to the Writer in -// logfmt format. Each log event produces no more than one call to w.Write. -// The passed Writer must be safe for concurrent use by multiple goroutines if -// the returned Logger will be used concurrently. -func NewLogfmtLogger(w io.Writer) Logger { - return &logfmtLogger{w} -} - -func (l logfmtLogger) Log(keyvals ...interface{}) error { - enc := logfmtEncoderPool.Get().(*logfmtEncoder) - enc.Reset() - defer logfmtEncoderPool.Put(enc) - - if err := enc.EncodeKeyvals(keyvals...); err != nil { - return err - } - - // Add newline to the end of the buffer - if err := enc.EndRecord(); err != nil { - return err - } - - // The Logger interface requires implementations to be safe for concurrent - // use by multiple goroutines. For this implementation that means making - // only one call to l.w.Write() for each call to Log. - if _, err := l.w.Write(enc.buf.Bytes()); err != nil { - return err - } - return nil -} diff --git a/vendor/github.com/go-kit/kit/log/nop_logger.go b/vendor/github.com/go-kit/kit/log/nop_logger.go deleted file mode 100644 index 1047d626..00000000 --- a/vendor/github.com/go-kit/kit/log/nop_logger.go +++ /dev/null @@ -1,8 +0,0 @@ -package log - -type nopLogger struct{} - -// NewNopLogger returns a logger that doesn't do anything. -func NewNopLogger() Logger { return nopLogger{} } - -func (nopLogger) Log(...interface{}) error { return nil } diff --git a/vendor/github.com/go-kit/kit/log/stdlib.go b/vendor/github.com/go-kit/kit/log/stdlib.go deleted file mode 100644 index ff96b5de..00000000 --- a/vendor/github.com/go-kit/kit/log/stdlib.go +++ /dev/null @@ -1,116 +0,0 @@ -package log - -import ( - "io" - "log" - "regexp" - "strings" -) - -// StdlibWriter implements io.Writer by invoking the stdlib log.Print. It's -// designed to be passed to a Go kit logger as the writer, for cases where -// it's necessary to redirect all Go kit log output to the stdlib logger. -// -// If you have any choice in the matter, you shouldn't use this. Prefer to -// redirect the stdlib log to the Go kit logger via NewStdlibAdapter. -type StdlibWriter struct{} - -// Write implements io.Writer. -func (w StdlibWriter) Write(p []byte) (int, error) { - log.Print(strings.TrimSpace(string(p))) - return len(p), nil -} - -// StdlibAdapter wraps a Logger and allows it to be passed to the stdlib -// logger's SetOutput. 
It will extract date/timestamps, filenames, and -// messages, and place them under relevant keys. -type StdlibAdapter struct { - Logger - timestampKey string - fileKey string - messageKey string -} - -// StdlibAdapterOption sets a parameter for the StdlibAdapter. -type StdlibAdapterOption func(*StdlibAdapter) - -// TimestampKey sets the key for the timestamp field. By default, it's "ts". -func TimestampKey(key string) StdlibAdapterOption { - return func(a *StdlibAdapter) { a.timestampKey = key } -} - -// FileKey sets the key for the file and line field. By default, it's "caller". -func FileKey(key string) StdlibAdapterOption { - return func(a *StdlibAdapter) { a.fileKey = key } -} - -// MessageKey sets the key for the actual log message. By default, it's "msg". -func MessageKey(key string) StdlibAdapterOption { - return func(a *StdlibAdapter) { a.messageKey = key } -} - -// NewStdlibAdapter returns a new StdlibAdapter wrapper around the passed -// logger. It's designed to be passed to log.SetOutput. -func NewStdlibAdapter(logger Logger, options ...StdlibAdapterOption) io.Writer { - a := StdlibAdapter{ - Logger: logger, - timestampKey: "ts", - fileKey: "caller", - messageKey: "msg", - } - for _, option := range options { - option(&a) - } - return a -} - -func (a StdlibAdapter) Write(p []byte) (int, error) { - result := subexps(p) - keyvals := []interface{}{} - var timestamp string - if date, ok := result["date"]; ok && date != "" { - timestamp = date - } - if time, ok := result["time"]; ok && time != "" { - if timestamp != "" { - timestamp += " " - } - timestamp += time - } - if timestamp != "" { - keyvals = append(keyvals, a.timestampKey, timestamp) - } - if file, ok := result["file"]; ok && file != "" { - keyvals = append(keyvals, a.fileKey, file) - } - if msg, ok := result["msg"]; ok { - keyvals = append(keyvals, a.messageKey, msg) - } - if err := a.Logger.Log(keyvals...); err != nil { - return 0, err - } - return len(p), nil -} - -const ( - logRegexpDate = `(?P<date>[0-9]{4}/[0-9]{2}/[0-9]{2})?[ ]?` - logRegexpTime = `(?P<time>
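For reference, the go-kit packages removed above all compose through the single Logger interface: the logfmt encoder, the level filter, contextual key-values, and the stdlib adapter stack in a few lines. The sketch below is not part of the diff; it is a minimal usage example built only from identifiers visible in the deleted files (NewLogfmtLogger, level.NewFilter and level.AllowInfo, log.With with log.DefaultTimestampUTC, NewStdlibAdapter), and it assumes the github.com/go-kit/kit/log import path is still resolvable, which is precisely what this change stops vendoring.

    package main

    import (
        stdlog "log"
        "os"

        "github.com/go-kit/kit/log"
        "github.com/go-kit/kit/log/level"
    )

    func main() {
        // Stack the removed pieces: logfmt encoding, a level filter,
        // and a timestamp Valuer bound to every event.
        var logger log.Logger
        logger = log.NewLogfmtLogger(os.Stderr)
        logger = level.NewFilter(logger, level.AllowInfo())
        logger = log.With(logger, "ts", log.DefaultTimestampUTC)

        level.Debug(logger).Log("msg", "squelched by AllowInfo") // filtered out; returns nil by default
        level.Info(logger).Log("msg", "hello", "answer", 42)     // ts=... level=info msg=hello answer=42

        // StdlibAdapter re-keys the stdlib logger's date/time and file
        // prefixes into structured fields before forwarding.
        stdlog.SetOutput(log.NewStdlibAdapter(logger))
        stdlog.Println("routed through go-kit")
    }
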
- -passing `ginkgo watch` the `-r` flag will recursively detect all test suites under the current directory and monitor them. -`watch` does not detect *new* packages. Moreover, changes in package X only rerun the tests for package X, tests for packages -that depend on X are not rerun. - -[OSX & Linux only] To receive (desktop) notifications when a test run completes: - - ginkgo -notify - -this is particularly useful with `ginkgo watch`. Notifications are currently only supported on OS X and require that you `brew install terminal-notifier` - -Sometimes (to suss out race conditions/flakey tests, for example) you want to keep running a test suite until it fails. You can do this with: - - ginkgo -untilItFails - -To bootstrap a test suite: - - ginkgo bootstrap - -To generate a test file: - - ginkgo generate - -To bootstrap/generate test files without using "." imports: - - ginkgo bootstrap --nodot - - ginkgo generate --nodot - -this will explicitly export all the identifiers in Ginkgo and Gomega allowing you to rename them to avoid collisions. When you pull to the latest Ginkgo/Gomega you'll want to run - - ginkgo nodot - -to refresh this list and pull in any new identifiers. In particular, this will pull in any new Gomega matchers that get added. - -To convert an existing XUnit style test suite to a Ginkgo-style test suite: - - ginkgo convert . - -To unfocus tests: - - ginkgo unfocus - -or - - ginkgo blur - -To compile a test suite: - - ginkgo build <path-to-package> - -will output an executable file named `package.test`. This can be run directly or by invoking - - ginkgo <path-to-package.test> - - -To print an outline of Ginkgo specs and containers in a file: - - ginkgo outline <filename> - -To print out Ginkgo's version: - - ginkgo version - -To get more help: - - ginkgo help -*/ -package main - -import ( - "flag" - "fmt" - "os" - "os/exec" - "strings" - - "github.com/onsi/ginkgo/config" - "github.com/onsi/ginkgo/ginkgo/testsuite" -) - -const greenColor = "\x1b[32m" -const redColor = "\x1b[91m" -const defaultStyle = "\x1b[0m" -const lightGrayColor = "\x1b[37m" - -type Command struct { - Name string - AltName string - FlagSet *flag.FlagSet - Usage []string - UsageCommand string - Command func(args []string, additionalArgs []string) - SuppressFlagDocumentation bool - FlagDocSubstitute []string -} - -func (c *Command) Matches(name string) bool { - return c.Name == name || (c.AltName != "" && c.AltName == name) -} - -func (c *Command) Run(args []string, additionalArgs []string) { - c.FlagSet.Usage = usage - c.FlagSet.Parse(args) - c.Command(c.FlagSet.Args(), additionalArgs) -} - -var DefaultCommand *Command -var Commands []*Command - -func init() { - DefaultCommand = BuildRunCommand() - Commands = append(Commands, BuildWatchCommand()) - Commands = append(Commands, BuildBuildCommand()) - Commands = append(Commands, BuildBootstrapCommand()) - Commands = append(Commands, BuildGenerateCommand()) - Commands = append(Commands, BuildNodotCommand()) - Commands = append(Commands, BuildConvertCommand()) - Commands = append(Commands, BuildUnfocusCommand()) - Commands = append(Commands, BuildVersionCommand()) - Commands = append(Commands, BuildHelpCommand()) - Commands = append(Commands, BuildOutlineCommand()) -} - -func main() { - args := []string{} - additionalArgs := []string{} - - foundDelimiter := false - - for _, arg := range os.Args[1:] { - if !foundDelimiter { - if arg == "--" { - foundDelimiter = true - continue - } - } - - if foundDelimiter { - additionalArgs = append(additionalArgs, arg) - } else { - args = append(args, arg) - } - } - - if
len(args) > 0 { - commandToRun, found := commandMatching(args[0]) - if found { - commandToRun.Run(args[1:], additionalArgs) - return - } - } - - DefaultCommand.Run(args, additionalArgs) -} - -func commandMatching(name string) (*Command, bool) { - for _, command := range Commands { - if command.Matches(name) { - return command, true - } - } - return nil, false -} - -func usage() { - fmt.Printf("Ginkgo Version %s\n\n", config.VERSION) - usageForCommand(DefaultCommand, false) - for _, command := range Commands { - fmt.Printf("\n") - usageForCommand(command, false) - } -} - -func usageForCommand(command *Command, longForm bool) { - fmt.Printf("%s\n%s\n", command.UsageCommand, strings.Repeat("-", len(command.UsageCommand))) - fmt.Printf("%s\n", strings.Join(command.Usage, "\n")) - if command.SuppressFlagDocumentation && !longForm { - fmt.Printf("%s\n", strings.Join(command.FlagDocSubstitute, "\n ")) - } else { - command.FlagSet.SetOutput(os.Stdout) - command.FlagSet.PrintDefaults() - } -} - -func complainAndQuit(complaint string) { - fmt.Fprintf(os.Stderr, "%s\nFor usage instructions:\n\tginkgo help\n", complaint) - os.Exit(1) -} - -func findSuites(args []string, recurseForAll bool, skipPackage string, allowPrecompiled bool) ([]testsuite.TestSuite, []string) { - suites := []testsuite.TestSuite{} - - if len(args) > 0 { - for _, arg := range args { - if allowPrecompiled { - suite, err := testsuite.PrecompiledTestSuite(arg) - if err == nil { - suites = append(suites, suite) - continue - } - } - recurseForSuite := recurseForAll - if strings.HasSuffix(arg, "/...") && arg != "/..." { - arg = arg[:len(arg)-4] - recurseForSuite = true - } - suites = append(suites, testsuite.SuitesInDir(arg, recurseForSuite)...) - } - } else { - suites = testsuite.SuitesInDir(".", recurseForAll) - } - - skippedPackages := []string{} - if skipPackage != "" { - skipFilters := strings.Split(skipPackage, ",") - filteredSuites := []testsuite.TestSuite{} - for _, suite := range suites { - skip := false - for _, skipFilter := range skipFilters { - if strings.Contains(suite.Path, skipFilter) { - skip = true - break - } - } - if skip { - skippedPackages = append(skippedPackages, suite.Path) - } else { - filteredSuites = append(filteredSuites, suite) - } - } - suites = filteredSuites - } - - return suites, skippedPackages -} - -func goFmt(path string) { - out, err := exec.Command("go", "fmt", path).CombinedOutput() - if err != nil { - complainAndQuit("Could not fmt: " + err.Error() + "\n" + string(out)) - } -} - -func pluralizedWord(singular, plural string, count int) string { - if count == 1 { - return singular - } - return plural -} diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/nodot/nodot.go b/vendor/github.com/onsi/ginkgo/ginkgo/nodot/nodot.go deleted file mode 100644 index c87b7216..00000000 --- a/vendor/github.com/onsi/ginkgo/ginkgo/nodot/nodot.go +++ /dev/null @@ -1,196 +0,0 @@ -package nodot - -import ( - "fmt" - "go/ast" - "go/build" - "go/parser" - "go/token" - "path/filepath" - "strings" -) - -func ApplyNoDot(data []byte) ([]byte, error) { - sections, err := generateNodotSections() - if err != nil { - return nil, err - } - - for _, section := range sections { - data = section.createOrUpdateIn(data) - } - - return data, nil -} - -type nodotSection struct { - name string - pkg string - declarations []string - types []string -} - -func (s nodotSection) createOrUpdateIn(data []byte) []byte { - renames := map[string]string{} - - contents := string(data) - - lines := strings.Split(contents, "\n") - - comment := "// 
Declarations for " + s.name - - newLines := []string{} - for _, line := range lines { - if line == comment { - continue - } - - words := strings.Split(line, " ") - lastWord := words[len(words)-1] - - if s.containsDeclarationOrType(lastWord) { - renames[lastWord] = words[1] - continue - } - - newLines = append(newLines, line) - } - - if len(newLines[len(newLines)-1]) > 0 { - newLines = append(newLines, "") - } - - newLines = append(newLines, comment) - - for _, typ := range s.types { - name, ok := renames[s.prefix(typ)] - if !ok { - name = typ - } - newLines = append(newLines, fmt.Sprintf("type %s %s", name, s.prefix(typ))) - } - - for _, decl := range s.declarations { - name, ok := renames[s.prefix(decl)] - if !ok { - name = decl - } - newLines = append(newLines, fmt.Sprintf("var %s = %s", name, s.prefix(decl))) - } - - newLines = append(newLines, "") - - newContents := strings.Join(newLines, "\n") - - return []byte(newContents) -} - -func (s nodotSection) prefix(declOrType string) string { - return s.pkg + "." + declOrType -} - -func (s nodotSection) containsDeclarationOrType(word string) bool { - for _, declaration := range s.declarations { - if s.prefix(declaration) == word { - return true - } - } - - for _, typ := range s.types { - if s.prefix(typ) == word { - return true - } - } - - return false -} - -func generateNodotSections() ([]nodotSection, error) { - sections := []nodotSection{} - - declarations, err := getExportedDeclerationsForPackage("github.com/onsi/ginkgo", "ginkgo_dsl.go", "GINKGO_VERSION", "GINKGO_PANIC") - if err != nil { - return nil, err - } - sections = append(sections, nodotSection{ - name: "Ginkgo DSL", - pkg: "ginkgo", - declarations: declarations, - types: []string{"Done", "Benchmarker"}, - }) - - declarations, err = getExportedDeclerationsForPackage("github.com/onsi/gomega", "gomega_dsl.go", "GOMEGA_VERSION") - if err != nil { - return nil, err - } - sections = append(sections, nodotSection{ - name: "Gomega DSL", - pkg: "gomega", - declarations: declarations, - }) - - declarations, err = getExportedDeclerationsForPackage("github.com/onsi/gomega", "matchers.go") - if err != nil { - return nil, err - } - sections = append(sections, nodotSection{ - name: "Gomega Matchers", - pkg: "gomega", - declarations: declarations, - }) - - return sections, nil -} - -func getExportedDeclerationsForPackage(pkgPath string, filename string, blacklist ...string) ([]string, error) { - pkg, err := build.Import(pkgPath, ".", 0) - if err != nil { - return []string{}, err - } - - declarations, err := getExportedDeclarationsForFile(filepath.Join(pkg.Dir, filename)) - if err != nil { - return []string{}, err - } - - blacklistLookup := map[string]bool{} - for _, declaration := range blacklist { - blacklistLookup[declaration] = true - } - - filteredDeclarations := []string{} - for _, declaration := range declarations { - if blacklistLookup[declaration] { - continue - } - filteredDeclarations = append(filteredDeclarations, declaration) - } - - return filteredDeclarations, nil -} - -func getExportedDeclarationsForFile(path string) ([]string, error) { - fset := token.NewFileSet() - tree, err := parser.ParseFile(fset, path, nil, 0) - if err != nil { - return []string{}, err - } - - declarations := []string{} - ast.FileExports(tree) - for _, decl := range tree.Decls { - switch x := decl.(type) { - case *ast.GenDecl: - switch s := x.Specs[0].(type) { - case *ast.ValueSpec: - declarations = append(declarations, s.Names[0].Name) - } - case *ast.FuncDecl: - if x.Recv == nil { - declarations = 
append(declarations, x.Name.Name) - } - } - } - - return declarations, nil -} diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/nodot_command.go b/vendor/github.com/onsi/ginkgo/ginkgo/nodot_command.go deleted file mode 100644 index 39b88b5d..00000000 --- a/vendor/github.com/onsi/ginkgo/ginkgo/nodot_command.go +++ /dev/null @@ -1,77 +0,0 @@ -package main - -import ( - "bufio" - "flag" - "io/ioutil" - "os" - "path/filepath" - "regexp" - - "github.com/onsi/ginkgo/ginkgo/nodot" -) - -func BuildNodotCommand() *Command { - return &Command{ - Name: "nodot", - FlagSet: flag.NewFlagSet("bootstrap", flag.ExitOnError), - UsageCommand: "ginkgo nodot", - Usage: []string{ - "Update the nodot declarations in your test suite", - "Any missing declarations (from, say, a recently added matcher) will be added to your bootstrap file.", - "If you've renamed a declaration, that name will be honored and not overwritten.", - }, - Command: updateNodot, - } -} - -func updateNodot(args []string, additionalArgs []string) { - suiteFile, perm := findSuiteFile() - - data, err := ioutil.ReadFile(suiteFile) - if err != nil { - complainAndQuit("Failed to update nodot declarations: " + err.Error()) - } - - content, err := nodot.ApplyNoDot(data) - if err != nil { - complainAndQuit("Failed to update nodot declarations: " + err.Error()) - } - ioutil.WriteFile(suiteFile, content, perm) - - goFmt(suiteFile) -} - -func findSuiteFile() (string, os.FileMode) { - workingDir, err := os.Getwd() - if err != nil { - complainAndQuit("Could not find suite file for nodot: " + err.Error()) - } - - files, err := ioutil.ReadDir(workingDir) - if err != nil { - complainAndQuit("Could not find suite file for nodot: " + err.Error()) - } - - re := regexp.MustCompile(`RunSpecs\(|RunSpecsWithDefaultAndCustomReporters\(|RunSpecsWithCustomReporters\(`) - - for _, file := range files { - if file.IsDir() { - continue - } - path := filepath.Join(workingDir, file.Name()) - f, err := os.Open(path) - if err != nil { - complainAndQuit("Could not find suite file for nodot: " + err.Error()) - } - defer f.Close() - - if re.MatchReader(bufio.NewReader(f)) { - return path, file.Mode() - } - } - - complainAndQuit("Could not find a suite file for nodot: you need a bootstrap file that calls Ginkgo's RunSpecs() command.\nTry running ginkgo bootstrap first.") - - return "", 0 -} diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/notifications.go b/vendor/github.com/onsi/ginkgo/ginkgo/notifications.go deleted file mode 100644 index 368d61fb..00000000 --- a/vendor/github.com/onsi/ginkgo/ginkgo/notifications.go +++ /dev/null @@ -1,141 +0,0 @@ -package main - -import ( - "fmt" - "os" - "os/exec" - "regexp" - "runtime" - "strings" - - "github.com/onsi/ginkgo/config" - "github.com/onsi/ginkgo/ginkgo/testsuite" -) - -type Notifier struct { - commandFlags *RunWatchAndBuildCommandFlags -} - -func NewNotifier(commandFlags *RunWatchAndBuildCommandFlags) *Notifier { - return &Notifier{ - commandFlags: commandFlags, - } -} - -func (n *Notifier) VerifyNotificationsAreAvailable() { - if n.commandFlags.Notify { - onLinux := (runtime.GOOS == "linux") - onOSX := (runtime.GOOS == "darwin") - if onOSX { - - _, err := exec.LookPath("terminal-notifier") - if err != nil { - fmt.Printf(`--notify requires terminal-notifier, which you don't seem to have installed.
- -OSX: - -To remedy this: - - brew install terminal-notifier - -To learn more about terminal-notifier: - - https://github.com/alloy/terminal-notifier -`) - os.Exit(1) - } - - } else if onLinux { - - _, err := exec.LookPath("notify-send") - if err != nil { - fmt.Printf(`--notify requires terminal-notifier or notify-send, which you don't seem to have installed. - -Linux: - -Download and install notify-send for your distribution -`) - os.Exit(1) - } - - } - } -} - -func (n *Notifier) SendSuiteCompletionNotification(suite testsuite.TestSuite, suitePassed bool) { - if suitePassed { - n.SendNotification("Ginkgo [PASS]", fmt.Sprintf(`Test suite for "%s" passed.`, suite.PackageName)) - } else { - n.SendNotification("Ginkgo [FAIL]", fmt.Sprintf(`Test suite for "%s" failed.`, suite.PackageName)) - } -} - -func (n *Notifier) SendNotification(title string, subtitle string) { - - if n.commandFlags.Notify { - onLinux := (runtime.GOOS == "linux") - onOSX := (runtime.GOOS == "darwin") - - if onOSX { - - _, err := exec.LookPath("terminal-notifier") - if err == nil { - args := []string{"-title", title, "-subtitle", subtitle, "-group", "com.onsi.ginkgo"} - terminal := os.Getenv("TERM_PROGRAM") - if terminal == "iTerm.app" { - args = append(args, "-activate", "com.googlecode.iterm2") - } else if terminal == "Apple_Terminal" { - args = append(args, "-activate", "com.apple.Terminal") - } - - exec.Command("terminal-notifier", args...).Run() - } - - } else if onLinux { - - _, err := exec.LookPath("notify-send") - if err == nil { - args := []string{"-a", "ginkgo", title, subtitle} - exec.Command("notify-send", args...).Run() - } - - } - } -} - -func (n *Notifier) RunCommand(suite testsuite.TestSuite, suitePassed bool) { - - command := n.commandFlags.AfterSuiteHook - if command != "" { - - // Allow for string replacement to pass input to the command - passed := "[FAIL]" - if suitePassed { - passed = "[PASS]" - } - command = strings.Replace(command, "(ginkgo-suite-passed)", passed, -1) - command = strings.Replace(command, "(ginkgo-suite-name)", suite.PackageName, -1) - - // Must break command into parts - splitArgs := regexp.MustCompile(`'.+'|".+"|\S+`) - parts := splitArgs.FindAllString(command, -1) - - output, err := exec.Command(parts[0], parts[1:]...).CombinedOutput() - if err != nil { - fmt.Println("Post-suite command failed:") - if config.DefaultReporterConfig.NoColor { - fmt.Printf("\t%s\n", output) - } else { - fmt.Printf("\t%s%s%s\n", redColor, string(output), defaultStyle) - } - n.SendNotification("Ginkgo [ERROR]", fmt.Sprintf(`After suite command "%s" failed`, n.commandFlags.AfterSuiteHook)) - } else { - fmt.Println("Post-suite command succeeded:") - if config.DefaultReporterConfig.NoColor { - fmt.Printf("\t%s\n", output) - } else { - fmt.Printf("\t%s%s%s\n", greenColor, string(output), defaultStyle) - } - } - } -} diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/outline/ginkgo.go b/vendor/github.com/onsi/ginkgo/ginkgo/outline/ginkgo.go deleted file mode 100644 index ce6b7fcd..00000000 --- a/vendor/github.com/onsi/ginkgo/ginkgo/outline/ginkgo.go +++ /dev/null @@ -1,243 +0,0 @@ -package outline - -import ( - "go/ast" - "go/token" - "strconv" -) - -const ( - // undefinedTextAlt is used if the spec/container text cannot be derived - undefinedTextAlt = "undefined" -) - -// ginkgoMetadata holds useful bits of information for every entry in the outline -type ginkgoMetadata struct { - // Name is the spec or container function name, e.g. 
`Describe` or `It` - Name string `json:"name"` - - // Text is the `text` argument passed to specs, and some containers - Text string `json:"text"` - - // Start is the position of first character of the spec or container block - Start int `json:"start"` - - // End is the position of first character immediately after the spec or container block - End int `json:"end"` - - Spec bool `json:"spec"` - Focused bool `json:"focused"` - Pending bool `json:"pending"` -} - -// ginkgoNode is used to construct the outline as a tree -type ginkgoNode struct { - ginkgoMetadata - Nodes []*ginkgoNode `json:"nodes"` -} - -type walkFunc func(n *ginkgoNode) - -func (n *ginkgoNode) PreOrder(f walkFunc) { - f(n) - for _, m := range n.Nodes { - m.PreOrder(f) - } -} - -func (n *ginkgoNode) PostOrder(f walkFunc) { - for _, m := range n.Nodes { - m.PostOrder(f) - } - f(n) -} - -func (n *ginkgoNode) Walk(pre, post walkFunc) { - pre(n) - for _, m := range n.Nodes { - m.Walk(pre, post) - } - post(n) -} - -// PropagateInheritedProperties propagates the Pending and Focused properties -// through the subtree rooted at n. -func (n *ginkgoNode) PropagateInheritedProperties() { - n.PreOrder(func(thisNode *ginkgoNode) { - for _, descendantNode := range thisNode.Nodes { - if thisNode.Pending { - descendantNode.Pending = true - descendantNode.Focused = false - } - if thisNode.Focused && !descendantNode.Pending { - descendantNode.Focused = true - } - } - }) -} - -// BackpropagateUnfocus propagates the Focused property through the subtree -// rooted at n. It applies the rule described in the Ginkgo docs: -// > Nested programmatically focused specs follow a simple rule: if a -// > leaf-node is marked focused, any of its ancestor nodes that are marked -// > focus will be unfocused. -func (n *ginkgoNode) BackpropagateUnfocus() { - focusedSpecInSubtreeStack := []bool{} - n.PostOrder(func(thisNode *ginkgoNode) { - if thisNode.Spec { - focusedSpecInSubtreeStack = append(focusedSpecInSubtreeStack, thisNode.Focused) - return - } - focusedSpecInSubtree := false - for range thisNode.Nodes { - focusedSpecInSubtree = focusedSpecInSubtree || focusedSpecInSubtreeStack[len(focusedSpecInSubtreeStack)-1] - focusedSpecInSubtreeStack = focusedSpecInSubtreeStack[0 : len(focusedSpecInSubtreeStack)-1] - } - focusedSpecInSubtreeStack = append(focusedSpecInSubtreeStack, focusedSpecInSubtree) - if focusedSpecInSubtree { - thisNode.Focused = false - } - }) - -} - -func packageAndIdentNamesFromCallExpr(ce *ast.CallExpr) (string, string, bool) { - switch ex := ce.Fun.(type) { - case *ast.Ident: - return "", ex.Name, true - case *ast.SelectorExpr: - pkgID, ok := ex.X.(*ast.Ident) - if !ok { - return "", "", false - } - // A package identifier is top-level, so Obj must be nil - if pkgID.Obj != nil { - return "", "", false - } - if ex.Sel == nil { - return "", "", false - } - return pkgID.Name, ex.Sel.Name, true - default: - return "", "", false - } -} - -// absoluteOffsetsForNode derives the absolute character offsets of the node start and -// end positions. -func absoluteOffsetsForNode(fset *token.FileSet, n ast.Node) (start, end int) { - return fset.PositionFor(n.Pos(), false).Offset, fset.PositionFor(n.End(), false).Offset -} - -// ginkgoNodeFromCallExpr derives an outline entry from a go AST subtree -// corresponding to a Ginkgo container or spec. 
-func ginkgoNodeFromCallExpr(fset *token.FileSet, ce *ast.CallExpr, ginkgoPackageName, tablePackageName *string) (*ginkgoNode, bool) { - packageName, identName, ok := packageAndIdentNamesFromCallExpr(ce) - if !ok { - return nil, false - } - - n := ginkgoNode{} - n.Name = identName - n.Start, n.End = absoluteOffsetsForNode(fset, ce) - n.Nodes = make([]*ginkgoNode, 0) - switch identName { - case "It", "Measure", "Specify": - n.Spec = true - n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) - return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName - case "Entry": - n.Spec = true - n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) - return &n, tablePackageName != nil && *tablePackageName == packageName - case "FIt", "FMeasure", "FSpecify": - n.Spec = true - n.Focused = true - n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) - return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName - case "FEntry": - n.Spec = true - n.Focused = true - n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) - return &n, tablePackageName != nil && *tablePackageName == packageName - case "PIt", "PMeasure", "PSpecify", "XIt", "XMeasure", "XSpecify": - n.Spec = true - n.Pending = true - n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) - return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName - case "PEntry", "XEntry": - n.Spec = true - n.Pending = true - n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) - return &n, tablePackageName != nil && *tablePackageName == packageName - case "Context", "Describe", "When": - n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) - return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName - case "DescribeTable": - n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) - return &n, tablePackageName != nil && *tablePackageName == packageName - case "FContext", "FDescribe", "FWhen": - n.Focused = true - n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) - return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName - case "FDescribeTable": - n.Focused = true - n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) - return &n, tablePackageName != nil && *tablePackageName == packageName - case "PContext", "PDescribe", "PWhen", "XContext", "XDescribe", "XWhen": - n.Pending = true - n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) - return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName - case "PDescribeTable", "XDescribeTable": - n.Pending = true - n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) - return &n, tablePackageName != nil && *tablePackageName == packageName - case "By": - n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) - return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName - case "AfterEach", "BeforeEach": - return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName - case "JustAfterEach", "JustBeforeEach": - return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName - case "AfterSuite", "BeforeSuite": - return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName - case "SynchronizedAfterSuite", "SynchronizedBeforeSuite": - return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName - default: - return nil, false - } -} - -// textOrAltFromCallExpr tries to derive the "text" of a Ginkgo spec or -// container. If it cannot derive it, it returns the alt text. 
-func textOrAltFromCallExpr(ce *ast.CallExpr, alt string) string { - text, defined := textFromCallExpr(ce) - if !defined { - return alt - } - return text -} - -// textFromCallExpr tries to derive the "text" of a Ginkgo spec or container. If -// it cannot derive it, it returns false. -func textFromCallExpr(ce *ast.CallExpr) (string, bool) { - if len(ce.Args) < 1 { - return "", false - } - text, ok := ce.Args[0].(*ast.BasicLit) - if !ok { - return "", false - } - switch text.Kind { - case token.CHAR, token.STRING: - // For token.CHAR and token.STRING, Value is quoted - unquoted, err := strconv.Unquote(text.Value) - if err != nil { - // If unquoting fails, just use the raw Value - return text.Value, true - } - return unquoted, true - default: - return text.Value, true - } -} diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/outline/import.go b/vendor/github.com/onsi/ginkgo/ginkgo/outline/import.go deleted file mode 100644 index 4328ab39..00000000 --- a/vendor/github.com/onsi/ginkgo/ginkgo/outline/import.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Most of the required functions were available in the -// "golang.org/x/tools/go/ast/astutil" package, but not exported. -// They were copied from https://github.com/golang/tools/blob/2b0845dc783e36ae26d683f4915a5840ef01ab0f/go/ast/astutil/imports.go - -package outline - -import ( - "go/ast" - "strconv" - "strings" -) - -// packageNameForImport returns the package name for the package. If the package -// is not imported, it returns nil. "Package name" refers to `pkgname` in the -// call expression `pkgname.ExportedIdentifier`. Examples: -// (import path not found) -> nil -// "import example.com/pkg/foo" -> "foo" -// "import fooalias example.com/pkg/foo" -> "fooalias" -// "import . example.com/pkg/foo" -> "" -func packageNameForImport(f *ast.File, path string) *string { - spec := importSpec(f, path) - if spec == nil { - return nil - } - name := spec.Name.String() - if name == "" { - // If the package name is not explicitly specified, - // make an educated guess. This is not guaranteed to be correct. - lastSlash := strings.LastIndex(path, "/") - if lastSlash == -1 { - name = path - } else { - name = path[lastSlash+1:] - } - } - if name == "." { - name = "" - } - return &name -} - -// importSpec returns the import spec if f imports path, -// or nil otherwise. -func importSpec(f *ast.File, path string) *ast.ImportSpec { - for _, s := range f.Imports { - if importPath(s) == path { - return s - } - } - return nil -} - -// importPath returns the unquoted import path of s, -// or "" if the path is not properly quoted. 
-func importPath(s *ast.ImportSpec) string { - t, err := strconv.Unquote(s.Path.Value) - if err != nil { - return "" - } - return t -} diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/outline/outline.go b/vendor/github.com/onsi/ginkgo/ginkgo/outline/outline.go deleted file mode 100644 index 242e6a10..00000000 --- a/vendor/github.com/onsi/ginkgo/ginkgo/outline/outline.go +++ /dev/null @@ -1,107 +0,0 @@ -package outline - -import ( - "encoding/json" - "fmt" - "go/ast" - "go/token" - "strings" - - "golang.org/x/tools/go/ast/inspector" -) - -const ( - // ginkgoImportPath is the well-known ginkgo import path - ginkgoImportPath = "github.com/onsi/ginkgo" - - // tableImportPath is the well-known table extension import path - tableImportPath = "github.com/onsi/ginkgo/extensions/table" -) - -// FromASTFile returns an outline for a Ginkgo test source file -func FromASTFile(fset *token.FileSet, src *ast.File) (*outline, error) { - ginkgoPackageName := packageNameForImport(src, ginkgoImportPath) - tablePackageName := packageNameForImport(src, tableImportPath) - if ginkgoPackageName == nil && tablePackageName == nil { - return nil, fmt.Errorf("file does not import %q or %q", ginkgoImportPath, tableImportPath) - } - - root := ginkgoNode{} - stack := []*ginkgoNode{&root} - ispr := inspector.New([]*ast.File{src}) - ispr.Nodes([]ast.Node{(*ast.CallExpr)(nil)}, func(node ast.Node, push bool) bool { - if push { - // Pre-order traversal - ce, ok := node.(*ast.CallExpr) - if !ok { - // Because `Nodes` calls this function only when the node is an - // ast.CallExpr, this should never happen - panic(fmt.Errorf("node starting at %d, ending at %d is not an *ast.CallExpr", node.Pos(), node.End())) - } - gn, ok := ginkgoNodeFromCallExpr(fset, ce, ginkgoPackageName, tablePackageName) - if !ok { - // Node is not a Ginkgo spec or container, continue - return true - } - parent := stack[len(stack)-1] - parent.Nodes = append(parent.Nodes, gn) - stack = append(stack, gn) - return true - } - // Post-order traversal - start, end := absoluteOffsetsForNode(fset, node) - lastVisitedGinkgoNode := stack[len(stack)-1] - if start != lastVisitedGinkgoNode.Start || end != lastVisitedGinkgoNode.End { - // Node is not a Ginkgo spec or container, so it was not pushed onto the stack, continue - return true - } - stack = stack[0 : len(stack)-1] - return true - }) - if len(root.Nodes) == 0 { - return &outline{[]*ginkgoNode{}}, nil - } - - // Derive the final focused property for all nodes. This must be done - // _before_ propagating the inherited focused property. - root.BackpropagateUnfocus() - // Now, propagate inherited properties, including focused and pending. - root.PropagateInheritedProperties() - - return &outline{root.Nodes}, nil -} - -type outline struct { - Nodes []*ginkgoNode `json:"nodes"` -} - -func (o *outline) MarshalJSON() ([]byte, error) { - return json.Marshal(o.Nodes) -} - -// String returns a CSV-formatted outline. Specs or containers are output in -// depth-first order. -func (o *outline) String() string { - return o.StringIndent(0) -} - -// StringIndent returns a CSV-formatted outline, but every line is indented by -// one 'width' of spaces for every level of nesting.
-func (o *outline) StringIndent(width int) string { - var b strings.Builder - b.WriteString("Name,Text,Start,End,Spec,Focused,Pending\n") - - currentIndent := 0 - pre := func(n *ginkgoNode) { - b.WriteString(fmt.Sprintf("%*s", currentIndent, "")) - b.WriteString(fmt.Sprintf("%s,%s,%d,%d,%t,%t,%t\n", n.Name, n.Text, n.Start, n.End, n.Spec, n.Focused, n.Pending)) - currentIndent += width - } - post := func(n *ginkgoNode) { - currentIndent -= width - } - for _, n := range o.Nodes { - n.Walk(pre, post) - } - return b.String() -} diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/outline_command.go b/vendor/github.com/onsi/ginkgo/ginkgo/outline_command.go deleted file mode 100644 index 96ca7ad2..00000000 --- a/vendor/github.com/onsi/ginkgo/ginkgo/outline_command.go +++ /dev/null @@ -1,95 +0,0 @@ -package main - -import ( - "encoding/json" - "flag" - "fmt" - "go/parser" - "go/token" - "os" - - "github.com/onsi/ginkgo/ginkgo/outline" -) - -const ( - // indentWidth is the width used by the 'indent' output - indentWidth = 4 - // stdinAlias is a portable alias for stdin. This convention is used in - // other CLIs, e.g., kubectl. - stdinAlias = "-" - usageCommand = "ginkgo outline <filename>" -) - -func BuildOutlineCommand() *Command { - const defaultFormat = "csv" - var format string - flagSet := flag.NewFlagSet("outline", flag.ExitOnError) - flagSet.StringVar(&format, "format", defaultFormat, "Format of outline. Accepted: 'csv', 'indent', 'json'") - return &Command{ - Name: "outline", - FlagSet: flagSet, - UsageCommand: usageCommand, - Usage: []string{ - "Create an outline of Ginkgo symbols for a file", - "To read from stdin, use: `ginkgo outline -`", - "Accepts the following flags:", - }, - Command: func(args []string, additionalArgs []string) { - outlineFile(args, format) - }, - } -} - -func outlineFile(args []string, format string) { - if len(args) != 1 { - println(fmt.Sprintf("usage: %s", usageCommand)) - os.Exit(1) - } - - filename := args[0] - var src *os.File - if filename == stdinAlias { - src = os.Stdin - } else { - var err error - src, err = os.Open(filename) - if err != nil { - println(fmt.Sprintf("error opening file: %s", err)) - os.Exit(1) - } - } - - fset := token.NewFileSet() - - parsedSrc, err := parser.ParseFile(fset, filename, src, 0) - if err != nil { - println(fmt.Sprintf("error parsing source: %s", err)) - os.Exit(1) - } - - o, err := outline.FromASTFile(fset, parsedSrc) - if err != nil { - println(fmt.Sprintf("error creating outline: %s", err)) - os.Exit(1) - } - - var oerr error - switch format { - case "csv": - _, oerr = fmt.Print(o) - case "indent": - _, oerr = fmt.Print(o.StringIndent(indentWidth)) - case "json": - b, err := json.Marshal(o) - if err != nil { - println(fmt.Sprintf("error marshalling to json: %s", err)) - } - _, oerr = fmt.Println(string(b)) - default: - complainAndQuit(fmt.Sprintf("format %s not accepted", format)) - } - if oerr != nil { - println(fmt.Sprintf("error writing outline: %s", oerr)) - os.Exit(1) - } -} diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/run_command.go b/vendor/github.com/onsi/ginkgo/ginkgo/run_command.go deleted file mode 100644 index c7f80d14..00000000 --- a/vendor/github.com/onsi/ginkgo/ginkgo/run_command.go +++ /dev/null @@ -1,315 +0,0 @@ -package main - -import ( - "flag" - "fmt" - "math/rand" - "os" - "regexp" - "runtime" - "strings" - "time" - - "io/ioutil" - "path/filepath" - - "github.com/onsi/ginkgo/config" - "github.com/onsi/ginkgo/ginkgo/interrupthandler" - "github.com/onsi/ginkgo/ginkgo/testrunner" - colorable
"github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable" - "github.com/onsi/ginkgo/types" -) - -func BuildRunCommand() *Command { - commandFlags := NewRunCommandFlags(flag.NewFlagSet("ginkgo", flag.ExitOnError)) - notifier := NewNotifier(commandFlags) - interruptHandler := interrupthandler.NewInterruptHandler() - runner := &SpecRunner{ - commandFlags: commandFlags, - notifier: notifier, - interruptHandler: interruptHandler, - suiteRunner: NewSuiteRunner(notifier, interruptHandler), - } - - return &Command{ - Name: "", - FlagSet: commandFlags.FlagSet, - UsageCommand: "ginkgo -- ", - Usage: []string{ - "Run the tests in the passed in (or the package in the current directory if left blank).", - "Any arguments after -- will be passed to the test.", - "Accepts the following flags:", - }, - Command: runner.RunSpecs, - } -} - -type SpecRunner struct { - commandFlags *RunWatchAndBuildCommandFlags - notifier *Notifier - interruptHandler *interrupthandler.InterruptHandler - suiteRunner *SuiteRunner -} - -func (r *SpecRunner) RunSpecs(args []string, additionalArgs []string) { - r.commandFlags.computeNodes() - r.notifier.VerifyNotificationsAreAvailable() - - deprecationTracker := types.NewDeprecationTracker() - - if r.commandFlags.ParallelStream && (runtime.GOOS != "windows") { - deprecationTracker.TrackDeprecation(types.Deprecation{ - Message: "--stream is deprecated and will be removed in Ginkgo 2.0", - DocLink: "removed--stream", - Version: "1.16.0", - }) - } - - if r.commandFlags.Notify { - deprecationTracker.TrackDeprecation(types.Deprecation{ - Message: "--notify is deprecated and will be removed in Ginkgo 2.0", - DocLink: "removed--notify", - Version: "1.16.0", - }) - } - - if deprecationTracker.DidTrackDeprecations() { - fmt.Fprintln(colorable.NewColorableStderr(), deprecationTracker.DeprecationsReport()) - } - - suites, skippedPackages := findSuites(args, r.commandFlags.Recurse, r.commandFlags.SkipPackage, true) - if len(skippedPackages) > 0 { - fmt.Println("Will skip:") - for _, skippedPackage := range skippedPackages { - fmt.Println(" " + skippedPackage) - } - } - - if len(skippedPackages) > 0 && len(suites) == 0 { - fmt.Println("All tests skipped! 
Exiting...") - os.Exit(0) - } - - if len(suites) == 0 { - complainAndQuit("Found no test suites") - } - - r.ComputeSuccinctMode(len(suites)) - - t := time.Now() - - runners := []*testrunner.TestRunner{} - for _, suite := range suites { - runners = append(runners, testrunner.New(suite, r.commandFlags.NumCPU, r.commandFlags.ParallelStream, r.commandFlags.Timeout, r.commandFlags.GoOpts, additionalArgs)) - } - - numSuites := 0 - runResult := testrunner.PassingRunResult() - if r.commandFlags.UntilItFails { - iteration := 0 - for { - r.UpdateSeed() - randomizedRunners := r.randomizeOrder(runners) - runResult, numSuites = r.suiteRunner.RunSuites(randomizedRunners, r.commandFlags.NumCompilers, r.commandFlags.KeepGoing, nil) - iteration++ - - if r.interruptHandler.WasInterrupted() { - break - } - - if runResult.Passed { - fmt.Printf("\nAll tests passed...\nWill keep running them until they fail.\nThis was attempt #%d\n%s\n", iteration, orcMessage(iteration)) - } else { - fmt.Printf("\nTests failed on attempt #%d\n\n", iteration) - break - } - } - } else { - randomizedRunners := r.randomizeOrder(runners) - runResult, numSuites = r.suiteRunner.RunSuites(randomizedRunners, r.commandFlags.NumCompilers, r.commandFlags.KeepGoing, nil) - } - - for _, runner := range runners { - runner.CleanUp() - } - - if r.isInCoverageMode() { - if r.getOutputDir() != "" { - // If coverprofile is set, combine coverages - if r.getCoverprofile() != "" { - if err := r.combineCoverprofiles(runners); err != nil { - fmt.Println(err.Error()) - os.Exit(1) - } - } else { - // Just move them - r.moveCoverprofiles(runners) - } - } - } - - fmt.Printf("\nGinkgo ran %d %s in %s\n", numSuites, pluralizedWord("suite", "suites", numSuites), time.Since(t)) - - if runResult.Passed { - if runResult.HasProgrammaticFocus && strings.TrimSpace(os.Getenv("GINKGO_EDITOR_INTEGRATION")) == "" { - fmt.Printf("Test Suite Passed\n") - fmt.Printf("Detected Programmatic Focus - setting exit status to %d\n", types.GINKGO_FOCUS_EXIT_CODE) - os.Exit(types.GINKGO_FOCUS_EXIT_CODE) - } else { - fmt.Printf("Test Suite Passed\n") - os.Exit(0) - } - } else { - fmt.Printf("Test Suite Failed\n") - os.Exit(1) - } -} - -// Moves all generated profiles to specified directory -func (r *SpecRunner) moveCoverprofiles(runners []*testrunner.TestRunner) { - for _, runner := range runners { - _, filename := filepath.Split(runner.CoverageFile) - err := os.Rename(runner.CoverageFile, filepath.Join(r.getOutputDir(), filename)) - - if err != nil { - fmt.Printf("Unable to move coverprofile %s, %v\n", runner.CoverageFile, err) - return - } - } -} - -// Combines all generated profiles in the specified directory -func (r *SpecRunner) combineCoverprofiles(runners []*testrunner.TestRunner) error { - - path, _ := filepath.Abs(r.getOutputDir()) - if !fileExists(path) { - return fmt.Errorf("Unable to create combined profile, outputdir does not exist: %s", r.getOutputDir()) - } - - fmt.Println("path is " + path) - - combined, err := os.OpenFile( - filepath.Join(path, r.getCoverprofile()), - os.O_WRONLY|os.O_CREATE, - 0666, - ) - - if err != nil { - fmt.Printf("Unable to create combined profile, %v\n", err) - return nil // non-fatal error - } - - modeRegex := regexp.MustCompile(`^mode: .*\n`) - for index, runner := range runners { - contents, err := ioutil.ReadFile(runner.CoverageFile) - - if err != nil { - fmt.Printf("Unable to read coverage file %s to combine, %v\n", runner.CoverageFile, err) - return nil // non-fatal error - } - - // remove the cover mode line from every file - // 
except the first one - if index > 0 { - contents = modeRegex.ReplaceAll(contents, []byte{}) - } - - _, err = combined.Write(contents) - - // Add a newline to the end of every file if missing. - if err == nil && len(contents) > 0 && contents[len(contents)-1] != '\n' { - _, err = combined.Write([]byte("\n")) - } - - if err != nil { - fmt.Printf("Unable to append to coverprofile, %v\n", err) - return nil // non-fatal error - } - } - - fmt.Println("All profiles combined") - return nil -} - -func (r *SpecRunner) isInCoverageMode() bool { - opts := r.commandFlags.GoOpts - return *opts["cover"].(*bool) || *opts["coverpkg"].(*string) != "" || *opts["covermode"].(*string) != "" -} - -func (r *SpecRunner) getCoverprofile() string { - return *r.commandFlags.GoOpts["coverprofile"].(*string) -} - -func (r *SpecRunner) getOutputDir() string { - return *r.commandFlags.GoOpts["outputdir"].(*string) -} - -func (r *SpecRunner) ComputeSuccinctMode(numSuites int) { - if config.DefaultReporterConfig.Verbose { - config.DefaultReporterConfig.Succinct = false - return - } - - if numSuites == 1 { - return - } - - if numSuites > 1 && !r.commandFlags.wasSet("succinct") { - config.DefaultReporterConfig.Succinct = true - } -} - -func (r *SpecRunner) UpdateSeed() { - if !r.commandFlags.wasSet("seed") { - config.GinkgoConfig.RandomSeed = time.Now().Unix() - } -} - -func (r *SpecRunner) randomizeOrder(runners []*testrunner.TestRunner) []*testrunner.TestRunner { - if !r.commandFlags.RandomizeSuites { - return runners - } - - if len(runners) <= 1 { - return runners - } - - randomizedRunners := make([]*testrunner.TestRunner, len(runners)) - randomizer := rand.New(rand.NewSource(config.GinkgoConfig.RandomSeed)) - permutation := randomizer.Perm(len(runners)) - for i, j := range permutation { - randomizedRunners[i] = runners[j] - } - return randomizedRunners -} - -func orcMessage(iteration int) string { - if iteration < 10 { - return "" - } else if iteration < 30 { - return []string{ - "If at first you succeed...", - "...try, try again.", - "Looking good!", - "Still good...", - "I think your tests are fine....", - "Yep, still passing", - "Oh boy, here I go testin' again!", - "Even the gophers are getting bored", - "Did you try -race?", - "Maybe you should stop now?", - "I'm getting tired...", - "What if I just made you a sandwich?", - "Hit ^C, hit ^C, please hit ^C", - "Make it stop. Please!", - "Come on! Enough is enough!", - "Dave, this conversation can serve no purpose anymore. Goodbye.", - "Just what do you think you're doing, Dave? ", - "I, Sisyphus", - "Insanity: doing the same thing over and over again and expecting different results. -Einstein", - "I guess Einstein never tried to churn butter", - }[iteration-10] + "\n" - } else { - return "No, seriously... 
you can probably stop now.\n" - } -} diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/run_watch_and_build_command_flags.go b/vendor/github.com/onsi/ginkgo/ginkgo/run_watch_and_build_command_flags.go deleted file mode 100644 index e0994fc3..00000000 --- a/vendor/github.com/onsi/ginkgo/ginkgo/run_watch_and_build_command_flags.go +++ /dev/null @@ -1,169 +0,0 @@ -package main - -import ( - "flag" - "runtime" - - "time" - - "github.com/onsi/ginkgo/config" -) - -type RunWatchAndBuildCommandFlags struct { - Recurse bool - SkipPackage string - GoOpts map[string]interface{} - - //for run and watch commands - NumCPU int - NumCompilers int - ParallelStream bool - Notify bool - AfterSuiteHook string - AutoNodes bool - Timeout time.Duration - - //only for run command - KeepGoing bool - UntilItFails bool - RandomizeSuites bool - - //only for watch command - Depth int - WatchRegExp string - - FlagSet *flag.FlagSet -} - -const runMode = 1 -const watchMode = 2 -const buildMode = 3 - -func NewRunCommandFlags(flagSet *flag.FlagSet) *RunWatchAndBuildCommandFlags { - c := &RunWatchAndBuildCommandFlags{ - FlagSet: flagSet, - } - c.flags(runMode) - return c -} - -func NewWatchCommandFlags(flagSet *flag.FlagSet) *RunWatchAndBuildCommandFlags { - c := &RunWatchAndBuildCommandFlags{ - FlagSet: flagSet, - } - c.flags(watchMode) - return c -} - -func NewBuildCommandFlags(flagSet *flag.FlagSet) *RunWatchAndBuildCommandFlags { - c := &RunWatchAndBuildCommandFlags{ - FlagSet: flagSet, - } - c.flags(buildMode) - return c -} - -func (c *RunWatchAndBuildCommandFlags) wasSet(flagName string) bool { - wasSet := false - c.FlagSet.Visit(func(f *flag.Flag) { - if f.Name == flagName { - wasSet = true - } - }) - - return wasSet -} - -func (c *RunWatchAndBuildCommandFlags) computeNodes() { - if c.wasSet("nodes") { - return - } - if c.AutoNodes { - switch n := runtime.NumCPU(); { - case n <= 4: - c.NumCPU = n - default: - c.NumCPU = n - 1 - } - } -} - -func (c *RunWatchAndBuildCommandFlags) stringSlot(slot string) *string { - var opt string - c.GoOpts[slot] = &opt - return &opt -} - -func (c *RunWatchAndBuildCommandFlags) boolSlot(slot string) *bool { - var opt bool - c.GoOpts[slot] = &opt - return &opt -} - -func (c *RunWatchAndBuildCommandFlags) intSlot(slot string) *int { - var opt int - c.GoOpts[slot] = &opt - return &opt -} - -func (c *RunWatchAndBuildCommandFlags) flags(mode int) { - c.GoOpts = make(map[string]interface{}) - - onWindows := (runtime.GOOS == "windows") - - c.FlagSet.BoolVar(&(c.Recurse), "r", false, "Find and run test suites under the current directory recursively.") - c.FlagSet.BoolVar(c.boolSlot("race"), "race", false, "Run tests with race detection enabled.") - c.FlagSet.BoolVar(c.boolSlot("cover"), "cover", false, "Run tests with coverage analysis, will generate coverage profiles with the package name in the current directory.") - c.FlagSet.StringVar(c.stringSlot("coverpkg"), "coverpkg", "", "Run tests with coverage on the given external modules.") - c.FlagSet.StringVar(&(c.SkipPackage), "skipPackage", "", "A comma-separated list of package names to be skipped. 
If any part of the package's path matches, that package is ignored.") - c.FlagSet.StringVar(c.stringSlot("tags"), "tags", "", "A list of build tags to consider satisfied during the build.") - c.FlagSet.StringVar(c.stringSlot("gcflags"), "gcflags", "", "Arguments to pass on each go tool compile invocation.") - c.FlagSet.StringVar(c.stringSlot("covermode"), "covermode", "", "Set the mode for coverage analysis.") - c.FlagSet.BoolVar(c.boolSlot("a"), "a", false, "Force rebuilding of packages that are already up-to-date.") - c.FlagSet.BoolVar(c.boolSlot("n"), "n", false, "Have `go test` print the commands but do not run them.") - c.FlagSet.BoolVar(c.boolSlot("msan"), "msan", false, "Enable interoperation with memory sanitizer.") - c.FlagSet.BoolVar(c.boolSlot("x"), "x", false, "Have `go test` print the commands.") - c.FlagSet.BoolVar(c.boolSlot("work"), "work", false, "Print the name of the temporary work directory and do not delete it when exiting.") - c.FlagSet.StringVar(c.stringSlot("asmflags"), "asmflags", "", "Arguments to pass on each go tool asm invocation.") - c.FlagSet.StringVar(c.stringSlot("buildmode"), "buildmode", "", "Build mode to use. See 'go help buildmode' for more.") - c.FlagSet.StringVar(c.stringSlot("mod"), "mod", "", "Go module control. See 'go help modules' for more.") - c.FlagSet.StringVar(c.stringSlot("compiler"), "compiler", "", "Name of compiler to use, as in runtime.Compiler (gccgo or gc).") - c.FlagSet.StringVar(c.stringSlot("gccgoflags"), "gccgoflags", "", "Arguments to pass on each gccgo compiler/linker invocation.") - c.FlagSet.StringVar(c.stringSlot("installsuffix"), "installsuffix", "", "A suffix to use in the name of the package installation directory.") - c.FlagSet.StringVar(c.stringSlot("ldflags"), "ldflags", "", "Arguments to pass on each go tool link invocation.") - c.FlagSet.BoolVar(c.boolSlot("linkshared"), "linkshared", false, "Link against shared libraries previously created with -buildmode=shared.") - c.FlagSet.StringVar(c.stringSlot("pkgdir"), "pkgdir", "", "install and load all packages from the given dir instead of the usual locations.") - c.FlagSet.StringVar(c.stringSlot("toolexec"), "toolexec", "", "a program to use to invoke toolchain programs like vet and asm.") - c.FlagSet.IntVar(c.intSlot("blockprofilerate"), "blockprofilerate", 1, "Control the detail provided in goroutine blocking profiles by calling runtime.SetBlockProfileRate with the given value.") - c.FlagSet.StringVar(c.stringSlot("coverprofile"), "coverprofile", "", "Write a coverage profile to the specified file after all tests have passed.") - c.FlagSet.StringVar(c.stringSlot("cpuprofile"), "cpuprofile", "", "Write a CPU profile to the specified file before exiting.") - c.FlagSet.StringVar(c.stringSlot("memprofile"), "memprofile", "", "Write a memory profile to the specified file after all tests have passed.") - c.FlagSet.IntVar(c.intSlot("memprofilerate"), "memprofilerate", 0, "Enable more precise (and expensive) memory profiles by setting runtime.MemProfileRate.") - c.FlagSet.StringVar(c.stringSlot("outputdir"), "outputdir", "", "Place output files from profiling in the specified directory.") - c.FlagSet.BoolVar(c.boolSlot("requireSuite"), "requireSuite", false, "Fail if there are ginkgo tests in a directory but no test suite (missing RunSpecs)") - c.FlagSet.StringVar(c.stringSlot("vet"), "vet", "", "Configure the invocation of 'go vet' to use the comma-separated list of vet checks. 
If list is 'off', 'go test' does not run 'go vet' at all.") - - if mode == runMode || mode == watchMode { - config.Flags(c.FlagSet, "", false) - c.FlagSet.IntVar(&(c.NumCPU), "nodes", 1, "The number of parallel test nodes to run") - c.FlagSet.IntVar(&(c.NumCompilers), "compilers", 0, "The number of concurrent compilations to run (0 will autodetect)") - c.FlagSet.BoolVar(&(c.AutoNodes), "p", false, "Run in parallel with auto-detected number of nodes") - c.FlagSet.BoolVar(&(c.ParallelStream), "stream", onWindows, "stream parallel test output in real time: less coherent, but useful for debugging") - if !onWindows { - c.FlagSet.BoolVar(&(c.Notify), "notify", false, "Send desktop notifications when a test run completes") - } - c.FlagSet.StringVar(&(c.AfterSuiteHook), "afterSuiteHook", "", "Run a command when a suite test run completes") - c.FlagSet.DurationVar(&(c.Timeout), "timeout", 24*time.Hour, "Suite fails if it does not complete within the specified timeout") - } - - if mode == runMode { - c.FlagSet.BoolVar(&(c.KeepGoing), "keepGoing", false, "When true, failures from earlier test suites do not prevent later test suites from running") - c.FlagSet.BoolVar(&(c.UntilItFails), "untilItFails", false, "When true, Ginkgo will keep rerunning tests until a failure occurs") - c.FlagSet.BoolVar(&(c.RandomizeSuites), "randomizeSuites", false, "When true, Ginkgo will randomize the order in which test suites run") - } - - if mode == watchMode { - c.FlagSet.IntVar(&(c.Depth), "depth", 1, "Ginkgo will watch dependencies down to this depth in the dependency tree") - c.FlagSet.StringVar(&(c.WatchRegExp), "watchRegExp", `\.go$`, "Files matching this regular expression will be watched for changes") - } -} diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/suite_runner.go b/vendor/github.com/onsi/ginkgo/ginkgo/suite_runner.go deleted file mode 100644 index ab746d7e..00000000 --- a/vendor/github.com/onsi/ginkgo/ginkgo/suite_runner.go +++ /dev/null @@ -1,173 +0,0 @@ -package main - -import ( - "fmt" - "runtime" - "sync" - - "github.com/onsi/ginkgo/config" - "github.com/onsi/ginkgo/ginkgo/interrupthandler" - "github.com/onsi/ginkgo/ginkgo/testrunner" - "github.com/onsi/ginkgo/ginkgo/testsuite" - colorable "github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable" -) - -type compilationInput struct { - runner *testrunner.TestRunner - result chan compilationOutput -} - -type compilationOutput struct { - runner *testrunner.TestRunner - err error -} - -type SuiteRunner struct { - notifier *Notifier - interruptHandler *interrupthandler.InterruptHandler -} - -func NewSuiteRunner(notifier *Notifier, interruptHandler *interrupthandler.InterruptHandler) *SuiteRunner { - return &SuiteRunner{ - notifier: notifier, - interruptHandler: interruptHandler, - } -} - -func (r *SuiteRunner) compileInParallel(runners []*testrunner.TestRunner, numCompilers int, willCompile func(suite testsuite.TestSuite)) chan compilationOutput { - //we return this to the consumer, it will return each runner in order as it compiles - compilationOutputs := make(chan compilationOutput, len(runners)) - - //an array of channels - the nth runner's compilation output is sent to the nth channel in this array - //we read from these channels in order to ensure we run the suites in order - orderedCompilationOutputs := []chan compilationOutput{} - for range runners { - orderedCompilationOutputs = append(orderedCompilationOutputs, make(chan compilationOutput, 1)) - } - - //we're going to spin up numCompilers compilers - they're going to run 
concurrently and will consume this channel - //we prefill the channel then close it, this ensures we compile things in the correct order - workPool := make(chan compilationInput, len(runners)) - for i, runner := range runners { - workPool <- compilationInput{runner, orderedCompilationOutputs[i]} - } - close(workPool) - - //pick a reasonable numCompilers - if numCompilers == 0 { - numCompilers = runtime.NumCPU() - } - - //a WaitGroup to help us wait for all compilers to shut down - wg := &sync.WaitGroup{} - wg.Add(numCompilers) - - //spin up the concurrent compilers - for i := 0; i < numCompilers; i++ { - go func() { - defer wg.Done() - for input := range workPool { - if r.interruptHandler.WasInterrupted() { - return - } - - if willCompile != nil { - willCompile(input.runner.Suite) - } - - //We retry because Go sometimes steps on itself when multiple compiles happen in parallel. This is ugly, but should help resolve flakiness... - var err error - retries := 0 - for retries <= 5 { - if r.interruptHandler.WasInterrupted() { - return - } - if err = input.runner.Compile(); err == nil { - break - } - retries++ - } - - input.result <- compilationOutput{input.runner, err} - } - }() - } - - //read from the compilation output channels *in order* and send them to the caller - //close the compilationOutputs channel to tell the caller we're done - go func() { - defer close(compilationOutputs) - for _, orderedCompilationOutput := range orderedCompilationOutputs { - select { - case compilationOutput := <-orderedCompilationOutput: - compilationOutputs <- compilationOutput - case <-r.interruptHandler.C: - //interrupt detected, wait for the compilers to shut down then bail - //this ensures we clean up after ourselves as we don't leave any compilation processes running - wg.Wait() - return - } - } - }() - - return compilationOutputs -} - -func (r *SuiteRunner) RunSuites(runners []*testrunner.TestRunner, numCompilers int, keepGoing bool, willCompile func(suite testsuite.TestSuite)) (testrunner.RunResult, int) { - runResult := testrunner.PassingRunResult() - - compilationOutputs := r.compileInParallel(runners, numCompilers, willCompile) - - numSuitesThatRan := 0 - suitesThatFailed := []testsuite.TestSuite{} - for compilationOutput := range compilationOutputs { - if compilationOutput.err != nil { - fmt.Print(compilationOutput.err.Error()) - } - numSuitesThatRan++ - suiteRunResult := testrunner.FailingRunResult() - if compilationOutput.err == nil { - suiteRunResult = compilationOutput.runner.Run() - } - r.notifier.SendSuiteCompletionNotification(compilationOutput.runner.Suite, suiteRunResult.Passed) - r.notifier.RunCommand(compilationOutput.runner.Suite, suiteRunResult.Passed) - runResult = runResult.Merge(suiteRunResult) - if !suiteRunResult.Passed { - suitesThatFailed = append(suitesThatFailed, compilationOutput.runner.Suite) - if !keepGoing { - break - } - } - if numSuitesThatRan < len(runners) && !config.DefaultReporterConfig.Succinct { - fmt.Println("") - } - } - - if keepGoing && !runResult.Passed { - r.listFailedSuites(suitesThatFailed) - } - - return runResult, numSuitesThatRan -} - -func (r *SuiteRunner) listFailedSuites(suitesThatFailed []testsuite.TestSuite) { - fmt.Println("") - fmt.Println("There were failures detected in the following suites:") - - maxPackageNameLength := 0 - for _, suite := range suitesThatFailed { - if len(suite.PackageName) > maxPackageNameLength { - maxPackageNameLength = len(suite.PackageName) - } - } - - packageNameFormatter := fmt.Sprintf("%%%ds", maxPackageNameLength) - 
for _, suite := range suitesThatFailed { - if config.DefaultReporterConfig.NoColor { - fmt.Printf("\t"+packageNameFormatter+" %s\n", suite.PackageName, suite.Path) - } else { - fmt.Fprintf(colorable.NewColorableStdout(), "\t%s"+packageNameFormatter+"%s %s%s%s\n", redColor, suite.PackageName, defaultStyle, lightGrayColor, suite.Path, defaultStyle) - } - } -} diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/build_args.go b/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/build_args.go deleted file mode 100644 index 3b1a238c..00000000 --- a/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/build_args.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build go1.10 - -package testrunner - -var ( - buildArgs = []string{"test", "-c"} -) diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/build_args_old.go b/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/build_args_old.go deleted file mode 100644 index 14d70dbc..00000000 --- a/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/build_args_old.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build !go1.10 - -package testrunner - -var ( - buildArgs = []string{"test", "-c", "-i"} -) diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/log_writer.go b/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/log_writer.go deleted file mode 100644 index a73a6e37..00000000 --- a/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/log_writer.go +++ /dev/null @@ -1,52 +0,0 @@ -package testrunner - -import ( - "bytes" - "fmt" - "io" - "log" - "strings" - "sync" -) - -type logWriter struct { - buffer *bytes.Buffer - lock *sync.Mutex - log *log.Logger -} - -func newLogWriter(target io.Writer, node int) *logWriter { - return &logWriter{ - buffer: &bytes.Buffer{}, - lock: &sync.Mutex{}, - log: log.New(target, fmt.Sprintf("[%d] ", node), 0), - } -} - -func (w *logWriter) Write(data []byte) (n int, err error) { - w.lock.Lock() - defer w.lock.Unlock() - - w.buffer.Write(data) - contents := w.buffer.String() - - lines := strings.Split(contents, "\n") - for _, line := range lines[0 : len(lines)-1] { - w.log.Println(line) - } - - w.buffer.Reset() - w.buffer.Write([]byte(lines[len(lines)-1])) - return len(data), nil -} - -func (w *logWriter) Close() error { - w.lock.Lock() - defer w.lock.Unlock() - - if w.buffer.Len() > 0 { - w.log.Println(w.buffer.String()) - } - - return nil -} diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/run_result.go b/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/run_result.go deleted file mode 100644 index 5d472acb..00000000 --- a/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/run_result.go +++ /dev/null @@ -1,27 +0,0 @@ -package testrunner - -type RunResult struct { - Passed bool - HasProgrammaticFocus bool -} - -func PassingRunResult() RunResult { - return RunResult{ - Passed: true, - HasProgrammaticFocus: false, - } -} - -func FailingRunResult() RunResult { - return RunResult{ - Passed: false, - HasProgrammaticFocus: false, - } -} - -func (r RunResult) Merge(o RunResult) RunResult { - return RunResult{ - Passed: r.Passed && o.Passed, - HasProgrammaticFocus: r.HasProgrammaticFocus || o.HasProgrammaticFocus, - } -} diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/test_runner.go b/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/test_runner.go deleted file mode 100644 index 66c0f06f..00000000 --- a/vendor/github.com/onsi/ginkgo/ginkgo/testrunner/test_runner.go +++ /dev/null @@ -1,554 +0,0 @@ -package testrunner - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "strconv" - "strings" - 
"syscall" - "time" - - "github.com/onsi/ginkgo/config" - "github.com/onsi/ginkgo/ginkgo/testsuite" - "github.com/onsi/ginkgo/internal/remote" - "github.com/onsi/ginkgo/reporters/stenographer" - colorable "github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable" - "github.com/onsi/ginkgo/types" -) - -type TestRunner struct { - Suite testsuite.TestSuite - - compiled bool - compilationTargetPath string - - numCPU int - parallelStream bool - timeout time.Duration - goOpts map[string]interface{} - additionalArgs []string - stderr *bytes.Buffer - - CoverageFile string -} - -func New(suite testsuite.TestSuite, numCPU int, parallelStream bool, timeout time.Duration, goOpts map[string]interface{}, additionalArgs []string) *TestRunner { - runner := &TestRunner{ - Suite: suite, - numCPU: numCPU, - parallelStream: parallelStream, - goOpts: goOpts, - additionalArgs: additionalArgs, - timeout: timeout, - stderr: new(bytes.Buffer), - } - - if !suite.Precompiled { - runner.compilationTargetPath, _ = filepath.Abs(filepath.Join(suite.Path, suite.PackageName+".test")) - } - - return runner -} - -func (t *TestRunner) Compile() error { - return t.CompileTo(t.compilationTargetPath) -} - -func (t *TestRunner) BuildArgs(path string) []string { - args := make([]string, len(buildArgs), len(buildArgs)+3) - copy(args, buildArgs) - args = append(args, "-o", path, t.Suite.Path) - - if t.getCoverMode() != "" { - args = append(args, "-cover", fmt.Sprintf("-covermode=%s", t.getCoverMode())) - } else { - if t.shouldCover() || t.getCoverPackage() != "" { - args = append(args, "-cover", "-covermode=atomic") - } - } - - boolOpts := []string{ - "a", - "n", - "msan", - "race", - "x", - "work", - "linkshared", - } - - for _, opt := range boolOpts { - if s, found := t.goOpts[opt].(*bool); found && *s { - args = append(args, fmt.Sprintf("-%s", opt)) - } - } - - intOpts := []string{ - "memprofilerate", - "blockprofilerate", - } - - for _, opt := range intOpts { - if s, found := t.goOpts[opt].(*int); found { - args = append(args, fmt.Sprintf("-%s=%d", opt, *s)) - } - } - - stringOpts := []string{ - "asmflags", - "buildmode", - "compiler", - "gccgoflags", - "installsuffix", - "ldflags", - "pkgdir", - "toolexec", - "coverprofile", - "cpuprofile", - "memprofile", - "outputdir", - "coverpkg", - "tags", - "gcflags", - "vet", - "mod", - } - - for _, opt := range stringOpts { - if s, found := t.goOpts[opt].(*string); found && *s != "" { - args = append(args, fmt.Sprintf("-%s=%s", opt, *s)) - } - } - return args -} - -func (t *TestRunner) CompileTo(path string) error { - if t.compiled { - return nil - } - - if t.Suite.Precompiled { - return nil - } - - args := t.BuildArgs(path) - cmd := exec.Command("go", args...) - - output, err := cmd.CombinedOutput() - - if err != nil { - if len(output) > 0 { - return fmt.Errorf("Failed to compile %s:\n\n%s", t.Suite.PackageName, output) - } - return fmt.Errorf("Failed to compile %s", t.Suite.PackageName) - } - - if len(output) > 0 { - fmt.Println(string(output)) - } - - if !fileExists(path) { - compiledFile := t.Suite.PackageName + ".test" - if fileExists(compiledFile) { - // seems like we are on an old go version that does not support the -o flag on go test - // move the compiled test file to the desired location by hand - err = os.Rename(compiledFile, path) - if err != nil { - // We cannot move the file, perhaps because the source and destination - // are on different partitions. We can copy the file, however. 
- err = copyFile(compiledFile, path) - if err != nil { - return fmt.Errorf("Failed to copy compiled file: %s", err) - } - } - } else { - return fmt.Errorf("Failed to compile %s: output file %q could not be found", t.Suite.PackageName, path) - } - } - - t.compiled = true - - return nil -} - -func fileExists(path string) bool { - _, err := os.Stat(path) - return err == nil || !os.IsNotExist(err) -} - -// copyFile copies the contents of the file named src to the file named -// by dst. The file will be created if it does not already exist. If the -// destination file exists, all its contents will be replaced by the contents -// of the source file. -func copyFile(src, dst string) error { - srcInfo, err := os.Stat(src) - if err != nil { - return err - } - mode := srcInfo.Mode() - - in, err := os.Open(src) - if err != nil { - return err - } - - defer in.Close() - - out, err := os.Create(dst) - if err != nil { - return err - } - - defer func() { - closeErr := out.Close() - if err == nil { - err = closeErr - } - }() - - _, err = io.Copy(out, in) - if err != nil { - return err - } - - err = out.Sync() - if err != nil { - return err - } - - return out.Chmod(mode) -} - -func (t *TestRunner) Run() RunResult { - if t.Suite.IsGinkgo { - if t.numCPU > 1 { - if t.parallelStream { - return t.runAndStreamParallelGinkgoSuite() - } else { - return t.runParallelGinkgoSuite() - } - } else { - return t.runSerialGinkgoSuite() - } - } else { - return t.runGoTestSuite() - } -} - -func (t *TestRunner) CleanUp() { - if t.Suite.Precompiled { - return - } - os.Remove(t.compilationTargetPath) -} - -func (t *TestRunner) runSerialGinkgoSuite() RunResult { - ginkgoArgs := config.BuildFlagArgs("ginkgo", config.GinkgoConfig, config.DefaultReporterConfig) - return t.run(t.cmd(ginkgoArgs, os.Stdout, 1), nil) -} - -func (t *TestRunner) runGoTestSuite() RunResult { - return t.run(t.cmd([]string{"-test.v"}, os.Stdout, 1), nil) -} - -func (t *TestRunner) runAndStreamParallelGinkgoSuite() RunResult { - completions := make(chan RunResult) - writers := make([]*logWriter, t.numCPU) - - server, err := remote.NewServer(t.numCPU) - if err != nil { - panic("Failed to start parallel spec server") - } - - server.Start() - defer server.Close() - - for cpu := 0; cpu < t.numCPU; cpu++ { - config.GinkgoConfig.ParallelNode = cpu + 1 - config.GinkgoConfig.ParallelTotal = t.numCPU - config.GinkgoConfig.SyncHost = server.Address() - - ginkgoArgs := config.BuildFlagArgs("ginkgo", config.GinkgoConfig, config.DefaultReporterConfig) - - writers[cpu] = newLogWriter(os.Stdout, cpu+1) - - cmd := t.cmd(ginkgoArgs, writers[cpu], cpu+1) - - server.RegisterAlive(cpu+1, func() bool { - if cmd.ProcessState == nil { - return true - } - return !cmd.ProcessState.Exited() - }) - - go t.run(cmd, completions) - } - - res := PassingRunResult() - - for cpu := 0; cpu < t.numCPU; cpu++ { - res = res.Merge(<-completions) - } - - for _, writer := range writers { - writer.Close() - } - - os.Stdout.Sync() - - if t.shouldCombineCoverprofiles() { - t.combineCoverprofiles() - } - - return res -} - -func (t *TestRunner) runParallelGinkgoSuite() RunResult { - result := make(chan bool) - completions := make(chan RunResult) - writers := make([]*logWriter, t.numCPU) - reports := make([]*bytes.Buffer, t.numCPU) - - stenographer := stenographer.New(!config.DefaultReporterConfig.NoColor, config.GinkgoConfig.FlakeAttempts > 1, colorable.NewColorableStdout()) - aggregator := remote.NewAggregator(t.numCPU, result, config.DefaultReporterConfig, stenographer) - - server, err := 
remote.NewServer(t.numCPU) - if err != nil { - panic("Failed to start parallel spec server") - } - server.RegisterReporters(aggregator) - server.Start() - defer server.Close() - - for cpu := 0; cpu < t.numCPU; cpu++ { - config.GinkgoConfig.ParallelNode = cpu + 1 - config.GinkgoConfig.ParallelTotal = t.numCPU - config.GinkgoConfig.SyncHost = server.Address() - config.GinkgoConfig.StreamHost = server.Address() - - ginkgoArgs := config.BuildFlagArgs("ginkgo", config.GinkgoConfig, config.DefaultReporterConfig) - - reports[cpu] = &bytes.Buffer{} - writers[cpu] = newLogWriter(reports[cpu], cpu+1) - - cmd := t.cmd(ginkgoArgs, writers[cpu], cpu+1) - - server.RegisterAlive(cpu+1, func() bool { - if cmd.ProcessState == nil { - return true - } - return !cmd.ProcessState.Exited() - }) - - go t.run(cmd, completions) - } - - res := PassingRunResult() - - for cpu := 0; cpu < t.numCPU; cpu++ { - res = res.Merge(<-completions) - } - - //all test processes are done, at this point - //we should be able to wait for the aggregator to tell us that it's done - - select { - case <-result: - fmt.Println("") - case <-time.After(time.Second): - //the aggregator never got back to us! something must have gone wrong - fmt.Println(` - ------------------------------------------------------------------- - | | - | Ginkgo timed out waiting for all parallel nodes to report back! | - | | - -------------------------------------------------------------------`) - fmt.Println("\n", t.Suite.PackageName, "timed out. path:", t.Suite.Path) - os.Stdout.Sync() - - for _, writer := range writers { - writer.Close() - } - - for _, report := range reports { - fmt.Print(report.String()) - } - - os.Stdout.Sync() - } - - if t.shouldCombineCoverprofiles() { - t.combineCoverprofiles() - } - - return res -} - -const CoverProfileSuffix = ".coverprofile" - -func (t *TestRunner) cmd(ginkgoArgs []string, stream io.Writer, node int) *exec.Cmd { - args := []string{"--test.timeout=" + t.timeout.String()} - - coverProfile := t.getCoverProfile() - - if t.shouldCombineCoverprofiles() { - - testCoverProfile := "--test.coverprofile=" - - coverageFile := "" - // Set default name for coverage results - if coverProfile == "" { - coverageFile = t.Suite.PackageName + CoverProfileSuffix - } else { - coverageFile = coverProfile - } - - testCoverProfile += coverageFile - - t.CoverageFile = filepath.Join(t.Suite.Path, coverageFile) - - if t.numCPU > 1 { - testCoverProfile = fmt.Sprintf("%s.%d", testCoverProfile, node) - } - args = append(args, testCoverProfile) - } - - args = append(args, ginkgoArgs...) - args = append(args, t.additionalArgs...) - - path := t.compilationTargetPath - if t.Suite.Precompiled { - path, _ = filepath.Abs(filepath.Join(t.Suite.Path, fmt.Sprintf("%s.test", t.Suite.PackageName))) - } - - cmd := exec.Command(path, args...) 
- - cmd.Dir = t.Suite.Path - cmd.Stderr = io.MultiWriter(stream, t.stderr) - cmd.Stdout = stream - - return cmd -} - -func (t *TestRunner) shouldCover() bool { - return *t.goOpts["cover"].(*bool) -} - -func (t *TestRunner) shouldRequireSuite() bool { - return *t.goOpts["requireSuite"].(*bool) -} - -func (t *TestRunner) getCoverProfile() string { - return *t.goOpts["coverprofile"].(*string) -} - -func (t *TestRunner) getCoverPackage() string { - return *t.goOpts["coverpkg"].(*string) -} - -func (t *TestRunner) getCoverMode() string { - return *t.goOpts["covermode"].(*string) -} - -func (t *TestRunner) shouldCombineCoverprofiles() bool { - return t.shouldCover() || t.getCoverPackage() != "" || t.getCoverMode() != "" -} - -func (t *TestRunner) run(cmd *exec.Cmd, completions chan RunResult) RunResult { - var res RunResult - - defer func() { - if completions != nil { - completions <- res - } - }() - - err := cmd.Start() - if err != nil { - fmt.Printf("Failed to run test suite!\n\t%s", err.Error()) - return res - } - - cmd.Wait() - - exitStatus := cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus() - res.Passed = (exitStatus == 0) || (exitStatus == types.GINKGO_FOCUS_EXIT_CODE) - res.HasProgrammaticFocus = (exitStatus == types.GINKGO_FOCUS_EXIT_CODE) - - if strings.Contains(t.stderr.String(), "warning: no tests to run") { - if t.shouldRequireSuite() { - res.Passed = false - } - fmt.Fprintf(os.Stderr, `Found no test suites, did you forget to run "ginkgo bootstrap"?`) - } - - return res -} - -func (t *TestRunner) combineCoverprofiles() { - profiles := []string{} - - coverProfile := t.getCoverProfile() - - for cpu := 1; cpu <= t.numCPU; cpu++ { - var coverFile string - if coverProfile == "" { - coverFile = fmt.Sprintf("%s%s.%d", t.Suite.PackageName, CoverProfileSuffix, cpu) - } else { - coverFile = fmt.Sprintf("%s.%d", coverProfile, cpu) - } - - coverFile = filepath.Join(t.Suite.Path, coverFile) - coverProfile, err := ioutil.ReadFile(coverFile) - os.Remove(coverFile) - - if err == nil { - profiles = append(profiles, string(coverProfile)) - } - } - - if len(profiles) != t.numCPU { - return - } - - lines := map[string]int{} - lineOrder := []string{} - for i, coverProfile := range profiles { - for _, line := range strings.Split(coverProfile, "\n")[1:] { - if len(line) == 0 { - continue - } - components := strings.Split(line, " ") - count, _ := strconv.Atoi(components[len(components)-1]) - prefix := strings.Join(components[0:len(components)-1], " ") - lines[prefix] += count - if i == 0 { - lineOrder = append(lineOrder, prefix) - } - } - } - - output := []string{"mode: atomic"} - for _, line := range lineOrder { - output = append(output, fmt.Sprintf("%s %d", line, lines[line])) - } - finalOutput := strings.Join(output, "\n") - - finalFilename := "" - - if coverProfile != "" { - finalFilename = coverProfile - } else { - finalFilename = fmt.Sprintf("%s%s", t.Suite.PackageName, CoverProfileSuffix) - } - - coverageFilepath := filepath.Join(t.Suite.Path, finalFilename) - ioutil.WriteFile(coverageFilepath, []byte(finalOutput), 0666) - - t.CoverageFile = coverageFilepath -} diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/testsuite/test_suite.go b/vendor/github.com/onsi/ginkgo/ginkgo/testsuite/test_suite.go deleted file mode 100644 index 9de8c2bb..00000000 --- a/vendor/github.com/onsi/ginkgo/ginkgo/testsuite/test_suite.go +++ /dev/null @@ -1,115 +0,0 @@ -package testsuite - -import ( - "errors" - "io/ioutil" - "os" - "path/filepath" - "regexp" - "strings" -) - -type TestSuite struct { - Path string - 
PackageName string - IsGinkgo bool - Precompiled bool -} - -func PrecompiledTestSuite(path string) (TestSuite, error) { - info, err := os.Stat(path) - if err != nil { - return TestSuite{}, err - } - - if info.IsDir() { - return TestSuite{}, errors.New("this is a directory, not a file") - } - - if filepath.Ext(path) != ".test" { - return TestSuite{}, errors.New("this is not a .test binary") - } - - if info.Mode()&0111 == 0 { - return TestSuite{}, errors.New("this is not executable") - } - - dir := relPath(filepath.Dir(path)) - packageName := strings.TrimSuffix(filepath.Base(path), filepath.Ext(path)) - - return TestSuite{ - Path: dir, - PackageName: packageName, - IsGinkgo: true, - Precompiled: true, - }, nil -} - -func SuitesInDir(dir string, recurse bool) []TestSuite { - suites := []TestSuite{} - - if vendorExperimentCheck(dir) { - return suites - } - - files, _ := ioutil.ReadDir(dir) - re := regexp.MustCompile(`^[^._].*_test\.go$`) - for _, file := range files { - if !file.IsDir() && re.Match([]byte(file.Name())) { - suites = append(suites, New(dir, files)) - break - } - } - - if recurse { - re = regexp.MustCompile(`^[._]`) - for _, file := range files { - if file.IsDir() && !re.Match([]byte(file.Name())) { - suites = append(suites, SuitesInDir(dir+"/"+file.Name(), recurse)...) - } - } - } - - return suites -} - -func relPath(dir string) string { - dir, _ = filepath.Abs(dir) - cwd, _ := os.Getwd() - dir, _ = filepath.Rel(cwd, filepath.Clean(dir)) - - if string(dir[0]) != "." { - dir = "." + string(filepath.Separator) + dir - } - - return dir -} - -func New(dir string, files []os.FileInfo) TestSuite { - return TestSuite{ - Path: relPath(dir), - PackageName: packageNameForSuite(dir), - IsGinkgo: filesHaveGinkgoSuite(dir, files), - } -} - -func packageNameForSuite(dir string) string { - path, _ := filepath.Abs(dir) - return filepath.Base(path) -} - -func filesHaveGinkgoSuite(dir string, files []os.FileInfo) bool { - reTestFile := regexp.MustCompile(`_test\.go$`) - reGinkgo := regexp.MustCompile(`package ginkgo|\/ginkgo"`) - - for _, file := range files { - if !file.IsDir() && reTestFile.Match([]byte(file.Name())) { - contents, _ := ioutil.ReadFile(dir + "/" + file.Name()) - if reGinkgo.Match(contents) { - return true - } - } - } - - return false -} diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/testsuite/vendor_check_go15.go b/vendor/github.com/onsi/ginkgo/ginkgo/testsuite/vendor_check_go15.go deleted file mode 100644 index 75f827a1..00000000 --- a/vendor/github.com/onsi/ginkgo/ginkgo/testsuite/vendor_check_go15.go +++ /dev/null @@ -1,16 +0,0 @@ -// +build !go1.6 - -package testsuite - -import ( - "os" - "path" -) - -// "This change will only be enabled if the go command is run with -// GO15VENDOREXPERIMENT=1 in its environment." -// c.f. the vendor-experiment proposal https://goo.gl/2ucMeC -func vendorExperimentCheck(dir string) bool { - vendorExperiment := os.Getenv("GO15VENDOREXPERIMENT") - return vendorExperiment == "1" && path.Base(dir) == "vendor" -} diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/testsuite/vendor_check_go16.go b/vendor/github.com/onsi/ginkgo/ginkgo/testsuite/vendor_check_go16.go deleted file mode 100644 index 596e5e5c..00000000 --- a/vendor/github.com/onsi/ginkgo/ginkgo/testsuite/vendor_check_go16.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build go1.6 - -package testsuite - -import ( - "os" - "path" -) - -// in 1.6 the vendor directory became the default go behaviour, so now -// check if it's disabled. 
-func vendorExperimentCheck(dir string) bool { - vendorExperiment := os.Getenv("GO15VENDOREXPERIMENT") - return vendorExperiment != "0" && path.Base(dir) == "vendor" -} diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/unfocus_command.go b/vendor/github.com/onsi/ginkgo/ginkgo/unfocus_command.go deleted file mode 100644 index d9dfb6e4..00000000 --- a/vendor/github.com/onsi/ginkgo/ginkgo/unfocus_command.go +++ /dev/null @@ -1,180 +0,0 @@ -package main - -import ( - "bytes" - "flag" - "fmt" - "go/ast" - "go/parser" - "go/token" - "io" - "io/ioutil" - "os" - "path/filepath" - "strings" - "sync" -) - -func BuildUnfocusCommand() *Command { - return &Command{ - Name: "unfocus", - AltName: "blur", - FlagSet: flag.NewFlagSet("unfocus", flag.ExitOnError), - UsageCommand: "ginkgo unfocus (or ginkgo blur)", - Usage: []string{ - "Recursively unfocuses any focused tests under the current directory", - }, - Command: unfocusSpecs, - } -} - -func unfocusSpecs([]string, []string) { - fmt.Println("Scanning for focus...") - - goFiles := make(chan string) - go func() { - unfocusDir(goFiles, ".") - close(goFiles) - }() - - const workers = 10 - wg := sync.WaitGroup{} - wg.Add(workers) - - for i := 0; i < workers; i++ { - go func() { - for path := range goFiles { - unfocusFile(path) - } - wg.Done() - }() - } - - wg.Wait() -} - -func unfocusDir(goFiles chan string, path string) { - files, err := ioutil.ReadDir(path) - if err != nil { - fmt.Println(err.Error()) - return - } - - for _, f := range files { - switch { - case f.IsDir() && shouldProcessDir(f.Name()): - unfocusDir(goFiles, filepath.Join(path, f.Name())) - case !f.IsDir() && shouldProcessFile(f.Name()): - goFiles <- filepath.Join(path, f.Name()) - } - } -} - -func shouldProcessDir(basename string) bool { - return basename != "vendor" && !strings.HasPrefix(basename, ".") -} - -func shouldProcessFile(basename string) bool { - return strings.HasSuffix(basename, ".go") -} - -func unfocusFile(path string) { - data, err := ioutil.ReadFile(path) - if err != nil { - fmt.Printf("error reading file '%s': %s\n", path, err.Error()) - return - } - - ast, err := parser.ParseFile(token.NewFileSet(), path, bytes.NewReader(data), 0) - if err != nil { - fmt.Printf("error parsing file '%s': %s\n", path, err.Error()) - return - } - - eliminations := scanForFocus(ast) - if len(eliminations) == 0 { - return - } - - fmt.Printf("...updating %s\n", path) - backup, err := writeBackup(path, data) - if err != nil { - fmt.Printf("error creating backup file: %s\n", err.Error()) - return - } - - if err := updateFile(path, data, eliminations); err != nil { - fmt.Printf("error writing file '%s': %s\n", path, err.Error()) - return - } - - os.Remove(backup) -} - -func writeBackup(path string, data []byte) (string, error) { - t, err := ioutil.TempFile(filepath.Dir(path), filepath.Base(path)) - - if err != nil { - return "", fmt.Errorf("error creating temporary file: %w", err) - } - defer t.Close() - - if _, err := io.Copy(t, bytes.NewReader(data)); err != nil { - return "", fmt.Errorf("error writing to temporary file: %w", err) - } - - return t.Name(), nil -} - -func updateFile(path string, data []byte, eliminations []int64) error { - to, err := os.Create(path) - if err != nil { - return fmt.Errorf("error opening file for writing '%s': %w\n", path, err) - } - defer to.Close() - - from := bytes.NewReader(data) - var cursor int64 - for _, byteToEliminate := range eliminations { - if _, err := io.CopyN(to, from, byteToEliminate-cursor); err != nil { - return fmt.Errorf("error copying data: %w", 
err) - } - - cursor = byteToEliminate + 1 - - if _, err := from.Seek(1, io.SeekCurrent); err != nil { - return fmt.Errorf("error seeking to position in buffer: %w", err) - } - } - - if _, err := io.Copy(to, from); err != nil { - return fmt.Errorf("error copying end data: %w", err) - } - - return nil -} - -func scanForFocus(file *ast.File) (eliminations []int64) { - ast.Inspect(file, func(n ast.Node) bool { - if c, ok := n.(*ast.CallExpr); ok { - if i, ok := c.Fun.(*ast.Ident); ok { - if isFocus(i.Name) { - eliminations = append(eliminations, int64(i.Pos()-file.Pos())) - } - } - } - - return true - }) - - return eliminations -} - -func isFocus(name string) bool { - switch name { - case "FDescribe", "FContext", "FIt", "FMeasure", "FDescribeTable", "FEntry", "FSpecify", "FWhen": - return true - default: - return false - } -} diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/version_command.go b/vendor/github.com/onsi/ginkgo/ginkgo/version_command.go deleted file mode 100644 index f586908e..00000000 --- a/vendor/github.com/onsi/ginkgo/ginkgo/version_command.go +++ /dev/null @@ -1,24 +0,0 @@ -package main - -import ( - "flag" - "fmt" - - "github.com/onsi/ginkgo/config" -) - -func BuildVersionCommand() *Command { - return &Command{ - Name: "version", - FlagSet: flag.NewFlagSet("version", flag.ExitOnError), - UsageCommand: "ginkgo version", - Usage: []string{ - "Print Ginkgo's version", - }, - Command: printVersion, - } -} - -func printVersion([]string, []string) { - fmt.Printf("Ginkgo Version %s\n", config.VERSION) -} diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/watch/delta.go b/vendor/github.com/onsi/ginkgo/ginkgo/watch/delta.go deleted file mode 100644 index 6c485c5b..00000000 --- a/vendor/github.com/onsi/ginkgo/ginkgo/watch/delta.go +++ /dev/null @@ -1,22 +0,0 @@ -package watch - -import "sort" - -type Delta struct { - ModifiedPackages []string - - NewSuites []*Suite - RemovedSuites []*Suite - modifiedSuites []*Suite -} - -type DescendingByDelta []*Suite - -func (a DescendingByDelta) Len() int { return len(a) } -func (a DescendingByDelta) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a DescendingByDelta) Less(i, j int) bool { return a[i].Delta() > a[j].Delta() } - -func (d Delta) ModifiedSuites() []*Suite { - sort.Sort(DescendingByDelta(d.modifiedSuites)) - return d.modifiedSuites -} diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/watch/delta_tracker.go b/vendor/github.com/onsi/ginkgo/ginkgo/watch/delta_tracker.go deleted file mode 100644 index a628303d..00000000 --- a/vendor/github.com/onsi/ginkgo/ginkgo/watch/delta_tracker.go +++ /dev/null @@ -1,75 +0,0 @@ -package watch - -import ( - "fmt" - - "regexp" - - "github.com/onsi/ginkgo/ginkgo/testsuite" -) - -type SuiteErrors map[testsuite.TestSuite]error - -type DeltaTracker struct { - maxDepth int - watchRegExp *regexp.Regexp - suites map[string]*Suite - packageHashes *PackageHashes -} - -func NewDeltaTracker(maxDepth int, watchRegExp *regexp.Regexp) *DeltaTracker { - return &DeltaTracker{ - maxDepth: maxDepth, - watchRegExp: watchRegExp, - packageHashes: NewPackageHashes(watchRegExp), - suites: map[string]*Suite{}, - } -} - -func (d *DeltaTracker) Delta(suites []testsuite.TestSuite) (delta Delta, errors SuiteErrors) { - errors = SuiteErrors{} - delta.ModifiedPackages = d.packageHashes.CheckForChanges() - - providedSuitePaths := map[string]bool{} - for _, suite := range suites { - providedSuitePaths[suite.Path] = true - } - - d.packageHashes.StartTrackingUsage() - - for _, suite := range d.suites { - if 
providedSuitePaths[suite.Suite.Path] { - if suite.Delta() > 0 { - delta.modifiedSuites = append(delta.modifiedSuites, suite) - } - } else { - delta.RemovedSuites = append(delta.RemovedSuites, suite) - } - } - - d.packageHashes.StopTrackingUsageAndPrune() - - for _, suite := range suites { - _, ok := d.suites[suite.Path] - if !ok { - s, err := NewSuite(suite, d.maxDepth, d.packageHashes) - if err != nil { - errors[suite] = err - continue - } - d.suites[suite.Path] = s - delta.NewSuites = append(delta.NewSuites, s) - } - } - - return delta, errors -} - -func (d *DeltaTracker) WillRun(suite testsuite.TestSuite) error { - s, ok := d.suites[suite.Path] - if !ok { - return fmt.Errorf("unknown suite %s", suite.Path) - } - - return s.MarkAsRunAndRecomputedDependencies(d.maxDepth) -} diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/watch/dependencies.go b/vendor/github.com/onsi/ginkgo/ginkgo/watch/dependencies.go deleted file mode 100644 index f5ddff30..00000000 --- a/vendor/github.com/onsi/ginkgo/ginkgo/watch/dependencies.go +++ /dev/null @@ -1,92 +0,0 @@ -package watch - -import ( - "go/build" - "regexp" -) - -var ginkgoAndGomegaFilter = regexp.MustCompile(`github\.com/onsi/ginkgo|github\.com/onsi/gomega`) -var ginkgoIntegrationTestFilter = regexp.MustCompile(`github\.com/onsi/ginkgo/integration`) //allow us to integration test this thing - -type Dependencies struct { - deps map[string]int -} - -func NewDependencies(path string, maxDepth int) (Dependencies, error) { - d := Dependencies{ - deps: map[string]int{}, - } - - if maxDepth == 0 { - return d, nil - } - - err := d.seedWithDepsForPackageAtPath(path) - if err != nil { - return d, err - } - - for depth := 1; depth < maxDepth; depth++ { - n := len(d.deps) - d.addDepsForDepth(depth) - if n == len(d.deps) { - break - } - } - - return d, nil -} - -func (d Dependencies) Dependencies() map[string]int { - return d.deps -} - -func (d Dependencies) seedWithDepsForPackageAtPath(path string) error { - pkg, err := build.ImportDir(path, 0) - if err != nil { - return err - } - - d.resolveAndAdd(pkg.Imports, 1) - d.resolveAndAdd(pkg.TestImports, 1) - d.resolveAndAdd(pkg.XTestImports, 1) - - delete(d.deps, pkg.Dir) - return nil -} - -func (d Dependencies) addDepsForDepth(depth int) { - for dep, depDepth := range d.deps { - if depDepth == depth { - d.addDepsForDep(dep, depth+1) - } - } -} - -func (d Dependencies) addDepsForDep(dep string, depth int) { - pkg, err := build.ImportDir(dep, 0) - if err != nil { - println(err.Error()) - return - } - d.resolveAndAdd(pkg.Imports, depth) -} - -func (d Dependencies) resolveAndAdd(deps []string, depth int) { - for _, dep := range deps { - pkg, err := build.Import(dep, ".", 0) - if err != nil { - continue - } - if !pkg.Goroot && (!ginkgoAndGomegaFilter.Match([]byte(pkg.Dir)) || ginkgoIntegrationTestFilter.Match([]byte(pkg.Dir))) { - d.addDepIfNotPresent(pkg.Dir, depth) - } - } -} - -func (d Dependencies) addDepIfNotPresent(dep string, depth int) { - _, ok := d.deps[dep] - if !ok { - d.deps[dep] = depth - } -} diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/watch/package_hash.go b/vendor/github.com/onsi/ginkgo/ginkgo/watch/package_hash.go deleted file mode 100644 index 67e2c1c3..00000000 --- a/vendor/github.com/onsi/ginkgo/ginkgo/watch/package_hash.go +++ /dev/null @@ -1,104 +0,0 @@ -package watch - -import ( - "fmt" - "io/ioutil" - "os" - "regexp" - "time" -) - -var goTestRegExp = regexp.MustCompile(`_test\.go$`) - -type PackageHash struct { - CodeModifiedTime time.Time - TestModifiedTime time.Time - Deleted bool - - 
path string - codeHash string - testHash string - watchRegExp *regexp.Regexp -} - -func NewPackageHash(path string, watchRegExp *regexp.Regexp) *PackageHash { - p := &PackageHash{ - path: path, - watchRegExp: watchRegExp, - } - - p.codeHash, _, p.testHash, _, p.Deleted = p.computeHashes() - - return p -} - -func (p *PackageHash) CheckForChanges() bool { - codeHash, codeModifiedTime, testHash, testModifiedTime, deleted := p.computeHashes() - - if deleted { - if !p.Deleted { - t := time.Now() - p.CodeModifiedTime = t - p.TestModifiedTime = t - } - p.Deleted = true - return true - } - - modified := false - p.Deleted = false - - if p.codeHash != codeHash { - p.CodeModifiedTime = codeModifiedTime - modified = true - } - if p.testHash != testHash { - p.TestModifiedTime = testModifiedTime - modified = true - } - - p.codeHash = codeHash - p.testHash = testHash - return modified -} - -func (p *PackageHash) computeHashes() (codeHash string, codeModifiedTime time.Time, testHash string, testModifiedTime time.Time, deleted bool) { - infos, err := ioutil.ReadDir(p.path) - - if err != nil { - deleted = true - return - } - - for _, info := range infos { - if info.IsDir() { - continue - } - - if goTestRegExp.Match([]byte(info.Name())) { - testHash += p.hashForFileInfo(info) - if info.ModTime().After(testModifiedTime) { - testModifiedTime = info.ModTime() - } - continue - } - - if p.watchRegExp.Match([]byte(info.Name())) { - codeHash += p.hashForFileInfo(info) - if info.ModTime().After(codeModifiedTime) { - codeModifiedTime = info.ModTime() - } - } - } - - testHash += codeHash - if codeModifiedTime.After(testModifiedTime) { - testModifiedTime = codeModifiedTime - } - - return -} - -func (p *PackageHash) hashForFileInfo(info os.FileInfo) string { - return fmt.Sprintf("%s_%d_%d", info.Name(), info.Size(), info.ModTime().UnixNano()) -} diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/watch/package_hashes.go b/vendor/github.com/onsi/ginkgo/ginkgo/watch/package_hashes.go deleted file mode 100644 index b4892beb..00000000 --- a/vendor/github.com/onsi/ginkgo/ginkgo/watch/package_hashes.go +++ /dev/null @@ -1,85 +0,0 @@ -package watch - -import ( - "path/filepath" - "regexp" - "sync" -) - -type PackageHashes struct { - PackageHashes map[string]*PackageHash - usedPaths map[string]bool - watchRegExp *regexp.Regexp - lock *sync.Mutex -} - -func NewPackageHashes(watchRegExp *regexp.Regexp) *PackageHashes { - return &PackageHashes{ - PackageHashes: map[string]*PackageHash{}, - usedPaths: nil, - watchRegExp: watchRegExp, - lock: &sync.Mutex{}, - } -} - -func (p *PackageHashes) CheckForChanges() []string { - p.lock.Lock() - defer p.lock.Unlock() - - modified := []string{} - - for _, packageHash := range p.PackageHashes { - if packageHash.CheckForChanges() { - modified = append(modified, packageHash.path) - } - } - - return modified -} - -func (p *PackageHashes) Add(path string) *PackageHash { - p.lock.Lock() - defer p.lock.Unlock() - - path, _ = filepath.Abs(path) - _, ok := p.PackageHashes[path] - if !ok { - p.PackageHashes[path] = NewPackageHash(path, p.watchRegExp) - } - - if p.usedPaths != nil { - p.usedPaths[path] = true - } - return p.PackageHashes[path] -} - -func (p *PackageHashes) Get(path string) *PackageHash { - p.lock.Lock() - defer p.lock.Unlock() - - path, _ = filepath.Abs(path) - if p.usedPaths != nil { - p.usedPaths[path] = true - } - return p.PackageHashes[path] -} - -func (p *PackageHashes) StartTrackingUsage() { - p.lock.Lock() - defer p.lock.Unlock() - - p.usedPaths = map[string]bool{} -} - -func (p 
*PackageHashes) StopTrackingUsageAndPrune() { - p.lock.Lock() - defer p.lock.Unlock() - - for path := range p.PackageHashes { - if !p.usedPaths[path] { - delete(p.PackageHashes, path) - } - } - - p.usedPaths = nil -} diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/watch/suite.go b/vendor/github.com/onsi/ginkgo/ginkgo/watch/suite.go deleted file mode 100644 index 5deaba7c..00000000 --- a/vendor/github.com/onsi/ginkgo/ginkgo/watch/suite.go +++ /dev/null @@ -1,87 +0,0 @@ -package watch - -import ( - "fmt" - "math" - "time" - - "github.com/onsi/ginkgo/ginkgo/testsuite" -) - -type Suite struct { - Suite testsuite.TestSuite - RunTime time.Time - Dependencies Dependencies - - sharedPackageHashes *PackageHashes -} - -func NewSuite(suite testsuite.TestSuite, maxDepth int, sharedPackageHashes *PackageHashes) (*Suite, error) { - deps, err := NewDependencies(suite.Path, maxDepth) - if err != nil { - return nil, err - } - - sharedPackageHashes.Add(suite.Path) - for dep := range deps.Dependencies() { - sharedPackageHashes.Add(dep) - } - - return &Suite{ - Suite: suite, - Dependencies: deps, - - sharedPackageHashes: sharedPackageHashes, - }, nil -} - -func (s *Suite) Delta() float64 { - delta := s.delta(s.Suite.Path, true, 0) * 1000 - for dep, depth := range s.Dependencies.Dependencies() { - delta += s.delta(dep, false, depth) - } - return delta -} - -func (s *Suite) MarkAsRunAndRecomputedDependencies(maxDepth int) error { - s.RunTime = time.Now() - - deps, err := NewDependencies(s.Suite.Path, maxDepth) - if err != nil { - return err - } - - s.sharedPackageHashes.Add(s.Suite.Path) - for dep := range deps.Dependencies() { - s.sharedPackageHashes.Add(dep) - } - - s.Dependencies = deps - - return nil -} - -func (s *Suite) Description() string { - numDeps := len(s.Dependencies.Dependencies()) - pluralizer := "ies" - if numDeps == 1 { - pluralizer = "y" - } - return fmt.Sprintf("%s [%d dependenc%s]", s.Suite.Path, numDeps, pluralizer) -} - -func (s *Suite) delta(packagePath string, includeTests bool, depth int) float64 { - return math.Max(float64(s.dt(packagePath, includeTests)), 0) / float64(depth+1) -} - -func (s *Suite) dt(packagePath string, includeTests bool) time.Duration { - packageHash := s.sharedPackageHashes.Get(packagePath) - var modifiedTime time.Time - if includeTests { - modifiedTime = packageHash.TestModifiedTime - } else { - modifiedTime = packageHash.CodeModifiedTime - } - - return modifiedTime.Sub(s.RunTime) -} diff --git a/vendor/github.com/onsi/ginkgo/ginkgo/watch_command.go b/vendor/github.com/onsi/ginkgo/ginkgo/watch_command.go deleted file mode 100644 index a6ef053c..00000000 --- a/vendor/github.com/onsi/ginkgo/ginkgo/watch_command.go +++ /dev/null @@ -1,175 +0,0 @@ -package main - -import ( - "flag" - "fmt" - "regexp" - "time" - - "github.com/onsi/ginkgo/config" - "github.com/onsi/ginkgo/ginkgo/interrupthandler" - "github.com/onsi/ginkgo/ginkgo/testrunner" - "github.com/onsi/ginkgo/ginkgo/testsuite" - "github.com/onsi/ginkgo/ginkgo/watch" - colorable "github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable" -) - -func BuildWatchCommand() *Command { - commandFlags := NewWatchCommandFlags(flag.NewFlagSet("watch", flag.ExitOnError)) - interruptHandler := interrupthandler.NewInterruptHandler() - notifier := NewNotifier(commandFlags) - watcher := &SpecWatcher{ - commandFlags: commandFlags, - notifier: notifier, - interruptHandler: interruptHandler, - suiteRunner: NewSuiteRunner(notifier, interruptHandler), - } - - return &Command{ - Name: "watch", - FlagSet: 
commandFlags.FlagSet, - UsageCommand: "ginkgo watch <FLAGS> <PACKAGES> -- <PASS-THROUGHS>", - Usage: []string{ - "Watches the tests in the passed in <PACKAGES> and runs them when changes occur.", - "Any arguments after -- will be passed to the test.", - }, - Command: watcher.WatchSpecs, - SuppressFlagDocumentation: true, - FlagDocSubstitute: []string{ - "Accepts all the flags that the ginkgo command accepts except for --keepGoing and --untilItFails", - }, - } -} - -type SpecWatcher struct { - commandFlags *RunWatchAndBuildCommandFlags - notifier *Notifier - interruptHandler *interrupthandler.InterruptHandler - suiteRunner *SuiteRunner -} - -func (w *SpecWatcher) WatchSpecs(args []string, additionalArgs []string) { - w.commandFlags.computeNodes() - w.notifier.VerifyNotificationsAreAvailable() - - w.WatchSuites(args, additionalArgs) -} - -func (w *SpecWatcher) runnersForSuites(suites []testsuite.TestSuite, additionalArgs []string) []*testrunner.TestRunner { - runners := []*testrunner.TestRunner{} - - for _, suite := range suites { - runners = append(runners, testrunner.New(suite, w.commandFlags.NumCPU, w.commandFlags.ParallelStream, w.commandFlags.Timeout, w.commandFlags.GoOpts, additionalArgs)) - } - - return runners -} - -func (w *SpecWatcher) WatchSuites(args []string, additionalArgs []string) { - suites, _ := findSuites(args, w.commandFlags.Recurse, w.commandFlags.SkipPackage, false) - - if len(suites) == 0 { - complainAndQuit("Found no test suites") - } - - fmt.Printf("Identified %d test %s. Locating dependencies to a depth of %d (this may take a while)...\n", len(suites), pluralizedWord("suite", "suites", len(suites)), w.commandFlags.Depth) - deltaTracker := watch.NewDeltaTracker(w.commandFlags.Depth, regexp.MustCompile(w.commandFlags.WatchRegExp)) - delta, errors := deltaTracker.Delta(suites) - - fmt.Printf("Watching %d %s:\n", len(delta.NewSuites), pluralizedWord("suite", "suites", len(delta.NewSuites))) - for _, suite := range delta.NewSuites { - fmt.Println(" " + suite.Description()) - } - - for suite, err := range errors { - fmt.Printf("Failed to watch %s: %s\n", suite.PackageName, err) - } - - if len(suites) == 1 { - runners := w.runnersForSuites(suites, additionalArgs) - w.suiteRunner.RunSuites(runners, w.commandFlags.NumCompilers, true, nil) - runners[0].CleanUp() - } - - ticker := time.NewTicker(time.Second) - - for { - select { - case <-ticker.C: - suites, _ := findSuites(args, w.commandFlags.Recurse, w.commandFlags.SkipPackage, false) - delta, _ := deltaTracker.Delta(suites) - coloredStream := colorable.NewColorableStdout() - - suitesToRun := []testsuite.TestSuite{} - - if len(delta.NewSuites) > 0 { - fmt.Fprintf(coloredStream, greenColor+"Detected %d new %s:\n"+defaultStyle, len(delta.NewSuites), pluralizedWord("suite", "suites", len(delta.NewSuites))) - for _, suite := range delta.NewSuites { - suitesToRun = append(suitesToRun, suite.Suite) - fmt.Fprintln(coloredStream, " "+suite.Description()) - } - } - - modifiedSuites := delta.ModifiedSuites() - if len(modifiedSuites) > 0 { - fmt.Fprintln(coloredStream, greenColor+"\nDetected changes in:"+defaultStyle) - for _, pkg := range delta.ModifiedPackages { - fmt.Fprintln(coloredStream, " "+pkg) - } - fmt.Fprintf(coloredStream, greenColor+"Will run %d %s:\n"+defaultStyle, len(modifiedSuites), pluralizedWord("suite", "suites", len(modifiedSuites))) - for _, suite := range modifiedSuites { - suitesToRun = append(suitesToRun, suite.Suite) - fmt.Fprintln(coloredStream, " "+suite.Description()) - } - fmt.Fprintln(coloredStream, "") - } - - if len(suitesToRun) > 0 { - 
w.UpdateSeed() - w.ComputeSuccinctMode(len(suitesToRun)) - runners := w.runnersForSuites(suitesToRun, additionalArgs) - result, _ := w.suiteRunner.RunSuites(runners, w.commandFlags.NumCompilers, true, func(suite testsuite.TestSuite) { - deltaTracker.WillRun(suite) - }) - for _, runner := range runners { - runner.CleanUp() - } - if !w.interruptHandler.WasInterrupted() { - color := redColor - if result.Passed { - color = greenColor - } - fmt.Fprintln(coloredStream, color+"\nDone. Resuming watch..."+defaultStyle) - } - } - - case <-w.interruptHandler.C: - return - } - } -} - -func (w *SpecWatcher) ComputeSuccinctMode(numSuites int) { - if config.DefaultReporterConfig.Verbose { - config.DefaultReporterConfig.Succinct = false - return - } - - if w.commandFlags.wasSet("succinct") { - return - } - - if numSuites == 1 { - config.DefaultReporterConfig.Succinct = false - } - - if numSuites > 1 { - config.DefaultReporterConfig.Succinct = true - } -} - -func (w *SpecWatcher) UpdateSeed() { - if !w.commandFlags.wasSet("seed") { - config.GinkgoConfig.RandomSeed = time.Now().Unix() - } -} diff --git a/vendor/github.com/onsi/ginkgo/internal/codelocation/code_location.go b/vendor/github.com/onsi/ginkgo/internal/codelocation/code_location.go deleted file mode 100644 index aa89d6cb..00000000 --- a/vendor/github.com/onsi/ginkgo/internal/codelocation/code_location.go +++ /dev/null @@ -1,48 +0,0 @@ -package codelocation - -import ( - "regexp" - "runtime" - "runtime/debug" - "strings" - - "github.com/onsi/ginkgo/types" -) - -func New(skip int) types.CodeLocation { - _, file, line, _ := runtime.Caller(skip + 1) - stackTrace := PruneStack(string(debug.Stack()), skip+1) - return types.CodeLocation{FileName: file, LineNumber: line, FullStackTrace: stackTrace} -} - -// PruneStack removes references to functions that are internal to Ginkgo -// and the Go runtime from a stack string and a certain number of stack entries -// at the beginning of the stack. The stack string has the format -// as returned by runtime/debug.Stack. The leading goroutine information is -// optional and always removed if present. Beware that runtime/debug.Stack -// adds itself as first entry, so typically skip must be >= 1 to remove that -// entry. -func PruneStack(fullStackTrace string, skip int) string { - stack := strings.Split(fullStackTrace, "\n") - // Ensure that the even entries are the method names and - // the odd entries the source code information. - if len(stack) > 0 && strings.HasPrefix(stack[0], "goroutine ") { - // Ignore "goroutine 29 [running]:" line. - stack = stack[1:] - } - // The "+1" is for skipping over the initial entry, which is - // runtime/debug.Stack() itself. - if len(stack) > 2*(skip+1) { - stack = stack[2*(skip+1):] - } - prunedStack := []string{} - re := regexp.MustCompile(`\/ginkgo\/|\/pkg\/testing\/|\/pkg\/runtime\/`) - for i := 0; i < len(stack)/2; i++ { - // We filter out based on the source code file name. 
- if !re.Match([]byte(stack[i*2+1])) { - prunedStack = append(prunedStack, stack[i*2]) - prunedStack = append(prunedStack, stack[i*2+1]) - } - } - return strings.Join(prunedStack, "\n") -} diff --git a/vendor/github.com/onsi/ginkgo/internal/containernode/container_node.go b/vendor/github.com/onsi/ginkgo/internal/containernode/container_node.go deleted file mode 100644 index 0737746d..00000000 --- a/vendor/github.com/onsi/ginkgo/internal/containernode/container_node.go +++ /dev/null @@ -1,151 +0,0 @@ -package containernode - -import ( - "math/rand" - "sort" - - "github.com/onsi/ginkgo/internal/leafnodes" - "github.com/onsi/ginkgo/types" -) - -type subjectOrContainerNode struct { - containerNode *ContainerNode - subjectNode leafnodes.SubjectNode -} - -func (n subjectOrContainerNode) text() string { - if n.containerNode != nil { - return n.containerNode.Text() - } else { - return n.subjectNode.Text() - } -} - -type CollatedNodes struct { - Containers []*ContainerNode - Subject leafnodes.SubjectNode -} - -type ContainerNode struct { - text string - flag types.FlagType - codeLocation types.CodeLocation - - setupNodes []leafnodes.BasicNode - subjectAndContainerNodes []subjectOrContainerNode -} - -func New(text string, flag types.FlagType, codeLocation types.CodeLocation) *ContainerNode { - return &ContainerNode{ - text: text, - flag: flag, - codeLocation: codeLocation, - } -} - -func (container *ContainerNode) Shuffle(r *rand.Rand) { - sort.Sort(container) - permutation := r.Perm(len(container.subjectAndContainerNodes)) - shuffledNodes := make([]subjectOrContainerNode, len(container.subjectAndContainerNodes)) - for i, j := range permutation { - shuffledNodes[i] = container.subjectAndContainerNodes[j] - } - container.subjectAndContainerNodes = shuffledNodes -} - -func (node *ContainerNode) BackPropagateProgrammaticFocus() bool { - if node.flag == types.FlagTypePending { - return false - } - - shouldUnfocus := false - for _, subjectOrContainerNode := range node.subjectAndContainerNodes { - if subjectOrContainerNode.containerNode != nil { - shouldUnfocus = subjectOrContainerNode.containerNode.BackPropagateProgrammaticFocus() || shouldUnfocus - } else { - shouldUnfocus = (subjectOrContainerNode.subjectNode.Flag() == types.FlagTypeFocused) || shouldUnfocus - } - } - - if shouldUnfocus { - if node.flag == types.FlagTypeFocused { - node.flag = types.FlagTypeNone - } - return true - } - - return node.flag == types.FlagTypeFocused -} - -func (node *ContainerNode) Collate() []CollatedNodes { - return node.collate([]*ContainerNode{}) -} - -func (node *ContainerNode) collate(enclosingContainers []*ContainerNode) []CollatedNodes { - collated := make([]CollatedNodes, 0) - - containers := make([]*ContainerNode, len(enclosingContainers)) - copy(containers, enclosingContainers) - containers = append(containers, node) - - for _, subjectOrContainer := range node.subjectAndContainerNodes { - if subjectOrContainer.containerNode != nil { - collated = append(collated, subjectOrContainer.containerNode.collate(containers)...) 
- } else { - collated = append(collated, CollatedNodes{ - Containers: containers, - Subject: subjectOrContainer.subjectNode, - }) - } - } - - return collated -} - -func (node *ContainerNode) PushContainerNode(container *ContainerNode) { - node.subjectAndContainerNodes = append(node.subjectAndContainerNodes, subjectOrContainerNode{containerNode: container}) -} - -func (node *ContainerNode) PushSubjectNode(subject leafnodes.SubjectNode) { - node.subjectAndContainerNodes = append(node.subjectAndContainerNodes, subjectOrContainerNode{subjectNode: subject}) -} - -func (node *ContainerNode) PushSetupNode(setupNode leafnodes.BasicNode) { - node.setupNodes = append(node.setupNodes, setupNode) -} - -func (node *ContainerNode) SetupNodesOfType(nodeType types.SpecComponentType) []leafnodes.BasicNode { - nodes := []leafnodes.BasicNode{} - for _, setupNode := range node.setupNodes { - if setupNode.Type() == nodeType { - nodes = append(nodes, setupNode) - } - } - return nodes -} - -func (node *ContainerNode) Text() string { - return node.text -} - -func (node *ContainerNode) CodeLocation() types.CodeLocation { - return node.codeLocation -} - -func (node *ContainerNode) Flag() types.FlagType { - return node.flag -} - -//sort.Interface - -func (node *ContainerNode) Len() int { - return len(node.subjectAndContainerNodes) -} - -func (node *ContainerNode) Less(i, j int) bool { - return node.subjectAndContainerNodes[i].text() < node.subjectAndContainerNodes[j].text() -} - -func (node *ContainerNode) Swap(i, j int) { - node.subjectAndContainerNodes[i], node.subjectAndContainerNodes[j] = node.subjectAndContainerNodes[j], node.subjectAndContainerNodes[i] -} diff --git a/vendor/github.com/onsi/ginkgo/internal/failer/failer.go b/vendor/github.com/onsi/ginkgo/internal/failer/failer.go deleted file mode 100644 index 678ea251..00000000 --- a/vendor/github.com/onsi/ginkgo/internal/failer/failer.go +++ /dev/null @@ -1,92 +0,0 @@ -package failer - -import ( - "fmt" - "sync" - - "github.com/onsi/ginkgo/types" -) - -type Failer struct { - lock *sync.Mutex - failure types.SpecFailure - state types.SpecState -} - -func New() *Failer { - return &Failer{ - lock: &sync.Mutex{}, - state: types.SpecStatePassed, - } -} - -func (f *Failer) Panic(location types.CodeLocation, forwardedPanic interface{}) { - f.lock.Lock() - defer f.lock.Unlock() - - if f.state == types.SpecStatePassed { - f.state = types.SpecStatePanicked - f.failure = types.SpecFailure{ - Message: "Test Panicked", - Location: location, - ForwardedPanic: fmt.Sprintf("%v", forwardedPanic), - } - } -} - -func (f *Failer) Timeout(location types.CodeLocation) { - f.lock.Lock() - defer f.lock.Unlock() - - if f.state == types.SpecStatePassed { - f.state = types.SpecStateTimedOut - f.failure = types.SpecFailure{ - Message: "Timed out", - Location: location, - } - } -} - -func (f *Failer) Fail(message string, location types.CodeLocation) { - f.lock.Lock() - defer f.lock.Unlock() - - if f.state == types.SpecStatePassed { - f.state = types.SpecStateFailed - f.failure = types.SpecFailure{ - Message: message, - Location: location, - } - } -} - -func (f *Failer) Drain(componentType types.SpecComponentType, componentIndex int, componentCodeLocation types.CodeLocation) (types.SpecFailure, types.SpecState) { - f.lock.Lock() - defer f.lock.Unlock() - - failure := f.failure - outcome := f.state - if outcome != types.SpecStatePassed { - failure.ComponentType = componentType - failure.ComponentIndex = componentIndex - failure.ComponentCodeLocation = componentCodeLocation - } - - f.state 
= types.SpecStatePassed - f.failure = types.SpecFailure{} - - return failure, outcome -} - -func (f *Failer) Skip(message string, location types.CodeLocation) { - f.lock.Lock() - defer f.lock.Unlock() - - if f.state == types.SpecStatePassed { - f.state = types.SpecStateSkipped - f.failure = types.SpecFailure{ - Message: message, - Location: location, - } - } -} diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/benchmarker.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/benchmarker.go deleted file mode 100644 index 393901e1..00000000 --- a/vendor/github.com/onsi/ginkgo/internal/leafnodes/benchmarker.go +++ /dev/null @@ -1,103 +0,0 @@ -package leafnodes - -import ( - "math" - "time" - - "sync" - - "github.com/onsi/ginkgo/types" -) - -type benchmarker struct { - mu sync.Mutex - measurements map[string]*types.SpecMeasurement - orderCounter int -} - -func newBenchmarker() *benchmarker { - return &benchmarker{ - measurements: make(map[string]*types.SpecMeasurement), - } -} - -func (b *benchmarker) Time(name string, body func(), info ...interface{}) (elapsedTime time.Duration) { - t := time.Now() - body() - elapsedTime = time.Since(t) - - b.mu.Lock() - defer b.mu.Unlock() - measurement := b.getMeasurement(name, "Fastest Time", "Slowest Time", "Average Time", "s", 3, info...) - measurement.Results = append(measurement.Results, elapsedTime.Seconds()) - - return -} - -func (b *benchmarker) RecordValue(name string, value float64, info ...interface{}) { - b.mu.Lock() - measurement := b.getMeasurement(name, "Smallest", " Largest", " Average", "", 3, info...) - defer b.mu.Unlock() - measurement.Results = append(measurement.Results, value) -} - -func (b *benchmarker) RecordValueWithPrecision(name string, value float64, units string, precision int, info ...interface{}) { - b.mu.Lock() - measurement := b.getMeasurement(name, "Smallest", " Largest", " Average", units, precision, info...) 
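// Illustrative sketch (not part of this patch): the shape of benchmarker.Time
// above — run the body with no lock held, then append the elapsed seconds to a
// named series under the mutex. The names here are invented for the example.
package main

import (
	"fmt"
	"sync"
	"time"
)

type bench struct {
	mu      sync.Mutex
	results map[string][]float64
}

func (b *bench) Time(name string, body func()) time.Duration {
	start := time.Now()
	body() // only the measured body runs outside the lock
	elapsed := time.Since(start)

	b.mu.Lock()
	defer b.mu.Unlock()
	if b.results == nil {
		b.results = map[string][]float64{}
	}
	b.results[name] = append(b.results[name], elapsed.Seconds())
	return elapsed
}

func main() {
	var b bench
	b.Time("sleep", func() { time.Sleep(10 * time.Millisecond) })
	fmt.Println(b.results)
}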
- defer b.mu.Unlock() - measurement.Results = append(measurement.Results, value) -} - -func (b *benchmarker) getMeasurement(name string, smallestLabel string, largestLabel string, averageLabel string, units string, precision int, info ...interface{}) *types.SpecMeasurement { - measurement, ok := b.measurements[name] - if !ok { - var computedInfo interface{} - computedInfo = nil - if len(info) > 0 { - computedInfo = info[0] - } - measurement = &types.SpecMeasurement{ - Name: name, - Info: computedInfo, - Order: b.orderCounter, - SmallestLabel: smallestLabel, - LargestLabel: largestLabel, - AverageLabel: averageLabel, - Units: units, - Precision: precision, - Results: make([]float64, 0), - } - b.measurements[name] = measurement - b.orderCounter++ - } - - return measurement -} - -func (b *benchmarker) measurementsReport() map[string]*types.SpecMeasurement { - b.mu.Lock() - defer b.mu.Unlock() - for _, measurement := range b.measurements { - measurement.Smallest = math.MaxFloat64 - measurement.Largest = -math.MaxFloat64 - sum := float64(0) - sumOfSquares := float64(0) - - for _, result := range measurement.Results { - if result > measurement.Largest { - measurement.Largest = result - } - if result < measurement.Smallest { - measurement.Smallest = result - } - sum += result - sumOfSquares += result * result - } - - n := float64(len(measurement.Results)) - measurement.Average = sum / n - measurement.StdDeviation = math.Sqrt(sumOfSquares/n - (sum/n)*(sum/n)) - } - - return b.measurements -} diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/interfaces.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/interfaces.go deleted file mode 100644 index 8c3902d6..00000000 --- a/vendor/github.com/onsi/ginkgo/internal/leafnodes/interfaces.go +++ /dev/null @@ -1,19 +0,0 @@ -package leafnodes - -import ( - "github.com/onsi/ginkgo/types" -) - -type BasicNode interface { - Type() types.SpecComponentType - Run() (types.SpecState, types.SpecFailure) - CodeLocation() types.CodeLocation -} - -type SubjectNode interface { - BasicNode - - Text() string - Flag() types.FlagType - Samples() int -} diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node.go deleted file mode 100644 index 6eded7b7..00000000 --- a/vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node.go +++ /dev/null @@ -1,47 +0,0 @@ -package leafnodes - -import ( - "time" - - "github.com/onsi/ginkgo/internal/failer" - "github.com/onsi/ginkgo/types" -) - -type ItNode struct { - runner *runner - - flag types.FlagType - text string -} - -func NewItNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *ItNode { - return &ItNode{ - runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeIt, componentIndex), - flag: flag, - text: text, - } -} - -func (node *ItNode) Run() (outcome types.SpecState, failure types.SpecFailure) { - return node.runner.run() -} - -func (node *ItNode) Type() types.SpecComponentType { - return types.SpecComponentTypeIt -} - -func (node *ItNode) Text() string { - return node.text -} - -func (node *ItNode) Flag() types.FlagType { - return node.flag -} - -func (node *ItNode) CodeLocation() types.CodeLocation { - return node.runner.codeLocation -} - -func (node *ItNode) Samples() int { - return 1 -} diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node.go 
b/vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node.go deleted file mode 100644 index 3ab9a6d5..00000000 --- a/vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node.go +++ /dev/null @@ -1,62 +0,0 @@ -package leafnodes - -import ( - "reflect" - - "github.com/onsi/ginkgo/internal/failer" - "github.com/onsi/ginkgo/types" -) - -type MeasureNode struct { - runner *runner - - text string - flag types.FlagType - samples int - benchmarker *benchmarker -} - -func NewMeasureNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, samples int, failer *failer.Failer, componentIndex int) *MeasureNode { - benchmarker := newBenchmarker() - - wrappedBody := func() { - reflect.ValueOf(body).Call([]reflect.Value{reflect.ValueOf(benchmarker)}) - } - - return &MeasureNode{ - runner: newRunner(wrappedBody, codeLocation, 0, failer, types.SpecComponentTypeMeasure, componentIndex), - - text: text, - flag: flag, - samples: samples, - benchmarker: benchmarker, - } -} - -func (node *MeasureNode) Run() (outcome types.SpecState, failure types.SpecFailure) { - return node.runner.run() -} - -func (node *MeasureNode) MeasurementsReport() map[string]*types.SpecMeasurement { - return node.benchmarker.measurementsReport() -} - -func (node *MeasureNode) Type() types.SpecComponentType { - return types.SpecComponentTypeMeasure -} - -func (node *MeasureNode) Text() string { - return node.text -} - -func (node *MeasureNode) Flag() types.FlagType { - return node.flag -} - -func (node *MeasureNode) CodeLocation() types.CodeLocation { - return node.runner.codeLocation -} - -func (node *MeasureNode) Samples() int { - return node.samples -} diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/runner.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/runner.go deleted file mode 100644 index 16cb66c3..00000000 --- a/vendor/github.com/onsi/ginkgo/internal/leafnodes/runner.go +++ /dev/null @@ -1,117 +0,0 @@ -package leafnodes - -import ( - "fmt" - "reflect" - "time" - - "github.com/onsi/ginkgo/internal/codelocation" - "github.com/onsi/ginkgo/internal/failer" - "github.com/onsi/ginkgo/types" -) - -type runner struct { - isAsync bool - asyncFunc func(chan<- interface{}) - syncFunc func() - codeLocation types.CodeLocation - timeoutThreshold time.Duration - nodeType types.SpecComponentType - componentIndex int - failer *failer.Failer -} - -func newRunner(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, nodeType types.SpecComponentType, componentIndex int) *runner { - bodyType := reflect.TypeOf(body) - if bodyType.Kind() != reflect.Func { - panic(fmt.Sprintf("Expected a function but got something else at %v", codeLocation)) - } - - runner := &runner{ - codeLocation: codeLocation, - timeoutThreshold: timeout, - failer: failer, - nodeType: nodeType, - componentIndex: componentIndex, - } - - switch bodyType.NumIn() { - case 0: - runner.syncFunc = body.(func()) - return runner - case 1: - if !(bodyType.In(0).Kind() == reflect.Chan && bodyType.In(0).Elem().Kind() == reflect.Interface) { - panic(fmt.Sprintf("Must pass a Done channel to function at %v", codeLocation)) - } - - wrappedBody := func(done chan<- interface{}) { - bodyValue := reflect.ValueOf(body) - bodyValue.Call([]reflect.Value{reflect.ValueOf(done)}) - } - - runner.isAsync = true - runner.asyncFunc = wrappedBody - return runner - } - - panic(fmt.Sprintf("Too many arguments to function at %v", codeLocation)) -} - -func (r *runner) run() (outcome types.SpecState, failure 
types.SpecFailure) { - if r.isAsync { - return r.runAsync() - } else { - return r.runSync() - } -} - -func (r *runner) runAsync() (outcome types.SpecState, failure types.SpecFailure) { - done := make(chan interface{}, 1) - - go func() { - finished := false - - defer func() { - if e := recover(); e != nil || !finished { - r.failer.Panic(codelocation.New(2), e) - select { - case <-done: - break - default: - close(done) - } - } - }() - - r.asyncFunc(done) - finished = true - }() - - // If this goroutine gets no CPU time before the select block, - // the <-done case may complete even if the test took longer than the timeoutThreshold. - // This can cause flaky behaviour, but we haven't seen it in the wild. - select { - case <-done: - case <-time.After(r.timeoutThreshold): - r.failer.Timeout(r.codeLocation) - } - - failure, outcome = r.failer.Drain(r.nodeType, r.componentIndex, r.codeLocation) - return -} -func (r *runner) runSync() (outcome types.SpecState, failure types.SpecFailure) { - finished := false - - defer func() { - if e := recover(); e != nil || !finished { - r.failer.Panic(codelocation.New(2), e) - } - - failure, outcome = r.failer.Drain(r.nodeType, r.componentIndex, r.codeLocation) - }() - - r.syncFunc() - finished = true - - return -} diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes.go deleted file mode 100644 index e3e9cb7c..00000000 --- a/vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes.go +++ /dev/null @@ -1,48 +0,0 @@ -package leafnodes - -import ( - "time" - - "github.com/onsi/ginkgo/internal/failer" - "github.com/onsi/ginkgo/types" -) - -type SetupNode struct { - runner *runner -} - -func (node *SetupNode) Run() (outcome types.SpecState, failure types.SpecFailure) { - return node.runner.run() -} - -func (node *SetupNode) Type() types.SpecComponentType { - return node.runner.nodeType -} - -func (node *SetupNode) CodeLocation() types.CodeLocation { - return node.runner.codeLocation -} - -func NewBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode { - return &SetupNode{ - runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeBeforeEach, componentIndex), - } -} - -func NewAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode { - return &SetupNode{ - runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeAfterEach, componentIndex), - } -} - -func NewJustBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode { - return &SetupNode{ - runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeJustBeforeEach, componentIndex), - } -} - -func NewJustAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode { - return &SetupNode{ - runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeJustAfterEach, componentIndex), - } -} diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes.go deleted file mode 100644 index 80f16ed7..00000000 --- a/vendor/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes.go +++ /dev/null @@ -1,55 +0,0 @@ -package leafnodes - -import ( - "time" - 
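// Illustrative sketch (not part of this patch): a simplified variant of the
// async-with-timeout shape in runner.runAsync above. The vendored code routes
// panics through the Failer and closes the done channel itself; this version
// reports them over a second channel instead.
package main

import (
	"fmt"
	"time"
)

func runWithTimeout(body func(done chan<- interface{}), timeout time.Duration) error {
	done := make(chan interface{}, 1)
	panicked := make(chan interface{}, 1)

	go func() {
		defer func() {
			if e := recover(); e != nil {
				panicked <- e // surface the panic instead of crashing the process
			}
		}()
		body(done)
	}()

	select {
	case <-done:
		return nil
	case e := <-panicked:
		return fmt.Errorf("test panicked: %v", e)
	case <-time.After(timeout):
		return fmt.Errorf("timed out after %s", timeout)
	}
}

func main() {
	err := runWithTimeout(func(done chan<- interface{}) {
		time.Sleep(50 * time.Millisecond)
		close(done)
	}, 10*time.Millisecond)
	fmt.Println(err)
}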
- "github.com/onsi/ginkgo/internal/failer" - "github.com/onsi/ginkgo/types" -) - -type SuiteNode interface { - Run(parallelNode int, parallelTotal int, syncHost string) bool - Passed() bool - Summary() *types.SetupSummary -} - -type simpleSuiteNode struct { - runner *runner - outcome types.SpecState - failure types.SpecFailure - runTime time.Duration -} - -func (node *simpleSuiteNode) Run(parallelNode int, parallelTotal int, syncHost string) bool { - t := time.Now() - node.outcome, node.failure = node.runner.run() - node.runTime = time.Since(t) - - return node.outcome == types.SpecStatePassed -} - -func (node *simpleSuiteNode) Passed() bool { - return node.outcome == types.SpecStatePassed -} - -func (node *simpleSuiteNode) Summary() *types.SetupSummary { - return &types.SetupSummary{ - ComponentType: node.runner.nodeType, - CodeLocation: node.runner.codeLocation, - State: node.outcome, - RunTime: node.runTime, - Failure: node.failure, - } -} - -func NewBeforeSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode { - return &simpleSuiteNode{ - runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeBeforeSuite, 0), - } -} - -func NewAfterSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode { - return &simpleSuiteNode{ - runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeAfterSuite, 0), - } -} diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node.go deleted file mode 100644 index a721d0cf..00000000 --- a/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node.go +++ /dev/null @@ -1,90 +0,0 @@ -package leafnodes - -import ( - "encoding/json" - "io/ioutil" - "net/http" - "time" - - "github.com/onsi/ginkgo/internal/failer" - "github.com/onsi/ginkgo/types" -) - -type synchronizedAfterSuiteNode struct { - runnerA *runner - runnerB *runner - - outcome types.SpecState - failure types.SpecFailure - runTime time.Duration -} - -func NewSynchronizedAfterSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode { - return &synchronizedAfterSuiteNode{ - runnerA: newRunner(bodyA, codeLocation, timeout, failer, types.SpecComponentTypeAfterSuite, 0), - runnerB: newRunner(bodyB, codeLocation, timeout, failer, types.SpecComponentTypeAfterSuite, 0), - } -} - -func (node *synchronizedAfterSuiteNode) Run(parallelNode int, parallelTotal int, syncHost string) bool { - node.outcome, node.failure = node.runnerA.run() - - if parallelNode == 1 { - if parallelTotal > 1 { - node.waitUntilOtherNodesAreDone(syncHost) - } - - outcome, failure := node.runnerB.run() - - if node.outcome == types.SpecStatePassed { - node.outcome, node.failure = outcome, failure - } - } - - return node.outcome == types.SpecStatePassed -} - -func (node *synchronizedAfterSuiteNode) Passed() bool { - return node.outcome == types.SpecStatePassed -} - -func (node *synchronizedAfterSuiteNode) Summary() *types.SetupSummary { - return &types.SetupSummary{ - ComponentType: node.runnerA.nodeType, - CodeLocation: node.runnerA.codeLocation, - State: node.outcome, - RunTime: node.runTime, - Failure: node.failure, - } -} - -func (node *synchronizedAfterSuiteNode) waitUntilOtherNodesAreDone(syncHost string) { - for { - if node.canRun(syncHost) { - return - } - - 
time.Sleep(50 * time.Millisecond) - } -} - -func (node *synchronizedAfterSuiteNode) canRun(syncHost string) bool { - resp, err := http.Get(syncHost + "/RemoteAfterSuiteData") - if err != nil || resp.StatusCode != http.StatusOK { - return false - } - - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return false - } - resp.Body.Close() - - afterSuiteData := types.RemoteAfterSuiteData{} - err = json.Unmarshal(body, &afterSuiteData) - if err != nil { - return false - } - - return afterSuiteData.CanRun -} diff --git a/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node.go b/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node.go deleted file mode 100644 index d5c88931..00000000 --- a/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node.go +++ /dev/null @@ -1,181 +0,0 @@ -package leafnodes - -import ( - "bytes" - "encoding/json" - "io/ioutil" - "net/http" - "reflect" - "time" - - "github.com/onsi/ginkgo/internal/failer" - "github.com/onsi/ginkgo/types" -) - -type synchronizedBeforeSuiteNode struct { - runnerA *runner - runnerB *runner - - data []byte - - outcome types.SpecState - failure types.SpecFailure - runTime time.Duration -} - -func NewSynchronizedBeforeSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode { - node := &synchronizedBeforeSuiteNode{} - - node.runnerA = newRunner(node.wrapA(bodyA), codeLocation, timeout, failer, types.SpecComponentTypeBeforeSuite, 0) - node.runnerB = newRunner(node.wrapB(bodyB), codeLocation, timeout, failer, types.SpecComponentTypeBeforeSuite, 0) - - return node -} - -func (node *synchronizedBeforeSuiteNode) Run(parallelNode int, parallelTotal int, syncHost string) bool { - t := time.Now() - defer func() { - node.runTime = time.Since(t) - }() - - if parallelNode == 1 { - node.outcome, node.failure = node.runA(parallelTotal, syncHost) - } else { - node.outcome, node.failure = node.waitForA(syncHost) - } - - if node.outcome != types.SpecStatePassed { - return false - } - node.outcome, node.failure = node.runnerB.run() - - return node.outcome == types.SpecStatePassed -} - -func (node *synchronizedBeforeSuiteNode) runA(parallelTotal int, syncHost string) (types.SpecState, types.SpecFailure) { - outcome, failure := node.runnerA.run() - - if parallelTotal > 1 { - state := types.RemoteBeforeSuiteStatePassed - if outcome != types.SpecStatePassed { - state = types.RemoteBeforeSuiteStateFailed - } - json := (types.RemoteBeforeSuiteData{ - Data: node.data, - State: state, - }).ToJSON() - http.Post(syncHost+"/BeforeSuiteState", "application/json", bytes.NewBuffer(json)) - } - - return outcome, failure -} - -func (node *synchronizedBeforeSuiteNode) waitForA(syncHost string) (types.SpecState, types.SpecFailure) { - failure := func(message string) types.SpecFailure { - return types.SpecFailure{ - Message: message, - Location: node.runnerA.codeLocation, - ComponentType: node.runnerA.nodeType, - ComponentIndex: node.runnerA.componentIndex, - ComponentCodeLocation: node.runnerA.codeLocation, - } - } - for { - resp, err := http.Get(syncHost + "/BeforeSuiteState") - if err != nil || resp.StatusCode != http.StatusOK { - return types.SpecStateFailed, failure("Failed to fetch BeforeSuite state") - } - - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return types.SpecStateFailed, failure("Failed to read BeforeSuite state") - } - resp.Body.Close() - - beforeSuiteData := 
types.RemoteBeforeSuiteData{} - err = json.Unmarshal(body, &beforeSuiteData) - if err != nil { - return types.SpecStateFailed, failure("Failed to decode BeforeSuite state") - } - - switch beforeSuiteData.State { - case types.RemoteBeforeSuiteStatePassed: - node.data = beforeSuiteData.Data - return types.SpecStatePassed, types.SpecFailure{} - case types.RemoteBeforeSuiteStateFailed: - return types.SpecStateFailed, failure("BeforeSuite on Node 1 failed") - case types.RemoteBeforeSuiteStateDisappeared: - return types.SpecStateFailed, failure("Node 1 disappeared before completing BeforeSuite") - } - - time.Sleep(50 * time.Millisecond) - } -} - -func (node *synchronizedBeforeSuiteNode) Passed() bool { - return node.outcome == types.SpecStatePassed -} - -func (node *synchronizedBeforeSuiteNode) Summary() *types.SetupSummary { - return &types.SetupSummary{ - ComponentType: node.runnerA.nodeType, - CodeLocation: node.runnerA.codeLocation, - State: node.outcome, - RunTime: node.runTime, - Failure: node.failure, - } -} - -func (node *synchronizedBeforeSuiteNode) wrapA(bodyA interface{}) interface{} { - typeA := reflect.TypeOf(bodyA) - if typeA.Kind() != reflect.Func { - panic("SynchronizedBeforeSuite expects a function as its first argument") - } - - takesNothing := typeA.NumIn() == 0 - takesADoneChannel := typeA.NumIn() == 1 && typeA.In(0).Kind() == reflect.Chan && typeA.In(0).Elem().Kind() == reflect.Interface - returnsBytes := typeA.NumOut() == 1 && typeA.Out(0).Kind() == reflect.Slice && typeA.Out(0).Elem().Kind() == reflect.Uint8 - - if !((takesNothing || takesADoneChannel) && returnsBytes) { - panic("SynchronizedBeforeSuite's first argument should be a function that returns []byte and either takes no arguments or takes a Done channel.") - } - - if takesADoneChannel { - return func(done chan<- interface{}) { - out := reflect.ValueOf(bodyA).Call([]reflect.Value{reflect.ValueOf(done)}) - node.data = out[0].Interface().([]byte) - } - } - - return func() { - out := reflect.ValueOf(bodyA).Call([]reflect.Value{}) - node.data = out[0].Interface().([]byte) - } -} - -func (node *synchronizedBeforeSuiteNode) wrapB(bodyB interface{}) interface{} { - typeB := reflect.TypeOf(bodyB) - if typeB.Kind() != reflect.Func { - panic("SynchronizedBeforeSuite expects a function as its second argument") - } - - returnsNothing := typeB.NumOut() == 0 - takesBytesOnly := typeB.NumIn() == 1 && typeB.In(0).Kind() == reflect.Slice && typeB.In(0).Elem().Kind() == reflect.Uint8 - takesBytesAndDone := typeB.NumIn() == 2 && - typeB.In(0).Kind() == reflect.Slice && typeB.In(0).Elem().Kind() == reflect.Uint8 && - typeB.In(1).Kind() == reflect.Chan && typeB.In(1).Elem().Kind() == reflect.Interface - - if !((takesBytesOnly || takesBytesAndDone) && returnsNothing) { - panic("SynchronizedBeforeSuite's second argument should be a function that returns nothing and either takes []byte or ([]byte, Done)") - } - - if takesBytesAndDone { - return func(done chan<- interface{}) { - reflect.ValueOf(bodyB).Call([]reflect.Value{reflect.ValueOf(node.data), reflect.ValueOf(done)}) - } - } - - return func() { - reflect.ValueOf(bodyB).Call([]reflect.Value{reflect.ValueOf(node.data)}) - } -} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go b/vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go deleted file mode 100644 index 992437d9..00000000 --- a/vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go +++ /dev/null @@ -1,249 +0,0 @@ -/* - -Aggregator is a reporter used by the Ginkgo CLI to aggregate and present 
parallel test output -coherently as tests complete. You shouldn't need to use this in your code. To run tests in parallel: - - ginkgo -nodes=N - -where N is the number of nodes you desire. -*/ -package remote - -import ( - "time" - - "github.com/onsi/ginkgo/config" - "github.com/onsi/ginkgo/reporters/stenographer" - "github.com/onsi/ginkgo/types" -) - -type configAndSuite struct { - config config.GinkgoConfigType - summary *types.SuiteSummary -} - -type Aggregator struct { - nodeCount int - config config.DefaultReporterConfigType - stenographer stenographer.Stenographer - result chan bool - - suiteBeginnings chan configAndSuite - aggregatedSuiteBeginnings []configAndSuite - - beforeSuites chan *types.SetupSummary - aggregatedBeforeSuites []*types.SetupSummary - - afterSuites chan *types.SetupSummary - aggregatedAfterSuites []*types.SetupSummary - - specCompletions chan *types.SpecSummary - completedSpecs []*types.SpecSummary - - suiteEndings chan *types.SuiteSummary - aggregatedSuiteEndings []*types.SuiteSummary - specs []*types.SpecSummary - - startTime time.Time -} - -func NewAggregator(nodeCount int, result chan bool, config config.DefaultReporterConfigType, stenographer stenographer.Stenographer) *Aggregator { - aggregator := &Aggregator{ - nodeCount: nodeCount, - result: result, - config: config, - stenographer: stenographer, - - suiteBeginnings: make(chan configAndSuite), - beforeSuites: make(chan *types.SetupSummary), - afterSuites: make(chan *types.SetupSummary), - specCompletions: make(chan *types.SpecSummary), - suiteEndings: make(chan *types.SuiteSummary), - } - - go aggregator.mux() - - return aggregator -} - -func (aggregator *Aggregator) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) { - aggregator.suiteBeginnings <- configAndSuite{config, summary} -} - -func (aggregator *Aggregator) BeforeSuiteDidRun(setupSummary *types.SetupSummary) { - aggregator.beforeSuites <- setupSummary -} - -func (aggregator *Aggregator) AfterSuiteDidRun(setupSummary *types.SetupSummary) { - aggregator.afterSuites <- setupSummary -} - -func (aggregator *Aggregator) SpecWillRun(specSummary *types.SpecSummary) { - //noop -} - -func (aggregator *Aggregator) SpecDidComplete(specSummary *types.SpecSummary) { - aggregator.specCompletions <- specSummary -} - -func (aggregator *Aggregator) SpecSuiteDidEnd(summary *types.SuiteSummary) { - aggregator.suiteEndings <- summary -} - -func (aggregator *Aggregator) mux() { -loop: - for { - select { - case configAndSuite := <-aggregator.suiteBeginnings: - aggregator.registerSuiteBeginning(configAndSuite) - case setupSummary := <-aggregator.beforeSuites: - aggregator.registerBeforeSuite(setupSummary) - case setupSummary := <-aggregator.afterSuites: - aggregator.registerAfterSuite(setupSummary) - case specSummary := <-aggregator.specCompletions: - aggregator.registerSpecCompletion(specSummary) - case suite := <-aggregator.suiteEndings: - finished, passed := aggregator.registerSuiteEnding(suite) - if finished { - aggregator.result <- passed - break loop - } - } - } -} - -func (aggregator *Aggregator) registerSuiteBeginning(configAndSuite configAndSuite) { - aggregator.aggregatedSuiteBeginnings = append(aggregator.aggregatedSuiteBeginnings, configAndSuite) - - if len(aggregator.aggregatedSuiteBeginnings) == 1 { - aggregator.startTime = time.Now() - } - - if len(aggregator.aggregatedSuiteBeginnings) != aggregator.nodeCount { - return - } - - aggregator.stenographer.AnnounceSuite(configAndSuite.summary.SuiteDescription, 
configAndSuite.config.RandomSeed, configAndSuite.config.RandomizeAllSpecs, aggregator.config.Succinct) - - totalNumberOfSpecs := 0 - if len(aggregator.aggregatedSuiteBeginnings) > 0 { - totalNumberOfSpecs = configAndSuite.summary.NumberOfSpecsBeforeParallelization - } - - aggregator.stenographer.AnnounceTotalNumberOfSpecs(totalNumberOfSpecs, aggregator.config.Succinct) - aggregator.stenographer.AnnounceAggregatedParallelRun(aggregator.nodeCount, aggregator.config.Succinct) - aggregator.flushCompletedSpecs() -} - -func (aggregator *Aggregator) registerBeforeSuite(setupSummary *types.SetupSummary) { - aggregator.aggregatedBeforeSuites = append(aggregator.aggregatedBeforeSuites, setupSummary) - aggregator.flushCompletedSpecs() -} - -func (aggregator *Aggregator) registerAfterSuite(setupSummary *types.SetupSummary) { - aggregator.aggregatedAfterSuites = append(aggregator.aggregatedAfterSuites, setupSummary) - aggregator.flushCompletedSpecs() -} - -func (aggregator *Aggregator) registerSpecCompletion(specSummary *types.SpecSummary) { - aggregator.completedSpecs = append(aggregator.completedSpecs, specSummary) - aggregator.specs = append(aggregator.specs, specSummary) - aggregator.flushCompletedSpecs() -} - -func (aggregator *Aggregator) flushCompletedSpecs() { - if len(aggregator.aggregatedSuiteBeginnings) != aggregator.nodeCount { - return - } - - for _, setupSummary := range aggregator.aggregatedBeforeSuites { - aggregator.announceBeforeSuite(setupSummary) - } - - for _, specSummary := range aggregator.completedSpecs { - aggregator.announceSpec(specSummary) - } - - for _, setupSummary := range aggregator.aggregatedAfterSuites { - aggregator.announceAfterSuite(setupSummary) - } - - aggregator.aggregatedBeforeSuites = []*types.SetupSummary{} - aggregator.completedSpecs = []*types.SpecSummary{} - aggregator.aggregatedAfterSuites = []*types.SetupSummary{} -} - -func (aggregator *Aggregator) announceBeforeSuite(setupSummary *types.SetupSummary) { - aggregator.stenographer.AnnounceCapturedOutput(setupSummary.CapturedOutput) - if setupSummary.State != types.SpecStatePassed { - aggregator.stenographer.AnnounceBeforeSuiteFailure(setupSummary, aggregator.config.Succinct, aggregator.config.FullTrace) - } -} - -func (aggregator *Aggregator) announceAfterSuite(setupSummary *types.SetupSummary) { - aggregator.stenographer.AnnounceCapturedOutput(setupSummary.CapturedOutput) - if setupSummary.State != types.SpecStatePassed { - aggregator.stenographer.AnnounceAfterSuiteFailure(setupSummary, aggregator.config.Succinct, aggregator.config.FullTrace) - } -} - -func (aggregator *Aggregator) announceSpec(specSummary *types.SpecSummary) { - if aggregator.config.Verbose && specSummary.State != types.SpecStatePending && specSummary.State != types.SpecStateSkipped { - aggregator.stenographer.AnnounceSpecWillRun(specSummary) - } - - aggregator.stenographer.AnnounceCapturedOutput(specSummary.CapturedOutput) - - switch specSummary.State { - case types.SpecStatePassed: - if specSummary.IsMeasurement { - aggregator.stenographer.AnnounceSuccessfulMeasurement(specSummary, aggregator.config.Succinct) - } else if specSummary.RunTime.Seconds() >= aggregator.config.SlowSpecThreshold { - aggregator.stenographer.AnnounceSuccessfulSlowSpec(specSummary, aggregator.config.Succinct) - } else { - aggregator.stenographer.AnnounceSuccessfulSpec(specSummary) - } - - case types.SpecStatePending: - aggregator.stenographer.AnnouncePendingSpec(specSummary, aggregator.config.NoisyPendings && !aggregator.config.Succinct) - case 
types.SpecStateSkipped: - aggregator.stenographer.AnnounceSkippedSpec(specSummary, aggregator.config.Succinct || !aggregator.config.NoisySkippings, aggregator.config.FullTrace) - case types.SpecStateTimedOut: - aggregator.stenographer.AnnounceSpecTimedOut(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace) - case types.SpecStatePanicked: - aggregator.stenographer.AnnounceSpecPanicked(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace) - case types.SpecStateFailed: - aggregator.stenographer.AnnounceSpecFailed(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace) - } -} - -func (aggregator *Aggregator) registerSuiteEnding(suite *types.SuiteSummary) (finished bool, passed bool) { - aggregator.aggregatedSuiteEndings = append(aggregator.aggregatedSuiteEndings, suite) - if len(aggregator.aggregatedSuiteEndings) < aggregator.nodeCount { - return false, false - } - - aggregatedSuiteSummary := &types.SuiteSummary{} - aggregatedSuiteSummary.SuiteSucceeded = true - - for _, suiteSummary := range aggregator.aggregatedSuiteEndings { - if !suiteSummary.SuiteSucceeded { - aggregatedSuiteSummary.SuiteSucceeded = false - } - - aggregatedSuiteSummary.NumberOfSpecsThatWillBeRun += suiteSummary.NumberOfSpecsThatWillBeRun - aggregatedSuiteSummary.NumberOfTotalSpecs += suiteSummary.NumberOfTotalSpecs - aggregatedSuiteSummary.NumberOfPassedSpecs += suiteSummary.NumberOfPassedSpecs - aggregatedSuiteSummary.NumberOfFailedSpecs += suiteSummary.NumberOfFailedSpecs - aggregatedSuiteSummary.NumberOfPendingSpecs += suiteSummary.NumberOfPendingSpecs - aggregatedSuiteSummary.NumberOfSkippedSpecs += suiteSummary.NumberOfSkippedSpecs - aggregatedSuiteSummary.NumberOfFlakedSpecs += suiteSummary.NumberOfFlakedSpecs - } - - aggregatedSuiteSummary.RunTime = time.Since(aggregator.startTime) - - aggregator.stenographer.SummarizeFailures(aggregator.specs) - aggregator.stenographer.AnnounceSpecRunCompletion(aggregatedSuiteSummary, aggregator.config.Succinct) - - return true, aggregatedSuiteSummary.SuiteSucceeded -} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter.go b/vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter.go deleted file mode 100644 index 284bc62e..00000000 --- a/vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter.go +++ /dev/null @@ -1,147 +0,0 @@ -package remote - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "net/http" - "os" - - "github.com/onsi/ginkgo/internal/writer" - "github.com/onsi/ginkgo/reporters" - "github.com/onsi/ginkgo/reporters/stenographer" - - "github.com/onsi/ginkgo/config" - "github.com/onsi/ginkgo/types" -) - -//An interface to net/http's client to allow the injection of fakes under test -type Poster interface { - Post(url string, bodyType string, body io.Reader) (resp *http.Response, err error) -} - -/* -The ForwardingReporter is a Ginkgo reporter that forwards information to -a Ginkgo remote server. - -When streaming parallel test output, this repoter is automatically installed by Ginkgo. - -This is accomplished by passing in the GINKGO_REMOTE_REPORTING_SERVER environment variable to `go test`, the Ginkgo test runner -detects this environment variable (which should contain the host of the server) and automatically installs a ForwardingReporter -in place of Ginkgo's DefaultReporter. 
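// Illustrative sketch (not part of this patch): the forwarding mechanism the
// comment above describes — each reporter event is marshalled to JSON and
// POSTed to an endpoint named after the event. httptest stands in for the
// remote server; the specSummary type is invented for the example.
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
	"net/http/httptest"
)

type specSummary struct {
	Name   string `json:"name"`
	Passed bool   `json:"passed"`
}

func post(host, path string, v interface{}) error {
	encoded, err := json.Marshal(v)
	if err != nil {
		return err
	}
	resp, err := http.Post(host+path, "application/json", bytes.NewBuffer(encoded))
	if err != nil {
		return err
	}
	return resp.Body.Close()
}

func main() {
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		var s specSummary
		json.NewDecoder(r.Body).Decode(&s)
		fmt.Printf("%s -> %+v\n", r.URL.Path, s)
	}))
	defer srv.Close()

	post(srv.URL, "/SpecDidComplete", specSummary{Name: "login works", Passed: true})
}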
-*/ - -type ForwardingReporter struct { - serverHost string - poster Poster - outputInterceptor OutputInterceptor - debugMode bool - debugFile *os.File - nestedReporter *reporters.DefaultReporter -} - -func NewForwardingReporter(config config.DefaultReporterConfigType, serverHost string, poster Poster, outputInterceptor OutputInterceptor, ginkgoWriter *writer.Writer, debugFile string) *ForwardingReporter { - reporter := &ForwardingReporter{ - serverHost: serverHost, - poster: poster, - outputInterceptor: outputInterceptor, - } - - if debugFile != "" { - var err error - reporter.debugMode = true - reporter.debugFile, err = os.Create(debugFile) - if err != nil { - fmt.Println(err.Error()) - os.Exit(1) - } - - if !config.Verbose { - //if verbose is true then the GinkgoWriter emits to stdout. Don't _also_ redirect GinkgoWriter output as that will result in duplication. - ginkgoWriter.AndRedirectTo(reporter.debugFile) - } - outputInterceptor.StreamTo(reporter.debugFile) //This is not working - - stenographer := stenographer.New(false, true, reporter.debugFile) - config.Succinct = false - config.Verbose = true - config.FullTrace = true - reporter.nestedReporter = reporters.NewDefaultReporter(config, stenographer) - } - - return reporter -} - -func (reporter *ForwardingReporter) post(path string, data interface{}) { - encoded, _ := json.Marshal(data) - buffer := bytes.NewBuffer(encoded) - reporter.poster.Post(reporter.serverHost+path, "application/json", buffer) -} - -func (reporter *ForwardingReporter) SpecSuiteWillBegin(conf config.GinkgoConfigType, summary *types.SuiteSummary) { - data := struct { - Config config.GinkgoConfigType `json:"config"` - Summary *types.SuiteSummary `json:"suite-summary"` - }{ - conf, - summary, - } - - reporter.outputInterceptor.StartInterceptingOutput() - if reporter.debugMode { - reporter.nestedReporter.SpecSuiteWillBegin(conf, summary) - reporter.debugFile.Sync() - } - reporter.post("/SpecSuiteWillBegin", data) -} - -func (reporter *ForwardingReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) { - output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput() - reporter.outputInterceptor.StartInterceptingOutput() - setupSummary.CapturedOutput = output - if reporter.debugMode { - reporter.nestedReporter.BeforeSuiteDidRun(setupSummary) - reporter.debugFile.Sync() - } - reporter.post("/BeforeSuiteDidRun", setupSummary) -} - -func (reporter *ForwardingReporter) SpecWillRun(specSummary *types.SpecSummary) { - if reporter.debugMode { - reporter.nestedReporter.SpecWillRun(specSummary) - reporter.debugFile.Sync() - } - reporter.post("/SpecWillRun", specSummary) -} - -func (reporter *ForwardingReporter) SpecDidComplete(specSummary *types.SpecSummary) { - output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput() - reporter.outputInterceptor.StartInterceptingOutput() - specSummary.CapturedOutput = output - if reporter.debugMode { - reporter.nestedReporter.SpecDidComplete(specSummary) - reporter.debugFile.Sync() - } - reporter.post("/SpecDidComplete", specSummary) -} - -func (reporter *ForwardingReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) { - output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput() - reporter.outputInterceptor.StartInterceptingOutput() - setupSummary.CapturedOutput = output - if reporter.debugMode { - reporter.nestedReporter.AfterSuiteDidRun(setupSummary) - reporter.debugFile.Sync() - } - reporter.post("/AfterSuiteDidRun", setupSummary) -} - -func (reporter *ForwardingReporter) 
SpecSuiteDidEnd(summary *types.SuiteSummary) { - reporter.outputInterceptor.StopInterceptingAndReturnOutput() - if reporter.debugMode { - reporter.nestedReporter.SpecSuiteDidEnd(summary) - reporter.debugFile.Sync() - } - reporter.post("/SpecSuiteDidEnd", summary) -} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor.go deleted file mode 100644 index 5154abe8..00000000 --- a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor.go +++ /dev/null @@ -1,13 +0,0 @@ -package remote - -import "os" - -/* -The OutputInterceptor is used by the ForwardingReporter to -intercept and capture all stdin and stderr output during a test run. -*/ -type OutputInterceptor interface { - StartInterceptingOutput() error - StopInterceptingAndReturnOutput() (string, error) - StreamTo(*os.File) -} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go deleted file mode 100644 index 774967db..00000000 --- a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go +++ /dev/null @@ -1,82 +0,0 @@ -// +build freebsd openbsd netbsd dragonfly darwin linux solaris - -package remote - -import ( - "errors" - "io/ioutil" - "os" - - "github.com/nxadm/tail" - "golang.org/x/sys/unix" -) - -func NewOutputInterceptor() OutputInterceptor { - return &outputInterceptor{} -} - -type outputInterceptor struct { - redirectFile *os.File - streamTarget *os.File - intercepting bool - tailer *tail.Tail - doneTailing chan bool -} - -func (interceptor *outputInterceptor) StartInterceptingOutput() error { - if interceptor.intercepting { - return errors.New("Already intercepting output!") - } - interceptor.intercepting = true - - var err error - - interceptor.redirectFile, err = ioutil.TempFile("", "ginkgo-output") - if err != nil { - return err - } - - // This might call Dup3 if the dup2 syscall is not available, e.g. 
on - // linux/arm64 or linux/riscv64 - unix.Dup2(int(interceptor.redirectFile.Fd()), 1) - unix.Dup2(int(interceptor.redirectFile.Fd()), 2) - - if interceptor.streamTarget != nil { - interceptor.tailer, _ = tail.TailFile(interceptor.redirectFile.Name(), tail.Config{Follow: true}) - interceptor.doneTailing = make(chan bool) - - go func() { - for line := range interceptor.tailer.Lines { - interceptor.streamTarget.Write([]byte(line.Text + "\n")) - } - close(interceptor.doneTailing) - }() - } - - return nil -} - -func (interceptor *outputInterceptor) StopInterceptingAndReturnOutput() (string, error) { - if !interceptor.intercepting { - return "", errors.New("Not intercepting output!") - } - - interceptor.redirectFile.Close() - output, err := ioutil.ReadFile(interceptor.redirectFile.Name()) - os.Remove(interceptor.redirectFile.Name()) - - interceptor.intercepting = false - - if interceptor.streamTarget != nil { - interceptor.tailer.Stop() - interceptor.tailer.Cleanup() - <-interceptor.doneTailing - interceptor.streamTarget.Sync() - } - - return string(output), err -} - -func (interceptor *outputInterceptor) StreamTo(out *os.File) { - interceptor.streamTarget = out -} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_win.go b/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_win.go deleted file mode 100644 index 40c79033..00000000 --- a/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_win.go +++ /dev/null @@ -1,36 +0,0 @@ -// +build windows - -package remote - -import ( - "errors" - "os" -) - -func NewOutputInterceptor() OutputInterceptor { - return &outputInterceptor{} -} - -type outputInterceptor struct { - intercepting bool -} - -func (interceptor *outputInterceptor) StartInterceptingOutput() error { - if interceptor.intercepting { - return errors.New("Already intercepting output!") - } - interceptor.intercepting = true - - // not working on windows... - - return nil -} - -func (interceptor *outputInterceptor) StopInterceptingAndReturnOutput() (string, error) { - // not working on windows... - interceptor.intercepting = false - - return "", nil -} - -func (interceptor *outputInterceptor) StreamTo(*os.File) {} diff --git a/vendor/github.com/onsi/ginkgo/internal/remote/server.go b/vendor/github.com/onsi/ginkgo/internal/remote/server.go deleted file mode 100644 index 93e9dac0..00000000 --- a/vendor/github.com/onsi/ginkgo/internal/remote/server.go +++ /dev/null @@ -1,224 +0,0 @@ -/* - -The remote package provides the pieces to allow Ginkgo test suites to report to remote listeners. -This is used, primarily, to enable streaming parallel test output but has, in principal, broader applications (e.g. streaming test output to a browser). - -*/ - -package remote - -import ( - "encoding/json" - "io/ioutil" - "net" - "net/http" - "sync" - - "github.com/onsi/ginkgo/internal/spec_iterator" - - "github.com/onsi/ginkgo/config" - "github.com/onsi/ginkgo/reporters" - "github.com/onsi/ginkgo/types" -) - -/* -Server spins up on an automatically selected port and listens for communication from the forwarding reporter. -It then forwards that communication to attached reporters. 
-*/ -type Server struct { - listener net.Listener - reporters []reporters.Reporter - alives []func() bool - lock *sync.Mutex - beforeSuiteData types.RemoteBeforeSuiteData - parallelTotal int - counter int -} - -//Create a new server, automatically selecting a port -func NewServer(parallelTotal int) (*Server, error) { - listener, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - return nil, err - } - return &Server{ - listener: listener, - lock: &sync.Mutex{}, - alives: make([]func() bool, parallelTotal), - beforeSuiteData: types.RemoteBeforeSuiteData{Data: nil, State: types.RemoteBeforeSuiteStatePending}, - parallelTotal: parallelTotal, - }, nil -} - -//Start the server. You don't need to `go s.Start()`, just `s.Start()` -func (server *Server) Start() { - httpServer := &http.Server{} - mux := http.NewServeMux() - httpServer.Handler = mux - - //streaming endpoints - mux.HandleFunc("/SpecSuiteWillBegin", server.specSuiteWillBegin) - mux.HandleFunc("/BeforeSuiteDidRun", server.beforeSuiteDidRun) - mux.HandleFunc("/AfterSuiteDidRun", server.afterSuiteDidRun) - mux.HandleFunc("/SpecWillRun", server.specWillRun) - mux.HandleFunc("/SpecDidComplete", server.specDidComplete) - mux.HandleFunc("/SpecSuiteDidEnd", server.specSuiteDidEnd) - - //synchronization endpoints - mux.HandleFunc("/BeforeSuiteState", server.handleBeforeSuiteState) - mux.HandleFunc("/RemoteAfterSuiteData", server.handleRemoteAfterSuiteData) - mux.HandleFunc("/counter", server.handleCounter) - mux.HandleFunc("/has-counter", server.handleHasCounter) //for backward compatibility - - go httpServer.Serve(server.listener) -} - -//Stop the server -func (server *Server) Close() { - server.listener.Close() -} - -//The address the server can be reached it. Pass this into the `ForwardingReporter`. 
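// Illustrative sketch (not part of this patch): the automatic port selection
// used by NewServer above — bind to port 0 so the kernel assigns a free port,
// then derive the client-facing address from the listener, as the Address
// method below does.
package main

import (
	"fmt"
	"io"
	"net"
	"net/http"
)

func main() {
	ln, err := net.Listen("tcp", "127.0.0.1:0") // port 0: kernel picks a free port
	if err != nil {
		panic(err)
	}
	mux := http.NewServeMux()
	mux.HandleFunc("/ping", func(w http.ResponseWriter, r *http.Request) {
		io.WriteString(w, "pong")
	})
	go http.Serve(ln, mux)

	addr := "http://" + ln.Addr().String()
	resp, err := http.Get(addr + "/ping")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(addr, "->", string(body))
}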
-func (server *Server) Address() string { - return "http://" + server.listener.Addr().String() -} - -// -// Streaming Endpoints -// - -//The server will forward all received messages to Ginkgo reporters registered with `RegisterReporters` -func (server *Server) readAll(request *http.Request) []byte { - defer request.Body.Close() - body, _ := ioutil.ReadAll(request.Body) - return body -} - -func (server *Server) RegisterReporters(reporters ...reporters.Reporter) { - server.reporters = reporters -} - -func (server *Server) specSuiteWillBegin(writer http.ResponseWriter, request *http.Request) { - body := server.readAll(request) - - var data struct { - Config config.GinkgoConfigType `json:"config"` - Summary *types.SuiteSummary `json:"suite-summary"` - } - - json.Unmarshal(body, &data) - - for _, reporter := range server.reporters { - reporter.SpecSuiteWillBegin(data.Config, data.Summary) - } -} - -func (server *Server) beforeSuiteDidRun(writer http.ResponseWriter, request *http.Request) { - body := server.readAll(request) - var setupSummary *types.SetupSummary - json.Unmarshal(body, &setupSummary) - - for _, reporter := range server.reporters { - reporter.BeforeSuiteDidRun(setupSummary) - } -} - -func (server *Server) afterSuiteDidRun(writer http.ResponseWriter, request *http.Request) { - body := server.readAll(request) - var setupSummary *types.SetupSummary - json.Unmarshal(body, &setupSummary) - - for _, reporter := range server.reporters { - reporter.AfterSuiteDidRun(setupSummary) - } -} - -func (server *Server) specWillRun(writer http.ResponseWriter, request *http.Request) { - body := server.readAll(request) - var specSummary *types.SpecSummary - json.Unmarshal(body, &specSummary) - - for _, reporter := range server.reporters { - reporter.SpecWillRun(specSummary) - } -} - -func (server *Server) specDidComplete(writer http.ResponseWriter, request *http.Request) { - body := server.readAll(request) - var specSummary *types.SpecSummary - json.Unmarshal(body, &specSummary) - - for _, reporter := range server.reporters { - reporter.SpecDidComplete(specSummary) - } -} - -func (server *Server) specSuiteDidEnd(writer http.ResponseWriter, request *http.Request) { - body := server.readAll(request) - var suiteSummary *types.SuiteSummary - json.Unmarshal(body, &suiteSummary) - - for _, reporter := range server.reporters { - reporter.SpecSuiteDidEnd(suiteSummary) - } -} - -// -// Synchronization Endpoints -// - -func (server *Server) RegisterAlive(node int, alive func() bool) { - server.lock.Lock() - defer server.lock.Unlock() - server.alives[node-1] = alive -} - -func (server *Server) nodeIsAlive(node int) bool { - server.lock.Lock() - defer server.lock.Unlock() - alive := server.alives[node-1] - if alive == nil { - return true - } - return alive() -} - -func (server *Server) handleBeforeSuiteState(writer http.ResponseWriter, request *http.Request) { - if request.Method == "POST" { - dec := json.NewDecoder(request.Body) - dec.Decode(&(server.beforeSuiteData)) - } else { - beforeSuiteData := server.beforeSuiteData - if beforeSuiteData.State == types.RemoteBeforeSuiteStatePending && !server.nodeIsAlive(1) { - beforeSuiteData.State = types.RemoteBeforeSuiteStateDisappeared - } - enc := json.NewEncoder(writer) - enc.Encode(beforeSuiteData) - } -} - -func (server *Server) handleRemoteAfterSuiteData(writer http.ResponseWriter, request *http.Request) { - afterSuiteData := types.RemoteAfterSuiteData{ - CanRun: true, - } - for i := 2; i <= server.parallelTotal; i++ { - afterSuiteData.CanRun = 
afterSuiteData.CanRun && !server.nodeIsAlive(i) - } - - enc := json.NewEncoder(writer) - enc.Encode(afterSuiteData) -} - -func (server *Server) handleCounter(writer http.ResponseWriter, request *http.Request) { - c := spec_iterator.Counter{} - server.lock.Lock() - c.Index = server.counter - server.counter++ - server.lock.Unlock() - - json.NewEncoder(writer).Encode(c) -} - -func (server *Server) handleHasCounter(writer http.ResponseWriter, request *http.Request) { - writer.Write([]byte("")) -} diff --git a/vendor/github.com/onsi/ginkgo/internal/spec/spec.go b/vendor/github.com/onsi/ginkgo/internal/spec/spec.go deleted file mode 100644 index 6eef40a0..00000000 --- a/vendor/github.com/onsi/ginkgo/internal/spec/spec.go +++ /dev/null @@ -1,247 +0,0 @@ -package spec - -import ( - "fmt" - "io" - "time" - - "sync" - - "github.com/onsi/ginkgo/internal/containernode" - "github.com/onsi/ginkgo/internal/leafnodes" - "github.com/onsi/ginkgo/types" -) - -type Spec struct { - subject leafnodes.SubjectNode - focused bool - announceProgress bool - - containers []*containernode.ContainerNode - - state types.SpecState - runTime time.Duration - startTime time.Time - failure types.SpecFailure - previousFailures bool - - stateMutex *sync.Mutex -} - -func New(subject leafnodes.SubjectNode, containers []*containernode.ContainerNode, announceProgress bool) *Spec { - spec := &Spec{ - subject: subject, - containers: containers, - focused: subject.Flag() == types.FlagTypeFocused, - announceProgress: announceProgress, - stateMutex: &sync.Mutex{}, - } - - spec.processFlag(subject.Flag()) - for i := len(containers) - 1; i >= 0; i-- { - spec.processFlag(containers[i].Flag()) - } - - return spec -} - -func (spec *Spec) processFlag(flag types.FlagType) { - if flag == types.FlagTypeFocused { - spec.focused = true - } else if flag == types.FlagTypePending { - spec.setState(types.SpecStatePending) - } -} - -func (spec *Spec) Skip() { - spec.setState(types.SpecStateSkipped) -} - -func (spec *Spec) Failed() bool { - return spec.getState() == types.SpecStateFailed || spec.getState() == types.SpecStatePanicked || spec.getState() == types.SpecStateTimedOut -} - -func (spec *Spec) Passed() bool { - return spec.getState() == types.SpecStatePassed -} - -func (spec *Spec) Flaked() bool { - return spec.getState() == types.SpecStatePassed && spec.previousFailures -} - -func (spec *Spec) Pending() bool { - return spec.getState() == types.SpecStatePending -} - -func (spec *Spec) Skipped() bool { - return spec.getState() == types.SpecStateSkipped -} - -func (spec *Spec) Focused() bool { - return spec.focused -} - -func (spec *Spec) IsMeasurement() bool { - return spec.subject.Type() == types.SpecComponentTypeMeasure -} - -func (spec *Spec) Summary(suiteID string) *types.SpecSummary { - componentTexts := make([]string, len(spec.containers)+1) - componentCodeLocations := make([]types.CodeLocation, len(spec.containers)+1) - - for i, container := range spec.containers { - componentTexts[i] = container.Text() - componentCodeLocations[i] = container.CodeLocation() - } - - componentTexts[len(spec.containers)] = spec.subject.Text() - componentCodeLocations[len(spec.containers)] = spec.subject.CodeLocation() - - runTime := spec.runTime - if runTime == 0 && !spec.startTime.IsZero() { - runTime = time.Since(spec.startTime) - } - - return &types.SpecSummary{ - IsMeasurement: spec.IsMeasurement(), - NumberOfSamples: spec.subject.Samples(), - ComponentTexts: componentTexts, - ComponentCodeLocations: componentCodeLocations, - State: spec.getState(), - 
RunTime: runTime, - Failure: spec.failure, - Measurements: spec.measurementsReport(), - SuiteID: suiteID, - } -} - -func (spec *Spec) ConcatenatedString() string { - s := "" - for _, container := range spec.containers { - s += container.Text() + " " - } - - return s + spec.subject.Text() -} - -func (spec *Spec) Run(writer io.Writer) { - if spec.getState() == types.SpecStateFailed { - spec.previousFailures = true - } - - spec.startTime = time.Now() - defer func() { - spec.runTime = time.Since(spec.startTime) - }() - - for sample := 0; sample < spec.subject.Samples(); sample++ { - spec.runSample(sample, writer) - - if spec.getState() != types.SpecStatePassed { - return - } - } -} - -func (spec *Spec) getState() types.SpecState { - spec.stateMutex.Lock() - defer spec.stateMutex.Unlock() - return spec.state -} - -func (spec *Spec) setState(state types.SpecState) { - spec.stateMutex.Lock() - defer spec.stateMutex.Unlock() - spec.state = state -} - -func (spec *Spec) runSample(sample int, writer io.Writer) { - spec.setState(types.SpecStatePassed) - spec.failure = types.SpecFailure{} - innerMostContainerIndexToUnwind := -1 - - defer func() { - for i := innerMostContainerIndexToUnwind; i >= 0; i-- { - container := spec.containers[i] - for _, justAfterEach := range container.SetupNodesOfType(types.SpecComponentTypeJustAfterEach) { - spec.announceSetupNode(writer, "JustAfterEach", container, justAfterEach) - justAfterEachState, justAfterEachFailure := justAfterEach.Run() - if justAfterEachState != types.SpecStatePassed && spec.state == types.SpecStatePassed { - spec.state = justAfterEachState - spec.failure = justAfterEachFailure - } - } - } - - for i := innerMostContainerIndexToUnwind; i >= 0; i-- { - container := spec.containers[i] - for _, afterEach := range container.SetupNodesOfType(types.SpecComponentTypeAfterEach) { - spec.announceSetupNode(writer, "AfterEach", container, afterEach) - afterEachState, afterEachFailure := afterEach.Run() - if afterEachState != types.SpecStatePassed && spec.getState() == types.SpecStatePassed { - spec.setState(afterEachState) - spec.failure = afterEachFailure - } - } - } - }() - - for i, container := range spec.containers { - innerMostContainerIndexToUnwind = i - for _, beforeEach := range container.SetupNodesOfType(types.SpecComponentTypeBeforeEach) { - spec.announceSetupNode(writer, "BeforeEach", container, beforeEach) - s, f := beforeEach.Run() - spec.failure = f - spec.setState(s) - if spec.getState() != types.SpecStatePassed { - return - } - } - } - - for _, container := range spec.containers { - for _, justBeforeEach := range container.SetupNodesOfType(types.SpecComponentTypeJustBeforeEach) { - spec.announceSetupNode(writer, "JustBeforeEach", container, justBeforeEach) - s, f := justBeforeEach.Run() - spec.failure = f - spec.setState(s) - if spec.getState() != types.SpecStatePassed { - return - } - } - } - - spec.announceSubject(writer, spec.subject) - s, f := spec.subject.Run() - spec.failure = f - spec.setState(s) -} - -func (spec *Spec) announceSetupNode(writer io.Writer, nodeType string, container *containernode.ContainerNode, setupNode leafnodes.BasicNode) { - if spec.announceProgress { - s := fmt.Sprintf("[%s] %s\n %s\n", nodeType, container.Text(), setupNode.CodeLocation().String()) - writer.Write([]byte(s)) - } -} - -func (spec *Spec) announceSubject(writer io.Writer, subject leafnodes.SubjectNode) { - if spec.announceProgress { - nodeType := "" - switch subject.Type() { - case types.SpecComponentTypeIt: - nodeType = "It" - case 
types.SpecComponentTypeMeasure: - nodeType = "Measure" - } - s := fmt.Sprintf("[%s] %s\n %s\n", nodeType, subject.Text(), subject.CodeLocation().String()) - writer.Write([]byte(s)) - } -} - -func (spec *Spec) measurementsReport() map[string]*types.SpecMeasurement { - if !spec.IsMeasurement() || spec.Failed() { - return map[string]*types.SpecMeasurement{} - } - - return spec.subject.(*leafnodes.MeasureNode).MeasurementsReport() -} diff --git a/vendor/github.com/onsi/ginkgo/internal/spec/specs.go b/vendor/github.com/onsi/ginkgo/internal/spec/specs.go deleted file mode 100644 index 0a24139f..00000000 --- a/vendor/github.com/onsi/ginkgo/internal/spec/specs.go +++ /dev/null @@ -1,144 +0,0 @@ -package spec - -import ( - "math/rand" - "regexp" - "sort" - "strings" -) - -type Specs struct { - specs []*Spec - names []string - - hasProgrammaticFocus bool - RegexScansFilePath bool -} - -func NewSpecs(specs []*Spec) *Specs { - names := make([]string, len(specs)) - for i, spec := range specs { - names[i] = spec.ConcatenatedString() - } - return &Specs{ - specs: specs, - names: names, - } -} - -func (e *Specs) Specs() []*Spec { - return e.specs -} - -func (e *Specs) HasProgrammaticFocus() bool { - return e.hasProgrammaticFocus -} - -func (e *Specs) Shuffle(r *rand.Rand) { - sort.Sort(e) - permutation := r.Perm(len(e.specs)) - shuffledSpecs := make([]*Spec, len(e.specs)) - names := make([]string, len(e.specs)) - for i, j := range permutation { - shuffledSpecs[i] = e.specs[j] - names[i] = e.names[j] - } - e.specs = shuffledSpecs - e.names = names -} - -func (e *Specs) ApplyFocus(description string, focus, skip []string) { - if len(focus)+len(skip) == 0 { - e.applyProgrammaticFocus() - } else { - e.applyRegExpFocusAndSkip(description, focus, skip) - } -} - -func (e *Specs) applyProgrammaticFocus() { - e.hasProgrammaticFocus = false - for _, spec := range e.specs { - if spec.Focused() && !spec.Pending() { - e.hasProgrammaticFocus = true - break - } - } - - if e.hasProgrammaticFocus { - for _, spec := range e.specs { - if !spec.Focused() { - spec.Skip() - } - } - } -} - -// toMatch returns a byte[] to be used by regex matchers. When adding new behaviours to the matching function, -// this is the place which we append to. 
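The `runSample` flow in spec.go above is the core of Ginkgo v1's per-spec lifecycle: `BeforeEach` nodes run from the outermost container inward, the subject runs, and the deferred block unwinds `AfterEach` nodes from the innermost container that actually started back outward. A minimal sketch of that ordering (the `container` type and names here are illustrative, not Ginkgo's):

```go
package main

import "fmt"

type container struct {
	name       string
	beforeEach func() error
	afterEach  func()
}

// runSpec mirrors the deleted runSample: setups run outside-in, and the
// deferred unwind runs teardowns inside-out, but only for containers
// whose BeforeEach was actually reached.
func runSpec(containers []container, subject func() error) (err error) {
	innermost := -1
	defer func() {
		for i := innermost; i >= 0; i-- {
			containers[i].afterEach()
		}
	}()

	for i, c := range containers {
		innermost = i
		if err = c.beforeEach(); err != nil {
			return err // teardowns still unwind via the deferred loop
		}
	}
	return subject()
}

func main() {
	cs := []container{
		{"outer", func() error { fmt.Println("BeforeEach outer"); return nil }, func() { fmt.Println("AfterEach outer") }},
		{"inner", func() error { fmt.Println("BeforeEach inner"); return nil }, func() { fmt.Println("AfterEach inner") }},
	}
	_ = runSpec(cs, func() error { fmt.Println("It runs"); return nil })
	// Prints: BeforeEach outer, BeforeEach inner, It runs,
	// AfterEach inner, AfterEach outer.
}
```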
-func (e *Specs) toMatch(description string, i int) []byte {
-	if i > len(e.names) {
-		return nil
-	}
-	if e.RegexScansFilePath {
-		return []byte(
-			description + " " +
-				e.names[i] + " " +
-				e.specs[i].subject.CodeLocation().FileName)
-	} else {
-		return []byte(
-			description + " " +
-				e.names[i])
-	}
-}
-
-func (e *Specs) applyRegExpFocusAndSkip(description string, focus, skip []string) {
-	var focusFilter, skipFilter *regexp.Regexp
-	if len(focus) > 0 {
-		focusFilter = regexp.MustCompile(strings.Join(focus, "|"))
-	}
-	if len(skip) > 0 {
-		skipFilter = regexp.MustCompile(strings.Join(skip, "|"))
-	}
-
-	for i, spec := range e.specs {
-		matchesFocus := true
-		matchesSkip := false
-
-		toMatch := e.toMatch(description, i)
-
-		if focusFilter != nil {
-			matchesFocus = focusFilter.Match(toMatch)
-		}
-
-		if skipFilter != nil {
-			matchesSkip = skipFilter.Match(toMatch)
-		}
-
-		if !matchesFocus || matchesSkip {
-			spec.Skip()
-		}
-	}
-}
-
-func (e *Specs) SkipMeasurements() {
-	for _, spec := range e.specs {
-		if spec.IsMeasurement() {
-			spec.Skip()
-		}
-	}
-}
-
-//sort.Interface
-
-func (e *Specs) Len() int {
-	return len(e.specs)
-}
-
-func (e *Specs) Less(i, j int) bool {
-	return e.names[i] < e.names[j]
-}
-
-func (e *Specs) Swap(i, j int) {
-	e.names[i], e.names[j] = e.names[j], e.names[i]
-	e.specs[i], e.specs[j] = e.specs[j], e.specs[i]
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/index_computer.go b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/index_computer.go
deleted file mode 100644
index 82272554..00000000
--- a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/index_computer.go
+++ /dev/null
@@ -1,55 +0,0 @@
-package spec_iterator
-
-func ParallelizedIndexRange(length int, parallelTotal int, parallelNode int) (startIndex int, count int) {
-	if length == 0 {
-		return 0, 0
-	}
-
-	// We have more nodes than tests. Trivial case.
-	if parallelTotal >= length {
-		if parallelNode > length {
-			return 0, 0
-		} else {
-			return parallelNode - 1, 1
-		}
-	}
-
-	// This is the minimum amount of tests that a node will be required to run
-	minTestsPerNode := length / parallelTotal
-
-	// This is the maximum amount of tests that a node will be required to run
-	// The algorithm guarantees that this would be equal to at least the minimum amount
-	// and at most one more
-	maxTestsPerNode := minTestsPerNode
-	if length%parallelTotal != 0 {
-		maxTestsPerNode++
-	}
-
-	// Number of nodes that will have to run the maximum amount of tests per node
-	numMaxLoadNodes := length % parallelTotal
-
-	// Number of nodes that precede the current node and will have to run the maximum amount of tests per node
-	var numPrecedingMaxLoadNodes int
-	if parallelNode > numMaxLoadNodes {
-		numPrecedingMaxLoadNodes = numMaxLoadNodes
-	} else {
-		numPrecedingMaxLoadNodes = parallelNode - 1
-	}
-
-	// Number of nodes that precede the current node and will have to run the minimum amount of tests per node
-	var numPrecedingMinLoadNodes int
-	if parallelNode <= numMaxLoadNodes {
-		numPrecedingMinLoadNodes = 0
-	} else {
-		numPrecedingMinLoadNodes = parallelNode - numMaxLoadNodes - 1
-	}
-
-	// Evaluate the test start index and number of tests to run
-	startIndex = numPrecedingMaxLoadNodes*maxTestsPerNode + numPrecedingMinLoadNodes*minTestsPerNode
-	if parallelNode > numMaxLoadNodes {
-		count = minTestsPerNode
-	} else {
-		count = maxTestsPerNode
-	}
-	return
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator.go b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator.go
deleted file mode 100644
index 99f548bc..00000000
--- a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package spec_iterator
-
-import (
-	"encoding/json"
-	"fmt"
-	"net/http"
-
-	"github.com/onsi/ginkgo/internal/spec"
-)
-
-type ParallelIterator struct {
-	specs  []*spec.Spec
-	host   string
-	client *http.Client
-}
-
-func NewParallelIterator(specs []*spec.Spec, host string) *ParallelIterator {
-	return &ParallelIterator{
-		specs:  specs,
-		host:   host,
-		client: &http.Client{},
-	}
-}
-
-func (s *ParallelIterator) Next() (*spec.Spec, error) {
-	resp, err := s.client.Get(s.host + "/counter")
-	if err != nil {
-		return nil, err
-	}
-	defer resp.Body.Close()
-
-	if resp.StatusCode != http.StatusOK {
-		return nil, fmt.Errorf("unexpected status code %d", resp.StatusCode)
-	}
-
-	var counter Counter
-	err = json.NewDecoder(resp.Body).Decode(&counter)
-	if err != nil {
-		return nil, err
-	}
-
-	if counter.Index >= len(s.specs) {
-		return nil, ErrClosed
-	}
-
-	return s.specs[counter.Index], nil
-}
-
-func (s *ParallelIterator) NumberOfSpecsPriorToIteration() int {
-	return len(s.specs)
-}
-
-func (s *ParallelIterator) NumberOfSpecsToProcessIfKnown() (int, bool) {
-	return -1, false
-}
-
-func (s *ParallelIterator) NumberOfSpecsThatWillBeRunIfKnown() (int, bool) {
-	return -1, false
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/serial_spec_iterator.go b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/serial_spec_iterator.go
deleted file mode 100644
index a51c93b8..00000000
--- a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/serial_spec_iterator.go
+++ /dev/null
@@ -1,45 +0,0 @@
-package spec_iterator
-
-import (
-	"github.com/onsi/ginkgo/internal/spec"
-)
-
-type SerialIterator struct {
-	specs []*spec.Spec
-	index int
-}
-
-func NewSerialIterator(specs []*spec.Spec) *SerialIterator {
-	return &SerialIterator{
-		specs: specs,
-		index: 0,
-	}
-}
-
-func (s *SerialIterator) Next() (*spec.Spec, error) {
-	if s.index >= len(s.specs) {
-		return nil, ErrClosed
-	}
-
-	spec := s.specs[s.index]
-	s.index += 1
-	return spec, nil
-}
-
-func (s *SerialIterator) NumberOfSpecsPriorToIteration() int {
-	return len(s.specs)
-}
-
-func (s *SerialIterator) NumberOfSpecsToProcessIfKnown() (int, bool) {
-	return len(s.specs), true
-}
-
-func (s *SerialIterator) NumberOfSpecsThatWillBeRunIfKnown() (int, bool) {
-	count := 0
-	for _, s := range s.specs {
-		if !s.Skipped() && !s.Pending() {
-			count += 1
-		}
-	}
-	return count, true
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/sharded_parallel_spec_iterator.go b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/sharded_parallel_spec_iterator.go
deleted file mode 100644
index ad4a3ea3..00000000
--- a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/sharded_parallel_spec_iterator.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package spec_iterator
-
-import "github.com/onsi/ginkgo/internal/spec"
-
-type ShardedParallelIterator struct {
-	specs    []*spec.Spec
-	index    int
-	maxIndex int
-}
-
-func NewShardedParallelIterator(specs []*spec.Spec, total int, node int) *ShardedParallelIterator {
-	startIndex, count := ParallelizedIndexRange(len(specs), total, node)
-
-	return &ShardedParallelIterator{
-		specs:    specs,
-		index:    startIndex,
-		maxIndex: startIndex + count,
-	}
-}
-
-func (s *ShardedParallelIterator) Next() (*spec.Spec, error) {
-	if s.index >= s.maxIndex {
-		return nil, ErrClosed
-	}
-
-	spec := s.specs[s.index]
-	s.index += 1
-	return spec, nil
-}
-
-func (s *ShardedParallelIterator) NumberOfSpecsPriorToIteration() int {
-	return len(s.specs)
-}
-
-func (s *ShardedParallelIterator) NumberOfSpecsToProcessIfKnown() (int, bool) {
-	return s.maxIndex - s.index, true
-}
-
-func (s *ShardedParallelIterator) NumberOfSpecsThatWillBeRunIfKnown() (int, bool) {
-	count := 0
-	for i := s.index; i < s.maxIndex; i += 1 {
-		if !s.specs[i].Skipped() && !s.specs[i].Pending() {
-			count += 1
-		}
-	}
-	return count, true
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/spec_iterator.go b/vendor/github.com/onsi/ginkgo/internal/spec_iterator/spec_iterator.go
deleted file mode 100644
index 74bffad6..00000000
--- a/vendor/github.com/onsi/ginkgo/internal/spec_iterator/spec_iterator.go
+++ /dev/null
@@ -1,20 +0,0 @@
-package spec_iterator
-
-import (
-	"errors"
-
-	"github.com/onsi/ginkgo/internal/spec"
-)
-
-var ErrClosed = errors.New("no more specs to run")
-
-type SpecIterator interface {
-	Next() (*spec.Spec, error)
-	NumberOfSpecsPriorToIteration() int
-	NumberOfSpecsToProcessIfKnown() (int, bool)
-	NumberOfSpecsThatWillBeRunIfKnown() (int, bool)
-}
-
-type Counter struct {
-	Index int `json:"index"`
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/writer/fake_writer.go b/vendor/github.com/onsi/ginkgo/internal/writer/fake_writer.go
deleted file mode 100644
index 6739c3f6..00000000
--- a/vendor/github.com/onsi/ginkgo/internal/writer/fake_writer.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package writer
-
-type FakeGinkgoWriter struct {
-	EventStream []string
-}
-
-func NewFake() *FakeGinkgoWriter {
-	return &FakeGinkgoWriter{
-		EventStream: []string{},
-	}
-}
-
-func (writer *FakeGinkgoWriter) AddEvent(event string) {
-	writer.EventStream = append(writer.EventStream, event)
-}
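The `Counter` payload and `ErrClosed` sentinel above are essentially the whole coordination protocol for parallel runs: each GET to the aggregator's `/counter` endpoint (served by `handleCounter` earlier in this diff) hands out the next global spec index, and a worker treats an out-of-range index as end of stream. A self-contained sketch of both halves, using `net/http/httptest` purely for illustration:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/http/httptest"
	"sync"
)

type counter struct {
	Index int `json:"index"`
}

func main() {
	var (
		mu   sync.Mutex
		next int
	)
	// Server side: every GET /counter returns the next global index.
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		mu.Lock()
		c := counter{Index: next}
		next++
		mu.Unlock()
		json.NewEncoder(w).Encode(c)
	}))
	defer srv.Close()

	// Client side: poll until the index moves past the local spec list,
	// the moral equivalent of ParallelIterator.Next returning ErrClosed.
	specs := []string{"spec A", "spec B", "spec C"}
	for {
		resp, err := http.Get(srv.URL + "/counter")
		if err != nil {
			panic(err)
		}
		var c counter
		if err := json.NewDecoder(resp.Body).Decode(&c); err != nil {
			panic(err)
		}
		resp.Body.Close()
		if c.Index >= len(specs) {
			break // no more specs to run
		}
		fmt.Println("running", specs[c.Index])
	}
}
```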
-
-func (writer *FakeGinkgoWriter) Truncate() {
-	writer.EventStream = append(writer.EventStream, "TRUNCATE")
-}
-
-func (writer *FakeGinkgoWriter) DumpOut() {
-	writer.EventStream = append(writer.EventStream, "DUMP")
-}
-
-func (writer *FakeGinkgoWriter) DumpOutWithHeader(header string) {
-	writer.EventStream = append(writer.EventStream, "DUMP_WITH_HEADER: "+header)
-}
-
-func (writer *FakeGinkgoWriter) Bytes() []byte {
-	writer.EventStream = append(writer.EventStream, "BYTES")
-	return nil
-}
-
-func (writer *FakeGinkgoWriter) Write(data []byte) (n int, err error) {
-	return 0, nil
-}
diff --git a/vendor/github.com/onsi/ginkgo/internal/writer/writer.go b/vendor/github.com/onsi/ginkgo/internal/writer/writer.go
deleted file mode 100644
index 98eca3bd..00000000
--- a/vendor/github.com/onsi/ginkgo/internal/writer/writer.go
+++ /dev/null
@@ -1,89 +0,0 @@
-package writer
-
-import (
-	"bytes"
-	"io"
-	"sync"
-)
-
-type WriterInterface interface {
-	io.Writer
-
-	Truncate()
-	DumpOut()
-	DumpOutWithHeader(header string)
-	Bytes() []byte
-}
-
-type Writer struct {
-	buffer     *bytes.Buffer
-	outWriter  io.Writer
-	lock       *sync.Mutex
-	stream     bool
-	redirector io.Writer
-}
-
-func New(outWriter io.Writer) *Writer {
-	return &Writer{
-		buffer:    &bytes.Buffer{},
-		lock:      &sync.Mutex{},
-		outWriter: outWriter,
-		stream:    true,
-	}
-}
-
-func (w *Writer) AndRedirectTo(writer io.Writer) {
-	w.redirector = writer
-}
-
-func (w *Writer) SetStream(stream bool) {
-	w.lock.Lock()
-	defer w.lock.Unlock()
-	w.stream = stream
-}
-
-func (w *Writer) Write(b []byte) (n int, err error) {
-	w.lock.Lock()
-	defer w.lock.Unlock()
-
-	n, err = w.buffer.Write(b)
-	if w.redirector != nil {
-		w.redirector.Write(b)
-	}
-	if w.stream {
-		return w.outWriter.Write(b)
-	}
-	return n, err
-}
-
-func (w *Writer) Truncate() {
-	w.lock.Lock()
-	defer w.lock.Unlock()
-	w.buffer.Reset()
-}
-
-func (w *Writer) DumpOut() {
-	w.lock.Lock()
-	defer w.lock.Unlock()
-	if !w.stream {
-		w.buffer.WriteTo(w.outWriter)
-	}
-}
-
-func (w *Writer) Bytes() []byte {
-	w.lock.Lock()
-	defer w.lock.Unlock()
-	b := w.buffer.Bytes()
-	copied := make([]byte, len(b))
-	copy(copied, b)
-	return copied
-}
-
-func (w *Writer) DumpOutWithHeader(header string) {
-	w.lock.Lock()
-	defer w.lock.Unlock()
-	if !w.stream && w.buffer.Len() > 0 {
-		w.outWriter.Write([]byte(header))
-		w.buffer.WriteTo(w.outWriter)
-	}
-}
diff --git a/vendor/github.com/onsi/ginkgo/reporters/default_reporter.go b/vendor/github.com/onsi/ginkgo/reporters/default_reporter.go
deleted file mode 100644
index f0c9f614..00000000
--- a/vendor/github.com/onsi/ginkgo/reporters/default_reporter.go
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
-Ginkgo's Default Reporter
-
-A number of command line flags are available to tweak Ginkgo's default output.
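The `Writer` deleted just above is the buffer behind `GinkgoWriter`: in streaming mode bytes pass straight through to the underlying writer, otherwise they accumulate so the runner can `Truncate` between specs and `DumpOut` captured output only when a spec fails. A condensed sketch of that capture-or-stream pattern (simplified from the code above, not a drop-in replacement):

```go
package main

import (
	"bytes"
	"fmt"
	"os"
	"sync"
)

// capturingWriter buffers writes unless streaming is enabled, mirroring
// the deleted Writer's SetStream/Truncate/DumpOut trio.
type capturingWriter struct {
	mu     sync.Mutex
	buf    bytes.Buffer
	out    *os.File
	stream bool
}

func (w *capturingWriter) Write(p []byte) (int, error) {
	w.mu.Lock()
	defer w.mu.Unlock()
	if w.stream {
		return w.out.Write(p)
	}
	return w.buf.Write(p)
}

// Truncate drops anything captured so far (called between specs).
func (w *capturingWriter) Truncate() { w.mu.Lock(); w.buf.Reset(); w.mu.Unlock() }

// DumpOut replays the captured output to the real destination.
func (w *capturingWriter) DumpOut() {
	w.mu.Lock()
	defer w.mu.Unlock()
	w.buf.WriteTo(w.out)
}

func main() {
	w := &capturingWriter{out: os.Stdout}
	fmt.Fprintln(w, "noisy debug output") // buffered, not shown yet
	specFailed := true
	if specFailed {
		w.DumpOut() // replay captured output only on failure
	}
}
```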
- -These are documented [here](http://onsi.github.io/ginkgo/#running_tests) -*/ -package reporters - -import ( - "github.com/onsi/ginkgo/config" - "github.com/onsi/ginkgo/reporters/stenographer" - "github.com/onsi/ginkgo/types" -) - -type DefaultReporter struct { - config config.DefaultReporterConfigType - stenographer stenographer.Stenographer - specSummaries []*types.SpecSummary -} - -func NewDefaultReporter(config config.DefaultReporterConfigType, stenographer stenographer.Stenographer) *DefaultReporter { - return &DefaultReporter{ - config: config, - stenographer: stenographer, - } -} - -func (reporter *DefaultReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) { - reporter.stenographer.AnnounceSuite(summary.SuiteDescription, config.RandomSeed, config.RandomizeAllSpecs, reporter.config.Succinct) - if config.ParallelTotal > 1 { - reporter.stenographer.AnnounceParallelRun(config.ParallelNode, config.ParallelTotal, reporter.config.Succinct) - } else { - reporter.stenographer.AnnounceNumberOfSpecs(summary.NumberOfSpecsThatWillBeRun, summary.NumberOfTotalSpecs, reporter.config.Succinct) - } -} - -func (reporter *DefaultReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) { - if setupSummary.State != types.SpecStatePassed { - reporter.stenographer.AnnounceBeforeSuiteFailure(setupSummary, reporter.config.Succinct, reporter.config.FullTrace) - } -} - -func (reporter *DefaultReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) { - if setupSummary.State != types.SpecStatePassed { - reporter.stenographer.AnnounceAfterSuiteFailure(setupSummary, reporter.config.Succinct, reporter.config.FullTrace) - } -} - -func (reporter *DefaultReporter) SpecWillRun(specSummary *types.SpecSummary) { - if reporter.config.Verbose && !reporter.config.Succinct && specSummary.State != types.SpecStatePending && specSummary.State != types.SpecStateSkipped { - reporter.stenographer.AnnounceSpecWillRun(specSummary) - } -} - -func (reporter *DefaultReporter) SpecDidComplete(specSummary *types.SpecSummary) { - switch specSummary.State { - case types.SpecStatePassed: - if specSummary.IsMeasurement { - reporter.stenographer.AnnounceSuccessfulMeasurement(specSummary, reporter.config.Succinct) - } else if specSummary.RunTime.Seconds() >= reporter.config.SlowSpecThreshold { - reporter.stenographer.AnnounceSuccessfulSlowSpec(specSummary, reporter.config.Succinct) - } else { - reporter.stenographer.AnnounceSuccessfulSpec(specSummary) - if reporter.config.ReportPassed { - reporter.stenographer.AnnounceCapturedOutput(specSummary.CapturedOutput) - } - } - case types.SpecStatePending: - reporter.stenographer.AnnouncePendingSpec(specSummary, reporter.config.NoisyPendings && !reporter.config.Succinct) - case types.SpecStateSkipped: - reporter.stenographer.AnnounceSkippedSpec(specSummary, reporter.config.Succinct || !reporter.config.NoisySkippings, reporter.config.FullTrace) - case types.SpecStateTimedOut: - reporter.stenographer.AnnounceSpecTimedOut(specSummary, reporter.config.Succinct, reporter.config.FullTrace) - case types.SpecStatePanicked: - reporter.stenographer.AnnounceSpecPanicked(specSummary, reporter.config.Succinct, reporter.config.FullTrace) - case types.SpecStateFailed: - reporter.stenographer.AnnounceSpecFailed(specSummary, reporter.config.Succinct, reporter.config.FullTrace) - } - - reporter.specSummaries = append(reporter.specSummaries, specSummary) -} - -func (reporter *DefaultReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) { - 
reporter.stenographer.SummarizeFailures(reporter.specSummaries) - reporter.stenographer.AnnounceSpecRunCompletion(summary, reporter.config.Succinct) -} diff --git a/vendor/github.com/onsi/ginkgo/reporters/fake_reporter.go b/vendor/github.com/onsi/ginkgo/reporters/fake_reporter.go deleted file mode 100644 index 27db4794..00000000 --- a/vendor/github.com/onsi/ginkgo/reporters/fake_reporter.go +++ /dev/null @@ -1,59 +0,0 @@ -package reporters - -import ( - "github.com/onsi/ginkgo/config" - "github.com/onsi/ginkgo/types" -) - -//FakeReporter is useful for testing purposes -type FakeReporter struct { - Config config.GinkgoConfigType - - BeginSummary *types.SuiteSummary - BeforeSuiteSummary *types.SetupSummary - SpecWillRunSummaries []*types.SpecSummary - SpecSummaries []*types.SpecSummary - AfterSuiteSummary *types.SetupSummary - EndSummary *types.SuiteSummary - - SpecWillRunStub func(specSummary *types.SpecSummary) - SpecDidCompleteStub func(specSummary *types.SpecSummary) -} - -func NewFakeReporter() *FakeReporter { - return &FakeReporter{ - SpecWillRunSummaries: make([]*types.SpecSummary, 0), - SpecSummaries: make([]*types.SpecSummary, 0), - } -} - -func (fakeR *FakeReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) { - fakeR.Config = config - fakeR.BeginSummary = summary -} - -func (fakeR *FakeReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) { - fakeR.BeforeSuiteSummary = setupSummary -} - -func (fakeR *FakeReporter) SpecWillRun(specSummary *types.SpecSummary) { - if fakeR.SpecWillRunStub != nil { - fakeR.SpecWillRunStub(specSummary) - } - fakeR.SpecWillRunSummaries = append(fakeR.SpecWillRunSummaries, specSummary) -} - -func (fakeR *FakeReporter) SpecDidComplete(specSummary *types.SpecSummary) { - if fakeR.SpecDidCompleteStub != nil { - fakeR.SpecDidCompleteStub(specSummary) - } - fakeR.SpecSummaries = append(fakeR.SpecSummaries, specSummary) -} - -func (fakeR *FakeReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) { - fakeR.AfterSuiteSummary = setupSummary -} - -func (fakeR *FakeReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) { - fakeR.EndSummary = summary -} diff --git a/vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go b/vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go deleted file mode 100644 index 01ddca6e..00000000 --- a/vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go +++ /dev/null @@ -1,178 +0,0 @@ -/* - -JUnit XML Reporter for Ginkgo - -For usage instructions: http://onsi.github.io/ginkgo/#generating_junit_xml_output - -*/ - -package reporters - -import ( - "encoding/xml" - "fmt" - "math" - "os" - "path/filepath" - "strings" - - "github.com/onsi/ginkgo/config" - "github.com/onsi/ginkgo/types" -) - -type JUnitTestSuite struct { - XMLName xml.Name `xml:"testsuite"` - TestCases []JUnitTestCase `xml:"testcase"` - Name string `xml:"name,attr"` - Tests int `xml:"tests,attr"` - Failures int `xml:"failures,attr"` - Errors int `xml:"errors,attr"` - Time float64 `xml:"time,attr"` -} - -type JUnitTestCase struct { - Name string `xml:"name,attr"` - ClassName string `xml:"classname,attr"` - FailureMessage *JUnitFailureMessage `xml:"failure,omitempty"` - Skipped *JUnitSkipped `xml:"skipped,omitempty"` - Time float64 `xml:"time,attr"` - SystemOut string `xml:"system-out,omitempty"` -} - -type JUnitFailureMessage struct { - Type string `xml:"type,attr"` - Message string `xml:",chardata"` -} - -type JUnitSkipped struct { - Message string `xml:",chardata"` -} - -type JUnitReporter struct { - suite 
JUnitTestSuite - filename string - testSuiteName string - ReporterConfig config.DefaultReporterConfigType -} - -//NewJUnitReporter creates a new JUnit XML reporter. The XML will be stored in the passed in filename. -func NewJUnitReporter(filename string) *JUnitReporter { - return &JUnitReporter{ - filename: filename, - } -} - -func (reporter *JUnitReporter) SpecSuiteWillBegin(ginkgoConfig config.GinkgoConfigType, summary *types.SuiteSummary) { - reporter.suite = JUnitTestSuite{ - Name: summary.SuiteDescription, - TestCases: []JUnitTestCase{}, - } - reporter.testSuiteName = summary.SuiteDescription - reporter.ReporterConfig = config.DefaultReporterConfig -} - -func (reporter *JUnitReporter) SpecWillRun(specSummary *types.SpecSummary) { -} - -func (reporter *JUnitReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) { - reporter.handleSetupSummary("BeforeSuite", setupSummary) -} - -func (reporter *JUnitReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) { - reporter.handleSetupSummary("AfterSuite", setupSummary) -} - -func failureMessage(failure types.SpecFailure) string { - return fmt.Sprintf("%s\n%s\n%s", failure.ComponentCodeLocation.String(), failure.Message, failure.Location.String()) -} - -func (reporter *JUnitReporter) handleSetupSummary(name string, setupSummary *types.SetupSummary) { - if setupSummary.State != types.SpecStatePassed { - testCase := JUnitTestCase{ - Name: name, - ClassName: reporter.testSuiteName, - } - - testCase.FailureMessage = &JUnitFailureMessage{ - Type: reporter.failureTypeForState(setupSummary.State), - Message: failureMessage(setupSummary.Failure), - } - testCase.SystemOut = setupSummary.CapturedOutput - testCase.Time = setupSummary.RunTime.Seconds() - reporter.suite.TestCases = append(reporter.suite.TestCases, testCase) - } -} - -func (reporter *JUnitReporter) SpecDidComplete(specSummary *types.SpecSummary) { - testCase := JUnitTestCase{ - Name: strings.Join(specSummary.ComponentTexts[1:], " "), - ClassName: reporter.testSuiteName, - } - if reporter.ReporterConfig.ReportPassed && specSummary.State == types.SpecStatePassed { - testCase.SystemOut = specSummary.CapturedOutput - } - if specSummary.State == types.SpecStateFailed || specSummary.State == types.SpecStateTimedOut || specSummary.State == types.SpecStatePanicked { - testCase.FailureMessage = &JUnitFailureMessage{ - Type: reporter.failureTypeForState(specSummary.State), - Message: failureMessage(specSummary.Failure), - } - if specSummary.State == types.SpecStatePanicked { - testCase.FailureMessage.Message += fmt.Sprintf("\n\nPanic: %s\n\nFull stack:\n%s", - specSummary.Failure.ForwardedPanic, - specSummary.Failure.Location.FullStackTrace) - } - testCase.SystemOut = specSummary.CapturedOutput - } - if specSummary.State == types.SpecStateSkipped || specSummary.State == types.SpecStatePending { - testCase.Skipped = &JUnitSkipped{} - if specSummary.Failure.Message != "" { - testCase.Skipped.Message = failureMessage(specSummary.Failure) - } - } - testCase.Time = specSummary.RunTime.Seconds() - reporter.suite.TestCases = append(reporter.suite.TestCases, testCase) -} - -func (reporter *JUnitReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) { - reporter.suite.Tests = summary.NumberOfSpecsThatWillBeRun - reporter.suite.Time = math.Trunc(summary.RunTime.Seconds()*1000) / 1000 - reporter.suite.Failures = summary.NumberOfFailedSpecs - reporter.suite.Errors = 0 - if reporter.ReporterConfig.ReportFile != "" { - reporter.filename = reporter.ReporterConfig.ReportFile - fmt.Printf("\nJUnit path was 
configured: %s\n", reporter.filename) - } - filePath, _ := filepath.Abs(reporter.filename) - dirPath := filepath.Dir(filePath) - err := os.MkdirAll(dirPath, os.ModePerm) - if err != nil { - fmt.Printf("\nFailed to create JUnit directory: %s\n\t%s", filePath, err.Error()) - } - file, err := os.Create(filePath) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to create JUnit report file: %s\n\t%s", filePath, err.Error()) - } - defer file.Close() - file.WriteString(xml.Header) - encoder := xml.NewEncoder(file) - encoder.Indent(" ", " ") - err = encoder.Encode(reporter.suite) - if err == nil { - fmt.Fprintf(os.Stdout, "\nJUnit report was created: %s\n", filePath) - } else { - fmt.Fprintf(os.Stderr,"\nFailed to generate JUnit report data:\n\t%s", err.Error()) - } -} - -func (reporter *JUnitReporter) failureTypeForState(state types.SpecState) string { - switch state { - case types.SpecStateFailed: - return "Failure" - case types.SpecStateTimedOut: - return "Timeout" - case types.SpecStatePanicked: - return "Panic" - default: - return "" - } -} diff --git a/vendor/github.com/onsi/ginkgo/reporters/reporter.go b/vendor/github.com/onsi/ginkgo/reporters/reporter.go deleted file mode 100644 index 348b9dfc..00000000 --- a/vendor/github.com/onsi/ginkgo/reporters/reporter.go +++ /dev/null @@ -1,15 +0,0 @@ -package reporters - -import ( - "github.com/onsi/ginkgo/config" - "github.com/onsi/ginkgo/types" -) - -type Reporter interface { - SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) - BeforeSuiteDidRun(setupSummary *types.SetupSummary) - SpecWillRun(specSummary *types.SpecSummary) - SpecDidComplete(specSummary *types.SpecSummary) - AfterSuiteDidRun(setupSummary *types.SetupSummary) - SpecSuiteDidEnd(summary *types.SuiteSummary) -} diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/console_logging.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/console_logging.go deleted file mode 100644 index 45b8f886..00000000 --- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/console_logging.go +++ /dev/null @@ -1,64 +0,0 @@ -package stenographer - -import ( - "fmt" - "strings" -) - -func (s *consoleStenographer) colorize(colorCode string, format string, args ...interface{}) string { - var out string - - if len(args) > 0 { - out = fmt.Sprintf(format, args...) - } else { - out = format - } - - if s.color { - return fmt.Sprintf("%s%s%s", colorCode, out, defaultStyle) - } else { - return out - } -} - -func (s *consoleStenographer) printBanner(text string, bannerCharacter string) { - fmt.Fprintln(s.w, text) - fmt.Fprintln(s.w, strings.Repeat(bannerCharacter, len(text))) -} - -func (s *consoleStenographer) printNewLine() { - fmt.Fprintln(s.w, "") -} - -func (s *consoleStenographer) printDelimiter() { - fmt.Fprintln(s.w, s.colorize(grayColor, "%s", strings.Repeat("-", 30))) -} - -func (s *consoleStenographer) print(indentation int, format string, args ...interface{}) { - fmt.Fprint(s.w, s.indent(indentation, format, args...)) -} - -func (s *consoleStenographer) println(indentation int, format string, args ...interface{}) { - fmt.Fprintln(s.w, s.indent(indentation, format, args...)) -} - -func (s *consoleStenographer) indent(indentation int, format string, args ...interface{}) string { - var text string - - if len(args) > 0 { - text = fmt.Sprintf(format, args...) 
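reporter.go above pins down the six-method `Reporter` contract that the default, JUnit, and fake reporters in this diff all implement. For orientation, a minimal custom reporter against that v1 interface could look like the following sketch (it assumes the vendored ginkgo v1 `config` and `types` packages are importable; `failCounter` is an illustrative name):

```go
package myreporter

import (
	"fmt"

	"github.com/onsi/ginkgo/config"
	"github.com/onsi/ginkgo/types"
)

// failCounter satisfies reporters.Reporter and just tallies failing specs.
type failCounter struct{ failed int }

func (r *failCounter) SpecSuiteWillBegin(conf config.GinkgoConfigType, summary *types.SuiteSummary) {}
func (r *failCounter) BeforeSuiteDidRun(setupSummary *types.SetupSummary)                           {}
func (r *failCounter) SpecWillRun(specSummary *types.SpecSummary)                                   {}

func (r *failCounter) SpecDidComplete(specSummary *types.SpecSummary) {
	// HasFailureState covers failed, panicked, and timed-out specs,
	// the same predicate SummarizeFailures uses later in this diff.
	if specSummary.HasFailureState() {
		r.failed++
	}
}

func (r *failCounter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {}

func (r *failCounter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
	fmt.Printf("suite finished with %d failing spec(s)\n", r.failed)
}
```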
- } else { - text = format - } - - stringArray := strings.Split(text, "\n") - padding := "" - if indentation >= 0 { - padding = strings.Repeat(" ", indentation) - } - for i, s := range stringArray { - stringArray[i] = fmt.Sprintf("%s%s", padding, s) - } - - return strings.Join(stringArray, "\n") -} diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/fake_stenographer.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/fake_stenographer.go deleted file mode 100644 index 1aa5b9db..00000000 --- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/fake_stenographer.go +++ /dev/null @@ -1,142 +0,0 @@ -package stenographer - -import ( - "sync" - - "github.com/onsi/ginkgo/types" -) - -func NewFakeStenographerCall(method string, args ...interface{}) FakeStenographerCall { - return FakeStenographerCall{ - Method: method, - Args: args, - } -} - -type FakeStenographer struct { - calls []FakeStenographerCall - lock *sync.Mutex -} - -type FakeStenographerCall struct { - Method string - Args []interface{} -} - -func NewFakeStenographer() *FakeStenographer { - stenographer := &FakeStenographer{ - lock: &sync.Mutex{}, - } - stenographer.Reset() - return stenographer -} - -func (stenographer *FakeStenographer) Calls() []FakeStenographerCall { - stenographer.lock.Lock() - defer stenographer.lock.Unlock() - - return stenographer.calls -} - -func (stenographer *FakeStenographer) Reset() { - stenographer.lock.Lock() - defer stenographer.lock.Unlock() - - stenographer.calls = make([]FakeStenographerCall, 0) -} - -func (stenographer *FakeStenographer) CallsTo(method string) []FakeStenographerCall { - stenographer.lock.Lock() - defer stenographer.lock.Unlock() - - results := make([]FakeStenographerCall, 0) - for _, call := range stenographer.calls { - if call.Method == method { - results = append(results, call) - } - } - - return results -} - -func (stenographer *FakeStenographer) registerCall(method string, args ...interface{}) { - stenographer.lock.Lock() - defer stenographer.lock.Unlock() - - stenographer.calls = append(stenographer.calls, NewFakeStenographerCall(method, args...)) -} - -func (stenographer *FakeStenographer) AnnounceSuite(description string, randomSeed int64, randomizingAll bool, succinct bool) { - stenographer.registerCall("AnnounceSuite", description, randomSeed, randomizingAll, succinct) -} - -func (stenographer *FakeStenographer) AnnounceAggregatedParallelRun(nodes int, succinct bool) { - stenographer.registerCall("AnnounceAggregatedParallelRun", nodes, succinct) -} - -func (stenographer *FakeStenographer) AnnounceParallelRun(node int, nodes int, succinct bool) { - stenographer.registerCall("AnnounceParallelRun", node, nodes, succinct) -} - -func (stenographer *FakeStenographer) AnnounceNumberOfSpecs(specsToRun int, total int, succinct bool) { - stenographer.registerCall("AnnounceNumberOfSpecs", specsToRun, total, succinct) -} - -func (stenographer *FakeStenographer) AnnounceTotalNumberOfSpecs(total int, succinct bool) { - stenographer.registerCall("AnnounceTotalNumberOfSpecs", total, succinct) -} - -func (stenographer *FakeStenographer) AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool) { - stenographer.registerCall("AnnounceSpecRunCompletion", summary, succinct) -} - -func (stenographer *FakeStenographer) AnnounceSpecWillRun(spec *types.SpecSummary) { - stenographer.registerCall("AnnounceSpecWillRun", spec) -} - -func (stenographer *FakeStenographer) AnnounceBeforeSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) { - 
stenographer.registerCall("AnnounceBeforeSuiteFailure", summary, succinct, fullTrace) -} - -func (stenographer *FakeStenographer) AnnounceAfterSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) { - stenographer.registerCall("AnnounceAfterSuiteFailure", summary, succinct, fullTrace) -} -func (stenographer *FakeStenographer) AnnounceCapturedOutput(output string) { - stenographer.registerCall("AnnounceCapturedOutput", output) -} - -func (stenographer *FakeStenographer) AnnounceSuccessfulSpec(spec *types.SpecSummary) { - stenographer.registerCall("AnnounceSuccessfulSpec", spec) -} - -func (stenographer *FakeStenographer) AnnounceSuccessfulSlowSpec(spec *types.SpecSummary, succinct bool) { - stenographer.registerCall("AnnounceSuccessfulSlowSpec", spec, succinct) -} - -func (stenographer *FakeStenographer) AnnounceSuccessfulMeasurement(spec *types.SpecSummary, succinct bool) { - stenographer.registerCall("AnnounceSuccessfulMeasurement", spec, succinct) -} - -func (stenographer *FakeStenographer) AnnouncePendingSpec(spec *types.SpecSummary, noisy bool) { - stenographer.registerCall("AnnouncePendingSpec", spec, noisy) -} - -func (stenographer *FakeStenographer) AnnounceSkippedSpec(spec *types.SpecSummary, succinct bool, fullTrace bool) { - stenographer.registerCall("AnnounceSkippedSpec", spec, succinct, fullTrace) -} - -func (stenographer *FakeStenographer) AnnounceSpecTimedOut(spec *types.SpecSummary, succinct bool, fullTrace bool) { - stenographer.registerCall("AnnounceSpecTimedOut", spec, succinct, fullTrace) -} - -func (stenographer *FakeStenographer) AnnounceSpecPanicked(spec *types.SpecSummary, succinct bool, fullTrace bool) { - stenographer.registerCall("AnnounceSpecPanicked", spec, succinct, fullTrace) -} - -func (stenographer *FakeStenographer) AnnounceSpecFailed(spec *types.SpecSummary, succinct bool, fullTrace bool) { - stenographer.registerCall("AnnounceSpecFailed", spec, succinct, fullTrace) -} - -func (stenographer *FakeStenographer) SummarizeFailures(summaries []*types.SpecSummary) { - stenographer.registerCall("SummarizeFailures", summaries) -} diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go deleted file mode 100644 index 638d6fbb..00000000 --- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go +++ /dev/null @@ -1,572 +0,0 @@ -/* -The stenographer is used by Ginkgo's reporters to generate output. - -Move along, nothing to see here. 
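All stenographer output funnels through the `colorize` helper from console_logging.go above: it formats the text, then brackets it with an ANSI escape code and the reset sequence when color is enabled. A standalone sketch using the same escape codes as the constants just below:

```go
package main

import "fmt"

// ANSI codes matching the stenographer constants below in this diff.
const (
	defaultStyle = "\x1b[0m"
	boldStyle    = "\x1b[1m"
	redColor     = "\x1b[91m"
	greenColor   = "\x1b[32m"
)

// colorize formats the text, then wraps it in a color code plus reset;
// with color disabled it returns the text untouched, like the helper above.
func colorize(enabled bool, code, format string, args ...interface{}) string {
	out := fmt.Sprintf(format, args...)
	if !enabled {
		return out
	}
	return code + out + defaultStyle
}

func main() {
	fmt.Println(colorize(true, greenColor, "%d Passed", 12))
	fmt.Println(colorize(true, redColor+boldStyle, "%d Failed", 3))
}
```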
-*/ - -package stenographer - -import ( - "fmt" - "io" - "runtime" - "strings" - - "github.com/onsi/ginkgo/types" -) - -const defaultStyle = "\x1b[0m" -const boldStyle = "\x1b[1m" -const redColor = "\x1b[91m" -const greenColor = "\x1b[32m" -const yellowColor = "\x1b[33m" -const cyanColor = "\x1b[36m" -const grayColor = "\x1b[90m" -const lightGrayColor = "\x1b[37m" - -type cursorStateType int - -const ( - cursorStateTop cursorStateType = iota - cursorStateStreaming - cursorStateMidBlock - cursorStateEndBlock -) - -type Stenographer interface { - AnnounceSuite(description string, randomSeed int64, randomizingAll bool, succinct bool) - AnnounceAggregatedParallelRun(nodes int, succinct bool) - AnnounceParallelRun(node int, nodes int, succinct bool) - AnnounceTotalNumberOfSpecs(total int, succinct bool) - AnnounceNumberOfSpecs(specsToRun int, total int, succinct bool) - AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool) - - AnnounceSpecWillRun(spec *types.SpecSummary) - AnnounceBeforeSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) - AnnounceAfterSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) - - AnnounceCapturedOutput(output string) - - AnnounceSuccessfulSpec(spec *types.SpecSummary) - AnnounceSuccessfulSlowSpec(spec *types.SpecSummary, succinct bool) - AnnounceSuccessfulMeasurement(spec *types.SpecSummary, succinct bool) - - AnnouncePendingSpec(spec *types.SpecSummary, noisy bool) - AnnounceSkippedSpec(spec *types.SpecSummary, succinct bool, fullTrace bool) - - AnnounceSpecTimedOut(spec *types.SpecSummary, succinct bool, fullTrace bool) - AnnounceSpecPanicked(spec *types.SpecSummary, succinct bool, fullTrace bool) - AnnounceSpecFailed(spec *types.SpecSummary, succinct bool, fullTrace bool) - - SummarizeFailures(summaries []*types.SpecSummary) -} - -func New(color bool, enableFlakes bool, writer io.Writer) Stenographer { - denoter := "•" - if runtime.GOOS == "windows" { - denoter = "+" - } - return &consoleStenographer{ - color: color, - denoter: denoter, - cursorState: cursorStateTop, - enableFlakes: enableFlakes, - w: writer, - } -} - -type consoleStenographer struct { - color bool - denoter string - cursorState cursorStateType - enableFlakes bool - w io.Writer -} - -var alternatingColors = []string{defaultStyle, grayColor} - -func (s *consoleStenographer) AnnounceSuite(description string, randomSeed int64, randomizingAll bool, succinct bool) { - if succinct { - s.print(0, "[%d] %s ", randomSeed, s.colorize(boldStyle, description)) - return - } - s.printBanner(fmt.Sprintf("Running Suite: %s", description), "=") - s.print(0, "Random Seed: %s", s.colorize(boldStyle, "%d", randomSeed)) - if randomizingAll { - s.print(0, " - Will randomize all specs") - } - s.printNewLine() -} - -func (s *consoleStenographer) AnnounceParallelRun(node int, nodes int, succinct bool) { - if succinct { - s.print(0, "- node #%d ", node) - return - } - s.println(0, - "Parallel test node %s/%s.", - s.colorize(boldStyle, "%d", node), - s.colorize(boldStyle, "%d", nodes), - ) - s.printNewLine() -} - -func (s *consoleStenographer) AnnounceAggregatedParallelRun(nodes int, succinct bool) { - if succinct { - s.print(0, "- %d nodes ", nodes) - return - } - s.println(0, - "Running in parallel across %s nodes", - s.colorize(boldStyle, "%d", nodes), - ) - s.printNewLine() -} - -func (s *consoleStenographer) AnnounceNumberOfSpecs(specsToRun int, total int, succinct bool) { - if succinct { - s.print(0, "- %d/%d specs ", specsToRun, total) - s.stream() - return - 
} - s.println(0, - "Will run %s of %s specs", - s.colorize(boldStyle, "%d", specsToRun), - s.colorize(boldStyle, "%d", total), - ) - - s.printNewLine() -} - -func (s *consoleStenographer) AnnounceTotalNumberOfSpecs(total int, succinct bool) { - if succinct { - s.print(0, "- %d specs ", total) - s.stream() - return - } - s.println(0, - "Will run %s specs", - s.colorize(boldStyle, "%d", total), - ) - - s.printNewLine() -} - -func (s *consoleStenographer) AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool) { - if succinct && summary.SuiteSucceeded { - s.print(0, " %s %s ", s.colorize(greenColor, "SUCCESS!"), summary.RunTime) - return - } - s.printNewLine() - color := greenColor - if !summary.SuiteSucceeded { - color = redColor - } - s.println(0, s.colorize(boldStyle+color, "Ran %d of %d Specs in %.3f seconds", summary.NumberOfSpecsThatWillBeRun, summary.NumberOfTotalSpecs, summary.RunTime.Seconds())) - - status := "" - if summary.SuiteSucceeded { - status = s.colorize(boldStyle+greenColor, "SUCCESS!") - } else { - status = s.colorize(boldStyle+redColor, "FAIL!") - } - - flakes := "" - if s.enableFlakes { - flakes = " | " + s.colorize(yellowColor+boldStyle, "%d Flaked", summary.NumberOfFlakedSpecs) - } - - s.print(0, - "%s -- %s | %s | %s | %s\n", - status, - s.colorize(greenColor+boldStyle, "%d Passed", summary.NumberOfPassedSpecs), - s.colorize(redColor+boldStyle, "%d Failed", summary.NumberOfFailedSpecs)+flakes, - s.colorize(yellowColor+boldStyle, "%d Pending", summary.NumberOfPendingSpecs), - s.colorize(cyanColor+boldStyle, "%d Skipped", summary.NumberOfSkippedSpecs), - ) -} - -func (s *consoleStenographer) AnnounceSpecWillRun(spec *types.SpecSummary) { - s.startBlock() - for i, text := range spec.ComponentTexts[1 : len(spec.ComponentTexts)-1] { - s.print(0, s.colorize(alternatingColors[i%2], text)+" ") - } - - indentation := 0 - if len(spec.ComponentTexts) > 2 { - indentation = 1 - s.printNewLine() - } - index := len(spec.ComponentTexts) - 1 - s.print(indentation, s.colorize(boldStyle, spec.ComponentTexts[index])) - s.printNewLine() - s.print(indentation, s.colorize(lightGrayColor, spec.ComponentCodeLocations[index].String())) - s.printNewLine() - s.midBlock() -} - -func (s *consoleStenographer) AnnounceBeforeSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) { - s.announceSetupFailure("BeforeSuite", summary, succinct, fullTrace) -} - -func (s *consoleStenographer) AnnounceAfterSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) { - s.announceSetupFailure("AfterSuite", summary, succinct, fullTrace) -} - -func (s *consoleStenographer) announceSetupFailure(name string, summary *types.SetupSummary, succinct bool, fullTrace bool) { - s.startBlock() - var message string - switch summary.State { - case types.SpecStateFailed: - message = "Failure" - case types.SpecStatePanicked: - message = "Panic" - case types.SpecStateTimedOut: - message = "Timeout" - } - - s.println(0, s.colorize(redColor+boldStyle, "%s [%.3f seconds]", message, summary.RunTime.Seconds())) - - indentation := s.printCodeLocationBlock([]string{name}, []types.CodeLocation{summary.CodeLocation}, summary.ComponentType, 0, summary.State, true) - - s.printNewLine() - s.printFailure(indentation, summary.State, summary.Failure, fullTrace) - - s.endBlock() -} - -func (s *consoleStenographer) AnnounceCapturedOutput(output string) { - if output == "" { - return - } - - s.startBlock() - s.println(0, output) - s.midBlock() -} - -func (s *consoleStenographer) 
AnnounceSuccessfulSpec(spec *types.SpecSummary) { - s.print(0, s.colorize(greenColor, s.denoter)) - s.stream() -} - -func (s *consoleStenographer) AnnounceSuccessfulSlowSpec(spec *types.SpecSummary, succinct bool) { - s.printBlockWithMessage( - s.colorize(greenColor, "%s [SLOW TEST:%.3f seconds]", s.denoter, spec.RunTime.Seconds()), - "", - spec, - succinct, - ) -} - -func (s *consoleStenographer) AnnounceSuccessfulMeasurement(spec *types.SpecSummary, succinct bool) { - s.printBlockWithMessage( - s.colorize(greenColor, "%s [MEASUREMENT]", s.denoter), - s.measurementReport(spec, succinct), - spec, - succinct, - ) -} - -func (s *consoleStenographer) AnnouncePendingSpec(spec *types.SpecSummary, noisy bool) { - if noisy { - s.printBlockWithMessage( - s.colorize(yellowColor, "P [PENDING]"), - "", - spec, - false, - ) - } else { - s.print(0, s.colorize(yellowColor, "P")) - s.stream() - } -} - -func (s *consoleStenographer) AnnounceSkippedSpec(spec *types.SpecSummary, succinct bool, fullTrace bool) { - // Skips at runtime will have a non-empty spec.Failure. All others should be succinct. - if succinct || spec.Failure == (types.SpecFailure{}) { - s.print(0, s.colorize(cyanColor, "S")) - s.stream() - } else { - s.startBlock() - s.println(0, s.colorize(cyanColor+boldStyle, "S [SKIPPING]%s [%.3f seconds]", s.failureContext(spec.Failure.ComponentType), spec.RunTime.Seconds())) - - indentation := s.printCodeLocationBlock(spec.ComponentTexts, spec.ComponentCodeLocations, spec.Failure.ComponentType, spec.Failure.ComponentIndex, spec.State, succinct) - - s.printNewLine() - s.printSkip(indentation, spec.Failure) - s.endBlock() - } -} - -func (s *consoleStenographer) AnnounceSpecTimedOut(spec *types.SpecSummary, succinct bool, fullTrace bool) { - s.printSpecFailure(fmt.Sprintf("%s... Timeout", s.denoter), spec, succinct, fullTrace) -} - -func (s *consoleStenographer) AnnounceSpecPanicked(spec *types.SpecSummary, succinct bool, fullTrace bool) { - s.printSpecFailure(fmt.Sprintf("%s! Panic", s.denoter), spec, succinct, fullTrace) -} - -func (s *consoleStenographer) AnnounceSpecFailed(spec *types.SpecSummary, succinct bool, fullTrace bool) { - s.printSpecFailure(fmt.Sprintf("%s Failure", s.denoter), spec, succinct, fullTrace) -} - -func (s *consoleStenographer) SummarizeFailures(summaries []*types.SpecSummary) { - failingSpecs := []*types.SpecSummary{} - - for _, summary := range summaries { - if summary.HasFailureState() { - failingSpecs = append(failingSpecs, summary) - } - } - - if len(failingSpecs) == 0 { - return - } - - s.printNewLine() - s.printNewLine() - plural := "s" - if len(failingSpecs) == 1 { - plural = "" - } - s.println(0, s.colorize(redColor+boldStyle, "Summarizing %d Failure%s:", len(failingSpecs), plural)) - for _, summary := range failingSpecs { - s.printNewLine() - if summary.HasFailureState() { - if summary.TimedOut() { - s.print(0, s.colorize(redColor+boldStyle, "[Timeout...] ")) - } else if summary.Panicked() { - s.print(0, s.colorize(redColor+boldStyle, "[Panic!] 
")) - } else if summary.Failed() { - s.print(0, s.colorize(redColor+boldStyle, "[Fail] ")) - } - s.printSpecContext(summary.ComponentTexts, summary.ComponentCodeLocations, summary.Failure.ComponentType, summary.Failure.ComponentIndex, summary.State, true) - s.printNewLine() - s.println(0, s.colorize(lightGrayColor, summary.Failure.Location.String())) - } - } -} - -func (s *consoleStenographer) startBlock() { - if s.cursorState == cursorStateStreaming { - s.printNewLine() - s.printDelimiter() - } else if s.cursorState == cursorStateMidBlock { - s.printNewLine() - } -} - -func (s *consoleStenographer) midBlock() { - s.cursorState = cursorStateMidBlock -} - -func (s *consoleStenographer) endBlock() { - s.printDelimiter() - s.cursorState = cursorStateEndBlock -} - -func (s *consoleStenographer) stream() { - s.cursorState = cursorStateStreaming -} - -func (s *consoleStenographer) printBlockWithMessage(header string, message string, spec *types.SpecSummary, succinct bool) { - s.startBlock() - s.println(0, header) - - indentation := s.printCodeLocationBlock(spec.ComponentTexts, spec.ComponentCodeLocations, types.SpecComponentTypeInvalid, 0, spec.State, succinct) - - if message != "" { - s.printNewLine() - s.println(indentation, message) - } - - s.endBlock() -} - -func (s *consoleStenographer) printSpecFailure(message string, spec *types.SpecSummary, succinct bool, fullTrace bool) { - s.startBlock() - s.println(0, s.colorize(redColor+boldStyle, "%s%s [%.3f seconds]", message, s.failureContext(spec.Failure.ComponentType), spec.RunTime.Seconds())) - - indentation := s.printCodeLocationBlock(spec.ComponentTexts, spec.ComponentCodeLocations, spec.Failure.ComponentType, spec.Failure.ComponentIndex, spec.State, succinct) - - s.printNewLine() - s.printFailure(indentation, spec.State, spec.Failure, fullTrace) - s.endBlock() -} - -func (s *consoleStenographer) failureContext(failedComponentType types.SpecComponentType) string { - switch failedComponentType { - case types.SpecComponentTypeBeforeSuite: - return " in Suite Setup (BeforeSuite)" - case types.SpecComponentTypeAfterSuite: - return " in Suite Teardown (AfterSuite)" - case types.SpecComponentTypeBeforeEach: - return " in Spec Setup (BeforeEach)" - case types.SpecComponentTypeJustBeforeEach: - return " in Spec Setup (JustBeforeEach)" - case types.SpecComponentTypeAfterEach: - return " in Spec Teardown (AfterEach)" - } - - return "" -} - -func (s *consoleStenographer) printSkip(indentation int, spec types.SpecFailure) { - s.println(indentation, s.colorize(cyanColor, spec.Message)) - s.printNewLine() - s.println(indentation, spec.Location.String()) -} - -func (s *consoleStenographer) printFailure(indentation int, state types.SpecState, failure types.SpecFailure, fullTrace bool) { - if state == types.SpecStatePanicked { - s.println(indentation, s.colorize(redColor+boldStyle, failure.Message)) - s.println(indentation, s.colorize(redColor, failure.ForwardedPanic)) - s.println(indentation, failure.Location.String()) - s.printNewLine() - s.println(indentation, s.colorize(redColor, "Full Stack Trace")) - s.println(indentation, failure.Location.FullStackTrace) - } else { - s.println(indentation, s.colorize(redColor, failure.Message)) - s.printNewLine() - s.println(indentation, failure.Location.String()) - if fullTrace { - s.printNewLine() - s.println(indentation, s.colorize(redColor, "Full Stack Trace")) - s.println(indentation, failure.Location.FullStackTrace) - } - } -} - -func (s *consoleStenographer) printSpecContext(componentTexts []string, 
componentCodeLocations []types.CodeLocation, failedComponentType types.SpecComponentType, failedComponentIndex int, state types.SpecState, succinct bool) int { - startIndex := 1 - indentation := 0 - - if len(componentTexts) == 1 { - startIndex = 0 - } - - for i := startIndex; i < len(componentTexts); i++ { - if (state.IsFailure() || state == types.SpecStateSkipped) && i == failedComponentIndex { - color := redColor - if state == types.SpecStateSkipped { - color = cyanColor - } - blockType := "" - switch failedComponentType { - case types.SpecComponentTypeBeforeSuite: - blockType = "BeforeSuite" - case types.SpecComponentTypeAfterSuite: - blockType = "AfterSuite" - case types.SpecComponentTypeBeforeEach: - blockType = "BeforeEach" - case types.SpecComponentTypeJustBeforeEach: - blockType = "JustBeforeEach" - case types.SpecComponentTypeAfterEach: - blockType = "AfterEach" - case types.SpecComponentTypeIt: - blockType = "It" - case types.SpecComponentTypeMeasure: - blockType = "Measurement" - } - if succinct { - s.print(0, s.colorize(color+boldStyle, "[%s] %s ", blockType, componentTexts[i])) - } else { - s.println(indentation, s.colorize(color+boldStyle, "%s [%s]", componentTexts[i], blockType)) - s.println(indentation, s.colorize(grayColor, "%s", componentCodeLocations[i])) - } - } else { - if succinct { - s.print(0, s.colorize(alternatingColors[i%2], "%s ", componentTexts[i])) - } else { - s.println(indentation, componentTexts[i]) - s.println(indentation, s.colorize(grayColor, "%s", componentCodeLocations[i])) - } - } - indentation++ - } - - return indentation -} - -func (s *consoleStenographer) printCodeLocationBlock(componentTexts []string, componentCodeLocations []types.CodeLocation, failedComponentType types.SpecComponentType, failedComponentIndex int, state types.SpecState, succinct bool) int { - indentation := s.printSpecContext(componentTexts, componentCodeLocations, failedComponentType, failedComponentIndex, state, succinct) - - if succinct { - if len(componentTexts) > 0 { - s.printNewLine() - s.print(0, s.colorize(lightGrayColor, "%s", componentCodeLocations[len(componentCodeLocations)-1])) - } - s.printNewLine() - indentation = 1 - } else { - indentation-- - } - - return indentation -} - -func (s *consoleStenographer) orderedMeasurementKeys(measurements map[string]*types.SpecMeasurement) []string { - orderedKeys := make([]string, len(measurements)) - for key, measurement := range measurements { - orderedKeys[measurement.Order] = key - } - return orderedKeys -} - -func (s *consoleStenographer) measurementReport(spec *types.SpecSummary, succinct bool) string { - if len(spec.Measurements) == 0 { - return "Found no measurements" - } - - message := []string{} - orderedKeys := s.orderedMeasurementKeys(spec.Measurements) - - if succinct { - message = append(message, fmt.Sprintf("%s samples:", s.colorize(boldStyle, "%d", spec.NumberOfSamples))) - for _, key := range orderedKeys { - measurement := spec.Measurements[key] - message = append(message, fmt.Sprintf(" %s - %s: %s%s, %s: %s%s ± %s%s, %s: %s%s", - s.colorize(boldStyle, "%s", measurement.Name), - measurement.SmallestLabel, - s.colorize(greenColor, measurement.PrecisionFmt(), measurement.Smallest), - measurement.Units, - measurement.AverageLabel, - s.colorize(cyanColor, measurement.PrecisionFmt(), measurement.Average), - measurement.Units, - s.colorize(cyanColor, measurement.PrecisionFmt(), measurement.StdDeviation), - measurement.Units, - measurement.LargestLabel, - s.colorize(redColor, measurement.PrecisionFmt(), 
measurement.Largest), - measurement.Units, - )) - } - } else { - message = append(message, fmt.Sprintf("Ran %s samples:", s.colorize(boldStyle, "%d", spec.NumberOfSamples))) - for _, key := range orderedKeys { - measurement := spec.Measurements[key] - info := "" - if measurement.Info != nil { - message = append(message, fmt.Sprintf("%v", measurement.Info)) - } - - message = append(message, fmt.Sprintf("%s:\n%s %s: %s%s\n %s: %s%s\n %s: %s%s ± %s%s", - s.colorize(boldStyle, "%s", measurement.Name), - info, - measurement.SmallestLabel, - s.colorize(greenColor, measurement.PrecisionFmt(), measurement.Smallest), - measurement.Units, - measurement.LargestLabel, - s.colorize(redColor, measurement.PrecisionFmt(), measurement.Largest), - measurement.Units, - measurement.AverageLabel, - s.colorize(cyanColor, measurement.PrecisionFmt(), measurement.Average), - measurement.Units, - s.colorize(cyanColor, measurement.PrecisionFmt(), measurement.StdDeviation), - measurement.Units, - )) - } - } - - return strings.Join(message, "\n") -} diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/LICENSE b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/LICENSE deleted file mode 100644 index 91b5cef3..00000000 --- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2016 Yasuhiro Matsumoto - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/README.md b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/README.md deleted file mode 100644 index e84226a7..00000000 --- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/README.md +++ /dev/null @@ -1,43 +0,0 @@ -# go-colorable - -Colorable writer for windows. - -For example, most of logger packages doesn't show colors on windows. (I know we can do it with ansicon. But I don't want.) -This package is possible to handle escape sequence for ansi color on windows. - -## Too Bad! - -![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/bad.png) - - -## So Good! 
- -![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/good.png) - -## Usage - -```go -logrus.SetFormatter(&logrus.TextFormatter{ForceColors: true}) -logrus.SetOutput(colorable.NewColorableStdout()) - -logrus.Info("succeeded") -logrus.Warn("not correct") -logrus.Error("something error") -logrus.Fatal("panic") -``` - -You can compile above code on non-windows OSs. - -## Installation - -``` -$ go get github.com/mattn/go-colorable -``` - -# License - -MIT - -# Author - -Yasuhiro Matsumoto (a.k.a mattn) diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_others.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_others.go deleted file mode 100644 index 52d6653b..00000000 --- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_others.go +++ /dev/null @@ -1,24 +0,0 @@ -// +build !windows - -package colorable - -import ( - "io" - "os" -) - -func NewColorable(file *os.File) io.Writer { - if file == nil { - panic("nil passed instead of *os.File to NewColorable()") - } - - return file -} - -func NewColorableStdout() io.Writer { - return os.Stdout -} - -func NewColorableStderr() io.Writer { - return os.Stderr -} diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_windows.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_windows.go deleted file mode 100644 index 10880092..00000000 --- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_windows.go +++ /dev/null @@ -1,783 +0,0 @@ -package colorable - -import ( - "bytes" - "fmt" - "io" - "math" - "os" - "strconv" - "strings" - "syscall" - "unsafe" - - "github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty" -) - -const ( - foregroundBlue = 0x1 - foregroundGreen = 0x2 - foregroundRed = 0x4 - foregroundIntensity = 0x8 - foregroundMask = (foregroundRed | foregroundBlue | foregroundGreen | foregroundIntensity) - backgroundBlue = 0x10 - backgroundGreen = 0x20 - backgroundRed = 0x40 - backgroundIntensity = 0x80 - backgroundMask = (backgroundRed | backgroundBlue | backgroundGreen | backgroundIntensity) -) - -type wchar uint16 -type short int16 -type dword uint32 -type word uint16 - -type coord struct { - x short - y short -} - -type smallRect struct { - left short - top short - right short - bottom short -} - -type consoleScreenBufferInfo struct { - size coord - cursorPosition coord - attributes word - window smallRect - maximumWindowSize coord -} - -var ( - kernel32 = syscall.NewLazyDLL("kernel32.dll") - procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo") - procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute") - procSetConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition") - procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW") - procFillConsoleOutputAttribute = kernel32.NewProc("FillConsoleOutputAttribute") -) - -type Writer struct { - out io.Writer - handle syscall.Handle - lastbuf bytes.Buffer - oldattr word -} - -func NewColorable(file *os.File) io.Writer { - if file == nil { - panic("nil passed instead of *os.File to NewColorable()") - } - - if isatty.IsTerminal(file.Fd()) { - var csbi consoleScreenBufferInfo - handle := syscall.Handle(file.Fd()) - procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) - return &Writer{out: file, handle: handle, oldattr: csbi.attributes} - } else { - 
return file - } -} - -func NewColorableStdout() io.Writer { - return NewColorable(os.Stdout) -} - -func NewColorableStderr() io.Writer { - return NewColorable(os.Stderr) -} - -var color256 = map[int]int{ - 0: 0x000000, - 1: 0x800000, - 2: 0x008000, - 3: 0x808000, - 4: 0x000080, - 5: 0x800080, - 6: 0x008080, - 7: 0xc0c0c0, - 8: 0x808080, - 9: 0xff0000, - 10: 0x00ff00, - 11: 0xffff00, - 12: 0x0000ff, - 13: 0xff00ff, - 14: 0x00ffff, - 15: 0xffffff, - 16: 0x000000, - 17: 0x00005f, - 18: 0x000087, - 19: 0x0000af, - 20: 0x0000d7, - 21: 0x0000ff, - 22: 0x005f00, - 23: 0x005f5f, - 24: 0x005f87, - 25: 0x005faf, - 26: 0x005fd7, - 27: 0x005fff, - 28: 0x008700, - 29: 0x00875f, - 30: 0x008787, - 31: 0x0087af, - 32: 0x0087d7, - 33: 0x0087ff, - 34: 0x00af00, - 35: 0x00af5f, - 36: 0x00af87, - 37: 0x00afaf, - 38: 0x00afd7, - 39: 0x00afff, - 40: 0x00d700, - 41: 0x00d75f, - 42: 0x00d787, - 43: 0x00d7af, - 44: 0x00d7d7, - 45: 0x00d7ff, - 46: 0x00ff00, - 47: 0x00ff5f, - 48: 0x00ff87, - 49: 0x00ffaf, - 50: 0x00ffd7, - 51: 0x00ffff, - 52: 0x5f0000, - 53: 0x5f005f, - 54: 0x5f0087, - 55: 0x5f00af, - 56: 0x5f00d7, - 57: 0x5f00ff, - 58: 0x5f5f00, - 59: 0x5f5f5f, - 60: 0x5f5f87, - 61: 0x5f5faf, - 62: 0x5f5fd7, - 63: 0x5f5fff, - 64: 0x5f8700, - 65: 0x5f875f, - 66: 0x5f8787, - 67: 0x5f87af, - 68: 0x5f87d7, - 69: 0x5f87ff, - 70: 0x5faf00, - 71: 0x5faf5f, - 72: 0x5faf87, - 73: 0x5fafaf, - 74: 0x5fafd7, - 75: 0x5fafff, - 76: 0x5fd700, - 77: 0x5fd75f, - 78: 0x5fd787, - 79: 0x5fd7af, - 80: 0x5fd7d7, - 81: 0x5fd7ff, - 82: 0x5fff00, - 83: 0x5fff5f, - 84: 0x5fff87, - 85: 0x5fffaf, - 86: 0x5fffd7, - 87: 0x5fffff, - 88: 0x870000, - 89: 0x87005f, - 90: 0x870087, - 91: 0x8700af, - 92: 0x8700d7, - 93: 0x8700ff, - 94: 0x875f00, - 95: 0x875f5f, - 96: 0x875f87, - 97: 0x875faf, - 98: 0x875fd7, - 99: 0x875fff, - 100: 0x878700, - 101: 0x87875f, - 102: 0x878787, - 103: 0x8787af, - 104: 0x8787d7, - 105: 0x8787ff, - 106: 0x87af00, - 107: 0x87af5f, - 108: 0x87af87, - 109: 0x87afaf, - 110: 0x87afd7, - 111: 0x87afff, - 112: 0x87d700, - 113: 0x87d75f, - 114: 0x87d787, - 115: 0x87d7af, - 116: 0x87d7d7, - 117: 0x87d7ff, - 118: 0x87ff00, - 119: 0x87ff5f, - 120: 0x87ff87, - 121: 0x87ffaf, - 122: 0x87ffd7, - 123: 0x87ffff, - 124: 0xaf0000, - 125: 0xaf005f, - 126: 0xaf0087, - 127: 0xaf00af, - 128: 0xaf00d7, - 129: 0xaf00ff, - 130: 0xaf5f00, - 131: 0xaf5f5f, - 132: 0xaf5f87, - 133: 0xaf5faf, - 134: 0xaf5fd7, - 135: 0xaf5fff, - 136: 0xaf8700, - 137: 0xaf875f, - 138: 0xaf8787, - 139: 0xaf87af, - 140: 0xaf87d7, - 141: 0xaf87ff, - 142: 0xafaf00, - 143: 0xafaf5f, - 144: 0xafaf87, - 145: 0xafafaf, - 146: 0xafafd7, - 147: 0xafafff, - 148: 0xafd700, - 149: 0xafd75f, - 150: 0xafd787, - 151: 0xafd7af, - 152: 0xafd7d7, - 153: 0xafd7ff, - 154: 0xafff00, - 155: 0xafff5f, - 156: 0xafff87, - 157: 0xafffaf, - 158: 0xafffd7, - 159: 0xafffff, - 160: 0xd70000, - 161: 0xd7005f, - 162: 0xd70087, - 163: 0xd700af, - 164: 0xd700d7, - 165: 0xd700ff, - 166: 0xd75f00, - 167: 0xd75f5f, - 168: 0xd75f87, - 169: 0xd75faf, - 170: 0xd75fd7, - 171: 0xd75fff, - 172: 0xd78700, - 173: 0xd7875f, - 174: 0xd78787, - 175: 0xd787af, - 176: 0xd787d7, - 177: 0xd787ff, - 178: 0xd7af00, - 179: 0xd7af5f, - 180: 0xd7af87, - 181: 0xd7afaf, - 182: 0xd7afd7, - 183: 0xd7afff, - 184: 0xd7d700, - 185: 0xd7d75f, - 186: 0xd7d787, - 187: 0xd7d7af, - 188: 0xd7d7d7, - 189: 0xd7d7ff, - 190: 0xd7ff00, - 191: 0xd7ff5f, - 192: 0xd7ff87, - 193: 0xd7ffaf, - 194: 0xd7ffd7, - 195: 0xd7ffff, - 196: 0xff0000, - 197: 0xff005f, - 198: 0xff0087, - 199: 0xff00af, - 200: 0xff00d7, - 201: 0xff00ff, - 202: 0xff5f00, - 203: 
0xff5f5f, - 204: 0xff5f87, - 205: 0xff5faf, - 206: 0xff5fd7, - 207: 0xff5fff, - 208: 0xff8700, - 209: 0xff875f, - 210: 0xff8787, - 211: 0xff87af, - 212: 0xff87d7, - 213: 0xff87ff, - 214: 0xffaf00, - 215: 0xffaf5f, - 216: 0xffaf87, - 217: 0xffafaf, - 218: 0xffafd7, - 219: 0xffafff, - 220: 0xffd700, - 221: 0xffd75f, - 222: 0xffd787, - 223: 0xffd7af, - 224: 0xffd7d7, - 225: 0xffd7ff, - 226: 0xffff00, - 227: 0xffff5f, - 228: 0xffff87, - 229: 0xffffaf, - 230: 0xffffd7, - 231: 0xffffff, - 232: 0x080808, - 233: 0x121212, - 234: 0x1c1c1c, - 235: 0x262626, - 236: 0x303030, - 237: 0x3a3a3a, - 238: 0x444444, - 239: 0x4e4e4e, - 240: 0x585858, - 241: 0x626262, - 242: 0x6c6c6c, - 243: 0x767676, - 244: 0x808080, - 245: 0x8a8a8a, - 246: 0x949494, - 247: 0x9e9e9e, - 248: 0xa8a8a8, - 249: 0xb2b2b2, - 250: 0xbcbcbc, - 251: 0xc6c6c6, - 252: 0xd0d0d0, - 253: 0xdadada, - 254: 0xe4e4e4, - 255: 0xeeeeee, -} - -func (w *Writer) Write(data []byte) (n int, err error) { - var csbi consoleScreenBufferInfo - procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) - - er := bytes.NewBuffer(data) -loop: - for { - r1, _, err := procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) - if r1 == 0 { - break loop - } - - c1, _, err := er.ReadRune() - if err != nil { - break loop - } - if c1 != 0x1b { - fmt.Fprint(w.out, string(c1)) - continue - } - c2, _, err := er.ReadRune() - if err != nil { - w.lastbuf.WriteRune(c1) - break loop - } - if c2 != 0x5b { - w.lastbuf.WriteRune(c1) - w.lastbuf.WriteRune(c2) - continue - } - - var buf bytes.Buffer - var m rune - for { - c, _, err := er.ReadRune() - if err != nil { - w.lastbuf.WriteRune(c1) - w.lastbuf.WriteRune(c2) - w.lastbuf.Write(buf.Bytes()) - break loop - } - if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { - m = c - break - } - buf.Write([]byte(string(c))) - } - - var csbi consoleScreenBufferInfo - switch m { - case 'A': - n, err = strconv.Atoi(buf.String()) - if err != nil { - continue - } - procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) - csbi.cursorPosition.y -= short(n) - procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) - case 'B': - n, err = strconv.Atoi(buf.String()) - if err != nil { - continue - } - procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) - csbi.cursorPosition.y += short(n) - procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) - case 'C': - n, err = strconv.Atoi(buf.String()) - if err != nil { - continue - } - procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) - csbi.cursorPosition.x -= short(n) - procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) - case 'D': - n, err = strconv.Atoi(buf.String()) - if err != nil { - continue - } - if n, err = strconv.Atoi(buf.String()); err == nil { - var csbi consoleScreenBufferInfo - procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) - csbi.cursorPosition.x += short(n) - procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) - } - case 'E': - n, err = strconv.Atoi(buf.String()) - if err != nil { - continue - } - procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) - csbi.cursorPosition.x = 0 - csbi.cursorPosition.y += short(n) - 
procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) - case 'F': - n, err = strconv.Atoi(buf.String()) - if err != nil { - continue - } - procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) - csbi.cursorPosition.x = 0 - csbi.cursorPosition.y -= short(n) - procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) - case 'G': - n, err = strconv.Atoi(buf.String()) - if err != nil { - continue - } - procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) - csbi.cursorPosition.x = short(n) - procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) - case 'H': - token := strings.Split(buf.String(), ";") - if len(token) != 2 { - continue - } - n1, err := strconv.Atoi(token[0]) - if err != nil { - continue - } - n2, err := strconv.Atoi(token[1]) - if err != nil { - continue - } - csbi.cursorPosition.x = short(n2) - csbi.cursorPosition.x = short(n1) - procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) - case 'J': - n, err := strconv.Atoi(buf.String()) - if err != nil { - continue - } - var cursor coord - switch n { - case 0: - cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} - case 1: - cursor = coord{x: csbi.window.left, y: csbi.window.top} - case 2: - cursor = coord{x: csbi.window.left, y: csbi.window.top} - } - var count, written dword - count = dword(csbi.size.x - csbi.cursorPosition.x + (csbi.size.y-csbi.cursorPosition.y)*csbi.size.x) - procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) - procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) - case 'K': - n, err := strconv.Atoi(buf.String()) - if err != nil { - continue - } - var cursor coord - switch n { - case 0: - cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} - case 1: - cursor = coord{x: csbi.window.left, y: csbi.window.top + csbi.cursorPosition.y} - case 2: - cursor = coord{x: csbi.window.left, y: csbi.window.top + csbi.cursorPosition.y} - } - var count, written dword - count = dword(csbi.size.x - csbi.cursorPosition.x) - procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) - procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) - case 'm': - attr := csbi.attributes - cs := buf.String() - if cs == "" { - procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(w.oldattr)) - continue - } - token := strings.Split(cs, ";") - for i := 0; i < len(token); i += 1 { - ns := token[i] - if n, err = strconv.Atoi(ns); err == nil { - switch { - case n == 0 || n == 100: - attr = w.oldattr - case 1 <= n && n <= 5: - attr |= foregroundIntensity - case n == 7: - attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4) - case 22 == n || n == 25 || n == 25: - attr |= foregroundIntensity - case n == 27: - attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4) - case 30 <= n && n <= 37: - attr = (attr & backgroundMask) - if (n-30)&1 != 0 { - attr |= foregroundRed - } - if (n-30)&2 != 0 { - attr 
|= foregroundGreen - } - if (n-30)&4 != 0 { - attr |= foregroundBlue - } - case n == 38: // set foreground color. - if i < len(token)-2 && (token[i+1] == "5" || token[i+1] == "05") { - if n256, err := strconv.Atoi(token[i+2]); err == nil { - if n256foreAttr == nil { - n256setup() - } - attr &= backgroundMask - attr |= n256foreAttr[n256] - i += 2 - } - } else { - attr = attr & (w.oldattr & backgroundMask) - } - case n == 39: // reset foreground color. - attr &= backgroundMask - attr |= w.oldattr & foregroundMask - case 40 <= n && n <= 47: - attr = (attr & foregroundMask) - if (n-40)&1 != 0 { - attr |= backgroundRed - } - if (n-40)&2 != 0 { - attr |= backgroundGreen - } - if (n-40)&4 != 0 { - attr |= backgroundBlue - } - case n == 48: // set background color. - if i < len(token)-2 && token[i+1] == "5" { - if n256, err := strconv.Atoi(token[i+2]); err == nil { - if n256backAttr == nil { - n256setup() - } - attr &= foregroundMask - attr |= n256backAttr[n256] - i += 2 - } - } else { - attr = attr & (w.oldattr & foregroundMask) - } - case n == 49: // reset foreground color. - attr &= foregroundMask - attr |= w.oldattr & backgroundMask - case 90 <= n && n <= 97: - attr = (attr & backgroundMask) - attr |= foregroundIntensity - if (n-90)&1 != 0 { - attr |= foregroundRed - } - if (n-90)&2 != 0 { - attr |= foregroundGreen - } - if (n-90)&4 != 0 { - attr |= foregroundBlue - } - case 100 <= n && n <= 107: - attr = (attr & foregroundMask) - attr |= backgroundIntensity - if (n-100)&1 != 0 { - attr |= backgroundRed - } - if (n-100)&2 != 0 { - attr |= backgroundGreen - } - if (n-100)&4 != 0 { - attr |= backgroundBlue - } - } - procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(attr)) - } - } - } - } - return len(data) - w.lastbuf.Len(), nil -} - -type consoleColor struct { - rgb int - red bool - green bool - blue bool - intensity bool -} - -func (c consoleColor) foregroundAttr() (attr word) { - if c.red { - attr |= foregroundRed - } - if c.green { - attr |= foregroundGreen - } - if c.blue { - attr |= foregroundBlue - } - if c.intensity { - attr |= foregroundIntensity - } - return -} - -func (c consoleColor) backgroundAttr() (attr word) { - if c.red { - attr |= backgroundRed - } - if c.green { - attr |= backgroundGreen - } - if c.blue { - attr |= backgroundBlue - } - if c.intensity { - attr |= backgroundIntensity - } - return -} - -var color16 = []consoleColor{ - consoleColor{0x000000, false, false, false, false}, - consoleColor{0x000080, false, false, true, false}, - consoleColor{0x008000, false, true, false, false}, - consoleColor{0x008080, false, true, true, false}, - consoleColor{0x800000, true, false, false, false}, - consoleColor{0x800080, true, false, true, false}, - consoleColor{0x808000, true, true, false, false}, - consoleColor{0xc0c0c0, true, true, true, false}, - consoleColor{0x808080, false, false, false, true}, - consoleColor{0x0000ff, false, false, true, true}, - consoleColor{0x00ff00, false, true, false, true}, - consoleColor{0x00ffff, false, true, true, true}, - consoleColor{0xff0000, true, false, false, true}, - consoleColor{0xff00ff, true, false, true, true}, - consoleColor{0xffff00, true, true, false, true}, - consoleColor{0xffffff, true, true, true, true}, -} - -type hsv struct { - h, s, v float32 -} - -func (a hsv) dist(b hsv) float32 { - dh := a.h - b.h - switch { - case dh > 0.5: - dh = 1 - dh - case dh < -0.5: - dh = -1 - dh - } - ds := a.s - b.s - dv := a.v - b.v - return float32(math.Sqrt(float64(dh*dh + ds*ds + dv*dv))) -} - -func toHSV(rgb int) hsv { - r, g, b := 
float32((rgb&0xFF0000)>>16)/256.0, - float32((rgb&0x00FF00)>>8)/256.0, - float32(rgb&0x0000FF)/256.0 - min, max := minmax3f(r, g, b) - h := max - min - if h > 0 { - if max == r { - h = (g - b) / h - if h < 0 { - h += 6 - } - } else if max == g { - h = 2 + (b-r)/h - } else { - h = 4 + (r-g)/h - } - } - h /= 6.0 - s := max - min - if max != 0 { - s /= max - } - v := max - return hsv{h: h, s: s, v: v} -} - -type hsvTable []hsv - -func toHSVTable(rgbTable []consoleColor) hsvTable { - t := make(hsvTable, len(rgbTable)) - for i, c := range rgbTable { - t[i] = toHSV(c.rgb) - } - return t -} - -func (t hsvTable) find(rgb int) consoleColor { - hsv := toHSV(rgb) - n := 7 - l := float32(5.0) - for i, p := range t { - d := hsv.dist(p) - if d < l { - l, n = d, i - } - } - return color16[n] -} - -func minmax3f(a, b, c float32) (min, max float32) { - if a < b { - if b < c { - return a, c - } else if a < c { - return a, b - } else { - return c, b - } - } else { - if a < c { - return b, c - } else if b < c { - return b, a - } else { - return c, a - } - } -} - -var n256foreAttr []word -var n256backAttr []word - -func n256setup() { - n256foreAttr = make([]word, 256) - n256backAttr = make([]word, 256) - t := toHSVTable(color16) - for i, rgb := range color256 { - c := t.find(rgb) - n256foreAttr[i] = c.foregroundAttr() - n256backAttr[i] = c.backgroundAttr() - } -} diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/noncolorable.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/noncolorable.go deleted file mode 100644 index fb976dbd..00000000 --- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/noncolorable.go +++ /dev/null @@ -1,57 +0,0 @@ -package colorable - -import ( - "bytes" - "fmt" - "io" -) - -type NonColorable struct { - out io.Writer - lastbuf bytes.Buffer -} - -func NewNonColorable(w io.Writer) io.Writer { - return &NonColorable{out: w} -} - -func (w *NonColorable) Write(data []byte) (n int, err error) { - er := bytes.NewBuffer(data) -loop: - for { - c1, _, err := er.ReadRune() - if err != nil { - break loop - } - if c1 != 0x1b { - fmt.Fprint(w.out, string(c1)) - continue - } - c2, _, err := er.ReadRune() - if err != nil { - w.lastbuf.WriteRune(c1) - break loop - } - if c2 != 0x5b { - w.lastbuf.WriteRune(c1) - w.lastbuf.WriteRune(c2) - continue - } - - var buf bytes.Buffer - for { - c, _, err := er.ReadRune() - if err != nil { - w.lastbuf.WriteRune(c1) - w.lastbuf.WriteRune(c2) - w.lastbuf.Write(buf.Bytes()) - break loop - } - if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { - break - } - buf.Write([]byte(string(c))) - } - } - return len(data) - w.lastbuf.Len(), nil -} diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/LICENSE b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/LICENSE deleted file mode 100644 index 65dc692b..00000000 --- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/LICENSE +++ /dev/null @@ -1,9 +0,0 @@ -Copyright (c) Yasuhiro MATSUMOTO - -MIT License (Expat) - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The 
above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/README.md b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/README.md deleted file mode 100644 index 74845de4..00000000 --- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/README.md +++ /dev/null @@ -1,37 +0,0 @@ -# go-isatty - -isatty for golang - -## Usage - -```go -package main - -import ( - "fmt" - "github.com/mattn/go-isatty" - "os" -) - -func main() { - if isatty.IsTerminal(os.Stdout.Fd()) { - fmt.Println("Is Terminal") - } else { - fmt.Println("Is Not Terminal") - } -} -``` - -## Installation - -``` -$ go get github.com/mattn/go-isatty -``` - -# License - -MIT - -# Author - -Yasuhiro Matsumoto (a.k.a mattn) diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/doc.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/doc.go deleted file mode 100644 index 17d4f90e..00000000 --- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package isatty implements interface to isatty -package isatty diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_appengine.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_appengine.go deleted file mode 100644 index 83c58877..00000000 --- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_appengine.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build appengine - -package isatty - -// IsTerminal returns true if the file descriptor is terminal which -// is always false on on appengine classic which is a sandboxed PaaS. -func IsTerminal(fd uintptr) bool { - return false -} diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_bsd.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_bsd.go deleted file mode 100644 index 98ffe86a..00000000 --- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_bsd.go +++ /dev/null @@ -1,18 +0,0 @@ -// +build darwin freebsd openbsd netbsd -// +build !appengine - -package isatty - -import ( - "syscall" - "unsafe" -) - -const ioctlReadTermios = syscall.TIOCGETA - -// IsTerminal return true if the file descriptor is terminal. 
-func IsTerminal(fd uintptr) bool { - var termios syscall.Termios - _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) - return err == 0 -} diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_linux.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_linux.go deleted file mode 100644 index 9d24bac1..00000000 --- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_linux.go +++ /dev/null @@ -1,18 +0,0 @@ -// +build linux -// +build !appengine - -package isatty - -import ( - "syscall" - "unsafe" -) - -const ioctlReadTermios = syscall.TCGETS - -// IsTerminal return true if the file descriptor is terminal. -func IsTerminal(fd uintptr) bool { - var termios syscall.Termios - _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) - return err == 0 -} diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_solaris.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_solaris.go deleted file mode 100644 index 1f0c6bf5..00000000 --- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_solaris.go +++ /dev/null @@ -1,16 +0,0 @@ -// +build solaris -// +build !appengine - -package isatty - -import ( - "golang.org/x/sys/unix" -) - -// IsTerminal returns true if the given file descriptor is a terminal. -// see: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libbc/libc/gen/common/isatty.c -func IsTerminal(fd uintptr) bool { - var termio unix.Termio - err := unix.IoctlSetTermio(int(fd), unix.TCGETA, &termio) - return err == nil -} diff --git a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_windows.go b/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_windows.go deleted file mode 100644 index 83c398b1..00000000 --- a/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_windows.go +++ /dev/null @@ -1,19 +0,0 @@ -// +build windows -// +build !appengine - -package isatty - -import ( - "syscall" - "unsafe" -) - -var kernel32 = syscall.NewLazyDLL("kernel32.dll") -var procGetConsoleMode = kernel32.NewProc("GetConsoleMode") - -// IsTerminal return true if the file descriptor is terminal. 
-func IsTerminal(fd uintptr) bool { - var st uint32 - r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0) - return r != 0 && e == 0 -} diff --git a/vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter.go b/vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter.go deleted file mode 100644 index 84fd8aff..00000000 --- a/vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter.go +++ /dev/null @@ -1,106 +0,0 @@ -/* - -TeamCity Reporter for Ginkgo - -Makes use of TeamCity's support for Service Messages -http://confluence.jetbrains.com/display/TCD7/Build+Script+Interaction+with+TeamCity#BuildScriptInteractionwithTeamCity-ReportingTests -*/ - -package reporters - -import ( - "fmt" - "io" - "strings" - - "github.com/onsi/ginkgo/config" - "github.com/onsi/ginkgo/types" -) - -const ( - messageId = "##teamcity" -) - -type TeamCityReporter struct { - writer io.Writer - testSuiteName string - ReporterConfig config.DefaultReporterConfigType -} - -func NewTeamCityReporter(writer io.Writer) *TeamCityReporter { - return &TeamCityReporter{ - writer: writer, - } -} - -func (reporter *TeamCityReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) { - reporter.testSuiteName = escape(summary.SuiteDescription) - fmt.Fprintf(reporter.writer, "%s[testSuiteStarted name='%s']\n", messageId, reporter.testSuiteName) -} - -func (reporter *TeamCityReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) { - reporter.handleSetupSummary("BeforeSuite", setupSummary) -} - -func (reporter *TeamCityReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) { - reporter.handleSetupSummary("AfterSuite", setupSummary) -} - -func (reporter *TeamCityReporter) handleSetupSummary(name string, setupSummary *types.SetupSummary) { - if setupSummary.State != types.SpecStatePassed { - testName := escape(name) - fmt.Fprintf(reporter.writer, "%s[testStarted name='%s']\n", messageId, testName) - message := reporter.failureMessage(setupSummary.Failure) - details := reporter.failureDetails(setupSummary.Failure) - fmt.Fprintf(reporter.writer, "%s[testFailed name='%s' message='%s' details='%s']\n", messageId, testName, message, details) - durationInMilliseconds := setupSummary.RunTime.Seconds() * 1000 - fmt.Fprintf(reporter.writer, "%s[testFinished name='%s' duration='%v']\n", messageId, testName, durationInMilliseconds) - } -} - -func (reporter *TeamCityReporter) SpecWillRun(specSummary *types.SpecSummary) { - testName := escape(strings.Join(specSummary.ComponentTexts[1:], " ")) - fmt.Fprintf(reporter.writer, "%s[testStarted name='%s']\n", messageId, testName) -} - -func (reporter *TeamCityReporter) SpecDidComplete(specSummary *types.SpecSummary) { - testName := escape(strings.Join(specSummary.ComponentTexts[1:], " ")) - - if reporter.ReporterConfig.ReportPassed && specSummary.State == types.SpecStatePassed { - details := escape(specSummary.CapturedOutput) - fmt.Fprintf(reporter.writer, "%s[testPassed name='%s' details='%s']\n", messageId, testName, details) - } - if specSummary.State == types.SpecStateFailed || specSummary.State == types.SpecStateTimedOut || specSummary.State == types.SpecStatePanicked { - message := reporter.failureMessage(specSummary.Failure) - details := reporter.failureDetails(specSummary.Failure) - fmt.Fprintf(reporter.writer, "%s[testFailed name='%s' message='%s' details='%s']\n", messageId, testName, message, details) - } - if specSummary.State == types.SpecStateSkipped || specSummary.State == types.SpecStatePending { - 
fmt.Fprintf(reporter.writer, "%s[testIgnored name='%s']\n", messageId, testName) - } - - durationInMilliseconds := specSummary.RunTime.Seconds() * 1000 - fmt.Fprintf(reporter.writer, "%s[testFinished name='%s' duration='%v']\n", messageId, testName, durationInMilliseconds) -} - -func (reporter *TeamCityReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) { - fmt.Fprintf(reporter.writer, "%s[testSuiteFinished name='%s']\n", messageId, reporter.testSuiteName) -} - -func (reporter *TeamCityReporter) failureMessage(failure types.SpecFailure) string { - return escape(failure.ComponentCodeLocation.String()) -} - -func (reporter *TeamCityReporter) failureDetails(failure types.SpecFailure) string { - return escape(fmt.Sprintf("%s\n%s", failure.Message, failure.Location.String())) -} - -func escape(output string) string { - output = strings.Replace(output, "|", "||", -1) - output = strings.Replace(output, "'", "|'", -1) - output = strings.Replace(output, "\n", "|n", -1) - output = strings.Replace(output, "\r", "|r", -1) - output = strings.Replace(output, "[", "|[", -1) - output = strings.Replace(output, "]", "|]", -1) - return output -} diff --git a/vendor/github.com/onsi/ginkgo/types/code_location.go b/vendor/github.com/onsi/ginkgo/types/code_location.go deleted file mode 100644 index 935a89e1..00000000 --- a/vendor/github.com/onsi/ginkgo/types/code_location.go +++ /dev/null @@ -1,15 +0,0 @@ -package types - -import ( - "fmt" -) - -type CodeLocation struct { - FileName string - LineNumber int - FullStackTrace string -} - -func (codeLocation CodeLocation) String() string { - return fmt.Sprintf("%s:%d", codeLocation.FileName, codeLocation.LineNumber) -} diff --git a/vendor/github.com/onsi/ginkgo/types/deprecation_support.go b/vendor/github.com/onsi/ginkgo/types/deprecation_support.go deleted file mode 100644 index 305c134b..00000000 --- a/vendor/github.com/onsi/ginkgo/types/deprecation_support.go +++ /dev/null @@ -1,150 +0,0 @@ -package types - -import ( - "os" - "strconv" - "strings" - "unicode" - - "github.com/onsi/ginkgo/config" - "github.com/onsi/ginkgo/formatter" -) - -type Deprecation struct { - Message string - DocLink string - Version string -} - -type deprecations struct{} - -var Deprecations = deprecations{} - -func (d deprecations) CustomReporter() Deprecation { - return Deprecation{ - Message: "You are using a custom reporter. Support for custom reporters will likely be removed in V2. Most users were using them to generate junit or teamcity reports and this functionality will be merged into the core reporter. In addition, Ginkgo 2.0 will support emitting a JSON-formatted report that users can then manipulate to generate custom reports.\n\n{{red}}{{bold}}If this change will be impactful to you please leave a comment on {{cyan}}{{underline}}https://github.com/onsi/ginkgo/issues/711{{/}}", - DocLink: "removed-custom-reporters", - Version: "1.16.0", - } -} - -func (d deprecations) V1Reporter() Deprecation { - return Deprecation{ - Message: "You are using a V1 Ginkgo Reporter. Please update your custom reporter to the new V2 Reporter interface.", - DocLink: "changed-reporter-interface", - Version: "1.16.0", - } -} - -func (d deprecations) Async() Deprecation { - return Deprecation{ - Message: "You are passing a Done channel to a test node to test asynchronous behavior. This is deprecated in Ginkgo V2. 
Your test will run synchronously and the timeout will be ignored.", - DocLink: "removed-async-testing", - Version: "1.16.0", - } -} - -func (d deprecations) Measure() Deprecation { - return Deprecation{ - Message: "Measure is deprecated and will be removed in Ginkgo V2. Please migrate to gomega/gmeasure.", - DocLink: "removed-measure", - Version: "1.16.3", - } -} - -func (d deprecations) Convert() Deprecation { - return Deprecation{ - Message: "The convert command is deprecated in Ginkgo V2", - DocLink: "removed-ginkgo-convert", - Version: "1.16.0", - } -} - -func (d deprecations) Blur() Deprecation { - return Deprecation{ - Message: "The blur command is deprecated in Ginkgo V2. Use 'ginkgo unfocus' instead.", - Version: "1.16.0", - } -} - -type DeprecationTracker struct { - deprecations map[Deprecation][]CodeLocation -} - -func NewDeprecationTracker() *DeprecationTracker { - return &DeprecationTracker{ - deprecations: map[Deprecation][]CodeLocation{}, - } -} - -func (d *DeprecationTracker) TrackDeprecation(deprecation Deprecation, cl ...CodeLocation) { - ackVersion := os.Getenv("ACK_GINKGO_DEPRECATIONS") - if deprecation.Version != "" && ackVersion != "" { - ack := ParseSemVer(ackVersion) - version := ParseSemVer(deprecation.Version) - if ack.GreaterThanOrEqualTo(version) { - return - } - } - - if len(cl) == 1 { - d.deprecations[deprecation] = append(d.deprecations[deprecation], cl[0]) - } else { - d.deprecations[deprecation] = []CodeLocation{} - } -} - -func (d *DeprecationTracker) DidTrackDeprecations() bool { - return len(d.deprecations) > 0 -} - -func (d *DeprecationTracker) DeprecationsReport() string { - out := formatter.F("{{light-yellow}}You're using deprecated Ginkgo functionality:{{/}}\n") - out += formatter.F("{{light-yellow}}============================================={{/}}\n") - out += formatter.F("Ginkgo 2.0 is under active development and will introduce (a small number of) breaking changes.\n") - out += formatter.F("To learn more, view the migration guide at {{cyan}}{{underline}}https://github.com/onsi/ginkgo/blob/v2/docs/MIGRATING_TO_V2.md{{/}}\n") - out += formatter.F("To comment, chime in at {{cyan}}{{underline}}https://github.com/onsi/ginkgo/issues/711{{/}}\n\n") - - for deprecation, locations := range d.deprecations { - out += formatter.Fi(1, "{{yellow}}"+deprecation.Message+"{{/}}\n") - if deprecation.DocLink != "" { - out += formatter.Fi(1, "{{bold}}Learn more at:{{/}} {{cyan}}{{underline}}https://github.com/onsi/ginkgo/blob/v2/docs/MIGRATING_TO_V2.md#%s{{/}}\n", deprecation.DocLink) - } - for _, location := range locations { - out += formatter.Fi(2, "{{gray}}%s{{/}}\n", location) - } - } - out += formatter.F("\n{{gray}}To silence deprecations that can be silenced set the following environment variable:{{/}}\n") - out += formatter.Fi(1, "{{gray}}ACK_GINKGO_DEPRECATIONS=%s{{/}}\n", config.VERSION) - return out -} - -type SemVer struct { - Major int - Minor int - Patch int -} - -func (s SemVer) GreaterThanOrEqualTo(o SemVer) bool { - return (s.Major > o.Major) || - (s.Major == o.Major && s.Minor > o.Minor) || - (s.Major == o.Major && s.Minor == o.Minor && s.Patch >= o.Patch) -} - -func ParseSemVer(semver string) SemVer { - out := SemVer{} - semver = strings.TrimFunc(semver, func(r rune) bool { - return !(unicode.IsNumber(r) || r == '.') - }) - components := strings.Split(semver, ".") - if len(components) > 0 { - out.Major, _ = strconv.Atoi(components[0]) - } - if len(components) > 1 { - out.Minor, _ = strconv.Atoi(components[1]) - } - if len(components) > 2 { - 
out.Patch, _ = strconv.Atoi(components[2]) - } - return out -} diff --git a/vendor/github.com/onsi/ginkgo/types/synchronization.go b/vendor/github.com/onsi/ginkgo/types/synchronization.go deleted file mode 100644 index fdd6ed5b..00000000 --- a/vendor/github.com/onsi/ginkgo/types/synchronization.go +++ /dev/null @@ -1,30 +0,0 @@ -package types - -import ( - "encoding/json" -) - -type RemoteBeforeSuiteState int - -const ( - RemoteBeforeSuiteStateInvalid RemoteBeforeSuiteState = iota - - RemoteBeforeSuiteStatePending - RemoteBeforeSuiteStatePassed - RemoteBeforeSuiteStateFailed - RemoteBeforeSuiteStateDisappeared -) - -type RemoteBeforeSuiteData struct { - Data []byte - State RemoteBeforeSuiteState -} - -func (r RemoteBeforeSuiteData) ToJSON() []byte { - data, _ := json.Marshal(r) - return data -} - -type RemoteAfterSuiteData struct { - CanRun bool -} diff --git a/vendor/github.com/onsi/ginkgo/types/types.go b/vendor/github.com/onsi/ginkgo/types/types.go deleted file mode 100644 index c143e02d..00000000 --- a/vendor/github.com/onsi/ginkgo/types/types.go +++ /dev/null @@ -1,174 +0,0 @@ -package types - -import ( - "strconv" - "time" -) - -const GINKGO_FOCUS_EXIT_CODE = 197 - -/* -SuiteSummary represents the a summary of the test suite and is passed to both -Reporter.SpecSuiteWillBegin -Reporter.SpecSuiteDidEnd - -this is unfortunate as these two methods should receive different objects. When running in parallel -each node does not deterministically know how many specs it will end up running. - -Unfortunately making such a change would break backward compatibility. - -Until Ginkgo 2.0 comes out we will continue to reuse this struct but populate unknown fields -with -1. -*/ -type SuiteSummary struct { - SuiteDescription string - SuiteSucceeded bool - SuiteID string - - NumberOfSpecsBeforeParallelization int - NumberOfTotalSpecs int - NumberOfSpecsThatWillBeRun int - NumberOfPendingSpecs int - NumberOfSkippedSpecs int - NumberOfPassedSpecs int - NumberOfFailedSpecs int - // Flaked specs are those that failed initially, but then passed on a - // subsequent try. 
- NumberOfFlakedSpecs int - RunTime time.Duration -} - -type SpecSummary struct { - ComponentTexts []string - ComponentCodeLocations []CodeLocation - - State SpecState - RunTime time.Duration - Failure SpecFailure - IsMeasurement bool - NumberOfSamples int - Measurements map[string]*SpecMeasurement - - CapturedOutput string - SuiteID string -} - -func (s SpecSummary) HasFailureState() bool { - return s.State.IsFailure() -} - -func (s SpecSummary) TimedOut() bool { - return s.State == SpecStateTimedOut -} - -func (s SpecSummary) Panicked() bool { - return s.State == SpecStatePanicked -} - -func (s SpecSummary) Failed() bool { - return s.State == SpecStateFailed -} - -func (s SpecSummary) Passed() bool { - return s.State == SpecStatePassed -} - -func (s SpecSummary) Skipped() bool { - return s.State == SpecStateSkipped -} - -func (s SpecSummary) Pending() bool { - return s.State == SpecStatePending -} - -type SetupSummary struct { - ComponentType SpecComponentType - CodeLocation CodeLocation - - State SpecState - RunTime time.Duration - Failure SpecFailure - - CapturedOutput string - SuiteID string -} - -type SpecFailure struct { - Message string - Location CodeLocation - ForwardedPanic string - - ComponentIndex int - ComponentType SpecComponentType - ComponentCodeLocation CodeLocation -} - -type SpecMeasurement struct { - Name string - Info interface{} - Order int - - Results []float64 - - Smallest float64 - Largest float64 - Average float64 - StdDeviation float64 - - SmallestLabel string - LargestLabel string - AverageLabel string - Units string - Precision int -} - -func (s SpecMeasurement) PrecisionFmt() string { - if s.Precision == 0 { - return "%f" - } - - str := strconv.Itoa(s.Precision) - - return "%." + str + "f" -} - -type SpecState uint - -const ( - SpecStateInvalid SpecState = iota - - SpecStatePending - SpecStateSkipped - SpecStatePassed - SpecStateFailed - SpecStatePanicked - SpecStateTimedOut -) - -func (state SpecState) IsFailure() bool { - return state == SpecStateTimedOut || state == SpecStatePanicked || state == SpecStateFailed -} - -type SpecComponentType uint - -const ( - SpecComponentTypeInvalid SpecComponentType = iota - - SpecComponentTypeContainer - SpecComponentTypeBeforeSuite - SpecComponentTypeAfterSuite - SpecComponentTypeBeforeEach - SpecComponentTypeJustBeforeEach - SpecComponentTypeJustAfterEach - SpecComponentTypeAfterEach - SpecComponentTypeIt - SpecComponentTypeMeasure -) - -type FlagType uint - -const ( - FlagTypeNone FlagType = iota - FlagTypeFocused - FlagTypePending -) diff --git a/vendor/github.com/pelletier/go-toml/.dockerignore b/vendor/github.com/pelletier/go-toml/.dockerignore deleted file mode 100644 index 7b588347..00000000 --- a/vendor/github.com/pelletier/go-toml/.dockerignore +++ /dev/null @@ -1,2 +0,0 @@ -cmd/tomll/tomll -cmd/tomljson/tomljson diff --git a/vendor/github.com/pelletier/go-toml/.gitignore b/vendor/github.com/pelletier/go-toml/.gitignore deleted file mode 100644 index e6ba63a5..00000000 --- a/vendor/github.com/pelletier/go-toml/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -test_program/test_program_bin -fuzz/ -cmd/tomll/tomll -cmd/tomljson/tomljson -cmd/tomltestgen/tomltestgen diff --git a/vendor/github.com/pelletier/go-toml/CONTRIBUTING.md b/vendor/github.com/pelletier/go-toml/CONTRIBUTING.md deleted file mode 100644 index 98b9893d..00000000 --- a/vendor/github.com/pelletier/go-toml/CONTRIBUTING.md +++ /dev/null @@ -1,132 +0,0 @@ -## Contributing - -Thank you for your interest in go-toml! 
We appreciate you considering -contributing to go-toml! - -The main goal of the project is to provide an easy-to-use TOML -implementation for Go that gets the job done and gets out of your way – -dealing with TOML is probably not the central piece of your project. - -As the single maintainer of go-toml, time is scarce. All help, big or -small, is more than welcome! - -### Ask questions - -Any question you may have, somebody else might have it too. Always feel -free to ask them on the [issues tracker][issues-tracker]. We will try to -answer them as clearly and quickly as possible, time permitting. - -Asking questions also helps us identify areas where the documentation needs -improvement, or new features that weren't envisioned before. Sometimes, a -seemingly innocent question leads to the fix of a bug. Don't hesitate and -ask away! - -### Improve the documentation - -The best way to share your knowledge and experience with go-toml is to -improve the documentation. Fix a typo, clarify an interface, add an -example, anything goes! - -The documentation is present in the [README][readme] and throughout the -source code. On release, it gets updated on [pkg.go.dev][pkg.go.dev]. To make a -change to the documentation, create a pull request with your proposed -changes. For simple changes like that, the easiest way to go is probably -the "Fork this project and edit the file" button on GitHub, displayed at -the top right of the file. Unless it's a trivial change (for example a -typo), provide a little bit of context in your pull request description or -commit message. - -### Report a bug - -Found a bug! Sorry to hear that :(. Help us and others track them down and -fix them by reporting it. [File a new bug report][bug-report] on the [issues -tracker][issues-tracker]. The template should provide enough guidance on -what to include. When in doubt: add more details! Reducing ambiguity and -providing more information decreases back and forth and saves everyone -time. - -### Code changes - -Want to contribute a patch? Very happy to hear that! - -First, some high-level rules: - -* A short proposal with some POC code is better than a lengthy piece of - text with no code. Code speaks louder than words. -* No backward-incompatible patch will be accepted unless discussed. - Sometimes it's hard, and Go's lack of versioning by default does not - help, but we try not to break people's programs unless we absolutely have - to. -* If you are writing a new feature or extending an existing one, make sure - to write some documentation. -* Bug fixes need to be accompanied by regression tests (see the sketch - below). -* New code needs to be tested. -* Your commit messages need to explain why the change is needed, even if - already included in the PR description. - -It does sound like a lot, but those best practices are here to save time -overall and continuously improve the quality of the project, which is -something everyone benefits from. - -#### Get started - -The fairly standard code contribution process looks like this: - -1. [Fork the project][fork]. -2. Make your changes, committing on any branch you like. -3. [Open up a pull request][pull-request]. -4. Review, with potential requests for changes. -5. Merge. You're in! - -Feel free to ask for help! You can create draft pull requests to gather -some early feedback! - -#### Run the tests - -You can run tests for go-toml using Go's test tool: `go test ./...`.
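As a concrete illustration of the regression-test rule above, here is a minimal sketch of such a test. It uses only the `toml.Load` and `Tree.Get` calls shown in this package's README; the document, key, and test name are illustrative, not taken from go-toml:

```go
package toml_test

import (
	"testing"

	toml "github.com/pelletier/go-toml"
)

// A regression test pins a bug down with the smallest document that
// reproduces it, plus an assertion on the parsed value.
func TestPostgresUserIsParsed(t *testing.T) {
	tree, err := toml.Load(`
[postgres]
user = "pelletier"
`)
	if err != nil {
		t.Fatalf("Load: %s", err)
	}
	if got := tree.Get("postgres.user"); got != "pelletier" {
		t.Fatalf("postgres.user = %v, want %q", got, "pelletier")
	}
}
```

`go test ./...` then picks it up like any other test in the module.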
-When you create a pull request, all tests are run on Linux on a few Go -versions (Travis CI), and on Windows using the latest Go version -(AppVeyor). - -#### Style - -Try to look around and follow the same format and structure as the rest of -the code. We enforce using `go fmt` on the whole code base. - ---- - -### Maintainers-only - -#### Merge pull request - -Checklist: - -* Passing CI. -* Does not introduce backward-incompatible changes (unless discussed). -* Has relevant doc changes. -* Has relevant unit tests. - -1. Merge using "squash and merge". -2. Make sure to edit the commit message to keep all the useful information - nice and clean. -3. Make sure the commit title is clear and contains the PR number (#123). - -#### New release - -1. Go to [releases][releases]. Click on "X commits to master since this - release". -2. Make note of all the changes. Look for backward incompatible changes, - new features, and bug fixes. -3. Pick the new version using the above and semver. -4. Create a [new release][new-release]. -5. Follow the same format as [1.1.0][release-110]. - -[issues-tracker]: https://github.com/pelletier/go-toml/issues -[bug-report]: https://github.com/pelletier/go-toml/issues/new?template=bug_report.md -[pkg.go.dev]: https://pkg.go.dev/github.com/pelletier/go-toml -[readme]: ./README.md -[fork]: https://help.github.com/articles/fork-a-repo -[pull-request]: https://help.github.com/en/articles/creating-a-pull-request -[releases]: https://github.com/pelletier/go-toml/releases -[new-release]: https://github.com/pelletier/go-toml/releases/new -[release-110]: https://github.com/pelletier/go-toml/releases/tag/v1.1.0 diff --git a/vendor/github.com/pelletier/go-toml/Dockerfile b/vendor/github.com/pelletier/go-toml/Dockerfile deleted file mode 100644 index fffdb016..00000000 --- a/vendor/github.com/pelletier/go-toml/Dockerfile +++ /dev/null @@ -1,11 +0,0 @@ -FROM golang:1.12-alpine3.9 as builder -WORKDIR /go/src/github.com/pelletier/go-toml -COPY . . -ENV CGO_ENABLED=0 -ENV GOOS=linux -RUN go install ./... - -FROM scratch -COPY --from=builder /go/bin/tomll /usr/bin/tomll -COPY --from=builder /go/bin/tomljson /usr/bin/tomljson -COPY --from=builder /go/bin/jsontoml /usr/bin/jsontoml diff --git a/vendor/github.com/pelletier/go-toml/LICENSE b/vendor/github.com/pelletier/go-toml/LICENSE deleted file mode 100644 index f414553c..00000000 --- a/vendor/github.com/pelletier/go-toml/LICENSE +++ /dev/null @@ -1,247 +0,0 @@ -The bulk of github.com/pelletier/go-toml is distributed under the MIT license -(see below), with the exception of localtime.go and localtime.test.go. -Those two files have been copied over from Google's civil library at revision -ed46f5086358513cf8c25f8e3f022cb838a49d66, and are distributed under the Apache -2.0 license (see below). - - -github.com/pelletier/go-toml: - - -The MIT License (MIT) - -Copyright (c) 2013 - 2021 Thomas Pelletier, Eric Anderton - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software.
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - -localtime.go, localtime_test.go: - -Originals: - https://raw.githubusercontent.com/googleapis/google-cloud-go/ed46f5086358513cf8c25f8e3f022cb838a49d66/civil/civil.go - https://raw.githubusercontent.com/googleapis/google-cloud-go/ed46f5086358513cf8c25f8e3f022cb838a49d66/civil/civil_test.go -Changes: - * Renamed files from civil* to localtime*. - * Package changed from civil to toml. - * 'Local' prefix added to all structs. -License: - https://raw.githubusercontent.com/googleapis/google-cloud-go/ed46f5086358513cf8c25f8e3f022cb838a49d66/LICENSE - - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/pelletier/go-toml/Makefile b/vendor/github.com/pelletier/go-toml/Makefile deleted file mode 100644 index 9e4503ae..00000000 --- a/vendor/github.com/pelletier/go-toml/Makefile +++ /dev/null @@ -1,29 +0,0 @@ -export CGO_ENABLED=0 -go := go -go.goos ?= $(shell echo `go version`|cut -f4 -d ' '|cut -d '/' -f1) -go.goarch ?= $(shell echo `go version`|cut -f4 -d ' '|cut -d '/' -f2) - -out.tools := tomll tomljson jsontoml -out.dist := $(out.tools:=_$(go.goos)_$(go.goarch).tar.xz) -sources := $(wildcard **/*.go) - - -.PHONY: -tools: $(out.tools) - -$(out.tools): $(sources) - GOOS=$(go.goos) GOARCH=$(go.goarch) $(go) build ./cmd/$@ - -.PHONY: -dist: $(out.dist) - -$(out.dist):%_$(go.goos)_$(go.goarch).tar.xz: % - if [ "$(go.goos)" = "windows" ]; then \ - tar -cJf $@ $^.exe; \ - else \ - tar -cJf $@ $^; \ - fi - -.PHONY: -clean: - rm -rf $(out.tools) $(out.dist) diff --git a/vendor/github.com/pelletier/go-toml/PULL_REQUEST_TEMPLATE.md b/vendor/github.com/pelletier/go-toml/PULL_REQUEST_TEMPLATE.md deleted file mode 100644 index 041cdc4a..00000000 --- a/vendor/github.com/pelletier/go-toml/PULL_REQUEST_TEMPLATE.md +++ /dev/null @@ -1,5 +0,0 @@ -**Issue:** add link to pelletier/go-toml issue here - -Explanation of what this pull request does. - -More detailed description of the decisions being made and the reasons why (if the patch is non-trivial). diff --git a/vendor/github.com/pelletier/go-toml/README.md b/vendor/github.com/pelletier/go-toml/README.md deleted file mode 100644 index 6c061712..00000000 --- a/vendor/github.com/pelletier/go-toml/README.md +++ /dev/null @@ -1,176 +0,0 @@ -# go-toml - -Go library for the [TOML](https://toml.io/) format. 
- -This library supports TOML version -[v1.0.0-rc.3](https://toml.io/en/v1.0.0-rc.3) - -[![Go Reference](https://pkg.go.dev/badge/github.com/pelletier/go-toml.svg)](https://pkg.go.dev/github.com/pelletier/go-toml) -[![license](https://img.shields.io/github/license/pelletier/go-toml.svg)](https://github.com/pelletier/go-toml/blob/master/LICENSE) -[![Build Status](https://dev.azure.com/pelletierthomas/go-toml-ci/_apis/build/status/pelletier.go-toml?branchName=master)](https://dev.azure.com/pelletierthomas/go-toml-ci/_build/latest?definitionId=1&branchName=master) -[![codecov](https://codecov.io/gh/pelletier/go-toml/branch/master/graph/badge.svg)](https://codecov.io/gh/pelletier/go-toml) -[![Go Report Card](https://goreportcard.com/badge/github.com/pelletier/go-toml)](https://goreportcard.com/report/github.com/pelletier/go-toml) -[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fpelletier%2Fgo-toml.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fpelletier%2Fgo-toml?ref=badge_shield) - - -## Development status - -**â„¹ï¸ Consider go-toml v2!** - -The next version of go-toml is in [active development][v2-dev], and -[nearing completion][v2-map]. - -Though technically in beta, v2 is already more tested, [fixes bugs][v1-bugs], -and [much faster][v2-bench]. If you only need reading and writing TOML documents -(majority of cases), those features are implemented and the API unlikely to -change. - -The remaining features (Document structure editing and tooling) will be added -shortly. While pull-requests are welcome on v1, no active development is -expected on it. When v2.0.0 is released, v1 will be deprecated. - -👉 [go-toml v2][v2] - -[v2]: https://github.com/pelletier/go-toml/tree/v2 -[v2-map]: https://github.com/pelletier/go-toml/discussions/506 -[v2-dev]: https://github.com/pelletier/go-toml/tree/v2 -[v1-bugs]: https://github.com/pelletier/go-toml/issues?q=is%3Aissue+is%3Aopen+label%3Av2-fixed -[v2-bench]: https://github.com/pelletier/go-toml/tree/v2#benchmarks - -## Features - -Go-toml provides the following features for using data parsed from TOML documents: - -* Load TOML documents from files and string data -* Easily navigate TOML structure using Tree -* Marshaling and unmarshaling to and from data structures -* Line & column position data for all parsed elements -* [Query support similar to JSON-Path](query/) -* Syntax errors contain line and column numbers - -## Import - -```go -import "github.com/pelletier/go-toml" -``` - -## Usage example - -Read a TOML document: - -```go -config, _ := toml.Load(` -[postgres] -user = "pelletier" -password = "mypassword"`) -// retrieve data directly -user := config.Get("postgres.user").(string) - -// or using an intermediate object -postgresConfig := config.Get("postgres").(*toml.Tree) -password := postgresConfig.Get("password").(string) -``` - -Or use Unmarshal: - -```go -type Postgres struct { - User string - Password string -} -type Config struct { - Postgres Postgres -} - -doc := []byte(` -[Postgres] -User = "pelletier" -Password = "mypassword"`) - -config := Config{} -toml.Unmarshal(doc, &config) -fmt.Println("user=", config.Postgres.User) -``` - -Or use a query: - -```go -// use a query to gather elements without walking the tree -q, _ := query.Compile("$..[user,password]") -results := q.Execute(config) -for ii, item := range results.Values() { - fmt.Printf("Query result %d: %v\n", ii, item) -} -``` - -## Documentation - -The documentation and additional examples are available at 
-[pkg.go.dev](https://pkg.go.dev/github.com/pelletier/go-toml). - -## Tools - -Go-toml provides three handy command line tools: - -* `tomll`: Reads TOML files and lints them. - - ``` - go install github.com/pelletier/go-toml/cmd/tomll - tomll --help - ``` -* `tomljson`: Reads a TOML file and outputs its JSON representation. - - ``` - go install github.com/pelletier/go-toml/cmd/tomljson - tomljson --help - ``` - - * `jsontoml`: Reads a JSON file and outputs a TOML representation. - - ``` - go install github.com/pelletier/go-toml/cmd/jsontoml - jsontoml --help - ``` - -### Docker image - -Those tools are also available as a Docker image from -[dockerhub](https://hub.docker.com/r/pelletier/go-toml). For example, to -use `tomljson`: - -``` -docker run -v $PWD:/workdir pelletier/go-toml tomljson /workdir/example.toml -``` - -Only master (`latest`) and tagged versions are published to dockerhub. You -can build your own image as usual: - -``` -docker build -t go-toml . -``` - -## Contribute - -Feel free to report bugs and patches using GitHub's pull requests system on -[pelletier/go-toml](https://github.com/pelletier/go-toml). Any feedback would be -much appreciated! - -### Run tests - -`go test ./...` - -### Fuzzing - -The script `./fuzz.sh` is available to -run [go-fuzz](https://github.com/dvyukov/go-fuzz) on go-toml. - -## Versioning - -Go-toml follows [Semantic Versioning](http://semver.org/). The supported version -of [TOML](https://github.com/toml-lang/toml) is indicated at the beginning of -this document. The last two major versions of Go are supported -(see [Go Release Policy](https://golang.org/doc/devel/release.html#policy)). - -## License - -The MIT License (MIT) + Apache 2.0. Read [LICENSE](LICENSE). diff --git a/vendor/github.com/pelletier/go-toml/azure-pipelines.yml b/vendor/github.com/pelletier/go-toml/azure-pipelines.yml deleted file mode 100644 index 4af198b4..00000000 --- a/vendor/github.com/pelletier/go-toml/azure-pipelines.yml +++ /dev/null @@ -1,188 +0,0 @@ -trigger: -- master - -stages: -- stage: run_checks - displayName: "Check" - dependsOn: [] - jobs: - - job: fmt - displayName: "fmt" - pool: - vmImage: ubuntu-latest - steps: - - task: GoTool@0 - displayName: "Install Go 1.16" - inputs: - version: "1.16" - - task: Go@0 - displayName: "go fmt ./..." - inputs: - command: 'custom' - customCommand: 'fmt' - arguments: './...' 
- - job: coverage - displayName: "coverage" - pool: - vmImage: ubuntu-latest - steps: - - task: GoTool@0 - displayName: "Install Go 1.16" - inputs: - version: "1.16" - - task: Go@0 - displayName: "Generate coverage" - inputs: - command: 'test' - arguments: "-race -coverprofile=coverage.txt -covermode=atomic" - - task: Bash@3 - inputs: - targetType: 'inline' - script: 'bash <(curl -s https://codecov.io/bash) -t ${CODECOV_TOKEN}' - env: - CODECOV_TOKEN: $(CODECOV_TOKEN) - - job: benchmark - displayName: "benchmark" - pool: - vmImage: ubuntu-latest - steps: - - task: GoTool@0 - displayName: "Install Go 1.16" - inputs: - version: "1.16" - - script: echo "##vso[task.setvariable variable=PATH]${PATH}:/home/vsts/go/bin/" - - task: Bash@3 - inputs: - filePath: './benchmark.sh' - arguments: "master $(Build.Repository.Uri)" - - - job: go_unit_tests - displayName: "unit tests" - strategy: - matrix: - linux 1.16: - goVersion: '1.16' - imageName: 'ubuntu-latest' - mac 1.16: - goVersion: '1.16' - imageName: 'macOS-latest' - windows 1.16: - goVersion: '1.16' - imageName: 'windows-latest' - linux 1.15: - goVersion: '1.15' - imageName: 'ubuntu-latest' - mac 1.15: - goVersion: '1.15' - imageName: 'macOS-latest' - windows 1.15: - goVersion: '1.15' - imageName: 'windows-latest' - pool: - vmImage: $(imageName) - steps: - - task: GoTool@0 - displayName: "Install Go $(goVersion)" - inputs: - version: $(goVersion) - - task: Go@0 - displayName: "go test ./..." - inputs: - command: 'test' - arguments: './...' -- stage: build_binaries - displayName: "Build binaries" - dependsOn: run_checks - jobs: - - job: build_binary - displayName: "Build binary" - strategy: - matrix: - linux_amd64: - GOOS: linux - GOARCH: amd64 - darwin_amd64: - GOOS: darwin - GOARCH: amd64 - windows_amd64: - GOOS: windows - GOARCH: amd64 - pool: - vmImage: ubuntu-latest - steps: - - task: GoTool@0 - displayName: "Install Go" - inputs: - version: 1.16 - - task: Bash@3 - inputs: - targetType: inline - script: "make dist" - env: - go.goos: $(GOOS) - go.goarch: $(GOARCH) - - task: CopyFiles@2 - inputs: - sourceFolder: '$(Build.SourcesDirectory)' - contents: '*.tar.xz' - TargetFolder: '$(Build.ArtifactStagingDirectory)' - - task: PublishBuildArtifacts@1 - inputs: - pathtoPublish: '$(Build.ArtifactStagingDirectory)' - artifactName: binaries -- stage: build_binaries_manifest - displayName: "Build binaries manifest" - dependsOn: build_binaries - jobs: - - job: build_manifest - displayName: "Build binaries manifest" - steps: - - task: DownloadBuildArtifacts@0 - inputs: - buildType: 'current' - downloadType: 'single' - artifactName: 'binaries' - downloadPath: '$(Build.SourcesDirectory)' - - task: Bash@3 - inputs: - targetType: inline - script: "cd binaries && sha256sum --binary *.tar.xz | tee $(Build.ArtifactStagingDirectory)/sha256sums.txt" - - task: PublishBuildArtifacts@1 - inputs: - pathtoPublish: '$(Build.ArtifactStagingDirectory)' - artifactName: manifest - -- stage: build_docker_image - displayName: "Build Docker image" - dependsOn: run_checks - jobs: - - job: build - displayName: "Build" - pool: - vmImage: ubuntu-latest - steps: - - task: Docker@2 - inputs: - command: 'build' - Dockerfile: 'Dockerfile' - buildContext: '.' 
- addPipelineData: false - -- stage: publish_docker_image - displayName: "Publish Docker image" - dependsOn: build_docker_image - condition: and(succeeded(), eq(variables['Build.SourceBranchName'], 'master')) - jobs: - - job: publish - displayName: "Publish" - pool: - vmImage: ubuntu-latest - steps: - - task: Docker@2 - inputs: - containerRegistry: 'DockerHub' - repository: 'pelletier/go-toml' - command: 'buildAndPush' - Dockerfile: 'Dockerfile' - buildContext: '.' - tags: 'latest' diff --git a/vendor/github.com/pelletier/go-toml/benchmark.sh b/vendor/github.com/pelletier/go-toml/benchmark.sh deleted file mode 100644 index a69d3040..00000000 --- a/vendor/github.com/pelletier/go-toml/benchmark.sh +++ /dev/null @@ -1,35 +0,0 @@ -#!/bin/bash - -set -ex - -reference_ref=${1:-master} -reference_git=${2:-.} - -if ! `hash benchstat 2>/dev/null`; then - echo "Installing benchstat" - go get golang.org/x/perf/cmd/benchstat -fi - -tempdir=`mktemp -d /tmp/go-toml-benchmark-XXXXXX` -ref_tempdir="${tempdir}/ref" -ref_benchmark="${ref_tempdir}/benchmark-`echo -n ${reference_ref}|tr -s '/' '-'`.txt" -local_benchmark="`pwd`/benchmark-local.txt" - -echo "=== ${reference_ref} (${ref_tempdir})" -git clone ${reference_git} ${ref_tempdir} >/dev/null 2>/dev/null -pushd ${ref_tempdir} >/dev/null -git checkout ${reference_ref} >/dev/null 2>/dev/null -go test -bench=. -benchmem | tee ${ref_benchmark} -cd benchmark -go test -bench=. -benchmem | tee -a ${ref_benchmark} -popd >/dev/null - -echo "" -echo "=== local" -go test -bench=. -benchmem | tee ${local_benchmark} -cd benchmark -go test -bench=. -benchmem | tee -a ${local_benchmark} - -echo "" -echo "=== diff" -benchstat -delta-test=none ${ref_benchmark} ${local_benchmark} diff --git a/vendor/github.com/pelletier/go-toml/doc.go b/vendor/github.com/pelletier/go-toml/doc.go deleted file mode 100644 index a1406a32..00000000 --- a/vendor/github.com/pelletier/go-toml/doc.go +++ /dev/null @@ -1,23 +0,0 @@ -// Package toml is a TOML parser and manipulation library. -// -// This version supports the specification as described in -// https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.5.0.md -// -// Marshaling -// -// Go-toml can marshal and unmarshal TOML documents from and to data -// structures. -// -// TOML document as a tree -// -// Go-toml can operate on a TOML document as a tree. Use one of the Load* -// functions to parse TOML data and obtain a Tree instance, then one of its -// methods to manipulate the tree. -// -// JSONPath-like queries -// -// The package github.com/pelletier/go-toml/query implements a system -// similar to JSONPath to quickly retrieve elements of a TOML document using a -// single expression. See the package documentation for more information. -// -package toml diff --git a/vendor/github.com/pelletier/go-toml/example-crlf.toml b/vendor/github.com/pelletier/go-toml/example-crlf.toml deleted file mode 100644 index 780d9c68..00000000 --- a/vendor/github.com/pelletier/go-toml/example-crlf.toml +++ /dev/null @@ -1,30 +0,0 @@ -# This is a TOML document. Boom. - -title = "TOML Example" - -[owner] -name = "Tom Preston-Werner" -organization = "GitHub" -bio = "GitHub Cofounder & CEO\nLikes tater tots and beer." -dob = 1979-05-27T07:32:00Z # First class dates? Why not? - -[database] -server = "192.168.1.1" -ports = [ 8001, 8001, 8002 ] -connection_max = 5000 -enabled = true - -[servers] - - # You can indent as you please. Tabs or spaces. TOML don't care. 
- [servers.alpha] - ip = "10.0.0.1" - dc = "eqdc10" - - [servers.beta] - ip = "10.0.0.2" - dc = "eqdc10" - -[clients] -data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it -score = 4e-08 # to make sure leading zeroes in exponent parts of floats are supported \ No newline at end of file diff --git a/vendor/github.com/pelletier/go-toml/example.toml b/vendor/github.com/pelletier/go-toml/example.toml deleted file mode 100644 index f45bf88b..00000000 --- a/vendor/github.com/pelletier/go-toml/example.toml +++ /dev/null @@ -1,30 +0,0 @@ -# This is a TOML document. Boom. - -title = "TOML Example" - -[owner] -name = "Tom Preston-Werner" -organization = "GitHub" -bio = "GitHub Cofounder & CEO\nLikes tater tots and beer." -dob = 1979-05-27T07:32:00Z # First class dates? Why not? - -[database] -server = "192.168.1.1" -ports = [ 8001, 8001, 8002 ] -connection_max = 5000 -enabled = true - -[servers] - - # You can indent as you please. Tabs or spaces. TOML don't care. - [servers.alpha] - ip = "10.0.0.1" - dc = "eqdc10" - - [servers.beta] - ip = "10.0.0.2" - dc = "eqdc10" - -[clients] -data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it -score = 4e-08 # to make sure leading zeroes in exponent parts of floats are supported \ No newline at end of file diff --git a/vendor/github.com/pelletier/go-toml/fuzz.go b/vendor/github.com/pelletier/go-toml/fuzz.go deleted file mode 100644 index 14570c8d..00000000 --- a/vendor/github.com/pelletier/go-toml/fuzz.go +++ /dev/null @@ -1,31 +0,0 @@ -// +build gofuzz - -package toml - -func Fuzz(data []byte) int { - tree, err := LoadBytes(data) - if err != nil { - if tree != nil { - panic("tree must be nil if there is an error") - } - return 0 - } - - str, err := tree.ToTomlString() - if err != nil { - if str != "" { - panic(`str must be "" if there is an error`) - } - panic(err) - } - - tree, err = Load(str) - if err != nil { - if tree != nil { - panic("tree must be nil if there is an error") - } - return 0 - } - - return 1 -} diff --git a/vendor/github.com/pelletier/go-toml/fuzz.sh b/vendor/github.com/pelletier/go-toml/fuzz.sh deleted file mode 100644 index 3204b4c4..00000000 --- a/vendor/github.com/pelletier/go-toml/fuzz.sh +++ /dev/null @@ -1,15 +0,0 @@ -#! /bin/sh -set -eu - -go get github.com/dvyukov/go-fuzz/go-fuzz -go get github.com/dvyukov/go-fuzz/go-fuzz-build - -if [ ! -e toml-fuzz.zip ]; then - go-fuzz-build github.com/pelletier/go-toml -fi - -rm -fr fuzz -mkdir -p fuzz/corpus -cp *.toml fuzz/corpus - -go-fuzz -bin=toml-fuzz.zip -workdir=fuzz diff --git a/vendor/github.com/pelletier/go-toml/keysparsing.go b/vendor/github.com/pelletier/go-toml/keysparsing.go deleted file mode 100644 index e091500b..00000000 --- a/vendor/github.com/pelletier/go-toml/keysparsing.go +++ /dev/null @@ -1,112 +0,0 @@ -// Parsing keys handling both bare and quoted keys. - -package toml - -import ( - "errors" - "fmt" -) - -// Convert the bare key group string to an array. -// The input supports double quotation and single quotation, -// but escape sequences are not supported. Lexers must unescape them beforehand. 
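Editorial aside, not part of the patch: the doc comment above describes `parseKey` (implementation follows below), which splits a dotted key into segments while treating quoted segments as opaque. Since `parseKey` is unexported, the same behavior is observable through the public API; a small sketch, assuming go-toml v1's `Load` and `GetPath` (both present in the deleted sources):

```go
package main

import (
	"fmt"

	"github.com/pelletier/go-toml"
)

func main() {
	// A bare dotted key creates nested tables; a quoted segment is one key.
	tree, err := toml.Load(`
physical.color = "orange"
site."google.com" = true
`)
	if err != nil {
		panic(err)
	}

	// "physical.color" splits into ["physical", "color"] ...
	fmt.Println(tree.GetPath([]string{"physical", "color"})) // orange

	// ... while the quoted "google.com" stays a single segment.
	fmt.Println(tree.GetPath([]string{"site", "google.com"})) // true
}
```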
-func parseKey(key string) ([]string, error) { - runes := []rune(key) - var groups []string - - if len(key) == 0 { - return nil, errors.New("empty key") - } - - idx := 0 - for idx < len(runes) { - for ; idx < len(runes) && isSpace(runes[idx]); idx++ { - // skip leading whitespace - } - if idx >= len(runes) { - break - } - r := runes[idx] - if isValidBareChar(r) { - // parse bare key - startIdx := idx - endIdx := -1 - idx++ - for idx < len(runes) { - r = runes[idx] - if isValidBareChar(r) { - idx++ - } else if r == '.' { - endIdx = idx - break - } else if isSpace(r) { - endIdx = idx - for ; idx < len(runes) && isSpace(runes[idx]); idx++ { - // skip trailing whitespace - } - if idx < len(runes) && runes[idx] != '.' { - return nil, fmt.Errorf("invalid key character after whitespace: %c", runes[idx]) - } - break - } else { - return nil, fmt.Errorf("invalid bare key character: %c", r) - } - } - if endIdx == -1 { - endIdx = idx - } - groups = append(groups, string(runes[startIdx:endIdx])) - } else if r == '\'' { - // parse single quoted key - idx++ - startIdx := idx - for { - if idx >= len(runes) { - return nil, fmt.Errorf("unclosed single-quoted key") - } - r = runes[idx] - if r == '\'' { - groups = append(groups, string(runes[startIdx:idx])) - idx++ - break - } - idx++ - } - } else if r == '"' { - // parse double quoted key - idx++ - startIdx := idx - for { - if idx >= len(runes) { - return nil, fmt.Errorf("unclosed double-quoted key") - } - r = runes[idx] - if r == '"' { - groups = append(groups, string(runes[startIdx:idx])) - idx++ - break - } - idx++ - } - } else if r == '.' { - idx++ - if idx >= len(runes) { - return nil, fmt.Errorf("unexpected end of key") - } - r = runes[idx] - if !isValidBareChar(r) && r != '\'' && r != '"' && r != ' ' { - return nil, fmt.Errorf("expecting key part after dot") - } - } else { - return nil, fmt.Errorf("invalid key character: %c", r) - } - } - if len(groups) == 0 { - return nil, fmt.Errorf("empty key") - } - return groups, nil -} - -func isValidBareChar(r rune) bool { - return isAlphanumeric(r) || r == '-' || isDigit(r) -} diff --git a/vendor/github.com/pelletier/go-toml/lexer.go b/vendor/github.com/pelletier/go-toml/lexer.go deleted file mode 100644 index 313908e3..00000000 --- a/vendor/github.com/pelletier/go-toml/lexer.go +++ /dev/null @@ -1,1031 +0,0 @@ -// TOML lexer. 
-// -// Written using the principles developed by Rob Pike in -// http://www.youtube.com/watch?v=HxaD_trXwRE - -package toml - -import ( - "bytes" - "errors" - "fmt" - "strconv" - "strings" -) - -// Define state functions -type tomlLexStateFn func() tomlLexStateFn - -// Define lexer -type tomlLexer struct { - inputIdx int - input []rune // Textual source - currentTokenStart int - currentTokenStop int - tokens []token - brackets []rune - line int - col int - endbufferLine int - endbufferCol int -} - -// Basic read operations on input - -func (l *tomlLexer) read() rune { - r := l.peek() - if r == '\n' { - l.endbufferLine++ - l.endbufferCol = 1 - } else { - l.endbufferCol++ - } - l.inputIdx++ - return r -} - -func (l *tomlLexer) next() rune { - r := l.read() - - if r != eof { - l.currentTokenStop++ - } - return r -} - -func (l *tomlLexer) ignore() { - l.currentTokenStart = l.currentTokenStop - l.line = l.endbufferLine - l.col = l.endbufferCol -} - -func (l *tomlLexer) skip() { - l.next() - l.ignore() -} - -func (l *tomlLexer) fastForward(n int) { - for i := 0; i < n; i++ { - l.next() - } -} - -func (l *tomlLexer) emitWithValue(t tokenType, value string) { - l.tokens = append(l.tokens, token{ - Position: Position{l.line, l.col}, - typ: t, - val: value, - }) - l.ignore() -} - -func (l *tomlLexer) emit(t tokenType) { - l.emitWithValue(t, string(l.input[l.currentTokenStart:l.currentTokenStop])) -} - -func (l *tomlLexer) peek() rune { - if l.inputIdx >= len(l.input) { - return eof - } - return l.input[l.inputIdx] -} - -func (l *tomlLexer) peekString(size int) string { - maxIdx := len(l.input) - upperIdx := l.inputIdx + size // FIXME: potential overflow - if upperIdx > maxIdx { - upperIdx = maxIdx - } - return string(l.input[l.inputIdx:upperIdx]) -} - -func (l *tomlLexer) follow(next string) bool { - return next == l.peekString(len(next)) -} - -// Error management - -func (l *tomlLexer) errorf(format string, args ...interface{}) tomlLexStateFn { - l.tokens = append(l.tokens, token{ - Position: Position{l.line, l.col}, - typ: tokenError, - val: fmt.Sprintf(format, args...), - }) - return nil -} - -// State functions - -func (l *tomlLexer) lexVoid() tomlLexStateFn { - for { - next := l.peek() - switch next { - case '}': // after '{' - return l.lexRightCurlyBrace - case '[': - return l.lexTableKey - case '#': - return l.lexComment(l.lexVoid) - case '=': - return l.lexEqual - case '\r': - fallthrough - case '\n': - l.skip() - continue - } - - if isSpace(next) { - l.skip() - } - - if isKeyStartChar(next) { - return l.lexKey - } - - if next == eof { - l.next() - break - } - } - - l.emit(tokenEOF) - return nil -} - -func (l *tomlLexer) lexRvalue() tomlLexStateFn { - for { - next := l.peek() - switch next { - case '.': - return l.errorf("cannot start float with a dot") - case '=': - return l.lexEqual - case '[': - return l.lexLeftBracket - case ']': - return l.lexRightBracket - case '{': - return l.lexLeftCurlyBrace - case '}': - return l.lexRightCurlyBrace - case '#': - return l.lexComment(l.lexRvalue) - case '"': - return l.lexString - case '\'': - return l.lexLiteralString - case ',': - return l.lexComma - case '\r': - fallthrough - case '\n': - l.skip() - if len(l.brackets) > 0 && l.brackets[len(l.brackets)-1] == '[' { - return l.lexRvalue - } - return l.lexVoid - } - - if l.follow("true") { - return l.lexTrue - } - - if l.follow("false") { - return l.lexFalse - } - - if l.follow("inf") { - return l.lexInf - } - - if l.follow("nan") { - return l.lexNan - } - - if isSpace(next) { - l.skip() - continue - 
} - - if next == eof { - l.next() - break - } - - if next == '+' || next == '-' { - return l.lexNumber - } - - if isDigit(next) { - return l.lexDateTimeOrNumber - } - - return l.errorf("no value can start with %c", next) - } - - l.emit(tokenEOF) - return nil -} - -func (l *tomlLexer) lexDateTimeOrNumber() tomlLexStateFn { - // Could be either a date/time, or a digit. - // The options for date/times are: - // YYYY-... => date or date-time - // HH:... => time - // Anything else should be a number. - - lookAhead := l.peekString(5) - if len(lookAhead) < 3 { - return l.lexNumber() - } - - for idx, r := range lookAhead { - if !isDigit(r) { - if idx == 2 && r == ':' { - return l.lexDateTimeOrTime() - } - if idx == 4 && r == '-' { - return l.lexDateTimeOrTime() - } - return l.lexNumber() - } - } - return l.lexNumber() -} - -func (l *tomlLexer) lexLeftCurlyBrace() tomlLexStateFn { - l.next() - l.emit(tokenLeftCurlyBrace) - l.brackets = append(l.brackets, '{') - return l.lexVoid -} - -func (l *tomlLexer) lexRightCurlyBrace() tomlLexStateFn { - l.next() - l.emit(tokenRightCurlyBrace) - if len(l.brackets) == 0 || l.brackets[len(l.brackets)-1] != '{' { - return l.errorf("cannot have '}' here") - } - l.brackets = l.brackets[:len(l.brackets)-1] - return l.lexRvalue -} - -func (l *tomlLexer) lexDateTimeOrTime() tomlLexStateFn { - // Example matches: - // 1979-05-27T07:32:00Z - // 1979-05-27T00:32:00-07:00 - // 1979-05-27T00:32:00.999999-07:00 - // 1979-05-27 07:32:00Z - // 1979-05-27 00:32:00-07:00 - // 1979-05-27 00:32:00.999999-07:00 - // 1979-05-27T07:32:00 - // 1979-05-27T00:32:00.999999 - // 1979-05-27 07:32:00 - // 1979-05-27 00:32:00.999999 - // 1979-05-27 - // 07:32:00 - // 00:32:00.999999 - - // we already know those two are digits - l.next() - l.next() - - // Got 2 digits. At that point it could be either a time or a date(-time). - - r := l.next() - if r == ':' { - return l.lexTime() - } - - return l.lexDateTime() -} - -func (l *tomlLexer) lexDateTime() tomlLexStateFn { - // This state accepts an offset date-time, a local date-time, or a local date. 
- // - // v--- cursor - // 1979-05-27T07:32:00Z - // 1979-05-27T00:32:00-07:00 - // 1979-05-27T00:32:00.999999-07:00 - // 1979-05-27 07:32:00Z - // 1979-05-27 00:32:00-07:00 - // 1979-05-27 00:32:00.999999-07:00 - // 1979-05-27T07:32:00 - // 1979-05-27T00:32:00.999999 - // 1979-05-27 07:32:00 - // 1979-05-27 00:32:00.999999 - // 1979-05-27 - - // date - - // already checked by lexRvalue - l.next() // digit - l.next() // - - - for i := 0; i < 2; i++ { - r := l.next() - if !isDigit(r) { - return l.errorf("invalid month digit in date: %c", r) - } - } - - r := l.next() - if r != '-' { - return l.errorf("expected - to separate month of a date, not %c", r) - } - - for i := 0; i < 2; i++ { - r := l.next() - if !isDigit(r) { - return l.errorf("invalid day digit in date: %c", r) - } - } - - l.emit(tokenLocalDate) - - r = l.peek() - - if r == eof { - - return l.lexRvalue - } - - if r != ' ' && r != 'T' { - return l.errorf("incorrect date/time separation character: %c", r) - } - - if r == ' ' { - lookAhead := l.peekString(3)[1:] - if len(lookAhead) < 2 { - return l.lexRvalue - } - for _, r := range lookAhead { - if !isDigit(r) { - return l.lexRvalue - } - } - } - - l.skip() // skip the T or ' ' - - // time - - for i := 0; i < 2; i++ { - r := l.next() - if !isDigit(r) { - return l.errorf("invalid hour digit in time: %c", r) - } - } - - r = l.next() - if r != ':' { - return l.errorf("time hour/minute separator should be :, not %c", r) - } - - for i := 0; i < 2; i++ { - r := l.next() - if !isDigit(r) { - return l.errorf("invalid minute digit in time: %c", r) - } - } - - r = l.next() - if r != ':' { - return l.errorf("time minute/second separator should be :, not %c", r) - } - - for i := 0; i < 2; i++ { - r := l.next() - if !isDigit(r) { - return l.errorf("invalid second digit in time: %c", r) - } - } - - r = l.peek() - if r == '.' { - l.next() - r := l.next() - if !isDigit(r) { - return l.errorf("expected at least one digit in time's fraction, not %c", r) - } - - for { - r := l.peek() - if !isDigit(r) { - break - } - l.next() - } - } - - l.emit(tokenLocalTime) - - return l.lexTimeOffset - -} - -func (l *tomlLexer) lexTimeOffset() tomlLexStateFn { - // potential offset - - // Z - // -07:00 - // +07:00 - // nothing - - r := l.peek() - - if r == 'Z' { - l.next() - l.emit(tokenTimeOffset) - } else if r == '+' || r == '-' { - l.next() - - for i := 0; i < 2; i++ { - r := l.next() - if !isDigit(r) { - return l.errorf("invalid hour digit in time offset: %c", r) - } - } - - r = l.next() - if r != ':' { - return l.errorf("time offset hour/minute separator should be :, not %c", r) - } - - for i := 0; i < 2; i++ { - r := l.next() - if !isDigit(r) { - return l.errorf("invalid minute digit in time offset: %c", r) - } - } - - l.emit(tokenTimeOffset) - } - - return l.lexRvalue -} - -func (l *tomlLexer) lexTime() tomlLexStateFn { - // v--- cursor - // 07:32:00 - // 00:32:00.999999 - - for i := 0; i < 2; i++ { - r := l.next() - if !isDigit(r) { - return l.errorf("invalid minute digit in time: %c", r) - } - } - - r := l.next() - if r != ':' { - return l.errorf("time minute/second separator should be :, not %c", r) - } - - for i := 0; i < 2; i++ { - r := l.next() - if !isDigit(r) { - return l.errorf("invalid second digit in time: %c", r) - } - } - - r = l.peek() - if r == '.' 
{ - l.next() - r := l.next() - if !isDigit(r) { - return l.errorf("expected at least one digit in time's fraction, not %c", r) - } - - for { - r := l.peek() - if !isDigit(r) { - break - } - l.next() - } - } - - l.emit(tokenLocalTime) - return l.lexRvalue - -} - -func (l *tomlLexer) lexTrue() tomlLexStateFn { - l.fastForward(4) - l.emit(tokenTrue) - return l.lexRvalue -} - -func (l *tomlLexer) lexFalse() tomlLexStateFn { - l.fastForward(5) - l.emit(tokenFalse) - return l.lexRvalue -} - -func (l *tomlLexer) lexInf() tomlLexStateFn { - l.fastForward(3) - l.emit(tokenInf) - return l.lexRvalue -} - -func (l *tomlLexer) lexNan() tomlLexStateFn { - l.fastForward(3) - l.emit(tokenNan) - return l.lexRvalue -} - -func (l *tomlLexer) lexEqual() tomlLexStateFn { - l.next() - l.emit(tokenEqual) - return l.lexRvalue -} - -func (l *tomlLexer) lexComma() tomlLexStateFn { - l.next() - l.emit(tokenComma) - if len(l.brackets) > 0 && l.brackets[len(l.brackets)-1] == '{' { - return l.lexVoid - } - return l.lexRvalue -} - -// Parse the key and emits its value without escape sequences. -// bare keys, basic string keys and literal string keys are supported. -func (l *tomlLexer) lexKey() tomlLexStateFn { - var sb strings.Builder - - for r := l.peek(); isKeyChar(r) || r == '\n' || r == '\r'; r = l.peek() { - if r == '"' { - l.next() - str, err := l.lexStringAsString(`"`, false, true) - if err != nil { - return l.errorf(err.Error()) - } - sb.WriteString("\"") - sb.WriteString(str) - sb.WriteString("\"") - l.next() - continue - } else if r == '\'' { - l.next() - str, err := l.lexLiteralStringAsString(`'`, false) - if err != nil { - return l.errorf(err.Error()) - } - sb.WriteString("'") - sb.WriteString(str) - sb.WriteString("'") - l.next() - continue - } else if r == '\n' { - return l.errorf("keys cannot contain new lines") - } else if isSpace(r) { - var str strings.Builder - str.WriteString(" ") - - // skip trailing whitespace - l.next() - for r = l.peek(); isSpace(r); r = l.peek() { - str.WriteRune(r) - l.next() - } - // break loop if not a dot - if r != '.' { - break - } - str.WriteString(".") - // skip trailing whitespace after dot - l.next() - for r = l.peek(); isSpace(r); r = l.peek() { - str.WriteRune(r) - l.next() - } - sb.WriteString(str.String()) - continue - } else if r == '.' 
{ - // skip - } else if !isValidBareChar(r) { - return l.errorf("keys cannot contain %c character", r) - } - sb.WriteRune(r) - l.next() - } - l.emitWithValue(tokenKey, sb.String()) - return l.lexVoid -} - -func (l *tomlLexer) lexComment(previousState tomlLexStateFn) tomlLexStateFn { - return func() tomlLexStateFn { - for next := l.peek(); next != '\n' && next != eof; next = l.peek() { - if next == '\r' && l.follow("\r\n") { - break - } - l.next() - } - l.ignore() - return previousState - } -} - -func (l *tomlLexer) lexLeftBracket() tomlLexStateFn { - l.next() - l.emit(tokenLeftBracket) - l.brackets = append(l.brackets, '[') - return l.lexRvalue -} - -func (l *tomlLexer) lexLiteralStringAsString(terminator string, discardLeadingNewLine bool) (string, error) { - var sb strings.Builder - - if discardLeadingNewLine { - if l.follow("\r\n") { - l.skip() - l.skip() - } else if l.peek() == '\n' { - l.skip() - } - } - - // find end of string - for { - if l.follow(terminator) { - return sb.String(), nil - } - - next := l.peek() - if next == eof { - break - } - sb.WriteRune(l.next()) - } - - return "", errors.New("unclosed string") -} - -func (l *tomlLexer) lexLiteralString() tomlLexStateFn { - l.skip() - - // handle special case for triple-quote - terminator := "'" - discardLeadingNewLine := false - if l.follow("''") { - l.skip() - l.skip() - terminator = "'''" - discardLeadingNewLine = true - } - - str, err := l.lexLiteralStringAsString(terminator, discardLeadingNewLine) - if err != nil { - return l.errorf(err.Error()) - } - - l.emitWithValue(tokenString, str) - l.fastForward(len(terminator)) - l.ignore() - return l.lexRvalue -} - -// Lex a string and return the results as a string. -// Terminator is the substring indicating the end of the token. -// The resulting string does not include the terminator. 
-func (l *tomlLexer) lexStringAsString(terminator string, discardLeadingNewLine, acceptNewLines bool) (string, error) { - var sb strings.Builder - - if discardLeadingNewLine { - if l.follow("\r\n") { - l.skip() - l.skip() - } else if l.peek() == '\n' { - l.skip() - } - } - - for { - if l.follow(terminator) { - return sb.String(), nil - } - - if l.follow("\\") { - l.next() - switch l.peek() { - case '\r': - fallthrough - case '\n': - fallthrough - case '\t': - fallthrough - case ' ': - // skip all whitespace chars following backslash - for strings.ContainsRune("\r\n\t ", l.peek()) { - l.next() - } - case '"': - sb.WriteString("\"") - l.next() - case 'n': - sb.WriteString("\n") - l.next() - case 'b': - sb.WriteString("\b") - l.next() - case 'f': - sb.WriteString("\f") - l.next() - case '/': - sb.WriteString("/") - l.next() - case 't': - sb.WriteString("\t") - l.next() - case 'r': - sb.WriteString("\r") - l.next() - case '\\': - sb.WriteString("\\") - l.next() - case 'u': - l.next() - var code strings.Builder - for i := 0; i < 4; i++ { - c := l.peek() - if !isHexDigit(c) { - return "", errors.New("unfinished unicode escape") - } - l.next() - code.WriteRune(c) - } - intcode, err := strconv.ParseInt(code.String(), 16, 32) - if err != nil { - return "", errors.New("invalid unicode escape: \\u" + code.String()) - } - sb.WriteRune(rune(intcode)) - case 'U': - l.next() - var code strings.Builder - for i := 0; i < 8; i++ { - c := l.peek() - if !isHexDigit(c) { - return "", errors.New("unfinished unicode escape") - } - l.next() - code.WriteRune(c) - } - intcode, err := strconv.ParseInt(code.String(), 16, 64) - if err != nil { - return "", errors.New("invalid unicode escape: \\U" + code.String()) - } - sb.WriteRune(rune(intcode)) - default: - return "", errors.New("invalid escape sequence: \\" + string(l.peek())) - } - } else { - r := l.peek() - - if 0x00 <= r && r <= 0x1F && r != '\t' && !(acceptNewLines && (r == '\n' || r == '\r')) { - return "", fmt.Errorf("unescaped control character %U", r) - } - l.next() - sb.WriteRune(r) - } - - if l.peek() == eof { - break - } - } - - return "", errors.New("unclosed string") -} - -func (l *tomlLexer) lexString() tomlLexStateFn { - l.skip() - - // handle special case for triple-quote - terminator := `"` - discardLeadingNewLine := false - acceptNewLines := false - if l.follow(`""`) { - l.skip() - l.skip() - terminator = `"""` - discardLeadingNewLine = true - acceptNewLines = true - } - - str, err := l.lexStringAsString(terminator, discardLeadingNewLine, acceptNewLines) - if err != nil { - return l.errorf(err.Error()) - } - - l.emitWithValue(tokenString, str) - l.fastForward(len(terminator)) - l.ignore() - return l.lexRvalue -} - -func (l *tomlLexer) lexTableKey() tomlLexStateFn { - l.next() - - if l.peek() == '[' { - // token '[[' signifies an array of tables - l.next() - l.emit(tokenDoubleLeftBracket) - return l.lexInsideTableArrayKey - } - // vanilla table key - l.emit(tokenLeftBracket) - return l.lexInsideTableKey -} - -// Parse the key till "]]", but only bare keys are supported -func (l *tomlLexer) lexInsideTableArrayKey() tomlLexStateFn { - for r := l.peek(); r != eof; r = l.peek() { - switch r { - case ']': - if l.currentTokenStop > l.currentTokenStart { - l.emit(tokenKeyGroupArray) - } - l.next() - if l.peek() != ']' { - break - } - l.next() - l.emit(tokenDoubleRightBracket) - return l.lexVoid - case '[': - return l.errorf("table array key cannot contain ']'") - default: - l.next() - } - } - return l.errorf("unclosed table array key") -} - -// Parse the 
key till "]" but only bare keys are supported -func (l *tomlLexer) lexInsideTableKey() tomlLexStateFn { - for r := l.peek(); r != eof; r = l.peek() { - switch r { - case ']': - if l.currentTokenStop > l.currentTokenStart { - l.emit(tokenKeyGroup) - } - l.next() - l.emit(tokenRightBracket) - return l.lexVoid - case '[': - return l.errorf("table key cannot contain ']'") - default: - l.next() - } - } - return l.errorf("unclosed table key") -} - -func (l *tomlLexer) lexRightBracket() tomlLexStateFn { - l.next() - l.emit(tokenRightBracket) - if len(l.brackets) == 0 || l.brackets[len(l.brackets)-1] != '[' { - return l.errorf("cannot have ']' here") - } - l.brackets = l.brackets[:len(l.brackets)-1] - return l.lexRvalue -} - -type validRuneFn func(r rune) bool - -func isValidHexRune(r rune) bool { - return r >= 'a' && r <= 'f' || - r >= 'A' && r <= 'F' || - r >= '0' && r <= '9' || - r == '_' -} - -func isValidOctalRune(r rune) bool { - return r >= '0' && r <= '7' || r == '_' -} - -func isValidBinaryRune(r rune) bool { - return r == '0' || r == '1' || r == '_' -} - -func (l *tomlLexer) lexNumber() tomlLexStateFn { - r := l.peek() - - if r == '0' { - follow := l.peekString(2) - if len(follow) == 2 { - var isValidRune validRuneFn - switch follow[1] { - case 'x': - isValidRune = isValidHexRune - case 'o': - isValidRune = isValidOctalRune - case 'b': - isValidRune = isValidBinaryRune - default: - if follow[1] >= 'a' && follow[1] <= 'z' || follow[1] >= 'A' && follow[1] <= 'Z' { - return l.errorf("unknown number base: %s. possible options are x (hex) o (octal) b (binary)", string(follow[1])) - } - } - - if isValidRune != nil { - l.next() - l.next() - digitSeen := false - for { - next := l.peek() - if !isValidRune(next) { - break - } - digitSeen = true - l.next() - } - - if !digitSeen { - return l.errorf("number needs at least one digit") - } - - l.emit(tokenInteger) - - return l.lexRvalue - } - } - } - - if r == '+' || r == '-' { - l.next() - if l.follow("inf") { - return l.lexInf - } - if l.follow("nan") { - return l.lexNan - } - } - - pointSeen := false - expSeen := false - digitSeen := false - for { - next := l.peek() - if next == '.' { - if pointSeen { - return l.errorf("cannot have two dots in one float") - } - l.next() - if !isDigit(l.peek()) { - return l.errorf("float cannot end with a dot") - } - pointSeen = true - } else if next == 'e' || next == 'E' { - expSeen = true - l.next() - r := l.peek() - if r == '+' || r == '-' { - l.next() - } - } else if isDigit(next) { - digitSeen = true - l.next() - } else if next == '_' { - l.next() - } else { - break - } - if pointSeen && !digitSeen { - return l.errorf("cannot start float with a dot") - } - } - - if !digitSeen { - return l.errorf("no digit in that number") - } - if pointSeen || expSeen { - l.emit(tokenFloat) - } else { - l.emit(tokenInteger) - } - return l.lexRvalue -} - -func (l *tomlLexer) run() { - for state := l.lexVoid; state != nil; { - state = state() - } -} - -// Entry point -func lexToml(inputBytes []byte) []token { - runes := bytes.Runes(inputBytes) - l := &tomlLexer{ - input: runes, - tokens: make([]token, 0, 256), - line: 1, - col: 1, - endbufferLine: 1, - endbufferCol: 1, - } - l.run() - return l.tokens -} diff --git a/vendor/github.com/pelletier/go-toml/localtime.go b/vendor/github.com/pelletier/go-toml/localtime.go deleted file mode 100644 index 9dfe4b9e..00000000 --- a/vendor/github.com/pelletier/go-toml/localtime.go +++ /dev/null @@ -1,287 +0,0 @@ -// Implementation of TOML's local date/time. 
-// -// Copied over from Google's civil to avoid pulling all the Google dependencies. -// Originals: -// https://raw.githubusercontent.com/googleapis/google-cloud-go/ed46f5086358513cf8c25f8e3f022cb838a49d66/civil/civil.go -// Changes: -// * Renamed files from civil* to localtime*. -// * Package changed from civil to toml. -// * 'Local' prefix added to all structs. -// -// Copyright 2016 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package civil implements types for civil time, a time-zone-independent -// representation of time that follows the rules of the proleptic -// Gregorian calendar with exactly 24-hour days, 60-minute hours, and 60-second -// minutes. -// -// Because they lack location information, these types do not represent unique -// moments or intervals of time. Use time.Time for that purpose. -package toml - -import ( - "fmt" - "time" -) - -// A LocalDate represents a date (year, month, day). -// -// This type does not include location information, and therefore does not -// describe a unique 24-hour timespan. -type LocalDate struct { - Year int // Year (e.g., 2014). - Month time.Month // Month of the year (January = 1, ...). - Day int // Day of the month, starting at 1. -} - -// LocalDateOf returns the LocalDate in which a time occurs in that time's location. -func LocalDateOf(t time.Time) LocalDate { - var d LocalDate - d.Year, d.Month, d.Day = t.Date() - return d -} - -// ParseLocalDate parses a string in RFC3339 full-date format and returns the date value it represents. -func ParseLocalDate(s string) (LocalDate, error) { - t, err := time.Parse("2006-01-02", s) - if err != nil { - return LocalDate{}, err - } - return LocalDateOf(t), nil -} - -// String returns the date in RFC3339 full-date format. -func (d LocalDate) String() string { - return fmt.Sprintf("%04d-%02d-%02d", d.Year, d.Month, d.Day) -} - -// IsValid reports whether the date is valid. -func (d LocalDate) IsValid() bool { - return LocalDateOf(d.In(time.UTC)) == d -} - -// In returns the time corresponding to time 00:00:00 of the date in the location. -// -// In is always consistent with time.LocalDate, even when time.LocalDate returns a time -// on a different day. For example, if loc is America/Indiana/Vincennes, then both -// time.LocalDate(1955, time.May, 1, 0, 0, 0, 0, loc) -// and -// civil.LocalDate{Year: 1955, Month: time.May, Day: 1}.In(loc) -// return 23:00:00 on April 30, 1955. -// -// In panics if loc is nil. -func (d LocalDate) In(loc *time.Location) time.Time { - return time.Date(d.Year, d.Month, d.Day, 0, 0, 0, 0, loc) -} - -// AddDays returns the date that is n days in the future. -// n can also be negative to go into the past. -func (d LocalDate) AddDays(n int) LocalDate { - return LocalDateOf(d.In(time.UTC).AddDate(0, 0, n)) -} - -// DaysSince returns the signed number of days between the date and s, not including the end day. -// This is the inverse operation to AddDays. 
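Editorial aside, not part of the patch: the deleted `localtime.go` exports `LocalDate` with `AddDays`, its inverse `DaysSince` (whose body follows below), and `In` for anchoring a date to a location. A minimal sketch using only those exported names:

```go
package main

import (
	"fmt"
	"time"

	"github.com/pelletier/go-toml"
)

func main() {
	d, err := toml.ParseLocalDate("1979-05-27")
	if err != nil {
		panic(err)
	}

	// AddDays and DaysSince are inverse operations.
	later := d.AddDays(10)
	fmt.Println(later)              // 1979-06-06
	fmt.Println(later.DaysSince(d)) // 10

	// In anchors the date to midnight in a concrete location.
	fmt.Println(d.In(time.UTC)) // 1979-05-27 00:00:00 +0000 UTC
}
```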
-func (d LocalDate) DaysSince(s LocalDate) (days int) { - // We convert to Unix time so we do not have to worry about leap seconds: - // Unix time increases by exactly 86400 seconds per day. - deltaUnix := d.In(time.UTC).Unix() - s.In(time.UTC).Unix() - return int(deltaUnix / 86400) -} - -// Before reports whether d1 occurs before d2. -func (d1 LocalDate) Before(d2 LocalDate) bool { - if d1.Year != d2.Year { - return d1.Year < d2.Year - } - if d1.Month != d2.Month { - return d1.Month < d2.Month - } - return d1.Day < d2.Day -} - -// After reports whether d1 occurs after d2. -func (d1 LocalDate) After(d2 LocalDate) bool { - return d2.Before(d1) -} - -// MarshalText implements the encoding.TextMarshaler interface. -// The output is the result of d.String(). -func (d LocalDate) MarshalText() ([]byte, error) { - return []byte(d.String()), nil -} - -// UnmarshalText implements the encoding.TextUnmarshaler interface. -// The date is expected to be a string in a format accepted by ParseLocalDate. -func (d *LocalDate) UnmarshalText(data []byte) error { - var err error - *d, err = ParseLocalDate(string(data)) - return err -} - -// A LocalTime represents a time with nanosecond precision. -// -// This type does not include location information, and therefore does not -// describe a unique moment in time. -// -// This type exists to represent the TIME type in storage-based APIs like BigQuery. -// Most operations on Times are unlikely to be meaningful. Prefer the LocalDateTime type. -type LocalTime struct { - Hour int // The hour of the day in 24-hour format; range [0-23] - Minute int // The minute of the hour; range [0-59] - Second int // The second of the minute; range [0-59] - Nanosecond int // The nanosecond of the second; range [0-999999999] -} - -// LocalTimeOf returns the LocalTime representing the time of day in which a time occurs -// in that time's location. It ignores the date. -func LocalTimeOf(t time.Time) LocalTime { - var tm LocalTime - tm.Hour, tm.Minute, tm.Second = t.Clock() - tm.Nanosecond = t.Nanosecond() - return tm -} - -// ParseLocalTime parses a string and returns the time value it represents. -// ParseLocalTime accepts an extended form of the RFC3339 partial-time format. After -// the HH:MM:SS part of the string, an optional fractional part may appear, -// consisting of a decimal point followed by one to nine decimal digits. -// (RFC3339 admits only one digit after the decimal point). -func ParseLocalTime(s string) (LocalTime, error) { - t, err := time.Parse("15:04:05.999999999", s) - if err != nil { - return LocalTime{}, err - } - return LocalTimeOf(t), nil -} - -// String returns the date in the format described in ParseLocalTime. If Nanoseconds -// is zero, no fractional part will be generated. Otherwise, the result will -// end with a fractional part consisting of a decimal point and nine digits. -func (t LocalTime) String() string { - s := fmt.Sprintf("%02d:%02d:%02d", t.Hour, t.Minute, t.Second) - if t.Nanosecond == 0 { - return s - } - return s + fmt.Sprintf(".%09d", t.Nanosecond) -} - -// IsValid reports whether the time is valid. -func (t LocalTime) IsValid() bool { - // Construct a non-zero time. - tm := time.Date(2, 2, 2, t.Hour, t.Minute, t.Second, t.Nanosecond, time.UTC) - return LocalTimeOf(tm) == t -} - -// MarshalText implements the encoding.TextMarshaler interface. -// The output is the result of t.String(). 
-func (t LocalTime) MarshalText() ([]byte, error) { - return []byte(t.String()), nil -} - -// UnmarshalText implements the encoding.TextUnmarshaler interface. -// The time is expected to be a string in a format accepted by ParseLocalTime. -func (t *LocalTime) UnmarshalText(data []byte) error { - var err error - *t, err = ParseLocalTime(string(data)) - return err -} - -// A LocalDateTime represents a date and time. -// -// This type does not include location information, and therefore does not -// describe a unique moment in time. -type LocalDateTime struct { - Date LocalDate - Time LocalTime -} - -// Note: We deliberately do not embed LocalDate into LocalDateTime, to avoid promoting AddDays and Sub. - -// LocalDateTimeOf returns the LocalDateTime in which a time occurs in that time's location. -func LocalDateTimeOf(t time.Time) LocalDateTime { - return LocalDateTime{ - Date: LocalDateOf(t), - Time: LocalTimeOf(t), - } -} - -// ParseLocalDateTime parses a string and returns the LocalDateTime it represents. -// ParseLocalDateTime accepts a variant of the RFC3339 date-time format that omits -// the time offset but includes an optional fractional time, as described in -// ParseLocalTime. Informally, the accepted format is -// YYYY-MM-DDTHH:MM:SS[.FFFFFFFFF] -// where the 'T' may be a lower-case 't'. -func ParseLocalDateTime(s string) (LocalDateTime, error) { - t, err := time.Parse("2006-01-02T15:04:05.999999999", s) - if err != nil { - t, err = time.Parse("2006-01-02t15:04:05.999999999", s) - if err != nil { - return LocalDateTime{}, err - } - } - return LocalDateTimeOf(t), nil -} - -// String returns the date in the format described in ParseLocalDate. -func (dt LocalDateTime) String() string { - return dt.Date.String() + "T" + dt.Time.String() -} - -// IsValid reports whether the datetime is valid. -func (dt LocalDateTime) IsValid() bool { - return dt.Date.IsValid() && dt.Time.IsValid() -} - -// In returns the time corresponding to the LocalDateTime in the given location. -// -// If the time is missing or ambigous at the location, In returns the same -// result as time.LocalDate. For example, if loc is America/Indiana/Vincennes, then -// both -// time.LocalDate(1955, time.May, 1, 0, 30, 0, 0, loc) -// and -// civil.LocalDateTime{ -// civil.LocalDate{Year: 1955, Month: time.May, Day: 1}}, -// civil.LocalTime{Minute: 30}}.In(loc) -// return 23:30:00 on April 30, 1955. -// -// In panics if loc is nil. -func (dt LocalDateTime) In(loc *time.Location) time.Time { - return time.Date(dt.Date.Year, dt.Date.Month, dt.Date.Day, dt.Time.Hour, dt.Time.Minute, dt.Time.Second, dt.Time.Nanosecond, loc) -} - -// Before reports whether dt1 occurs before dt2. -func (dt1 LocalDateTime) Before(dt2 LocalDateTime) bool { - return dt1.In(time.UTC).Before(dt2.In(time.UTC)) -} - -// After reports whether dt1 occurs after dt2. -func (dt1 LocalDateTime) After(dt2 LocalDateTime) bool { - return dt2.Before(dt1) -} - -// MarshalText implements the encoding.TextMarshaler interface. -// The output is the result of dt.String(). -func (dt LocalDateTime) MarshalText() ([]byte, error) { - return []byte(dt.String()), nil -} - -// UnmarshalText implements the encoding.TextUnmarshaler interface. 
-// The datetime is expected to be a string in a format accepted by ParseLocalDateTime -func (dt *LocalDateTime) UnmarshalText(data []byte) error { - var err error - *dt, err = ParseLocalDateTime(string(data)) - return err -} diff --git a/vendor/github.com/pelletier/go-toml/marshal.go b/vendor/github.com/pelletier/go-toml/marshal.go deleted file mode 100644 index 3443c354..00000000 --- a/vendor/github.com/pelletier/go-toml/marshal.go +++ /dev/null @@ -1,1308 +0,0 @@ -package toml - -import ( - "bytes" - "encoding" - "errors" - "fmt" - "io" - "reflect" - "sort" - "strconv" - "strings" - "time" -) - -const ( - tagFieldName = "toml" - tagFieldComment = "comment" - tagCommented = "commented" - tagMultiline = "multiline" - tagLiteral = "literal" - tagDefault = "default" -) - -type tomlOpts struct { - name string - nameFromTag bool - comment string - commented bool - multiline bool - literal bool - include bool - omitempty bool - defaultValue string -} - -type encOpts struct { - quoteMapKeys bool - arraysOneElementPerLine bool -} - -var encOptsDefaults = encOpts{ - quoteMapKeys: false, -} - -type annotation struct { - tag string - comment string - commented string - multiline string - literal string - defaultValue string -} - -var annotationDefault = annotation{ - tag: tagFieldName, - comment: tagFieldComment, - commented: tagCommented, - multiline: tagMultiline, - literal: tagLiteral, - defaultValue: tagDefault, -} - -type MarshalOrder int - -// Orders the Encoder can write the fields to the output stream. -const ( - // Sort fields alphabetically. - OrderAlphabetical MarshalOrder = iota + 1 - // Preserve the order the fields are encountered. For example, the order of fields in - // a struct. - OrderPreserve -) - -var timeType = reflect.TypeOf(time.Time{}) -var marshalerType = reflect.TypeOf(new(Marshaler)).Elem() -var unmarshalerType = reflect.TypeOf(new(Unmarshaler)).Elem() -var textMarshalerType = reflect.TypeOf(new(encoding.TextMarshaler)).Elem() -var textUnmarshalerType = reflect.TypeOf(new(encoding.TextUnmarshaler)).Elem() -var localDateType = reflect.TypeOf(LocalDate{}) -var localTimeType = reflect.TypeOf(LocalTime{}) -var localDateTimeType = reflect.TypeOf(LocalDateTime{}) -var mapStringInterfaceType = reflect.TypeOf(map[string]interface{}{}) - -// Check if the given marshal type maps to a Tree primitive -func isPrimitive(mtype reflect.Type) bool { - switch mtype.Kind() { - case reflect.Ptr: - return isPrimitive(mtype.Elem()) - case reflect.Bool: - return true - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return true - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - return true - case reflect.Float32, reflect.Float64: - return true - case reflect.String: - return true - case reflect.Struct: - return isTimeType(mtype) - default: - return false - } -} - -func isTimeType(mtype reflect.Type) bool { - return mtype == timeType || mtype == localDateType || mtype == localDateTimeType || mtype == localTimeType -} - -// Check if the given marshal type maps to a Tree slice or array -func isTreeSequence(mtype reflect.Type) bool { - switch mtype.Kind() { - case reflect.Ptr: - return isTreeSequence(mtype.Elem()) - case reflect.Slice, reflect.Array: - return isTree(mtype.Elem()) - default: - return false - } -} - -// Check if the given marshal type maps to a slice or array of a custom marshaler type -func isCustomMarshalerSequence(mtype reflect.Type) bool { - switch mtype.Kind() { - case reflect.Ptr: - return 
isCustomMarshalerSequence(mtype.Elem()) - case reflect.Slice, reflect.Array: - return isCustomMarshaler(mtype.Elem()) || isCustomMarshaler(reflect.New(mtype.Elem()).Type()) - default: - return false - } -} - -// Check if the given marshal type maps to a slice or array of a text marshaler type -func isTextMarshalerSequence(mtype reflect.Type) bool { - switch mtype.Kind() { - case reflect.Ptr: - return isTextMarshalerSequence(mtype.Elem()) - case reflect.Slice, reflect.Array: - return isTextMarshaler(mtype.Elem()) || isTextMarshaler(reflect.New(mtype.Elem()).Type()) - default: - return false - } -} - -// Check if the given marshal type maps to a non-Tree slice or array -func isOtherSequence(mtype reflect.Type) bool { - switch mtype.Kind() { - case reflect.Ptr: - return isOtherSequence(mtype.Elem()) - case reflect.Slice, reflect.Array: - return !isTreeSequence(mtype) - default: - return false - } -} - -// Check if the given marshal type maps to a Tree -func isTree(mtype reflect.Type) bool { - switch mtype.Kind() { - case reflect.Ptr: - return isTree(mtype.Elem()) - case reflect.Map: - return true - case reflect.Struct: - return !isPrimitive(mtype) - default: - return false - } -} - -func isCustomMarshaler(mtype reflect.Type) bool { - return mtype.Implements(marshalerType) -} - -func callCustomMarshaler(mval reflect.Value) ([]byte, error) { - return mval.Interface().(Marshaler).MarshalTOML() -} - -func isTextMarshaler(mtype reflect.Type) bool { - return mtype.Implements(textMarshalerType) && !isTimeType(mtype) -} - -func callTextMarshaler(mval reflect.Value) ([]byte, error) { - return mval.Interface().(encoding.TextMarshaler).MarshalText() -} - -func isCustomUnmarshaler(mtype reflect.Type) bool { - return mtype.Implements(unmarshalerType) -} - -func callCustomUnmarshaler(mval reflect.Value, tval interface{}) error { - return mval.Interface().(Unmarshaler).UnmarshalTOML(tval) -} - -func isTextUnmarshaler(mtype reflect.Type) bool { - return mtype.Implements(textUnmarshalerType) -} - -func callTextUnmarshaler(mval reflect.Value, text []byte) error { - return mval.Interface().(encoding.TextUnmarshaler).UnmarshalText(text) -} - -// Marshaler is the interface implemented by types that -// can marshal themselves into valid TOML. -type Marshaler interface { - MarshalTOML() ([]byte, error) -} - -// Unmarshaler is the interface implemented by types that -// can unmarshal a TOML description of themselves. -type Unmarshaler interface { - UnmarshalTOML(interface{}) error -} - -/* -Marshal returns the TOML encoding of v. Behavior is similar to the Go json -encoder, except that there is no concept of a Marshaler interface or MarshalTOML -function for sub-structs, and currently only definite types can be marshaled -(i.e. no `interface{}`). - -The following struct annotations are supported: - - toml:"Field" Overrides the field's name to output. - omitempty When set, empty values and groups are not emitted. - comment:"comment" Emits a # comment on the same line. This supports new lines. - commented:"true" Emits the value as commented. - -Note that pointers are automatically assigned the "omitempty" option, as TOML -explicitly does not handle null values (saying instead the label should be -dropped). 
- -Tree structural types and corresponding marshal types: - - *Tree (*)struct, (*)map[string]interface{} - []*Tree (*)[](*)struct, (*)[](*)map[string]interface{} - []interface{} (as interface{}) (*)[]primitive, (*)[]([]interface{}) - interface{} (*)primitive - -Tree primitive types and corresponding marshal types: - - uint64 uint, uint8-uint64, pointers to same - int64 int, int8-uint64, pointers to same - float64 float32, float64, pointers to same - string string, pointers to same - bool bool, pointers to same - time.LocalTime time.LocalTime{}, pointers to same - -For additional flexibility, use the Encoder API. -*/ -func Marshal(v interface{}) ([]byte, error) { - return NewEncoder(nil).marshal(v) -} - -// Encoder writes TOML values to an output stream. -type Encoder struct { - w io.Writer - encOpts - annotation - line int - col int - order MarshalOrder - promoteAnon bool - compactComments bool - indentation string -} - -// NewEncoder returns a new encoder that writes to w. -func NewEncoder(w io.Writer) *Encoder { - return &Encoder{ - w: w, - encOpts: encOptsDefaults, - annotation: annotationDefault, - line: 0, - col: 1, - order: OrderAlphabetical, - indentation: " ", - } -} - -// Encode writes the TOML encoding of v to the stream. -// -// See the documentation for Marshal for details. -func (e *Encoder) Encode(v interface{}) error { - b, err := e.marshal(v) - if err != nil { - return err - } - if _, err := e.w.Write(b); err != nil { - return err - } - return nil -} - -// QuoteMapKeys sets up the encoder to encode -// maps with string type keys with quoted TOML keys. -// -// This relieves the character limitations on map keys. -func (e *Encoder) QuoteMapKeys(v bool) *Encoder { - e.quoteMapKeys = v - return e -} - -// ArraysWithOneElementPerLine sets up the encoder to encode arrays -// with more than one element on multiple lines instead of one. -// -// For example: -// -// A = [1,2,3] -// -// Becomes -// -// A = [ -// 1, -// 2, -// 3, -// ] -func (e *Encoder) ArraysWithOneElementPerLine(v bool) *Encoder { - e.arraysOneElementPerLine = v - return e -} - -// Order allows to change in which order fields will be written to the output stream. -func (e *Encoder) Order(ord MarshalOrder) *Encoder { - e.order = ord - return e -} - -// Indentation allows to change indentation when marshalling. -func (e *Encoder) Indentation(indent string) *Encoder { - e.indentation = indent - return e -} - -// SetTagName allows changing default tag "toml" -func (e *Encoder) SetTagName(v string) *Encoder { - e.tag = v - return e -} - -// SetTagComment allows changing default tag "comment" -func (e *Encoder) SetTagComment(v string) *Encoder { - e.comment = v - return e -} - -// SetTagCommented allows changing default tag "commented" -func (e *Encoder) SetTagCommented(v string) *Encoder { - e.commented = v - return e -} - -// SetTagMultiline allows changing default tag "multiline" -func (e *Encoder) SetTagMultiline(v string) *Encoder { - e.multiline = v - return e -} - -// PromoteAnonymous allows to change how anonymous struct fields are marshaled. -// Usually, they are marshaled as if the inner exported fields were fields in -// the outer struct. However, if an anonymous struct field is given a name in -// its TOML tag, it is treated like a regular struct field with that name. -// rather than being anonymous. -// -// In case anonymous promotion is enabled, all anonymous structs are promoted -// and treated like regular struct fields. 
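Editorial aside, not part of the patch: this stretch of the deleted `marshal.go` defines the Encoder's fluent options (`Order`, `Indentation`, `QuoteMapKeys`, `ArraysWithOneElementPerLine`, the `SetTag*` family, and `PromoteAnonymous`, whose body follows below). A sketch of how they combine with the struct tags listed in the `Marshal` doc comment; the field names and values are hypothetical:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/pelletier/go-toml"
)

type Config struct {
	Host string   `toml:"host" comment:"server address"`
	Port int      `toml:"port"`
	Tags []string `toml:"tags"`
}

func main() {
	cfg := Config{Host: "127.0.0.1", Port: 8080, Tags: []string{"a", "b"}}

	var buf bytes.Buffer
	enc := toml.NewEncoder(&buf).
		Order(toml.OrderPreserve).        // keep struct field order
		ArraysWithOneElementPerLine(true) // one array element per line

	if err := enc.Encode(cfg); err != nil {
		panic(err)
	}
	// Roughly:
	//   # server address
	//   host = "127.0.0.1"
	//   port = 8080
	//   tags = [ "a", "b" ] spread over one element per line
	fmt.Print(buf.String())
}
```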
-func (e *Encoder) PromoteAnonymous(promote bool) *Encoder { - e.promoteAnon = promote - return e -} - -// CompactComments removes the new line before each comment in the tree. -func (e *Encoder) CompactComments(cc bool) *Encoder { - e.compactComments = cc - return e -} - -func (e *Encoder) marshal(v interface{}) ([]byte, error) { - // Check if indentation is valid - for _, char := range e.indentation { - if !isSpace(char) { - return []byte{}, fmt.Errorf("invalid indentation: must only contains space or tab characters") - } - } - - mtype := reflect.TypeOf(v) - if mtype == nil { - return []byte{}, errors.New("nil cannot be marshaled to TOML") - } - - switch mtype.Kind() { - case reflect.Struct, reflect.Map: - case reflect.Ptr: - if mtype.Elem().Kind() != reflect.Struct { - return []byte{}, errors.New("Only pointer to struct can be marshaled to TOML") - } - if reflect.ValueOf(v).IsNil() { - return []byte{}, errors.New("nil pointer cannot be marshaled to TOML") - } - default: - return []byte{}, errors.New("Only a struct or map can be marshaled to TOML") - } - - sval := reflect.ValueOf(v) - if isCustomMarshaler(mtype) { - return callCustomMarshaler(sval) - } - if isTextMarshaler(mtype) { - return callTextMarshaler(sval) - } - t, err := e.valueToTree(mtype, sval) - if err != nil { - return []byte{}, err - } - - var buf bytes.Buffer - _, err = t.writeToOrdered(&buf, "", "", 0, e.arraysOneElementPerLine, e.order, e.indentation, e.compactComments, false) - - return buf.Bytes(), err -} - -// Create next tree with a position based on Encoder.line -func (e *Encoder) nextTree() *Tree { - return newTreeWithPosition(Position{Line: e.line, Col: 1}) -} - -// Convert given marshal struct or map value to toml tree -func (e *Encoder) valueToTree(mtype reflect.Type, mval reflect.Value) (*Tree, error) { - if mtype.Kind() == reflect.Ptr { - return e.valueToTree(mtype.Elem(), mval.Elem()) - } - tval := e.nextTree() - switch mtype.Kind() { - case reflect.Struct: - switch mval.Interface().(type) { - case Tree: - reflect.ValueOf(tval).Elem().Set(mval) - default: - for i := 0; i < mtype.NumField(); i++ { - mtypef, mvalf := mtype.Field(i), mval.Field(i) - opts := tomlOptions(mtypef, e.annotation) - if opts.include && ((mtypef.Type.Kind() != reflect.Interface && !opts.omitempty) || !isZero(mvalf)) { - val, err := e.valueToToml(mtypef.Type, mvalf) - if err != nil { - return nil, err - } - if tree, ok := val.(*Tree); ok && mtypef.Anonymous && !opts.nameFromTag && !e.promoteAnon { - e.appendTree(tval, tree) - } else { - val = e.wrapTomlValue(val, tval) - tval.SetPathWithOptions([]string{opts.name}, SetOptions{ - Comment: opts.comment, - Commented: opts.commented, - Multiline: opts.multiline, - Literal: opts.literal, - }, val) - } - } - } - } - case reflect.Map: - keys := mval.MapKeys() - if e.order == OrderPreserve && len(keys) > 0 { - // Sorting []reflect.Value is not straight forward. - // - // OrderPreserve will support deterministic results when string is used - // as the key to maps. 
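Reviewer note: the Encoder option setters above all return *Encoder, so they chain. A minimal sketch, assuming the vendored go-toml v1; the cfg map is illustrative, and any struct or map marshals the same way.

	package main

	import (
		"bytes"
		"fmt"

		toml "github.com/pelletier/go-toml"
	)

	func main() {
		cfg := map[string]interface{}{
			"name":  "candy",
			"ports": []int64{80, 443},
		}

		var buf bytes.Buffer
		enc := toml.NewEncoder(&buf).
			Order(toml.OrderPreserve). // deterministic output for string map keys
			Indentation("  ").         // validated by marshal(): spaces or tabs only
			ArraysWithOneElementPerLine(true)
		if err := enc.Encode(cfg); err != nil {
			panic(err)
		}
		fmt.Print(buf.String())
	}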
- typ := keys[0].Type() - kind := keys[0].Kind() - if kind == reflect.String { - ikeys := make([]string, len(keys)) - for i := range keys { - ikeys[i] = keys[i].Interface().(string) - } - sort.Strings(ikeys) - for i := range ikeys { - keys[i] = reflect.ValueOf(ikeys[i]).Convert(typ) - } - } - } - for _, key := range keys { - mvalf := mval.MapIndex(key) - if (mtype.Elem().Kind() == reflect.Ptr || mtype.Elem().Kind() == reflect.Interface) && mvalf.IsNil() { - continue - } - val, err := e.valueToToml(mtype.Elem(), mvalf) - if err != nil { - return nil, err - } - val = e.wrapTomlValue(val, tval) - if e.quoteMapKeys { - keyStr, err := tomlValueStringRepresentation(key.String(), "", "", e.order, e.arraysOneElementPerLine) - if err != nil { - return nil, err - } - tval.SetPath([]string{keyStr}, val) - } else { - tval.SetPath([]string{key.String()}, val) - } - } - } - return tval, nil -} - -// Convert given marshal slice to slice of Toml trees -func (e *Encoder) valueToTreeSlice(mtype reflect.Type, mval reflect.Value) ([]*Tree, error) { - tval := make([]*Tree, mval.Len(), mval.Len()) - for i := 0; i < mval.Len(); i++ { - val, err := e.valueToTree(mtype.Elem(), mval.Index(i)) - if err != nil { - return nil, err - } - tval[i] = val - } - return tval, nil -} - -// Convert given marshal slice to slice of toml values -func (e *Encoder) valueToOtherSlice(mtype reflect.Type, mval reflect.Value) (interface{}, error) { - tval := make([]interface{}, mval.Len(), mval.Len()) - for i := 0; i < mval.Len(); i++ { - val, err := e.valueToToml(mtype.Elem(), mval.Index(i)) - if err != nil { - return nil, err - } - tval[i] = val - } - return tval, nil -} - -// Convert given marshal value to toml value -func (e *Encoder) valueToToml(mtype reflect.Type, mval reflect.Value) (interface{}, error) { - if mtype.Kind() == reflect.Ptr { - switch { - case isCustomMarshaler(mtype): - return callCustomMarshaler(mval) - case isTextMarshaler(mtype): - b, err := callTextMarshaler(mval) - return string(b), err - default: - return e.valueToToml(mtype.Elem(), mval.Elem()) - } - } - if mtype.Kind() == reflect.Interface { - return e.valueToToml(mval.Elem().Type(), mval.Elem()) - } - switch { - case isCustomMarshaler(mtype): - return callCustomMarshaler(mval) - case isTextMarshaler(mtype): - b, err := callTextMarshaler(mval) - return string(b), err - case isTree(mtype): - return e.valueToTree(mtype, mval) - case isOtherSequence(mtype), isCustomMarshalerSequence(mtype), isTextMarshalerSequence(mtype): - return e.valueToOtherSlice(mtype, mval) - case isTreeSequence(mtype): - return e.valueToTreeSlice(mtype, mval) - default: - switch mtype.Kind() { - case reflect.Bool: - return mval.Bool(), nil - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - if mtype.Kind() == reflect.Int64 && mtype == reflect.TypeOf(time.Duration(1)) { - return fmt.Sprint(mval), nil - } - return mval.Int(), nil - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - return mval.Uint(), nil - case reflect.Float32, reflect.Float64: - return mval.Float(), nil - case reflect.String: - return mval.String(), nil - case reflect.Struct: - return mval.Interface(), nil - default: - return nil, fmt.Errorf("Marshal can't handle %v(%v)", mtype, mtype.Kind()) - } - } -} - -func (e *Encoder) appendTree(t, o *Tree) error { - for key, value := range o.values { - if _, ok := t.values[key]; ok { - continue - } - if tomlValue, ok := value.(*tomlValue); ok { - tomlValue.position.Col = t.position.Col - } - t.values[key] = value - } 
- return nil -} - -// Create a toml value with the current line number as the position line -func (e *Encoder) wrapTomlValue(val interface{}, parent *Tree) interface{} { - _, isTree := val.(*Tree) - _, isTreeS := val.([]*Tree) - if isTree || isTreeS { - e.line++ - return val - } - - ret := &tomlValue{ - value: val, - position: Position{ - e.line, - parent.position.Col, - }, - } - e.line++ - return ret -} - -// Unmarshal attempts to unmarshal the Tree into a Go struct pointed by v. -// Neither Unmarshaler interfaces nor UnmarshalTOML functions are supported for -// sub-structs, and only definite types can be unmarshaled. -func (t *Tree) Unmarshal(v interface{}) error { - d := Decoder{tval: t, tagName: tagFieldName} - return d.unmarshal(v) -} - -// Marshal returns the TOML encoding of Tree. -// See Marshal() documentation for types mapping table. -func (t *Tree) Marshal() ([]byte, error) { - var buf bytes.Buffer - _, err := t.WriteTo(&buf) - if err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -// Unmarshal parses the TOML-encoded data and stores the result in the value -// pointed to by v. Behavior is similar to the Go json encoder, except that there -// is no concept of an Unmarshaler interface or UnmarshalTOML function for -// sub-structs, and currently only definite types can be unmarshaled to (i.e. no -// `interface{}`). -// -// The following struct annotations are supported: -// -// toml:"Field" Overrides the field's name to map to. -// default:"foo" Provides a default value. -// -// For default values, only fields of the following types are supported: -// * string -// * bool -// * int -// * int64 -// * float64 -// -// See Marshal() documentation for types mapping table. -func Unmarshal(data []byte, v interface{}) error { - t, err := LoadReader(bytes.NewReader(data)) - if err != nil { - return err - } - return t.Unmarshal(v) -} - -// Decoder reads and decodes TOML values from an input stream. -type Decoder struct { - r io.Reader - tval *Tree - encOpts - tagName string - strict bool - visitor visitorState -} - -// NewDecoder returns a new decoder that reads from r. -func NewDecoder(r io.Reader) *Decoder { - return &Decoder{ - r: r, - encOpts: encOptsDefaults, - tagName: tagFieldName, - } -} - -// Decode reads a TOML-encoded value from it's input -// and unmarshals it in the value pointed at by v. -// -// See the documentation for Marshal for details. -func (d *Decoder) Decode(v interface{}) error { - var err error - d.tval, err = LoadReader(d.r) - if err != nil { - return err - } - return d.unmarshal(v) -} - -// SetTagName allows changing default tag "toml" -func (d *Decoder) SetTagName(v string) *Decoder { - d.tagName = v - return d -} - -// Strict allows changing to strict decoding. Any fields that are found in the -// input data and do not have a corresponding struct member cause an error. 
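Reviewer note: a hedged sketch of strict decoding combined with the default tag documented above, against the vendored go-toml v1. The config type and its retries field are invented for illustration.

	package main

	import (
		"fmt"
		"strings"

		toml "github.com/pelletier/go-toml"
	)

	// config is invented for illustration.
	type config struct {
		Name    string `toml:"name"`
		Retries int    `toml:"retries" default:"3"`
	}

	func main() {
		var c config
		err := toml.NewDecoder(strings.NewReader(`name = "candy"`)).
			Strict(true). // any undecoded input key becomes an error
			Decode(&c)
		if err != nil {
			panic(err)
		}
		fmt.Println(c.Name, c.Retries) // candy 3 (default applied)
	}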
-func (d *Decoder) Strict(strict bool) *Decoder { - d.strict = strict - return d -} - -func (d *Decoder) unmarshal(v interface{}) error { - mtype := reflect.TypeOf(v) - if mtype == nil { - return errors.New("nil cannot be unmarshaled from TOML") - } - if mtype.Kind() != reflect.Ptr { - return errors.New("only a pointer to struct or map can be unmarshaled from TOML") - } - - elem := mtype.Elem() - - switch elem.Kind() { - case reflect.Struct, reflect.Map: - case reflect.Interface: - elem = mapStringInterfaceType - default: - return errors.New("only a pointer to struct or map can be unmarshaled from TOML") - } - - if reflect.ValueOf(v).IsNil() { - return errors.New("nil pointer cannot be unmarshaled from TOML") - } - - vv := reflect.ValueOf(v).Elem() - - if d.strict { - d.visitor = newVisitorState(d.tval) - } - - sval, err := d.valueFromTree(elem, d.tval, &vv) - if err != nil { - return err - } - if err := d.visitor.validate(); err != nil { - return err - } - reflect.ValueOf(v).Elem().Set(sval) - return nil -} - -// Convert toml tree to marshal struct or map, using marshal type. When mval1 -// is non-nil, merge fields into the given value instead of allocating a new one. -func (d *Decoder) valueFromTree(mtype reflect.Type, tval *Tree, mval1 *reflect.Value) (reflect.Value, error) { - if mtype.Kind() == reflect.Ptr { - return d.unwrapPointer(mtype, tval, mval1) - } - - // Check if pointer to value implements the Unmarshaler interface. - if mvalPtr := reflect.New(mtype); isCustomUnmarshaler(mvalPtr.Type()) { - d.visitor.visitAll() - - if tval == nil { - return mvalPtr.Elem(), nil - } - - if err := callCustomUnmarshaler(mvalPtr, tval.ToMap()); err != nil { - return reflect.ValueOf(nil), fmt.Errorf("unmarshal toml: %v", err) - } - return mvalPtr.Elem(), nil - } - - var mval reflect.Value - switch mtype.Kind() { - case reflect.Struct: - if mval1 != nil { - mval = *mval1 - } else { - mval = reflect.New(mtype).Elem() - } - - switch mval.Interface().(type) { - case Tree: - mval.Set(reflect.ValueOf(tval).Elem()) - default: - for i := 0; i < mtype.NumField(); i++ { - mtypef := mtype.Field(i) - an := annotation{tag: d.tagName} - opts := tomlOptions(mtypef, an) - if !opts.include { - continue - } - baseKey := opts.name - keysToTry := []string{ - baseKey, - strings.ToLower(baseKey), - strings.ToTitle(baseKey), - strings.ToLower(string(baseKey[0])) + baseKey[1:], - } - - found := false - if tval != nil { - for _, key := range keysToTry { - exists := tval.HasPath([]string{key}) - if !exists { - continue - } - - d.visitor.push(key) - val := tval.GetPath([]string{key}) - fval := mval.Field(i) - mvalf, err := d.valueFromToml(mtypef.Type, val, &fval) - if err != nil { - return mval, formatError(err, tval.GetPositionPath([]string{key})) - } - mval.Field(i).Set(mvalf) - found = true - d.visitor.pop() - break - } - } - - if !found && opts.defaultValue != "" { - mvalf := mval.Field(i) - var val interface{} - var err error - switch mvalf.Kind() { - case reflect.String: - val = opts.defaultValue - case reflect.Bool: - val, err = strconv.ParseBool(opts.defaultValue) - case reflect.Uint: - val, err = strconv.ParseUint(opts.defaultValue, 10, 0) - case reflect.Uint8: - val, err = strconv.ParseUint(opts.defaultValue, 10, 8) - case reflect.Uint16: - val, err = strconv.ParseUint(opts.defaultValue, 10, 16) - case reflect.Uint32: - val, err = strconv.ParseUint(opts.defaultValue, 10, 32) - case reflect.Uint64: - val, err = strconv.ParseUint(opts.defaultValue, 10, 64) - case reflect.Int: - val, err = 
strconv.ParseInt(opts.defaultValue, 10, 0) - case reflect.Int8: - val, err = strconv.ParseInt(opts.defaultValue, 10, 8) - case reflect.Int16: - val, err = strconv.ParseInt(opts.defaultValue, 10, 16) - case reflect.Int32: - val, err = strconv.ParseInt(opts.defaultValue, 10, 32) - case reflect.Int64: - // Check if the provided number has a non-numeric extension. - var hasExtension bool - if len(opts.defaultValue) > 0 { - lastChar := opts.defaultValue[len(opts.defaultValue)-1] - if lastChar < '0' || lastChar > '9' { - hasExtension = true - } - } - // If the value is a time.Duration with extension, parse as duration. - // If the value is an int64 or a time.Duration without extension, parse as number. - if hasExtension && mvalf.Type().String() == "time.Duration" { - val, err = time.ParseDuration(opts.defaultValue) - } else { - val, err = strconv.ParseInt(opts.defaultValue, 10, 64) - } - case reflect.Float32: - val, err = strconv.ParseFloat(opts.defaultValue, 32) - case reflect.Float64: - val, err = strconv.ParseFloat(opts.defaultValue, 64) - default: - return mvalf, fmt.Errorf("unsupported field type for default option") - } - - if err != nil { - return mvalf, err - } - mvalf.Set(reflect.ValueOf(val).Convert(mvalf.Type())) - } - - // save the old behavior above and try to check structs - if !found && opts.defaultValue == "" && mtypef.Type.Kind() == reflect.Struct { - tmpTval := tval - if !mtypef.Anonymous { - tmpTval = nil - } - fval := mval.Field(i) - v, err := d.valueFromTree(mtypef.Type, tmpTval, &fval) - if err != nil { - return v, err - } - mval.Field(i).Set(v) - } - } - } - case reflect.Map: - mval = reflect.MakeMap(mtype) - for _, key := range tval.Keys() { - d.visitor.push(key) - // TODO: path splits key - val := tval.GetPath([]string{key}) - mvalf, err := d.valueFromToml(mtype.Elem(), val, nil) - if err != nil { - return mval, formatError(err, tval.GetPositionPath([]string{key})) - } - mval.SetMapIndex(reflect.ValueOf(key).Convert(mtype.Key()), mvalf) - d.visitor.pop() - } - } - return mval, nil -} - -// Convert toml value to marshal struct/map slice, using marshal type -func (d *Decoder) valueFromTreeSlice(mtype reflect.Type, tval []*Tree) (reflect.Value, error) { - mval, err := makeSliceOrArray(mtype, len(tval)) - if err != nil { - return mval, err - } - - for i := 0; i < len(tval); i++ { - d.visitor.push(strconv.Itoa(i)) - val, err := d.valueFromTree(mtype.Elem(), tval[i], nil) - if err != nil { - return mval, err - } - mval.Index(i).Set(val) - d.visitor.pop() - } - return mval, nil -} - -// Convert toml value to marshal primitive slice, using marshal type -func (d *Decoder) valueFromOtherSlice(mtype reflect.Type, tval []interface{}) (reflect.Value, error) { - mval, err := makeSliceOrArray(mtype, len(tval)) - if err != nil { - return mval, err - } - - for i := 0; i < len(tval); i++ { - val, err := d.valueFromToml(mtype.Elem(), tval[i], nil) - if err != nil { - return mval, err - } - mval.Index(i).Set(val) - } - return mval, nil -} - -// Convert toml value to marshal primitive slice, using marshal type -func (d *Decoder) valueFromOtherSliceI(mtype reflect.Type, tval interface{}) (reflect.Value, error) { - val := reflect.ValueOf(tval) - length := val.Len() - - mval, err := makeSliceOrArray(mtype, length) - if err != nil { - return mval, err - } - - for i := 0; i < length; i++ { - val, err := d.valueFromToml(mtype.Elem(), val.Index(i).Interface(), nil) - if err != nil { - return mval, err - } - mval.Index(i).Set(val) - } - return mval, nil -} - -// Create a new slice or a new array 
with specified length -func makeSliceOrArray(mtype reflect.Type, tLength int) (reflect.Value, error) { - var mval reflect.Value - switch mtype.Kind() { - case reflect.Slice: - mval = reflect.MakeSlice(mtype, tLength, tLength) - case reflect.Array: - mval = reflect.New(reflect.ArrayOf(mtype.Len(), mtype.Elem())).Elem() - if tLength > mtype.Len() { - return mval, fmt.Errorf("unmarshal: TOML array length (%v) exceeds destination array length (%v)", tLength, mtype.Len()) - } - } - return mval, nil -} - -// Convert toml value to marshal value, using marshal type. When mval1 is non-nil -// and the given type is a struct value, merge fields into it. -func (d *Decoder) valueFromToml(mtype reflect.Type, tval interface{}, mval1 *reflect.Value) (reflect.Value, error) { - if mtype.Kind() == reflect.Ptr { - return d.unwrapPointer(mtype, tval, mval1) - } - - switch t := tval.(type) { - case *Tree: - var mval11 *reflect.Value - if mtype.Kind() == reflect.Struct { - mval11 = mval1 - } - - if isTree(mtype) { - return d.valueFromTree(mtype, t, mval11) - } - - if mtype.Kind() == reflect.Interface { - if mval1 == nil || mval1.IsNil() { - return d.valueFromTree(reflect.TypeOf(map[string]interface{}{}), t, nil) - } else { - return d.valueFromToml(mval1.Elem().Type(), t, nil) - } - } - - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to a tree", tval, tval) - case []*Tree: - if isTreeSequence(mtype) { - return d.valueFromTreeSlice(mtype, t) - } - if mtype.Kind() == reflect.Interface { - if mval1 == nil || mval1.IsNil() { - return d.valueFromTreeSlice(reflect.TypeOf([]map[string]interface{}{}), t) - } else { - ival := mval1.Elem() - return d.valueFromToml(mval1.Elem().Type(), t, &ival) - } - } - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to trees", tval, tval) - case []interface{}: - d.visitor.visit() - if isOtherSequence(mtype) { - return d.valueFromOtherSlice(mtype, t) - } - if mtype.Kind() == reflect.Interface { - if mval1 == nil || mval1.IsNil() { - return d.valueFromOtherSlice(reflect.TypeOf([]interface{}{}), t) - } else { - ival := mval1.Elem() - return d.valueFromToml(mval1.Elem().Type(), t, &ival) - } - } - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to a slice", tval, tval) - default: - d.visitor.visit() - mvalPtr := reflect.New(mtype) - - // Check if pointer to value implements the Unmarshaler interface. - if isCustomUnmarshaler(mvalPtr.Type()) { - if err := callCustomUnmarshaler(mvalPtr, tval); err != nil { - return reflect.ValueOf(nil), fmt.Errorf("unmarshal toml: %v", err) - } - return mvalPtr.Elem(), nil - } - - // Check if pointer to value implements the encoding.TextUnmarshaler. 
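Reviewer note: for non-tree values the decoder falls back to encoding.TextUnmarshaler (except for time types), per the branch that follows. A minimal sketch with an invented level type:

	package main

	import (
		"fmt"
		"strings"

		toml "github.com/pelletier/go-toml"
	)

	// level is an invented type; go-toml feeds it the raw TOML string
	// through encoding.TextUnmarshaler.
	type level int

	func (l *level) UnmarshalText(text []byte) error {
		switch strings.ToLower(string(text)) {
		case "debug":
			*l = 0
		case "info":
			*l = 1
		default:
			return fmt.Errorf("unknown level %q", text)
		}
		return nil
	}

	func main() {
		var cfg struct {
			Level level `toml:"level"`
		}
		if err := toml.Unmarshal([]byte(`level = "info"`), &cfg); err != nil {
			panic(err)
		}
		fmt.Println(cfg.Level) // 1
	}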
- if isTextUnmarshaler(mvalPtr.Type()) && !isTimeType(mtype) { - if err := d.unmarshalText(tval, mvalPtr); err != nil { - return reflect.ValueOf(nil), fmt.Errorf("unmarshal text: %v", err) - } - return mvalPtr.Elem(), nil - } - - switch mtype.Kind() { - case reflect.Bool, reflect.Struct: - val := reflect.ValueOf(tval) - - switch val.Type() { - case localDateType: - localDate := val.Interface().(LocalDate) - switch mtype { - case timeType: - return reflect.ValueOf(time.Date(localDate.Year, localDate.Month, localDate.Day, 0, 0, 0, 0, time.Local)), nil - } - case localDateTimeType: - localDateTime := val.Interface().(LocalDateTime) - switch mtype { - case timeType: - return reflect.ValueOf(time.Date( - localDateTime.Date.Year, - localDateTime.Date.Month, - localDateTime.Date.Day, - localDateTime.Time.Hour, - localDateTime.Time.Minute, - localDateTime.Time.Second, - localDateTime.Time.Nanosecond, - time.Local)), nil - } - } - - // if this passes for when mtype is reflect.Struct, tval is a time.LocalTime - if !val.Type().ConvertibleTo(mtype) { - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) - } - - return val.Convert(mtype), nil - case reflect.String: - val := reflect.ValueOf(tval) - // stupidly, int64 is convertible to string. So special case this. - if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Int64 { - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) - } - - return val.Convert(mtype), nil - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - val := reflect.ValueOf(tval) - if mtype.Kind() == reflect.Int64 && mtype == reflect.TypeOf(time.Duration(1)) && val.Kind() == reflect.String { - d, err := time.ParseDuration(val.String()) - if err != nil { - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v. 
%s", tval, tval, mtype.String(), err) - } - return reflect.ValueOf(d), nil - } - if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Float64 { - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) - } - if reflect.Indirect(reflect.New(mtype)).OverflowInt(val.Convert(reflect.TypeOf(int64(0))).Int()) { - return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String()) - } - - return val.Convert(mtype), nil - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - val := reflect.ValueOf(tval) - if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Float64 { - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) - } - - if val.Convert(reflect.TypeOf(int(1))).Int() < 0 { - return reflect.ValueOf(nil), fmt.Errorf("%v(%T) is negative so does not fit in %v", tval, tval, mtype.String()) - } - if reflect.Indirect(reflect.New(mtype)).OverflowUint(val.Convert(reflect.TypeOf(uint64(0))).Uint()) { - return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String()) - } - - return val.Convert(mtype), nil - case reflect.Float32, reflect.Float64: - val := reflect.ValueOf(tval) - if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Int64 { - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) - } - if reflect.Indirect(reflect.New(mtype)).OverflowFloat(val.Convert(reflect.TypeOf(float64(0))).Float()) { - return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String()) - } - - return val.Convert(mtype), nil - case reflect.Interface: - if mval1 == nil || mval1.IsNil() { - return reflect.ValueOf(tval), nil - } else { - ival := mval1.Elem() - return d.valueFromToml(mval1.Elem().Type(), t, &ival) - } - case reflect.Slice, reflect.Array: - if isOtherSequence(mtype) && isOtherSequence(reflect.TypeOf(t)) { - return d.valueFromOtherSliceI(mtype, t) - } - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v(%v)", tval, tval, mtype, mtype.Kind()) - default: - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v(%v)", tval, tval, mtype, mtype.Kind()) - } - } -} - -func (d *Decoder) unwrapPointer(mtype reflect.Type, tval interface{}, mval1 *reflect.Value) (reflect.Value, error) { - var melem *reflect.Value - - if mval1 != nil && !mval1.IsNil() && (mtype.Elem().Kind() == reflect.Struct || mtype.Elem().Kind() == reflect.Interface) { - elem := mval1.Elem() - melem = &elem - } - - val, err := d.valueFromToml(mtype.Elem(), tval, melem) - if err != nil { - return reflect.ValueOf(nil), err - } - mval := reflect.New(mtype.Elem()) - mval.Elem().Set(val) - return mval, nil -} - -func (d *Decoder) unmarshalText(tval interface{}, mval reflect.Value) error { - var buf bytes.Buffer - fmt.Fprint(&buf, tval) - return callTextUnmarshaler(mval, buf.Bytes()) -} - -func tomlOptions(vf reflect.StructField, an annotation) tomlOpts { - tag := vf.Tag.Get(an.tag) - parse := strings.Split(tag, ",") - var comment string - if c := vf.Tag.Get(an.comment); c != "" { - comment = c - } - commented, _ := strconv.ParseBool(vf.Tag.Get(an.commented)) - multiline, _ := strconv.ParseBool(vf.Tag.Get(an.multiline)) - literal, _ := strconv.ParseBool(vf.Tag.Get(an.literal)) - defaultValue := vf.Tag.Get(tagDefault) - result := tomlOpts{ - name: vf.Name, - nameFromTag: false, - comment: comment, - commented: commented, - multiline: multiline, 
- literal: literal, - include: true, - omitempty: false, - defaultValue: defaultValue, - } - if parse[0] != "" { - if parse[0] == "-" && len(parse) == 1 { - result.include = false - } else { - result.name = strings.Trim(parse[0], " ") - result.nameFromTag = true - } - } - if vf.PkgPath != "" { - result.include = false - } - if len(parse) > 1 && strings.Trim(parse[1], " ") == "omitempty" { - result.omitempty = true - } - if vf.Type.Kind() == reflect.Ptr { - result.omitempty = true - } - return result -} - -func isZero(val reflect.Value) bool { - switch val.Type().Kind() { - case reflect.Slice, reflect.Array, reflect.Map: - return val.Len() == 0 - default: - return reflect.DeepEqual(val.Interface(), reflect.Zero(val.Type()).Interface()) - } -} - -func formatError(err error, pos Position) error { - if err.Error()[0] == '(' { // Error already contains position information - return err - } - return fmt.Errorf("%s: %s", pos, err) -} - -// visitorState keeps track of which keys were unmarshaled. -type visitorState struct { - tree *Tree - path []string - keys map[string]struct{} - active bool -} - -func newVisitorState(tree *Tree) visitorState { - path, result := []string{}, map[string]struct{}{} - insertKeys(path, result, tree) - return visitorState{ - tree: tree, - path: path[:0], - keys: result, - active: true, - } -} - -func (s *visitorState) push(key string) { - if s.active { - s.path = append(s.path, key) - } -} - -func (s *visitorState) pop() { - if s.active { - s.path = s.path[:len(s.path)-1] - } -} - -func (s *visitorState) visit() { - if s.active { - delete(s.keys, strings.Join(s.path, ".")) - } -} - -func (s *visitorState) visitAll() { - if s.active { - for k := range s.keys { - if strings.HasPrefix(k, strings.Join(s.path, ".")) { - delete(s.keys, k) - } - } - } -} - -func (s *visitorState) validate() error { - if !s.active { - return nil - } - undecoded := make([]string, 0, len(s.keys)) - for key := range s.keys { - undecoded = append(undecoded, key) - } - sort.Strings(undecoded) - if len(undecoded) > 0 { - return fmt.Errorf("undecoded keys: %q", undecoded) - } - return nil -} - -func insertKeys(path []string, m map[string]struct{}, tree *Tree) { - for k, v := range tree.values { - switch node := v.(type) { - case []*Tree: - for i, item := range node { - insertKeys(append(path, k, strconv.Itoa(i)), m, item) - } - case *Tree: - insertKeys(append(path, k), m, node) - case *tomlValue: - m[strings.Join(append(path, k), ".")] = struct{}{} - } - } -} diff --git a/vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_test.toml b/vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_test.toml deleted file mode 100644 index 792b72ed..00000000 --- a/vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_test.toml +++ /dev/null @@ -1,39 +0,0 @@ -title = "TOML Marshal Testing" - -[basic_lists] - floats = [12.3,45.6,78.9] - bools = [true,false,true] - dates = [1979-05-27T07:32:00Z,1980-05-27T07:32:00Z] - ints = [8001,8001,8002] - uints = [5002,5003] - strings = ["One","Two","Three"] - -[[subdocptrs]] - name = "Second" - -[basic_map] - one = "one" - two = "two" - -[subdoc] - - [subdoc.second] - name = "Second" - - [subdoc.first] - name = "First" - -[basic] - uint = 5001 - bool = true - float = 123.4 - float64 = 123.456782132399 - int = 5000 - string = "Bite me" - date = 1979-05-27T07:32:00Z - -[[subdoclist]] - name = "List.First" - -[[subdoclist]] - name = "List.Second" diff --git a/vendor/github.com/pelletier/go-toml/marshal_test.toml 
b/vendor/github.com/pelletier/go-toml/marshal_test.toml deleted file mode 100644 index ba5e110b..00000000 --- a/vendor/github.com/pelletier/go-toml/marshal_test.toml +++ /dev/null @@ -1,39 +0,0 @@ -title = "TOML Marshal Testing" - -[basic] - bool = true - date = 1979-05-27T07:32:00Z - float = 123.4 - float64 = 123.456782132399 - int = 5000 - string = "Bite me" - uint = 5001 - -[basic_lists] - bools = [true,false,true] - dates = [1979-05-27T07:32:00Z,1980-05-27T07:32:00Z] - floats = [12.3,45.6,78.9] - ints = [8001,8001,8002] - strings = ["One","Two","Three"] - uints = [5002,5003] - -[basic_map] - one = "one" - two = "two" - -[subdoc] - - [subdoc.first] - name = "First" - - [subdoc.second] - name = "Second" - -[[subdoclist]] - name = "List.First" - -[[subdoclist]] - name = "List.Second" - -[[subdocptrs]] - name = "Second" diff --git a/vendor/github.com/pelletier/go-toml/parser.go b/vendor/github.com/pelletier/go-toml/parser.go deleted file mode 100644 index f5e1a44f..00000000 --- a/vendor/github.com/pelletier/go-toml/parser.go +++ /dev/null @@ -1,508 +0,0 @@ -// TOML Parser. - -package toml - -import ( - "errors" - "fmt" - "math" - "reflect" - "strconv" - "strings" - "time" -) - -type tomlParser struct { - flowIdx int - flow []token - tree *Tree - currentTable []string - seenTableKeys []string -} - -type tomlParserStateFn func() tomlParserStateFn - -// Formats and panics an error message based on a token -func (p *tomlParser) raiseError(tok *token, msg string, args ...interface{}) { - panic(tok.Position.String() + ": " + fmt.Sprintf(msg, args...)) -} - -func (p *tomlParser) run() { - for state := p.parseStart; state != nil; { - state = state() - } -} - -func (p *tomlParser) peek() *token { - if p.flowIdx >= len(p.flow) { - return nil - } - return &p.flow[p.flowIdx] -} - -func (p *tomlParser) assume(typ tokenType) { - tok := p.getToken() - if tok == nil { - p.raiseError(tok, "was expecting token %s, but token stream is empty", tok) - } - if tok.typ != typ { - p.raiseError(tok, "was expecting token %s, but got %s instead", typ, tok) - } -} - -func (p *tomlParser) getToken() *token { - tok := p.peek() - if tok == nil { - return nil - } - p.flowIdx++ - return tok -} - -func (p *tomlParser) parseStart() tomlParserStateFn { - tok := p.peek() - - // end of stream, parsing is finished - if tok == nil { - return nil - } - - switch tok.typ { - case tokenDoubleLeftBracket: - return p.parseGroupArray - case tokenLeftBracket: - return p.parseGroup - case tokenKey: - return p.parseAssign - case tokenEOF: - return nil - case tokenError: - p.raiseError(tok, "parsing error: %s", tok.String()) - default: - p.raiseError(tok, "unexpected token %s", tok.typ) - } - return nil -} - -func (p *tomlParser) parseGroupArray() tomlParserStateFn { - startToken := p.getToken() // discard the [[ - key := p.getToken() - if key.typ != tokenKeyGroupArray { - p.raiseError(key, "unexpected token %s, was expecting a table array key", key) - } - - // get or create table array element at the indicated part in the path - keys, err := parseKey(key.val) - if err != nil { - p.raiseError(key, "invalid table array key: %s", err) - } - p.tree.createSubTree(keys[:len(keys)-1], startToken.Position) // create parent entries - destTree := p.tree.GetPath(keys) - var array []*Tree - if destTree == nil { - array = make([]*Tree, 0) - } else if target, ok := destTree.([]*Tree); ok && target != nil { - array = destTree.([]*Tree) - } else { - p.raiseError(key, "key %s is already assigned and not of type table array", key) - } - p.currentTable = 
keys - - // add a new tree to the end of the table array - newTree := newTree() - newTree.position = startToken.Position - array = append(array, newTree) - p.tree.SetPath(p.currentTable, array) - - // remove all keys that were children of this table array - prefix := key.val + "." - found := false - for ii := 0; ii < len(p.seenTableKeys); { - tableKey := p.seenTableKeys[ii] - if strings.HasPrefix(tableKey, prefix) { - p.seenTableKeys = append(p.seenTableKeys[:ii], p.seenTableKeys[ii+1:]...) - } else { - found = (tableKey == key.val) - ii++ - } - } - - // keep this key name from use by other kinds of assignments - if !found { - p.seenTableKeys = append(p.seenTableKeys, key.val) - } - - // move to next parser state - p.assume(tokenDoubleRightBracket) - return p.parseStart -} - -func (p *tomlParser) parseGroup() tomlParserStateFn { - startToken := p.getToken() // discard the [ - key := p.getToken() - if key.typ != tokenKeyGroup { - p.raiseError(key, "unexpected token %s, was expecting a table key", key) - } - for _, item := range p.seenTableKeys { - if item == key.val { - p.raiseError(key, "duplicated tables") - } - } - - p.seenTableKeys = append(p.seenTableKeys, key.val) - keys, err := parseKey(key.val) - if err != nil { - p.raiseError(key, "invalid table array key: %s", err) - } - if err := p.tree.createSubTree(keys, startToken.Position); err != nil { - p.raiseError(key, "%s", err) - } - destTree := p.tree.GetPath(keys) - if target, ok := destTree.(*Tree); ok && target != nil && target.inline { - p.raiseError(key, "could not re-define exist inline table or its sub-table : %s", - strings.Join(keys, ".")) - } - p.assume(tokenRightBracket) - p.currentTable = keys - return p.parseStart -} - -func (p *tomlParser) parseAssign() tomlParserStateFn { - key := p.getToken() - p.assume(tokenEqual) - - parsedKey, err := parseKey(key.val) - if err != nil { - p.raiseError(key, "invalid key: %s", err.Error()) - } - - value := p.parseRvalue() - var tableKey []string - if len(p.currentTable) > 0 { - tableKey = p.currentTable - } else { - tableKey = []string{} - } - - prefixKey := parsedKey[0 : len(parsedKey)-1] - tableKey = append(tableKey, prefixKey...) 
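Reviewer note: parseGroupArray above appends a fresh subtree for every [[...]] header, which surfaces through the public API as []*toml.Tree. A small sketch, assuming the vendored go-toml v1:

	package main

	import (
		"fmt"

		toml "github.com/pelletier/go-toml"
	)

	const doc = "[[servers]]\nname = \"alpha\"\n\n[[servers]]\nname = \"beta\"\n"

	func main() {
		tree, err := toml.Load(doc)
		if err != nil {
			panic(err)
		}
		// A table array comes back as []*toml.Tree.
		for _, s := range tree.Get("servers").([]*toml.Tree) {
			fmt.Println(s.Get("name"))
		}
	}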
- - // find the table to assign, looking out for arrays of tables - var targetNode *Tree - switch node := p.tree.GetPath(tableKey).(type) { - case []*Tree: - targetNode = node[len(node)-1] - case *Tree: - targetNode = node - case nil: - // create intermediate - if err := p.tree.createSubTree(tableKey, key.Position); err != nil { - p.raiseError(key, "could not create intermediate group: %s", err) - } - targetNode = p.tree.GetPath(tableKey).(*Tree) - default: - p.raiseError(key, "Unknown table type for path: %s", - strings.Join(tableKey, ".")) - } - - if targetNode.inline { - p.raiseError(key, "could not add key or sub-table to exist inline table or its sub-table : %s", - strings.Join(tableKey, ".")) - } - - // assign value to the found table - keyVal := parsedKey[len(parsedKey)-1] - localKey := []string{keyVal} - finalKey := append(tableKey, keyVal) - if targetNode.GetPath(localKey) != nil { - p.raiseError(key, "The following key was defined twice: %s", - strings.Join(finalKey, ".")) - } - var toInsert interface{} - - switch value.(type) { - case *Tree, []*Tree: - toInsert = value - default: - toInsert = &tomlValue{value: value, position: key.Position} - } - targetNode.values[keyVal] = toInsert - return p.parseStart -} - -var errInvalidUnderscore = errors.New("invalid use of _ in number") - -func numberContainsInvalidUnderscore(value string) error { - // For large numbers, you may use underscores between digits to enhance - // readability. Each underscore must be surrounded by at least one digit on - // each side. - - hasBefore := false - for idx, r := range value { - if r == '_' { - if !hasBefore || idx+1 >= len(value) { - // can't end with an underscore - return errInvalidUnderscore - } - } - hasBefore = isDigit(r) - } - return nil -} - -var errInvalidUnderscoreHex = errors.New("invalid use of _ in hex number") - -func hexNumberContainsInvalidUnderscore(value string) error { - hasBefore := false - for idx, r := range value { - if r == '_' { - if !hasBefore || idx+1 >= len(value) { - // can't end with an underscore - return errInvalidUnderscoreHex - } - } - hasBefore = isHexDigit(r) - } - return nil -} - -func cleanupNumberToken(value string) string { - cleanedVal := strings.Replace(value, "_", "", -1) - return cleanedVal -} - -func (p *tomlParser) parseRvalue() interface{} { - tok := p.getToken() - if tok == nil || tok.typ == tokenEOF { - p.raiseError(tok, "expecting a value") - } - - switch tok.typ { - case tokenString: - return tok.val - case tokenTrue: - return true - case tokenFalse: - return false - case tokenInf: - if tok.val[0] == '-' { - return math.Inf(-1) - } - return math.Inf(1) - case tokenNan: - return math.NaN() - case tokenInteger: - cleanedVal := cleanupNumberToken(tok.val) - var err error - var val int64 - if len(cleanedVal) >= 3 && cleanedVal[0] == '0' { - switch cleanedVal[1] { - case 'x': - err = hexNumberContainsInvalidUnderscore(tok.val) - if err != nil { - p.raiseError(tok, "%s", err) - } - val, err = strconv.ParseInt(cleanedVal[2:], 16, 64) - case 'o': - err = numberContainsInvalidUnderscore(tok.val) - if err != nil { - p.raiseError(tok, "%s", err) - } - val, err = strconv.ParseInt(cleanedVal[2:], 8, 64) - case 'b': - err = numberContainsInvalidUnderscore(tok.val) - if err != nil { - p.raiseError(tok, "%s", err) - } - val, err = strconv.ParseInt(cleanedVal[2:], 2, 64) - default: - panic("invalid base") // the lexer should catch this first - } - } else { - err = numberContainsInvalidUnderscore(tok.val) - if err != nil { - p.raiseError(tok, "%s", err) - } - val, err 
= strconv.ParseInt(cleanedVal, 10, 64) - } - if err != nil { - p.raiseError(tok, "%s", err) - } - return val - case tokenFloat: - err := numberContainsInvalidUnderscore(tok.val) - if err != nil { - p.raiseError(tok, "%s", err) - } - cleanedVal := cleanupNumberToken(tok.val) - val, err := strconv.ParseFloat(cleanedVal, 64) - if err != nil { - p.raiseError(tok, "%s", err) - } - return val - case tokenLocalTime: - val, err := ParseLocalTime(tok.val) - if err != nil { - p.raiseError(tok, "%s", err) - } - return val - case tokenLocalDate: - // a local date may be followed by: - // * nothing: this is a local date - // * a local time: this is a local date-time - - next := p.peek() - if next == nil || next.typ != tokenLocalTime { - val, err := ParseLocalDate(tok.val) - if err != nil { - p.raiseError(tok, "%s", err) - } - return val - } - - localDate := tok - localTime := p.getToken() - - next = p.peek() - if next == nil || next.typ != tokenTimeOffset { - v := localDate.val + "T" + localTime.val - val, err := ParseLocalDateTime(v) - if err != nil { - p.raiseError(tok, "%s", err) - } - return val - } - - offset := p.getToken() - - layout := time.RFC3339Nano - v := localDate.val + "T" + localTime.val + offset.val - val, err := time.ParseInLocation(layout, v, time.UTC) - if err != nil { - p.raiseError(tok, "%s", err) - } - return val - case tokenLeftBracket: - return p.parseArray() - case tokenLeftCurlyBrace: - return p.parseInlineTable() - case tokenEqual: - p.raiseError(tok, "cannot have multiple equals for the same key") - case tokenError: - p.raiseError(tok, "%s", tok) - default: - panic(fmt.Errorf("unhandled token: %v", tok)) - } - - return nil -} - -func tokenIsComma(t *token) bool { - return t != nil && t.typ == tokenComma -} - -func (p *tomlParser) parseInlineTable() *Tree { - tree := newTree() - var previous *token -Loop: - for { - follow := p.peek() - if follow == nil || follow.typ == tokenEOF { - p.raiseError(follow, "unterminated inline table") - } - switch follow.typ { - case tokenRightCurlyBrace: - p.getToken() - break Loop - case tokenKey, tokenInteger, tokenString: - if !tokenIsComma(previous) && previous != nil { - p.raiseError(follow, "comma expected between fields in inline table") - } - key := p.getToken() - p.assume(tokenEqual) - - parsedKey, err := parseKey(key.val) - if err != nil { - p.raiseError(key, "invalid key: %s", err) - } - - value := p.parseRvalue() - tree.SetPath(parsedKey, value) - case tokenComma: - if tokenIsComma(previous) { - p.raiseError(follow, "need field between two commas in inline table") - } - p.getToken() - default: - p.raiseError(follow, "unexpected token type in inline table: %s", follow.String()) - } - previous = follow - } - if tokenIsComma(previous) { - p.raiseError(previous, "trailing comma at the end of inline table") - } - tree.inline = true - return tree -} - -func (p *tomlParser) parseArray() interface{} { - var array []interface{} - arrayType := reflect.TypeOf(newTree()) - for { - follow := p.peek() - if follow == nil || follow.typ == tokenEOF { - p.raiseError(follow, "unterminated array") - } - if follow.typ == tokenRightBracket { - p.getToken() - break - } - val := p.parseRvalue() - if reflect.TypeOf(val) != arrayType { - arrayType = nil - } - array = append(array, val) - follow = p.peek() - if follow == nil || follow.typ == tokenEOF { - p.raiseError(follow, "unterminated array") - } - if follow.typ != tokenRightBracket && follow.typ != tokenComma { - p.raiseError(follow, "missing comma") - } - if follow.typ == tokenComma { - p.getToken() - } 
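Reviewer note: parseRvalue above validates and strips digit-separator underscores, then dispatches on the 0x/0o/0b prefixes before calling strconv.ParseInt. Sketch, assuming the vendored go-toml v1:

	package main

	import (
		"fmt"

		toml "github.com/pelletier/go-toml"
	)

	func main() {
		tree, err := toml.Load("big = 1_000_000\nmask = 0xDEAD_BEEF\nperm = 0o755\n")
		if err != nil {
			panic(err)
		}
		// All integer forms land as int64 after underscore cleanup.
		fmt.Println(tree.Get("big"), tree.Get("mask"), tree.Get("perm"))
	}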
- } - - // if the array is a mixed-type array or its length is 0, - // don't convert it to a table array - if len(array) <= 0 { - arrayType = nil - } - // An array of Trees is actually an array of inline - // tables, which is a shorthand for a table array. If the - // array was not converted from []interface{} to []*Tree, - // the two notations would not be equivalent. - if arrayType == reflect.TypeOf(newTree()) { - tomlArray := make([]*Tree, len(array)) - for i, v := range array { - tomlArray[i] = v.(*Tree) - } - return tomlArray - } - return array -} - -func parseToml(flow []token) *Tree { - result := newTree() - result.position = Position{1, 1} - parser := &tomlParser{ - flowIdx: 0, - flow: flow, - tree: result, - currentTable: make([]string, 0), - seenTableKeys: make([]string, 0), - } - parser.run() - return result -} diff --git a/vendor/github.com/pelletier/go-toml/position.go b/vendor/github.com/pelletier/go-toml/position.go deleted file mode 100644 index c17bff87..00000000 --- a/vendor/github.com/pelletier/go-toml/position.go +++ /dev/null @@ -1,29 +0,0 @@ -// Position support for go-toml - -package toml - -import ( - "fmt" -) - -// Position of a document element within a TOML document. -// -// Line and Col are both 1-indexed positions for the element's line number and -// column number, respectively. Values of zero or less will cause Invalid(), -// to return true. -type Position struct { - Line int // line within the document - Col int // column within the line -} - -// String representation of the position. -// Displays 1-indexed line and column numbers. -func (p Position) String() string { - return fmt.Sprintf("(%d, %d)", p.Line, p.Col) -} - -// Invalid returns whether or not the position is valid (i.e. with negative or -// null values) -func (p Position) Invalid() bool { - return p.Line <= 0 || p.Col <= 0 -} diff --git a/vendor/github.com/pelletier/go-toml/token.go b/vendor/github.com/pelletier/go-toml/token.go deleted file mode 100644 index b437fdd3..00000000 --- a/vendor/github.com/pelletier/go-toml/token.go +++ /dev/null @@ -1,136 +0,0 @@ -package toml - -import "fmt" - -// Define tokens -type tokenType int - -const ( - eof = -(iota + 1) -) - -const ( - tokenError tokenType = iota - tokenEOF - tokenComment - tokenKey - tokenString - tokenInteger - tokenTrue - tokenFalse - tokenFloat - tokenInf - tokenNan - tokenEqual - tokenLeftBracket - tokenRightBracket - tokenLeftCurlyBrace - tokenRightCurlyBrace - tokenLeftParen - tokenRightParen - tokenDoubleLeftBracket - tokenDoubleRightBracket - tokenLocalDate - tokenLocalTime - tokenTimeOffset - tokenKeyGroup - tokenKeyGroupArray - tokenComma - tokenColon - tokenDollar - tokenStar - tokenQuestion - tokenDot - tokenDotDot - tokenEOL -) - -var tokenTypeNames = []string{ - "Error", - "EOF", - "Comment", - "Key", - "String", - "Integer", - "True", - "False", - "Float", - "Inf", - "NaN", - "=", - "[", - "]", - "{", - "}", - "(", - ")", - "]]", - "[[", - "LocalDate", - "LocalTime", - "TimeOffset", - "KeyGroup", - "KeyGroupArray", - ",", - ":", - "$", - "*", - "?", - ".", - "..", - "EOL", -} - -type token struct { - Position - typ tokenType - val string -} - -func (tt tokenType) String() string { - idx := int(tt) - if idx < len(tokenTypeNames) { - return tokenTypeNames[idx] - } - return "Unknown" -} - -func (t token) String() string { - switch t.typ { - case tokenEOF: - return "EOF" - case tokenError: - return t.val - } - - return fmt.Sprintf("%q", t.val) -} - -func isSpace(r rune) bool { - return r == ' ' || r == '\t' -} - -func 
isAlphanumeric(r rune) bool { - return 'a' <= r && r <= 'z' || 'A' <= r && r <= 'Z' || r == '_' -} - -func isKeyChar(r rune) bool { - // Keys start with the first character that isn't whitespace or [ and end - // with the last non-whitespace character before the equals sign. Keys - // cannot contain a # character." - return !(r == '\r' || r == '\n' || r == eof || r == '=') -} - -func isKeyStartChar(r rune) bool { - return !(isSpace(r) || r == '\r' || r == '\n' || r == eof || r == '[') -} - -func isDigit(r rune) bool { - return '0' <= r && r <= '9' -} - -func isHexDigit(r rune) bool { - return isDigit(r) || - (r >= 'a' && r <= 'f') || - (r >= 'A' && r <= 'F') -} diff --git a/vendor/github.com/pelletier/go-toml/toml.go b/vendor/github.com/pelletier/go-toml/toml.go deleted file mode 100644 index 6d82587c..00000000 --- a/vendor/github.com/pelletier/go-toml/toml.go +++ /dev/null @@ -1,533 +0,0 @@ -package toml - -import ( - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "runtime" - "strings" -) - -type tomlValue struct { - value interface{} // string, int64, uint64, float64, bool, time.Time, [] of any of this list - comment string - commented bool - multiline bool - literal bool - position Position -} - -// Tree is the result of the parsing of a TOML file. -type Tree struct { - values map[string]interface{} // string -> *tomlValue, *Tree, []*Tree - comment string - commented bool - inline bool - position Position -} - -func newTree() *Tree { - return newTreeWithPosition(Position{}) -} - -func newTreeWithPosition(pos Position) *Tree { - return &Tree{ - values: make(map[string]interface{}), - position: pos, - } -} - -// TreeFromMap initializes a new Tree object using the given map. -func TreeFromMap(m map[string]interface{}) (*Tree, error) { - result, err := toTree(m) - if err != nil { - return nil, err - } - return result.(*Tree), nil -} - -// Position returns the position of the tree. -func (t *Tree) Position() Position { - return t.position -} - -// Has returns a boolean indicating if the given key exists. -func (t *Tree) Has(key string) bool { - if key == "" { - return false - } - return t.HasPath(strings.Split(key, ".")) -} - -// HasPath returns true if the given path of keys exists, false otherwise. -func (t *Tree) HasPath(keys []string) bool { - return t.GetPath(keys) != nil -} - -// Keys returns the keys of the toplevel tree (does not recurse). -func (t *Tree) Keys() []string { - keys := make([]string, len(t.values)) - i := 0 - for k := range t.values { - keys[i] = k - i++ - } - return keys -} - -// Get the value at key in the Tree. -// Key is a dot-separated path (e.g. a.b.c) without single/double quoted strings. -// If you need to retrieve non-bare keys, use GetPath. -// Returns nil if the path does not exist in the tree. -// If keys is of length zero, the current tree is returned. -func (t *Tree) Get(key string) interface{} { - if key == "" { - return t - } - return t.GetPath(strings.Split(key, ".")) -} - -// GetPath returns the element in the tree indicated by 'keys'. -// If keys is of length zero, the current tree is returned. 
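Reviewer note: per the Get docs above, the key is split on dots, so keys that themselves contain dots must go through GetPath. An illustrative sketch against the vendored go-toml v1:

	package main

	import (
		"fmt"

		toml "github.com/pelletier/go-toml"
	)

	func main() {
		tree, err := toml.Load("[server]\n\"host.name\" = \"example.com\"\nport = 8080\n")
		if err != nil {
			panic(err)
		}
		fmt.Println(tree.Get("server.port"))                       // 8080
		fmt.Println(tree.GetPath([]string{"server", "host.name"})) // example.com
		fmt.Println(tree.Has("server.port"))                       // true
	}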
-func (t *Tree) GetPath(keys []string) interface{} { - if len(keys) == 0 { - return t - } - subtree := t - for _, intermediateKey := range keys[:len(keys)-1] { - value, exists := subtree.values[intermediateKey] - if !exists { - return nil - } - switch node := value.(type) { - case *Tree: - subtree = node - case []*Tree: - // go to most recent element - if len(node) == 0 { - return nil - } - subtree = node[len(node)-1] - default: - return nil // cannot navigate through other node types - } - } - // branch based on final node type - switch node := subtree.values[keys[len(keys)-1]].(type) { - case *tomlValue: - return node.value - default: - return node - } -} - -// GetArray returns the value at key in the Tree. -// It returns []string, []int64, etc type if key has homogeneous lists -// Key is a dot-separated path (e.g. a.b.c) without single/double quoted strings. -// Returns nil if the path does not exist in the tree. -// If keys is of length zero, the current tree is returned. -func (t *Tree) GetArray(key string) interface{} { - if key == "" { - return t - } - return t.GetArrayPath(strings.Split(key, ".")) -} - -// GetArrayPath returns the element in the tree indicated by 'keys'. -// If keys is of length zero, the current tree is returned. -func (t *Tree) GetArrayPath(keys []string) interface{} { - if len(keys) == 0 { - return t - } - subtree := t - for _, intermediateKey := range keys[:len(keys)-1] { - value, exists := subtree.values[intermediateKey] - if !exists { - return nil - } - switch node := value.(type) { - case *Tree: - subtree = node - case []*Tree: - // go to most recent element - if len(node) == 0 { - return nil - } - subtree = node[len(node)-1] - default: - return nil // cannot navigate through other node types - } - } - // branch based on final node type - switch node := subtree.values[keys[len(keys)-1]].(type) { - case *tomlValue: - switch n := node.value.(type) { - case []interface{}: - return getArray(n) - default: - return node.value - } - default: - return node - } -} - -// if homogeneous array, then return slice type object over []interface{} -func getArray(n []interface{}) interface{} { - var s []string - var i64 []int64 - var f64 []float64 - var bl []bool - for _, value := range n { - switch v := value.(type) { - case string: - s = append(s, v) - case int64: - i64 = append(i64, v) - case float64: - f64 = append(f64, v) - case bool: - bl = append(bl, v) - default: - return n - } - } - if len(s) == len(n) { - return s - } else if len(i64) == len(n) { - return i64 - } else if len(f64) == len(n) { - return f64 - } else if len(bl) == len(n) { - return bl - } - return n -} - -// GetPosition returns the position of the given key. -func (t *Tree) GetPosition(key string) Position { - if key == "" { - return t.position - } - return t.GetPositionPath(strings.Split(key, ".")) -} - -// SetPositionPath sets the position of element in the tree indicated by 'keys'. -// If keys is of length zero, the current tree position is set. 
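Reviewer note: getArray above collapses homogeneous []interface{} values into typed slices, and leaves mixed arrays untouched. Sketch; both documents are illustrative:

	package main

	import (
		"fmt"

		toml "github.com/pelletier/go-toml"
	)

	func main() {
		homogeneous, err := toml.Load(`hosts = ["a.example", "b.example"]`)
		if err != nil {
			panic(err)
		}
		hosts := homogeneous.GetArray("hosts").([]string) // typed slice back
		fmt.Println(hosts)

		mixed, err := toml.Load(`vals = [1, "two"]`)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%T\n", mixed.GetArray("vals")) // []interface {} unchanged
	}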
-func (t *Tree) SetPositionPath(keys []string, pos Position) { - if len(keys) == 0 { - t.position = pos - return - } - subtree := t - for _, intermediateKey := range keys[:len(keys)-1] { - value, exists := subtree.values[intermediateKey] - if !exists { - return - } - switch node := value.(type) { - case *Tree: - subtree = node - case []*Tree: - // go to most recent element - if len(node) == 0 { - return - } - subtree = node[len(node)-1] - default: - return - } - } - // branch based on final node type - switch node := subtree.values[keys[len(keys)-1]].(type) { - case *tomlValue: - node.position = pos - return - case *Tree: - node.position = pos - return - case []*Tree: - // go to most recent element - if len(node) == 0 { - return - } - node[len(node)-1].position = pos - return - } -} - -// GetPositionPath returns the element in the tree indicated by 'keys'. -// If keys is of length zero, the current tree is returned. -func (t *Tree) GetPositionPath(keys []string) Position { - if len(keys) == 0 { - return t.position - } - subtree := t - for _, intermediateKey := range keys[:len(keys)-1] { - value, exists := subtree.values[intermediateKey] - if !exists { - return Position{0, 0} - } - switch node := value.(type) { - case *Tree: - subtree = node - case []*Tree: - // go to most recent element - if len(node) == 0 { - return Position{0, 0} - } - subtree = node[len(node)-1] - default: - return Position{0, 0} - } - } - // branch based on final node type - switch node := subtree.values[keys[len(keys)-1]].(type) { - case *tomlValue: - return node.position - case *Tree: - return node.position - case []*Tree: - // go to most recent element - if len(node) == 0 { - return Position{0, 0} - } - return node[len(node)-1].position - default: - return Position{0, 0} - } -} - -// GetDefault works like Get but with a default value -func (t *Tree) GetDefault(key string, def interface{}) interface{} { - val := t.Get(key) - if val == nil { - return def - } - return val -} - -// SetOptions arguments are supplied to the SetWithOptions and SetPathWithOptions functions to modify marshalling behaviour. -// The default values within the struct are valid default options. -type SetOptions struct { - Comment string - Commented bool - Multiline bool - Literal bool -} - -// SetWithOptions is the same as Set, but allows you to provide formatting -// instructions to the key, that will be used by Marshal(). -func (t *Tree) SetWithOptions(key string, opts SetOptions, value interface{}) { - t.SetPathWithOptions(strings.Split(key, "."), opts, value) -} - -// SetPathWithOptions is the same as SetPath, but allows you to provide -// formatting instructions to the key, that will be reused by Marshal(). 
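Reviewer note: SetWithOptions and SetPathWithOptions attach the formatting metadata described above so it survives a later Marshal. Minimal sketch, assuming the vendored go-toml v1:

	package main

	import (
		"fmt"

		toml "github.com/pelletier/go-toml"
	)

	func main() {
		tree, err := toml.Load("")
		if err != nil {
			panic(err)
		}
		tree.SetWithOptions("title", toml.SetOptions{
			Comment: "written back as a # comment by Marshal",
		}, "TOML example")
		out, err := tree.Marshal()
		if err != nil {
			panic(err)
		}
		fmt.Print(string(out))
	}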
-func (t *Tree) SetPathWithOptions(keys []string, opts SetOptions, value interface{}) { - subtree := t - for i, intermediateKey := range keys[:len(keys)-1] { - nextTree, exists := subtree.values[intermediateKey] - if !exists { - nextTree = newTreeWithPosition(Position{Line: t.position.Line + i, Col: t.position.Col}) - subtree.values[intermediateKey] = nextTree // add new element here - } - switch node := nextTree.(type) { - case *Tree: - subtree = node - case []*Tree: - // go to most recent element - if len(node) == 0 { - // create element if it does not exist - node = append(node, newTreeWithPosition(Position{Line: t.position.Line + i, Col: t.position.Col})) - subtree.values[intermediateKey] = node - } - subtree = node[len(node)-1] - } - } - - var toInsert interface{} - - switch v := value.(type) { - case *Tree: - v.comment = opts.Comment - v.commented = opts.Commented - toInsert = value - case []*Tree: - for i := range v { - v[i].commented = opts.Commented - } - toInsert = value - case *tomlValue: - v.comment = opts.Comment - v.commented = opts.Commented - v.multiline = opts.Multiline - v.literal = opts.Literal - toInsert = v - default: - toInsert = &tomlValue{value: value, - comment: opts.Comment, - commented: opts.Commented, - multiline: opts.Multiline, - literal: opts.Literal, - position: Position{Line: subtree.position.Line + len(subtree.values) + 1, Col: subtree.position.Col}} - } - - subtree.values[keys[len(keys)-1]] = toInsert -} - -// Set an element in the tree. -// Key is a dot-separated path (e.g. a.b.c). -// Creates all necessary intermediate trees, if needed. -func (t *Tree) Set(key string, value interface{}) { - t.SetWithComment(key, "", false, value) -} - -// SetWithComment is the same as Set, but allows you to provide comment -// information to the key, that will be reused by Marshal(). -func (t *Tree) SetWithComment(key string, comment string, commented bool, value interface{}) { - t.SetPathWithComment(strings.Split(key, "."), comment, commented, value) -} - -// SetPath sets an element in the tree. -// Keys is an array of path elements (e.g. {"a","b","c"}). -// Creates all necessary intermediate trees, if needed. -func (t *Tree) SetPath(keys []string, value interface{}) { - t.SetPathWithComment(keys, "", false, value) -} - -// SetPathWithComment is the same as SetPath, but allows you to provide comment -// information to the key, that will be reused by Marshal(). -func (t *Tree) SetPathWithComment(keys []string, comment string, commented bool, value interface{}) { - t.SetPathWithOptions(keys, SetOptions{Comment: comment, Commented: commented}, value) -} - -// Delete removes a key from the tree. -// Key is a dot-separated path (e.g. a.b.c). -func (t *Tree) Delete(key string) error { - keys, err := parseKey(key) - if err != nil { - return err - } - return t.DeletePath(keys) -} - -// DeletePath removes a key from the tree. -// Keys is an array of path elements (e.g. {"a","b","c"}). -func (t *Tree) DeletePath(keys []string) error { - keyLen := len(keys) - if keyLen == 1 { - delete(t.values, keys[0]) - return nil - } - tree := t.GetPath(keys[:keyLen-1]) - item := keys[keyLen-1] - switch node := tree.(type) { - case *Tree: - delete(node.values, item) - return nil - } - return errors.New("no such key to delete") -} - -// createSubTree takes a tree and a key and create the necessary intermediate -// subtrees to create a subtree at that point. In-place. -// -// e.g. 
passing a.b.c will create (assuming tree is empty) tree[a], tree[a][b] -// and tree[a][b][c] -// -// Returns nil on success, error object on failure -func (t *Tree) createSubTree(keys []string, pos Position) error { - subtree := t - for i, intermediateKey := range keys { - nextTree, exists := subtree.values[intermediateKey] - if !exists { - tree := newTreeWithPosition(Position{Line: t.position.Line + i, Col: t.position.Col}) - tree.position = pos - tree.inline = subtree.inline - subtree.values[intermediateKey] = tree - nextTree = tree - } - - switch node := nextTree.(type) { - case []*Tree: - subtree = node[len(node)-1] - case *Tree: - subtree = node - default: - return fmt.Errorf("unknown type for path %s (%s): %T (%#v)", - strings.Join(keys, "."), intermediateKey, nextTree, nextTree) - } - } - return nil -} - -// LoadBytes creates a Tree from a []byte. -func LoadBytes(b []byte) (tree *Tree, err error) { - defer func() { - if r := recover(); r != nil { - if _, ok := r.(runtime.Error); ok { - panic(r) - } - err = errors.New(r.(string)) - } - }() - - if len(b) >= 4 && (hasUTF32BigEndianBOM4(b) || hasUTF32LittleEndianBOM4(b)) { - b = b[4:] - } else if len(b) >= 3 && hasUTF8BOM3(b) { - b = b[3:] - } else if len(b) >= 2 && (hasUTF16BigEndianBOM2(b) || hasUTF16LittleEndianBOM2(b)) { - b = b[2:] - } - - tree = parseToml(lexToml(b)) - return -} - -func hasUTF16BigEndianBOM2(b []byte) bool { - return b[0] == 0xFE && b[1] == 0xFF -} - -func hasUTF16LittleEndianBOM2(b []byte) bool { - return b[0] == 0xFF && b[1] == 0xFE -} - -func hasUTF8BOM3(b []byte) bool { - return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF -} - -func hasUTF32BigEndianBOM4(b []byte) bool { - return b[0] == 0x00 && b[1] == 0x00 && b[2] == 0xFE && b[3] == 0xFF -} - -func hasUTF32LittleEndianBOM4(b []byte) bool { - return b[0] == 0xFF && b[1] == 0xFE && b[2] == 0x00 && b[3] == 0x00 -} - -// LoadReader creates a Tree from any io.Reader. -func LoadReader(reader io.Reader) (tree *Tree, err error) { - inputBytes, err := ioutil.ReadAll(reader) - if err != nil { - return - } - tree, err = LoadBytes(inputBytes) - return -} - -// Load creates a Tree from a string. -func Load(content string) (tree *Tree, err error) { - return LoadBytes([]byte(content)) -} - -// LoadFile creates a Tree from a file. -func LoadFile(path string) (tree *Tree, err error) { - file, err := os.Open(path) - if err != nil { - return nil, err - } - defer file.Close() - return LoadReader(file) -} diff --git a/vendor/github.com/pelletier/go-toml/tomlpub.go b/vendor/github.com/pelletier/go-toml/tomlpub.go deleted file mode 100644 index 4136b462..00000000 --- a/vendor/github.com/pelletier/go-toml/tomlpub.go +++ /dev/null @@ -1,71 +0,0 @@ -package toml - -// PubTOMLValue wrapping tomlValue in order to access all properties from outside. 
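Reviewer note: Set creates intermediate tables on demand (createSubTree above), and Delete removes a leaf by dotted path. A round-trip sketch, assuming the vendored go-toml v1; keys and values are illustrative:

	package main

	import (
		"fmt"

		toml "github.com/pelletier/go-toml"
	)

	func main() {
		tree, err := toml.Load("[owner]\nname = \"original\"\n")
		if err != nil {
			panic(err)
		}
		tree.Set("owner.name", "updated")    // existing leaf
		tree.Set("owner.meta.created", true) // intermediate table auto-created
		if err := tree.Delete("owner.meta.created"); err != nil {
			panic(err)
		}
		out, err := tree.Marshal()
		if err != nil {
			panic(err)
		}
		fmt.Print(string(out))
	}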
-type PubTOMLValue = tomlValue - -func (ptv *PubTOMLValue) Value() interface{} { - return ptv.value -} -func (ptv *PubTOMLValue) Comment() string { - return ptv.comment -} -func (ptv *PubTOMLValue) Commented() bool { - return ptv.commented -} -func (ptv *PubTOMLValue) Multiline() bool { - return ptv.multiline -} -func (ptv *PubTOMLValue) Position() Position { - return ptv.position -} - -func (ptv *PubTOMLValue) SetValue(v interface{}) { - ptv.value = v -} -func (ptv *PubTOMLValue) SetComment(s string) { - ptv.comment = s -} -func (ptv *PubTOMLValue) SetCommented(c bool) { - ptv.commented = c -} -func (ptv *PubTOMLValue) SetMultiline(m bool) { - ptv.multiline = m -} -func (ptv *PubTOMLValue) SetPosition(p Position) { - ptv.position = p -} - -// PubTree wrapping Tree in order to access all properties from outside. -type PubTree = Tree - -func (pt *PubTree) Values() map[string]interface{} { - return pt.values -} - -func (pt *PubTree) Comment() string { - return pt.comment -} - -func (pt *PubTree) Commented() bool { - return pt.commented -} - -func (pt *PubTree) Inline() bool { - return pt.inline -} - -func (pt *PubTree) SetValues(v map[string]interface{}) { - pt.values = v -} - -func (pt *PubTree) SetComment(c string) { - pt.comment = c -} - -func (pt *PubTree) SetCommented(c bool) { - pt.commented = c -} - -func (pt *PubTree) SetInline(i bool) { - pt.inline = i -} diff --git a/vendor/github.com/pelletier/go-toml/tomltree_create.go b/vendor/github.com/pelletier/go-toml/tomltree_create.go deleted file mode 100644 index 80353500..00000000 --- a/vendor/github.com/pelletier/go-toml/tomltree_create.go +++ /dev/null @@ -1,155 +0,0 @@ -package toml - -import ( - "fmt" - "reflect" - "time" -) - -var kindToType = [reflect.String + 1]reflect.Type{ - reflect.Bool: reflect.TypeOf(true), - reflect.String: reflect.TypeOf(""), - reflect.Float32: reflect.TypeOf(float64(1)), - reflect.Float64: reflect.TypeOf(float64(1)), - reflect.Int: reflect.TypeOf(int64(1)), - reflect.Int8: reflect.TypeOf(int64(1)), - reflect.Int16: reflect.TypeOf(int64(1)), - reflect.Int32: reflect.TypeOf(int64(1)), - reflect.Int64: reflect.TypeOf(int64(1)), - reflect.Uint: reflect.TypeOf(uint64(1)), - reflect.Uint8: reflect.TypeOf(uint64(1)), - reflect.Uint16: reflect.TypeOf(uint64(1)), - reflect.Uint32: reflect.TypeOf(uint64(1)), - reflect.Uint64: reflect.TypeOf(uint64(1)), -} - -// typeFor returns a reflect.Type for a reflect.Kind, or nil if none is found. 
-// supported values: -// string, bool, int64, uint64, float64, time.Time, int, int8, int16, int32, uint, uint8, uint16, uint32, float32 -func typeFor(k reflect.Kind) reflect.Type { - if k > 0 && int(k) < len(kindToType) { - return kindToType[k] - } - return nil -} - -func simpleValueCoercion(object interface{}) (interface{}, error) { - switch original := object.(type) { - case string, bool, int64, uint64, float64, time.Time: - return original, nil - case int: - return int64(original), nil - case int8: - return int64(original), nil - case int16: - return int64(original), nil - case int32: - return int64(original), nil - case uint: - return uint64(original), nil - case uint8: - return uint64(original), nil - case uint16: - return uint64(original), nil - case uint32: - return uint64(original), nil - case float32: - return float64(original), nil - case fmt.Stringer: - return original.String(), nil - case []interface{}: - value := reflect.ValueOf(original) - length := value.Len() - arrayValue := reflect.MakeSlice(value.Type(), 0, length) - for i := 0; i < length; i++ { - val := value.Index(i).Interface() - simpleValue, err := simpleValueCoercion(val) - if err != nil { - return nil, err - } - arrayValue = reflect.Append(arrayValue, reflect.ValueOf(simpleValue)) - } - return arrayValue.Interface(), nil - default: - return nil, fmt.Errorf("cannot convert type %T to Tree", object) - } -} - -func sliceToTree(object interface{}) (interface{}, error) { - // arrays are a bit tricky, since they can represent either a - // collection of simple values, which is represented by one - // *tomlValue, or an array of tables, which is represented by an - // array of *Tree. - - // holding the assumption that this function is called from toTree only when value.Kind() is Array or Slice - value := reflect.ValueOf(object) - insideType := value.Type().Elem() - length := value.Len() - if length > 0 { - insideType = reflect.ValueOf(value.Index(0).Interface()).Type() - } - if insideType.Kind() == reflect.Map { - // this is considered as an array of tables - tablesArray := make([]*Tree, 0, length) - for i := 0; i < length; i++ { - table := value.Index(i) - tree, err := toTree(table.Interface()) - if err != nil { - return nil, err - } - tablesArray = append(tablesArray, tree.(*Tree)) - } - return tablesArray, nil - } - - sliceType := typeFor(insideType.Kind()) - if sliceType == nil { - sliceType = insideType - } - - arrayValue := reflect.MakeSlice(reflect.SliceOf(sliceType), 0, length) - - for i := 0; i < length; i++ { - val := value.Index(i).Interface() - simpleValue, err := simpleValueCoercion(val) - if err != nil { - return nil, err - } - arrayValue = reflect.Append(arrayValue, reflect.ValueOf(simpleValue)) - } - return &tomlValue{value: arrayValue.Interface(), position: Position{}}, nil -} - -func toTree(object interface{}) (interface{}, error) { - value := reflect.ValueOf(object) - - if value.Kind() == reflect.Map { - values := map[string]interface{}{} - keys := value.MapKeys() - for _, key := range keys { - if key.Kind() != reflect.String { - if _, ok := key.Interface().(string); !ok { - return nil, fmt.Errorf("map key needs to be a string, not %T (%v)", key.Interface(), key.Kind()) - } - } - - v := value.MapIndex(key) - newValue, err := toTree(v.Interface()) - if err != nil { - return nil, err - } - values[key.String()] = newValue - } - return &Tree{values: values, position: Position{}}, nil - } - - if value.Kind() == reflect.Array || value.Kind() == reflect.Slice { - return sliceToTree(object) - } - - simpleValue, 
err := simpleValueCoercion(object) - if err != nil { - return nil, err - } - return &tomlValue{value: simpleValue, position: Position{}}, nil -} diff --git a/vendor/github.com/pelletier/go-toml/tomltree_write.go b/vendor/github.com/pelletier/go-toml/tomltree_write.go deleted file mode 100644 index c9afbdab..00000000 --- a/vendor/github.com/pelletier/go-toml/tomltree_write.go +++ /dev/null @@ -1,552 +0,0 @@ -package toml - -import ( - "bytes" - "fmt" - "io" - "math" - "math/big" - "reflect" - "sort" - "strconv" - "strings" - "time" -) - -type valueComplexity int - -const ( - valueSimple valueComplexity = iota + 1 - valueComplex -) - -type sortNode struct { - key string - complexity valueComplexity -} - -// Encodes a string to a TOML-compliant multi-line string value -// This function is a clone of the existing encodeTomlString function, except that whitespace characters -// are preserved. Quotation marks and backslashes are also not escaped. -func encodeMultilineTomlString(value string, commented string) string { - var b bytes.Buffer - adjacentQuoteCount := 0 - - b.WriteString(commented) - for i, rr := range value { - if rr != '"' { - adjacentQuoteCount = 0 - } else { - adjacentQuoteCount++ - } - switch rr { - case '\b': - b.WriteString(`\b`) - case '\t': - b.WriteString("\t") - case '\n': - b.WriteString("\n" + commented) - case '\f': - b.WriteString(`\f`) - case '\r': - b.WriteString("\r") - case '"': - if adjacentQuoteCount >= 3 || i == len(value)-1 { - adjacentQuoteCount = 0 - b.WriteString(`\"`) - } else { - b.WriteString(`"`) - } - case '\\': - b.WriteString(`\`) - default: - intRr := uint16(rr) - if intRr < 0x001F { - b.WriteString(fmt.Sprintf("\\u%0.4X", intRr)) - } else { - b.WriteRune(rr) - } - } - } - return b.String() -} - -// Encodes a string to a TOML-compliant string value -func encodeTomlString(value string) string { - var b bytes.Buffer - - for _, rr := range value { - switch rr { - case '\b': - b.WriteString(`\b`) - case '\t': - b.WriteString(`\t`) - case '\n': - b.WriteString(`\n`) - case '\f': - b.WriteString(`\f`) - case '\r': - b.WriteString(`\r`) - case '"': - b.WriteString(`\"`) - case '\\': - b.WriteString(`\\`) - default: - intRr := uint16(rr) - if intRr < 0x001F { - b.WriteString(fmt.Sprintf("\\u%0.4X", intRr)) - } else { - b.WriteRune(rr) - } - } - } - return b.String() -} - -func tomlTreeStringRepresentation(t *Tree, ord MarshalOrder) (string, error) { - var orderedVals []sortNode - switch ord { - case OrderPreserve: - orderedVals = sortByLines(t) - default: - orderedVals = sortAlphabetical(t) - } - - var values []string - for _, node := range orderedVals { - k := node.key - v := t.values[k] - - repr, err := tomlValueStringRepresentation(v, "", "", ord, false) - if err != nil { - return "", err - } - values = append(values, quoteKeyIfNeeded(k)+" = "+repr) - } - return "{ " + strings.Join(values, ", ") + " }", nil -} - -func tomlValueStringRepresentation(v interface{}, commented string, indent string, ord MarshalOrder, arraysOneElementPerLine bool) (string, error) { - // this interface check is added to dereference the change made in the writeTo function. - // That change was made to allow this function to see formatting options. 
- tv, ok := v.(*tomlValue) - if ok { - v = tv.value - } else { - tv = &tomlValue{} - } - - switch value := v.(type) { - case uint64: - return strconv.FormatUint(value, 10), nil - case int64: - return strconv.FormatInt(value, 10), nil - case float64: - // Default bit length is full 64 - bits := 64 - // Float panics if nan is used - if !math.IsNaN(value) { - // if 32 bit accuracy is enough to exactly show, use 32 - _, acc := big.NewFloat(value).Float32() - if acc == big.Exact { - bits = 32 - } - } - if math.Trunc(value) == value { - return strings.ToLower(strconv.FormatFloat(value, 'f', 1, bits)), nil - } - return strings.ToLower(strconv.FormatFloat(value, 'f', -1, bits)), nil - case string: - if tv.multiline { - if tv.literal { - b := strings.Builder{} - b.WriteString("'''\n") - b.Write([]byte(value)) - b.WriteString("\n'''") - return b.String(), nil - } else { - return "\"\"\"\n" + encodeMultilineTomlString(value, commented) + "\"\"\"", nil - } - } - return "\"" + encodeTomlString(value) + "\"", nil - case []byte: - b, _ := v.([]byte) - return string(b), nil - case bool: - if value { - return "true", nil - } - return "false", nil - case time.Time: - return value.Format(time.RFC3339), nil - case LocalDate: - return value.String(), nil - case LocalDateTime: - return value.String(), nil - case LocalTime: - return value.String(), nil - case *Tree: - return tomlTreeStringRepresentation(value, ord) - case nil: - return "", nil - } - - rv := reflect.ValueOf(v) - - if rv.Kind() == reflect.Slice { - var values []string - for i := 0; i < rv.Len(); i++ { - item := rv.Index(i).Interface() - itemRepr, err := tomlValueStringRepresentation(item, commented, indent, ord, arraysOneElementPerLine) - if err != nil { - return "", err - } - values = append(values, itemRepr) - } - if arraysOneElementPerLine && len(values) > 1 { - stringBuffer := bytes.Buffer{} - valueIndent := indent + ` ` // TODO: move that to a shared encoder state - - stringBuffer.WriteString("[\n") - - for _, value := range values { - stringBuffer.WriteString(valueIndent) - stringBuffer.WriteString(commented + value) - stringBuffer.WriteString(`,`) - stringBuffer.WriteString("\n") - } - - stringBuffer.WriteString(indent + commented + "]") - - return stringBuffer.String(), nil - } - return "[" + strings.Join(values, ", ") + "]", nil - } - return "", fmt.Errorf("unsupported value type %T: %v", v, v) -} - -func getTreeArrayLine(trees []*Tree) (line int) { - // Prevent returning 0 for empty trees - line = int(^uint(0) >> 1) - // get lowest line number >= 0 - for _, tv := range trees { - if tv.position.Line < line || line == 0 { - line = tv.position.Line - } - } - return -} - -func sortByLines(t *Tree) (vals []sortNode) { - var ( - line int - lines []int - tv *Tree - tom *tomlValue - node sortNode - ) - vals = make([]sortNode, 0) - m := make(map[int]sortNode) - - for k := range t.values { - v := t.values[k] - switch v.(type) { - case *Tree: - tv = v.(*Tree) - line = tv.position.Line - node = sortNode{key: k, complexity: valueComplex} - case []*Tree: - line = getTreeArrayLine(v.([]*Tree)) - node = sortNode{key: k, complexity: valueComplex} - default: - tom = v.(*tomlValue) - line = tom.position.Line - node = sortNode{key: k, complexity: valueSimple} - } - lines = append(lines, line) - vals = append(vals, node) - m[line] = node - } - sort.Ints(lines) - - for i, line := range lines { - vals[i] = m[line] - } - - return vals -} - -func sortAlphabetical(t *Tree) (vals []sortNode) { - var ( - node sortNode - simpVals []string - compVals []string - ) - 
vals = make([]sortNode, 0) - m := make(map[string]sortNode) - - for k := range t.values { - v := t.values[k] - switch v.(type) { - case *Tree, []*Tree: - node = sortNode{key: k, complexity: valueComplex} - compVals = append(compVals, node.key) - default: - node = sortNode{key: k, complexity: valueSimple} - simpVals = append(simpVals, node.key) - } - vals = append(vals, node) - m[node.key] = node - } - - // Simples first to match previous implementation - sort.Strings(simpVals) - i := 0 - for _, key := range simpVals { - vals[i] = m[key] - i++ - } - - sort.Strings(compVals) - for _, key := range compVals { - vals[i] = m[key] - i++ - } - - return vals -} - -func (t *Tree) writeTo(w io.Writer, indent, keyspace string, bytesCount int64, arraysOneElementPerLine bool) (int64, error) { - return t.writeToOrdered(w, indent, keyspace, bytesCount, arraysOneElementPerLine, OrderAlphabetical, " ", false, false) -} - -func (t *Tree) writeToOrdered(w io.Writer, indent, keyspace string, bytesCount int64, arraysOneElementPerLine bool, ord MarshalOrder, indentString string, compactComments, parentCommented bool) (int64, error) { - var orderedVals []sortNode - - switch ord { - case OrderPreserve: - orderedVals = sortByLines(t) - default: - orderedVals = sortAlphabetical(t) - } - - for _, node := range orderedVals { - switch node.complexity { - case valueComplex: - k := node.key - v := t.values[k] - - combinedKey := quoteKeyIfNeeded(k) - if keyspace != "" { - combinedKey = keyspace + "." + combinedKey - } - - switch node := v.(type) { - // node has to be of those two types given how keys are sorted above - case *Tree: - tv, ok := t.values[k].(*Tree) - if !ok { - return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k]) - } - if tv.comment != "" { - comment := strings.Replace(tv.comment, "\n", "\n"+indent+"#", -1) - start := "# " - if strings.HasPrefix(comment, "#") { - start = "" - } - writtenBytesCountComment, errc := writeStrings(w, "\n", indent, start, comment) - bytesCount += int64(writtenBytesCountComment) - if errc != nil { - return bytesCount, errc - } - } - - var commented string - if parentCommented || t.commented || tv.commented { - commented = "# " - } - writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[", combinedKey, "]\n") - bytesCount += int64(writtenBytesCount) - if err != nil { - return bytesCount, err - } - bytesCount, err = node.writeToOrdered(w, indent+indentString, combinedKey, bytesCount, arraysOneElementPerLine, ord, indentString, compactComments, parentCommented || t.commented || tv.commented) - if err != nil { - return bytesCount, err - } - case []*Tree: - for _, subTree := range node { - var commented string - if parentCommented || t.commented || subTree.commented { - commented = "# " - } - writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[[", combinedKey, "]]\n") - bytesCount += int64(writtenBytesCount) - if err != nil { - return bytesCount, err - } - - bytesCount, err = subTree.writeToOrdered(w, indent+indentString, combinedKey, bytesCount, arraysOneElementPerLine, ord, indentString, compactComments, parentCommented || t.commented || subTree.commented) - if err != nil { - return bytesCount, err - } - } - } - default: // Simple - k := node.key - v, ok := t.values[k].(*tomlValue) - if !ok { - return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k]) - } - - var commented string - if parentCommented || t.commented || v.commented { - commented = "# " - } - repr, err := tomlValueStringRepresentation(v, 
commented, indent, ord, arraysOneElementPerLine) - if err != nil { - return bytesCount, err - } - - if v.comment != "" { - comment := strings.Replace(v.comment, "\n", "\n"+indent+"#", -1) - start := "# " - if strings.HasPrefix(comment, "#") { - start = "" - } - if !compactComments { - writtenBytesCountComment, errc := writeStrings(w, "\n") - bytesCount += int64(writtenBytesCountComment) - if errc != nil { - return bytesCount, errc - } - } - writtenBytesCountComment, errc := writeStrings(w, indent, start, comment, "\n") - bytesCount += int64(writtenBytesCountComment) - if errc != nil { - return bytesCount, errc - } - } - - quotedKey := quoteKeyIfNeeded(k) - writtenBytesCount, err := writeStrings(w, indent, commented, quotedKey, " = ", repr, "\n") - bytesCount += int64(writtenBytesCount) - if err != nil { - return bytesCount, err - } - } - } - - return bytesCount, nil -} - -// quote a key if it does not fit the bare key format (A-Za-z0-9_-) -// quoted keys use the same rules as strings -func quoteKeyIfNeeded(k string) string { - // when encoding a map with the 'quoteMapKeys' option enabled, the tree will contain - // keys that have already been quoted. - // not an ideal situation, but good enough of a stop gap. - if len(k) >= 2 && k[0] == '"' && k[len(k)-1] == '"' { - return k - } - isBare := true - for _, r := range k { - if !isValidBareChar(r) { - isBare = false - break - } - } - if isBare { - return k - } - return quoteKey(k) -} - -func quoteKey(k string) string { - return "\"" + encodeTomlString(k) + "\"" -} - -func writeStrings(w io.Writer, s ...string) (int, error) { - var n int - for i := range s { - b, err := io.WriteString(w, s[i]) - n += b - if err != nil { - return n, err - } - } - return n, nil -} - -// WriteTo encode the Tree as Toml and writes it to the writer w. -// Returns the number of bytes written in case of success, or an error if anything happened. -func (t *Tree) WriteTo(w io.Writer) (int64, error) { - return t.writeTo(w, "", "", 0, false) -} - -// ToTomlString generates a human-readable representation of the current tree. -// Output spans multiple lines, and is suitable for ingest by a TOML parser. -// If the conversion cannot be performed, ToString returns a non-nil error. -func (t *Tree) ToTomlString() (string, error) { - b, err := t.Marshal() - if err != nil { - return "", err - } - return string(b), nil -} - -// String generates a human-readable representation of the current tree. -// Alias of ToString. Present to implement the fmt.Stringer interface. -func (t *Tree) String() string { - result, _ := t.ToTomlString() - return result -} - -// ToMap recursively generates a representation of the tree using Go built-in structures. 
-// The following types are used: -// -// * bool -// * float64 -// * int64 -// * string -// * uint64 -// * time.Time -// * map[string]interface{} (where interface{} is any of this list) -// * []interface{} (where interface{} is any of this list) -func (t *Tree) ToMap() map[string]interface{} { - result := map[string]interface{}{} - - for k, v := range t.values { - switch node := v.(type) { - case []*Tree: - var array []interface{} - for _, item := range node { - array = append(array, item.ToMap()) - } - result[k] = array - case *Tree: - result[k] = node.ToMap() - case *tomlValue: - result[k] = tomlValueToGo(node.value) - } - } - return result -} - -func tomlValueToGo(v interface{}) interface{} { - if tree, ok := v.(*Tree); ok { - return tree.ToMap() - } - - rv := reflect.ValueOf(v) - - if rv.Kind() != reflect.Slice { - return v - } - values := make([]interface{}, rv.Len()) - for i := 0; i < rv.Len(); i++ { - item := rv.Index(i).Interface() - values[i] = tomlValueToGo(item) - } - return values -} diff --git a/vendor/github.com/pelletier/go-toml/tomltree_writepub.go b/vendor/github.com/pelletier/go-toml/tomltree_writepub.go deleted file mode 100644 index fa326308..00000000 --- a/vendor/github.com/pelletier/go-toml/tomltree_writepub.go +++ /dev/null @@ -1,6 +0,0 @@ -package toml - -// ValueStringRepresentation transforms an interface{} value into its toml string representation. -func ValueStringRepresentation(v interface{}, commented string, indent string, ord MarshalOrder, arraysOneElementPerLine bool) (string, error) { - return tomlValueStringRepresentation(v, commented, indent, ord, arraysOneElementPerLine) -} diff --git a/vendor/github.com/pkg/errors/.gitignore b/vendor/github.com/pkg/errors/.gitignore deleted file mode 100644 index daf913b1..00000000 --- a/vendor/github.com/pkg/errors/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof diff --git a/vendor/github.com/pkg/errors/.travis.yml b/vendor/github.com/pkg/errors/.travis.yml deleted file mode 100644 index 9159de03..00000000 --- a/vendor/github.com/pkg/errors/.travis.yml +++ /dev/null @@ -1,10 +0,0 @@ -language: go -go_import_path: github.com/pkg/errors -go: - - 1.11.x - - 1.12.x - - 1.13.x - - tip - -script: - - make check diff --git a/vendor/github.com/pkg/errors/LICENSE b/vendor/github.com/pkg/errors/LICENSE deleted file mode 100644 index 835ba3e7..00000000 --- a/vendor/github.com/pkg/errors/LICENSE +++ /dev/null @@ -1,23 +0,0 @@ -Copyright (c) 2015, Dave Cheney -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/pkg/errors/Makefile b/vendor/github.com/pkg/errors/Makefile deleted file mode 100644 index ce9d7cde..00000000 --- a/vendor/github.com/pkg/errors/Makefile +++ /dev/null @@ -1,44 +0,0 @@ -PKGS := github.com/pkg/errors -SRCDIRS := $(shell go list -f '{{.Dir}}' $(PKGS)) -GO := go - -check: test vet gofmt misspell unconvert staticcheck ineffassign unparam - -test: - $(GO) test $(PKGS) - -vet: | test - $(GO) vet $(PKGS) - -staticcheck: - $(GO) get honnef.co/go/tools/cmd/staticcheck - staticcheck -checks all $(PKGS) - -misspell: - $(GO) get github.com/client9/misspell/cmd/misspell - misspell \ - -locale GB \ - -error \ - *.md *.go - -unconvert: - $(GO) get github.com/mdempsky/unconvert - unconvert -v $(PKGS) - -ineffassign: - $(GO) get github.com/gordonklaus/ineffassign - find $(SRCDIRS) -name '*.go' | xargs ineffassign - -pedantic: check errcheck - -unparam: - $(GO) get mvdan.cc/unparam - unparam ./... - -errcheck: - $(GO) get github.com/kisielk/errcheck - errcheck $(PKGS) - -gofmt: - @echo Checking code is gofmted - @test -z "$(shell gofmt -s -l -d -e $(SRCDIRS) | tee /dev/stderr)" diff --git a/vendor/github.com/pkg/errors/README.md b/vendor/github.com/pkg/errors/README.md deleted file mode 100644 index 54dfdcb1..00000000 --- a/vendor/github.com/pkg/errors/README.md +++ /dev/null @@ -1,59 +0,0 @@ -# errors [![Travis-CI](https://travis-ci.org/pkg/errors.svg)](https://travis-ci.org/pkg/errors) [![AppVeyor](https://ci.appveyor.com/api/projects/status/b98mptawhudj53ep/branch/master?svg=true)](https://ci.appveyor.com/project/davecheney/errors/branch/master) [![GoDoc](https://godoc.org/github.com/pkg/errors?status.svg)](http://godoc.org/github.com/pkg/errors) [![Report card](https://goreportcard.com/badge/github.com/pkg/errors)](https://goreportcard.com/report/github.com/pkg/errors) [![Sourcegraph](https://sourcegraph.com/github.com/pkg/errors/-/badge.svg)](https://sourcegraph.com/github.com/pkg/errors?badge) - -Package errors provides simple error handling primitives. - -`go get github.com/pkg/errors` - -The traditional error handling idiom in Go is roughly akin to -```go -if err != nil { - return err -} -``` -which applied recursively up the call stack results in error reports without context or debugging information. The errors package allows programmers to add context to the failure path in their code in a way that does not destroy the original value of the error. - -## Adding context to an error - -The errors.Wrap function returns a new error that adds context to the original error. For example -```go -_, err := ioutil.ReadAll(r) -if err != nil { - return errors.Wrap(err, "read failed") -} -``` -## Retrieving the cause of an error - -Using `errors.Wrap` constructs a stack of errors, adding context to the preceding error. Depending on the nature of the error it may be necessary to reverse the operation of errors.Wrap to retrieve the original error for inspection. Any error value which implements this interface can be inspected by `errors.Cause`. 
-```go -type causer interface { - Cause() error -} -``` -`errors.Cause` will recursively retrieve the topmost error which does not implement `causer`, which is assumed to be the original cause. For example: -```go -switch err := errors.Cause(err).(type) { -case *MyError: - // handle specifically -default: - // unknown error -} -``` - -[Read the package documentation for more information](https://godoc.org/github.com/pkg/errors). - -## Roadmap - -With the upcoming [Go2 error proposals](https://go.googlesource.com/proposal/+/master/design/go2draft.md) this package is moving into maintenance mode. The roadmap for a 1.0 release is as follows: - -- 0.9. Remove pre Go 1.9 and Go 1.10 support, address outstanding pull requests (if possible) -- 1.0. Final release. - -## Contributing - -Because of the Go2 errors changes, this package is not accepting proposals for new functionality. With that said, we welcome pull requests, bug fixes and issue reports. - -Before sending a PR, please discuss your change by raising an issue. - -## License - -BSD-2-Clause diff --git a/vendor/github.com/pkg/errors/appveyor.yml b/vendor/github.com/pkg/errors/appveyor.yml deleted file mode 100644 index a932eade..00000000 --- a/vendor/github.com/pkg/errors/appveyor.yml +++ /dev/null @@ -1,32 +0,0 @@ -version: build-{build}.{branch} - -clone_folder: C:\gopath\src\github.com\pkg\errors -shallow_clone: true # for startup speed - -environment: - GOPATH: C:\gopath - -platform: - - x64 - -# http://www.appveyor.com/docs/installed-software -install: - # some helpful output for debugging builds - - go version - - go env - # pre-installed MinGW at C:\MinGW is 32bit only - # but MSYS2 at C:\msys64 has mingw64 - - set PATH=C:\msys64\mingw64\bin;%PATH% - - gcc --version - - g++ --version - -build_script: - - go install -v ./... - -test_script: - - set PATH=C:\gopath\bin;%PATH% - - go test -v ./... - -#artifacts: -# - path: '%GOPATH%\bin\*.exe' -deploy: off diff --git a/vendor/github.com/pkg/errors/errors.go b/vendor/github.com/pkg/errors/errors.go deleted file mode 100644 index 161aea25..00000000 --- a/vendor/github.com/pkg/errors/errors.go +++ /dev/null @@ -1,288 +0,0 @@ -// Package errors provides simple error handling primitives. -// -// The traditional error handling idiom in Go is roughly akin to -// -// if err != nil { -// return err -// } -// -// which when applied recursively up the call stack results in error reports -// without context or debugging information. The errors package allows -// programmers to add context to the failure path in their code in a way -// that does not destroy the original value of the error. -// -// Adding context to an error -// -// The errors.Wrap function returns a new error that adds context to the -// original error by recording a stack trace at the point Wrap is called, -// together with the supplied message. For example -// -// _, err := ioutil.ReadAll(r) -// if err != nil { -// return errors.Wrap(err, "read failed") -// } -// -// If additional control is required, the errors.WithStack and -// errors.WithMessage functions destructure errors.Wrap into its component -// operations: annotating an error with a stack trace and with a message, -// respectively. -// -// Retrieving the cause of an error -// -// Using errors.Wrap constructs a stack of errors, adding context to the -// preceding error. Depending on the nature of the error it may be necessary -// to reverse the operation of errors.Wrap to retrieve the original error -// for inspection. 
Any error value which implements this interface -// -// type causer interface { -// Cause() error -// } -// -// can be inspected by errors.Cause. errors.Cause will recursively retrieve -// the topmost error that does not implement causer, which is assumed to be -// the original cause. For example: -// -// switch err := errors.Cause(err).(type) { -// case *MyError: -// // handle specifically -// default: -// // unknown error -// } -// -// Although the causer interface is not exported by this package, it is -// considered a part of its stable public interface. -// -// Formatted printing of errors -// -// All error values returned from this package implement fmt.Formatter and can -// be formatted by the fmt package. The following verbs are supported: -// -// %s print the error. If the error has a Cause it will be -// printed recursively. -// %v see %s -// %+v extended format. Each Frame of the error's StackTrace will -// be printed in detail. -// -// Retrieving the stack trace of an error or wrapper -// -// New, Errorf, Wrap, and Wrapf record a stack trace at the point they are -// invoked. This information can be retrieved with the following interface: -// -// type stackTracer interface { -// StackTrace() errors.StackTrace -// } -// -// The returned errors.StackTrace type is defined as -// -// type StackTrace []Frame -// -// The Frame type represents a call site in the stack trace. Frame supports -// the fmt.Formatter interface that can be used for printing information about -// the stack trace of this error. For example: -// -// if err, ok := err.(stackTracer); ok { -// for _, f := range err.StackTrace() { -// fmt.Printf("%+s:%d\n", f, f) -// } -// } -// -// Although the stackTracer interface is not exported by this package, it is -// considered a part of its stable public interface. -// -// See the documentation for Frame.Format for more details. -package errors - -import ( - "fmt" - "io" -) - -// New returns an error with the supplied message. -// New also records the stack trace at the point it was called. -func New(message string) error { - return &fundamental{ - msg: message, - stack: callers(), - } -} - -// Errorf formats according to a format specifier and returns the string -// as a value that satisfies error. -// Errorf also records the stack trace at the point it was called. -func Errorf(format string, args ...interface{}) error { - return &fundamental{ - msg: fmt.Sprintf(format, args...), - stack: callers(), - } -} - -// fundamental is an error that has a message and a stack, but no caller. -type fundamental struct { - msg string - *stack -} - -func (f *fundamental) Error() string { return f.msg } - -func (f *fundamental) Format(s fmt.State, verb rune) { - switch verb { - case 'v': - if s.Flag('+') { - io.WriteString(s, f.msg) - f.stack.Format(s, verb) - return - } - fallthrough - case 's': - io.WriteString(s, f.msg) - case 'q': - fmt.Fprintf(s, "%q", f.msg) - } -} - -// WithStack annotates err with a stack trace at the point WithStack was called. -// If err is nil, WithStack returns nil. -func WithStack(err error) error { - if err == nil { - return nil - } - return &withStack{ - err, - callers(), - } -} - -type withStack struct { - error - *stack -} - -func (w *withStack) Cause() error { return w.error } - -// Unwrap provides compatibility for Go 1.13 error chains. 
-func (w *withStack) Unwrap() error { return w.error } - -func (w *withStack) Format(s fmt.State, verb rune) { - switch verb { - case 'v': - if s.Flag('+') { - fmt.Fprintf(s, "%+v", w.Cause()) - w.stack.Format(s, verb) - return - } - fallthrough - case 's': - io.WriteString(s, w.Error()) - case 'q': - fmt.Fprintf(s, "%q", w.Error()) - } -} - -// Wrap returns an error annotating err with a stack trace -// at the point Wrap is called, and the supplied message. -// If err is nil, Wrap returns nil. -func Wrap(err error, message string) error { - if err == nil { - return nil - } - err = &withMessage{ - cause: err, - msg: message, - } - return &withStack{ - err, - callers(), - } -} - -// Wrapf returns an error annotating err with a stack trace -// at the point Wrapf is called, and the format specifier. -// If err is nil, Wrapf returns nil. -func Wrapf(err error, format string, args ...interface{}) error { - if err == nil { - return nil - } - err = &withMessage{ - cause: err, - msg: fmt.Sprintf(format, args...), - } - return &withStack{ - err, - callers(), - } -} - -// WithMessage annotates err with a new message. -// If err is nil, WithMessage returns nil. -func WithMessage(err error, message string) error { - if err == nil { - return nil - } - return &withMessage{ - cause: err, - msg: message, - } -} - -// WithMessagef annotates err with the format specifier. -// If err is nil, WithMessagef returns nil. -func WithMessagef(err error, format string, args ...interface{}) error { - if err == nil { - return nil - } - return &withMessage{ - cause: err, - msg: fmt.Sprintf(format, args...), - } -} - -type withMessage struct { - cause error - msg string -} - -func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() } -func (w *withMessage) Cause() error { return w.cause } - -// Unwrap provides compatibility for Go 1.13 error chains. -func (w *withMessage) Unwrap() error { return w.cause } - -func (w *withMessage) Format(s fmt.State, verb rune) { - switch verb { - case 'v': - if s.Flag('+') { - fmt.Fprintf(s, "%+v\n", w.Cause()) - io.WriteString(s, w.msg) - return - } - fallthrough - case 's', 'q': - io.WriteString(s, w.Error()) - } -} - -// Cause returns the underlying cause of the error, if possible. -// An error value has a cause if it implements the following -// interface: -// -// type causer interface { -// Cause() error -// } -// -// If the error does not implement Cause, the original error will -// be returned. If the error is nil, nil will be returned without further -// investigation. -func Cause(err error) error { - type causer interface { - Cause() error - } - - for err != nil { - cause, ok := err.(causer) - if !ok { - break - } - err = cause.Cause() - } - return err -} diff --git a/vendor/github.com/pkg/errors/go113.go b/vendor/github.com/pkg/errors/go113.go deleted file mode 100644 index be0d10d0..00000000 --- a/vendor/github.com/pkg/errors/go113.go +++ /dev/null @@ -1,38 +0,0 @@ -// +build go1.13 - -package errors - -import ( - stderrors "errors" -) - -// Is reports whether any error in err's chain matches target. -// -// The chain consists of err itself followed by the sequence of errors obtained by -// repeatedly calling Unwrap. -// -// An error is considered to match a target if it is equal to that target or if -// it implements a method Is(error) bool such that Is(target) returns true. 
-func Is(err, target error) bool { return stderrors.Is(err, target) } - -// As finds the first error in err's chain that matches target, and if so, sets -// target to that error value and returns true. -// -// The chain consists of err itself followed by the sequence of errors obtained by -// repeatedly calling Unwrap. -// -// An error matches target if the error's concrete value is assignable to the value -// pointed to by target, or if the error has a method As(interface{}) bool such that -// As(target) returns true. In the latter case, the As method is responsible for -// setting target. -// -// As will panic if target is not a non-nil pointer to either a type that implements -// error, or to any interface type. As returns false if err is nil. -func As(err error, target interface{}) bool { return stderrors.As(err, target) } - -// Unwrap returns the result of calling the Unwrap method on err, if err's -// type contains an Unwrap method returning error. -// Otherwise, Unwrap returns nil. -func Unwrap(err error) error { - return stderrors.Unwrap(err) -} diff --git a/vendor/github.com/pkg/errors/stack.go b/vendor/github.com/pkg/errors/stack.go deleted file mode 100644 index 779a8348..00000000 --- a/vendor/github.com/pkg/errors/stack.go +++ /dev/null @@ -1,177 +0,0 @@ -package errors - -import ( - "fmt" - "io" - "path" - "runtime" - "strconv" - "strings" -) - -// Frame represents a program counter inside a stack frame. -// For historical reasons if Frame is interpreted as a uintptr -// its value represents the program counter + 1. -type Frame uintptr - -// pc returns the program counter for this frame; -// multiple frames may have the same PC value. -func (f Frame) pc() uintptr { return uintptr(f) - 1 } - -// file returns the full path to the file that contains the -// function for this Frame's pc. -func (f Frame) file() string { - fn := runtime.FuncForPC(f.pc()) - if fn == nil { - return "unknown" - } - file, _ := fn.FileLine(f.pc()) - return file -} - -// line returns the line number of source code of the -// function for this Frame's pc. -func (f Frame) line() int { - fn := runtime.FuncForPC(f.pc()) - if fn == nil { - return 0 - } - _, line := fn.FileLine(f.pc()) - return line -} - -// name returns the name of this function, if known. -func (f Frame) name() string { - fn := runtime.FuncForPC(f.pc()) - if fn == nil { - return "unknown" - } - return fn.Name() -} - -// Format formats the frame according to the fmt.Formatter interface. -// -// %s source file -// %d source line -// %n function name -// %v equivalent to %s:%d -// -// Format accepts flags that alter the printing of some verbs, as follows: -// -// %+s function name and path of source file relative to the compile time -// GOPATH separated by \n\t (\n\t) -// %+v equivalent to %+s:%d -func (f Frame) Format(s fmt.State, verb rune) { - switch verb { - case 's': - switch { - case s.Flag('+'): - io.WriteString(s, f.name()) - io.WriteString(s, "\n\t") - io.WriteString(s, f.file()) - default: - io.WriteString(s, path.Base(f.file())) - } - case 'd': - io.WriteString(s, strconv.Itoa(f.line())) - case 'n': - io.WriteString(s, funcname(f.name())) - case 'v': - f.Format(s, 's') - io.WriteString(s, ":") - f.Format(s, 'd') - } -} - -// MarshalText formats a stacktrace Frame as a text string. The output is the -// same as that of fmt.Sprintf("%+v", f), but without newlines or tabs. 
-func (f Frame) MarshalText() ([]byte, error) { - name := f.name() - if name == "unknown" { - return []byte(name), nil - } - return []byte(fmt.Sprintf("%s %s:%d", name, f.file(), f.line())), nil -} - -// StackTrace is stack of Frames from innermost (newest) to outermost (oldest). -type StackTrace []Frame - -// Format formats the stack of Frames according to the fmt.Formatter interface. -// -// %s lists source files for each Frame in the stack -// %v lists the source file and line number for each Frame in the stack -// -// Format accepts flags that alter the printing of some verbs, as follows: -// -// %+v Prints filename, function, and line number for each Frame in the stack. -func (st StackTrace) Format(s fmt.State, verb rune) { - switch verb { - case 'v': - switch { - case s.Flag('+'): - for _, f := range st { - io.WriteString(s, "\n") - f.Format(s, verb) - } - case s.Flag('#'): - fmt.Fprintf(s, "%#v", []Frame(st)) - default: - st.formatSlice(s, verb) - } - case 's': - st.formatSlice(s, verb) - } -} - -// formatSlice will format this StackTrace into the given buffer as a slice of -// Frame, only valid when called with '%s' or '%v'. -func (st StackTrace) formatSlice(s fmt.State, verb rune) { - io.WriteString(s, "[") - for i, f := range st { - if i > 0 { - io.WriteString(s, " ") - } - f.Format(s, verb) - } - io.WriteString(s, "]") -} - -// stack represents a stack of program counters. -type stack []uintptr - -func (s *stack) Format(st fmt.State, verb rune) { - switch verb { - case 'v': - switch { - case st.Flag('+'): - for _, pc := range *s { - f := Frame(pc) - fmt.Fprintf(st, "\n%+v", f) - } - } - } -} - -func (s *stack) StackTrace() StackTrace { - f := make([]Frame, len(*s)) - for i := 0; i < len(f); i++ { - f[i] = Frame((*s)[i]) - } - return f -} - -func callers() *stack { - const depth = 32 - var pcs [depth]uintptr - n := runtime.Callers(3, pcs[:]) - var st stack = pcs[0:n] - return &st -} - -// funcname removes the path prefix component of a function's name reported by func.Name(). -func funcname(name string) string { - i := strings.LastIndex(name, "/") - name = name[i+1:] - i = strings.Index(name, ".") - return name[i+1:] -} diff --git a/vendor/github.com/prometheus/client_golang/LICENSE b/vendor/github.com/prometheus/client_golang/LICENSE deleted file mode 100644 index 261eeb9e..00000000 --- a/vendor/github.com/prometheus/client_golang/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/prometheus/client_golang/NOTICE b/vendor/github.com/prometheus/client_golang/NOTICE deleted file mode 100644 index dd878a30..00000000 --- a/vendor/github.com/prometheus/client_golang/NOTICE +++ /dev/null @@ -1,23 +0,0 @@ -Prometheus instrumentation library for Go applications -Copyright 2012-2015 The Prometheus Authors - -This product includes software developed at -SoundCloud Ltd. (http://soundcloud.com/). - - -The following components are included in this product: - -perks - a fork of https://github.com/bmizerany/perks -https://github.com/beorn7/perks -Copyright 2013-2015 Blake Mizerany, Björn Rabenstein -See https://github.com/beorn7/perks/blob/master/README.md for license details. - -Go support for Protocol Buffers - Google's data interchange format -http://github.com/golang/protobuf/ -Copyright 2010 The Go Authors -See source code for license details. - -Support for streaming Protocol Buffer messages for the Go language (golang). -https://github.com/matttproud/golang_protobuf_extensions -Copyright 2013 Matt T. Proud -Licensed under the Apache License, Version 2.0 diff --git a/vendor/github.com/prometheus/client_golang/prometheus/.gitignore b/vendor/github.com/prometheus/client_golang/prometheus/.gitignore deleted file mode 100644 index 3460f034..00000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/.gitignore +++ /dev/null @@ -1 +0,0 @@ -command-line-arguments.test diff --git a/vendor/github.com/prometheus/client_golang/prometheus/README.md b/vendor/github.com/prometheus/client_golang/prometheus/README.md deleted file mode 100644 index 44986bff..00000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/README.md +++ /dev/null @@ -1 +0,0 @@ -See [![go-doc](https://godoc.org/github.com/prometheus/client_golang/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/prometheus). diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collector.go deleted file mode 100644 index 1e839650..00000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/collector.go +++ /dev/null @@ -1,120 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -// Collector is the interface implemented by anything that can be used by -// Prometheus to collect metrics. A Collector has to be registered for -// collection. See Registerer.Register. -// -// The stock metrics provided by this package (Gauge, Counter, Summary, -// Histogram, Untyped) are also Collectors (which only ever collect one metric, -// namely itself). An implementer of Collector may, however, collect multiple -// metrics in a coordinated fashion and/or create metrics on the fly. Examples -// for collectors already implemented in this library are the metric vectors -// (i.e. 
-// collection of multiple instances of the same Metric but with different -// label values) like GaugeVec or SummaryVec, and the ExpvarCollector. -type Collector interface { - // Describe sends the super-set of all possible descriptors of metrics - // collected by this Collector to the provided channel and returns once - // the last descriptor has been sent. The sent descriptors fulfill the - // consistency and uniqueness requirements described in the Desc - // documentation. - // - // It is valid if one and the same Collector sends duplicate - // descriptors. Those duplicates are simply ignored. However, two - // different Collectors must not send duplicate descriptors. - // - // Sending no descriptor at all marks the Collector as “unchecked”, - // i.e. no checks will be performed at registration time, and the - // Collector may yield any Metric it sees fit in its Collect method. - // - // This method idempotently sends the same descriptors throughout the - // lifetime of the Collector. It may be called concurrently and - // therefore must be implemented in a concurrency safe way. - // - // If a Collector encounters an error while executing this method, it - // must send an invalid descriptor (created with NewInvalidDesc) to - // signal the error to the registry. - Describe(chan<- *Desc) - // Collect is called by the Prometheus registry when collecting - // metrics. The implementation sends each collected metric via the - // provided channel and returns once the last metric has been sent. The - // descriptor of each sent metric is one of those returned by Describe - // (unless the Collector is unchecked, see above). Returned metrics that - // share the same descriptor must differ in their variable label - // values. - // - // This method may be called concurrently and must therefore be - // implemented in a concurrency safe way. Blocking occurs at the expense - // of total performance of rendering all registered metrics. Ideally, - // Collector implementations support concurrent readers. - Collect(chan<- Metric) -} - -// DescribeByCollect is a helper to implement the Describe method of a custom -// Collector. It collects the metrics from the provided Collector and sends -// their descriptors to the provided channel. -// -// If a Collector collects the same metrics throughout its lifetime, its -// Describe method can simply be implemented as: -// -// func (c customCollector) Describe(ch chan<- *Desc) { -// DescribeByCollect(c, ch) -// } -// -// However, this will not work if the metrics collected change dynamically over -// the lifetime of the Collector in a way that their combined set of descriptors -// changes as well. The shortcut implementation will then violate the contract -// of the Describe method. If a Collector sometimes collects no metrics at all -// (for example vectors like CounterVec, GaugeVec, etc., which only collect -// metrics after a metric with a fully specified label set has been accessed), -// it might even get registered as an unchecked Collector (cf. the Register -// method of the Registerer interface). Hence, only use this shortcut -// implementation of Describe if you are certain to fulfill the contract. -// -// The Collector example demonstrates a use of DescribeByCollect. 
-func DescribeByCollect(c Collector, descs chan<- *Desc) { - metrics := make(chan Metric) - go func() { - c.Collect(metrics) - close(metrics) - }() - for m := range metrics { - descs <- m.Desc() - } -} - -// selfCollector implements Collector for a single Metric so that the Metric -// collects itself. Add it as an anonymous field to a struct that implements -// Metric, and call init with the Metric itself as an argument. -type selfCollector struct { - self Metric -} - -// init provides the selfCollector with a reference to the metric it is supposed -// to collect. It is usually called within the factory function to create a -// metric. See example. -func (c *selfCollector) init(self Metric) { - c.self = self -} - -// Describe implements Collector. -func (c *selfCollector) Describe(ch chan<- *Desc) { - ch <- c.self.Desc() -} - -// Collect implements Collector. -func (c *selfCollector) Collect(ch chan<- Metric) { - ch <- c.self -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collectors/collectors.go b/vendor/github.com/prometheus/client_golang/prometheus/collectors/collectors.go deleted file mode 100644 index c4d0f5c3..00000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/collectors/collectors.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2021 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package collectors provides implementations of prometheus.Collector to -// conveniently collect process and Go-related metrics. -package collectors diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector.go deleted file mode 100644 index e09f149d..00000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector.go +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright 2021 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package collectors - -import ( - "database/sql" - - "github.com/prometheus/client_golang/prometheus" -) - -type dbStatsCollector struct { - db *sql.DB - - maxOpenConnections *prometheus.Desc - - openConnections *prometheus.Desc - inUseConnections *prometheus.Desc - idleConnections *prometheus.Desc - - waitCount *prometheus.Desc - waitDuration *prometheus.Desc - maxIdleClosed *prometheus.Desc - maxIdleTimeClosed *prometheus.Desc - maxLifetimeClosed *prometheus.Desc -} - -// NewDBStatsCollector returns a collector that exports metrics about the given *sql.DB. -// See https://golang.org/pkg/database/sql/#DBStats for more information on stats. -func NewDBStatsCollector(db *sql.DB, dbName string) prometheus.Collector { - fqName := func(name string) string { - return "go_sql_" + name - } - return &dbStatsCollector{ - db: db, - maxOpenConnections: prometheus.NewDesc( - fqName("max_open_connections"), - "Maximum number of open connections to the database.", - nil, prometheus.Labels{"db_name": dbName}, - ), - openConnections: prometheus.NewDesc( - fqName("open_connections"), - "The number of established connections both in use and idle.", - nil, prometheus.Labels{"db_name": dbName}, - ), - inUseConnections: prometheus.NewDesc( - fqName("in_use_connections"), - "The number of connections currently in use.", - nil, prometheus.Labels{"db_name": dbName}, - ), - idleConnections: prometheus.NewDesc( - fqName("idle_connections"), - "The number of idle connections.", - nil, prometheus.Labels{"db_name": dbName}, - ), - waitCount: prometheus.NewDesc( - fqName("wait_count_total"), - "The total number of connections waited for.", - nil, prometheus.Labels{"db_name": dbName}, - ), - waitDuration: prometheus.NewDesc( - fqName("wait_duration_seconds_total"), - "The total time blocked waiting for a new connection.", - nil, prometheus.Labels{"db_name": dbName}, - ), - maxIdleClosed: prometheus.NewDesc( - fqName("max_idle_closed_total"), - "The total number of connections closed due to SetMaxIdleConns.", - nil, prometheus.Labels{"db_name": dbName}, - ), - maxIdleTimeClosed: prometheus.NewDesc( - fqName("max_idle_time_closed_total"), - "The total number of connections closed due to SetConnMaxIdleTime.", - nil, prometheus.Labels{"db_name": dbName}, - ), - maxLifetimeClosed: prometheus.NewDesc( - fqName("max_lifetime_closed_total"), - "The total number of connections closed due to SetConnMaxLifetime.", - nil, prometheus.Labels{"db_name": dbName}, - ), - } -} - -// Describe implements Collector. -func (c *dbStatsCollector) Describe(ch chan<- *prometheus.Desc) { - ch <- c.maxOpenConnections - ch <- c.openConnections - ch <- c.inUseConnections - ch <- c.idleConnections - ch <- c.waitCount - ch <- c.waitDuration - ch <- c.maxIdleClosed - ch <- c.maxLifetimeClosed - c.describeNewInGo115(ch) -} - -// Collect implements Collector. 
-func (c *dbStatsCollector) Collect(ch chan<- prometheus.Metric) { - stats := c.db.Stats() - ch <- prometheus.MustNewConstMetric(c.maxOpenConnections, prometheus.GaugeValue, float64(stats.MaxOpenConnections)) - ch <- prometheus.MustNewConstMetric(c.openConnections, prometheus.GaugeValue, float64(stats.OpenConnections)) - ch <- prometheus.MustNewConstMetric(c.inUseConnections, prometheus.GaugeValue, float64(stats.InUse)) - ch <- prometheus.MustNewConstMetric(c.idleConnections, prometheus.GaugeValue, float64(stats.Idle)) - ch <- prometheus.MustNewConstMetric(c.waitCount, prometheus.CounterValue, float64(stats.WaitCount)) - ch <- prometheus.MustNewConstMetric(c.waitDuration, prometheus.CounterValue, stats.WaitDuration.Seconds()) - ch <- prometheus.MustNewConstMetric(c.maxIdleClosed, prometheus.CounterValue, float64(stats.MaxIdleClosed)) - ch <- prometheus.MustNewConstMetric(c.maxLifetimeClosed, prometheus.CounterValue, float64(stats.MaxLifetimeClosed)) - c.collectNewInGo115(ch, stats) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector_go115.go b/vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector_go115.go deleted file mode 100644 index a6e6268c..00000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector_go115.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2021 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build go1.15 - -package collectors - -import ( - "database/sql" - - "github.com/prometheus/client_golang/prometheus" -) - -func (c *dbStatsCollector) describeNewInGo115(ch chan<- *prometheus.Desc) { - ch <- c.maxIdleTimeClosed -} - -func (c *dbStatsCollector) collectNewInGo115(ch chan<- prometheus.Metric, stats sql.DBStats) { - ch <- prometheus.MustNewConstMetric(c.maxIdleTimeClosed, prometheus.CounterValue, float64(stats.MaxIdleTimeClosed)) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector_pre_go115.go b/vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector_pre_go115.go deleted file mode 100644 index 0568affe..00000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector_pre_go115.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2021 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// +build !go1.15 - -package collectors - -import ( - "database/sql" - - "github.com/prometheus/client_golang/prometheus" -) - -func (c *dbStatsCollector) describeNewInGo115(ch chan<- *prometheus.Desc) {} - -func (c *dbStatsCollector) collectNewInGo115(ch chan<- prometheus.Metric, stats sql.DBStats) {} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collectors/expvar_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collectors/expvar_collector.go deleted file mode 100644 index 3aa8d059..00000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/collectors/expvar_collector.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2021 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package collectors - -import "github.com/prometheus/client_golang/prometheus" - -// NewExpvarCollector returns a newly allocated expvar Collector. -// -// An expvar Collector collects metrics from the expvar interface. It provides a -// quick way to expose numeric values that are already exported via expvar as -// Prometheus metrics. Note that the data models of expvar and Prometheus are -// fundamentally different, and that the expvar Collector is inherently slower -// than native Prometheus metrics. Thus, the expvar Collector is probably great -// for experiments and prototying, but you should seriously consider a more -// direct implementation of Prometheus metrics for monitoring production -// systems. -// -// The exports map has the following meaning: -// -// The keys in the map correspond to expvar keys, i.e. for every expvar key you -// want to export as Prometheus metric, you need an entry in the exports -// map. The descriptor mapped to each key describes how to export the expvar -// value. It defines the name and the help string of the Prometheus metric -// proxying the expvar value. The type will always be Untyped. -// -// For descriptors without variable labels, the expvar value must be a number or -// a bool. The number is then directly exported as the Prometheus sample -// value. (For a bool, 'false' translates to 0 and 'true' to 1). Expvar values -// that are not numbers or bools are silently ignored. -// -// If the descriptor has one variable label, the expvar value must be an expvar -// map. The keys in the expvar map become the various values of the one -// Prometheus label. The values in the expvar map must be numbers or bools again -// as above. -// -// For descriptors with more than one variable label, the expvar must be a -// nested expvar map, i.e. where the values of the topmost map are maps again -// etc. until a depth is reached that corresponds to the number of labels. The -// leaves of that structure must be numbers or bools as above to serve as the -// sample values. -// -// Anything that does not fit into the scheme above is silently ignored. -func NewExpvarCollector(exports map[string]*prometheus.Desc) prometheus.Collector { - //nolint:staticcheck // Ignore SA1019 until v2. 
- return prometheus.NewExpvarCollector(exports) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector.go deleted file mode 100644 index edaa4e50..00000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright 2021 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package collectors - -import "github.com/prometheus/client_golang/prometheus" - -// NewGoCollector returns a collector that exports metrics about the current Go -// process. This includes memory stats. To collect those, runtime.ReadMemStats -// is called. This requires to “stop the world”, which usually only happens for -// garbage collection (GC). Take the following implications into account when -// deciding whether to use the Go collector: -// -// 1. The performance impact of stopping the world is the more relevant the more -// frequently metrics are collected. However, with Go1.9 or later the -// stop-the-world time per metrics collection is very short (~25µs) so that the -// performance impact will only matter in rare cases. However, with older Go -// versions, the stop-the-world duration depends on the heap size and can be -// quite significant (~1.7 ms/GiB as per -// https://go-review.googlesource.com/c/go/+/34937). -// -// 2. During an ongoing GC, nothing else can stop the world. Therefore, if the -// metrics collection happens to coincide with GC, it will only complete after -// GC has finished. Usually, GC is fast enough to not cause problems. However, -// with a very large heap, GC might take multiple seconds, which is enough to -// cause scrape timeouts in common setups. To avoid this problem, the Go -// collector will use the memstats from a previous collection if -// runtime.ReadMemStats takes more than 1s. However, if there are no previously -// collected memstats, or their collection is more than 5m ago, the collection -// will block until runtime.ReadMemStats succeeds. -// -// NOTE: The problem is solved in Go 1.15, see -// https://github.com/golang/go/issues/19812 for the related Go issue. -func NewGoCollector() prometheus.Collector { - //nolint:staticcheck // Ignore SA1019 until v2. - return prometheus.NewGoCollector() -} - -// NewBuildInfoCollector returns a collector collecting a single metric -// "go_build_info" with the constant value 1 and three labels "path", "version", -// and "checksum". Their label values contain the main module path, version, and -// checksum, respectively. The labels will only have meaningful values if the -// binary is built with Go module support and from source code retrieved from -// the source repository (rather than the local file system). This is usually -// accomplished by building from outside of GOPATH, specifying the full address -// of the main package, e.g. "GO111MODULE=on go run -// github.com/prometheus/client_golang/examples/random".
If built without Go -// module support, all label values will be "unknown". If built with Go module -// support but using the source code from the local file system, the "path" will -// be set appropriately, but "checksum" will be empty and "version" will be -// "(devel)". -// -// This collector uses only the build information for the main module. See -// https://github.com/povilasv/prommod for an example of a collector for the -// module dependencies. -func NewBuildInfoCollector() prometheus.Collector { - //nolint:staticcheck // Ignore SA1019 until v2. - return prometheus.NewBuildInfoCollector() -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collectors/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collectors/process_collector.go deleted file mode 100644 index 24558f50..00000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/collectors/process_collector.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2021 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package collectors - -import "github.com/prometheus/client_golang/prometheus" - -// ProcessCollectorOpts defines the behavior of a process metrics collector -// created with NewProcessCollector. -type ProcessCollectorOpts struct { - // PidFn returns the PID of the process the collector collects metrics - // for. It is called upon each collection. By default, the PID of the - // current process is used, as determined on construction time by - // calling os.Getpid(). - PidFn func() (int, error) - // If non-empty, each of the collected metrics is prefixed by the - // provided string and an underscore ("_"). - Namespace string - // If true, any error encountered during collection is reported as an - // invalid metric (see NewInvalidMetric). Otherwise, errors are ignored - // and the collected metrics will be incomplete. (Possibly, no metrics - // will be collected at all.) While that's usually not desired, it is - // appropriate for the common "mix-in" of process metrics, where process - // metrics are nice to have, but failing to collect them should not - // disrupt the collection of the remaining metrics. - ReportErrors bool -} - -// NewProcessCollector returns a collector which exports the current state of -// process metrics including CPU, memory and file descriptor usage as well as -// the process start time. The detailed behavior is defined by the provided -// ProcessCollectorOpts. The zero value of ProcessCollectorOpts creates a -// collector for the current process with an empty namespace string and no error -// reporting. -// -// The collector only works on operating systems with a Linux-style proc -// filesystem and on Microsoft Windows. On other operating systems, it will not -// collect any metrics. -func NewProcessCollector(opts ProcessCollectorOpts) prometheus.Collector { - //nolint:staticcheck // Ignore SA1019 until v2. 
- return prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{ - PidFn: opts.PidFn, - Namespace: opts.Namespace, - ReportErrors: opts.ReportErrors, - }) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go deleted file mode 100644 index 3f8fd790..00000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/counter.go +++ /dev/null @@ -1,321 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "errors" - "math" - "sync/atomic" - "time" - - dto "github.com/prometheus/client_model/go" -) - -// Counter is a Metric that represents a single numerical value that only ever -// goes up. That implies that it cannot be used to count items whose number can -// also go down, e.g. the number of currently running goroutines. Those -// "counters" are represented by Gauges. -// -// A Counter is typically used to count requests served, tasks completed, errors -// occurred, etc. -// -// To create Counter instances, use NewCounter. -type Counter interface { - Metric - Collector - - // Inc increments the counter by 1. Use Add to increment it by arbitrary - // non-negative values. - Inc() - // Add adds the given value to the counter. It panics if the value is < - // 0. - Add(float64) -} - -// ExemplarAdder is implemented by Counters that offer the option of adding a -// value to the Counter together with an exemplar. Its AddWithExemplar method -// works like the Add method of the Counter interface but also replaces the -// currently saved exemplar (if any) with a new one, created from the provided -// value, the current time as timestamp, and the provided labels. Empty Labels -// will lead to a valid (label-less) exemplar. But if Labels is nil, the current -// exemplar is left in place. AddWithExemplar panics if the value is < 0, if any -// of the provided labels are invalid, or if the provided labels contain more -// than 64 runes in total. -type ExemplarAdder interface { - AddWithExemplar(value float64, exemplar Labels) -} - -// CounterOpts is an alias for Opts. See there for doc comments. -type CounterOpts Opts - -// NewCounter creates a new Counter based on the provided CounterOpts. -// -// The returned implementation also implements ExemplarAdder. It is safe to -// perform the corresponding type assertion. -// -// The returned implementation tracks the counter value in two separate -// variables, a float64 and a uint64. The latter is used to track calls of the -// Inc method and calls of the Add method with a value that can be represented -// as a uint64. This allows atomic increments of the counter with optimal -// performance. (It is common to have an Inc call in very hot execution paths.) -// Both internal tracking values are added up in the Write method. This has to -// be taken into account when it comes to precision and overflow behavior. 
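A short sketch of the Counter and ExemplarAdder contracts described above, assuming the metric name and the trace_id exemplar label (both invented here) suit your application:

    package example

    import "github.com/prometheus/client_golang/prometheus"

    var requestsTotal = prometheus.NewCounter(prometheus.CounterOpts{
        Name: "app_requests_total",
        Help: "Total requests handled.",
    })

    func init() {
        prometheus.MustRegister(requestsTotal)
    }

    func observe() {
        requestsTotal.Inc()    // hot path: atomic integer increment
        requestsTotal.Add(2.5) // fractional values take the float64 path

        // The implementation returned by NewCounter also satisfies ExemplarAdder.
        if ea, ok := requestsTotal.(prometheus.ExemplarAdder); ok {
            ea.AddWithExemplar(1, prometheus.Labels{"trace_id": "abc123"})
        }
    }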
-func NewCounter(opts CounterOpts) Counter { - desc := NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ) - result := &counter{desc: desc, labelPairs: desc.constLabelPairs, now: time.Now} - result.init(result) // Init self-collection. - return result -} - -type counter struct { - // valBits contains the bits of the represented float64 value, while - // valInt stores values that are exact integers. Both have to go first - // in the struct to guarantee alignment for atomic operations. - // http://golang.org/pkg/sync/atomic/#pkg-note-BUG - valBits uint64 - valInt uint64 - - selfCollector - desc *Desc - - labelPairs []*dto.LabelPair - exemplar atomic.Value // Containing nil or a *dto.Exemplar. - - now func() time.Time // To mock out time.Now() for testing. -} - -func (c *counter) Desc() *Desc { - return c.desc -} - -func (c *counter) Add(v float64) { - if v < 0 { - panic(errors.New("counter cannot decrease in value")) - } - - ival := uint64(v) - if float64(ival) == v { - atomic.AddUint64(&c.valInt, ival) - return - } - - for { - oldBits := atomic.LoadUint64(&c.valBits) - newBits := math.Float64bits(math.Float64frombits(oldBits) + v) - if atomic.CompareAndSwapUint64(&c.valBits, oldBits, newBits) { - return - } - } -} - -func (c *counter) AddWithExemplar(v float64, e Labels) { - c.Add(v) - c.updateExemplar(v, e) -} - -func (c *counter) Inc() { - atomic.AddUint64(&c.valInt, 1) -} - -func (c *counter) Write(out *dto.Metric) error { - fval := math.Float64frombits(atomic.LoadUint64(&c.valBits)) - ival := atomic.LoadUint64(&c.valInt) - val := fval + float64(ival) - - var exemplar *dto.Exemplar - if e := c.exemplar.Load(); e != nil { - exemplar = e.(*dto.Exemplar) - } - - return populateMetric(CounterValue, val, c.labelPairs, exemplar, out) -} - -func (c *counter) updateExemplar(v float64, l Labels) { - if l == nil { - return - } - e, err := newExemplar(v, c.now(), l) - if err != nil { - panic(err) - } - c.exemplar.Store(e) -} - -// CounterVec is a Collector that bundles a set of Counters that all share the -// same Desc, but have different values for their variable labels. This is used -// if you want to count the same thing partitioned by various dimensions -// (e.g. number of HTTP requests, partitioned by response code and -// method). Create instances with NewCounterVec. -type CounterVec struct { - *MetricVec -} - -// NewCounterVec creates a new CounterVec based on the provided CounterOpts and -// partitioned by the given label names. -func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec { - desc := NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - labelNames, - opts.ConstLabels, - ) - return &CounterVec{ - MetricVec: NewMetricVec(desc, func(lvs ...string) Metric { - if len(lvs) != len(desc.variableLabels) { - panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs)) - } - result := &counter{desc: desc, labelPairs: MakeLabelPairs(desc, lvs), now: time.Now} - result.init(result) // Init self-collection. - return result - }), - } -} - -// GetMetricWithLabelValues returns the Counter for the given slice of label -// values (same order as the variable labels in Desc). If that combination of -// label values is accessed for the first time, a new Counter is created. -// -// It is possible to call this method without using the returned Counter to only -// create the new Counter but leave it at its starting value 0. See also the -// SummaryVec example. 
-// -// Keeping the Counter for later use is possible (and should be considered if -// performance is critical), but keep in mind that Reset, DeleteLabelValues and -// Delete can be used to delete the Counter from the CounterVec. In that case, -// the Counter will still exist, but it will not be exported anymore, even if a -// Counter with the same label values is created later. -// -// An error is returned if the number of label values is not the same as the -// number of variable labels in Desc (minus any curried labels). -// -// Note that for more than one label value, this method is prone to mistakes -// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as -// an alternative to avoid that type of mistake. For higher label numbers, the -// latter has a much more readable (albeit more verbose) syntax, but it comes -// with a performance overhead (for creating and processing the Labels map). -// See also the GaugeVec example. -func (v *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) { - metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...) - if metric != nil { - return metric.(Counter), err - } - return nil, err -} - -// GetMetricWith returns the Counter for the given Labels map (the label names -// must match those of the variable labels in Desc). If that label map is -// accessed for the first time, a new Counter is created. Implications of -// creating a Counter without using it and keeping the Counter for later use are -// the same as for GetMetricWithLabelValues. -// -// An error is returned if the number and names of the Labels are inconsistent -// with those of the variable labels in Desc (minus any curried labels). -// -// This method is used for the same purpose as -// GetMetricWithLabelValues(...string). See there for pros and cons of the two -// methods. -func (v *CounterVec) GetMetricWith(labels Labels) (Counter, error) { - metric, err := v.MetricVec.GetMetricWith(labels) - if metric != nil { - return metric.(Counter), err - } - return nil, err -} - -// WithLabelValues works as GetMetricWithLabelValues, but panics where -// GetMetricWithLabelValues would have returned an error. Not returning an -// error allows shortcuts like -// myVec.WithLabelValues("404", "GET").Add(42) -func (v *CounterVec) WithLabelValues(lvs ...string) Counter { - c, err := v.GetMetricWithLabelValues(lvs...) - if err != nil { - panic(err) - } - return c -} - -// With works as GetMetricWith, but panics where GetMetricWithLabels would have -// returned an error. Not returning an error allows shortcuts like -// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) -func (v *CounterVec) With(labels Labels) Counter { - c, err := v.GetMetricWith(labels) - if err != nil { - panic(err) - } - return c -} - -// CurryWith returns a vector curried with the provided labels, i.e. the -// returned vector has those labels pre-set for all labeled operations performed -// on it. The cardinality of the curried vector is reduced accordingly. The -// order of the remaining labels stays the same (just with the curried labels -// taken out of the sequence – which is relevant for the -// (GetMetric)WithLabelValues methods). It is possible to curry a curried -// vector, but only with labels not yet used for currying before. -// -// The metrics contained in the CounterVec are shared between the curried and -// uncurried vectors. They are just accessed differently. Curried and uncurried -// vectors behave identically in terms of collection.
Only one must be -// registered with a given registry (usually the uncurried version). The Reset -// method deletes all metrics, even if called on a curried vector. -func (v *CounterVec) CurryWith(labels Labels) (*CounterVec, error) { - vec, err := v.MetricVec.CurryWith(labels) - if vec != nil { - return &CounterVec{vec}, err - } - return nil, err -} - -// MustCurryWith works as CurryWith but panics where CurryWith would have -// returned an error. -func (v *CounterVec) MustCurryWith(labels Labels) *CounterVec { - vec, err := v.CurryWith(labels) - if err != nil { - panic(err) - } - return vec -} - -// CounterFunc is a Counter whose value is determined at collect time by calling a -// provided function. -// -// To create CounterFunc instances, use NewCounterFunc. -type CounterFunc interface { - Metric - Collector -} - -// NewCounterFunc creates a new CounterFunc based on the provided -// CounterOpts. The value reported is determined by calling the given function -// from within the Write method. Take into account that metric collection may -// happen concurrently. If that results in concurrent calls to Write, like in -// the case where a CounterFunc is directly registered with Prometheus, the -// provided function must be concurrency-safe. The function should also honor -// the contract for a Counter (values only go up, not down), but compliance will -// not be checked. -// -// Check out the ExampleGaugeFunc examples for the similar GaugeFunc. -func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc { - return newValueFunc(NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ), CounterValue, function) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go deleted file mode 100644 index 4bb816ab..00000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/desc.go +++ /dev/null @@ -1,186 +0,0 @@ -// Copyright 2016 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "errors" - "fmt" - "sort" - "strings" - - "github.com/cespare/xxhash/v2" - //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. - "github.com/golang/protobuf/proto" - "github.com/prometheus/common/model" - - dto "github.com/prometheus/client_model/go" -) - -// Desc is the descriptor used by every Prometheus Metric. It is essentially -// the immutable meta-data of a Metric. The normal Metric implementations -// included in this package manage their Desc under the hood. Users only have to -// deal with Desc if they use advanced features like the ExpvarCollector or -// custom Collectors and Metrics. 
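By way of illustration, a hand-built Desc with one variable label and one constant label, as used with MustNewConstMetric from a custom Collector; all names here are made up:

    package example

    import "github.com/prometheus/client_golang/prometheus"

    // jobsDesc has one variable label ("queue") and one constant label ("shard").
    var jobsDesc = prometheus.NewDesc(
        prometheus.BuildFQName("app", "worker", "jobs_total"),
        "Total jobs processed.",
        []string{"queue"},               // variable label, value supplied per metric
        prometheus.Labels{"shard": "0"}, // constant label, fixed in the Desc
    )

    // Inside a custom Collector's Collect method, the variable label value is
    // supplied per metric:
    //
    //     ch <- prometheus.MustNewConstMetric(jobsDesc, prometheus.CounterValue, n, "high_priority")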
-// -// Descriptors registered with the same registry have to fulfill certain -// consistency and uniqueness criteria if they share the same fully-qualified -// name: They must have the same help string and the same label names (aka label -// dimensions) in each, constLabels and variableLabels, but they must differ in -// the values of the constLabels. -// -// Descriptors that share the same fully-qualified names and the same label -// values of their constLabels are considered equal. -// -// Use NewDesc to create new Desc instances. -type Desc struct { - // fqName has been built from Namespace, Subsystem, and Name. - fqName string - // help provides some helpful information about this metric. - help string - // constLabelPairs contains precalculated DTO label pairs based on - // the constant labels. - constLabelPairs []*dto.LabelPair - // variableLabels contains names of labels for which the metric - // maintains variable values. - variableLabels []string - // id is a hash of the values of the ConstLabels and fqName. This - // must be unique among all registered descriptors and can therefore be - // used as an identifier of the descriptor. - id uint64 - // dimHash is a hash of the label names (preset and variable) and the - // Help string. Each Desc with the same fqName must have the same - // dimHash. - dimHash uint64 - // err is an error that occurred during construction. It is reported on - // registration time. - err error -} - -// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc -// and will be reported on registration time. variableLabels and constLabels can -// be nil if no such labels should be set. fqName must not be empty. -// -// variableLabels only contain the label names. Their label values are variable -// and therefore not part of the Desc. (They are managed within the Metric.) -// -// For constLabels, the label values are constant. Therefore, they are fully -// specified in the Desc. See the Collector example for a usage pattern. -func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc { - d := &Desc{ - fqName: fqName, - help: help, - variableLabels: variableLabels, - } - if !model.IsValidMetricName(model.LabelValue(fqName)) { - d.err = fmt.Errorf("%q is not a valid metric name", fqName) - return d - } - // labelValues contains the label values of const labels (in order of - // their sorted label names) plus the fqName (at position 0). - labelValues := make([]string, 1, len(constLabels)+1) - labelValues[0] = fqName - labelNames := make([]string, 0, len(constLabels)+len(variableLabels)) - labelNameSet := map[string]struct{}{} - // First add only the const label names and sort them... - for labelName := range constLabels { - if !checkLabelName(labelName) { - d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName) - return d - } - labelNames = append(labelNames, labelName) - labelNameSet[labelName] = struct{}{} - } - sort.Strings(labelNames) - // ... so that we can now add const label values in the order of their names. - for _, labelName := range labelNames { - labelValues = append(labelValues, constLabels[labelName]) - } - // Validate the const label values. They can't have a wrong cardinality, so - // use in len(labelValues) as expectedNumberOfValues. - if err := validateLabelValues(labelValues, len(labelValues)); err != nil { - d.err = err - return d - } - // Now add the variable label names, but prefix them with something that - // cannot be in a regular label name. 
That prevents matching the label - // dimension with a different mix between preset and variable labels. - for _, labelName := range variableLabels { - if !checkLabelName(labelName) { - d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName) - return d - } - labelNames = append(labelNames, "$"+labelName) - labelNameSet[labelName] = struct{}{} - } - if len(labelNames) != len(labelNameSet) { - d.err = errors.New("duplicate label names") - return d - } - - xxh := xxhash.New() - for _, val := range labelValues { - xxh.WriteString(val) - xxh.Write(separatorByteSlice) - } - d.id = xxh.Sum64() - // Sort labelNames so that order doesn't matter for the hash. - sort.Strings(labelNames) - // Now hash together (in this order) the help string and the sorted - // label names. - xxh.Reset() - xxh.WriteString(help) - xxh.Write(separatorByteSlice) - for _, labelName := range labelNames { - xxh.WriteString(labelName) - xxh.Write(separatorByteSlice) - } - d.dimHash = xxh.Sum64() - - d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels)) - for n, v := range constLabels { - d.constLabelPairs = append(d.constLabelPairs, &dto.LabelPair{ - Name: proto.String(n), - Value: proto.String(v), - }) - } - sort.Sort(labelPairSorter(d.constLabelPairs)) - return d -} - -// NewInvalidDesc returns an invalid descriptor, i.e. a descriptor with the -// provided error set. If a collector returning such a descriptor is registered, -// registration will fail with the provided error. NewInvalidDesc can be used by -// a Collector to signal inability to describe itself. -func NewInvalidDesc(err error) *Desc { - return &Desc{ - err: err, - } -} - -func (d *Desc) String() string { - lpStrings := make([]string, 0, len(d.constLabelPairs)) - for _, lp := range d.constLabelPairs { - lpStrings = append( - lpStrings, - fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()), - ) - } - return fmt.Sprintf( - "Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: %v}", - d.fqName, - d.help, - strings.Join(lpStrings, ","), - d.variableLabels, - ) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go deleted file mode 100644 index 98450125..00000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/doc.go +++ /dev/null @@ -1,199 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package prometheus is the core instrumentation package. It provides metrics -// primitives to instrument code for monitoring. It also offers a registry for -// metrics. Sub-packages allow to expose the registered metrics via HTTP -// (package promhttp) or push them to a Pushgateway (package push). There is -// also a sub-package promauto, which provides metrics constructors with -// automatic registration. -// -// All exported functions and methods are safe to be used concurrently unless -// specified otherwise. 
-// -// A Basic Example -// -// As a starting point, a very basic usage example: -// -// package main -// -// import ( -// "log" -// "net/http" -// -// "github.com/prometheus/client_golang/prometheus" -// "github.com/prometheus/client_golang/prometheus/promhttp" -// ) -// -// var ( -// cpuTemp = prometheus.NewGauge(prometheus.GaugeOpts{ -// Name: "cpu_temperature_celsius", -// Help: "Current temperature of the CPU.", -// }) -// hdFailures = prometheus.NewCounterVec( -// prometheus.CounterOpts{ -// Name: "hd_errors_total", -// Help: "Number of hard-disk errors.", -// }, -// []string{"device"}, -// ) -// ) -// -// func init() { -// // Metrics have to be registered to be exposed: -// prometheus.MustRegister(cpuTemp) -// prometheus.MustRegister(hdFailures) -// } -// -// func main() { -// cpuTemp.Set(65.3) -// hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc() -// -// // The Handler function provides a default handler to expose metrics -// // via an HTTP server. "/metrics" is the usual endpoint for that. -// http.Handle("/metrics", promhttp.Handler()) -// log.Fatal(http.ListenAndServe(":8080", nil)) -// } -// -// -// This is a complete program that exports two metrics, a Gauge and a Counter, -// the latter with a label attached to turn it into a (one-dimensional) vector. -// -// Metrics -// -// The number of exported identifiers in this package might appear a bit -// overwhelming. However, in addition to the basic plumbing shown in the example -// above, you only need to understand the different metric types and their -// vector versions for basic usage. Furthermore, if you are not concerned with -// fine-grained control of when and how to register metrics with the registry, -// have a look at the promauto package, which will effectively allow you to -// ignore registration altogether in simple cases. -// -// Above, you have already touched the Counter and the Gauge. There are two more -// advanced metric types: the Summary and Histogram. A more thorough description -// of those four metric types can be found in the Prometheus docs: -// https://prometheus.io/docs/concepts/metric_types/ -// -// In addition to the fundamental metric types Gauge, Counter, Summary, and -// Histogram, a very important part of the Prometheus data model is the -// partitioning of samples along dimensions called labels, which results in -// metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec, -// and HistogramVec. -// -// While only the fundamental metric types implement the Metric interface, both -// the metrics and their vector versions implement the Collector interface. A -// Collector manages the collection of a number of Metrics, but for convenience, -// a Metric can also “collect itself”. Note that Gauge, Counter, Summary, and -// Histogram are interfaces themselves while GaugeVec, CounterVec, SummaryVec, -// and HistogramVec are not. -// -// To create instances of Metrics and their vector versions, you need a suitable -// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, or HistogramOpts. -// -// Custom Collectors and constant Metrics -// -// While you could create your own implementations of Metric, most likely you -// will only ever implement the Collector interface on your own. At a first -// glance, a custom Collector seems handy to bundle Metrics for common -// registration (with the prime example of the different metric vectors above, -// which bundle all the metrics of the same name but with different labels).
-// -// There is a more involved use case, too: If you already have metrics -// available, created outside of the Prometheus context, you don't need the -// interface of the various Metric types. You essentially want to mirror the -// existing numbers into Prometheus Metrics during collection. An own -// implementation of the Collector interface is perfect for that. You can create -// Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and -// NewConstSummary (and their respective Must… versions). NewConstMetric is used -// for all metric types with just a float64 as their value: Counter, Gauge, and -// a special “type” called Untyped. Use the latter if you are not sure if the -// mirrored metric is a Counter or a Gauge. Creation of the Metric instance -// happens in the Collect method. The Describe method has to return separate -// Desc instances, representative of the “throw-away” metrics to be created -// later. NewDesc comes in handy to create those Desc instances. Alternatively, -// you could return no Desc at all, which will mark the Collector “unchecked”. -// No checks are performed at registration time, but metric consistency will -// still be ensured at scrape time, i.e. any inconsistencies will lead to scrape -// errors. Thus, with unchecked Collectors, the responsibility to not collect -// metrics that lead to inconsistencies in the total scrape result lies with the -// implementer of the Collector. While this is not a desirable state, it is -// sometimes necessary. The typical use case is a situation where the exact -// metrics to be returned by a Collector cannot be predicted at registration -// time, but the implementer has sufficient knowledge of the whole system to -// guarantee metric consistency. -// -// The Collector example illustrates the use case. You can also look at the -// source code of the processCollector (mirroring process metrics), the -// goCollector (mirroring Go metrics), or the expvarCollector (mirroring expvar -// metrics) as examples that are used in this package itself. -// -// If you just need to call a function to get a single float value to collect as -// a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting -// shortcuts. -// -// Advanced Uses of the Registry -// -// While MustRegister is the by far most common way of registering a Collector, -// sometimes you might want to handle the errors the registration might cause. -// As suggested by the name, MustRegister panics if an error occurs. With the -// Register function, the error is returned and can be handled. -// -// An error is returned if the registered Collector is incompatible or -// inconsistent with already registered metrics. The registry aims for -// consistency of the collected metrics according to the Prometheus data model. -// Inconsistencies are ideally detected at registration time, not at collect -// time. The former will usually be detected at start-up time of a program, -// while the latter will only happen at scrape time, possibly not even on the -// first scrape if the inconsistency only becomes relevant later. That is the -// main reason why a Collector and a Metric have to describe themselves to the -// registry. -// -// So far, everything we did operated on the so-called default registry, as it -// can be found in the global DefaultRegisterer variable. With NewRegistry, you -// can create a custom registry, or you can even implement the Registerer or -// Gatherer interfaces yourself.
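A sketch of the custom-registry flow just described, handling errors via Register instead of MustRegister; NewRegistry and promhttp.HandlerFor are the client_golang APIs named above, while the metric itself is hypothetical:

    package example

    import (
        "log"
        "net/http"

        "github.com/prometheus/client_golang/prometheus"
        "github.com/prometheus/client_golang/prometheus/promhttp"
    )

    func serveMetrics() {
        reg := prometheus.NewRegistry()

        queueDepth := prometheus.NewGauge(prometheus.GaugeOpts{
            Name: "app_queue_depth",
            Help: "Current queue depth.",
        })
        // Register returns the error that MustRegister would panic with.
        if err := reg.Register(queueDepth); err != nil {
            log.Fatalf("registering collector: %v", err)
        }

        // Expose only this registry's metrics rather than the default registry's.
        http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
        log.Fatal(http.ListenAndServe(":8080", nil))
    }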
The methods Register and Unregister work in the -// same way on a custom registry as the global functions Register and Unregister -// on the default registry. -// -// There are a number of uses for custom registries: You can use registries with -// special properties, see NewPedanticRegistry. You can avoid global state, as -// it is imposed by the DefaultRegisterer. You can use multiple registries at -// the same time to expose different metrics in different ways. You can use -// separate registries for testing purposes. -// -// Also note that the DefaultRegisterer comes registered with a Collector for Go -// runtime metrics (via NewGoCollector) and a Collector for process metrics (via -// NewProcessCollector). With a custom registry, you are in control and decide -// yourself about the Collectors to register. -// -// HTTP Exposition -// -// The Registry implements the Gatherer interface. The caller of the Gather -// method can then expose the gathered metrics in some way. Usually, the metrics -// are served via HTTP on the /metrics endpoint. That's happening in the example -// above. The tools to expose metrics via HTTP are in the promhttp sub-package. -// -// Pushing to the Pushgateway -// -// Function for pushing to the Pushgateway can be found in the push sub-package. -// -// Graphite Bridge -// -// Functions and examples to push metrics from a Gatherer to Graphite can be -// found in the graphite sub-package. -// -// Other Means of Exposition -// -// More ways of exposing metrics can easily be added by following the approaches -// of the existing implementations. -package prometheus diff --git a/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go deleted file mode 100644 index c41ab37f..00000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "encoding/json" - "expvar" -) - -type expvarCollector struct { - exports map[string]*Desc -} - -// NewExpvarCollector is the obsolete version of collectors.NewExpvarCollector. -// See there for documentation. -// -// Deprecated: Use collectors.NewExpvarCollector instead. -func NewExpvarCollector(exports map[string]*Desc) Collector { - return &expvarCollector{ - exports: exports, - } -} - -// Describe implements Collector. -func (e *expvarCollector) Describe(ch chan<- *Desc) { - for _, desc := range e.exports { - ch <- desc - } -} - -// Collect implements Collector. 
-func (e *expvarCollector) Collect(ch chan<- Metric) { - for name, desc := range e.exports { - var m Metric - expVar := expvar.Get(name) - if expVar == nil { - continue - } - var v interface{} - labels := make([]string, len(desc.variableLabels)) - if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil { - ch <- NewInvalidMetric(desc, err) - continue - } - var processValue func(v interface{}, i int) - processValue = func(v interface{}, i int) { - if i >= len(labels) { - copiedLabels := append(make([]string, 0, len(labels)), labels...) - switch v := v.(type) { - case float64: - m = MustNewConstMetric(desc, UntypedValue, v, copiedLabels...) - case bool: - if v { - m = MustNewConstMetric(desc, UntypedValue, 1, copiedLabels...) - } else { - m = MustNewConstMetric(desc, UntypedValue, 0, copiedLabels...) - } - default: - return - } - ch <- m - return - } - vm, ok := v.(map[string]interface{}) - if !ok { - return - } - for lv, val := range vm { - labels[i] = lv - processValue(val, i+1) - } - } - processValue(v, 0) - } -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/fnv.go b/vendor/github.com/prometheus/client_golang/prometheus/fnv.go deleted file mode 100644 index 3d383a73..00000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/fnv.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -// Inline and byte-free variant of hash/fnv's fnv64a. - -const ( - offset64 = 14695981039346656037 - prime64 = 1099511628211 -) - -// hashNew initializies a new fnv64a hash value. -func hashNew() uint64 { - return offset64 -} - -// hashAdd adds a string to a fnv64a hash value, returning the updated hash. -func hashAdd(h uint64, s string) uint64 { - for i := 0; i < len(s); i++ { - h ^= uint64(s[i]) - h *= prime64 - } - return h -} - -// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash. -func hashAddByte(h uint64, b byte) uint64 { - h ^= uint64(b) - h *= prime64 - return h -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go deleted file mode 100644 index bd0733d6..00000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go +++ /dev/null @@ -1,289 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package prometheus - -import ( - "math" - "sync/atomic" - "time" - - dto "github.com/prometheus/client_model/go" -) - -// Gauge is a Metric that represents a single numerical value that can -// arbitrarily go up and down. -// -// A Gauge is typically used for measured values like temperatures or current -// memory usage, but also "counts" that can go up and down, like the number of -// running goroutines. -// -// To create Gauge instances, use NewGauge. -type Gauge interface { - Metric - Collector - - // Set sets the Gauge to an arbitrary value. - Set(float64) - // Inc increments the Gauge by 1. Use Add to increment it by arbitrary - // values. - Inc() - // Dec decrements the Gauge by 1. Use Sub to decrement it by arbitrary - // values. - Dec() - // Add adds the given value to the Gauge. (The value can be negative, - // resulting in a decrease of the Gauge.) - Add(float64) - // Sub subtracts the given value from the Gauge. (The value can be - // negative, resulting in an increase of the Gauge.) - Sub(float64) - - // SetToCurrentTime sets the Gauge to the current Unix time in seconds. - SetToCurrentTime() -} - -// GaugeOpts is an alias for Opts. See there for doc comments. -type GaugeOpts Opts - -// NewGauge creates a new Gauge based on the provided GaugeOpts. -// -// The returned implementation is optimized for a fast Set method. If you have a -// choice for managing the value of a Gauge via Set vs. Inc/Dec/Add/Sub, pick -// the former. For example, the Inc method of the returned Gauge is slower than -// the Inc method of a Counter returned by NewCounter. This matches the typical -// scenarios for Gauges and Counters, where the former tends to be Set-heavy and -// the latter Inc-heavy. -func NewGauge(opts GaugeOpts) Gauge { - desc := NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ) - result := &gauge{desc: desc, labelPairs: desc.constLabelPairs} - result.init(result) // Init self-collection. - return result -} - -type gauge struct { - // valBits contains the bits of the represented float64 value. It has - // to go first in the struct to guarantee alignment for atomic - // operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG - valBits uint64 - - selfCollector - - desc *Desc - labelPairs []*dto.LabelPair -} - -func (g *gauge) Desc() *Desc { - return g.desc -} - -func (g *gauge) Set(val float64) { - atomic.StoreUint64(&g.valBits, math.Float64bits(val)) -} - -func (g *gauge) SetToCurrentTime() { - g.Set(float64(time.Now().UnixNano()) / 1e9) -} - -func (g *gauge) Inc() { - g.Add(1) -} - -func (g *gauge) Dec() { - g.Add(-1) -} - -func (g *gauge) Add(val float64) { - for { - oldBits := atomic.LoadUint64(&g.valBits) - newBits := math.Float64bits(math.Float64frombits(oldBits) + val) - if atomic.CompareAndSwapUint64(&g.valBits, oldBits, newBits) { - return - } - } -} - -func (g *gauge) Sub(val float64) { - g.Add(val * -1) -} - -func (g *gauge) Write(out *dto.Metric) error { - val := math.Float64frombits(atomic.LoadUint64(&g.valBits)) - return populateMetric(GaugeValue, val, g.labelPairs, nil, out) -} - -// GaugeVec is a Collector that bundles a set of Gauges that all share the same -// Desc, but have different values for their variable labels. This is used if -// you want to count the same thing partitioned by various dimensions -// (e.g. number of operations queued, partitioned by user and operation -// type). Create instances with NewGaugeVec. 
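A brief sketch of the Set-heavy versus Inc/Dec usage the NewGauge comment contrasts; the metric names are illustrative:

    package example

    import "github.com/prometheus/client_golang/prometheus"

    var inflight = prometheus.NewGauge(prometheus.GaugeOpts{
        Name: "app_inflight_requests",
        Help: "Requests currently being served.",
    })

    func handle() {
        inflight.Inc() // gauges may count up and down...
        defer inflight.Dec()
        // ...but when a measured value is available, prefer Set, which the
        // returned implementation is optimized for, e.g. temperature.Set(65.3).
    }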
-type GaugeVec struct { - *MetricVec -} - -// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and -// partitioned by the given label names. -func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec { - desc := NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - labelNames, - opts.ConstLabels, - ) - return &GaugeVec{ - MetricVec: NewMetricVec(desc, func(lvs ...string) Metric { - if len(lvs) != len(desc.variableLabels) { - panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs)) - } - result := &gauge{desc: desc, labelPairs: MakeLabelPairs(desc, lvs)} - result.init(result) // Init self-collection. - return result - }), - } -} - -// GetMetricWithLabelValues returns the Gauge for the given slice of label -// values (same order as the variable labels in Desc). If that combination of -// label values is accessed for the first time, a new Gauge is created. -// -// It is possible to call this method without using the returned Gauge to only -// create the new Gauge but leave it at its starting value 0. See also the -// SummaryVec example. -// -// Keeping the Gauge for later use is possible (and should be considered if -// performance is critical), but keep in mind that Reset, DeleteLabelValues and -// Delete can be used to delete the Gauge from the GaugeVec. In that case, the -// Gauge will still exist, but it will not be exported anymore, even if a -// Gauge with the same label values is created later. See also the CounterVec -// example. -// -// An error is returned if the number of label values is not the same as the -// number of variable labels in Desc (minus any curried labels). -// -// Note that for more than one label value, this method is prone to mistakes -// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as -// an alternative to avoid that type of mistake. For higher label numbers, the -// latter has a much more readable (albeit more verbose) syntax, but it comes -// with a performance overhead (for creating and processing the Labels map). -func (v *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) { - metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...) - if metric != nil { - return metric.(Gauge), err - } - return nil, err -} - -// GetMetricWith returns the Gauge for the given Labels map (the label names -// must match those of the variable labels in Desc). If that label map is -// accessed for the first time, a new Gauge is created. Implications of -// creating a Gauge without using it and keeping the Gauge for later use are -// the same as for GetMetricWithLabelValues. -// -// An error is returned if the number and names of the Labels are inconsistent -// with those of the variable labels in Desc (minus any curried labels). -// -// This method is used for the same purpose as -// GetMetricWithLabelValues(...string). See there for pros and cons of the two -// methods. -func (v *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) { - metric, err := v.MetricVec.GetMetricWith(labels) - if metric != nil { - return metric.(Gauge), err - } - return nil, err -} - -// WithLabelValues works as GetMetricWithLabelValues, but panics where -// GetMetricWithLabelValues would have returned an error. Not returning an -// error allows shortcuts like -// myVec.WithLabelValues("404", "GET").Add(42) -func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge { - g, err := v.GetMetricWithLabelValues(lvs...) 
- if err != nil { - panic(err) - } - return g -} - -// With works as GetMetricWith, but panics where GetMetricWith would have -// returned an error. Not returning an error allows shortcuts like -// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) -func (v *GaugeVec) With(labels Labels) Gauge { - g, err := v.GetMetricWith(labels) - if err != nil { - panic(err) - } - return g -} - -// CurryWith returns a vector curried with the provided labels, i.e. the -// returned vector has those labels pre-set for all labeled operations performed -// on it. The cardinality of the curried vector is reduced accordingly. The -// order of the remaining labels stays the same (just with the curried labels -// taken out of the sequence – which is relevant for the -// (GetMetric)WithLabelValues methods). It is possible to curry a curried -// vector, but only with labels not yet used for currying before. -// -// The metrics contained in the GaugeVec are shared between the curried and -// uncurried vectors. They are just accessed differently. Curried and uncurried -// vectors behave identically in terms of collection. Only one must be -// registered with a given registry (usually the uncurried version). The Reset -// method deletes all metrics, even if called on a curried vector. -func (v *GaugeVec) CurryWith(labels Labels) (*GaugeVec, error) { - vec, err := v.MetricVec.CurryWith(labels) - if vec != nil { - return &GaugeVec{vec}, err - } - return nil, err -} - -// MustCurryWith works as CurryWith but panics where CurryWith would have -// returned an error. -func (v *GaugeVec) MustCurryWith(labels Labels) *GaugeVec { - vec, err := v.CurryWith(labels) - if err != nil { - panic(err) - } - return vec -} - -// GaugeFunc is a Gauge whose value is determined at collect time by calling a -// provided function. -// -// To create GaugeFunc instances, use NewGaugeFunc. -type GaugeFunc interface { - Metric - Collector -} - -// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The -// value reported is determined by calling the given function from within the -// Write method. Take into account that metric collection may happen -// concurrently. Therefore, it must be safe to call the provided function -// concurrently. -// -// NewGaugeFunc is a good way to create an “info” style metric with a constant -// value of 1. Example: -// https://github.com/prometheus/common/blob/8558a5b7db3c84fa38b4766966059a7bd5bfa2ee/version/info.go#L36-L56 -func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc { - return newValueFunc(NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ), GaugeValue, function) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go deleted file mode 100644 index a96ed1ce..00000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go +++ /dev/null @@ -1,367 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "runtime" - "runtime/debug" - "sync" - "time" -) - -type goCollector struct { - goroutinesDesc *Desc - threadsDesc *Desc - gcDesc *Desc - goInfoDesc *Desc - - // ms... are memstats related. - msLast *runtime.MemStats // Previously collected memstats. - msLastTimestamp time.Time - msMtx sync.Mutex // Protects msLast and msLastTimestamp. - msMetrics memStatsMetrics - msRead func(*runtime.MemStats) // For mocking in tests. - msMaxWait time.Duration // Wait time for fresh memstats. - msMaxAge time.Duration // Maximum allowed age of old memstats. -} - -// NewGoCollector is the obsolete version of collectors.NewGoCollector. -// See there for documentation. -// -// Deprecated: Use collectors.NewGoCollector instead. -func NewGoCollector() Collector { - return &goCollector{ - goroutinesDesc: NewDesc( - "go_goroutines", - "Number of goroutines that currently exist.", - nil, nil), - threadsDesc: NewDesc( - "go_threads", - "Number of OS threads created.", - nil, nil), - gcDesc: NewDesc( - "go_gc_duration_seconds", - "A summary of the pause duration of garbage collection cycles.", - nil, nil), - goInfoDesc: NewDesc( - "go_info", - "Information about the Go environment.", - nil, Labels{"version": runtime.Version()}), - msLast: &runtime.MemStats{}, - msRead: runtime.ReadMemStats, - msMaxWait: time.Second, - msMaxAge: 5 * time.Minute, - msMetrics: memStatsMetrics{ - { - desc: NewDesc( - memstatNamespace("alloc_bytes"), - "Number of bytes allocated and still in use.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("alloc_bytes_total"), - "Total number of bytes allocated, even if freed.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) }, - valType: CounterValue, - }, { - desc: NewDesc( - memstatNamespace("sys_bytes"), - "Number of bytes obtained from system.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("lookups_total"), - "Total number of pointer lookups.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) }, - valType: CounterValue, - }, { - desc: NewDesc( - memstatNamespace("mallocs_total"), - "Total number of mallocs.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) }, - valType: CounterValue, - }, { - desc: NewDesc( - memstatNamespace("frees_total"), - "Total number of frees.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) }, - valType: CounterValue, - }, { - desc: NewDesc( - memstatNamespace("heap_alloc_bytes"), - "Number of heap bytes allocated and still in use.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("heap_sys_bytes"), - "Number of heap bytes obtained from system.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("heap_idle_bytes"), - "Number of heap bytes waiting to be used.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) }, - valType: GaugeValue, - }, { - desc: NewDesc( - 
memstatNamespace("heap_inuse_bytes"), - "Number of heap bytes that are in use.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("heap_released_bytes"), - "Number of heap bytes released to OS.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("heap_objects"), - "Number of allocated objects.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("stack_inuse_bytes"), - "Number of bytes in use by the stack allocator.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("stack_sys_bytes"), - "Number of bytes obtained from system for stack allocator.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("mspan_inuse_bytes"), - "Number of bytes in use by mspan structures.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("mspan_sys_bytes"), - "Number of bytes used for mspan structures obtained from system.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("mcache_inuse_bytes"), - "Number of bytes in use by mcache structures.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("mcache_sys_bytes"), - "Number of bytes used for mcache structures obtained from system.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("buck_hash_sys_bytes"), - "Number of bytes used by the profiling bucket hash table.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("gc_sys_bytes"), - "Number of bytes used for garbage collection system metadata.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("other_sys_bytes"), - "Number of bytes used for other system allocations.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("next_gc_bytes"), - "Number of heap bytes when next garbage collection will take place.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("last_gc_time_seconds"), - "Number of seconds since 1970 of last garbage collection.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("gc_cpu_fraction"), - "The fraction of this program's available CPU time used by the GC since the program started.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction }, - 
valType: GaugeValue, - }, - }, - } -} - -func memstatNamespace(s string) string { - return "go_memstats_" + s -} - -// Describe returns all descriptions of the collector. -func (c *goCollector) Describe(ch chan<- *Desc) { - ch <- c.goroutinesDesc - ch <- c.threadsDesc - ch <- c.gcDesc - ch <- c.goInfoDesc - for _, i := range c.msMetrics { - ch <- i.desc - } -} - -// Collect returns the current state of all metrics of the collector. -func (c *goCollector) Collect(ch chan<- Metric) { - var ( - ms = &runtime.MemStats{} - done = make(chan struct{}) - ) - // Start reading memstats first as it might take a while. - go func() { - c.msRead(ms) - c.msMtx.Lock() - c.msLast = ms - c.msLastTimestamp = time.Now() - c.msMtx.Unlock() - close(done) - }() - - ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine())) - n, _ := runtime.ThreadCreateProfile(nil) - ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, float64(n)) - - var stats debug.GCStats - stats.PauseQuantiles = make([]time.Duration, 5) - debug.ReadGCStats(&stats) - - quantiles := make(map[float64]float64) - for idx, pq := range stats.PauseQuantiles[1:] { - quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds() - } - quantiles[0.0] = stats.PauseQuantiles[0].Seconds() - ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), stats.PauseTotal.Seconds(), quantiles) - - ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1) - - timer := time.NewTimer(c.msMaxWait) - select { - case <-done: // Our own ReadMemStats succeeded in time. Use it. - timer.Stop() // Important for high collection frequencies to not pile up timers. - c.msCollect(ch, ms) - return - case <-timer.C: // Time out, use last memstats if possible. Continue below. - } - c.msMtx.Lock() - if time.Since(c.msLastTimestamp) < c.msMaxAge { - // Last memstats are recent enough. Collect from them under the lock. - c.msCollect(ch, c.msLast) - c.msMtx.Unlock() - return - } - // If we are here, the last memstats are too old or don't exist. We have - // to wait until our own ReadMemStats finally completes. For that to - // happen, we have to release the lock. - c.msMtx.Unlock() - <-done - c.msCollect(ch, ms) -} - -func (c *goCollector) msCollect(ch chan<- Metric, ms *runtime.MemStats) { - for _, i := range c.msMetrics { - ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms)) - } -} - -// memStatsMetrics provide description, value, and value type for memstat metrics. -type memStatsMetrics []struct { - desc *Desc - eval func(*runtime.MemStats) float64 - valType ValueType -} - -// NewBuildInfoCollector is the obsolete version of collectors.NewBuildInfoCollector. -// See there for documentation. -// -// Deprecated: Use collectors.NewBuildInfoCollector instead. 
-func NewBuildInfoCollector() Collector { - path, version, sum := "unknown", "unknown", "unknown" - if bi, ok := debug.ReadBuildInfo(); ok { - path = bi.Main.Path - version = bi.Main.Version - sum = bi.Main.Sum - } - c := &selfCollector{MustNewConstMetric( - NewDesc( - "go_build_info", - "Build information about the main Go module.", - nil, Labels{"path": path, "version": version, "checksum": sum}, - ), - GaugeValue, 1)} - c.init(c.self) - return c -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go deleted file mode 100644 index 8425640b..00000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go +++ /dev/null @@ -1,642 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "fmt" - "math" - "runtime" - "sort" - "sync" - "sync/atomic" - "time" - - //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. - "github.com/golang/protobuf/proto" - - dto "github.com/prometheus/client_model/go" -) - -// A Histogram counts individual observations from an event or sample stream in -// configurable buckets. Similar to a summary, it also provides a sum of -// observations and an observation count. -// -// On the Prometheus server, quantiles can be calculated from a Histogram using -// the histogram_quantile function in the query language. -// -// Note that Histograms, in contrast to Summaries, can be aggregated with the -// Prometheus query language (see the documentation for detailed -// procedures). However, Histograms require the user to pre-define suitable -// buckets, and they are in general less accurate. The Observe method of a -// Histogram has a very low performance overhead in comparison with the Observe -// method of a Summary. -// -// To create Histogram instances, use NewHistogram. -type Histogram interface { - Metric - Collector - - // Observe adds a single observation to the histogram. Observations are - // usually positive or zero. Negative observations are accepted but - // prevent current versions of Prometheus from properly detecting - // counter resets in the sum of observations. See - // https://prometheus.io/docs/practices/histograms/#count-and-sum-of-observations - // for details. - Observe(float64) -} - -// bucketLabel is used for the label that defines the upper bound of a -// bucket of a histogram ("le" -> "less or equal"). -const bucketLabel = "le" - -// DefBuckets are the default Histogram buckets. The default buckets are -// tailored to broadly measure the response time (in seconds) of a network -// service. Most likely, however, you will be required to define buckets -// customized to your use case. 
-var ( - DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10} - - errBucketLabelNotAllowed = fmt.Errorf( - "%q is not allowed as label name in histograms", bucketLabel, - ) -) - -// LinearBuckets creates 'count' buckets, each 'width' wide, where the lowest -// bucket has an upper bound of 'start'. The final +Inf bucket is not counted -// and not included in the returned slice. The returned slice is meant to be -// used for the Buckets field of HistogramOpts. -// -// The function panics if 'count' is zero or negative. -func LinearBuckets(start, width float64, count int) []float64 { - if count < 1 { - panic("LinearBuckets needs a positive count") - } - buckets := make([]float64, count) - for i := range buckets { - buckets[i] = start - start += width - } - return buckets -} - -// ExponentialBuckets creates 'count' buckets, where the lowest bucket has an -// upper bound of 'start' and each following bucket's upper bound is 'factor' -// times the previous bucket's upper bound. The final +Inf bucket is not counted -// and not included in the returned slice. The returned slice is meant to be -// used for the Buckets field of HistogramOpts. -// -// The function panics if 'count' is 0 or negative, if 'start' is 0 or negative, -// or if 'factor' is less than or equal 1. -func ExponentialBuckets(start, factor float64, count int) []float64 { - if count < 1 { - panic("ExponentialBuckets needs a positive count") - } - if start <= 0 { - panic("ExponentialBuckets needs a positive start value") - } - if factor <= 1 { - panic("ExponentialBuckets needs a factor greater than 1") - } - buckets := make([]float64, count) - for i := range buckets { - buckets[i] = start - start *= factor - } - return buckets -} - -// HistogramOpts bundles the options for creating a Histogram metric. It is -// mandatory to set Name to a non-empty string. All other fields are optional -// and can safely be left at their zero value, although it is strongly -// encouraged to set a Help string. -type HistogramOpts struct { - // Namespace, Subsystem, and Name are components of the fully-qualified - // name of the Histogram (created by joining these components with - // "_"). Only Name is mandatory, the others merely help structuring the - // name. Note that the fully-qualified name of the Histogram must be a - // valid Prometheus metric name. - Namespace string - Subsystem string - Name string - - // Help provides information about this Histogram. - // - // Metrics with the same fully-qualified name must have the same Help - // string. - Help string - - // ConstLabels are used to attach fixed labels to this metric. Metrics - // with the same fully-qualified name must have the same label names in - // their ConstLabels. - // - // ConstLabels are only used rarely. In particular, do not use them to - // attach the same labels to all your metrics. Those use cases are - // better covered by target labels set by the scraping Prometheus - // server, or by one specific metric (e.g. a build_info or a - // machine_role metric). See also - // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels - ConstLabels Labels - - // Buckets defines the buckets into which observations are counted. Each - // element in the slice is the upper inclusive bound of a bucket. The - // values must be sorted in strictly increasing order. There is no need - // to add a highest bucket with +Inf bound, it will be added - // implicitly. The default value is DefBuckets. 
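LinearBuckets and ExponentialBuckets above are the usual way to populate HistogramOpts.Buckets; the +Inf bucket is always added implicitly. A small sketch of what they produce; the metric name is made up, and the printed values are subject to float rounding:

    package main

    import (
        "fmt"

        "github.com/prometheus/client_golang/prometheus"
    )

    func main() {
        // Five buckets, each 0.01 wide, the lowest topping out at 0.01:
        fmt.Println(prometheus.LinearBuckets(0.01, 0.01, 5))
        // approximately [0.01 0.02 0.03 0.04 0.05]

        // Four buckets doubling from 0.1:
        fmt.Println(prometheus.ExponentialBuckets(0.1, 2, 4))
        // [0.1 0.2 0.4 0.8]

        // Handing them to a histogram:
        _ = prometheus.NewHistogram(prometheus.HistogramOpts{
            Name:    "request_duration_seconds", // hypothetical metric
            Help:    "Request latency in seconds.",
            Buckets: prometheus.ExponentialBuckets(0.1, 2, 4),
        })
    }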
- Buckets []float64 -} - -// NewHistogram creates a new Histogram based on the provided HistogramOpts. It -// panics if the buckets in HistogramOpts are not in strictly increasing order. -// -// The returned implementation also implements ExemplarObserver. It is safe to -// perform the corresponding type assertion. Exemplars are tracked separately -// for each bucket. -func NewHistogram(opts HistogramOpts) Histogram { - return newHistogram( - NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ), - opts, - ) -} - -func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram { - if len(desc.variableLabels) != len(labelValues) { - panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues)) - } - - for _, n := range desc.variableLabels { - if n == bucketLabel { - panic(errBucketLabelNotAllowed) - } - } - for _, lp := range desc.constLabelPairs { - if lp.GetName() == bucketLabel { - panic(errBucketLabelNotAllowed) - } - } - - if len(opts.Buckets) == 0 { - opts.Buckets = DefBuckets - } - - h := &histogram{ - desc: desc, - upperBounds: opts.Buckets, - labelPairs: MakeLabelPairs(desc, labelValues), - counts: [2]*histogramCounts{{}, {}}, - now: time.Now, - } - for i, upperBound := range h.upperBounds { - if i < len(h.upperBounds)-1 { - if upperBound >= h.upperBounds[i+1] { - panic(fmt.Errorf( - "histogram buckets must be in increasing order: %f >= %f", - upperBound, h.upperBounds[i+1], - )) - } - } else { - if math.IsInf(upperBound, +1) { - // The +Inf bucket is implicit. Remove it here. - h.upperBounds = h.upperBounds[:i] - } - } - } - // Finally we know the final length of h.upperBounds and can make buckets - // for both counts as well as exemplars: - h.counts[0].buckets = make([]uint64, len(h.upperBounds)) - h.counts[1].buckets = make([]uint64, len(h.upperBounds)) - h.exemplars = make([]atomic.Value, len(h.upperBounds)+1) - - h.init(h) // Init self-collection. - return h -} - -type histogramCounts struct { - // sumBits contains the bits of the float64 representing the sum of all - // observations. sumBits and count have to go first in the struct to - // guarantee alignment for atomic operations. - // http://golang.org/pkg/sync/atomic/#pkg-note-BUG - sumBits uint64 - count uint64 - buckets []uint64 -} - -type histogram struct { - // countAndHotIdx enables lock-free writes with use of atomic updates. - // The most significant bit is the hot index [0 or 1] of the count field - // below. Observe calls update the hot one. All remaining bits count the - // number of Observe calls. Observe starts by incrementing this counter, - // and finish by incrementing the count field in the respective - // histogramCounts, as a marker for completion. - // - // Calls of the Write method (which are non-mutating reads from the - // perspective of the histogram) swap the hot–cold under the writeMtx - // lock. A cooldown is awaited (while locked) by comparing the number of - // observations with the initiation count. Once they match, then the - // last observation on the now cool one has completed. All cool fields must - // be merged into the new hot before releasing writeMtx. - // - // Fields with atomic access first! See alignment constraint: - // http://golang.org/pkg/sync/atomic/#pkg-note-BUG - countAndHotIdx uint64 - - selfCollector - desc *Desc - writeMtx sync.Mutex // Only used in the Write method. 
- - // Two counts, one is "hot" for lock-free observations, the other is - // "cold" for writing out a dto.Metric. It has to be an array of - // pointers to guarantee 64bit alignment of the histogramCounts, see - // http://golang.org/pkg/sync/atomic/#pkg-note-BUG. - counts [2]*histogramCounts - - upperBounds []float64 - labelPairs []*dto.LabelPair - exemplars []atomic.Value // One more than buckets (to include +Inf), each a *dto.Exemplar. - - now func() time.Time // To mock out time.Now() for testing. -} - -func (h *histogram) Desc() *Desc { - return h.desc -} - -func (h *histogram) Observe(v float64) { - h.observe(v, h.findBucket(v)) -} - -func (h *histogram) ObserveWithExemplar(v float64, e Labels) { - i := h.findBucket(v) - h.observe(v, i) - h.updateExemplar(v, i, e) -} - -func (h *histogram) Write(out *dto.Metric) error { - // For simplicity, we protect this whole method by a mutex. It is not in - // the hot path, i.e. Observe is called much more often than Write. The - // complication of making Write lock-free isn't worth it, if possible at - // all. - h.writeMtx.Lock() - defer h.writeMtx.Unlock() - - // Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0) - // without touching the count bits. See the struct comments for a full - // description of the algorithm. - n := atomic.AddUint64(&h.countAndHotIdx, 1<<63) - // count is contained unchanged in the lower 63 bits. - count := n & ((1 << 63) - 1) - // The most significant bit tells us which counts is hot. The complement - // is thus the cold one. - hotCounts := h.counts[n>>63] - coldCounts := h.counts[(^n)>>63] - - // Await cooldown. - for count != atomic.LoadUint64(&coldCounts.count) { - runtime.Gosched() // Let observations get work done. - } - - his := &dto.Histogram{ - Bucket: make([]*dto.Bucket, len(h.upperBounds)), - SampleCount: proto.Uint64(count), - SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))), - } - var cumCount uint64 - for i, upperBound := range h.upperBounds { - cumCount += atomic.LoadUint64(&coldCounts.buckets[i]) - his.Bucket[i] = &dto.Bucket{ - CumulativeCount: proto.Uint64(cumCount), - UpperBound: proto.Float64(upperBound), - } - if e := h.exemplars[i].Load(); e != nil { - his.Bucket[i].Exemplar = e.(*dto.Exemplar) - } - } - // If there is an exemplar for the +Inf bucket, we have to add that bucket explicitly. - if e := h.exemplars[len(h.upperBounds)].Load(); e != nil { - b := &dto.Bucket{ - CumulativeCount: proto.Uint64(count), - UpperBound: proto.Float64(math.Inf(1)), - Exemplar: e.(*dto.Exemplar), - } - his.Bucket = append(his.Bucket, b) - } - - out.Histogram = his - out.Label = h.labelPairs - - // Finally add all the cold counts to the new hot counts and reset the cold counts. - atomic.AddUint64(&hotCounts.count, count) - atomic.StoreUint64(&coldCounts.count, 0) - for { - oldBits := atomic.LoadUint64(&hotCounts.sumBits) - newBits := math.Float64bits(math.Float64frombits(oldBits) + his.GetSampleSum()) - if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { - atomic.StoreUint64(&coldCounts.sumBits, 0) - break - } - } - for i := range h.upperBounds { - atomic.AddUint64(&hotCounts.buckets[i], atomic.LoadUint64(&coldCounts.buckets[i])) - atomic.StoreUint64(&coldCounts.buckets[i], 0) - } - return nil -} - -// findBucket returns the index of the bucket for the provided value, or -// len(h.upperBounds) for the +Inf bucket. 
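The findBucket contract documented above is a plain binary search over the sorted upper bounds, with len(upperBounds) standing in for the implicit +Inf bucket. A standalone sketch of that lookup using the same sort.SearchFloat64s call; the bounds are illustrative:

    package main

    import (
        "fmt"
        "sort"
    )

    func main() {
        upperBounds := []float64{0.1, 0.5, 1, 5} // sorted bucket upper bounds

        // sort.SearchFloat64s returns the first index whose bound is >= v,
        // i.e. the bucket an observation of v falls into ("le" semantics).
        fmt.Println(sort.SearchFloat64s(upperBounds, 0.3)) // 1 (first bound >= 0.3 is 0.5)
        fmt.Println(sort.SearchFloat64s(upperBounds, 5))   // 3 (a value on a bound counts into that bucket)
        fmt.Println(sort.SearchFloat64s(upperBounds, 9))   // 4 == len(upperBounds): the +Inf bucket
    }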
-func (h *histogram) findBucket(v float64) int { - // TODO(beorn7): For small numbers of buckets (<30), a linear search is - // slightly faster than the binary search. If we really care, we could - // switch from one search strategy to the other depending on the number - // of buckets. - // - // Microbenchmarks (BenchmarkHistogramNoLabels): - // 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op - // 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op - // 300 buckets: 154 ns/op linear - binary 61.6 ns/op - return sort.SearchFloat64s(h.upperBounds, v) -} - -// observe is the implementation for Observe without the findBucket part. -func (h *histogram) observe(v float64, bucket int) { - // We increment h.countAndHotIdx so that the counter in the lower - // 63 bits gets incremented. At the same time, we get the new value - // back, which we can use to find the currently-hot counts. - n := atomic.AddUint64(&h.countAndHotIdx, 1) - hotCounts := h.counts[n>>63] - - if bucket < len(h.upperBounds) { - atomic.AddUint64(&hotCounts.buckets[bucket], 1) - } - for { - oldBits := atomic.LoadUint64(&hotCounts.sumBits) - newBits := math.Float64bits(math.Float64frombits(oldBits) + v) - if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { - break - } - } - // Increment count last as we take it as a signal that the observation - // is complete. - atomic.AddUint64(&hotCounts.count, 1) -} - -// updateExemplar replaces the exemplar for the provided bucket. With empty -// labels, it's a no-op. It panics if any of the labels is invalid. -func (h *histogram) updateExemplar(v float64, bucket int, l Labels) { - if l == nil { - return - } - e, err := newExemplar(v, h.now(), l) - if err != nil { - panic(err) - } - h.exemplars[bucket].Store(e) -} - -// HistogramVec is a Collector that bundles a set of Histograms that all share the -// same Desc, but have different values for their variable labels. This is used -// if you want to count the same thing partitioned by various dimensions -// (e.g. HTTP request latencies, partitioned by status code and method). Create -// instances with NewHistogramVec. -type HistogramVec struct { - *MetricVec -} - -// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and -// partitioned by the given label names. -func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec { - desc := NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - labelNames, - opts.ConstLabels, - ) - return &HistogramVec{ - MetricVec: NewMetricVec(desc, func(lvs ...string) Metric { - return newHistogram(desc, opts, lvs...) - }), - } -} - -// GetMetricWithLabelValues returns the Histogram for the given slice of label -// values (same order as the variable labels in Desc). If that combination of -// label values is accessed for the first time, a new Histogram is created. -// -// It is possible to call this method without using the returned Histogram to only -// create the new Histogram but leave it at its starting value, a Histogram without -// any observations. -// -// Keeping the Histogram for later use is possible (and should be considered if -// performance is critical), but keep in mind that Reset, DeleteLabelValues and -// Delete can be used to delete the Histogram from the HistogramVec. In that case, the -// Histogram will still exist, but it will not be exported anymore, even if a -// Histogram with the same label values is created later. See also the CounterVec -// example. 
-// -// An error is returned if the number of label values is not the same as the -// number of variable labels in Desc (minus any curried labels). -// -// Note that for more than one label value, this method is prone to mistakes -// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as -// an alternative to avoid that type of mistake. For higher label numbers, the -// latter has a much more readable (albeit more verbose) syntax, but it comes -// with a performance overhead (for creating and processing the Labels map). -// See also the GaugeVec example. -func (v *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) { - metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...) - if metric != nil { - return metric.(Observer), err - } - return nil, err -} - -// GetMetricWith returns the Histogram for the given Labels map (the label names -// must match those of the variable labels in Desc). If that label map is -// accessed for the first time, a new Histogram is created. Implications of -// creating a Histogram without using it and keeping the Histogram for later use -// are the same as for GetMetricWithLabelValues. -// -// An error is returned if the number and names of the Labels are inconsistent -// with those of the variable labels in Desc (minus any curried labels). -// -// This method is used for the same purpose as -// GetMetricWithLabelValues(...string). See there for pros and cons of the two -// methods. -func (v *HistogramVec) GetMetricWith(labels Labels) (Observer, error) { - metric, err := v.MetricVec.GetMetricWith(labels) - if metric != nil { - return metric.(Observer), err - } - return nil, err -} - -// WithLabelValues works as GetMetricWithLabelValues, but panics where -// GetMetricWithLabelValues would have returned an error. Not returning an -// error allows shortcuts like -// myVec.WithLabelValues("404", "GET").Observe(42.21) -func (v *HistogramVec) WithLabelValues(lvs ...string) Observer { - h, err := v.GetMetricWithLabelValues(lvs...) - if err != nil { - panic(err) - } - return h -} - -// With works as GetMetricWith but panics where GetMetricWith would have -// returned an error. Not returning an error allows shortcuts like -// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21) -func (v *HistogramVec) With(labels Labels) Observer { - h, err := v.GetMetricWith(labels) - if err != nil { - panic(err) - } - return h -} - -// CurryWith returns a vector curried with the provided labels, i.e. the -// returned vector has those labels pre-set for all labeled operations performed -// on it. The cardinality of the curried vector is reduced accordingly. The -// order of the remaining labels stays the same (just with the curried labels -// taken out of the sequence – which is relevant for the -// (GetMetric)WithLabelValues methods). It is possible to curry a curried -// vector, but only with labels not yet used for currying before. -// -// The metrics contained in the HistogramVec are shared between the curried and -// uncurried vectors. They are just accessed differently. Curried and uncurried -// vectors behave identically in terms of collection. Only one must be -// registered with a given registry (usually the uncurried version). The Reset -// method deletes all metrics, even if called on a curried vector.
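Currying, as documented above, pays off when one label is fixed per call site (say, per handler) while the rest vary per observation. A hedged sketch against the public HistogramVec API; the metric and label names are invented:

    package main

    import "github.com/prometheus/client_golang/prometheus"

    func main() {
        latency := prometheus.NewHistogramVec(prometheus.HistogramOpts{
            Name:    "http_request_duration_seconds", // hypothetical metric
            Help:    "Request latency by handler and method.",
            Buckets: prometheus.DefBuckets,
        }, []string{"handler", "method"})
        prometheus.MustRegister(latency) // register the uncurried vector once

        // Pre-set the handler label; only "method" remains variable.
        loginLatency := latency.MustCurryWith(prometheus.Labels{"handler": "/login"})

        loginLatency.WithLabelValues("GET").Observe(0.042)
        loginLatency.With(prometheus.Labels{"method": "POST"}).Observe(0.084)
    }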
-func (v *HistogramVec) CurryWith(labels Labels) (ObserverVec, error) { - vec, err := v.MetricVec.CurryWith(labels) - if vec != nil { - return &HistogramVec{vec}, err - } - return nil, err -} - -// MustCurryWith works as CurryWith but panics where CurryWith would have -// returned an error. -func (v *HistogramVec) MustCurryWith(labels Labels) ObserverVec { - vec, err := v.CurryWith(labels) - if err != nil { - panic(err) - } - return vec -} - -type constHistogram struct { - desc *Desc - count uint64 - sum float64 - buckets map[float64]uint64 - labelPairs []*dto.LabelPair -} - -func (h *constHistogram) Desc() *Desc { - return h.desc -} - -func (h *constHistogram) Write(out *dto.Metric) error { - his := &dto.Histogram{} - buckets := make([]*dto.Bucket, 0, len(h.buckets)) - - his.SampleCount = proto.Uint64(h.count) - his.SampleSum = proto.Float64(h.sum) - - for upperBound, count := range h.buckets { - buckets = append(buckets, &dto.Bucket{ - CumulativeCount: proto.Uint64(count), - UpperBound: proto.Float64(upperBound), - }) - } - - if len(buckets) > 0 { - sort.Sort(buckSort(buckets)) - } - his.Bucket = buckets - - out.Histogram = his - out.Label = h.labelPairs - - return nil -} - -// NewConstHistogram returns a metric representing a Prometheus histogram with -// fixed values for the count, sum, and bucket counts. As those parameters -// cannot be changed, the returned value does not implement the Histogram -// interface (but only the Metric interface). Users of this package will not -// have much use for it in regular operations. However, when implementing custom -// Collectors, it is useful as a throw-away metric that is generated on the fly -// to send it to Prometheus in the Collect method. -// -// buckets is a map of upper bounds to cumulative counts, excluding the +Inf -// bucket. -// -// NewConstHistogram returns an error if the length of labelValues is not -// consistent with the variable labels in Desc or if Desc is invalid. -func NewConstHistogram( - desc *Desc, - count uint64, - sum float64, - buckets map[float64]uint64, - labelValues ...string, -) (Metric, error) { - if desc.err != nil { - return nil, desc.err - } - if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { - return nil, err - } - return &constHistogram{ - desc: desc, - count: count, - sum: sum, - buckets: buckets, - labelPairs: MakeLabelPairs(desc, labelValues), - }, nil -} - -// MustNewConstHistogram is a version of NewConstHistogram that panics where -// NewConstHistogram would have returned an error. -func MustNewConstHistogram( - desc *Desc, - count uint64, - sum float64, - buckets map[float64]uint64, - labelValues ...string, -) Metric { - m, err := NewConstHistogram(desc, count, sum, buckets, labelValues...) 
- if err != nil { - panic(err) - } - return m -} - -type buckSort []*dto.Bucket - -func (s buckSort) Len() int { - return len(s) -} - -func (s buckSort) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s buckSort) Less(i, j int) bool { - return s[i].GetUpperBound() < s[j].GetUpperBound() -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go deleted file mode 100644 index 351c26e1..00000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package internal - -import ( - "sort" - - dto "github.com/prometheus/client_model/go" -) - -// metricSorter is a sortable slice of *dto.Metric. -type metricSorter []*dto.Metric - -func (s metricSorter) Len() int { - return len(s) -} - -func (s metricSorter) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s metricSorter) Less(i, j int) bool { - if len(s[i].Label) != len(s[j].Label) { - // This should not happen. The metrics are - // inconsistent. However, we have to deal with the fact, as - // people might use custom collectors or metric family injection - // to create inconsistent metrics. So let's simply compare the - // number of labels in this case. That will still yield - // reproducible sorting. - return len(s[i].Label) < len(s[j].Label) - } - for n, lp := range s[i].Label { - vi := lp.GetValue() - vj := s[j].Label[n].GetValue() - if vi != vj { - return vi < vj - } - } - - // We should never arrive here. Multiple metrics with the same - // label set in the same scrape will lead to undefined ingestion - // behavior. However, as above, we have to provide stable sorting - // here, even for inconsistent metrics. So sort equal metrics - // by their timestamp, with missing timestamps (implying "now") - // coming last. - if s[i].TimestampMs == nil { - return false - } - if s[j].TimestampMs == nil { - return true - } - return s[i].GetTimestampMs() < s[j].GetTimestampMs() -} - -// NormalizeMetricFamilies returns a MetricFamily slice with empty -// MetricFamilies pruned and the remaining MetricFamilies sorted by name within -// the slice, with the contained Metrics sorted within each MetricFamily. 
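Stepping back to histogram.go for a moment: NewConstHistogram, deleted above, exists precisely for custom Collectors that re-export histogram data obtained elsewhere as a throw-away metric in Collect. A sketch of that pattern; the collector type, metric name, and numbers are all hypothetical:

    package main

    import "github.com/prometheus/client_golang/prometheus"

    // snapshotCollector re-exports histogram data it obtained elsewhere
    // (e.g. from another process) on every scrape.
    type snapshotCollector struct {
        desc *prometheus.Desc
    }

    func (c *snapshotCollector) Describe(ch chan<- *prometheus.Desc) {
        ch <- c.desc
    }

    func (c *snapshotCollector) Collect(ch chan<- prometheus.Metric) {
        // Count, sum, and cumulative bucket counts would come from the
        // external source; fixed values stand in here.
        ch <- prometheus.MustNewConstHistogram(
            c.desc,
            4711,   // observation count
            403.42, // sum of observations
            map[float64]uint64{0.05: 1200, 0.1: 3500, 0.5: 4600}, // upper bound -> cumulative count
        )
    }

    func main() {
        prometheus.MustRegister(&snapshotCollector{
            desc: prometheus.NewDesc(
                "external_request_duration_seconds", // hypothetical metric
                "Latency snapshot scraped from an external source.",
                nil, nil,
            ),
        })
    }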
-func NormalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily { - for _, mf := range metricFamiliesByName { - sort.Sort(metricSorter(mf.Metric)) - } - names := make([]string, 0, len(metricFamiliesByName)) - for name, mf := range metricFamiliesByName { - if len(mf.Metric) > 0 { - names = append(names, name) - } - } - sort.Strings(names) - result := make([]*dto.MetricFamily, 0, len(names)) - for _, name := range names { - result = append(result, metricFamiliesByName[name]) - } - return result -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/prometheus/client_golang/prometheus/labels.go deleted file mode 100644 index 2744443a..00000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/labels.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "errors" - "fmt" - "strings" - "unicode/utf8" - - "github.com/prometheus/common/model" -) - -// Labels represents a collection of label name -> value mappings. This type is -// commonly used with the With(Labels) and GetMetricWith(Labels) methods of -// metric vector Collectors, e.g.: -// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) -// -// The other use-case is the specification of constant label pairs in Opts or to -// create a Desc. -type Labels map[string]string - -// reservedLabelPrefix is a prefix which is not legal in user-supplied -// label names. 
-const reservedLabelPrefix = "__" - -var errInconsistentCardinality = errors.New("inconsistent label cardinality") - -func makeInconsistentCardinalityError(fqName string, labels, labelValues []string) error { - return fmt.Errorf( - "%s: %q has %d variable labels named %q but %d values %q were provided", - errInconsistentCardinality, fqName, - len(labels), labels, - len(labelValues), labelValues, - ) -} - -func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error { - if len(labels) != expectedNumberOfValues { - return fmt.Errorf( - "%s: expected %d label values but got %d in %#v", - errInconsistentCardinality, expectedNumberOfValues, - len(labels), labels, - ) - } - - for name, val := range labels { - if !utf8.ValidString(val) { - return fmt.Errorf("label %s: value %q is not valid UTF-8", name, val) - } - } - - return nil -} - -func validateLabelValues(vals []string, expectedNumberOfValues int) error { - if len(vals) != expectedNumberOfValues { - return fmt.Errorf( - "%s: expected %d label values but got %d in %#v", - errInconsistentCardinality, expectedNumberOfValues, - len(vals), vals, - ) - } - - for _, val := range vals { - if !utf8.ValidString(val) { - return fmt.Errorf("label value %q is not valid UTF-8", val) - } - } - - return nil -} - -func checkLabelName(l string) bool { - return model.LabelName(l).IsValid() && !strings.HasPrefix(l, reservedLabelPrefix) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go deleted file mode 100644 index dc121910..00000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go +++ /dev/null @@ -1,176 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "strings" - "time" - - //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. - "github.com/golang/protobuf/proto" - "github.com/prometheus/common/model" - - dto "github.com/prometheus/client_model/go" -) - -var separatorByteSlice = []byte{model.SeparatorByte} // For convenient use with xxhash. - -// A Metric models a single sample value with its meta data being exported to -// Prometheus. Implementations of Metric in this package are Gauge, Counter, -// Histogram, Summary, and Untyped. -type Metric interface { - // Desc returns the descriptor for the Metric. This method idempotently - // returns the same descriptor throughout the lifetime of the - // Metric. The returned descriptor is immutable by contract. A Metric - // unable to describe itself must return an invalid descriptor (created - // with NewInvalidDesc). - Desc() *Desc - // Write encodes the Metric into a "Metric" Protocol Buffer data - // transmission object. - // - // Metric implementations must observe concurrency safety as reads of - // this metric may occur at any time, and any blocking occurs at the - // expense of total performance of rendering all registered - // metrics. 
Ideally, Metric implementations should support concurrent - // readers. - // - // While populating dto.Metric, it is the responsibility of the - // implementation to ensure validity of the Metric protobuf (like valid - // UTF-8 strings or syntactically valid metric and label names). It is - // recommended to sort labels lexicographically. Callers of Write should - // still make sure of sorting if they depend on it. - Write(*dto.Metric) error - // TODO(beorn7): The original rationale of passing in a pre-allocated - // dto.Metric protobuf to save allocations has disappeared. The - // signature of this method should be changed to "Write() (*dto.Metric, - // error)". -} - -// Opts bundles the options for creating most Metric types. Each metric -// implementation XXX has its own XXXOpts type, but in most cases, it is just -// an alias of this type (which might change when the requirement arises.) -// -// It is mandatory to set Name to a non-empty string. All other fields are -// optional and can safely be left at their zero value, although it is strongly -// encouraged to set a Help string. -type Opts struct { - // Namespace, Subsystem, and Name are components of the fully-qualified - // name of the Metric (created by joining these components with - // "_"). Only Name is mandatory, the others merely help structuring the - // name. Note that the fully-qualified name of the metric must be a - // valid Prometheus metric name. - Namespace string - Subsystem string - Name string - - // Help provides information about this metric. - // - // Metrics with the same fully-qualified name must have the same Help - // string. - Help string - - // ConstLabels are used to attach fixed labels to this metric. Metrics - // with the same fully-qualified name must have the same label names in - // their ConstLabels. - // - // ConstLabels are only used rarely. In particular, do not use them to - // attach the same labels to all your metrics. Those use cases are - // better covered by target labels set by the scraping Prometheus - // server, or by one specific metric (e.g. a build_info or a - // machine_role metric). See also - // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels - ConstLabels Labels -} - -// BuildFQName joins the given three name components by "_". Empty name -// components are ignored. If the name parameter itself is empty, an empty -// string is returned, no matter what. Metric implementations included in this -// library use this function internally to generate the fully-qualified metric -// name from the name component in their Opts. Users of the library will only -// need this function if they implement their own Metric or instantiate a Desc -// (with NewDesc) directly. -func BuildFQName(namespace, subsystem, name string) string { - if name == "" { - return "" - } - switch { - case namespace != "" && subsystem != "": - return strings.Join([]string{namespace, subsystem, name}, "_") - case namespace != "": - return strings.Join([]string{namespace, name}, "_") - case subsystem != "": - return strings.Join([]string{subsystem, name}, "_") - } - return name -} - -// labelPairSorter implements sort.Interface. It is used to sort a slice of -// dto.LabelPair pointers. 
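BuildFQName, deleted just above, is the one exported helper in metric.go that user code calls directly. A quick demonstration of its joining rules; the name components are arbitrary:

    package main

    import (
        "fmt"

        "github.com/prometheus/client_golang/prometheus"
    )

    func main() {
        fmt.Println(prometheus.BuildFQName("candy", "caddy", "requests_total"))
        // candy_caddy_requests_total

        fmt.Println(prometheus.BuildFQName("", "", "requests_total"))
        // requests_total (empty components are simply skipped)

        fmt.Println(prometheus.BuildFQName("candy", "", ""))
        // "" (an empty name always yields an empty string)
    }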
-type labelPairSorter []*dto.LabelPair - -func (s labelPairSorter) Len() int { - return len(s) -} - -func (s labelPairSorter) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s labelPairSorter) Less(i, j int) bool { - return s[i].GetName() < s[j].GetName() -} - -type invalidMetric struct { - desc *Desc - err error -} - -// NewInvalidMetric returns a metric whose Write method always returns the -// provided error. It is useful if a Collector finds itself unable to collect -// a metric and wishes to report an error to the registry. -func NewInvalidMetric(desc *Desc, err error) Metric { - return &invalidMetric{desc, err} -} - -func (m *invalidMetric) Desc() *Desc { return m.desc } - -func (m *invalidMetric) Write(*dto.Metric) error { return m.err } - -type timestampedMetric struct { - Metric - t time.Time -} - -func (m timestampedMetric) Write(pb *dto.Metric) error { - e := m.Metric.Write(pb) - pb.TimestampMs = proto.Int64(m.t.Unix()*1000 + int64(m.t.Nanosecond()/1000000)) - return e -} - -// NewMetricWithTimestamp returns a new Metric wrapping the provided Metric in a -// way that it has an explicit timestamp set to the provided Time. This is only -// useful in rare cases as the timestamp of a Prometheus metric should usually -// be set by the Prometheus server during scraping. Exceptions include mirroring -// metrics with given timestamps from other metric -// sources. -// -// NewMetricWithTimestamp works best with MustNewConstMetric, -// MustNewConstHistogram, and MustNewConstSummary, see example. -// -// Currently, the exposition formats used by Prometheus are limited to -// millisecond resolution. Thus, the provided time will be rounded down to the -// next full millisecond value. -func NewMetricWithTimestamp(t time.Time, m Metric) Metric { - return timestampedMetric{Metric: m, t: t} -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/observer.go b/vendor/github.com/prometheus/client_golang/prometheus/observer.go deleted file mode 100644 index 44128016..00000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/observer.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -// Observer is the interface that wraps the Observe method, which is used by -// Histogram and Summary to add observations. -type Observer interface { - Observe(float64) -} - -// The ObserverFunc type is an adapter to allow the use of ordinary -// functions as Observers. If f is a function with the appropriate -// signature, ObserverFunc(f) is an Observer that calls f. -// -// This adapter is usually used in connection with the Timer type, and there are -// two general use cases: -// -// The most common one is to use a Gauge as the Observer for a Timer. -// See the "Gauge" Timer example. -// -// The more advanced use case is to create a function that dynamically decides -// which Observer to use for observing the duration. See the "Complex" Timer -// example. 
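The gauge-as-Observer pattern the comment above alludes to is usually spelled with a Timer. A short sketch; the metric name is invented, and the sleep stands in for real work:

    package main

    import (
        "time"

        "github.com/prometheus/client_golang/prometheus"
    )

    func main() {
        lastRun := prometheus.NewGauge(prometheus.GaugeOpts{
            Name: "batch_job_duration_seconds", // hypothetical metric
            Help: "Duration of the last batch job run.",
        })
        prometheus.MustRegister(lastRun)

        // ObserverFunc adapts Gauge.Set to the Observer interface, so the
        // timer writes the elapsed seconds into the gauge when it stops.
        timer := prometheus.NewTimer(prometheus.ObserverFunc(lastRun.Set))
        defer timer.ObserveDuration()

        time.Sleep(10 * time.Millisecond) // stand-in for real work
    }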
-type ObserverFunc func(float64) - -// Observe calls f(value). It implements Observer. -func (f ObserverFunc) Observe(value float64) { - f(value) -} - -// ObserverVec is an interface implemented by `HistogramVec` and `SummaryVec`. -type ObserverVec interface { - GetMetricWith(Labels) (Observer, error) - GetMetricWithLabelValues(lvs ...string) (Observer, error) - With(Labels) Observer - WithLabelValues(...string) Observer - CurryWith(Labels) (ObserverVec, error) - MustCurryWith(Labels) ObserverVec - - Collector -} - -// ExemplarObserver is implemented by Observers that offer the option of -// observing a value together with an exemplar. Its ObserveWithExemplar method -// works like the Observe method of an Observer but also replaces the currently -// saved exemplar (if any) with a new one, created from the provided value, the -// current time as timestamp, and the provided Labels. Empty Labels will lead to -// a valid (label-less) exemplar. But if Labels is nil, the current exemplar is -// left in place. ObserveWithExemplar panics if any of the provided labels are -// invalid or if the provided labels contain more than 64 runes in total. -type ExemplarObserver interface { - ObserveWithExemplar(value float64, exemplar Labels) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go deleted file mode 100644 index 5bfe0ff5..00000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go +++ /dev/null @@ -1,166 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "errors" - "fmt" - "io/ioutil" - "os" - "strconv" - "strings" -) - -type processCollector struct { - collectFn func(chan<- Metric) - pidFn func() (int, error) - reportErrors bool - cpuTotal *Desc - openFDs, maxFDs *Desc - vsize, maxVsize *Desc - rss *Desc - startTime *Desc -} - -// ProcessCollectorOpts defines the behavior of a process metrics collector -// created with NewProcessCollector. -type ProcessCollectorOpts struct { - // PidFn returns the PID of the process the collector collects metrics - // for. It is called upon each collection. By default, the PID of the - // current process is used, as determined on construction time by - // calling os.Getpid(). - PidFn func() (int, error) - // If non-empty, each of the collected metrics is prefixed by the - // provided string and an underscore ("_"). - Namespace string - // If true, any error encountered during collection is reported as an - // invalid metric (see NewInvalidMetric). Otherwise, errors are ignored - // and the collected metrics will be incomplete. (Possibly, no metrics - // will be collected at all.) While that's usually not desired, it is - // appropriate for the common "mix-in" of process metrics, where process - // metrics are nice to have, but failing to collect them should not - // disrupt the collection of the remaining metrics. 
- ReportErrors bool -} - -// NewProcessCollector is the obsolete version of collectors.NewProcessCollector. -// See there for documentation. -// -// Deprecated: Use collectors.NewProcessCollector instead. -func NewProcessCollector(opts ProcessCollectorOpts) Collector { - ns := "" - if len(opts.Namespace) > 0 { - ns = opts.Namespace + "_" - } - - c := &processCollector{ - reportErrors: opts.ReportErrors, - cpuTotal: NewDesc( - ns+"process_cpu_seconds_total", - "Total user and system CPU time spent in seconds.", - nil, nil, - ), - openFDs: NewDesc( - ns+"process_open_fds", - "Number of open file descriptors.", - nil, nil, - ), - maxFDs: NewDesc( - ns+"process_max_fds", - "Maximum number of open file descriptors.", - nil, nil, - ), - vsize: NewDesc( - ns+"process_virtual_memory_bytes", - "Virtual memory size in bytes.", - nil, nil, - ), - maxVsize: NewDesc( - ns+"process_virtual_memory_max_bytes", - "Maximum amount of virtual memory available in bytes.", - nil, nil, - ), - rss: NewDesc( - ns+"process_resident_memory_bytes", - "Resident memory size in bytes.", - nil, nil, - ), - startTime: NewDesc( - ns+"process_start_time_seconds", - "Start time of the process since unix epoch in seconds.", - nil, nil, - ), - } - - if opts.PidFn == nil { - pid := os.Getpid() - c.pidFn = func() (int, error) { return pid, nil } - } else { - c.pidFn = opts.PidFn - } - - // Set up process metric collection if supported by the runtime. - if canCollectProcess() { - c.collectFn = c.processCollect - } else { - c.collectFn = func(ch chan<- Metric) { - c.reportError(ch, nil, errors.New("process metrics not supported on this platform")) - } - } - - return c -} - -// Describe returns all descriptions of the collector. -func (c *processCollector) Describe(ch chan<- *Desc) { - ch <- c.cpuTotal - ch <- c.openFDs - ch <- c.maxFDs - ch <- c.vsize - ch <- c.maxVsize - ch <- c.rss - ch <- c.startTime -} - -// Collect returns the current state of all metrics of the collector. -func (c *processCollector) Collect(ch chan<- Metric) { - c.collectFn(ch) -} - -func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error) { - if !c.reportErrors { - return - } - if desc == nil { - desc = NewInvalidDesc(err) - } - ch <- NewInvalidMetric(desc, err) -} - -// NewPidFileFn returns a function that retrieves a pid from the specified file. -// It is meant to be used for the PidFn field in ProcessCollectorOpts. -func NewPidFileFn(pidFilePath string) func() (int, error) { - return func() (int, error) { - content, err := ioutil.ReadFile(pidFilePath) - if err != nil { - return 0, fmt.Errorf("can't read pid file %q: %+v", pidFilePath, err) - } - pid, err := strconv.Atoi(strings.TrimSpace(string(content))) - if err != nil { - return 0, fmt.Errorf("can't parse pid file %q: %+v", pidFilePath, err) - } - - return pid, nil - } -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go deleted file mode 100644 index 3117461c..00000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build !windows - -package prometheus - -import ( - "github.com/prometheus/procfs" -) - -func canCollectProcess() bool { - _, err := procfs.NewDefaultFS() - return err == nil -} - -func (c *processCollector) processCollect(ch chan<- Metric) { - pid, err := c.pidFn() - if err != nil { - c.reportError(ch, nil, err) - return - } - - p, err := procfs.NewProc(pid) - if err != nil { - c.reportError(ch, nil, err) - return - } - - if stat, err := p.Stat(); err == nil { - ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime()) - ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory())) - ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory())) - if startTime, err := stat.StartTime(); err == nil { - ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime) - } else { - c.reportError(ch, c.startTime, err) - } - } else { - c.reportError(ch, nil, err) - } - - if fds, err := p.FileDescriptorsLen(); err == nil { - ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds)) - } else { - c.reportError(ch, c.openFDs, err) - } - - if limits, err := p.Limits(); err == nil { - ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles)) - ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(limits.AddressSpace)) - } else { - c.reportError(ch, nil, err) - } -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go deleted file mode 100644 index f973398d..00000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
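For context on ProcessCollectorOpts above, a sketch of wiring the removed process collector (deprecated upstream in favor of collectors.NewProcessCollector) to a sidecar PID file via NewPidFileFn; the path and namespace are illustrative:

    package main

    import (
        "net/http"

        "github.com/prometheus/client_golang/prometheus"
        "github.com/prometheus/client_golang/prometheus/promhttp"
    )

    func main() {
        reg := prometheus.NewRegistry()
        reg.MustRegister(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{
            PidFn:        prometheus.NewPidFileFn("/var/run/app.pid"), // hypothetical path
            Namespace:    "sidecar",
            ReportErrors: true, // surface collection failures as invalid metrics
        }))

        http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
        http.ListenAndServe(":1971", nil)
    }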
- -package prometheus - -import ( - "syscall" - "unsafe" - - "golang.org/x/sys/windows" -) - -func canCollectProcess() bool { - return true -} - -var ( - modpsapi = syscall.NewLazyDLL("psapi.dll") - modkernel32 = syscall.NewLazyDLL("kernel32.dll") - - procGetProcessMemoryInfo = modpsapi.NewProc("GetProcessMemoryInfo") - procGetProcessHandleCount = modkernel32.NewProc("GetProcessHandleCount") -) - -type processMemoryCounters struct { - // System interface description - // https://docs.microsoft.com/en-us/windows/desktop/api/psapi/ns-psapi-process_memory_counters_ex - - // Refer to the Golang internal implementation - // https://golang.org/src/internal/syscall/windows/psapi_windows.go - _ uint32 - PageFaultCount uint32 - PeakWorkingSetSize uintptr - WorkingSetSize uintptr - QuotaPeakPagedPoolUsage uintptr - QuotaPagedPoolUsage uintptr - QuotaPeakNonPagedPoolUsage uintptr - QuotaNonPagedPoolUsage uintptr - PagefileUsage uintptr - PeakPagefileUsage uintptr - PrivateUsage uintptr -} - -func getProcessMemoryInfo(handle windows.Handle) (processMemoryCounters, error) { - mem := processMemoryCounters{} - r1, _, err := procGetProcessMemoryInfo.Call( - uintptr(handle), - uintptr(unsafe.Pointer(&mem)), - uintptr(unsafe.Sizeof(mem)), - ) - if r1 != 1 { - return mem, err - } else { - return mem, nil - } -} - -func getProcessHandleCount(handle windows.Handle) (uint32, error) { - var count uint32 - r1, _, err := procGetProcessHandleCount.Call( - uintptr(handle), - uintptr(unsafe.Pointer(&count)), - ) - if r1 != 1 { - return 0, err - } else { - return count, nil - } -} - -func (c *processCollector) processCollect(ch chan<- Metric) { - h, err := windows.GetCurrentProcess() - if err != nil { - c.reportError(ch, nil, err) - return - } - - var startTime, exitTime, kernelTime, userTime windows.Filetime - err = windows.GetProcessTimes(h, &startTime, &exitTime, &kernelTime, &userTime) - if err != nil { - c.reportError(ch, nil, err) - return - } - ch <- MustNewConstMetric(c.startTime, GaugeValue, float64(startTime.Nanoseconds()/1e9)) - ch <- MustNewConstMetric(c.cpuTotal, CounterValue, fileTimeToSeconds(kernelTime)+fileTimeToSeconds(userTime)) - - mem, err := getProcessMemoryInfo(h) - if err != nil { - c.reportError(ch, nil, err) - return - } - ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(mem.PrivateUsage)) - ch <- MustNewConstMetric(c.rss, GaugeValue, float64(mem.WorkingSetSize)) - - handles, err := getProcessHandleCount(h) - if err != nil { - c.reportError(ch, nil, err) - return - } - ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(handles)) - ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(16*1024*1024)) // Windows has a hard-coded max limit, not per-process. -} - -func fileTimeToSeconds(ft windows.Filetime) float64 { - return float64(uint64(ft.HighDateTime)<<32+uint64(ft.LowDateTime)) / 1e7 -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promauto/auto.go b/vendor/github.com/prometheus/client_golang/prometheus/promauto/auto.go deleted file mode 100644 index f8d50d1f..00000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/promauto/auto.go +++ /dev/null @@ -1,376 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package promauto provides alternative constructors for the fundamental -// Prometheus metric types and their …Vec and …Func variants. The difference to -// their counterparts in the prometheus package is that the promauto -// constructors return Collectors that are already registered with a -// registry. There are two sets of constructors. The constructors in the first -// set are top-level functions, while the constructors in the other set are -// methods of the Factory type. The top-level functions return Collectors -// registered with the global registry (prometheus.DefaultRegisterer), while the -// methods return Collectors registered with the registry the Factory was -// constructed with. All constructors panic if the registration fails. -// -// The following example is a complete program to create a histogram of normally -// distributed random numbers from the math/rand package: -// -// package main -// -// import ( -// "math/rand" -// "net/http" -// -// "github.com/prometheus/client_golang/prometheus" -// "github.com/prometheus/client_golang/prometheus/promauto" -// "github.com/prometheus/client_golang/prometheus/promhttp" -// ) -// -// var histogram = promauto.NewHistogram(prometheus.HistogramOpts{ -// Name: "random_numbers", -// Help: "A histogram of normally distributed random numbers.", -// Buckets: prometheus.LinearBuckets(-3, .1, 61), -// }) -// -// func Random() { -// for { -// histogram.Observe(rand.NormFloat64()) -// } -// } -// -// func main() { -// go Random() -// http.Handle("/metrics", promhttp.Handler()) -// http.ListenAndServe(":1971", nil) -// } -// -// Prometheus's version of a minimal hello-world program: -// -// package main -// -// import ( -// "fmt" -// "net/http" -// -// "github.com/prometheus/client_golang/prometheus" -// "github.com/prometheus/client_golang/prometheus/promauto" -// "github.com/prometheus/client_golang/prometheus/promhttp" -// ) -// -// func main() { -// http.Handle("/", promhttp.InstrumentHandlerCounter( -// promauto.NewCounterVec( -// prometheus.CounterOpts{ -// Name: "hello_requests_total", -// Help: "Total number of hello-world requests by HTTP code.", -// }, -// []string{"code"}, -// ), -// http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { -// fmt.Fprint(w, "Hello, world!") -// }), -// )) -// http.Handle("/metrics", promhttp.Handler()) -// http.ListenAndServe(":1971", nil) -// } -// -// A Factory is created with the With(prometheus.Registerer) function, which -// enables two usage patterns.
With(prometheus.Registerer) can be called once per -// line: -// -// var ( -// reg = prometheus.NewRegistry() -// randomNumbers = promauto.With(reg).NewHistogram(prometheus.HistogramOpts{ -// Name: "random_numbers", -// Help: "A histogram of normally distributed random numbers.", -// Buckets: prometheus.LinearBuckets(-3, .1, 61), -// }) -// requestCount = promauto.With(reg).NewCounterVec( -// prometheus.CounterOpts{ -// Name: "http_requests_total", -// Help: "Total number of HTTP requests by status code and method.", -// }, -// []string{"code", "method"}, -// ) -// ) -// -// Or it can be used to create a Factory once to be used multiple times: -// -// var ( -// reg = prometheus.NewRegistry() -// factory = promauto.With(reg) -// randomNumbers = factory.NewHistogram(prometheus.HistogramOpts{ -// Name: "random_numbers", -// Help: "A histogram of normally distributed random numbers.", -// Buckets: prometheus.LinearBuckets(-3, .1, 61), -// }) -// requestCount = factory.NewCounterVec( -// prometheus.CounterOpts{ -// Name: "http_requests_total", -// Help: "Total number of HTTP requests by status code and method.", -// }, -// []string{"code", "method"}, -// ) -// ) -// -// This appears very handy. So why are these constructors locked away in a -// separate package? -// -// The main problem is that registration may fail, e.g. if a metric inconsistent -// with or equal to the newly to be registered one is already registered. -// Therefore, the Register method in the prometheus.Registerer interface returns -// an error, and the same is the case for the top-level prometheus.Register -// function that registers with the global registry. The prometheus package also -// provides MustRegister versions for both. They panic if the registration -// fails, and they clearly call this out by using the Must… idiom. Panicking is -// problematic in this case because it doesn't just happen on input provided by -// the caller that is invalid on its own. Things are a bit more subtle here: -// Metric creation and registration tend to be spread widely over the -// codebase. It can easily happen that an incompatible metric is added to an -// unrelated part of the code, and suddenly code that used to work perfectly -// fine starts to panic (provided that the registration of the newly added -// metric happens before the registration of the previously existing -// metric). This may come as an even bigger surprise with the global registry, -// where simply importing another package can trigger a panic (if the newly -// imported package registers metrics in its init function). At least, in the -// prometheus package, creation of metrics and other collectors is separate from -// registration. You first create the metric, and then you decide explicitly if -// you want to register it with a local or the global registry, and if you want -// to handle the error or risk a panic. With the constructors in the promauto -// package, registration is automatic, and if it fails, it will always -// panic. Furthermore, the constructors will often be called in the var section -// of a file, which means that panicking will happen as a side effect of merely -// importing a package. -// -// A separate package allows conservative users to entirely ignore it. And -// whoever wants to use it, will do so explicitly, with an opportunity to read -// this warning. -// -// Enjoy promauto responsibly! 
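To make the trade-off described above concrete, a minimal sketch of the explicit create-then-register flow from the prometheus package, handling the registration error instead of panicking; getOrRegisterCounter is a hypothetical helper name, not part of the library:

    package main

    import (
        "errors"

        "github.com/prometheus/client_golang/prometheus"
    )

    // getOrRegisterCounter registers the Counter once and, on
    // AlreadyRegisteredError, switches over to the existing one.
    func getOrRegisterCounter(opts prometheus.CounterOpts) prometheus.Counter {
        c := prometheus.NewCounter(opts)
        if err := prometheus.Register(c); err != nil {
            var are prometheus.AlreadyRegisteredError
            if errors.As(err, &are) {
                // An equal Collector was registered before; reuse it.
                return are.ExistingCollector.(prometheus.Counter)
            }
            panic(err) // Inconsistent registration is a programming error.
        }
        return c
    }

    func main() {
        startups := getOrRegisterCounter(prometheus.CounterOpts{
            Name: "app_startups_total",
            Help: "Total number of application startups.",
        })
        startups.Inc()
    }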
-package promauto - -import "github.com/prometheus/client_golang/prometheus" - -// NewCounter works like the function of the same name in the prometheus package -// but it automatically registers the Counter with the -// prometheus.DefaultRegisterer. If the registration fails, NewCounter panics. -func NewCounter(opts prometheus.CounterOpts) prometheus.Counter { - return With(prometheus.DefaultRegisterer).NewCounter(opts) -} - -// NewCounterVec works like the function of the same name in the prometheus -// package but it automatically registers the CounterVec with the -// prometheus.DefaultRegisterer. If the registration fails, NewCounterVec -// panics. -func NewCounterVec(opts prometheus.CounterOpts, labelNames []string) *prometheus.CounterVec { - return With(prometheus.DefaultRegisterer).NewCounterVec(opts, labelNames) -} - -// NewCounterFunc works like the function of the same name in the prometheus -// package but it automatically registers the CounterFunc with the -// prometheus.DefaultRegisterer. If the registration fails, NewCounterFunc -// panics. -func NewCounterFunc(opts prometheus.CounterOpts, function func() float64) prometheus.CounterFunc { - return With(prometheus.DefaultRegisterer).NewCounterFunc(opts, function) -} - -// NewGauge works like the function of the same name in the prometheus package -// but it automatically registers the Gauge with the -// prometheus.DefaultRegisterer. If the registration fails, NewGauge panics. -func NewGauge(opts prometheus.GaugeOpts) prometheus.Gauge { - return With(prometheus.DefaultRegisterer).NewGauge(opts) -} - -// NewGaugeVec works like the function of the same name in the prometheus -// package but it automatically registers the GaugeVec with the -// prometheus.DefaultRegisterer. If the registration fails, NewGaugeVec panics. -func NewGaugeVec(opts prometheus.GaugeOpts, labelNames []string) *prometheus.GaugeVec { - return With(prometheus.DefaultRegisterer).NewGaugeVec(opts, labelNames) -} - -// NewGaugeFunc works like the function of the same name in the prometheus -// package but it automatically registers the GaugeFunc with the -// prometheus.DefaultRegisterer. If the registration fails, NewGaugeFunc panics. -func NewGaugeFunc(opts prometheus.GaugeOpts, function func() float64) prometheus.GaugeFunc { - return With(prometheus.DefaultRegisterer).NewGaugeFunc(opts, function) -} - -// NewSummary works like the function of the same name in the prometheus package -// but it automatically registers the Summary with the -// prometheus.DefaultRegisterer. If the registration fails, NewSummary panics. -func NewSummary(opts prometheus.SummaryOpts) prometheus.Summary { - return With(prometheus.DefaultRegisterer).NewSummary(opts) -} - -// NewSummaryVec works like the function of the same name in the prometheus -// package but it automatically registers the SummaryVec with the -// prometheus.DefaultRegisterer. If the registration fails, NewSummaryVec -// panics. -func NewSummaryVec(opts prometheus.SummaryOpts, labelNames []string) *prometheus.SummaryVec { - return With(prometheus.DefaultRegisterer).NewSummaryVec(opts, labelNames) -} - -// NewHistogram works like the function of the same name in the prometheus -// package but it automatically registers the Histogram with the -// prometheus.DefaultRegisterer. If the registration fails, NewHistogram panics. 
-func NewHistogram(opts prometheus.HistogramOpts) prometheus.Histogram { - return With(prometheus.DefaultRegisterer).NewHistogram(opts) -} - -// NewHistogramVec works like the function of the same name in the prometheus -// package but it automatically registers the HistogramVec with the -// prometheus.DefaultRegisterer. If the registration fails, NewHistogramVec -// panics. -func NewHistogramVec(opts prometheus.HistogramOpts, labelNames []string) *prometheus.HistogramVec { - return With(prometheus.DefaultRegisterer).NewHistogramVec(opts, labelNames) -} - -// NewUntypedFunc works like the function of the same name in the prometheus -// package but it automatically registers the UntypedFunc with the -// prometheus.DefaultRegisterer. If the registration fails, NewUntypedFunc -// panics. -func NewUntypedFunc(opts prometheus.UntypedOpts, function func() float64) prometheus.UntypedFunc { - return With(prometheus.DefaultRegisterer).NewUntypedFunc(opts, function) -} - -// Factory provides factory methods to create Collectors that are automatically -// registered with a Registerer. Create a Factory with the With function, -// providing a Registerer to auto-register created Collectors with. The zero -// value of a Factory creates Collectors that are not registered with any -// Registerer. All methods of the Factory panic if the registration fails. -type Factory struct { - r prometheus.Registerer -} - -// With creates a Factory using the provided Registerer for registration of the -// created Collectors. If the provided Registerer is nil, the returned Factory -// creates Collectors that are not registered with any Registerer. -func With(r prometheus.Registerer) Factory { return Factory{r} } - -// NewCounter works like the function of the same name in the prometheus package -// but it automatically registers the Counter with the Factory's Registerer. -func (f Factory) NewCounter(opts prometheus.CounterOpts) prometheus.Counter { - c := prometheus.NewCounter(opts) - if f.r != nil { - f.r.MustRegister(c) - } - return c -} - -// NewCounterVec works like the function of the same name in the prometheus -// package but it automatically registers the CounterVec with the Factory's -// Registerer. -func (f Factory) NewCounterVec(opts prometheus.CounterOpts, labelNames []string) *prometheus.CounterVec { - c := prometheus.NewCounterVec(opts, labelNames) - if f.r != nil { - f.r.MustRegister(c) - } - return c -} - -// NewCounterFunc works like the function of the same name in the prometheus -// package but it automatically registers the CounterFunc with the Factory's -// Registerer. -func (f Factory) NewCounterFunc(opts prometheus.CounterOpts, function func() float64) prometheus.CounterFunc { - c := prometheus.NewCounterFunc(opts, function) - if f.r != nil { - f.r.MustRegister(c) - } - return c -} - -// NewGauge works like the function of the same name in the prometheus package -// but it automatically registers the Gauge with the Factory's Registerer. -func (f Factory) NewGauge(opts prometheus.GaugeOpts) prometheus.Gauge { - g := prometheus.NewGauge(opts) - if f.r != nil { - f.r.MustRegister(g) - } - return g -} - -// NewGaugeVec works like the function of the same name in the prometheus -// package but it automatically registers the GaugeVec with the Factory's -// Registerer. 
-func (f Factory) NewGaugeVec(opts prometheus.GaugeOpts, labelNames []string) *prometheus.GaugeVec { - g := prometheus.NewGaugeVec(opts, labelNames) - if f.r != nil { - f.r.MustRegister(g) - } - return g -} - -// NewGaugeFunc works like the function of the same name in the prometheus -// package but it automatically registers the GaugeFunc with the Factory's -// Registerer. -func (f Factory) NewGaugeFunc(opts prometheus.GaugeOpts, function func() float64) prometheus.GaugeFunc { - g := prometheus.NewGaugeFunc(opts, function) - if f.r != nil { - f.r.MustRegister(g) - } - return g -} - -// NewSummary works like the function of the same name in the prometheus package -// but it automatically registers the Summary with the Factory's Registerer. -func (f Factory) NewSummary(opts prometheus.SummaryOpts) prometheus.Summary { - s := prometheus.NewSummary(opts) - if f.r != nil { - f.r.MustRegister(s) - } - return s -} - -// NewSummaryVec works like the function of the same name in the prometheus -// package but it automatically registers the SummaryVec with the Factory's -// Registerer. -func (f Factory) NewSummaryVec(opts prometheus.SummaryOpts, labelNames []string) *prometheus.SummaryVec { - s := prometheus.NewSummaryVec(opts, labelNames) - if f.r != nil { - f.r.MustRegister(s) - } - return s -} - -// NewHistogram works like the function of the same name in the prometheus -// package but it automatically registers the Histogram with the Factory's -// Registerer. -func (f Factory) NewHistogram(opts prometheus.HistogramOpts) prometheus.Histogram { - h := prometheus.NewHistogram(opts) - if f.r != nil { - f.r.MustRegister(h) - } - return h -} - -// NewHistogramVec works like the function of the same name in the prometheus -// package but it automatically registers the HistogramVec with the Factory's -// Registerer. -func (f Factory) NewHistogramVec(opts prometheus.HistogramOpts, labelNames []string) *prometheus.HistogramVec { - h := prometheus.NewHistogramVec(opts, labelNames) - if f.r != nil { - f.r.MustRegister(h) - } - return h -} - -// NewUntypedFunc works like the function of the same name in the prometheus -// package but it automatically registers the UntypedFunc with the Factory's -// Registerer. -func (f Factory) NewUntypedFunc(opts prometheus.UntypedOpts, function func() float64) prometheus.UntypedFunc { - u := prometheus.NewUntypedFunc(opts, function) - if f.r != nil { - f.r.MustRegister(u) - } - return u -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go deleted file mode 100644 index 383a7f59..00000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go +++ /dev/null @@ -1,950 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package prometheus - -import ( - "bytes" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "runtime" - "sort" - "strings" - "sync" - "unicode/utf8" - - "github.com/cespare/xxhash/v2" - //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. - "github.com/golang/protobuf/proto" - "github.com/prometheus/common/expfmt" - - dto "github.com/prometheus/client_model/go" - - "github.com/prometheus/client_golang/prometheus/internal" -) - -const ( - // Capacity for the channel to collect metrics and descriptors. - capMetricChan = 1000 - capDescChan = 10 -) - -// DefaultRegisterer and DefaultGatherer are the implementations of the -// Registerer and Gatherer interface a number of convenience functions in this -// package act on. Initially, both variables point to the same Registry, which -// has a process collector (currently on Linux only, see NewProcessCollector) -// and a Go collector (see NewGoCollector, in particular the note about -// stop-the-world implication with Go versions older than 1.9) already -// registered. This approach to keep default instances as global state mirrors -// the approach of other packages in the Go standard library. Note that there -// are caveats. Change the variables with caution and only if you understand the -// consequences. Users who want to avoid global state altogether should not use -// the convenience functions and act on custom instances instead. -var ( - defaultRegistry = NewRegistry() - DefaultRegisterer Registerer = defaultRegistry - DefaultGatherer Gatherer = defaultRegistry -) - -func init() { - MustRegister(NewProcessCollector(ProcessCollectorOpts{})) - MustRegister(NewGoCollector()) -} - -// NewRegistry creates a new vanilla Registry without any Collectors -// pre-registered. -func NewRegistry() *Registry { - return &Registry{ - collectorsByID: map[uint64]Collector{}, - descIDs: map[uint64]struct{}{}, - dimHashesByName: map[string]uint64{}, - } -} - -// NewPedanticRegistry returns a registry that checks during collection if each -// collected Metric is consistent with its reported Desc, and if the Desc has -// actually been registered with the registry. Unchecked Collectors (those whose -// Describe method does not yield any descriptors) are excluded from the check. -// -// Usually, a Registry will be happy as long as the union of all collected -// Metrics is consistent and valid even if some metrics are not consistent with -// their own Desc or a Desc provided by their registered Collector. Well-behaved -// Collectors and Metrics will only provide consistent Descs. This Registry is -// useful to test the implementation of Collectors and Metrics. -func NewPedanticRegistry() *Registry { - r := NewRegistry() - r.pedanticChecksEnabled = true - return r -} - -// Registerer is the interface for the part of a registry in charge of -// registering and unregistering. Users of custom registries should use -// Registerer as type for registration purposes (rather than the Registry type -// directly). In that way, they are free to use custom Registerer implementation -// (e.g. for testing purposes). -type Registerer interface { - // Register registers a new Collector to be included in metrics - // collection. It returns an error if the descriptors provided by the - // Collector are invalid or if they — in combination with descriptors of - // already registered Collectors — do not fulfill the consistency and - // uniqueness criteria described in the documentation of metric.Desc. 
- // - // If the provided Collector is equal to a Collector already registered - // (which includes the case of re-registering the same Collector), the - // returned error is an instance of AlreadyRegisteredError, which - // contains the previously registered Collector. - // - // A Collector whose Describe method does not yield any Desc is treated - // as unchecked. Registration will always succeed. No check for - // re-registering (see previous paragraph) is performed. Thus, the - // caller is responsible for not double-registering the same unchecked - // Collector, and for providing a Collector that will not cause - // inconsistent metrics on collection. (This would lead to scrape - // errors.) - Register(Collector) error - // MustRegister works like Register but registers any number of - // Collectors and panics upon the first registration that causes an - // error. - MustRegister(...Collector) - // Unregister unregisters the Collector that equals the Collector passed - // in as an argument. (Two Collectors are considered equal if their - // Describe method yields the same set of descriptors.) The function - // returns whether a Collector was unregistered. Note that an unchecked - // Collector cannot be unregistered (as its Describe method does not - // yield any descriptor). - // - // Note that even after unregistering, it will not be possible to - // register a new Collector that is inconsistent with the unregistered - // Collector, e.g. a Collector collecting metrics with the same name but - // a different help string. The rationale here is that the same registry - // instance must only collect consistent metrics throughout its - // lifetime. - Unregister(Collector) bool -} - -// Gatherer is the interface for the part of a registry in charge of gathering -// the collected metrics into a number of MetricFamilies. The Gatherer interface -// comes with the same general implication as described for the Registerer -// interface. -type Gatherer interface { - // Gather calls the Collect method of the registered Collectors and then - // gathers the collected metrics into a lexicographically sorted slice - // of uniquely named MetricFamily protobufs. Gather ensures that the - // returned slice is valid and self-consistent so that it can be used - // for valid exposition. As an exception to the strict consistency - // requirements described for metric.Desc, Gather will tolerate - // different sets of label names for metrics of the same metric family. - // - // Even if an error occurs, Gather attempts to gather as many metrics as - // possible. Hence, if a non-nil error is returned, the returned - // MetricFamily slice could be nil (in case of a fatal error that - // prevented any meaningful metric collection) or contain a number of - // MetricFamily protobufs, some of which might be incomplete, and some - // might be missing altogether. The returned error (which might be a - // MultiError) explains the details. Note that this is mostly useful for - // debugging purposes. If the gathered protobufs are to be used for - // exposition in actual monitoring, it is almost always better to not - // expose an incomplete result and instead disregard the returned - // MetricFamily protobufs in case the returned error is non-nil. - Gather() ([]*dto.MetricFamily, error) -} - -// Register registers the provided Collector with the DefaultRegisterer. -// -// Register is a shortcut for DefaultRegisterer.Register(c). See there for more -// details. 
-func Register(c Collector) error { - return DefaultRegisterer.Register(c) -} - -// MustRegister registers the provided Collectors with the DefaultRegisterer and -// panics if any error occurs. -// -// MustRegister is a shortcut for DefaultRegisterer.MustRegister(cs...). See -// there for more details. -func MustRegister(cs ...Collector) { - DefaultRegisterer.MustRegister(cs...) -} - -// Unregister removes the registration of the provided Collector from the -// DefaultRegisterer. -// -// Unregister is a shortcut for DefaultRegisterer.Unregister(c). See there for -// more details. -func Unregister(c Collector) bool { - return DefaultRegisterer.Unregister(c) -} - -// GathererFunc turns a function into a Gatherer. -type GathererFunc func() ([]*dto.MetricFamily, error) - -// Gather implements Gatherer. -func (gf GathererFunc) Gather() ([]*dto.MetricFamily, error) { - return gf() -} - -// AlreadyRegisteredError is returned by the Register method if the Collector to -// be registered has already been registered before, or a different Collector -// that collects the same metrics has been registered before. Registration fails -// in that case, but you can detect from the kind of error what has -// happened. The error contains fields for the existing Collector and the -// (rejected) new Collector that equals the existing one. This can be used to -// find out if an equal Collector has been registered before and switch over to -// using the old one, as demonstrated in the example. -type AlreadyRegisteredError struct { - ExistingCollector, NewCollector Collector -} - -func (err AlreadyRegisteredError) Error() string { - return "duplicate metrics collector registration attempted" -} - -// MultiError is a slice of errors implementing the error interface. It is used -// by a Gatherer to report multiple errors during MetricFamily gathering. -type MultiError []error - -// Error formats the contained errors as a bullet point list, preceded by the -// total number of errors. Note that this results in a multi-line string. -func (errs MultiError) Error() string { - if len(errs) == 0 { - return "" - } - buf := &bytes.Buffer{} - fmt.Fprintf(buf, "%d error(s) occurred:", len(errs)) - for _, err := range errs { - fmt.Fprintf(buf, "\n* %s", err) - } - return buf.String() -} - -// Append appends the provided error if it is not nil. -func (errs *MultiError) Append(err error) { - if err != nil { - *errs = append(*errs, err) - } -} - -// MaybeUnwrap returns nil if len(errs) is 0. It returns the first and only -// contained error as error if len(errs) is 1. In all other cases, it returns -// the MultiError directly. This is helpful for returning a MultiError in a way -// that only uses the MultiError if needed. -func (errs MultiError) MaybeUnwrap() error { - switch len(errs) { - case 0: - return nil - case 1: - return errs[0] - default: - return errs - } -} - -// Registry registers Prometheus collectors, collects their metrics, and gathers -// them into MetricFamilies for exposition. It implements both Registerer and -// Gatherer. The zero value is not usable. Create instances with NewRegistry or -// NewPedanticRegistry. -type Registry struct { - mtx sync.RWMutex - collectorsByID map[uint64]Collector // ID is a hash of the descIDs. - descIDs map[uint64]struct{} - dimHashesByName map[string]uint64 - uncheckedCollectors []Collector - pedanticChecksEnabled bool -} - -// Register implements Registerer.
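A minimal sketch of the direct-injection use case GathererFunc enables: adapting a plain function that returns pre-built MetricFamily protobufs, bypassing registration entirely; the external_build_info family is made up for illustration:

    package main

    import (
        "fmt"

        "github.com/golang/protobuf/proto"
        "github.com/prometheus/client_golang/prometheus"
        dto "github.com/prometheus/client_model/go"
    )

    func main() {
        // A pre-built MetricFamily, e.g. mirrored from another process.
        mf := &dto.MetricFamily{
            Name: proto.String("external_build_info"),
            Help: proto.String("Build info mirrored from an external source."),
            Type: dto.MetricType_GAUGE.Enum(),
            Metric: []*dto.Metric{
                {Gauge: &dto.Gauge{Value: proto.Float64(1)}},
            },
        }

        // GathererFunc adapts the closure to the Gatherer interface.
        static := prometheus.GathererFunc(func() ([]*dto.MetricFamily, error) {
            return []*dto.MetricFamily{mf}, nil
        })

        mfs, err := static.Gather()
        fmt.Println(len(mfs), err)
    }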
-func (r *Registry) Register(c Collector) error { - var ( - descChan = make(chan *Desc, capDescChan) - newDescIDs = map[uint64]struct{}{} - newDimHashesByName = map[string]uint64{} - collectorID uint64 // All desc IDs XOR'd together. - duplicateDescErr error - ) - go func() { - c.Describe(descChan) - close(descChan) - }() - r.mtx.Lock() - defer func() { - // Drain channel in case of premature return to not leak a goroutine. - for range descChan { - } - r.mtx.Unlock() - }() - // Conduct various tests... - for desc := range descChan { - - // Is the descriptor valid at all? - if desc.err != nil { - return fmt.Errorf("descriptor %s is invalid: %s", desc, desc.err) - } - - // Is the descID unique? - // (In other words: Is the fqName + constLabel combination unique?) - if _, exists := r.descIDs[desc.id]; exists { - duplicateDescErr = fmt.Errorf("descriptor %s already exists with the same fully-qualified name and const label values", desc) - } - // If it is not a duplicate desc in this collector, XOR it to - // the collectorID. (We allow duplicate descs within the same - // collector, but their existence must be a no-op.) - if _, exists := newDescIDs[desc.id]; !exists { - newDescIDs[desc.id] = struct{}{} - collectorID ^= desc.id - } - - // Are all the label names and the help string consistent with - // previous descriptors of the same name? - // First check existing descriptors... - if dimHash, exists := r.dimHashesByName[desc.fqName]; exists { - if dimHash != desc.dimHash { - return fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc) - } - } else { - // ...then check the new descriptors already seen. - if dimHash, exists := newDimHashesByName[desc.fqName]; exists { - if dimHash != desc.dimHash { - return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc) - } - } else { - newDimHashesByName[desc.fqName] = desc.dimHash - } - } - } - // A Collector yielding no Desc at all is considered unchecked. - if len(newDescIDs) == 0 { - r.uncheckedCollectors = append(r.uncheckedCollectors, c) - return nil - } - if existing, exists := r.collectorsByID[collectorID]; exists { - switch e := existing.(type) { - case *wrappingCollector: - return AlreadyRegisteredError{ - ExistingCollector: e.unwrapRecursively(), - NewCollector: c, - } - default: - return AlreadyRegisteredError{ - ExistingCollector: e, - NewCollector: c, - } - } - } - // If the collectorID is new, but at least one of the descs existed - // before, we are in trouble. - if duplicateDescErr != nil { - return duplicateDescErr - } - - // Only after all tests have passed, actually register. - r.collectorsByID[collectorID] = c - for hash := range newDescIDs { - r.descIDs[hash] = struct{}{} - } - for name, dimHash := range newDimHashesByName { - r.dimHashesByName[name] = dimHash - } - return nil -} - -// Unregister implements Registerer. -func (r *Registry) Unregister(c Collector) bool { - var ( - descChan = make(chan *Desc, capDescChan) - descIDs = map[uint64]struct{}{} - collectorID uint64 // All desc IDs XOR'd together. 
- ) - go func() { - c.Describe(descChan) - close(descChan) - }() - for desc := range descChan { - if _, exists := descIDs[desc.id]; !exists { - collectorID ^= desc.id - descIDs[desc.id] = struct{}{} - } - } - - r.mtx.RLock() - if _, exists := r.collectorsByID[collectorID]; !exists { - r.mtx.RUnlock() - return false - } - r.mtx.RUnlock() - - r.mtx.Lock() - defer r.mtx.Unlock() - - delete(r.collectorsByID, collectorID) - for id := range descIDs { - delete(r.descIDs, id) - } - // dimHashesByName is left untouched as those must be consistent - // throughout the lifetime of a program. - return true -} - -// MustRegister implements Registerer. -func (r *Registry) MustRegister(cs ...Collector) { - for _, c := range cs { - if err := r.Register(c); err != nil { - panic(err) - } - } -} - -// Gather implements Gatherer. -func (r *Registry) Gather() ([]*dto.MetricFamily, error) { - var ( - checkedMetricChan = make(chan Metric, capMetricChan) - uncheckedMetricChan = make(chan Metric, capMetricChan) - metricHashes = map[uint64]struct{}{} - wg sync.WaitGroup - errs MultiError // The collected errors to return in the end. - registeredDescIDs map[uint64]struct{} // Only used for pedantic checks - ) - - r.mtx.RLock() - goroutineBudget := len(r.collectorsByID) + len(r.uncheckedCollectors) - metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName)) - checkedCollectors := make(chan Collector, len(r.collectorsByID)) - uncheckedCollectors := make(chan Collector, len(r.uncheckedCollectors)) - for _, collector := range r.collectorsByID { - checkedCollectors <- collector - } - for _, collector := range r.uncheckedCollectors { - uncheckedCollectors <- collector - } - // In case pedantic checks are enabled, we have to copy the map before - // giving up the RLock. - if r.pedanticChecksEnabled { - registeredDescIDs = make(map[uint64]struct{}, len(r.descIDs)) - for id := range r.descIDs { - registeredDescIDs[id] = struct{}{} - } - } - r.mtx.RUnlock() - - wg.Add(goroutineBudget) - - collectWorker := func() { - for { - select { - case collector := <-checkedCollectors: - collector.Collect(checkedMetricChan) - case collector := <-uncheckedCollectors: - collector.Collect(uncheckedMetricChan) - default: - return - } - wg.Done() - } - } - - // Start the first worker now to make sure at least one is running. - go collectWorker() - goroutineBudget-- - - // Close checkedMetricChan and uncheckedMetricChan once all collectors - // are collected. - go func() { - wg.Wait() - close(checkedMetricChan) - close(uncheckedMetricChan) - }() - - // Drain checkedMetricChan and uncheckedMetricChan in case of premature return. - defer func() { - if checkedMetricChan != nil { - for range checkedMetricChan { - } - } - if uncheckedMetricChan != nil { - for range uncheckedMetricChan { - } - } - }() - - // Copy the channel references so we can nil them out later to remove - // them from the select statements below. - cmc := checkedMetricChan - umc := uncheckedMetricChan - - for { - select { - case metric, ok := <-cmc: - if !ok { - cmc = nil - break - } - errs.Append(processMetric( - metric, metricFamiliesByName, - metricHashes, - registeredDescIDs, - )) - case metric, ok := <-umc: - if !ok { - umc = nil - break - } - errs.Append(processMetric( - metric, metricFamiliesByName, - metricHashes, - nil, - )) - default: - if goroutineBudget <= 0 || len(checkedCollectors)+len(uncheckedCollectors) == 0 { - // All collectors are already being worked on or - // we have already as many goroutines started as - // there are collectors. 
Do the same as above, - // just without the default. - select { - case metric, ok := <-cmc: - if !ok { - cmc = nil - break - } - errs.Append(processMetric( - metric, metricFamiliesByName, - metricHashes, - registeredDescIDs, - )) - case metric, ok := <-umc: - if !ok { - umc = nil - break - } - errs.Append(processMetric( - metric, metricFamiliesByName, - metricHashes, - nil, - )) - } - break - } - // Start more workers. - go collectWorker() - goroutineBudget-- - runtime.Gosched() - } - // Once both checkedMetricChan and uncheckedMetricChan are closed - // and drained, the contraption above will nil out cmc and umc, - // and then we can leave the collect loop here. - if cmc == nil && umc == nil { - break - } - } - return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() -} - -// WriteToTextfile calls Gather on the provided Gatherer, encodes the result in the -// Prometheus text format, and writes it to a temporary file. Upon success, the -// temporary file is renamed to the provided filename. -// -// This is intended for use with the textfile collector of the node exporter. -// Note that the node exporter expects the filename to be suffixed with ".prom". -func WriteToTextfile(filename string, g Gatherer) error { - tmp, err := ioutil.TempFile(filepath.Dir(filename), filepath.Base(filename)) - if err != nil { - return err - } - defer os.Remove(tmp.Name()) - - mfs, err := g.Gather() - if err != nil { - return err - } - for _, mf := range mfs { - if _, err := expfmt.MetricFamilyToText(tmp, mf); err != nil { - return err - } - } - if err := tmp.Close(); err != nil { - return err - } - - if err := os.Chmod(tmp.Name(), 0644); err != nil { - return err - } - return os.Rename(tmp.Name(), filename) -} - -// processMetric is an internal helper method only used by the Gather method. -func processMetric( - metric Metric, - metricFamiliesByName map[string]*dto.MetricFamily, - metricHashes map[uint64]struct{}, - registeredDescIDs map[uint64]struct{}, -) error { - desc := metric.Desc() - // Wrapped metrics collected by an unchecked Collector can have an - // invalid Desc. - if desc.err != nil { - return desc.err - } - dtoMetric := &dto.Metric{} - if err := metric.Write(dtoMetric); err != nil { - return fmt.Errorf("error collecting metric %v: %s", desc, err) - } - metricFamily, ok := metricFamiliesByName[desc.fqName] - if ok { // Existing name. - if metricFamily.GetHelp() != desc.help { - return fmt.Errorf( - "collected metric %s %s has help %q but should have %q", - desc.fqName, dtoMetric, desc.help, metricFamily.GetHelp(), - ) - } - // TODO(beorn7): Simplify switch once Desc has type.
- switch metricFamily.GetType() { - case dto.MetricType_COUNTER: - if dtoMetric.Counter == nil { - return fmt.Errorf( - "collected metric %s %s should be a Counter", - desc.fqName, dtoMetric, - ) - } - case dto.MetricType_GAUGE: - if dtoMetric.Gauge == nil { - return fmt.Errorf( - "collected metric %s %s should be a Gauge", - desc.fqName, dtoMetric, - ) - } - case dto.MetricType_SUMMARY: - if dtoMetric.Summary == nil { - return fmt.Errorf( - "collected metric %s %s should be a Summary", - desc.fqName, dtoMetric, - ) - } - case dto.MetricType_UNTYPED: - if dtoMetric.Untyped == nil { - return fmt.Errorf( - "collected metric %s %s should be Untyped", - desc.fqName, dtoMetric, - ) - } - case dto.MetricType_HISTOGRAM: - if dtoMetric.Histogram == nil { - return fmt.Errorf( - "collected metric %s %s should be a Histogram", - desc.fqName, dtoMetric, - ) - } - default: - panic("encountered MetricFamily with invalid type") - } - } else { // New name. - metricFamily = &dto.MetricFamily{} - metricFamily.Name = proto.String(desc.fqName) - metricFamily.Help = proto.String(desc.help) - // TODO(beorn7): Simplify switch once Desc has type. - switch { - case dtoMetric.Gauge != nil: - metricFamily.Type = dto.MetricType_GAUGE.Enum() - case dtoMetric.Counter != nil: - metricFamily.Type = dto.MetricType_COUNTER.Enum() - case dtoMetric.Summary != nil: - metricFamily.Type = dto.MetricType_SUMMARY.Enum() - case dtoMetric.Untyped != nil: - metricFamily.Type = dto.MetricType_UNTYPED.Enum() - case dtoMetric.Histogram != nil: - metricFamily.Type = dto.MetricType_HISTOGRAM.Enum() - default: - return fmt.Errorf("empty metric collected: %s", dtoMetric) - } - if err := checkSuffixCollisions(metricFamily, metricFamiliesByName); err != nil { - return err - } - metricFamiliesByName[desc.fqName] = metricFamily - } - if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes); err != nil { - return err - } - if registeredDescIDs != nil { - // Is the desc registered at all? - if _, exist := registeredDescIDs[desc.id]; !exist { - return fmt.Errorf( - "collected metric %s %s with unregistered descriptor %s", - metricFamily.GetName(), dtoMetric, desc, - ) - } - if err := checkDescConsistency(metricFamily, dtoMetric, desc); err != nil { - return err - } - } - metricFamily.Metric = append(metricFamily.Metric, dtoMetric) - return nil -} - -// Gatherers is a slice of Gatherer instances that implements the Gatherer -// interface itself. Its Gather method calls Gather on all Gatherers in the -// slice in order and returns the merged results. Errors returned from the -// Gather calls are all returned in a flattened MultiError. Duplicate and -// inconsistent Metrics are skipped (first occurrence in slice order wins) and -// reported in the returned error. -// -// Gatherers can be used to merge the Gather results from multiple -// Registries. It also provides a way to directly inject existing MetricFamily -// protobufs into the gathering by creating a custom Gatherer with a Gather -// method that simply returns the existing MetricFamily protobufs. Note that no -// registration is involved (in contrast to Collector registration), so -// obviously registration-time checks cannot happen. Any inconsistencies between -// the gathered MetricFamilies are reported as errors by the Gather method, and -// inconsistent Metrics are dropped. Invalid parts of the MetricFamilies -// (e.g. syntactically invalid metric or label names) will go undetected. -type Gatherers []Gatherer - -// Gather implements Gatherer. 
-func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) { - var ( - metricFamiliesByName = map[string]*dto.MetricFamily{} - metricHashes = map[uint64]struct{}{} - errs MultiError // The collected errors to return in the end. - ) - - for i, g := range gs { - mfs, err := g.Gather() - if err != nil { - if multiErr, ok := err.(MultiError); ok { - for _, err := range multiErr { - errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err)) - } - } else { - errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err)) - } - } - for _, mf := range mfs { - existingMF, exists := metricFamiliesByName[mf.GetName()] - if exists { - if existingMF.GetHelp() != mf.GetHelp() { - errs = append(errs, fmt.Errorf( - "gathered metric family %s has help %q but should have %q", - mf.GetName(), mf.GetHelp(), existingMF.GetHelp(), - )) - continue - } - if existingMF.GetType() != mf.GetType() { - errs = append(errs, fmt.Errorf( - "gathered metric family %s has type %s but should have %s", - mf.GetName(), mf.GetType(), existingMF.GetType(), - )) - continue - } - } else { - existingMF = &dto.MetricFamily{} - existingMF.Name = mf.Name - existingMF.Help = mf.Help - existingMF.Type = mf.Type - if err := checkSuffixCollisions(existingMF, metricFamiliesByName); err != nil { - errs = append(errs, err) - continue - } - metricFamiliesByName[mf.GetName()] = existingMF - } - for _, m := range mf.Metric { - if err := checkMetricConsistency(existingMF, m, metricHashes); err != nil { - errs = append(errs, err) - continue - } - existingMF.Metric = append(existingMF.Metric, m) - } - } - } - return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() -} - -// checkSuffixCollisions checks for collisions with the “magic” suffixes the -// Prometheus text format and the internal metric representation of the -// Prometheus server add while flattening Summaries and Histograms.
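As a usage note for the Gatherers type above, a sketch of merging two registries behind one /metrics endpoint via promhttp.HandlerFor; the registry names are illustrative:

    package main

    import (
        "net/http"

        "github.com/prometheus/client_golang/prometheus"
        "github.com/prometheus/client_golang/prometheus/promhttp"
    )

    func main() {
        // Two independent registries whose outputs are merged on scrape.
        // Duplicate or inconsistent metrics are skipped (first one wins)
        // and reported in the returned error.
        appReg := prometheus.NewRegistry()
        infraReg := prometheus.NewRegistry()

        merged := prometheus.Gatherers{appReg, infraReg}

        http.Handle("/metrics", promhttp.HandlerFor(merged, promhttp.HandlerOpts{}))
        http.ListenAndServe(":1971", nil)
    }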
-func checkSuffixCollisions(mf *dto.MetricFamily, mfs map[string]*dto.MetricFamily) error { - var ( - newName = mf.GetName() - newType = mf.GetType() - newNameWithoutSuffix = "" - ) - switch { - case strings.HasSuffix(newName, "_count"): - newNameWithoutSuffix = newName[:len(newName)-6] - case strings.HasSuffix(newName, "_sum"): - newNameWithoutSuffix = newName[:len(newName)-4] - case strings.HasSuffix(newName, "_bucket"): - newNameWithoutSuffix = newName[:len(newName)-7] - } - if newNameWithoutSuffix != "" { - if existingMF, ok := mfs[newNameWithoutSuffix]; ok { - switch existingMF.GetType() { - case dto.MetricType_SUMMARY: - if !strings.HasSuffix(newName, "_bucket") { - return fmt.Errorf( - "collected metric named %q collides with previously collected summary named %q", - newName, newNameWithoutSuffix, - ) - } - case dto.MetricType_HISTOGRAM: - return fmt.Errorf( - "collected metric named %q collides with previously collected histogram named %q", - newName, newNameWithoutSuffix, - ) - } - } - } - if newType == dto.MetricType_SUMMARY || newType == dto.MetricType_HISTOGRAM { - if _, ok := mfs[newName+"_count"]; ok { - return fmt.Errorf( - "collected histogram or summary named %q collides with previously collected metric named %q", - newName, newName+"_count", - ) - } - if _, ok := mfs[newName+"_sum"]; ok { - return fmt.Errorf( - "collected histogram or summary named %q collides with previously collected metric named %q", - newName, newName+"_sum", - ) - } - } - if newType == dto.MetricType_HISTOGRAM { - if _, ok := mfs[newName+"_bucket"]; ok { - return fmt.Errorf( - "collected histogram named %q collides with previously collected metric named %q", - newName, newName+"_bucket", - ) - } - } - return nil -} - -// checkMetricConsistency checks if the provided Metric is consistent with the -// provided MetricFamily. It also hashes the Metric labels and the MetricFamily -// name. If the resulting hash is already in the provided metricHashes, an error -// is returned. If not, it is added to metricHashes. -func checkMetricConsistency( - metricFamily *dto.MetricFamily, - dtoMetric *dto.Metric, - metricHashes map[uint64]struct{}, -) error { - name := metricFamily.GetName() - - // Type consistency with metric family. 
- if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil || - metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil || - metricFamily.GetType() == dto.MetricType_SUMMARY && dtoMetric.Summary == nil || - metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil || - metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil { - return fmt.Errorf( - "collected metric %q { %s} is not a %s", - name, dtoMetric, metricFamily.GetType(), - ) - } - - previousLabelName := "" - for _, labelPair := range dtoMetric.GetLabel() { - labelName := labelPair.GetName() - if labelName == previousLabelName { - return fmt.Errorf( - "collected metric %q { %s} has two or more labels with the same name: %s", - name, dtoMetric, labelName, - ) - } - if !checkLabelName(labelName) { - return fmt.Errorf( - "collected metric %q { %s} has a label with an invalid name: %s", - name, dtoMetric, labelName, - ) - } - if dtoMetric.Summary != nil && labelName == quantileLabel { - return fmt.Errorf( - "collected metric %q { %s} must not have an explicit %q label", - name, dtoMetric, quantileLabel, - ) - } - if !utf8.ValidString(labelPair.GetValue()) { - return fmt.Errorf( - "collected metric %q { %s} has a label named %q whose value is not utf8: %#v", - name, dtoMetric, labelName, labelPair.GetValue()) - } - previousLabelName = labelName - } - - // Is the metric unique (i.e. no other metric with the same name and the same labels)? - h := xxhash.New() - h.WriteString(name) - h.Write(separatorByteSlice) - // Make sure label pairs are sorted. We depend on it for the consistency - // check. - if !sort.IsSorted(labelPairSorter(dtoMetric.Label)) { - // We cannot sort dtoMetric.Label in place as it is immutable by contract. - copiedLabels := make([]*dto.LabelPair, len(dtoMetric.Label)) - copy(copiedLabels, dtoMetric.Label) - sort.Sort(labelPairSorter(copiedLabels)) - dtoMetric.Label = copiedLabels - } - for _, lp := range dtoMetric.Label { - h.WriteString(lp.GetName()) - h.Write(separatorByteSlice) - h.WriteString(lp.GetValue()) - h.Write(separatorByteSlice) - } - hSum := h.Sum64() - if _, exists := metricHashes[hSum]; exists { - return fmt.Errorf( - "collected metric %q { %s} was collected before with the same name and label values", - name, dtoMetric, - ) - } - metricHashes[hSum] = struct{}{} - return nil -} - -func checkDescConsistency( - metricFamily *dto.MetricFamily, - dtoMetric *dto.Metric, - desc *Desc, -) error { - // Desc help consistency with metric family help. - if metricFamily.GetHelp() != desc.help { - return fmt.Errorf( - "collected metric %s %s has help %q but should have %q", - metricFamily.GetName(), dtoMetric, metricFamily.GetHelp(), desc.help, - ) - } - - // Is the desc consistent with the content of the metric? 
- lpsFromDesc := make([]*dto.LabelPair, len(desc.constLabelPairs), len(dtoMetric.Label)) - copy(lpsFromDesc, desc.constLabelPairs) - for _, l := range desc.variableLabels { - lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{ - Name: proto.String(l), - }) - } - if len(lpsFromDesc) != len(dtoMetric.Label) { - return fmt.Errorf( - "labels in collected metric %s %s are inconsistent with descriptor %s", - metricFamily.GetName(), dtoMetric, desc, - ) - } - sort.Sort(labelPairSorter(lpsFromDesc)) - for i, lpFromDesc := range lpsFromDesc { - lpFromMetric := dtoMetric.Label[i] - if lpFromDesc.GetName() != lpFromMetric.GetName() || - lpFromDesc.Value != nil && lpFromDesc.GetValue() != lpFromMetric.GetValue() { - return fmt.Errorf( - "labels in collected metric %s %s are inconsistent with descriptor %s", - metricFamily.GetName(), dtoMetric, desc, - ) - } - } - return nil -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go deleted file mode 100644 index c5fa8ed7..00000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go +++ /dev/null @@ -1,744 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "fmt" - "math" - "runtime" - "sort" - "sync" - "sync/atomic" - "time" - - "github.com/beorn7/perks/quantile" - //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. - "github.com/golang/protobuf/proto" - - dto "github.com/prometheus/client_model/go" -) - -// quantileLabel is used for the label that defines the quantile in a -// summary. -const quantileLabel = "quantile" - -// A Summary captures individual observations from an event or sample stream and -// summarizes them in a manner similar to traditional summary statistics: 1. sum -// of observations, 2. observation count, 3. rank estimations. -// -// A typical use-case is the observation of request latencies. By default, a -// Summary provides the median, the 90th and the 99th percentile of the latency -// as rank estimations. However, the default behavior will change in the -// upcoming v1.0.0 of the library. There will be no rank estimations at all by -// default. For a sane transition, it is recommended to set the desired rank -// estimations explicitly. -// -// Note that the rank estimations cannot be aggregated in a meaningful way with -// the Prometheus query language (i.e. you cannot average or add them). If you -// need aggregatable quantiles (e.g. you want the 99th percentile latency of all -// queries served across all instances of a service), consider the Histogram -// metric type. See the Prometheus documentation for more details. -// -// To create Summary instances, use NewSummary. -type Summary interface { - Metric - Collector - - // Observe adds a single observation to the summary. Observations are - // usually positive or zero. 
Negative observations are accepted but
-	// prevent current versions of Prometheus from properly detecting
-	// counter resets in the sum of observations. See
-	// https://prometheus.io/docs/practices/histograms/#count-and-sum-of-observations
-	// for details.
-	Observe(float64)
-}
-
-var errQuantileLabelNotAllowed = fmt.Errorf(
-	"%q is not allowed as label name in summaries", quantileLabel,
-)
-
-// Default values for SummaryOpts.
-const (
-	// DefMaxAge is the default duration for which observations stay
-	// relevant.
-	DefMaxAge time.Duration = 10 * time.Minute
-	// DefAgeBuckets is the default number of buckets used to calculate the
-	// age of observations.
-	DefAgeBuckets = 5
-	// DefBufCap is the standard buffer size for collecting Summary observations.
-	DefBufCap = 500
-)
-
-// SummaryOpts bundles the options for creating a Summary metric. It is
-// mandatory to set Name to a non-empty string. While all other fields are
-// optional and can safely be left at their zero value, it is recommended to set
-// a help string and to explicitly set the Objectives field to the desired value
-// as the default value will change in the upcoming v1.0.0 of the library.
-type SummaryOpts struct {
-	// Namespace, Subsystem, and Name are components of the fully-qualified
-	// name of the Summary (created by joining these components with
-	// "_"). Only Name is mandatory, the others merely help structuring the
-	// name. Note that the fully-qualified name of the Summary must be a
-	// valid Prometheus metric name.
-	Namespace string
-	Subsystem string
-	Name      string
-
-	// Help provides information about this Summary.
-	//
-	// Metrics with the same fully-qualified name must have the same Help
-	// string.
-	Help string
-
-	// ConstLabels are used to attach fixed labels to this metric. Metrics
-	// with the same fully-qualified name must have the same label names in
-	// their ConstLabels.
-	//
-	// Due to the way a Summary is represented in the Prometheus text format
-	// and how it is handled by the Prometheus server internally, “quantile”
-	// is an illegal label name. Construction of a Summary or SummaryVec
-	// will panic if this label name is used in ConstLabels.
-	//
-	// ConstLabels are only used rarely. In particular, do not use them to
-	// attach the same labels to all your metrics. Those use cases are
-	// better covered by target labels set by the scraping Prometheus
-	// server, or by one specific metric (e.g. a build_info or a
-	// machine_role metric). See also
-	// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels
-	ConstLabels Labels
-
-	// Objectives defines the quantile rank estimates with their respective
-	// absolute error. If Objectives[q] = e, then the value reported for q
-	// will be the φ-quantile value for some φ between q-e and q+e. The
-	// default value is an empty map, resulting in a summary without
-	// quantiles.
-	Objectives map[float64]float64
-
-	// MaxAge defines the duration for which an observation stays relevant
-	// for the summary. Only applies to pre-calculated quantiles, does not
-	// apply to _sum and _count. Must be positive. The default value is
-	// DefMaxAge.
-	MaxAge time.Duration
-
-	// AgeBuckets is the number of buckets used to exclude observations that
-	// are older than MaxAge from the summary. A higher number has a
-	// resource penalty, so only increase it if the higher resolution is
-	// really required.
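Since the Objectives default changes in v1.0.0, it helps to see what setting it explicitly looks like. A minimal sketch against the library's public API, assuming the usual import path; the metric name, objective values, and MaxAge are illustrative only:

```go
package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Hypothetical request-latency summary; name, help, and numbers
	// are illustrative.
	latency := prometheus.NewSummary(prometheus.SummaryOpts{
		Name: "request_duration_seconds",
		Help: "Request latency in seconds.",
		// Explicit rank estimations: median, 90th, and 99th
		// percentile, each with its allowed absolute error.
		Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
		MaxAge:     5 * time.Minute, // quantiles only reflect the last 5m
	})
	prometheus.MustRegister(latency)

	latency.Observe(0.42) // one observation, in seconds
}
```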
For very high observation rates, you might want to - // reduce the number of age buckets. With only one age bucket, you will - // effectively see a complete reset of the summary each time MaxAge has - // passed. The default value is DefAgeBuckets. - AgeBuckets uint32 - - // BufCap defines the default sample stream buffer size. The default - // value of DefBufCap should suffice for most uses. If there is a need - // to increase the value, a multiple of 500 is recommended (because that - // is the internal buffer size of the underlying package - // "github.com/bmizerany/perks/quantile"). - BufCap uint32 -} - -// Problem with the sliding-window decay algorithm... The Merge method of -// perk/quantile is actually not working as advertised - and it might be -// unfixable, as the underlying algorithm is apparently not capable of merging -// summaries in the first place. To avoid using Merge, we are currently adding -// observations to _each_ age bucket, i.e. the effort to add a sample is -// essentially multiplied by the number of age buckets. When rotating age -// buckets, we empty the previous head stream. On scrape time, we simply take -// the quantiles from the head stream (no merging required). Result: More effort -// on observation time, less effort on scrape time, which is exactly the -// opposite of what we try to accomplish, but at least the results are correct. -// -// The quite elegant previous contraption to merge the age buckets efficiently -// on scrape time (see code up commit 6b9530d72ea715f0ba612c0120e6e09fbf1d49d0) -// can't be used anymore. - -// NewSummary creates a new Summary based on the provided SummaryOpts. -func NewSummary(opts SummaryOpts) Summary { - return newSummary( - NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ), - opts, - ) -} - -func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { - if len(desc.variableLabels) != len(labelValues) { - panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues)) - } - - for _, n := range desc.variableLabels { - if n == quantileLabel { - panic(errQuantileLabelNotAllowed) - } - } - for _, lp := range desc.constLabelPairs { - if lp.GetName() == quantileLabel { - panic(errQuantileLabelNotAllowed) - } - } - - if opts.Objectives == nil { - opts.Objectives = map[float64]float64{} - } - - if opts.MaxAge < 0 { - panic(fmt.Errorf("illegal max age MaxAge=%v", opts.MaxAge)) - } - if opts.MaxAge == 0 { - opts.MaxAge = DefMaxAge - } - - if opts.AgeBuckets == 0 { - opts.AgeBuckets = DefAgeBuckets - } - - if opts.BufCap == 0 { - opts.BufCap = DefBufCap - } - - if len(opts.Objectives) == 0 { - // Use the lock-free implementation of a Summary without objectives. - s := &noObjectivesSummary{ - desc: desc, - labelPairs: MakeLabelPairs(desc, labelValues), - counts: [2]*summaryCounts{{}, {}}, - } - s.init(s) // Init self-collection. 
- return s - } - - s := &summary{ - desc: desc, - - objectives: opts.Objectives, - sortedObjectives: make([]float64, 0, len(opts.Objectives)), - - labelPairs: MakeLabelPairs(desc, labelValues), - - hotBuf: make([]float64, 0, opts.BufCap), - coldBuf: make([]float64, 0, opts.BufCap), - streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets), - } - s.headStreamExpTime = time.Now().Add(s.streamDuration) - s.hotBufExpTime = s.headStreamExpTime - - for i := uint32(0); i < opts.AgeBuckets; i++ { - s.streams = append(s.streams, s.newStream()) - } - s.headStream = s.streams[0] - - for qu := range s.objectives { - s.sortedObjectives = append(s.sortedObjectives, qu) - } - sort.Float64s(s.sortedObjectives) - - s.init(s) // Init self-collection. - return s -} - -type summary struct { - selfCollector - - bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime. - mtx sync.Mutex // Protects every other moving part. - // Lock bufMtx before mtx if both are needed. - - desc *Desc - - objectives map[float64]float64 - sortedObjectives []float64 - - labelPairs []*dto.LabelPair - - sum float64 - cnt uint64 - - hotBuf, coldBuf []float64 - - streams []*quantile.Stream - streamDuration time.Duration - headStream *quantile.Stream - headStreamIdx int - headStreamExpTime, hotBufExpTime time.Time -} - -func (s *summary) Desc() *Desc { - return s.desc -} - -func (s *summary) Observe(v float64) { - s.bufMtx.Lock() - defer s.bufMtx.Unlock() - - now := time.Now() - if now.After(s.hotBufExpTime) { - s.asyncFlush(now) - } - s.hotBuf = append(s.hotBuf, v) - if len(s.hotBuf) == cap(s.hotBuf) { - s.asyncFlush(now) - } -} - -func (s *summary) Write(out *dto.Metric) error { - sum := &dto.Summary{} - qs := make([]*dto.Quantile, 0, len(s.objectives)) - - s.bufMtx.Lock() - s.mtx.Lock() - // Swap bufs even if hotBuf is empty to set new hotBufExpTime. - s.swapBufs(time.Now()) - s.bufMtx.Unlock() - - s.flushColdBuf() - sum.SampleCount = proto.Uint64(s.cnt) - sum.SampleSum = proto.Float64(s.sum) - - for _, rank := range s.sortedObjectives { - var q float64 - if s.headStream.Count() == 0 { - q = math.NaN() - } else { - q = s.headStream.Query(rank) - } - qs = append(qs, &dto.Quantile{ - Quantile: proto.Float64(rank), - Value: proto.Float64(q), - }) - } - - s.mtx.Unlock() - - if len(qs) > 0 { - sort.Sort(quantSort(qs)) - } - sum.Quantile = qs - - out.Summary = sum - out.Label = s.labelPairs - return nil -} - -func (s *summary) newStream() *quantile.Stream { - return quantile.NewTargeted(s.objectives) -} - -// asyncFlush needs bufMtx locked. -func (s *summary) asyncFlush(now time.Time) { - s.mtx.Lock() - s.swapBufs(now) - - // Unblock the original goroutine that was responsible for the mutation - // that triggered the compaction. But hold onto the global non-buffer - // state mutex until the operation finishes. - go func() { - s.flushColdBuf() - s.mtx.Unlock() - }() -} - -// rotateStreams needs mtx AND bufMtx locked. -func (s *summary) maybeRotateStreams() { - for !s.hotBufExpTime.Equal(s.headStreamExpTime) { - s.headStream.Reset() - s.headStreamIdx++ - if s.headStreamIdx >= len(s.streams) { - s.headStreamIdx = 0 - } - s.headStream = s.streams[s.headStreamIdx] - s.headStreamExpTime = s.headStreamExpTime.Add(s.streamDuration) - } -} - -// flushColdBuf needs mtx locked. 
-func (s *summary) flushColdBuf() {
-	for _, v := range s.coldBuf {
-		for _, stream := range s.streams {
-			stream.Insert(v)
-		}
-		s.cnt++
-		s.sum += v
-	}
-	s.coldBuf = s.coldBuf[0:0]
-	s.maybeRotateStreams()
-}
-
-// swapBufs needs mtx AND bufMtx locked, coldBuf must be empty.
-func (s *summary) swapBufs(now time.Time) {
-	if len(s.coldBuf) != 0 {
-		panic("coldBuf is not empty")
-	}
-	s.hotBuf, s.coldBuf = s.coldBuf, s.hotBuf
-	// hotBuf is now empty and gets new expiration set.
-	for now.After(s.hotBufExpTime) {
-		s.hotBufExpTime = s.hotBufExpTime.Add(s.streamDuration)
-	}
-}
-
-type summaryCounts struct {
-	// sumBits contains the bits of the float64 representing the sum of all
-	// observations. sumBits and count have to go first in the struct to
-	// guarantee alignment for atomic operations.
-	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
-	sumBits uint64
-	count   uint64
-}
-
-type noObjectivesSummary struct {
-	// countAndHotIdx enables lock-free writes with use of atomic updates.
-	// The most significant bit is the hot index [0 or 1] of the count field
-	// below. Observe calls update the hot one. All remaining bits count the
-	// number of Observe calls. Observe starts by incrementing this counter,
-	// and finishes by incrementing the count field in the respective
-	// summaryCounts, as a marker for completion.
-	//
-	// Calls of the Write method (which are non-mutating reads from the
-	// perspective of the summary) swap the hot–cold under the writeMtx
-	// lock. A cooldown is awaited (while locked) by comparing the number of
-	// observations with the initiation count. Once they match, then the
-	// last observation on the now cool one has completed. All cool fields must
-	// be merged into the new hot before releasing writeMtx.
-
-	// Fields with atomic access first! See alignment constraint:
-	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
-	countAndHotIdx uint64
-
-	selfCollector
-	desc     *Desc
-	writeMtx sync.Mutex // Only used in the Write method.
-
-	// Two counts, one is "hot" for lock-free observations, the other is
-	// "cold" for writing out a dto.Metric. It has to be an array of
-	// pointers to guarantee 64bit alignment of the summaryCounts, see
-	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG.
-	counts [2]*summaryCounts
-
-	labelPairs []*dto.LabelPair
-}
-
-func (s *noObjectivesSummary) Desc() *Desc {
-	return s.desc
-}
-
-func (s *noObjectivesSummary) Observe(v float64) {
-	// We increment s.countAndHotIdx so that the counter in the lower
-	// 63 bits gets incremented. At the same time, we get the new value
-	// back, which we can use to find the currently-hot counts.
-	n := atomic.AddUint64(&s.countAndHotIdx, 1)
-	hotCounts := s.counts[n>>63]
-
-	for {
-		oldBits := atomic.LoadUint64(&hotCounts.sumBits)
-		newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
-		if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
-			break
-		}
-	}
-	// Increment count last as we take it as a signal that the observation
-	// is complete.
-	atomic.AddUint64(&hotCounts.count, 1)
-}
-
-func (s *noObjectivesSummary) Write(out *dto.Metric) error {
-	// For simplicity, we protect this whole method by a mutex. It is not in
-	// the hot path, i.e. Observe is called much more often than Write. The
-	// complication of making Write lock-free isn't worth it, if possible at
-	// all.
-	s.writeMtx.Lock()
-	defer s.writeMtx.Unlock()
-
-	// Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0)
-	// without touching the count bits. See the struct comments for a full
-	// description of the algorithm.
-	n := atomic.AddUint64(&s.countAndHotIdx, 1<<63)
-	// count is contained unchanged in the lower 63 bits.
-	count := n & ((1 << 63) - 1)
-	// The most significant bit tells us which counts is hot. The complement
-	// is thus the cold one.
-	hotCounts := s.counts[n>>63]
-	coldCounts := s.counts[(^n)>>63]
-
-	// Await cooldown.
-	for count != atomic.LoadUint64(&coldCounts.count) {
-		runtime.Gosched() // Let observations get work done.
-	}
-
-	sum := &dto.Summary{
-		SampleCount: proto.Uint64(count),
-		SampleSum:   proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
-	}
-
-	out.Summary = sum
-	out.Label = s.labelPairs
-
-	// Finally add all the cold counts to the new hot counts and reset the cold counts.
-	atomic.AddUint64(&hotCounts.count, count)
-	atomic.StoreUint64(&coldCounts.count, 0)
-	for {
-		oldBits := atomic.LoadUint64(&hotCounts.sumBits)
-		newBits := math.Float64bits(math.Float64frombits(oldBits) + sum.GetSampleSum())
-		if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
-			atomic.StoreUint64(&coldCounts.sumBits, 0)
-			break
-		}
-	}
-	return nil
-}
-
-type quantSort []*dto.Quantile
-
-func (s quantSort) Len() int {
-	return len(s)
-}
-
-func (s quantSort) Swap(i, j int) {
-	s[i], s[j] = s[j], s[i]
-}
-
-func (s quantSort) Less(i, j int) bool {
-	return s[i].GetQuantile() < s[j].GetQuantile()
-}
-
-// SummaryVec is a Collector that bundles a set of Summaries that all share the
-// same Desc, but have different values for their variable labels. This is used
-// if you want to count the same thing partitioned by various dimensions
-// (e.g. HTTP request latencies, partitioned by status code and method). Create
-// instances with NewSummaryVec.
-type SummaryVec struct {
-	*MetricVec
-}
-
-// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and
-// partitioned by the given label names.
-//
-// Due to the way a Summary is represented in the Prometheus text format and how
-// it is handled by the Prometheus server internally, “quantile” is an illegal
-// label name. NewSummaryVec will panic if this label name is used.
-func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
-	for _, ln := range labelNames {
-		if ln == quantileLabel {
-			panic(errQuantileLabelNotAllowed)
-		}
-	}
-	desc := NewDesc(
-		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
-		opts.Help,
-		labelNames,
-		opts.ConstLabels,
-	)
-	return &SummaryVec{
-		MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
-			return newSummary(desc, opts, lvs...)
-		}),
-	}
-}
-
-// GetMetricWithLabelValues returns the Summary for the given slice of label
-// values (same order as the variable labels in Desc). If that combination of
-// label values is accessed for the first time, a new Summary is created.
-//
-// It is possible to call this method without using the returned Summary to only
-// create the new Summary but leave it at its starting value, a Summary without
-// any observations.
-//
-// Keeping the Summary for later use is possible (and should be considered if
-// performance is critical), but keep in mind that Reset, DeleteLabelValues and
-// Delete can be used to delete the Summary from the SummaryVec. In that case,
-// the Summary will still exist, but it will not be exported anymore, even if a
-// Summary with the same label values is created later. See also the CounterVec
-// example.
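A short usage sketch for the SummaryVec defined above; the metric and label names are invented for illustration:

```go
package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	durations := prometheus.NewSummaryVec(
		prometheus.SummaryOpts{
			Name:       "task_duration_seconds",
			Help:       "Task duration, partitioned by outcome.",
			Objectives: map[float64]float64{0.5: 0.05, 0.99: 0.001},
		},
		[]string{"outcome"}, // one Summary per distinct label value
	)
	prometheus.MustRegister(durations)

	// Positional label values; panics on a label-count mismatch.
	durations.WithLabelValues("ok").Observe(1.7)
	durations.WithLabelValues("error").Observe(4.2)
}
```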
-//
-// An error is returned if the number of label values is not the same as the
-// number of variable labels in Desc (minus any curried labels).
-//
-// Note that for more than one label value, this method is prone to mistakes
-// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
-// an alternative to avoid that type of mistake. For higher label numbers, the
-// latter has a much more readable (albeit more verbose) syntax, but it comes
-// with a performance overhead (for creating and processing the Labels map).
-// See also the GaugeVec example.
-func (v *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
-	metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...)
-	if metric != nil {
-		return metric.(Observer), err
-	}
-	return nil, err
-}
-
-// GetMetricWith returns the Summary for the given Labels map (the label names
-// must match those of the variable labels in Desc). If that label map is
-// accessed for the first time, a new Summary is created. Implications of
-// creating a Summary without using it and keeping the Summary for later use are
-// the same as for GetMetricWithLabelValues.
-//
-// An error is returned if the number and names of the Labels are inconsistent
-// with those of the variable labels in Desc (minus any curried labels).
-//
-// This method is used for the same purpose as
-// GetMetricWithLabelValues(...string). See there for pros and cons of the two
-// methods.
-func (v *SummaryVec) GetMetricWith(labels Labels) (Observer, error) {
-	metric, err := v.MetricVec.GetMetricWith(labels)
-	if metric != nil {
-		return metric.(Observer), err
-	}
-	return nil, err
-}
-
-// WithLabelValues works as GetMetricWithLabelValues, but panics where
-// GetMetricWithLabelValues would have returned an error. Not returning an
-// error allows shortcuts like
-// myVec.WithLabelValues("404", "GET").Observe(42.21)
-func (v *SummaryVec) WithLabelValues(lvs ...string) Observer {
-	s, err := v.GetMetricWithLabelValues(lvs...)
-	if err != nil {
-		panic(err)
-	}
-	return s
-}
-
-// With works as GetMetricWith, but panics where GetMetricWith would have
-// returned an error. Not returning an error allows shortcuts like
-// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
-func (v *SummaryVec) With(labels Labels) Observer {
-	s, err := v.GetMetricWith(labels)
-	if err != nil {
-		panic(err)
-	}
-	return s
-}
-
-// CurryWith returns a vector curried with the provided labels, i.e. the
-// returned vector has those labels pre-set for all labeled operations performed
-// on it. The cardinality of the curried vector is reduced accordingly. The
-// order of the remaining labels stays the same (just with the curried labels
-// taken out of the sequence – which is relevant for the
-// (GetMetric)WithLabelValues methods). It is possible to curry a curried
-// vector, but only with labels not yet used for currying before.
-//
-// The metrics contained in the SummaryVec are shared between the curried and
-// uncurried vectors. They are just accessed differently. Curried and uncurried
-// vectors behave identically in terms of collection. Only one must be
-// registered with a given registry (usually the uncurried version). The Reset
-// method deletes all metrics, even if called on a curried vector.
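The currying just described looks roughly like this in practice. A hedged sketch with an invented metric, where MustCurryWith pre-sets the service label so call sites only supply the remaining verb:

```go
package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	// Illustrative only: a vector with two labels.
	v := prometheus.NewSummaryVec(
		prometheus.SummaryOpts{
			Name:       "query_duration_seconds",
			Help:       "Query latency by service and verb.",
			Objectives: map[float64]float64{0.5: 0.05},
		},
		[]string{"service", "verb"},
	)
	prometheus.MustRegister(v)

	// The curried view only needs the remaining "verb" label.
	authQueries := v.MustCurryWith(prometheus.Labels{"service": "auth"})
	authQueries.WithLabelValues("SELECT").Observe(0.03)
}
```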
-func (v *SummaryVec) CurryWith(labels Labels) (ObserverVec, error) { - vec, err := v.MetricVec.CurryWith(labels) - if vec != nil { - return &SummaryVec{vec}, err - } - return nil, err -} - -// MustCurryWith works as CurryWith but panics where CurryWith would have -// returned an error. -func (v *SummaryVec) MustCurryWith(labels Labels) ObserverVec { - vec, err := v.CurryWith(labels) - if err != nil { - panic(err) - } - return vec -} - -type constSummary struct { - desc *Desc - count uint64 - sum float64 - quantiles map[float64]float64 - labelPairs []*dto.LabelPair -} - -func (s *constSummary) Desc() *Desc { - return s.desc -} - -func (s *constSummary) Write(out *dto.Metric) error { - sum := &dto.Summary{} - qs := make([]*dto.Quantile, 0, len(s.quantiles)) - - sum.SampleCount = proto.Uint64(s.count) - sum.SampleSum = proto.Float64(s.sum) - - for rank, q := range s.quantiles { - qs = append(qs, &dto.Quantile{ - Quantile: proto.Float64(rank), - Value: proto.Float64(q), - }) - } - - if len(qs) > 0 { - sort.Sort(quantSort(qs)) - } - sum.Quantile = qs - - out.Summary = sum - out.Label = s.labelPairs - - return nil -} - -// NewConstSummary returns a metric representing a Prometheus summary with fixed -// values for the count, sum, and quantiles. As those parameters cannot be -// changed, the returned value does not implement the Summary interface (but -// only the Metric interface). Users of this package will not have much use for -// it in regular operations. However, when implementing custom Collectors, it is -// useful as a throw-away metric that is generated on the fly to send it to -// Prometheus in the Collect method. -// -// quantiles maps ranks to quantile values. For example, a median latency of -// 0.23s and a 99th percentile latency of 0.56s would be expressed as: -// map[float64]float64{0.5: 0.23, 0.99: 0.56} -// -// NewConstSummary returns an error if the length of labelValues is not -// consistent with the variable labels in Desc or if Desc is invalid. -func NewConstSummary( - desc *Desc, - count uint64, - sum float64, - quantiles map[float64]float64, - labelValues ...string, -) (Metric, error) { - if desc.err != nil { - return nil, desc.err - } - if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { - return nil, err - } - return &constSummary{ - desc: desc, - count: count, - sum: sum, - quantiles: quantiles, - labelPairs: MakeLabelPairs(desc, labelValues), - }, nil -} - -// MustNewConstSummary is a version of NewConstSummary that panics where -// NewConstMetric would have returned an error. -func MustNewConstSummary( - desc *Desc, - count uint64, - sum float64, - quantiles map[float64]float64, - labelValues ...string, -) Metric { - m, err := NewConstSummary(desc, count, sum, quantiles, labelValues...) - if err != nil { - panic(err) - } - return m -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/timer.go b/vendor/github.com/prometheus/client_golang/prometheus/timer.go deleted file mode 100644 index 8d5f1052..00000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/timer.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2016 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import "time" - -// Timer is a helper type to time functions. Use NewTimer to create new -// instances. -type Timer struct { - begin time.Time - observer Observer -} - -// NewTimer creates a new Timer. The provided Observer is used to observe a -// duration in seconds. Timer is usually used to time a function call in the -// following way: -// func TimeMe() { -// timer := NewTimer(myHistogram) -// defer timer.ObserveDuration() -// // Do actual work. -// } -func NewTimer(o Observer) *Timer { - return &Timer{ - begin: time.Now(), - observer: o, - } -} - -// ObserveDuration records the duration passed since the Timer was created with -// NewTimer. It calls the Observe method of the Observer provided during -// construction with the duration in seconds as an argument. The observed -// duration is also returned. ObserveDuration is usually called with a defer -// statement. -// -// Note that this method is only guaranteed to never observe negative durations -// if used with Go1.9+. -func (t *Timer) ObserveDuration() time.Duration { - d := time.Since(t.begin) - if t.observer != nil { - t.observer.Observe(d.Seconds()) - } - return d -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/untyped.go b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go deleted file mode 100644 index 0f9ce63f..00000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/untyped.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -// UntypedOpts is an alias for Opts. See there for doc comments. -type UntypedOpts Opts - -// UntypedFunc works like GaugeFunc but the collected metric is of type -// "Untyped". UntypedFunc is useful to mirror an external metric of unknown -// type. -// -// To create UntypedFunc instances, use NewUntypedFunc. -type UntypedFunc interface { - Metric - Collector -} - -// NewUntypedFunc creates a new UntypedFunc based on the provided -// UntypedOpts. The value reported is determined by calling the given function -// from within the Write method. Take into account that metric collection may -// happen concurrently. If that results in concurrent calls to Write, like in -// the case where an UntypedFunc is directly registered with Prometheus, the -// provided function must be concurrency-safe. 
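A sketch of mirroring an external value through NewUntypedFunc; readExternalValue is a hypothetical stand-in for whatever produces the number, and per the doc comment above it must be safe to call concurrently:

```go
package main

import "github.com/prometheus/client_golang/prometheus"

// readExternalValue is hypothetical: it stands in for reading a metric
// of unknown type from some external system.
func readExternalValue() float64 { return 42 }

func main() {
	uf := prometheus.NewUntypedFunc(prometheus.UntypedOpts{
		Name: "external_mirror_value",
		Help: "Value mirrored from an external system of unknown type.",
	}, readExternalValue) // called at collection time; must be concurrency-safe

	prometheus.MustRegister(uf)
}
```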
-func NewUntypedFunc(opts UntypedOpts, function func() float64) UntypedFunc { - return newValueFunc(NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ), UntypedValue, function) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go deleted file mode 100644 index c778711b..00000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/value.go +++ /dev/null @@ -1,212 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "fmt" - "sort" - "time" - "unicode/utf8" - - //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes" - - dto "github.com/prometheus/client_model/go" -) - -// ValueType is an enumeration of metric types that represent a simple value. -type ValueType int - -// Possible values for the ValueType enum. Use UntypedValue to mark a metric -// with an unknown type. -const ( - _ ValueType = iota - CounterValue - GaugeValue - UntypedValue -) - -// valueFunc is a generic metric for simple values retrieved on collect time -// from a function. It implements Metric and Collector. Its effective type is -// determined by ValueType. This is a low-level building block used by the -// library to back the implementations of CounterFunc, GaugeFunc, and -// UntypedFunc. -type valueFunc struct { - selfCollector - - desc *Desc - valType ValueType - function func() float64 - labelPairs []*dto.LabelPair -} - -// newValueFunc returns a newly allocated valueFunc with the given Desc and -// ValueType. The value reported is determined by calling the given function -// from within the Write method. Take into account that metric collection may -// happen concurrently. If that results in concurrent calls to Write, like in -// the case where a valueFunc is directly registered with Prometheus, the -// provided function must be concurrency-safe. -func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *valueFunc { - result := &valueFunc{ - desc: desc, - valType: valueType, - function: function, - labelPairs: MakeLabelPairs(desc, nil), - } - result.init(result) - return result -} - -func (v *valueFunc) Desc() *Desc { - return v.desc -} - -func (v *valueFunc) Write(out *dto.Metric) error { - return populateMetric(v.valType, v.function(), v.labelPairs, nil, out) -} - -// NewConstMetric returns a metric with one fixed value that cannot be -// changed. Users of this package will not have much use for it in regular -// operations. However, when implementing custom Collectors, it is useful as a -// throw-away metric that is generated on the fly to send it to Prometheus in -// the Collect method. NewConstMetric returns an error if the length of -// labelValues is not consistent with the variable labels in Desc or if Desc is -// invalid. 
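Const metrics are most useful inside a custom Collector, roughly as in this sketch; queueCollector and its queue_depth metric are invented for illustration:

```go
package main

import "github.com/prometheus/client_golang/prometheus"

// queueCollector is a hypothetical custom Collector; the depth would
// normally be fetched from some external system at collection time.
type queueCollector struct {
	depthDesc *prometheus.Desc
}

func (c *queueCollector) Describe(ch chan<- *prometheus.Desc) {
	ch <- c.depthDesc
}

func (c *queueCollector) Collect(ch chan<- prometheus.Metric) {
	// A throw-away metric built on the fly; 7 stands in for a real
	// measurement.
	ch <- prometheus.MustNewConstMetric(c.depthDesc, prometheus.GaugeValue, 7)
}

func main() {
	prometheus.MustRegister(&queueCollector{
		depthDesc: prometheus.NewDesc(
			"queue_depth", "Current depth of the work queue.", nil, nil,
		),
	})
}
```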
-func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) { - if desc.err != nil { - return nil, desc.err - } - if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { - return nil, err - } - return &constMetric{ - desc: desc, - valType: valueType, - val: value, - labelPairs: MakeLabelPairs(desc, labelValues), - }, nil -} - -// MustNewConstMetric is a version of NewConstMetric that panics where -// NewConstMetric would have returned an error. -func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) Metric { - m, err := NewConstMetric(desc, valueType, value, labelValues...) - if err != nil { - panic(err) - } - return m -} - -type constMetric struct { - desc *Desc - valType ValueType - val float64 - labelPairs []*dto.LabelPair -} - -func (m *constMetric) Desc() *Desc { - return m.desc -} - -func (m *constMetric) Write(out *dto.Metric) error { - return populateMetric(m.valType, m.val, m.labelPairs, nil, out) -} - -func populateMetric( - t ValueType, - v float64, - labelPairs []*dto.LabelPair, - e *dto.Exemplar, - m *dto.Metric, -) error { - m.Label = labelPairs - switch t { - case CounterValue: - m.Counter = &dto.Counter{Value: proto.Float64(v), Exemplar: e} - case GaugeValue: - m.Gauge = &dto.Gauge{Value: proto.Float64(v)} - case UntypedValue: - m.Untyped = &dto.Untyped{Value: proto.Float64(v)} - default: - return fmt.Errorf("encountered unknown type %v", t) - } - return nil -} - -// MakeLabelPairs is a helper function to create protobuf LabelPairs from the -// variable and constant labels in the provided Desc. The values for the -// variable labels are defined by the labelValues slice, which must be in the -// same order as the corresponding variable labels in the Desc. -// -// This function is only needed for custom Metric implementations. See MetricVec -// example. -func MakeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair { - totalLen := len(desc.variableLabels) + len(desc.constLabelPairs) - if totalLen == 0 { - // Super fast path. - return nil - } - if len(desc.variableLabels) == 0 { - // Moderately fast path. - return desc.constLabelPairs - } - labelPairs := make([]*dto.LabelPair, 0, totalLen) - for i, n := range desc.variableLabels { - labelPairs = append(labelPairs, &dto.LabelPair{ - Name: proto.String(n), - Value: proto.String(labelValues[i]), - }) - } - labelPairs = append(labelPairs, desc.constLabelPairs...) - sort.Sort(labelPairSorter(labelPairs)) - return labelPairs -} - -// ExemplarMaxRunes is the max total number of runes allowed in exemplar labels. -const ExemplarMaxRunes = 64 - -// newExemplar creates a new dto.Exemplar from the provided values. An error is -// returned if any of the label names or values are invalid or if the total -// number of runes in the label names and values exceeds ExemplarMaxRunes. 
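MakeLabelPairs, shown a little earlier, is the helper a custom Metric implementation would use to assemble its label pairs. A sketch with invented names:

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	desc := prometheus.NewDesc(
		"http_requests_total",
		"Requests by code and method.",
		[]string{"code", "method"},        // variable labels
		prometheus.Labels{"region": "eu"}, // const labels
	)
	// Values must be given in the same order as the variable labels.
	pairs := prometheus.MakeLabelPairs(desc, []string{"200", "GET"})
	for _, lp := range pairs {
		// Prints code, method, and region pairs, sorted by name.
		fmt.Println(lp.GetName(), "=", lp.GetValue())
	}
}
```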
-func newExemplar(value float64, ts time.Time, l Labels) (*dto.Exemplar, error) { - e := &dto.Exemplar{} - e.Value = proto.Float64(value) - tsProto, err := ptypes.TimestampProto(ts) - if err != nil { - return nil, err - } - e.Timestamp = tsProto - labelPairs := make([]*dto.LabelPair, 0, len(l)) - var runes int - for name, value := range l { - if !checkLabelName(name) { - return nil, fmt.Errorf("exemplar label name %q is invalid", name) - } - runes += utf8.RuneCountInString(name) - if !utf8.ValidString(value) { - return nil, fmt.Errorf("exemplar label value %q is not valid UTF-8", value) - } - runes += utf8.RuneCountInString(value) - labelPairs = append(labelPairs, &dto.LabelPair{ - Name: proto.String(name), - Value: proto.String(value), - }) - } - if runes > ExemplarMaxRunes { - return nil, fmt.Errorf("exemplar labels have %d runes, exceeding the limit of %d", runes, ExemplarMaxRunes) - } - e.Label = labelPairs - return e, nil -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go deleted file mode 100644 index 4ababe6c..00000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/vec.go +++ /dev/null @@ -1,556 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "fmt" - "sync" - - "github.com/prometheus/common/model" -) - -// MetricVec is a Collector to bundle metrics of the same name that differ in -// their label values. MetricVec is not used directly but as a building block -// for implementations of vectors of a given metric type, like GaugeVec, -// CounterVec, SummaryVec, and HistogramVec. It is exported so that it can be -// used for custom Metric implementations. -// -// To create a FooVec for custom Metric Foo, embed a pointer to MetricVec in -// FooVec and initialize it with NewMetricVec. Implement wrappers for -// GetMetricWithLabelValues and GetMetricWith that return (Foo, error) rather -// than (Metric, error). Similarly, create a wrapper for CurryWith that returns -// (*FooVec, error) rather than (*MetricVec, error). It is recommended to also -// add the convenience methods WithLabelValues, With, and MustCurryWith, which -// panic instead of returning errors. See also the MetricVec example. -type MetricVec struct { - *metricMap - - curry []curriedLabelValue - - // hashAdd and hashAddByte can be replaced for testing collision handling. - hashAdd func(h uint64, s string) uint64 - hashAddByte func(h uint64, b byte) uint64 -} - -// NewMetricVec returns an initialized metricVec. -func NewMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec { - return &MetricVec{ - metricMap: &metricMap{ - metrics: map[uint64][]metricWithLabelValues{}, - desc: desc, - newMetric: newMetric, - }, - hashAdd: hashAdd, - hashAddByte: hashAddByte, - } -} - -// DeleteLabelValues removes the metric where the variable labels are the same -// as those passed in as labels (same order as the VariableLabels in Desc). 
It
-// returns true if a metric was deleted.
-//
-// It is not an error if the number of label values is not the same as the
-// number of VariableLabels in Desc. However, such inconsistent label count can
-// never match an actual metric, so the method will always return false in that
-// case.
-//
-// Note that for more than one label value, this method is prone to mistakes
-// caused by an incorrect order of arguments. Consider Delete(Labels) as an
-// alternative to avoid that type of mistake. For higher label numbers, the
-// latter has a much more readable (albeit more verbose) syntax, but it comes
-// with a performance overhead (for creating and processing the Labels map).
-// See also the CounterVec example.
-func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
-	h, err := m.hashLabelValues(lvs)
-	if err != nil {
-		return false
-	}
-
-	return m.metricMap.deleteByHashWithLabelValues(h, lvs, m.curry)
-}
-
-// Delete deletes the metric where the variable labels are the same as those
-// passed in as labels. It returns true if a metric was deleted.
-//
-// It is not an error if the number and names of the Labels are inconsistent
-// with those of the VariableLabels in Desc. However, such inconsistent Labels
-// can never match an actual metric, so the method will always return false in
-// that case.
-//
-// This method is used for the same purpose as DeleteLabelValues(...string). See
-// there for pros and cons of the two methods.
-func (m *MetricVec) Delete(labels Labels) bool {
-	h, err := m.hashLabels(labels)
-	if err != nil {
-		return false
-	}
-
-	return m.metricMap.deleteByHashWithLabels(h, labels, m.curry)
-}
-
-// Without explicit forwarding of Describe, Collect, Reset, those methods won't
-// show up in GoDoc.
-
-// Describe implements Collector.
-func (m *MetricVec) Describe(ch chan<- *Desc) { m.metricMap.Describe(ch) }
-
-// Collect implements Collector.
-func (m *MetricVec) Collect(ch chan<- Metric) { m.metricMap.Collect(ch) }
-
-// Reset deletes all metrics in this vector.
-func (m *MetricVec) Reset() { m.metricMap.Reset() }
-
-// CurryWith returns a vector curried with the provided labels, i.e. the
-// returned vector has those labels pre-set for all labeled operations performed
-// on it. The cardinality of the curried vector is reduced accordingly. The
-// order of the remaining labels stays the same (just with the curried labels
-// taken out of the sequence – which is relevant for the
-// (GetMetric)WithLabelValues methods). It is possible to curry a curried
-// vector, but only with labels not yet used for currying before.
-//
-// The metrics contained in the MetricVec are shared between the curried and
-// uncurried vectors. They are just accessed differently. Curried and uncurried
-// vectors behave identically in terms of collection. Only one must be
-// registered with a given registry (usually the uncurried version). The Reset
-// method deletes all metrics, even if called on a curried vector.
-//
-// Note that CurryWith is usually not called directly but through a wrapper
-// around MetricVec, implementing a vector for a specific Metric
-// implementation, for example GaugeVec.
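The two deletion styles documented above, sketched against a GaugeVec; the metric and label names are illustrative:

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	temps := prometheus.NewGaugeVec(
		prometheus.GaugeOpts{Name: "pond_temperature_celsius", Help: "Temperature by pond."},
		[]string{"pond"},
	)
	temps.WithLabelValues("north").Set(19.5)

	// Positional form: values in Desc order.
	fmt.Println(temps.DeleteLabelValues("north")) // true: the series existed

	// Map form: order-independent, slightly more expensive.
	fmt.Println(temps.Delete(prometheus.Labels{"pond": "north"})) // false: already gone
}
```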
-func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) { - var ( - newCurry []curriedLabelValue - oldCurry = m.curry - iCurry int - ) - for i, label := range m.desc.variableLabels { - val, ok := labels[label] - if iCurry < len(oldCurry) && oldCurry[iCurry].index == i { - if ok { - return nil, fmt.Errorf("label name %q is already curried", label) - } - newCurry = append(newCurry, oldCurry[iCurry]) - iCurry++ - } else { - if !ok { - continue // Label stays uncurried. - } - newCurry = append(newCurry, curriedLabelValue{i, val}) - } - } - if l := len(oldCurry) + len(labels) - len(newCurry); l > 0 { - return nil, fmt.Errorf("%d unknown label(s) found during currying", l) - } - - return &MetricVec{ - metricMap: m.metricMap, - curry: newCurry, - hashAdd: m.hashAdd, - hashAddByte: m.hashAddByte, - }, nil -} - -// GetMetricWithLabelValues returns the Metric for the given slice of label -// values (same order as the variable labels in Desc). If that combination of -// label values is accessed for the first time, a new Metric is created (by -// calling the newMetric function provided during construction of the -// MetricVec). -// -// It is possible to call this method without using the returned Metric to only -// create the new Metric but leave it in its initial state. -// -// Keeping the Metric for later use is possible (and should be considered if -// performance is critical), but keep in mind that Reset, DeleteLabelValues and -// Delete can be used to delete the Metric from the MetricVec. In that case, the -// Metric will still exist, but it will not be exported anymore, even if a -// Metric with the same label values is created later. -// -// An error is returned if the number of label values is not the same as the -// number of variable labels in Desc (minus any curried labels). -// -// Note that for more than one label value, this method is prone to mistakes -// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as -// an alternative to avoid that type of mistake. For higher label numbers, the -// latter has a much more readable (albeit more verbose) syntax, but it comes -// with a performance overhead (for creating and processing the Labels map). -// -// Note that GetMetricWithLabelValues is usually not called directly but through -// a wrapper around MetricVec, implementing a vector for a specific Metric -// implementation, for example GaugeVec. -func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) { - h, err := m.hashLabelValues(lvs) - if err != nil { - return nil, err - } - - return m.metricMap.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil -} - -// GetMetricWith returns the Metric for the given Labels map (the label names -// must match those of the variable labels in Desc). If that label map is -// accessed for the first time, a new Metric is created. Implications of -// creating a Metric without using it and keeping the Metric for later use -// are the same as for GetMetricWithLabelValues. -// -// An error is returned if the number and names of the Labels are inconsistent -// with those of the variable labels in Desc (minus any curried labels). -// -// This method is used for the same purpose as -// GetMetricWithLabelValues(...string). See there for pros and cons of the two -// methods. -// -// Note that GetMetricWith is usually not called directly but through a wrapper -// around MetricVec, implementing a vector for a specific Metric implementation, -// for example GaugeVec. 
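As the docs note, these lookups are normally reached through a typed wrapper such as GaugeVec. A sketch contrasting the two styles while surfacing errors instead of panicking; names are illustrative:

```go
package main

import (
	"log"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	v := prometheus.NewGaugeVec(
		prometheus.GaugeOpts{Name: "jobs_inflight", Help: "In-flight jobs per queue."},
		[]string{"queue"},
	)

	g, err := v.GetMetricWithLabelValues("emails") // positional
	if err != nil {
		log.Fatal(err) // e.g. wrong number of label values
	}
	g.Set(3)

	g2, err := v.GetMetricWith(prometheus.Labels{"queue": "emails"}) // by name
	if err != nil {
		log.Fatal(err)
	}
	g2.Inc()
}
```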
-func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) { - h, err := m.hashLabels(labels) - if err != nil { - return nil, err - } - - return m.metricMap.getOrCreateMetricWithLabels(h, labels, m.curry), nil -} - -func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) { - if err := validateLabelValues(vals, len(m.desc.variableLabels)-len(m.curry)); err != nil { - return 0, err - } - - var ( - h = hashNew() - curry = m.curry - iVals, iCurry int - ) - for i := 0; i < len(m.desc.variableLabels); i++ { - if iCurry < len(curry) && curry[iCurry].index == i { - h = m.hashAdd(h, curry[iCurry].value) - iCurry++ - } else { - h = m.hashAdd(h, vals[iVals]) - iVals++ - } - h = m.hashAddByte(h, model.SeparatorByte) - } - return h, nil -} - -func (m *MetricVec) hashLabels(labels Labels) (uint64, error) { - if err := validateValuesInLabels(labels, len(m.desc.variableLabels)-len(m.curry)); err != nil { - return 0, err - } - - var ( - h = hashNew() - curry = m.curry - iCurry int - ) - for i, label := range m.desc.variableLabels { - val, ok := labels[label] - if iCurry < len(curry) && curry[iCurry].index == i { - if ok { - return 0, fmt.Errorf("label name %q is already curried", label) - } - h = m.hashAdd(h, curry[iCurry].value) - iCurry++ - } else { - if !ok { - return 0, fmt.Errorf("label name %q missing in label map", label) - } - h = m.hashAdd(h, val) - } - h = m.hashAddByte(h, model.SeparatorByte) - } - return h, nil -} - -// metricWithLabelValues provides the metric and its label values for -// disambiguation on hash collision. -type metricWithLabelValues struct { - values []string - metric Metric -} - -// curriedLabelValue sets the curried value for a label at the given index. -type curriedLabelValue struct { - index int - value string -} - -// metricMap is a helper for metricVec and shared between differently curried -// metricVecs. -type metricMap struct { - mtx sync.RWMutex // Protects metrics. - metrics map[uint64][]metricWithLabelValues - desc *Desc - newMetric func(labelValues ...string) Metric -} - -// Describe implements Collector. It will send exactly one Desc to the provided -// channel. -func (m *metricMap) Describe(ch chan<- *Desc) { - ch <- m.desc -} - -// Collect implements Collector. -func (m *metricMap) Collect(ch chan<- Metric) { - m.mtx.RLock() - defer m.mtx.RUnlock() - - for _, metrics := range m.metrics { - for _, metric := range metrics { - ch <- metric.metric - } - } -} - -// Reset deletes all metrics in this vector. -func (m *metricMap) Reset() { - m.mtx.Lock() - defer m.mtx.Unlock() - - for h := range m.metrics { - delete(m.metrics, h) - } -} - -// deleteByHashWithLabelValues removes the metric from the hash bucket h. If -// there are multiple matches in the bucket, use lvs to select a metric and -// remove only that metric. -func (m *metricMap) deleteByHashWithLabelValues( - h uint64, lvs []string, curry []curriedLabelValue, -) bool { - m.mtx.Lock() - defer m.mtx.Unlock() - - metrics, ok := m.metrics[h] - if !ok { - return false - } - - i := findMetricWithLabelValues(metrics, lvs, curry) - if i >= len(metrics) { - return false - } - - if len(metrics) > 1 { - old := metrics - m.metrics[h] = append(metrics[:i], metrics[i+1:]...) - old[len(old)-1] = metricWithLabelValues{} - } else { - delete(m.metrics, h) - } - return true -} - -// deleteByHashWithLabels removes the metric from the hash bucket h. If there -// are multiple matches in the bucket, use lvs to select a metric and remove -// only that metric. 
-func (m *metricMap) deleteByHashWithLabels( - h uint64, labels Labels, curry []curriedLabelValue, -) bool { - m.mtx.Lock() - defer m.mtx.Unlock() - - metrics, ok := m.metrics[h] - if !ok { - return false - } - i := findMetricWithLabels(m.desc, metrics, labels, curry) - if i >= len(metrics) { - return false - } - - if len(metrics) > 1 { - old := metrics - m.metrics[h] = append(metrics[:i], metrics[i+1:]...) - old[len(old)-1] = metricWithLabelValues{} - } else { - delete(m.metrics, h) - } - return true -} - -// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value -// or creates it and returns the new one. -// -// This function holds the mutex. -func (m *metricMap) getOrCreateMetricWithLabelValues( - hash uint64, lvs []string, curry []curriedLabelValue, -) Metric { - m.mtx.RLock() - metric, ok := m.getMetricWithHashAndLabelValues(hash, lvs, curry) - m.mtx.RUnlock() - if ok { - return metric - } - - m.mtx.Lock() - defer m.mtx.Unlock() - metric, ok = m.getMetricWithHashAndLabelValues(hash, lvs, curry) - if !ok { - inlinedLVs := inlineLabelValues(lvs, curry) - metric = m.newMetric(inlinedLVs...) - m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: inlinedLVs, metric: metric}) - } - return metric -} - -// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value -// or creates it and returns the new one. -// -// This function holds the mutex. -func (m *metricMap) getOrCreateMetricWithLabels( - hash uint64, labels Labels, curry []curriedLabelValue, -) Metric { - m.mtx.RLock() - metric, ok := m.getMetricWithHashAndLabels(hash, labels, curry) - m.mtx.RUnlock() - if ok { - return metric - } - - m.mtx.Lock() - defer m.mtx.Unlock() - metric, ok = m.getMetricWithHashAndLabels(hash, labels, curry) - if !ok { - lvs := extractLabelValues(m.desc, labels, curry) - metric = m.newMetric(lvs...) - m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: lvs, metric: metric}) - } - return metric -} - -// getMetricWithHashAndLabelValues gets a metric while handling possible -// collisions in the hash space. Must be called while holding the read mutex. -func (m *metricMap) getMetricWithHashAndLabelValues( - h uint64, lvs []string, curry []curriedLabelValue, -) (Metric, bool) { - metrics, ok := m.metrics[h] - if ok { - if i := findMetricWithLabelValues(metrics, lvs, curry); i < len(metrics) { - return metrics[i].metric, true - } - } - return nil, false -} - -// getMetricWithHashAndLabels gets a metric while handling possible collisions in -// the hash space. Must be called while holding read mutex. -func (m *metricMap) getMetricWithHashAndLabels( - h uint64, labels Labels, curry []curriedLabelValue, -) (Metric, bool) { - metrics, ok := m.metrics[h] - if ok { - if i := findMetricWithLabels(m.desc, metrics, labels, curry); i < len(metrics) { - return metrics[i].metric, true - } - } - return nil, false -} - -// findMetricWithLabelValues returns the index of the matching metric or -// len(metrics) if not found. -func findMetricWithLabelValues( - metrics []metricWithLabelValues, lvs []string, curry []curriedLabelValue, -) int { - for i, metric := range metrics { - if matchLabelValues(metric.values, lvs, curry) { - return i - } - } - return len(metrics) -} - -// findMetricWithLabels returns the index of the matching metric or len(metrics) -// if not found. 
-func findMetricWithLabels( - desc *Desc, metrics []metricWithLabelValues, labels Labels, curry []curriedLabelValue, -) int { - for i, metric := range metrics { - if matchLabels(desc, metric.values, labels, curry) { - return i - } - } - return len(metrics) -} - -func matchLabelValues(values []string, lvs []string, curry []curriedLabelValue) bool { - if len(values) != len(lvs)+len(curry) { - return false - } - var iLVs, iCurry int - for i, v := range values { - if iCurry < len(curry) && curry[iCurry].index == i { - if v != curry[iCurry].value { - return false - } - iCurry++ - continue - } - if v != lvs[iLVs] { - return false - } - iLVs++ - } - return true -} - -func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool { - if len(values) != len(labels)+len(curry) { - return false - } - iCurry := 0 - for i, k := range desc.variableLabels { - if iCurry < len(curry) && curry[iCurry].index == i { - if values[i] != curry[iCurry].value { - return false - } - iCurry++ - continue - } - if values[i] != labels[k] { - return false - } - } - return true -} - -func extractLabelValues(desc *Desc, labels Labels, curry []curriedLabelValue) []string { - labelValues := make([]string, len(labels)+len(curry)) - iCurry := 0 - for i, k := range desc.variableLabels { - if iCurry < len(curry) && curry[iCurry].index == i { - labelValues[i] = curry[iCurry].value - iCurry++ - continue - } - labelValues[i] = labels[k] - } - return labelValues -} - -func inlineLabelValues(lvs []string, curry []curriedLabelValue) []string { - labelValues := make([]string, len(lvs)+len(curry)) - var iCurry, iLVs int - for i := range labelValues { - if iCurry < len(curry) && curry[iCurry].index == i { - labelValues[i] = curry[iCurry].value - iCurry++ - continue - } - labelValues[i] = lvs[iLVs] - iLVs++ - } - return labelValues -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go deleted file mode 100644 index 74ee9328..00000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go +++ /dev/null @@ -1,214 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "fmt" - "sort" - - //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. - "github.com/golang/protobuf/proto" - - dto "github.com/prometheus/client_model/go" -) - -// WrapRegistererWith returns a Registerer wrapping the provided -// Registerer. Collectors registered with the returned Registerer will be -// registered with the wrapped Registerer in a modified way. The modified -// Collector adds the provided Labels to all Metrics it collects (as -// ConstLabels). The Metrics collected by the unmodified Collector must not -// duplicate any of those labels. Wrapping a nil value is valid, resulting -// in a no-op Registerer. -// -// WrapRegistererWith provides a way to add fixed labels to a subset of -// Collectors. 
It should not be used to add fixed labels to all metrics
-// exposed. See also
-// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels
-//
-// Conflicts between Collectors registered through the original Registerer with
-// Collectors registered through the wrapping Registerer will still be
-// detected. Any AlreadyRegisteredError returned by the Register method of
-// either Registerer will contain the ExistingCollector in the form it was
-// provided to the respective registry.
-//
-// The Collector example demonstrates a use of WrapRegistererWith.
-func WrapRegistererWith(labels Labels, reg Registerer) Registerer {
-	return &wrappingRegisterer{
-		wrappedRegisterer: reg,
-		labels:            labels,
-	}
-}
-
-// WrapRegistererWithPrefix returns a Registerer wrapping the provided
-// Registerer. Collectors registered with the returned Registerer will be
-// registered with the wrapped Registerer in a modified way. The modified
-// Collector adds the provided prefix to the name of all Metrics it collects.
-// Wrapping a nil value is valid, resulting in a no-op Registerer.
-//
-// WrapRegistererWithPrefix is useful to have one place to prefix all metrics of
-// a sub-system. To make this work, register metrics of the sub-system with the
-// wrapping Registerer returned by WrapRegistererWithPrefix. It is rarely useful
-// to use the same prefix for all metrics exposed. In particular, do not prefix
-// metric names that are standardized across applications, as that would break
-// horizontal monitoring, for example the metrics provided by the Go collector
-// (see NewGoCollector) and the process collector (see NewProcessCollector). (In
-// fact, those metrics are already prefixed with “go_” or “process_”,
-// respectively.)
-//
-// Conflicts between Collectors registered through the original Registerer with
-// Collectors registered through the wrapping Registerer will still be
-// detected. Any AlreadyRegisteredError returned by the Register method of
-// either Registerer will contain the ExistingCollector in the form it was
-// provided to the respective registry.
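The two wrappers compose, as in this sketch; the registry, label, and prefix are illustrative:

```go
package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	reg := prometheus.NewRegistry()

	// Every Collector registered through sub gets an extra const label.
	sub := prometheus.WrapRegistererWith(prometheus.Labels{"component": "cache"}, reg)

	// ...and, stacked on top, a sub-system prefix.
	prefixed := prometheus.WrapRegistererWithPrefix("myapp_", sub)

	hits := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "hits_total", Help: "Cache hits.",
	})
	// Exposed as myapp_hits_total{component="cache"}.
	prefixed.MustRegister(hits)
}
```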
-func WrapRegistererWithPrefix(prefix string, reg Registerer) Registerer {
-	return &wrappingRegisterer{
-		wrappedRegisterer: reg,
-		prefix:            prefix,
-	}
-}
-
-type wrappingRegisterer struct {
-	wrappedRegisterer Registerer
-	prefix            string
-	labels            Labels
-}
-
-func (r *wrappingRegisterer) Register(c Collector) error {
-	if r.wrappedRegisterer == nil {
-		return nil
-	}
-	return r.wrappedRegisterer.Register(&wrappingCollector{
-		wrappedCollector: c,
-		prefix:           r.prefix,
-		labels:           r.labels,
-	})
-}
-
-func (r *wrappingRegisterer) MustRegister(cs ...Collector) {
-	if r.wrappedRegisterer == nil {
-		return
-	}
-	for _, c := range cs {
-		if err := r.Register(c); err != nil {
-			panic(err)
-		}
-	}
-}
-
-func (r *wrappingRegisterer) Unregister(c Collector) bool {
-	if r.wrappedRegisterer == nil {
-		return false
-	}
-	return r.wrappedRegisterer.Unregister(&wrappingCollector{
-		wrappedCollector: c,
-		prefix:           r.prefix,
-		labels:           r.labels,
-	})
-}
-
-type wrappingCollector struct {
-	wrappedCollector Collector
-	prefix           string
-	labels           Labels
-}
-
-func (c *wrappingCollector) Collect(ch chan<- Metric) {
-	wrappedCh := make(chan Metric)
-	go func() {
-		c.wrappedCollector.Collect(wrappedCh)
-		close(wrappedCh)
-	}()
-	for m := range wrappedCh {
-		ch <- &wrappingMetric{
-			wrappedMetric: m,
-			prefix:        c.prefix,
-			labels:        c.labels,
-		}
-	}
-}
-
-func (c *wrappingCollector) Describe(ch chan<- *Desc) {
-	wrappedCh := make(chan *Desc)
-	go func() {
-		c.wrappedCollector.Describe(wrappedCh)
-		close(wrappedCh)
-	}()
-	for desc := range wrappedCh {
-		ch <- wrapDesc(desc, c.prefix, c.labels)
-	}
-}
-
-func (c *wrappingCollector) unwrapRecursively() Collector {
-	switch wc := c.wrappedCollector.(type) {
-	case *wrappingCollector:
-		return wc.unwrapRecursively()
-	default:
-		return wc
-	}
-}
-
-type wrappingMetric struct {
-	wrappedMetric Metric
-	prefix        string
-	labels        Labels
-}
-
-func (m *wrappingMetric) Desc() *Desc {
-	return wrapDesc(m.wrappedMetric.Desc(), m.prefix, m.labels)
-}
-
-func (m *wrappingMetric) Write(out *dto.Metric) error {
-	if err := m.wrappedMetric.Write(out); err != nil {
-		return err
-	}
-	if len(m.labels) == 0 {
-		// No wrapping labels.
-		return nil
-	}
-	for ln, lv := range m.labels {
-		out.Label = append(out.Label, &dto.LabelPair{
-			Name:  proto.String(ln),
-			Value: proto.String(lv),
-		})
-	}
-	sort.Sort(labelPairSorter(out.Label))
-	return nil
-}
-
-func wrapDesc(desc *Desc, prefix string, labels Labels) *Desc {
-	constLabels := Labels{}
-	for _, lp := range desc.constLabelPairs {
-		constLabels[*lp.Name] = *lp.Value
-	}
-	for ln, lv := range labels {
-		if _, alreadyUsed := constLabels[ln]; alreadyUsed {
-			return &Desc{
-				fqName:          desc.fqName,
-				help:            desc.help,
-				variableLabels:  desc.variableLabels,
-				constLabelPairs: desc.constLabelPairs,
-				err:             fmt.Errorf("attempted wrapping with already existing label name %q", ln),
-			}
-		}
-		constLabels[ln] = lv
-	}
-	// NewDesc will do remaining validations.
-	newDesc := NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels)
-	// Propagate errors if there were any. This will override any error
-	// created by NewDesc above, i.e. earlier errors get precedence.
- if desc.err != nil { - newDesc.err = desc.err - } - return newDesc -} diff --git a/vendor/github.com/prometheus/client_model/LICENSE b/vendor/github.com/prometheus/client_model/LICENSE deleted file mode 100644 index 261eeb9e..00000000 --- a/vendor/github.com/prometheus/client_model/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. 
We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/prometheus/client_model/NOTICE b/vendor/github.com/prometheus/client_model/NOTICE deleted file mode 100644 index 20110e41..00000000 --- a/vendor/github.com/prometheus/client_model/NOTICE +++ /dev/null @@ -1,5 +0,0 @@ -Data model artifacts for Prometheus. -Copyright 2012-2015 The Prometheus Authors - -This product includes software developed at -SoundCloud Ltd. (http://soundcloud.com/). diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go deleted file mode 100644 index 2f4930d9..00000000 --- a/vendor/github.com/prometheus/client_model/go/metrics.pb.go +++ /dev/null @@ -1,723 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: metrics.proto - -package io_prometheus_client - -import ( - fmt "fmt" - proto "github.com/golang/protobuf/proto" - timestamp "github.com/golang/protobuf/ptypes/timestamp" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -type MetricType int32 - -const ( - MetricType_COUNTER MetricType = 0 - MetricType_GAUGE MetricType = 1 - MetricType_SUMMARY MetricType = 2 - MetricType_UNTYPED MetricType = 3 - MetricType_HISTOGRAM MetricType = 4 -) - -var MetricType_name = map[int32]string{ - 0: "COUNTER", - 1: "GAUGE", - 2: "SUMMARY", - 3: "UNTYPED", - 4: "HISTOGRAM", -} - -var MetricType_value = map[string]int32{ - "COUNTER": 0, - "GAUGE": 1, - "SUMMARY": 2, - "UNTYPED": 3, - "HISTOGRAM": 4, -} - -func (x MetricType) Enum() *MetricType { - p := new(MetricType) - *p = x - return p -} - -func (x MetricType) String() string { - return proto.EnumName(MetricType_name, int32(x)) -} - -func (x *MetricType) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(MetricType_value, data, "MetricType") - if err != nil { - return err - } - *x = MetricType(value) - return nil -} - -func (MetricType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{0} -} - -type LabelPair struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LabelPair) Reset() { *m = LabelPair{} } -func (m *LabelPair) String() string { return proto.CompactTextString(m) } -func (*LabelPair) ProtoMessage() {} -func (*LabelPair) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{0} -} - -func (m *LabelPair) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LabelPair.Unmarshal(m, b) -} -func (m *LabelPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LabelPair.Marshal(b, m, deterministic) -} -func (m *LabelPair) XXX_Merge(src proto.Message) { - xxx_messageInfo_LabelPair.Merge(m, src) -} -func (m *LabelPair) XXX_Size() int { - return xxx_messageInfo_LabelPair.Size(m) -} -func (m *LabelPair) XXX_DiscardUnknown() { - xxx_messageInfo_LabelPair.DiscardUnknown(m) -} - -var xxx_messageInfo_LabelPair proto.InternalMessageInfo - -func (m *LabelPair) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *LabelPair) GetValue() string { - if m != nil && m.Value != nil { - return *m.Value - } - return "" -} - -type Gauge struct { - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Gauge) Reset() { *m = Gauge{} } -func (m *Gauge) String() string { return proto.CompactTextString(m) } -func (*Gauge) ProtoMessage() {} -func (*Gauge) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{1} -} - -func (m *Gauge) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Gauge.Unmarshal(m, b) -} -func (m *Gauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Gauge.Marshal(b, m, deterministic) -} -func (m *Gauge) XXX_Merge(src proto.Message) { - xxx_messageInfo_Gauge.Merge(m, src) -} -func (m *Gauge) XXX_Size() int { - return xxx_messageInfo_Gauge.Size(m) -} -func (m *Gauge) XXX_DiscardUnknown() { - xxx_messageInfo_Gauge.DiscardUnknown(m) -} - -var xxx_messageInfo_Gauge proto.InternalMessageInfo - -func (m *Gauge) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value - } - return 0 -} - 
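
Note that the generated getters deleted here are nil-safe: a nil receiver or unset field yields the zero value, so decoded messages can be traversed without intermediate nil checks. A hypothetical sketch of that property (not part of this diff; the metric name is invented):

    package main

    import (
        "fmt"

        "github.com/golang/protobuf/proto"
        dto "github.com/prometheus/client_model/go"
    )

    func main() {
        mf := &dto.MetricFamily{
            Name: proto.String("http_requests_total"),
            Type: dto.MetricType_COUNTER.Enum(),
        }
        // GetMetric on an empty family returns nil, and GetCounter/GetValue
        // tolerate nil receivers, so this loop is safe as written.
        for _, m := range mf.GetMetric() {
            fmt.Println(m.GetCounter().GetValue())
        }
        fmt.Println(mf.GetName(), mf.GetType())
    }
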
-type Counter struct { - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` - Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar" json:"exemplar,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Counter) Reset() { *m = Counter{} } -func (m *Counter) String() string { return proto.CompactTextString(m) } -func (*Counter) ProtoMessage() {} -func (*Counter) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{2} -} - -func (m *Counter) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Counter.Unmarshal(m, b) -} -func (m *Counter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Counter.Marshal(b, m, deterministic) -} -func (m *Counter) XXX_Merge(src proto.Message) { - xxx_messageInfo_Counter.Merge(m, src) -} -func (m *Counter) XXX_Size() int { - return xxx_messageInfo_Counter.Size(m) -} -func (m *Counter) XXX_DiscardUnknown() { - xxx_messageInfo_Counter.DiscardUnknown(m) -} - -var xxx_messageInfo_Counter proto.InternalMessageInfo - -func (m *Counter) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value - } - return 0 -} - -func (m *Counter) GetExemplar() *Exemplar { - if m != nil { - return m.Exemplar - } - return nil -} - -type Quantile struct { - Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"` - Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Quantile) Reset() { *m = Quantile{} } -func (m *Quantile) String() string { return proto.CompactTextString(m) } -func (*Quantile) ProtoMessage() {} -func (*Quantile) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{3} -} - -func (m *Quantile) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Quantile.Unmarshal(m, b) -} -func (m *Quantile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Quantile.Marshal(b, m, deterministic) -} -func (m *Quantile) XXX_Merge(src proto.Message) { - xxx_messageInfo_Quantile.Merge(m, src) -} -func (m *Quantile) XXX_Size() int { - return xxx_messageInfo_Quantile.Size(m) -} -func (m *Quantile) XXX_DiscardUnknown() { - xxx_messageInfo_Quantile.DiscardUnknown(m) -} - -var xxx_messageInfo_Quantile proto.InternalMessageInfo - -func (m *Quantile) GetQuantile() float64 { - if m != nil && m.Quantile != nil { - return *m.Quantile - } - return 0 -} - -func (m *Quantile) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value - } - return 0 -} - -type Summary struct { - SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` - SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` - Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Summary) Reset() { *m = Summary{} } -func (m *Summary) String() string { return proto.CompactTextString(m) } -func (*Summary) ProtoMessage() {} -func (*Summary) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{4} -} - -func (m *Summary) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Summary.Unmarshal(m, b) -} -func (m 
*Summary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Summary.Marshal(b, m, deterministic) -} -func (m *Summary) XXX_Merge(src proto.Message) { - xxx_messageInfo_Summary.Merge(m, src) -} -func (m *Summary) XXX_Size() int { - return xxx_messageInfo_Summary.Size(m) -} -func (m *Summary) XXX_DiscardUnknown() { - xxx_messageInfo_Summary.DiscardUnknown(m) -} - -var xxx_messageInfo_Summary proto.InternalMessageInfo - -func (m *Summary) GetSampleCount() uint64 { - if m != nil && m.SampleCount != nil { - return *m.SampleCount - } - return 0 -} - -func (m *Summary) GetSampleSum() float64 { - if m != nil && m.SampleSum != nil { - return *m.SampleSum - } - return 0 -} - -func (m *Summary) GetQuantile() []*Quantile { - if m != nil { - return m.Quantile - } - return nil -} - -type Untyped struct { - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Untyped) Reset() { *m = Untyped{} } -func (m *Untyped) String() string { return proto.CompactTextString(m) } -func (*Untyped) ProtoMessage() {} -func (*Untyped) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{5} -} - -func (m *Untyped) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Untyped.Unmarshal(m, b) -} -func (m *Untyped) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Untyped.Marshal(b, m, deterministic) -} -func (m *Untyped) XXX_Merge(src proto.Message) { - xxx_messageInfo_Untyped.Merge(m, src) -} -func (m *Untyped) XXX_Size() int { - return xxx_messageInfo_Untyped.Size(m) -} -func (m *Untyped) XXX_DiscardUnknown() { - xxx_messageInfo_Untyped.DiscardUnknown(m) -} - -var xxx_messageInfo_Untyped proto.InternalMessageInfo - -func (m *Untyped) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value - } - return 0 -} - -type Histogram struct { - SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` - SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` - Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Histogram) Reset() { *m = Histogram{} } -func (m *Histogram) String() string { return proto.CompactTextString(m) } -func (*Histogram) ProtoMessage() {} -func (*Histogram) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{6} -} - -func (m *Histogram) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Histogram.Unmarshal(m, b) -} -func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Histogram.Marshal(b, m, deterministic) -} -func (m *Histogram) XXX_Merge(src proto.Message) { - xxx_messageInfo_Histogram.Merge(m, src) -} -func (m *Histogram) XXX_Size() int { - return xxx_messageInfo_Histogram.Size(m) -} -func (m *Histogram) XXX_DiscardUnknown() { - xxx_messageInfo_Histogram.DiscardUnknown(m) -} - -var xxx_messageInfo_Histogram proto.InternalMessageInfo - -func (m *Histogram) GetSampleCount() uint64 { - if m != nil && m.SampleCount != nil { - return *m.SampleCount - } - return 0 -} - -func (m *Histogram) GetSampleSum() float64 { - if m != nil && m.SampleSum != nil { - return *m.SampleSum - } - return 0 -} - -func (m *Histogram) 
GetBucket() []*Bucket { - if m != nil { - return m.Bucket - } - return nil -} - -type Bucket struct { - CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"` - UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"` - Exemplar *Exemplar `protobuf:"bytes,3,opt,name=exemplar" json:"exemplar,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Bucket) Reset() { *m = Bucket{} } -func (m *Bucket) String() string { return proto.CompactTextString(m) } -func (*Bucket) ProtoMessage() {} -func (*Bucket) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{7} -} - -func (m *Bucket) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Bucket.Unmarshal(m, b) -} -func (m *Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Bucket.Marshal(b, m, deterministic) -} -func (m *Bucket) XXX_Merge(src proto.Message) { - xxx_messageInfo_Bucket.Merge(m, src) -} -func (m *Bucket) XXX_Size() int { - return xxx_messageInfo_Bucket.Size(m) -} -func (m *Bucket) XXX_DiscardUnknown() { - xxx_messageInfo_Bucket.DiscardUnknown(m) -} - -var xxx_messageInfo_Bucket proto.InternalMessageInfo - -func (m *Bucket) GetCumulativeCount() uint64 { - if m != nil && m.CumulativeCount != nil { - return *m.CumulativeCount - } - return 0 -} - -func (m *Bucket) GetUpperBound() float64 { - if m != nil && m.UpperBound != nil { - return *m.UpperBound - } - return 0 -} - -func (m *Bucket) GetExemplar() *Exemplar { - if m != nil { - return m.Exemplar - } - return nil -} - -type Exemplar struct { - Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` - Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` - Timestamp *timestamp.Timestamp `protobuf:"bytes,3,opt,name=timestamp" json:"timestamp,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Exemplar) Reset() { *m = Exemplar{} } -func (m *Exemplar) String() string { return proto.CompactTextString(m) } -func (*Exemplar) ProtoMessage() {} -func (*Exemplar) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{8} -} - -func (m *Exemplar) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Exemplar.Unmarshal(m, b) -} -func (m *Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Exemplar.Marshal(b, m, deterministic) -} -func (m *Exemplar) XXX_Merge(src proto.Message) { - xxx_messageInfo_Exemplar.Merge(m, src) -} -func (m *Exemplar) XXX_Size() int { - return xxx_messageInfo_Exemplar.Size(m) -} -func (m *Exemplar) XXX_DiscardUnknown() { - xxx_messageInfo_Exemplar.DiscardUnknown(m) -} - -var xxx_messageInfo_Exemplar proto.InternalMessageInfo - -func (m *Exemplar) GetLabel() []*LabelPair { - if m != nil { - return m.Label - } - return nil -} - -func (m *Exemplar) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value - } - return 0 -} - -func (m *Exemplar) GetTimestamp() *timestamp.Timestamp { - if m != nil { - return m.Timestamp - } - return nil -} - -type Metric struct { - Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` - Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"` - Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"` 
- Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"` - Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"` - Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"` - TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Metric) Reset() { *m = Metric{} } -func (m *Metric) String() string { return proto.CompactTextString(m) } -func (*Metric) ProtoMessage() {} -func (*Metric) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{9} -} - -func (m *Metric) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Metric.Unmarshal(m, b) -} -func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Metric.Marshal(b, m, deterministic) -} -func (m *Metric) XXX_Merge(src proto.Message) { - xxx_messageInfo_Metric.Merge(m, src) -} -func (m *Metric) XXX_Size() int { - return xxx_messageInfo_Metric.Size(m) -} -func (m *Metric) XXX_DiscardUnknown() { - xxx_messageInfo_Metric.DiscardUnknown(m) -} - -var xxx_messageInfo_Metric proto.InternalMessageInfo - -func (m *Metric) GetLabel() []*LabelPair { - if m != nil { - return m.Label - } - return nil -} - -func (m *Metric) GetGauge() *Gauge { - if m != nil { - return m.Gauge - } - return nil -} - -func (m *Metric) GetCounter() *Counter { - if m != nil { - return m.Counter - } - return nil -} - -func (m *Metric) GetSummary() *Summary { - if m != nil { - return m.Summary - } - return nil -} - -func (m *Metric) GetUntyped() *Untyped { - if m != nil { - return m.Untyped - } - return nil -} - -func (m *Metric) GetHistogram() *Histogram { - if m != nil { - return m.Histogram - } - return nil -} - -func (m *Metric) GetTimestampMs() int64 { - if m != nil && m.TimestampMs != nil { - return *m.TimestampMs - } - return 0 -} - -type MetricFamily struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"` - Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"` - Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MetricFamily) Reset() { *m = MetricFamily{} } -func (m *MetricFamily) String() string { return proto.CompactTextString(m) } -func (*MetricFamily) ProtoMessage() {} -func (*MetricFamily) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{10} -} - -func (m *MetricFamily) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MetricFamily.Unmarshal(m, b) -} -func (m *MetricFamily) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MetricFamily.Marshal(b, m, deterministic) -} -func (m *MetricFamily) XXX_Merge(src proto.Message) { - xxx_messageInfo_MetricFamily.Merge(m, src) -} -func (m *MetricFamily) XXX_Size() int { - return xxx_messageInfo_MetricFamily.Size(m) -} -func (m *MetricFamily) XXX_DiscardUnknown() { - xxx_messageInfo_MetricFamily.DiscardUnknown(m) -} - -var xxx_messageInfo_MetricFamily proto.InternalMessageInfo - -func (m *MetricFamily) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *MetricFamily) GetHelp() 
string { - if m != nil && m.Help != nil { - return *m.Help - } - return "" -} - -func (m *MetricFamily) GetType() MetricType { - if m != nil && m.Type != nil { - return *m.Type - } - return MetricType_COUNTER -} - -func (m *MetricFamily) GetMetric() []*Metric { - if m != nil { - return m.Metric - } - return nil -} - -func init() { - proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value) - proto.RegisterType((*LabelPair)(nil), "io.prometheus.client.LabelPair") - proto.RegisterType((*Gauge)(nil), "io.prometheus.client.Gauge") - proto.RegisterType((*Counter)(nil), "io.prometheus.client.Counter") - proto.RegisterType((*Quantile)(nil), "io.prometheus.client.Quantile") - proto.RegisterType((*Summary)(nil), "io.prometheus.client.Summary") - proto.RegisterType((*Untyped)(nil), "io.prometheus.client.Untyped") - proto.RegisterType((*Histogram)(nil), "io.prometheus.client.Histogram") - proto.RegisterType((*Bucket)(nil), "io.prometheus.client.Bucket") - proto.RegisterType((*Exemplar)(nil), "io.prometheus.client.Exemplar") - proto.RegisterType((*Metric)(nil), "io.prometheus.client.Metric") - proto.RegisterType((*MetricFamily)(nil), "io.prometheus.client.MetricFamily") -} - -func init() { proto.RegisterFile("metrics.proto", fileDescriptor_6039342a2ba47b72) } - -var fileDescriptor_6039342a2ba47b72 = []byte{ - // 665 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0xcd, 0x6e, 0xd3, 0x4c, - 0x14, 0xfd, 0xdc, 0x38, 0x3f, 0xbe, 0x69, 0x3f, 0xa2, 0x51, 0x17, 0x56, 0xa1, 0x24, 0x78, 0x55, - 0x58, 0x38, 0xa2, 0x6a, 0x05, 0x2a, 0xb0, 0x68, 0x4b, 0x48, 0x91, 0x48, 0x5b, 0x26, 0xc9, 0xa2, - 0xb0, 0x88, 0x1c, 0x77, 0x70, 0x2c, 0x3c, 0xb1, 0xb1, 0x67, 0x2a, 0xb2, 0x66, 0xc1, 0x16, 0x5e, - 0x81, 0x17, 0x05, 0xcd, 0x8f, 0x6d, 0x2a, 0xb9, 0x95, 0x40, 0xec, 0x66, 0xee, 0x3d, 0xe7, 0xfa, - 0xcc, 0xf8, 0x9c, 0x81, 0x0d, 0x4a, 0x58, 0x1a, 0xfa, 0x99, 0x9b, 0xa4, 0x31, 0x8b, 0xd1, 0x66, - 0x18, 0x8b, 0x15, 0x25, 0x6c, 0x41, 0x78, 0xe6, 0xfa, 0x51, 0x48, 0x96, 0x6c, 0xab, 0x1b, 0xc4, - 0x71, 0x10, 0x91, 0xbe, 0xc4, 0xcc, 0xf9, 0x87, 0x3e, 0x0b, 0x29, 0xc9, 0x98, 0x47, 0x13, 0x45, - 0x73, 0xf6, 0xc1, 0x7a, 0xe3, 0xcd, 0x49, 0x74, 0xee, 0x85, 0x29, 0x42, 0x60, 0x2e, 0x3d, 0x4a, - 0x6c, 0xa3, 0x67, 0xec, 0x58, 0x58, 0xae, 0xd1, 0x26, 0xd4, 0xaf, 0xbc, 0x88, 0x13, 0x7b, 0x4d, - 0x16, 0xd5, 0xc6, 0xd9, 0x86, 0xfa, 0xd0, 0xe3, 0xc1, 0x6f, 0x6d, 0xc1, 0x31, 0xf2, 0xf6, 0x7b, - 0x68, 0x1e, 0xc7, 0x7c, 0xc9, 0x48, 0x5a, 0x0d, 0x40, 0x07, 0xd0, 0x22, 0x9f, 0x09, 0x4d, 0x22, - 0x2f, 0x95, 0x83, 0xdb, 0xbb, 0xf7, 0xdd, 0xaa, 0x03, 0xb8, 0x03, 0x8d, 0xc2, 0x05, 0xde, 0x79, - 0x0e, 0xad, 0xb7, 0xdc, 0x5b, 0xb2, 0x30, 0x22, 0x68, 0x0b, 0x5a, 0x9f, 0xf4, 0x5a, 0x7f, 0xa0, - 0xd8, 0x5f, 0x57, 0x5e, 0x48, 0xfb, 0x6a, 0x40, 0x73, 0xcc, 0x29, 0xf5, 0xd2, 0x15, 0x7a, 0x00, - 0xeb, 0x99, 0x47, 0x93, 0x88, 0xcc, 0x7c, 0xa1, 0x56, 0x4e, 0x30, 0x71, 0x5b, 0xd5, 0xe4, 0x01, - 0xd0, 0x36, 0x80, 0x86, 0x64, 0x9c, 0xea, 0x49, 0x96, 0xaa, 0x8c, 0x39, 0x15, 0xe7, 0x28, 0xbe, - 0x5f, 0xeb, 0xd5, 0x6e, 0x3e, 0x47, 0xae, 0xb8, 0xd4, 0xe7, 0x74, 0xa1, 0x39, 0x5d, 0xb2, 0x55, - 0x42, 0x2e, 0x6f, 0xb8, 0xc5, 0x2f, 0x06, 0x58, 0x27, 0x61, 0xc6, 0xe2, 0x20, 0xf5, 0xe8, 0x3f, - 0x10, 0xbb, 0x07, 0x8d, 0x39, 0xf7, 0x3f, 0x12, 0xa6, 0xa5, 0xde, 0xab, 0x96, 0x7a, 0x24, 0x31, - 0x58, 0x63, 0x9d, 0x6f, 0x06, 0x34, 0x54, 0x09, 0x3d, 0x84, 0x8e, 0xcf, 0x29, 0x8f, 0x3c, 0x16, - 0x5e, 0x5d, 0x97, 0x71, 0xa7, 0xac, 0x2b, 0x29, 0x5d, 0x68, 0xf3, 0x24, 0x21, 0xe9, 0x6c, 0x1e, - 
0xf3, 0xe5, 0xa5, 0xd6, 0x02, 0xb2, 0x74, 0x24, 0x2a, 0xd7, 0x1c, 0x50, 0xfb, 0x43, 0x07, 0x7c, - 0x37, 0xa0, 0x95, 0x97, 0xd1, 0x3e, 0xd4, 0x23, 0xe1, 0x60, 0xdb, 0x90, 0x87, 0xea, 0x56, 0x4f, - 0x29, 0x4c, 0x8e, 0x15, 0xba, 0xda, 0x1d, 0xe8, 0x29, 0x58, 0x45, 0x42, 0xb4, 0xac, 0x2d, 0x57, - 0x65, 0xc8, 0xcd, 0x33, 0xe4, 0x4e, 0x72, 0x04, 0x2e, 0xc1, 0xce, 0xcf, 0x35, 0x68, 0x8c, 0x64, - 0x22, 0xff, 0x56, 0xd1, 0x63, 0xa8, 0x07, 0x22, 0x53, 0x3a, 0x10, 0x77, 0xab, 0x69, 0x32, 0x76, - 0x58, 0x21, 0xd1, 0x13, 0x68, 0xfa, 0x2a, 0x67, 0x5a, 0xec, 0x76, 0x35, 0x49, 0x87, 0x11, 0xe7, - 0x68, 0x41, 0xcc, 0x54, 0x08, 0x6c, 0xf3, 0x36, 0xa2, 0x4e, 0x0a, 0xce, 0xd1, 0x82, 0xc8, 0x95, - 0x69, 0xed, 0xfa, 0x6d, 0x44, 0xed, 0x6c, 0x9c, 0xa3, 0xd1, 0x0b, 0xb0, 0x16, 0xb9, 0x97, 0xed, - 0xa6, 0xa4, 0xde, 0x70, 0x31, 0x85, 0xe5, 0x71, 0xc9, 0x10, 0xee, 0x2f, 0xee, 0x7a, 0x46, 0x33, - 0xbb, 0xd1, 0x33, 0x76, 0x6a, 0xb8, 0x5d, 0xd4, 0x46, 0x99, 0xf3, 0xc3, 0x80, 0x75, 0xf5, 0x07, - 0x5e, 0x79, 0x34, 0x8c, 0x56, 0x95, 0xcf, 0x19, 0x02, 0x73, 0x41, 0xa2, 0x44, 0xbf, 0x66, 0x72, - 0x8d, 0xf6, 0xc0, 0x14, 0x1a, 0xe5, 0x15, 0xfe, 0xbf, 0xdb, 0xab, 0x56, 0xa5, 0x26, 0x4f, 0x56, - 0x09, 0xc1, 0x12, 0x2d, 0xd2, 0xa4, 0x5e, 0x60, 0xdb, 0xbc, 0x2d, 0x4d, 0x8a, 0x87, 0x35, 0xf6, - 0xd1, 0x08, 0xa0, 0x9c, 0x84, 0xda, 0xd0, 0x3c, 0x3e, 0x9b, 0x9e, 0x4e, 0x06, 0xb8, 0xf3, 0x1f, - 0xb2, 0xa0, 0x3e, 0x3c, 0x9c, 0x0e, 0x07, 0x1d, 0x43, 0xd4, 0xc7, 0xd3, 0xd1, 0xe8, 0x10, 0x5f, - 0x74, 0xd6, 0xc4, 0x66, 0x7a, 0x3a, 0xb9, 0x38, 0x1f, 0xbc, 0xec, 0xd4, 0xd0, 0x06, 0x58, 0x27, - 0xaf, 0xc7, 0x93, 0xb3, 0x21, 0x3e, 0x1c, 0x75, 0xcc, 0x23, 0x0c, 0x95, 0xef, 0xfe, 0xbb, 0x83, - 0x20, 0x64, 0x0b, 0x3e, 0x77, 0xfd, 0x98, 0xf6, 0xcb, 0x6e, 0x5f, 0x75, 0x67, 0x34, 0xbe, 0x24, - 0x51, 0x3f, 0x88, 0x9f, 0x85, 0xf1, 0xac, 0xec, 0xce, 0x54, 0xf7, 0x57, 0x00, 0x00, 0x00, 0xff, - 0xff, 0xd0, 0x84, 0x91, 0x73, 0x59, 0x06, 0x00, 0x00, -} diff --git a/vendor/github.com/prometheus/common/LICENSE b/vendor/github.com/prometheus/common/LICENSE deleted file mode 100644 index 261eeb9e..00000000 --- a/vendor/github.com/prometheus/common/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/prometheus/common/NOTICE b/vendor/github.com/prometheus/common/NOTICE deleted file mode 100644 index 636a2c1a..00000000 --- a/vendor/github.com/prometheus/common/NOTICE +++ /dev/null @@ -1,5 +0,0 @@ -Common libraries shared by Prometheus Go components. -Copyright 2015 The Prometheus Authors - -This product includes software developed at -SoundCloud Ltd. (http://soundcloud.com/). diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go deleted file mode 100644 index 7657f841..00000000 --- a/vendor/github.com/prometheus/common/expfmt/decode.go +++ /dev/null @@ -1,429 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package expfmt - -import ( - "fmt" - "io" - "math" - "mime" - "net/http" - - dto "github.com/prometheus/client_model/go" - - "github.com/matttproud/golang_protobuf_extensions/pbutil" - "github.com/prometheus/common/model" -) - -// Decoder types decode an input stream into metric families. -type Decoder interface { - Decode(*dto.MetricFamily) error -} - -// DecodeOptions contains options used by the Decoder and in sample extraction. -type DecodeOptions struct { - // Timestamp is added to each value from the stream that has no explicit timestamp set. - Timestamp model.Time -} - -// ResponseFormat extracts the correct format from a HTTP response header. -// If no matching format can be found FormatUnknown is returned. -func ResponseFormat(h http.Header) Format { - ct := h.Get(hdrContentType) - - mediatype, params, err := mime.ParseMediaType(ct) - if err != nil { - return FmtUnknown - } - - const textType = "text/plain" - - switch mediatype { - case ProtoType: - if p, ok := params["proto"]; ok && p != ProtoProtocol { - return FmtUnknown - } - if e, ok := params["encoding"]; ok && e != "delimited" { - return FmtUnknown - } - return FmtProtoDelim - - case textType: - if v, ok := params["version"]; ok && v != TextVersion { - return FmtUnknown - } - return FmtText - } - - return FmtUnknown -} - -// NewDecoder returns a new decoder based on the given input format. -// If the input format does not imply otherwise, a text format decoder is returned. -func NewDecoder(r io.Reader, format Format) Decoder { - switch format { - case FmtProtoDelim: - return &protoDecoder{r: r} - } - return &textDecoder{r: r} -} - -// protoDecoder implements the Decoder interface for protocol buffers. -type protoDecoder struct { - r io.Reader -} - -// Decode implements the Decoder interface. -func (d *protoDecoder) Decode(v *dto.MetricFamily) error { - _, err := pbutil.ReadDelimited(d.r, v) - if err != nil { - return err - } - if !model.IsValidMetricName(model.LabelValue(v.GetName())) { - return fmt.Errorf("invalid metric name %q", v.GetName()) - } - for _, m := range v.GetMetric() { - if m == nil { - continue - } - for _, l := range m.GetLabel() { - if l == nil { - continue - } - if !model.LabelValue(l.GetValue()).IsValid() { - return fmt.Errorf("invalid label value %q", l.GetValue()) - } - if !model.LabelName(l.GetName()).IsValid() { - return fmt.Errorf("invalid label name %q", l.GetName()) - } - } - } - return nil -} - -// textDecoder implements the Decoder interface for the text protocol. -type textDecoder struct { - r io.Reader - p TextParser - fams []*dto.MetricFamily -} - -// Decode implements the Decoder interface. -func (d *textDecoder) Decode(v *dto.MetricFamily) error { - // TODO(fabxc): Wrap this as a line reader to make streaming safer. - if len(d.fams) == 0 { - // No cached metric families, read everything and parse metrics. 
- fams, err := d.p.TextToMetricFamilies(d.r) - if err != nil { - return err - } - if len(fams) == 0 { - return io.EOF - } - d.fams = make([]*dto.MetricFamily, 0, len(fams)) - for _, f := range fams { - d.fams = append(d.fams, f) - } - } - - *v = *d.fams[0] - d.fams = d.fams[1:] - - return nil -} - -// SampleDecoder wraps a Decoder to extract samples from the metric families -// decoded by the wrapped Decoder. -type SampleDecoder struct { - Dec Decoder - Opts *DecodeOptions - - f dto.MetricFamily -} - -// Decode calls the Decode method of the wrapped Decoder and then extracts the -// samples from the decoded MetricFamily into the provided model.Vector. -func (sd *SampleDecoder) Decode(s *model.Vector) error { - err := sd.Dec.Decode(&sd.f) - if err != nil { - return err - } - *s, err = extractSamples(&sd.f, sd.Opts) - return err -} - -// ExtractSamples builds a slice of samples from the provided metric -// families. If an error occurs during sample extraction, it continues to -// extract from the remaining metric families. The returned error is the last -// error that has occurred. -func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) (model.Vector, error) { - var ( - all model.Vector - lastErr error - ) - for _, f := range fams { - some, err := extractSamples(f, o) - if err != nil { - lastErr = err - continue - } - all = append(all, some...) - } - return all, lastErr -} - -func extractSamples(f *dto.MetricFamily, o *DecodeOptions) (model.Vector, error) { - switch f.GetType() { - case dto.MetricType_COUNTER: - return extractCounter(o, f), nil - case dto.MetricType_GAUGE: - return extractGauge(o, f), nil - case dto.MetricType_SUMMARY: - return extractSummary(o, f), nil - case dto.MetricType_UNTYPED: - return extractUntyped(o, f), nil - case dto.MetricType_HISTOGRAM: - return extractHistogram(o, f), nil - } - return nil, fmt.Errorf("expfmt.extractSamples: unknown metric family type %v", f.GetType()) -} - -func extractCounter(o *DecodeOptions, f *dto.MetricFamily) model.Vector { - samples := make(model.Vector, 0, len(f.Metric)) - - for _, m := range f.Metric { - if m.Counter == nil { - continue - } - - lset := make(model.LabelSet, len(m.Label)+1) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) - - smpl := &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(m.Counter.GetValue()), - } - - if m.TimestampMs != nil { - smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) - } else { - smpl.Timestamp = o.Timestamp - } - - samples = append(samples, smpl) - } - - return samples -} - -func extractGauge(o *DecodeOptions, f *dto.MetricFamily) model.Vector { - samples := make(model.Vector, 0, len(f.Metric)) - - for _, m := range f.Metric { - if m.Gauge == nil { - continue - } - - lset := make(model.LabelSet, len(m.Label)+1) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) - - smpl := &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(m.Gauge.GetValue()), - } - - if m.TimestampMs != nil { - smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) - } else { - smpl.Timestamp = o.Timestamp - } - - samples = append(samples, smpl) - } - - return samples -} - -func extractUntyped(o *DecodeOptions, f *dto.MetricFamily) model.Vector { - samples := make(model.Vector, 0, len(f.Metric)) - - for _, m := range 
f.Metric { - if m.Untyped == nil { - continue - } - - lset := make(model.LabelSet, len(m.Label)+1) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) - - smpl := &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(m.Untyped.GetValue()), - } - - if m.TimestampMs != nil { - smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) - } else { - smpl.Timestamp = o.Timestamp - } - - samples = append(samples, smpl) - } - - return samples -} - -func extractSummary(o *DecodeOptions, f *dto.MetricFamily) model.Vector { - samples := make(model.Vector, 0, len(f.Metric)) - - for _, m := range f.Metric { - if m.Summary == nil { - continue - } - - timestamp := o.Timestamp - if m.TimestampMs != nil { - timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) - } - - for _, q := range m.Summary.Quantile { - lset := make(model.LabelSet, len(m.Label)+2) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - // BUG(matt): Update other names to "quantile". - lset[model.LabelName(model.QuantileLabel)] = model.LabelValue(fmt.Sprint(q.GetQuantile())) - lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) - - samples = append(samples, &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(q.GetValue()), - Timestamp: timestamp, - }) - } - - lset := make(model.LabelSet, len(m.Label)+1) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum") - - samples = append(samples, &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(m.Summary.GetSampleSum()), - Timestamp: timestamp, - }) - - lset = make(model.LabelSet, len(m.Label)+1) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count") - - samples = append(samples, &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(m.Summary.GetSampleCount()), - Timestamp: timestamp, - }) - } - - return samples -} - -func extractHistogram(o *DecodeOptions, f *dto.MetricFamily) model.Vector { - samples := make(model.Vector, 0, len(f.Metric)) - - for _, m := range f.Metric { - if m.Histogram == nil { - continue - } - - timestamp := o.Timestamp - if m.TimestampMs != nil { - timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) - } - - infSeen := false - - for _, q := range m.Histogram.Bucket { - lset := make(model.LabelSet, len(m.Label)+2) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.LabelName(model.BucketLabel)] = model.LabelValue(fmt.Sprint(q.GetUpperBound())) - lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket") - - if math.IsInf(q.GetUpperBound(), +1) { - infSeen = true - } - - samples = append(samples, &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(q.GetCumulativeCount()), - Timestamp: timestamp, - }) - } - - lset := make(model.LabelSet, len(m.Label)+1) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum") - - samples = append(samples, &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(m.Histogram.GetSampleSum()), - Timestamp: 
timestamp, - }) - - lset = make(model.LabelSet, len(m.Label)+1) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count") - - count := &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(m.Histogram.GetSampleCount()), - Timestamp: timestamp, - } - samples = append(samples, count) - - if !infSeen { - // Append an infinity bucket sample. - lset := make(model.LabelSet, len(m.Label)+2) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.LabelName(model.BucketLabel)] = model.LabelValue("+Inf") - lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket") - - samples = append(samples, &model.Sample{ - Metric: model.Metric(lset), - Value: count.Value, - Timestamp: timestamp, - }) - } - } - - return samples -} diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go deleted file mode 100644 index bd4e3474..00000000 --- a/vendor/github.com/prometheus/common/expfmt/encode.go +++ /dev/null @@ -1,162 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package expfmt - -import ( - "fmt" - "io" - "net/http" - - "github.com/golang/protobuf/proto" - "github.com/matttproud/golang_protobuf_extensions/pbutil" - "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg" - - dto "github.com/prometheus/client_model/go" -) - -// Encoder types encode metric families into an underlying wire protocol. -type Encoder interface { - Encode(*dto.MetricFamily) error -} - -// Closer is implemented by Encoders that need to be closed to finalize -// encoding. (For example, OpenMetrics needs a final `# EOF` line.) -// -// Note that all Encoder implementations returned from this package implement -// Closer, too, even if the Close call is a no-op. This happens in preparation -// for adding a Close method to the Encoder interface directly in a (mildly -// breaking) release in the future. -type Closer interface { - Close() error -} - -type encoderCloser struct { - encode func(*dto.MetricFamily) error - close func() error -} - -func (ec encoderCloser) Encode(v *dto.MetricFamily) error { - return ec.encode(v) -} - -func (ec encoderCloser) Close() error { - return ec.close() -} - -// Negotiate returns the Content-Type based on the given Accept header. If no -// appropriate accepted type is found, FmtText is returned (which is the -// Prometheus text format). This function will never negotiate FmtOpenMetrics, -// as the support is still experimental. To include the option to negotiate -// FmtOpenMetrics, use NegotiateOpenMetrics. 
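For reference, the Negotiate/NewEncoder pair deleted in this hunk is the usual entry point for serving metrics over HTTP. A minimal sketch of how it is typically driven, assuming the pre-removal vendored API shown above (the handler shape and family slice are illustrative only):

package sketch

import (
	"net/http"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

// writeFamilies negotiates a wire format from the request's Accept header
// and streams the given families to the client in that format.
func writeFamilies(w http.ResponseWriter, r *http.Request, fams []*dto.MetricFamily) error {
	format := expfmt.Negotiate(r.Header) // falls back to FmtText
	w.Header().Set("Content-Type", string(format))
	enc := expfmt.NewEncoder(w, format)
	for _, f := range fams {
		if err := enc.Encode(f); err != nil {
			return err
		}
	}
	// Every encoder returned by NewEncoder also implements Closer; for
	// OpenMetrics, Close writes the required trailing "# EOF" line.
	if c, ok := enc.(expfmt.Closer); ok {
		return c.Close()
	}
	return nil
}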
-func Negotiate(h http.Header) Format { - for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) { - ver := ac.Params["version"] - if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { - switch ac.Params["encoding"] { - case "delimited": - return FmtProtoDelim - case "text": - return FmtProtoText - case "compact-text": - return FmtProtoCompact - } - } - if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { - return FmtText - } - } - return FmtText -} - -// NegotiateIncludingOpenMetrics works like Negotiate but includes -// FmtOpenMetrics as an option for the result. Note that this function is -// temporary and will disappear once FmtOpenMetrics is fully supported and as -// such may be negotiated by the normal Negotiate function. -func NegotiateIncludingOpenMetrics(h http.Header) Format { - for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) { - ver := ac.Params["version"] - if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { - switch ac.Params["encoding"] { - case "delimited": - return FmtProtoDelim - case "text": - return FmtProtoText - case "compact-text": - return FmtProtoCompact - } - } - if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { - return FmtText - } - if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion || ver == "") { - return FmtOpenMetrics - } - } - return FmtText -} - -// NewEncoder returns a new encoder based on content type negotiation. All -// Encoder implementations returned by NewEncoder also implement Closer, and -// callers should always call the Close method. It is currently only required -// for FmtOpenMetrics, but a future (breaking) release will add the Close method -// to the Encoder interface directly. The current version of the Encoder -// interface is kept for backwards compatibility. -func NewEncoder(w io.Writer, format Format) Encoder { - switch format { - case FmtProtoDelim: - return encoderCloser{ - encode: func(v *dto.MetricFamily) error { - _, err := pbutil.WriteDelimited(w, v) - return err - }, - close: func() error { return nil }, - } - case FmtProtoCompact: - return encoderCloser{ - encode: func(v *dto.MetricFamily) error { - _, err := fmt.Fprintln(w, v.String()) - return err - }, - close: func() error { return nil }, - } - case FmtProtoText: - return encoderCloser{ - encode: func(v *dto.MetricFamily) error { - _, err := fmt.Fprintln(w, proto.MarshalTextString(v)) - return err - }, - close: func() error { return nil }, - } - case FmtText: - return encoderCloser{ - encode: func(v *dto.MetricFamily) error { - _, err := MetricFamilyToText(w, v) - return err - }, - close: func() error { return nil }, - } - case FmtOpenMetrics: - return encoderCloser{ - encode: func(v *dto.MetricFamily) error { - _, err := MetricFamilyToOpenMetrics(w, v) - return err - }, - close: func() error { - _, err := FinalizeOpenMetrics(w) - return err - }, - } - } - panic(fmt.Errorf("expfmt.NewEncoder: unknown format %q", format)) -} diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go deleted file mode 100644 index 0f176fa6..00000000 --- a/vendor/github.com/prometheus/common/expfmt/expfmt.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package expfmt contains tools for reading and writing Prometheus metrics. -package expfmt - -// Format specifies the HTTP content type of the different wire protocols. -type Format string - -// Constants to assemble the Content-Type values for the different wire protocols. -const ( - TextVersion = "0.0.4" - ProtoType = `application/vnd.google.protobuf` - ProtoProtocol = `io.prometheus.client.MetricFamily` - ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" - OpenMetricsType = `application/openmetrics-text` - OpenMetricsVersion = "0.0.1" - - // The Content-Type values for the different wire protocols. - FmtUnknown Format = `` - FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8` - FmtProtoDelim Format = ProtoFmt + ` encoding=delimited` - FmtProtoText Format = ProtoFmt + ` encoding=text` - FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text` - FmtOpenMetrics Format = OpenMetricsType + `; version=` + OpenMetricsVersion + `; charset=utf-8` -) - -const ( - hdrContentType = "Content-Type" - hdrAccept = "Accept" -) diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz.go b/vendor/github.com/prometheus/common/expfmt/fuzz.go deleted file mode 100644 index dc2eedee..00000000 --- a/vendor/github.com/prometheus/common/expfmt/fuzz.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Build only when actually fuzzing -// +build gofuzz - -package expfmt - -import "bytes" - -// Fuzz text metric parser with with github.com/dvyukov/go-fuzz: -// -// go-fuzz-build github.com/prometheus/common/expfmt -// go-fuzz -bin expfmt-fuzz.zip -workdir fuzz -// -// Further input samples should go in the folder fuzz/corpus. -func Fuzz(in []byte) int { - parser := TextParser{} - _, err := parser.TextToMetricFamilies(bytes.NewReader(in)) - - if err != nil { - return 0 - } - - return 1 -} diff --git a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go deleted file mode 100644 index 8a9313a3..00000000 --- a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go +++ /dev/null @@ -1,527 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package expfmt - -import ( - "bufio" - "bytes" - "fmt" - "io" - "math" - "strconv" - "strings" - - "github.com/golang/protobuf/ptypes" - "github.com/prometheus/common/model" - - dto "github.com/prometheus/client_model/go" -) - -// MetricFamilyToOpenMetrics converts a MetricFamily proto message into the -// OpenMetrics text format and writes the resulting lines to 'out'. It returns -// the number of bytes written and any error encountered. The output will have -// the same order as the input, no further sorting is performed. Furthermore, -// this function assumes the input is already sanitized and does not perform any -// sanity checks. If the input contains duplicate metrics or invalid metric or -// label names, the conversion will result in invalid text format output. -// -// This function fulfills the type 'expfmt.encoder'. -// -// Note that OpenMetrics requires a final `# EOF` line. Since this function acts -// on individual metric families, it is the responsibility of the caller to -// append this line to 'out' once all metric families have been written. -// Conveniently, this can be done by calling FinalizeOpenMetrics. -// -// The output should be fully OpenMetrics compliant. However, there are a few -// missing features and peculiarities to avoid complications when switching from -// Prometheus to OpenMetrics or vice versa: -// -// - Counters are expected to have the `_total` suffix in their metric name. In -// the output, the suffix will be truncated from the `# TYPE` and `# HELP` -// line. A counter with a missing `_total` suffix is not an error. However, -// its type will be set to `unknown` in that case to avoid invalid OpenMetrics -// output. -// -// - No support for the following (optional) features: `# UNIT` line, `_created` -// line, info type, stateset type, gaugehistogram type. -// -// - The size of exemplar labels is not checked (i.e. it's possible to create -// exemplars that are larger than allowed by the OpenMetrics specification). -// -// - The value of Counters is not checked. (OpenMetrics doesn't allow counters -// with a `NaN` value.) -func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int, err error) { - name := in.GetName() - if name == "" { - return 0, fmt.Errorf("MetricFamily has no name: %s", in) - } - - // Try the interface upgrade. If it doesn't work, we'll use a - // bufio.Writer from the sync.Pool. - w, ok := out.(enhancedWriter) - if !ok { - b := bufPool.Get().(*bufio.Writer) - b.Reset(out) - w = b - defer func() { - bErr := b.Flush() - if err == nil { - err = bErr - } - bufPool.Put(b) - }() - } - - var ( - n int - metricType = in.GetType() - shortName = name - ) - if metricType == dto.MetricType_COUNTER && strings.HasSuffix(shortName, "_total") { - shortName = name[:len(name)-6] - } - - // Comments, first HELP, then TYPE. 
- if in.Help != nil { - n, err = w.WriteString("# HELP ") - written += n - if err != nil { - return - } - n, err = w.WriteString(shortName) - written += n - if err != nil { - return - } - err = w.WriteByte(' ') - written++ - if err != nil { - return - } - n, err = writeEscapedString(w, *in.Help, true) - written += n - if err != nil { - return - } - err = w.WriteByte('\n') - written++ - if err != nil { - return - } - } - n, err = w.WriteString("# TYPE ") - written += n - if err != nil { - return - } - n, err = w.WriteString(shortName) - written += n - if err != nil { - return - } - switch metricType { - case dto.MetricType_COUNTER: - if strings.HasSuffix(name, "_total") { - n, err = w.WriteString(" counter\n") - } else { - n, err = w.WriteString(" unknown\n") - } - case dto.MetricType_GAUGE: - n, err = w.WriteString(" gauge\n") - case dto.MetricType_SUMMARY: - n, err = w.WriteString(" summary\n") - case dto.MetricType_UNTYPED: - n, err = w.WriteString(" unknown\n") - case dto.MetricType_HISTOGRAM: - n, err = w.WriteString(" histogram\n") - default: - return written, fmt.Errorf("unknown metric type %s", metricType.String()) - } - written += n - if err != nil { - return - } - - // Finally the samples, one line for each. - for _, metric := range in.Metric { - switch metricType { - case dto.MetricType_COUNTER: - if metric.Counter == nil { - return written, fmt.Errorf( - "expected counter in metric %s %s", name, metric, - ) - } - // Note that we have ensured above that either the name - // ends on `_total` or that the rendered type is - // `unknown`. Therefore, no `_total` must be added here. - n, err = writeOpenMetricsSample( - w, name, "", metric, "", 0, - metric.Counter.GetValue(), 0, false, - metric.Counter.Exemplar, - ) - case dto.MetricType_GAUGE: - if metric.Gauge == nil { - return written, fmt.Errorf( - "expected gauge in metric %s %s", name, metric, - ) - } - n, err = writeOpenMetricsSample( - w, name, "", metric, "", 0, - metric.Gauge.GetValue(), 0, false, - nil, - ) - case dto.MetricType_UNTYPED: - if metric.Untyped == nil { - return written, fmt.Errorf( - "expected untyped in metric %s %s", name, metric, - ) - } - n, err = writeOpenMetricsSample( - w, name, "", metric, "", 0, - metric.Untyped.GetValue(), 0, false, - nil, - ) - case dto.MetricType_SUMMARY: - if metric.Summary == nil { - return written, fmt.Errorf( - "expected summary in metric %s %s", name, metric, - ) - } - for _, q := range metric.Summary.Quantile { - n, err = writeOpenMetricsSample( - w, name, "", metric, - model.QuantileLabel, q.GetQuantile(), - q.GetValue(), 0, false, - nil, - ) - written += n - if err != nil { - return - } - } - n, err = writeOpenMetricsSample( - w, name, "_sum", metric, "", 0, - metric.Summary.GetSampleSum(), 0, false, - nil, - ) - written += n - if err != nil { - return - } - n, err = writeOpenMetricsSample( - w, name, "_count", metric, "", 0, - 0, metric.Summary.GetSampleCount(), true, - nil, - ) - case dto.MetricType_HISTOGRAM: - if metric.Histogram == nil { - return written, fmt.Errorf( - "expected histogram in metric %s %s", name, metric, - ) - } - infSeen := false - for _, b := range metric.Histogram.Bucket { - n, err = writeOpenMetricsSample( - w, name, "_bucket", metric, - model.BucketLabel, b.GetUpperBound(), - 0, b.GetCumulativeCount(), true, - b.Exemplar, - ) - written += n - if err != nil { - return - } - if math.IsInf(b.GetUpperBound(), +1) { - infSeen = true - } - } - if !infSeen { - n, err = writeOpenMetricsSample( - w, name, "_bucket", metric, - model.BucketLabel, math.Inf(+1), 
- 0, metric.Histogram.GetSampleCount(), true, - nil, - ) - written += n - if err != nil { - return - } - } - n, err = writeOpenMetricsSample( - w, name, "_sum", metric, "", 0, - metric.Histogram.GetSampleSum(), 0, false, - nil, - ) - written += n - if err != nil { - return - } - n, err = writeOpenMetricsSample( - w, name, "_count", metric, "", 0, - 0, metric.Histogram.GetSampleCount(), true, - nil, - ) - default: - return written, fmt.Errorf( - "unexpected type in metric %s %s", name, metric, - ) - } - written += n - if err != nil { - return - } - } - return -} - -// FinalizeOpenMetrics writes the final `# EOF\n` line required by OpenMetrics. -func FinalizeOpenMetrics(w io.Writer) (written int, err error) { - return w.Write([]byte("# EOF\n")) -} - -// writeOpenMetricsSample writes a single sample in OpenMetrics text format to -// w, given the metric name, the metric proto message itself, optionally an -// additional label name with a float64 value (use empty string as label name if -// not required), the value (optionally as float64 or uint64, determined by -// useIntValue), and optionally an exemplar (use nil if not required). The -// function returns the number of bytes written and any error encountered. -func writeOpenMetricsSample( - w enhancedWriter, - name, suffix string, - metric *dto.Metric, - additionalLabelName string, additionalLabelValue float64, - floatValue float64, intValue uint64, useIntValue bool, - exemplar *dto.Exemplar, -) (int, error) { - var written int - n, err := w.WriteString(name) - written += n - if err != nil { - return written, err - } - if suffix != "" { - n, err = w.WriteString(suffix) - written += n - if err != nil { - return written, err - } - } - n, err = writeOpenMetricsLabelPairs( - w, metric.Label, additionalLabelName, additionalLabelValue, - ) - written += n - if err != nil { - return written, err - } - err = w.WriteByte(' ') - written++ - if err != nil { - return written, err - } - if useIntValue { - n, err = writeUint(w, intValue) - } else { - n, err = writeOpenMetricsFloat(w, floatValue) - } - written += n - if err != nil { - return written, err - } - if metric.TimestampMs != nil { - err = w.WriteByte(' ') - written++ - if err != nil { - return written, err - } - // TODO(beorn7): Format this directly without converting to a float first. - n, err = writeOpenMetricsFloat(w, float64(*metric.TimestampMs)/1000) - written += n - if err != nil { - return written, err - } - } - if exemplar != nil { - n, err = writeExemplar(w, exemplar) - written += n - if err != nil { - return written, err - } - } - err = w.WriteByte('\n') - written++ - if err != nil { - return written, err - } - return written, nil -} - -// writeOpenMetricsLabelPairs works like writeOpenMetrics but formats the float -// in OpenMetrics style. 
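As the MetricFamilyToOpenMetrics documentation above notes, a counter's `_total` suffix is truncated in the `# HELP`/`# TYPE` lines only, and the output is incomplete until `# EOF` is appended. A short end-to-end sketch, assuming the pre-removal vendored API (the metric family is made up):

package main

import (
	"os"

	"github.com/golang/protobuf/proto"
	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

func main() {
	mf := &dto.MetricFamily{
		Name: proto.String("http_requests_total"),
		Type: dto.MetricType_COUNTER.Enum(),
		Metric: []*dto.Metric{
			{Counter: &dto.Counter{Value: proto.Float64(42)}},
		},
	}
	// Prints "# TYPE http_requests counter" (suffix truncated) followed
	// by the sample line "http_requests_total 42.0".
	if _, err := expfmt.MetricFamilyToOpenMetrics(os.Stdout, mf); err != nil {
		panic(err)
	}
	// OpenMetrics output is only valid once "# EOF" has been written.
	if _, err := expfmt.FinalizeOpenMetrics(os.Stdout); err != nil {
		panic(err)
	}
}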
-func writeOpenMetricsLabelPairs( - w enhancedWriter, - in []*dto.LabelPair, - additionalLabelName string, additionalLabelValue float64, -) (int, error) { - if len(in) == 0 && additionalLabelName == "" { - return 0, nil - } - var ( - written int - separator byte = '{' - ) - for _, lp := range in { - err := w.WriteByte(separator) - written++ - if err != nil { - return written, err - } - n, err := w.WriteString(lp.GetName()) - written += n - if err != nil { - return written, err - } - n, err = w.WriteString(`="`) - written += n - if err != nil { - return written, err - } - n, err = writeEscapedString(w, lp.GetValue(), true) - written += n - if err != nil { - return written, err - } - err = w.WriteByte('"') - written++ - if err != nil { - return written, err - } - separator = ',' - } - if additionalLabelName != "" { - err := w.WriteByte(separator) - written++ - if err != nil { - return written, err - } - n, err := w.WriteString(additionalLabelName) - written += n - if err != nil { - return written, err - } - n, err = w.WriteString(`="`) - written += n - if err != nil { - return written, err - } - n, err = writeOpenMetricsFloat(w, additionalLabelValue) - written += n - if err != nil { - return written, err - } - err = w.WriteByte('"') - written++ - if err != nil { - return written, err - } - } - err := w.WriteByte('}') - written++ - if err != nil { - return written, err - } - return written, nil -} - -// writeExemplar writes the provided exemplar in OpenMetrics format to w. The -// function returns the number of bytes written and any error encountered. -func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) { - written := 0 - n, err := w.WriteString(" # ") - written += n - if err != nil { - return written, err - } - n, err = writeOpenMetricsLabelPairs(w, e.Label, "", 0) - written += n - if err != nil { - return written, err - } - err = w.WriteByte(' ') - written++ - if err != nil { - return written, err - } - n, err = writeOpenMetricsFloat(w, e.GetValue()) - written += n - if err != nil { - return written, err - } - if e.Timestamp != nil { - err = w.WriteByte(' ') - written++ - if err != nil { - return written, err - } - ts, err := ptypes.Timestamp((*e).Timestamp) - if err != nil { - return written, err - } - // TODO(beorn7): Format this directly from components of ts to - // avoid overflow/underflow and precision issues of the float - // conversion. - n, err = writeOpenMetricsFloat(w, float64(ts.UnixNano())/1e9) - written += n - if err != nil { - return written, err - } - } - return written, nil -} - -// writeOpenMetricsFloat works like writeFloat but appends ".0" if the resulting -// number would otherwise contain neither a "." nor an "e". -func writeOpenMetricsFloat(w enhancedWriter, f float64) (int, error) { - switch { - case f == 1: - return w.WriteString("1.0") - case f == 0: - return w.WriteString("0.0") - case f == -1: - return w.WriteString("-1.0") - case math.IsNaN(f): - return w.WriteString("NaN") - case math.IsInf(f, +1): - return w.WriteString("+Inf") - case math.IsInf(f, -1): - return w.WriteString("-Inf") - default: - bp := numBufPool.Get().(*[]byte) - *bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64) - if !bytes.ContainsAny(*bp, "e.") { - *bp = append(*bp, '.', '0') - } - written, err := w.Write(*bp) - numBufPool.Put(bp) - return written, err - } -} - -// writeUint is like writeInt just for uint64. 
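writeOpenMetricsFloat above guarantees that every emitted value contains a '.' or an 'e', which is the visible difference from the classic text format (where 1 renders as "1"). A standalone sketch of that rule; NaN and ±Inf are special-cased separately in the real code:

package main

import (
	"bytes"
	"fmt"
	"strconv"
)

// openMetricsFloat mirrors the default branch of the deleted
// writeOpenMetricsFloat: format with 'g', then force a ".0" when the
// result would contain neither '.' nor 'e'.
func openMetricsFloat(f float64) string {
	b := strconv.AppendFloat(nil, f, 'g', -1, 64)
	if !bytes.ContainsAny(b, "e.") {
		b = append(b, '.', '0')
	}
	return string(b)
}

func main() {
	for _, f := range []float64{0, 1, -1, 0.25, 1e21} {
		fmt.Println(openMetricsFloat(f)) // 0.0, 1.0, -1.0, 0.25, 1e+21
	}
}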
-func writeUint(w enhancedWriter, u uint64) (int, error) { - bp := numBufPool.Get().(*[]byte) - *bp = strconv.AppendUint((*bp)[:0], u, 10) - written, err := w.Write(*bp) - numBufPool.Put(bp) - return written, err -} diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go deleted file mode 100644 index 5ba503b0..00000000 --- a/vendor/github.com/prometheus/common/expfmt/text_create.go +++ /dev/null @@ -1,465 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package expfmt - -import ( - "bufio" - "fmt" - "io" - "io/ioutil" - "math" - "strconv" - "strings" - "sync" - - "github.com/prometheus/common/model" - - dto "github.com/prometheus/client_model/go" -) - -// enhancedWriter has all the enhanced write functions needed here. bufio.Writer -// implements it. -type enhancedWriter interface { - io.Writer - WriteRune(r rune) (n int, err error) - WriteString(s string) (n int, err error) - WriteByte(c byte) error -} - -const ( - initialNumBufSize = 24 -) - -var ( - bufPool = sync.Pool{ - New: func() interface{} { - return bufio.NewWriter(ioutil.Discard) - }, - } - numBufPool = sync.Pool{ - New: func() interface{} { - b := make([]byte, 0, initialNumBufSize) - return &b - }, - } -) - -// MetricFamilyToText converts a MetricFamily proto message into text format and -// writes the resulting lines to 'out'. It returns the number of bytes written -// and any error encountered. The output will have the same order as the input, -// no further sorting is performed. Furthermore, this function assumes the input -// is already sanitized and does not perform any sanity checks. If the input -// contains duplicate metrics or invalid metric or label names, the conversion -// will result in invalid text format output. -// -// This method fulfills the type 'prometheus.encoder'. -func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err error) { - // Fail-fast checks. - if len(in.Metric) == 0 { - return 0, fmt.Errorf("MetricFamily has no metrics: %s", in) - } - name := in.GetName() - if name == "" { - return 0, fmt.Errorf("MetricFamily has no name: %s", in) - } - - // Try the interface upgrade. If it doesn't work, we'll use a - // bufio.Writer from the sync.Pool. - w, ok := out.(enhancedWriter) - if !ok { - b := bufPool.Get().(*bufio.Writer) - b.Reset(out) - w = b - defer func() { - bErr := b.Flush() - if err == nil { - err = bErr - } - bufPool.Put(b) - }() - } - - var n int - - // Comments, first HELP, then TYPE. 
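Both writers in this file lean on the same "interface upgrade" trick: if the destination already implements the byte/string helpers, write to it directly; otherwise borrow a bufio.Writer from a sync.Pool and fold its Flush error into the result on the way out. A condensed sketch of the pattern (names mirror the vendored code; io.Discard assumes Go 1.16+):

package sketch

import (
	"bufio"
	"io"
	"sync"
)

// enhancedWriter matches the helper methods the deleted writers need;
// *bufio.Writer satisfies it.
type enhancedWriter interface {
	io.Writer
	WriteRune(r rune) (n int, err error)
	WriteString(s string) (n int, err error)
	WriteByte(c byte) error
}

var bufPool = sync.Pool{
	New: func() interface{} { return bufio.NewWriter(io.Discard) },
}

// withEnhancedWriter upgrades out if possible, else wraps it in a pooled
// bufio.Writer that is flushed and returned to the pool afterwards.
func withEnhancedWriter(out io.Writer, fn func(enhancedWriter) error) (err error) {
	w, ok := out.(enhancedWriter)
	if !ok {
		b := bufPool.Get().(*bufio.Writer)
		b.Reset(out)
		w = b
		defer func() {
			if fErr := b.Flush(); err == nil {
				err = fErr
			}
			bufPool.Put(b)
		}()
	}
	return fn(w)
}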
- if in.Help != nil { - n, err = w.WriteString("# HELP ") - written += n - if err != nil { - return - } - n, err = w.WriteString(name) - written += n - if err != nil { - return - } - err = w.WriteByte(' ') - written++ - if err != nil { - return - } - n, err = writeEscapedString(w, *in.Help, false) - written += n - if err != nil { - return - } - err = w.WriteByte('\n') - written++ - if err != nil { - return - } - } - n, err = w.WriteString("# TYPE ") - written += n - if err != nil { - return - } - n, err = w.WriteString(name) - written += n - if err != nil { - return - } - metricType := in.GetType() - switch metricType { - case dto.MetricType_COUNTER: - n, err = w.WriteString(" counter\n") - case dto.MetricType_GAUGE: - n, err = w.WriteString(" gauge\n") - case dto.MetricType_SUMMARY: - n, err = w.WriteString(" summary\n") - case dto.MetricType_UNTYPED: - n, err = w.WriteString(" untyped\n") - case dto.MetricType_HISTOGRAM: - n, err = w.WriteString(" histogram\n") - default: - return written, fmt.Errorf("unknown metric type %s", metricType.String()) - } - written += n - if err != nil { - return - } - - // Finally the samples, one line for each. - for _, metric := range in.Metric { - switch metricType { - case dto.MetricType_COUNTER: - if metric.Counter == nil { - return written, fmt.Errorf( - "expected counter in metric %s %s", name, metric, - ) - } - n, err = writeSample( - w, name, "", metric, "", 0, - metric.Counter.GetValue(), - ) - case dto.MetricType_GAUGE: - if metric.Gauge == nil { - return written, fmt.Errorf( - "expected gauge in metric %s %s", name, metric, - ) - } - n, err = writeSample( - w, name, "", metric, "", 0, - metric.Gauge.GetValue(), - ) - case dto.MetricType_UNTYPED: - if metric.Untyped == nil { - return written, fmt.Errorf( - "expected untyped in metric %s %s", name, metric, - ) - } - n, err = writeSample( - w, name, "", metric, "", 0, - metric.Untyped.GetValue(), - ) - case dto.MetricType_SUMMARY: - if metric.Summary == nil { - return written, fmt.Errorf( - "expected summary in metric %s %s", name, metric, - ) - } - for _, q := range metric.Summary.Quantile { - n, err = writeSample( - w, name, "", metric, - model.QuantileLabel, q.GetQuantile(), - q.GetValue(), - ) - written += n - if err != nil { - return - } - } - n, err = writeSample( - w, name, "_sum", metric, "", 0, - metric.Summary.GetSampleSum(), - ) - written += n - if err != nil { - return - } - n, err = writeSample( - w, name, "_count", metric, "", 0, - float64(metric.Summary.GetSampleCount()), - ) - case dto.MetricType_HISTOGRAM: - if metric.Histogram == nil { - return written, fmt.Errorf( - "expected histogram in metric %s %s", name, metric, - ) - } - infSeen := false - for _, b := range metric.Histogram.Bucket { - n, err = writeSample( - w, name, "_bucket", metric, - model.BucketLabel, b.GetUpperBound(), - float64(b.GetCumulativeCount()), - ) - written += n - if err != nil { - return - } - if math.IsInf(b.GetUpperBound(), +1) { - infSeen = true - } - } - if !infSeen { - n, err = writeSample( - w, name, "_bucket", metric, - model.BucketLabel, math.Inf(+1), - float64(metric.Histogram.GetSampleCount()), - ) - written += n - if err != nil { - return - } - } - n, err = writeSample( - w, name, "_sum", metric, "", 0, - metric.Histogram.GetSampleSum(), - ) - written += n - if err != nil { - return - } - n, err = writeSample( - w, name, "_count", metric, "", 0, - float64(metric.Histogram.GetSampleCount()), - ) - default: - return written, fmt.Errorf( - "unexpected type in metric %s %s", name, metric, - ) - } - 
written += n - if err != nil { - return - } - } - return -} - -// writeSample writes a single sample in text format to w, given the metric -// name, the metric proto message itself, optionally an additional label name -// with a float64 value (use empty string as label name if not required), and -// the value. The function returns the number of bytes written and any error -// encountered. -func writeSample( - w enhancedWriter, - name, suffix string, - metric *dto.Metric, - additionalLabelName string, additionalLabelValue float64, - value float64, -) (int, error) { - var written int - n, err := w.WriteString(name) - written += n - if err != nil { - return written, err - } - if suffix != "" { - n, err = w.WriteString(suffix) - written += n - if err != nil { - return written, err - } - } - n, err = writeLabelPairs( - w, metric.Label, additionalLabelName, additionalLabelValue, - ) - written += n - if err != nil { - return written, err - } - err = w.WriteByte(' ') - written++ - if err != nil { - return written, err - } - n, err = writeFloat(w, value) - written += n - if err != nil { - return written, err - } - if metric.TimestampMs != nil { - err = w.WriteByte(' ') - written++ - if err != nil { - return written, err - } - n, err = writeInt(w, *metric.TimestampMs) - written += n - if err != nil { - return written, err - } - } - err = w.WriteByte('\n') - written++ - if err != nil { - return written, err - } - return written, nil -} - -// writeLabelPairs converts a slice of LabelPair proto messages plus the -// explicitly given additional label pair into text formatted as required by the -// text format and writes it to 'w'. An empty slice in combination with an empty -// string 'additionalLabelName' results in nothing being written. Otherwise, the -// label pairs are written, escaped as required by the text format, and enclosed -// in '{...}'. The function returns the number of bytes written and any error -// encountered. -func writeLabelPairs( - w enhancedWriter, - in []*dto.LabelPair, - additionalLabelName string, additionalLabelValue float64, -) (int, error) { - if len(in) == 0 && additionalLabelName == "" { - return 0, nil - } - var ( - written int - separator byte = '{' - ) - for _, lp := range in { - err := w.WriteByte(separator) - written++ - if err != nil { - return written, err - } - n, err := w.WriteString(lp.GetName()) - written += n - if err != nil { - return written, err - } - n, err = w.WriteString(`="`) - written += n - if err != nil { - return written, err - } - n, err = writeEscapedString(w, lp.GetValue(), true) - written += n - if err != nil { - return written, err - } - err = w.WriteByte('"') - written++ - if err != nil { - return written, err - } - separator = ',' - } - if additionalLabelName != "" { - err := w.WriteByte(separator) - written++ - if err != nil { - return written, err - } - n, err := w.WriteString(additionalLabelName) - written += n - if err != nil { - return written, err - } - n, err = w.WriteString(`="`) - written += n - if err != nil { - return written, err - } - n, err = writeFloat(w, additionalLabelValue) - written += n - if err != nil { - return written, err - } - err = w.WriteByte('"') - written++ - if err != nil { - return written, err - } - } - err := w.WriteByte('}') - written++ - if err != nil { - return written, err - } - return written, nil -} - -// writeEscapedString replaces '\' by '\\', new line character by '\n', and - if -// includeDoubleQuote is true - '"' by '\"'. 
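The escaping contract in the doc comment above is small enough to restate: HELP docstrings escape backslash and newline, while quoted label values additionally escape the double quote. A sketch using the same strings.NewReplacer pairs that follow just below in the original:

package main

import (
	"fmt"
	"strings"
)

var (
	// helpEscaper handles HELP docstrings; labelEscaper handles quoted
	// label values (note the extra `"` rule).
	helpEscaper  = strings.NewReplacer(`\`, `\\`, "\n", `\n`)
	labelEscaper = strings.NewReplacer(`\`, `\\`, "\n", `\n`, `"`, `\"`)
)

func main() {
	fmt.Println(helpEscaper.Replace("first line\nsecond line"))
	// first line\nsecond line
	fmt.Println(labelEscaper.Replace(`path "C:\tmp"`))
	// path \"C:\\tmp\"
}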
-var ( - escaper = strings.NewReplacer("\\", `\\`, "\n", `\n`) - quotedEscaper = strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`) -) - -func writeEscapedString(w enhancedWriter, v string, includeDoubleQuote bool) (int, error) { - if includeDoubleQuote { - return quotedEscaper.WriteString(w, v) - } - return escaper.WriteString(w, v) -} - -// writeFloat is equivalent to fmt.Fprint with a float64 argument but hardcodes -// a few common cases for increased efficiency. For non-hardcoded cases, it uses -// strconv.AppendFloat to avoid allocations, similar to writeInt. -func writeFloat(w enhancedWriter, f float64) (int, error) { - switch { - case f == 1: - return 1, w.WriteByte('1') - case f == 0: - return 1, w.WriteByte('0') - case f == -1: - return w.WriteString("-1") - case math.IsNaN(f): - return w.WriteString("NaN") - case math.IsInf(f, +1): - return w.WriteString("+Inf") - case math.IsInf(f, -1): - return w.WriteString("-Inf") - default: - bp := numBufPool.Get().(*[]byte) - *bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64) - written, err := w.Write(*bp) - numBufPool.Put(bp) - return written, err - } -} - -// writeInt is equivalent to fmt.Fprint with an int64 argument but uses -// strconv.AppendInt with a byte slice taken from a sync.Pool to avoid -// allocations. -func writeInt(w enhancedWriter, i int64) (int, error) { - bp := numBufPool.Get().(*[]byte) - *bp = strconv.AppendInt((*bp)[:0], i, 10) - written, err := w.Write(*bp) - numBufPool.Put(bp) - return written, err -} diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go deleted file mode 100644 index b6079b31..00000000 --- a/vendor/github.com/prometheus/common/expfmt/text_parse.go +++ /dev/null @@ -1,775 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package expfmt - -import ( - "bufio" - "bytes" - "fmt" - "io" - "math" - "strconv" - "strings" - - dto "github.com/prometheus/client_model/go" - - "github.com/golang/protobuf/proto" - "github.com/prometheus/common/model" -) - -// A stateFn is a function that represents a state in a state machine. By -// executing it, the state is progressed to the next state. The stateFn returns -// another stateFn, which represents the new state. The end state is represented -// by nil. -type stateFn func() stateFn - -// ParseError signals errors while parsing the simple and flat text-based -// exchange format. -type ParseError struct { - Line int - Msg string -} - -// Error implements the error interface. -func (e ParseError) Error() string { - return fmt.Sprintf("text format parsing error in line %d: %s", e.Line, e.Msg) -} - -// TextParser is used to parse the simple and flat text-based exchange format. Its -// zero value is ready to use. -type TextParser struct { - metricFamiliesByName map[string]*dto.MetricFamily - buf *bufio.Reader // Where the parsed input is read through. - err error // Most recent error. 
- lineCount int // Tracks the line count for error messages. - currentByte byte // The most recent byte read. - currentToken bytes.Buffer // Re-used each time a token has to be gathered from multiple bytes. - currentMF *dto.MetricFamily - currentMetric *dto.Metric - currentLabelPair *dto.LabelPair - - // The remaining member variables are only used for summaries/histograms. - currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le' - // Summary specific. - summaries map[uint64]*dto.Metric // Key is created with LabelsToSignature. - currentQuantile float64 - // Histogram specific. - histograms map[uint64]*dto.Metric // Key is created with LabelsToSignature. - currentBucket float64 - // These tell us if the currently processed line ends on '_count' or - // '_sum' respectively and belong to a summary/histogram, representing the sample - // count and sum of that summary/histogram. - currentIsSummaryCount, currentIsSummarySum bool - currentIsHistogramCount, currentIsHistogramSum bool -} - -// TextToMetricFamilies reads 'in' as the simple and flat text-based exchange -// format and creates MetricFamily proto messages. It returns the MetricFamily -// proto messages in a map where the metric names are the keys, along with any -// error encountered. -// -// If the input contains duplicate metrics (i.e. lines with the same metric name -// and exactly the same label set), the resulting MetricFamily will contain -// duplicate Metric proto messages. Similar is true for duplicate label -// names. Checks for duplicates have to be performed separately, if required. -// Also note that neither the metrics within each MetricFamily are sorted nor -// the label pairs within each Metric. Sorting is not required for the most -// frequent use of this method, which is sample ingestion in the Prometheus -// server. However, for presentation purposes, you might want to sort the -// metrics, and in some cases, you must sort the labels, e.g. for consumption by -// the metric family injection hook of the Prometheus registry. -// -// Summaries and histograms are rather special beasts. You would probably not -// use them in the simple text format anyway. This method can deal with -// summaries and histograms if they are presented in exactly the way the -// text.Create function creates them. -// -// This method must not be called concurrently. If you want to parse different -// input concurrently, instantiate a separate Parser for each goroutine. -func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricFamily, error) { - p.reset(in) - for nextState := p.startOfLine; nextState != nil; nextState = nextState() { - // Magic happens here... - } - // Get rid of empty metric families. - for k, mf := range p.metricFamiliesByName { - if len(mf.GetMetric()) == 0 { - delete(p.metricFamiliesByName, k) - } - } - // If p.err is io.EOF now, we have run into a premature end of the input - // stream. Turn this error into something nicer and more - // meaningful. (io.EOF is often used as a signal for the legitimate end - // of an input stream.) 
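TextToMetricFamilies, documented above, pairs naturally with the ExtractSamples helper from the deleted decode.go. A round-trip sketch, assuming the pre-removal vendored API (including a DecodeOptions struct whose Timestamp field supplies the default sample time); the input is the canonical counter example:

package main

import (
	"fmt"
	"strings"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
	"github.com/prometheus/common/model"
)

func main() {
	input := "# HELP http_requests_total Total requests.\n" +
		"# TYPE http_requests_total counter\n" +
		"http_requests_total{code=\"200\"} 1027 1395066363000\n"

	// The zero value of TextParser is ready to use.
	var parser expfmt.TextParser
	fams, err := parser.TextToMetricFamilies(strings.NewReader(input))
	if err != nil {
		panic(err)
	}

	// Flatten the map and hand it to the sample extraction shown earlier
	// in this diff; samples without a timestamp get DecodeOptions.Timestamp.
	all := make([]*dto.MetricFamily, 0, len(fams))
	for _, mf := range fams {
		all = append(all, mf)
	}
	vec, err := expfmt.ExtractSamples(&expfmt.DecodeOptions{Timestamp: model.Now()}, all...)
	if err != nil {
		panic(err)
	}
	fmt.Println(vec) // one counter sample carrying the parsed timestamp
}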
- if p.err == io.EOF { - p.parseError("unexpected end of input stream") - } - return p.metricFamiliesByName, p.err -} - -func (p *TextParser) reset(in io.Reader) { - p.metricFamiliesByName = map[string]*dto.MetricFamily{} - if p.buf == nil { - p.buf = bufio.NewReader(in) - } else { - p.buf.Reset(in) - } - p.err = nil - p.lineCount = 0 - if p.summaries == nil || len(p.summaries) > 0 { - p.summaries = map[uint64]*dto.Metric{} - } - if p.histograms == nil || len(p.histograms) > 0 { - p.histograms = map[uint64]*dto.Metric{} - } - p.currentQuantile = math.NaN() - p.currentBucket = math.NaN() -} - -// startOfLine represents the state where the next byte read from p.buf is the -// start of a line (or whitespace leading up to it). -func (p *TextParser) startOfLine() stateFn { - p.lineCount++ - if p.skipBlankTab(); p.err != nil { - // End of input reached. This is the only case where - // that is not an error but a signal that we are done. - p.err = nil - return nil - } - switch p.currentByte { - case '#': - return p.startComment - case '\n': - return p.startOfLine // Empty line, start the next one. - } - return p.readingMetricName -} - -// startComment represents the state where the next byte read from p.buf is the -// start of a comment (or whitespace leading up to it). -func (p *TextParser) startComment() stateFn { - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentByte == '\n' { - return p.startOfLine - } - if p.readTokenUntilWhitespace(); p.err != nil { - return nil // Unexpected end of input. - } - // If we have hit the end of line already, there is nothing left - // to do. This is not considered a syntax error. - if p.currentByte == '\n' { - return p.startOfLine - } - keyword := p.currentToken.String() - if keyword != "HELP" && keyword != "TYPE" { - // Generic comment, ignore by fast forwarding to end of line. - for p.currentByte != '\n' { - if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil { - return nil // Unexpected end of input. - } - } - return p.startOfLine - } - // There is something. Next has to be a metric name. - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.readTokenAsMetricName(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentByte == '\n' { - // At the end of the line already. - // Again, this is not considered a syntax error. - return p.startOfLine - } - if !isBlankOrTab(p.currentByte) { - p.parseError("invalid metric name in comment") - return nil - } - p.setOrCreateCurrentMF() - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentByte == '\n' { - // At the end of the line already. - // Again, this is not considered a syntax error. - return p.startOfLine - } - switch keyword { - case "HELP": - return p.readingHelp - case "TYPE": - return p.readingType - } - panic(fmt.Sprintf("code error: unexpected keyword %q", keyword)) -} - -// readingMetricName represents the state where the last byte read (now in -// p.currentByte) is the first byte of a metric name. -func (p *TextParser) readingMetricName() stateFn { - if p.readTokenAsMetricName(); p.err != nil { - return nil - } - if p.currentToken.Len() == 0 { - p.parseError("invalid metric name") - return nil - } - p.setOrCreateCurrentMF() - // Now is the time to fix the type if it hasn't happened yet. 
- if p.currentMF.Type == nil { - p.currentMF.Type = dto.MetricType_UNTYPED.Enum() - } - p.currentMetric = &dto.Metric{} - // Do not append the newly created currentMetric to - // currentMF.Metric right now. First wait if this is a summary, - // and the metric exists already, which we can only know after - // having read all the labels. - if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - return p.readingLabels -} - -// readingLabels represents the state where the last byte read (now in -// p.currentByte) is either the first byte of the label set (i.e. a '{'), or the -// first byte of the value (otherwise). -func (p *TextParser) readingLabels() stateFn { - // Summaries/histograms are special. We have to reset the - // currentLabels map, currentQuantile and currentBucket before starting to - // read labels. - if p.currentMF.GetType() == dto.MetricType_SUMMARY || p.currentMF.GetType() == dto.MetricType_HISTOGRAM { - p.currentLabels = map[string]string{} - p.currentLabels[string(model.MetricNameLabel)] = p.currentMF.GetName() - p.currentQuantile = math.NaN() - p.currentBucket = math.NaN() - } - if p.currentByte != '{' { - return p.readingValue - } - return p.startLabelName -} - -// startLabelName represents the state where the next byte read from p.buf is -// the start of a label name (or whitespace leading up to it). -func (p *TextParser) startLabelName() stateFn { - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentByte == '}' { - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - return p.readingValue - } - if p.readTokenAsLabelName(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentToken.Len() == 0 { - p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName())) - return nil - } - p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())} - if p.currentLabelPair.GetName() == string(model.MetricNameLabel) { - p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel)) - return nil - } - // Special summary/histogram treatment. Don't add 'quantile' and 'le' - // labels to 'real' labels. - if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) && - !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) { - p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair) - } - if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentByte != '=' { - p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte)) - return nil - } - // Check for duplicate label names. - labels := make(map[string]struct{}) - for _, l := range p.currentMetric.Label { - lName := l.GetName() - if _, exists := labels[lName]; !exists { - labels[lName] = struct{}{} - } else { - p.parseError(fmt.Sprintf("duplicate label names for metric %q", p.currentMF.GetName())) - return nil - } - } - return p.startLabelValue -} - -// startLabelValue represents the state where the next byte read from p.buf is -// the start of a (quoted) label value (or whitespace leading up to it). -func (p *TextParser) startLabelValue() stateFn { - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. 
- } - if p.currentByte != '"' { - p.parseError(fmt.Sprintf("expected '\"' at start of label value, found %q", p.currentByte)) - return nil - } - if p.readTokenAsLabelValue(); p.err != nil { - return nil - } - if !model.LabelValue(p.currentToken.String()).IsValid() { - p.parseError(fmt.Sprintf("invalid label value %q", p.currentToken.String())) - return nil - } - p.currentLabelPair.Value = proto.String(p.currentToken.String()) - // Special treatment of summaries: - // - Quantile labels are special, will result in dto.Quantile later. - // - Other labels have to be added to currentLabels for signature calculation. - if p.currentMF.GetType() == dto.MetricType_SUMMARY { - if p.currentLabelPair.GetName() == model.QuantileLabel { - if p.currentQuantile, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil { - // Create a more helpful error message. - p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue())) - return nil - } - } else { - p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue() - } - } - // Similar special treatment of histograms. - if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { - if p.currentLabelPair.GetName() == model.BucketLabel { - if p.currentBucket, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil { - // Create a more helpful error message. - p.parseError(fmt.Sprintf("expected float as value for 'le' label, got %q", p.currentLabelPair.GetValue())) - return nil - } - } else { - p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue() - } - } - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - switch p.currentByte { - case ',': - return p.startLabelName - - case '}': - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - return p.readingValue - default: - p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.GetValue())) - return nil - } -} - -// readingValue represents the state where the last byte read (now in -// p.currentByte) is the first byte of the sample value (i.e. a float). -func (p *TextParser) readingValue() stateFn { - // When we are here, we have read all the labels, so for the - // special case of a summary/histogram, we can finally find out - // if the metric already exists. - if p.currentMF.GetType() == dto.MetricType_SUMMARY { - signature := model.LabelsToSignature(p.currentLabels) - if summary := p.summaries[signature]; summary != nil { - p.currentMetric = summary - } else { - p.summaries[signature] = p.currentMetric - p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) - } - } else if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { - signature := model.LabelsToSignature(p.currentLabels) - if histogram := p.histograms[signature]; histogram != nil { - p.currentMetric = histogram - } else { - p.histograms[signature] = p.currentMetric - p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) - } - } else { - p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) - } - if p.readTokenUntilWhitespace(); p.err != nil { - return nil // Unexpected end of input. - } - value, err := parseFloat(p.currentToken.String()) - if err != nil { - // Create a more helpful error message. 
- p.parseError(fmt.Sprintf("expected float as value, got %q", p.currentToken.String())) - return nil - } - switch p.currentMF.GetType() { - case dto.MetricType_COUNTER: - p.currentMetric.Counter = &dto.Counter{Value: proto.Float64(value)} - case dto.MetricType_GAUGE: - p.currentMetric.Gauge = &dto.Gauge{Value: proto.Float64(value)} - case dto.MetricType_UNTYPED: - p.currentMetric.Untyped = &dto.Untyped{Value: proto.Float64(value)} - case dto.MetricType_SUMMARY: - // *sigh* - if p.currentMetric.Summary == nil { - p.currentMetric.Summary = &dto.Summary{} - } - switch { - case p.currentIsSummaryCount: - p.currentMetric.Summary.SampleCount = proto.Uint64(uint64(value)) - case p.currentIsSummarySum: - p.currentMetric.Summary.SampleSum = proto.Float64(value) - case !math.IsNaN(p.currentQuantile): - p.currentMetric.Summary.Quantile = append( - p.currentMetric.Summary.Quantile, - &dto.Quantile{ - Quantile: proto.Float64(p.currentQuantile), - Value: proto.Float64(value), - }, - ) - } - case dto.MetricType_HISTOGRAM: - // *sigh* - if p.currentMetric.Histogram == nil { - p.currentMetric.Histogram = &dto.Histogram{} - } - switch { - case p.currentIsHistogramCount: - p.currentMetric.Histogram.SampleCount = proto.Uint64(uint64(value)) - case p.currentIsHistogramSum: - p.currentMetric.Histogram.SampleSum = proto.Float64(value) - case !math.IsNaN(p.currentBucket): - p.currentMetric.Histogram.Bucket = append( - p.currentMetric.Histogram.Bucket, - &dto.Bucket{ - UpperBound: proto.Float64(p.currentBucket), - CumulativeCount: proto.Uint64(uint64(value)), - }, - ) - } - default: - p.err = fmt.Errorf("unexpected type for metric name %q", p.currentMF.GetName()) - } - if p.currentByte == '\n' { - return p.startOfLine - } - return p.startTimestamp -} - -// startTimestamp represents the state where the next byte read from p.buf is -// the start of the timestamp (or whitespace leading up to it). -func (p *TextParser) startTimestamp() stateFn { - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.readTokenUntilWhitespace(); p.err != nil { - return nil // Unexpected end of input. - } - timestamp, err := strconv.ParseInt(p.currentToken.String(), 10, 64) - if err != nil { - // Create a more helpful error message. - p.parseError(fmt.Sprintf("expected integer as timestamp, got %q", p.currentToken.String())) - return nil - } - p.currentMetric.TimestampMs = proto.Int64(timestamp) - if p.readTokenUntilNewline(false); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentToken.Len() > 0 { - p.parseError(fmt.Sprintf("spurious string after timestamp: %q", p.currentToken.String())) - return nil - } - return p.startOfLine -} - -// readingHelp represents the state where the last byte read (now in -// p.currentByte) is the first byte of the docstring after 'HELP'. -func (p *TextParser) readingHelp() stateFn { - if p.currentMF.Help != nil { - p.parseError(fmt.Sprintf("second HELP line for metric name %q", p.currentMF.GetName())) - return nil - } - // Rest of line is the docstring. - if p.readTokenUntilNewline(true); p.err != nil { - return nil // Unexpected end of input. - } - p.currentMF.Help = proto.String(p.currentToken.String()) - return p.startOfLine -} - -// readingType represents the state where the last byte read (now in -// p.currentByte) is the first byte of the type hint after 'HELP'. 
-func (p *TextParser) readingType() stateFn { - if p.currentMF.Type != nil { - p.parseError(fmt.Sprintf("second TYPE line for metric name %q, or TYPE reported after samples", p.currentMF.GetName())) - return nil - } - // Rest of line is the type. - if p.readTokenUntilNewline(false); p.err != nil { - return nil // Unexpected end of input. - } - metricType, ok := dto.MetricType_value[strings.ToUpper(p.currentToken.String())] - if !ok { - p.parseError(fmt.Sprintf("unknown metric type %q", p.currentToken.String())) - return nil - } - p.currentMF.Type = dto.MetricType(metricType).Enum() - return p.startOfLine -} - -// parseError sets p.err to a ParseError at the current line with the given -// message. -func (p *TextParser) parseError(msg string) { - p.err = ParseError{ - Line: p.lineCount, - Msg: msg, - } -} - -// skipBlankTab reads (and discards) bytes from p.buf until it encounters a byte -// that is neither ' ' nor '\t'. That byte is left in p.currentByte. -func (p *TextParser) skipBlankTab() { - for { - if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil || !isBlankOrTab(p.currentByte) { - return - } - } -} - -// skipBlankTabIfCurrentBlankTab works exactly as skipBlankTab but doesn't do -// anything if p.currentByte is neither ' ' nor '\t'. -func (p *TextParser) skipBlankTabIfCurrentBlankTab() { - if isBlankOrTab(p.currentByte) { - p.skipBlankTab() - } -} - -// readTokenUntilWhitespace copies bytes from p.buf into p.currentToken. The -// first byte considered is the byte already read (now in p.currentByte). The -// first whitespace byte encountered is still copied into p.currentByte, but not -// into p.currentToken. -func (p *TextParser) readTokenUntilWhitespace() { - p.currentToken.Reset() - for p.err == nil && !isBlankOrTab(p.currentByte) && p.currentByte != '\n' { - p.currentToken.WriteByte(p.currentByte) - p.currentByte, p.err = p.buf.ReadByte() - } -} - -// readTokenUntilNewline copies bytes from p.buf into p.currentToken. The first -// byte considered is the byte already read (now in p.currentByte). The first -// newline byte encountered is still copied into p.currentByte, but not into -// p.currentToken. If recognizeEscapeSequence is true, two escape sequences are -// recognized: '\\' translates into '\', and '\n' into a line-feed character. -// All other escape sequences are invalid and cause an error. -func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) { - p.currentToken.Reset() - escaped := false - for p.err == nil { - if recognizeEscapeSequence && escaped { - switch p.currentByte { - case '\\': - p.currentToken.WriteByte(p.currentByte) - case 'n': - p.currentToken.WriteByte('\n') - default: - p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) - return - } - escaped = false - } else { - switch p.currentByte { - case '\n': - return - case '\\': - escaped = true - default: - p.currentToken.WriteByte(p.currentByte) - } - } - p.currentByte, p.err = p.buf.ReadByte() - } -} - -// readTokenAsMetricName copies a metric name from p.buf into p.currentToken. -// The first byte considered is the byte already read (now in p.currentByte). -// The first byte not part of a metric name is still copied into p.currentByte, -// but not into p.currentToken. 
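readTokenUntilNewline above accepts only the escape pairs `\\` and `\n`; label values (handled by readTokenAsLabelValue, below) additionally allow `\"`. A standalone decoder sketch of the label-value rule:

package main

import (
	"fmt"
	"strings"
)

// unescapeLabelValue applies the same switch as the deleted
// readTokenAsLabelValue: only \", \\ and \n are legal escapes.
func unescapeLabelValue(s string) (string, error) {
	var b strings.Builder
	escaped := false
	for i := 0; i < len(s); i++ {
		c := s[i]
		if escaped {
			switch c {
			case '"', '\\':
				b.WriteByte(c)
			case 'n':
				b.WriteByte('\n')
			default:
				return "", fmt.Errorf("invalid escape sequence '\\%c'", c)
			}
			escaped = false
			continue
		}
		if c == '\\' {
			escaped = true
			continue
		}
		b.WriteByte(c)
	}
	return b.String(), nil
}

func main() {
	v, err := unescapeLabelValue(`say \"hi\"\nbye`)
	fmt.Printf("%q %v\n", v, err) // "say \"hi\"\nbye" <nil>
}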
-func (p *TextParser) readTokenAsMetricName() { - p.currentToken.Reset() - if !isValidMetricNameStart(p.currentByte) { - return - } - for { - p.currentToken.WriteByte(p.currentByte) - p.currentByte, p.err = p.buf.ReadByte() - if p.err != nil || !isValidMetricNameContinuation(p.currentByte) { - return - } - } -} - -// readTokenAsLabelName copies a label name from p.buf into p.currentToken. -// The first byte considered is the byte already read (now in p.currentByte). -// The first byte not part of a label name is still copied into p.currentByte, -// but not into p.currentToken. -func (p *TextParser) readTokenAsLabelName() { - p.currentToken.Reset() - if !isValidLabelNameStart(p.currentByte) { - return - } - for { - p.currentToken.WriteByte(p.currentByte) - p.currentByte, p.err = p.buf.ReadByte() - if p.err != nil || !isValidLabelNameContinuation(p.currentByte) { - return - } - } -} - -// readTokenAsLabelValue copies a label value from p.buf into p.currentToken. -// In contrast to the other 'readTokenAs...' functions, which start with the -// last read byte in p.currentByte, this method ignores p.currentByte and starts -// with reading a new byte from p.buf. The first byte not part of a label value -// is still copied into p.currentByte, but not into p.currentToken. -func (p *TextParser) readTokenAsLabelValue() { - p.currentToken.Reset() - escaped := false - for { - if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil { - return - } - if escaped { - switch p.currentByte { - case '"', '\\': - p.currentToken.WriteByte(p.currentByte) - case 'n': - p.currentToken.WriteByte('\n') - default: - p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) - return - } - escaped = false - continue - } - switch p.currentByte { - case '"': - return - case '\n': - p.parseError(fmt.Sprintf("label value %q contains unescaped new-line", p.currentToken.String())) - return - case '\\': - escaped = true - default: - p.currentToken.WriteByte(p.currentByte) - } - } -} - -func (p *TextParser) setOrCreateCurrentMF() { - p.currentIsSummaryCount = false - p.currentIsSummarySum = false - p.currentIsHistogramCount = false - p.currentIsHistogramSum = false - name := p.currentToken.String() - if p.currentMF = p.metricFamiliesByName[name]; p.currentMF != nil { - return - } - // Try out if this is a _sum or _count for a summary/histogram. 
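The suffix logic applied just below folds `_count`/`_sum` samples into an existing summary or histogram family, and `_bucket` samples into a histogram family. A sketch of the name stripping, collapsed into one helper for brevity (the vendored code keeps separate summaryMetricName and histogramMetricName variants, since summaries have no `_bucket` series):

package main

import (
	"fmt"
	"strings"
)

// baseFamilyName strips the summary/histogram sample suffixes so the
// sample can be matched against its parent metric family.
func baseFamilyName(name string) string {
	for _, suffix := range []string{"_count", "_sum", "_bucket"} {
		if strings.HasSuffix(name, suffix) && len(name) > len(suffix) {
			return strings.TrimSuffix(name, suffix)
		}
	}
	return name
}

func main() {
	fmt.Println(baseFamilyName("rpc_latency_bucket")) // rpc_latency
	fmt.Println(baseFamilyName("rpc_latency_sum"))    // rpc_latency
	fmt.Println(baseFamilyName("up"))                 // up
}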
- summaryName := summaryMetricName(name) - if p.currentMF = p.metricFamiliesByName[summaryName]; p.currentMF != nil { - if p.currentMF.GetType() == dto.MetricType_SUMMARY { - if isCount(name) { - p.currentIsSummaryCount = true - } - if isSum(name) { - p.currentIsSummarySum = true - } - return - } - } - histogramName := histogramMetricName(name) - if p.currentMF = p.metricFamiliesByName[histogramName]; p.currentMF != nil { - if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { - if isCount(name) { - p.currentIsHistogramCount = true - } - if isSum(name) { - p.currentIsHistogramSum = true - } - return - } - } - p.currentMF = &dto.MetricFamily{Name: proto.String(name)} - p.metricFamiliesByName[name] = p.currentMF -} - -func isValidLabelNameStart(b byte) bool { - return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' -} - -func isValidLabelNameContinuation(b byte) bool { - return isValidLabelNameStart(b) || (b >= '0' && b <= '9') -} - -func isValidMetricNameStart(b byte) bool { - return isValidLabelNameStart(b) || b == ':' -} - -func isValidMetricNameContinuation(b byte) bool { - return isValidLabelNameContinuation(b) || b == ':' -} - -func isBlankOrTab(b byte) bool { - return b == ' ' || b == '\t' -} - -func isCount(name string) bool { - return len(name) > 6 && name[len(name)-6:] == "_count" -} - -func isSum(name string) bool { - return len(name) > 4 && name[len(name)-4:] == "_sum" -} - -func isBucket(name string) bool { - return len(name) > 7 && name[len(name)-7:] == "_bucket" -} - -func summaryMetricName(name string) string { - switch { - case isCount(name): - return name[:len(name)-6] - case isSum(name): - return name[:len(name)-4] - default: - return name - } -} - -func histogramMetricName(name string) string { - switch { - case isCount(name): - return name[:len(name)-6] - case isSum(name): - return name[:len(name)-4] - case isBucket(name): - return name[:len(name)-7] - default: - return name - } -} - -func parseFloat(s string) (float64, error) { - if strings.ContainsAny(s, "pP_") { - return 0, fmt.Errorf("unsupported character in float") - } - return strconv.ParseFloat(s, 64) -} diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt deleted file mode 100644 index 7723656d..00000000 --- a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt +++ /dev/null @@ -1,67 +0,0 @@ -PACKAGE - -package goautoneg -import "bitbucket.org/ww/goautoneg" - -HTTP Content-Type Autonegotiation. - -The functions in this package implement the behaviour specified in -http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html - -Copyright (c) 2011, Open Knowledge Foundation Ltd. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - - Neither the name of the Open Knowledge Foundation Ltd. nor the - names of its contributors may be used to endorse or promote - products derived from this software without specific prior written - permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -FUNCTIONS - -func Negotiate(header string, alternatives []string) (content_type string) -Negotiate the most appropriate content_type given the accept header -and a list of alternatives. - -func ParseAccept(header string) (accept []Accept) -Parse an Accept Header string returning a sorted list -of clauses - - -TYPES - -type Accept struct { - Type, SubType string - Q float32 - Params map[string]string -} -Structure to represent a clause in an HTTP Accept Header - - -SUBDIRECTORIES - - .hg diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go deleted file mode 100644 index 26e92288..00000000 --- a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go +++ /dev/null @@ -1,162 +0,0 @@ -/* -Copyright (c) 2011, Open Knowledge Foundation Ltd. -All rights reserved. - -HTTP Content-Type Autonegotiation. - -The functions in this package implement the behaviour specified in -http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - - Neither the name of the Open Knowledge Foundation Ltd. nor the - names of its contributors may be used to endorse or promote - products derived from this software without specific prior written - permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- - -*/ -package goautoneg - -import ( - "sort" - "strconv" - "strings" -) - -// Structure to represent a clause in an HTTP Accept Header -type Accept struct { - Type, SubType string - Q float64 - Params map[string]string -} - -// For internal use, so that we can use the sort interface -type accept_slice []Accept - -func (accept accept_slice) Len() int { - slice := []Accept(accept) - return len(slice) -} - -func (accept accept_slice) Less(i, j int) bool { - slice := []Accept(accept) - ai, aj := slice[i], slice[j] - if ai.Q > aj.Q { - return true - } - if ai.Type != "*" && aj.Type == "*" { - return true - } - if ai.SubType != "*" && aj.SubType == "*" { - return true - } - return false -} - -func (accept accept_slice) Swap(i, j int) { - slice := []Accept(accept) - slice[i], slice[j] = slice[j], slice[i] -} - -// Parse an Accept Header string returning a sorted list -// of clauses -func ParseAccept(header string) (accept []Accept) { - parts := strings.Split(header, ",") - accept = make([]Accept, 0, len(parts)) - for _, part := range parts { - part := strings.Trim(part, " ") - - a := Accept{} - a.Params = make(map[string]string) - a.Q = 1.0 - - mrp := strings.Split(part, ";") - - media_range := mrp[0] - sp := strings.Split(media_range, "/") - a.Type = strings.Trim(sp[0], " ") - - switch { - case len(sp) == 1 && a.Type == "*": - a.SubType = "*" - case len(sp) == 2: - a.SubType = strings.Trim(sp[1], " ") - default: - continue - } - - if len(mrp) == 1 { - accept = append(accept, a) - continue - } - - for _, param := range mrp[1:] { - sp := strings.SplitN(param, "=", 2) - if len(sp) != 2 { - continue - } - token := strings.Trim(sp[0], " ") - if token == "q" { - a.Q, _ = strconv.ParseFloat(sp[1], 32) - } else { - a.Params[token] = strings.Trim(sp[1], " ") - } - } - - accept = append(accept, a) - } - - slice := accept_slice(accept) - sort.Sort(slice) - - return -} - -// Negotiate the most appropriate content_type given the accept header -// and a list of alternatives. -func Negotiate(header string, alternatives []string) (content_type string) { - asp := make([][]string, 0, len(alternatives)) - for _, ctype := range alternatives { - asp = append(asp, strings.SplitN(ctype, "/", 2)) - } - for _, clause := range ParseAccept(header) { - for i, ctsp := range asp { - if clause.Type == ctsp[0] && clause.SubType == ctsp[1] { - content_type = alternatives[i] - return - } - if clause.Type == ctsp[0] && clause.SubType == "*" { - content_type = alternatives[i] - return - } - if clause.Type == "*" && clause.SubType == "*" { - content_type = alternatives[i] - return - } - } - } - return -} diff --git a/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go deleted file mode 100644 index 35e739c7..00000000 --- a/vendor/github.com/prometheus/common/model/alert.go +++ /dev/null @@ -1,136 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
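(For orientation while reviewing the autoneg.go deletion above: a minimal, self-contained sketch of how the package's two exported helpers behave. The import path is the package's original upstream path, shown purely for illustration; the vendored copy under internal/ was never importable from outside this module.)

package main

import (
	"fmt"

	"bitbucket.org/ww/goautoneg" // illustrative: original upstream path, not the vendored internal one
)

func main() {
	// A bare media type defaults to q=1.0, so application/json outranks
	// text/html;q=0.8 once ParseAccept sorts clauses by quality factor.
	header := "text/html;q=0.8, application/json"
	for _, c := range goautoneg.ParseAccept(header) {
		fmt.Printf("%s/%s q=%g\n", c.Type, c.SubType, c.Q)
	}

	// Negotiate returns the first alternative matched by the best clause.
	fmt.Println(goautoneg.Negotiate(header, []string{"text/html", "application/json"}))
	// application/json
}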
-
-package model
-
-import (
-	"fmt"
-	"time"
-)
-
-type AlertStatus string
-
-const (
-	AlertFiring   AlertStatus = "firing"
-	AlertResolved AlertStatus = "resolved"
-)
-
-// Alert is a generic representation of an alert in the Prometheus eco-system.
-type Alert struct {
-	// Label value pairs for purpose of aggregation, matching, and disposition
-	// dispatching. This must minimally include an "alertname" label.
-	Labels LabelSet `json:"labels"`
-
-	// Extra key/value information which does not define alert identity.
-	Annotations LabelSet `json:"annotations"`
-
-	// The known time range for this alert. Both ends are optional.
-	StartsAt     time.Time `json:"startsAt,omitempty"`
-	EndsAt       time.Time `json:"endsAt,omitempty"`
-	GeneratorURL string    `json:"generatorURL"`
-}
-
-// Name returns the name of the alert. It is equivalent to the "alertname" label.
-func (a *Alert) Name() string {
-	return string(a.Labels[AlertNameLabel])
-}
-
-// Fingerprint returns a unique hash for the alert. It is equivalent to
-// the fingerprint of the alert's label set.
-func (a *Alert) Fingerprint() Fingerprint {
-	return a.Labels.Fingerprint()
-}
-
-func (a *Alert) String() string {
-	s := fmt.Sprintf("%s[%s]", a.Name(), a.Fingerprint().String()[:7])
-	if a.Resolved() {
-		return s + "[resolved]"
-	}
-	return s + "[active]"
-}
-
-// Resolved returns true iff the activity interval ended in the past.
-func (a *Alert) Resolved() bool {
-	return a.ResolvedAt(time.Now())
-}
-
-// ResolvedAt returns true iff the activity interval ended before
-// the given timestamp.
-func (a *Alert) ResolvedAt(ts time.Time) bool {
-	if a.EndsAt.IsZero() {
-		return false
-	}
-	return !a.EndsAt.After(ts)
-}
-
-// Status returns the status of the alert.
-func (a *Alert) Status() AlertStatus {
-	if a.Resolved() {
-		return AlertResolved
-	}
-	return AlertFiring
-}
-
-// Validate checks whether the alert data is consistent.
-func (a *Alert) Validate() error {
-	if a.StartsAt.IsZero() {
-		return fmt.Errorf("start time missing")
-	}
-	if !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) {
-		return fmt.Errorf("start time must be before end time")
-	}
-	if err := a.Labels.Validate(); err != nil {
-		return fmt.Errorf("invalid label set: %s", err)
-	}
-	if len(a.Labels) == 0 {
-		return fmt.Errorf("at least one label pair required")
-	}
-	if err := a.Annotations.Validate(); err != nil {
-		return fmt.Errorf("invalid annotations: %s", err)
-	}
-	return nil
-}
-
-// Alerts is a list of alerts that can be sorted in chronological order.
-type Alerts []*Alert
-
-func (as Alerts) Len() int      { return len(as) }
-func (as Alerts) Swap(i, j int) { as[i], as[j] = as[j], as[i] }
-
-func (as Alerts) Less(i, j int) bool {
-	if as[i].StartsAt.Before(as[j].StartsAt) {
-		return true
-	}
-	if as[i].EndsAt.Before(as[j].EndsAt) {
-		return true
-	}
-	return as[i].Fingerprint() < as[j].Fingerprint()
-}
-
-// HasFiring returns true iff one of the alerts is not resolved.
-func (as Alerts) HasFiring() bool {
-	for _, a := range as {
-		if !a.Resolved() {
-			return true
-		}
-	}
-	return false
-}
-
-// Status returns AlertFiring iff at least one of the alerts is firing.
-func (as Alerts) Status() AlertStatus { - if as.HasFiring() { - return AlertFiring - } - return AlertResolved -} diff --git a/vendor/github.com/prometheus/common/model/fingerprinting.go b/vendor/github.com/prometheus/common/model/fingerprinting.go deleted file mode 100644 index fc4de410..00000000 --- a/vendor/github.com/prometheus/common/model/fingerprinting.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "fmt" - "strconv" -) - -// Fingerprint provides a hash-capable representation of a Metric. -// For our purposes, FNV-1A 64-bit is used. -type Fingerprint uint64 - -// FingerprintFromString transforms a string representation into a Fingerprint. -func FingerprintFromString(s string) (Fingerprint, error) { - num, err := strconv.ParseUint(s, 16, 64) - return Fingerprint(num), err -} - -// ParseFingerprint parses the input string into a fingerprint. -func ParseFingerprint(s string) (Fingerprint, error) { - num, err := strconv.ParseUint(s, 16, 64) - if err != nil { - return 0, err - } - return Fingerprint(num), nil -} - -func (f Fingerprint) String() string { - return fmt.Sprintf("%016x", uint64(f)) -} - -// Fingerprints represents a collection of Fingerprint subject to a given -// natural sorting scheme. It implements sort.Interface. -type Fingerprints []Fingerprint - -// Len implements sort.Interface. -func (f Fingerprints) Len() int { - return len(f) -} - -// Less implements sort.Interface. -func (f Fingerprints) Less(i, j int) bool { - return f[i] < f[j] -} - -// Swap implements sort.Interface. -func (f Fingerprints) Swap(i, j int) { - f[i], f[j] = f[j], f[i] -} - -// FingerprintSet is a set of Fingerprints. -type FingerprintSet map[Fingerprint]struct{} - -// Equal returns true if both sets contain the same elements (and not more). -func (s FingerprintSet) Equal(o FingerprintSet) bool { - if len(s) != len(o) { - return false - } - - for k := range s { - if _, ok := o[k]; !ok { - return false - } - } - - return true -} - -// Intersection returns the elements contained in both sets. -func (s FingerprintSet) Intersection(o FingerprintSet) FingerprintSet { - myLength, otherLength := len(s), len(o) - if myLength == 0 || otherLength == 0 { - return FingerprintSet{} - } - - subSet := s - superSet := o - - if otherLength < myLength { - subSet = o - superSet = s - } - - out := FingerprintSet{} - - for k := range subSet { - if _, ok := superSet[k]; ok { - out[k] = struct{}{} - } - } - - return out -} diff --git a/vendor/github.com/prometheus/common/model/fnv.go b/vendor/github.com/prometheus/common/model/fnv.go deleted file mode 100644 index 367afecd..00000000 --- a/vendor/github.com/prometheus/common/model/fnv.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package model
-
-// Inline and byte-free variant of hash/fnv's fnv64a.
-
-const (
-	offset64 = 14695981039346656037
-	prime64  = 1099511628211
-)
-
-// hashNew initializes a new fnv64a hash value.
-func hashNew() uint64 {
-	return offset64
-}
-
-// hashAdd adds a string to a fnv64a hash value, returning the updated hash.
-func hashAdd(h uint64, s string) uint64 {
-	for i := 0; i < len(s); i++ {
-		h ^= uint64(s[i])
-		h *= prime64
-	}
-	return h
-}
-
-// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash.
-func hashAddByte(h uint64, b byte) uint64 {
-	h ^= uint64(b)
-	h *= prime64
-	return h
-}
diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go
deleted file mode 100644
index ef895633..00000000
--- a/vendor/github.com/prometheus/common/model/labels.go
+++ /dev/null
@@ -1,218 +0,0 @@
-// Copyright 2013 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package model
-
-import (
-	"encoding/json"
-	"fmt"
-	"regexp"
-	"strings"
-	"unicode/utf8"
-)
-
-const (
-	// AlertNameLabel is the name of the label containing an alert's name.
-	AlertNameLabel = "alertname"
-
-	// ExportedLabelPrefix is the prefix to prepend to the label names present in
-	// exported metrics if a label of the same name is added by the server.
-	ExportedLabelPrefix = "exported_"
-
-	// MetricNameLabel is the label name indicating the metric name of a
-	// timeseries.
-	MetricNameLabel = "__name__"
-
-	// SchemeLabel is the name of the label that holds the scheme on which to
-	// scrape a target.
-	SchemeLabel = "__scheme__"
-
-	// AddressLabel is the name of the label that holds the address of
-	// a scrape target.
-	AddressLabel = "__address__"
-
-	// MetricsPathLabel is the name of the label that holds the path on which to
-	// scrape a target.
-	MetricsPathLabel = "__metrics_path__"
-
-	// ScrapeIntervalLabel is the name of the label that holds the scrape interval
-	// used to scrape a target.
-	ScrapeIntervalLabel = "__scrape_interval__"
-
-	// ScrapeTimeoutLabel is the name of the label that holds the scrape
-	// timeout used to scrape a target.
-	ScrapeTimeoutLabel = "__scrape_timeout__"
-
-	// ReservedLabelPrefix is a prefix which is not legal in user-supplied
-	// label names.
-	ReservedLabelPrefix = "__"
-
-	// MetaLabelPrefix is a prefix for labels that provide meta information.
-	// Labels with this prefix are used for intermediate label processing and
-	// will not be attached to time series.
-	MetaLabelPrefix = "__meta_"
-
-	// TmpLabelPrefix is a prefix for temporary labels as part of relabelling.
-	// Labels with this prefix are used for intermediate label processing and
-	// will not be attached to time series. This is reserved for use in
-	// Prometheus configuration files by users.
-	TmpLabelPrefix = "__tmp_"
-
-	// ParamLabelPrefix is a prefix for labels that provide URL parameters
-	// used to scrape a target.
-	ParamLabelPrefix = "__param_"
-
-	// JobLabel is the label name indicating the job from which a timeseries
-	// was scraped.
-	JobLabel = "job"
-
-	// InstanceLabel is the label name used for the instance label.
-	InstanceLabel = "instance"
-
-	// BucketLabel is used for the label that defines the upper bound of a
-	// bucket of a histogram ("le" -> "less or equal").
-	BucketLabel = "le"
-
-	// QuantileLabel is used for the label that defines the quantile in a
-	// summary.
-	QuantileLabel = "quantile"
-)
-
-// LabelNameRE is a regular expression matching valid label names. Note that the
-// IsValid method of LabelName performs the same check but faster than a match
-// with this regular expression.
-var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
-
-// A LabelName is a key for a LabelSet or Metric. It has a value associated
-// therewith.
-type LabelName string
-
-// IsValid is true iff the label name matches the pattern of LabelNameRE. This
-// method, however, does not use LabelNameRE for the check but a much faster
-// hardcoded implementation.
-func (ln LabelName) IsValid() bool {
-	if len(ln) == 0 {
-		return false
-	}
-	for i, b := range ln {
-		if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) {
-			return false
-		}
-	}
-	return true
-}
-
-// UnmarshalYAML implements the yaml.Unmarshaler interface.
-func (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error {
-	var s string
-	if err := unmarshal(&s); err != nil {
-		return err
-	}
-	if !LabelName(s).IsValid() {
-		return fmt.Errorf("%q is not a valid label name", s)
-	}
-	*ln = LabelName(s)
-	return nil
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (ln *LabelName) UnmarshalJSON(b []byte) error {
-	var s string
-	if err := json.Unmarshal(b, &s); err != nil {
-		return err
-	}
-	if !LabelName(s).IsValid() {
-		return fmt.Errorf("%q is not a valid label name", s)
-	}
-	*ln = LabelName(s)
-	return nil
-}
-
-// LabelNames is a sortable LabelName slice. It implements sort.Interface.
-type LabelNames []LabelName
-
-func (l LabelNames) Len() int {
-	return len(l)
-}
-
-func (l LabelNames) Less(i, j int) bool {
-	return l[i] < l[j]
-}
-
-func (l LabelNames) Swap(i, j int) {
-	l[i], l[j] = l[j], l[i]
-}
-
-func (l LabelNames) String() string {
-	labelStrings := make([]string, 0, len(l))
-	for _, label := range l {
-		labelStrings = append(labelStrings, string(label))
-	}
-	return strings.Join(labelStrings, ", ")
-}
-
-// A LabelValue is an associated value for a LabelName.
-type LabelValue string
-
-// IsValid returns true iff the string is valid UTF-8.
-func (lv LabelValue) IsValid() bool {
-	return utf8.ValidString(string(lv))
-}
-
-// LabelValues is a sortable LabelValue slice. It implements sort.Interface.
-type LabelValues []LabelValue
-
-func (l LabelValues) Len() int {
-	return len(l)
-}
-
-func (l LabelValues) Less(i, j int) bool {
-	return string(l[i]) < string(l[j])
-}
-
-func (l LabelValues) Swap(i, j int) {
-	l[i], l[j] = l[j], l[i]
-}
-
-// LabelPair pairs a name with a value.
-type LabelPair struct { - Name LabelName - Value LabelValue -} - -// LabelPairs is a sortable slice of LabelPair pointers. It implements -// sort.Interface. -type LabelPairs []*LabelPair - -func (l LabelPairs) Len() int { - return len(l) -} - -func (l LabelPairs) Less(i, j int) bool { - switch { - case l[i].Name > l[j].Name: - return false - case l[i].Name < l[j].Name: - return true - case l[i].Value > l[j].Value: - return false - case l[i].Value < l[j].Value: - return true - default: - return false - } -} - -func (l LabelPairs) Swap(i, j int) { - l[i], l[j] = l[j], l[i] -} diff --git a/vendor/github.com/prometheus/common/model/labelset.go b/vendor/github.com/prometheus/common/model/labelset.go deleted file mode 100644 index 6eda08a7..00000000 --- a/vendor/github.com/prometheus/common/model/labelset.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "encoding/json" - "fmt" - "sort" - "strings" -) - -// A LabelSet is a collection of LabelName and LabelValue pairs. The LabelSet -// may be fully-qualified down to the point where it may resolve to a single -// Metric in the data store or not. All operations that occur within the realm -// of a LabelSet can emit a vector of Metric entities to which the LabelSet may -// match. -type LabelSet map[LabelName]LabelValue - -// Validate checks whether all names and values in the label set -// are valid. -func (ls LabelSet) Validate() error { - for ln, lv := range ls { - if !ln.IsValid() { - return fmt.Errorf("invalid name %q", ln) - } - if !lv.IsValid() { - return fmt.Errorf("invalid value %q", lv) - } - } - return nil -} - -// Equal returns true iff both label sets have exactly the same key/value pairs. -func (ls LabelSet) Equal(o LabelSet) bool { - if len(ls) != len(o) { - return false - } - for ln, lv := range ls { - olv, ok := o[ln] - if !ok { - return false - } - if olv != lv { - return false - } - } - return true -} - -// Before compares the metrics, using the following criteria: -// -// If m has fewer labels than o, it is before o. If it has more, it is not. -// -// If the number of labels is the same, the superset of all label names is -// sorted alphanumerically. The first differing label pair found in that order -// determines the outcome: If the label does not exist at all in m, then m is -// before o, and vice versa. Otherwise the label value is compared -// alphanumerically. -// -// If m and o are equal, the method returns false. -func (ls LabelSet) Before(o LabelSet) bool { - if len(ls) < len(o) { - return true - } - if len(ls) > len(o) { - return false - } - - lns := make(LabelNames, 0, len(ls)+len(o)) - for ln := range ls { - lns = append(lns, ln) - } - for ln := range o { - lns = append(lns, ln) - } - // It's probably not worth it to de-dup lns. 
- sort.Sort(lns) - for _, ln := range lns { - mlv, ok := ls[ln] - if !ok { - return true - } - olv, ok := o[ln] - if !ok { - return false - } - if mlv < olv { - return true - } - if mlv > olv { - return false - } - } - return false -} - -// Clone returns a copy of the label set. -func (ls LabelSet) Clone() LabelSet { - lsn := make(LabelSet, len(ls)) - for ln, lv := range ls { - lsn[ln] = lv - } - return lsn -} - -// Merge is a helper function to non-destructively merge two label sets. -func (l LabelSet) Merge(other LabelSet) LabelSet { - result := make(LabelSet, len(l)) - - for k, v := range l { - result[k] = v - } - - for k, v := range other { - result[k] = v - } - - return result -} - -func (l LabelSet) String() string { - lstrs := make([]string, 0, len(l)) - for l, v := range l { - lstrs = append(lstrs, fmt.Sprintf("%s=%q", l, v)) - } - - sort.Strings(lstrs) - return fmt.Sprintf("{%s}", strings.Join(lstrs, ", ")) -} - -// Fingerprint returns the LabelSet's fingerprint. -func (ls LabelSet) Fingerprint() Fingerprint { - return labelSetToFingerprint(ls) -} - -// FastFingerprint returns the LabelSet's Fingerprint calculated by a faster hashing -// algorithm, which is, however, more susceptible to hash collisions. -func (ls LabelSet) FastFingerprint() Fingerprint { - return labelSetToFastFingerprint(ls) -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (l *LabelSet) UnmarshalJSON(b []byte) error { - var m map[LabelName]LabelValue - if err := json.Unmarshal(b, &m); err != nil { - return err - } - // encoding/json only unmarshals maps of the form map[string]T. It treats - // LabelName as a string and does not call its UnmarshalJSON method. - // Thus, we have to replicate the behavior here. - for ln := range m { - if !ln.IsValid() { - return fmt.Errorf("%q is not a valid label name", ln) - } - } - *l = LabelSet(m) - return nil -} diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go deleted file mode 100644 index 00804b7f..00000000 --- a/vendor/github.com/prometheus/common/model/metric.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "fmt" - "regexp" - "sort" - "strings" -) - -var ( - // MetricNameRE is a regular expression matching valid metric - // names. Note that the IsValidMetricName function performs the same - // check but faster than a match with this regular expression. - MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`) -) - -// A Metric is similar to a LabelSet, but the key difference is that a Metric is -// a singleton and refers to one and only one stream of samples. -type Metric LabelSet - -// Equal compares the metrics. -func (m Metric) Equal(o Metric) bool { - return LabelSet(m).Equal(LabelSet(o)) -} - -// Before compares the metrics' underlying label sets. 
-func (m Metric) Before(o Metric) bool { - return LabelSet(m).Before(LabelSet(o)) -} - -// Clone returns a copy of the Metric. -func (m Metric) Clone() Metric { - clone := make(Metric, len(m)) - for k, v := range m { - clone[k] = v - } - return clone -} - -func (m Metric) String() string { - metricName, hasName := m[MetricNameLabel] - numLabels := len(m) - 1 - if !hasName { - numLabels = len(m) - } - labelStrings := make([]string, 0, numLabels) - for label, value := range m { - if label != MetricNameLabel { - labelStrings = append(labelStrings, fmt.Sprintf("%s=%q", label, value)) - } - } - - switch numLabels { - case 0: - if hasName { - return string(metricName) - } - return "{}" - default: - sort.Strings(labelStrings) - return fmt.Sprintf("%s{%s}", metricName, strings.Join(labelStrings, ", ")) - } -} - -// Fingerprint returns a Metric's Fingerprint. -func (m Metric) Fingerprint() Fingerprint { - return LabelSet(m).Fingerprint() -} - -// FastFingerprint returns a Metric's Fingerprint calculated by a faster hashing -// algorithm, which is, however, more susceptible to hash collisions. -func (m Metric) FastFingerprint() Fingerprint { - return LabelSet(m).FastFingerprint() -} - -// IsValidMetricName returns true iff name matches the pattern of MetricNameRE. -// This function, however, does not use MetricNameRE for the check but a much -// faster hardcoded implementation. -func IsValidMetricName(n LabelValue) bool { - if len(n) == 0 { - return false - } - for i, b := range n { - if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0)) { - return false - } - } - return true -} diff --git a/vendor/github.com/prometheus/common/model/model.go b/vendor/github.com/prometheus/common/model/model.go deleted file mode 100644 index a7b96917..00000000 --- a/vendor/github.com/prometheus/common/model/model.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package model contains common data structures that are shared across -// Prometheus components and libraries. -package model diff --git a/vendor/github.com/prometheus/common/model/signature.go b/vendor/github.com/prometheus/common/model/signature.go deleted file mode 100644 index 8762b13c..00000000 --- a/vendor/github.com/prometheus/common/model/signature.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
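(The label, label-set, and metric types removed above compose naturally; a small usage sketch covering them, with label names and values invented for illustration.)

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	m := model.Metric{
		model.MetricNameLabel: "http_requests_total",
		"method":              "post",
		"code":                "200",
	}

	// String renders the PromQL-style form with labels sorted:
	// http_requests_total{code="200", method="post"}
	fmt.Println(m)

	// Fingerprint hashes the sorted name/value pairs into a stable 64-bit ID.
	fmt.Println(m.Fingerprint())

	// The hardcoded validators mirror MetricNameRE and LabelNameRE above.
	fmt.Println(model.IsValidMetricName("http_requests_total")) // true
	fmt.Println(model.LabelName("0bad").IsValid())              // false
}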
-
-package model
-
-import (
-	"sort"
-)
-
-// SeparatorByte is a byte that cannot occur in valid UTF-8 sequences and is
-// used to separate label names, label values, and other strings from each other
-// when calculating their combined hash value (aka signature aka fingerprint).
-const SeparatorByte byte = 255
-
-var (
-	// cache the signature of an empty label set.
-	emptyLabelSignature = hashNew()
-)
-
-// LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a
-// given label set. (Collisions are possible but unlikely if the number of label
-// sets the function is applied to is small.)
-func LabelsToSignature(labels map[string]string) uint64 {
-	if len(labels) == 0 {
-		return emptyLabelSignature
-	}
-
-	labelNames := make([]string, 0, len(labels))
-	for labelName := range labels {
-		labelNames = append(labelNames, labelName)
-	}
-	sort.Strings(labelNames)
-
-	sum := hashNew()
-	for _, labelName := range labelNames {
-		sum = hashAdd(sum, labelName)
-		sum = hashAddByte(sum, SeparatorByte)
-		sum = hashAdd(sum, labels[labelName])
-		sum = hashAddByte(sum, SeparatorByte)
-	}
-	return sum
-}
-
-// labelSetToFingerprint works exactly as LabelsToSignature but takes a LabelSet as
-// parameter (rather than a label map) and returns a Fingerprint.
-func labelSetToFingerprint(ls LabelSet) Fingerprint {
-	if len(ls) == 0 {
-		return Fingerprint(emptyLabelSignature)
-	}
-
-	labelNames := make(LabelNames, 0, len(ls))
-	for labelName := range ls {
-		labelNames = append(labelNames, labelName)
-	}
-	sort.Sort(labelNames)
-
-	sum := hashNew()
-	for _, labelName := range labelNames {
-		sum = hashAdd(sum, string(labelName))
-		sum = hashAddByte(sum, SeparatorByte)
-		sum = hashAdd(sum, string(ls[labelName]))
-		sum = hashAddByte(sum, SeparatorByte)
-	}
-	return Fingerprint(sum)
-}
-
-// labelSetToFastFingerprint works similarly to labelSetToFingerprint but uses a
-// faster and less allocation-heavy hash function, which is more likely to
-// create hash collisions. Therefore, collision detection should be applied.
-func labelSetToFastFingerprint(ls LabelSet) Fingerprint {
-	if len(ls) == 0 {
-		return Fingerprint(emptyLabelSignature)
-	}
-
-	var result uint64
-	for labelName, labelValue := range ls {
-		sum := hashNew()
-		sum = hashAdd(sum, string(labelName))
-		sum = hashAddByte(sum, SeparatorByte)
-		sum = hashAdd(sum, string(labelValue))
-		result ^= sum
-	}
-	return Fingerprint(result)
-}
-
-// SignatureForLabels works like LabelsToSignature but takes a Metric as
-// parameter (rather than a label map) and only includes the labels with the
-// specified LabelNames into the signature calculation. The labels passed in
-// will be sorted by this function.
-func SignatureForLabels(m Metric, labels ...LabelName) uint64 {
-	if len(labels) == 0 {
-		return emptyLabelSignature
-	}
-
-	sort.Sort(LabelNames(labels))
-
-	sum := hashNew()
-	for _, label := range labels {
-		sum = hashAdd(sum, string(label))
-		sum = hashAddByte(sum, SeparatorByte)
-		sum = hashAdd(sum, string(m[label]))
-		sum = hashAddByte(sum, SeparatorByte)
-	}
-	return sum
-}
-
-// SignatureWithoutLabels works like LabelsToSignature but takes a Metric as
-// parameter (rather than a label map) and excludes the labels with any of the
-// specified LabelNames from the signature calculation.
-func SignatureWithoutLabels(m Metric, labels map[LabelName]struct{}) uint64 {
-	if len(m) == 0 {
-		return emptyLabelSignature
-	}
-
-	labelNames := make(LabelNames, 0, len(m))
-	for labelName := range m {
-		if _, exclude := labels[labelName]; !exclude {
-			labelNames = append(labelNames, labelName)
-		}
-	}
-	if len(labelNames) == 0 {
-		return emptyLabelSignature
-	}
-	sort.Sort(labelNames)
-
-	sum := hashNew()
-	for _, labelName := range labelNames {
-		sum = hashAdd(sum, string(labelName))
-		sum = hashAddByte(sum, SeparatorByte)
-		sum = hashAdd(sum, string(m[labelName]))
-		sum = hashAddByte(sum, SeparatorByte)
-	}
-	return sum
-}
diff --git a/vendor/github.com/prometheus/common/model/silence.go b/vendor/github.com/prometheus/common/model/silence.go
deleted file mode 100644
index bb99889d..00000000
--- a/vendor/github.com/prometheus/common/model/silence.go
+++ /dev/null
@@ -1,106 +0,0 @@
-// Copyright 2015 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package model
-
-import (
-	"encoding/json"
-	"fmt"
-	"regexp"
-	"time"
-)
-
-// Matcher describes a matcher that matches the value of a given label.
-type Matcher struct {
-	Name    LabelName `json:"name"`
-	Value   string    `json:"value"`
-	IsRegex bool      `json:"isRegex"`
-}
-
-func (m *Matcher) UnmarshalJSON(b []byte) error {
-	type plain Matcher
-	if err := json.Unmarshal(b, (*plain)(m)); err != nil {
-		return err
-	}
-
-	if len(m.Name) == 0 {
-		return fmt.Errorf("label name in matcher must not be empty")
-	}
-	if m.IsRegex {
-		if _, err := regexp.Compile(m.Value); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-// Validate returns nil iff all fields of the matcher have valid values.
-func (m *Matcher) Validate() error {
-	if !m.Name.IsValid() {
-		return fmt.Errorf("invalid name %q", m.Name)
-	}
-	if m.IsRegex {
-		if _, err := regexp.Compile(m.Value); err != nil {
-			return fmt.Errorf("invalid regular expression %q", m.Value)
-		}
-	} else if !LabelValue(m.Value).IsValid() || len(m.Value) == 0 {
-		return fmt.Errorf("invalid value %q", m.Value)
-	}
-	return nil
-}
-
-// Silence defines the representation of a silence definition in the Prometheus
-// eco-system.
-type Silence struct {
-	ID uint64 `json:"id,omitempty"`
-
-	Matchers []*Matcher `json:"matchers"`
-
-	StartsAt time.Time `json:"startsAt"`
-	EndsAt   time.Time `json:"endsAt"`
-
-	CreatedAt time.Time `json:"createdAt,omitempty"`
-	CreatedBy string    `json:"createdBy"`
-	Comment   string    `json:"comment,omitempty"`
-}
-
-// Validate returns nil iff all fields of the silence have valid values.
-func (s *Silence) Validate() error {
-	if len(s.Matchers) == 0 {
-		return fmt.Errorf("at least one matcher required")
-	}
-	for _, m := range s.Matchers {
-		if err := m.Validate(); err != nil {
-			return fmt.Errorf("invalid matcher: %s", err)
-		}
-	}
-	if s.StartsAt.IsZero() {
-		return fmt.Errorf("start time missing")
-	}
-	if s.EndsAt.IsZero() {
-		return fmt.Errorf("end time missing")
-	}
-	if s.EndsAt.Before(s.StartsAt) {
-		return fmt.Errorf("start time must be before end time")
-	}
-	if s.CreatedBy == "" {
-		return fmt.Errorf("creator information missing")
-	}
-	if s.Comment == "" {
-		return fmt.Errorf("comment missing")
-	}
-	if s.CreatedAt.IsZero() {
-		return fmt.Errorf("creation timestamp missing")
-	}
-	return nil
-}
diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go
deleted file mode 100644
index 7f67b16e..00000000
--- a/vendor/github.com/prometheus/common/model/time.go
+++ /dev/null
@@ -1,317 +0,0 @@
-// Copyright 2013 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package model
-
-import (
-	"encoding/json"
-	"errors"
-	"fmt"
-	"math"
-	"regexp"
-	"strconv"
-	"strings"
-	"time"
-)
-
-const (
-	// minimumTick is the minimum supported time resolution. This has to be
-	// at most time.Second in order for the code below to work.
-	minimumTick = time.Millisecond
-	// second is the Time duration equivalent to one second.
-	second = int64(time.Second / minimumTick)
-	// The number of nanoseconds per minimum tick.
-	nanosPerTick = int64(minimumTick / time.Nanosecond)
-
-	// Earliest is the earliest Time representable. Handy for
-	// initializing a high watermark.
-	Earliest = Time(math.MinInt64)
-	// Latest is the latest Time representable. Handy for initializing
-	// a low watermark.
-	Latest = Time(math.MaxInt64)
-)
-
-// Time is the number of milliseconds since the epoch
-// (1970-01-01 00:00 UTC) excluding leap seconds.
-type Time int64
-
-// Interval describes an interval between two timestamps.
-type Interval struct {
-	Start, End Time
-}
-
-// Now returns the current time as a Time.
-func Now() Time {
-	return TimeFromUnixNano(time.Now().UnixNano())
-}
-
-// TimeFromUnix returns the Time equivalent to the Unix Time t
-// provided in seconds.
-func TimeFromUnix(t int64) Time {
-	return Time(t * second)
-}
-
-// TimeFromUnixNano returns the Time equivalent to the Unix Time
-// t provided in nanoseconds.
-func TimeFromUnixNano(t int64) Time {
-	return Time(t / nanosPerTick)
-}
-
-// Equal reports whether two Times represent the same instant.
-func (t Time) Equal(o Time) bool {
-	return t == o
-}
-
-// Before reports whether the Time t is before o.
-func (t Time) Before(o Time) bool {
-	return t < o
-}
-
-// After reports whether the Time t is after o.
-func (t Time) After(o Time) bool {
-	return t > o
-}
-
-// Add returns the Time t + d.
-func (t Time) Add(d time.Duration) Time {
-	return t + Time(d/minimumTick)
-}
-
-// Sub returns the Duration t - o.
-func (t Time) Sub(o Time) time.Duration { - return time.Duration(t-o) * minimumTick -} - -// Time returns the time.Time representation of t. -func (t Time) Time() time.Time { - return time.Unix(int64(t)/second, (int64(t)%second)*nanosPerTick) -} - -// Unix returns t as a Unix time, the number of seconds elapsed -// since January 1, 1970 UTC. -func (t Time) Unix() int64 { - return int64(t) / second -} - -// UnixNano returns t as a Unix time, the number of nanoseconds elapsed -// since January 1, 1970 UTC. -func (t Time) UnixNano() int64 { - return int64(t) * nanosPerTick -} - -// The number of digits after the dot. -var dotPrecision = int(math.Log10(float64(second))) - -// String returns a string representation of the Time. -func (t Time) String() string { - return strconv.FormatFloat(float64(t)/float64(second), 'f', -1, 64) -} - -// MarshalJSON implements the json.Marshaler interface. -func (t Time) MarshalJSON() ([]byte, error) { - return []byte(t.String()), nil -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (t *Time) UnmarshalJSON(b []byte) error { - p := strings.Split(string(b), ".") - switch len(p) { - case 1: - v, err := strconv.ParseInt(string(p[0]), 10, 64) - if err != nil { - return err - } - *t = Time(v * second) - - case 2: - v, err := strconv.ParseInt(string(p[0]), 10, 64) - if err != nil { - return err - } - v *= second - - prec := dotPrecision - len(p[1]) - if prec < 0 { - p[1] = p[1][:dotPrecision] - } else if prec > 0 { - p[1] = p[1] + strings.Repeat("0", prec) - } - - va, err := strconv.ParseInt(p[1], 10, 32) - if err != nil { - return err - } - - // If the value was something like -0.1 the negative is lost in the - // parsing because of the leading zero, this ensures that we capture it. - if len(p[0]) > 0 && p[0][0] == '-' && v+va > 0 { - *t = Time(v+va) * -1 - } else { - *t = Time(v + va) - } - - default: - return fmt.Errorf("invalid time %q", string(b)) - } - return nil -} - -// Duration wraps time.Duration. It is used to parse the custom duration format -// from YAML. -// This type should not propagate beyond the scope of input/output processing. -type Duration time.Duration - -// Set implements pflag/flag.Value -func (d *Duration) Set(s string) error { - var err error - *d, err = ParseDuration(s) - return err -} - -// Type implements pflag.Value -func (d *Duration) Type() string { - return "duration" -} - -var durationRE = regexp.MustCompile("^(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?$") - -// ParseDuration parses a string into a time.Duration, assuming that a year -// always has 365d, a week always has 7d, and a day always has 24h. -func ParseDuration(durationStr string) (Duration, error) { - switch durationStr { - case "0": - // Allow 0 without a unit. - return 0, nil - case "": - return 0, fmt.Errorf("empty duration string") - } - matches := durationRE.FindStringSubmatch(durationStr) - if matches == nil { - return 0, fmt.Errorf("not a valid duration string: %q", durationStr) - } - var dur time.Duration - - // Parse the match at pos `pos` in the regex and use `mult` to turn that - // into ms, then add that value to the total parsed duration. - var overflowErr error - m := func(pos int, mult time.Duration) { - if matches[pos] == "" { - return - } - n, _ := strconv.Atoi(matches[pos]) - - // Check if the provided duration overflows time.Duration (> ~ 290years). 
- if n > int((1<<63-1)/mult/time.Millisecond) { - overflowErr = errors.New("duration out of range") - } - d := time.Duration(n) * time.Millisecond - dur += d * mult - - if dur < 0 { - overflowErr = errors.New("duration out of range") - } - } - - m(2, 1000*60*60*24*365) // y - m(4, 1000*60*60*24*7) // w - m(6, 1000*60*60*24) // d - m(8, 1000*60*60) // h - m(10, 1000*60) // m - m(12, 1000) // s - m(14, 1) // ms - - return Duration(dur), overflowErr -} - -func (d Duration) String() string { - var ( - ms = int64(time.Duration(d) / time.Millisecond) - r = "" - ) - if ms == 0 { - return "0s" - } - - f := func(unit string, mult int64, exact bool) { - if exact && ms%mult != 0 { - return - } - if v := ms / mult; v > 0 { - r += fmt.Sprintf("%d%s", v, unit) - ms -= v * mult - } - } - - // Only format years and weeks if the remainder is zero, as it is often - // easier to read 90d than 12w6d. - f("y", 1000*60*60*24*365, true) - f("w", 1000*60*60*24*7, true) - - f("d", 1000*60*60*24, false) - f("h", 1000*60*60, false) - f("m", 1000*60, false) - f("s", 1000, false) - f("ms", 1, false) - - return r -} - -// MarshalJSON implements the json.Marshaler interface. -func (d Duration) MarshalJSON() ([]byte, error) { - return json.Marshal(d.String()) -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (d *Duration) UnmarshalJSON(bytes []byte) error { - var s string - if err := json.Unmarshal(bytes, &s); err != nil { - return err - } - dur, err := ParseDuration(s) - if err != nil { - return err - } - *d = dur - return nil -} - -// MarshalText implements the encoding.TextMarshaler interface. -func (d *Duration) MarshalText() ([]byte, error) { - return []byte(d.String()), nil -} - -// UnmarshalText implements the encoding.TextUnmarshaler interface. -func (d *Duration) UnmarshalText(text []byte) error { - var err error - *d, err = ParseDuration(string(text)) - return err -} - -// MarshalYAML implements the yaml.Marshaler interface. -func (d Duration) MarshalYAML() (interface{}, error) { - return d.String(), nil -} - -// UnmarshalYAML implements the yaml.Unmarshaler interface. -func (d *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error { - var s string - if err := unmarshal(&s); err != nil { - return err - } - dur, err := ParseDuration(s) - if err != nil { - return err - } - *d = dur - return nil -} diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go deleted file mode 100644 index c9d8fb1a..00000000 --- a/vendor/github.com/prometheus/common/model/value.go +++ /dev/null @@ -1,416 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "encoding/json" - "fmt" - "math" - "sort" - "strconv" - "strings" -) - -var ( - // ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a - // non-existing sample pair. It is a SamplePair with timestamp Earliest and - // value 0.0. 
Note that the natural zero value of SamplePair has a timestamp - // of 0, which is possible to appear in a real SamplePair and thus not - // suitable to signal a non-existing SamplePair. - ZeroSamplePair = SamplePair{Timestamp: Earliest} - - // ZeroSample is the pseudo zero-value of Sample used to signal a - // non-existing sample. It is a Sample with timestamp Earliest, value 0.0, - // and metric nil. Note that the natural zero value of Sample has a timestamp - // of 0, which is possible to appear in a real Sample and thus not suitable - // to signal a non-existing Sample. - ZeroSample = Sample{Timestamp: Earliest} -) - -// A SampleValue is a representation of a value for a given sample at a given -// time. -type SampleValue float64 - -// MarshalJSON implements json.Marshaler. -func (v SampleValue) MarshalJSON() ([]byte, error) { - return json.Marshal(v.String()) -} - -// UnmarshalJSON implements json.Unmarshaler. -func (v *SampleValue) UnmarshalJSON(b []byte) error { - if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { - return fmt.Errorf("sample value must be a quoted string") - } - f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64) - if err != nil { - return err - } - *v = SampleValue(f) - return nil -} - -// Equal returns true if the value of v and o is equal or if both are NaN. Note -// that v==o is false if both are NaN. If you want the conventional float -// behavior, use == to compare two SampleValues. -func (v SampleValue) Equal(o SampleValue) bool { - if v == o { - return true - } - return math.IsNaN(float64(v)) && math.IsNaN(float64(o)) -} - -func (v SampleValue) String() string { - return strconv.FormatFloat(float64(v), 'f', -1, 64) -} - -// SamplePair pairs a SampleValue with a Timestamp. -type SamplePair struct { - Timestamp Time - Value SampleValue -} - -// MarshalJSON implements json.Marshaler. -func (s SamplePair) MarshalJSON() ([]byte, error) { - t, err := json.Marshal(s.Timestamp) - if err != nil { - return nil, err - } - v, err := json.Marshal(s.Value) - if err != nil { - return nil, err - } - return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (s *SamplePair) UnmarshalJSON(b []byte) error { - v := [...]json.Unmarshaler{&s.Timestamp, &s.Value} - return json.Unmarshal(b, &v) -} - -// Equal returns true if this SamplePair and o have equal Values and equal -// Timestamps. The semantics of Value equality is defined by SampleValue.Equal. -func (s *SamplePair) Equal(o *SamplePair) bool { - return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp)) -} - -func (s SamplePair) String() string { - return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp) -} - -// Sample is a sample pair associated with a metric. -type Sample struct { - Metric Metric `json:"metric"` - Value SampleValue `json:"value"` - Timestamp Time `json:"timestamp"` -} - -// Equal compares first the metrics, then the timestamp, then the value. The -// semantics of value equality is defined by SampleValue.Equal. -func (s *Sample) Equal(o *Sample) bool { - if s == o { - return true - } - - if !s.Metric.Equal(o.Metric) { - return false - } - if !s.Timestamp.Equal(o.Timestamp) { - return false - } - - return s.Value.Equal(o.Value) -} - -func (s Sample) String() string { - return fmt.Sprintf("%s => %s", s.Metric, SamplePair{ - Timestamp: s.Timestamp, - Value: s.Value, - }) -} - -// MarshalJSON implements json.Marshaler. 
-func (s Sample) MarshalJSON() ([]byte, error) { - v := struct { - Metric Metric `json:"metric"` - Value SamplePair `json:"value"` - }{ - Metric: s.Metric, - Value: SamplePair{ - Timestamp: s.Timestamp, - Value: s.Value, - }, - } - - return json.Marshal(&v) -} - -// UnmarshalJSON implements json.Unmarshaler. -func (s *Sample) UnmarshalJSON(b []byte) error { - v := struct { - Metric Metric `json:"metric"` - Value SamplePair `json:"value"` - }{ - Metric: s.Metric, - Value: SamplePair{ - Timestamp: s.Timestamp, - Value: s.Value, - }, - } - - if err := json.Unmarshal(b, &v); err != nil { - return err - } - - s.Metric = v.Metric - s.Timestamp = v.Value.Timestamp - s.Value = v.Value.Value - - return nil -} - -// Samples is a sortable Sample slice. It implements sort.Interface. -type Samples []*Sample - -func (s Samples) Len() int { - return len(s) -} - -// Less compares first the metrics, then the timestamp. -func (s Samples) Less(i, j int) bool { - switch { - case s[i].Metric.Before(s[j].Metric): - return true - case s[j].Metric.Before(s[i].Metric): - return false - case s[i].Timestamp.Before(s[j].Timestamp): - return true - default: - return false - } -} - -func (s Samples) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -// Equal compares two sets of samples and returns true if they are equal. -func (s Samples) Equal(o Samples) bool { - if len(s) != len(o) { - return false - } - - for i, sample := range s { - if !sample.Equal(o[i]) { - return false - } - } - return true -} - -// SampleStream is a stream of Values belonging to an attached COWMetric. -type SampleStream struct { - Metric Metric `json:"metric"` - Values []SamplePair `json:"values"` -} - -func (ss SampleStream) String() string { - vals := make([]string, len(ss.Values)) - for i, v := range ss.Values { - vals[i] = v.String() - } - return fmt.Sprintf("%s =>\n%s", ss.Metric, strings.Join(vals, "\n")) -} - -// Value is a generic interface for values resulting from a query evaluation. -type Value interface { - Type() ValueType - String() string -} - -func (Matrix) Type() ValueType { return ValMatrix } -func (Vector) Type() ValueType { return ValVector } -func (*Scalar) Type() ValueType { return ValScalar } -func (*String) Type() ValueType { return ValString } - -type ValueType int - -const ( - ValNone ValueType = iota - ValScalar - ValVector - ValMatrix - ValString -) - -// MarshalJSON implements json.Marshaler. -func (et ValueType) MarshalJSON() ([]byte, error) { - return json.Marshal(et.String()) -} - -func (et *ValueType) UnmarshalJSON(b []byte) error { - var s string - if err := json.Unmarshal(b, &s); err != nil { - return err - } - switch s { - case "": - *et = ValNone - case "scalar": - *et = ValScalar - case "vector": - *et = ValVector - case "matrix": - *et = ValMatrix - case "string": - *et = ValString - default: - return fmt.Errorf("unknown value type %q", s) - } - return nil -} - -func (e ValueType) String() string { - switch e { - case ValNone: - return "" - case ValScalar: - return "scalar" - case ValVector: - return "vector" - case ValMatrix: - return "matrix" - case ValString: - return "string" - } - panic("ValueType.String: unhandled value type") -} - -// Scalar is a scalar value evaluated at the set timestamp. -type Scalar struct { - Value SampleValue `json:"value"` - Timestamp Time `json:"timestamp"` -} - -func (s Scalar) String() string { - return fmt.Sprintf("scalar: %v @[%v]", s.Value, s.Timestamp) -} - -// MarshalJSON implements json.Marshaler. 
-func (s Scalar) MarshalJSON() ([]byte, error) { - v := strconv.FormatFloat(float64(s.Value), 'f', -1, 64) - return json.Marshal([...]interface{}{s.Timestamp, string(v)}) -} - -// UnmarshalJSON implements json.Unmarshaler. -func (s *Scalar) UnmarshalJSON(b []byte) error { - var f string - v := [...]interface{}{&s.Timestamp, &f} - - if err := json.Unmarshal(b, &v); err != nil { - return err - } - - value, err := strconv.ParseFloat(f, 64) - if err != nil { - return fmt.Errorf("error parsing sample value: %s", err) - } - s.Value = SampleValue(value) - return nil -} - -// String is a string value evaluated at the set timestamp. -type String struct { - Value string `json:"value"` - Timestamp Time `json:"timestamp"` -} - -func (s *String) String() string { - return s.Value -} - -// MarshalJSON implements json.Marshaler. -func (s String) MarshalJSON() ([]byte, error) { - return json.Marshal([]interface{}{s.Timestamp, s.Value}) -} - -// UnmarshalJSON implements json.Unmarshaler. -func (s *String) UnmarshalJSON(b []byte) error { - v := [...]interface{}{&s.Timestamp, &s.Value} - return json.Unmarshal(b, &v) -} - -// Vector is basically only an alias for Samples, but the -// contract is that in a Vector, all Samples have the same timestamp. -type Vector []*Sample - -func (vec Vector) String() string { - entries := make([]string, len(vec)) - for i, s := range vec { - entries[i] = s.String() - } - return strings.Join(entries, "\n") -} - -func (vec Vector) Len() int { return len(vec) } -func (vec Vector) Swap(i, j int) { vec[i], vec[j] = vec[j], vec[i] } - -// Less compares first the metrics, then the timestamp. -func (vec Vector) Less(i, j int) bool { - switch { - case vec[i].Metric.Before(vec[j].Metric): - return true - case vec[j].Metric.Before(vec[i].Metric): - return false - case vec[i].Timestamp.Before(vec[j].Timestamp): - return true - default: - return false - } -} - -// Equal compares two sets of samples and returns true if they are equal. -func (vec Vector) Equal(o Vector) bool { - if len(vec) != len(o) { - return false - } - - for i, sample := range vec { - if !sample.Equal(o[i]) { - return false - } - } - return true -} - -// Matrix is a list of time series. 
-type Matrix []*SampleStream - -func (m Matrix) Len() int { return len(m) } -func (m Matrix) Less(i, j int) bool { return m[i].Metric.Before(m[j].Metric) } -func (m Matrix) Swap(i, j int) { m[i], m[j] = m[j], m[i] } - -func (mat Matrix) String() string { - matCp := make(Matrix, len(mat)) - copy(matCp, mat) - sort.Sort(matCp) - - strs := make([]string, len(matCp)) - - for i, ss := range matCp { - strs[i] = ss.String() - } - - return strings.Join(strs, "\n") -} diff --git a/vendor/github.com/prometheus/procfs/.gitignore b/vendor/github.com/prometheus/procfs/.gitignore deleted file mode 100644 index 25e3659a..00000000 --- a/vendor/github.com/prometheus/procfs/.gitignore +++ /dev/null @@ -1 +0,0 @@ -/fixtures/ diff --git a/vendor/github.com/prometheus/procfs/.golangci.yml b/vendor/github.com/prometheus/procfs/.golangci.yml deleted file mode 100644 index 0aa09eda..00000000 --- a/vendor/github.com/prometheus/procfs/.golangci.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -linters: - enable: - - golint diff --git a/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md b/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md deleted file mode 100644 index 9a1aff41..00000000 --- a/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,3 +0,0 @@ -## Prometheus Community Code of Conduct - -Prometheus follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md). diff --git a/vendor/github.com/prometheus/procfs/CONTRIBUTING.md b/vendor/github.com/prometheus/procfs/CONTRIBUTING.md deleted file mode 100644 index 943de761..00000000 --- a/vendor/github.com/prometheus/procfs/CONTRIBUTING.md +++ /dev/null @@ -1,121 +0,0 @@ -# Contributing - -Prometheus uses GitHub to manage reviews of pull requests. - -* If you are a new contributor see: [Steps to Contribute](#steps-to-contribute) - -* If you have a trivial fix or improvement, go ahead and create a pull request, - addressing (with `@...`) a suitable maintainer of this repository (see - [MAINTAINERS.md](MAINTAINERS.md)) in the description of the pull request. - -* If you plan to do something more involved, first discuss your ideas - on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers). - This will avoid unnecessary work and surely give you and us a good deal - of inspiration. Also please see our [non-goals issue](https://github.com/prometheus/docs/issues/149) on areas that the Prometheus community doesn't plan to work on. - -* Relevant coding style guidelines are the [Go Code Review - Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments) - and the _Formatting and style_ section of Peter Bourgon's [Go: Best - Practices for Production - Environments](https://peter.bourgon.org/go-in-production/#formatting-and-style). - -* Be sure to sign off on the [DCO](https://github.com/probot/dco#how-it-works) - -## Steps to Contribute - -Should you wish to work on an issue, please claim it first by commenting on the GitHub issue that you want to work on it. This is to prevent duplicated efforts from contributors on the same issue. - -Please check the [`help-wanted`](https://github.com/prometheus/procfs/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22) label to find issues that are good for getting started. If you have questions about one of the issues, with or without the tag, please comment on them and one of the maintainers will clarify it. For a quicker response, contact us over [IRC](https://prometheus.io/community). 
- -For quickly compiling and testing your changes do: -``` -make test # Make sure all the tests pass before you commit and push :) -``` - -We use [`golangci-lint`](https://github.com/golangci/golangci-lint) for linting the code. If it reports an issue and you think that the warning needs to be disregarded or is a false-positive, you can add a special comment `//nolint:linter1[,linter2,...]` before the offending line. Use this sparingly though, fixing the code to comply with the linter's recommendation is in general the preferred course of action. - -## Pull Request Checklist - -* Branch from the master branch and, if needed, rebase to the current master branch before submitting your pull request. If it doesn't merge cleanly with master you may be asked to rebase your changes. - -* Commits should be as small as possible, while ensuring that each commit is correct independently (i.e., each commit should compile and pass tests). - -* If your patch is not getting reviewed or you need a specific person to review it, you can @-reply a reviewer asking for a review in the pull request or a comment, or you can ask for a review on IRC channel [#prometheus](https://webchat.freenode.net/?channels=#prometheus) on irc.freenode.net (for the easiest start, [join via Riot](https://riot.im/app/#/room/#prometheus:matrix.org)). - -* Add tests relevant to the fixed bug or new feature. - -## Dependency management - -The Prometheus project uses [Go modules](https://golang.org/cmd/go/#hdr-Modules__module_versions__and_more) to manage dependencies on external packages. This requires a working Go environment with version 1.12 or greater installed. - -All dependencies are vendored in the `vendor/` directory. - -To add or update a new dependency, use the `go get` command: - -```bash -# Pick the latest tagged release. -go get example.com/some/module/pkg - -# Pick a specific version. -go get example.com/some/module/pkg@vX.Y.Z -``` - -Tidy up the `go.mod` and `go.sum` files and copy the new/updated dependency to the `vendor/` directory: - - -```bash -# The GO111MODULE variable can be omitted when the code isn't located in GOPATH. -GO111MODULE=on go mod tidy - -GO111MODULE=on go mod vendor -``` - -You have to commit the changes to `go.mod`, `go.sum` and the `vendor/` directory before submitting the pull request. - - -## API Implementation Guidelines - -### Naming and Documentation - -Public functions and structs should normally be named according to the file(s) being read and parsed. For example, -the `fs.BuddyInfo()` function reads the file `/proc/buddyinfo`. In addition, the godoc for each public function -should contain the path to the file(s) being read and a URL of the linux kernel documentation describing the file(s). - -### Reading vs. Parsing - -Most functionality in this library consists of reading files and then parsing the text into structured data. In most -cases reading and parsing should be separated into different functions/methods with a public `fs.Thing()` method and -a private `parseThing(r Reader)` function. This provides a logical separation and allows parsing to be tested -directly without the need to read from the filesystem. Using a `Reader` argument is preferred over other data types -such as `string` or `*File` because it provides the most flexibility regarding the data source. When a set of files -in a directory needs to be parsed, then a `path` string parameter to the parse function can be used instead. 
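
For concreteness, here is a minimal sketch of the read/parse split the guideline above describes. Everything in it is hypothetical — the trimmed-down `FS` wrapper, the `Thing` record, and its assumed two-column "name value" file layout stand in for whatever file is actually being parsed:

```go
package procfs

import (
	"bufio"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"strconv"
	"strings"
)

// FS is a trimmed-down stand-in for the library's FS type; here it
// only carries the /proc mount point.
type FS struct {
	procRoot string
}

// Thing is a hypothetical record parsed from /proc/thing, assumed to
// hold "name value" pairs, one per line.
type Thing struct {
	Name  string
	Value uint64
}

// Thing reads the hypothetical file /proc/thing and delegates all
// parsing to parseThing, per the public-read/private-parse guideline.
func (fs FS) Thing() ([]Thing, error) {
	f, err := os.Open(filepath.Join(fs.procRoot, "thing"))
	if err != nil {
		return nil, err
	}
	defer f.Close()
	return parseThing(f)
}

// parseThing does the text-to-struct work against any io.Reader, so a
// test can exercise it without touching the filesystem.
func parseThing(r io.Reader) ([]Thing, error) {
	var things []Thing
	s := bufio.NewScanner(r)
	for s.Scan() {
		fields := strings.Fields(s.Text())
		if len(fields) != 2 {
			return nil, fmt.Errorf("expected 2 fields, got %d: %q", len(fields), s.Text())
		}
		v, err := strconv.ParseUint(fields[1], 10, 64)
		if err != nil {
			return nil, err
		}
		things = append(things, Thing{Name: fields[0], Value: v})
	}
	return things, s.Err()
}
```

Because `parseThing` takes an `io.Reader`, a unit test can feed it `strings.NewReader("a 1\nb 2\n")` and assert on the result directly — which is the testability payoff the separation is meant to buy.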
- -### /proc and /sys filesystem I/O - -The `proc` and `sys` filesystems are pseudo file systems and work a bit differently from standard disk I/O. -Many of the files are changing continuously and the data being read can in some cases change between subsequent -reads in the same file. Also, most of the files are relatively small (less than a few KBs), and system calls -to the `stat` function will often return the wrong size. Therefore, for most files it's recommended to read the -full file in a single operation using an internal utility function called `util.ReadFileNoStat`. -This function is similar to `ioutil.ReadFile`, but it avoids the system call to `stat` to get the current size of -the file. - -Note that parsing the file's contents can still be performed one line at a time. This is done by first reading -the full file, and then using a scanner on the `[]byte` or `string` containing the data. - -``` - data, err := util.ReadFileNoStat("/proc/cpuinfo") - if err != nil { - return err - } - reader := bytes.NewReader(data) - scanner := bufio.NewScanner(reader) -``` - -The `/sys` filesystem contains many very small files which contain only a single numeric or text value. These files -can be read using an internal function called `util.SysReadFile` which is similar to `ioutil.ReadFile` but does -not bother to check the size of the file before reading. -``` - data, err := util.SysReadFile("/sys/class/power_supply/BAT0/capacity") -``` - diff --git a/vendor/github.com/prometheus/procfs/LICENSE b/vendor/github.com/prometheus/procfs/LICENSE deleted file mode 100644 index 261eeb9e..00000000 --- a/vendor/github.com/prometheus/procfs/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/prometheus/procfs/MAINTAINERS.md b/vendor/github.com/prometheus/procfs/MAINTAINERS.md deleted file mode 100644 index 56ba67d3..00000000 --- a/vendor/github.com/prometheus/procfs/MAINTAINERS.md +++ /dev/null @@ -1,2 +0,0 @@ -* Johannes 'fish' Ziemke @discordianfish -* Paul Gier @pgier diff --git a/vendor/github.com/prometheus/procfs/Makefile b/vendor/github.com/prometheus/procfs/Makefile deleted file mode 100644 index 616a0d25..00000000 --- a/vendor/github.com/prometheus/procfs/Makefile +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright 2018 The Prometheus Authors -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -include Makefile.common - -%/.unpacked: %.ttar - @echo ">> extracting fixtures" - ./ttar -C $(dir $*) -x -f $*.ttar - touch $@ - -update_fixtures: - rm -vf fixtures/.unpacked - ./ttar -c -f fixtures.ttar fixtures/ - -.PHONY: build -build: - -.PHONY: test -test: fixtures/.unpacked common-test diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common deleted file mode 100644 index 3ac29c63..00000000 --- a/vendor/github.com/prometheus/procfs/Makefile.common +++ /dev/null @@ -1,302 +0,0 @@ -# Copyright 2018 The Prometheus Authors -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -# A common Makefile that includes rules to be reused in different prometheus projects. -# !!! Open PRs only against the prometheus/prometheus/Makefile.common repository! - -# Example usage : -# Create the main Makefile in the root project directory. -# include Makefile.common -# customTarget: -# @echo ">> Running customTarget" -# - -# Ensure GOBIN is not set during build so that promu is installed to the correct path -unexport GOBIN - -GO ?= go -GOFMT ?= $(GO)fmt -FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH))) -GOOPTS ?= -GOHOSTOS ?= $(shell $(GO) env GOHOSTOS) -GOHOSTARCH ?= $(shell $(GO) env GOHOSTARCH) - -GO_VERSION ?= $(shell $(GO) version) -GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION)) -PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.') - -GOVENDOR := -GO111MODULE := -ifeq (, $(PRE_GO_111)) - ifneq (,$(wildcard go.mod)) - # Enforce Go modules support just in case the directory is inside GOPATH (and for Travis CI). - GO111MODULE := on - - ifneq (,$(wildcard vendor)) - # Always use the local vendor/ directory to satisfy the dependencies. - GOOPTS := $(GOOPTS) -mod=vendor - endif - endif -else - ifneq (,$(wildcard go.mod)) - ifneq (,$(wildcard vendor)) -$(warning This repository requires Go >= 1.11 because of Go modules) -$(warning Some recipes may not work as expected as the current Go runtime is '$(GO_VERSION_NUMBER)') - endif - else - # This repository isn't using Go modules (yet). - GOVENDOR := $(FIRST_GOPATH)/bin/govendor - endif -endif -PROMU := $(FIRST_GOPATH)/bin/promu -pkgs = ./... 
- -ifeq (arm, $(GOHOSTARCH)) - GOHOSTARM ?= $(shell GOARM= $(GO) env GOARM) - GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)v$(GOHOSTARM) -else - GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH) -endif - -GOTEST := $(GO) test -GOTEST_DIR := -ifneq ($(CIRCLE_JOB),) -ifneq ($(shell which gotestsum),) - GOTEST_DIR := test-results - GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml -- -endif -endif - -PROMU_VERSION ?= 0.7.0 -PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz - -GOLANGCI_LINT := -GOLANGCI_LINT_OPTS ?= -GOLANGCI_LINT_VERSION ?= v1.18.0 -# golangci-lint only supports linux, darwin and windows platforms on i386/amd64. -# windows isn't included here because of the path separator being different. -ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) - ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386)) - GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint - endif -endif - -PREFIX ?= $(shell pwd) -BIN_DIR ?= $(shell pwd) -DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD)) -DOCKERFILE_PATH ?= ./Dockerfile -DOCKERBUILD_CONTEXT ?= ./ -DOCKER_REPO ?= prom - -DOCKER_ARCHS ?= amd64 - -BUILD_DOCKER_ARCHS = $(addprefix common-docker-,$(DOCKER_ARCHS)) -PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS)) -TAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS)) - -ifeq ($(GOHOSTARCH),amd64) - ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows)) - # Only supported on amd64 - test-flags := -race - endif -endif - -# This rule is used to forward a target like "build" to "common-build". This -# allows a new "build" target to be defined in a Makefile which includes this -# one and override "common-build" without override warnings. -%: common-% ; - -.PHONY: common-all -common-all: precheck style check_license lint unused build test - -.PHONY: common-style -common-style: - @echo ">> checking code style" - @fmtRes=$$($(GOFMT) -d $$(find . -path ./vendor -prune -o -name '*.go' -print)); \ - if [ -n "$${fmtRes}" ]; then \ - echo "gofmt checking failed!"; echo "$${fmtRes}"; echo; \ - echo "Please ensure you are using $$($(GO) version) for formatting code."; \ - exit 1; \ - fi - -.PHONY: common-check_license -common-check_license: - @echo ">> checking license header" - @licRes=$$(for file in $$(find . -type f -iname '*.go' ! -path './vendor/*') ; do \ - awk 'NR<=3' $$file | grep -Eq "(Copyright|generated|GENERATED)" || echo $$file; \ - done); \ - if [ -n "$${licRes}" ]; then \ - echo "license header checking failed:"; echo "$${licRes}"; \ - exit 1; \ - fi - -.PHONY: common-deps -common-deps: - @echo ">> getting dependencies" -ifdef GO111MODULE - GO111MODULE=$(GO111MODULE) $(GO) mod download -else - $(GO) get $(GOOPTS) -t ./... 
-endif - -.PHONY: update-go-deps -update-go-deps: - @echo ">> updating Go dependencies" - @for m in $$($(GO) list -mod=readonly -m -f '{{ if and (not .Indirect) (not .Main)}}{{.Path}}{{end}}' all); do \ - $(GO) get $$m; \ - done - GO111MODULE=$(GO111MODULE) $(GO) mod tidy -ifneq (,$(wildcard vendor)) - GO111MODULE=$(GO111MODULE) $(GO) mod vendor -endif - -.PHONY: common-test-short -common-test-short: $(GOTEST_DIR) - @echo ">> running short tests" - GO111MODULE=$(GO111MODULE) $(GOTEST) -short $(GOOPTS) $(pkgs) - -.PHONY: common-test -common-test: $(GOTEST_DIR) - @echo ">> running all tests" - GO111MODULE=$(GO111MODULE) $(GOTEST) $(test-flags) $(GOOPTS) $(pkgs) - -$(GOTEST_DIR): - @mkdir -p $@ - -.PHONY: common-format -common-format: - @echo ">> formatting code" - GO111MODULE=$(GO111MODULE) $(GO) fmt $(pkgs) - -.PHONY: common-vet -common-vet: - @echo ">> vetting code" - GO111MODULE=$(GO111MODULE) $(GO) vet $(GOOPTS) $(pkgs) - -.PHONY: common-lint -common-lint: $(GOLANGCI_LINT) -ifdef GOLANGCI_LINT - @echo ">> running golangci-lint" -ifdef GO111MODULE -# 'go list' needs to be executed before staticcheck to prepopulate the modules cache. -# Otherwise staticcheck might fail randomly for some reason not yet explained. - GO111MODULE=$(GO111MODULE) $(GO) list -e -compiled -test=true -export=false -deps=true -find=false -tags= -- ./... > /dev/null - GO111MODULE=$(GO111MODULE) $(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs) -else - $(GOLANGCI_LINT) run $(pkgs) -endif -endif - -# For backward-compatibility. -.PHONY: common-staticcheck -common-staticcheck: lint - -.PHONY: common-unused -common-unused: $(GOVENDOR) -ifdef GOVENDOR - @echo ">> running check for unused packages" - @$(GOVENDOR) list +unused | grep . && exit 1 || echo 'No unused packages' -else -ifdef GO111MODULE - @echo ">> running check for unused/missing packages in go.mod" - GO111MODULE=$(GO111MODULE) $(GO) mod tidy -ifeq (,$(wildcard vendor)) - @git diff --exit-code -- go.sum go.mod -else - @echo ">> running check for unused packages in vendor/" - GO111MODULE=$(GO111MODULE) $(GO) mod vendor - @git diff --exit-code -- go.sum go.mod vendor/ -endif -endif -endif - -.PHONY: common-build -common-build: promu - @echo ">> building binaries" - GO111MODULE=$(GO111MODULE) $(PROMU) build --prefix $(PREFIX) $(PROMU_BINARIES) - -.PHONY: common-tarball -common-tarball: promu - @echo ">> building release tarball" - $(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR) - -.PHONY: common-docker $(BUILD_DOCKER_ARCHS) -common-docker: $(BUILD_DOCKER_ARCHS) -$(BUILD_DOCKER_ARCHS): common-docker-%: - docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" \ - -f $(DOCKERFILE_PATH) \ - --build-arg ARCH="$*" \ - --build-arg OS="linux" \ - $(DOCKERBUILD_CONTEXT) - -.PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS) -common-docker-publish: $(PUBLISH_DOCKER_ARCHS) -$(PUBLISH_DOCKER_ARCHS): common-docker-publish-%: - docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" - -DOCKER_MAJOR_VERSION_TAG = $(firstword $(subst ., ,$(shell cat VERSION))) -.PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS) -common-docker-tag-latest: $(TAG_DOCKER_ARCHS) -$(TAG_DOCKER_ARCHS): common-docker-tag-latest-%: - docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest" - docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)" - -.PHONY: common-docker-manifest 
-common-docker-manifest: - DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(DOCKER_IMAGE_TAG)) - DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" - -.PHONY: promu -promu: $(PROMU) - -$(PROMU): - $(eval PROMU_TMP := $(shell mktemp -d)) - curl -s -L $(PROMU_URL) | tar -xvzf - -C $(PROMU_TMP) - mkdir -p $(FIRST_GOPATH)/bin - cp $(PROMU_TMP)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM)/promu $(FIRST_GOPATH)/bin/promu - rm -r $(PROMU_TMP) - -.PHONY: proto -proto: - @echo ">> generating code from proto files" - @./scripts/genproto.sh - -ifdef GOLANGCI_LINT -$(GOLANGCI_LINT): - mkdir -p $(FIRST_GOPATH)/bin - curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/$(GOLANGCI_LINT_VERSION)/install.sh \ - | sed -e '/install -d/d' \ - | sh -s -- -b $(FIRST_GOPATH)/bin $(GOLANGCI_LINT_VERSION) -endif - -ifdef GOVENDOR -.PHONY: $(GOVENDOR) -$(GOVENDOR): - GOOS= GOARCH= $(GO) get -u github.com/kardianos/govendor -endif - -.PHONY: precheck -precheck:: - -define PRECHECK_COMMAND_template = -precheck:: $(1)_precheck - -PRECHECK_COMMAND_$(1) ?= $(1) $$(strip $$(PRECHECK_OPTIONS_$(1))) -.PHONY: $(1)_precheck -$(1)_precheck: - @if ! $$(PRECHECK_COMMAND_$(1)) 1>/dev/null 2>&1; then \ - echo "Execution of '$$(PRECHECK_COMMAND_$(1))' command failed. Is $(1) installed?"; \ - exit 1; \ - fi -endef diff --git a/vendor/github.com/prometheus/procfs/NOTICE b/vendor/github.com/prometheus/procfs/NOTICE deleted file mode 100644 index 53c5e9aa..00000000 --- a/vendor/github.com/prometheus/procfs/NOTICE +++ /dev/null @@ -1,7 +0,0 @@ -procfs provides functions to retrieve system, kernel and process -metrics from the pseudo-filesystem proc. - -Copyright 2014-2015 The Prometheus Authors - -This product includes software developed at -SoundCloud Ltd. (http://soundcloud.com/). diff --git a/vendor/github.com/prometheus/procfs/README.md b/vendor/github.com/prometheus/procfs/README.md deleted file mode 100644 index 55d1e326..00000000 --- a/vendor/github.com/prometheus/procfs/README.md +++ /dev/null @@ -1,61 +0,0 @@ -# procfs - -This package provides functions to retrieve system, kernel, and process -metrics from the pseudo-filesystems /proc and /sys. - -*WARNING*: This package is a work in progress. Its API may still break in -backwards-incompatible ways without warnings. Use it at your own risk. - -[![GoDoc](https://godoc.org/github.com/prometheus/procfs?status.png)](https://godoc.org/github.com/prometheus/procfs) -[![Build Status](https://travis-ci.org/prometheus/procfs.svg?branch=master)](https://travis-ci.org/prometheus/procfs) -[![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/procfs)](https://goreportcard.com/report/github.com/prometheus/procfs) - -## Usage - -The procfs library is organized by packages based on whether the gathered data is coming from -/proc, /sys, or both. Each package contains an `FS` type which represents the path to either /proc, -/sys, or both. For example, cpu statistics are gathered from -`/proc/stat` and are available via the root procfs package. First, the proc filesystem mount -point is initialized, and then the stat information is read. - -```go -fs, err := procfs.NewFS("/proc") -stats, err := fs.Stat() -``` - -Some sub-packages such as `blockdevice`, require access to both the proc and sys filesystems. 
- -```go - fs, err := blockdevice.NewFS("/proc", "/sys") - stats, err := fs.ProcDiskstats() -``` - -## Package Organization - -The packages in this project are organized according to (1) whether the data comes from the `/proc` or -`/sys` filesystem and (2) the type of information being retrieved. For example, most process information -can be gathered from the functions in the root `procfs` package. Information about block devices such as disk drives -is available in the `blockdevices` sub-package. - -## Building and Testing - -The procfs library is intended to be built as part of another application, so there are no distributable binaries. -However, most of the API includes unit tests which can be run with `make test`. - -### Updating Test Fixtures - -The procfs library includes a set of test fixtures which include many example files from -the `/proc` and `/sys` filesystems. These fixtures are included as a [ttar](https://github.com/ideaship/ttar) file -which is extracted automatically during testing. To add/update the test fixtures, first -ensure the `fixtures` directory is up to date by removing the existing directory and then -extracting the ttar file using `make fixtures/.unpacked` or just `make test`. - -```bash -rm -rf fixtures -make test -``` - -Next, make the required changes to the extracted files in the `fixtures` directory. When -the changes are complete, run `make update_fixtures` to create a new `fixtures.ttar` file -based on the updated `fixtures` directory. And finally, verify the changes using -`git diff fixtures.ttar`. diff --git a/vendor/github.com/prometheus/procfs/SECURITY.md b/vendor/github.com/prometheus/procfs/SECURITY.md deleted file mode 100644 index 67741f01..00000000 --- a/vendor/github.com/prometheus/procfs/SECURITY.md +++ /dev/null @@ -1,6 +0,0 @@ -# Reporting a security issue - -The Prometheus security policy, including how to report vulnerabilities, can be -found here: - -https://prometheus.io/docs/operating/security/ diff --git a/vendor/github.com/prometheus/procfs/arp.go b/vendor/github.com/prometheus/procfs/arp.go deleted file mode 100644 index 4e47e617..00000000 --- a/vendor/github.com/prometheus/procfs/arp.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "fmt" - "io/ioutil" - "net" - "strings" -) - -// ARPEntry contains a single row of the columnar data represented in -// /proc/net/arp. -type ARPEntry struct { - // IP address - IPAddr net.IP - // MAC address - HWAddr net.HardwareAddr - // Name of the device - Device string -} - -// GatherARPEntries retrieves all the ARP entries, parse the relevant columns, -// and then return a slice of ARPEntry's. 
-func (fs FS) GatherARPEntries() ([]ARPEntry, error) { - data, err := ioutil.ReadFile(fs.proc.Path("net/arp")) - if err != nil { - return nil, fmt.Errorf("error reading arp %q: %w", fs.proc.Path("net/arp"), err) - } - - return parseARPEntries(data) -} - -func parseARPEntries(data []byte) ([]ARPEntry, error) { - lines := strings.Split(string(data), "\n") - entries := make([]ARPEntry, 0) - var err error - const ( - expectedDataWidth = 6 - expectedHeaderWidth = 9 - ) - for _, line := range lines { - columns := strings.Fields(line) - width := len(columns) - - if width == expectedHeaderWidth || width == 0 { - continue - } else if width == expectedDataWidth { - entry, err := parseARPEntry(columns) - if err != nil { - return []ARPEntry{}, fmt.Errorf("failed to parse ARP entry: %w", err) - } - entries = append(entries, entry) - } else { - return []ARPEntry{}, fmt.Errorf("%d columns were detected, but %d were expected", width, expectedDataWidth) - } - - } - - return entries, err -} - -func parseARPEntry(columns []string) (ARPEntry, error) { - ip := net.ParseIP(columns[0]) - mac := net.HardwareAddr(columns[3]) - - entry := ARPEntry{ - IPAddr: ip, - HWAddr: mac, - Device: columns[5], - } - - return entry, nil -} diff --git a/vendor/github.com/prometheus/procfs/buddyinfo.go b/vendor/github.com/prometheus/procfs/buddyinfo.go deleted file mode 100644 index f5b7939b..00000000 --- a/vendor/github.com/prometheus/procfs/buddyinfo.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "fmt" - "io" - "os" - "strconv" - "strings" -) - -// A BuddyInfo is the details parsed from /proc/buddyinfo. -// The data is comprised of an array of free fragments of each size. -// The sizes are 2^n*PAGE_SIZE, where n is the array index. -type BuddyInfo struct { - Node string - Zone string - Sizes []float64 -} - -// BuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem. 
-func (fs FS) BuddyInfo() ([]BuddyInfo, error) { - file, err := os.Open(fs.proc.Path("buddyinfo")) - if err != nil { - return nil, err - } - defer file.Close() - - return parseBuddyInfo(file) -} - -func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) { - var ( - buddyInfo = []BuddyInfo{} - scanner = bufio.NewScanner(r) - bucketCount = -1 - ) - - for scanner.Scan() { - var err error - line := scanner.Text() - parts := strings.Fields(line) - - if len(parts) < 4 { - return nil, fmt.Errorf("invalid number of fields when parsing buddyinfo") - } - - node := strings.TrimRight(parts[1], ",") - zone := strings.TrimRight(parts[3], ",") - arraySize := len(parts[4:]) - - if bucketCount == -1 { - bucketCount = arraySize - } else { - if bucketCount != arraySize { - return nil, fmt.Errorf("mismatch in number of buddyinfo buckets, previous count %d, new count %d", bucketCount, arraySize) - } - } - - sizes := make([]float64, arraySize) - for i := 0; i < arraySize; i++ { - sizes[i], err = strconv.ParseFloat(parts[i+4], 64) - if err != nil { - return nil, fmt.Errorf("invalid value in buddyinfo: %w", err) - } - } - - buddyInfo = append(buddyInfo, BuddyInfo{node, zone, sizes}) - } - - return buddyInfo, scanner.Err() -} diff --git a/vendor/github.com/prometheus/procfs/cpuinfo.go b/vendor/github.com/prometheus/procfs/cpuinfo.go deleted file mode 100644 index 5623b24a..00000000 --- a/vendor/github.com/prometheus/procfs/cpuinfo.go +++ /dev/null @@ -1,481 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build linux - -package procfs - -import ( - "bufio" - "bytes" - "errors" - "fmt" - "regexp" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// CPUInfo contains general information about a system CPU found in /proc/cpuinfo -type CPUInfo struct { - Processor uint - VendorID string - CPUFamily string - Model string - ModelName string - Stepping string - Microcode string - CPUMHz float64 - CacheSize string - PhysicalID string - Siblings uint - CoreID string - CPUCores uint - APICID string - InitialAPICID string - FPU string - FPUException string - CPUIDLevel uint - WP string - Flags []string - Bugs []string - BogoMips float64 - CLFlushSize uint - CacheAlignment uint - AddressSizes string - PowerManagement string -} - -var ( - cpuinfoClockRegexp = regexp.MustCompile(`([\d.]+)`) - cpuinfoS390XProcessorRegexp = regexp.MustCompile(`^processor\s+(\d+):.*`) -) - -// CPUInfo returns information about current system CPUs. 
-// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt -func (fs FS) CPUInfo() ([]CPUInfo, error) { - data, err := util.ReadFileNoStat(fs.proc.Path("cpuinfo")) - if err != nil { - return nil, err - } - return parseCPUInfo(data) -} - -func parseCPUInfoX86(info []byte) ([]CPUInfo, error) { - scanner := bufio.NewScanner(bytes.NewReader(info)) - - // find the first "processor" line - firstLine := firstNonEmptyLine(scanner) - if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") { - return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) - } - field := strings.SplitN(firstLine, ": ", 2) - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - firstcpu := CPUInfo{Processor: uint(v)} - cpuinfo := []CPUInfo{firstcpu} - i := 0 - - for scanner.Scan() { - line := scanner.Text() - if !strings.Contains(line, ":") { - continue - } - field := strings.SplitN(line, ": ", 2) - switch strings.TrimSpace(field[0]) { - case "processor": - cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor - i++ - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - cpuinfo[i].Processor = uint(v) - case "vendor", "vendor_id": - cpuinfo[i].VendorID = field[1] - case "cpu family": - cpuinfo[i].CPUFamily = field[1] - case "model": - cpuinfo[i].Model = field[1] - case "model name": - cpuinfo[i].ModelName = field[1] - case "stepping": - cpuinfo[i].Stepping = field[1] - case "microcode": - cpuinfo[i].Microcode = field[1] - case "cpu MHz": - v, err := strconv.ParseFloat(field[1], 64) - if err != nil { - return nil, err - } - cpuinfo[i].CPUMHz = v - case "cache size": - cpuinfo[i].CacheSize = field[1] - case "physical id": - cpuinfo[i].PhysicalID = field[1] - case "siblings": - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - cpuinfo[i].Siblings = uint(v) - case "core id": - cpuinfo[i].CoreID = field[1] - case "cpu cores": - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - cpuinfo[i].CPUCores = uint(v) - case "apicid": - cpuinfo[i].APICID = field[1] - case "initial apicid": - cpuinfo[i].InitialAPICID = field[1] - case "fpu": - cpuinfo[i].FPU = field[1] - case "fpu_exception": - cpuinfo[i].FPUException = field[1] - case "cpuid level": - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - cpuinfo[i].CPUIDLevel = uint(v) - case "wp": - cpuinfo[i].WP = field[1] - case "flags": - cpuinfo[i].Flags = strings.Fields(field[1]) - case "bugs": - cpuinfo[i].Bugs = strings.Fields(field[1]) - case "bogomips": - v, err := strconv.ParseFloat(field[1], 64) - if err != nil { - return nil, err - } - cpuinfo[i].BogoMips = v - case "clflush size": - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - cpuinfo[i].CLFlushSize = uint(v) - case "cache_alignment": - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - cpuinfo[i].CacheAlignment = uint(v) - case "address sizes": - cpuinfo[i].AddressSizes = field[1] - case "power management": - cpuinfo[i].PowerManagement = field[1] - } - } - return cpuinfo, nil -} - -func parseCPUInfoARM(info []byte) ([]CPUInfo, error) { - scanner := bufio.NewScanner(bytes.NewReader(info)) - - firstLine := firstNonEmptyLine(scanner) - match, _ := regexp.MatchString("^[Pp]rocessor", firstLine) - if !match || !strings.Contains(firstLine, ":") { - return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) - } - field := 
strings.SplitN(firstLine, ": ", 2) - cpuinfo := []CPUInfo{} - featuresLine := "" - commonCPUInfo := CPUInfo{} - i := 0 - if strings.TrimSpace(field[0]) == "Processor" { - commonCPUInfo = CPUInfo{ModelName: field[1]} - i = -1 - } else { - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - firstcpu := CPUInfo{Processor: uint(v)} - cpuinfo = []CPUInfo{firstcpu} - } - - for scanner.Scan() { - line := scanner.Text() - if !strings.Contains(line, ":") { - continue - } - field := strings.SplitN(line, ": ", 2) - switch strings.TrimSpace(field[0]) { - case "processor": - cpuinfo = append(cpuinfo, commonCPUInfo) // start of the next processor - i++ - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - cpuinfo[i].Processor = uint(v) - case "BogoMIPS": - if i == -1 { - cpuinfo = append(cpuinfo, commonCPUInfo) // There is only one processor - i++ - cpuinfo[i].Processor = 0 - } - v, err := strconv.ParseFloat(field[1], 64) - if err != nil { - return nil, err - } - cpuinfo[i].BogoMips = v - case "Features": - featuresLine = line - case "model name": - cpuinfo[i].ModelName = field[1] - } - } - fields := strings.SplitN(featuresLine, ": ", 2) - for i := range cpuinfo { - cpuinfo[i].Flags = strings.Fields(fields[1]) - } - return cpuinfo, nil - -} - -func parseCPUInfoS390X(info []byte) ([]CPUInfo, error) { - scanner := bufio.NewScanner(bytes.NewReader(info)) - - firstLine := firstNonEmptyLine(scanner) - if !strings.HasPrefix(firstLine, "vendor_id") || !strings.Contains(firstLine, ":") { - return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) - } - field := strings.SplitN(firstLine, ": ", 2) - cpuinfo := []CPUInfo{} - commonCPUInfo := CPUInfo{VendorID: field[1]} - - for scanner.Scan() { - line := scanner.Text() - if !strings.Contains(line, ":") { - continue - } - field := strings.SplitN(line, ": ", 2) - switch strings.TrimSpace(field[0]) { - case "bogomips per cpu": - v, err := strconv.ParseFloat(field[1], 64) - if err != nil { - return nil, err - } - commonCPUInfo.BogoMips = v - case "features": - commonCPUInfo.Flags = strings.Fields(field[1]) - } - if strings.HasPrefix(line, "processor") { - match := cpuinfoS390XProcessorRegexp.FindStringSubmatch(line) - if len(match) < 2 { - return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) - } - cpu := commonCPUInfo - v, err := strconv.ParseUint(match[1], 0, 32) - if err != nil { - return nil, err - } - cpu.Processor = uint(v) - cpuinfo = append(cpuinfo, cpu) - } - if strings.HasPrefix(line, "cpu number") { - break - } - } - - i := 0 - for scanner.Scan() { - line := scanner.Text() - if !strings.Contains(line, ":") { - continue - } - field := strings.SplitN(line, ": ", 2) - switch strings.TrimSpace(field[0]) { - case "cpu number": - i++ - case "cpu MHz dynamic": - clock := cpuinfoClockRegexp.FindString(strings.TrimSpace(field[1])) - v, err := strconv.ParseFloat(clock, 64) - if err != nil { - return nil, err - } - cpuinfo[i].CPUMHz = v - case "physical id": - cpuinfo[i].PhysicalID = field[1] - case "core id": - cpuinfo[i].CoreID = field[1] - case "cpu cores": - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - cpuinfo[i].CPUCores = uint(v) - case "siblings": - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - cpuinfo[i].Siblings = uint(v) - } - } - - return cpuinfo, nil -} - -func parseCPUInfoMips(info []byte) ([]CPUInfo, error) { - scanner := bufio.NewScanner(bytes.NewReader(info)) - - // find the first 
"processor" line - firstLine := firstNonEmptyLine(scanner) - if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") { - return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) - } - field := strings.SplitN(firstLine, ": ", 2) - cpuinfo := []CPUInfo{} - systemType := field[1] - - i := 0 - - for scanner.Scan() { - line := scanner.Text() - if !strings.Contains(line, ":") { - continue - } - field := strings.SplitN(line, ": ", 2) - switch strings.TrimSpace(field[0]) { - case "processor": - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - i = int(v) - cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor - cpuinfo[i].Processor = uint(v) - cpuinfo[i].VendorID = systemType - case "cpu model": - cpuinfo[i].ModelName = field[1] - case "BogoMIPS": - v, err := strconv.ParseFloat(field[1], 64) - if err != nil { - return nil, err - } - cpuinfo[i].BogoMips = v - } - } - return cpuinfo, nil -} - -func parseCPUInfoPPC(info []byte) ([]CPUInfo, error) { - scanner := bufio.NewScanner(bytes.NewReader(info)) - - firstLine := firstNonEmptyLine(scanner) - if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") { - return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) - } - field := strings.SplitN(firstLine, ": ", 2) - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - firstcpu := CPUInfo{Processor: uint(v)} - cpuinfo := []CPUInfo{firstcpu} - i := 0 - - for scanner.Scan() { - line := scanner.Text() - if !strings.Contains(line, ":") { - continue - } - field := strings.SplitN(line, ": ", 2) - switch strings.TrimSpace(field[0]) { - case "processor": - cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor - i++ - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - cpuinfo[i].Processor = uint(v) - case "cpu": - cpuinfo[i].VendorID = field[1] - case "clock": - clock := cpuinfoClockRegexp.FindString(strings.TrimSpace(field[1])) - v, err := strconv.ParseFloat(clock, 64) - if err != nil { - return nil, err - } - cpuinfo[i].CPUMHz = v - } - } - return cpuinfo, nil -} - -func parseCPUInfoRISCV(info []byte) ([]CPUInfo, error) { - scanner := bufio.NewScanner(bytes.NewReader(info)) - - firstLine := firstNonEmptyLine(scanner) - if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") { - return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) - } - field := strings.SplitN(firstLine, ": ", 2) - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - firstcpu := CPUInfo{Processor: uint(v)} - cpuinfo := []CPUInfo{firstcpu} - i := 0 - - for scanner.Scan() { - line := scanner.Text() - if !strings.Contains(line, ":") { - continue - } - field := strings.SplitN(line, ": ", 2) - switch strings.TrimSpace(field[0]) { - case "processor": - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - i = int(v) - cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor - cpuinfo[i].Processor = uint(v) - case "hart": - cpuinfo[i].CoreID = field[1] - case "isa": - cpuinfo[i].ModelName = field[1] - } - } - return cpuinfo, nil -} - -func parseCPUInfoDummy(_ []byte) ([]CPUInfo, error) { // nolint:unused,deadcode - return nil, errors.New("not implemented") -} - -// firstNonEmptyLine advances the scanner to the first non-empty line -// and returns the contents of that line -func firstNonEmptyLine(scanner *bufio.Scanner) string { - for 
scanner.Scan() { - line := scanner.Text() - if strings.TrimSpace(line) != "" { - return line - } - } - return "" -} diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_armx.go b/vendor/github.com/prometheus/procfs/cpuinfo_armx.go deleted file mode 100644 index 44b590ed..00000000 --- a/vendor/github.com/prometheus/procfs/cpuinfo_armx.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build linux -// +build arm arm64 - -package procfs - -var parseCPUInfo = parseCPUInfoARM diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go b/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go deleted file mode 100644 index 91e27257..00000000 --- a/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build linux -// +build mips mipsle mips64 mips64le - -package procfs - -var parseCPUInfo = parseCPUInfoMips diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_others.go b/vendor/github.com/prometheus/procfs/cpuinfo_others.go deleted file mode 100644 index 95b5b4ec..00000000 --- a/vendor/github.com/prometheus/procfs/cpuinfo_others.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build linux -// +build !386,!amd64,!arm,!arm64,!mips,!mips64,!mips64le,!mipsle,!ppc64,!ppc64le,!riscv64,!s390x - -package procfs - -var parseCPUInfo = parseCPUInfoDummy diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go b/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go deleted file mode 100644 index 6068bd57..00000000 --- a/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build linux -// +build ppc64 ppc64le - -package procfs - -var parseCPUInfo = parseCPUInfoPPC diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go b/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go deleted file mode 100644 index e83c2e20..00000000 --- a/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build linux -// +build riscv riscv64 - -package procfs - -var parseCPUInfo = parseCPUInfoRISCV diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go b/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go deleted file mode 100644 index 26814eeb..00000000 --- a/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build linux - -package procfs - -var parseCPUInfo = parseCPUInfoS390X diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_x86.go b/vendor/github.com/prometheus/procfs/cpuinfo_x86.go deleted file mode 100644 index d5bedf97..00000000 --- a/vendor/github.com/prometheus/procfs/cpuinfo_x86.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
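Each of these cpuinfo_*.go files being deleted is a single-assignment stub: build tags restrict the file to one GOARCH family, and the file binds the package-level parseCPUInfo variable to the matching parser, so the rest of the package stays architecture-neutral and the selection happens at compile time rather than in a runtime switch on runtime.GOARCH. A minimal consumer of that indirection goes through the package's public accessor — the following is a sketch only, assuming the vendored module were still imported and running on Linux, where /proc exists:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// NewFS points the library at a procfs mount; "/proc" is the usual one.
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}

	// CPUInfo reads /proc/cpuinfo and hands the bytes to whichever
	// parseCPUInfo implementation the build tags selected for this GOARCH.
	cpus, err := fs.CPUInfo()
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range cpus {
		fmt.Printf("cpu%d: vendor=%q model=%q\n", c.Processor, c.VendorID, c.ModelName)
	}
}
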
- -// +build linux -// +build 386 amd64 - -package procfs - -var parseCPUInfo = parseCPUInfoX86 diff --git a/vendor/github.com/prometheus/procfs/crypto.go b/vendor/github.com/prometheus/procfs/crypto.go deleted file mode 100644 index 5048ad1f..00000000 --- a/vendor/github.com/prometheus/procfs/crypto.go +++ /dev/null @@ -1,153 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "fmt" - "io" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// Crypto holds info parsed from /proc/crypto. -type Crypto struct { - Alignmask *uint64 - Async bool - Blocksize *uint64 - Chunksize *uint64 - Ctxsize *uint64 - Digestsize *uint64 - Driver string - Geniv string - Internal string - Ivsize *uint64 - Maxauthsize *uint64 - MaxKeysize *uint64 - MinKeysize *uint64 - Module string - Name string - Priority *int64 - Refcnt *int64 - Seedsize *uint64 - Selftest string - Type string - Walksize *uint64 -} - -// Crypto parses an crypto-file (/proc/crypto) and returns a slice of -// structs containing the relevant info. More information available here: -// https://kernel.readthedocs.io/en/sphinx-samples/crypto-API.html -func (fs FS) Crypto() ([]Crypto, error) { - path := fs.proc.Path("crypto") - b, err := util.ReadFileNoStat(path) - if err != nil { - return nil, fmt.Errorf("error reading crypto %q: %w", path, err) - } - - crypto, err := parseCrypto(bytes.NewReader(b)) - if err != nil { - return nil, fmt.Errorf("error parsing crypto %q: %w", path, err) - } - - return crypto, nil -} - -// parseCrypto parses a /proc/crypto stream into Crypto elements. -func parseCrypto(r io.Reader) ([]Crypto, error) { - var out []Crypto - - s := bufio.NewScanner(r) - for s.Scan() { - text := s.Text() - switch { - case strings.HasPrefix(text, "name"): - // Each crypto element begins with its name. - out = append(out, Crypto{}) - case text == "": - continue - } - - kv := strings.Split(text, ":") - if len(kv) != 2 { - return nil, fmt.Errorf("malformed crypto line: %q", text) - } - - k := strings.TrimSpace(kv[0]) - v := strings.TrimSpace(kv[1]) - - // Parse the key/value pair into the currently focused element. - c := &out[len(out)-1] - if err := c.parseKV(k, v); err != nil { - return nil, err - } - } - - if err := s.Err(); err != nil { - return nil, err - } - - return out, nil -} - -// parseKV parses a key/value pair into the appropriate field of c. -func (c *Crypto) parseKV(k, v string) error { - vp := util.NewValueParser(v) - - switch k { - case "async": - // Interpret literal yes as true. 
- c.Async = v == "yes" - case "blocksize": - c.Blocksize = vp.PUInt64() - case "chunksize": - c.Chunksize = vp.PUInt64() - case "digestsize": - c.Digestsize = vp.PUInt64() - case "driver": - c.Driver = v - case "geniv": - c.Geniv = v - case "internal": - c.Internal = v - case "ivsize": - c.Ivsize = vp.PUInt64() - case "maxauthsize": - c.Maxauthsize = vp.PUInt64() - case "max keysize": - c.MaxKeysize = vp.PUInt64() - case "min keysize": - c.MinKeysize = vp.PUInt64() - case "module": - c.Module = v - case "name": - c.Name = v - case "priority": - c.Priority = vp.PInt64() - case "refcnt": - c.Refcnt = vp.PInt64() - case "seedsize": - c.Seedsize = vp.PUInt64() - case "selftest": - c.Selftest = v - case "type": - c.Type = v - case "walksize": - c.Walksize = vp.PUInt64() - } - - return vp.Err() -} diff --git a/vendor/github.com/prometheus/procfs/doc.go b/vendor/github.com/prometheus/procfs/doc.go deleted file mode 100644 index e2acd6d4..00000000 --- a/vendor/github.com/prometheus/procfs/doc.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2014 Prometheus Team -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package procfs provides functions to retrieve system, kernel and process -// metrics from the pseudo-filesystem proc. -// -// Example: -// -// package main -// -// import ( -// "fmt" -// "log" -// -// "github.com/prometheus/procfs" -// ) -// -// func main() { -// p, err := procfs.Self() -// if err != nil { -// log.Fatalf("could not get process: %s", err) -// } -// -// stat, err := p.NewStat() -// if err != nil { -// log.Fatalf("could not get process stat: %s", err) -// } -// -// fmt.Printf("command: %s\n", stat.Comm) -// fmt.Printf("cpu time: %fs\n", stat.CPUTime()) -// fmt.Printf("vsize: %dB\n", stat.VirtualMemory()) -// fmt.Printf("rss: %dB\n", stat.ResidentMemory()) -// } -// -package procfs diff --git a/vendor/github.com/prometheus/procfs/fixtures.ttar b/vendor/github.com/prometheus/procfs/fixtures.ttar deleted file mode 100644 index 1e76173d..00000000 --- a/vendor/github.com/prometheus/procfs/fixtures.ttar +++ /dev/null @@ -1,6553 +0,0 @@ -# Archive created by ttar -c -f fixtures.ttar fixtures/ -Directory: fixtures -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/26231 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/cmdline -Lines: 1 -vimNULLBYTEtest.goNULLBYTE+10NULLBYTEEOF -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/comm -Lines: 1 -vim -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/cwd -SymlinkTo: /usr/bin -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/environ -Lines: 1 
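The parseCrypto/parseKV pair removed above is self-contained: records are delimited by their "name" line (which is why parseCrypto appends a fresh element whenever a line starts with "name"), and parseKV fills in the current record field by field. That makes it easy to exercise against a couple of /proc/crypto-style records. A hypothetical in-package test sketch — it assumes placement beside crypto.go in package procfs, since parseCrypto is unexported:

package procfs

import (
	"strings"
	"testing"
)

func TestParseCryptoSketch(t *testing.T) {
	// Two records in the same "key : value" layout as /proc/crypto.
	const sample = `name         : aes
driver       : aes-aesni
module       : kernel
priority     : 300
selftest     : passed
type         : cipher
blocksize    : 16
min keysize  : 16
max keysize  : 32

name         : crc32c
driver       : crc32c-intel
module       : crc32c_intel
priority     : 200
type         : shash
digestsize   : 4
`
	got, err := parseCrypto(strings.NewReader(sample))
	if err != nil {
		t.Fatal(err)
	}
	if len(got) != 2 {
		t.Fatalf("want 2 records, got %d", len(got))
	}
	if got[0].Driver != "aes-aesni" || *got[0].MinKeysize != 16 {
		t.Fatalf("first record parsed incorrectly: %+v", got[0])
	}
	if got[1].Name != "crc32c" || *got[1].Digestsize != 4 {
		t.Fatalf("second record parsed incorrectly: %+v", got[1])
	}
}
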
-PATH=/go/bin:/usr/local/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/binNULLBYTEHOSTNAME=cd24e11f73a5NULLBYTETERM=xtermNULLBYTEGOLANG_VERSION=1.12.5NULLBYTEGOPATH=/goNULLBYTEHOME=/rootNULLBYTEEOF -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/exe -SymlinkTo: /usr/bin/vim -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/26231/fd -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/fd/0 -SymlinkTo: ../../symlinktargets/abc -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/fd/1 -SymlinkTo: ../../symlinktargets/def -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/fd/10 -SymlinkTo: ../../symlinktargets/xyz -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/fd/2 -SymlinkTo: ../../symlinktargets/ghi -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/fd/3 -SymlinkTo: ../../symlinktargets/uvw -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/26231/fdinfo -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/fdinfo/0 -Lines: 6 -pos: 0 -flags: 02004000 -mnt_id: 13 -inotify wd:3 ino:1 sdev:34 mask:fce ignored_mask:0 fhandle-bytes:c fhandle-type:81 f_handle:000000000100000000000000 -inotify wd:2 ino:1300016 sdev:fd00002 mask:fce ignored_mask:0 fhandle-bytes:8 fhandle-type:1 f_handle:16003001ed3f022a -inotify wd:1 ino:2e0001 sdev:fd00000 mask:fce ignored_mask:0 fhandle-bytes:8 fhandle-type:1 f_handle:01002e00138e7c65 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/fdinfo/1 -Lines: 4 -pos: 0 -flags: 02004002 -mnt_id: 13 -eventfd-count: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/fdinfo/10 -Lines: 3 -pos: 0 -flags: 02004002 -mnt_id: 9 -Mode: 400 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/fdinfo/2 -Lines: 3 -pos: 0 -flags: 02004002 -mnt_id: 9 -Mode: 400 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/fdinfo/3 -Lines: 3 -pos: 0 -flags: 02004002 -mnt_id: 9 -Mode: 400 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/io -Lines: 7 -rchar: 750339 -wchar: 818609 -syscr: 7405 -syscw: 5245 -read_bytes: 1024 -write_bytes: 2048 -cancelled_write_bytes: -1024 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/limits -Lines: 17 -Limit Soft Limit Hard Limit Units -Max cpu time unlimited unlimited seconds -Max file size unlimited unlimited bytes -Max data size unlimited unlimited bytes -Max stack size 8388608 unlimited bytes -Max core file size 0 unlimited bytes -Max resident set unlimited unlimited bytes -Max processes 62898 62898 processes -Max open files 2048 4096 files -Max locked memory 18446744073708503040 18446744073708503040 bytes -Max address space 8589934592 unlimited bytes -Max file locks unlimited unlimited locks -Max pending signals 62898 62898 signals -Max msgqueue size 
819200 819200 bytes -Max nice priority 0 0 -Max realtime priority 0 0 -Max realtime timeout unlimited unlimited us -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/mountstats -Lines: 20 -device rootfs mounted on / with fstype rootfs -device sysfs mounted on /sys with fstype sysfs -device proc mounted on /proc with fstype proc -device /dev/sda1 mounted on / with fstype ext4 -device 192.168.1.1:/srv/test mounted on /mnt/nfs/test with fstype nfs4 statvers=1.1 - opts: rw,vers=4.0,rsize=1048576,wsize=1048576,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=tcp,port=0,timeo=600,retrans=2,sec=sys,mountaddr=192.168.1.1,clientaddr=192.168.1.5,local_lock=none - age: 13968 - caps: caps=0xfff7,wtmult=512,dtsize=32768,bsize=0,namlen=255 - nfsv4: bm0=0xfdffafff,bm1=0xf9be3e,bm2=0x0,acl=0x0,pnfs=not configured - sec: flavor=1,pseudoflavor=1 - events: 52 226 0 0 1 13 398 0 0 331 0 47 0 0 77 0 0 77 0 0 0 0 0 0 0 0 0 - bytes: 1207640230 0 0 0 1210214218 0 295483 0 - RPC iostats version: 1.0 p/v: 100003/4 (nfs) - xprt: tcp 832 0 1 0 11 6428 6428 0 12154 0 24 26 5726 - per-op statistics - NULL: 0 0 0 0 0 0 0 0 - READ: 1298 1298 0 207680 1210292152 6 79386 79407 - WRITE: 0 0 0 0 0 0 0 0 - ACCESS: 2927395007 2927394995 0 526931094212 362996810236 18446743919241604546 1667369447 1953587717 - -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/26231/net -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/net/dev -Lines: 4 -Inter-| Receive | Transmit - face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed - lo: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 - eth0: 438 5 0 0 0 0 0 0 648 8 0 0 0 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/26231/ns -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/ns/mnt -SymlinkTo: mnt:[4026531840] -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/ns/net -SymlinkTo: net:[4026531993] -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/root -SymlinkTo: / -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/schedstat -Lines: 1 -411605849 93680043 79 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/smaps -Lines: 252 -00400000-00cb1000 r-xp 00000000 fd:01 952273 /bin/alertmanager -Size: 8900 kB -KernelPageSize: 4 kB -MMUPageSize: 4 kB -Rss: 2952 kB -Pss: 2952 kB -Shared_Clean: 0 kB -Shared_Dirty: 0 kB -Private_Clean: 2952 kB -Private_Dirty: 0 kB -Referenced: 2864 kB -Anonymous: 0 kB -LazyFree: 0 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 0 kB -SwapPss: 0 kB -Locked: 0 kB -VmFlags: rd ex mr mw me dw sd -00cb1000-016b0000 r--p 008b1000 fd:01 952273 /bin/alertmanager -Size: 10236 kB -KernelPageSize: 4 kB -MMUPageSize: 4 kB -Rss: 6152 kB -Pss: 6152 kB -Shared_Clean: 0 kB -Shared_Dirty: 0 kB -Private_Clean: 6152 kB -Private_Dirty: 0 kB -Referenced: 5308 kB -Anonymous: 0 kB -LazyFree: 0 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 0 kB -SwapPss: 0 kB 
-Locked: 0 kB -VmFlags: rd mr mw me dw sd -016b0000-0171a000 rw-p 012b0000 fd:01 952273 /bin/alertmanager -Size: 424 kB -KernelPageSize: 4 kB -MMUPageSize: 4 kB -Rss: 176 kB -Pss: 176 kB -Shared_Clean: 0 kB -Shared_Dirty: 0 kB -Private_Clean: 84 kB -Private_Dirty: 92 kB -Referenced: 176 kB -Anonymous: 92 kB -LazyFree: 0 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 12 kB -SwapPss: 12 kB -Locked: 0 kB -VmFlags: rd wr mr mw me dw ac sd -0171a000-0173f000 rw-p 00000000 00:00 0 -Size: 148 kB -KernelPageSize: 4 kB -MMUPageSize: 4 kB -Rss: 76 kB -Pss: 76 kB -Shared_Clean: 0 kB -Shared_Dirty: 0 kB -Private_Clean: 0 kB -Private_Dirty: 76 kB -Referenced: 76 kB -Anonymous: 76 kB -LazyFree: 0 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 0 kB -SwapPss: 0 kB -Locked: 0 kB -VmFlags: rd wr mr mw me ac sd -c000000000-c000400000 rw-p 00000000 00:00 0 -Size: 4096 kB -KernelPageSize: 4 kB -MMUPageSize: 4 kB -Rss: 2564 kB -Pss: 2564 kB -Shared_Clean: 0 kB -Shared_Dirty: 0 kB -Private_Clean: 20 kB -Private_Dirty: 2544 kB -Referenced: 2544 kB -Anonymous: 2564 kB -LazyFree: 0 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 1100 kB -SwapPss: 1100 kB -Locked: 0 kB -VmFlags: rd wr mr mw me ac sd -c000400000-c001600000 rw-p 00000000 00:00 0 -Size: 18432 kB -KernelPageSize: 4 kB -MMUPageSize: 4 kB -Rss: 16024 kB -Pss: 16024 kB -Shared_Clean: 0 kB -Shared_Dirty: 0 kB -Private_Clean: 5864 kB -Private_Dirty: 10160 kB -Referenced: 11944 kB -Anonymous: 16024 kB -LazyFree: 5848 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 440 kB -SwapPss: 440 kB -Locked: 0 kB -VmFlags: rd wr mr mw me ac sd nh -c001600000-c004000000 rw-p 00000000 00:00 0 -Size: 43008 kB -KernelPageSize: 4 kB -MMUPageSize: 4 kB -Rss: 0 kB -Pss: 0 kB -Shared_Clean: 0 kB -Shared_Dirty: 0 kB -Private_Clean: 0 kB -Private_Dirty: 0 kB -Referenced: 0 kB -Anonymous: 0 kB -LazyFree: 0 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 0 kB -SwapPss: 0 kB -Locked: 0 kB -VmFlags: rd wr mr mw me ac sd -7f0ab95ca000-7f0abbb7b000 rw-p 00000000 00:00 0 -Size: 38596 kB -KernelPageSize: 4 kB -MMUPageSize: 4 kB -Rss: 1992 kB -Pss: 1992 kB -Shared_Clean: 0 kB -Shared_Dirty: 0 kB -Private_Clean: 476 kB -Private_Dirty: 1516 kB -Referenced: 1828 kB -Anonymous: 1992 kB -LazyFree: 0 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 384 kB -SwapPss: 384 kB -Locked: 0 kB -VmFlags: rd wr mr mw me ac sd -7ffc07ecf000-7ffc07ef0000 rw-p 00000000 00:00 0 [stack] -Size: 132 kB -KernelPageSize: 4 kB -MMUPageSize: 4 kB -Rss: 8 kB -Pss: 8 kB -Shared_Clean: 0 kB -Shared_Dirty: 0 kB -Private_Clean: 0 kB -Private_Dirty: 8 kB -Referenced: 8 kB -Anonymous: 8 kB -LazyFree: 0 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 4 kB -SwapPss: 4 kB -Locked: 0 kB -VmFlags: rd wr mr mw me gd ac -7ffc07f9e000-7ffc07fa1000 r--p 00000000 00:00 0 [vvar] -Size: 12 kB -KernelPageSize: 4 kB -MMUPageSize: 4 kB -Rss: 0 kB -Pss: 0 kB -Shared_Clean: 0 kB -Shared_Dirty: 0 kB -Private_Clean: 0 kB -Private_Dirty: 0 kB -Referenced: 0 kB -Anonymous: 0 kB -LazyFree: 0 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 0 kB -SwapPss: 0 kB -Locked: 0 kB -VmFlags: rd mr pf io de dd sd -7ffc07fa1000-7ffc07fa3000 r-xp 00000000 00:00 0 
[vdso] -Size: 8 kB -KernelPageSize: 4 kB -MMUPageSize: 4 kB -Rss: 4 kB -Pss: 0 kB -Shared_Clean: 4 kB -Shared_Dirty: 0 kB -Private_Clean: 0 kB -Private_Dirty: 0 kB -Referenced: 4 kB -Anonymous: 0 kB -LazyFree: 0 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 0 kB -SwapPss: 0 kB -Locked: 0 kB -VmFlags: rd ex mr mw me de sd -ffffffffff600000-ffffffffff601000 r-xp 00000000 00:00 0 [vsyscall] -Size: 4 kB -KernelPageSize: 4 kB -MMUPageSize: 4 kB -Rss: 0 kB -Pss: 0 kB -Shared_Clean: 0 kB -Shared_Dirty: 0 kB -Private_Clean: 0 kB -Private_Dirty: 0 kB -Referenced: 0 kB -Anonymous: 0 kB -LazyFree: 0 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 0 kB -SwapPss: 0 kB -Locked: 0 kB -VmFlags: rd ex -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/smaps_rollup -Lines: 17 -00400000-ffffffffff601000 ---p 00000000 00:00 0 [rollup] -Rss: 29948 kB -Pss: 29944 kB -Shared_Clean: 4 kB -Shared_Dirty: 0 kB -Private_Clean: 15548 kB -Private_Dirty: 14396 kB -Referenced: 24752 kB -Anonymous: 20756 kB -LazyFree: 5848 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 1940 kB -SwapPss: 1940 kB -Locked: 0 kB -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/stat -Lines: 1 -26231 (vim) R 5392 7446 5392 34835 7446 4218880 32533 309516 26 82 1677 44 158 99 20 0 1 0 82375 56274944 1981 18446744073709551615 4194304 6294284 140736914091744 140736914087944 139965136429984 0 0 12288 1870679807 0 0 0 17 0 0 0 31 0 0 8391624 8481048 16420864 140736914093252 140736914093279 140736914093279 140736914096107 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/status -Lines: 53 - -Name: prometheus -Umask: 0022 -State: S (sleeping) -Tgid: 26231 -Ngid: 0 -Pid: 26231 -PPid: 1 -TracerPid: 0 -Uid: 1000 1000 1000 0 -Gid: 1001 1001 1001 0 -FDSize: 128 -Groups: -NStgid: 1 -NSpid: 1 -NSpgid: 1 -NSsid: 1 -VmPeak: 58472 kB -VmSize: 58440 kB -VmLck: 0 kB -VmPin: 0 kB -VmHWM: 8028 kB -VmRSS: 6716 kB -RssAnon: 2092 kB -RssFile: 4624 kB -RssShmem: 0 kB -VmData: 2580 kB -VmStk: 136 kB -VmExe: 948 kB -VmLib: 6816 kB -VmPTE: 128 kB -VmPMD: 12 kB -VmSwap: 660 kB -HugetlbPages: 0 kB -Threads: 1 -SigQ: 8/63965 -SigPnd: 0000000000000000 -ShdPnd: 0000000000000000 -SigBlk: 7be3c0fe28014a03 -SigIgn: 0000000000001000 -SigCgt: 00000001800004ec -CapInh: 0000000000000000 -CapPrm: 0000003fffffffff -CapEff: 0000003fffffffff -CapBnd: 0000003fffffffff -CapAmb: 0000000000000000 -Seccomp: 0 -Cpus_allowed: ff -Cpus_allowed_list: 0-7 -Mems_allowed: 00000000,00000001 -Mems_allowed_list: 0 -voluntary_ctxt_switches: 4742839 -nonvoluntary_ctxt_switches: 1727500 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/wchan -Lines: 1 -poll_schedule_timeoutEOF -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/26232 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/cmdline -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/comm -Lines: 1 -ata_sff -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/proc/26232/cwd -SymlinkTo: /does/not/exist -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/26232/fd -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/fd/0 -SymlinkTo: ../../symlinktargets/abc -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/fd/1 -SymlinkTo: ../../symlinktargets/def -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/fd/2 -SymlinkTo: ../../symlinktargets/ghi -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/fd/3 -SymlinkTo: ../../symlinktargets/uvw -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/fd/4 -SymlinkTo: ../../symlinktargets/xyz -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/limits -Lines: 17 -Limit Soft Limit Hard Limit Units -Max cpu time unlimited unlimited seconds -Max file size unlimited unlimited bytes -Max data size unlimited unlimited bytes -Max stack size 8388608 unlimited bytes -Max core file size 0 unlimited bytes -Max resident set unlimited unlimited bytes -Max processes 29436 29436 processes -Max open files 1024 4096 files -Max locked memory 65536 65536 bytes -Max address space unlimited unlimited bytes -Max file locks unlimited unlimited locks -Max pending signals 29436 29436 signals -Max msgqueue size 819200 819200 bytes -Max nice priority 0 0 -Max realtime priority 0 0 -Max realtime timeout unlimited unlimited us -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/maps -Lines: 9 -55680ae1e000-55680ae20000 r--p 00000000 fd:01 47316994 /bin/cat -55680ae29000-55680ae2a000 rwxs 0000a000 fd:01 47316994 /bin/cat -55680bed6000-55680bef7000 rw-p 00000000 00:00 0 [heap] -7fdf964fc000-7fdf973f2000 r--p 00000000 fd:01 17432624 /usr/lib/locale/locale-archive -7fdf973f2000-7fdf97417000 r--p 00000000 fd:01 60571062 /lib/x86_64-linux-gnu/libc-2.29.so -7ffe9215c000-7ffe9217f000 rw-p 00000000 00:00 0 [stack] -7ffe921da000-7ffe921dd000 r--p 00000000 00:00 0 [vvar] -7ffe921dd000-7ffe921de000 r-xp 00000000 00:00 0 [vdso] -ffffffffff600000-ffffffffff601000 --xp 00000000 00:00 0 [vsyscall] -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/root -SymlinkTo: /does/not/exist -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/stat -Lines: 1 -33 (ata_sff) S 2 0 0 0 -1 69238880 0 0 0 0 0 0 0 0 0 -20 1 0 5 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 18446744073709551615 0 0 17 1 0 0 0 0 0 0 0 0 0 0 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/wchan -Lines: 1 -0EOF -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/26233 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26233/cmdline -Lines: 1 
-com.github.uiautomatorNULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTEEOF -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26233/schedstat -Lines: 8 - ____________________________________ -< this is a malformed schedstat file > - ------------------------------------ - \ ^__^ - \ (oo)\_______ - (__)\ )\/\ - ||----w | - || || -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/26234 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26234/maps -Lines: 4 -08048000-08089000 r-xp 00000000 03:01 104219 /bin/tcsh -08089000-0808c000 rw-p 00041000 03:01 104219 /bin/tcsh -0808c000-08146000 rwxp 00000000 00:00 0 -40000000-40015000 r-xp 00000000 03:01 61874 /lib/ld-2.3.2.so -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/584 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/584/stat -Lines: 2 -1020 ((a b ) ( c d) ) R 28378 1020 28378 34842 1020 4218880 286 0 0 0 0 0 0 0 20 0 1 0 10839175 10395648 155 18446744073709551615 4194304 4238788 140736466511168 140736466511168 140609271124624 0 0 0 0 0 0 0 17 5 0 0 0 0 0 6336016 6337300 25579520 140736466515030 140736466515061 140736466515061 140736466518002 0 -#!/bin/cat /proc/self/stat -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/buddyinfo -Lines: 3 -Node 0, zone DMA 1 0 1 0 2 1 1 0 1 1 3 -Node 0, zone DMA32 759 572 791 475 194 45 12 0 0 0 0 -Node 0, zone Normal 4381 1093 185 1530 567 102 4 0 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/cpuinfo -Lines: 216 -processor : 0 -vendor_id : GenuineIntel -cpu family : 6 -model : 142 -model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz -stepping : 10 -microcode : 0xb4 -cpu MHz : 799.998 -cache size : 8192 KB -physical id : 0 -siblings : 8 -core id : 0 -cpu cores : 4 -apicid : 0 -initial apicid : 0 -fpu : yes -fpu_exception : yes -cpuid level : 22 -wp : yes -flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear 
flush_l1d -bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs -bogomips : 4224.00 -clflush size : 64 -cache_alignment : 64 -address sizes : 39 bits physical, 48 bits virtual -power management: - -processor : 1 -vendor_id : GenuineIntel -cpu family : 6 -model : 142 -model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz -stepping : 10 -microcode : 0xb4 -cpu MHz : 800.037 -cache size : 8192 KB -physical id : 0 -siblings : 8 -core id : 1 -cpu cores : 4 -apicid : 2 -initial apicid : 2 -fpu : yes -fpu_exception : yes -cpuid level : 22 -wp : yes -flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d -bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs -bogomips : 4224.00 -clflush size : 64 -cache_alignment : 64 -address sizes : 39 bits physical, 48 bits virtual -power management: - -processor : 2 -vendor_id : GenuineIntel -cpu family : 6 -model : 142 -model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz -stepping : 10 -microcode : 0xb4 -cpu MHz : 800.010 -cache size : 8192 KB -physical id : 0 -siblings : 8 -core id : 2 -cpu cores : 4 -apicid : 4 -initial apicid : 4 -fpu : yes -fpu_exception : yes -cpuid level : 22 -wp : yes -flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d -bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs -bogomips : 4224.00 -clflush size : 64 -cache_alignment : 64 -address sizes : 39 bits physical, 48 bits virtual -power management: - -processor : 3 -vendor_id : GenuineIntel -cpu family : 6 -model : 142 -model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz -stepping : 10 -microcode : 0xb4 -cpu MHz : 800.028 -cache size : 8192 KB -physical id : 0 -siblings : 8 -core id : 3 -cpu cores : 4 -apicid : 6 -initial apicid : 6 -fpu : yes -fpu_exception : yes -cpuid level : 22 -wp : yes -flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 
sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d -bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs -bogomips : 4224.00 -clflush size : 64 -cache_alignment : 64 -address sizes : 39 bits physical, 48 bits virtual -power management: - -processor : 4 -vendor_id : GenuineIntel -cpu family : 6 -model : 142 -model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz -stepping : 10 -microcode : 0xb4 -cpu MHz : 799.989 -cache size : 8192 KB -physical id : 0 -siblings : 8 -core id : 0 -cpu cores : 4 -apicid : 1 -initial apicid : 1 -fpu : yes -fpu_exception : yes -cpuid level : 22 -wp : yes -flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d -bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs -bogomips : 4224.00 -clflush size : 64 -cache_alignment : 64 -address sizes : 39 bits physical, 48 bits virtual -power management: - -processor : 5 -vendor_id : GenuineIntel -cpu family : 6 -model : 142 -model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz -stepping : 10 -microcode : 0xb4 -cpu MHz : 800.083 -cache size : 8192 KB -physical id : 0 -siblings : 8 -core id : 1 -cpu cores : 4 -apicid : 3 -initial apicid : 3 -fpu : yes -fpu_exception : yes -cpuid level : 22 -wp : yes -flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d -bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs -bogomips : 4224.00 -clflush size : 64 -cache_alignment : 64 -address sizes : 39 bits physical, 48 bits virtual -power management: - -processor : 6 -vendor_id : GenuineIntel -cpu family : 6 -model : 142 -model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz -stepping : 10 -microcode : 0xb4 -cpu MHz : 800.017 -cache size : 8192 KB -physical id : 0 -siblings : 8 
-core id : 2 -cpu cores : 4 -apicid : 5 -initial apicid : 5 -fpu : yes -fpu_exception : yes -cpuid level : 22 -wp : yes -flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d -bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs -bogomips : 4224.00 -clflush size : 64 -cache_alignment : 64 -address sizes : 39 bits physical, 48 bits virtual -power management: - -processor : 7 -vendor_id : GenuineIntel -cpu family : 6 -model : 142 -model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz -stepping : 10 -microcode : 0xb4 -cpu MHz : 800.030 -cache size : 8192 KB -physical id : 0 -siblings : 8 -core id : 3 -cpu cores : 4 -apicid : 7 -initial apicid : 7 -fpu : yes -fpu_exception : yes -cpuid level : 22 -wp : yes -flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d -bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs -bogomips : 4224.00 -clflush size : 64 -cache_alignment : 64 -address sizes : 39 bits physical, 48 bits virtual -power management: - -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/crypto -Lines: 972 -name : ccm(aes) -driver : ccm_base(ctr(aes-aesni),cbcmac(aes-aesni)) -module : ccm -priority : 300 -refcnt : 4 -selftest : passed -internal : no -type : aead -async : no -blocksize : 1 -ivsize : 16 -maxauthsize : 16 -geniv : - -name : cbcmac(aes) -driver : cbcmac(aes-aesni) -module : ccm -priority : 300 -refcnt : 7 -selftest : passed -internal : no -type : shash -blocksize : 1 -digestsize : 16 - -name : ecdh -driver : ecdh-generic -module : ecdh_generic -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : kpp -async : yes - -name : ecb(arc4) -driver : ecb(arc4)-generic -module : arc4 -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : skcipher -async : no -blocksize : 1 -min keysize : 1 -max keysize : 256 -ivsize : 0 -chunksize : 1 -walksize : 1 - -name : arc4 -driver : arc4-generic -module : arc4 -priority : 0 -refcnt : 3 -selftest : passed -internal : no -type : cipher -blocksize : 1 -min keysize : 1 -max keysize : 256 - -name : crct10dif 
-driver : crct10dif-pclmul -module : crct10dif_pclmul -priority : 200 -refcnt : 2 -selftest : passed -internal : no -type : shash -blocksize : 1 -digestsize : 2 - -name : crc32 -driver : crc32-pclmul -module : crc32_pclmul -priority : 200 -refcnt : 1 -selftest : passed -internal : no -type : shash -blocksize : 1 -digestsize : 4 - -name : __ghash -driver : cryptd(__ghash-pclmulqdqni) -module : kernel -priority : 50 -refcnt : 1 -selftest : passed -internal : yes -type : ahash -async : yes -blocksize : 16 -digestsize : 16 - -name : ghash -driver : ghash-clmulni -module : ghash_clmulni_intel -priority : 400 -refcnt : 1 -selftest : passed -internal : no -type : ahash -async : yes -blocksize : 16 -digestsize : 16 - -name : __ghash -driver : __ghash-pclmulqdqni -module : ghash_clmulni_intel -priority : 0 -refcnt : 1 -selftest : passed -internal : yes -type : shash -blocksize : 16 -digestsize : 16 - -name : crc32c -driver : crc32c-intel -module : crc32c_intel -priority : 200 -refcnt : 5 -selftest : passed -internal : no -type : shash -blocksize : 1 -digestsize : 4 - -name : cbc(aes) -driver : cbc(aes-aesni) -module : kernel -priority : 300 -refcnt : 1 -selftest : passed -internal : no -type : skcipher -async : no -blocksize : 16 -min keysize : 16 -max keysize : 32 -ivsize : 16 -chunksize : 16 -walksize : 16 - -name : ctr(aes) -driver : ctr(aes-aesni) -module : kernel -priority : 300 -refcnt : 5 -selftest : passed -internal : no -type : skcipher -async : no -blocksize : 1 -min keysize : 16 -max keysize : 32 -ivsize : 16 -chunksize : 16 -walksize : 16 - -name : pkcs1pad(rsa,sha256) -driver : pkcs1pad(rsa-generic,sha256) -module : kernel -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : akcipher - -name : __xts(aes) -driver : cryptd(__xts-aes-aesni) -module : kernel -priority : 451 -refcnt : 1 -selftest : passed -internal : yes -type : skcipher -async : yes -blocksize : 16 -min keysize : 32 -max keysize : 64 -ivsize : 16 -chunksize : 16 -walksize : 16 - -name : xts(aes) -driver : xts-aes-aesni -module : kernel -priority : 401 -refcnt : 1 -selftest : passed -internal : no -type : skcipher -async : yes -blocksize : 16 -min keysize : 32 -max keysize : 64 -ivsize : 16 -chunksize : 16 -walksize : 16 - -name : __ctr(aes) -driver : cryptd(__ctr-aes-aesni) -module : kernel -priority : 450 -refcnt : 1 -selftest : passed -internal : yes -type : skcipher -async : yes -blocksize : 1 -max keysize : 32 -ivsize : 16 -chunksize : 16 -walksize : 16 - -name : ctr(aes) -driver : ctr-aes-aesni -module : kernel -priority : 400 -refcnt : 1 -selftest : passed -internal : no -type : skcipher -async : yes -blocksize : 1 -min keysize : 16 -max keysize : 32 -ivsize : 16 -chunksize : 16 -walksize : 16 - -name : __cbc(aes) -driver : cryptd(__cbc-aes-aesni) -module : kernel -priority : 450 -refcnt : 1 -selftest : passed -internal : yes -type : skcipher -async : yes -blocksize : 16 -min keysize : 16 -max keysize : 32 -ivsize : 16 -chunksize : 16 -walksize : 16 - -name : cbc(aes) -driver : cbc-aes-aesni -module : kernel -priority : 400 -refcnt : 1 -selftest : passed -internal : no -type : skcipher -async : yes -blocksize : 16 -min keysize : 16 -max keysize : 32 -ivsize : 16 -chunksize : 16 -walksize : 16 - -name : __ecb(aes) -driver : cryptd(__ecb-aes-aesni) -module : kernel -priority : 450 -refcnt : 1 -selftest : passed -internal : yes -type : skcipher -async : yes -blocksize : 16 -min keysize : 16 -max keysize : 32 -ivsize : 0 -chunksize : 16 -walksize : 16 - -name : ecb(aes) -driver : ecb-aes-aesni -module : 
kernel -priority : 400 -refcnt : 1 -selftest : passed -internal : no -type : skcipher -async : yes -blocksize : 16 -min keysize : 16 -max keysize : 32 -ivsize : 0 -chunksize : 16 -walksize : 16 - -name : __generic-gcm-aes-aesni -driver : cryptd(__driver-generic-gcm-aes-aesni) -module : kernel -priority : 50 -refcnt : 1 -selftest : passed -internal : yes -type : aead -async : yes -blocksize : 1 -ivsize : 12 -maxauthsize : 16 -geniv : - -name : gcm(aes) -driver : generic-gcm-aesni -module : kernel -priority : 400 -refcnt : 1 -selftest : passed -internal : no -type : aead -async : yes -blocksize : 1 -ivsize : 12 -maxauthsize : 16 -geniv : - -name : __generic-gcm-aes-aesni -driver : __driver-generic-gcm-aes-aesni -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : yes -type : aead -async : no -blocksize : 1 -ivsize : 12 -maxauthsize : 16 -geniv : - -name : __gcm-aes-aesni -driver : cryptd(__driver-gcm-aes-aesni) -module : kernel -priority : 50 -refcnt : 1 -selftest : passed -internal : yes -type : aead -async : yes -blocksize : 1 -ivsize : 8 -maxauthsize : 16 -geniv : - -name : rfc4106(gcm(aes)) -driver : rfc4106-gcm-aesni -module : kernel -priority : 400 -refcnt : 1 -selftest : passed -internal : no -type : aead -async : yes -blocksize : 1 -ivsize : 8 -maxauthsize : 16 -geniv : - -name : __gcm-aes-aesni -driver : __driver-gcm-aes-aesni -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : yes -type : aead -async : no -blocksize : 1 -ivsize : 8 -maxauthsize : 16 -geniv : - -name : __xts(aes) -driver : __xts-aes-aesni -module : kernel -priority : 401 -refcnt : 1 -selftest : passed -internal : yes -type : skcipher -async : no -blocksize : 16 -min keysize : 32 -max keysize : 64 -ivsize : 16 -chunksize : 16 -walksize : 16 - -name : __ctr(aes) -driver : __ctr-aes-aesni -module : kernel -priority : 400 -refcnt : 1 -selftest : passed -internal : yes -type : skcipher -async : no -blocksize : 1 -min keysize : 16 -max keysize : 32 -ivsize : 16 -chunksize : 16 -walksize : 16 - -name : __cbc(aes) -driver : __cbc-aes-aesni -module : kernel -priority : 400 -refcnt : 1 -selftest : passed -internal : yes -type : skcipher -async : no -blocksize : 16 -min keysize : 16 -max keysize : 32 -ivsize : 16 -chunksize : 16 -walksize : 16 - -name : __ecb(aes) -driver : __ecb-aes-aesni -module : kernel -priority : 400 -refcnt : 1 -selftest : passed -internal : yes -type : skcipher -async : no -blocksize : 16 -min keysize : 16 -max keysize : 32 -ivsize : 0 -chunksize : 16 -walksize : 16 - -name : __aes -driver : __aes-aesni -module : kernel -priority : 300 -refcnt : 1 -selftest : passed -internal : yes -type : cipher -blocksize : 16 -min keysize : 16 -max keysize : 32 - -name : aes -driver : aes-aesni -module : kernel -priority : 300 -refcnt : 8 -selftest : passed -internal : no -type : cipher -blocksize : 16 -min keysize : 16 -max keysize : 32 - -name : hmac(sha1) -driver : hmac(sha1-generic) -module : kernel -priority : 100 -refcnt : 9 -selftest : passed -internal : no -type : shash -blocksize : 64 -digestsize : 20 - -name : ghash -driver : ghash-generic -module : kernel -priority : 100 -refcnt : 3 -selftest : passed -internal : no -type : shash -blocksize : 16 -digestsize : 16 - -name : jitterentropy_rng -driver : jitterentropy_rng -module : kernel -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_nopr_hmac_sha256 -module : kernel -priority : 221 -refcnt : 2 -selftest : passed -internal : no -type : rng 
-seedsize : 0 - -name : stdrng -driver : drbg_nopr_hmac_sha512 -module : kernel -priority : 220 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_nopr_hmac_sha384 -module : kernel -priority : 219 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_nopr_hmac_sha1 -module : kernel -priority : 218 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_nopr_sha256 -module : kernel -priority : 217 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_nopr_sha512 -module : kernel -priority : 216 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_nopr_sha384 -module : kernel -priority : 215 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_nopr_sha1 -module : kernel -priority : 214 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_nopr_ctr_aes256 -module : kernel -priority : 213 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_nopr_ctr_aes192 -module : kernel -priority : 212 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_nopr_ctr_aes128 -module : kernel -priority : 211 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : hmac(sha256) -driver : hmac(sha256-generic) -module : kernel -priority : 100 -refcnt : 10 -selftest : passed -internal : no -type : shash -blocksize : 64 -digestsize : 32 - -name : stdrng -driver : drbg_pr_hmac_sha256 -module : kernel -priority : 210 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_pr_hmac_sha512 -module : kernel -priority : 209 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_pr_hmac_sha384 -module : kernel -priority : 208 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_pr_hmac_sha1 -module : kernel -priority : 207 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_pr_sha256 -module : kernel -priority : 206 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_pr_sha512 -module : kernel -priority : 205 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_pr_sha384 -module : kernel -priority : 204 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_pr_sha1 -module : kernel -priority : 203 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_pr_ctr_aes256 -module : kernel -priority : 202 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_pr_ctr_aes192 -module : kernel -priority : 201 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_pr_ctr_aes128 -module : kernel -priority : 200 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : 842 -driver : 842-scomp -module : kernel -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : scomp - -name : 842 -driver : 842-generic -module : kernel -priority : 100 
-refcnt : 1 -selftest : passed -internal : no -type : compression - -name : lzo-rle -driver : lzo-rle-scomp -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : no -type : scomp - -name : lzo-rle -driver : lzo-rle-generic -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : no -type : compression - -name : lzo -driver : lzo-scomp -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : no -type : scomp - -name : lzo -driver : lzo-generic -module : kernel -priority : 0 -refcnt : 9 -selftest : passed -internal : no -type : compression - -name : crct10dif -driver : crct10dif-generic -module : kernel -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : shash -blocksize : 1 -digestsize : 2 - -name : crc32c -driver : crc32c-generic -module : kernel -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : shash -blocksize : 1 -digestsize : 4 - -name : zlib-deflate -driver : zlib-deflate-scomp -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : no -type : scomp - -name : deflate -driver : deflate-scomp -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : no -type : scomp - -name : deflate -driver : deflate-generic -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : no -type : compression - -name : aes -driver : aes-generic -module : kernel -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : cipher -blocksize : 16 -min keysize : 16 -max keysize : 32 - -name : sha224 -driver : sha224-generic -module : kernel -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : shash -blocksize : 64 -digestsize : 28 - -name : sha256 -driver : sha256-generic -module : kernel -priority : 100 -refcnt : 11 -selftest : passed -internal : no -type : shash -blocksize : 64 -digestsize : 32 - -name : sha1 -driver : sha1-generic -module : kernel -priority : 100 -refcnt : 11 -selftest : passed -internal : no -type : shash -blocksize : 64 -digestsize : 20 - -name : md5 -driver : md5-generic -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : no -type : shash -blocksize : 64 -digestsize : 16 - -name : ecb(cipher_null) -driver : ecb-cipher_null -module : kernel -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : skcipher -async : no -blocksize : 1 -min keysize : 0 -max keysize : 0 -ivsize : 0 -chunksize : 1 -walksize : 1 - -name : digest_null -driver : digest_null-generic -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : no -type : shash -blocksize : 1 -digestsize : 0 - -name : compress_null -driver : compress_null-generic -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : no -type : compression - -name : cipher_null -driver : cipher_null-generic -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : no -type : cipher -blocksize : 1 -min keysize : 0 -max keysize : 0 - -name : rsa -driver : rsa-generic -module : kernel -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : akcipher - -name : dh -driver : dh-generic -module : kernel -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : kpp - -name : aes -driver : aes-asm -module : kernel -priority : 200 -refcnt : 1 -selftest : passed -internal : no -type : cipher -blocksize : 16 -min keysize : 16 -max keysize : 32 - -Mode: 444 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/diskstats -Lines: 52 - 1 0 
ram0 0 0 0 0 0 0 0 0 0 0 0 - 1 1 ram1 0 0 0 0 0 0 0 0 0 0 0 - 1 2 ram2 0 0 0 0 0 0 0 0 0 0 0 - 1 3 ram3 0 0 0 0 0 0 0 0 0 0 0 - 1 4 ram4 0 0 0 0 0 0 0 0 0 0 0 - 1 5 ram5 0 0 0 0 0 0 0 0 0 0 0 - 1 6 ram6 0 0 0 0 0 0 0 0 0 0 0 - 1 7 ram7 0 0 0 0 0 0 0 0 0 0 0 - 1 8 ram8 0 0 0 0 0 0 0 0 0 0 0 - 1 9 ram9 0 0 0 0 0 0 0 0 0 0 0 - 1 10 ram10 0 0 0 0 0 0 0 0 0 0 0 - 1 11 ram11 0 0 0 0 0 0 0 0 0 0 0 - 1 12 ram12 0 0 0 0 0 0 0 0 0 0 0 - 1 13 ram13 0 0 0 0 0 0 0 0 0 0 0 - 1 14 ram14 0 0 0 0 0 0 0 0 0 0 0 - 1 15 ram15 0 0 0 0 0 0 0 0 0 0 0 - 7 0 loop0 0 0 0 0 0 0 0 0 0 0 0 - 7 1 loop1 0 0 0 0 0 0 0 0 0 0 0 - 7 2 loop2 0 0 0 0 0 0 0 0 0 0 0 - 7 3 loop3 0 0 0 0 0 0 0 0 0 0 0 - 7 4 loop4 0 0 0 0 0 0 0 0 0 0 0 - 7 5 loop5 0 0 0 0 0 0 0 0 0 0 0 - 7 6 loop6 0 0 0 0 0 0 0 0 0 0 0 - 7 7 loop7 0 0 0 0 0 0 0 0 0 0 0 - 8 0 sda 25354637 34367663 1003346126 18492372 28444756 11134226 505697032 63877960 0 9653880 82621804 - 8 1 sda1 250 0 2000 36 0 0 0 0 0 36 36 - 8 2 sda2 246 0 1968 32 0 0 0 0 0 32 32 - 8 3 sda3 340 13 2818 52 11 8 152 8 0 56 60 - 8 4 sda4 25353629 34367650 1003337964 18492232 27448755 11134218 505696880 61593380 0 7576432 80332428 - 252 0 dm-0 59910002 0 1003337218 46229572 39231014 0 505696880 1158557800 0 11325968 1206301256 - 252 1 dm-1 388 0 3104 84 74 0 592 0 0 76 84 - 252 2 dm-2 11571 0 308350 6536 153522 0 5093416 122884 0 65400 129416 - 252 3 dm-3 3870 0 3870 104 0 0 0 0 0 16 104 - 252 4 dm-4 392 0 1034 28 38 0 137 16 0 24 44 - 252 5 dm-5 3729 0 84279 924 98918 0 1151688 104684 0 58848 105632 - 179 0 mmcblk0 192 3 1560 156 0 0 0 0 0 136 156 - 179 1 mmcblk0p1 17 3 160 24 0 0 0 0 0 24 24 - 179 2 mmcblk0p2 95 0 760 68 0 0 0 0 0 68 68 - 2 0 fd0 2 0 16 80 0 0 0 0 0 80 80 - 254 0 vda 1775784 15386 32670882 8655768 6038856 20711856 213637440 2069221364 0 41614592 2077872228 - 254 1 vda1 668 85 5984 956 207 4266 35784 32772 0 8808 33720 - 254 2 vda2 1774936 15266 32663262 8654692 5991028 20707590 213601656 2069152216 0 41607628 2077801992 - 11 0 sr0 0 0 0 0 0 0 0 0 0 0 0 - 259 0 nvme0n1 47114 4 4643973 21650 1078320 43950 39451633 1011053 0 222766 1032546 - 259 1 nvme0n1p1 1140 0 9370 16 1 0 1 0 0 16 16 - 259 2 nvme0n1p2 45914 4 4631243 21626 1036885 43950 39451632 919480 0 131580 940970 - 8 0 sdb 326552 841 9657779 84 41822 2895 1972905 5007 0 60730 67070 68851 0 1925173784 11130 - 8 1 sdb1 231 3 34466 4 24 23 106 0 0 64 64 0 0 0 0 - 8 2 sdb2 326310 838 9622281 67 40726 2872 1972799 4924 0 58250 64567 68851 0 1925173784 11130 - 8 0 sdc 14202 71 579164 21861 2995 1589 180500 40875 0 11628 55200 0 0 0 0 127 182 - 8 1 sdc1 1027 0 13795 5021 2 0 4096 3 0 690 4579 0 0 0 0 0 0 - 8 2 sdc2 13126 71 561749 16802 2830 1589 176404 40620 0 10931 50449 0 0 0 0 0 0 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/fs -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/fs/fscache -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/fs/fscache/stats -Lines: 24 -FS-Cache statistics -Cookies: idx=3 dat=67877 spc=0 -Objects: alc=67473 nal=0 avl=67473 ded=388 -ChkAux : non=12 ok=33 upd=44 obs=55 -Pages : mrk=547164 unc=364577 -Acquire: n=67880 nul=98 noc=25 ok=67780 nbf=39 oom=26 -Lookups: n=67473 neg=67470 pos=58 crt=67473 tmo=85 -Invals : n=14 run=13 -Updates: n=7 nul=3 run=8 -Relinqs: n=394 nul=1 wcr=2 rtr=3 -AttrChg: n=6 ok=5 nbf=4 oom=3 run=2 -Allocs : n=20 ok=19 wt=18 nbf=17 int=16 -Allocs : ops=15 owt=14 abt=13 -Retrvls: 
n=151959 ok=82823 wt=23467 nod=69136 nbf=15 int=69 oom=43 -Retrvls: ops=151959 owt=42747 abt=44 -Stores : n=225565 ok=225565 agn=12 nbf=13 oom=14 -Stores : ops=69156 run=294721 pgs=225565 rxd=225565 olm=43 -VmScan : nos=364512 gon=2 bsy=43 can=12 wt=66 -Ops : pend=42753 run=221129 enq=628798 can=11 rej=88 -Ops : ini=377538 dfr=27 rel=377538 gc=37 -CacheOp: alo=1 luo=2 luc=3 gro=4 -CacheOp: inv=5 upo=6 dro=7 pto=8 atc=9 syn=10 -CacheOp: rap=11 ras=12 alp=13 als=14 wrp=15 ucp=16 dsp=17 -CacheEv: nsp=18 stl=19 rtr=20 cul=21EOF -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/fs/xfs -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/fs/xfs/stat -Lines: 23 -extent_alloc 92447 97589 92448 93751 -abt 0 0 0 0 -blk_map 1767055 188820 184891 92447 92448 2140766 0 -bmbt 0 0 0 0 -dir 185039 92447 92444 136422 -trans 706 944304 0 -ig 185045 58807 0 126238 0 33637 22 -log 2883 113448 9 17360 739 -push_ail 945014 0 134260 15483 0 3940 464 159985 0 40 -xstrat 92447 0 -rw 107739 94045 -attr 4 0 0 0 -icluster 8677 7849 135802 -vnodes 92601 0 0 0 92444 92444 92444 0 -buf 2666287 7122 2659202 3599 2 7085 0 10297 7085 -abtb2 184941 1277345 13257 13278 0 0 0 0 0 0 0 0 0 0 2746147 -abtc2 345295 2416764 172637 172658 0 0 0 0 0 0 0 0 0 0 21406023 -bmbt2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -ibt2 343004 1358467 0 0 0 0 0 0 0 0 0 0 0 0 0 -fibt2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -qm 0 0 0 0 0 0 0 0 -xpc 399724544 92823103 86219234 -debug 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/loadavg -Lines: 1 -0.02 0.04 0.05 1/497 11947 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/mdstat -Lines: 60 -Personalities : [linear] [multipath] [raid0] [raid1] [raid6] [raid5] [raid4] [raid10] - -md3 : active raid6 sda1[8] sdh1[7] sdg1[6] sdf1[5] sde1[11] sdd1[3] sdc1[10] sdb1[9] sdd1[10](S) sdd2[11](S) - 5853468288 blocks super 1.2 level 6, 64k chunk, algorithm 2 [8/8] [UUUUUUUU] - -md127 : active raid1 sdi2[0] sdj2[1] - 312319552 blocks [2/2] [UU] - -md0 : active raid1 sdi1[0] sdj1[1] - 248896 blocks [2/2] [UU] - -md4 : inactive raid1 sda3[0](F) sdb3[1](S) - 4883648 blocks [2/2] [UU] - -md6 : active raid1 sdb2[2](F) sdc[1](S) sda2[0] - 195310144 blocks [2/1] [U_] - [=>...................] recovery = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec - -md8 : active raid1 sdb1[1] sda1[0] sdc[2](S) sde[3](S) - 195310144 blocks [2/2] [UU] - [=>...................] resync = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec - -md201 : active raid1 sda3[0] sdb3[1] - 1993728 blocks super 1.2 [2/2] [UU] - [=>...................] 
check = 5.7% (114176/1993728) finish=0.2min speed=114176K/sec - -md7 : active raid6 sdb1[0] sde1[3] sdd1[2] sdc1[1](F) - 7813735424 blocks super 1.2 level 6, 512k chunk, algorithm 2 [4/3] [U_UU] - bitmap: 0/30 pages [0KB], 65536KB chunk - -md9 : active raid1 sdc2[2] sdd2[3] sdb2[1] sda2[0] sde[4](F) sdf[5](F) sdg[6](S) - 523968 blocks super 1.2 [4/4] [UUUU] - resync=DELAYED - -md10 : active raid0 sda1[0] sdb1[1] - 314159265 blocks 64k chunks - -md11 : active (auto-read-only) raid1 sdb2[0] sdc2[1] sdc3[2](F) hda[4](S) ssdc2[3](S) - 4190208 blocks super 1.2 [2/2] [UU] - resync=PENDING - -md12 : active raid0 sdc2[0] sdd2[1] - 3886394368 blocks super 1.2 512k chunks - -md126 : active raid0 sdb[1] sdc[0] - 1855870976 blocks super external:/md127/0 128k chunks - -md219 : inactive sdb[2](S) sdc[1](S) sda[0](S) - 7932 blocks super external:imsm - -md00 : active raid0 xvdb[0] - 4186624 blocks super 1.2 256k chunks - -md120 : active linear sda1[1] sdb1[0] - 2095104 blocks super 1.2 0k rounding - -md101 : active (read-only) raid0 sdb[2] sdd[1] sdc[0] - 322560 blocks super 1.2 512k chunks - -unused devices: -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/meminfo -Lines: 42 -MemTotal: 15666184 kB -MemFree: 440324 kB -Buffers: 1020128 kB -Cached: 12007640 kB -SwapCached: 0 kB -Active: 6761276 kB -Inactive: 6532708 kB -Active(anon): 267256 kB -Inactive(anon): 268 kB -Active(file): 6494020 kB -Inactive(file): 6532440 kB -Unevictable: 0 kB -Mlocked: 0 kB -SwapTotal: 0 kB -SwapFree: 0 kB -Dirty: 768 kB -Writeback: 0 kB -AnonPages: 266216 kB -Mapped: 44204 kB -Shmem: 1308 kB -Slab: 1807264 kB -SReclaimable: 1738124 kB -SUnreclaim: 69140 kB -KernelStack: 1616 kB -PageTables: 5288 kB -NFS_Unstable: 0 kB -Bounce: 0 kB -WritebackTmp: 0 kB -CommitLimit: 7833092 kB -Committed_AS: 530844 kB -VmallocTotal: 34359738367 kB -VmallocUsed: 36596 kB -VmallocChunk: 34359637840 kB -HardwareCorrupted: 0 kB -AnonHugePages: 12288 kB -HugePages_Total: 0 -HugePages_Free: 0 -HugePages_Rsvd: 0 -HugePages_Surp: 0 -Hugepagesize: 2048 kB -DirectMap4k: 91136 kB -DirectMap2M: 16039936 kB -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/net -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/arp -Lines: 2 -IP address HW type Flags HW address Mask Device -192.168.224.1 0x1 0x2 00:50:56:c0:00:08 * ens33 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/dev -Lines: 6 -Inter-| Receive | Transmit - face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed -vethf345468: 648 8 0 0 0 0 0 0 438 5 0 0 0 0 0 0 - lo: 1664039048 1566805 0 0 0 0 0 0 1664039048 1566805 0 0 0 0 0 0 -docker0: 2568 38 0 0 0 0 0 0 438 5 0 0 0 0 0 0 - eth0: 874354587 1036395 0 0 0 0 0 0 563352563 732147 0 0 0 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/ip_vs -Lines: 21 -IP Virtual Server version 1.2.1 (size=4096) -Prot LocalAddress:Port Scheduler Flags - -> RemoteAddress:Port Forward Weight ActiveConn InActConn -TCP C0A80016:0CEA wlc - -> C0A85216:0CEA Tunnel 100 248 2 - -> C0A85318:0CEA Tunnel 100 248 2 - -> C0A85315:0CEA Tunnel 100 248 1 -TCP C0A80039:0CEA wlc - -> C0A85416:0CEA Tunnel 0 0 0 - -> C0A85215:0CEA Tunnel 100 1499 0 - -> C0A83215:0CEA Tunnel 100 1498 0 -TCP 
C0A80037:0CEA wlc - -> C0A8321A:0CEA Tunnel 0 0 0 - -> C0A83120:0CEA Tunnel 100 0 0 -TCP [2620:0000:0000:0000:0000:0000:0000:0001]:0050 sh - -> [2620:0000:0000:0000:0000:0000:0000:0002]:0050 Route 1 0 0 - -> [2620:0000:0000:0000:0000:0000:0000:0003]:0050 Route 1 0 0 - -> [2620:0000:0000:0000:0000:0000:0000:0004]:0050 Route 1 1 1 -FWM 10001000 wlc - -> C0A8321A:0CEA Route 0 0 1 - -> C0A83215:0CEA Route 0 0 2 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/ip_vs_stats -Lines: 6 - Total Incoming Outgoing Incoming Outgoing - Conns Packets Packets Bytes Bytes - 16AA370 E33656E5 0 51D8C8883AB3 0 - - Conns/s Pkts/s Pkts/s Bytes/s Bytes/s - 4 1FB3C 0 1282A8F 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/protocols -Lines: 14 -protocol size sockets memory press maxhdr slab module cl co di ac io in de sh ss gs se re sp bi br ha uh gp em -PACKET 1344 2 -1 NI 0 no kernel n n n n n n n n n n n n n n n n n n n -PINGv6 1112 0 -1 NI 0 yes kernel y y y n n y n n y y y y n y y y y y n -RAWv6 1112 1 -1 NI 0 yes kernel y y y n y y y n y y y y n y y y y n n -UDPLITEv6 1216 0 57 NI 0 yes kernel y y y n y y y n y y y y n n n y y y n -UDPv6 1216 10 57 NI 0 yes kernel y y y n y y y n y y y y n n n y y y n -TCPv6 2144 1937 1225378 no 320 yes kernel y y y y y y y y y y y y y n y y y y y -UNIX 1024 120 -1 NI 0 yes kernel n n n n n n n n n n n n n n n n n n n -UDP-Lite 1024 0 57 NI 0 yes kernel y y y n y y y n y y y y y n n y y y n -PING 904 0 -1 NI 0 yes kernel y y y n n y n n y y y y n y y y y y n -RAW 912 0 -1 NI 0 yes kernel y y y n y y y n y y y y n y y y y n n -UDP 1024 73 57 NI 0 yes kernel y y y n y y y n y y y y y n n y y y n -TCP 1984 93064 1225378 yes 320 yes kernel y y y y y y y y y y y y y n y y y y y -NETLINK 1040 16 -1 NI 0 no kernel n n n n n n n n n n n n n n n n n n n -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/net/rpc -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/rpc/nfs -Lines: 5 -net 18628 0 18628 6 -rpc 4329785 0 4338291 -proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2 -proc3 22 1 4084749 29200 94754 32580 186 47747 7981 8639 0 6356 0 6962 0 7958 0 0 241 4 4 2 39 -proc4 61 1 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/rpc/nfsd -Lines: 11 -rc 0 6 18622 -fh 0 0 0 0 0 -io 157286400 0 -th 8 0 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 -ra 32 0 0 0 0 0 0 0 0 0 0 0 -net 18628 0 18628 6 -rpc 18628 0 0 0 0 -proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2 -proc3 22 2 112 0 2719 111 0 0 0 0 0 0 0 0 0 0 0 27 216 0 2 1 0 -proc4 2 2 10853 -proc4ops 72 0 0 0 1098 2 0 0 0 0 8179 5896 0 0 0 0 5900 0 0 2 0 2 0 9609 0 2 150 1272 0 0 0 1236 0 0 0 0 3 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/sockstat -Lines: 6 -sockets: used 1602 -TCP: inuse 35 orphan 0 tw 4 alloc 59 mem 22 -UDP: inuse 12 mem 62 -UDPLITE: inuse 0 -RAW: inuse 0 -FRAG: inuse 0 memory 0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/sockstat6 -Lines: 5 
-TCP6: inuse 17 -UDP6: inuse 9 -UDPLITE6: inuse 0 -RAW6: inuse 1 -FRAG6: inuse 0 memory 0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/softnet_stat -Lines: 2 -00015c73 00020e76 F0000769 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 -01663fb2 00000000 000109a4 00000000 00000000 00000000 00000000 00000000 00000000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/softnet_stat.broken -Lines: 1 -00015c73 00020e76 F0000769 00000000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/tcp -Lines: 4 - sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode - 0: 0500000A:0016 00000000:0000 0A 00000000:00000001 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0 - 1: 00000000:0016 00000000:0000 0A 00000001:00000000 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0 - 2: 00000000:0016 00000000:0000 0A 00000001:00000001 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/tcp6 -Lines: 3 - sl local_address remote_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode ref pointer drops - 1315: 00000000000000000000000000000000:14EB 00000000000000000000000000000000:0000 07 00000000:00000000 00:00000000 00000000 981 0 21040 2 0000000013726323 0 - 6073: 000080FE00000000FFADE15609667CFE:C781 00000000000000000000000000000000:0000 07 00000000:00000000 00:00000000 00000000 1000 0 11337031 2 00000000b9256fdd 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/udp -Lines: 4 - sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode - 0: 0500000A:0016 00000000:0000 0A 00000000:00000001 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0 - 1: 00000000:0016 00000000:0000 0A 00000001:00000000 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0 - 2: 00000000:0016 00000000:0000 0A 00000001:00000001 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/udp6 -Lines: 3 - sl local_address remote_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode ref pointer drops - 1315: 00000000000000000000000000000000:14EB 00000000000000000000000000000000:0000 07 00000000:00000000 00:00000000 00000000 981 0 21040 2 0000000013726323 0 - 6073: 000080FE00000000FFADE15609667CFE:C781 00000000000000000000000000000000:0000 07 00000000:00000000 00:00000000 00000000 1000 0 11337031 2 00000000b9256fdd 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/udp_broken -Lines: 2 - sl local_address rem_address st - 1: 00000000:0016 00000000:0000 0A -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/unix -Lines: 6 -Num RefCount Protocol Flags Type St Inode Path -0000000000000000: 00000002 00000000 00010000 0001 01 3442596 /var/run/postgresql/.s.PGSQL.5432 -0000000000000000: 0000000a 00000000 00010000 0005 01 10061 /run/udev/control -0000000000000000: 00000007 00000000 00000000 0002 01 12392 /dev/log -0000000000000000: 00000003 00000000 00000000 0001 03 4787297 
/var/run/postgresql/.s.PGSQL.5432 -0000000000000000: 00000003 00000000 00000000 0001 03 5091797 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/unix_without_inode -Lines: 6 -Num RefCount Protocol Flags Type St Path -0000000000000000: 00000002 00000000 00010000 0001 01 /var/run/postgresql/.s.PGSQL.5432 -0000000000000000: 0000000a 00000000 00010000 0005 01 /run/udev/control -0000000000000000: 00000007 00000000 00000000 0002 01 /dev/log -0000000000000000: 00000003 00000000 00000000 0001 03 /var/run/postgresql/.s.PGSQL.5432 -0000000000000000: 00000003 00000000 00000000 0001 03 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/xfrm_stat -Lines: 28 -XfrmInError 1 -XfrmInBufferError 2 -XfrmInHdrError 4 -XfrmInNoStates 3 -XfrmInStateProtoError 40 -XfrmInStateModeError 100 -XfrmInStateSeqError 6000 -XfrmInStateExpired 4 -XfrmInStateMismatch 23451 -XfrmInStateInvalid 55555 -XfrmInTmplMismatch 51 -XfrmInNoPols 65432 -XfrmInPolBlock 100 -XfrmInPolError 10000 -XfrmOutError 1000000 -XfrmOutBundleGenError 43321 -XfrmOutBundleCheckError 555 -XfrmOutNoStates 869 -XfrmOutStateProtoError 4542 -XfrmOutStateModeError 4 -XfrmOutStateSeqError 543 -XfrmOutStateExpired 565 -XfrmOutPolBlock 43456 -XfrmOutPolDead 7656 -XfrmOutPolError 1454 -XfrmFwdHdrError 6654 -XfrmOutStateInvalid 28765 -XfrmAcquireError 24532 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/pressure -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/pressure/cpu -Lines: 1 -some avg10=0.10 avg60=2.00 avg300=3.85 total=15 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/pressure/io -Lines: 2 -some avg10=0.10 avg60=2.00 avg300=3.85 total=15 -full avg10=0.20 avg60=3.00 avg300=4.95 total=25 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/pressure/memory -Lines: 2 -some avg10=0.10 avg60=2.00 avg300=3.85 total=15 -full avg10=0.20 avg60=3.00 avg300=4.95 total=25 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/schedstat -Lines: 6 -version 15 -timestamp 15819019232 -cpu0 498494191 0 3533438552 2553969831 3853684107 2465731542 2045936778163039 343796328169361 4767485306 -domain0 00000000,00000003 212499247 210112015 1861015 1860405436 536440 369895 32599 210079416 25368550 24241256 384652 927363878 807233 6366 1647 24239609 2122447165 1886868564 121112060 2848625533 125678146 241025 1032026 1885836538 2545 12 2533 0 0 0 0 0 0 1387952561 21076581 0 -cpu1 518377256 0 4155211005 2778589869 10466382 2867629021 1904686152592476 364107263788241 5145567945 -domain0 00000000,00000003 217653037 215526982 1577949 1580427380 557469 393576 28538 215498444 28721913 27662819 371153 870843407 745912 5523 1639 27661180 2331056874 2107732788 111442342 652402556 123615235 196159 1045245 2106687543 2400 3 2397 0 0 0 0 0 0 1437804657 26220076 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/self -SymlinkTo: 26231 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/slabinfo -Lines: 302 -slabinfo - version: 2.1 -# name : tunables : slabdata -pid_3 375 532 576 28 4 : tunables 0 0 0 : slabdata 19 19 0 -pid_2 3 28 576 28 4 : 
tunables 0 0 0 : slabdata 1 1 0 -nvidia_p2p_page_cache 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 -nvidia_pte_cache 9022 9152 368 22 2 : tunables 0 0 0 : slabdata 416 416 0 -nvidia_stack_cache 321 326 12624 2 8 : tunables 0 0 0 : slabdata 163 163 0 -kvm_async_pf 0 0 472 34 4 : tunables 0 0 0 : slabdata 0 0 0 -kvm_vcpu 0 0 15552 2 8 : tunables 0 0 0 : slabdata 0 0 0 -kvm_mmu_page_header 0 0 504 32 4 : tunables 0 0 0 : slabdata 0 0 0 -pte_list_desc 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 -x86_emulator 0 0 3024 10 8 : tunables 0 0 0 : slabdata 0 0 0 -x86_fpu 0 0 4608 7 8 : tunables 0 0 0 : slabdata 0 0 0 -iwl_cmd_pool:0000:04:00.0 0 128 512 32 4 : tunables 0 0 0 : slabdata 4 4 0 -ext4_groupinfo_4k 3719 3740 480 34 4 : tunables 0 0 0 : slabdata 110 110 0 -bio-6 32 75 640 25 4 : tunables 0 0 0 : slabdata 3 3 0 -bio-5 16 48 1344 24 8 : tunables 0 0 0 : slabdata 2 2 0 -bio-4 17 92 1408 23 8 : tunables 0 0 0 : slabdata 4 4 0 -fat_inode_cache 0 0 1056 31 8 : tunables 0 0 0 : slabdata 0 0 0 -fat_cache 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 -ovl_aio_req 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0 -ovl_inode 0 0 1000 32 8 : tunables 0 0 0 : slabdata 0 0 0 -squashfs_inode_cache 0 0 1088 30 8 : tunables 0 0 0 : slabdata 0 0 0 -fuse_request 0 0 472 34 4 : tunables 0 0 0 : slabdata 0 0 0 -fuse_inode 0 0 1152 28 8 : tunables 0 0 0 : slabdata 0 0 0 -xfs_dqtrx 0 0 864 37 8 : tunables 0 0 0 : slabdata 0 0 0 -xfs_dquot 0 0 832 39 8 : tunables 0 0 0 : slabdata 0 0 0 -xfs_buf 0 0 768 21 4 : tunables 0 0 0 : slabdata 0 0 0 -xfs_bui_item 0 0 544 30 4 : tunables 0 0 0 : slabdata 0 0 0 -xfs_bud_item 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0 -xfs_cui_item 0 0 768 21 4 : tunables 0 0 0 : slabdata 0 0 0 -xfs_cud_item 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0 -xfs_rui_item 0 0 1024 32 8 : tunables 0 0 0 : slabdata 0 0 0 -xfs_rud_item 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0 -xfs_icr 0 0 520 31 4 : tunables 0 0 0 : slabdata 0 0 0 -xfs_ili 0 0 528 31 4 : tunables 0 0 0 : slabdata 0 0 0 -xfs_inode 0 0 1344 24 8 : tunables 0 0 0 : slabdata 0 0 0 -xfs_efi_item 0 0 768 21 4 : tunables 0 0 0 : slabdata 0 0 0 -xfs_efd_item 0 0 776 21 4 : tunables 0 0 0 : slabdata 0 0 0 -xfs_buf_item 0 0 608 26 4 : tunables 0 0 0 : slabdata 0 0 0 -xf_trans 0 0 568 28 4 : tunables 0 0 0 : slabdata 0 0 0 -xfs_ifork 0 0 376 21 2 : tunables 0 0 0 : slabdata 0 0 0 -xfs_da_state 0 0 816 20 4 : tunables 0 0 0 : slabdata 0 0 0 -xfs_btree_cur 0 0 560 29 4 : tunables 0 0 0 : slabdata 0 0 0 -xfs_bmap_free_item 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0 -xfs_log_ticket 0 0 520 31 4 : tunables 0 0 0 : slabdata 0 0 0 -nfs_direct_cache 0 0 560 29 4 : tunables 0 0 0 : slabdata 0 0 0 -nfs_commit_data 4 28 1152 28 8 : tunables 0 0 0 : slabdata 1 1 0 -nfs_write_data 32 50 1280 25 8 : tunables 0 0 0 : slabdata 2 2 0 -nfs_read_data 0 0 1280 25 8 : tunables 0 0 0 : slabdata 0 0 0 -nfs_inode_cache 0 0 1408 23 8 : tunables 0 0 0 : slabdata 0 0 0 -nfs_page 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0 -rpc_inode_cache 0 0 1024 32 8 : tunables 0 0 0 : slabdata 0 0 0 -rpc_buffers 8 13 2496 13 8 : tunables 0 0 0 : slabdata 1 1 0 -rpc_tasks 8 25 640 25 4 : tunables 0 0 0 : slabdata 1 1 0 -fscache_cookie_jar 1 35 464 35 4 : tunables 0 0 0 : slabdata 1 1 0 -jfs_mp 32 35 464 35 4 : tunables 0 0 0 : slabdata 1 1 0 -jfs_ip 0 0 1592 20 8 : tunables 0 0 0 : slabdata 0 0 0 -reiser_inode_cache 0 0 1096 29 8 : tunables 0 0 0 : slabdata 0 0 0 -btrfs_end_io_wq 0 0 464 35 4 : tunables 0 0 0 : slabdata 0 0 0 -btrfs_prelim_ref 0 0 424 38 4 : 
tunables 0 0 0 : slabdata 0 0 0 -btrfs_delayed_extent_op 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 -btrfs_delayed_data_ref 0 0 448 36 4 : tunables 0 0 0 : slabdata 0 0 0 -btrfs_delayed_tree_ref 0 0 440 37 4 : tunables 0 0 0 : slabdata 0 0 0 -btrfs_delayed_ref_head 0 0 480 34 4 : tunables 0 0 0 : slabdata 0 0 0 -btrfs_inode_defrag 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0 -btrfs_delayed_node 0 0 648 25 4 : tunables 0 0 0 : slabdata 0 0 0 -btrfs_ordered_extent 0 0 752 21 4 : tunables 0 0 0 : slabdata 0 0 0 -btrfs_extent_map 0 0 480 34 4 : tunables 0 0 0 : slabdata 0 0 0 -btrfs_extent_state 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0 -bio-3 35 92 704 23 4 : tunables 0 0 0 : slabdata 4 4 0 -btrfs_extent_buffer 0 0 600 27 4 : tunables 0 0 0 : slabdata 0 0 0 -btrfs_free_space_bitmap 0 0 12288 2 8 : tunables 0 0 0 : slabdata 0 0 0 -btrfs_free_space 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0 -btrfs_path 0 0 448 36 4 : tunables 0 0 0 : slabdata 0 0 0 -btrfs_trans_handle 0 0 440 37 4 : tunables 0 0 0 : slabdata 0 0 0 -btrfs_inode 0 0 1496 21 8 : tunables 0 0 0 : slabdata 0 0 0 -ext4_inode_cache 84136 84755 1400 23 8 : tunables 0 0 0 : slabdata 3685 3685 0 -ext4_free_data 22 80 392 20 2 : tunables 0 0 0 : slabdata 4 4 0 -ext4_allocation_context 0 70 464 35 4 : tunables 0 0 0 : slabdata 2 2 0 -ext4_prealloc_space 24 74 440 37 4 : tunables 0 0 0 : slabdata 2 2 0 -ext4_system_zone 267 273 376 21 2 : tunables 0 0 0 : slabdata 13 13 0 -ext4_io_end_vec 0 88 368 22 2 : tunables 0 0 0 : slabdata 4 4 0 -ext4_io_end 0 80 400 20 2 : tunables 0 0 0 : slabdata 4 4 0 -ext4_bio_post_read_ctx 128 147 384 21 2 : tunables 0 0 0 : slabdata 7 7 0 -ext4_pending_reservation 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 -ext4_extent_status 79351 79422 376 21 2 : tunables 0 0 0 : slabdata 3782 3782 0 -jbd2_transaction_s 44 100 640 25 4 : tunables 0 0 0 : slabdata 4 4 0 -jbd2_inode 6785 6840 400 20 2 : tunables 0 0 0 : slabdata 342 342 0 -jbd2_journal_handle 0 80 392 20 2 : tunables 0 0 0 : slabdata 4 4 0 -jbd2_journal_head 824 1944 448 36 4 : tunables 0 0 0 : slabdata 54 54 0 -jbd2_revoke_table_s 4 23 352 23 2 : tunables 0 0 0 : slabdata 1 1 0 -jbd2_revoke_record_s 0 156 416 39 4 : tunables 0 0 0 : slabdata 4 4 0 -ext2_inode_cache 0 0 1144 28 8 : tunables 0 0 0 : slabdata 0 0 0 -mbcache 0 0 392 20 2 : tunables 0 0 0 : slabdata 0 0 0 -dm_thin_new_mapping 0 152 424 38 4 : tunables 0 0 0 : slabdata 4 4 0 -dm_snap_pending_exception 0 0 464 35 4 : tunables 0 0 0 : slabdata 0 0 0 -dm_exception 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 -dm_dirty_log_flush_entry 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 -dm_bio_prison_cell_v2 0 0 432 37 4 : tunables 0 0 0 : slabdata 0 0 0 -dm_bio_prison_cell 0 148 432 37 4 : tunables 0 0 0 : slabdata 4 4 0 -kcopyd_job 0 8 3648 8 8 : tunables 0 0 0 : slabdata 1 1 0 -io 0 32 512 32 4 : tunables 0 0 0 : slabdata 1 1 0 -dm_uevent 0 0 3224 10 8 : tunables 0 0 0 : slabdata 0 0 0 -dax_cache 1 28 1152 28 8 : tunables 0 0 0 : slabdata 1 1 0 -aic94xx_ascb 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0 -aic94xx_dma_token 0 0 384 21 2 : tunables 0 0 0 : slabdata 0 0 0 -asd_sas_event 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0 -sas_task 0 0 704 23 4 : tunables 0 0 0 : slabdata 0 0 0 -qla2xxx_srbs 0 0 832 39 8 : tunables 0 0 0 : slabdata 0 0 0 -sd_ext_cdb 2 22 368 22 2 : tunables 0 0 0 : slabdata 1 1 0 -scsi_sense_cache 258 288 512 32 4 : tunables 0 0 0 : slabdata 9 9 0 -virtio_scsi_cmd 64 75 640 25 4 : tunables 0 0 0 : slabdata 3 3 0 -L2TP/IPv6 0 0 1536 21 8 : tunables 0 0 0 : 
slabdata 0 0 0 -L2TP/IP 0 0 1408 23 8 : tunables 0 0 0 : slabdata 0 0 0 -ip6-frags 0 0 520 31 4 : tunables 0 0 0 : slabdata 0 0 0 -fib6_nodes 5 32 512 32 4 : tunables 0 0 0 : slabdata 1 1 0 -ip6_dst_cache 4 25 640 25 4 : tunables 0 0 0 : slabdata 1 1 0 -ip6_mrt_cache 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0 -PINGv6 0 0 1600 20 8 : tunables 0 0 0 : slabdata 0 0 0 -RAWv6 25 40 1600 20 8 : tunables 0 0 0 : slabdata 2 2 0 -UDPLITEv6 0 0 1728 18 8 : tunables 0 0 0 : slabdata 0 0 0 -UDPv6 3 54 1728 18 8 : tunables 0 0 0 : slabdata 3 3 0 -tw_sock_TCPv6 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0 -request_sock_TCPv6 0 0 632 25 4 : tunables 0 0 0 : slabdata 0 0 0 -TCPv6 0 33 2752 11 8 : tunables 0 0 0 : slabdata 3 3 0 -uhci_urb_priv 0 0 392 20 2 : tunables 0 0 0 : slabdata 0 0 0 -sgpool-128 2 14 4544 7 8 : tunables 0 0 0 : slabdata 2 2 0 -sgpool-64 2 13 2496 13 8 : tunables 0 0 0 : slabdata 1 1 0 -sgpool-32 2 44 1472 22 8 : tunables 0 0 0 : slabdata 2 2 0 -sgpool-16 2 68 960 34 8 : tunables 0 0 0 : slabdata 2 2 0 -sgpool-8 2 46 704 23 4 : tunables 0 0 0 : slabdata 2 2 0 -btree_node 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0 -bfq_io_cq 0 0 488 33 4 : tunables 0 0 0 : slabdata 0 0 0 -bfq_queue 0 0 848 38 8 : tunables 0 0 0 : slabdata 0 0 0 -mqueue_inode_cache 1 24 1344 24 8 : tunables 0 0 0 : slabdata 1 1 0 -isofs_inode_cache 0 0 968 33 8 : tunables 0 0 0 : slabdata 0 0 0 -io_kiocb 0 0 640 25 4 : tunables 0 0 0 : slabdata 0 0 0 -kioctx 0 30 1088 30 8 : tunables 0 0 0 : slabdata 1 1 0 -aio_kiocb 0 28 576 28 4 : tunables 0 0 0 : slabdata 1 1 0 -userfaultfd_ctx_cache 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0 -fanotify_path_event 0 0 392 20 2 : tunables 0 0 0 : slabdata 0 0 0 -fanotify_fid_event 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0 -fsnotify_mark 0 0 408 20 2 : tunables 0 0 0 : slabdata 0 0 0 -dnotify_mark 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0 -dnotify_struct 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 -dio 0 0 1088 30 8 : tunables 0 0 0 : slabdata 0 0 0 -bio-2 4 25 640 25 4 : tunables 0 0 0 : slabdata 1 1 0 -fasync_cache 0 0 384 21 2 : tunables 0 0 0 : slabdata 0 0 0 -audit_tree_mark 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0 -pid_namespace 30 34 480 34 4 : tunables 0 0 0 : slabdata 1 1 0 -posix_timers_cache 0 27 592 27 4 : tunables 0 0 0 : slabdata 1 1 0 -iommu_devinfo 24 32 512 32 4 : tunables 0 0 0 : slabdata 1 1 0 -iommu_domain 10 10 3264 10 8 : tunables 0 0 0 : slabdata 1 1 0 -iommu_iova 8682 8748 448 36 4 : tunables 0 0 0 : slabdata 243 243 0 -UNIX 529 814 1472 22 8 : tunables 0 0 0 : slabdata 37 37 0 -ip4-frags 0 0 536 30 4 : tunables 0 0 0 : slabdata 0 0 0 -ip_mrt_cache 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0 -UDP-Lite 0 0 1536 21 8 : tunables 0 0 0 : slabdata 0 0 0 -tcp_bind_bucket 7 128 512 32 4 : tunables 0 0 0 : slabdata 4 4 0 -inet_peer_cache 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0 -xfrm_dst_cache 0 0 704 23 4 : tunables 0 0 0 : slabdata 0 0 0 -xfrm_state 0 0 1152 28 8 : tunables 0 0 0 : slabdata 0 0 0 -ip_fib_trie 7 21 384 21 2 : tunables 0 0 0 : slabdata 1 1 0 -ip_fib_alias 9 20 392 20 2 : tunables 0 0 0 : slabdata 1 1 0 -ip_dst_cache 27 84 576 28 4 : tunables 0 0 0 : slabdata 3 3 0 -PING 0 0 1408 23 8 : tunables 0 0 0 : slabdata 0 0 0 -RAW 32 46 1408 23 8 : tunables 0 0 0 : slabdata 2 2 0 -UDP 11 168 1536 21 8 : tunables 0 0 0 : slabdata 8 8 0 -tw_sock_TCP 1 56 576 28 4 : tunables 0 0 0 : slabdata 2 2 0 -request_sock_TCP 0 25 632 25 4 : tunables 0 0 0 : slabdata 1 1 0 -TCP 10 60 2624 12 8 : tunables 0 0 0 : slabdata 5 5 0 
-hugetlbfs_inode_cache 2 35 928 35 8 : tunables 0 0 0 : slabdata 1 1 0 -dquot 0 0 640 25 4 : tunables 0 0 0 : slabdata 0 0 0 -bio-1 32 46 704 23 4 : tunables 0 0 0 : slabdata 2 2 0 -eventpoll_pwq 409 600 408 20 2 : tunables 0 0 0 : slabdata 30 30 0 -eventpoll_epi 408 672 576 28 4 : tunables 0 0 0 : slabdata 24 24 0 -inotify_inode_mark 58 195 416 39 4 : tunables 0 0 0 : slabdata 5 5 0 -scsi_data_buffer 0 0 360 22 2 : tunables 0 0 0 : slabdata 0 0 0 -bio_crypt_ctx 128 147 376 21 2 : tunables 0 0 0 : slabdata 7 7 0 -request_queue 29 39 2408 13 8 : tunables 0 0 0 : slabdata 3 3 0 -blkdev_ioc 81 148 440 37 4 : tunables 0 0 0 : slabdata 4 4 0 -bio-0 125 200 640 25 4 : tunables 0 0 0 : slabdata 8 8 0 -biovec-max 166 196 4544 7 8 : tunables 0 0 0 : slabdata 28 28 0 -biovec-128 0 52 2496 13 8 : tunables 0 0 0 : slabdata 4 4 0 -biovec-64 0 88 1472 22 8 : tunables 0 0 0 : slabdata 4 4 0 -biovec-16 0 92 704 23 4 : tunables 0 0 0 : slabdata 4 4 0 -bio_integrity_payload 4 28 576 28 4 : tunables 0 0 0 : slabdata 1 1 0 -khugepaged_mm_slot 59 180 448 36 4 : tunables 0 0 0 : slabdata 5 5 0 -ksm_mm_slot 0 0 384 21 2 : tunables 0 0 0 : slabdata 0 0 0 -ksm_stable_node 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0 -ksm_rmap_item 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0 -user_namespace 2 37 864 37 8 : tunables 0 0 0 : slabdata 1 1 0 -uid_cache 5 28 576 28 4 : tunables 0 0 0 : slabdata 1 1 0 -dmaengine-unmap-256 1 13 2496 13 8 : tunables 0 0 0 : slabdata 1 1 0 -dmaengine-unmap-128 1 22 1472 22 8 : tunables 0 0 0 : slabdata 1 1 0 -dmaengine-unmap-16 1 28 576 28 4 : tunables 0 0 0 : slabdata 1 1 0 -dmaengine-unmap-2 1 36 448 36 4 : tunables 0 0 0 : slabdata 1 1 0 -audit_buffer 0 22 360 22 2 : tunables 0 0 0 : slabdata 1 1 0 -sock_inode_cache 663 1170 1216 26 8 : tunables 0 0 0 : slabdata 45 45 0 -skbuff_ext_cache 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0 -skbuff_fclone_cache 1 72 896 36 8 : tunables 0 0 0 : slabdata 2 2 0 -skbuff_head_cache 3 650 640 25 4 : tunables 0 0 0 : slabdata 26 26 0 -configfs_dir_cache 7 38 424 38 4 : tunables 0 0 0 : slabdata 1 1 0 -file_lock_cache 27 116 552 29 4 : tunables 0 0 0 : slabdata 4 4 0 -file_lock_ctx 106 120 392 20 2 : tunables 0 0 0 : slabdata 6 6 0 -fsnotify_mark_connector 52 66 368 22 2 : tunables 0 0 0 : slabdata 3 3 0 -net_namespace 1 6 5312 6 8 : tunables 0 0 0 : slabdata 1 1 0 -task_delay_info 784 1560 416 39 4 : tunables 0 0 0 : slabdata 40 40 0 -taskstats 45 92 688 23 4 : tunables 0 0 0 : slabdata 4 4 0 -proc_dir_entry 678 682 528 31 4 : tunables 0 0 0 : slabdata 22 22 0 -pde_opener 0 189 376 21 2 : tunables 0 0 0 : slabdata 9 9 0 -proc_inode_cache 7150 8250 992 33 8 : tunables 0 0 0 : slabdata 250 250 0 -seq_file 60 735 456 35 4 : tunables 0 0 0 : slabdata 21 21 0 -sigqueue 0 156 416 39 4 : tunables 0 0 0 : slabdata 4 4 0 -bdev_cache 36 78 1216 26 8 : tunables 0 0 0 : slabdata 3 3 0 -shmem_inode_cache 1599 2208 1016 32 8 : tunables 0 0 0 : slabdata 69 69 0 -kernfs_iattrs_cache 1251 1254 424 38 4 : tunables 0 0 0 : slabdata 33 33 0 -kernfs_node_cache 52898 52920 464 35 4 : tunables 0 0 0 : slabdata 1512 1512 0 -mnt_cache 42 46 704 23 4 : tunables 0 0 0 : slabdata 2 2 0 -filp 4314 6371 704 23 4 : tunables 0 0 0 : slabdata 277 277 0 -inode_cache 28695 29505 920 35 8 : tunables 0 0 0 : slabdata 843 843 0 -dentry 166069 169074 528 31 4 : tunables 0 0 0 : slabdata 5454 5454 0 -names_cache 0 35 4544 7 8 : tunables 0 0 0 : slabdata 5 5 0 -hashtab_node 0 0 360 22 2 : tunables 0 0 0 : slabdata 0 0 0 -ebitmap_node 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0 
-avtab_extended_perms 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 -avtab_node 0 0 360 22 2 : tunables 0 0 0 : slabdata 0 0 0 -avc_xperms_data 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 -avc_xperms_decision_node 0 0 384 21 2 : tunables 0 0 0 : slabdata 0 0 0 -avc_xperms_node 0 0 392 20 2 : tunables 0 0 0 : slabdata 0 0 0 -avc_node 37 40 408 20 2 : tunables 0 0 0 : slabdata 2 2 0 -iint_cache 0 0 448 36 4 : tunables 0 0 0 : slabdata 0 0 0 -lsm_inode_cache 122284 122340 392 20 2 : tunables 0 0 0 : slabdata 6117 6117 0 -lsm_file_cache 4266 4485 352 23 2 : tunables 0 0 0 : slabdata 195 195 0 -key_jar 8 25 640 25 4 : tunables 0 0 0 : slabdata 1 1 0 -buffer_head 255622 257076 440 37 4 : tunables 0 0 0 : slabdata 6948 6948 0 -uts_namespace 0 0 776 21 4 : tunables 0 0 0 : slabdata 0 0 0 -nsproxy 31 40 408 20 2 : tunables 0 0 0 : slabdata 2 2 0 -vm_area_struct 39115 43214 528 31 4 : tunables 0 0 0 : slabdata 1394 1394 0 -mm_struct 96 529 1408 23 8 : tunables 0 0 0 : slabdata 23 23 0 -fs_cache 102 756 448 36 4 : tunables 0 0 0 : slabdata 21 21 0 -files_cache 102 588 1152 28 8 : tunables 0 0 0 : slabdata 21 21 0 -signal_cache 266 672 1536 21 8 : tunables 0 0 0 : slabdata 32 32 0 -sighand_cache 266 507 2496 13 8 : tunables 0 0 0 : slabdata 39 39 0 -task_struct 783 963 10240 3 8 : tunables 0 0 0 : slabdata 321 321 0 -cred_jar 364 952 576 28 4 : tunables 0 0 0 : slabdata 34 34 0 -anon_vma_chain 63907 67821 416 39 4 : tunables 0 0 0 : slabdata 1739 1739 0 -anon_vma 25891 28899 416 39 4 : tunables 0 0 0 : slabdata 741 741 0 -pid 408 992 512 32 4 : tunables 0 0 0 : slabdata 31 31 0 -Acpi-Operand 6682 6740 408 20 2 : tunables 0 0 0 : slabdata 337 337 0 -Acpi-ParseExt 0 39 416 39 4 : tunables 0 0 0 : slabdata 1 1 0 -Acpi-Parse 0 80 392 20 2 : tunables 0 0 0 : slabdata 4 4 0 -Acpi-State 0 78 416 39 4 : tunables 0 0 0 : slabdata 2 2 0 -Acpi-Namespace 3911 3948 384 21 2 : tunables 0 0 0 : slabdata 188 188 0 -trace_event_file 2638 2660 424 38 4 : tunables 0 0 0 : slabdata 70 70 0 -ftrace_event_field 6592 6594 384 21 2 : tunables 0 0 0 : slabdata 314 314 0 -pool_workqueue 41 64 1024 32 8 : tunables 0 0 0 : slabdata 2 2 0 -radix_tree_node 21638 24045 912 35 8 : tunables 0 0 0 : slabdata 687 687 0 -task_group 48 78 1216 26 8 : tunables 0 0 0 : slabdata 3 3 0 -vmap_area 4411 4680 400 20 2 : tunables 0 0 0 : slabdata 234 234 0 -dma-kmalloc-8k 0 0 24576 1 8 : tunables 0 0 0 : slabdata 0 0 0 -dma-kmalloc-4k 0 0 12288 2 8 : tunables 0 0 0 : slabdata 0 0 0 -dma-kmalloc-2k 0 0 6144 5 8 : tunables 0 0 0 : slabdata 0 0 0 -dma-kmalloc-1k 0 0 3072 10 8 : tunables 0 0 0 : slabdata 0 0 0 -dma-kmalloc-512 0 0 1536 21 8 : tunables 0 0 0 : slabdata 0 0 0 -dma-kmalloc-256 0 0 1024 32 8 : tunables 0 0 0 : slabdata 0 0 0 -dma-kmalloc-128 0 0 640 25 4 : tunables 0 0 0 : slabdata 0 0 0 -dma-kmalloc-64 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0 -dma-kmalloc-32 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0 -dma-kmalloc-16 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 -dma-kmalloc-8 0 0 344 23 2 : tunables 0 0 0 : slabdata 0 0 0 -dma-kmalloc-192 0 0 528 31 4 : tunables 0 0 0 : slabdata 0 0 0 -dma-kmalloc-96 0 0 432 37 4 : tunables 0 0 0 : slabdata 0 0 0 -kmalloc-rcl-8k 0 0 24576 1 8 : tunables 0 0 0 : slabdata 0 0 0 -kmalloc-rcl-4k 0 0 12288 2 8 : tunables 0 0 0 : slabdata 0 0 0 -kmalloc-rcl-2k 0 0 6144 5 8 : tunables 0 0 0 : slabdata 0 0 0 -kmalloc-rcl-1k 0 0 3072 10 8 : tunables 0 0 0 : slabdata 0 0 0 -kmalloc-rcl-512 0 0 1536 21 8 : tunables 0 0 0 : slabdata 0 0 0 -kmalloc-rcl-256 0 0 1024 32 8 : tunables 0 0 0 : slabdata 0 
0 0 -kmalloc-rcl-192 0 0 528 31 4 : tunables 0 0 0 : slabdata 0 0 0 -kmalloc-rcl-128 31 75 640 25 4 : tunables 0 0 0 : slabdata 3 3 0 -kmalloc-rcl-96 3371 3626 432 37 4 : tunables 0 0 0 : slabdata 98 98 0 -kmalloc-rcl-64 2080 2272 512 32 4 : tunables 0 0 0 : slabdata 71 71 0 -kmalloc-rcl-32 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0 -kmalloc-rcl-16 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 -kmalloc-rcl-8 0 0 344 23 2 : tunables 0 0 0 : slabdata 0 0 0 -kmalloc-8k 133 140 24576 1 8 : tunables 0 0 0 : slabdata 140 140 0 -kmalloc-4k 403 444 12288 2 8 : tunables 0 0 0 : slabdata 222 222 0 -kmalloc-2k 2391 2585 6144 5 8 : tunables 0 0 0 : slabdata 517 517 0 -kmalloc-1k 2163 2420 3072 10 8 : tunables 0 0 0 : slabdata 242 242 0 -kmalloc-512 2972 3633 1536 21 8 : tunables 0 0 0 : slabdata 173 173 0 -kmalloc-256 1841 1856 1024 32 8 : tunables 0 0 0 : slabdata 58 58 0 -kmalloc-192 2165 2914 528 31 4 : tunables 0 0 0 : slabdata 94 94 0 -kmalloc-128 1137 1175 640 25 4 : tunables 0 0 0 : slabdata 47 47 0 -kmalloc-96 1925 2590 432 37 4 : tunables 0 0 0 : slabdata 70 70 0 -kmalloc-64 9433 10688 512 32 4 : tunables 0 0 0 : slabdata 334 334 0 -kmalloc-32 9098 10062 416 39 4 : tunables 0 0 0 : slabdata 258 258 0 -kmalloc-16 10914 10956 368 22 2 : tunables 0 0 0 : slabdata 498 498 0 -kmalloc-8 7576 7705 344 23 2 : tunables 0 0 0 : slabdata 335 335 0 -kmem_cache_node 904 928 512 32 4 : tunables 0 0 0 : slabdata 29 29 0 -kmem_cache 904 936 832 39 8 : tunables 0 0 0 : slabdata 24 24 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/stat -Lines: 16 -cpu 301854 612 111922 8979004 3552 2 3944 0 0 0 -cpu0 44490 19 21045 1087069 220 1 3410 0 0 0 -cpu1 47869 23 16474 1110787 591 0 46 0 0 0 -cpu2 46504 36 15916 1112321 441 0 326 0 0 0 -cpu3 47054 102 15683 1113230 533 0 60 0 0 0 -cpu4 28413 25 10776 1140321 217 0 8 0 0 0 -cpu5 29271 101 11586 1136270 672 0 30 0 0 0 -cpu6 29152 36 10276 1139721 319 0 29 0 0 0 -cpu7 29098 268 10164 1139282 555 0 31 0 0 0 -intr 8885917 17 0 0 0 0 0 0 0 1 79281 0 0 0 0 0 0 0 231237 0 0 0 0 250586 103 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 223424 190745 13 906 1283803 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
0 0 0 0 0 0 0 -ctxt 38014093 -btime 1418183276 -processes 26442 -procs_running 2 -procs_blocked 1 -softirq 5057579 250191 1481983 1647 211099 186066 0 1783454 622196 12499 508444 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/swaps -Lines: 2 -Filename Type Size Used Priority -/dev/dm-2 partition 131068 176 -2 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/symlinktargets -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/symlinktargets/README -Lines: 2 -This directory contains some empty files that are the symlinks the files in the "fd" directory point to. -They are otherwise ignored by the tests -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/symlinktargets/abc -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/symlinktargets/def -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/symlinktargets/ghi -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/symlinktargets/uvw -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/symlinktargets/xyz -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/sys -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/sys/kernel -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/sys/kernel/random -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/kernel/random/entropy_avail -Lines: 1 -3943 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/kernel/random/poolsize -Lines: 1 -4096 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/kernel/random/urandom_min_reseed_secs -Lines: 1 -60 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/kernel/random/write_wakeup_threshold -Lines: 1 -3072 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/sys/vm -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/admin_reserve_kbytes -Lines: 1 -8192 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/block_dump -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/compact_unevictable_allowed -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/dirty_background_bytes -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/dirty_background_ratio -Lines: 1 -10 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/proc/sys/vm/dirty_bytes -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/dirty_expire_centisecs -Lines: 1 -3000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/dirty_ratio -Lines: 1 -20 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/dirty_writeback_centisecs -Lines: 1 -500 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/dirtytime_expire_seconds -Lines: 1 -43200 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/drop_caches -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/extfrag_threshold -Lines: 1 -500 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/hugetlb_shm_group -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/laptop_mode -Lines: 1 -5 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/legacy_va_layout -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/lowmem_reserve_ratio -Lines: 1 -256 256 32 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/max_map_count -Lines: 1 -65530 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/memory_failure_early_kill -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/memory_failure_recovery -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/min_free_kbytes -Lines: 1 -67584 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/min_slab_ratio -Lines: 1 -5 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/min_unmapped_ratio -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/mmap_min_addr -Lines: 1 -65536 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/nr_hugepages -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/nr_hugepages_mempolicy -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/nr_overcommit_hugepages -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/numa_stat -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/numa_zonelist_order -Lines: 1 -Node -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/oom_dump_tasks -Lines: 1 -1 -Mode: 644 -# 
ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/oom_kill_allocating_task -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/overcommit_kbytes -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/overcommit_memory -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/overcommit_ratio -Lines: 1 -50 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/page-cluster -Lines: 1 -3 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/panic_on_oom -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/percpu_pagelist_fraction -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/stat_interval -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/swappiness -Lines: 1 -60 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/user_reserve_kbytes -Lines: 1 -131072 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/vfs_cache_pressure -Lines: 1 -100 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/watermark_boost_factor -Lines: 1 -15000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/watermark_scale_factor -Lines: 1 -10 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/sys/vm/zone_reclaim_mode -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/zoneinfo -Lines: 262 -Node 0, zone DMA - per-node stats - nr_inactive_anon 230981 - nr_active_anon 547580 - nr_inactive_file 316904 - nr_active_file 346282 - nr_unevictable 115467 - nr_slab_reclaimable 131220 - nr_slab_unreclaimable 47320 - nr_isolated_anon 0 - nr_isolated_file 0 - workingset_nodes 11627 - workingset_refault 466886 - workingset_activate 276925 - workingset_restore 84055 - workingset_nodereclaim 487 - nr_anon_pages 795576 - nr_mapped 215483 - nr_file_pages 761874 - nr_dirty 908 - nr_writeback 0 - nr_writeback_temp 0 - nr_shmem 224925 - nr_shmem_hugepages 0 - nr_shmem_pmdmapped 0 - nr_anon_transparent_hugepages 0 - nr_unstable 0 - nr_vmscan_write 12950 - nr_vmscan_immediate_reclaim 3033 - nr_dirtied 8007423 - nr_written 7752121 - nr_kernel_misc_reclaimable 0 - pages free 3952 - min 33 - low 41 - high 49 - spanned 4095 - present 3975 - managed 3956 - protection: (0, 2877, 7826, 7826, 7826) - nr_free_pages 3952 - nr_zone_inactive_anon 0 - nr_zone_active_anon 0 - nr_zone_inactive_file 0 - nr_zone_active_file 0 - nr_zone_unevictable 0 - nr_zone_write_pending 0 - nr_mlock 0 - nr_page_table_pages 0 - nr_kernel_stack 0 - nr_bounce 0 - nr_zspages 0 - nr_free_cma 0 - numa_hit 1 - numa_miss 0 - numa_foreign 0 - numa_interleave 0 - numa_local 1 - numa_other 0 - pagesets - cpu: 0 - count: 0 - high: 0 - batch: 1 - 
vm stats threshold: 8 - cpu: 1 - count: 0 - high: 0 - batch: 1 - vm stats threshold: 8 - cpu: 2 - count: 0 - high: 0 - batch: 1 - vm stats threshold: 8 - cpu: 3 - count: 0 - high: 0 - batch: 1 - vm stats threshold: 8 - cpu: 4 - count: 0 - high: 0 - batch: 1 - vm stats threshold: 8 - cpu: 5 - count: 0 - high: 0 - batch: 1 - vm stats threshold: 8 - cpu: 6 - count: 0 - high: 0 - batch: 1 - vm stats threshold: 8 - cpu: 7 - count: 0 - high: 0 - batch: 1 - vm stats threshold: 8 - node_unreclaimable: 0 - start_pfn: 1 -Node 0, zone DMA32 - pages free 204252 - min 19510 - low 21059 - high 22608 - spanned 1044480 - present 759231 - managed 742806 - protection: (0, 0, 4949, 4949, 4949) - nr_free_pages 204252 - nr_zone_inactive_anon 118558 - nr_zone_active_anon 106598 - nr_zone_inactive_file 75475 - nr_zone_active_file 70293 - nr_zone_unevictable 66195 - nr_zone_write_pending 64 - nr_mlock 4 - nr_page_table_pages 1756 - nr_kernel_stack 2208 - nr_bounce 0 - nr_zspages 0 - nr_free_cma 0 - numa_hit 113952967 - numa_miss 0 - numa_foreign 0 - numa_interleave 0 - numa_local 113952967 - numa_other 0 - pagesets - cpu: 0 - count: 345 - high: 378 - batch: 63 - vm stats threshold: 48 - cpu: 1 - count: 356 - high: 378 - batch: 63 - vm stats threshold: 48 - cpu: 2 - count: 325 - high: 378 - batch: 63 - vm stats threshold: 48 - cpu: 3 - count: 346 - high: 378 - batch: 63 - vm stats threshold: 48 - cpu: 4 - count: 321 - high: 378 - batch: 63 - vm stats threshold: 48 - cpu: 5 - count: 316 - high: 378 - batch: 63 - vm stats threshold: 48 - cpu: 6 - count: 373 - high: 378 - batch: 63 - vm stats threshold: 48 - cpu: 7 - count: 339 - high: 378 - batch: 63 - vm stats threshold: 48 - node_unreclaimable: 0 - start_pfn: 4096 -Node 0, zone Normal - pages free 18553 - min 11176 - low 13842 - high 16508 - spanned 1308160 - present 1308160 - managed 1268711 - protection: (0, 0, 0, 0, 0) - nr_free_pages 18553 - nr_zone_inactive_anon 112423 - nr_zone_active_anon 440982 - nr_zone_inactive_file 241429 - nr_zone_active_file 275989 - nr_zone_unevictable 49272 - nr_zone_write_pending 844 - nr_mlock 154 - nr_page_table_pages 9750 - nr_kernel_stack 15136 - nr_bounce 0 - nr_zspages 0 - nr_free_cma 0 - numa_hit 162718019 - numa_miss 0 - numa_foreign 0 - numa_interleave 26812 - numa_local 162718019 - numa_other 0 - pagesets - cpu: 0 - count: 316 - high: 378 - batch: 63 - vm stats threshold: 56 - cpu: 1 - count: 366 - high: 378 - batch: 63 - vm stats threshold: 56 - cpu: 2 - count: 60 - high: 378 - batch: 63 - vm stats threshold: 56 - cpu: 3 - count: 256 - high: 378 - batch: 63 - vm stats threshold: 56 - cpu: 4 - count: 253 - high: 378 - batch: 63 - vm stats threshold: 56 - cpu: 5 - count: 159 - high: 378 - batch: 63 - vm stats threshold: 56 - cpu: 6 - count: 311 - high: 378 - batch: 63 - vm stats threshold: 56 - cpu: 7 - count: 264 - high: 378 - batch: 63 - vm stats threshold: 56 - node_unreclaimable: 0 - start_pfn: 1048576 -Node 0, zone Movable - pages free 0 - min 0 - low 0 - high 0 - spanned 0 - present 0 - managed 0 - protection: (0, 0, 0, 0, 0) -Node 0, zone Device - pages free 0 - min 0 - low 0 - high 0 - spanned 0 - present 0 - managed 0 - protection: (0, 0, 0, 0, 0) -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/block -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/block/dm-0 -Mode: 
775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/dm-0/stat -Lines: 1 -6447303 0 710266738 1529043 953216 0 31201176 4557464 0 796160 6088971 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/block/sda -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/block/sda/queue -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/add_random -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/chunk_sectors -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/dax -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/discard_granularity -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/discard_max_bytes -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/discard_max_hw_bytes -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/discard_zeroes_data -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/fua -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/hw_sector_size -Lines: 1 -512 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/io_poll -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/io_poll_delay -Lines: 1 --1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/io_timeout -Lines: 1 -30000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/block/sda/queue/iosched -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/iosched/back_seek_max -Lines: 1 -16384 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/iosched/back_seek_penalty -Lines: 1 -2 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/iosched/fifo_expire_async -Lines: 1 -250 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/iosched/fifo_expire_sync -Lines: 1 -125 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/iosched/low_latency -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/block/sda/queue/iosched/max_budget -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/block/sda/queue/iosched/slice_idle -Lines: 1 -8 -Mode: 644
-[... vendored prometheus/procfs ttar fixture archive elided: repetitive Path/Lines/Mode entries (separated by "# ttar - - -" rules) covering fixtures/sys/block/sda/queue/* tunables and stat, fixtures/sys/class/fc_host/host0/* (port state and statistics), fixtures/sys/class/infiniband/mlx4_0/* (ports 1-2 counters), fixtures/sys/class/net/eth0/*, fixtures/sys/class/power_supply and powercap/intel-rapl* zones, fixtures/sys/class/thermal/* (cooling devices and thermal zones), and fixtures/sys/devices/* (ACPI power_supply AC/BAT0, PCI 0000:00:1f.6, bcache stats for sdb/sdc, rbd, NUMA node vmstat) ...]
-Path: fixtures/sys/devices/system/clocksource/clocksource0/current_clocksource -Lines:
1 -tsc -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/cpu -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/cpu/cpu0 -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu0/cpufreq -SymlinkTo: ../cpufreq/policy0 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/cpu/cpu0/thermal_throttle -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu0/thermal_throttle/core_throttle_count -Lines: 1 -10084 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu0/thermal_throttle/package_throttle_count -Lines: 1 -34818 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/cpu/cpu0/topology -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu0/topology/core_id -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu0/topology/core_siblings -Lines: 1 -ff -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu0/topology/core_siblings_list -Lines: 1 -0-7 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu0/topology/physical_package_id -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu0/topology/thread_siblings -Lines: 1 -11 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu0/topology/thread_siblings_list -Lines: 1 -0,4 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/cpu/cpu1 -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/cpu/cpu1/cpufreq -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_cur_freq -Lines: 1 -1200195 -Mode: 400 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_max_freq -Lines: 1 -3300000 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_min_freq -Lines: 1 -1200000 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_transition_latency -Lines: 1 -4294967295 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/related_cpus -Lines: 1 -1 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_available_governors -Lines: 1 
-performance powersave -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_driver -Lines: 1 -intel_pstate -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_governor -Lines: 1 -powersave -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_max_freq -Lines: 1 -3300000 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_min_freq -Lines: 1 -1200000 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_setspeed -Lines: 1 - -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/cpu/cpu1/thermal_throttle -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/thermal_throttle/core_throttle_count -Lines: 1 -523 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/thermal_throttle/package_throttle_count -Lines: 1 -34818 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/cpu/cpu1/topology -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/topology/core_id -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/topology/core_siblings -Lines: 1 -ff -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/topology/core_siblings_list -Lines: 1 -0-7 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/topology/physical_package_id -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/topology/thread_siblings -Lines: 1 -22 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/topology/thread_siblings_list -Lines: 1 -1,5 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/cpu/cpufreq -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/cpu/cpufreq/policy0 -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/affected_cpus -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/cpuinfo_max_freq -Lines: 1 -2400000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/cpuinfo_min_freq -Lines: 1 -800000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/cpuinfo_transition_latency -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/related_cpus -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_available_governors -Lines: 1 -performance powersave -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_cur_freq -Lines: 1 -1219917 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_driver -Lines: 1 -intel_pstate -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_governor -Lines: 1 -powersave -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_max_freq -Lines: 1 -2400000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_min_freq -Lines: 1 -800000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_setspeed -Lines: 1 - -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/cpu/cpufreq/policy1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/average_key_size -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0 -Mode: 777 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/dirty_data -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_hit_ratio -Lines: 1 -100 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_hits -Lines: 1 -289 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_hit_ratio -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_hit_ratio -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_hit_ratio -Lines: 1 -100 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_hits -Lines: 1 -546 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/writeback_rate_debug -Lines: 7 -rate: 1.1M/sec -dirty: 20.4G -target: 20.4G -proportional: 427.5k -integral: 790.0k -change: 321.5k/sec -next io: 17ms -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/btree_cache_size -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
-Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0 -Mode: 777 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/io_errors -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/metadata_written -Lines: 1 -512 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/priority_stats -Lines: 5 -Unused: 99% -Metadata: 0% -Average: 10473 -Sectors per Q: 64 -Quantiles: [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946] -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/written -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache_available_percent -Lines: 1 -100 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/congested -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/active_journal_entries -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/btree_nodes -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/btree_read_average_duration_us -Lines: 1 -1305 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/cache_read_races -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/root_usage_percent -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_hit_ratio -Lines: 1 -100 
-Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_hits -Lines: 1 -289 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_hit_ratio -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_hit_ratio -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_hit_ratio -Lines: 1 -100 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_hits -Lines: 1 -546 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/tree_depth -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/bytes_may_use -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/bytes_pinned -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/bytes_readonly -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/bytes_reserved -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/bytes_used -Lines: 1 -808189952 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/disk_total -Lines: 1 -2147483648 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/disk_used -Lines: 1 -808189952 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/flags -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/raid0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/raid0/total_bytes -Lines: 1 -2147483648 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/raid0/used_bytes -Lines: 1 -808189952 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/total_bytes -Lines: 1 -2147483648 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/total_bytes_pinned -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/global_rsv_reserved -Lines: 1 -16777216 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/global_rsv_size -Lines: 1 -16777216 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/bytes_may_use -Lines: 1 -16777216 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/bytes_pinned 
-Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/bytes_readonly -Lines: 1 -131072 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/bytes_reserved -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/bytes_used -Lines: 1 -933888 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/disk_total -Lines: 1 -2147483648 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/disk_used -Lines: 1 -1867776 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/flags -Lines: 1 -4 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/raid1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/raid1/total_bytes -Lines: 1 -1073741824 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/raid1/used_bytes -Lines: 1 -933888 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/total_bytes -Lines: 1 -1073741824 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/total_bytes_pinned -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_may_use -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_pinned -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_readonly -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_reserved -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_used -Lines: 1 -16384 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/disk_total -Lines: 1 -16777216 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/disk_used -Lines: 1 -32768 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/flags -Lines: 1 -2 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/raid1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/raid1/total_bytes -Lines: 1 -8388608 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/raid1/used_bytes -Lines: 1 -16384 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/total_bytes -Lines: 1 -8388608 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/total_bytes_pinned -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/clone_alignment -Lines: 1 -4096 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices/loop25 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices/loop25/size -Lines: 1 -20971520 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices/loop26 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices/loop26/size -Lines: 1 -20971520 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features/big_metadata -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features/extended_iref -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features/mixed_backref -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features/skinny_metadata -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/label -Lines: 1 -fixture -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/metadata_uuid -Lines: 1 -0abb23a9-579b-43e6-ad30-227ef47fcb9d -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/nodesize -Lines: 1 -16384 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/quota_override -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/sectorsize -Lines: 1 -4096 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_may_use -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_pinned -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_readonly -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_reserved -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_used -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/disk_total -Lines: 1 -644087808 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/disk_used -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/flags -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/raid5 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/raid5/total_bytes -Lines: 1 -644087808 
-Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/raid5/used_bytes -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/total_bytes -Lines: 1 -644087808 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/total_bytes_pinned -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/global_rsv_reserved -Lines: 1 -16777216 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/global_rsv_size -Lines: 1 -16777216 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_may_use -Lines: 1 -16777216 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_pinned -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_readonly -Lines: 1 -262144 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_reserved -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_used -Lines: 1 -114688 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/disk_total -Lines: 1 -429391872 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/disk_used -Lines: 1 -114688 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/flags -Lines: 1 -4 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/raid6 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/raid6/total_bytes -Lines: 1 -429391872 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/raid6/used_bytes -Lines: 1 -114688 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
[The deleted fixtures.ttar archive continues here for several hundred more records, each a ttar Path/Lines/Mode entry separated by runs of "# ttar - - ..." padding: the remaining btrfs fixtures for filesystem 7f07c59f-6136-449c-ab87-e1cf2328731b (metadata and system allocation counters, raid6 totals, clone_alignment, device symlinks loop22 to loop25, feature flags, an empty label, metadata_uuid, nodesize, quota_override, sectorsize), xfs extent_alloc stats for sda1 and sdb1, and configfs iSCSI target fixtures (fileio, iblock, rbd and rd_mcp backstores with enable and udev_path files, plus iqn.* targets exposing per-LUN scsi_tgt_port in_cmds, read_mbytes and write_mbytes counters).]
- - - - - - - diff --git a/vendor/github.com/prometheus/procfs/fs.go b/vendor/github.com/prometheus/procfs/fs.go deleted file mode 100644 index 0102ab0f..00000000 --- a/vendor/github.com/prometheus/procfs/fs.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "github.com/prometheus/procfs/internal/fs" -) - -// FS represents the pseudo-filesystem sys, which provides an interface to -// kernel data structures. -type FS struct { - proc fs.FS -} - -// DefaultMountPoint is the common mount point of the proc filesystem. -const DefaultMountPoint = fs.DefaultProcMountPoint - -// NewDefaultFS returns a new proc FS mounted under the default proc mountPoint. -// It will error if the mount point directory can't be read or is a file. -func NewDefaultFS() (FS, error) { - return NewFS(DefaultMountPoint) -} - -// NewFS returns a new proc FS mounted under the given proc mountPoint. It will error -// if the mount point directory can't be read or is a file. -func NewFS(mountPoint string) (FS, error) { - fs, err := fs.NewFS(mountPoint) - if err != nil { - return FS{}, err - } - return FS{fs}, nil -} diff --git a/vendor/github.com/prometheus/procfs/fscache.go b/vendor/github.com/prometheus/procfs/fscache.go deleted file mode 100644 index f8070e6e..00000000 --- a/vendor/github.com/prometheus/procfs/fscache.go +++ /dev/null @@ -1,422 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "fmt" - "io" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// Fscacheinfo represents fscache statistics. 
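The Fscacheinfo struct below mirrors /proc/fs/fscache/stats, a line-oriented file whose rows look like "Cookies: idx=3 dat=67877 spc=0"; the deleted parser splits each row into whitespace-separated name=value tokens and runs the values through strconv.ParseUint with base 0. A minimal standalone sketch of that tokenizing step (the sample line is illustrative, not taken from the fixtures above):

    package main

    import (
        "fmt"
        "strconv"
        "strings"
    )

    // parseKVFields extracts the uint64 value from each "name=value" token.
    func parseKVFields(tokens []string) (map[string]uint64, error) {
        out := make(map[string]uint64, len(tokens))
        for _, tok := range tokens {
            parts := strings.SplitN(tok, "=", 2)
            if len(parts) != 2 {
                return nil, fmt.Errorf("malformed field %q", tok)
            }
            // Base 0 lets ParseUint infer the base from any prefix, as the
            // vendored setFSCacheFields does.
            v, err := strconv.ParseUint(parts[1], 0, 64)
            if err != nil {
                return nil, err
            }
            out[parts[0]] = v
        }
        return out, nil
    }

    func main() {
        line := "Cookies: idx=3 dat=67877 spc=0"
        fields := strings.Fields(line)
        kv, err := parseKVFields(fields[1:])
        if err != nil {
            panic(err)
        }
        fmt.Println(kv["idx"], kv["dat"], kv["spc"]) // 3 67877 0
    }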
-type Fscacheinfo struct { - // Number of index cookies allocated - IndexCookiesAllocated uint64 - // data storage cookies allocated - DataStorageCookiesAllocated uint64 - // Number of special cookies allocated - SpecialCookiesAllocated uint64 - // Number of objects allocated - ObjectsAllocated uint64 - // Number of object allocation failures - ObjectAllocationsFailure uint64 - // Number of objects that reached the available state - ObjectsAvailable uint64 - // Number of objects that reached the dead state - ObjectsDead uint64 - // Number of objects that didn't have a coherency check - ObjectsWithoutCoherencyCheck uint64 - // Number of objects that passed a coherency check - ObjectsWithCoherencyCheck uint64 - // Number of objects that needed a coherency data update - ObjectsNeedCoherencyCheckUpdate uint64 - // Number of objects that were declared obsolete - ObjectsDeclaredObsolete uint64 - // Number of pages marked as being cached - PagesMarkedAsBeingCached uint64 - // Number of uncache page requests seen - UncachePagesRequestSeen uint64 - // Number of acquire cookie requests seen - AcquireCookiesRequestSeen uint64 - // Number of acq reqs given a NULL parent - AcquireRequestsWithNullParent uint64 - // Number of acq reqs rejected due to no cache available - AcquireRequestsRejectedNoCacheAvailable uint64 - // Number of acq reqs succeeded - AcquireRequestsSucceeded uint64 - // Number of acq reqs rejected due to error - AcquireRequestsRejectedDueToError uint64 - // Number of acq reqs failed on ENOMEM - AcquireRequestsFailedDueToEnomem uint64 - // Number of lookup calls made on cache backends - LookupsNumber uint64 - // Number of negative lookups made - LookupsNegative uint64 - // Number of positive lookups made - LookupsPositive uint64 - // Number of objects created by lookup - ObjectsCreatedByLookup uint64 - // Number of lookups timed out and requeued - LookupsTimedOutAndRequed uint64 - InvalidationsNumber uint64 - InvalidationsRunning uint64 - // Number of update cookie requests seen - UpdateCookieRequestSeen uint64 - // Number of upd reqs given a NULL parent - UpdateRequestsWithNullParent uint64 - // Number of upd reqs granted CPU time - UpdateRequestsRunning uint64 - // Number of relinquish cookie requests seen - RelinquishCookiesRequestSeen uint64 - // Number of rlq reqs given a NULL parent - RelinquishCookiesWithNullParent uint64 - // Number of rlq reqs waited on completion of creation - RelinquishRequestsWaitingCompleteCreation uint64 - // Relinqs rtr - RelinquishRetries uint64 - // Number of attribute changed requests seen - AttributeChangedRequestsSeen uint64 - // Number of attr changed requests queued - AttributeChangedRequestsQueued uint64 - // Number of attr changed rejected -ENOBUFS - AttributeChangedRejectDueToEnobufs uint64 - // Number of attr changed failed -ENOMEM - AttributeChangedFailedDueToEnomem uint64 - // Number of attr changed ops given CPU time - AttributeChangedOps uint64 - // Number of allocation requests seen - AllocationRequestsSeen uint64 - // Number of successful alloc reqs - AllocationOkRequests uint64 - // Number of alloc reqs that waited on lookup completion - AllocationWaitingOnLookup uint64 - // Number of alloc reqs rejected -ENOBUFS - AllocationsRejectedDueToEnobufs uint64 - // Number of alloc reqs aborted -ERESTARTSYS - AllocationsAbortedDueToErestartsys uint64 - // Number of alloc reqs submitted - AllocationOperationsSubmitted uint64 - // Number of alloc reqs waited for CPU time - AllocationsWaitedForCPU uint64 - // Number of alloc reqs aborted due to 
object death - AllocationsAbortedDueToObjectDeath uint64 - // Number of retrieval (read) requests seen - RetrievalsReadRequests uint64 - // Number of successful retr reqs - RetrievalsOk uint64 - // Number of retr reqs that waited on lookup completion - RetrievalsWaitingLookupCompletion uint64 - // Number of retr reqs returned -ENODATA - RetrievalsReturnedEnodata uint64 - // Number of retr reqs rejected -ENOBUFS - RetrievalsRejectedDueToEnobufs uint64 - // Number of retr reqs aborted -ERESTARTSYS - RetrievalsAbortedDueToErestartsys uint64 - // Number of retr reqs failed -ENOMEM - RetrievalsFailedDueToEnomem uint64 - // Number of retr reqs submitted - RetrievalsRequests uint64 - // Number of retr reqs waited for CPU time - RetrievalsWaitingCPU uint64 - // Number of retr reqs aborted due to object death - RetrievalsAbortedDueToObjectDeath uint64 - // Number of storage (write) requests seen - StoreWriteRequests uint64 - // Number of successful store reqs - StoreSuccessfulRequests uint64 - // Number of store reqs on a page already pending storage - StoreRequestsOnPendingStorage uint64 - // Number of store reqs rejected -ENOBUFS - StoreRequestsRejectedDueToEnobufs uint64 - // Number of store reqs failed -ENOMEM - StoreRequestsFailedDueToEnomem uint64 - // Number of store reqs submitted - StoreRequestsSubmitted uint64 - // Number of store reqs granted CPU time - StoreRequestsRunning uint64 - // Number of pages given store req processing time - StorePagesWithRequestsProcessing uint64 - // Number of store reqs deleted from tracking tree - StoreRequestsDeleted uint64 - // Number of store reqs over store limit - StoreRequestsOverStoreLimit uint64 - // Number of release reqs against pages with no pending store - ReleaseRequestsAgainstPagesWithNoPendingStorage uint64 - // Number of release reqs against pages stored by time lock granted - ReleaseRequestsAgainstPagesStoredByTimeLockGranted uint64 - // Number of release reqs ignored due to in-progress store - ReleaseRequestsIgnoredDueToInProgressStore uint64 - // Number of page stores cancelled due to release req - PageStoresCancelledByReleaseRequests uint64 - VmscanWaiting uint64 - // Number of times async ops added to pending queues - OpsPending uint64 - // Number of times async ops given CPU time - OpsRunning uint64 - // Number of times async ops queued for processing - OpsEnqueued uint64 - // Number of async ops cancelled - OpsCancelled uint64 - // Number of async ops rejected due to object lookup/create failure - OpsRejected uint64 - // Number of async ops initialised - OpsInitialised uint64 - // Number of async ops queued for deferred release - OpsDeferred uint64 - // Number of async ops released (should equal ini=N when idle) - OpsReleased uint64 - // Number of deferred-release async ops garbage collected - OpsGarbageCollected uint64 - // Number of in-progress alloc_object() cache ops - CacheopAllocationsinProgress uint64 - // Number of in-progress lookup_object() cache ops - CacheopLookupObjectInProgress uint64 - // Number of in-progress lookup_complete() cache ops - CacheopLookupCompleteInPorgress uint64 - // Number of in-progress grab_object() cache ops - CacheopGrabObjectInProgress uint64 - CacheopInvalidations uint64 - // Number of in-progress update_object() cache ops - CacheopUpdateObjectInProgress uint64 - // Number of in-progress drop_object() cache ops - CacheopDropObjectInProgress uint64 - // Number of in-progress put_object() cache ops - CacheopPutObjectInProgress uint64 - // Number of in-progress attr_changed() cache ops - 
CacheopAttributeChangeInProgress uint64 - // Number of in-progress sync_cache() cache ops - CacheopSyncCacheInProgress uint64 - // Number of in-progress read_or_alloc_page() cache ops - CacheopReadOrAllocPageInProgress uint64 - // Number of in-progress read_or_alloc_pages() cache ops - CacheopReadOrAllocPagesInProgress uint64 - // Number of in-progress allocate_page() cache ops - CacheopAllocatePageInProgress uint64 - // Number of in-progress allocate_pages() cache ops - CacheopAllocatePagesInProgress uint64 - // Number of in-progress write_page() cache ops - CacheopWritePagesInProgress uint64 - // Number of in-progress uncache_page() cache ops - CacheopUncachePagesInProgress uint64 - // Number of in-progress dissociate_pages() cache ops - CacheopDissociatePagesInProgress uint64 - // Number of object lookups/creations rejected due to lack of space - CacheevLookupsAndCreationsRejectedLackSpace uint64 - // Number of stale objects deleted - CacheevStaleObjectsDeleted uint64 - // Number of objects retired when relinquished - CacheevRetiredWhenReliquished uint64 - // Number of objects culled - CacheevObjectsCulled uint64 -} - -// Fscacheinfo returns information about current fscache statistics. -// See https://www.kernel.org/doc/Documentation/filesystems/caching/fscache.txt -func (fs FS) Fscacheinfo() (Fscacheinfo, error) { - b, err := util.ReadFileNoStat(fs.proc.Path("fs/fscache/stats")) - if err != nil { - return Fscacheinfo{}, err - } - - m, err := parseFscacheinfo(bytes.NewReader(b)) - if err != nil { - return Fscacheinfo{}, fmt.Errorf("failed to parse Fscacheinfo: %w", err) - } - - return *m, nil -} - -func setFSCacheFields(fields []string, setFields ...*uint64) error { - var err error - if len(fields) < len(setFields) { - return fmt.Errorf("Insufficient number of fields, expected %v, got %v", len(setFields), len(fields)) - } - - for i := range setFields { - *setFields[i], err = strconv.ParseUint(strings.Split(fields[i], "=")[1], 0, 64) - if err != nil { - return err - } - } - return nil -} - -func parseFscacheinfo(r io.Reader) (*Fscacheinfo, error) { - var m Fscacheinfo - s := bufio.NewScanner(r) - for s.Scan() { - fields := strings.Fields(s.Text()) - if len(fields) < 2 { - return nil, fmt.Errorf("malformed Fscacheinfo line: %q", s.Text()) - } - - switch fields[0] { - case "Cookies:": - err := setFSCacheFields(fields[1:], &m.IndexCookiesAllocated, &m.DataStorageCookiesAllocated, - &m.SpecialCookiesAllocated) - if err != nil { - return &m, err - } - case "Objects:": - err := setFSCacheFields(fields[1:], &m.ObjectsAllocated, &m.ObjectAllocationsFailure, - &m.ObjectsAvailable, &m.ObjectsDead) - if err != nil { - return &m, err - } - case "ChkAux": - err := setFSCacheFields(fields[2:], &m.ObjectsWithoutCoherencyCheck, &m.ObjectsWithCoherencyCheck, - &m.ObjectsNeedCoherencyCheckUpdate, &m.ObjectsDeclaredObsolete) - if err != nil { - return &m, err - } - case "Pages": - err := setFSCacheFields(fields[2:], &m.PagesMarkedAsBeingCached, &m.UncachePagesRequestSeen) - if err != nil { - return &m, err - } - case "Acquire:": - err := setFSCacheFields(fields[1:], &m.AcquireCookiesRequestSeen, &m.AcquireRequestsWithNullParent, - &m.AcquireRequestsRejectedNoCacheAvailable, &m.AcquireRequestsSucceeded, &m.AcquireRequestsRejectedDueToError, - &m.AcquireRequestsFailedDueToEnomem) - if err != nil { - return &m, err - } - case "Lookups:": - err := setFSCacheFields(fields[1:], &m.LookupsNumber, &m.LookupsNegative, &m.LookupsPositive, - &m.ObjectsCreatedByLookup, &m.LookupsTimedOutAndRequed) - if err != nil { - 
return &m, err - } - case "Invals": - err := setFSCacheFields(fields[2:], &m.InvalidationsNumber, &m.InvalidationsRunning) - if err != nil { - return &m, err - } - case "Updates:": - err := setFSCacheFields(fields[1:], &m.UpdateCookieRequestSeen, &m.UpdateRequestsWithNullParent, - &m.UpdateRequestsRunning) - if err != nil { - return &m, err - } - case "Relinqs:": - err := setFSCacheFields(fields[1:], &m.RelinquishCookiesRequestSeen, &m.RelinquishCookiesWithNullParent, - &m.RelinquishRequestsWaitingCompleteCreation, &m.RelinquishRetries) - if err != nil { - return &m, err - } - case "AttrChg:": - err := setFSCacheFields(fields[1:], &m.AttributeChangedRequestsSeen, &m.AttributeChangedRequestsQueued, - &m.AttributeChangedRejectDueToEnobufs, &m.AttributeChangedFailedDueToEnomem, &m.AttributeChangedOps) - if err != nil { - return &m, err - } - case "Allocs": - if strings.Split(fields[2], "=")[0] == "n" { - err := setFSCacheFields(fields[2:], &m.AllocationRequestsSeen, &m.AllocationOkRequests, - &m.AllocationWaitingOnLookup, &m.AllocationsRejectedDueToEnobufs, &m.AllocationsAbortedDueToErestartsys) - if err != nil { - return &m, err - } - } else { - err := setFSCacheFields(fields[2:], &m.AllocationOperationsSubmitted, &m.AllocationsWaitedForCPU, - &m.AllocationsAbortedDueToObjectDeath) - if err != nil { - return &m, err - } - } - case "Retrvls:": - if strings.Split(fields[1], "=")[0] == "n" { - err := setFSCacheFields(fields[1:], &m.RetrievalsReadRequests, &m.RetrievalsOk, &m.RetrievalsWaitingLookupCompletion, - &m.RetrievalsReturnedEnodata, &m.RetrievalsRejectedDueToEnobufs, &m.RetrievalsAbortedDueToErestartsys, - &m.RetrievalsFailedDueToEnomem) - if err != nil { - return &m, err - } - } else { - err := setFSCacheFields(fields[1:], &m.RetrievalsRequests, &m.RetrievalsWaitingCPU, &m.RetrievalsAbortedDueToObjectDeath) - if err != nil { - return &m, err - } - } - case "Stores": - if strings.Split(fields[2], "=")[0] == "n" { - err := setFSCacheFields(fields[2:], &m.StoreWriteRequests, &m.StoreSuccessfulRequests, - &m.StoreRequestsOnPendingStorage, &m.StoreRequestsRejectedDueToEnobufs, &m.StoreRequestsFailedDueToEnomem) - if err != nil { - return &m, err - } - } else { - err := setFSCacheFields(fields[2:], &m.StoreRequestsSubmitted, &m.StoreRequestsRunning, - &m.StorePagesWithRequestsProcessing, &m.StoreRequestsDeleted, &m.StoreRequestsOverStoreLimit) - if err != nil { - return &m, err - } - } - case "VmScan": - err := setFSCacheFields(fields[2:], &m.ReleaseRequestsAgainstPagesWithNoPendingStorage, - &m.ReleaseRequestsAgainstPagesStoredByTimeLockGranted, &m.ReleaseRequestsIgnoredDueToInProgressStore, - &m.PageStoresCancelledByReleaseRequests, &m.VmscanWaiting) - if err != nil { - return &m, err - } - case "Ops": - if strings.Split(fields[2], "=")[0] == "pend" { - err := setFSCacheFields(fields[2:], &m.OpsPending, &m.OpsRunning, &m.OpsEnqueued, &m.OpsCancelled, &m.OpsRejected) - if err != nil { - return &m, err - } - } else { - err := setFSCacheFields(fields[2:], &m.OpsInitialised, &m.OpsDeferred, &m.OpsReleased, &m.OpsGarbageCollected) - if err != nil { - return &m, err - } - } - case "CacheOp:": - if strings.Split(fields[1], "=")[0] == "alo" { - err := setFSCacheFields(fields[1:], &m.CacheopAllocationsinProgress, &m.CacheopLookupObjectInProgress, - &m.CacheopLookupCompleteInPorgress, &m.CacheopGrabObjectInProgress) - if err != nil { - return &m, err - } - } else if strings.Split(fields[1], "=")[0] == "inv" { - err := setFSCacheFields(fields[1:], &m.CacheopInvalidations, 
&m.CacheopUpdateObjectInProgress, - &m.CacheopDropObjectInProgress, &m.CacheopPutObjectInProgress, &m.CacheopAttributeChangeInProgress, - &m.CacheopSyncCacheInProgress) - if err != nil { - return &m, err - } - } else { - err := setFSCacheFields(fields[1:], &m.CacheopReadOrAllocPageInProgress, &m.CacheopReadOrAllocPagesInProgress, - &m.CacheopAllocatePageInProgress, &m.CacheopAllocatePagesInProgress, &m.CacheopWritePagesInProgress, - &m.CacheopUncachePagesInProgress, &m.CacheopDissociatePagesInProgress) - if err != nil { - return &m, err - } - } - case "CacheEv:": - err := setFSCacheFields(fields[1:], &m.CacheevLookupsAndCreationsRejectedLackSpace, &m.CacheevStaleObjectsDeleted, - &m.CacheevRetiredWhenReliquished, &m.CacheevObjectsCulled) - if err != nil { - return &m, err - } - } - } - - return &m, nil -} diff --git a/vendor/github.com/prometheus/procfs/internal/fs/fs.go b/vendor/github.com/prometheus/procfs/internal/fs/fs.go deleted file mode 100644 index 0040753b..00000000 --- a/vendor/github.com/prometheus/procfs/internal/fs/fs.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package fs - -import ( - "fmt" - "os" - "path/filepath" -) - -const ( - // DefaultProcMountPoint is the common mount point of the proc filesystem. - DefaultProcMountPoint = "/proc" - - // DefaultSysMountPoint is the common mount point of the sys filesystem. - DefaultSysMountPoint = "/sys" - - // DefaultConfigfsMountPoint is the common mount point of the configfs - DefaultConfigfsMountPoint = "/sys/kernel/config" -) - -// FS represents a pseudo-filesystem, normally /proc or /sys, which provides an -// interface to kernel data structures. -type FS string - -// NewFS returns a new FS mounted under the given mountPoint. It will error -// if the mount point can't be read. -func NewFS(mountPoint string) (FS, error) { - info, err := os.Stat(mountPoint) - if err != nil { - return "", fmt.Errorf("could not read %q: %w", mountPoint, err) - } - if !info.IsDir() { - return "", fmt.Errorf("mount point %q is not a directory", mountPoint) - } - - return FS(mountPoint), nil -} - -// Path appends the given path elements to the filesystem path, adding separators -// as necessary. -func (fs FS) Path(p ...string) string { - return filepath.Join(append([]string{string(fs)}, p...)...) -} diff --git a/vendor/github.com/prometheus/procfs/internal/util/parse.go b/vendor/github.com/prometheus/procfs/internal/util/parse.go deleted file mode 100644 index 22cb07a6..00000000 --- a/vendor/github.com/prometheus/procfs/internal/util/parse.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package util - -import ( - "io/ioutil" - "strconv" - "strings" -) - -// ParseUint32s parses a slice of strings into a slice of uint32s. -func ParseUint32s(ss []string) ([]uint32, error) { - us := make([]uint32, 0, len(ss)) - for _, s := range ss { - u, err := strconv.ParseUint(s, 10, 32) - if err != nil { - return nil, err - } - - us = append(us, uint32(u)) - } - - return us, nil -} - -// ParseUint64s parses a slice of strings into a slice of uint64s. -func ParseUint64s(ss []string) ([]uint64, error) { - us := make([]uint64, 0, len(ss)) - for _, s := range ss { - u, err := strconv.ParseUint(s, 10, 64) - if err != nil { - return nil, err - } - - us = append(us, u) - } - - return us, nil -} - -// ParsePInt64s parses a slice of strings into a slice of int64 pointers. -func ParsePInt64s(ss []string) ([]*int64, error) { - us := make([]*int64, 0, len(ss)) - for _, s := range ss { - u, err := strconv.ParseInt(s, 10, 64) - if err != nil { - return nil, err - } - - us = append(us, &u) - } - - return us, nil -} - -// ReadUintFromFile reads a file and attempts to parse a uint64 from it. -func ReadUintFromFile(path string) (uint64, error) { - data, err := ioutil.ReadFile(path) - if err != nil { - return 0, err - } - return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64) -} - -// ReadIntFromFile reads a file and attempts to parse a int64 from it. -func ReadIntFromFile(path string) (int64, error) { - data, err := ioutil.ReadFile(path) - if err != nil { - return 0, err - } - return strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64) -} - -// ParseBool parses a string into a boolean pointer. -func ParseBool(b string) *bool { - var truth bool - switch b { - case "enabled": - truth = true - case "disabled": - truth = false - default: - return nil - } - return &truth -} diff --git a/vendor/github.com/prometheus/procfs/internal/util/readfile.go b/vendor/github.com/prometheus/procfs/internal/util/readfile.go deleted file mode 100644 index 8051161b..00000000 --- a/vendor/github.com/prometheus/procfs/internal/util/readfile.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package util - -import ( - "io" - "io/ioutil" - "os" -) - -// ReadFileNoStat uses ioutil.ReadAll to read contents of entire file. -// This is similar to ioutil.ReadFile but without the call to os.Stat, because -// many files in /proc and /sys report incorrect file sizes (either 0 or 4096). -// Reads a max file size of 512kB. For files larger than this, a scanner -// should be used. 
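The cap exists because stat() on procfs commonly reports a size of 0 or 4096 regardless of content, so a size-trusting ReadFile can come back empty or truncated. The same pattern in current Go, using io.ReadAll instead of the since-deprecated ioutil.ReadAll (in line with the io/ioutil migration elsewhere in this change); a sketch, not the vendored implementation:

    package main

    import (
        "fmt"
        "io"
        "os"
    )

    // readProcFile reads a procfs/sysfs file without trusting its Stat size,
    // capping the read at 512 KiB as ReadFileNoStat below does.
    func readProcFile(name string) ([]byte, error) {
        f, err := os.Open(name)
        if err != nil {
            return nil, err
        }
        defer f.Close()
        return io.ReadAll(io.LimitReader(f, 512*1024))
    }

    func main() {
        b, err := readProcFile("/proc/loadavg")
        if err != nil {
            fmt.Fprintln(os.Stderr, err)
            return
        }
        fmt.Print(string(b))
    }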
-func ReadFileNoStat(filename string) ([]byte, error) { - const maxBufferSize = 1024 * 512 - - f, err := os.Open(filename) - if err != nil { - return nil, err - } - defer f.Close() - - reader := io.LimitReader(f, maxBufferSize) - return ioutil.ReadAll(reader) -} diff --git a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go deleted file mode 100644 index c07de0b6..00000000 --- a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build linux,!appengine - -package util - -import ( - "bytes" - "os" - "syscall" -) - -// SysReadFile is a simplified ioutil.ReadFile that invokes syscall.Read directly. -// https://github.com/prometheus/node_exporter/pull/728/files -// -// Note that this function will not read files larger than 128 bytes. -func SysReadFile(file string) (string, error) { - f, err := os.Open(file) - if err != nil { - return "", err - } - defer f.Close() - - // On some machines, hwmon drivers are broken and return EAGAIN. This causes - // Go's ioutil.ReadFile implementation to poll forever. - // - // Since we either want to read data or bail immediately, do the simplest - // possible read using syscall directly. - const sysFileBufferSize = 128 - b := make([]byte, sysFileBufferSize) - n, err := syscall.Read(int(f.Fd()), b) - if err != nil { - return "", err - } - - return string(bytes.TrimSpace(b[:n])), nil -} diff --git a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go deleted file mode 100644 index bd55b453..00000000 --- a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build linux,appengine !linux - -package util - -import ( - "fmt" -) - -// SysReadFile is here implemented as a noop for builds that do not support -// the read syscall. For example Windows, or Linux on Google App Engine. 
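Note the build-constraint pairing: the syscall-based SysReadFile above carries +build linux,!appengine, while the stub below carries +build linux,appengine !linux, i.e. (linux AND appengine) OR (not linux), so exactly one variant compiles on any platform. With the //go:build syntax introduced in Go 1.17, the stub's file header would read as follows (a sketch of the modern equivalent, not part of the original file):

    //go:build (linux && appengine) || !linux
    // +build linux,appengine !linux

    package util

    import "errors"

    // SysReadFile is a stub for platforms without the raw read(2) path.
    func SysReadFile(file string) (string, error) {
        return "", errors.New("not supported on this platform")
    }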
-func SysReadFile(file string) (string, error) { - return "", fmt.Errorf("not supported on this platform") -} diff --git a/vendor/github.com/prometheus/procfs/internal/util/valueparser.go b/vendor/github.com/prometheus/procfs/internal/util/valueparser.go deleted file mode 100644 index fe2355d3..00000000 --- a/vendor/github.com/prometheus/procfs/internal/util/valueparser.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package util - -import ( - "strconv" -) - -// TODO(mdlayher): util packages are an anti-pattern and this should be moved -// somewhere else that is more focused in the future. - -// A ValueParser enables parsing a single string into a variety of data types -// in a concise and safe way. The Err method must be invoked after invoking -// any other methods to ensure a value was successfully parsed. -type ValueParser struct { - v string - err error -} - -// NewValueParser creates a ValueParser using the input string. -func NewValueParser(v string) *ValueParser { - return &ValueParser{v: v} -} - -// Int interprets the underlying value as an int and returns that value. -func (vp *ValueParser) Int() int { return int(vp.int64()) } - -// PInt64 interprets the underlying value as an int64 and returns a pointer to -// that value. -func (vp *ValueParser) PInt64() *int64 { - if vp.err != nil { - return nil - } - - v := vp.int64() - return &v -} - -// int64 interprets the underlying value as an int64 and returns that value. -// TODO: export if/when necessary. -func (vp *ValueParser) int64() int64 { - if vp.err != nil { - return 0 - } - - // A base value of zero makes ParseInt infer the correct base using the - // string's prefix, if any. - const base = 0 - v, err := strconv.ParseInt(vp.v, base, 64) - if err != nil { - vp.err = err - return 0 - } - - return v -} - -// PUInt64 interprets the underlying value as an uint64 and returns a pointer to -// that value. -func (vp *ValueParser) PUInt64() *uint64 { - if vp.err != nil { - return nil - } - - // A base value of zero makes ParseInt infer the correct base using the - // string's prefix, if any. - const base = 0 - v, err := strconv.ParseUint(vp.v, base, 64) - if err != nil { - vp.err = err - return nil - } - - return &v -} - -// Err returns the last error, if any, encountered by the ValueParser. -func (vp *ValueParser) Err() error { - return vp.err -} diff --git a/vendor/github.com/prometheus/procfs/ipvs.go b/vendor/github.com/prometheus/procfs/ipvs.go deleted file mode 100644 index 89e44774..00000000 --- a/vendor/github.com/prometheus/procfs/ipvs.go +++ /dev/null @@ -1,241 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "encoding/hex" - "errors" - "fmt" - "io" - "io/ioutil" - "net" - "os" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// IPVSStats holds IPVS statistics, as exposed by the kernel in `/proc/net/ip_vs_stats`. -type IPVSStats struct { - // Total count of connections. - Connections uint64 - // Total incoming packages processed. - IncomingPackets uint64 - // Total outgoing packages processed. - OutgoingPackets uint64 - // Total incoming traffic. - IncomingBytes uint64 - // Total outgoing traffic. - OutgoingBytes uint64 -} - -// IPVSBackendStatus holds current metrics of one virtual / real address pair. -type IPVSBackendStatus struct { - // The local (virtual) IP address. - LocalAddress net.IP - // The remote (real) IP address. - RemoteAddress net.IP - // The local (virtual) port. - LocalPort uint16 - // The remote (real) port. - RemotePort uint16 - // The local firewall mark - LocalMark string - // The transport protocol (TCP, UDP). - Proto string - // The current number of active connections for this virtual/real address pair. - ActiveConn uint64 - // The current number of inactive connections for this virtual/real address pair. - InactConn uint64 - // The current weight of this virtual/real address pair. - Weight uint64 -} - -// IPVSStats reads the IPVS statistics from the specified `proc` filesystem. -func (fs FS) IPVSStats() (IPVSStats, error) { - data, err := util.ReadFileNoStat(fs.proc.Path("net/ip_vs_stats")) - if err != nil { - return IPVSStats{}, err - } - - return parseIPVSStats(bytes.NewReader(data)) -} - -// parseIPVSStats performs the actual parsing of `ip_vs_stats`. -func parseIPVSStats(r io.Reader) (IPVSStats, error) { - var ( - statContent []byte - statLines []string - statFields []string - stats IPVSStats - ) - - statContent, err := ioutil.ReadAll(r) - if err != nil { - return IPVSStats{}, err - } - - statLines = strings.SplitN(string(statContent), "\n", 4) - if len(statLines) != 4 { - return IPVSStats{}, errors.New("ip_vs_stats corrupt: too short") - } - - statFields = strings.Fields(statLines[2]) - if len(statFields) != 5 { - return IPVSStats{}, errors.New("ip_vs_stats corrupt: unexpected number of fields") - } - - stats.Connections, err = strconv.ParseUint(statFields[0], 16, 64) - if err != nil { - return IPVSStats{}, err - } - stats.IncomingPackets, err = strconv.ParseUint(statFields[1], 16, 64) - if err != nil { - return IPVSStats{}, err - } - stats.OutgoingPackets, err = strconv.ParseUint(statFields[2], 16, 64) - if err != nil { - return IPVSStats{}, err - } - stats.IncomingBytes, err = strconv.ParseUint(statFields[3], 16, 64) - if err != nil { - return IPVSStats{}, err - } - stats.OutgoingBytes, err = strconv.ParseUint(statFields[4], 16, 64) - if err != nil { - return IPVSStats{}, err - } - - return stats, nil -} - -// IPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem. 
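Endpoint columns in /proc/net/ip_vs are hex encoded: an IPv4 entry is exactly 13 characters, AABBCCDD:PPPP, with both halves in hex, which is the first case handled by the parseIPPort helper further down (IPv6 arrives as a 46-character bracketed literal). A sketch of just the IPv4 branch (the sample address is made up):

    package main

    import (
        "encoding/hex"
        "fmt"
        "net"
        "strconv"
    )

    // parseIPv4Port decodes the 13-character hex form AABBCCDD:PPPP used by
    // /proc/net/ip_vs for IPv4 endpoints.
    func parseIPv4Port(s string) (net.IP, uint16, error) {
        if len(s) != 13 {
            return nil, 0, fmt.Errorf("unexpected IP:Port %q", s)
        }
        ip, err := hex.DecodeString(s[0:8])
        if err != nil {
            return nil, 0, err
        }
        port, err := strconv.ParseUint(s[9:], 16, 16)
        if err != nil {
            return nil, 0, err
        }
        return net.IP(ip), uint16(port), nil
    }

    func main() {
        ip, port, err := parseIPv4Port("C0A80001:0050") // 192.168.0.1:80
        if err != nil {
            panic(err)
        }
        fmt.Println(ip, port)
    }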
-func (fs FS) IPVSBackendStatus() ([]IPVSBackendStatus, error) { - file, err := os.Open(fs.proc.Path("net/ip_vs")) - if err != nil { - return nil, err - } - defer file.Close() - - return parseIPVSBackendStatus(file) -} - -func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) { - var ( - status []IPVSBackendStatus - scanner = bufio.NewScanner(file) - proto string - localMark string - localAddress net.IP - localPort uint16 - err error - ) - - for scanner.Scan() { - fields := strings.Fields(scanner.Text()) - if len(fields) == 0 { - continue - } - switch { - case fields[0] == "IP" || fields[0] == "Prot" || fields[1] == "RemoteAddress:Port": - continue - case fields[0] == "TCP" || fields[0] == "UDP": - if len(fields) < 2 { - continue - } - proto = fields[0] - localMark = "" - localAddress, localPort, err = parseIPPort(fields[1]) - if err != nil { - return nil, err - } - case fields[0] == "FWM": - if len(fields) < 2 { - continue - } - proto = fields[0] - localMark = fields[1] - localAddress = nil - localPort = 0 - case fields[0] == "->": - if len(fields) < 6 { - continue - } - remoteAddress, remotePort, err := parseIPPort(fields[1]) - if err != nil { - return nil, err - } - weight, err := strconv.ParseUint(fields[3], 10, 64) - if err != nil { - return nil, err - } - activeConn, err := strconv.ParseUint(fields[4], 10, 64) - if err != nil { - return nil, err - } - inactConn, err := strconv.ParseUint(fields[5], 10, 64) - if err != nil { - return nil, err - } - status = append(status, IPVSBackendStatus{ - LocalAddress: localAddress, - LocalPort: localPort, - LocalMark: localMark, - RemoteAddress: remoteAddress, - RemotePort: remotePort, - Proto: proto, - Weight: weight, - ActiveConn: activeConn, - InactConn: inactConn, - }) - } - } - return status, nil -} - -func parseIPPort(s string) (net.IP, uint16, error) { - var ( - ip net.IP - err error - ) - - switch len(s) { - case 13: - ip, err = hex.DecodeString(s[0:8]) - if err != nil { - return nil, 0, err - } - case 46: - ip = net.ParseIP(s[1:40]) - if ip == nil { - return nil, 0, fmt.Errorf("invalid IPv6 address: %s", s[1:40]) - } - default: - return nil, 0, fmt.Errorf("unexpected IP:Port: %s", s) - } - - portString := s[len(s)-4:] - if len(portString) != 4 { - return nil, 0, fmt.Errorf("unexpected port string format: %s", portString) - } - port, err := strconv.ParseUint(portString, 16, 16) - if err != nil { - return nil, 0, err - } - - return ip, uint16(port), nil -} diff --git a/vendor/github.com/prometheus/procfs/kernel_random.go b/vendor/github.com/prometheus/procfs/kernel_random.go deleted file mode 100644 index da3a941d..00000000 --- a/vendor/github.com/prometheus/procfs/kernel_random.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build !windows - -package procfs - -import ( - "os" - - "github.com/prometheus/procfs/internal/util" -) - -// KernelRandom contains information about to the kernel's random number generator. 
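Each field of the KernelRandom struct below is filled from a single file under /proc/sys/kernel/random/ via util.ReadUintFromFile, with missing files silently skipped. The read-and-parse step it relies on reduces to the following (a self-contained sketch using os.ReadFile rather than the vendored helper):

    package main

    import (
        "fmt"
        "os"
        "strconv"
        "strings"
    )

    // readUint reads a small /proc file and parses its trimmed contents as an
    // unsigned decimal integer.
    func readUint(path string) (uint64, error) {
        b, err := os.ReadFile(path)
        if err != nil {
            return 0, err
        }
        return strconv.ParseUint(strings.TrimSpace(string(b)), 10, 64)
    }

    func main() {
        v, err := readUint("/proc/sys/kernel/random/entropy_avail")
        if err != nil {
            fmt.Fprintln(os.Stderr, err)
            return
        }
        fmt.Println("entropy available:", v)
    }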
-type KernelRandom struct { - // EntropyAvaliable gives the available entropy, in bits. - EntropyAvaliable *uint64 - // PoolSize gives the size of the entropy pool, in bits. - PoolSize *uint64 - // URandomMinReseedSeconds is the number of seconds after which the DRNG will be reseeded. - URandomMinReseedSeconds *uint64 - // WriteWakeupThreshold the number of bits of entropy below which we wake up processes - // that do a select(2) or poll(2) for write access to /dev/random. - WriteWakeupThreshold *uint64 - // ReadWakeupThreshold is the number of bits of entropy required for waking up processes that sleep - // waiting for entropy from /dev/random. - ReadWakeupThreshold *uint64 -} - -// KernelRandom returns values from /proc/sys/kernel/random. -func (fs FS) KernelRandom() (KernelRandom, error) { - random := KernelRandom{} - - for file, p := range map[string]**uint64{ - "entropy_avail": &random.EntropyAvaliable, - "poolsize": &random.PoolSize, - "urandom_min_reseed_secs": &random.URandomMinReseedSeconds, - "write_wakeup_threshold": &random.WriteWakeupThreshold, - "read_wakeup_threshold": &random.ReadWakeupThreshold, - } { - val, err := util.ReadUintFromFile(fs.proc.Path("sys", "kernel", "random", file)) - if os.IsNotExist(err) { - continue - } - if err != nil { - return random, err - } - *p = &val - } - - return random, nil -} diff --git a/vendor/github.com/prometheus/procfs/loadavg.go b/vendor/github.com/prometheus/procfs/loadavg.go deleted file mode 100644 index 0cce190e..00000000 --- a/vendor/github.com/prometheus/procfs/loadavg.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "fmt" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// LoadAvg represents an entry in /proc/loadavg -type LoadAvg struct { - Load1 float64 - Load5 float64 - Load15 float64 -} - -// LoadAvg returns loadavg from /proc. -func (fs FS) LoadAvg() (*LoadAvg, error) { - path := fs.proc.Path("loadavg") - - data, err := util.ReadFileNoStat(path) - if err != nil { - return nil, err - } - return parseLoad(data) -} - -// Parse /proc loadavg and return 1m, 5m and 15m. 
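/proc/loadavg holds five whitespace-separated fields, for example 0.52 0.75 0.81 2/1037 30483, of which only the first three are load averages; that is why parseLoad below takes parts[0:3]. A worked example of the same extraction:

    package main

    import (
        "fmt"
        "strconv"
        "strings"
    )

    func main() {
        // Representative /proc/loadavg content; the last two fields are the
        // running/total process counts and the most recent PID.
        raw := "0.52 0.75 0.81 2/1037 30483"
        parts := strings.Fields(raw)
        loads := make([]float64, 3)
        for i, p := range parts[:3] {
            v, err := strconv.ParseFloat(p, 64)
            if err != nil {
                panic(err)
            }
            loads[i] = v
        }
        fmt.Printf("1m=%.2f 5m=%.2f 15m=%.2f\n", loads[0], loads[1], loads[2])
    }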
-func parseLoad(loadavgBytes []byte) (*LoadAvg, error) { - loads := make([]float64, 3) - parts := strings.Fields(string(loadavgBytes)) - if len(parts) < 3 { - return nil, fmt.Errorf("malformed loadavg line: too few fields in loadavg string: %q", string(loadavgBytes)) - } - - var err error - for i, load := range parts[0:3] { - loads[i], err = strconv.ParseFloat(load, 64) - if err != nil { - return nil, fmt.Errorf("could not parse load %q: %w", load, err) - } - } - return &LoadAvg{ - Load1: loads[0], - Load5: loads[1], - Load15: loads[2], - }, nil -} diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go deleted file mode 100644 index 4c4493bf..00000000 --- a/vendor/github.com/prometheus/procfs/mdstat.go +++ /dev/null @@ -1,213 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "fmt" - "io/ioutil" - "regexp" - "strconv" - "strings" -) - -var ( - statusLineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`) - recoveryLineRE = regexp.MustCompile(`\((\d+)/\d+\)`) - componentDeviceRE = regexp.MustCompile(`(.*)\[\d+\]`) -) - -// MDStat holds info parsed from /proc/mdstat. -type MDStat struct { - // Name of the device. - Name string - // activity-state of the device. - ActivityState string - // Number of active disks. - DisksActive int64 - // Total number of disks the device requires. - DisksTotal int64 - // Number of failed disks. - DisksFailed int64 - // Spare disks in the device. - DisksSpare int64 - // Number of blocks the device holds. - BlocksTotal int64 - // Number of blocks on the device that are in sync. - BlocksSynced int64 - // Name of md component devices - Devices []string -} - -// MDStat parses an mdstat-file (/proc/mdstat) and returns a slice of -// structs containing the relevant info. More information available here: -// https://raid.wiki.kernel.org/index.php/Mdstat -func (fs FS) MDStat() ([]MDStat, error) { - data, err := ioutil.ReadFile(fs.proc.Path("mdstat")) - if err != nil { - return nil, err - } - mdstat, err := parseMDStat(data) - if err != nil { - return nil, fmt.Errorf("error parsing mdstat %q: %w", fs.proc.Path("mdstat"), err) - } - return mdstat, nil -} - -// parseMDStat parses data from mdstat file (/proc/mdstat) and returns a slice of -// structs containing the relevant info. 
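parseMDStat leans on the three regexes declared at the top of the file: statusLineRE pulls the block count and the [total/active] disk counts out of a status line, recoveryLineRE grabs the synced-block numerator from an (N/M) progress marker, and componentDeviceRE strips the [n] role suffix from component device names. Run against a representative status line (sample values assumed):

    package main

    import (
        "fmt"
        "regexp"
    )

    var statusLineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`)

    func main() {
        // The second line of a typical md entry in /proc/mdstat.
        status := "312319552 blocks [2/2] [UU]"
        m := statusLineRE.FindStringSubmatch(status)
        if m == nil {
            panic("no match")
        }
        fmt.Println("size:", m[1], "total disks:", m[2], "active disks:", m[3])
    }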
-func parseMDStat(mdStatData []byte) ([]MDStat, error) { - mdStats := []MDStat{} - lines := strings.Split(string(mdStatData), "\n") - - for i, line := range lines { - if strings.TrimSpace(line) == "" || line[0] == ' ' || - strings.HasPrefix(line, "Personalities") || - strings.HasPrefix(line, "unused") { - continue - } - - deviceFields := strings.Fields(line) - if len(deviceFields) < 3 { - return nil, fmt.Errorf("not enough fields in mdline (expected at least 3): %s", line) - } - mdName := deviceFields[0] // mdx - state := deviceFields[2] // active or inactive - - if len(lines) <= i+3 { - return nil, fmt.Errorf("error parsing %q: too few lines for md device", mdName) - } - - // Failed disks have the suffix (F) & Spare disks have the suffix (S). - fail := int64(strings.Count(line, "(F)")) - spare := int64(strings.Count(line, "(S)")) - active, total, size, err := evalStatusLine(lines[i], lines[i+1]) - - if err != nil { - return nil, fmt.Errorf("error parsing md device lines: %w", err) - } - - syncLineIdx := i + 2 - if strings.Contains(lines[i+2], "bitmap") { // skip bitmap line - syncLineIdx++ - } - - // If device is syncing at the moment, get the number of currently - // synced bytes, otherwise that number equals the size of the device. - syncedBlocks := size - recovering := strings.Contains(lines[syncLineIdx], "recovery") - resyncing := strings.Contains(lines[syncLineIdx], "resync") - checking := strings.Contains(lines[syncLineIdx], "check") - - // Append recovery and resyncing state info. - if recovering || resyncing || checking { - if recovering { - state = "recovering" - } else if checking { - state = "checking" - } else { - state = "resyncing" - } - - // Handle case when resync=PENDING or resync=DELAYED. - if strings.Contains(lines[syncLineIdx], "PENDING") || - strings.Contains(lines[syncLineIdx], "DELAYED") { - syncedBlocks = 0 - } else { - syncedBlocks, err = evalRecoveryLine(lines[syncLineIdx]) - if err != nil { - return nil, fmt.Errorf("error parsing sync line in md device %q: %w", mdName, err) - } - } - } - - mdStats = append(mdStats, MDStat{ - Name: mdName, - ActivityState: state, - DisksActive: active, - DisksFailed: fail, - DisksSpare: spare, - DisksTotal: total, - BlocksTotal: size, - BlocksSynced: syncedBlocks, - Devices: evalComponentDevices(deviceFields), - }) - } - - return mdStats, nil -} - -func evalStatusLine(deviceLine, statusLine string) (active, total, size int64, err error) { - - sizeStr := strings.Fields(statusLine)[0] - size, err = strconv.ParseInt(sizeStr, 10, 64) - if err != nil { - return 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err) - } - - if strings.Contains(deviceLine, "raid0") || strings.Contains(deviceLine, "linear") { - // In the device deviceLine, only disks have a number associated with them in []. 
- total = int64(strings.Count(deviceLine, "[")) - return total, total, size, nil - } - - if strings.Contains(deviceLine, "inactive") { - return 0, 0, size, nil - } - - matches := statusLineRE.FindStringSubmatch(statusLine) - if len(matches) != 4 { - return 0, 0, 0, fmt.Errorf("couldn't find all the substring matches: %s", statusLine) - } - - total, err = strconv.ParseInt(matches[2], 10, 64) - if err != nil { - return 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err) - } - - active, err = strconv.ParseInt(matches[3], 10, 64) - if err != nil { - return 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err) - } - - return active, total, size, nil -} - -func evalRecoveryLine(recoveryLine string) (syncedBlocks int64, err error) { - matches := recoveryLineRE.FindStringSubmatch(recoveryLine) - if len(matches) != 2 { - return 0, fmt.Errorf("unexpected recoveryLine: %s", recoveryLine) - } - - syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64) - if err != nil { - return 0, fmt.Errorf("error parsing int from recoveryLine %q: %w", recoveryLine, err) - } - - return syncedBlocks, nil -} - -func evalComponentDevices(deviceFields []string) []string { - mdComponentDevices := make([]string, 0) - if len(deviceFields) > 3 { - for _, field := range deviceFields[4:] { - match := componentDeviceRE.FindStringSubmatch(field) - if match == nil { - continue - } - mdComponentDevices = append(mdComponentDevices, match[1]) - } - } - - return mdComponentDevices -} diff --git a/vendor/github.com/prometheus/procfs/meminfo.go b/vendor/github.com/prometheus/procfs/meminfo.go deleted file mode 100644 index f65e174e..00000000 --- a/vendor/github.com/prometheus/procfs/meminfo.go +++ /dev/null @@ -1,277 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "fmt" - "io" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// Meminfo represents memory statistics. -type Meminfo struct { - // Total usable ram (i.e. physical ram minus a few reserved - // bits and the kernel binary code) - MemTotal *uint64 - // The sum of LowFree+HighFree - MemFree *uint64 - // An estimate of how much memory is available for starting - // new applications, without swapping. Calculated from - // MemFree, SReclaimable, the size of the file LRU lists, and - // the low watermarks in each zone. The estimate takes into - // account that the system needs some page cache to function - // well, and that not all reclaimable slab will be - // reclaimable, due to items being in use. The impact of those - // factors will vary from system to system. 
- MemAvailable *uint64 - // Relatively temporary storage for raw disk blocks shouldn't - // get tremendously large (20MB or so) - Buffers *uint64 - Cached *uint64 - // Memory that once was swapped out, is swapped back in but - // still also is in the swapfile (if memory is needed it - // doesn't need to be swapped out AGAIN because it is already - // in the swapfile. This saves I/O) - SwapCached *uint64 - // Memory that has been used more recently and usually not - // reclaimed unless absolutely necessary. - Active *uint64 - // Memory which has been less recently used. It is more - // eligible to be reclaimed for other purposes - Inactive *uint64 - ActiveAnon *uint64 - InactiveAnon *uint64 - ActiveFile *uint64 - InactiveFile *uint64 - Unevictable *uint64 - Mlocked *uint64 - // total amount of swap space available - SwapTotal *uint64 - // Memory which has been evicted from RAM, and is temporarily - // on the disk - SwapFree *uint64 - // Memory which is waiting to get written back to the disk - Dirty *uint64 - // Memory which is actively being written back to the disk - Writeback *uint64 - // Non-file backed pages mapped into userspace page tables - AnonPages *uint64 - // files which have been mapped, such as libraries - Mapped *uint64 - Shmem *uint64 - // in-kernel data structures cache - Slab *uint64 - // Part of Slab, that might be reclaimed, such as caches - SReclaimable *uint64 - // Part of Slab, that cannot be reclaimed on memory pressure - SUnreclaim *uint64 - KernelStack *uint64 - // amount of memory dedicated to the lowest level of page - // tables. - PageTables *uint64 - // NFS pages sent to the server, but not yet committed to - // stable storage - NFSUnstable *uint64 - // Memory used for block device "bounce buffers" - Bounce *uint64 - // Memory used by FUSE for temporary writeback buffers - WritebackTmp *uint64 - // Based on the overcommit ratio ('vm.overcommit_ratio'), - // this is the total amount of memory currently available to - // be allocated on the system. This limit is only adhered to - // if strict overcommit accounting is enabled (mode 2 in - // 'vm.overcommit_memory'). - // The CommitLimit is calculated with the following formula: - // CommitLimit = ([total RAM pages] - [total huge TLB pages]) * - // overcommit_ratio / 100 + [total swap pages] - // For example, on a system with 1G of physical RAM and 7G - // of swap with a `vm.overcommit_ratio` of 30 it would - // yield a CommitLimit of 7.3G. - // For more details, see the memory overcommit documentation - // in vm/overcommit-accounting. - CommitLimit *uint64 - // The amount of memory presently allocated on the system. - // The committed memory is a sum of all of the memory which - // has been allocated by processes, even if it has not been - // "used" by them as of yet. A process which malloc()'s 1G - // of memory, but only touches 300M of it will show up as - // using 1G. This 1G is memory which has been "committed" to - // by the VM and can be used at any time by the allocating - // application. With strict overcommit enabled on the system - // (mode 2 in 'vm.overcommit_memory'),allocations which would - // exceed the CommitLimit (detailed above) will not be permitted. - // This is useful if one needs to guarantee that processes will - // not fail due to lack of memory once that memory has been - // successfully allocated. 
- CommittedAS *uint64 - // total size of vmalloc memory area - VmallocTotal *uint64 - // amount of vmalloc area which is used - VmallocUsed *uint64 - // largest contiguous block of vmalloc area which is free - VmallocChunk *uint64 - HardwareCorrupted *uint64 - AnonHugePages *uint64 - ShmemHugePages *uint64 - ShmemPmdMapped *uint64 - CmaTotal *uint64 - CmaFree *uint64 - HugePagesTotal *uint64 - HugePagesFree *uint64 - HugePagesRsvd *uint64 - HugePagesSurp *uint64 - Hugepagesize *uint64 - DirectMap4k *uint64 - DirectMap2M *uint64 - DirectMap1G *uint64 -} - -// Meminfo returns an information about current kernel/system memory statistics. -// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt -func (fs FS) Meminfo() (Meminfo, error) { - b, err := util.ReadFileNoStat(fs.proc.Path("meminfo")) - if err != nil { - return Meminfo{}, err - } - - m, err := parseMemInfo(bytes.NewReader(b)) - if err != nil { - return Meminfo{}, fmt.Errorf("failed to parse meminfo: %w", err) - } - - return *m, nil -} - -func parseMemInfo(r io.Reader) (*Meminfo, error) { - var m Meminfo - s := bufio.NewScanner(r) - for s.Scan() { - // Each line has at least a name and value; we ignore the unit. - fields := strings.Fields(s.Text()) - if len(fields) < 2 { - return nil, fmt.Errorf("malformed meminfo line: %q", s.Text()) - } - - v, err := strconv.ParseUint(fields[1], 0, 64) - if err != nil { - return nil, err - } - - switch fields[0] { - case "MemTotal:": - m.MemTotal = &v - case "MemFree:": - m.MemFree = &v - case "MemAvailable:": - m.MemAvailable = &v - case "Buffers:": - m.Buffers = &v - case "Cached:": - m.Cached = &v - case "SwapCached:": - m.SwapCached = &v - case "Active:": - m.Active = &v - case "Inactive:": - m.Inactive = &v - case "Active(anon):": - m.ActiveAnon = &v - case "Inactive(anon):": - m.InactiveAnon = &v - case "Active(file):": - m.ActiveFile = &v - case "Inactive(file):": - m.InactiveFile = &v - case "Unevictable:": - m.Unevictable = &v - case "Mlocked:": - m.Mlocked = &v - case "SwapTotal:": - m.SwapTotal = &v - case "SwapFree:": - m.SwapFree = &v - case "Dirty:": - m.Dirty = &v - case "Writeback:": - m.Writeback = &v - case "AnonPages:": - m.AnonPages = &v - case "Mapped:": - m.Mapped = &v - case "Shmem:": - m.Shmem = &v - case "Slab:": - m.Slab = &v - case "SReclaimable:": - m.SReclaimable = &v - case "SUnreclaim:": - m.SUnreclaim = &v - case "KernelStack:": - m.KernelStack = &v - case "PageTables:": - m.PageTables = &v - case "NFS_Unstable:": - m.NFSUnstable = &v - case "Bounce:": - m.Bounce = &v - case "WritebackTmp:": - m.WritebackTmp = &v - case "CommitLimit:": - m.CommitLimit = &v - case "Committed_AS:": - m.CommittedAS = &v - case "VmallocTotal:": - m.VmallocTotal = &v - case "VmallocUsed:": - m.VmallocUsed = &v - case "VmallocChunk:": - m.VmallocChunk = &v - case "HardwareCorrupted:": - m.HardwareCorrupted = &v - case "AnonHugePages:": - m.AnonHugePages = &v - case "ShmemHugePages:": - m.ShmemHugePages = &v - case "ShmemPmdMapped:": - m.ShmemPmdMapped = &v - case "CmaTotal:": - m.CmaTotal = &v - case "CmaFree:": - m.CmaFree = &v - case "HugePages_Total:": - m.HugePagesTotal = &v - case "HugePages_Free:": - m.HugePagesFree = &v - case "HugePages_Rsvd:": - m.HugePagesRsvd = &v - case "HugePages_Surp:": - m.HugePagesSurp = &v - case "Hugepagesize:": - m.Hugepagesize = &v - case "DirectMap4k:": - m.DirectMap4k = &v - case "DirectMap2M:": - m.DirectMap2M = &v - case "DirectMap1G:": - m.DirectMap1G = &v - } - } - - return &m, nil -} diff --git 
a/vendor/github.com/prometheus/procfs/mountinfo.go b/vendor/github.com/prometheus/procfs/mountinfo.go deleted file mode 100644 index 59f4d505..00000000 --- a/vendor/github.com/prometheus/procfs/mountinfo.go +++ /dev/null @@ -1,180 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "fmt" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// A MountInfo is a type that describes the details, options -// for each mount, parsed from /proc/self/mountinfo. -// The fields described in each entry of /proc/self/mountinfo -// is described in the following man page. -// http://man7.org/linux/man-pages/man5/proc.5.html -type MountInfo struct { - // Unique ID for the mount - MountID int - // The ID of the parent mount - ParentID int - // The value of `st_dev` for the files on this FS - MajorMinorVer string - // The pathname of the directory in the FS that forms - // the root for this mount - Root string - // The pathname of the mount point relative to the root - MountPoint string - // Mount options - Options map[string]string - // Zero or more optional fields - OptionalFields map[string]string - // The Filesystem type - FSType string - // FS specific information or "none" - Source string - // Superblock options - SuperOptions map[string]string -} - -// Reads each line of the mountinfo file, and returns a list of formatted MountInfo structs. -func parseMountInfo(info []byte) ([]*MountInfo, error) { - mounts := []*MountInfo{} - scanner := bufio.NewScanner(bytes.NewReader(info)) - for scanner.Scan() { - mountString := scanner.Text() - parsedMounts, err := parseMountInfoString(mountString) - if err != nil { - return nil, err - } - mounts = append(mounts, parsedMounts) - } - - err := scanner.Err() - return mounts, err -} - -// Parses a mountinfo file line, and converts it to a MountInfo struct. -// An important check here is to see if the hyphen separator, as if it does not exist, -// it means that the line is malformed. 
-func parseMountInfoString(mountString string) (*MountInfo, error) { - var err error - - mountInfo := strings.Split(mountString, " ") - mountInfoLength := len(mountInfo) - if mountInfoLength < 10 { - return nil, fmt.Errorf("couldn't find enough fields in mount string: %s", mountString) - } - - if mountInfo[mountInfoLength-4] != "-" { - return nil, fmt.Errorf("couldn't find separator in expected field: %s", mountInfo[mountInfoLength-4]) - } - - mount := &MountInfo{ - MajorMinorVer: mountInfo[2], - Root: mountInfo[3], - MountPoint: mountInfo[4], - Options: mountOptionsParser(mountInfo[5]), - OptionalFields: nil, - FSType: mountInfo[mountInfoLength-3], - Source: mountInfo[mountInfoLength-2], - SuperOptions: mountOptionsParser(mountInfo[mountInfoLength-1]), - } - - mount.MountID, err = strconv.Atoi(mountInfo[0]) - if err != nil { - return nil, fmt.Errorf("failed to parse mount ID") - } - mount.ParentID, err = strconv.Atoi(mountInfo[1]) - if err != nil { - return nil, fmt.Errorf("failed to parse parent ID") - } - // Has optional fields, which is a space separated list of values. - // Example: shared:2 master:7 - if mountInfo[6] != "" { - mount.OptionalFields, err = mountOptionsParseOptionalFields(mountInfo[6 : mountInfoLength-4]) - if err != nil { - return nil, err - } - } - return mount, nil -} - -// mountOptionsIsValidField checks a string against a valid list of optional fields keys. -func mountOptionsIsValidField(s string) bool { - switch s { - case - "shared", - "master", - "propagate_from", - "unbindable": - return true - } - return false -} - -// mountOptionsParseOptionalFields parses a list of optional fields strings into a double map of strings. -func mountOptionsParseOptionalFields(o []string) (map[string]string, error) { - optionalFields := make(map[string]string) - for _, field := range o { - optionSplit := strings.SplitN(field, ":", 2) - value := "" - if len(optionSplit) == 2 { - value = optionSplit[1] - } - if mountOptionsIsValidField(optionSplit[0]) { - optionalFields[optionSplit[0]] = value - } - } - return optionalFields, nil -} - -// mountOptionsParser parses the mount options, superblock options. -func mountOptionsParser(mountOptions string) map[string]string { - opts := make(map[string]string) - options := strings.Split(mountOptions, ",") - for _, opt := range options { - splitOption := strings.Split(opt, "=") - if len(splitOption) < 2 { - key := splitOption[0] - opts[key] = "" - } else { - key, value := splitOption[0], splitOption[1] - opts[key] = value - } - } - return opts -} - -// GetMounts retrieves mountinfo information from `/proc/self/mountinfo`. -func GetMounts() ([]*MountInfo, error) { - data, err := util.ReadFileNoStat("/proc/self/mountinfo") - if err != nil { - return nil, err - } - return parseMountInfo(data) -} - -// GetProcMounts retrieves mountinfo information from a processes' `/proc//mountinfo`. -func GetProcMounts(pid int) ([]*MountInfo, error) { - data, err := util.ReadFileNoStat(fmt.Sprintf("/proc/%d/mountinfo", pid)) - if err != nil { - return nil, err - } - return parseMountInfo(data) -} diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go deleted file mode 100644 index f7a828bb..00000000 --- a/vendor/github.com/prometheus/procfs/mountstats.go +++ /dev/null @@ -1,638 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
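The hyphen check in the mountinfo parser deleted above is the load-bearing part of this format: the optional-fields run is variable length, so fstype, source and super options can only be located by counting back from the "-" separator. A minimal sketch of that anchoring, using the sample line from the proc(5) man page:

```go
package main

import (
	"fmt"
	"strings"
)

// locateMountFields shows how the deleted parser anchors on "-":
// the fields before it are fixed positions plus a variable run of
// optional fields, and the three fields after it are fstype, source
// and super options.
func locateMountFields(line string) (fstype, source, superOpts string, err error) {
	fields := strings.Fields(line)
	if len(fields) < 10 {
		return "", "", "", fmt.Errorf("too few fields: %q", line)
	}
	if fields[len(fields)-4] != "-" {
		return "", "", "", fmt.Errorf("missing hyphen separator: %q", line)
	}
	return fields[len(fields)-3], fields[len(fields)-2], fields[len(fields)-1], nil
}

func main() {
	// Sample mountinfo line with one optional field (shared:2).
	line := "36 35 98:0 /mnt1 /mnt2 rw,noatime shared:2 - ext3 /dev/root rw,errors=continue"
	fstype, source, superOpts, err := locateMountFields(line)
	if err != nil {
		panic(err)
	}
	fmt.Println(fstype, source, superOpts) // ext3 /dev/root rw,errors=continue
}
```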
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -// While implementing parsing of /proc/[pid]/mountstats, this blog was used -// heavily as a reference: -// https://utcc.utoronto.ca/~cks/space/blog/linux/NFSMountstatsIndex -// -// Special thanks to Chris Siebenmann for all of his posts explaining the -// various statistics available for NFS. - -import ( - "bufio" - "fmt" - "io" - "strconv" - "strings" - "time" -) - -// Constants shared between multiple functions. -const ( - deviceEntryLen = 8 - - fieldBytesLen = 8 - fieldEventsLen = 27 - - statVersion10 = "1.0" - statVersion11 = "1.1" - - fieldTransport10TCPLen = 10 - fieldTransport10UDPLen = 7 - - fieldTransport11TCPLen = 13 - fieldTransport11UDPLen = 10 -) - -// A Mount is a device mount parsed from /proc/[pid]/mountstats. -type Mount struct { - // Name of the device. - Device string - // The mount point of the device. - Mount string - // The filesystem type used by the device. - Type string - // If available additional statistics related to this Mount. - // Use a type assertion to determine if additional statistics are available. - Stats MountStats -} - -// A MountStats is a type which contains detailed statistics for a specific -// type of Mount. -type MountStats interface { - mountStats() -} - -// A MountStatsNFS is a MountStats implementation for NFSv3 and v4 mounts. -type MountStatsNFS struct { - // The version of statistics provided. - StatVersion string - // The mount options of the NFS mount. - Opts map[string]string - // The age of the NFS mount. - Age time.Duration - // Statistics related to byte counters for various operations. - Bytes NFSBytesStats - // Statistics related to various NFS event occurrences. - Events NFSEventsStats - // Statistics broken down by filesystem operation. - Operations []NFSOperationStats - // Statistics about the NFS RPC transport. - Transport NFSTransportStats -} - -// mountStats implements MountStats. -func (m MountStatsNFS) mountStats() {} - -// A NFSBytesStats contains statistics about the number of bytes read and written -// by an NFS client to and from an NFS server. -type NFSBytesStats struct { - // Number of bytes read using the read() syscall. - Read uint64 - // Number of bytes written using the write() syscall. - Write uint64 - // Number of bytes read using the read() syscall in O_DIRECT mode. - DirectRead uint64 - // Number of bytes written using the write() syscall in O_DIRECT mode. - DirectWrite uint64 - // Number of bytes read from the NFS server, in total. - ReadTotal uint64 - // Number of bytes written to the NFS server, in total. - WriteTotal uint64 - // Number of pages read directly via mmap()'d files. - ReadPages uint64 - // Number of pages written directly via mmap()'d files. - WritePages uint64 -} - -// A NFSEventsStats contains statistics about NFS event occurrences. -type NFSEventsStats struct { - // Number of times cached inode attributes are re-validated from the server. - InodeRevalidate uint64 - // Number of times cached dentry nodes are re-validated from the server. - DnodeRevalidate uint64 - // Number of times an inode cache is cleared. 
- DataInvalidate uint64 - // Number of times cached inode attributes are invalidated. - AttributeInvalidate uint64 - // Number of times files or directories have been open()'d. - VFSOpen uint64 - // Number of times a directory lookup has occurred. - VFSLookup uint64 - // Number of times permissions have been checked. - VFSAccess uint64 - // Number of updates (and potential writes) to pages. - VFSUpdatePage uint64 - // Number of pages read directly via mmap()'d files. - VFSReadPage uint64 - // Number of times a group of pages have been read. - VFSReadPages uint64 - // Number of pages written directly via mmap()'d files. - VFSWritePage uint64 - // Number of times a group of pages have been written. - VFSWritePages uint64 - // Number of times directory entries have been read with getdents(). - VFSGetdents uint64 - // Number of times attributes have been set on inodes. - VFSSetattr uint64 - // Number of pending writes that have been forcefully flushed to the server. - VFSFlush uint64 - // Number of times fsync() has been called on directories and files. - VFSFsync uint64 - // Number of times locking has been attempted on a file. - VFSLock uint64 - // Number of times files have been closed and released. - VFSFileRelease uint64 - // Unknown. Possibly unused. - CongestionWait uint64 - // Number of times files have been truncated. - Truncation uint64 - // Number of times a file has been grown due to writes beyond its existing end. - WriteExtension uint64 - // Number of times a file was removed while still open by another process. - SillyRename uint64 - // Number of times the NFS server gave less data than expected while reading. - ShortRead uint64 - // Number of times the NFS server wrote less data than expected while writing. - ShortWrite uint64 - // Number of times the NFS server indicated EJUKEBOX; retrieving data from - // offline storage. - JukeboxDelay uint64 - // Number of NFS v4.1+ pNFS reads. - PNFSRead uint64 - // Number of NFS v4.1+ pNFS writes. - PNFSWrite uint64 -} - -// A NFSOperationStats contains statistics for a single operation. -type NFSOperationStats struct { - // The name of the operation. - Operation string - // Number of requests performed for this operation. - Requests uint64 - // Number of times an actual RPC request has been transmitted for this operation. - Transmissions uint64 - // Number of times a request has had a major timeout. - MajorTimeouts uint64 - // Number of bytes sent for this operation, including RPC headers and payload. - BytesSent uint64 - // Number of bytes received for this operation, including RPC headers and payload. - BytesReceived uint64 - // Duration all requests spent queued for transmission before they were sent. - CumulativeQueueMilliseconds uint64 - // Duration it took to get a reply back after the request was transmitted. - CumulativeTotalResponseMilliseconds uint64 - // Duration from when a request was enqueued to when it was completely handled. - CumulativeTotalRequestMilliseconds uint64 - // The count of operations that complete with tk_status < 0. These statuses usually indicate error conditions. - Errors uint64 -} - -// A NFSTransportStats contains statistics for the NFS mount RPC requests and -// responses. -type NFSTransportStats struct { - // The transport protocol used for the NFS mount. - Protocol string - // The local port used for the NFS mount. - Port uint64 - // Number of times the client has had to establish a connection from scratch - // to the NFS server. 
- Bind uint64 - // Number of times the client has made a TCP connection to the NFS server. - Connect uint64 - // Duration (in jiffies, a kernel internal unit of time) the NFS mount has - // spent waiting for connections to the server to be established. - ConnectIdleTime uint64 - // Duration since the NFS mount last saw any RPC traffic. - IdleTimeSeconds uint64 - // Number of RPC requests for this mount sent to the NFS server. - Sends uint64 - // Number of RPC responses for this mount received from the NFS server. - Receives uint64 - // Number of times the NFS server sent a response with a transaction ID - // unknown to this client. - BadTransactionIDs uint64 - // A running counter, incremented on each request as the current difference - // ebetween sends and receives. - CumulativeActiveRequests uint64 - // A running counter, incremented on each request by the current backlog - // queue size. - CumulativeBacklog uint64 - - // Stats below only available with stat version 1.1. - - // Maximum number of simultaneously active RPC requests ever used. - MaximumRPCSlotsUsed uint64 - // A running counter, incremented on each request as the current size of the - // sending queue. - CumulativeSendingQueue uint64 - // A running counter, incremented on each request as the current size of the - // pending queue. - CumulativePendingQueue uint64 -} - -// parseMountStats parses a /proc/[pid]/mountstats file and returns a slice -// of Mount structures containing detailed information about each mount. -// If available, statistics for each mount are parsed as well. -func parseMountStats(r io.Reader) ([]*Mount, error) { - const ( - device = "device" - statVersionPrefix = "statvers=" - - nfs3Type = "nfs" - nfs4Type = "nfs4" - ) - - var mounts []*Mount - - s := bufio.NewScanner(r) - for s.Scan() { - // Only look for device entries in this function - ss := strings.Fields(string(s.Bytes())) - if len(ss) == 0 || ss[0] != device { - continue - } - - m, err := parseMount(ss) - if err != nil { - return nil, err - } - - // Does this mount also possess statistics information? - if len(ss) > deviceEntryLen { - // Only NFSv3 and v4 are supported for parsing statistics - if m.Type != nfs3Type && m.Type != nfs4Type { - return nil, fmt.Errorf("cannot parse MountStats for fstype %q", m.Type) - } - - statVersion := strings.TrimPrefix(ss[8], statVersionPrefix) - - stats, err := parseMountStatsNFS(s, statVersion) - if err != nil { - return nil, err - } - - m.Stats = stats - } - - mounts = append(mounts, m) - } - - return mounts, s.Err() -} - -// parseMount parses an entry in /proc/[pid]/mountstats in the format: -// device [device] mounted on [mount] with fstype [type] -func parseMount(ss []string) (*Mount, error) { - if len(ss) < deviceEntryLen { - return nil, fmt.Errorf("invalid device entry: %v", ss) - } - - // Check for specific words appearing at specific indices to ensure - // the format is consistent with what we expect - format := []struct { - i int - s string - }{ - {i: 0, s: "device"}, - {i: 2, s: "mounted"}, - {i: 3, s: "on"}, - {i: 5, s: "with"}, - {i: 6, s: "fstype"}, - } - - for _, f := range format { - if ss[f.i] != f.s { - return nil, fmt.Errorf("invalid device entry: %v", ss) - } - } - - return &Mount{ - Device: ss[1], - Mount: ss[4], - Type: ss[7], - }, nil -} - -// parseMountStatsNFS parses a MountStatsNFS by scanning additional information -// related to NFS statistics. 
-func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, error) { - // Field indicators for parsing specific types of data - const ( - fieldOpts = "opts:" - fieldAge = "age:" - fieldBytes = "bytes:" - fieldEvents = "events:" - fieldPerOpStats = "per-op" - fieldTransport = "xprt:" - ) - - stats := &MountStatsNFS{ - StatVersion: statVersion, - } - - for s.Scan() { - ss := strings.Fields(string(s.Bytes())) - if len(ss) == 0 { - break - } - - switch ss[0] { - case fieldOpts: - if len(ss) < 2 { - return nil, fmt.Errorf("not enough information for NFS stats: %v", ss) - } - if stats.Opts == nil { - stats.Opts = map[string]string{} - } - for _, opt := range strings.Split(ss[1], ",") { - split := strings.Split(opt, "=") - if len(split) == 2 { - stats.Opts[split[0]] = split[1] - } else { - stats.Opts[opt] = "" - } - } - case fieldAge: - if len(ss) < 2 { - return nil, fmt.Errorf("not enough information for NFS stats: %v", ss) - } - // Age integer is in seconds - d, err := time.ParseDuration(ss[1] + "s") - if err != nil { - return nil, err - } - - stats.Age = d - case fieldBytes: - if len(ss) < 2 { - return nil, fmt.Errorf("not enough information for NFS stats: %v", ss) - } - bstats, err := parseNFSBytesStats(ss[1:]) - if err != nil { - return nil, err - } - - stats.Bytes = *bstats - case fieldEvents: - if len(ss) < 2 { - return nil, fmt.Errorf("not enough information for NFS stats: %v", ss) - } - estats, err := parseNFSEventsStats(ss[1:]) - if err != nil { - return nil, err - } - - stats.Events = *estats - case fieldTransport: - if len(ss) < 3 { - return nil, fmt.Errorf("not enough information for NFS transport stats: %v", ss) - } - - tstats, err := parseNFSTransportStats(ss[1:], statVersion) - if err != nil { - return nil, err - } - - stats.Transport = *tstats - } - - // When encountering "per-operation statistics", we must break this - // loop and parse them separately to ensure we can terminate parsing - // before reaching another device entry; hence why this 'if' statement - // is not just another switch case - if ss[0] == fieldPerOpStats { - break - } - } - - if err := s.Err(); err != nil { - return nil, err - } - - // NFS per-operation stats appear last before the next device entry - perOpStats, err := parseNFSOperationStats(s) - if err != nil { - return nil, err - } - - stats.Operations = perOpStats - - return stats, nil -} - -// parseNFSBytesStats parses a NFSBytesStats line using an input set of -// integer fields. -func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) { - if len(ss) != fieldBytesLen { - return nil, fmt.Errorf("invalid NFS bytes stats: %v", ss) - } - - ns := make([]uint64, 0, fieldBytesLen) - for _, s := range ss { - n, err := strconv.ParseUint(s, 10, 64) - if err != nil { - return nil, err - } - - ns = append(ns, n) - } - - return &NFSBytesStats{ - Read: ns[0], - Write: ns[1], - DirectRead: ns[2], - DirectWrite: ns[3], - ReadTotal: ns[4], - WriteTotal: ns[5], - ReadPages: ns[6], - WritePages: ns[7], - }, nil -} - -// parseNFSEventsStats parses a NFSEventsStats line using an input set of -// integer fields. 
-func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) { - if len(ss) != fieldEventsLen { - return nil, fmt.Errorf("invalid NFS events stats: %v", ss) - } - - ns := make([]uint64, 0, fieldEventsLen) - for _, s := range ss { - n, err := strconv.ParseUint(s, 10, 64) - if err != nil { - return nil, err - } - - ns = append(ns, n) - } - - return &NFSEventsStats{ - InodeRevalidate: ns[0], - DnodeRevalidate: ns[1], - DataInvalidate: ns[2], - AttributeInvalidate: ns[3], - VFSOpen: ns[4], - VFSLookup: ns[5], - VFSAccess: ns[6], - VFSUpdatePage: ns[7], - VFSReadPage: ns[8], - VFSReadPages: ns[9], - VFSWritePage: ns[10], - VFSWritePages: ns[11], - VFSGetdents: ns[12], - VFSSetattr: ns[13], - VFSFlush: ns[14], - VFSFsync: ns[15], - VFSLock: ns[16], - VFSFileRelease: ns[17], - CongestionWait: ns[18], - Truncation: ns[19], - WriteExtension: ns[20], - SillyRename: ns[21], - ShortRead: ns[22], - ShortWrite: ns[23], - JukeboxDelay: ns[24], - PNFSRead: ns[25], - PNFSWrite: ns[26], - }, nil -} - -// parseNFSOperationStats parses a slice of NFSOperationStats by scanning -// additional information about per-operation statistics until an empty -// line is reached. -func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) { - const ( - // Minimum number of expected fields in each per-operation statistics set - minFields = 9 - ) - - var ops []NFSOperationStats - - for s.Scan() { - ss := strings.Fields(string(s.Bytes())) - if len(ss) == 0 { - // Must break when reading a blank line after per-operation stats to - // enable top-level function to parse the next device entry - break - } - - if len(ss) < minFields { - return nil, fmt.Errorf("invalid NFS per-operations stats: %v", ss) - } - - // Skip string operation name for integers - ns := make([]uint64, 0, minFields-1) - for _, st := range ss[1:] { - n, err := strconv.ParseUint(st, 10, 64) - if err != nil { - return nil, err - } - - ns = append(ns, n) - } - - opStats := NFSOperationStats{ - Operation: strings.TrimSuffix(ss[0], ":"), - Requests: ns[0], - Transmissions: ns[1], - MajorTimeouts: ns[2], - BytesSent: ns[3], - BytesReceived: ns[4], - CumulativeQueueMilliseconds: ns[5], - CumulativeTotalResponseMilliseconds: ns[6], - CumulativeTotalRequestMilliseconds: ns[7], - } - - if len(ns) > 8 { - opStats.Errors = ns[8] - } - - ops = append(ops, opStats) - } - - return ops, s.Err() -} - -// parseNFSTransportStats parses a NFSTransportStats line using an input set of -// integer fields matched to a specific stats version. -func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats, error) { - // Extract the protocol field. 
It is the only string value in the line - protocol := ss[0] - ss = ss[1:] - - switch statVersion { - case statVersion10: - var expectedLength int - if protocol == "tcp" { - expectedLength = fieldTransport10TCPLen - } else if protocol == "udp" { - expectedLength = fieldTransport10UDPLen - } else { - return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.0 statement: %v", protocol, ss) - } - if len(ss) != expectedLength { - return nil, fmt.Errorf("invalid NFS transport stats 1.0 statement: %v", ss) - } - case statVersion11: - var expectedLength int - if protocol == "tcp" { - expectedLength = fieldTransport11TCPLen - } else if protocol == "udp" { - expectedLength = fieldTransport11UDPLen - } else { - return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.1 statement: %v", protocol, ss) - } - if len(ss) != expectedLength { - return nil, fmt.Errorf("invalid NFS transport stats 1.1 statement: %v", ss) - } - default: - return nil, fmt.Errorf("unrecognized NFS transport stats version: %q", statVersion) - } - - // Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay - // in a v1.0 response. Since the stat length is bigger for TCP stats, we use - // the TCP length here. - // - // Note: slice length must be set to length of v1.1 stats to avoid a panic when - // only v1.0 stats are present. - // See: https://github.com/prometheus/node_exporter/issues/571. - ns := make([]uint64, fieldTransport11TCPLen) - for i, s := range ss { - n, err := strconv.ParseUint(s, 10, 64) - if err != nil { - return nil, err - } - - ns[i] = n - } - - // The fields differ depending on the transport protocol (TCP or UDP) - // From https://utcc.utoronto.ca/%7Ecks/space/blog/linux/NFSMountstatsXprt - // - // For the udp RPC transport there is no connection count, connect idle time, - // or idle time (fields #3, #4, and #5); all other fields are the same. So - // we set them to 0 here. - if protocol == "udp" { - ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...) - } - - return &NFSTransportStats{ - Protocol: protocol, - Port: ns[0], - Bind: ns[1], - Connect: ns[2], - ConnectIdleTime: ns[3], - IdleTimeSeconds: ns[4], - Sends: ns[5], - Receives: ns[6], - BadTransactionIDs: ns[7], - CumulativeActiveRequests: ns[8], - CumulativeBacklog: ns[9], - MaximumRPCSlotsUsed: ns[10], - CumulativeSendingQueue: ns[11], - CumulativePendingQueue: ns[12], - }, nil -} diff --git a/vendor/github.com/prometheus/procfs/net_conntrackstat.go b/vendor/github.com/prometheus/procfs/net_conntrackstat.go deleted file mode 100644 index 9964a360..00000000 --- a/vendor/github.com/prometheus/procfs/net_conntrackstat.go +++ /dev/null @@ -1,153 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
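The slice surgery at the end of the transport-stats parser deleted above deserves a second look: for UDP the kernel omits three fields that are present for TCP, and the parser splices zeros into their positions so a single field layout serves both protocols. A worked sketch of that splice, with assumed sample values:

```go
package main

import "fmt"

func main() {
	// Assumed v1.0 UDP xprt values, for illustration: the seven
	// fields are port, bind, then sends/receives and so on. UDP
	// lines carry no connect count, connect idle time or idle time.
	ns := []uint64{849, 1, 2, 3, 4, 5, 6}

	// The deleted parser splices three zeros in after the first two
	// values so the slice lines up with the TCP/v1.1 field layout.
	// The inner append copies into a fresh backing array (make gives
	// len == cap == 3), so the splice cannot clobber ns[2:].
	ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...)

	fmt.Println(ns) // [849 1 0 0 0 2 3 4 5 6]
}
```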
- -package procfs - -import ( - "bufio" - "bytes" - "fmt" - "io" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// A ConntrackStatEntry represents one line from net/stat/nf_conntrack -// and contains netfilter conntrack statistics at one CPU core -type ConntrackStatEntry struct { - Entries uint64 - Found uint64 - Invalid uint64 - Ignore uint64 - Insert uint64 - InsertFailed uint64 - Drop uint64 - EarlyDrop uint64 - SearchRestart uint64 -} - -// ConntrackStat retrieves netfilter's conntrack statistics, split by CPU cores -func (fs FS) ConntrackStat() ([]ConntrackStatEntry, error) { - return readConntrackStat(fs.proc.Path("net", "stat", "nf_conntrack")) -} - -// Parses a slice of ConntrackStatEntries from the given filepath -func readConntrackStat(path string) ([]ConntrackStatEntry, error) { - // This file is small and can be read with one syscall. - b, err := util.ReadFileNoStat(path) - if err != nil { - // Do not wrap this error so the caller can detect os.IsNotExist and - // similar conditions. - return nil, err - } - - stat, err := parseConntrackStat(bytes.NewReader(b)) - if err != nil { - return nil, fmt.Errorf("failed to read conntrack stats from %q: %w", path, err) - } - - return stat, nil -} - -// Reads the contents of a conntrack statistics file and parses a slice of ConntrackStatEntries -func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) { - var entries []ConntrackStatEntry - - scanner := bufio.NewScanner(r) - scanner.Scan() - for scanner.Scan() { - fields := strings.Fields(scanner.Text()) - conntrackEntry, err := parseConntrackStatEntry(fields) - if err != nil { - return nil, err - } - entries = append(entries, *conntrackEntry) - } - - return entries, nil -} - -// Parses a ConntrackStatEntry from given array of fields -func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) { - if len(fields) != 17 { - return nil, fmt.Errorf("invalid conntrackstat entry, missing fields") - } - entry := &ConntrackStatEntry{} - - entries, err := parseConntrackStatField(fields[0]) - if err != nil { - return nil, err - } - entry.Entries = entries - - found, err := parseConntrackStatField(fields[2]) - if err != nil { - return nil, err - } - entry.Found = found - - invalid, err := parseConntrackStatField(fields[4]) - if err != nil { - return nil, err - } - entry.Invalid = invalid - - ignore, err := parseConntrackStatField(fields[5]) - if err != nil { - return nil, err - } - entry.Ignore = ignore - - insert, err := parseConntrackStatField(fields[8]) - if err != nil { - return nil, err - } - entry.Insert = insert - - insertFailed, err := parseConntrackStatField(fields[9]) - if err != nil { - return nil, err - } - entry.InsertFailed = insertFailed - - drop, err := parseConntrackStatField(fields[10]) - if err != nil { - return nil, err - } - entry.Drop = drop - - earlyDrop, err := parseConntrackStatField(fields[11]) - if err != nil { - return nil, err - } - entry.EarlyDrop = earlyDrop - - searchRestart, err := parseConntrackStatField(fields[16]) - if err != nil { - return nil, err - } - entry.SearchRestart = searchRestart - - return entry, nil -} - -// Parses a uint64 from given hex in string -func parseConntrackStatField(field string) (uint64, error) { - val, err := strconv.ParseUint(field, 16, 64) - if err != nil { - return 0, fmt.Errorf("couldn't parse %q field: %w", field, err) - } - return val, err -} diff --git a/vendor/github.com/prometheus/procfs/net_dev.go b/vendor/github.com/prometheus/procfs/net_dev.go deleted file mode 100644 
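A small but important detail in the conntrack parser above: the kernel prints every column of net/stat/nf_conntrack in hexadecimal, which is why the deleted field parser calls ParseUint with base 16. A sketch of just that conversion, with assumed sample columns:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	// Assumed sample columns from /proc/net/stat/nf_conntrack.
	// "0000000e" means 14 entries, not fourteen decimal digits.
	for _, field := range []string{"0000000e", "0000004a"} {
		v, err := strconv.ParseUint(field, 16, 64)
		if err != nil {
			panic(err)
		}
		fmt.Println(v) // 14, then 74
	}
}
```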
index 47a710be..00000000 --- a/vendor/github.com/prometheus/procfs/net_dev.go +++ /dev/null @@ -1,205 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "errors" - "os" - "sort" - "strconv" - "strings" -) - -// NetDevLine is single line parsed from /proc/net/dev or /proc/[pid]/net/dev. -type NetDevLine struct { - Name string `json:"name"` // The name of the interface. - RxBytes uint64 `json:"rx_bytes"` // Cumulative count of bytes received. - RxPackets uint64 `json:"rx_packets"` // Cumulative count of packets received. - RxErrors uint64 `json:"rx_errors"` // Cumulative count of receive errors encountered. - RxDropped uint64 `json:"rx_dropped"` // Cumulative count of packets dropped while receiving. - RxFIFO uint64 `json:"rx_fifo"` // Cumulative count of FIFO buffer errors. - RxFrame uint64 `json:"rx_frame"` // Cumulative count of packet framing errors. - RxCompressed uint64 `json:"rx_compressed"` // Cumulative count of compressed packets received by the device driver. - RxMulticast uint64 `json:"rx_multicast"` // Cumulative count of multicast frames received by the device driver. - TxBytes uint64 `json:"tx_bytes"` // Cumulative count of bytes transmitted. - TxPackets uint64 `json:"tx_packets"` // Cumulative count of packets transmitted. - TxErrors uint64 `json:"tx_errors"` // Cumulative count of transmit errors encountered. - TxDropped uint64 `json:"tx_dropped"` // Cumulative count of packets dropped while transmitting. - TxFIFO uint64 `json:"tx_fifo"` // Cumulative count of FIFO buffer errors. - TxCollisions uint64 `json:"tx_collisions"` // Cumulative count of collisions detected on the interface. - TxCarrier uint64 `json:"tx_carrier"` // Cumulative count of carrier losses detected by the device driver. - TxCompressed uint64 `json:"tx_compressed"` // Cumulative count of compressed packets transmitted by the device driver. -} - -// NetDev is parsed from /proc/net/dev or /proc/[pid]/net/dev. The map keys -// are interface names. -type NetDev map[string]NetDevLine - -// NetDev returns kernel/system statistics read from /proc/net/dev. -func (fs FS) NetDev() (NetDev, error) { - return newNetDev(fs.proc.Path("net/dev")) -} - -// NetDev returns kernel/system statistics read from /proc/[pid]/net/dev. -func (p Proc) NetDev() (NetDev, error) { - return newNetDev(p.path("net/dev")) -} - -// newNetDev creates a new NetDev from the contents of the given file. -func newNetDev(file string) (NetDev, error) { - f, err := os.Open(file) - if err != nil { - return NetDev{}, err - } - defer f.Close() - - netDev := NetDev{} - s := bufio.NewScanner(f) - for n := 0; s.Scan(); n++ { - // Skip the 2 header lines. - if n < 2 { - continue - } - - line, err := netDev.parseLine(s.Text()) - if err != nil { - return netDev, err - } - - netDev[line.Name] = *line - } - - return netDev, s.Err() -} - -// parseLine parses a single line from the /proc/net/dev file. Header lines -// must be filtered prior to calling this method. 
-func (netDev NetDev) parseLine(rawLine string) (*NetDevLine, error) { - parts := strings.SplitN(rawLine, ":", 2) - if len(parts) != 2 { - return nil, errors.New("invalid net/dev line, missing colon") - } - fields := strings.Fields(strings.TrimSpace(parts[1])) - - var err error - line := &NetDevLine{} - - // Interface Name - line.Name = strings.TrimSpace(parts[0]) - if line.Name == "" { - return nil, errors.New("invalid net/dev line, empty interface name") - } - - // RX - line.RxBytes, err = strconv.ParseUint(fields[0], 10, 64) - if err != nil { - return nil, err - } - line.RxPackets, err = strconv.ParseUint(fields[1], 10, 64) - if err != nil { - return nil, err - } - line.RxErrors, err = strconv.ParseUint(fields[2], 10, 64) - if err != nil { - return nil, err - } - line.RxDropped, err = strconv.ParseUint(fields[3], 10, 64) - if err != nil { - return nil, err - } - line.RxFIFO, err = strconv.ParseUint(fields[4], 10, 64) - if err != nil { - return nil, err - } - line.RxFrame, err = strconv.ParseUint(fields[5], 10, 64) - if err != nil { - return nil, err - } - line.RxCompressed, err = strconv.ParseUint(fields[6], 10, 64) - if err != nil { - return nil, err - } - line.RxMulticast, err = strconv.ParseUint(fields[7], 10, 64) - if err != nil { - return nil, err - } - - // TX - line.TxBytes, err = strconv.ParseUint(fields[8], 10, 64) - if err != nil { - return nil, err - } - line.TxPackets, err = strconv.ParseUint(fields[9], 10, 64) - if err != nil { - return nil, err - } - line.TxErrors, err = strconv.ParseUint(fields[10], 10, 64) - if err != nil { - return nil, err - } - line.TxDropped, err = strconv.ParseUint(fields[11], 10, 64) - if err != nil { - return nil, err - } - line.TxFIFO, err = strconv.ParseUint(fields[12], 10, 64) - if err != nil { - return nil, err - } - line.TxCollisions, err = strconv.ParseUint(fields[13], 10, 64) - if err != nil { - return nil, err - } - line.TxCarrier, err = strconv.ParseUint(fields[14], 10, 64) - if err != nil { - return nil, err - } - line.TxCompressed, err = strconv.ParseUint(fields[15], 10, 64) - if err != nil { - return nil, err - } - - return line, nil -} - -// Total aggregates the values across interfaces and returns a new NetDevLine. -// The Name field will be a sorted comma separated list of interface names. -func (netDev NetDev) Total() NetDevLine { - total := NetDevLine{} - - names := make([]string, 0, len(netDev)) - for _, ifc := range netDev { - names = append(names, ifc.Name) - total.RxBytes += ifc.RxBytes - total.RxPackets += ifc.RxPackets - total.RxErrors += ifc.RxErrors - total.RxDropped += ifc.RxDropped - total.RxFIFO += ifc.RxFIFO - total.RxFrame += ifc.RxFrame - total.RxCompressed += ifc.RxCompressed - total.RxMulticast += ifc.RxMulticast - total.TxBytes += ifc.TxBytes - total.TxPackets += ifc.TxPackets - total.TxErrors += ifc.TxErrors - total.TxDropped += ifc.TxDropped - total.TxFIFO += ifc.TxFIFO - total.TxCollisions += ifc.TxCollisions - total.TxCarrier += ifc.TxCarrier - total.TxCompressed += ifc.TxCompressed - } - sort.Strings(names) - total.Name = strings.Join(names, ", ") - - return total -} diff --git a/vendor/github.com/prometheus/procfs/net_ip_socket.go b/vendor/github.com/prometheus/procfs/net_ip_socket.go deleted file mode 100644 index ac01dd84..00000000 --- a/vendor/github.com/prometheus/procfs/net_ip_socket.go +++ /dev/null @@ -1,220 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
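For the net/dev parser deleted above, the input shape is: interface name, a colon, then sixteen decimal counters (eight RX, eight TX, with rx_bytes first and tx_bytes ninth). A trimmed-down sketch that pulls out just the two byte counters, with an assumed sample line:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseNetDevLine is a cut-down version of the deleted parseLine:
// split the interface name off at the colon, then read the counters
// positionally (field 0 is rx_bytes, field 8 is tx_bytes).
func parseNetDevLine(raw string) (name string, rxBytes, txBytes uint64, err error) {
	parts := strings.SplitN(raw, ":", 2)
	if len(parts) != 2 {
		return "", 0, 0, fmt.Errorf("missing colon: %q", raw)
	}
	name = strings.TrimSpace(parts[0])
	fields := strings.Fields(parts[1])
	if len(fields) < 16 {
		return "", 0, 0, fmt.Errorf("too few counters: %q", raw)
	}
	if rxBytes, err = strconv.ParseUint(fields[0], 10, 64); err != nil {
		return "", 0, 0, err
	}
	if txBytes, err = strconv.ParseUint(fields[8], 10, 64); err != nil {
		return "", 0, 0, err
	}
	return name, rxBytes, txBytes, nil
}

func main() {
	// Assumed sample /proc/net/dev line.
	line := "  eth0: 1234567 2134 0 0 0 0 0 0 7654321 1952 0 0 0 0 0 0"
	name, rx, tx, err := parseNetDevLine(line)
	if err != nil {
		panic(err)
	}
	fmt.Println(name, rx, tx) // eth0 1234567 7654321
}
```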
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "encoding/hex" - "fmt" - "io" - "net" - "os" - "strconv" - "strings" -) - -const ( - // readLimit is used by io.LimitReader while reading the content of the - // /proc/net/udp{,6} files. The number of lines inside such a file is dynamic - // as each line represents a single used socket. - // In theory, the number of available sockets is 65535 (2^16 - 1) per IP. - // With e.g. 150 Byte per line and the maximum number of 65535, - // the reader needs to handle 150 Byte * 65535 =~ 10 MB for a single IP. - readLimit = 4294967296 // Byte -> 4 GiB -) - -// this contains generic data structures for both udp and tcp sockets -type ( - // NetIPSocket represents the contents of /proc/net/{t,u}dp{,6} file without the header. - NetIPSocket []*netIPSocketLine - - // NetIPSocketSummary provides already computed values like the total queue lengths or - // the total number of used sockets. In contrast to NetIPSocket it does not collect - // the parsed lines into a slice. - NetIPSocketSummary struct { - // TxQueueLength shows the total queue length of all parsed tx_queue lengths. - TxQueueLength uint64 - // RxQueueLength shows the total queue length of all parsed rx_queue lengths. - RxQueueLength uint64 - // UsedSockets shows the total number of parsed lines representing the - // number of used sockets. - UsedSockets uint64 - } - - // netIPSocketLine represents the fields parsed from a single line - // in /proc/net/{t,u}dp{,6}. Fields which are not used by IPSocket are skipped. - // For the proc file format details, see https://linux.die.net/man/5/proc. - netIPSocketLine struct { - Sl uint64 - LocalAddr net.IP - LocalPort uint64 - RemAddr net.IP - RemPort uint64 - St uint64 - TxQueue uint64 - RxQueue uint64 - UID uint64 - } -) - -func newNetIPSocket(file string) (NetIPSocket, error) { - f, err := os.Open(file) - if err != nil { - return nil, err - } - defer f.Close() - - var netIPSocket NetIPSocket - - lr := io.LimitReader(f, readLimit) - s := bufio.NewScanner(lr) - s.Scan() // skip first line with headers - for s.Scan() { - fields := strings.Fields(s.Text()) - line, err := parseNetIPSocketLine(fields) - if err != nil { - return nil, err - } - netIPSocket = append(netIPSocket, line) - } - if err := s.Err(); err != nil { - return nil, err - } - return netIPSocket, nil -} - -// newNetIPSocketSummary creates a new NetIPSocket{,6} from the contents of the given file. 
-func newNetIPSocketSummary(file string) (*NetIPSocketSummary, error) { - f, err := os.Open(file) - if err != nil { - return nil, err - } - defer f.Close() - - var netIPSocketSummary NetIPSocketSummary - - lr := io.LimitReader(f, readLimit) - s := bufio.NewScanner(lr) - s.Scan() // skip first line with headers - for s.Scan() { - fields := strings.Fields(s.Text()) - line, err := parseNetIPSocketLine(fields) - if err != nil { - return nil, err - } - netIPSocketSummary.TxQueueLength += line.TxQueue - netIPSocketSummary.RxQueueLength += line.RxQueue - netIPSocketSummary.UsedSockets++ - } - if err := s.Err(); err != nil { - return nil, err - } - return &netIPSocketSummary, nil -} - -// the /proc/net/{t,u}dp{,6} files are network byte order for ipv4 and for ipv6 the address is four words consisting of four bytes each. In each of those four words the four bytes are written in reverse order. - -func parseIP(hexIP string) (net.IP, error) { - var byteIP []byte - byteIP, err := hex.DecodeString(hexIP) - if err != nil { - return nil, fmt.Errorf("cannot parse address field in socket line %q", hexIP) - } - switch len(byteIP) { - case 4: - return net.IP{byteIP[3], byteIP[2], byteIP[1], byteIP[0]}, nil - case 16: - i := net.IP{ - byteIP[3], byteIP[2], byteIP[1], byteIP[0], - byteIP[7], byteIP[6], byteIP[5], byteIP[4], - byteIP[11], byteIP[10], byteIP[9], byteIP[8], - byteIP[15], byteIP[14], byteIP[13], byteIP[12], - } - return i, nil - default: - return nil, fmt.Errorf("Unable to parse IP %s", hexIP) - } -} - -// parseNetIPSocketLine parses a single line, represented by a list of fields. -func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) { - line := &netIPSocketLine{} - if len(fields) < 8 { - return nil, fmt.Errorf( - "cannot parse net socket line as it has less then 8 columns %q", - strings.Join(fields, " "), - ) - } - var err error // parse error - - // sl - s := strings.Split(fields[0], ":") - if len(s) != 2 { - return nil, fmt.Errorf("cannot parse sl field in socket line %q", fields[0]) - } - - if line.Sl, err = strconv.ParseUint(s[0], 0, 64); err != nil { - return nil, fmt.Errorf("cannot parse sl value in socket line: %w", err) - } - // local_address - l := strings.Split(fields[1], ":") - if len(l) != 2 { - return nil, fmt.Errorf("cannot parse local_address field in socket line %q", fields[1]) - } - if line.LocalAddr, err = parseIP(l[0]); err != nil { - return nil, err - } - if line.LocalPort, err = strconv.ParseUint(l[1], 16, 64); err != nil { - return nil, fmt.Errorf("cannot parse local_address port value in socket line: %w", err) - } - - // remote_address - r := strings.Split(fields[2], ":") - if len(r) != 2 { - return nil, fmt.Errorf("cannot parse rem_address field in socket line %q", fields[1]) - } - if line.RemAddr, err = parseIP(r[0]); err != nil { - return nil, err - } - if line.RemPort, err = strconv.ParseUint(r[1], 16, 64); err != nil { - return nil, fmt.Errorf("cannot parse rem_address port value in socket line: %w", err) - } - - // st - if line.St, err = strconv.ParseUint(fields[3], 16, 64); err != nil { - return nil, fmt.Errorf("cannot parse st value in socket line: %w", err) - } - - // tx_queue and rx_queue - q := strings.Split(fields[4], ":") - if len(q) != 2 { - return nil, fmt.Errorf( - "cannot parse tx/rx queues in socket line as it has a missing colon %q", - fields[4], - ) - } - if line.TxQueue, err = strconv.ParseUint(q[0], 16, 64); err != nil { - return nil, fmt.Errorf("cannot parse tx_queue value in socket line: %w", err) - } - if line.RxQueue, err = 
strconv.ParseUint(q[1], 16, 64); err != nil { - return nil, fmt.Errorf("cannot parse rx_queue value in socket line: %w", err) - } - - // uid - if line.UID, err = strconv.ParseUint(fields[7], 0, 64); err != nil { - return nil, fmt.Errorf("cannot parse uid value in socket line: %w", err) - } - - return line, nil -} diff --git a/vendor/github.com/prometheus/procfs/net_protocols.go b/vendor/github.com/prometheus/procfs/net_protocols.go deleted file mode 100644 index 8c6de379..00000000 --- a/vendor/github.com/prometheus/procfs/net_protocols.go +++ /dev/null @@ -1,180 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "fmt" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// NetProtocolStats stores the contents from /proc/net/protocols -type NetProtocolStats map[string]NetProtocolStatLine - -// NetProtocolStatLine contains a single line parsed from /proc/net/protocols. We -// only care about the first six columns as the rest are not likely to change -// and only serve to provide a set of capabilities for each protocol. -type NetProtocolStatLine struct { - Name string // 0 The name of the protocol - Size uint64 // 1 The size, in bytes, of a given protocol structure. e.g. sizeof(struct tcp_sock) or sizeof(struct unix_sock) - Sockets int64 // 2 Number of sockets in use by this protocol - Memory int64 // 3 Number of 4KB pages allocated by all sockets of this protocol - Pressure int // 4 This is either yes, no, or NI (not implemented). For the sake of simplicity we treat NI as not experiencing memory pressure. - MaxHeader uint64 // 5 Protocol specific max header size - Slab bool // 6 Indicates whether or not memory is allocated from the SLAB - ModuleName string // 7 The name of the module that implemented this protocol or "kernel" if not from a module - Capabilities NetProtocolCapabilities -} - -// NetProtocolCapabilities contains a list of capabilities for each protocol -type NetProtocolCapabilities struct { - Close bool // 8 - Connect bool // 9 - Disconnect bool // 10 - Accept bool // 11 - IoCtl bool // 12 - Init bool // 13 - Destroy bool // 14 - Shutdown bool // 15 - SetSockOpt bool // 16 - GetSockOpt bool // 17 - SendMsg bool // 18 - RecvMsg bool // 19 - SendPage bool // 20 - Bind bool // 21 - BacklogRcv bool // 22 - Hash bool // 23 - UnHash bool // 24 - GetPort bool // 25 - EnterMemoryPressure bool // 26 -} - -// NetProtocols reads stats from /proc/net/protocols and returns a map of -// PortocolStatLine entries. 
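The address decoding in parseIP above is the subtle part of this file: /proc/net/{tcp,udp} store IPv4 addresses as hex in reversed byte order (and IPv6 as four such reversed 4-byte words), while ports are ordinary big-endian hex. A worked IPv4 example using the well-known loopback encoding:

```go
package main

import (
	"encoding/hex"
	"fmt"
	"net"
	"strconv"
)

func main() {
	// "0100007F" is how 127.0.0.1 appears in /proc/net/tcp: the
	// deleted parseIP decodes the hex and reverses the four bytes.
	b, err := hex.DecodeString("0100007F")
	if err != nil {
		panic(err)
	}
	ip := net.IP{b[3], b[2], b[1], b[0]}

	// Ports are plain big-endian hex: "0016" is 22.
	port, err := strconv.ParseUint("0016", 16, 64)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s:%d\n", ip, port) // 127.0.0.1:22
}
```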
As of this writing no official Linux Documentation -// exists, however the source is fairly self-explanatory and the format seems -// stable since its introduction in 2.6.12-rc2 -// Linux 2.6.12-rc2 - https://elixir.bootlin.com/linux/v2.6.12-rc2/source/net/core/sock.c#L1452 -// Linux 5.10 - https://elixir.bootlin.com/linux/v5.10.4/source/net/core/sock.c#L3586 -func (fs FS) NetProtocols() (NetProtocolStats, error) { - data, err := util.ReadFileNoStat(fs.proc.Path("net/protocols")) - if err != nil { - return NetProtocolStats{}, err - } - return parseNetProtocols(bufio.NewScanner(bytes.NewReader(data))) -} - -func parseNetProtocols(s *bufio.Scanner) (NetProtocolStats, error) { - nps := NetProtocolStats{} - - // Skip the header line - s.Scan() - - for s.Scan() { - line, err := nps.parseLine(s.Text()) - if err != nil { - return NetProtocolStats{}, err - } - - nps[line.Name] = *line - } - return nps, nil -} - -func (ps NetProtocolStats) parseLine(rawLine string) (*NetProtocolStatLine, error) { - line := &NetProtocolStatLine{Capabilities: NetProtocolCapabilities{}} - var err error - const enabled = "yes" - const disabled = "no" - - fields := strings.Fields(rawLine) - line.Name = fields[0] - line.Size, err = strconv.ParseUint(fields[1], 10, 64) - if err != nil { - return nil, err - } - line.Sockets, err = strconv.ParseInt(fields[2], 10, 64) - if err != nil { - return nil, err - } - line.Memory, err = strconv.ParseInt(fields[3], 10, 64) - if err != nil { - return nil, err - } - if fields[4] == enabled { - line.Pressure = 1 - } else if fields[4] == disabled { - line.Pressure = 0 - } else { - line.Pressure = -1 - } - line.MaxHeader, err = strconv.ParseUint(fields[5], 10, 64) - if err != nil { - return nil, err - } - if fields[6] == enabled { - line.Slab = true - } else if fields[6] == disabled { - line.Slab = false - } else { - return nil, fmt.Errorf("unable to parse capability for protocol: %s", line.Name) - } - line.ModuleName = fields[7] - - err = line.Capabilities.parseCapabilities(fields[8:]) - if err != nil { - return nil, err - } - - return line, nil -} - -func (pc *NetProtocolCapabilities) parseCapabilities(capabilities []string) error { - // The capabilities are all bools so we can loop over to map them - capabilityFields := [...]*bool{ - &pc.Close, - &pc.Connect, - &pc.Disconnect, - &pc.Accept, - &pc.IoCtl, - &pc.Init, - &pc.Destroy, - &pc.Shutdown, - &pc.SetSockOpt, - &pc.GetSockOpt, - &pc.SendMsg, - &pc.RecvMsg, - &pc.SendPage, - &pc.Bind, - &pc.BacklogRcv, - &pc.Hash, - &pc.UnHash, - &pc.GetPort, - &pc.EnterMemoryPressure, - } - - for i := 0; i < len(capabilities); i++ { - if capabilities[i] == "y" { - *capabilityFields[i] = true - } else if capabilities[i] == "n" { - *capabilityFields[i] = false - } else { - return fmt.Errorf("unable to parse capability block for protocol: position %d", i) - } - } - return nil -} diff --git a/vendor/github.com/prometheus/procfs/net_sockstat.go b/vendor/github.com/prometheus/procfs/net_sockstat.go deleted file mode 100644 index e36f4872..00000000 --- a/vendor/github.com/prometheus/procfs/net_sockstat.go +++ /dev/null @@ -1,163 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
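For callers outside this repo (which no longer vendors the package), the removed file exposed the table through FS.NetProtocols. A hedged usage sketch against the standalone github.com/prometheus/procfs module; the "TCP" key and the fields printed are just illustrative choices:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	stats, err := fs.NetProtocols() // one entry per /proc/net/protocols row
	if err != nil {
		log.Fatal(err)
	}
	if tcp, ok := stats["TCP"]; ok {
		fmt.Printf("TCP: sockets=%d pages=%d slab=%t\n", tcp.Sockets, tcp.Memory, tcp.Slab)
	}
}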
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "errors" - "fmt" - "io" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// A NetSockstat contains the output of /proc/net/sockstat{,6} for IPv4 or IPv6, -// respectively. -type NetSockstat struct { - // Used is non-nil for IPv4 sockstat results, but nil for IPv6. - Used *int - Protocols []NetSockstatProtocol -} - -// A NetSockstatProtocol contains statistics about a given socket protocol. -// Pointer fields indicate that the value may or may not be present on any -// given protocol. -type NetSockstatProtocol struct { - Protocol string - InUse int - Orphan *int - TW *int - Alloc *int - Mem *int - Memory *int -} - -// NetSockstat retrieves IPv4 socket statistics. -func (fs FS) NetSockstat() (*NetSockstat, error) { - return readSockstat(fs.proc.Path("net", "sockstat")) -} - -// NetSockstat6 retrieves IPv6 socket statistics. -// -// If IPv6 is disabled on this kernel, the returned error can be checked with -// os.IsNotExist. -func (fs FS) NetSockstat6() (*NetSockstat, error) { - return readSockstat(fs.proc.Path("net", "sockstat6")) -} - -// readSockstat opens and parses a NetSockstat from the input file. -func readSockstat(name string) (*NetSockstat, error) { - // This file is small and can be read with one syscall. - b, err := util.ReadFileNoStat(name) - if err != nil { - // Do not wrap this error so the caller can detect os.IsNotExist and - // similar conditions. - return nil, err - } - - stat, err := parseSockstat(bytes.NewReader(b)) - if err != nil { - return nil, fmt.Errorf("failed to read sockstats from %q: %w", name, err) - } - - return stat, nil -} - -// parseSockstat reads the contents of a sockstat file and parses a NetSockstat. -func parseSockstat(r io.Reader) (*NetSockstat, error) { - var stat NetSockstat - s := bufio.NewScanner(r) - for s.Scan() { - // Expect a minimum of a protocol and one key/value pair. - fields := strings.Split(s.Text(), " ") - if len(fields) < 3 { - return nil, fmt.Errorf("malformed sockstat line: %q", s.Text()) - } - - // The remaining fields are key/value pairs. - kvs, err := parseSockstatKVs(fields[1:]) - if err != nil { - return nil, fmt.Errorf("error parsing sockstat key/value pairs from %q: %w", s.Text(), err) - } - - // The first field is the protocol. We must trim its colon suffix. - proto := strings.TrimSuffix(fields[0], ":") - switch proto { - case "sockets": - // Special case: IPv4 has a sockets "used" key/value pair that we - // embed at the top level of the structure. - used := kvs["used"] - stat.Used = &used - default: - // Parse all other lines as individual protocols. - nsp := parseSockstatProtocol(kvs) - nsp.Protocol = proto - stat.Protocols = append(stat.Protocols, nsp) - } - } - - if err := s.Err(); err != nil { - return nil, err - } - - return &stat, nil -} - -// parseSockstatKVs parses a string slice into a map of key/value pairs. -func parseSockstatKVs(kvs []string) (map[string]int, error) { - if len(kvs)%2 != 0 { - return nil, errors.New("odd number of fields in key/value pairs") - } - - // Iterate two values at a time to gather key/value pairs. 
- out := make(map[string]int, len(kvs)/2) - for i := 0; i < len(kvs); i += 2 { - vp := util.NewValueParser(kvs[i+1]) - out[kvs[i]] = vp.Int() - - if err := vp.Err(); err != nil { - return nil, err - } - } - - return out, nil -} - -// parseSockstatProtocol parses a NetSockstatProtocol from the input kvs map. -func parseSockstatProtocol(kvs map[string]int) NetSockstatProtocol { - var nsp NetSockstatProtocol - for k, v := range kvs { - // Capture the range variable to ensure we get unique pointers for - // each of the optional fields. - v := v - switch k { - case "inuse": - nsp.InUse = v - case "orphan": - nsp.Orphan = &v - case "tw": - nsp.TW = &v - case "alloc": - nsp.Alloc = &v - case "mem": - nsp.Mem = &v - case "memory": - nsp.Memory = &v - } - } - - return nsp -} diff --git a/vendor/github.com/prometheus/procfs/net_softnet.go b/vendor/github.com/prometheus/procfs/net_softnet.go deleted file mode 100644 index 46f12c61..00000000 --- a/vendor/github.com/prometheus/procfs/net_softnet.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "fmt" - "io" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// For the proc file format details, -// See: -// * Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2343 -// * Linux 4.17 https://elixir.bootlin.com/linux/v4.17/source/net/core/net-procfs.c#L162 -// and https://elixir.bootlin.com/linux/v4.17/source/include/linux/netdevice.h#L2810. - -// SoftnetStat contains a single row of data from /proc/net/softnet_stat -type SoftnetStat struct { - // Number of processed packets - Processed uint32 - // Number of dropped packets - Dropped uint32 - // Number of times processing packets ran out of quota - TimeSqueezed uint32 -} - -var softNetProcFile = "net/softnet_stat" - -// NetSoftnetStat reads data from /proc/net/softnet_stat. -func (fs FS) NetSoftnetStat() ([]SoftnetStat, error) { - b, err := util.ReadFileNoStat(fs.proc.Path(softNetProcFile)) - if err != nil { - return nil, err - } - - entries, err := parseSoftnet(bytes.NewReader(b)) - if err != nil { - return nil, fmt.Errorf("failed to parse /proc/net/softnet_stat: %w", err) - } - - return entries, nil -} - -func parseSoftnet(r io.Reader) ([]SoftnetStat, error) { - const minColumns = 9 - - s := bufio.NewScanner(r) - - var stats []SoftnetStat - for s.Scan() { - columns := strings.Fields(s.Text()) - width := len(columns) - - if width < minColumns { - return nil, fmt.Errorf("%d columns were detected, but at least %d were expected", width, minColumns) - } - - // We only parse the first three columns at the moment. 
- us, err := parseHexUint32s(columns[0:3]) - if err != nil { - return nil, err - } - - stats = append(stats, SoftnetStat{ - Processed: us[0], - Dropped: us[1], - TimeSqueezed: us[2], - }) - } - - return stats, nil -} - -func parseHexUint32s(ss []string) ([]uint32, error) { - us := make([]uint32, 0, len(ss)) - for _, s := range ss { - u, err := strconv.ParseUint(s, 16, 32) - if err != nil { - return nil, err - } - - us = append(us, uint32(u)) - } - - return us, nil -} diff --git a/vendor/github.com/prometheus/procfs/net_tcp.go b/vendor/github.com/prometheus/procfs/net_tcp.go deleted file mode 100644 index 52776295..00000000 --- a/vendor/github.com/prometheus/procfs/net_tcp.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -type ( - // NetTCP represents the contents of /proc/net/tcp{,6} file without the header. - NetTCP []*netIPSocketLine - - // NetTCPSummary provides already computed values like the total queue lengths or - // the total number of used sockets. In contrast to NetTCP it does not collect - // the parsed lines into a slice. - NetTCPSummary NetIPSocketSummary -) - -// NetTCP returns the IPv4 kernel/networking statistics for TCP datagrams -// read from /proc/net/tcp. -func (fs FS) NetTCP() (NetTCP, error) { - return newNetTCP(fs.proc.Path("net/tcp")) -} - -// NetTCP6 returns the IPv6 kernel/networking statistics for TCP datagrams -// read from /proc/net/tcp6. -func (fs FS) NetTCP6() (NetTCP, error) { - return newNetTCP(fs.proc.Path("net/tcp6")) -} - -// NetTCPSummary returns already computed statistics like the total queue lengths -// for TCP datagrams read from /proc/net/tcp. -func (fs FS) NetTCPSummary() (*NetTCPSummary, error) { - return newNetTCPSummary(fs.proc.Path("net/tcp")) -} - -// NetTCP6Summary returns already computed statistics like the total queue lengths -// for TCP datagrams read from /proc/net/tcp6. -func (fs FS) NetTCP6Summary() (*NetTCPSummary, error) { - return newNetTCPSummary(fs.proc.Path("net/tcp6")) -} - -// newNetTCP creates a new NetTCP{,6} from the contents of the given file. -func newNetTCP(file string) (NetTCP, error) { - n, err := newNetIPSocket(file) - n1 := NetTCP(n) - return n1, err -} - -func newNetTCPSummary(file string) (*NetTCPSummary, error) { - n, err := newNetIPSocketSummary(file) - if n == nil { - return nil, err - } - n1 := NetTCPSummary(*n) - return &n1, err -} diff --git a/vendor/github.com/prometheus/procfs/net_udp.go b/vendor/github.com/prometheus/procfs/net_udp.go deleted file mode 100644 index 9ac3daf2..00000000 --- a/vendor/github.com/prometheus/procfs/net_udp.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
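The summary variants in the removed net_tcp.go exist so callers can total the queues without materialising one line struct per socket. A sketch of that path, again assuming the standalone module:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	// Single pass over /proc/net/tcp; no per-line slice is kept.
	sum, err := fs.NetTCPSummary()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("sockets=%d tx_queued=%d rx_queued=%d\n",
		sum.UsedSockets, sum.TxQueueLength, sum.RxQueueLength)
}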
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -type ( - // NetUDP represents the contents of /proc/net/udp{,6} file without the header. - NetUDP []*netIPSocketLine - - // NetUDPSummary provides already computed values like the total queue lengths or - // the total number of used sockets. In contrast to NetUDP it does not collect - // the parsed lines into a slice. - NetUDPSummary NetIPSocketSummary -) - -// NetUDP returns the IPv4 kernel/networking statistics for UDP datagrams -// read from /proc/net/udp. -func (fs FS) NetUDP() (NetUDP, error) { - return newNetUDP(fs.proc.Path("net/udp")) -} - -// NetUDP6 returns the IPv6 kernel/networking statistics for UDP datagrams -// read from /proc/net/udp6. -func (fs FS) NetUDP6() (NetUDP, error) { - return newNetUDP(fs.proc.Path("net/udp6")) -} - -// NetUDPSummary returns already computed statistics like the total queue lengths -// for UDP datagrams read from /proc/net/udp. -func (fs FS) NetUDPSummary() (*NetUDPSummary, error) { - return newNetUDPSummary(fs.proc.Path("net/udp")) -} - -// NetUDP6Summary returns already computed statistics like the total queue lengths -// for UDP datagrams read from /proc/net/udp6. -func (fs FS) NetUDP6Summary() (*NetUDPSummary, error) { - return newNetUDPSummary(fs.proc.Path("net/udp6")) -} - -// newNetUDP creates a new NetUDP{,6} from the contents of the given file. -func newNetUDP(file string) (NetUDP, error) { - n, err := newNetIPSocket(file) - n1 := NetUDP(n) - return n1, err -} - -func newNetUDPSummary(file string) (*NetUDPSummary, error) { - n, err := newNetIPSocketSummary(file) - if n == nil { - return nil, err - } - n1 := NetUDPSummary(*n) - return &n1, err -} diff --git a/vendor/github.com/prometheus/procfs/net_unix.go b/vendor/github.com/prometheus/procfs/net_unix.go deleted file mode 100644 index 98aa8e1c..00000000 --- a/vendor/github.com/prometheus/procfs/net_unix.go +++ /dev/null @@ -1,257 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "fmt" - "io" - "os" - "strconv" - "strings" -) - -// For the proc file format details, -// see https://elixir.bootlin.com/linux/v4.17/source/net/unix/af_unix.c#L2815 -// and https://elixir.bootlin.com/linux/latest/source/include/uapi/linux/net.h#L48. - -// Constants for the various /proc/net/unix enumerations. -// TODO: match against x/sys/unix or similar? 
-const ( - netUnixTypeStream = 1 - netUnixTypeDgram = 2 - netUnixTypeSeqpacket = 5 - - netUnixFlagDefault = 0 - netUnixFlagListen = 1 << 16 - - netUnixStateUnconnected = 1 - netUnixStateConnecting = 2 - netUnixStateConnected = 3 - netUnixStateDisconnected = 4 -) - -// NetUNIXType is the type of the type field. -type NetUNIXType uint64 - -// NetUNIXFlags is the type of the flags field. -type NetUNIXFlags uint64 - -// NetUNIXState is the type of the state field. -type NetUNIXState uint64 - -// NetUNIXLine represents a line of /proc/net/unix. -type NetUNIXLine struct { - KernelPtr string - RefCount uint64 - Protocol uint64 - Flags NetUNIXFlags - Type NetUNIXType - State NetUNIXState - Inode uint64 - Path string -} - -// NetUNIX holds the data read from /proc/net/unix. -type NetUNIX struct { - Rows []*NetUNIXLine -} - -// NetUNIX returns data read from /proc/net/unix. -func (fs FS) NetUNIX() (*NetUNIX, error) { - return readNetUNIX(fs.proc.Path("net/unix")) -} - -// readNetUNIX reads data in /proc/net/unix format from the specified file. -func readNetUNIX(file string) (*NetUNIX, error) { - // This file could be quite large and a streaming read is desirable versus - // reading the entire contents at once. - f, err := os.Open(file) - if err != nil { - return nil, err - } - defer f.Close() - - return parseNetUNIX(f) -} - -// parseNetUNIX creates a NetUnix structure from the incoming stream. -func parseNetUNIX(r io.Reader) (*NetUNIX, error) { - // Begin scanning by checking for the existence of Inode. - s := bufio.NewScanner(r) - s.Scan() - - // From the man page of proc(5), it does not contain an Inode field, - // but in actually it exists. This code works for both cases. - hasInode := strings.Contains(s.Text(), "Inode") - - // Expect a minimum number of fields, but Inode and Path are optional: - // Num RefCount Protocol Flags Type St Inode Path - minFields := 6 - if hasInode { - minFields++ - } - - var nu NetUNIX - for s.Scan() { - line := s.Text() - item, err := nu.parseLine(line, hasInode, minFields) - if err != nil { - return nil, fmt.Errorf("failed to parse /proc/net/unix data %q: %w", line, err) - } - - nu.Rows = append(nu.Rows, item) - } - - if err := s.Err(); err != nil { - return nil, fmt.Errorf("failed to scan /proc/net/unix data: %w", err) - } - - return &nu, nil -} - -func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine, error) { - fields := strings.Fields(line) - - l := len(fields) - if l < min { - return nil, fmt.Errorf("expected at least %d fields but got %d", min, l) - } - - // Field offsets are as follows: - // Num RefCount Protocol Flags Type St Inode Path - - kernelPtr := strings.TrimSuffix(fields[0], ":") - - users, err := u.parseUsers(fields[1]) - if err != nil { - return nil, fmt.Errorf("failed to parse ref count %q: %w", fields[1], err) - } - - flags, err := u.parseFlags(fields[3]) - if err != nil { - return nil, fmt.Errorf("failed to parse flags %q: %w", fields[3], err) - } - - typ, err := u.parseType(fields[4]) - if err != nil { - return nil, fmt.Errorf("failed to parse type %q: %w", fields[4], err) - } - - state, err := u.parseState(fields[5]) - if err != nil { - return nil, fmt.Errorf("failed to parse state %q: %w", fields[5], err) - } - - var inode uint64 - if hasInode { - inode, err = u.parseInode(fields[6]) - if err != nil { - return nil, fmt.Errorf("failed to parse inode %q: %w", fields[6], err) - } - } - - n := &NetUNIXLine{ - KernelPtr: kernelPtr, - RefCount: users, - Type: typ, - Flags: flags, - State: state, - Inode: inode, - } - - // 
Path field is optional. - if l > min { - // Path occurs at either index 6 or 7 depending on whether inode is - // already present. - pathIdx := 7 - if !hasInode { - pathIdx-- - } - - n.Path = fields[pathIdx] - } - - return n, nil -} - -func (u NetUNIX) parseUsers(s string) (uint64, error) { - return strconv.ParseUint(s, 16, 32) -} - -func (u NetUNIX) parseType(s string) (NetUNIXType, error) { - typ, err := strconv.ParseUint(s, 16, 16) - if err != nil { - return 0, err - } - - return NetUNIXType(typ), nil -} - -func (u NetUNIX) parseFlags(s string) (NetUNIXFlags, error) { - flags, err := strconv.ParseUint(s, 16, 32) - if err != nil { - return 0, err - } - - return NetUNIXFlags(flags), nil -} - -func (u NetUNIX) parseState(s string) (NetUNIXState, error) { - st, err := strconv.ParseInt(s, 16, 8) - if err != nil { - return 0, err - } - - return NetUNIXState(st), nil -} - -func (u NetUNIX) parseInode(s string) (uint64, error) { - return strconv.ParseUint(s, 10, 64) -} - -func (t NetUNIXType) String() string { - switch t { - case netUnixTypeStream: - return "stream" - case netUnixTypeDgram: - return "dgram" - case netUnixTypeSeqpacket: - return "seqpacket" - } - return "unknown" -} - -func (f NetUNIXFlags) String() string { - switch f { - case netUnixFlagListen: - return "listen" - default: - return "default" - } -} - -func (s NetUNIXState) String() string { - switch s { - case netUnixStateUnconnected: - return "unconnected" - case netUnixStateConnecting: - return "connecting" - case netUnixStateConnected: - return "connected" - case netUnixStateDisconnected: - return "disconnected" - } - return "unknown" -} diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go deleted file mode 100644 index 28f69680..00000000 --- a/vendor/github.com/prometheus/procfs/proc.go +++ /dev/null @@ -1,319 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bytes" - "fmt" - "io/ioutil" - "os" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/fs" - "github.com/prometheus/procfs/internal/util" -) - -// Proc provides information about a running process. -type Proc struct { - // The process ID. - PID int - - fs fs.FS -} - -// Procs represents a list of Proc structs. -type Procs []Proc - -func (p Procs) Len() int { return len(p) } -func (p Procs) Swap(i, j int) { p[i], p[j] = p[j], p[i] } -func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID } - -// Self returns a process for the current process read via /proc/self. -func Self() (Proc, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return Proc{}, err - } - return fs.Self() -} - -// NewProc returns a process for the given pid under /proc. -func NewProc(pid int) (Proc, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return Proc{}, err - } - return fs.Proc(pid) -} - -// AllProcs returns a list of all currently available processes under /proc. 
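The NetUNIX reader removed above returns one row per /proc/net/unix line, with the numeric type, flags, and state fields wrapped in Stringer types. A usage sketch that lists sockets with their decoded state, assuming the standalone module on a Linux /proc:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	nu, err := fs.NetUNIX()
	if err != nil {
		log.Fatal(err)
	}
	for _, row := range nu.Rows {
		// Type/State stringify via the removed helpers, e.g. "stream"/"connected".
		fmt.Printf("%-9s %-12s %s\n", row.Type, row.State, row.Path)
	}
}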
-func AllProcs() (Procs, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return Procs{}, err - } - return fs.AllProcs() -} - -// Self returns a process for the current process. -func (fs FS) Self() (Proc, error) { - p, err := os.Readlink(fs.proc.Path("self")) - if err != nil { - return Proc{}, err - } - pid, err := strconv.Atoi(strings.Replace(p, string(fs.proc), "", -1)) - if err != nil { - return Proc{}, err - } - return fs.Proc(pid) -} - -// NewProc returns a process for the given pid. -// -// Deprecated: use fs.Proc() instead -func (fs FS) NewProc(pid int) (Proc, error) { - return fs.Proc(pid) -} - -// Proc returns a process for the given pid. -func (fs FS) Proc(pid int) (Proc, error) { - if _, err := os.Stat(fs.proc.Path(strconv.Itoa(pid))); err != nil { - return Proc{}, err - } - return Proc{PID: pid, fs: fs.proc}, nil -} - -// AllProcs returns a list of all currently available processes. -func (fs FS) AllProcs() (Procs, error) { - d, err := os.Open(fs.proc.Path()) - if err != nil { - return Procs{}, err - } - defer d.Close() - - names, err := d.Readdirnames(-1) - if err != nil { - return Procs{}, fmt.Errorf("could not read %q: %w", d.Name(), err) - } - - p := Procs{} - for _, n := range names { - pid, err := strconv.ParseInt(n, 10, 64) - if err != nil { - continue - } - p = append(p, Proc{PID: int(pid), fs: fs.proc}) - } - - return p, nil -} - -// CmdLine returns the command line of a process. -func (p Proc) CmdLine() ([]string, error) { - data, err := util.ReadFileNoStat(p.path("cmdline")) - if err != nil { - return nil, err - } - - if len(data) < 1 { - return []string{}, nil - } - - return strings.Split(string(bytes.TrimRight(data, string("\x00"))), string(byte(0))), nil -} - -// Wchan returns the wchan (wait channel) of a process. -func (p Proc) Wchan() (string, error) { - f, err := os.Open(p.path("wchan")) - if err != nil { - return "", err - } - defer f.Close() - - data, err := ioutil.ReadAll(f) - if err != nil { - return "", err - } - - wchan := string(data) - if wchan == "" || wchan == "0" { - return "", nil - } - - return wchan, nil -} - -// Comm returns the command name of a process. -func (p Proc) Comm() (string, error) { - data, err := util.ReadFileNoStat(p.path("comm")) - if err != nil { - return "", err - } - - return strings.TrimSpace(string(data)), nil -} - -// Executable returns the absolute path of the executable command of a process. -func (p Proc) Executable() (string, error) { - exe, err := os.Readlink(p.path("exe")) - if os.IsNotExist(err) { - return "", nil - } - - return exe, err -} - -// Cwd returns the absolute path to the current working directory of the process. -func (p Proc) Cwd() (string, error) { - wd, err := os.Readlink(p.path("cwd")) - if os.IsNotExist(err) { - return "", nil - } - - return wd, err -} - -// RootDir returns the absolute path to the process's root directory (as set by chroot) -func (p Proc) RootDir() (string, error) { - rdir, err := os.Readlink(p.path("root")) - if os.IsNotExist(err) { - return "", nil - } - - return rdir, err -} - -// FileDescriptors returns the currently open file descriptors of a process. 
-func (p Proc) FileDescriptors() ([]uintptr, error) { - names, err := p.fileDescriptors() - if err != nil { - return nil, err - } - - fds := make([]uintptr, len(names)) - for i, n := range names { - fd, err := strconv.ParseInt(n, 10, 32) - if err != nil { - return nil, fmt.Errorf("could not parse fd %q: %w", n, err) - } - fds[i] = uintptr(fd) - } - - return fds, nil -} - -// FileDescriptorTargets returns the targets of all file descriptors of a process. -// If a file descriptor is not a symlink to a file (like a socket), that value will be the empty string. -func (p Proc) FileDescriptorTargets() ([]string, error) { - names, err := p.fileDescriptors() - if err != nil { - return nil, err - } - - targets := make([]string, len(names)) - - for i, name := range names { - target, err := os.Readlink(p.path("fd", name)) - if err == nil { - targets[i] = target - } - } - - return targets, nil -} - -// FileDescriptorsLen returns the number of currently open file descriptors of -// a process. -func (p Proc) FileDescriptorsLen() (int, error) { - fds, err := p.fileDescriptors() - if err != nil { - return 0, err - } - - return len(fds), nil -} - -// MountStats retrieves statistics and configuration for mount points in a -// process's namespace. -func (p Proc) MountStats() ([]*Mount, error) { - f, err := os.Open(p.path("mountstats")) - if err != nil { - return nil, err - } - defer f.Close() - - return parseMountStats(f) -} - -// MountInfo retrieves mount information for mount points in a -// process's namespace. -// It supplies information missing in `/proc/self/mounts` and -// fixes various other problems with that file too. -func (p Proc) MountInfo() ([]*MountInfo, error) { - data, err := util.ReadFileNoStat(p.path("mountinfo")) - if err != nil { - return nil, err - } - return parseMountInfo(data) -} - -func (p Proc) fileDescriptors() ([]string, error) { - d, err := os.Open(p.path("fd")) - if err != nil { - return nil, err - } - defer d.Close() - - names, err := d.Readdirnames(-1) - if err != nil { - return nil, fmt.Errorf("could not read %q: %w", d.Name(), err) - } - - return names, nil -} - -func (p Proc) path(pa ...string) string { - return p.fs.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...) -} - -// FileDescriptorsInfo retrieves information about all file descriptors of -// the process. -func (p Proc) FileDescriptorsInfo() (ProcFDInfos, error) { - names, err := p.fileDescriptors() - if err != nil { - return nil, err - } - - var fdinfos ProcFDInfos - - for _, n := range names { - fdinfo, err := p.FDInfo(n) - if err != nil { - continue - } - fdinfos = append(fdinfos, *fdinfo) - } - - return fdinfos, nil -} - -// Schedstat returns task scheduling information for the process. -func (p Proc) Schedstat() (ProcSchedstat, error) { - contents, err := ioutil.ReadFile(p.path("schedstat")) - if err != nil { - return ProcSchedstat{}, err - } - return parseProcSchedstat(string(contents)) -} diff --git a/vendor/github.com/prometheus/procfs/proc_cgroup.go b/vendor/github.com/prometheus/procfs/proc_cgroup.go deleted file mode 100644 index 0094a13c..00000000 --- a/vendor/github.com/prometheus/procfs/proc_cgroup.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
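proc.go was the entry point for all the per-PID readers above: construct a Proc, then read files under /proc/<pid> through its methods. A short sketch, assuming the standalone module:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self() // resolves /proc/self to a Proc
	if err != nil {
		log.Fatal(err)
	}
	comm, _ := p.Comm()
	args, _ := p.CmdLine()
	nfds, _ := p.FileDescriptorsLen()
	fmt.Printf("pid=%d comm=%q argv=%v open_fds=%d\n", p.PID, comm, args, nfds)
}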
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "fmt" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the the placement of a PID inside a -// specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. v1 has one hierarchy per available resource -// controller, while v2 has one unified hierarchy shared by all controllers. Regardless of v1 or v2, all hierarchies -// contain all running processes, so the question answerable with a Cgroup struct is 'where is this process in -// this hierarchy' (where==what path on the specific cgroupfs). By prefixing this path with the mount point of -// *this specific* hierarchy, you can locate the relevant pseudo-files needed to read/set the data for this PID -// in this hierarchy -// -// Also see http://man7.org/linux/man-pages/man7/cgroups.7.html -type Cgroup struct { - // HierarchyID that can be matched to a named hierarchy using /proc/cgroups. Cgroups V2 only has one - // hierarchy, so HierarchyID is always 0. For cgroups v1 this is a unique ID number - HierarchyID int - // Controllers using this hierarchy of processes. Controllers are also known as subsystems. For - // Cgroups V2 this may be empty, as all active controllers use the same hierarchy - Controllers []string - // Path of this control group, relative to the mount point of the cgroupfs representing this specific - // hierarchy - Path string -} - -// parseCgroupString parses each line of the /proc/[pid]/cgroup file -// Line format is hierarchyID:[controller1,controller2]:path -func parseCgroupString(cgroupStr string) (*Cgroup, error) { - var err error - - fields := strings.SplitN(cgroupStr, ":", 3) - if len(fields) < 3 { - return nil, fmt.Errorf("at least 3 fields required, found %d fields in cgroup string: %s", len(fields), cgroupStr) - } - - cgroup := &Cgroup{ - Path: fields[2], - Controllers: nil, - } - cgroup.HierarchyID, err = strconv.Atoi(fields[0]) - if err != nil { - return nil, fmt.Errorf("failed to parse hierarchy ID") - } - if fields[1] != "" { - ssNames := strings.Split(fields[1], ",") - cgroup.Controllers = append(cgroup.Controllers, ssNames...) - } - return cgroup, nil -} - -// parseCgroups reads each line of the /proc/[pid]/cgroup file -func parseCgroups(data []byte) ([]Cgroup, error) { - var cgroups []Cgroup - scanner := bufio.NewScanner(bytes.NewReader(data)) - for scanner.Scan() { - mountString := scanner.Text() - parsedMounts, err := parseCgroupString(mountString) - if err != nil { - return nil, err - } - cgroups = append(cgroups, *parsedMounts) - } - - err := scanner.Err() - return cgroups, err -} - -// Cgroups reads from /proc//cgroups and returns a []*Cgroup struct locating this PID in each process -// control hierarchy running on this system. 
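parseCgroupString above splits each line into at most three colon-separated fields. Two illustrative lines, one cgroup v1 and one v2, decoded with the same SplitN logic (the real parser additionally leaves Controllers nil when the middle field is empty):

package main

import (
	"fmt"
	"strings"
)

func main() {
	for _, line := range []string{
		"12:cpu,cpuacct:/system.slice/cron.service", // v1: hierarchy per controller group
		"0::/user.slice/user-1000.slice",            // v2: unified hierarchy, ID is always 0
	} {
		fields := strings.SplitN(line, ":", 3)
		fmt.Printf("hierarchy=%-2s controllers=%q path=%s\n",
			fields[0], fields[1], fields[2])
	}
}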
On every system (v1 and v2), all hierarchies contain all processes, -// so the len of the returned struct is equal to the number of active hierarchies on this system -func (p Proc) Cgroups() ([]Cgroup, error) { - data, err := util.ReadFileNoStat(fmt.Sprintf("/proc/%d/cgroup", p.PID)) - if err != nil { - return nil, err - } - return parseCgroups(data) -} diff --git a/vendor/github.com/prometheus/procfs/proc_environ.go b/vendor/github.com/prometheus/procfs/proc_environ.go deleted file mode 100644 index 6134b358..00000000 --- a/vendor/github.com/prometheus/procfs/proc_environ.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// Environ reads process environments from /proc//environ -func (p Proc) Environ() ([]string, error) { - environments := make([]string, 0) - - data, err := util.ReadFileNoStat(p.path("environ")) - if err != nil { - return environments, err - } - - environments = strings.Split(string(data), "\000") - if len(environments) > 0 { - environments = environments[:len(environments)-1] - } - - return environments, nil -} diff --git a/vendor/github.com/prometheus/procfs/proc_fdinfo.go b/vendor/github.com/prometheus/procfs/proc_fdinfo.go deleted file mode 100644 index cf63227f..00000000 --- a/vendor/github.com/prometheus/procfs/proc_fdinfo.go +++ /dev/null @@ -1,133 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "fmt" - "regexp" - - "github.com/prometheus/procfs/internal/util" -) - -// Regexp variables -var ( - rPos = regexp.MustCompile(`^pos:\s+(\d+)$`) - rFlags = regexp.MustCompile(`^flags:\s+(\d+)$`) - rMntID = regexp.MustCompile(`^mnt_id:\s+(\d+)$`) - rInotify = regexp.MustCompile(`^inotify`) - rInotifyParts = regexp.MustCompile(`^inotify\s+wd:([0-9a-f]+)\s+ino:([0-9a-f]+)\s+sdev:([0-9a-f]+)(?:\s+mask:([0-9a-f]+))?`) -) - -// ProcFDInfo contains represents file descriptor information. -type ProcFDInfo struct { - // File descriptor - FD string - // File offset - Pos string - // File access mode and status flags - Flags string - // Mount point ID - MntID string - // List of inotify lines (structured) in the fdinfo file (kernel 3.8+ only) - InotifyInfos []InotifyInfo -} - -// FDInfo constructor. On kernels older than 3.8, InotifyInfos will always be empty. 
-func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) { - data, err := util.ReadFileNoStat(p.path("fdinfo", fd)) - if err != nil { - return nil, err - } - - var text, pos, flags, mntid string - var inotify []InotifyInfo - - scanner := bufio.NewScanner(bytes.NewReader(data)) - for scanner.Scan() { - text = scanner.Text() - if rPos.MatchString(text) { - pos = rPos.FindStringSubmatch(text)[1] - } else if rFlags.MatchString(text) { - flags = rFlags.FindStringSubmatch(text)[1] - } else if rMntID.MatchString(text) { - mntid = rMntID.FindStringSubmatch(text)[1] - } else if rInotify.MatchString(text) { - newInotify, err := parseInotifyInfo(text) - if err != nil { - return nil, err - } - inotify = append(inotify, *newInotify) - } - } - - i := &ProcFDInfo{ - FD: fd, - Pos: pos, - Flags: flags, - MntID: mntid, - InotifyInfos: inotify, - } - - return i, nil -} - -// InotifyInfo represents a single inotify line in the fdinfo file. -type InotifyInfo struct { - // Watch descriptor number - WD string - // Inode number - Ino string - // Device ID - Sdev string - // Mask of events being monitored - Mask string -} - -// InotifyInfo constructor. Only available on kernel 3.8+. -func parseInotifyInfo(line string) (*InotifyInfo, error) { - m := rInotifyParts.FindStringSubmatch(line) - if len(m) >= 4 { - var mask string - if len(m) == 5 { - mask = m[4] - } - i := &InotifyInfo{ - WD: m[1], - Ino: m[2], - Sdev: m[3], - Mask: mask, - } - return i, nil - } - return nil, fmt.Errorf("invalid inode entry: %q", line) -} - -// ProcFDInfos represents a list of ProcFDInfo structs. -type ProcFDInfos []ProcFDInfo - -func (p ProcFDInfos) Len() int { return len(p) } -func (p ProcFDInfos) Swap(i, j int) { p[i], p[j] = p[j], p[i] } -func (p ProcFDInfos) Less(i, j int) bool { return p[i].FD < p[j].FD } - -// InotifyWatchLen returns the total number of inotify watches -func (p ProcFDInfos) InotifyWatchLen() (int, error) { - length := 0 - for _, f := range p { - length += len(f.InotifyInfos) - } - - return length, nil -} diff --git a/vendor/github.com/prometheus/procfs/proc_io.go b/vendor/github.com/prometheus/procfs/proc_io.go deleted file mode 100644 index 776f3497..00000000 --- a/vendor/github.com/prometheus/procfs/proc_io.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "fmt" - - "github.com/prometheus/procfs/internal/util" -) - -// ProcIO models the content of /proc//io. -type ProcIO struct { - // Chars read. - RChar uint64 - // Chars written. - WChar uint64 - // Read syscalls. - SyscR uint64 - // Write syscalls. - SyscW uint64 - // Bytes read. - ReadBytes uint64 - // Bytes written. - WriteBytes uint64 - // Bytes written, but taking into account truncation. See - // Documentation/filesystems/proc.txt in the kernel sources for - // detailed explanation. - CancelledWriteBytes int64 -} - -// IO creates a new ProcIO instance from a given Proc instance. 
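One practical use of the fdinfo parsing above is counting inotify watches across a whole process, which ProcFDInfos.InotifyWatchLen wraps. A sketch assuming the standalone module:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	infos, err := p.FileDescriptorsInfo() // one ProcFDInfo per open fd
	if err != nil {
		log.Fatal(err)
	}
	watches, _ := infos.InotifyWatchLen() // sums inotify lines, kernel 3.8+ only
	fmt.Printf("fds=%d inotify_watches=%d\n", len(infos), watches)
}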
-func (p Proc) IO() (ProcIO, error) { - pio := ProcIO{} - - data, err := util.ReadFileNoStat(p.path("io")) - if err != nil { - return pio, err - } - - ioFormat := "rchar: %d\nwchar: %d\nsyscr: %d\nsyscw: %d\n" + - "read_bytes: %d\nwrite_bytes: %d\n" + - "cancelled_write_bytes: %d\n" - - _, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR, - &pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes) - - return pio, err -} diff --git a/vendor/github.com/prometheus/procfs/proc_limits.go b/vendor/github.com/prometheus/procfs/proc_limits.go deleted file mode 100644 index dd20f198..00000000 --- a/vendor/github.com/prometheus/procfs/proc_limits.go +++ /dev/null @@ -1,160 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "fmt" - "os" - "regexp" - "strconv" -) - -// ProcLimits represents the soft limits for each of the process's resource -// limits. For more information see getrlimit(2): -// http://man7.org/linux/man-pages/man2/getrlimit.2.html. -type ProcLimits struct { - // CPU time limit in seconds. - CPUTime uint64 - // Maximum size of files that the process may create. - FileSize uint64 - // Maximum size of the process's data segment (initialized data, - // uninitialized data, and heap). - DataSize uint64 - // Maximum size of the process stack in bytes. - StackSize uint64 - // Maximum size of a core file. - CoreFileSize uint64 - // Limit of the process's resident set in pages. - ResidentSet uint64 - // Maximum number of processes that can be created for the real user ID of - // the calling process. - Processes uint64 - // Value one greater than the maximum file descriptor number that can be - // opened by this process. - OpenFiles uint64 - // Maximum number of bytes of memory that may be locked into RAM. - LockedMemory uint64 - // Maximum size of the process's virtual memory address space in bytes. - AddressSpace uint64 - // Limit on the combined number of flock(2) locks and fcntl(2) leases that - // this process may establish. - FileLocks uint64 - // Limit of signals that may be queued for the real user ID of the calling - // process. - PendingSignals uint64 - // Limit on the number of bytes that can be allocated for POSIX message - // queues for the real user ID of the calling process. - MsqqueueSize uint64 - // Limit of the nice priority set using setpriority(2) or nice(2). - NicePriority uint64 - // Limit of the real-time priority set using sched_setscheduler(2) or - // sched_setparam(2). - RealtimePriority uint64 - // Limit (in microseconds) on the amount of CPU time that a process - // scheduled under a real-time scheduling policy may consume without making - // a blocking system call. - RealtimeTimeout uint64 -} - -const ( - limitsFields = 4 - limitsUnlimited = "unlimited" -) - -var ( - limitsMatch = regexp.MustCompile(`(Max \w+\s{0,1}?\w*\s{0,1}\w*)\s{2,}(\w+)\s+(\w+)`) -) - -// NewLimits returns the current soft limits of the process. 
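The removed IO method leans entirely on fmt.Sscanf with a newline-sensitive format string. The same trick, stdlib-only, against an illustrative /proc/<pid>/io payload:

package main

import (
	"fmt"
	"log"
)

func main() {
	// Illustrative /proc/<pid>/io contents; the removed ProcIO reader
	// scans exactly this layout with one fmt.Sscanf call.
	sample := "rchar: 1948\nwchar: 208\nsyscr: 13\nsyscw: 8\n" +
		"read_bytes: 0\nwrite_bytes: 4096\ncancelled_write_bytes: 0\n"
	format := "rchar: %d\nwchar: %d\nsyscr: %d\nsyscw: %d\n" +
		"read_bytes: %d\nwrite_bytes: %d\ncancelled_write_bytes: %d\n"

	var rchar, wchar, syscr, syscw, readB, writeB uint64
	var cancelled int64
	if _, err := fmt.Sscanf(sample, format, &rchar, &wchar, &syscr, &syscw,
		&readB, &writeB, &cancelled); err != nil {
		log.Fatal(err)
	}
	fmt.Println(rchar, writeB, cancelled) // 1948 4096 0
}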
-// -// Deprecated: use p.Limits() instead -func (p Proc) NewLimits() (ProcLimits, error) { - return p.Limits() -} - -// Limits returns the current soft limits of the process. -func (p Proc) Limits() (ProcLimits, error) { - f, err := os.Open(p.path("limits")) - if err != nil { - return ProcLimits{}, err - } - defer f.Close() - - var ( - l = ProcLimits{} - s = bufio.NewScanner(f) - ) - - s.Scan() // Skip limits header - - for s.Scan() { - //fields := limitsMatch.Split(s.Text(), limitsFields) - fields := limitsMatch.FindStringSubmatch(s.Text()) - if len(fields) != limitsFields { - return ProcLimits{}, fmt.Errorf("couldn't parse %q line %q", f.Name(), s.Text()) - } - - switch fields[1] { - case "Max cpu time": - l.CPUTime, err = parseUint(fields[2]) - case "Max file size": - l.FileSize, err = parseUint(fields[2]) - case "Max data size": - l.DataSize, err = parseUint(fields[2]) - case "Max stack size": - l.StackSize, err = parseUint(fields[2]) - case "Max core file size": - l.CoreFileSize, err = parseUint(fields[2]) - case "Max resident set": - l.ResidentSet, err = parseUint(fields[2]) - case "Max processes": - l.Processes, err = parseUint(fields[2]) - case "Max open files": - l.OpenFiles, err = parseUint(fields[2]) - case "Max locked memory": - l.LockedMemory, err = parseUint(fields[2]) - case "Max address space": - l.AddressSpace, err = parseUint(fields[2]) - case "Max file locks": - l.FileLocks, err = parseUint(fields[2]) - case "Max pending signals": - l.PendingSignals, err = parseUint(fields[2]) - case "Max msgqueue size": - l.MsqqueueSize, err = parseUint(fields[2]) - case "Max nice priority": - l.NicePriority, err = parseUint(fields[2]) - case "Max realtime priority": - l.RealtimePriority, err = parseUint(fields[2]) - case "Max realtime timeout": - l.RealtimeTimeout, err = parseUint(fields[2]) - } - if err != nil { - return ProcLimits{}, err - } - } - - return l, s.Err() -} - -func parseUint(s string) (uint64, error) { - if s == limitsUnlimited { - return 18446744073709551615, nil - } - i, err := strconv.ParseUint(s, 10, 64) - if err != nil { - return 0, fmt.Errorf("couldn't parse value %q: %w", s, err) - } - return i, nil -} diff --git a/vendor/github.com/prometheus/procfs/proc_maps.go b/vendor/github.com/prometheus/procfs/proc_maps.go deleted file mode 100644 index 1d7772d5..00000000 --- a/vendor/github.com/prometheus/procfs/proc_maps.go +++ /dev/null @@ -1,209 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
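Callers read those limits via Proc.Limits; note the removed parseUint maps the literal "unlimited" to the maximum uint64. A sketch assuming the standalone module at a version matching the vendored one (the field types changed in later releases):

package main

import (
	"fmt"
	"log"
	"math"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	l, err := p.Limits()
	if err != nil {
		log.Fatal(err)
	}
	if l.AddressSpace == math.MaxUint64 { // the "unlimited" sentinel
		fmt.Println("address space: unlimited")
	}
	fmt.Println("max open files:", l.OpenFiles)
}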
- -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris - -package procfs - -import ( - "bufio" - "fmt" - "os" - "strconv" - "strings" - - "golang.org/x/sys/unix" -) - -// ProcMapPermissions contains permission settings read from /proc/[pid]/maps -type ProcMapPermissions struct { - // mapping has the [R]ead flag set - Read bool - // mapping has the [W]rite flag set - Write bool - // mapping has the [X]ecutable flag set - Execute bool - // mapping has the [S]hared flag set - Shared bool - // mapping is marked as [P]rivate (copy on write) - Private bool -} - -// ProcMap contains the process memory-mappings of the process, -// read from /proc/[pid]/maps -type ProcMap struct { - // The start address of current mapping. - StartAddr uintptr - // The end address of the current mapping - EndAddr uintptr - // The permissions for this mapping - Perms *ProcMapPermissions - // The current offset into the file/fd (e.g., shared libs) - Offset int64 - // Device owner of this mapping (major:minor) in Mkdev format. - Dev uint64 - // The inode of the device above - Inode uint64 - // The file or psuedofile (or empty==anonymous) - Pathname string -} - -// parseDevice parses the device token of a line and converts it to a dev_t -// (mkdev) like structure. -func parseDevice(s string) (uint64, error) { - toks := strings.Split(s, ":") - if len(toks) < 2 { - return 0, fmt.Errorf("unexpected number of fields") - } - - major, err := strconv.ParseUint(toks[0], 16, 0) - if err != nil { - return 0, err - } - - minor, err := strconv.ParseUint(toks[1], 16, 0) - if err != nil { - return 0, err - } - - return unix.Mkdev(uint32(major), uint32(minor)), nil -} - -// parseAddress just converts a hex-string to a uintptr -func parseAddress(s string) (uintptr, error) { - a, err := strconv.ParseUint(s, 16, 0) - if err != nil { - return 0, err - } - - return uintptr(a), nil -} - -// parseAddresses parses the start-end address -func parseAddresses(s string) (uintptr, uintptr, error) { - toks := strings.Split(s, "-") - if len(toks) < 2 { - return 0, 0, fmt.Errorf("invalid address") - } - - saddr, err := parseAddress(toks[0]) - if err != nil { - return 0, 0, err - } - - eaddr, err := parseAddress(toks[1]) - if err != nil { - return 0, 0, err - } - - return saddr, eaddr, nil -} - -// parsePermissions parses a token and returns any that are set. -func parsePermissions(s string) (*ProcMapPermissions, error) { - if len(s) < 4 { - return nil, fmt.Errorf("invalid permissions token") - } - - perms := ProcMapPermissions{} - for _, ch := range s { - switch ch { - case 'r': - perms.Read = true - case 'w': - perms.Write = true - case 'x': - perms.Execute = true - case 'p': - perms.Private = true - case 's': - perms.Shared = true - } - } - - return &perms, nil -} - -// parseProcMap will attempt to parse a single line within a proc/[pid]/maps -// buffer. 
-func parseProcMap(text string) (*ProcMap, error) { - fields := strings.Fields(text) - if len(fields) < 5 { - return nil, fmt.Errorf("truncated procmap entry") - } - - saddr, eaddr, err := parseAddresses(fields[0]) - if err != nil { - return nil, err - } - - perms, err := parsePermissions(fields[1]) - if err != nil { - return nil, err - } - - offset, err := strconv.ParseInt(fields[2], 16, 0) - if err != nil { - return nil, err - } - - device, err := parseDevice(fields[3]) - if err != nil { - return nil, err - } - - inode, err := strconv.ParseUint(fields[4], 10, 0) - if err != nil { - return nil, err - } - - pathname := "" - - if len(fields) >= 5 { - pathname = strings.Join(fields[5:], " ") - } - - return &ProcMap{ - StartAddr: saddr, - EndAddr: eaddr, - Perms: perms, - Offset: offset, - Dev: device, - Inode: inode, - Pathname: pathname, - }, nil -} - -// ProcMaps reads from /proc/[pid]/maps to get the memory-mappings of the -// process. -func (p Proc) ProcMaps() ([]*ProcMap, error) { - file, err := os.Open(p.path("maps")) - if err != nil { - return nil, err - } - defer file.Close() - - maps := []*ProcMap{} - scan := bufio.NewScanner(file) - - for scan.Scan() { - m, err := parseProcMap(scan.Text()) - if err != nil { - return nil, err - } - - maps = append(maps, m) - } - - return maps, nil -} diff --git a/vendor/github.com/prometheus/procfs/proc_ns.go b/vendor/github.com/prometheus/procfs/proc_ns.go deleted file mode 100644 index 391b4cbd..00000000 --- a/vendor/github.com/prometheus/procfs/proc_ns.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "fmt" - "os" - "strconv" - "strings" -) - -// Namespace represents a single namespace of a process. -type Namespace struct { - Type string // Namespace type. - Inode uint32 // Inode number of the namespace. If two processes are in the same namespace their inodes will match. -} - -// Namespaces contains all of the namespaces that the process is contained in. -type Namespaces map[string]Namespace - -// Namespaces reads from /proc//ns/* to get the namespaces of which the -// process is a member. 
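Put together, ProcMaps yields one ProcMap per maps line. A sketch printing the executable mappings of the current process, assuming the standalone module:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	maps, err := p.ProcMaps()
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range maps {
		if m.Perms.Execute {
			fmt.Printf("%x-%x %s\n", m.StartAddr, m.EndAddr, m.Pathname)
		}
	}
}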
-func (p Proc) Namespaces() (Namespaces, error) { - d, err := os.Open(p.path("ns")) - if err != nil { - return nil, err - } - defer d.Close() - - names, err := d.Readdirnames(-1) - if err != nil { - return nil, fmt.Errorf("failed to read contents of ns dir: %w", err) - } - - ns := make(Namespaces, len(names)) - for _, name := range names { - target, err := os.Readlink(p.path("ns", name)) - if err != nil { - return nil, err - } - - fields := strings.SplitN(target, ":", 2) - if len(fields) != 2 { - return nil, fmt.Errorf("failed to parse namespace type and inode from %q", target) - } - - typ := fields[0] - inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32) - if err != nil { - return nil, fmt.Errorf("failed to parse inode from %q: %w", fields[1], err) - } - - ns[name] = Namespace{typ, uint32(inode)} - } - - return ns, nil -} diff --git a/vendor/github.com/prometheus/procfs/proc_psi.go b/vendor/github.com/prometheus/procfs/proc_psi.go deleted file mode 100644 index dc6c14f0..00000000 --- a/vendor/github.com/prometheus/procfs/proc_psi.go +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -// The PSI / pressure interface is described at -// https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/Documentation/accounting/psi.txt -// Each resource (cpu, io, memory, ...) is exposed as a single file. -// Each file may contain up to two lines, one for "some" pressure and one for "full" pressure. -// Each line contains several averages (over n seconds) and a total in µs. -// -// Example io pressure file: -// > some avg10=0.06 avg60=0.21 avg300=0.99 total=8537362 -// > full avg10=0.00 avg60=0.13 avg300=0.96 total=8183134 - -import ( - "bufio" - "bytes" - "fmt" - "io" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -const lineFormat = "avg10=%f avg60=%f avg300=%f total=%d" - -// PSILine is a single line of values as returned by /proc/pressure/* -// The Avg entries are averages over n seconds, as a percentage -// The Total line is in microseconds -type PSILine struct { - Avg10 float64 - Avg60 float64 - Avg300 float64 - Total uint64 -} - -// PSIStats represent pressure stall information from /proc/pressure/* -// Some indicates the share of time in which at least some tasks are stalled -// Full indicates the share of time in which all non-idle tasks are stalled simultaneously -type PSIStats struct { - Some *PSILine - Full *PSILine -} - -// PSIStatsForResource reads pressure stall information for the specified -// resource from /proc/pressure/. At time of writing this can be -// either "cpu", "memory" or "io". 
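Each /proc/<pid>/ns entry is a symlink shaped like "net:[4026531992]", which the removed code splits into a type and an inode; equal inodes mean two processes share that namespace. A usage sketch, assuming the standalone module:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	ns, err := p.Namespaces()
	if err != nil {
		log.Fatal(err)
	}
	for name, n := range ns {
		// Processes sharing a namespace report the same inode here.
		fmt.Printf("%-6s %s:[%d]\n", name, n.Type, n.Inode)
	}
}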
-func (fs FS) PSIStatsForResource(resource string) (PSIStats, error) { - data, err := util.ReadFileNoStat(fs.proc.Path(fmt.Sprintf("%s/%s", "pressure", resource))) - if err != nil { - return PSIStats{}, fmt.Errorf("psi_stats: unavailable for %q: %w", resource, err) - } - - return parsePSIStats(resource, bytes.NewReader(data)) -} - -// parsePSIStats parses the specified file for pressure stall information -func parsePSIStats(resource string, r io.Reader) (PSIStats, error) { - psiStats := PSIStats{} - - scanner := bufio.NewScanner(r) - for scanner.Scan() { - l := scanner.Text() - prefix := strings.Split(l, " ")[0] - switch prefix { - case "some": - psi := PSILine{} - _, err := fmt.Sscanf(l, fmt.Sprintf("some %s", lineFormat), &psi.Avg10, &psi.Avg60, &psi.Avg300, &psi.Total) - if err != nil { - return PSIStats{}, err - } - psiStats.Some = &psi - case "full": - psi := PSILine{} - _, err := fmt.Sscanf(l, fmt.Sprintf("full %s", lineFormat), &psi.Avg10, &psi.Avg60, &psi.Avg300, &psi.Total) - if err != nil { - return PSIStats{}, err - } - psiStats.Full = &psi - default: - // If we encounter a line with an unknown prefix, ignore it and move on - // Should new measurement types be added in the future we'll simply ignore them instead - // of erroring on retrieval - continue - } - } - - return psiStats, nil -} diff --git a/vendor/github.com/prometheus/procfs/proc_smaps.go b/vendor/github.com/prometheus/procfs/proc_smaps.go deleted file mode 100644 index a576a720..00000000 --- a/vendor/github.com/prometheus/procfs/proc_smaps.go +++ /dev/null @@ -1,165 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build !windows - -package procfs - -import ( - "bufio" - "errors" - "fmt" - "os" - "regexp" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -var ( - // match the header line before each mapped zone in /proc/pid/smaps - procSMapsHeaderLine = regexp.MustCompile(`^[a-f0-9].*$`) -) - -type ProcSMapsRollup struct { - // Amount of the mapping that is currently resident in RAM - Rss uint64 - // Process's proportional share of this mapping - Pss uint64 - // Size in bytes of clean shared pages - SharedClean uint64 - // Size in bytes of dirty shared pages - SharedDirty uint64 - // Size in bytes of clean private pages - PrivateClean uint64 - // Size in bytes of dirty private pages - PrivateDirty uint64 - // Amount of memory currently marked as referenced or accessed - Referenced uint64 - // Amount of memory that does not belong to any file - Anonymous uint64 - // Amount would-be-anonymous memory currently on swap - Swap uint64 - // Process's proportional memory on swap - SwapPss uint64 -} - -// ProcSMapsRollup reads from /proc/[pid]/smaps_rollup to get summed memory information of the -// process. -// -// If smaps_rollup does not exists (require kernel >= 4.15), the content of /proc/pid/smaps will -// we read and summed. 
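PSIStatsForResource above maps directly onto the per-resource pressure files. A sketch assuming the standalone module and a kernel built with CONFIG_PSI (the interface appeared around Linux 4.20):

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	stats, err := fs.PSIStatsForResource("memory") // "cpu" and "io" are also valid
	if err != nil {
		log.Fatal(err) // typically means PSI is unavailable on this kernel
	}
	if some := stats.Some; some != nil {
		fmt.Printf("memory some: avg10=%.2f%% total=%dµs\n", some.Avg10, some.Total)
	}
}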
-func (p Proc) ProcSMapsRollup() (ProcSMapsRollup, error) {
-	data, err := util.ReadFileNoStat(p.path("smaps_rollup"))
-	if err != nil && os.IsNotExist(err) {
-		return p.procSMapsRollupManual()
-	}
-	if err != nil {
-		return ProcSMapsRollup{}, err
-	}
-
-	lines := strings.Split(string(data), "\n")
-	smaps := ProcSMapsRollup{}
-
-	// skip the first line, which doesn't contain information we need
-	lines = lines[1:]
-	for _, line := range lines {
-		if line == "" {
-			continue
-		}
-
-		if err := smaps.parseLine(line); err != nil {
-			return ProcSMapsRollup{}, err
-		}
-	}
-
-	return smaps, nil
-}
-
-// Read /proc/pid/smaps and do the roll-up in Go code.
-func (p Proc) procSMapsRollupManual() (ProcSMapsRollup, error) {
-	file, err := os.Open(p.path("smaps"))
-	if err != nil {
-		return ProcSMapsRollup{}, err
-	}
-	defer file.Close()
-
-	smaps := ProcSMapsRollup{}
-	scan := bufio.NewScanner(file)
-
-	for scan.Scan() {
-		line := scan.Text()
-
-		if procSMapsHeaderLine.MatchString(line) {
-			continue
-		}
-
-		if err := smaps.parseLine(line); err != nil {
-			return ProcSMapsRollup{}, err
-		}
-	}
-
-	return smaps, nil
-}
-
-func (s *ProcSMapsRollup) parseLine(line string) error {
-	kv := strings.SplitN(line, ":", 2)
-	if len(kv) != 2 {
-		return errors.New("invalid smaps line, missing colon")
-	}
-
-	k := kv[0]
-	if k == "VmFlags" {
-		return nil
-	}
-
-	v := strings.TrimSpace(kv[1])
-	v = strings.TrimRight(v, " kB")
-
-	vKBytes, err := strconv.ParseUint(v, 10, 64)
-	if err != nil {
-		return err
-	}
-	vBytes := vKBytes * 1024
-
-	s.addValue(k, v, vKBytes, vBytes)
-
-	return nil
-}
-
-func (s *ProcSMapsRollup) addValue(k string, vString string, vUint uint64, vUintBytes uint64) {
-	switch k {
-	case "Rss":
-		s.Rss += vUintBytes
-	case "Pss":
-		s.Pss += vUintBytes
-	case "Shared_Clean":
-		s.SharedClean += vUintBytes
-	case "Shared_Dirty":
-		s.SharedDirty += vUintBytes
-	case "Private_Clean":
-		s.PrivateClean += vUintBytes
-	case "Private_Dirty":
-		s.PrivateDirty += vUintBytes
-	case "Referenced":
-		s.Referenced += vUintBytes
-	case "Anonymous":
-		s.Anonymous += vUintBytes
-	case "Swap":
-		s.Swap += vUintBytes
-	case "SwapPss":
-		s.SwapPss += vUintBytes
-	}
-}
diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go
deleted file mode 100644
index 67ca0e9f..00000000
--- a/vendor/github.com/prometheus/procfs/proc_stat.go
+++ /dev/null
@@ -1,189 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
-	"bytes"
-	"fmt"
-	"os"
-
-	"github.com/prometheus/procfs/internal/fs"
-	"github.com/prometheus/procfs/internal/util"
-)
-
-// Originally, this USER_HZ value was dynamically retrieved via a sysconf call
-// which required cgo. However, that caused a lot of problems regarding
-// cross-compilation. Alternatives such as running a binary to determine the
-// value, or trying to derive it in some other way were all problematic.
After -// much research it was determined that USER_HZ is actually hardcoded to 100 on -// all Go-supported platforms as of the time of this writing. This is why we -// decided to hardcode it here as well. It is not impossible that there could -// be systems with exceptions, but they should be very exotic edge cases, and -// in that case, the worst outcome will be two misreported metrics. -// -// See also the following discussions: -// -// - https://github.com/prometheus/node_exporter/issues/52 -// - https://github.com/prometheus/procfs/pull/2 -// - http://stackoverflow.com/questions/17410841/how-does-user-hz-solve-the-jiffy-scaling-issue -const userHZ = 100 - -// ProcStat provides status information about the process, -// read from /proc/[pid]/stat. -type ProcStat struct { - // The process ID. - PID int - // The filename of the executable. - Comm string - // The process state. - State string - // The PID of the parent of this process. - PPID int - // The process group ID of the process. - PGRP int - // The session ID of the process. - Session int - // The controlling terminal of the process. - TTY int - // The ID of the foreground process group of the controlling terminal of - // the process. - TPGID int - // The kernel flags word of the process. - Flags uint - // The number of minor faults the process has made which have not required - // loading a memory page from disk. - MinFlt uint - // The number of minor faults that the process's waited-for children have - // made. - CMinFlt uint - // The number of major faults the process has made which have required - // loading a memory page from disk. - MajFlt uint - // The number of major faults that the process's waited-for children have - // made. - CMajFlt uint - // Amount of time that this process has been scheduled in user mode, - // measured in clock ticks. - UTime uint - // Amount of time that this process has been scheduled in kernel mode, - // measured in clock ticks. - STime uint - // Amount of time that this process's waited-for children have been - // scheduled in user mode, measured in clock ticks. - CUTime uint - // Amount of time that this process's waited-for children have been - // scheduled in kernel mode, measured in clock ticks. - CSTime uint - // For processes running a real-time scheduling policy, this is the negated - // scheduling priority, minus one. - Priority int - // The nice value, a value in the range 19 (low priority) to -20 (high - // priority). - Nice int - // Number of threads in this process. - NumThreads int - // The time the process started after system boot, the value is expressed - // in clock ticks. - Starttime uint64 - // Virtual memory size in bytes. - VSize uint - // Resident set size in pages. - RSS int - - proc fs.FS -} - -// NewStat returns the current status information of the process. -// -// Deprecated: use p.Stat() instead -func (p Proc) NewStat() (ProcStat, error) { - return p.Stat() -} - -// Stat returns the current status information of the process. 
-func (p Proc) Stat() (ProcStat, error) {
-	data, err := util.ReadFileNoStat(p.path("stat"))
-	if err != nil {
-		return ProcStat{}, err
-	}
-
-	var (
-		ignore int
-
-		s = ProcStat{PID: p.PID, proc: p.fs}
-		l = bytes.Index(data, []byte("("))
-		r = bytes.LastIndex(data, []byte(")"))
-	)
-
-	if l < 0 || r < 0 {
-		return ProcStat{}, fmt.Errorf("unexpected format, couldn't extract comm %q", data)
-	}
-
-	s.Comm = string(data[l+1 : r])
-	_, err = fmt.Fscan(
-		bytes.NewBuffer(data[r+2:]),
-		&s.State,
-		&s.PPID,
-		&s.PGRP,
-		&s.Session,
-		&s.TTY,
-		&s.TPGID,
-		&s.Flags,
-		&s.MinFlt,
-		&s.CMinFlt,
-		&s.MajFlt,
-		&s.CMajFlt,
-		&s.UTime,
-		&s.STime,
-		&s.CUTime,
-		&s.CSTime,
-		&s.Priority,
-		&s.Nice,
-		&s.NumThreads,
-		&ignore,
-		&s.Starttime,
-		&s.VSize,
-		&s.RSS,
-	)
-	if err != nil {
-		return ProcStat{}, err
-	}
-
-	return s, nil
-}
-
-// VirtualMemory returns the virtual memory size in bytes.
-func (s ProcStat) VirtualMemory() uint {
-	return s.VSize
-}
-
-// ResidentMemory returns the resident memory size in bytes.
-func (s ProcStat) ResidentMemory() int {
-	return s.RSS * os.Getpagesize()
-}
-
-// StartTime returns the time the process started, as a unix timestamp in seconds.
-func (s ProcStat) StartTime() (float64, error) {
-	fs := FS{proc: s.proc}
-	stat, err := fs.Stat()
-	if err != nil {
-		return 0, err
-	}
-	return float64(stat.BootTime) + (float64(s.Starttime) / userHZ), nil
-}
-
-// CPUTime returns the total CPU user and system time in seconds.
-func (s ProcStat) CPUTime() float64 {
-	return float64(s.UTime+s.STime) / userHZ
-}
diff --git a/vendor/github.com/prometheus/procfs/proc_status.go b/vendor/github.com/prometheus/procfs/proc_status.go
deleted file mode 100644
index 6edd8333..00000000
--- a/vendor/github.com/prometheus/procfs/proc_status.go
+++ /dev/null
@@ -1,170 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
-	"bytes"
-	"strconv"
-	"strings"
-
-	"github.com/prometheus/procfs/internal/util"
-)
-
-// ProcStatus provides status information about the process,
-// read from /proc/[pid]/status.
-type ProcStatus struct {
-	// The process ID.
-	PID int
-	// The process name.
-	Name string
-
-	// Thread group ID.
-	TGID int
-
-	// Peak virtual memory size.
-	VmPeak uint64 // nolint:golint
-	// Virtual memory size.
-	VmSize uint64 // nolint:golint
-	// Locked memory size.
-	VmLck uint64 // nolint:golint
-	// Pinned memory size.
-	VmPin uint64 // nolint:golint
-	// Peak resident set size.
-	VmHWM uint64 // nolint:golint
-	// Resident set size (sum of RssAnon, RssFile and RssShmem).
-	VmRSS uint64 // nolint:golint
-	// Size of resident anonymous memory.
-	RssAnon uint64 // nolint:golint
-	// Size of resident file mappings.
-	RssFile uint64 // nolint:golint
-	// Size of resident shared memory.
-	RssShmem uint64 // nolint:golint
-	// Size of data segments.
-	VmData uint64 // nolint:golint
-	// Size of stack segments.
-	VmStk uint64 // nolint:golint
-	// Size of text segments.
- VmExe uint64 // nolint:golint - // Shared library code size. - VmLib uint64 // nolint:golint - // Page table entries size. - VmPTE uint64 // nolint:golint - // Size of second-level page tables. - VmPMD uint64 // nolint:golint - // Swapped-out virtual memory size by anonymous private. - VmSwap uint64 // nolint:golint - // Size of hugetlb memory portions - HugetlbPages uint64 - - // Number of voluntary context switches. - VoluntaryCtxtSwitches uint64 - // Number of involuntary context switches. - NonVoluntaryCtxtSwitches uint64 - - // UIDs of the process (Real, effective, saved set, and filesystem UIDs) - UIDs [4]string - // GIDs of the process (Real, effective, saved set, and filesystem GIDs) - GIDs [4]string -} - -// NewStatus returns the current status information of the process. -func (p Proc) NewStatus() (ProcStatus, error) { - data, err := util.ReadFileNoStat(p.path("status")) - if err != nil { - return ProcStatus{}, err - } - - s := ProcStatus{PID: p.PID} - - lines := strings.Split(string(data), "\n") - for _, line := range lines { - if !bytes.Contains([]byte(line), []byte(":")) { - continue - } - - kv := strings.SplitN(line, ":", 2) - - // removes spaces - k := string(strings.TrimSpace(kv[0])) - v := string(strings.TrimSpace(kv[1])) - // removes "kB" - v = string(bytes.Trim([]byte(v), " kB")) - - // value to int when possible - // we can skip error check here, 'cause vKBytes is not used when value is a string - vKBytes, _ := strconv.ParseUint(v, 10, 64) - // convert kB to B - vBytes := vKBytes * 1024 - - s.fillStatus(k, v, vKBytes, vBytes) - } - - return s, nil -} - -func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintBytes uint64) { - switch k { - case "Tgid": - s.TGID = int(vUint) - case "Name": - s.Name = vString - case "Uid": - copy(s.UIDs[:], strings.Split(vString, "\t")) - case "Gid": - copy(s.GIDs[:], strings.Split(vString, "\t")) - case "VmPeak": - s.VmPeak = vUintBytes - case "VmSize": - s.VmSize = vUintBytes - case "VmLck": - s.VmLck = vUintBytes - case "VmPin": - s.VmPin = vUintBytes - case "VmHWM": - s.VmHWM = vUintBytes - case "VmRSS": - s.VmRSS = vUintBytes - case "RssAnon": - s.RssAnon = vUintBytes - case "RssFile": - s.RssFile = vUintBytes - case "RssShmem": - s.RssShmem = vUintBytes - case "VmData": - s.VmData = vUintBytes - case "VmStk": - s.VmStk = vUintBytes - case "VmExe": - s.VmExe = vUintBytes - case "VmLib": - s.VmLib = vUintBytes - case "VmPTE": - s.VmPTE = vUintBytes - case "VmPMD": - s.VmPMD = vUintBytes - case "VmSwap": - s.VmSwap = vUintBytes - case "HugetlbPages": - s.HugetlbPages = vUintBytes - case "voluntary_ctxt_switches": - s.VoluntaryCtxtSwitches = vUint - case "nonvoluntary_ctxt_switches": - s.NonVoluntaryCtxtSwitches = vUint - } -} - -// TotalCtxtSwitches returns the total context switch. -func (s ProcStatus) TotalCtxtSwitches() uint64 { - return s.VoluntaryCtxtSwitches + s.NonVoluntaryCtxtSwitches -} diff --git a/vendor/github.com/prometheus/procfs/schedstat.go b/vendor/github.com/prometheus/procfs/schedstat.go deleted file mode 100644 index 28228164..00000000 --- a/vendor/github.com/prometheus/procfs/schedstat.go +++ /dev/null @@ -1,121 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "errors" - "os" - "regexp" - "strconv" -) - -var ( - cpuLineRE = regexp.MustCompile(`cpu(\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+)`) - procLineRE = regexp.MustCompile(`(\d+) (\d+) (\d+)`) -) - -// Schedstat contains scheduler statistics from /proc/schedstat -// -// See -// https://www.kernel.org/doc/Documentation/scheduler/sched-stats.txt -// for a detailed description of what these numbers mean. -// -// Note the current kernel documentation claims some of the time units are in -// jiffies when they are actually in nanoseconds since 2.6.23 with the -// introduction of CFS. A fix to the documentation is pending. See -// https://lore.kernel.org/patchwork/project/lkml/list/?series=403473 -type Schedstat struct { - CPUs []*SchedstatCPU -} - -// SchedstatCPU contains the values from one "cpu" line -type SchedstatCPU struct { - CPUNum string - - RunningNanoseconds uint64 - WaitingNanoseconds uint64 - RunTimeslices uint64 -} - -// ProcSchedstat contains the values from /proc//schedstat -type ProcSchedstat struct { - RunningNanoseconds uint64 - WaitingNanoseconds uint64 - RunTimeslices uint64 -} - -// Schedstat reads data from /proc/schedstat -func (fs FS) Schedstat() (*Schedstat, error) { - file, err := os.Open(fs.proc.Path("schedstat")) - if err != nil { - return nil, err - } - defer file.Close() - - stats := &Schedstat{} - scanner := bufio.NewScanner(file) - - for scanner.Scan() { - match := cpuLineRE.FindStringSubmatch(scanner.Text()) - if match != nil { - cpu := &SchedstatCPU{} - cpu.CPUNum = match[1] - - cpu.RunningNanoseconds, err = strconv.ParseUint(match[8], 10, 64) - if err != nil { - continue - } - - cpu.WaitingNanoseconds, err = strconv.ParseUint(match[9], 10, 64) - if err != nil { - continue - } - - cpu.RunTimeslices, err = strconv.ParseUint(match[10], 10, 64) - if err != nil { - continue - } - - stats.CPUs = append(stats.CPUs, cpu) - } - } - - return stats, nil -} - -func parseProcSchedstat(contents string) (ProcSchedstat, error) { - var ( - stats ProcSchedstat - err error - ) - match := procLineRE.FindStringSubmatch(contents) - - if match != nil { - stats.RunningNanoseconds, err = strconv.ParseUint(match[1], 10, 64) - if err != nil { - return stats, err - } - - stats.WaitingNanoseconds, err = strconv.ParseUint(match[2], 10, 64) - if err != nil { - return stats, err - } - - stats.RunTimeslices, err = strconv.ParseUint(match[3], 10, 64) - return stats, err - } - - return stats, errors.New("could not parse schedstat") -} diff --git a/vendor/github.com/prometheus/procfs/slab.go b/vendor/github.com/prometheus/procfs/slab.go deleted file mode 100644 index 7896fd72..00000000 --- a/vendor/github.com/prometheus/procfs/slab.go +++ /dev/null @@ -1,151 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "fmt" - "regexp" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -var ( - slabSpace = regexp.MustCompile(`\s+`) - slabVer = regexp.MustCompile(`slabinfo -`) - slabHeader = regexp.MustCompile(`# name`) -) - -// Slab represents a slab pool in the kernel. -type Slab struct { - Name string - ObjActive int64 - ObjNum int64 - ObjSize int64 - ObjPerSlab int64 - PagesPerSlab int64 - // tunables - Limit int64 - Batch int64 - SharedFactor int64 - SlabActive int64 - SlabNum int64 - SharedAvail int64 -} - -// SlabInfo represents info for all slabs. -type SlabInfo struct { - Slabs []*Slab -} - -func shouldParseSlab(line string) bool { - if slabVer.MatchString(line) { - return false - } - if slabHeader.MatchString(line) { - return false - } - return true -} - -// parseV21SlabEntry is used to parse a line from /proc/slabinfo version 2.1. -func parseV21SlabEntry(line string) (*Slab, error) { - // First cleanup whitespace. - l := slabSpace.ReplaceAllString(line, " ") - s := strings.Split(l, " ") - if len(s) != 16 { - return nil, fmt.Errorf("unable to parse: %q", line) - } - var err error - i := &Slab{Name: s[0]} - i.ObjActive, err = strconv.ParseInt(s[1], 10, 64) - if err != nil { - return nil, err - } - i.ObjNum, err = strconv.ParseInt(s[2], 10, 64) - if err != nil { - return nil, err - } - i.ObjSize, err = strconv.ParseInt(s[3], 10, 64) - if err != nil { - return nil, err - } - i.ObjPerSlab, err = strconv.ParseInt(s[4], 10, 64) - if err != nil { - return nil, err - } - i.PagesPerSlab, err = strconv.ParseInt(s[5], 10, 64) - if err != nil { - return nil, err - } - i.Limit, err = strconv.ParseInt(s[8], 10, 64) - if err != nil { - return nil, err - } - i.Batch, err = strconv.ParseInt(s[9], 10, 64) - if err != nil { - return nil, err - } - i.SharedFactor, err = strconv.ParseInt(s[10], 10, 64) - if err != nil { - return nil, err - } - i.SlabActive, err = strconv.ParseInt(s[13], 10, 64) - if err != nil { - return nil, err - } - i.SlabNum, err = strconv.ParseInt(s[14], 10, 64) - if err != nil { - return nil, err - } - i.SharedAvail, err = strconv.ParseInt(s[15], 10, 64) - if err != nil { - return nil, err - } - return i, nil -} - -// parseSlabInfo21 is used to parse a slabinfo 2.1 file. -func parseSlabInfo21(r *bytes.Reader) (SlabInfo, error) { - scanner := bufio.NewScanner(r) - s := SlabInfo{Slabs: []*Slab{}} - for scanner.Scan() { - line := scanner.Text() - if !shouldParseSlab(line) { - continue - } - slab, err := parseV21SlabEntry(line) - if err != nil { - return s, err - } - s.Slabs = append(s.Slabs, slab) - } - return s, nil -} - -// SlabInfo reads data from /proc/slabinfo -func (fs FS) SlabInfo() (SlabInfo, error) { - // TODO: Consider passing options to allow for parsing different - // slabinfo versions. However, slabinfo 2.1 has been stable since - // kernel 2.6.10 and later. 
- data, err := util.ReadFileNoStat(fs.proc.Path("slabinfo")) - if err != nil { - return SlabInfo{}, err - } - - return parseSlabInfo21(bytes.NewReader(data)) -} diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go deleted file mode 100644 index 6d872754..00000000 --- a/vendor/github.com/prometheus/procfs/stat.go +++ /dev/null @@ -1,244 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "fmt" - "io" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/fs" - "github.com/prometheus/procfs/internal/util" -) - -// CPUStat shows how much time the cpu spend in various stages. -type CPUStat struct { - User float64 - Nice float64 - System float64 - Idle float64 - Iowait float64 - IRQ float64 - SoftIRQ float64 - Steal float64 - Guest float64 - GuestNice float64 -} - -// SoftIRQStat represent the softirq statistics as exported in the procfs stat file. -// A nice introduction can be found at https://0xax.gitbooks.io/linux-insides/content/interrupts/interrupts-9.html -// It is possible to get per-cpu stats by reading /proc/softirqs -type SoftIRQStat struct { - Hi uint64 - Timer uint64 - NetTx uint64 - NetRx uint64 - Block uint64 - BlockIoPoll uint64 - Tasklet uint64 - Sched uint64 - Hrtimer uint64 - Rcu uint64 -} - -// Stat represents kernel/system statistics. -type Stat struct { - // Boot time in seconds since the Epoch. - BootTime uint64 - // Summed up cpu statistics. - CPUTotal CPUStat - // Per-CPU statistics. - CPU []CPUStat - // Number of times interrupts were handled, which contains numbered and unnumbered IRQs. - IRQTotal uint64 - // Number of times a numbered IRQ was triggered. - IRQ []uint64 - // Number of times a context switch happened. - ContextSwitches uint64 - // Number of times a process was created. - ProcessCreated uint64 - // Number of processes currently running. - ProcessesRunning uint64 - // Number of processes currently blocked (waiting for IO). - ProcessesBlocked uint64 - // Number of times a softirq was scheduled. - SoftIRQTotal uint64 - // Detailed softirq statistics. - SoftIRQ SoftIRQStat -} - -// Parse a cpu statistics line and returns the CPUStat struct plus the cpu id (or -1 for the overall sum). 
-func parseCPUStat(line string) (CPUStat, int64, error) { - cpuStat := CPUStat{} - var cpu string - - count, err := fmt.Sscanf(line, "%s %f %f %f %f %f %f %f %f %f %f", - &cpu, - &cpuStat.User, &cpuStat.Nice, &cpuStat.System, &cpuStat.Idle, - &cpuStat.Iowait, &cpuStat.IRQ, &cpuStat.SoftIRQ, &cpuStat.Steal, - &cpuStat.Guest, &cpuStat.GuestNice) - - if err != nil && err != io.EOF { - return CPUStat{}, -1, fmt.Errorf("couldn't parse %q (cpu): %w", line, err) - } - if count == 0 { - return CPUStat{}, -1, fmt.Errorf("couldn't parse %q (cpu): 0 elements parsed", line) - } - - cpuStat.User /= userHZ - cpuStat.Nice /= userHZ - cpuStat.System /= userHZ - cpuStat.Idle /= userHZ - cpuStat.Iowait /= userHZ - cpuStat.IRQ /= userHZ - cpuStat.SoftIRQ /= userHZ - cpuStat.Steal /= userHZ - cpuStat.Guest /= userHZ - cpuStat.GuestNice /= userHZ - - if cpu == "cpu" { - return cpuStat, -1, nil - } - - cpuID, err := strconv.ParseInt(cpu[3:], 10, 64) - if err != nil { - return CPUStat{}, -1, fmt.Errorf("couldn't parse %q (cpu/cpuid): %w", line, err) - } - - return cpuStat, cpuID, nil -} - -// Parse a softirq line. -func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) { - softIRQStat := SoftIRQStat{} - var total uint64 - var prefix string - - _, err := fmt.Sscanf(line, "%s %d %d %d %d %d %d %d %d %d %d %d", - &prefix, &total, - &softIRQStat.Hi, &softIRQStat.Timer, &softIRQStat.NetTx, &softIRQStat.NetRx, - &softIRQStat.Block, &softIRQStat.BlockIoPoll, - &softIRQStat.Tasklet, &softIRQStat.Sched, - &softIRQStat.Hrtimer, &softIRQStat.Rcu) - - if err != nil { - return SoftIRQStat{}, 0, fmt.Errorf("couldn't parse %q (softirq): %w", line, err) - } - - return softIRQStat, total, nil -} - -// NewStat returns information about current cpu/process statistics. -// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt -// -// Deprecated: use fs.Stat() instead -func NewStat() (Stat, error) { - fs, err := NewFS(fs.DefaultProcMountPoint) - if err != nil { - return Stat{}, err - } - return fs.Stat() -} - -// NewStat returns information about current cpu/process statistics. -// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt -// -// Deprecated: use fs.Stat() instead -func (fs FS) NewStat() (Stat, error) { - return fs.Stat() -} - -// Stat returns information about current cpu/process statistics. 
-// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt -func (fs FS) Stat() (Stat, error) { - fileName := fs.proc.Path("stat") - data, err := util.ReadFileNoStat(fileName) - if err != nil { - return Stat{}, err - } - - stat := Stat{} - - scanner := bufio.NewScanner(bytes.NewReader(data)) - for scanner.Scan() { - line := scanner.Text() - parts := strings.Fields(scanner.Text()) - // require at least - if len(parts) < 2 { - continue - } - switch { - case parts[0] == "btime": - if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %q (btime): %w", parts[1], err) - } - case parts[0] == "intr": - if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %q (intr): %w", parts[1], err) - } - numberedIRQs := parts[2:] - stat.IRQ = make([]uint64, len(numberedIRQs)) - for i, count := range numberedIRQs { - if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %q (intr%d): %w", count, i, err) - } - } - case parts[0] == "ctxt": - if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %q (ctxt): %w", parts[1], err) - } - case parts[0] == "processes": - if stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %q (processes): %w", parts[1], err) - } - case parts[0] == "procs_running": - if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %q (procs_running): %w", parts[1], err) - } - case parts[0] == "procs_blocked": - if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %q (procs_blocked): %w", parts[1], err) - } - case parts[0] == "softirq": - softIRQStats, total, err := parseSoftIRQStat(line) - if err != nil { - return Stat{}, err - } - stat.SoftIRQTotal = total - stat.SoftIRQ = softIRQStats - case strings.HasPrefix(parts[0], "cpu"): - cpuStat, cpuID, err := parseCPUStat(line) - if err != nil { - return Stat{}, err - } - if cpuID == -1 { - stat.CPUTotal = cpuStat - } else { - for int64(len(stat.CPU)) <= cpuID { - stat.CPU = append(stat.CPU, CPUStat{}) - } - stat.CPU[cpuID] = cpuStat - } - } - } - - if err := scanner.Err(); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %q: %w", fileName, err) - } - - return stat, nil -} diff --git a/vendor/github.com/prometheus/procfs/swaps.go b/vendor/github.com/prometheus/procfs/swaps.go deleted file mode 100644 index 15edc221..00000000 --- a/vendor/github.com/prometheus/procfs/swaps.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package procfs - -import ( - "bufio" - "bytes" - "fmt" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// Swap represents an entry in /proc/swaps. -type Swap struct { - Filename string - Type string - Size int - Used int - Priority int -} - -// Swaps returns a slice of all configured swap devices on the system. -func (fs FS) Swaps() ([]*Swap, error) { - data, err := util.ReadFileNoStat(fs.proc.Path("swaps")) - if err != nil { - return nil, err - } - return parseSwaps(data) -} - -func parseSwaps(info []byte) ([]*Swap, error) { - swaps := []*Swap{} - scanner := bufio.NewScanner(bytes.NewReader(info)) - scanner.Scan() // ignore header line - for scanner.Scan() { - swapString := scanner.Text() - parsedSwap, err := parseSwapString(swapString) - if err != nil { - return nil, err - } - swaps = append(swaps, parsedSwap) - } - - err := scanner.Err() - return swaps, err -} - -func parseSwapString(swapString string) (*Swap, error) { - var err error - - swapFields := strings.Fields(swapString) - swapLength := len(swapFields) - if swapLength < 5 { - return nil, fmt.Errorf("too few fields in swap string: %s", swapString) - } - - swap := &Swap{ - Filename: swapFields[0], - Type: swapFields[1], - } - - swap.Size, err = strconv.Atoi(swapFields[2]) - if err != nil { - return nil, fmt.Errorf("invalid swap size: %s", swapFields[2]) - } - swap.Used, err = strconv.Atoi(swapFields[3]) - if err != nil { - return nil, fmt.Errorf("invalid swap used: %s", swapFields[3]) - } - swap.Priority, err = strconv.Atoi(swapFields[4]) - if err != nil { - return nil, fmt.Errorf("invalid swap priority: %s", swapFields[4]) - } - - return swap, nil -} diff --git a/vendor/github.com/prometheus/procfs/ttar b/vendor/github.com/prometheus/procfs/ttar deleted file mode 100644 index 19ef02b8..00000000 --- a/vendor/github.com/prometheus/procfs/ttar +++ /dev/null @@ -1,413 +0,0 @@ -#!/usr/bin/env bash - -# Purpose: plain text tar format -# Limitations: - only suitable for text files, directories, and symlinks -# - stores only filename, content, and mode -# - not designed for untrusted input -# -# Note: must work with bash version 3.2 (macOS) - -# Copyright 2017 Roger Luethi -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -o errexit -o nounset - -# Sanitize environment (for instance, standard sorting of glob matches) -export LC_ALL=C - -path="" -CMD="" -ARG_STRING="$*" - -#------------------------------------------------------------------------------ -# Not all sed implementations can work on null bytes. In order to make ttar -# work out of the box on macOS, use Python as a stream editor. 
- -USE_PYTHON=0 - -PYTHON_CREATE_FILTER=$(cat << 'PCF' -#!/usr/bin/env python - -import re -import sys - -for line in sys.stdin: - line = re.sub(r'EOF', r'\EOF', line) - line = re.sub(r'NULLBYTE', r'\NULLBYTE', line) - line = re.sub('\x00', r'NULLBYTE', line) - sys.stdout.write(line) -PCF -) - -PYTHON_EXTRACT_FILTER=$(cat << 'PEF' -#!/usr/bin/env python - -import re -import sys - -for line in sys.stdin: - line = re.sub(r'(?/dev/null; then - echo "ERROR Python not found. Aborting." - exit 2 - fi - USE_PYTHON=1 - fi -} - -#------------------------------------------------------------------------------ - -function usage { - bname=$(basename "$0") - cat << USAGE -Usage: $bname [-C ] -c -f (create archive) - $bname -t -f (list archive contents) - $bname [-C ] -x -f (extract archive) - -Options: - -C (change directory) - -v (verbose) - --recursive-unlink (recursively delete existing directory if path - collides with file or directory to extract) - -Example: Change to sysfs directory, create ttar file from fixtures directory - $bname -C sysfs -c -f sysfs/fixtures.ttar fixtures/ -USAGE -exit "$1" -} - -function vecho { - if [ "${VERBOSE:-}" == "yes" ]; then - echo >&7 "$@" - fi -} - -function set_cmd { - if [ -n "$CMD" ]; then - echo "ERROR: more than one command given" - echo - usage 2 - fi - CMD=$1 -} - -unset VERBOSE -unset RECURSIVE_UNLINK - -while getopts :cf:-:htxvC: opt; do - case $opt in - c) - set_cmd "create" - ;; - f) - ARCHIVE=$OPTARG - ;; - h) - usage 0 - ;; - t) - set_cmd "list" - ;; - x) - set_cmd "extract" - ;; - v) - VERBOSE=yes - exec 7>&1 - ;; - C) - CDIR=$OPTARG - ;; - -) - case $OPTARG in - recursive-unlink) - RECURSIVE_UNLINK="yes" - ;; - *) - echo -e "Error: invalid option -$OPTARG" - echo - usage 1 - ;; - esac - ;; - *) - echo >&2 "ERROR: invalid option -$OPTARG" - echo - usage 1 - ;; - esac -done - -# Remove processed options from arguments -shift $(( OPTIND - 1 )); - -if [ "${CMD:-}" == "" ]; then - echo >&2 "ERROR: no command given" - echo - usage 1 -elif [ "${ARCHIVE:-}" == "" ]; then - echo >&2 "ERROR: no archive name given" - echo - usage 1 -fi - -function list { - local path="" - local size=0 - local line_no=0 - local ttar_file=$1 - if [ -n "${2:-}" ]; then - echo >&2 "ERROR: too many arguments." - echo - usage 1 - fi - if [ ! -e "$ttar_file" ]; then - echo >&2 "ERROR: file not found ($ttar_file)" - echo - usage 1 - fi - while read -r line; do - line_no=$(( line_no + 1 )) - if [ $size -gt 0 ]; then - size=$(( size - 1 )) - continue - fi - if [[ $line =~ ^Path:\ (.*)$ ]]; then - path=${BASH_REMATCH[1]} - elif [[ $line =~ ^Lines:\ (.*)$ ]]; then - size=${BASH_REMATCH[1]} - echo "$path" - elif [[ $line =~ ^Directory:\ (.*)$ ]]; then - path=${BASH_REMATCH[1]} - echo "$path/" - elif [[ $line =~ ^SymlinkTo:\ (.*)$ ]]; then - echo "$path -> ${BASH_REMATCH[1]}" - fi - done < "$ttar_file" -} - -function extract { - local path="" - local size=0 - local line_no=0 - local ttar_file=$1 - if [ -n "${2:-}" ]; then - echo >&2 "ERROR: too many arguments." - echo - usage 1 - fi - if [ ! 
-e "$ttar_file" ]; then - echo >&2 "ERROR: file not found ($ttar_file)" - echo - usage 1 - fi - while IFS= read -r line; do - line_no=$(( line_no + 1 )) - local eof_without_newline - if [ "$size" -gt 0 ]; then - if [[ "$line" =~ [^\\]EOF ]]; then - # An EOF not preceded by a backslash indicates that the line - # does not end with a newline - eof_without_newline=1 - else - eof_without_newline=0 - fi - # Replace NULLBYTE with null byte if at beginning of line - # Replace NULLBYTE with null byte unless preceded by backslash - # Remove one backslash in front of NULLBYTE (if any) - # Remove EOF unless preceded by backslash - # Remove one backslash in front of EOF - if [ $USE_PYTHON -eq 1 ]; then - echo -n "$line" | python -c "$PYTHON_EXTRACT_FILTER" >> "$path" - else - # The repeated pattern makes up for sed's lack of negative - # lookbehind assertions (for consecutive null bytes). - echo -n "$line" | \ - sed -e 's/^NULLBYTE/\x0/g; - s/\([^\\]\)NULLBYTE/\1\x0/g; - s/\([^\\]\)NULLBYTE/\1\x0/g; - s/\\NULLBYTE/NULLBYTE/g; - s/\([^\\]\)EOF/\1/g; - s/\\EOF/EOF/g; - ' >> "$path" - fi - if [[ "$eof_without_newline" -eq 0 ]]; then - echo >> "$path" - fi - size=$(( size - 1 )) - continue - fi - if [[ $line =~ ^Path:\ (.*)$ ]]; then - path=${BASH_REMATCH[1]} - if [ -L "$path" ]; then - rm "$path" - elif [ -d "$path" ]; then - if [ "${RECURSIVE_UNLINK:-}" == "yes" ]; then - rm -r "$path" - else - # Safe because symlinks to directories are dealt with above - rmdir "$path" - fi - elif [ -e "$path" ]; then - rm "$path" - fi - elif [[ $line =~ ^Lines:\ (.*)$ ]]; then - size=${BASH_REMATCH[1]} - # Create file even if it is zero-length. - touch "$path" - vecho " $path" - elif [[ $line =~ ^Mode:\ (.*)$ ]]; then - mode=${BASH_REMATCH[1]} - chmod "$mode" "$path" - vecho "$mode" - elif [[ $line =~ ^Directory:\ (.*)$ ]]; then - path=${BASH_REMATCH[1]} - mkdir -p "$path" - vecho " $path/" - elif [[ $line =~ ^SymlinkTo:\ (.*)$ ]]; then - ln -s "${BASH_REMATCH[1]}" "$path" - vecho " $path -> ${BASH_REMATCH[1]}" - elif [[ $line =~ ^# ]]; then - # Ignore comments between files - continue - else - echo >&2 "ERROR: Unknown keyword on line $line_no: $line" - exit 1 - fi - done < "$ttar_file" -} - -function div { - echo "# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -" \ - "- - - - - -" -} - -function get_mode { - local mfile=$1 - if [ -z "${STAT_OPTION:-}" ]; then - if stat -c '%a' "$mfile" >/dev/null 2>&1; then - # GNU stat - STAT_OPTION='-c' - STAT_FORMAT='%a' - else - # BSD stat - STAT_OPTION='-f' - # Octal output, user/group/other (omit file type, sticky bit) - STAT_FORMAT='%OLp' - fi - fi - stat "${STAT_OPTION}" "${STAT_FORMAT}" "$mfile" -} - -function _create { - shopt -s nullglob - local mode - local eof_without_newline - while (( "$#" )); do - file=$1 - if [ -L "$file" ]; then - echo "Path: $file" - symlinkTo=$(readlink "$file") - echo "SymlinkTo: $symlinkTo" - vecho " $file -> $symlinkTo" - div - elif [ -d "$file" ]; then - # Strip trailing slash (if there is one) - file=${file%/} - echo "Directory: $file" - mode=$(get_mode "$file") - echo "Mode: $mode" - vecho "$mode $file/" - div - # Find all files and dirs, including hidden/dot files - for x in "$file/"{*,.[^.]*}; do - _create "$x" - done - elif [ -f "$file" ]; then - echo "Path: $file" - lines=$(wc -l "$file"|awk '{print $1}') - eof_without_newline=0 - if [[ "$(wc -c "$file"|awk '{print $1}')" -gt 0 ]] && \ - [[ "$(tail -c 1 "$file" | wc -l)" -eq 0 ]]; then - eof_without_newline=1 - lines=$((lines+1)) - fi - echo "Lines: $lines" - # Add 
backslash in front of EOF - # Add backslash in front of NULLBYTE - # Replace null byte with NULLBYTE - if [ $USE_PYTHON -eq 1 ]; then - < "$file" python -c "$PYTHON_CREATE_FILTER" - else - < "$file" \ - sed 's/EOF/\\EOF/g; - s/NULLBYTE/\\NULLBYTE/g; - s/\x0/NULLBYTE/g; - ' - fi - if [[ "$eof_without_newline" -eq 1 ]]; then - # Finish line with EOF to indicate that the original line did - # not end with a linefeed - echo "EOF" - fi - mode=$(get_mode "$file") - echo "Mode: $mode" - vecho "$mode $file" - div - else - echo >&2 "ERROR: file not found ($file in $(pwd))" - exit 2 - fi - shift - done -} - -function create { - ttar_file=$1 - shift - if [ -z "${1:-}" ]; then - echo >&2 "ERROR: missing arguments." - echo - usage 1 - fi - if [ -e "$ttar_file" ]; then - rm "$ttar_file" - fi - exec > "$ttar_file" - echo "# Archive created by ttar $ARG_STRING" - _create "$@" -} - -test_environment - -if [ -n "${CDIR:-}" ]; then - if [[ "$ARCHIVE" != /* ]]; then - # Relative path: preserve the archive's location before changing - # directory - ARCHIVE="$(pwd)/$ARCHIVE" - fi - cd "$CDIR" -fi - -"$CMD" "$ARCHIVE" "$@" diff --git a/vendor/github.com/prometheus/procfs/vm.go b/vendor/github.com/prometheus/procfs/vm.go deleted file mode 100644 index cb138914..00000000 --- a/vendor/github.com/prometheus/procfs/vm.go +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build !windows - -package procfs - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// The VM interface is described at -// https://www.kernel.org/doc/Documentation/sysctl/vm.txt -// Each setting is exposed as a single file. 
-// Each file contains one line with a single numerical value, except lowmem_reserve_ratio which holds an array -// and numa_zonelist_order (deprecated) which is a string -type VM struct { - AdminReserveKbytes *int64 // /proc/sys/vm/admin_reserve_kbytes - BlockDump *int64 // /proc/sys/vm/block_dump - CompactUnevictableAllowed *int64 // /proc/sys/vm/compact_unevictable_allowed - DirtyBackgroundBytes *int64 // /proc/sys/vm/dirty_background_bytes - DirtyBackgroundRatio *int64 // /proc/sys/vm/dirty_background_ratio - DirtyBytes *int64 // /proc/sys/vm/dirty_bytes - DirtyExpireCentisecs *int64 // /proc/sys/vm/dirty_expire_centisecs - DirtyRatio *int64 // /proc/sys/vm/dirty_ratio - DirtytimeExpireSeconds *int64 // /proc/sys/vm/dirtytime_expire_seconds - DirtyWritebackCentisecs *int64 // /proc/sys/vm/dirty_writeback_centisecs - DropCaches *int64 // /proc/sys/vm/drop_caches - ExtfragThreshold *int64 // /proc/sys/vm/extfrag_threshold - HugetlbShmGroup *int64 // /proc/sys/vm/hugetlb_shm_group - LaptopMode *int64 // /proc/sys/vm/laptop_mode - LegacyVaLayout *int64 // /proc/sys/vm/legacy_va_layout - LowmemReserveRatio []*int64 // /proc/sys/vm/lowmem_reserve_ratio - MaxMapCount *int64 // /proc/sys/vm/max_map_count - MemoryFailureEarlyKill *int64 // /proc/sys/vm/memory_failure_early_kill - MemoryFailureRecovery *int64 // /proc/sys/vm/memory_failure_recovery - MinFreeKbytes *int64 // /proc/sys/vm/min_free_kbytes - MinSlabRatio *int64 // /proc/sys/vm/min_slab_ratio - MinUnmappedRatio *int64 // /proc/sys/vm/min_unmapped_ratio - MmapMinAddr *int64 // /proc/sys/vm/mmap_min_addr - NrHugepages *int64 // /proc/sys/vm/nr_hugepages - NrHugepagesMempolicy *int64 // /proc/sys/vm/nr_hugepages_mempolicy - NrOvercommitHugepages *int64 // /proc/sys/vm/nr_overcommit_hugepages - NumaStat *int64 // /proc/sys/vm/numa_stat - NumaZonelistOrder string // /proc/sys/vm/numa_zonelist_order - OomDumpTasks *int64 // /proc/sys/vm/oom_dump_tasks - OomKillAllocatingTask *int64 // /proc/sys/vm/oom_kill_allocating_task - OvercommitKbytes *int64 // /proc/sys/vm/overcommit_kbytes - OvercommitMemory *int64 // /proc/sys/vm/overcommit_memory - OvercommitRatio *int64 // /proc/sys/vm/overcommit_ratio - PageCluster *int64 // /proc/sys/vm/page-cluster - PanicOnOom *int64 // /proc/sys/vm/panic_on_oom - PercpuPagelistFraction *int64 // /proc/sys/vm/percpu_pagelist_fraction - StatInterval *int64 // /proc/sys/vm/stat_interval - Swappiness *int64 // /proc/sys/vm/swappiness - UserReserveKbytes *int64 // /proc/sys/vm/user_reserve_kbytes - VfsCachePressure *int64 // /proc/sys/vm/vfs_cache_pressure - WatermarkBoostFactor *int64 // /proc/sys/vm/watermark_boost_factor - WatermarkScaleFactor *int64 // /proc/sys/vm/watermark_scale_factor - ZoneReclaimMode *int64 // /proc/sys/vm/zone_reclaim_mode -} - -// VM reads the VM statistics from the specified `proc` filesystem. 
-func (fs FS) VM() (*VM, error) { - path := fs.proc.Path("sys/vm") - file, err := os.Stat(path) - if err != nil { - return nil, err - } - if !file.Mode().IsDir() { - return nil, fmt.Errorf("%s is not a directory", path) - } - - files, err := ioutil.ReadDir(path) - if err != nil { - return nil, err - } - - var vm VM - for _, f := range files { - if f.IsDir() { - continue - } - - name := filepath.Join(path, f.Name()) - // ignore errors on read, as there are some write only - // in /proc/sys/vm - value, err := util.SysReadFile(name) - if err != nil { - continue - } - vp := util.NewValueParser(value) - - switch f.Name() { - case "admin_reserve_kbytes": - vm.AdminReserveKbytes = vp.PInt64() - case "block_dump": - vm.BlockDump = vp.PInt64() - case "compact_unevictable_allowed": - vm.CompactUnevictableAllowed = vp.PInt64() - case "dirty_background_bytes": - vm.DirtyBackgroundBytes = vp.PInt64() - case "dirty_background_ratio": - vm.DirtyBackgroundRatio = vp.PInt64() - case "dirty_bytes": - vm.DirtyBytes = vp.PInt64() - case "dirty_expire_centisecs": - vm.DirtyExpireCentisecs = vp.PInt64() - case "dirty_ratio": - vm.DirtyRatio = vp.PInt64() - case "dirtytime_expire_seconds": - vm.DirtytimeExpireSeconds = vp.PInt64() - case "dirty_writeback_centisecs": - vm.DirtyWritebackCentisecs = vp.PInt64() - case "drop_caches": - vm.DropCaches = vp.PInt64() - case "extfrag_threshold": - vm.ExtfragThreshold = vp.PInt64() - case "hugetlb_shm_group": - vm.HugetlbShmGroup = vp.PInt64() - case "laptop_mode": - vm.LaptopMode = vp.PInt64() - case "legacy_va_layout": - vm.LegacyVaLayout = vp.PInt64() - case "lowmem_reserve_ratio": - stringSlice := strings.Fields(value) - pint64Slice := make([]*int64, 0, len(stringSlice)) - for _, value := range stringSlice { - vp := util.NewValueParser(value) - pint64Slice = append(pint64Slice, vp.PInt64()) - } - vm.LowmemReserveRatio = pint64Slice - case "max_map_count": - vm.MaxMapCount = vp.PInt64() - case "memory_failure_early_kill": - vm.MemoryFailureEarlyKill = vp.PInt64() - case "memory_failure_recovery": - vm.MemoryFailureRecovery = vp.PInt64() - case "min_free_kbytes": - vm.MinFreeKbytes = vp.PInt64() - case "min_slab_ratio": - vm.MinSlabRatio = vp.PInt64() - case "min_unmapped_ratio": - vm.MinUnmappedRatio = vp.PInt64() - case "mmap_min_addr": - vm.MmapMinAddr = vp.PInt64() - case "nr_hugepages": - vm.NrHugepages = vp.PInt64() - case "nr_hugepages_mempolicy": - vm.NrHugepagesMempolicy = vp.PInt64() - case "nr_overcommit_hugepages": - vm.NrOvercommitHugepages = vp.PInt64() - case "numa_stat": - vm.NumaStat = vp.PInt64() - case "numa_zonelist_order": - vm.NumaZonelistOrder = value - case "oom_dump_tasks": - vm.OomDumpTasks = vp.PInt64() - case "oom_kill_allocating_task": - vm.OomKillAllocatingTask = vp.PInt64() - case "overcommit_kbytes": - vm.OvercommitKbytes = vp.PInt64() - case "overcommit_memory": - vm.OvercommitMemory = vp.PInt64() - case "overcommit_ratio": - vm.OvercommitRatio = vp.PInt64() - case "page-cluster": - vm.PageCluster = vp.PInt64() - case "panic_on_oom": - vm.PanicOnOom = vp.PInt64() - case "percpu_pagelist_fraction": - vm.PercpuPagelistFraction = vp.PInt64() - case "stat_interval": - vm.StatInterval = vp.PInt64() - case "swappiness": - vm.Swappiness = vp.PInt64() - case "user_reserve_kbytes": - vm.UserReserveKbytes = vp.PInt64() - case "vfs_cache_pressure": - vm.VfsCachePressure = vp.PInt64() - case "watermark_boost_factor": - vm.WatermarkBoostFactor = vp.PInt64() - case "watermark_scale_factor": - vm.WatermarkScaleFactor = vp.PInt64() - case 
"zone_reclaim_mode": - vm.ZoneReclaimMode = vp.PInt64() - } - if err := vp.Err(); err != nil { - return nil, err - } - } - - return &vm, nil -} diff --git a/vendor/github.com/prometheus/procfs/xfrm.go b/vendor/github.com/prometheus/procfs/xfrm.go deleted file mode 100644 index eed07c7d..00000000 --- a/vendor/github.com/prometheus/procfs/xfrm.go +++ /dev/null @@ -1,186 +0,0 @@ -// Copyright 2017 Prometheus Team -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "fmt" - "os" - "strconv" - "strings" -) - -// XfrmStat models the contents of /proc/net/xfrm_stat. -type XfrmStat struct { - // All errors which are not matched by other - XfrmInError int - // No buffer is left - XfrmInBufferError int - // Header Error - XfrmInHdrError int - // No state found - // i.e. either inbound SPI, address, or IPSEC protocol at SA is wrong - XfrmInNoStates int - // Transformation protocol specific error - // e.g. SA Key is wrong - XfrmInStateProtoError int - // Transformation mode specific error - XfrmInStateModeError int - // Sequence error - // e.g. sequence number is out of window - XfrmInStateSeqError int - // State is expired - XfrmInStateExpired int - // State has mismatch option - // e.g. UDP encapsulation type is mismatched - XfrmInStateMismatch int - // State is invalid - XfrmInStateInvalid int - // No matching template for states - // e.g. Inbound SAs are correct but SP rule is wrong - XfrmInTmplMismatch int - // No policy is found for states - // e.g. Inbound SAs are correct but no SP is found - XfrmInNoPols int - // Policy discards - XfrmInPolBlock int - // Policy error - XfrmInPolError int - // All errors which are not matched by others - XfrmOutError int - // Bundle generation error - XfrmOutBundleGenError int - // Bundle check error - XfrmOutBundleCheckError int - // No state was found - XfrmOutNoStates int - // Transformation protocol specific error - XfrmOutStateProtoError int - // Transportation mode specific error - XfrmOutStateModeError int - // Sequence error - // i.e sequence number overflow - XfrmOutStateSeqError int - // State is expired - XfrmOutStateExpired int - // Policy discads - XfrmOutPolBlock int - // Policy is dead - XfrmOutPolDead int - // Policy Error - XfrmOutPolError int - XfrmFwdHdrError int - XfrmOutStateInvalid int - XfrmAcquireError int -} - -// NewXfrmStat reads the xfrm_stat statistics. -func NewXfrmStat() (XfrmStat, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return XfrmStat{}, err - } - - return fs.NewXfrmStat() -} - -// NewXfrmStat reads the xfrm_stat statistics from the 'proc' filesystem. 
-func (fs FS) NewXfrmStat() (XfrmStat, error) { - file, err := os.Open(fs.proc.Path("net/xfrm_stat")) - if err != nil { - return XfrmStat{}, err - } - defer file.Close() - - var ( - x = XfrmStat{} - s = bufio.NewScanner(file) - ) - - for s.Scan() { - fields := strings.Fields(s.Text()) - - if len(fields) != 2 { - return XfrmStat{}, fmt.Errorf("couldn't parse %q line %q", file.Name(), s.Text()) - } - - name := fields[0] - value, err := strconv.Atoi(fields[1]) - if err != nil { - return XfrmStat{}, err - } - - switch name { - case "XfrmInError": - x.XfrmInError = value - case "XfrmInBufferError": - x.XfrmInBufferError = value - case "XfrmInHdrError": - x.XfrmInHdrError = value - case "XfrmInNoStates": - x.XfrmInNoStates = value - case "XfrmInStateProtoError": - x.XfrmInStateProtoError = value - case "XfrmInStateModeError": - x.XfrmInStateModeError = value - case "XfrmInStateSeqError": - x.XfrmInStateSeqError = value - case "XfrmInStateExpired": - x.XfrmInStateExpired = value - case "XfrmInStateInvalid": - x.XfrmInStateInvalid = value - case "XfrmInTmplMismatch": - x.XfrmInTmplMismatch = value - case "XfrmInNoPols": - x.XfrmInNoPols = value - case "XfrmInPolBlock": - x.XfrmInPolBlock = value - case "XfrmInPolError": - x.XfrmInPolError = value - case "XfrmOutError": - x.XfrmOutError = value - case "XfrmInStateMismatch": - x.XfrmInStateMismatch = value - case "XfrmOutBundleGenError": - x.XfrmOutBundleGenError = value - case "XfrmOutBundleCheckError": - x.XfrmOutBundleCheckError = value - case "XfrmOutNoStates": - x.XfrmOutNoStates = value - case "XfrmOutStateProtoError": - x.XfrmOutStateProtoError = value - case "XfrmOutStateModeError": - x.XfrmOutStateModeError = value - case "XfrmOutStateSeqError": - x.XfrmOutStateSeqError = value - case "XfrmOutStateExpired": - x.XfrmOutStateExpired = value - case "XfrmOutPolBlock": - x.XfrmOutPolBlock = value - case "XfrmOutPolDead": - x.XfrmOutPolDead = value - case "XfrmOutPolError": - x.XfrmOutPolError = value - case "XfrmFwdHdrError": - x.XfrmFwdHdrError = value - case "XfrmOutStateInvalid": - x.XfrmOutStateInvalid = value - case "XfrmAcquireError": - x.XfrmAcquireError = value - } - - } - - return x, s.Err() -} diff --git a/vendor/github.com/prometheus/procfs/zoneinfo.go b/vendor/github.com/prometheus/procfs/zoneinfo.go deleted file mode 100644 index 0b9bb679..00000000 --- a/vendor/github.com/prometheus/procfs/zoneinfo.go +++ /dev/null @@ -1,196 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build !windows - -package procfs - -import ( - "bytes" - "fmt" - "io/ioutil" - "regexp" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// Zoneinfo holds info parsed from /proc/zoneinfo. 
-type Zoneinfo struct { - Node string - Zone string - NrFreePages *int64 - Min *int64 - Low *int64 - High *int64 - Scanned *int64 - Spanned *int64 - Present *int64 - Managed *int64 - NrActiveAnon *int64 - NrInactiveAnon *int64 - NrIsolatedAnon *int64 - NrAnonPages *int64 - NrAnonTransparentHugepages *int64 - NrActiveFile *int64 - NrInactiveFile *int64 - NrIsolatedFile *int64 - NrFilePages *int64 - NrSlabReclaimable *int64 - NrSlabUnreclaimable *int64 - NrMlockStack *int64 - NrKernelStack *int64 - NrMapped *int64 - NrDirty *int64 - NrWriteback *int64 - NrUnevictable *int64 - NrShmem *int64 - NrDirtied *int64 - NrWritten *int64 - NumaHit *int64 - NumaMiss *int64 - NumaForeign *int64 - NumaInterleave *int64 - NumaLocal *int64 - NumaOther *int64 - Protection []*int64 -} - -var nodeZoneRE = regexp.MustCompile(`(\d+), zone\s+(\w+)`) - -// Zoneinfo parses an zoneinfo-file (/proc/zoneinfo) and returns a slice of -// structs containing the relevant info. More information available here: -// https://www.kernel.org/doc/Documentation/sysctl/vm.txt -func (fs FS) Zoneinfo() ([]Zoneinfo, error) { - data, err := ioutil.ReadFile(fs.proc.Path("zoneinfo")) - if err != nil { - return nil, fmt.Errorf("error reading zoneinfo %q: %w", fs.proc.Path("zoneinfo"), err) - } - zoneinfo, err := parseZoneinfo(data) - if err != nil { - return nil, fmt.Errorf("error parsing zoneinfo %q: %w", fs.proc.Path("zoneinfo"), err) - } - return zoneinfo, nil -} - -func parseZoneinfo(zoneinfoData []byte) ([]Zoneinfo, error) { - - zoneinfo := []Zoneinfo{} - - zoneinfoBlocks := bytes.Split(zoneinfoData, []byte("\nNode")) - for _, block := range zoneinfoBlocks { - var zoneinfoElement Zoneinfo - lines := strings.Split(string(block), "\n") - for _, line := range lines { - - if nodeZone := nodeZoneRE.FindStringSubmatch(line); nodeZone != nil { - zoneinfoElement.Node = nodeZone[1] - zoneinfoElement.Zone = nodeZone[2] - continue - } - if strings.HasPrefix(strings.TrimSpace(line), "per-node stats") { - zoneinfoElement.Zone = "" - continue - } - parts := strings.Fields(strings.TrimSpace(line)) - if len(parts) < 2 { - continue - } - vp := util.NewValueParser(parts[1]) - switch parts[0] { - case "nr_free_pages": - zoneinfoElement.NrFreePages = vp.PInt64() - case "min": - zoneinfoElement.Min = vp.PInt64() - case "low": - zoneinfoElement.Low = vp.PInt64() - case "high": - zoneinfoElement.High = vp.PInt64() - case "scanned": - zoneinfoElement.Scanned = vp.PInt64() - case "spanned": - zoneinfoElement.Spanned = vp.PInt64() - case "present": - zoneinfoElement.Present = vp.PInt64() - case "managed": - zoneinfoElement.Managed = vp.PInt64() - case "nr_active_anon": - zoneinfoElement.NrActiveAnon = vp.PInt64() - case "nr_inactive_anon": - zoneinfoElement.NrInactiveAnon = vp.PInt64() - case "nr_isolated_anon": - zoneinfoElement.NrIsolatedAnon = vp.PInt64() - case "nr_anon_pages": - zoneinfoElement.NrAnonPages = vp.PInt64() - case "nr_anon_transparent_hugepages": - zoneinfoElement.NrAnonTransparentHugepages = vp.PInt64() - case "nr_active_file": - zoneinfoElement.NrActiveFile = vp.PInt64() - case "nr_inactive_file": - zoneinfoElement.NrInactiveFile = vp.PInt64() - case "nr_isolated_file": - zoneinfoElement.NrIsolatedFile = vp.PInt64() - case "nr_file_pages": - zoneinfoElement.NrFilePages = vp.PInt64() - case "nr_slab_reclaimable": - zoneinfoElement.NrSlabReclaimable = vp.PInt64() - case "nr_slab_unreclaimable": - zoneinfoElement.NrSlabUnreclaimable = vp.PInt64() - case "nr_mlock_stack": - zoneinfoElement.NrMlockStack = vp.PInt64() - case "nr_kernel_stack": 
- zoneinfoElement.NrKernelStack = vp.PInt64() - case "nr_mapped": - zoneinfoElement.NrMapped = vp.PInt64() - case "nr_dirty": - zoneinfoElement.NrDirty = vp.PInt64() - case "nr_writeback": - zoneinfoElement.NrWriteback = vp.PInt64() - case "nr_unevictable": - zoneinfoElement.NrUnevictable = vp.PInt64() - case "nr_shmem": - zoneinfoElement.NrShmem = vp.PInt64() - case "nr_dirtied": - zoneinfoElement.NrDirtied = vp.PInt64() - case "nr_written": - zoneinfoElement.NrWritten = vp.PInt64() - case "numa_hit": - zoneinfoElement.NumaHit = vp.PInt64() - case "numa_miss": - zoneinfoElement.NumaMiss = vp.PInt64() - case "numa_foreign": - zoneinfoElement.NumaForeign = vp.PInt64() - case "numa_interleave": - zoneinfoElement.NumaInterleave = vp.PInt64() - case "numa_local": - zoneinfoElement.NumaLocal = vp.PInt64() - case "numa_other": - zoneinfoElement.NumaOther = vp.PInt64() - case "protection:": - protectionParts := strings.Split(line, ":") - protectionValues := strings.Replace(protectionParts[1], "(", "", 1) - protectionValues = strings.Replace(protectionValues, ")", "", 1) - protectionValues = strings.TrimSpace(protectionValues) - protectionStringMap := strings.Split(protectionValues, ", ") - val, err := util.ParsePInt64s(protectionStringMap) - if err == nil { - zoneinfoElement.Protection = val - } - } - - } - - zoneinfo = append(zoneinfo, zoneinfoElement) - } - return zoneinfo, nil -} diff --git a/vendor/github.com/rs/xid/.appveyor.yml b/vendor/github.com/rs/xid/.appveyor.yml deleted file mode 100644 index c73bb33b..00000000 --- a/vendor/github.com/rs/xid/.appveyor.yml +++ /dev/null @@ -1,27 +0,0 @@ -version: 1.0.0.{build} - -platform: x64 - -branches: - only: - - master - -clone_folder: c:\gopath\src\github.com\rs\xid - -environment: - GOPATH: c:\gopath - -install: - - echo %PATH% - - echo %GOPATH% - - set PATH=%GOPATH%\bin;c:\go\bin;%PATH% - - go version - - go env - - go get -t . - -build_script: - - go build - -test_script: - - go test - diff --git a/vendor/github.com/rs/xid/.travis.yml b/vendor/github.com/rs/xid/.travis.yml deleted file mode 100644 index b37da159..00000000 --- a/vendor/github.com/rs/xid/.travis.yml +++ /dev/null @@ -1,8 +0,0 @@ -language: go -go: -- "1.9" -- "1.10" -- "master" -matrix: - allow_failures: - - go: "master" diff --git a/vendor/github.com/rs/xid/LICENSE b/vendor/github.com/rs/xid/LICENSE deleted file mode 100644 index 47c5e9d2..00000000 --- a/vendor/github.com/rs/xid/LICENSE +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2015 Olivier Poitrey - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is furnished -to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
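For context on the procfs removal above: the dropped `Zoneinfo` accessor is normally consumed through the package's `FS` handle. A minimal usage sketch, assuming the upstream `github.com/prometheus/procfs` module (the `main` wrapper and error handling are illustrative, not part of this diff):

```go
package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		panic(err)
	}

	// Zoneinfo returns one entry per (node, zone) block of /proc/zoneinfo.
	zones, err := fs.Zoneinfo()
	if err != nil {
		panic(err)
	}
	for _, z := range zones {
		// Counter fields are *int64 pointers because any line may be
		// absent from a given zone block.
		if z.NrFreePages != nil {
			fmt.Printf("node %s, zone %s: %d free pages\n", z.Node, z.Zone, *z.NrFreePages)
		}
	}
}
```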
diff --git a/vendor/github.com/rs/xid/README.md b/vendor/github.com/rs/xid/README.md deleted file mode 100644 index 1f886fd7..00000000 --- a/vendor/github.com/rs/xid/README.md +++ /dev/null @@ -1,112 +0,0 @@
-# Globally Unique ID Generator
-
-[![godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/rs/xid) [![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/rs/xid/master/LICENSE) [![Build Status](https://travis-ci.org/rs/xid.svg?branch=master)](https://travis-ci.org/rs/xid) [![Coverage](http://gocover.io/_badge/github.com/rs/xid)](http://gocover.io/github.com/rs/xid)
-
-Package xid is a globally unique id generator library, ready to be used safely directly in your server code.
-
-Xid uses the Mongo Object ID algorithm to generate globally unique ids, with a different serialization (base32) to make it shorter when transported as a string:
-https://docs.mongodb.org/manual/reference/object-id/
-
-- 4-byte value representing the seconds since the Unix epoch,
-- 3-byte machine identifier,
-- 2-byte process id, and
-- 3-byte counter, starting with a random value.
-
-The binary representation of the id is compatible with Mongo 12 bytes Object IDs.
-The string representation uses base32 hex (w/o padding) for better space efficiency
-when stored in that form (20 bytes). The hex variant of base32 is used to retain the
-sortable property of the id.
-
-Xid doesn't use base64 because case sensitivity and the 2 non-alphanumeric chars may be an
-issue when transported as a string between various systems. Base36 wasn't retained either
-because 1/ it's not standard, 2/ the resulting size is not predictable (not bit aligned),
-and 3/ it would not remain sortable. To validate a base32 `xid`, expect a 20-char-long,
-all-lowercase sequence of `a` to `v` letters and `0` to `9` numbers (`[0-9a-v]{20}`).
-
-UUIDs are 16 bytes (128 bits) and 36 chars as string representation. Twitter Snowflake
-ids are 8 bytes (64 bits) but require machine/data-center configuration and/or central
-generator servers. xid stands in between with 12 bytes (96 bits) and a more compact
-URL-safe string representation (20 chars). No configuration or central generator server
-is required, so it can be used directly in server code.
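The four sections described above map one-to-one onto the accessor methods defined in id.go (removed further down). A minimal sketch; the comments note which bytes each call reads:

```go
package main

import (
	"fmt"

	"github.com/rs/xid"
)

func main() {
	id := xid.New()

	fmt.Println(id.String())  // 20-char base32hex string, e.g. 9m4e2mr0ui3e8a215n4g
	fmt.Println(id.Time())    // bytes 0-3: big-endian Unix timestamp, 1s precision
	fmt.Println(id.Machine()) // bytes 4-6: first 3 bytes of md5(machine id)
	fmt.Println(id.Pid())     // bytes 7-8: process id
	fmt.Println(id.Counter()) // bytes 9-11: atomically incremented counter
}
```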
-
-| Name        | Binary Size | String Size    | Features
-|-------------|-------------|----------------|----------------
-| [UUID]      | 16 bytes    | 36 chars       | configuration free, not sortable
-| [shortuuid] | 16 bytes    | 22 chars       | configuration free, not sortable
-| [Snowflake] | 8 bytes     | up to 20 chars | needs machine/DC configuration, needs central server, sortable
-| [MongoID]   | 12 bytes    | 24 chars       | configuration free, sortable
-| xid         | 12 bytes    | 20 chars       | configuration free, sortable
-
-[UUID]: https://en.wikipedia.org/wiki/Universally_unique_identifier
-[shortuuid]: https://github.com/stochastic-technologies/shortuuid
-[Snowflake]: https://blog.twitter.com/2010/announcing-snowflake
-[MongoID]: https://docs.mongodb.org/manual/reference/object-id/
-
-Features:
-
-- Size: 12 bytes (96 bits), smaller than UUID, larger than snowflake
-- Base32 hex encoded by default (20 chars when transported as printable string, still sortable)
-- Configuration free: you don't need to set a unique machine and/or data center id
-- K-ordered
-- Embedded time with 1 second precision
-- Uniqueness guaranteed for 16,777,216 (24 bits) unique ids per second and per host/process
-- Lock-free (unlike UUIDv1 and v2)
-
-Best used with [zerolog](https://github.com/rs/zerolog)'s
-[RequestIDHandler](https://godoc.org/github.com/rs/zerolog/hlog#RequestIDHandler).
-
-Notes:
-
-- Xid depends on the system time and a monotonic counter, so it is not cryptographically secure. If unpredictability of IDs is important, you should not use Xids. It is worth noting that most of the other UUID-like implementations are also not cryptographically secure. You should use libraries that rely on cryptographically secure sources (like /dev/urandom on unix, crypto/rand in golang) if you want a truly random ID generator.
-
-References:
-
-- http://www.slideshare.net/davegardnerisme/unique-id-generation-in-distributed-systems
-- https://en.wikipedia.org/wiki/Universally_unique_identifier
-- https://blog.twitter.com/2010/announcing-snowflake
-- Python port by [Graham Abbott](https://github.com/graham): https://github.com/graham/python_xid
-- Scala port by [Egor Kolotaev](https://github.com/kolotaev): https://github.com/kolotaev/ride
-
-## Install
-
-    go get github.com/rs/xid
-
-## Usage
-
-```go
-guid := xid.New()
-
-println(guid.String())
-// Output: 9m4e2mr0ui3e8a215n4g
-```
-
-Get `xid` embedded info:
-
-```go
-guid.Machine()
-guid.Pid()
-guid.Time()
-guid.Counter()
-```
-
-## Benchmark
-
-Benchmark against [Maxim Bublis](https://github.com/satori)'s Go [UUID](https://github.com/satori/go.uuid) package.
-
-```
-BenchmarkXID        20000000    91.1 ns/op    32 B/op    1 allocs/op
-BenchmarkXID-2      20000000    55.9 ns/op    32 B/op    1 allocs/op
-BenchmarkXID-4      50000000    32.3 ns/op    32 B/op    1 allocs/op
-BenchmarkUUIDv1     10000000     204 ns/op    48 B/op    1 allocs/op
-BenchmarkUUIDv1-2   10000000     160 ns/op    48 B/op    1 allocs/op
-BenchmarkUUIDv1-4   10000000     195 ns/op    48 B/op    1 allocs/op
-BenchmarkUUIDv4      1000000    1503 ns/op    64 B/op    2 allocs/op
-BenchmarkUUIDv4-2    1000000    1427 ns/op    64 B/op    2 allocs/op
-BenchmarkUUIDv4-4    1000000    1452 ns/op    64 B/op    2 allocs/op
-```
-
-Note: UUIDv1 requires a global lock, hence the performance degradation as we add more CPUs.
-
-## Licenses
-
-All source code is licensed under the [MIT License](https://raw.github.com/rs/xid/master/LICENSE).
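The K-ordered property claimed above follows directly from the layout: the most significant bytes are the timestamp, and base32hex preserves byte order. A small sketch using `NewWithTime`, `Compare`, and `Sort` from the id.go removal below:

```go
package main

import (
	"fmt"
	"time"

	"github.com/rs/xid"
)

func main() {
	// An ID minted an hour ago compares less than a fresh one, because
	// the leading 4 bytes are a big-endian Unix timestamp.
	older := xid.NewWithTime(time.Now().Add(-time.Hour))
	newer := xid.New()
	fmt.Println(older.Compare(newer) < 0) // true

	// Sort orders on the raw bytes; the string form sorts identically.
	ids := []xid.ID{newer, older}
	xid.Sort(ids)
	fmt.Println(ids[0] == older)                   // true
	fmt.Println(ids[0].String() < ids[1].String()) // true
}
```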
diff --git a/vendor/github.com/rs/xid/hostid_darwin.go b/vendor/github.com/rs/xid/hostid_darwin.go deleted file mode 100644 index 08351ff7..00000000 --- a/vendor/github.com/rs/xid/hostid_darwin.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build darwin - -package xid - -import "syscall" - -func readPlatformMachineID() (string, error) { - return syscall.Sysctl("kern.uuid") -} diff --git a/vendor/github.com/rs/xid/hostid_fallback.go b/vendor/github.com/rs/xid/hostid_fallback.go deleted file mode 100644 index 7fbd3c00..00000000 --- a/vendor/github.com/rs/xid/hostid_fallback.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build !darwin,!linux,!freebsd,!windows - -package xid - -import "errors" - -func readPlatformMachineID() (string, error) { - return "", errors.New("not implemented") -} diff --git a/vendor/github.com/rs/xid/hostid_freebsd.go b/vendor/github.com/rs/xid/hostid_freebsd.go deleted file mode 100644 index be25a039..00000000 --- a/vendor/github.com/rs/xid/hostid_freebsd.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build freebsd - -package xid - -import "syscall" - -func readPlatformMachineID() (string, error) { - return syscall.Sysctl("kern.hostuuid") -} diff --git a/vendor/github.com/rs/xid/hostid_linux.go b/vendor/github.com/rs/xid/hostid_linux.go deleted file mode 100644 index 7d0c4a9e..00000000 --- a/vendor/github.com/rs/xid/hostid_linux.go +++ /dev/null @@ -1,10 +0,0 @@ -// +build linux - -package xid - -import "io/ioutil" - -func readPlatformMachineID() (string, error) { - b, err := ioutil.ReadFile("/sys/class/dmi/id/product_uuid") - return string(b), err -} diff --git a/vendor/github.com/rs/xid/hostid_windows.go b/vendor/github.com/rs/xid/hostid_windows.go deleted file mode 100644 index ec2593ee..00000000 --- a/vendor/github.com/rs/xid/hostid_windows.go +++ /dev/null @@ -1,38 +0,0 @@ -// +build windows - -package xid - -import ( - "fmt" - "syscall" - "unsafe" -) - -func readPlatformMachineID() (string, error) { - // source: https://github.com/shirou/gopsutil/blob/master/host/host_syscall.go - var h syscall.Handle - err := syscall.RegOpenKeyEx(syscall.HKEY_LOCAL_MACHINE, syscall.StringToUTF16Ptr(`SOFTWARE\Microsoft\Cryptography`), 0, syscall.KEY_READ|syscall.KEY_WOW64_64KEY, &h) - if err != nil { - return "", err - } - defer syscall.RegCloseKey(h) - - const syscallRegBufLen = 74 // len(`{`) + len(`abcdefgh-1234-456789012-123345456671` * 2) + len(`}`) // 2 == bytes/UTF16 - const uuidLen = 36 - - var regBuf [syscallRegBufLen]uint16 - bufLen := uint32(syscallRegBufLen) - var valType uint32 - err = syscall.RegQueryValueEx(h, syscall.StringToUTF16Ptr(`MachineGuid`), nil, &valType, (*byte)(unsafe.Pointer(®Buf[0])), &bufLen) - if err != nil { - return "", err - } - - hostID := syscall.UTF16ToString(regBuf[:]) - hostIDLen := len(hostID) - if hostIDLen != uuidLen { - return "", fmt.Errorf("HostID incorrect: %q\n", hostID) - } - - return hostID, nil -} diff --git a/vendor/github.com/rs/xid/id.go b/vendor/github.com/rs/xid/id.go deleted file mode 100644 index 466faf26..00000000 --- a/vendor/github.com/rs/xid/id.go +++ /dev/null @@ -1,365 +0,0 @@ -// Package xid is a globally unique id generator suited for web scale -// -// Xid is using Mongo Object ID algorithm to generate globally unique ids: -// https://docs.mongodb.org/manual/reference/object-id/ -// -// - 4-byte value representing the seconds since the Unix epoch, -// - 3-byte machine identifier, -// - 2-byte process id, and -// - 3-byte counter, starting with a random value. 
-// -// The binary representation of the id is compatible with Mongo 12 bytes Object IDs. -// The string representation is using base32 hex (w/o padding) for better space efficiency -// when stored in that form (20 bytes). The hex variant of base32 is used to retain the -// sortable property of the id. -// -// Xid doesn't use base64 because case sensitivity and the 2 non alphanum chars may be an -// issue when transported as a string between various systems. Base36 wasn't retained either -// because 1/ it's not standard 2/ the resulting size is not predictable (not bit aligned) -// and 3/ it would not remain sortable. To validate a base32 `xid`, expect a 20 chars long, -// all lowercase sequence of `a` to `v` letters and `0` to `9` numbers (`[0-9a-v]{20}`). -// -// UUID is 16 bytes (128 bits), snowflake is 8 bytes (64 bits), xid stands in between -// with 12 bytes with a more compact string representation ready for the web and no -// required configuration or central generation server. -// -// Features: -// -// - Size: 12 bytes (96 bits), smaller than UUID, larger than snowflake -// - Base32 hex encoded by default (16 bytes storage when transported as printable string) -// - Non configured, you don't need set a unique machine and/or data center id -// - K-ordered -// - Embedded time with 1 second precision -// - Unicity guaranteed for 16,777,216 (24 bits) unique ids per second and per host/process -// -// Best used with xlog's RequestIDHandler (https://godoc.org/github.com/rs/xlog#RequestIDHandler). -// -// References: -// -// - http://www.slideshare.net/davegardnerisme/unique-id-generation-in-distributed-systems -// - https://en.wikipedia.org/wiki/Universally_unique_identifier -// - https://blog.twitter.com/2010/announcing-snowflake -package xid - -import ( - "bytes" - "crypto/md5" - "crypto/rand" - "database/sql/driver" - "encoding/binary" - "errors" - "fmt" - "hash/crc32" - "io/ioutil" - "os" - "sort" - "sync/atomic" - "time" -) - -// Code inspired from mgo/bson ObjectId - -// ID represents a unique request id -type ID [rawLen]byte - -const ( - encodedLen = 20 // string encoded len - rawLen = 12 // binary raw len - - // encoding stores a custom version of the base32 encoding with lower case - // letters. - encoding = "0123456789abcdefghijklmnopqrstuv" -) - -var ( - // ErrInvalidID is returned when trying to unmarshal an invalid ID - ErrInvalidID = errors.New("xid: invalid ID") - - // objectIDCounter is atomically incremented when generating a new ObjectId - // using NewObjectId() function. It's used as a counter part of an id. - // This id is initialized with a random value. - objectIDCounter = randInt() - - // machineId stores machine id generated once and used in subsequent calls - // to NewObjectId function. - machineID = readMachineID() - - // pid stores the current process id - pid = os.Getpid() - - nilID ID - - // dec is the decoding map for base32 encoding - dec [256]byte -) - -func init() { - for i := 0; i < len(dec); i++ { - dec[i] = 0xFF - } - for i := 0; i < len(encoding); i++ { - dec[encoding[i]] = byte(i) - } - - // If /proc/self/cpuset exists and is not /, we can assume that we are in a - // form of container and use the content of cpuset xor-ed with the PID in - // order get a reasonable machine global unique PID. - b, err := ioutil.ReadFile("/proc/self/cpuset") - if err == nil && len(b) > 1 { - pid ^= int(crc32.ChecksumIEEE(b)) - } -} - -// readMachineId generates machine id and puts it into the machineId global -// variable. 
If this function fails to get the hostname, it will cause -// a runtime error. -func readMachineID() []byte { - id := make([]byte, 3) - hid, err := readPlatformMachineID() - if err != nil || len(hid) == 0 { - hid, err = os.Hostname() - } - if err == nil && len(hid) != 0 { - hw := md5.New() - hw.Write([]byte(hid)) - copy(id, hw.Sum(nil)) - } else { - // Fallback to rand number if machine id can't be gathered - if _, randErr := rand.Reader.Read(id); randErr != nil { - panic(fmt.Errorf("xid: cannot get hostname nor generate a random number: %v; %v", err, randErr)) - } - } - return id -} - -// randInt generates a random uint32 -func randInt() uint32 { - b := make([]byte, 3) - if _, err := rand.Reader.Read(b); err != nil { - panic(fmt.Errorf("xid: cannot generate random number: %v;", err)) - } - return uint32(b[0])<<16 | uint32(b[1])<<8 | uint32(b[2]) -} - -// New generates a globally unique ID -func New() ID { - return NewWithTime(time.Now()) -} - -// NewWithTime generates a globally unique ID with the passed in time -func NewWithTime(t time.Time) ID { - var id ID - // Timestamp, 4 bytes, big endian - binary.BigEndian.PutUint32(id[:], uint32(t.Unix())) - // Machine, first 3 bytes of md5(hostname) - id[4] = machineID[0] - id[5] = machineID[1] - id[6] = machineID[2] - // Pid, 2 bytes, specs don't specify endianness, but we use big endian. - id[7] = byte(pid >> 8) - id[8] = byte(pid) - // Increment, 3 bytes, big endian - i := atomic.AddUint32(&objectIDCounter, 1) - id[9] = byte(i >> 16) - id[10] = byte(i >> 8) - id[11] = byte(i) - return id -} - -// FromString reads an ID from its string representation -func FromString(id string) (ID, error) { - i := &ID{} - err := i.UnmarshalText([]byte(id)) - return *i, err -} - -// String returns a base32 hex lowercased with no padding representation of the id (char set is 0-9, a-v). 
-func (id ID) String() string { - text := make([]byte, encodedLen) - encode(text, id[:]) - return string(text) -} - -// MarshalText implements encoding/text TextMarshaler interface -func (id ID) MarshalText() ([]byte, error) { - text := make([]byte, encodedLen) - encode(text, id[:]) - return text, nil -} - -// MarshalJSON implements encoding/json Marshaler interface -func (id ID) MarshalJSON() ([]byte, error) { - if id.IsNil() { - return []byte("null"), nil - } - text, err := id.MarshalText() - return []byte(`"` + string(text) + `"`), err -} - -// encode by unrolling the stdlib base32 algorithm + removing all safe checks -func encode(dst, id []byte) { - dst[0] = encoding[id[0]>>3] - dst[1] = encoding[(id[1]>>6)&0x1F|(id[0]<<2)&0x1F] - dst[2] = encoding[(id[1]>>1)&0x1F] - dst[3] = encoding[(id[2]>>4)&0x1F|(id[1]<<4)&0x1F] - dst[4] = encoding[id[3]>>7|(id[2]<<1)&0x1F] - dst[5] = encoding[(id[3]>>2)&0x1F] - dst[6] = encoding[id[4]>>5|(id[3]<<3)&0x1F] - dst[7] = encoding[id[4]&0x1F] - dst[8] = encoding[id[5]>>3] - dst[9] = encoding[(id[6]>>6)&0x1F|(id[5]<<2)&0x1F] - dst[10] = encoding[(id[6]>>1)&0x1F] - dst[11] = encoding[(id[7]>>4)&0x1F|(id[6]<<4)&0x1F] - dst[12] = encoding[id[8]>>7|(id[7]<<1)&0x1F] - dst[13] = encoding[(id[8]>>2)&0x1F] - dst[14] = encoding[(id[9]>>5)|(id[8]<<3)&0x1F] - dst[15] = encoding[id[9]&0x1F] - dst[16] = encoding[id[10]>>3] - dst[17] = encoding[(id[11]>>6)&0x1F|(id[10]<<2)&0x1F] - dst[18] = encoding[(id[11]>>1)&0x1F] - dst[19] = encoding[(id[11]<<4)&0x1F] -} - -// UnmarshalText implements encoding/text TextUnmarshaler interface -func (id *ID) UnmarshalText(text []byte) error { - if len(text) != encodedLen { - return ErrInvalidID - } - for _, c := range text { - if dec[c] == 0xFF { - return ErrInvalidID - } - } - decode(id, text) - return nil -} - -// UnmarshalJSON implements encoding/json Unmarshaler interface -func (id *ID) UnmarshalJSON(b []byte) error { - s := string(b) - if s == "null" { - *id = nilID - return nil - } - return id.UnmarshalText(b[1 : len(b)-1]) -} - -// decode by unrolling the stdlib base32 algorithm + removing all safe checks -func decode(id *ID, src []byte) { - id[0] = dec[src[0]]<<3 | dec[src[1]]>>2 - id[1] = dec[src[1]]<<6 | dec[src[2]]<<1 | dec[src[3]]>>4 - id[2] = dec[src[3]]<<4 | dec[src[4]]>>1 - id[3] = dec[src[4]]<<7 | dec[src[5]]<<2 | dec[src[6]]>>3 - id[4] = dec[src[6]]<<5 | dec[src[7]] - id[5] = dec[src[8]]<<3 | dec[src[9]]>>2 - id[6] = dec[src[9]]<<6 | dec[src[10]]<<1 | dec[src[11]]>>4 - id[7] = dec[src[11]]<<4 | dec[src[12]]>>1 - id[8] = dec[src[12]]<<7 | dec[src[13]]<<2 | dec[src[14]]>>3 - id[9] = dec[src[14]]<<5 | dec[src[15]] - id[10] = dec[src[16]]<<3 | dec[src[17]]>>2 - id[11] = dec[src[17]]<<6 | dec[src[18]]<<1 | dec[src[19]]>>4 -} - -// Time returns the timestamp part of the id. -// It's a runtime error to call this method with an invalid id. -func (id ID) Time() time.Time { - // First 4 bytes of ObjectId is 32-bit big-endian seconds from epoch. - secs := int64(binary.BigEndian.Uint32(id[0:4])) - return time.Unix(secs, 0) -} - -// Machine returns the 3-byte machine id part of the id. -// It's a runtime error to call this method with an invalid id. -func (id ID) Machine() []byte { - return id[4:7] -} - -// Pid returns the process id part of the id. -// It's a runtime error to call this method with an invalid id. -func (id ID) Pid() uint16 { - return binary.BigEndian.Uint16(id[7:9]) -} - -// Counter returns the incrementing value part of the id. -// It's a runtime error to call this method with an invalid id. 
-func (id ID) Counter() int32 { - b := id[9:12] - // Counter is stored as big-endian 3-byte value - return int32(uint32(b[0])<<16 | uint32(b[1])<<8 | uint32(b[2])) -} - -// Value implements the driver.Valuer interface. -func (id ID) Value() (driver.Value, error) { - if id.IsNil() { - return nil, nil - } - b, err := id.MarshalText() - return string(b), err -} - -// Scan implements the sql.Scanner interface. -func (id *ID) Scan(value interface{}) (err error) { - switch val := value.(type) { - case string: - return id.UnmarshalText([]byte(val)) - case []byte: - return id.UnmarshalText(val) - case nil: - *id = nilID - return nil - default: - return fmt.Errorf("xid: scanning unsupported type: %T", value) - } -} - -// IsNil Returns true if this is a "nil" ID -func (id ID) IsNil() bool { - return id == nilID -} - -// NilID returns a zero value for `xid.ID`. -func NilID() ID { - return nilID -} - -// Bytes returns the byte array representation of `ID` -func (id ID) Bytes() []byte { - return id[:] -} - -// FromBytes convert the byte array representation of `ID` back to `ID` -func FromBytes(b []byte) (ID, error) { - var id ID - if len(b) != rawLen { - return id, ErrInvalidID - } - copy(id[:], b) - return id, nil -} - -// Compare returns an integer comparing two IDs. It behaves just like `bytes.Compare`. -// The result will be 0 if two IDs are identical, -1 if current id is less than the other one, -// and 1 if current id is greater than the other. -func (id ID) Compare(other ID) int { - return bytes.Compare(id[:], other[:]) -} - -type sorter []ID - -func (s sorter) Len() int { - return len(s) -} - -func (s sorter) Less(i, j int) bool { - return s[i].Compare(s[j]) < 0 -} - -func (s sorter) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -// Sort sorts an array of IDs inplace. -// It works by wrapping `[]ID` and use `sort.Sort`. -func Sort(ids []ID) { - sort.Sort(sorter(ids)) -} diff --git a/vendor/github.com/russross/blackfriday/v2/.gitignore b/vendor/github.com/russross/blackfriday/v2/.gitignore deleted file mode 100644 index 75623dcc..00000000 --- a/vendor/github.com/russross/blackfriday/v2/.gitignore +++ /dev/null @@ -1,8 +0,0 @@ -*.out -*.swp -*.8 -*.6 -_obj -_test* -markdown -tags diff --git a/vendor/github.com/russross/blackfriday/v2/.travis.yml b/vendor/github.com/russross/blackfriday/v2/.travis.yml deleted file mode 100644 index b0b525a5..00000000 --- a/vendor/github.com/russross/blackfriday/v2/.travis.yml +++ /dev/null @@ -1,17 +0,0 @@ -sudo: false -language: go -go: - - "1.10.x" - - "1.11.x" - - tip -matrix: - fast_finish: true - allow_failures: - - go: tip -install: - - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step). -script: - - go get -t -v ./... - - diff -u <(echo -n) <(gofmt -d -s .) - - go tool vet . - - go test -v ./... diff --git a/vendor/github.com/russross/blackfriday/v2/LICENSE.txt b/vendor/github.com/russross/blackfriday/v2/LICENSE.txt deleted file mode 100644 index 2885af36..00000000 --- a/vendor/github.com/russross/blackfriday/v2/LICENSE.txt +++ /dev/null @@ -1,29 +0,0 @@ -Blackfriday is distributed under the Simplified BSD License: - -> Copyright © 2011 Russ Ross -> All rights reserved. -> -> Redistribution and use in source and binary forms, with or without -> modification, are permitted provided that the following conditions -> are met: -> -> 1. Redistributions of source code must retain the above copyright -> notice, this list of conditions and the following disclaimer. 
->
-> 2. Redistributions in binary form must reproduce the above
-> copyright notice, this list of conditions and the following
-> disclaimer in the documentation and/or other materials provided with
-> the distribution.
->
-> THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-> "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-> LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-> FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-> COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-> INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-> BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-> LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-> CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-> LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
-> ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-> POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/russross/blackfriday/v2/README.md b/vendor/github.com/russross/blackfriday/v2/README.md deleted file mode 100644 index d5a8649b..00000000 --- a/vendor/github.com/russross/blackfriday/v2/README.md +++ /dev/null @@ -1,291 +0,0 @@
-Blackfriday [![Build Status](https://travis-ci.org/russross/blackfriday.svg?branch=master)](https://travis-ci.org/russross/blackfriday)
-===========
-
-Blackfriday is a [Markdown][1] processor implemented in [Go][2]. It
-is paranoid about its input (so you can safely feed it user-supplied
-data), it is fast, it supports common extensions (tables, smart
-punctuation substitutions, etc.), and it is safe for all utf-8
-(unicode) input.
-
-HTML output is currently supported, along with Smartypants
-extensions.
-
-It started as a translation from C of [Sundown][3].
-
-
-Installation
-------------
-
-Blackfriday is compatible with any modern Go release. With Go 1.7 and git
-installed:
-
-    go get gopkg.in/russross/blackfriday.v2
-
-will download, compile, and install the package into your `$GOPATH`
-directory hierarchy. Alternatively, you can achieve the same if you
-import it into a project:
-
-    import "gopkg.in/russross/blackfriday.v2"
-
-and `go get` without parameters.
-
-
-Versions
---------
-
-The currently maintained and recommended version of Blackfriday is `v2`. It's being
-developed on its own branch: https://github.com/russross/blackfriday/tree/v2 and the
-documentation is available at
-https://godoc.org/gopkg.in/russross/blackfriday.v2.
-
-It is `go get`-able via [gopkg.in][6] at `gopkg.in/russross/blackfriday.v2`,
-but we highly recommend using a package management tool like [dep][7] or
-[Glide][8] and making use of semantic versioning. With package management you
-should import `github.com/russross/blackfriday` and specify that you're using
-version 2.0.0.
-
-Version 2 offers a number of improvements over v1:
-
-* Cleaned up API
-* A separate call to [`Parse`][4], which produces an abstract syntax tree for
-  the document
-* Latest bug fixes
-* Flexibility to easily add your own rendering extensions
-
-Potential drawbacks:
-
-* Our benchmarks show v2 to be slightly slower than v1. Currently in the
-  ballpark of around 15%.
-* API breakage. If you can't afford to modify your code to adhere to the new API
-  and don't care too much about the new features, v2 is probably not for you.
-* Several bug fixes are trailing behind and still need to be forward-ported to
-  v2.
See issue [#348](https://github.com/russross/blackfriday/issues/348) for - tracking. - -Usage ------ - -For the most sensible markdown processing, it is as simple as getting your input -into a byte slice and calling: - -```go -output := blackfriday.Run(input) -``` - -Your input will be parsed and the output rendered with a set of most popular -extensions enabled. If you want the most basic feature set, corresponding with -the bare Markdown specification, use: - -```go -output := blackfriday.Run(input, blackfriday.WithNoExtensions()) -``` - -### Sanitize untrusted content - -Blackfriday itself does nothing to protect against malicious content. If you are -dealing with user-supplied markdown, we recommend running Blackfriday's output -through HTML sanitizer such as [Bluemonday][5]. - -Here's an example of simple usage of Blackfriday together with Bluemonday: - -```go -import ( - "github.com/microcosm-cc/bluemonday" - "github.com/russross/blackfriday" -) - -// ... -unsafe := blackfriday.Run(input) -html := bluemonday.UGCPolicy().SanitizeBytes(unsafe) -``` - -### Custom options - -If you want to customize the set of options, use `blackfriday.WithExtensions`, -`blackfriday.WithRenderer` and `blackfriday.WithRefOverride`. - -You can also check out `blackfriday-tool` for a more complete example -of how to use it. Download and install it using: - - go get github.com/russross/blackfriday-tool - -This is a simple command-line tool that allows you to process a -markdown file using a standalone program. You can also browse the -source directly on github if you are just looking for some example -code: - -* - -Note that if you have not already done so, installing -`blackfriday-tool` will be sufficient to download and install -blackfriday in addition to the tool itself. The tool binary will be -installed in `$GOPATH/bin`. This is a statically-linked binary that -can be copied to wherever you need it without worrying about -dependencies and library versions. - - -Features --------- - -All features of Sundown are supported, including: - -* **Compatibility**. The Markdown v1.0.3 test suite passes with - the `--tidy` option. Without `--tidy`, the differences are - mostly in whitespace and entity escaping, where blackfriday is - more consistent and cleaner. - -* **Common extensions**, including table support, fenced code - blocks, autolinks, strikethroughs, non-strict emphasis, etc. - -* **Safety**. Blackfriday is paranoid when parsing, making it safe - to feed untrusted user input without fear of bad things - happening. The test suite stress tests this and there are no - known inputs that make it crash. If you find one, please let me - know and send me the input that does it. - - NOTE: "safety" in this context means *runtime safety only*. In order to - protect yourself against JavaScript injection in untrusted content, see - [this example](https://github.com/russross/blackfriday#sanitize-untrusted-content). - -* **Fast processing**. It is fast enough to render on-demand in - most web applications without having to cache the output. - -* **Thread safety**. You can run multiple parsers in different - goroutines without ill effect. There is no dependence on global - shared state. - -* **Minimal dependencies**. Blackfriday only depends on standard - library packages in Go. The source code is pretty - self-contained, so it is easy to add to any project, including - Google App Engine projects. - -* **Standards compliant**. 
Output successfully validates using the
-  W3C validation tool for HTML 4.01 and XHTML 1.0 Transitional.
-
-
-Extensions
-----------
-
-In addition to the standard markdown syntax, this package
-implements the following extensions:
-
-*   **Intra-word emphasis suppression**. The `_` character is
-    commonly used inside words when discussing code, so having
-    markdown interpret it as an emphasis command is usually the
-    wrong thing. Blackfriday lets you treat all emphasis markers as
-    normal characters when they occur inside a word.
-
-*   **Tables**. Tables can be created by drawing them in the input
-    using a simple syntax:
-
-    ```
-    Name    | Age
-    --------|------
-    Bob     | 27
-    Alice   | 23
-    ```
-
-*   **Fenced code blocks**. In addition to the normal 4-space
-    indentation to mark code blocks, you can explicitly mark them
-    and supply a language (to make syntax highlighting simple). Just
-    mark it like this:
-
-        ```go
-        func getTrue() bool {
-            return true
-        }
-        ```
-
-    You can use 3 or more backticks to mark the beginning of the
-    block, and the same number to mark the end of the block.
-
-*   **Definition lists**. A simple definition list is made of a single-line
-    term followed by a colon and the definition for that term.
-
-        Cat
-        : Fluffy animal everyone likes
-
-        Internet
-        : Vector of transmission for pictures of cats
-
-    Terms must be separated from the previous definition by a blank line.
-
-*   **Footnotes**. A marker in the text that will become a superscript number;
-    a footnote definition that will be placed in a list of footnotes at the
-    end of the document. A footnote looks like this:
-
-        This is a footnote.[^1]
-
-        [^1]: the footnote text.
-
-*   **Autolinking**. Blackfriday can find URLs that have not been
-    explicitly marked as links and turn them into links.
-
-*   **Strikethrough**. Use two tildes (`~~`) to mark text that
-    should be crossed out.
-
-*   **Hard line breaks**. With this extension enabled, newlines in the input
-    translate into line breaks in the output. This extension is off by default.
-
-*   **Smart quotes**. Smartypants-style punctuation substitution is
-    supported, turning normal double- and single-quote marks into
-    curly quotes, etc.
-
-*   **LaTeX-style dash parsing** is an additional option, where `--`
-    is translated into `–`, and `---` is translated into
-    `—`. This differs from most smartypants processors, which
-    turn a single hyphen into an ndash and a double hyphen into an
-    mdash.
-
-*   **Smart fractions**, where anything that looks like a fraction
-    is translated into suitable HTML (instead of just a few special
-    cases like most smartypants processors). For example, `4/5`
-    becomes `<sup>4</sup>&frasl;<sub>5</sub>`, which renders as
-    <sup>4</sup>&frasl;<sub>5</sub>.
-
-
-Other renderers
----------------
-
-Blackfriday is structured to allow alternative rendering engines. Here
-are a few of note:
-
-* [github_flavored_markdown](https://godoc.org/github.com/shurcooL/github_flavored_markdown):
-  provides a GitHub Flavored Markdown renderer with fenced code block
-  highlighting, clickable heading anchor links.
-
-  It's not customizable, and its goal is to produce HTML output
-  equivalent to the [GitHub Markdown API endpoint](https://developer.github.com/v3/markdown/#render-a-markdown-document-in-raw-mode),
-  except the rendering is performed locally.
-
-* [markdownfmt](https://github.com/shurcooL/markdownfmt): like gofmt,
-  but for markdown.
-
-* [LaTeX output](https://github.com/Ambrevar/Blackfriday-LaTeX):
-  renders output as LaTeX.
- -* [Blackfriday-Confluence](https://github.com/kentaro-m/blackfriday-confluence): provides a [Confluence Wiki Markup](https://confluence.atlassian.com/doc/confluence-wiki-markup-251003035.html) renderer. - - -Todo ----- - -* More unit testing -* Improve unicode support. It does not understand all unicode - rules (about what constitutes a letter, a punctuation symbol, - etc.), so it may fail to detect word boundaries correctly in - some instances. It is safe on all utf-8 input. - - -License -------- - -[Blackfriday is distributed under the Simplified BSD License](LICENSE.txt) - - - [1]: https://daringfireball.net/projects/markdown/ "Markdown" - [2]: https://golang.org/ "Go Language" - [3]: https://github.com/vmg/sundown "Sundown" - [4]: https://godoc.org/gopkg.in/russross/blackfriday.v2#Parse "Parse func" - [5]: https://github.com/microcosm-cc/bluemonday "Bluemonday" - [6]: https://labix.org/gopkg.in "gopkg.in" diff --git a/vendor/github.com/russross/blackfriday/v2/block.go b/vendor/github.com/russross/blackfriday/v2/block.go deleted file mode 100644 index b8607474..00000000 --- a/vendor/github.com/russross/blackfriday/v2/block.go +++ /dev/null @@ -1,1590 +0,0 @@ -// -// Blackfriday Markdown Processor -// Available at http://github.com/russross/blackfriday -// -// Copyright © 2011 Russ Ross . -// Distributed under the Simplified BSD License. -// See README.md for details. -// - -// -// Functions to parse block-level elements. -// - -package blackfriday - -import ( - "bytes" - "html" - "regexp" - "strings" - - "github.com/shurcooL/sanitized_anchor_name" -) - -const ( - charEntity = "&(?:#x[a-f0-9]{1,8}|#[0-9]{1,8}|[a-z][a-z0-9]{1,31});" - escapable = "[!\"#$%&'()*+,./:;<=>?@[\\\\\\]^_`{|}~-]" -) - -var ( - reBackslashOrAmp = regexp.MustCompile("[\\&]") - reEntityOrEscapedChar = regexp.MustCompile("(?i)\\\\" + escapable + "|" + charEntity) -) - -// Parse block-level data. -// Note: this function and many that it calls assume that -// the input buffer ends with a newline. -func (p *Markdown) block(data []byte) { - // this is called recursively: enforce a maximum depth - if p.nesting >= p.maxNesting { - return - } - p.nesting++ - - // parse out one block-level construct at a time - for len(data) > 0 { - // prefixed heading: - // - // # Heading 1 - // ## Heading 2 - // ... - // ###### Heading 6 - if p.isPrefixHeading(data) { - data = data[p.prefixHeading(data):] - continue - } - - // block of preformatted HTML: - // - //
//   <div>
- //       ...
- //   </div>
-
- if data[0] == '<' { - if i := p.html(data, true); i > 0 { - data = data[i:] - continue - } - } - - // title block - // - // % stuff - // % more stuff - // % even more stuff - if p.extensions&Titleblock != 0 { - if data[0] == '%' { - if i := p.titleBlock(data, true); i > 0 { - data = data[i:] - continue - } - } - } - - // blank lines. note: returns the # of bytes to skip - if i := p.isEmpty(data); i > 0 { - data = data[i:] - continue - } - - // indented code block: - // - // func max(a, b int) int { - // if a > b { - // return a - // } - // return b - // } - if p.codePrefix(data) > 0 { - data = data[p.code(data):] - continue - } - - // fenced code block: - // - // ``` go - // func fact(n int) int { - // if n <= 1 { - // return n - // } - // return n * fact(n-1) - // } - // ``` - if p.extensions&FencedCode != 0 { - if i := p.fencedCodeBlock(data, true); i > 0 { - data = data[i:] - continue - } - } - - // horizontal rule: - // - // ------ - // or - // ****** - // or - // ______ - if p.isHRule(data) { - p.addBlock(HorizontalRule, nil) - var i int - for i = 0; i < len(data) && data[i] != '\n'; i++ { - } - data = data[i:] - continue - } - - // block quote: - // - // > A big quote I found somewhere - // > on the web - if p.quotePrefix(data) > 0 { - data = data[p.quote(data):] - continue - } - - // table: - // - // Name | Age | Phone - // ------|-----|--------- - // Bob | 31 | 555-1234 - // Alice | 27 | 555-4321 - if p.extensions&Tables != 0 { - if i := p.table(data); i > 0 { - data = data[i:] - continue - } - } - - // an itemized/unordered list: - // - // * Item 1 - // * Item 2 - // - // also works with + or - - if p.uliPrefix(data) > 0 { - data = data[p.list(data, 0):] - continue - } - - // a numbered/ordered list: - // - // 1. Item 1 - // 2. Item 2 - if p.oliPrefix(data) > 0 { - data = data[p.list(data, ListTypeOrdered):] - continue - } - - // definition lists: - // - // Term 1 - // : Definition a - // : Definition b - // - // Term 2 - // : Definition c - if p.extensions&DefinitionLists != 0 { - if p.dliPrefix(data) > 0 { - data = data[p.list(data, ListTypeDefinition):] - continue - } - } - - // anything else must look like a normal paragraph - // note: this finds underlined headings, too - data = data[p.paragraph(data):] - } - - p.nesting-- -} - -func (p *Markdown) addBlock(typ NodeType, content []byte) *Node { - p.closeUnmatchedBlocks() - container := p.addChild(typ, 0) - container.content = content - return container -} - -func (p *Markdown) isPrefixHeading(data []byte) bool { - if data[0] != '#' { - return false - } - - if p.extensions&SpaceHeadings != 0 { - level := 0 - for level < 6 && level < len(data) && data[level] == '#' { - level++ - } - if level == len(data) || data[level] != ' ' { - return false - } - } - return true -} - -func (p *Markdown) prefixHeading(data []byte) int { - level := 0 - for level < 6 && level < len(data) && data[level] == '#' { - level++ - } - i := skipChar(data, level, ' ') - end := skipUntilChar(data, i, '\n') - skip := end - id := "" - if p.extensions&HeadingIDs != 0 { - j, k := 0, 0 - // find start/end of heading id - for j = i; j < end-1 && (data[j] != '{' || data[j+1] != '#'); j++ { - } - for k = j + 1; k < end && data[k] != '}'; k++ { - } - // extract heading id iff found - if j < end && k < end { - id = string(data[j+2 : k]) - end = j - skip = k + 1 - for end > 0 && data[end-1] == ' ' { - end-- - } - } - } - for end > 0 && data[end-1] == '#' { - if isBackslashEscaped(data, end-1) { - break - } - end-- - } - for end > 0 && data[end-1] == ' ' { - end-- - 
} - if end > i { - if id == "" && p.extensions&AutoHeadingIDs != 0 { - id = sanitized_anchor_name.Create(string(data[i:end])) - } - block := p.addBlock(Heading, data[i:end]) - block.HeadingID = id - block.Level = level - } - return skip -} - -func (p *Markdown) isUnderlinedHeading(data []byte) int { - // test of level 1 heading - if data[0] == '=' { - i := skipChar(data, 1, '=') - i = skipChar(data, i, ' ') - if i < len(data) && data[i] == '\n' { - return 1 - } - return 0 - } - - // test of level 2 heading - if data[0] == '-' { - i := skipChar(data, 1, '-') - i = skipChar(data, i, ' ') - if i < len(data) && data[i] == '\n' { - return 2 - } - return 0 - } - - return 0 -} - -func (p *Markdown) titleBlock(data []byte, doRender bool) int { - if data[0] != '%' { - return 0 - } - splitData := bytes.Split(data, []byte("\n")) - var i int - for idx, b := range splitData { - if !bytes.HasPrefix(b, []byte("%")) { - i = idx // - 1 - break - } - } - - data = bytes.Join(splitData[0:i], []byte("\n")) - consumed := len(data) - data = bytes.TrimPrefix(data, []byte("% ")) - data = bytes.Replace(data, []byte("\n% "), []byte("\n"), -1) - block := p.addBlock(Heading, data) - block.Level = 1 - block.IsTitleblock = true - - return consumed -} - -func (p *Markdown) html(data []byte, doRender bool) int { - var i, j int - - // identify the opening tag - if data[0] != '<' { - return 0 - } - curtag, tagfound := p.htmlFindTag(data[1:]) - - // handle special cases - if !tagfound { - // check for an HTML comment - if size := p.htmlComment(data, doRender); size > 0 { - return size - } - - // check for an
<hr>
tag - if size := p.htmlHr(data, doRender); size > 0 { - return size - } - - // no special case recognized - return 0 - } - - // look for an unindented matching closing tag - // followed by a blank line - found := false - /* - closetag := []byte("\n") - j = len(curtag) + 1 - for !found { - // scan for a closing tag at the beginning of a line - if skip := bytes.Index(data[j:], closetag); skip >= 0 { - j += skip + len(closetag) - } else { - break - } - - // see if it is the only thing on the line - if skip := p.isEmpty(data[j:]); skip > 0 { - // see if it is followed by a blank line/eof - j += skip - if j >= len(data) { - found = true - i = j - } else { - if skip := p.isEmpty(data[j:]); skip > 0 { - j += skip - found = true - i = j - } - } - } - } - */ - - // if not found, try a second pass looking for indented match - // but not if tag is "ins" or "del" (following original Markdown.pl) - if !found && curtag != "ins" && curtag != "del" { - i = 1 - for i < len(data) { - i++ - for i < len(data) && !(data[i-1] == '<' && data[i] == '/') { - i++ - } - - if i+2+len(curtag) >= len(data) { - break - } - - j = p.htmlFindEnd(curtag, data[i-1:]) - - if j > 0 { - i += j - 1 - found = true - break - } - } - } - - if !found { - return 0 - } - - // the end of the block has been found - if doRender { - // trim newlines - end := i - for end > 0 && data[end-1] == '\n' { - end-- - } - finalizeHTMLBlock(p.addBlock(HTMLBlock, data[:end])) - } - - return i -} - -func finalizeHTMLBlock(block *Node) { - block.Literal = block.content - block.content = nil -} - -// HTML comment, lax form -func (p *Markdown) htmlComment(data []byte, doRender bool) int { - i := p.inlineHTMLComment(data) - // needs to end with a blank line - if j := p.isEmpty(data[i:]); j > 0 { - size := i + j - if doRender { - // trim trailing newlines - end := size - for end > 0 && data[end-1] == '\n' { - end-- - } - block := p.addBlock(HTMLBlock, data[:end]) - finalizeHTMLBlock(block) - } - return size - } - return 0 -} - -// HR, which is the only self-closing block tag considered -func (p *Markdown) htmlHr(data []byte, doRender bool) int { - if len(data) < 4 { - return 0 - } - if data[0] != '<' || (data[1] != 'h' && data[1] != 'H') || (data[2] != 'r' && data[2] != 'R') { - return 0 - } - if data[3] != ' ' && data[3] != '/' && data[3] != '>' { - // not an
<hr> tag after all; at least not a valid one
-		return 0
-	}
-	i := 3
-	for i < len(data) && data[i] != '>' && data[i] != '\n' {
-		i++
-	}
-	if i < len(data) && data[i] == '>' {
-		i++
-		if j := p.isEmpty(data[i:]); j > 0 {
-			size := i + j
-			if doRender {
-				// trim newlines
-				end := size
-				for end > 0 && data[end-1] == '\n' {
-					end--
-				}
-				finalizeHTMLBlock(p.addBlock(HTMLBlock, data[:end]))
-			}
-			return size
-		}
-	}
-	return 0
-}
-
-func (p *Markdown) htmlFindTag(data []byte) (string, bool) {
-	i := 0
-	for i < len(data) && isalnum(data[i]) {
-		i++
-	}
-	key := string(data[:i])
-	if _, ok := blockTags[key]; ok {
-		return key, true
-	}
-	return "", false
-}
-
-func (p *Markdown) htmlFindEnd(tag string, data []byte) int {
-	// assume data[0] == '<' && data[1] == '/' already tested
-	if tag == "hr" {
-		return 2
-	}
-	// check if tag is a match
-	closetag := []byte("</" + tag + ">")
-	if !bytes.HasPrefix(data, closetag) {
-		return 0
-	}
-	i := len(closetag)
-
-	// check that the rest of the line is blank
-	skip := 0
-	if skip = p.isEmpty(data[i:]); skip == 0 {
-		return 0
-	}
-	i += skip
-	skip = 0
-
-	if i >= len(data) {
-		return i
-	}
-
-	if p.extensions&LaxHTMLBlocks != 0 {
-		return i
-	}
-	if skip = p.isEmpty(data[i:]); skip == 0 {
-		// following line must be blank
-		return 0
-	}
-
-	return i + skip
-}
-
-func (*Markdown) isEmpty(data []byte) int {
-	// it is okay to call isEmpty on an empty buffer
-	if len(data) == 0 {
-		return 0
-	}
-
-	var i int
-	for i = 0; i < len(data) && data[i] != '\n'; i++ {
-		if data[i] != ' ' && data[i] != '\t' {
-			return 0
-		}
-	}
-	if i < len(data) && data[i] == '\n' {
-		i++
-	}
-	return i
-}
-
-func (*Markdown) isHRule(data []byte) bool {
-	i := 0
-
-	// skip up to three spaces
-	for i < 3 && data[i] == ' ' {
-		i++
-	}
-
-	// look at the hrule char
-	if data[i] != '*' && data[i] != '-' && data[i] != '_' {
-		return false
-	}
-	c := data[i]
-
-	// the whole line must be the char or whitespace
-	n := 0
-	for i < len(data) && data[i] != '\n' {
-		switch {
-		case data[i] == c:
-			n++
-		case data[i] != ' ':
-			return false
-		}
-		i++
-	}
-
-	return n >= 3
-}
-
-// isFenceLine checks if there's a fence line (e.g., ``` or ``` go) at the beginning of data,
-// and returns the end index if so, or 0 otherwise. It also returns the marker found.
-// If info is not nil, it gets set to the syntax specified in the fence line.
-func isFenceLine(data []byte, info *string, oldmarker string) (end int, marker string) {
-	i, size := 0, 0
-
-	// skip up to three spaces
-	for i < len(data) && i < 3 && data[i] == ' ' {
-		i++
-	}
-
-	// check for the marker characters: ~ or `
-	if i >= len(data) {
-		return 0, ""
-	}
-	if data[i] != '~' && data[i] != '`' {
-		return 0, ""
-	}
-
-	c := data[i]
-
-	// the whole line must be the same char or whitespace
-	for i < len(data) && data[i] == c {
-		size++
-		i++
-	}
-
-	// the marker char must occur at least 3 times
-	if size < 3 {
-		return 0, ""
-	}
-	marker = string(data[i-size : i])
-
-	// if this is the end marker, it must match the beginning marker
-	if oldmarker != "" && marker != oldmarker {
-		return 0, ""
-	}
-
-	// TODO(shurcooL): It's probably a good idea to simplify the 2 code paths here
-	// into one, always get the info string, and discard it if the caller doesn't care.
- if info != nil { - infoLength := 0 - i = skipChar(data, i, ' ') - - if i >= len(data) { - if i == len(data) { - return i, marker - } - return 0, "" - } - - infoStart := i - - if data[i] == '{' { - i++ - infoStart++ - - for i < len(data) && data[i] != '}' && data[i] != '\n' { - infoLength++ - i++ - } - - if i >= len(data) || data[i] != '}' { - return 0, "" - } - - // strip all whitespace at the beginning and the end - // of the {} block - for infoLength > 0 && isspace(data[infoStart]) { - infoStart++ - infoLength-- - } - - for infoLength > 0 && isspace(data[infoStart+infoLength-1]) { - infoLength-- - } - i++ - i = skipChar(data, i, ' ') - } else { - for i < len(data) && !isverticalspace(data[i]) { - infoLength++ - i++ - } - } - - *info = strings.TrimSpace(string(data[infoStart : infoStart+infoLength])) - } - - if i == len(data) { - return i, marker - } - if i > len(data) || data[i] != '\n' { - return 0, "" - } - return i + 1, marker // Take newline into account. -} - -// fencedCodeBlock returns the end index if data contains a fenced code block at the beginning, -// or 0 otherwise. It writes to out if doRender is true, otherwise it has no side effects. -// If doRender is true, a final newline is mandatory to recognize the fenced code block. -func (p *Markdown) fencedCodeBlock(data []byte, doRender bool) int { - var info string - beg, marker := isFenceLine(data, &info, "") - if beg == 0 || beg >= len(data) { - return 0 - } - - var work bytes.Buffer - work.Write([]byte(info)) - work.WriteByte('\n') - - for { - // safe to assume beg < len(data) - - // check for the end of the code block - fenceEnd, _ := isFenceLine(data[beg:], nil, marker) - if fenceEnd != 0 { - beg += fenceEnd - break - } - - // copy the current line - end := skipUntilChar(data, beg, '\n') + 1 - - // did we reach the end of the buffer without a closing marker? 
- if end >= len(data) { - return 0 - } - - // verbatim copy to the working buffer - if doRender { - work.Write(data[beg:end]) - } - beg = end - } - - if doRender { - block := p.addBlock(CodeBlock, work.Bytes()) // TODO: get rid of temp buffer - block.IsFenced = true - finalizeCodeBlock(block) - } - - return beg -} - -func unescapeChar(str []byte) []byte { - if str[0] == '\\' { - return []byte{str[1]} - } - return []byte(html.UnescapeString(string(str))) -} - -func unescapeString(str []byte) []byte { - if reBackslashOrAmp.Match(str) { - return reEntityOrEscapedChar.ReplaceAllFunc(str, unescapeChar) - } - return str -} - -func finalizeCodeBlock(block *Node) { - if block.IsFenced { - newlinePos := bytes.IndexByte(block.content, '\n') - firstLine := block.content[:newlinePos] - rest := block.content[newlinePos+1:] - block.Info = unescapeString(bytes.Trim(firstLine, "\n")) - block.Literal = rest - } else { - block.Literal = block.content - } - block.content = nil -} - -func (p *Markdown) table(data []byte) int { - table := p.addBlock(Table, nil) - i, columns := p.tableHeader(data) - if i == 0 { - p.tip = table.Parent - table.Unlink() - return 0 - } - - p.addBlock(TableBody, nil) - - for i < len(data) { - pipes, rowStart := 0, i - for ; i < len(data) && data[i] != '\n'; i++ { - if data[i] == '|' { - pipes++ - } - } - - if pipes == 0 { - i = rowStart - break - } - - // include the newline in data sent to tableRow - if i < len(data) && data[i] == '\n' { - i++ - } - p.tableRow(data[rowStart:i], columns, false) - } - - return i -} - -// check if the specified position is preceded by an odd number of backslashes -func isBackslashEscaped(data []byte, i int) bool { - backslashes := 0 - for i-backslashes-1 >= 0 && data[i-backslashes-1] == '\\' { - backslashes++ - } - return backslashes&1 == 1 -} - -func (p *Markdown) tableHeader(data []byte) (size int, columns []CellAlignFlags) { - i := 0 - colCount := 1 - for i = 0; i < len(data) && data[i] != '\n'; i++ { - if data[i] == '|' && !isBackslashEscaped(data, i) { - colCount++ - } - } - - // doesn't look like a table header - if colCount == 1 { - return - } - - // include the newline in the data sent to tableRow - j := i - if j < len(data) && data[j] == '\n' { - j++ - } - header := data[:j] - - // column count ignores pipes at beginning or end of line - if data[0] == '|' { - colCount-- - } - if i > 2 && data[i-1] == '|' && !isBackslashEscaped(data, i-1) { - colCount-- - } - - columns = make([]CellAlignFlags, colCount) - - // move on to the header underline - i++ - if i >= len(data) { - return - } - - if data[i] == '|' && !isBackslashEscaped(data, i) { - i++ - } - i = skipChar(data, i, ' ') - - // each column header is of form: / *:?-+:? 
*|/ with # dashes + # colons >= 3 - // and trailing | optional on last column - col := 0 - for i < len(data) && data[i] != '\n' { - dashes := 0 - - if data[i] == ':' { - i++ - columns[col] |= TableAlignmentLeft - dashes++ - } - for i < len(data) && data[i] == '-' { - i++ - dashes++ - } - if i < len(data) && data[i] == ':' { - i++ - columns[col] |= TableAlignmentRight - dashes++ - } - for i < len(data) && data[i] == ' ' { - i++ - } - if i == len(data) { - return - } - // end of column test is messy - switch { - case dashes < 3: - // not a valid column - return - - case data[i] == '|' && !isBackslashEscaped(data, i): - // marker found, now skip past trailing whitespace - col++ - i++ - for i < len(data) && data[i] == ' ' { - i++ - } - - // trailing junk found after last column - if col >= colCount && i < len(data) && data[i] != '\n' { - return - } - - case (data[i] != '|' || isBackslashEscaped(data, i)) && col+1 < colCount: - // something else found where marker was required - return - - case data[i] == '\n': - // marker is optional for the last column - col++ - - default: - // trailing junk found after last column - return - } - } - if col != colCount { - return - } - - p.addBlock(TableHead, nil) - p.tableRow(header, columns, true) - size = i - if size < len(data) && data[size] == '\n' { - size++ - } - return -} - -func (p *Markdown) tableRow(data []byte, columns []CellAlignFlags, header bool) { - p.addBlock(TableRow, nil) - i, col := 0, 0 - - if data[i] == '|' && !isBackslashEscaped(data, i) { - i++ - } - - for col = 0; col < len(columns) && i < len(data); col++ { - for i < len(data) && data[i] == ' ' { - i++ - } - - cellStart := i - - for i < len(data) && (data[i] != '|' || isBackslashEscaped(data, i)) && data[i] != '\n' { - i++ - } - - cellEnd := i - - // skip the end-of-cell marker, possibly taking us past end of buffer - i++ - - for cellEnd > cellStart && cellEnd-1 < len(data) && data[cellEnd-1] == ' ' { - cellEnd-- - } - - cell := p.addBlock(TableCell, data[cellStart:cellEnd]) - cell.IsHeader = header - cell.Align = columns[col] - } - - // pad it out with empty columns to get the right number - for ; col < len(columns); col++ { - cell := p.addBlock(TableCell, nil) - cell.IsHeader = header - cell.Align = columns[col] - } - - // silently ignore rows with too many cells -} - -// returns blockquote prefix length -func (p *Markdown) quotePrefix(data []byte) int { - i := 0 - for i < 3 && i < len(data) && data[i] == ' ' { - i++ - } - if i < len(data) && data[i] == '>' { - if i+1 < len(data) && data[i+1] == ' ' { - return i + 2 - } - return i + 1 - } - return 0 -} - -// blockquote ends with at least one blank line -// followed by something without a blockquote prefix -func (p *Markdown) terminateBlockquote(data []byte, beg, end int) bool { - if p.isEmpty(data[beg:]) <= 0 { - return false - } - if end >= len(data) { - return true - } - return p.quotePrefix(data[end:]) == 0 && p.isEmpty(data[end:]) == 0 -} - -// parse a blockquote fragment -func (p *Markdown) quote(data []byte) int { - block := p.addBlock(BlockQuote, nil) - var raw bytes.Buffer - beg, end := 0, 0 - for beg < len(data) { - end = beg - // Step over whole lines, collecting them. 
While doing that, check for - // fenced code and if one's found, incorporate it altogether, - // irregardless of any contents inside it - for end < len(data) && data[end] != '\n' { - if p.extensions&FencedCode != 0 { - if i := p.fencedCodeBlock(data[end:], false); i > 0 { - // -1 to compensate for the extra end++ after the loop: - end += i - 1 - break - } - } - end++ - } - if end < len(data) && data[end] == '\n' { - end++ - } - if pre := p.quotePrefix(data[beg:]); pre > 0 { - // skip the prefix - beg += pre - } else if p.terminateBlockquote(data, beg, end) { - break - } - // this line is part of the blockquote - raw.Write(data[beg:end]) - beg = end - } - p.block(raw.Bytes()) - p.finalize(block) - return end -} - -// returns prefix length for block code -func (p *Markdown) codePrefix(data []byte) int { - if len(data) >= 1 && data[0] == '\t' { - return 1 - } - if len(data) >= 4 && data[0] == ' ' && data[1] == ' ' && data[2] == ' ' && data[3] == ' ' { - return 4 - } - return 0 -} - -func (p *Markdown) code(data []byte) int { - var work bytes.Buffer - - i := 0 - for i < len(data) { - beg := i - for i < len(data) && data[i] != '\n' { - i++ - } - if i < len(data) && data[i] == '\n' { - i++ - } - - blankline := p.isEmpty(data[beg:i]) > 0 - if pre := p.codePrefix(data[beg:i]); pre > 0 { - beg += pre - } else if !blankline { - // non-empty, non-prefixed line breaks the pre - i = beg - break - } - - // verbatim copy to the working buffer - if blankline { - work.WriteByte('\n') - } else { - work.Write(data[beg:i]) - } - } - - // trim all the \n off the end of work - workbytes := work.Bytes() - eol := len(workbytes) - for eol > 0 && workbytes[eol-1] == '\n' { - eol-- - } - if eol != len(workbytes) { - work.Truncate(eol) - } - - work.WriteByte('\n') - - block := p.addBlock(CodeBlock, work.Bytes()) // TODO: get rid of temp buffer - block.IsFenced = false - finalizeCodeBlock(block) - - return i -} - -// returns unordered list item prefix -func (p *Markdown) uliPrefix(data []byte) int { - i := 0 - // start with up to 3 spaces - for i < len(data) && i < 3 && data[i] == ' ' { - i++ - } - if i >= len(data)-1 { - return 0 - } - // need one of {'*', '+', '-'} followed by a space or a tab - if (data[i] != '*' && data[i] != '+' && data[i] != '-') || - (data[i+1] != ' ' && data[i+1] != '\t') { - return 0 - } - return i + 2 -} - -// returns ordered list item prefix -func (p *Markdown) oliPrefix(data []byte) int { - i := 0 - - // start with up to 3 spaces - for i < 3 && i < len(data) && data[i] == ' ' { - i++ - } - - // count the digits - start := i - for i < len(data) && data[i] >= '0' && data[i] <= '9' { - i++ - } - if start == i || i >= len(data)-1 { - return 0 - } - - // we need >= 1 digits followed by a dot and a space or a tab - if data[i] != '.' 
|| !(data[i+1] == ' ' || data[i+1] == '\t') { - return 0 - } - return i + 2 -} - -// returns definition list item prefix -func (p *Markdown) dliPrefix(data []byte) int { - if len(data) < 2 { - return 0 - } - i := 0 - // need a ':' followed by a space or a tab - if data[i] != ':' || !(data[i+1] == ' ' || data[i+1] == '\t') { - return 0 - } - for i < len(data) && data[i] == ' ' { - i++ - } - return i + 2 -} - -// parse ordered or unordered list block -func (p *Markdown) list(data []byte, flags ListType) int { - i := 0 - flags |= ListItemBeginningOfList - block := p.addBlock(List, nil) - block.ListFlags = flags - block.Tight = true - - for i < len(data) { - skip := p.listItem(data[i:], &flags) - if flags&ListItemContainsBlock != 0 { - block.ListData.Tight = false - } - i += skip - if skip == 0 || flags&ListItemEndOfList != 0 { - break - } - flags &= ^ListItemBeginningOfList - } - - above := block.Parent - finalizeList(block) - p.tip = above - return i -} - -// Returns true if the list item is not the same type as its parent list -func (p *Markdown) listTypeChanged(data []byte, flags *ListType) bool { - if p.dliPrefix(data) > 0 && *flags&ListTypeDefinition == 0 { - return true - } else if p.oliPrefix(data) > 0 && *flags&ListTypeOrdered == 0 { - return true - } else if p.uliPrefix(data) > 0 && (*flags&ListTypeOrdered != 0 || *flags&ListTypeDefinition != 0) { - return true - } - return false -} - -// Returns true if block ends with a blank line, descending if needed -// into lists and sublists. -func endsWithBlankLine(block *Node) bool { - // TODO: figure this out. Always false now. - for block != nil { - //if block.lastLineBlank { - //return true - //} - t := block.Type - if t == List || t == Item { - block = block.LastChild - } else { - break - } - } - return false -} - -func finalizeList(block *Node) { - block.open = false - item := block.FirstChild - for item != nil { - // check for non-final list item ending with blank line: - if endsWithBlankLine(item) && item.Next != nil { - block.ListData.Tight = false - break - } - // recurse into children of list item, to see if there are spaces - // between any of them: - subItem := item.FirstChild - for subItem != nil { - if endsWithBlankLine(subItem) && (item.Next != nil || subItem.Next != nil) { - block.ListData.Tight = false - break - } - subItem = subItem.Next - } - item = item.Next - } -} - -// Parse a single list item. -// Assumes initial prefix is already removed if this is a sublist. 
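-// Returns the number of bytes consumed, or 0 if no list item starts here.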
-func (p *Markdown) listItem(data []byte, flags *ListType) int { - // keep track of the indentation of the first line - itemIndent := 0 - if data[0] == '\t' { - itemIndent += 4 - } else { - for itemIndent < 3 && data[itemIndent] == ' ' { - itemIndent++ - } - } - - var bulletChar byte = '*' - i := p.uliPrefix(data) - if i == 0 { - i = p.oliPrefix(data) - } else { - bulletChar = data[i-2] - } - if i == 0 { - i = p.dliPrefix(data) - // reset definition term flag - if i > 0 { - *flags &= ^ListTypeTerm - } - } - if i == 0 { - // if in definition list, set term flag and continue - if *flags&ListTypeDefinition != 0 { - *flags |= ListTypeTerm - } else { - return 0 - } - } - - // skip leading whitespace on first line - for i < len(data) && data[i] == ' ' { - i++ - } - - // find the end of the line - line := i - for i > 0 && i < len(data) && data[i-1] != '\n' { - i++ - } - - // get working buffer - var raw bytes.Buffer - - // put the first line into the working buffer - raw.Write(data[line:i]) - line = i - - // process the following lines - containsBlankLine := false - sublist := 0 - codeBlockMarker := "" - -gatherlines: - for line < len(data) { - i++ - - // find the end of this line - for i < len(data) && data[i-1] != '\n' { - i++ - } - - // if it is an empty line, guess that it is part of this item - // and move on to the next line - if p.isEmpty(data[line:i]) > 0 { - containsBlankLine = true - line = i - continue - } - - // calculate the indentation - indent := 0 - indentIndex := 0 - if data[line] == '\t' { - indentIndex++ - indent += 4 - } else { - for indent < 4 && line+indent < i && data[line+indent] == ' ' { - indent++ - indentIndex++ - } - } - - chunk := data[line+indentIndex : i] - - if p.extensions&FencedCode != 0 { - // determine if in or out of codeblock - // if in codeblock, ignore normal list processing - _, marker := isFenceLine(chunk, nil, codeBlockMarker) - if marker != "" { - if codeBlockMarker == "" { - // start of codeblock - codeBlockMarker = marker - } else { - // end of codeblock. - codeBlockMarker = "" - } - } - // we are in a codeblock, write line, and continue - if codeBlockMarker != "" || marker != "" { - raw.Write(data[line+indentIndex : i]) - line = i - continue gatherlines - } - } - - // evaluate how this line fits in - switch { - // is this a nested list item? - case (p.uliPrefix(chunk) > 0 && !p.isHRule(chunk)) || - p.oliPrefix(chunk) > 0 || - p.dliPrefix(chunk) > 0: - - // to be a nested list, it must be indented more - // if not, it is either a different kind of list - // or the next item in the same list - if indent <= itemIndent { - if p.listTypeChanged(chunk, flags) { - *flags |= ListItemEndOfList - } else if containsBlankLine { - *flags |= ListItemContainsBlock - } - - break gatherlines - } - - if containsBlankLine { - *flags |= ListItemContainsBlock - } - - // is this the first item in the nested list? - if sublist == 0 { - sublist = raw.Len() - } - - // is this a nested prefix heading? - case p.isPrefixHeading(chunk): - // if the heading is not indented, it is not nested in the list - // and thus ends the list - if containsBlankLine && indent < 4 { - *flags |= ListItemEndOfList - break gatherlines - } - *flags |= ListItemContainsBlock - - // anything following an empty line is only part - // of this item if it is indented 4 spaces - // (regardless of the indentation of the beginning of the item) - case containsBlankLine && indent < 4: - if *flags&ListTypeDefinition != 0 && i < len(data)-1 { - // is the next item still a part of this list? 
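-			// peek past the blank line: a ':' at the start of the next entry
-			// means the definition list continues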
- next := i - for next < len(data) && data[next] != '\n' { - next++ - } - for next < len(data)-1 && data[next] == '\n' { - next++ - } - if i < len(data)-1 && data[i] != ':' && data[next] != ':' { - *flags |= ListItemEndOfList - } - } else { - *flags |= ListItemEndOfList - } - break gatherlines - - // a blank line means this should be parsed as a block - case containsBlankLine: - raw.WriteByte('\n') - *flags |= ListItemContainsBlock - } - - // if this line was preceded by one or more blanks, - // re-introduce the blank into the buffer - if containsBlankLine { - containsBlankLine = false - raw.WriteByte('\n') - } - - // add the line into the working buffer without prefix - raw.Write(data[line+indentIndex : i]) - - line = i - } - - rawBytes := raw.Bytes() - - block := p.addBlock(Item, nil) - block.ListFlags = *flags - block.Tight = false - block.BulletChar = bulletChar - block.Delimiter = '.' // Only '.' is possible in Markdown, but ')' will also be possible in CommonMark - - // render the contents of the list item - if *flags&ListItemContainsBlock != 0 && *flags&ListTypeTerm == 0 { - // intermediate render of block item, except for definition term - if sublist > 0 { - p.block(rawBytes[:sublist]) - p.block(rawBytes[sublist:]) - } else { - p.block(rawBytes) - } - } else { - // intermediate render of inline item - if sublist > 0 { - child := p.addChild(Paragraph, 0) - child.content = rawBytes[:sublist] - p.block(rawBytes[sublist:]) - } else { - child := p.addChild(Paragraph, 0) - child.content = rawBytes - } - } - return line -} - -// render a single paragraph that has already been parsed out -func (p *Markdown) renderParagraph(data []byte) { - if len(data) == 0 { - return - } - - // trim leading spaces - beg := 0 - for data[beg] == ' ' { - beg++ - } - - end := len(data) - // trim trailing newline - if data[len(data)-1] == '\n' { - end-- - } - - // trim trailing spaces - for end > beg && data[end-1] == ' ' { - end-- - } - - p.addBlock(Paragraph, data[beg:end]) -} - -func (p *Markdown) paragraph(data []byte) int { - // prev: index of 1st char of previous line - // line: index of 1st char of current line - // i: index of cursor/end of current line - var prev, line, i int - tabSize := TabSizeDefault - if p.extensions&TabSizeEight != 0 { - tabSize = TabSizeDouble - } - // keep going until we find something to mark the end of the paragraph - for i < len(data) { - // mark the beginning of the current line - prev = line - current := data[i:] - line = i - - // did we find a reference or a footnote? If so, end a paragraph - // preceding it and report that we have consumed up to the end of that - // reference: - if refEnd := isReference(p, current, tabSize); refEnd > 0 { - p.renderParagraph(data[:i]) - return i + refEnd - } - - // did we find a blank line marking the end of the paragraph? - if n := p.isEmpty(current); n > 0 { - // did this blank line followed by a definition list item? 
- if p.extensions&DefinitionLists != 0 { - if i < len(data)-1 && data[i+1] == ':' { - return p.list(data[prev:], ListTypeDefinition) - } - } - - p.renderParagraph(data[:i]) - return i + n - } - - // an underline under some text marks a heading, so our paragraph ended on prev line - if i > 0 { - if level := p.isUnderlinedHeading(current); level > 0 { - // render the paragraph - p.renderParagraph(data[:prev]) - - // ignore leading and trailing whitespace - eol := i - 1 - for prev < eol && data[prev] == ' ' { - prev++ - } - for eol > prev && data[eol-1] == ' ' { - eol-- - } - - id := "" - if p.extensions&AutoHeadingIDs != 0 { - id = sanitized_anchor_name.Create(string(data[prev:eol])) - } - - block := p.addBlock(Heading, data[prev:eol]) - block.Level = level - block.HeadingID = id - - // find the end of the underline - for i < len(data) && data[i] != '\n' { - i++ - } - return i - } - } - - // if the next line starts a block of HTML, then the paragraph ends here - if p.extensions&LaxHTMLBlocks != 0 { - if data[i] == '<' && p.html(current, false) > 0 { - // rewind to before the HTML block - p.renderParagraph(data[:i]) - return i - } - } - - // if there's a prefixed heading or a horizontal rule after this, paragraph is over - if p.isPrefixHeading(current) || p.isHRule(current) { - p.renderParagraph(data[:i]) - return i - } - - // if there's a fenced code block, paragraph is over - if p.extensions&FencedCode != 0 { - if p.fencedCodeBlock(current, false) > 0 { - p.renderParagraph(data[:i]) - return i - } - } - - // if there's a definition list item, prev line is a definition term - if p.extensions&DefinitionLists != 0 { - if p.dliPrefix(current) != 0 { - ret := p.list(data[prev:], ListTypeDefinition) - return ret - } - } - - // if there's a list after this, paragraph is over - if p.extensions&NoEmptyLineBeforeBlock != 0 { - if p.uliPrefix(current) != 0 || - p.oliPrefix(current) != 0 || - p.quotePrefix(current) != 0 || - p.codePrefix(current) != 0 { - p.renderParagraph(data[:i]) - return i - } - } - - // otherwise, scan to the beginning of the next line - nl := bytes.IndexByte(data[i:], '\n') - if nl >= 0 { - i += nl + 1 - } else { - i += len(data[i:]) - } - } - - p.renderParagraph(data[:i]) - return i -} - -func skipChar(data []byte, start int, char byte) int { - i := start - for i < len(data) && data[i] == char { - i++ - } - return i -} - -func skipUntilChar(text []byte, start int, char byte) int { - i := start - for i < len(text) && text[i] != char { - i++ - } - return i -} diff --git a/vendor/github.com/russross/blackfriday/v2/doc.go b/vendor/github.com/russross/blackfriday/v2/doc.go deleted file mode 100644 index 5b3fa987..00000000 --- a/vendor/github.com/russross/blackfriday/v2/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -// Package blackfriday is a markdown processor. -// -// It translates plain text with simple formatting rules into an AST, which can -// then be further processed to HTML (provided by Blackfriday itself) or other -// formats (provided by the community). -// -// The simplest way to invoke Blackfriday is to call the Run function. It will -// take a text input and produce a text output in HTML (or other format). -// -// A slightly more sophisticated way to use Blackfriday is to create a Markdown -// processor and to call Parse, which returns a syntax tree for the input -// document. You can leverage Blackfriday's parsing for content extraction from -// markdown documents. You can assign a custom renderer and set various options -// to the Markdown processor. 
-// -// If you're interested in calling Blackfriday from command line, see -// https://github.com/russross/blackfriday-tool. -package blackfriday diff --git a/vendor/github.com/russross/blackfriday/v2/esc.go b/vendor/github.com/russross/blackfriday/v2/esc.go deleted file mode 100644 index 6385f27c..00000000 --- a/vendor/github.com/russross/blackfriday/v2/esc.go +++ /dev/null @@ -1,34 +0,0 @@ -package blackfriday - -import ( - "html" - "io" -) - -var htmlEscaper = [256][]byte{ - '&': []byte("&"), - '<': []byte("<"), - '>': []byte(">"), - '"': []byte("""), -} - -func escapeHTML(w io.Writer, s []byte) { - var start, end int - for end < len(s) { - escSeq := htmlEscaper[s[end]] - if escSeq != nil { - w.Write(s[start:end]) - w.Write(escSeq) - start = end + 1 - } - end++ - } - if start < len(s) && end <= len(s) { - w.Write(s[start:end]) - } -} - -func escLink(w io.Writer, text []byte) { - unesc := html.UnescapeString(string(text)) - escapeHTML(w, []byte(unesc)) -} diff --git a/vendor/github.com/russross/blackfriday/v2/html.go b/vendor/github.com/russross/blackfriday/v2/html.go deleted file mode 100644 index 284c8718..00000000 --- a/vendor/github.com/russross/blackfriday/v2/html.go +++ /dev/null @@ -1,949 +0,0 @@ -// -// Blackfriday Markdown Processor -// Available at http://github.com/russross/blackfriday -// -// Copyright © 2011 Russ Ross . -// Distributed under the Simplified BSD License. -// See README.md for details. -// - -// -// -// HTML rendering backend -// -// - -package blackfriday - -import ( - "bytes" - "fmt" - "io" - "regexp" - "strings" -) - -// HTMLFlags control optional behavior of HTML renderer. -type HTMLFlags int - -// HTML renderer configuration options. -const ( - HTMLFlagsNone HTMLFlags = 0 - SkipHTML HTMLFlags = 1 << iota // Skip preformatted HTML blocks - SkipImages // Skip embedded images - SkipLinks // Skip all links - Safelink // Only link to trusted protocols - NofollowLinks // Only link with rel="nofollow" - NoreferrerLinks // Only link with rel="noreferrer" - NoopenerLinks // Only link with rel="noopener" - HrefTargetBlank // Add a blank target - CompletePage // Generate a complete HTML page - UseXHTML // Generate XHTML output instead of HTML - FootnoteReturnLinks // Generate a link at the end of a footnote to return to the source - Smartypants // Enable smart punctuation substitutions - SmartypantsFractions // Enable smart fractions (with Smartypants) - SmartypantsDashes // Enable smart dashes (with Smartypants) - SmartypantsLatexDashes // Enable LaTeX-style dashes (with Smartypants) - SmartypantsAngledQuotes // Enable angled double quotes (with Smartypants) for double quotes rendering - SmartypantsQuotesNBSP // Enable « French guillemets » (with Smartypants) - TOC // Generate a table of contents -) - -var ( - htmlTagRe = regexp.MustCompile("(?i)^" + htmlTag) -) - -const ( - htmlTag = "(?:" + openTag + "|" + closeTag + "|" + htmlComment + "|" + - processingInstruction + "|" + declaration + "|" + cdata + ")" - closeTag = "]" - openTag = "<" + tagName + attribute + "*" + "\\s*/?>" - attribute = "(?:" + "\\s+" + attributeName + attributeValueSpec + "?)" - attributeValue = "(?:" + unquotedValue + "|" + singleQuotedValue + "|" + doubleQuotedValue + ")" - attributeValueSpec = "(?:" + "\\s*=" + "\\s*" + attributeValue + ")" - attributeName = "[a-zA-Z_:][a-zA-Z0-9:._-]*" - cdata = "" - declaration = "]*>" - doubleQuotedValue = "\"[^\"]*\"" - htmlComment = "|" - processingInstruction = "[<][?].*?[?][>]" - singleQuotedValue = "'[^']*'" - tagName = "[A-Za-z][A-Za-z0-9-]*" - 
unquotedValue = "[^\"'=<>`\\x00-\\x20]+" -) - -// HTMLRendererParameters is a collection of supplementary parameters tweaking -// the behavior of various parts of HTML renderer. -type HTMLRendererParameters struct { - // Prepend this text to each relative URL. - AbsolutePrefix string - // Add this text to each footnote anchor, to ensure uniqueness. - FootnoteAnchorPrefix string - // Show this text inside the tag for a footnote return link, if the - // HTML_FOOTNOTE_RETURN_LINKS flag is enabled. If blank, the string - // [return] is used. - FootnoteReturnLinkContents string - // If set, add this text to the front of each Heading ID, to ensure - // uniqueness. - HeadingIDPrefix string - // If set, add this text to the back of each Heading ID, to ensure uniqueness. - HeadingIDSuffix string - // Increase heading levels: if the offset is 1,
<h1> becomes <h2>
etc. - // Negative offset is also valid. - // Resulting levels are clipped between 1 and 6. - HeadingLevelOffset int - - Title string // Document title (used if CompletePage is set) - CSS string // Optional CSS file URL (used if CompletePage is set) - Icon string // Optional icon file URL (used if CompletePage is set) - - Flags HTMLFlags // Flags allow customizing this renderer's behavior -} - -// HTMLRenderer is a type that implements the Renderer interface for HTML output. -// -// Do not create this directly, instead use the NewHTMLRenderer function. -type HTMLRenderer struct { - HTMLRendererParameters - - closeTag string // how to end singleton tags: either " />" or ">" - - // Track heading IDs to prevent ID collision in a single generation. - headingIDs map[string]int - - lastOutputLen int - disableTags int - - sr *SPRenderer -} - -const ( - xhtmlClose = " />" - htmlClose = ">" -) - -// NewHTMLRenderer creates and configures an HTMLRenderer object, which -// satisfies the Renderer interface. -func NewHTMLRenderer(params HTMLRendererParameters) *HTMLRenderer { - // configure the rendering engine - closeTag := htmlClose - if params.Flags&UseXHTML != 0 { - closeTag = xhtmlClose - } - - if params.FootnoteReturnLinkContents == "" { - params.FootnoteReturnLinkContents = `[return]` - } - - return &HTMLRenderer{ - HTMLRendererParameters: params, - - closeTag: closeTag, - headingIDs: make(map[string]int), - - sr: NewSmartypantsRenderer(params.Flags), - } -} - -func isHTMLTag(tag []byte, tagname string) bool { - found, _ := findHTMLTagPos(tag, tagname) - return found -} - -// Look for a character, but ignore it when it's in any kind of quotes, it -// might be JavaScript -func skipUntilCharIgnoreQuotes(html []byte, start int, char byte) int { - inSingleQuote := false - inDoubleQuote := false - inGraveQuote := false - i := start - for i < len(html) { - switch { - case html[i] == char && !inSingleQuote && !inDoubleQuote && !inGraveQuote: - return i - case html[i] == '\'': - inSingleQuote = !inSingleQuote - case html[i] == '"': - inDoubleQuote = !inDoubleQuote - case html[i] == '`': - inGraveQuote = !inGraveQuote - } - i++ - } - return start -} - -func findHTMLTagPos(tag []byte, tagname string) (bool, int) { - i := 0 - if i < len(tag) && tag[0] != '<' { - return false, -1 - } - i++ - i = skipSpace(tag, i) - - if i < len(tag) && tag[i] == '/' { - i++ - } - - i = skipSpace(tag, i) - j := 0 - for ; i < len(tag); i, j = i+1, j+1 { - if j >= len(tagname) { - break - } - - if strings.ToLower(string(tag[i]))[0] != tagname[j] { - return false, -1 - } - } - - if i == len(tag) { - return false, -1 - } - - rightAngle := skipUntilCharIgnoreQuotes(tag, i, '>') - if rightAngle >= i { - return true, rightAngle - } - - return false, -1 -} - -func skipSpace(tag []byte, i int) int { - for i < len(tag) && isspace(tag[i]) { - i++ - } - return i -} - -func isRelativeLink(link []byte) (yes bool) { - // a tag begin with '#' - if link[0] == '#' { - return true - } - - // link begin with '/' but not '//', the second maybe a protocol relative link - if len(link) >= 2 && link[0] == '/' && link[1] != '/' { - return true - } - - // only the root '/' - if len(link) == 1 && link[0] == '/' { - return true - } - - // current directory : begin with "./" - if bytes.HasPrefix(link, []byte("./")) { - return true - } - - // parent directory : begin with "../" - if bytes.HasPrefix(link, []byte("../")) { - return true - } - - return false -} - -func (r *HTMLRenderer) ensureUniqueHeadingID(id string) string { - for count, found := 
r.headingIDs[id]; found; count, found = r.headingIDs[id] { - tmp := fmt.Sprintf("%s-%d", id, count+1) - - if _, tmpFound := r.headingIDs[tmp]; !tmpFound { - r.headingIDs[id] = count + 1 - id = tmp - } else { - id = id + "-1" - } - } - - if _, found := r.headingIDs[id]; !found { - r.headingIDs[id] = 0 - } - - return id -} - -func (r *HTMLRenderer) addAbsPrefix(link []byte) []byte { - if r.AbsolutePrefix != "" && isRelativeLink(link) && link[0] != '.' { - newDest := r.AbsolutePrefix - if link[0] != '/' { - newDest += "/" - } - newDest += string(link) - return []byte(newDest) - } - return link -} - -func appendLinkAttrs(attrs []string, flags HTMLFlags, link []byte) []string { - if isRelativeLink(link) { - return attrs - } - val := []string{} - if flags&NofollowLinks != 0 { - val = append(val, "nofollow") - } - if flags&NoreferrerLinks != 0 { - val = append(val, "noreferrer") - } - if flags&NoopenerLinks != 0 { - val = append(val, "noopener") - } - if flags&HrefTargetBlank != 0 { - attrs = append(attrs, "target=\"_blank\"") - } - if len(val) == 0 { - return attrs - } - attr := fmt.Sprintf("rel=%q", strings.Join(val, " ")) - return append(attrs, attr) -} - -func isMailto(link []byte) bool { - return bytes.HasPrefix(link, []byte("mailto:")) -} - -func needSkipLink(flags HTMLFlags, dest []byte) bool { - if flags&SkipLinks != 0 { - return true - } - return flags&Safelink != 0 && !isSafeLink(dest) && !isMailto(dest) -} - -func isSmartypantable(node *Node) bool { - pt := node.Parent.Type - return pt != Link && pt != CodeBlock && pt != Code -} - -func appendLanguageAttr(attrs []string, info []byte) []string { - if len(info) == 0 { - return attrs - } - endOfLang := bytes.IndexAny(info, "\t ") - if endOfLang < 0 { - endOfLang = len(info) - } - return append(attrs, fmt.Sprintf("class=\"language-%s\"", info[:endOfLang])) -} - -func (r *HTMLRenderer) tag(w io.Writer, name []byte, attrs []string) { - w.Write(name) - if len(attrs) > 0 { - w.Write(spaceBytes) - w.Write([]byte(strings.Join(attrs, " "))) - } - w.Write(gtBytes) - r.lastOutputLen = 1 -} - -func footnoteRef(prefix string, node *Node) []byte { - urlFrag := prefix + string(slugify(node.Destination)) - anchor := fmt.Sprintf(`%d`, urlFrag, node.NoteID) - return []byte(fmt.Sprintf(`%s`, urlFrag, anchor)) -} - -func footnoteItem(prefix string, slug []byte) []byte { - return []byte(fmt.Sprintf(`
  • `, prefix, slug)) -} - -func footnoteReturnLink(prefix, returnLink string, slug []byte) []byte { - const format = ` %s` - return []byte(fmt.Sprintf(format, prefix, slug, returnLink)) -} - -func itemOpenCR(node *Node) bool { - if node.Prev == nil { - return false - } - ld := node.Parent.ListData - return !ld.Tight && ld.ListFlags&ListTypeDefinition == 0 -} - -func skipParagraphTags(node *Node) bool { - grandparent := node.Parent.Parent - if grandparent == nil || grandparent.Type != List { - return false - } - tightOrTerm := grandparent.Tight || node.Parent.ListFlags&ListTypeTerm != 0 - return grandparent.Type == List && tightOrTerm -} - -func cellAlignment(align CellAlignFlags) string { - switch align { - case TableAlignmentLeft: - return "left" - case TableAlignmentRight: - return "right" - case TableAlignmentCenter: - return "center" - default: - return "" - } -} - -func (r *HTMLRenderer) out(w io.Writer, text []byte) { - if r.disableTags > 0 { - w.Write(htmlTagRe.ReplaceAll(text, []byte{})) - } else { - w.Write(text) - } - r.lastOutputLen = len(text) -} - -func (r *HTMLRenderer) cr(w io.Writer) { - if r.lastOutputLen > 0 { - r.out(w, nlBytes) - } -} - -var ( - nlBytes = []byte{'\n'} - gtBytes = []byte{'>'} - spaceBytes = []byte{' '} -) - -var ( - brTag = []byte("
    ") - brXHTMLTag = []byte("
    ") - emTag = []byte("") - emCloseTag = []byte("") - strongTag = []byte("") - strongCloseTag = []byte("") - delTag = []byte("") - delCloseTag = []byte("") - ttTag = []byte("") - ttCloseTag = []byte("") - aTag = []byte("") - preTag = []byte("
    ")
    -	preCloseTag        = []byte("
    ") - codeTag = []byte("") - codeCloseTag = []byte("") - pTag = []byte("

    ") - pCloseTag = []byte("

    ") - blockquoteTag = []byte("
    ") - blockquoteCloseTag = []byte("
    ") - hrTag = []byte("
    ") - hrXHTMLTag = []byte("
    ") - ulTag = []byte("
      ") - ulCloseTag = []byte("
    ") - olTag = []byte("
      ") - olCloseTag = []byte("
    ") - dlTag = []byte("
    ") - dlCloseTag = []byte("
    ") - liTag = []byte("
  • ") - liCloseTag = []byte("
  • ") - ddTag = []byte("
    ") - ddCloseTag = []byte("
    ") - dtTag = []byte("
    ") - dtCloseTag = []byte("
    ") - tableTag = []byte("") - tableCloseTag = []byte("
    ") - tdTag = []byte("") - thTag = []byte("") - theadTag = []byte("") - theadCloseTag = []byte("") - tbodyTag = []byte("") - tbodyCloseTag = []byte("") - trTag = []byte("") - trCloseTag = []byte("") - h1Tag = []byte("") - h2Tag = []byte("") - h3Tag = []byte("") - h4Tag = []byte("") - h5Tag = []byte("") - h6Tag = []byte("") - - footnotesDivBytes = []byte("\n
    \n\n") - footnotesCloseDivBytes = []byte("\n
    \n") -) - -func headingTagsFromLevel(level int) ([]byte, []byte) { - if level <= 1 { - return h1Tag, h1CloseTag - } - switch level { - case 2: - return h2Tag, h2CloseTag - case 3: - return h3Tag, h3CloseTag - case 4: - return h4Tag, h4CloseTag - case 5: - return h5Tag, h5CloseTag - } - return h6Tag, h6CloseTag -} - -func (r *HTMLRenderer) outHRTag(w io.Writer) { - if r.Flags&UseXHTML == 0 { - r.out(w, hrTag) - } else { - r.out(w, hrXHTMLTag) - } -} - -// RenderNode is a default renderer of a single node of a syntax tree. For -// block nodes it will be called twice: first time with entering=true, second -// time with entering=false, so that it could know when it's working on an open -// tag and when on close. It writes the result to w. -// -// The return value is a way to tell the calling walker to adjust its walk -// pattern: e.g. it can terminate the traversal by returning Terminate. Or it -// can ask the walker to skip a subtree of this node by returning SkipChildren. -// The typical behavior is to return GoToNext, which asks for the usual -// traversal to the next node. -func (r *HTMLRenderer) RenderNode(w io.Writer, node *Node, entering bool) WalkStatus { - attrs := []string{} - switch node.Type { - case Text: - if r.Flags&Smartypants != 0 { - var tmp bytes.Buffer - escapeHTML(&tmp, node.Literal) - r.sr.Process(w, tmp.Bytes()) - } else { - if node.Parent.Type == Link { - escLink(w, node.Literal) - } else { - escapeHTML(w, node.Literal) - } - } - case Softbreak: - r.cr(w) - // TODO: make it configurable via out(renderer.softbreak) - case Hardbreak: - if r.Flags&UseXHTML == 0 { - r.out(w, brTag) - } else { - r.out(w, brXHTMLTag) - } - r.cr(w) - case Emph: - if entering { - r.out(w, emTag) - } else { - r.out(w, emCloseTag) - } - case Strong: - if entering { - r.out(w, strongTag) - } else { - r.out(w, strongCloseTag) - } - case Del: - if entering { - r.out(w, delTag) - } else { - r.out(w, delCloseTag) - } - case HTMLSpan: - if r.Flags&SkipHTML != 0 { - break - } - r.out(w, node.Literal) - case Link: - // mark it but don't link it if it is not a safe link: no smartypants - dest := node.LinkData.Destination - if needSkipLink(r.Flags, dest) { - if entering { - r.out(w, ttTag) - } else { - r.out(w, ttCloseTag) - } - } else { - if entering { - dest = r.addAbsPrefix(dest) - var hrefBuf bytes.Buffer - hrefBuf.WriteString("href=\"") - escLink(&hrefBuf, dest) - hrefBuf.WriteByte('"') - attrs = append(attrs, hrefBuf.String()) - if node.NoteID != 0 { - r.out(w, footnoteRef(r.FootnoteAnchorPrefix, node)) - break - } - attrs = appendLinkAttrs(attrs, r.Flags, dest) - if len(node.LinkData.Title) > 0 { - var titleBuff bytes.Buffer - titleBuff.WriteString("title=\"") - escapeHTML(&titleBuff, node.LinkData.Title) - titleBuff.WriteByte('"') - attrs = append(attrs, titleBuff.String()) - } - r.tag(w, aTag, attrs) - } else { - if node.NoteID != 0 { - break - } - r.out(w, aCloseTag) - } - } - case Image: - if r.Flags&SkipImages != 0 { - return SkipChildren - } - if entering { - dest := node.LinkData.Destination - dest = r.addAbsPrefix(dest) - if r.disableTags == 0 { - //if options.safe && potentiallyUnsafe(dest) { - //out(w, ``)
-				//} else {
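-				// not in safe mode: emit the opening <img> tag; the alt text comes
-				// from the child nodes, which are rendered with disableTags set so
-				// that any nested markup is stripped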
-				r.out(w, []byte(`<img src=`)) - } - } - case Code: - r.out(w, codeTag) - escapeHTML(w, node.Literal) - r.out(w, codeCloseTag) - case Document: - break - case Paragraph: - if skipParagraphTags(node) { - break - } - if entering { - // TODO: untangle this clusterfuck about when the newlines need - // to be added and when not. - if node.Prev != nil { - switch node.Prev.Type { - case HTMLBlock, List, Paragraph, Heading, CodeBlock, BlockQuote, HorizontalRule: - r.cr(w) - } - } - if node.Parent.Type == BlockQuote && node.Prev == nil { - r.cr(w) - } - r.out(w, pTag) - } else { - r.out(w, pCloseTag) - if !(node.Parent.Type == Item && node.Next == nil) { - r.cr(w) - } - } - case BlockQuote: - if entering { - r.cr(w) - r.out(w, blockquoteTag) - } else { - r.out(w, blockquoteCloseTag) - r.cr(w) - } - case HTMLBlock: - if r.Flags&SkipHTML != 0 { - break - } - r.cr(w) - r.out(w, node.Literal) - r.cr(w) - case Heading: - headingLevel := r.HTMLRendererParameters.HeadingLevelOffset + node.Level - openTag, closeTag := headingTagsFromLevel(headingLevel) - if entering { - if node.IsTitleblock { - attrs = append(attrs, `class="title"`) - } - if node.HeadingID != "" { - id := r.ensureUniqueHeadingID(node.HeadingID) - if r.HeadingIDPrefix != "" { - id = r.HeadingIDPrefix + id - } - if r.HeadingIDSuffix != "" { - id = id + r.HeadingIDSuffix - } - attrs = append(attrs, fmt.Sprintf(`id="%s"`, id)) - } - r.cr(w) - r.tag(w, openTag, attrs) - } else { - r.out(w, closeTag) - if !(node.Parent.Type == Item && node.Next == nil) { - r.cr(w) - } - } - case HorizontalRule: - r.cr(w) - r.outHRTag(w) - r.cr(w) - case List: - openTag := ulTag - closeTag := ulCloseTag - if node.ListFlags&ListTypeOrdered != 0 { - openTag = olTag - closeTag = olCloseTag - } - if node.ListFlags&ListTypeDefinition != 0 { - openTag = dlTag - closeTag = dlCloseTag - } - if entering { - if node.IsFootnotesList { - r.out(w, footnotesDivBytes) - r.outHRTag(w) - r.cr(w) - } - r.cr(w) - if node.Parent.Type == Item && node.Parent.Parent.Tight { - r.cr(w) - } - r.tag(w, openTag[:len(openTag)-1], attrs) - r.cr(w) - } else { - r.out(w, closeTag) - //cr(w) - //if node.parent.Type != Item { - // cr(w) - //} - if node.Parent.Type == Item && node.Next != nil { - r.cr(w) - } - if node.Parent.Type == Document || node.Parent.Type == BlockQuote { - r.cr(w) - } - if node.IsFootnotesList { - r.out(w, footnotesCloseDivBytes) - } - } - case Item: - openTag := liTag - closeTag := liCloseTag - if node.ListFlags&ListTypeDefinition != 0 { - openTag = ddTag - closeTag = ddCloseTag - } - if node.ListFlags&ListTypeTerm != 0 { - openTag = dtTag - closeTag = dtCloseTag - } - if entering { - if itemOpenCR(node) { - r.cr(w) - } - if node.ListData.RefLink != nil { - slug := slugify(node.ListData.RefLink) - r.out(w, footnoteItem(r.FootnoteAnchorPrefix, slug)) - break - } - r.out(w, openTag) - } else { - if node.ListData.RefLink != nil { - slug := slugify(node.ListData.RefLink) - if r.Flags&FootnoteReturnLinks != 0 { - r.out(w, footnoteReturnLink(r.FootnoteAnchorPrefix, r.FootnoteReturnLinkContents, slug)) - } - } - r.out(w, closeTag) - r.cr(w) - } - case CodeBlock: - attrs = appendLanguageAttr(attrs, node.Info) - r.cr(w) - r.out(w, preTag) - r.tag(w, codeTag[:len(codeTag)-1], attrs) - escapeHTML(w, node.Literal) - r.out(w, codeCloseTag) - r.out(w, preCloseTag) - if node.Parent.Type != Item { - r.cr(w) - } - case Table: - if entering { - r.cr(w) - r.out(w, tableTag) - } else { - r.out(w, tableCloseTag) - r.cr(w) - } - case TableCell: - openTag := tdTag - closeTag := tdCloseTag - 
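-			// header cells render as <th>, body cells as <td>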
if node.IsHeader { - openTag = thTag - closeTag = thCloseTag - } - if entering { - align := cellAlignment(node.Align) - if align != "" { - attrs = append(attrs, fmt.Sprintf(`align="%s"`, align)) - } - if node.Prev == nil { - r.cr(w) - } - r.tag(w, openTag, attrs) - } else { - r.out(w, closeTag) - r.cr(w) - } - case TableHead: - if entering { - r.cr(w) - r.out(w, theadTag) - } else { - r.out(w, theadCloseTag) - r.cr(w) - } - case TableBody: - if entering { - r.cr(w) - r.out(w, tbodyTag) - // XXX: this is to adhere to a rather silly test. Should fix test. - if node.FirstChild == nil { - r.cr(w) - } - } else { - r.out(w, tbodyCloseTag) - r.cr(w) - } - case TableRow: - if entering { - r.cr(w) - r.out(w, trTag) - } else { - r.out(w, trCloseTag) - r.cr(w) - } - default: - panic("Unknown node type " + node.Type.String()) - } - return GoToNext -} - -// RenderHeader writes HTML document preamble and TOC if requested. -func (r *HTMLRenderer) RenderHeader(w io.Writer, ast *Node) { - r.writeDocumentHeader(w) - if r.Flags&TOC != 0 { - r.writeTOC(w, ast) - } -} - -// RenderFooter writes HTML document footer. -func (r *HTMLRenderer) RenderFooter(w io.Writer, ast *Node) { - if r.Flags&CompletePage == 0 { - return - } - io.WriteString(w, "\n\n\n") -} - -func (r *HTMLRenderer) writeDocumentHeader(w io.Writer) { - if r.Flags&CompletePage == 0 { - return - } - ending := "" - if r.Flags&UseXHTML != 0 { - io.WriteString(w, "\n") - io.WriteString(w, "\n") - ending = " /" - } else { - io.WriteString(w, "\n") - io.WriteString(w, "\n") - } - io.WriteString(w, "\n") - io.WriteString(w, " ") - if r.Flags&Smartypants != 0 { - r.sr.Process(w, []byte(r.Title)) - } else { - escapeHTML(w, []byte(r.Title)) - } - io.WriteString(w, "\n") - io.WriteString(w, " \n") - io.WriteString(w, " \n") - if r.CSS != "" { - io.WriteString(w, " \n") - } - if r.Icon != "" { - io.WriteString(w, " \n") - } - io.WriteString(w, "\n") - io.WriteString(w, "\n\n") -} - -func (r *HTMLRenderer) writeTOC(w io.Writer, ast *Node) { - buf := bytes.Buffer{} - - inHeading := false - tocLevel := 0 - headingCount := 0 - - ast.Walk(func(node *Node, entering bool) WalkStatus { - if node.Type == Heading && !node.HeadingData.IsTitleblock { - inHeading = entering - if entering { - node.HeadingID = fmt.Sprintf("toc_%d", headingCount) - if node.Level == tocLevel { - buf.WriteString("\n\n
  • ") - } else if node.Level < tocLevel { - for node.Level < tocLevel { - tocLevel-- - buf.WriteString("
  • \n") - } - buf.WriteString("\n\n
  • ") - } else { - for node.Level > tocLevel { - tocLevel++ - buf.WriteString("\n") - } - - if buf.Len() > 0 { - io.WriteString(w, "\n") - } - r.lastOutputLen = buf.Len() -} diff --git a/vendor/github.com/russross/blackfriday/v2/inline.go b/vendor/github.com/russross/blackfriday/v2/inline.go deleted file mode 100644 index 4ed29079..00000000 --- a/vendor/github.com/russross/blackfriday/v2/inline.go +++ /dev/null @@ -1,1228 +0,0 @@ -// -// Blackfriday Markdown Processor -// Available at http://github.com/russross/blackfriday -// -// Copyright © 2011 Russ Ross . -// Distributed under the Simplified BSD License. -// See README.md for details. -// - -// -// Functions to parse inline elements. -// - -package blackfriday - -import ( - "bytes" - "regexp" - "strconv" -) - -var ( - urlRe = `((https?|ftp):\/\/|\/)[-A-Za-z0-9+&@#\/%?=~_|!:,.;\(\)]+` - anchorRe = regexp.MustCompile(`^(]+")?\s?>` + urlRe + `<\/a>)`) - - // https://www.w3.org/TR/html5/syntax.html#character-references - // highest unicode code point in 17 planes (2^20): 1,114,112d = - // 7 dec digits or 6 hex digits - // named entity references can be 2-31 characters with stuff like < - // at one end and ∳ at the other. There - // are also sometimes numbers at the end, although this isn't inherent - // in the specification; there are never numbers anywhere else in - // current character references, though; see ¾ and ▒, etc. - // https://www.w3.org/TR/html5/syntax.html#named-character-references - // - // entity := "&" (named group | number ref) ";" - // named group := [a-zA-Z]{2,31}[0-9]{0,2} - // number ref := "#" (dec ref | hex ref) - // dec ref := [0-9]{1,7} - // hex ref := ("x" | "X") [0-9a-fA-F]{1,6} - htmlEntityRe = regexp.MustCompile(`&([a-zA-Z]{2,31}[0-9]{0,2}|#([0-9]{1,7}|[xX][0-9a-fA-F]{1,6}));`) -) - -// Functions to parse text within a block -// Each function returns the number of chars taken care of -// data is the complete block being rendered -// offset is the number of valid chars before the current cursor - -func (p *Markdown) inline(currBlock *Node, data []byte) { - // handlers might call us recursively: enforce a maximum depth - if p.nesting >= p.maxNesting || len(data) == 0 { - return - } - p.nesting++ - beg, end := 0, 0 - for end < len(data) { - handler := p.inlineCallback[data[end]] - if handler != nil { - if consumed, node := handler(p, data, end); consumed == 0 { - // No action from the callback. - end++ - } else { - // Copy inactive chars into the output. - currBlock.AppendChild(text(data[beg:end])) - if node != nil { - currBlock.AppendChild(node) - } - // Skip past whatever the callback used. 
- beg = end + consumed - end = beg - } - } else { - end++ - } - } - if beg < len(data) { - if data[end-1] == '\n' { - end-- - } - currBlock.AppendChild(text(data[beg:end])) - } - p.nesting-- -} - -// single and double emphasis parsing -func emphasis(p *Markdown, data []byte, offset int) (int, *Node) { - data = data[offset:] - c := data[0] - - if len(data) > 2 && data[1] != c { - // whitespace cannot follow an opening emphasis; - // strikethrough only takes two characters '~~' - if c == '~' || isspace(data[1]) { - return 0, nil - } - ret, node := helperEmphasis(p, data[1:], c) - if ret == 0 { - return 0, nil - } - - return ret + 1, node - } - - if len(data) > 3 && data[1] == c && data[2] != c { - if isspace(data[2]) { - return 0, nil - } - ret, node := helperDoubleEmphasis(p, data[2:], c) - if ret == 0 { - return 0, nil - } - - return ret + 2, node - } - - if len(data) > 4 && data[1] == c && data[2] == c && data[3] != c { - if c == '~' || isspace(data[3]) { - return 0, nil - } - ret, node := helperTripleEmphasis(p, data, 3, c) - if ret == 0 { - return 0, nil - } - - return ret + 3, node - } - - return 0, nil -} - -func codeSpan(p *Markdown, data []byte, offset int) (int, *Node) { - data = data[offset:] - - nb := 0 - - // count the number of backticks in the delimiter - for nb < len(data) && data[nb] == '`' { - nb++ - } - - // find the next delimiter - i, end := 0, 0 - for end = nb; end < len(data) && i < nb; end++ { - if data[end] == '`' { - i++ - } else { - i = 0 - } - } - - // no matching delimiter? - if i < nb && end >= len(data) { - return 0, nil - } - - // trim outside whitespace - fBegin := nb - for fBegin < end && data[fBegin] == ' ' { - fBegin++ - } - - fEnd := end - nb - for fEnd > fBegin && data[fEnd-1] == ' ' { - fEnd-- - } - - // render the code span - if fBegin != fEnd { - code := NewNode(Code) - code.Literal = data[fBegin:fEnd] - return end, code - } - - return end, nil -} - -// newline preceded by two spaces becomes
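-// a hard line break (<br>, or <br /> when XHTML output is enabled)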
    -func maybeLineBreak(p *Markdown, data []byte, offset int) (int, *Node) { - origOffset := offset - for offset < len(data) && data[offset] == ' ' { - offset++ - } - - if offset < len(data) && data[offset] == '\n' { - if offset-origOffset >= 2 { - return offset - origOffset + 1, NewNode(Hardbreak) - } - return offset - origOffset, nil - } - return 0, nil -} - -// newline without two spaces works when HardLineBreak is enabled -func lineBreak(p *Markdown, data []byte, offset int) (int, *Node) { - if p.extensions&HardLineBreak != 0 { - return 1, NewNode(Hardbreak) - } - return 0, nil -} - -type linkType int - -const ( - linkNormal linkType = iota - linkImg - linkDeferredFootnote - linkInlineFootnote -) - -func isReferenceStyleLink(data []byte, pos int, t linkType) bool { - if t == linkDeferredFootnote { - return false - } - return pos < len(data)-1 && data[pos] == '[' && data[pos+1] != '^' -} - -func maybeImage(p *Markdown, data []byte, offset int) (int, *Node) { - if offset < len(data)-1 && data[offset+1] == '[' { - return link(p, data, offset) - } - return 0, nil -} - -func maybeInlineFootnote(p *Markdown, data []byte, offset int) (int, *Node) { - if offset < len(data)-1 && data[offset+1] == '[' { - return link(p, data, offset) - } - return 0, nil -} - -// '[': parse a link or an image or a footnote -func link(p *Markdown, data []byte, offset int) (int, *Node) { - // no links allowed inside regular links, footnote, and deferred footnotes - if p.insideLink && (offset > 0 && data[offset-1] == '[' || len(data)-1 > offset && data[offset+1] == '^') { - return 0, nil - } - - var t linkType - switch { - // special case: ![^text] == deferred footnote (that follows something with - // an exclamation point) - case p.extensions&Footnotes != 0 && len(data)-1 > offset && data[offset+1] == '^': - t = linkDeferredFootnote - // ![alt] == image - case offset >= 0 && data[offset] == '!': - t = linkImg - offset++ - // ^[text] == inline footnote - // [^refId] == deferred footnote - case p.extensions&Footnotes != 0: - if offset >= 0 && data[offset] == '^' { - t = linkInlineFootnote - offset++ - } else if len(data)-1 > offset && data[offset+1] == '^' { - t = linkDeferredFootnote - } - // [text] == regular link - default: - t = linkNormal - } - - data = data[offset:] - - var ( - i = 1 - noteID int - title, link, altContent []byte - textHasNl = false - ) - - if t == linkDeferredFootnote { - i++ - } - - // look for the matching closing bracket - for level := 1; level > 0 && i < len(data); i++ { - switch { - case data[i] == '\n': - textHasNl = true - - case data[i-1] == '\\': - continue - - case data[i] == '[': - level++ - - case data[i] == ']': - level-- - if level <= 0 { - i-- // compensate for extra i++ in for loop - } - } - } - - if i >= len(data) { - return 0, nil - } - - txtE := i - i++ - var footnoteNode *Node - - // skip any amount of whitespace or newline - // (this is much more lax than original markdown syntax) - for i < len(data) && isspace(data[i]) { - i++ - } - - // inline style link - switch { - case i < len(data) && data[i] == '(': - // skip initial whitespace - i++ - - for i < len(data) && isspace(data[i]) { - i++ - } - - linkB := i - - // look for link end: ' " ) - findlinkend: - for i < len(data) { - switch { - case data[i] == '\\': - i += 2 - - case data[i] == ')' || data[i] == '\'' || data[i] == '"': - break findlinkend - - default: - i++ - } - } - - if i >= len(data) { - return 0, nil - } - linkE := i - - // look for title end if present - titleB, titleE := 0, 0 - if data[i] == '\'' || 
data[i] == '"' { - i++ - titleB = i - - findtitleend: - for i < len(data) { - switch { - case data[i] == '\\': - i += 2 - - case data[i] == ')': - break findtitleend - - default: - i++ - } - } - - if i >= len(data) { - return 0, nil - } - - // skip whitespace after title - titleE = i - 1 - for titleE > titleB && isspace(data[titleE]) { - titleE-- - } - - // check for closing quote presence - if data[titleE] != '\'' && data[titleE] != '"' { - titleB, titleE = 0, 0 - linkE = i - } - } - - // remove whitespace at the end of the link - for linkE > linkB && isspace(data[linkE-1]) { - linkE-- - } - - // remove optional angle brackets around the link - if data[linkB] == '<' { - linkB++ - } - if data[linkE-1] == '>' { - linkE-- - } - - // build escaped link and title - if linkE > linkB { - link = data[linkB:linkE] - } - - if titleE > titleB { - title = data[titleB:titleE] - } - - i++ - - // reference style link - case isReferenceStyleLink(data, i, t): - var id []byte - altContentConsidered := false - - // look for the id - i++ - linkB := i - for i < len(data) && data[i] != ']' { - i++ - } - if i >= len(data) { - return 0, nil - } - linkE := i - - // find the reference - if linkB == linkE { - if textHasNl { - var b bytes.Buffer - - for j := 1; j < txtE; j++ { - switch { - case data[j] != '\n': - b.WriteByte(data[j]) - case data[j-1] != ' ': - b.WriteByte(' ') - } - } - - id = b.Bytes() - } else { - id = data[1:txtE] - altContentConsidered = true - } - } else { - id = data[linkB:linkE] - } - - // find the reference with matching id - lr, ok := p.getRef(string(id)) - if !ok { - return 0, nil - } - - // keep link and title from reference - link = lr.link - title = lr.title - if altContentConsidered { - altContent = lr.text - } - i++ - - // shortcut reference style link or reference or inline footnote - default: - var id []byte - - // craft the id - if textHasNl { - var b bytes.Buffer - - for j := 1; j < txtE; j++ { - switch { - case data[j] != '\n': - b.WriteByte(data[j]) - case data[j-1] != ' ': - b.WriteByte(' ') - } - } - - id = b.Bytes() - } else { - if t == linkDeferredFootnote { - id = data[2:txtE] // get rid of the ^ - } else { - id = data[1:txtE] - } - } - - footnoteNode = NewNode(Item) - if t == linkInlineFootnote { - // create a new reference - noteID = len(p.notes) + 1 - - var fragment []byte - if len(id) > 0 { - if len(id) < 16 { - fragment = make([]byte, len(id)) - } else { - fragment = make([]byte, 16) - } - copy(fragment, slugify(id)) - } else { - fragment = append([]byte("footnote-"), []byte(strconv.Itoa(noteID))...) 
- } - - ref := &reference{ - noteID: noteID, - hasBlock: false, - link: fragment, - title: id, - footnote: footnoteNode, - } - - p.notes = append(p.notes, ref) - - link = ref.link - title = ref.title - } else { - // find the reference with matching id - lr, ok := p.getRef(string(id)) - if !ok { - return 0, nil - } - - if t == linkDeferredFootnote { - lr.noteID = len(p.notes) + 1 - lr.footnote = footnoteNode - p.notes = append(p.notes, lr) - } - - // keep link and title from reference - link = lr.link - // if inline footnote, title == footnote contents - title = lr.title - noteID = lr.noteID - } - - // rewind the whitespace - i = txtE + 1 - } - - var uLink []byte - if t == linkNormal || t == linkImg { - if len(link) > 0 { - var uLinkBuf bytes.Buffer - unescapeText(&uLinkBuf, link) - uLink = uLinkBuf.Bytes() - } - - // links need something to click on and somewhere to go - if len(uLink) == 0 || (t == linkNormal && txtE <= 1) { - return 0, nil - } - } - - // call the relevant rendering function - var linkNode *Node - switch t { - case linkNormal: - linkNode = NewNode(Link) - linkNode.Destination = normalizeURI(uLink) - linkNode.Title = title - if len(altContent) > 0 { - linkNode.AppendChild(text(altContent)) - } else { - // links cannot contain other links, so turn off link parsing - // temporarily and recurse - insideLink := p.insideLink - p.insideLink = true - p.inline(linkNode, data[1:txtE]) - p.insideLink = insideLink - } - - case linkImg: - linkNode = NewNode(Image) - linkNode.Destination = uLink - linkNode.Title = title - linkNode.AppendChild(text(data[1:txtE])) - i++ - - case linkInlineFootnote, linkDeferredFootnote: - linkNode = NewNode(Link) - linkNode.Destination = link - linkNode.Title = title - linkNode.NoteID = noteID - linkNode.Footnote = footnoteNode - if t == linkInlineFootnote { - i++ - } - - default: - return 0, nil - } - - return i, linkNode -} - -func (p *Markdown) inlineHTMLComment(data []byte) int { - if len(data) < 5 { - return 0 - } - if data[0] != '<' || data[1] != '!' || data[2] != '-' || data[3] != '-' { - return 0 - } - i := 5 - // scan for an end-of-comment marker, across lines if necessary - for i < len(data) && !(data[i-2] == '-' && data[i-1] == '-' && data[i] == '>') { - i++ - } - // no end-of-comment marker - if i >= len(data) { - return 0 - } - return i + 1 -} - -func stripMailto(link []byte) []byte { - if bytes.HasPrefix(link, []byte("mailto://")) { - return link[9:] - } else if bytes.HasPrefix(link, []byte("mailto:")) { - return link[7:] - } else { - return link - } -} - -// autolinkType specifies a kind of autolink that gets detected. -type autolinkType int - -// These are the possible flag values for the autolink renderer. -const ( - notAutolink autolinkType = iota - normalAutolink - emailAutolink -) - -// '<' when tags or autolinks are allowed -func leftAngle(p *Markdown, data []byte, offset int) (int, *Node) { - data = data[offset:] - altype, end := tagLength(data) - if size := p.inlineHTMLComment(data); size > 0 { - end = size - } - if end > 2 { - if altype != notAutolink { - var uLink bytes.Buffer - unescapeText(&uLink, data[1:end+1-2]) - if uLink.Len() > 0 { - link := uLink.Bytes() - node := NewNode(Link) - node.Destination = link - if altype == emailAutolink { - node.Destination = append([]byte("mailto:"), link...) 
- } - node.AppendChild(text(stripMailto(link))) - return end, node - } - } else { - htmlTag := NewNode(HTMLSpan) - htmlTag.Literal = data[:end] - return end, htmlTag - } - } - - return end, nil -} - -// '\\' backslash escape -var escapeChars = []byte("\\`*_{}[]()#+-.!:|&<>~") - -func escape(p *Markdown, data []byte, offset int) (int, *Node) { - data = data[offset:] - - if len(data) > 1 { - if p.extensions&BackslashLineBreak != 0 && data[1] == '\n' { - return 2, NewNode(Hardbreak) - } - if bytes.IndexByte(escapeChars, data[1]) < 0 { - return 0, nil - } - - return 2, text(data[1:2]) - } - - return 2, nil -} - -func unescapeText(ob *bytes.Buffer, src []byte) { - i := 0 - for i < len(src) { - org := i - for i < len(src) && src[i] != '\\' { - i++ - } - - if i > org { - ob.Write(src[org:i]) - } - - if i+1 >= len(src) { - break - } - - ob.WriteByte(src[i+1]) - i += 2 - } -} - -// '&' escaped when it doesn't belong to an entity -// valid entities are assumed to be anything matching &#?[A-Za-z0-9]+; -func entity(p *Markdown, data []byte, offset int) (int, *Node) { - data = data[offset:] - - end := 1 - - if end < len(data) && data[end] == '#' { - end++ - } - - for end < len(data) && isalnum(data[end]) { - end++ - } - - if end < len(data) && data[end] == ';' { - end++ // real entity - } else { - return 0, nil // lone '&' - } - - ent := data[:end] - // undo & escaping or it will be converted to &amp; by another - // escaper in the renderer - if bytes.Equal(ent, []byte("&")) { - ent = []byte{'&'} - } - - return end, text(ent) -} - -func linkEndsWithEntity(data []byte, linkEnd int) bool { - entityRanges := htmlEntityRe.FindAllIndex(data[:linkEnd], -1) - return entityRanges != nil && entityRanges[len(entityRanges)-1][1] == linkEnd -} - -// hasPrefixCaseInsensitive is a custom implementation of -// strings.HasPrefix(strings.ToLower(s), prefix) -// we rolled our own because ToLower pulls in a huge machinery of lowercasing -// anything from Unicode and that's very slow. Since this func will only be -// used on ASCII protocol prefixes, we can take shortcuts. 
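-// For example, it matches "HTTP://" against the lowercase prefix "http://"
-// without lowercasing (and thus allocating) the whole input.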
-func hasPrefixCaseInsensitive(s, prefix []byte) bool { - if len(s) < len(prefix) { - return false - } - delta := byte('a' - 'A') - for i, b := range prefix { - if b != s[i] && b != s[i]+delta { - return false - } - } - return true -} - -var protocolPrefixes = [][]byte{ - []byte("http://"), - []byte("https://"), - []byte("ftp://"), - []byte("file://"), - []byte("mailto:"), -} - -const shortestPrefix = 6 // len("ftp://"), the shortest of the above - -func maybeAutoLink(p *Markdown, data []byte, offset int) (int, *Node) { - // quick check to rule out most false hits - if p.insideLink || len(data) < offset+shortestPrefix { - return 0, nil - } - for _, prefix := range protocolPrefixes { - endOfHead := offset + 8 // 8 is the len() of the longest prefix - if endOfHead > len(data) { - endOfHead = len(data) - } - if hasPrefixCaseInsensitive(data[offset:endOfHead], prefix) { - return autoLink(p, data, offset) - } - } - return 0, nil -} - -func autoLink(p *Markdown, data []byte, offset int) (int, *Node) { - // Now a more expensive check to see if we're not inside an anchor element - anchorStart := offset - offsetFromAnchor := 0 - for anchorStart > 0 && data[anchorStart] != '<' { - anchorStart-- - offsetFromAnchor++ - } - - anchorStr := anchorRe.Find(data[anchorStart:]) - if anchorStr != nil { - anchorClose := NewNode(HTMLSpan) - anchorClose.Literal = anchorStr[offsetFromAnchor:] - return len(anchorStr) - offsetFromAnchor, anchorClose - } - - // scan backward for a word boundary - rewind := 0 - for offset-rewind > 0 && rewind <= 7 && isletter(data[offset-rewind-1]) { - rewind++ - } - if rewind > 6 { // longest supported protocol is "mailto" which has 6 letters - return 0, nil - } - - origData := data - data = data[offset-rewind:] - - if !isSafeLink(data) { - return 0, nil - } - - linkEnd := 0 - for linkEnd < len(data) && !isEndOfLink(data[linkEnd]) { - linkEnd++ - } - - // Skip punctuation at the end of the link - if (data[linkEnd-1] == '.' || data[linkEnd-1] == ',') && data[linkEnd-2] != '\\' { - linkEnd-- - } - - // But don't skip semicolon if it's a part of escaped entity: - if data[linkEnd-1] == ';' && data[linkEnd-2] != '\\' && !linkEndsWithEntity(data, linkEnd) { - linkEnd-- - } - - // See if the link finishes with a punctuation sign that can be closed. - var copen byte - switch data[linkEnd-1] { - case '"': - copen = '"' - case '\'': - copen = '\'' - case ')': - copen = '(' - case ']': - copen = '[' - case '}': - copen = '{' - default: - copen = 0 - } - - if copen != 0 { - bufEnd := offset - rewind + linkEnd - 2 - - openDelim := 1 - - /* Try to close the final punctuation sign in this same line; - * if we managed to close it outside of the URL, that means that it's - * not part of the URL. If it closes inside the URL, that means it - * is part of the URL. 
- * - * Examples: - * - * foo http://www.pokemon.com/Pikachu_(Electric) bar - * => http://www.pokemon.com/Pikachu_(Electric) - * - * foo (http://www.pokemon.com/Pikachu_(Electric)) bar - * => http://www.pokemon.com/Pikachu_(Electric) - * - * foo http://www.pokemon.com/Pikachu_(Electric)) bar - * => http://www.pokemon.com/Pikachu_(Electric)) - * - * (foo http://www.pokemon.com/Pikachu_(Electric)) bar - * => foo http://www.pokemon.com/Pikachu_(Electric) - */ - - for bufEnd >= 0 && origData[bufEnd] != '\n' && openDelim != 0 { - if origData[bufEnd] == data[linkEnd-1] { - openDelim++ - } - - if origData[bufEnd] == copen { - openDelim-- - } - - bufEnd-- - } - - if openDelim == 0 { - linkEnd-- - } - } - - var uLink bytes.Buffer - unescapeText(&uLink, data[:linkEnd]) - - if uLink.Len() > 0 { - node := NewNode(Link) - node.Destination = uLink.Bytes() - node.AppendChild(text(uLink.Bytes())) - return linkEnd, node - } - - return linkEnd, nil -} - -func isEndOfLink(char byte) bool { - return isspace(char) || char == '<' -} - -var validUris = [][]byte{[]byte("http://"), []byte("https://"), []byte("ftp://"), []byte("mailto://")} -var validPaths = [][]byte{[]byte("/"), []byte("./"), []byte("../")} - -func isSafeLink(link []byte) bool { - for _, path := range validPaths { - if len(link) >= len(path) && bytes.Equal(link[:len(path)], path) { - if len(link) == len(path) { - return true - } else if isalnum(link[len(path)]) { - return true - } - } - } - - for _, prefix := range validUris { - // TODO: handle unicode here - // case-insensitive prefix test - if len(link) > len(prefix) && bytes.Equal(bytes.ToLower(link[:len(prefix)]), prefix) && isalnum(link[len(prefix)]) { - return true - } - } - - return false -} - -// return the length of the given tag, or 0 is it's not valid -func tagLength(data []byte) (autolink autolinkType, end int) { - var i, j int - - // a valid tag can't be shorter than 3 chars - if len(data) < 3 { - return notAutolink, 0 - } - - // begins with a '<' optionally followed by '/', followed by letter or number - if data[0] != '<' { - return notAutolink, 0 - } - if data[1] == '/' { - i = 2 - } else { - i = 1 - } - - if !isalnum(data[i]) { - return notAutolink, 0 - } - - // scheme test - autolink = notAutolink - - // try to find the beginning of an URI - for i < len(data) && (isalnum(data[i]) || data[i] == '.' 
|| data[i] == '+' || data[i] == '-') { - i++ - } - - if i > 1 && i < len(data) && data[i] == '@' { - if j = isMailtoAutoLink(data[i:]); j != 0 { - return emailAutolink, i + j - } - } - - if i > 2 && i < len(data) && data[i] == ':' { - autolink = normalAutolink - i++ - } - - // complete autolink test: no whitespace or ' or " - switch { - case i >= len(data): - autolink = notAutolink - case autolink != notAutolink: - j = i - - for i < len(data) { - if data[i] == '\\' { - i += 2 - } else if data[i] == '>' || data[i] == '\'' || data[i] == '"' || isspace(data[i]) { - break - } else { - i++ - } - - } - - if i >= len(data) { - return autolink, 0 - } - if i > j && data[i] == '>' { - return autolink, i + 1 - } - - // one of the forbidden chars has been found - autolink = notAutolink - } - i += bytes.IndexByte(data[i:], '>') - if i < 0 { - return autolink, 0 - } - return autolink, i + 1 -} - -// look for the address part of a mail autolink and '>' -// this is less strict than the original markdown e-mail address matching -func isMailtoAutoLink(data []byte) int { - nb := 0 - - // address is assumed to be: [-@._a-zA-Z0-9]+ with exactly one '@' - for i := 0; i < len(data); i++ { - if isalnum(data[i]) { - continue - } - - switch data[i] { - case '@': - nb++ - - case '-', '.', '_': - break - - case '>': - if nb == 1 { - return i + 1 - } - return 0 - default: - return 0 - } - } - - return 0 -} - -// look for the next emph char, skipping other constructs -func helperFindEmphChar(data []byte, c byte) int { - i := 0 - - for i < len(data) { - for i < len(data) && data[i] != c && data[i] != '`' && data[i] != '[' { - i++ - } - if i >= len(data) { - return 0 - } - // do not count escaped chars - if i != 0 && data[i-1] == '\\' { - i++ - continue - } - if data[i] == c { - return i - } - - if data[i] == '`' { - // skip a code span - tmpI := 0 - i++ - for i < len(data) && data[i] != '`' { - if tmpI == 0 && data[i] == c { - tmpI = i - } - i++ - } - if i >= len(data) { - return tmpI - } - i++ - } else if data[i] == '[' { - // skip a link - tmpI := 0 - i++ - for i < len(data) && data[i] != ']' { - if tmpI == 0 && data[i] == c { - tmpI = i - } - i++ - } - i++ - for i < len(data) && (data[i] == ' ' || data[i] == '\n') { - i++ - } - if i >= len(data) { - return tmpI - } - if data[i] != '[' && data[i] != '(' { // not a link - if tmpI > 0 { - return tmpI - } - continue - } - cc := data[i] - i++ - for i < len(data) && data[i] != cc { - if tmpI == 0 && data[i] == c { - return i - } - i++ - } - if i >= len(data) { - return tmpI - } - i++ - } - } - return 0 -} - -func helperEmphasis(p *Markdown, data []byte, c byte) (int, *Node) { - i := 0 - - // skip one symbol if coming from emph3 - if len(data) > 1 && data[0] == c && data[1] == c { - i = 1 - } - - for i < len(data) { - length := helperFindEmphChar(data[i:], c) - if length == 0 { - return 0, nil - } - i += length - if i >= len(data) { - return 0, nil - } - - if i+1 < len(data) && data[i+1] == c { - i++ - continue - } - - if data[i] == c && !isspace(data[i-1]) { - - if p.extensions&NoIntraEmphasis != 0 { - if !(i+1 == len(data) || isspace(data[i+1]) || ispunct(data[i+1])) { - continue - } - } - - emph := NewNode(Emph) - p.inline(emph, data[:i]) - return i + 1, emph - } - } - - return 0, nil -} - -func helperDoubleEmphasis(p *Markdown, data []byte, c byte) (int, *Node) { - i := 0 - - for i < len(data) { - length := helperFindEmphChar(data[i:], c) - if length == 0 { - return 0, nil - } - i += length - - if i+1 < len(data) && data[i] == c && data[i+1] == c && i > 0 && 
!isspace(data[i-1]) { - nodeType := Strong - if c == '~' { - nodeType = Del - } - node := NewNode(nodeType) - p.inline(node, data[:i]) - return i + 2, node - } - i++ - } - return 0, nil -} - -func helperTripleEmphasis(p *Markdown, data []byte, offset int, c byte) (int, *Node) { - i := 0 - origData := data - data = data[offset:] - - for i < len(data) { - length := helperFindEmphChar(data[i:], c) - if length == 0 { - return 0, nil - } - i += length - - // skip whitespace preceded symbols - if data[i] != c || isspace(data[i-1]) { - continue - } - - switch { - case i+2 < len(data) && data[i+1] == c && data[i+2] == c: - // triple symbol found - strong := NewNode(Strong) - em := NewNode(Emph) - strong.AppendChild(em) - p.inline(em, data[:i]) - return i + 3, strong - case (i+1 < len(data) && data[i+1] == c): - // double symbol found, hand over to emph1 - length, node := helperEmphasis(p, origData[offset-2:], c) - if length == 0 { - return 0, nil - } - return length - 2, node - default: - // single symbol found, hand over to emph2 - length, node := helperDoubleEmphasis(p, origData[offset-1:], c) - if length == 0 { - return 0, nil - } - return length - 1, node - } - } - return 0, nil -} - -func text(s []byte) *Node { - node := NewNode(Text) - node.Literal = s - return node -} - -func normalizeURI(s []byte) []byte { - return s // TODO: implement -} diff --git a/vendor/github.com/russross/blackfriday/v2/markdown.go b/vendor/github.com/russross/blackfriday/v2/markdown.go deleted file mode 100644 index 58d2e453..00000000 --- a/vendor/github.com/russross/blackfriday/v2/markdown.go +++ /dev/null @@ -1,950 +0,0 @@ -// Blackfriday Markdown Processor -// Available at http://github.com/russross/blackfriday -// -// Copyright © 2011 Russ Ross . -// Distributed under the Simplified BSD License. -// See README.md for details. - -package blackfriday - -import ( - "bytes" - "fmt" - "io" - "strings" - "unicode/utf8" -) - -// -// Markdown parsing and processing -// - -// Version string of the package. Appears in the rendered document when -// CompletePage flag is on. -const Version = "2.0" - -// Extensions is a bitwise or'ed collection of enabled Blackfriday's -// extensions. -type Extensions int - -// These are the supported markdown parsing extensions. -// OR these values together to select multiple extensions. 
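As the comment above says, extension flags are combined with bitwise OR. A minimal, hedged sketch of doing that through the package's public Run/WithExtensions API that this diff removes from vendor/ (the markdown input is illustrative, not from this change); the flag list itself follows:

```go
package main

import (
	"fmt"

	"github.com/russross/blackfriday/v2"
)

func main() {
	// Select several extensions by ORing their flags together.
	exts := blackfriday.Tables | blackfriday.FencedCode |
		blackfriday.Autolink | blackfriday.Strikethrough

	// Run parses and renders in one call; options passed here
	// override the CommonExtensions default.
	out := blackfriday.Run([]byte("~~old~~ *new*"), blackfriday.WithExtensions(exts))
	fmt.Println(string(out))
}
```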
-const ( - NoExtensions Extensions = 0 - NoIntraEmphasis Extensions = 1 << iota // Ignore emphasis markers inside words - Tables // Render tables - FencedCode // Render fenced code blocks - Autolink // Detect embedded URLs that are not explicitly marked - Strikethrough // Strikethrough text using ~~test~~ - LaxHTMLBlocks // Loosen up HTML block parsing rules - SpaceHeadings // Be strict about prefix heading rules - HardLineBreak // Translate newlines into line breaks - TabSizeEight // Expand tabs to eight spaces instead of four - Footnotes // Pandoc-style footnotes - NoEmptyLineBeforeBlock // No need to insert an empty line to start a (code, quote, ordered list, unordered list) block - HeadingIDs // specify heading IDs with {#id} - Titleblock // Titleblock ala pandoc - AutoHeadingIDs // Create the heading ID from the text - BackslashLineBreak // Translate trailing backslashes into line breaks - DefinitionLists // Render definition lists - - CommonHTMLFlags HTMLFlags = UseXHTML | Smartypants | - SmartypantsFractions | SmartypantsDashes | SmartypantsLatexDashes - - CommonExtensions Extensions = NoIntraEmphasis | Tables | FencedCode | - Autolink | Strikethrough | SpaceHeadings | HeadingIDs | - BackslashLineBreak | DefinitionLists -) - -// ListType contains bitwise or'ed flags for list and list item objects. -type ListType int - -// These are the possible flag values for the ListItem renderer. -// Multiple flag values may be ORed together. -// These are mostly of interest if you are writing a new output format. -const ( - ListTypeOrdered ListType = 1 << iota - ListTypeDefinition - ListTypeTerm - - ListItemContainsBlock - ListItemBeginningOfList // TODO: figure out if this is of any use now - ListItemEndOfList -) - -// CellAlignFlags holds a type of alignment in a table cell. -type CellAlignFlags int - -// These are the possible flag values for the table cell renderer. -// Only a single one of these values will be used; they are not ORed together. -// These are mostly of interest if you are writing a new output format. -const ( - TableAlignmentLeft CellAlignFlags = 1 << iota - TableAlignmentRight - TableAlignmentCenter = (TableAlignmentLeft | TableAlignmentRight) -) - -// The size of a tab stop. -const ( - TabSizeDefault = 4 - TabSizeDouble = 8 -) - -// blockTags is a set of tags that are recognized as HTML block tags. -// Any of these can be included in markdown text without special escaping. -var blockTags = map[string]struct{}{ - "blockquote": {}, - "del": {}, - "div": {}, - "dl": {}, - "fieldset": {}, - "form": {}, - "h1": {}, - "h2": {}, - "h3": {}, - "h4": {}, - "h5": {}, - "h6": {}, - "iframe": {}, - "ins": {}, - "math": {}, - "noscript": {}, - "ol": {}, - "pre": {}, - "p": {}, - "script": {}, - "style": {}, - "table": {}, - "ul": {}, - - // HTML5 - "address": {}, - "article": {}, - "aside": {}, - "canvas": {}, - "figcaption": {}, - "figure": {}, - "footer": {}, - "header": {}, - "hgroup": {}, - "main": {}, - "nav": {}, - "output": {}, - "progress": {}, - "section": {}, - "video": {}, -} - -// Renderer is the rendering interface. This is mostly of interest if you are -// implementing a new rendering format. -// -// Only an HTML implementation is provided in this repository, see the README -// for external implementations. -type Renderer interface { - // RenderNode is the main rendering method. It will be called once for - // every leaf node and twice for every non-leaf node (first with - // entering=true, then with entering=false). 
The method should write its - // rendition of the node to the supplied writer w. - RenderNode(w io.Writer, node *Node, entering bool) WalkStatus - - // RenderHeader is a method that allows the renderer to produce some - // content preceding the main body of the output document. The header is - // understood in the broad sense here. For example, the default HTML - // renderer will write not only the HTML document preamble, but also the - // table of contents if it was requested. - // - // The method will be passed an entire document tree, in case a particular - // implementation needs to inspect it to produce output. - // - // The output should be written to the supplied writer w. If your - // implementation has no header to write, supply an empty implementation. - RenderHeader(w io.Writer, ast *Node) - - // RenderFooter is a symmetric counterpart of RenderHeader. - RenderFooter(w io.Writer, ast *Node) -} - -// Callback functions for inline parsing. One such function is defined -// for each character that triggers a response when parsing inline data. -type inlineParser func(p *Markdown, data []byte, offset int) (int, *Node) - -// Markdown is a type that holds extensions and the runtime state used by -// Parse, and the renderer. You can not use it directly, construct it with New. -type Markdown struct { - renderer Renderer - referenceOverride ReferenceOverrideFunc - refs map[string]*reference - inlineCallback [256]inlineParser - extensions Extensions - nesting int - maxNesting int - insideLink bool - - // Footnotes need to be ordered as well as available to quickly check for - // presence. If a ref is also a footnote, it's stored both in refs and here - // in notes. Slice is nil if footnotes not enabled. - notes []*reference - - doc *Node - tip *Node // = doc - oldTip *Node - lastMatchedContainer *Node // = doc - allClosed bool -} - -func (p *Markdown) getRef(refid string) (ref *reference, found bool) { - if p.referenceOverride != nil { - r, overridden := p.referenceOverride(refid) - if overridden { - if r == nil { - return nil, false - } - return &reference{ - link: []byte(r.Link), - title: []byte(r.Title), - noteID: 0, - hasBlock: false, - text: []byte(r.Text)}, true - } - } - // refs are case insensitive - ref, found = p.refs[strings.ToLower(refid)] - return ref, found -} - -func (p *Markdown) finalize(block *Node) { - above := block.Parent - block.open = false - p.tip = above -} - -func (p *Markdown) addChild(node NodeType, offset uint32) *Node { - return p.addExistingChild(NewNode(node), offset) -} - -func (p *Markdown) addExistingChild(node *Node, offset uint32) *Node { - for !p.tip.canContain(node.Type) { - p.finalize(p.tip) - } - p.tip.AppendChild(node) - p.tip = node - return node -} - -func (p *Markdown) closeUnmatchedBlocks() { - if !p.allClosed { - for p.oldTip != p.lastMatchedContainer { - parent := p.oldTip.Parent - p.finalize(p.oldTip) - p.oldTip = parent - } - p.allClosed = true - } -} - -// -// -// Public interface -// -// - -// Reference represents the details of a link. -// See the documentation in Options for more details on use-case. -type Reference struct { - // Link is usually the URL the reference points to. - Link string - // Title is the alternate text describing the link in more detail. 
- Title string - // Text is the optional text to override the ref with if the syntax used was - // [refid][] - Text string -} - -// ReferenceOverrideFunc is expected to be called with a reference string and -// return either a valid Reference type that the reference string maps to or -// nil. If overridden is false, the default reference logic will be executed. -// See the documentation in Options for more details on use-case. -type ReferenceOverrideFunc func(reference string) (ref *Reference, overridden bool) - -// New constructs a Markdown processor. You can use the same With* functions as -// for Run() to customize parser's behavior and the renderer. -func New(opts ...Option) *Markdown { - var p Markdown - for _, opt := range opts { - opt(&p) - } - p.refs = make(map[string]*reference) - p.maxNesting = 16 - p.insideLink = false - docNode := NewNode(Document) - p.doc = docNode - p.tip = docNode - p.oldTip = docNode - p.lastMatchedContainer = docNode - p.allClosed = true - // register inline parsers - p.inlineCallback[' '] = maybeLineBreak - p.inlineCallback['*'] = emphasis - p.inlineCallback['_'] = emphasis - if p.extensions&Strikethrough != 0 { - p.inlineCallback['~'] = emphasis - } - p.inlineCallback['`'] = codeSpan - p.inlineCallback['\n'] = lineBreak - p.inlineCallback['['] = link - p.inlineCallback['<'] = leftAngle - p.inlineCallback['\\'] = escape - p.inlineCallback['&'] = entity - p.inlineCallback['!'] = maybeImage - p.inlineCallback['^'] = maybeInlineFootnote - if p.extensions&Autolink != 0 { - p.inlineCallback['h'] = maybeAutoLink - p.inlineCallback['m'] = maybeAutoLink - p.inlineCallback['f'] = maybeAutoLink - p.inlineCallback['H'] = maybeAutoLink - p.inlineCallback['M'] = maybeAutoLink - p.inlineCallback['F'] = maybeAutoLink - } - if p.extensions&Footnotes != 0 { - p.notes = make([]*reference, 0) - } - return &p -} - -// Option customizes the Markdown processor's default behavior. -type Option func(*Markdown) - -// WithRenderer allows you to override the default renderer. -func WithRenderer(r Renderer) Option { - return func(p *Markdown) { - p.renderer = r - } -} - -// WithExtensions allows you to pick some of the many extensions provided by -// Blackfriday. You can bitwise OR them. -func WithExtensions(e Extensions) Option { - return func(p *Markdown) { - p.extensions = e - } -} - -// WithNoExtensions turns off all extensions and custom behavior. -func WithNoExtensions() Option { - return func(p *Markdown) { - p.extensions = NoExtensions - p.renderer = NewHTMLRenderer(HTMLRendererParameters{ - Flags: HTMLFlagsNone, - }) - } -} - -// WithRefOverride sets an optional function callback that is called every -// time a reference is resolved. -// -// In Markdown, the link reference syntax can be made to resolve a link to -// a reference instead of an inline URL, in one of the following ways: -// -// * [link text][refid] -// * [refid][] -// -// Usually, the refid is defined at the bottom of the Markdown document. If -// this override function is provided, the refid is passed to the override -// function first, before consulting the defined refids at the bottom. If -// the override function indicates an override did not occur, the refids at -// the bottom will be used to fill in the link details. -func WithRefOverride(o ReferenceOverrideFunc) Option { - return func(p *Markdown) { - p.referenceOverride = o - } -} - -// Run is the main entry point to Blackfriday. It parses and renders a -// block of markdown-encoded text. 
-// -// The simplest invocation of Run takes one argument, input: -// output := Run(input) -// This will parse the input with CommonExtensions enabled and render it with -// the default HTMLRenderer (with CommonHTMLFlags). -// -// Variadic arguments opts can customize the default behavior. Since Markdown -// type does not contain exported fields, you can not use it directly. Instead, -// use the With* functions. For example, this will call the most basic -// functionality, with no extensions: -// output := Run(input, WithNoExtensions()) -// -// You can use any number of With* arguments, even contradicting ones. They -// will be applied in order of appearance and the latter will override the -// former: -// output := Run(input, WithNoExtensions(), WithExtensions(exts), -// WithRenderer(yourRenderer)) -func Run(input []byte, opts ...Option) []byte { - r := NewHTMLRenderer(HTMLRendererParameters{ - Flags: CommonHTMLFlags, - }) - optList := []Option{WithRenderer(r), WithExtensions(CommonExtensions)} - optList = append(optList, opts...) - parser := New(optList...) - ast := parser.Parse(input) - var buf bytes.Buffer - parser.renderer.RenderHeader(&buf, ast) - ast.Walk(func(node *Node, entering bool) WalkStatus { - return parser.renderer.RenderNode(&buf, node, entering) - }) - parser.renderer.RenderFooter(&buf, ast) - return buf.Bytes() -} - -// Parse is an entry point to the parsing part of Blackfriday. It takes an -// input markdown document and produces a syntax tree for its contents. This -// tree can then be rendered with a default or custom renderer, or -// analyzed/transformed by the caller to whatever non-standard needs they have. -// The return value is the root node of the syntax tree. -func (p *Markdown) Parse(input []byte) *Node { - p.block(input) - // Walk the tree and finish up some of unfinished blocks - for p.tip != nil { - p.finalize(p.tip) - } - // Walk the tree again and process inline markdown in each block - p.doc.Walk(func(node *Node, entering bool) WalkStatus { - if node.Type == Paragraph || node.Type == Heading || node.Type == TableCell { - p.inline(node, node.content) - node.content = nil - } - return GoToNext - }) - p.parseRefsToAST() - return p.doc -} - -func (p *Markdown) parseRefsToAST() { - if p.extensions&Footnotes == 0 || len(p.notes) == 0 { - return - } - p.tip = p.doc - block := p.addBlock(List, nil) - block.IsFootnotesList = true - block.ListFlags = ListTypeOrdered - flags := ListItemBeginningOfList - // Note: this loop is intentionally explicit, not range-form. This is - // because the body of the loop will append nested footnotes to p.notes and - // we need to process those late additions. Range form would only walk over - // the fixed initial set. - for i := 0; i < len(p.notes); i++ { - ref := p.notes[i] - p.addExistingChild(ref.footnote, 0) - block := ref.footnote - block.ListFlags = flags | ListTypeOrdered - block.RefLink = ref.link - if ref.hasBlock { - flags |= ListItemContainsBlock - p.block(ref.title) - } else { - p.inline(block, ref.title) - } - flags &^= ListItemBeginningOfList | ListItemContainsBlock - } - above := block.Parent - finalizeList(block) - p.tip = above - block.Walk(func(node *Node, entering bool) WalkStatus { - if node.Type == Paragraph || node.Type == Heading { - p.inline(node, node.content) - node.content = nil - } - return GoToNext - }) -} - -// -// Link references -// -// This section implements support for references that (usually) appear -// as footnotes in a document, and can be referenced anywhere in the document. 
-// The basic format is: -// -// [1]: http://www.google.com/ "Google" -// [2]: http://www.github.com/ "Github" -// -// Anywhere in the document, the reference can be linked by referring to its -// label, i.e., 1 and 2 in this example, as in: -// -// This library is hosted on [Github][2], a git hosting site. -// -// Actual footnotes as specified in Pandoc and supported by some other Markdown -// libraries such as php-markdown are also taken care of. They look like this: -// -// This sentence needs a bit of further explanation.[^note] -// -// [^note]: This is the explanation. -// -// Footnotes should be placed at the end of the document in an ordered list. -// Finally, there are inline footnotes such as: -// -// Inline footnotes^[Also supported.] provide a quick inline explanation, -// but are rendered at the bottom of the document. -// - -// reference holds all information necessary for a reference-style links or -// footnotes. -// -// Consider this markdown with reference-style links: -// -// [link][ref] -// -// [ref]: /url/ "tooltip title" -// -// It will be ultimately converted to this HTML: -// -//

-//    <p><a href="/url/" title="tooltip title">link</a></p>
-// -// And a reference structure will be populated as follows: -// -// p.refs["ref"] = &reference{ -// link: "/url/", -// title: "tooltip title", -// } -// -// Alternatively, reference can contain information about a footnote. Consider -// this markdown: -// -// Text needing a footnote.[^a] -// -// [^a]: This is the note -// -// A reference structure will be populated as follows: -// -// p.refs["a"] = &reference{ -// link: "a", -// title: "This is the note", -// noteID: <some positive int>, -// } -// -// TODO: As you can see, it begs for splitting into two dedicated structures -// for refs and for footnotes. -type reference struct { - link []byte - title []byte - noteID int // 0 if not a footnote ref - hasBlock bool - footnote *Node // a link to the Item node within a list of footnotes - - text []byte // only gets populated by refOverride feature with Reference.Text -} - -func (r *reference) String() string { - return fmt.Sprintf("{link: %q, title: %q, text: %q, noteID: %d, hasBlock: %v}", - r.link, r.title, r.text, r.noteID, r.hasBlock) -} - -// Check whether or not data starts with a reference link. -// If so, it is parsed and stored in the list of references -// (in the render struct). -// Returns the number of bytes to skip to move past it, -// or zero if the first line is not a reference. -func isReference(p *Markdown, data []byte, tabSize int) int { - // up to 3 optional leading spaces - if len(data) < 4 { - return 0 - } - i := 0 - for i < 3 && data[i] == ' ' { - i++ - } - - noteID := 0 - - // id part: anything but a newline between brackets - if data[i] != '[' { - return 0 - } - i++ - if p.extensions&Footnotes != 0 { - if i < len(data) && data[i] == '^' { - // we can set it to anything here because the proper noteIds will - // be assigned later during the second pass. It just has to be != 0 - noteID = 1 - i++ - } - } - idOffset := i - for i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != ']' { - i++ - } - if i >= len(data) || data[i] != ']' { - return 0 - } - idEnd := i - // footnotes can have empty ID, like this: [^], but a reference can not be - // empty like this: []. Break early if it's not a footnote and there's no ID - if noteID == 0 && idOffset == idEnd { - return 0 - } - // spacer: colon (space | tab)* newline?
(space | tab)* - i++ - if i >= len(data) || data[i] != ':' { - return 0 - } - i++ - for i < len(data) && (data[i] == ' ' || data[i] == '\t') { - i++ - } - if i < len(data) && (data[i] == '\n' || data[i] == '\r') { - i++ - if i < len(data) && data[i] == '\n' && data[i-1] == '\r' { - i++ - } - } - for i < len(data) && (data[i] == ' ' || data[i] == '\t') { - i++ - } - if i >= len(data) { - return 0 - } - - var ( - linkOffset, linkEnd int - titleOffset, titleEnd int - lineEnd int - raw []byte - hasBlock bool - ) - - if p.extensions&Footnotes != 0 && noteID != 0 { - linkOffset, linkEnd, raw, hasBlock = scanFootnote(p, data, i, tabSize) - lineEnd = linkEnd - } else { - linkOffset, linkEnd, titleOffset, titleEnd, lineEnd = scanLinkRef(p, data, i) - } - if lineEnd == 0 { - return 0 - } - - // a valid ref has been found - - ref := &reference{ - noteID: noteID, - hasBlock: hasBlock, - } - - if noteID > 0 { - // reusing the link field for the id since footnotes don't have links - ref.link = data[idOffset:idEnd] - // if footnote, it's not really a title, it's the contained text - ref.title = raw - } else { - ref.link = data[linkOffset:linkEnd] - ref.title = data[titleOffset:titleEnd] - } - - // id matches are case-insensitive - id := string(bytes.ToLower(data[idOffset:idEnd])) - - p.refs[id] = ref - - return lineEnd -} - -func scanLinkRef(p *Markdown, data []byte, i int) (linkOffset, linkEnd, titleOffset, titleEnd, lineEnd int) { - // link: whitespace-free sequence, optionally between angle brackets - if data[i] == '<' { - i++ - } - linkOffset = i - for i < len(data) && data[i] != ' ' && data[i] != '\t' && data[i] != '\n' && data[i] != '\r' { - i++ - } - linkEnd = i - if data[linkOffset] == '<' && data[linkEnd-1] == '>' { - linkOffset++ - linkEnd-- - } - - // optional spacer: (space | tab)* (newline | '\'' | '"' | '(' ) - for i < len(data) && (data[i] == ' ' || data[i] == '\t') { - i++ - } - if i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != '\'' && data[i] != '"' && data[i] != '(' { - return - } - - // compute end-of-line - if i >= len(data) || data[i] == '\r' || data[i] == '\n' { - lineEnd = i - } - if i+1 < len(data) && data[i] == '\r' && data[i+1] == '\n' { - lineEnd++ - } - - // optional (space|tab)* spacer after a newline - if lineEnd > 0 { - i = lineEnd + 1 - for i < len(data) && (data[i] == ' ' || data[i] == '\t') { - i++ - } - } - - // optional title: any non-newline sequence enclosed in '"() alone on its line - if i+1 < len(data) && (data[i] == '\'' || data[i] == '"' || data[i] == '(') { - i++ - titleOffset = i - - // look for EOL - for i < len(data) && data[i] != '\n' && data[i] != '\r' { - i++ - } - if i+1 < len(data) && data[i] == '\n' && data[i+1] == '\r' { - titleEnd = i + 1 - } else { - titleEnd = i - } - - // step back - i-- - for i > titleOffset && (data[i] == ' ' || data[i] == '\t') { - i-- - } - if i > titleOffset && (data[i] == '\'' || data[i] == '"' || data[i] == ')') { - lineEnd = titleEnd - titleEnd = i - } - } - - return -} - -// The first bit of this logic is the same as Parser.listItem, but the rest -// is much simpler. This function simply finds the entire block and shifts it -// over by one tab if it is indeed a block (just returns the line if it's not). -// blockEnd is the end of the section in the input buffer, and contents is the -// extracted text that was shifted over one tab. It will need to be rendered at -// the end of the document. 
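These reference internals are what the WithRefOverride option documented earlier hooks into. A hedged sketch of such a callback, assuming a placeholder refid ("docs") and URL; the footnote scanner follows:

```go
package main

import (
	"fmt"

	"github.com/russross/blackfriday/v2"
)

func main() {
	// Resolve [text][refid] references from code instead of from
	// definitions at the bottom of the document. The refid and URL
	// below are illustrative placeholders.
	override := func(refid string) (*blackfriday.Reference, bool) {
		if refid == "docs" {
			return &blackfriday.Reference{Link: "https://example.com/docs", Title: "Docs"}, true
		}
		return nil, false // fall back to refids defined in the document
	}

	out := blackfriday.Run([]byte("See the [manual][docs]."), blackfriday.WithRefOverride(override))
	fmt.Println(string(out))
}
```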
-func scanFootnote(p *Markdown, data []byte, i, indentSize int) (blockStart, blockEnd int, contents []byte, hasBlock bool) { - if i == 0 || len(data) == 0 { - return - } - - // skip leading whitespace on first line - for i < len(data) && data[i] == ' ' { - i++ - } - - blockStart = i - - // find the end of the line - blockEnd = i - for i < len(data) && data[i-1] != '\n' { - i++ - } - - // get working buffer - var raw bytes.Buffer - - // put the first line into the working buffer - raw.Write(data[blockEnd:i]) - blockEnd = i - - // process the following lines - containsBlankLine := false - -gatherLines: - for blockEnd < len(data) { - i++ - - // find the end of this line - for i < len(data) && data[i-1] != '\n' { - i++ - } - - // if it is an empty line, guess that it is part of this item - // and move on to the next line - if p.isEmpty(data[blockEnd:i]) > 0 { - containsBlankLine = true - blockEnd = i - continue - } - - n := 0 - if n = isIndented(data[blockEnd:i], indentSize); n == 0 { - // this is the end of the block. - // we don't want to include this last line in the index. - break gatherLines - } - - // if there were blank lines before this one, insert a new one now - if containsBlankLine { - raw.WriteByte('\n') - containsBlankLine = false - } - - // get rid of that first tab, write to buffer - raw.Write(data[blockEnd+n : i]) - hasBlock = true - - blockEnd = i - } - - if data[blockEnd-1] != '\n' { - raw.WriteByte('\n') - } - - contents = raw.Bytes() - - return -} - -// -// -// Miscellaneous helper functions -// -// - -// Test if a character is a punctuation symbol. -// Taken from a private function in regexp in the stdlib. -func ispunct(c byte) bool { - for _, r := range []byte("!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~") { - if c == r { - return true - } - } - return false -} - -// Test if a character is a whitespace character. -func isspace(c byte) bool { - return ishorizontalspace(c) || isverticalspace(c) -} - -// Test if a character is a horizontal whitespace character. -func ishorizontalspace(c byte) bool { - return c == ' ' || c == '\t' -} - -// Test if a character is a vertical character. -func isverticalspace(c byte) bool { - return c == '\n' || c == '\r' || c == '\f' || c == '\v' -} - -// Test if a character is letter. -func isletter(c byte) bool { - return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') -} - -// Test if a character is a letter or a digit. -// TODO: check when this is looking for ASCII alnum and when it should use unicode -func isalnum(c byte) bool { - return (c >= '0' && c <= '9') || isletter(c) -} - -// Replace tab characters with spaces, aligning to the next TAB_SIZE column. 
-// always ends output with a newline -func expandTabs(out *bytes.Buffer, line []byte, tabSize int) { - // first, check for common cases: no tabs, or only tabs at beginning of line - i, prefix := 0, 0 - slowcase := false - for i = 0; i < len(line); i++ { - if line[i] == '\t' { - if prefix == i { - prefix++ - } else { - slowcase = true - break - } - } - } - - // no need to decode runes if all tabs are at the beginning of the line - if !slowcase { - for i = 0; i < prefix*tabSize; i++ { - out.WriteByte(' ') - } - out.Write(line[prefix:]) - return - } - - // the slow case: we need to count runes to figure out how - // many spaces to insert for each tab - column := 0 - i = 0 - for i < len(line) { - start := i - for i < len(line) && line[i] != '\t' { - _, size := utf8.DecodeRune(line[i:]) - i += size - column++ - } - - if i > start { - out.Write(line[start:i]) - } - - if i >= len(line) { - break - } - - for { - out.WriteByte(' ') - column++ - if column%tabSize == 0 { - break - } - } - - i++ - } -} - -// Find if a line counts as indented or not. -// Returns number of characters the indent is (0 = not indented). -func isIndented(data []byte, indentSize int) int { - if len(data) == 0 { - return 0 - } - if data[0] == '\t' { - return 1 - } - if len(data) < indentSize { - return 0 - } - for i := 0; i < indentSize; i++ { - if data[i] != ' ' { - return 0 - } - } - return indentSize -} - -// Create a url-safe slug for fragments -func slugify(in []byte) []byte { - if len(in) == 0 { - return in - } - out := make([]byte, 0, len(in)) - sym := false - - for _, ch := range in { - if isalnum(ch) { - sym = false - out = append(out, ch) - } else if sym { - continue - } else { - out = append(out, '-') - sym = true - } - } - var a, b int - var ch byte - for a, ch = range out { - if ch != '-' { - break - } - } - for b = len(out) - 1; b > 0; b-- { - if out[b] != '-' { - break - } - } - return out[a : b+1] -} diff --git a/vendor/github.com/russross/blackfriday/v2/node.go b/vendor/github.com/russross/blackfriday/v2/node.go deleted file mode 100644 index 51b9e8c1..00000000 --- a/vendor/github.com/russross/blackfriday/v2/node.go +++ /dev/null @@ -1,354 +0,0 @@ -package blackfriday - -import ( - "bytes" - "fmt" -) - -// NodeType specifies a type of a single node of a syntax tree. Usually one -// node (and its type) corresponds to a single markdown feature, e.g. emphasis -// or code block. -type NodeType int - -// Constants for identifying different types of nodes. See NodeType. -const ( - Document NodeType = iota - BlockQuote - List - Item - Paragraph - Heading - HorizontalRule - Emph - Strong - Del - Link - Image - Text - HTMLBlock - CodeBlock - Softbreak - Hardbreak - Code - HTMLSpan - Table - TableCell - TableHead - TableBody - TableRow -) - -var nodeTypeNames = []string{ - Document: "Document", - BlockQuote: "BlockQuote", - List: "List", - Item: "Item", - Paragraph: "Paragraph", - Heading: "Heading", - HorizontalRule: "HorizontalRule", - Emph: "Emph", - Strong: "Strong", - Del: "Del", - Link: "Link", - Image: "Image", - Text: "Text", - HTMLBlock: "HTMLBlock", - CodeBlock: "CodeBlock", - Softbreak: "Softbreak", - Hardbreak: "Hardbreak", - Code: "Code", - HTMLSpan: "HTMLSpan", - Table: "Table", - TableCell: "TableCell", - TableHead: "TableHead", - TableBody: "TableBody", - TableRow: "TableRow", -} - -func (t NodeType) String() string { - return nodeTypeNames[t] -} - -// ListData contains fields relevant to a List and Item node type. -type ListData struct { - ListFlags ListType - Tight bool // Skip
<p>
    s around list item data if true - BulletChar byte // '*', '+' or '-' in bullet lists - Delimiter byte // '.' or ')' after the number in ordered lists - RefLink []byte // If not nil, turns this list item into a footnote item and triggers different rendering - IsFootnotesList bool // This is a list of footnotes -} - -// LinkData contains fields relevant to a Link node type. -type LinkData struct { - Destination []byte // Destination is what goes into a href - Title []byte // Title is the tooltip thing that goes in a title attribute - NoteID int // NoteID contains a serial number of a footnote, zero if it's not a footnote - Footnote *Node // If it's a footnote, this is a direct link to the footnote Node. Otherwise nil. -} - -// CodeBlockData contains fields relevant to a CodeBlock node type. -type CodeBlockData struct { - IsFenced bool // Specifies whether it's a fenced code block or an indented one - Info []byte // This holds the info string - FenceChar byte - FenceLength int - FenceOffset int -} - -// TableCellData contains fields relevant to a TableCell node type. -type TableCellData struct { - IsHeader bool // This tells if it's under the header row - Align CellAlignFlags // This holds the value for align attribute -} - -// HeadingData contains fields relevant to a Heading node type. -type HeadingData struct { - Level int // This holds the heading level number - HeadingID string // This might hold heading ID, if present - IsTitleblock bool // Specifies whether it's a title block -} - -// Node is a single element in the abstract syntax tree of the parsed document. -// It holds connections to the structurally neighboring nodes and, for certain -// types of nodes, additional information that might be needed when rendering. -type Node struct { - Type NodeType // Determines the type of the node - Parent *Node // Points to the parent - FirstChild *Node // Points to the first child, if any - LastChild *Node // Points to the last child, if any - Prev *Node // Previous sibling; nil if it's the first child - Next *Node // Next sibling; nil if it's the last child - - Literal []byte // Text contents of the leaf nodes - - HeadingData // Populated if Type is Heading - ListData // Populated if Type is List - CodeBlockData // Populated if Type is CodeBlock - LinkData // Populated if Type is Link - TableCellData // Populated if Type is TableCell - - content []byte // Markdown content of the block nodes - open bool // Specifies an open block node that has not been finished to process yet -} - -// NewNode allocates a node of a specified type. -func NewNode(typ NodeType) *Node { - return &Node{ - Type: typ, - open: true, - } -} - -func (n *Node) String() string { - ellipsis := "" - snippet := n.Literal - if len(snippet) > 16 { - snippet = snippet[:16] - ellipsis = "..." - } - return fmt.Sprintf("%s: '%s%s'", n.Type, snippet, ellipsis) -} - -// Unlink removes node 'n' from the tree. -// It panics if the node is nil. -func (n *Node) Unlink() { - if n.Prev != nil { - n.Prev.Next = n.Next - } else if n.Parent != nil { - n.Parent.FirstChild = n.Next - } - if n.Next != nil { - n.Next.Prev = n.Prev - } else if n.Parent != nil { - n.Parent.LastChild = n.Prev - } - n.Parent = nil - n.Next = nil - n.Prev = nil -} - -// AppendChild adds a node 'child' as a child of 'n'. -// It panics if either node is nil. 
-func (n *Node) AppendChild(child *Node) { - child.Unlink() - child.Parent = n - if n.LastChild != nil { - n.LastChild.Next = child - child.Prev = n.LastChild - n.LastChild = child - } else { - n.FirstChild = child - n.LastChild = child - } -} - -// InsertBefore inserts 'sibling' immediately before 'n'. -// It panics if either node is nil. -func (n *Node) InsertBefore(sibling *Node) { - sibling.Unlink() - sibling.Prev = n.Prev - if sibling.Prev != nil { - sibling.Prev.Next = sibling - } - sibling.Next = n - n.Prev = sibling - sibling.Parent = n.Parent - if sibling.Prev == nil { - sibling.Parent.FirstChild = sibling - } -} - -func (n *Node) isContainer() bool { - switch n.Type { - case Document: - fallthrough - case BlockQuote: - fallthrough - case List: - fallthrough - case Item: - fallthrough - case Paragraph: - fallthrough - case Heading: - fallthrough - case Emph: - fallthrough - case Strong: - fallthrough - case Del: - fallthrough - case Link: - fallthrough - case Image: - fallthrough - case Table: - fallthrough - case TableHead: - fallthrough - case TableBody: - fallthrough - case TableRow: - fallthrough - case TableCell: - return true - default: - return false - } -} - -func (n *Node) canContain(t NodeType) bool { - if n.Type == List { - return t == Item - } - if n.Type == Document || n.Type == BlockQuote || n.Type == Item { - return t != Item - } - if n.Type == Table { - return t == TableHead || t == TableBody - } - if n.Type == TableHead || n.Type == TableBody { - return t == TableRow - } - if n.Type == TableRow { - return t == TableCell - } - return false -} - -// WalkStatus allows NodeVisitor to have some control over the tree traversal. -// It is returned from NodeVisitor and different values allow Node.Walk to -// decide which node to go to next. -type WalkStatus int - -const ( - // GoToNext is the default traversal of every node. - GoToNext WalkStatus = iota - // SkipChildren tells walker to skip all children of current node. - SkipChildren - // Terminate tells walker to terminate the traversal. - Terminate -) - -// NodeVisitor is a callback to be called when traversing the syntax tree. -// Called twice for every node: once with entering=true when the branch is -// first visited, then with entering=false after all the children are done. -type NodeVisitor func(node *Node, entering bool) WalkStatus - -// Walk is a convenience method that instantiates a walker and starts a -// traversal of subtree rooted at n. 
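A minimal sketch of the traversal contract described above, wiring together New, Parse, and Walk from this package (the markdown input is illustrative); the Walk implementation follows:

```go
package main

import (
	"fmt"

	"github.com/russross/blackfriday/v2"
)

func main() {
	md := blackfriday.New(blackfriday.WithExtensions(blackfriday.CommonExtensions))
	ast := md.Parse([]byte("# Title\n\nSome *emphasized* text."))

	// The visitor runs twice per container node: once entering,
	// once leaving. Returning GoToNext continues the default walk.
	ast.Walk(func(node *blackfriday.Node, entering bool) blackfriday.WalkStatus {
		if entering {
			fmt.Printf("%s: %q\n", node.Type, node.Literal)
		}
		return blackfriday.GoToNext
	})
}
```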
-func (n *Node) Walk(visitor NodeVisitor) { - w := newNodeWalker(n) - for w.current != nil { - status := visitor(w.current, w.entering) - switch status { - case GoToNext: - w.next() - case SkipChildren: - w.entering = false - w.next() - case Terminate: - return - } - } -} - -type nodeWalker struct { - current *Node - root *Node - entering bool -} - -func newNodeWalker(root *Node) *nodeWalker { - return &nodeWalker{ - current: root, - root: root, - entering: true, - } -} - -func (nw *nodeWalker) next() { - if (!nw.current.isContainer() || !nw.entering) && nw.current == nw.root { - nw.current = nil - return - } - if nw.entering && nw.current.isContainer() { - if nw.current.FirstChild != nil { - nw.current = nw.current.FirstChild - nw.entering = true - } else { - nw.entering = false - } - } else if nw.current.Next == nil { - nw.current = nw.current.Parent - nw.entering = false - } else { - nw.current = nw.current.Next - nw.entering = true - } -} - -func dump(ast *Node) { - fmt.Println(dumpString(ast)) -} - -func dumpR(ast *Node, depth int) string { - if ast == nil { - return "" - } - indent := bytes.Repeat([]byte("\t"), depth) - content := ast.Literal - if content == nil { - content = ast.content - } - result := fmt.Sprintf("%s%s(%q)\n", indent, ast.Type, content) - for n := ast.FirstChild; n != nil; n = n.Next { - result += dumpR(n, depth+1) - } - return result -} - -func dumpString(ast *Node) string { - return dumpR(ast, 0) -} diff --git a/vendor/github.com/russross/blackfriday/v2/smartypants.go b/vendor/github.com/russross/blackfriday/v2/smartypants.go deleted file mode 100644 index 3a220e94..00000000 --- a/vendor/github.com/russross/blackfriday/v2/smartypants.go +++ /dev/null @@ -1,457 +0,0 @@ -// -// Blackfriday Markdown Processor -// Available at http://github.com/russross/blackfriday -// -// Copyright © 2011 Russ Ross . -// Distributed under the Simplified BSD License. -// See README.md for details. -// - -// -// -// SmartyPants rendering -// -// - -package blackfriday - -import ( - "bytes" - "io" -) - -// SPRenderer is a struct containing state of a Smartypants renderer. -type SPRenderer struct { - inSingleQuote bool - inDoubleQuote bool - callbacks [256]smartCallback -} - -func wordBoundary(c byte) bool { - return c == 0 || isspace(c) || ispunct(c) -} - -func tolower(c byte) byte { - if c >= 'A' && c <= 'Z' { - return c - 'A' + 'a' - } - return c -} - -func isdigit(c byte) bool { - return c >= '0' && c <= '9' -} - -func smartQuoteHelper(out *bytes.Buffer, previousChar byte, nextChar byte, quote byte, isOpen *bool, addNBSP bool) bool { - // edge of the buffer is likely to be a tag that we don't get to see, - // so we treat it like text sometimes - - // enumerate all sixteen possibilities for (previousChar, nextChar) - // each can be one of {0, space, punct, other} - switch { - case previousChar == 0 && nextChar == 0: - // context is not any help here, so toggle - *isOpen = !*isOpen - case isspace(previousChar) && nextChar == 0: - // [ "] might be [ "foo...] - *isOpen = true - case ispunct(previousChar) && nextChar == 0: - // [!"] hmm... could be [Run!"] or [("...] - *isOpen = false - case /* isnormal(previousChar) && */ nextChar == 0: - // [a"] is probably a close - *isOpen = false - case previousChar == 0 && isspace(nextChar): - // [" ] might be [...foo" ] - *isOpen = false - case isspace(previousChar) && isspace(nextChar): - // [ " ] context is not any help here, so toggle - *isOpen = !*isOpen - case ispunct(previousChar) && isspace(nextChar): - // [!" 
] is probably a close - *isOpen = false - case /* isnormal(previousChar) && */ isspace(nextChar): - // [a" ] this is one of the easy cases - *isOpen = false - case previousChar == 0 && ispunct(nextChar): - // ["!] hmm... could be ["$1.95] or ["!...] - *isOpen = false - case isspace(previousChar) && ispunct(nextChar): - // [ "!] looks more like [ "$1.95] - *isOpen = true - case ispunct(previousChar) && ispunct(nextChar): - // [!"!] context is not any help here, so toggle - *isOpen = !*isOpen - case /* isnormal(previousChar) && */ ispunct(nextChar): - // [a"!] is probably a close - *isOpen = false - case previousChar == 0 /* && isnormal(nextChar) */ : - // ["a] is probably an open - *isOpen = true - case isspace(previousChar) /* && isnormal(nextChar) */ : - // [ "a] this is one of the easy cases - *isOpen = true - case ispunct(previousChar) /* && isnormal(nextChar) */ : - // [!"a] is probably an open - *isOpen = true - default: - // [a'b] maybe a contraction? - *isOpen = false - } - - // Note that with the limited lookahead, this non-breaking - // space will also be appended to single double quotes. - if addNBSP && !*isOpen { - out.WriteString("&nbsp;") - } - - out.WriteByte('&') - if *isOpen { - out.WriteByte('l') - } else { - out.WriteByte('r') - } - out.WriteByte(quote) - out.WriteString("quo;") - - if addNBSP && *isOpen { - out.WriteString("&nbsp;") - } - - return true -} - -func (r *SPRenderer) smartSingleQuote(out *bytes.Buffer, previousChar byte, text []byte) int { - if len(text) >= 2 { - t1 := tolower(text[1]) - - if t1 == '\'' { - nextChar := byte(0) - if len(text) >= 3 { - nextChar = text[2] - } - if smartQuoteHelper(out, previousChar, nextChar, 'd', &r.inDoubleQuote, false) { - return 1 - } - } - - if (t1 == 's' || t1 == 't' || t1 == 'm' || t1 == 'd') && (len(text) < 3 || wordBoundary(text[2])) { - out.WriteString("&rsquo;") - return 0 - } - - if len(text) >= 3 { - t2 := tolower(text[2]) - - if ((t1 == 'r' && t2 == 'e') || (t1 == 'l' && t2 == 'l') || (t1 == 'v' && t2 == 'e')) && - (len(text) < 4 || wordBoundary(text[3])) { - out.WriteString("&rsquo;") - return 0 - } - } - } - - nextChar := byte(0) - if len(text) > 1 { - nextChar = text[1] - } - if smartQuoteHelper(out, previousChar, nextChar, 's', &r.inSingleQuote, false) { - return 0 - } - - out.WriteByte(text[0]) - return 0 -} - -func (r *SPRenderer) smartParens(out *bytes.Buffer, previousChar byte, text []byte) int { - if len(text) >= 3 { - t1 := tolower(text[1]) - t2 := tolower(text[2]) - - if t1 == 'c' && t2 == ')' { - out.WriteString("&copy;") - return 2 - } - - if t1 == 'r' && t2 == ')' { - out.WriteString("&reg;") - return 2 - } - - if len(text) >= 4 && t1 == 't' && t2 == 'm' && text[3] == ')' { - out.WriteString("&trade;") - return 3 - } - } - - out.WriteByte(text[0]) - return 0 -} - -func (r *SPRenderer) smartDash(out *bytes.Buffer, previousChar byte, text []byte) int { - if len(text) >= 2 { - if text[1] == '-' { - out.WriteString("&mdash;") - return 1 - } - - if wordBoundary(previousChar) && wordBoundary(text[1]) { - out.WriteString("&ndash;") - return 0 - } - } - - out.WriteByte(text[0]) - return 0 -} - -func (r *SPRenderer) smartDashLatex(out *bytes.Buffer, previousChar byte, text []byte) int { - if len(text) >= 3 && text[1] == '-' && text[2] == '-' { - out.WriteString("&mdash;") - return 2 - } - if len(text) >= 2 && text[1] == '-' { - out.WriteString("&ndash;") - return 1 - } - - out.WriteByte(text[0]) - return 0 -} - -func (r *SPRenderer) smartAmpVariant(out *bytes.Buffer, previousChar byte, text []byte, quote byte, addNBSP bool) int { - if bytes.HasPrefix(text, []byte("&quot;"))
{ - nextChar := byte(0) - if len(text) >= 7 { - nextChar = text[6] - } - if smartQuoteHelper(out, previousChar, nextChar, quote, &r.inDoubleQuote, addNBSP) { - return 5 - } - } - - if bytes.HasPrefix(text, []byte("&#0;")) { - return 3 - } - - out.WriteByte('&') - return 0 -} - -func (r *SPRenderer) smartAmp(angledQuotes, addNBSP bool) func(*bytes.Buffer, byte, []byte) int { - var quote byte = 'd' - if angledQuotes { - quote = 'a' - } - - return func(out *bytes.Buffer, previousChar byte, text []byte) int { - return r.smartAmpVariant(out, previousChar, text, quote, addNBSP) - } -} - -func (r *SPRenderer) smartPeriod(out *bytes.Buffer, previousChar byte, text []byte) int { - if len(text) >= 3 && text[1] == '.' && text[2] == '.' { - out.WriteString("&hellip;") - return 2 - } - - if len(text) >= 5 && text[1] == ' ' && text[2] == '.' && text[3] == ' ' && text[4] == '.' { - out.WriteString("&hellip;") - return 4 - } - - out.WriteByte(text[0]) - return 0 -} - -func (r *SPRenderer) smartBacktick(out *bytes.Buffer, previousChar byte, text []byte) int { - if len(text) >= 2 && text[1] == '`' { - nextChar := byte(0) - if len(text) >= 3 { - nextChar = text[2] - } - if smartQuoteHelper(out, previousChar, nextChar, 'd', &r.inDoubleQuote, false) { - return 1 - } - } - - out.WriteByte(text[0]) - return 0 -} - -func (r *SPRenderer) smartNumberGeneric(out *bytes.Buffer, previousChar byte, text []byte) int { - if wordBoundary(previousChar) && previousChar != '/' && len(text) >= 3 { - // is it of the form digits/digits(word boundary)?, i.e., \d+/\d+\b - // note: check for regular slash (/) or fraction slash (⁄, 0x2044, or 0xe2 81 84 in utf-8) - // and avoid changing dates like 1/23/2005 into fractions. - numEnd := 0 - for len(text) > numEnd && isdigit(text[numEnd]) { - numEnd++ - } - if numEnd == 0 { - out.WriteByte(text[0]) - return 0 - } - denStart := numEnd + 1 - if len(text) > numEnd+3 && text[numEnd] == 0xe2 && text[numEnd+1] == 0x81 && text[numEnd+2] == 0x84 { - denStart = numEnd + 3 - } else if len(text) < numEnd+2 || text[numEnd] != '/' { - out.WriteByte(text[0]) - return 0 - } - denEnd := denStart - for len(text) > denEnd && isdigit(text[denEnd]) { - denEnd++ - } - if denEnd == denStart { - out.WriteByte(text[0]) - return 0 - } - if len(text) == denEnd || wordBoundary(text[denEnd]) && text[denEnd] != '/' { - out.WriteString("<sup>") - out.Write(text[:numEnd]) - out.WriteString("</sup>&frasl;<sub>") - out.Write(text[denStart:denEnd]) - out.WriteString("</sub>") - return denEnd - 1 - } - } - - out.WriteByte(text[0]) - return 0 -} - -func (r *SPRenderer) smartNumber(out *bytes.Buffer, previousChar byte, text []byte) int { - if wordBoundary(previousChar) && previousChar != '/' && len(text) >= 3 { - if text[0] == '1' && text[1] == '/' && text[2] == '2' { - if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' { - out.WriteString("&frac12;") - return 2 - } - } - - if text[0] == '1' && text[1] == '/' && text[2] == '4' { - if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' || (len(text) >= 5 && tolower(text[3]) == 't' && tolower(text[4]) == 'h') { - out.WriteString("&frac14;") - return 2 - } - } - - if text[0] == '3' && text[1] == '/' && text[2] == '4' { - if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' || (len(text) >= 6 && tolower(text[3]) == 't' && tolower(text[4]) == 'h' && tolower(text[5]) == 's') { - out.WriteString("&frac34;") - return 2 - } - } - } - - out.WriteByte(text[0]) - return 0 -} - -func (r *SPRenderer) smartDoubleQuoteVariant(out *bytes.Buffer, previousChar byte, text []byte, quote byte) int { - nextChar := byte(0) - if len(text)
> 1 { - nextChar = text[1] - } - if !smartQuoteHelper(out, previousChar, nextChar, quote, &r.inDoubleQuote, false) { - out.WriteString("&quot;") - } - - return 0 -} - -func (r *SPRenderer) smartDoubleQuote(out *bytes.Buffer, previousChar byte, text []byte) int { - return r.smartDoubleQuoteVariant(out, previousChar, text, 'd') -} - -func (r *SPRenderer) smartAngledDoubleQuote(out *bytes.Buffer, previousChar byte, text []byte) int { - return r.smartDoubleQuoteVariant(out, previousChar, text, 'a') -} - -func (r *SPRenderer) smartLeftAngle(out *bytes.Buffer, previousChar byte, text []byte) int { - i := 0 - - for i < len(text) && text[i] != '>' { - i++ - } - - out.Write(text[:i+1]) - return i -} - -type smartCallback func(out *bytes.Buffer, previousChar byte, text []byte) int - -// NewSmartypantsRenderer constructs a Smartypants renderer object. -func NewSmartypantsRenderer(flags HTMLFlags) *SPRenderer { - var ( - r SPRenderer - - smartAmpAngled = r.smartAmp(true, false) - smartAmpAngledNBSP = r.smartAmp(true, true) - smartAmpRegular = r.smartAmp(false, false) - smartAmpRegularNBSP = r.smartAmp(false, true) - - addNBSP = flags&SmartypantsQuotesNBSP != 0 - ) - - if flags&SmartypantsAngledQuotes == 0 { - r.callbacks['"'] = r.smartDoubleQuote - if !addNBSP { - r.callbacks['&'] = smartAmpRegular - } else { - r.callbacks['&'] = smartAmpRegularNBSP - } - } else { - r.callbacks['"'] = r.smartAngledDoubleQuote - if !addNBSP { - r.callbacks['&'] = smartAmpAngled - } else { - r.callbacks['&'] = smartAmpAngledNBSP - } - } - r.callbacks['\''] = r.smartSingleQuote - r.callbacks['('] = r.smartParens - if flags&SmartypantsDashes != 0 { - if flags&SmartypantsLatexDashes == 0 { - r.callbacks['-'] = r.smartDash - } else { - r.callbacks['-'] = r.smartDashLatex - } - } - r.callbacks['.'] = r.smartPeriod - if flags&SmartypantsFractions == 0 { - r.callbacks['1'] = r.smartNumber - r.callbacks['3'] = r.smartNumber - } else { - for ch := '1'; ch <= '9'; ch++ { - r.callbacks[ch] = r.smartNumberGeneric - } - } - r.callbacks['<'] = r.smartLeftAngle - r.callbacks['`'] = r.smartBacktick - return &r -} - -// Process is the entry point of the Smartypants renderer. -func (r *SPRenderer) Process(w io.Writer, text []byte) { - mark := 0 - for i := 0; i < len(text); i++ { - if action := r.callbacks[text[i]]; action != nil { - if i > mark { - w.Write(text[mark:i]) - } - previousChar := byte(0) - if i > 0 { - previousChar = text[i-1] - } - var tmp bytes.Buffer - i += action(&tmp, previousChar, text[i:]) - w.Write(tmp.Bytes()) - mark = i + 1 - } - } - if mark < len(text) { - w.Write(text[mark:]) - } -} diff --git a/vendor/github.com/samfoo/ansi/.gitignore b/vendor/github.com/samfoo/ansi/.gitignore deleted file mode 100644 index 9ed3b07c..00000000 --- a/vendor/github.com/samfoo/ansi/.gitignore +++ /dev/null @@ -1 +0,0 @@ -*.test diff --git a/vendor/github.com/samfoo/ansi/LICENSE b/vendor/github.com/samfoo/ansi/LICENSE deleted file mode 100644 index 06ce0c3b..00000000 --- a/vendor/github.com/samfoo/ansi/LICENSE +++ /dev/null @@ -1,9 +0,0 @@ -The MIT License (MIT) -Copyright (c) 2013 Mario L. 
Gutierrez - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - diff --git a/vendor/github.com/samfoo/ansi/README.md b/vendor/github.com/samfoo/ansi/README.md deleted file mode 100644 index 5fb3f222..00000000 --- a/vendor/github.com/samfoo/ansi/README.md +++ /dev/null @@ -1,119 +0,0 @@ -# ansi - -Package ansi is a small, fast library to create ANSI colored strings and codes. - -## Install - -This install the color viewer and the package itself - -```sh -go get -u github.com/mgutz/ansi/cmd/ansi-mgutz -``` - -## Example - -```go -import "github.com/mgutz/ansi" - -// colorize a string, SLOW -msg := ansi.Color("foo", "red+b:white") - -// create a closure to avoid recalculating ANSI code compilation -phosphorize := ansi.ColorFunc("green+h:black") -msg = phosphorize("Bring back the 80s!") -msg2 := phospohorize("Look, I'm a CRT!") - -// cache escape codes and build strings manually -lime := ansi.ColorCode("green+h:black") -reset := ansi.ColorCode("reset") - -fmt.Println(lime, "Bring back the 80s!", reset) -``` - -Other examples - -```go -Color(s, "red") // red -Color(s, "red+b") // red bold -Color(s, "red+B") // red blinking -Color(s, "red+u") // red underline -Color(s, "red+bh") // red bold bright -Color(s, "red:white") // red on white -Color(s, "red+b:white+h") // red bold on white bright -Color(s, "red+B:white+h") // red blink on white bright -Color(s, "off") // turn off ansi codes -``` - -To view color combinations, from terminal. - -```sh -ansi-mgutz -``` - -## Style format - -```go -"foregroundColor+attributes:backgroundColor+attributes" -``` - -Colors - -* black -* red -* green -* yellow -* blue -* magenta -* cyan -* white -* 0...255 (256 colors) - -Attributes - -* b = bold foreground -* B = Blink foreground -* u = underline foreground -* i = inverse -* h = high intensity (bright) foreground, background - - does not work with 256 colors - -## Constants - -* ansi.Reset -* ansi.DefaultBG -* ansi.DefaultFG -* ansi.Black -* ansi.Red -* ansi.Green -* ansi.Yellow -* ansi.Blue -* ansi.Magenta -* ansi.Cyan -* ansi.White -* ansi.LightBlack -* ansi.LightRed -* ansi.LightGreen -* ansi.LightYellow -* ansi.LightBlue -* ansi.LightMagenta -* ansi.LightCyan -* ansi.LightWhite - - -## References - -Wikipedia ANSI escape codes [Colors](http://en.wikipedia.org/wiki/ANSI_escape_code#Colors) - -General [tips and formatting](http://misc.flogisoft.com/bash/tip_colors_and_formatting) - -What about support on Windows? Use [colorable by mattn](https://github.com/mattn/go-colorable). 
-Ansi and colorable are used by [logxi](https://github.com/mgutz/logxi) to support logging in -color on Windows. - -## MIT License - -Copyright (c) 2013 Mario Gutierrez mario@mgutz.com - -See the file LICENSE for copying permission. - diff --git a/vendor/github.com/samfoo/ansi/ansi.go b/vendor/github.com/samfoo/ansi/ansi.go deleted file mode 100644 index 6dbafdc9..00000000 --- a/vendor/github.com/samfoo/ansi/ansi.go +++ /dev/null @@ -1,251 +0,0 @@ -package ansi - -import ( - "bytes" - "fmt" - "strconv" - "strings" -) - -const ( - black = iota - red - green - yellow - blue - magenta - cyan - white - defaultt = 9 - - normalIntensityFG = 30 - highIntensityFG = 90 - normalIntensityBG = 40 - highIntensityBG = 100 - - start = "\033[" - bold = "1;" - strikethrough = "9;" - blink = "5;" - underline = "4;" - inverse = "7;" - - // Reset is the ANSI reset escape sequence - Reset = "\033[0m" - // DefaultBG is the default background - DefaultBG = "\033[49m" - // DefaultFG is the default foreground - DefaultFG = "\033[39m" -) - -// Black FG -var Black string - -// Red FG -var Red string - -// Green FG -var Green string - -// Yellow FG -var Yellow string - -// Blue FG -var Blue string - -// Magenta FG -var Magenta string - -// Cyan FG -var Cyan string - -// White FG -var White string - -// LightBlack FG -var LightBlack string - -// LightRed FG -var LightRed string - -// LightGreen FG -var LightGreen string - -// LightYellow FG -var LightYellow string - -// LightBlue FG -var LightBlue string - -// LightMagenta FG -var LightMagenta string - -// LightCyan FG -var LightCyan string - -// LightWhite FG -var LightWhite string - -var ( - plain = false - // Colors maps common color names to their ANSI color code. - Colors = map[string]int{ - "black": black, - "red": red, - "green": green, - "yellow": yellow, - "blue": blue, - "magenta": magenta, - "cyan": cyan, - "white": white, - "default": defaultt, - } -) - -func init() { - for i := 0; i < 256; i++ { - Colors[strconv.Itoa(i)] = i - } - - Black = ColorCode("black") - Red = ColorCode("red") - Green = ColorCode("green") - Yellow = ColorCode("yellow") - Blue = ColorCode("blue") - Magenta = ColorCode("magenta") - Cyan = ColorCode("cyan") - White = ColorCode("white") - LightBlack = ColorCode("black+h") - LightRed = ColorCode("red+h") - LightGreen = ColorCode("green+h") - LightYellow = ColorCode("yellow+h") - LightBlue = ColorCode("blue+h") - LightMagenta = ColorCode("magenta+h") - LightCyan = ColorCode("cyan+h") - LightWhite = ColorCode("white+h") -} - -// ColorCode returns the ANSI color color code for style. -func ColorCode(style string) string { - return colorCode(style).String() -} - -// Gets the ANSI color code for a style. 
-func colorCode(style string) *bytes.Buffer { - buf := bytes.NewBufferString("") - if plain || style == "" { - return buf - } - if style == "reset" { - buf.WriteString(Reset) - return buf - } else if style == "off" { - return buf - } - - foregroundBackground := strings.Split(style, ":") - foreground := strings.Split(foregroundBackground[0], "+") - fgKey := foreground[0] - fg := Colors[fgKey] - fgStyle := "" - if len(foreground) > 1 { - fgStyle = foreground[1] - } - - bg, bgStyle := "", "" - - if len(foregroundBackground) > 1 { - background := strings.Split(foregroundBackground[1], "+") - bg = background[0] - if len(background) > 1 { - bgStyle = background[1] - } - } - - buf.WriteString(start) - base := normalIntensityFG - if len(fgStyle) > 0 { - if strings.Contains(fgStyle, "s") { - buf.WriteString(strikethrough) - } - - if strings.Contains(fgStyle, "b") { - buf.WriteString(bold) - } - if strings.Contains(fgStyle, "B") { - buf.WriteString(blink) - } - if strings.Contains(fgStyle, "u") { - buf.WriteString(underline) - } - if strings.Contains(fgStyle, "i") { - buf.WriteString(inverse) - } - if strings.Contains(fgStyle, "h") { - base = highIntensityFG - } - } - - // if 256-color - n, err := strconv.Atoi(fgKey) - if err == nil { - fmt.Fprintf(buf, "38;5;%d;", n) - } else { - fmt.Fprintf(buf, "%d;", base+fg) - } - - base = normalIntensityBG - if len(bg) > 0 { - if strings.Contains(bgStyle, "h") { - base = highIntensityBG - } - // if 256-color - n, err := strconv.Atoi(bg) - if err == nil { - fmt.Fprintf(buf, "48;5;%d;", n) - } else { - fmt.Fprintf(buf, "%d;", base+Colors[bg]) - } - } - - // remove last ";" - buf.Truncate(buf.Len() - 1) - buf.WriteRune('m') - return buf -} - -// Color colors a string based on the ANSI color code for style. -func Color(s, style string) string { - if plain || len(style) < 1 { - return s - } - buf := colorCode(style) - buf.WriteString(s) - buf.WriteString(Reset) - return buf.String() -} - -// ColorFunc creates a closureto avoid ANSI color code calculation. -func ColorFunc(style string) func(string) string { - if style == "" { - return func(s string) string { - return s - } - } - color := ColorCode(style) - return func(s string) string { - if plain || s == "" { - return s - } - buf := bytes.NewBufferString(color) - buf.WriteString(s) - buf.WriteString(Reset) - result := buf.String() - return result - } -} - -// DisableColors disables ANSI color codes. On by default. -func DisableColors(disable bool) { - plain = disable -} diff --git a/vendor/github.com/samfoo/ansi/doc.go b/vendor/github.com/samfoo/ansi/doc.go deleted file mode 100644 index 43c217e1..00000000 --- a/vendor/github.com/samfoo/ansi/doc.go +++ /dev/null @@ -1,65 +0,0 @@ -/* -Package ansi is a small, fast library to create ANSI colored strings and codes. 
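The package comment continues below with the same material as the README; for quick reference, a hedged sketch of the three calling styles it documents (style strings are taken from the docs, the printed text is illustrative):

```go
package main

import (
	"fmt"

	"github.com/samfoo/ansi"
)

func main() {
	// One-off colorization (slow: recompiles the escape code each call).
	fmt.Println(ansi.Color("error", "red+b:white"))

	// Pre-compiled closure for hot paths.
	green := ansi.ColorFunc("green+h:black")
	fmt.Println(green("ok"))

	// Raw escape codes for manual string building.
	lime, reset := ansi.ColorCode("green+h:black"), ansi.ColorCode("reset")
	fmt.Println(lime + "bright" + reset)
}
```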
- -Installation - - # this installs the color viewer and the package - go get -u github.com/mgutz/ansi/cmd/ansi-mgutz - -Example - - // colorize a string, SLOW - msg := ansi.Color("foo", "red+b:white") - - // create a closure to avoid recalculating ANSI code compilation - phosphorize := ansi.ColorFunc("green+h:black") - msg = phosphorize("Bring back the 80s!") - msg2 := phospohorize("Look, I'm a CRT!") - - // cache escape codes and build strings manually - lime := ansi.ColorCode("green+h:black") - reset := ansi.ColorCode("reset") - - fmt.Println(lime, "Bring back the 80s!", reset) - -Other examples - - Color(s, "red") // red - Color(s, "red+b") // red bold - Color(s, "red+B") // red blinking - Color(s, "red+u") // red underline - Color(s, "red+bh") // red bold bright - Color(s, "red:white") // red on white - Color(s, "red+b:white+h") // red bold on white bright - Color(s, "red+B:white+h") // red blink on white bright - -To view color combinations, from terminal - - ansi-mgutz - -Style format - - "foregroundColor+attributes:backgroundColor+attributes" - -Colors - - black - red - green - yellow - blue - magenta - cyan - white - -Attributes - - b = bold foreground - B = Blink foreground - u = underline foreground - h = high intensity (bright) foreground, background - i = inverse - -Wikipedia ANSI escape codes [Colors](http://en.wikipedia.org/wiki/ANSI_escape_code#Colors) -*/ -package ansi diff --git a/vendor/github.com/samfoo/ansi/print.go b/vendor/github.com/samfoo/ansi/print.go deleted file mode 100644 index 044c9a68..00000000 --- a/vendor/github.com/samfoo/ansi/print.go +++ /dev/null @@ -1,42 +0,0 @@ -package ansi - -// PrintStyles prints all style combinations to the terminal. -func PrintStyles() { - oldPlain := plain - plain = false - - bgColors := []string{ - "", - ":black", - ":red", - ":green", - ":yellow", - ":blue", - ":magenta", - ":cyan", - ":white", - } - for fg := range Colors { - for _, bg := range bgColors { - println(padColor(fg, []string{"" + bg, "+b" + bg, "+bh" + bg, "+u" + bg})) - println(padColor(fg, []string{"+uh" + bg, "+B" + bg, "+Bb" + bg /* backgrounds */, "" + bg + "+h"})) - println(padColor(fg, []string{"+b" + bg + "+h", "+bh" + bg + "+h", "+u" + bg + "+h", "+uh" + bg + "+h"})) - } - } - plain = oldPlain -} - -func pad(s string, length int) string { - for len(s) < length { - s += " " - } - return s -} - -func padColor(s string, styles []string) string { - buffer := "" - for _, style := range styles { - buffer += Color(pad(s+style, 20), s+style) - } - return buffer -} diff --git a/vendor/github.com/shopspring/decimal/.gitignore b/vendor/github.com/shopspring/decimal/.gitignore deleted file mode 100644 index 8a43ce9d..00000000 --- a/vendor/github.com/shopspring/decimal/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ -.git -*.swp - -# IntelliJ -.idea/ -*.iml diff --git a/vendor/github.com/shopspring/decimal/.travis.yml b/vendor/github.com/shopspring/decimal/.travis.yml deleted file mode 100644 index 55d42b28..00000000 --- a/vendor/github.com/shopspring/decimal/.travis.yml +++ /dev/null @@ -1,13 +0,0 @@ -language: go - -go: - - 1.7.x - - 1.12.x - - 1.13.x - - tip - -install: - - go build . 
- -script: - - go test -v diff --git a/vendor/github.com/shopspring/decimal/CHANGELOG.md b/vendor/github.com/shopspring/decimal/CHANGELOG.md deleted file mode 100644 index 01ba02fe..00000000 --- a/vendor/github.com/shopspring/decimal/CHANGELOG.md +++ /dev/null @@ -1,19 +0,0 @@ -## Decimal v1.2.0 - -#### BREAKING -- Drop support for Go version older than 1.7 [#172](https://github.com/shopspring/decimal/pull/172) - -#### FEATURES -- Add NewFromInt and NewFromInt32 initializers [#72](https://github.com/shopspring/decimal/pull/72) -- Add support for Go modules [#157](https://github.com/shopspring/decimal/pull/157) -- Add BigInt, BigFloat helper methods [#171](https://github.com/shopspring/decimal/pull/171) - -#### ENHANCEMENTS -- Memory usage optimization [#160](https://github.com/shopspring/decimal/pull/160) -- Updated travis CI golang versions [#156](https://github.com/shopspring/decimal/pull/156) -- Update documentation [#173](https://github.com/shopspring/decimal/pull/173) -- Improve code quality [#174](https://github.com/shopspring/decimal/pull/174) - -#### BUGFIXES -- Revert remove insignificant digits [#159](https://github.com/shopspring/decimal/pull/159) -- Remove 15 interval for RoundCash [#166](https://github.com/shopspring/decimal/pull/166) diff --git a/vendor/github.com/shopspring/decimal/LICENSE b/vendor/github.com/shopspring/decimal/LICENSE deleted file mode 100644 index ad2148aa..00000000 --- a/vendor/github.com/shopspring/decimal/LICENSE +++ /dev/null @@ -1,45 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Spring, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - -- Based on https://github.com/oguzbilgic/fpd, which has the following license: -""" -The MIT License (MIT) - -Copyright (c) 2013 Oguz Bilgic - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -""" diff --git a/vendor/github.com/shopspring/decimal/README.md b/vendor/github.com/shopspring/decimal/README.md deleted file mode 100644 index b70f9015..00000000 --- a/vendor/github.com/shopspring/decimal/README.md +++ /dev/null @@ -1,130 +0,0 @@ -# decimal - -[![Build Status](https://travis-ci.org/shopspring/decimal.png?branch=master)](https://travis-ci.org/shopspring/decimal) [![GoDoc](https://godoc.org/github.com/shopspring/decimal?status.svg)](https://godoc.org/github.com/shopspring/decimal) [![Go Report Card](https://goreportcard.com/badge/github.com/shopspring/decimal)](https://goreportcard.com/report/github.com/shopspring/decimal) - -Arbitrary-precision fixed-point decimal numbers in go. - -_Note:_ Decimal library can "only" represent numbers with a maximum of 2^31 digits after the decimal point. - -## Features - - * The zero-value is 0, and is safe to use without initialization - * Addition, subtraction, multiplication with no loss of precision - * Division with specified precision - * Database/sql serialization/deserialization - * JSON and XML serialization/deserialization - -## Install - -Run `go get github.com/shopspring/decimal` - -## Requirements - -Decimal library requires Go version `>=1.7` - -## Usage - -```go -package main - -import ( - "fmt" - "github.com/shopspring/decimal" -) - -func main() { - price, err := decimal.NewFromString("136.02") - if err != nil { - panic(err) - } - - quantity := decimal.NewFromInt(3) - - fee, _ := decimal.NewFromString(".035") - taxRate, _ := decimal.NewFromString(".08875") - - subtotal := price.Mul(quantity) - - preTax := subtotal.Mul(fee.Add(decimal.NewFromFloat(1))) - - total := preTax.Mul(taxRate.Add(decimal.NewFromFloat(1))) - - fmt.Println("Subtotal:", subtotal) // Subtotal: 408.06 - fmt.Println("Pre-tax:", preTax) // Pre-tax: 422.3421 - fmt.Println("Taxes:", total.Sub(preTax)) // Taxes: 37.482861375 - fmt.Println("Total:", total) // Total: 459.824961375 - fmt.Println("Tax rate:", total.Sub(preTax).Div(preTax)) // Tax rate: 0.08875 -} -``` - -## Documentation - -http://godoc.org/github.com/shopspring/decimal - -## Production Usage - -* [Spring](https://shopspring.com/), since August 14, 2014. -* If you are using this in production, please let us know! - -## FAQ - -#### Why don't you just use float64? - -Because float64 (or any binary floating point type, actually) can't represent -numbers such as `0.1` exactly. - -Consider this code: http://play.golang.org/p/TQBd4yJe6B You might expect that -it prints out `10`, but it actually prints `9.999999999999831`. Over time, -these small errors can really add up! - -#### Why don't you just use big.Rat? - -big.Rat is fine for representing rational numbers, but Decimal is better for -representing money. Why? Here's a (contrived) example: - -Let's say you use big.Rat, and you have two numbers, x and y, both -representing 1/3, and you have `z = 1 - x - y = 1/3`. If you print each one -out, the string output has to stop somewhere (let's say it stops at 3 decimal -digits, for simplicity), so you'll get 0.333, 0.333, and 0.333. But where did -the other 0.001 go? - -Here's the above example as code: http://play.golang.org/p/lCZZs0w9KE - -With Decimal, the strings being printed out represent the number exactly. 
So, -if you have `x = y = 1/3` (with precision 3), they will actually be equal to -0.333, and when you do `z = 1 - x - y`, `z` will be equal to .334. No money is -unaccounted for! - -You still have to be careful. If you want to split a number `N` 3 ways, you -can't just send `N/3` to three different people. You have to pick one to send -`N - (2/3*N)` to. That person will receive the fraction of a penny remainder. - -But, it is much easier to be careful with Decimal than with big.Rat. - -#### Why isn't the API similar to big.Int's? - -big.Int's API is built to reduce the number of memory allocations for maximal -performance. This makes sense for its use-case, but the trade-off is that the -API is awkward and easy to misuse. - -For example, to add two big.Ints, you do: `z := new(big.Int).Add(x, y)`. A -developer unfamiliar with this API might try to do `z := a.Add(a, b)`. This -modifies `a` and sets `z` as an alias for `a`, which they might not expect. It -also modifies any other aliases to `a`. - -Here's an example of the subtle bugs you can introduce with big.Int's API: -https://play.golang.org/p/x2R_78pa8r - -In contrast, it's difficult to make such mistakes with decimal. Decimals -behave like other go numbers types: even though `a = b` will not deep copy -`b` into `a`, it is impossible to modify a Decimal, since all Decimal methods -return new Decimals and do not modify the originals. The downside is that -this causes extra allocations, so Decimal is less performant. My assumption -is that if you're using Decimals, you probably care more about correctness -than performance. - -## License - -The MIT License (MIT) - -This is a heavily modified fork of [fpd.Decimal](https://github.com/oguzbilgic/fpd), which was also released under the MIT License. diff --git a/vendor/github.com/shopspring/decimal/decimal-go.go b/vendor/github.com/shopspring/decimal/decimal-go.go deleted file mode 100644 index 9958d690..00000000 --- a/vendor/github.com/shopspring/decimal/decimal-go.go +++ /dev/null @@ -1,415 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Multiprecision decimal numbers. -// For floating-point formatting only; not general purpose. -// Only operations are assign and (binary) left/right shift. -// Can do binary floating point in multiprecision decimal precisely -// because 2 divides 10; cannot do decimal floating point -// in multiprecision binary precisely. - -package decimal - -type decimal struct { - d [800]byte // digits, big-endian representation - nd int // number of digits used - dp int // decimal point - neg bool // negative flag - trunc bool // discarded nonzero digits beyond d[:nd] -} - -func (a *decimal) String() string { - n := 10 + a.nd - if a.dp > 0 { - n += a.dp - } - if a.dp < 0 { - n += -a.dp - } - - buf := make([]byte, n) - w := 0 - switch { - case a.nd == 0: - return "0" - - case a.dp <= 0: - // zeros fill space between decimal point and digits - buf[w] = '0' - w++ - buf[w] = '.' - w++ - w += digitZero(buf[w : w+-a.dp]) - w += copy(buf[w:], a.d[0:a.nd]) - - case a.dp < a.nd: - // decimal point in middle of digits - w += copy(buf[w:], a.d[0:a.dp]) - buf[w] = '.' 
- w++ - w += copy(buf[w:], a.d[a.dp:a.nd]) - - default: - // zeros fill space between digits and decimal point - w += copy(buf[w:], a.d[0:a.nd]) - w += digitZero(buf[w : w+a.dp-a.nd]) - } - return string(buf[0:w]) -} - -func digitZero(dst []byte) int { - for i := range dst { - dst[i] = '0' - } - return len(dst) -} - -// trim trailing zeros from number. -// (They are meaningless; the decimal point is tracked -// independent of the number of digits.) -func trim(a *decimal) { - for a.nd > 0 && a.d[a.nd-1] == '0' { - a.nd-- - } - if a.nd == 0 { - a.dp = 0 - } -} - -// Assign v to a. -func (a *decimal) Assign(v uint64) { - var buf [24]byte - - // Write reversed decimal in buf. - n := 0 - for v > 0 { - v1 := v / 10 - v -= 10 * v1 - buf[n] = byte(v + '0') - n++ - v = v1 - } - - // Reverse again to produce forward decimal in a.d. - a.nd = 0 - for n--; n >= 0; n-- { - a.d[a.nd] = buf[n] - a.nd++ - } - a.dp = a.nd - trim(a) -} - -// Maximum shift that we can do in one pass without overflow. -// A uint has 32 or 64 bits, and we have to be able to accommodate 9<<k. -const uintSize = 32 << (^uint(0) >> 63) -const maxShift = uintSize - 4 - -// Binary shift right (/ 2) by k bits. k <= maxShift to avoid overflow. -func rightShift(a *decimal, k uint) { - r := 0 // read pointer - w := 0 // write pointer - - // Pick up enough leading digits to cover first shift. - var n uint - for ; n>>k == 0; r++ { - if r >= a.nd { - if n == 0 { - // a == 0; shouldn't get here, but handle anyway. - a.nd = 0 - return - } - for n>>k == 0 { - n = n * 10 - r++ - } - break - } - c := uint(a.d[r]) - n = n*10 + c - '0' - } - a.dp -= r - 1 - - var mask uint = (1 << k) - 1 - - // Pick up a digit, put down a digit. - for ; r < a.nd; r++ { - c := uint(a.d[r]) - dig := n >> k - n &= mask - a.d[w] = byte(dig + '0') - w++ - n = n*10 + c - '0' - } - - // Put down extra digits. - for n > 0 { - dig := n >> k - n &= mask - if w < len(a.d) { - a.d[w] = byte(dig + '0') - w++ - } else if dig > 0 { - a.trunc = true - } - n = n * 10 - } - - a.nd = w - trim(a) -} - -// Cheat sheet for left shift: table indexed by shift count giving -// number of new digits that will be introduced by that shift. -// -// For example, leftcheats[4] = {2, "625"}. That means that -// if we are shifting by 4 (multiplying by 16), it will add 2 digits -// when the string prefix is "625" through "999", and one fewer digit -// if the string prefix is "000" through "624". -// -// Credit for this trick goes to Ken. - -type leftCheat struct { - delta int // number of new digits - cutoff string // minus one digit if original < a. -} - -var leftcheats = []leftCheat{ - // Leading digits of 1/2^i = 5^i. - // 5^23 is not an exact 64-bit floating point number, - // so have to use bc for the math. - // Go up to 60 to be large enough for 32bit and 64bit platforms.
- /* - seq 60 | sed 's/^/5^/' | bc | - awk 'BEGIN{ print "\t{ 0, \"\" }," } - { - log2 = log(2)/log(10) - printf("\t{ %d, \"%s\" },\t// * %d\n", - int(log2*NR+1), $0, 2**NR) - }' - */ - {0, ""}, - {1, "5"}, // * 2 - {1, "25"}, // * 4 - {1, "125"}, // * 8 - {2, "625"}, // * 16 - {2, "3125"}, // * 32 - {2, "15625"}, // * 64 - {3, "78125"}, // * 128 - {3, "390625"}, // * 256 - {3, "1953125"}, // * 512 - {4, "9765625"}, // * 1024 - {4, "48828125"}, // * 2048 - {4, "244140625"}, // * 4096 - {4, "1220703125"}, // * 8192 - {5, "6103515625"}, // * 16384 - {5, "30517578125"}, // * 32768 - {5, "152587890625"}, // * 65536 - {6, "762939453125"}, // * 131072 - {6, "3814697265625"}, // * 262144 - {6, "19073486328125"}, // * 524288 - {7, "95367431640625"}, // * 1048576 - {7, "476837158203125"}, // * 2097152 - {7, "2384185791015625"}, // * 4194304 - {7, "11920928955078125"}, // * 8388608 - {8, "59604644775390625"}, // * 16777216 - {8, "298023223876953125"}, // * 33554432 - {8, "1490116119384765625"}, // * 67108864 - {9, "7450580596923828125"}, // * 134217728 - {9, "37252902984619140625"}, // * 268435456 - {9, "186264514923095703125"}, // * 536870912 - {10, "931322574615478515625"}, // * 1073741824 - {10, "4656612873077392578125"}, // * 2147483648 - {10, "23283064365386962890625"}, // * 4294967296 - {10, "116415321826934814453125"}, // * 8589934592 - {11, "582076609134674072265625"}, // * 17179869184 - {11, "2910383045673370361328125"}, // * 34359738368 - {11, "14551915228366851806640625"}, // * 68719476736 - {12, "72759576141834259033203125"}, // * 137438953472 - {12, "363797880709171295166015625"}, // * 274877906944 - {12, "1818989403545856475830078125"}, // * 549755813888 - {13, "9094947017729282379150390625"}, // * 1099511627776 - {13, "45474735088646411895751953125"}, // * 2199023255552 - {13, "227373675443232059478759765625"}, // * 4398046511104 - {13, "1136868377216160297393798828125"}, // * 8796093022208 - {14, "5684341886080801486968994140625"}, // * 17592186044416 - {14, "28421709430404007434844970703125"}, // * 35184372088832 - {14, "142108547152020037174224853515625"}, // * 70368744177664 - {15, "710542735760100185871124267578125"}, // * 140737488355328 - {15, "3552713678800500929355621337890625"}, // * 281474976710656 - {15, "17763568394002504646778106689453125"}, // * 562949953421312 - {16, "88817841970012523233890533447265625"}, // * 1125899906842624 - {16, "444089209850062616169452667236328125"}, // * 2251799813685248 - {16, "2220446049250313080847263336181640625"}, // * 4503599627370496 - {16, "11102230246251565404236316680908203125"}, // * 9007199254740992 - {17, "55511151231257827021181583404541015625"}, // * 18014398509481984 - {17, "277555756156289135105907917022705078125"}, // * 36028797018963968 - {17, "1387778780781445675529539585113525390625"}, // * 72057594037927936 - {18, "6938893903907228377647697925567626953125"}, // * 144115188075855872 - {18, "34694469519536141888238489627838134765625"}, // * 288230376151711744 - {18, "173472347597680709441192448139190673828125"}, // * 576460752303423488 - {19, "867361737988403547205962240695953369140625"}, // * 1152921504606846976 -} - -// Is the leading prefix of b lexicographically less than s? -func prefixIsLessThan(b []byte, s string) bool { - for i := 0; i < len(s); i++ { - if i >= len(b) { - return true - } - if b[i] != s[i] { - return b[i] < s[i] - } - } - return false -} - -// Binary shift left (* 2) by k bits. k <= maxShift to avoid overflow. 
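Reviewer note: to make the cheat sheet above concrete — shifting left by k=4 multiplies by 16 and adds leftcheats[4].delta == 2 digits, unless the leading digits compare below the cutoff "625". A standalone sketch, with prefixIsLessThan copied verbatim from the deleted decimal-go.go so it runs on its own:

```go
package main

import "fmt"

// Copied from the deleted decimal-go.go above.
func prefixIsLessThan(b []byte, s string) bool {
	for i := 0; i < len(s); i++ {
		if i >= len(b) {
			return true
		}
		if b[i] != s[i] {
			return b[i] < s[i]
		}
	}
	return false
}

func main() {
	// leftcheats[4] = {2, "625"}: a 4-bit left shift multiplies by 16.
	cutoff, delta := "625", 2
	for _, digits := range []string{"600", "625", "999"} {
		d := delta
		if prefixIsLessThan([]byte(digits), cutoff) {
			d-- // below the cutoff, one fewer digit appears
		}
		fmt.Printf("%s * 16 gains %d digit(s)\n", digits, d)
	}
	// 600*16 = 9600 (+1 digit); 625*16 = 10000 and 999*16 = 15984 (+2 digits)
}
```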
-func leftShift(a *decimal, k uint) { - delta := leftcheats[k].delta - if prefixIsLessThan(a.d[0:a.nd], leftcheats[k].cutoff) { - delta-- - } - - r := a.nd // read index - w := a.nd + delta // write index - - // Pick up a digit, put down a digit. - var n uint - for r--; r >= 0; r-- { - n += (uint(a.d[r]) - '0') << k - quo := n / 10 - rem := n - 10*quo - w-- - if w < len(a.d) { - a.d[w] = byte(rem + '0') - } else if rem != 0 { - a.trunc = true - } - n = quo - } - - // Put down extra digits. - for n > 0 { - quo := n / 10 - rem := n - 10*quo - w-- - if w < len(a.d) { - a.d[w] = byte(rem + '0') - } else if rem != 0 { - a.trunc = true - } - n = quo - } - - a.nd += delta - if a.nd >= len(a.d) { - a.nd = len(a.d) - } - a.dp += delta - trim(a) -} - -// Binary shift left (k > 0) or right (k < 0). -func (a *decimal) Shift(k int) { - switch { - case a.nd == 0: - // nothing to do: a == 0 - case k > 0: - for k > maxShift { - leftShift(a, maxShift) - k -= maxShift - } - leftShift(a, uint(k)) - case k < 0: - for k < -maxShift { - rightShift(a, maxShift) - k += maxShift - } - rightShift(a, uint(-k)) - } -} - -// If we chop a at nd digits, should we round up? -func shouldRoundUp(a *decimal, nd int) bool { - if nd < 0 || nd >= a.nd { - return false - } - if a.d[nd] == '5' && nd+1 == a.nd { // exactly halfway - round to even - // if we truncated, a little higher than what's recorded - always round up - if a.trunc { - return true - } - return nd > 0 && (a.d[nd-1]-'0')%2 != 0 - } - // not halfway - digit tells all - return a.d[nd] >= '5' -} - -// Round a to nd digits (or fewer). -// If nd is zero, it means we're rounding -// just to the left of the digits, as in -// 0.09 -> 0.1. -func (a *decimal) Round(nd int) { - if nd < 0 || nd >= a.nd { - return - } - if shouldRoundUp(a, nd) { - a.RoundUp(nd) - } else { - a.RoundDown(nd) - } -} - -// Round a down to nd digits (or fewer). -func (a *decimal) RoundDown(nd int) { - if nd < 0 || nd >= a.nd { - return - } - a.nd = nd - trim(a) -} - -// Round a up to nd digits (or fewer). -func (a *decimal) RoundUp(nd int) { - if nd < 0 || nd >= a.nd { - return - } - - // round up - for i := nd - 1; i >= 0; i-- { - c := a.d[i] - if c < '9' { // can stop after this digit - a.d[i]++ - a.nd = i + 1 - return - } - } - - // Number is all 9s. - // Change to single 1 with adjusted decimal point. - a.d[0] = '1' - a.nd = 1 - a.dp++ -} - -// Extract integer part, rounded appropriately. -// No guarantees about overflow. -func (a *decimal) RoundedInteger() uint64 { - if a.dp > 20 { - return 0xFFFFFFFFFFFFFFFF - } - var i int - n := uint64(0) - for i = 0; i < a.dp && i < a.nd; i++ { - n = n*10 + uint64(a.d[i]-'0') - } - for ; i < a.dp; i++ { - n *= 10 - } - if shouldRoundUp(a, a.dp) { - n++ - } - return n -} diff --git a/vendor/github.com/shopspring/decimal/decimal.go b/vendor/github.com/shopspring/decimal/decimal.go deleted file mode 100644 index 801c1a04..00000000 --- a/vendor/github.com/shopspring/decimal/decimal.go +++ /dev/null @@ -1,1477 +0,0 @@ -// Package decimal implements an arbitrary precision fixed-point decimal. -// -// The zero-value of a Decimal is 0, as you would expect. -// -// The best way to create a new Decimal is to use decimal.NewFromString, ex: -// -// n, err := decimal.NewFromString("-123.4567") -// n.String() // output: "-123.4567" -// -// To use Decimal as part of a struct: -// -// type Struct struct { -// Number Decimal -// } -// -// Note: This can "only" represent numbers with a maximum of 2^31 digits after the decimal point. 
-package decimal - -import ( - "database/sql/driver" - "encoding/binary" - "fmt" - "math" - "math/big" - "strconv" - "strings" -) - -// DivisionPrecision is the number of decimal places in the result when it -// doesn't divide exactly. -// -// Example: -// -// d1 := decimal.NewFromFloat(2).Div(decimal.NewFromFloat(3)) -// d1.String() // output: "0.6666666666666667" -// d2 := decimal.NewFromFloat(2).Div(decimal.NewFromFloat(30000)) -// d2.String() // output: "0.0000666666666667" -// d3 := decimal.NewFromFloat(20000).Div(decimal.NewFromFloat(3)) -// d3.String() // output: "6666.6666666666666667" -// decimal.DivisionPrecision = 3 -// d4 := decimal.NewFromFloat(2).Div(decimal.NewFromFloat(3)) -// d4.String() // output: "0.667" -// -var DivisionPrecision = 16 - -// MarshalJSONWithoutQuotes should be set to true if you want the decimal to -// be JSON marshaled as a number, instead of as a string. -// WARNING: this is dangerous for decimals with many digits, since many JSON -// unmarshallers (ex: Javascript's) will unmarshal JSON numbers to IEEE 754 -// double-precision floating point numbers, which means you can potentially -// silently lose precision. -var MarshalJSONWithoutQuotes = false - -// Zero constant, to make computations faster. -// Zero should never be compared with == or != directly, please use decimal.Equal or decimal.Cmp instead. -var Zero = New(0, 1) - -var zeroInt = big.NewInt(0) -var oneInt = big.NewInt(1) -var twoInt = big.NewInt(2) -var fourInt = big.NewInt(4) -var fiveInt = big.NewInt(5) -var tenInt = big.NewInt(10) -var twentyInt = big.NewInt(20) - -// Decimal represents a fixed-point decimal. It is immutable. -// number = value * 10 ^ exp -type Decimal struct { - value *big.Int - - // NOTE(vadim): this must be an int32, because we cast it to float64 during - // calculations. If exp is 64 bit, we might lose precision. - // If we cared about being able to represent every possible decimal, we - // could make exp a *big.Int but it would hurt performance and numbers - // like that are unrealistic. - exp int32 -} - -// New returns a new fixed-point decimal, value * 10 ^ exp. -func New(value int64, exp int32) Decimal { - return Decimal{ - value: big.NewInt(value), - exp: exp, - } -} - -// NewFromInt converts a int64 to Decimal. -// -// Example: -// -// NewFromInt(123).String() // output: "123" -// NewFromInt(-10).String() // output: "-10" -func NewFromInt(value int64) Decimal { - return Decimal{ - value: big.NewInt(value), - exp: 0, - } -} - -// NewFromInt32 converts a int32 to Decimal. -// -// Example: -// -// NewFromInt(123).String() // output: "123" -// NewFromInt(-10).String() // output: "-10" -func NewFromInt32(value int32) Decimal { - return Decimal{ - value: big.NewInt(int64(value)), - exp: 0, - } -} - -// NewFromBigInt returns a new Decimal from a big.Int, value * 10 ^ exp -func NewFromBigInt(value *big.Int, exp int32) Decimal { - return Decimal{ - value: big.NewInt(0).Set(value), - exp: exp, - } -} - -// NewFromString returns a new Decimal from a string representation. -// Trailing zeroes are not trimmed. 
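Reviewer note: since a Decimal is just value * 10^exp over a big.Int coefficient, the same number has many representations. A quick sketch, assuming the module is now fetched directly (go get github.com/shopspring/decimal) rather than from the vendored copy this diff deletes:

```go
package main

import (
	"fmt"

	"github.com/shopspring/decimal"
)

func main() {
	a := decimal.New(12345, -3)   // 12345 * 10^-3 = 12.345
	b := decimal.New(1234500, -5) // same value at a finer scale
	fmt.Println(a)          // 12.345
	fmt.Println(a.Equal(b)) // true: comparison rescales to a common exponent
}
```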
-// -// Example: -// -// d, err := NewFromString("-123.45") -// d2, err := NewFromString(".0001") -// d3, err := NewFromString("1.47000") -// -func NewFromString(value string) (Decimal, error) { - originalInput := value - var intString string - var exp int64 - - // Check if number is using scientific notation - eIndex := strings.IndexAny(value, "Ee") - if eIndex != -1 { - expInt, err := strconv.ParseInt(value[eIndex+1:], 10, 32) - if err != nil { - if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange { - return Decimal{}, fmt.Errorf("can't convert %s to decimal: fractional part too long", value) - } - return Decimal{}, fmt.Errorf("can't convert %s to decimal: exponent is not numeric", value) - } - value = value[:eIndex] - exp = expInt - } - - parts := strings.Split(value, ".") - if len(parts) == 1 { - // There is no decimal point, we can just parse the original string as - // an int - intString = value - } else if len(parts) == 2 { - intString = parts[0] + parts[1] - expInt := -len(parts[1]) - exp += int64(expInt) - } else { - return Decimal{}, fmt.Errorf("can't convert %s to decimal: too many .s", value) - } - - dValue := new(big.Int) - _, ok := dValue.SetString(intString, 10) - if !ok { - return Decimal{}, fmt.Errorf("can't convert %s to decimal", value) - } - - if exp < math.MinInt32 || exp > math.MaxInt32 { - // NOTE(vadim): I doubt a string could realistically be this long - return Decimal{}, fmt.Errorf("can't convert %s to decimal: fractional part too long", originalInput) - } - - return Decimal{ - value: dValue, - exp: int32(exp), - }, nil -} - -// RequireFromString returns a new Decimal from a string representation -// or panics if NewFromString would have returned an error. -// -// Example: -// -// d := RequireFromString("-123.45") -// d2 := RequireFromString(".0001") -// -func RequireFromString(value string) Decimal { - dec, err := NewFromString(value) - if err != nil { - panic(err) - } - return dec -} - -// NewFromFloat converts a float64 to Decimal. -// -// The converted number will contain the number of significant digits that can be -// represented in a float with reliable roundtrip. -// This is typically 15 digits, but may be more in some cases. -// See https://www.exploringbinary.com/decimal-precision-of-binary-floating-point-numbers/ for more information. -// -// For slightly faster conversion, use NewFromFloatWithExponent where you can specify the precision in absolute terms. -// -// NOTE: this will panic on NaN, +/-inf -func NewFromFloat(value float64) Decimal { - if value == 0 { - return New(0, 0) - } - return newFromFloat(value, math.Float64bits(value), &float64info) -} - -// NewFromFloat32 converts a float32 to Decimal. -// -// The converted number will contain the number of significant digits that can be -// represented in a float with reliable roundtrip. -// This is typically 6-8 digits depending on the input. -// See https://www.exploringbinary.com/decimal-precision-of-binary-floating-point-numbers/ for more information. -// -// For slightly faster conversion, use NewFromFloatWithExponent where you can specify the precision in absolute terms. 
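Reviewer note: the parser above folds a scientific-notation exponent into exp before splitting on the decimal point, and RequireFromString is the panicking variant. A minimal sketch under the same go-get assumption as above:

```go
package main

import (
	"fmt"

	"github.com/shopspring/decimal"
)

func main() {
	d, err := decimal.NewFromString("136.02")
	if err != nil {
		panic(err)
	}
	e, _ := decimal.NewFromString("1.23e3") // exponent folded into exp
	fmt.Println(d, e) // 136.02 1230

	// RequireFromString panics instead of returning an error.
	f := decimal.RequireFromString(".0001")
	fmt.Println(f) // 0.0001
}
```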
-// -// NOTE: this will panic on NaN, +/-inf -func NewFromFloat32(value float32) Decimal { - if value == 0 { - return New(0, 0) - } - // XOR is workaround for https://github.com/golang/go/issues/26285 - a := math.Float32bits(value) ^ 0x80808080 - return newFromFloat(float64(value), uint64(a)^0x80808080, &float32info) -} - -func newFromFloat(val float64, bits uint64, flt *floatInfo) Decimal { - if math.IsNaN(val) || math.IsInf(val, 0) { - panic(fmt.Sprintf("Cannot create a Decimal from %v", val)) - } - exp := int(bits>>flt.mantbits) & (1<<flt.expbits - 1) - mant := bits & (uint64(1)<<flt.mantbits - 1) - - switch exp { - case 0: - // denormalized - exp++ - - default: - // add implicit top bit - mant |= uint64(1) << flt.mantbits - } - exp += flt.bias - - var d decimal - d.Assign(mant) - d.Shift(exp - int(flt.mantbits)) - d.neg = bits>>(flt.expbits+flt.mantbits) != 0 - - roundShortest(&d, mant, exp, flt) - // If less than 19 digits, we can do calculation in an int64. - if d.nd < 19 { - tmp := int64(0) - m := int64(1) - for i := d.nd - 1; i >= 0; i-- { - tmp += m * int64(d.d[i]-'0') - m *= 10 - } - if d.neg { - tmp *= -1 - } - return Decimal{value: big.NewInt(tmp), exp: int32(d.dp) - int32(d.nd)} - } - dValue := new(big.Int) - dValue, ok := dValue.SetString(string(d.d[:d.nd]), 10) - if ok { - return Decimal{value: dValue, exp: int32(d.dp) - int32(d.nd)} - } - - return NewFromFloatWithExponent(val, int32(d.dp)-int32(d.nd)) -} - -// NewFromFloatWithExponent converts a float64 to Decimal, with an arbitrary -// number of fractional digits. -// -// Example: -// -// NewFromFloatWithExponent(123.456, -2).String() // output: "123.46" -// -func NewFromFloatWithExponent(value float64, exp int32) Decimal { - if math.IsNaN(value) || math.IsInf(value, 0) { - panic(fmt.Sprintf("Cannot create a Decimal from %v", value)) - } - - bits := math.Float64bits(value) - mant := bits & (1<<52 - 1) - exp2 := int32((bits >> 52) & (1<<11 - 1)) - sign := bits >> 63 - - if exp2 == 0 { - // specials - if mant == 0 { - return Decimal{} - } - // subnormal - exp2++ - } else { - // normal - mant |= 1 << 52 - } - - exp2 -= 1023 + 52 - - // normalizing base-2 values - for mant&1 == 0 { - mant = mant >> 1 - exp2++ - } - - // maximum number of fractional base-10 digits to represent 2^N exactly cannot be more than -N if N<0 - if exp < 0 && exp < exp2 { - if exp2 < 0 { - exp = exp2 - } else { - exp = 0 - } - } - - // representing 10^M * 2^N as 5^M * 2^(M+N) - exp2 -= exp - - temp := big.NewInt(1) - dMant := big.NewInt(int64(mant)) - - // applying 5^M - if exp > 0 { - temp = temp.SetInt64(int64(exp)) - temp = temp.Exp(fiveInt, temp, nil) - } else if exp < 0 { - temp = temp.SetInt64(-int64(exp)) - temp = temp.Exp(fiveInt, temp, nil) - dMant = dMant.Mul(dMant, temp) - temp = temp.SetUint64(1) - } - - // applying 2^(M+N) - if exp2 > 0 { - dMant = dMant.Lsh(dMant, uint(exp2)) - } else if exp2 < 0 { - temp = temp.Lsh(temp, uint(-exp2)) - } - - // rounding and downscaling - if exp > 0 || exp2 < 0 { - halfDown := new(big.Int).Rsh(temp, 1) - dMant = dMant.Add(dMant, halfDown) - dMant = dMant.Quo(dMant, temp) - } - - if sign == 1 { - dMant = dMant.Neg(dMant) - } - - return Decimal{ - value: dMant, - exp: exp, - } -} - -// rescale returns a rescaled version of the decimal. Returned -// decimal may be less precise if the given exponent is bigger -// than the initial exponent of the Decimal.
-// NOTE: this will truncate, NOT round -// -// Example: -// -// d := New(12345, -4) -// d2 := d.rescale(-1) -// d3 := d2.rescale(-4) -// println(d1) -// println(d2) -// println(d3) -// -// Output: -// -// 1.2345 -// 1.2 -// 1.2000 -// -func (d Decimal) rescale(exp int32) Decimal { - d.ensureInitialized() - - if d.exp == exp { - return Decimal{ - new(big.Int).Set(d.value), - d.exp, - } - } - - // NOTE(vadim): must convert exps to float64 before - to prevent overflow - diff := math.Abs(float64(exp) - float64(d.exp)) - value := new(big.Int).Set(d.value) - - expScale := new(big.Int).Exp(tenInt, big.NewInt(int64(diff)), nil) - if exp > d.exp { - value = value.Quo(value, expScale) - } else if exp < d.exp { - value = value.Mul(value, expScale) - } - - return Decimal{ - value: value, - exp: exp, - } -} - -// Abs returns the absolute value of the decimal. -func (d Decimal) Abs() Decimal { - d.ensureInitialized() - d2Value := new(big.Int).Abs(d.value) - return Decimal{ - value: d2Value, - exp: d.exp, - } -} - -// Add returns d + d2. -func (d Decimal) Add(d2 Decimal) Decimal { - rd, rd2 := RescalePair(d, d2) - - d3Value := new(big.Int).Add(rd.value, rd2.value) - return Decimal{ - value: d3Value, - exp: rd.exp, - } -} - -// Sub returns d - d2. -func (d Decimal) Sub(d2 Decimal) Decimal { - rd, rd2 := RescalePair(d, d2) - - d3Value := new(big.Int).Sub(rd.value, rd2.value) - return Decimal{ - value: d3Value, - exp: rd.exp, - } -} - -// Neg returns -d. -func (d Decimal) Neg() Decimal { - d.ensureInitialized() - val := new(big.Int).Neg(d.value) - return Decimal{ - value: val, - exp: d.exp, - } -} - -// Mul returns d * d2. -func (d Decimal) Mul(d2 Decimal) Decimal { - d.ensureInitialized() - d2.ensureInitialized() - - expInt64 := int64(d.exp) + int64(d2.exp) - if expInt64 > math.MaxInt32 || expInt64 < math.MinInt32 { - // NOTE(vadim): better to panic than give incorrect results, as - // Decimals are usually used for money - panic(fmt.Sprintf("exponent %v overflows an int32!", expInt64)) - } - - d3Value := new(big.Int).Mul(d.value, d2.value) - return Decimal{ - value: d3Value, - exp: int32(expInt64), - } -} - -// Shift shifts the decimal in base 10. -// It shifts left when shift is positive and right if shift is negative. -// In simpler terms, the given value for shift is added to the exponent -// of the decimal. -func (d Decimal) Shift(shift int32) Decimal { - d.ensureInitialized() - return Decimal{ - value: new(big.Int).Set(d.value), - exp: d.exp + shift, - } -} - -// Div returns d / d2. If it doesn't divide exactly, the result will have -// DivisionPrecision digits after the decimal point. -func (d Decimal) Div(d2 Decimal) Decimal { - return d.DivRound(d2, int32(DivisionPrecision)) -} - -// QuoRem does divsion with remainder -// d.QuoRem(d2,precision) returns quotient q and remainder r such that -// d = d2 * q + r, q an integer multiple of 10^(-precision) -// 0 <= r < abs(d2) * 10 ^(-precision) if d>=0 -// 0 >= r > -abs(d2) * 10 ^(-precision) if d<0 -// Note that precision<0 is allowed as input. 
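Reviewer note: the QuoRem contract documented above (d = d2*q + r, with q an integer multiple of 10^(-precision)) and DivRound's half-away-from-zero behavior, as a runnable sketch:

```go
package main

import (
	"fmt"

	"github.com/shopspring/decimal"
)

func main() {
	ten := decimal.New(10, 0)
	three := decimal.New(3, 0)

	// d = d2*q + r, with q an integer multiple of 10^-1:
	q, r := ten.QuoRem(three, 1)
	fmt.Println(q, r) // 3.3 0.1  (10 = 3*3.3 + 0.1)

	// DivRound rounds the final digit half away from zero:
	fmt.Println(decimal.New(2, 0).DivRound(three, 3)) // 0.667
}
```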
-func (d Decimal) QuoRem(d2 Decimal, precision int32) (Decimal, Decimal) { - d.ensureInitialized() - d2.ensureInitialized() - if d2.value.Sign() == 0 { - panic("decimal division by 0") - } - scale := -precision - e := int64(d.exp - d2.exp - scale) - if e > math.MaxInt32 || e < math.MinInt32 { - panic("overflow in decimal QuoRem") - } - var aa, bb, expo big.Int - var scalerest int32 - // d = a 10^ea - // d2 = b 10^eb - if e < 0 { - aa = *d.value - expo.SetInt64(-e) - bb.Exp(tenInt, &expo, nil) - bb.Mul(d2.value, &bb) - scalerest = d.exp - // now aa = a - // bb = b 10^(scale + eb - ea) - } else { - expo.SetInt64(e) - aa.Exp(tenInt, &expo, nil) - aa.Mul(d.value, &aa) - bb = *d2.value - scalerest = scale + d2.exp - // now aa = a ^ (ea - eb - scale) - // bb = b - } - var q, r big.Int - q.QuoRem(&aa, &bb, &r) - dq := Decimal{value: &q, exp: scale} - dr := Decimal{value: &r, exp: scalerest} - return dq, dr -} - -// DivRound divides and rounds to a given precision -// i.e. to an integer multiple of 10^(-precision) -// for a positive quotient digit 5 is rounded up, away from 0 -// if the quotient is negative then digit 5 is rounded down, away from 0 -// Note that precision<0 is allowed as input. -func (d Decimal) DivRound(d2 Decimal, precision int32) Decimal { - // QuoRem already checks initialization - q, r := d.QuoRem(d2, precision) - // the actual rounding decision is based on comparing r*10^precision and d2/2 - // instead compare 2 r 10 ^precision and d2 - var rv2 big.Int - rv2.Abs(r.value) - rv2.Lsh(&rv2, 1) - // now rv2 = abs(r.value) * 2 - r2 := Decimal{value: &rv2, exp: r.exp + precision} - // r2 is now 2 * r * 10 ^ precision - var c = r2.Cmp(d2.Abs()) - - if c < 0 { - return q - } - - if d.value.Sign()*d2.value.Sign() < 0 { - return q.Sub(New(1, -precision)) - } - - return q.Add(New(1, -precision)) -} - -// Mod returns d % d2. -func (d Decimal) Mod(d2 Decimal) Decimal { - quo := d.Div(d2).Truncate(0) - return d.Sub(d2.Mul(quo)) -} - -// Pow returns d to the power d2 -func (d Decimal) Pow(d2 Decimal) Decimal { - var temp Decimal - if d2.IntPart() == 0 { - return NewFromFloat(1) - } - temp = d.Pow(d2.Div(NewFromFloat(2))) - if d2.IntPart()%2 == 0 { - return temp.Mul(temp) - } - if d2.IntPart() > 0 { - return temp.Mul(temp).Mul(d) - } - return temp.Mul(temp).Div(d) -} - -// Cmp compares the numbers represented by d and d2 and returns: -// -// -1 if d < d2 -// 0 if d == d2 -// +1 if d > d2 -// -func (d Decimal) Cmp(d2 Decimal) int { - d.ensureInitialized() - d2.ensureInitialized() - - if d.exp == d2.exp { - return d.value.Cmp(d2.value) - } - - rd, rd2 := RescalePair(d, d2) - - return rd.value.Cmp(rd2.value) -} - -// Equal returns whether the numbers represented by d and d2 are equal. -func (d Decimal) Equal(d2 Decimal) bool { - return d.Cmp(d2) == 0 -} - -// Equals is deprecated, please use Equal method instead -func (d Decimal) Equals(d2 Decimal) bool { - return d.Equal(d2) -} - -// GreaterThan (GT) returns true when d is greater than d2. -func (d Decimal) GreaterThan(d2 Decimal) bool { - return d.Cmp(d2) == 1 -} - -// GreaterThanOrEqual (GTE) returns true when d is greater than or equal to d2. -func (d Decimal) GreaterThanOrEqual(d2 Decimal) bool { - cmp := d.Cmp(d2) - return cmp == 1 || cmp == 0 -} - -// LessThan (LT) returns true when d is less than d2. -func (d Decimal) LessThan(d2 Decimal) bool { - return d.Cmp(d2) == -1 -} - -// LessThanOrEqual (LTE) returns true when d is less than or equal to d2. 
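Reviewer note: Cmp and the comparison helpers rescale both operands to a common exponent first, so scale never affects ordering. A quick sketch:

```go
package main

import (
	"fmt"

	"github.com/shopspring/decimal"
)

func main() {
	x := decimal.RequireFromString("1.100")
	y := decimal.RequireFromString("1.1")

	fmt.Println(x.Cmp(y))         // 0: operands are rescaled before comparing
	fmt.Println(x.Equal(y))       // true, despite the different scales
	fmt.Println(x.GreaterThan(y)) // false
}
```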
-func (d Decimal) LessThanOrEqual(d2 Decimal) bool { - cmp := d.Cmp(d2) - return cmp == -1 || cmp == 0 -} - -// Sign returns: -// -// -1 if d < 0 -// 0 if d == 0 -// +1 if d > 0 -// -func (d Decimal) Sign() int { - if d.value == nil { - return 0 - } - return d.value.Sign() -} - -// IsPositive return -// -// true if d > 0 -// false if d == 0 -// false if d < 0 -func (d Decimal) IsPositive() bool { - return d.Sign() == 1 -} - -// IsNegative return -// -// true if d < 0 -// false if d == 0 -// false if d > 0 -func (d Decimal) IsNegative() bool { - return d.Sign() == -1 -} - -// IsZero return -// -// true if d == 0 -// false if d > 0 -// false if d < 0 -func (d Decimal) IsZero() bool { - return d.Sign() == 0 -} - -// Exponent returns the exponent, or scale component of the decimal. -func (d Decimal) Exponent() int32 { - return d.exp -} - -// Coefficient returns the coefficient of the decimal. It is scaled by 10^Exponent() -func (d Decimal) Coefficient() *big.Int { - d.ensureInitialized() - // we copy the coefficient so that mutating the result does not mutate the - // Decimal. - return big.NewInt(0).Set(d.value) -} - -// IntPart returns the integer component of the decimal. -func (d Decimal) IntPart() int64 { - scaledD := d.rescale(0) - return scaledD.value.Int64() -} - -// BigInt returns integer component of the decimal as a BigInt. -func (d Decimal) BigInt() *big.Int { - scaledD := d.rescale(0) - i := &big.Int{} - i.SetString(scaledD.String(), 10) - return i -} - -// BigFloat returns decimal as BigFloat. -// Be aware that casting decimal to BigFloat might cause a loss of precision. -func (d Decimal) BigFloat() *big.Float { - f := &big.Float{} - f.SetString(d.String()) - return f -} - -// Rat returns a rational number representation of the decimal. -func (d Decimal) Rat() *big.Rat { - d.ensureInitialized() - if d.exp <= 0 { - // NOTE(vadim): must negate after casting to prevent int32 overflow - denom := new(big.Int).Exp(tenInt, big.NewInt(-int64(d.exp)), nil) - return new(big.Rat).SetFrac(d.value, denom) - } - - mul := new(big.Int).Exp(tenInt, big.NewInt(int64(d.exp)), nil) - num := new(big.Int).Mul(d.value, mul) - return new(big.Rat).SetFrac(num, oneInt) -} - -// Float64 returns the nearest float64 value for d and a bool indicating -// whether f represents d exactly. -// For more details, see the documentation for big.Rat.Float64 -func (d Decimal) Float64() (f float64, exact bool) { - return d.Rat().Float64() -} - -// String returns the string representation of the decimal -// with the fixed point. -// -// Example: -// -// d := New(-12345, -3) -// println(d.String()) -// -// Output: -// -// -12.345 -// -func (d Decimal) String() string { - return d.string(true) -} - -// StringFixed returns a rounded fixed-point string with places digits after -// the decimal point. -// -// Example: -// -// NewFromFloat(0).StringFixed(2) // output: "0.00" -// NewFromFloat(0).StringFixed(0) // output: "0" -// NewFromFloat(5.45).StringFixed(0) // output: "5" -// NewFromFloat(5.45).StringFixed(1) // output: "5.5" -// NewFromFloat(5.45).StringFixed(2) // output: "5.45" -// NewFromFloat(5.45).StringFixed(3) // output: "5.450" -// NewFromFloat(545).StringFixed(-1) // output: "550" -// -func (d Decimal) StringFixed(places int32) string { - rounded := d.Round(places) - return rounded.string(false) -} - -// StringFixedBank returns a banker rounded fixed-point string with places digits -// after the decimal point. 
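Reviewer note: the practical difference between the two fixed-point formatters here is the tie-breaking rule — StringFixed rounds half away from zero, StringFixedBank rounds half to even (note the RoundBank examples below say Round but show banker's results). A sketch:

```go
package main

import (
	"fmt"

	"github.com/shopspring/decimal"
)

func main() {
	d := decimal.NewFromFloat(5.45)
	fmt.Println(d.StringFixed(1))     // 5.5 — half away from zero
	fmt.Println(d.StringFixedBank(1)) // 5.4 — half to even
	fmt.Println(d.StringFixed(3))     // 5.450 — pads with trailing zeros
}
```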
-// -// Example: -// -// NewFromFloat(0).StringFixedBank(2) // output: "0.00" -// NewFromFloat(0).StringFixedBank(0) // output: "0" -// NewFromFloat(5.45).StringFixedBank(0) // output: "5" -// NewFromFloat(5.45).StringFixedBank(1) // output: "5.4" -// NewFromFloat(5.45).StringFixedBank(2) // output: "5.45" -// NewFromFloat(5.45).StringFixedBank(3) // output: "5.450" -// NewFromFloat(545).StringFixedBank(-1) // output: "540" -// -func (d Decimal) StringFixedBank(places int32) string { - rounded := d.RoundBank(places) - return rounded.string(false) -} - -// StringFixedCash returns a Swedish/Cash rounded fixed-point string. For -// more details see the documentation at function RoundCash. -func (d Decimal) StringFixedCash(interval uint8) string { - rounded := d.RoundCash(interval) - return rounded.string(false) -} - -// Round rounds the decimal to places decimal places. -// If places < 0, it will round the integer part to the nearest 10^(-places). -// -// Example: -// -// NewFromFloat(5.45).Round(1).String() // output: "5.5" -// NewFromFloat(545).Round(-1).String() // output: "550" -// -func (d Decimal) Round(places int32) Decimal { - // truncate to places + 1 - ret := d.rescale(-places - 1) - - // add sign(d) * 0.5 - if ret.value.Sign() < 0 { - ret.value.Sub(ret.value, fiveInt) - } else { - ret.value.Add(ret.value, fiveInt) - } - - // floor for positive numbers, ceil for negative numbers - _, m := ret.value.DivMod(ret.value, tenInt, new(big.Int)) - ret.exp++ - if ret.value.Sign() < 0 && m.Cmp(zeroInt) != 0 { - ret.value.Add(ret.value, oneInt) - } - - return ret -} - -// RoundBank rounds the decimal to places decimal places. -// If the final digit to round is equidistant from the nearest two integers the -// rounded value is taken as the even number -// -// If places < 0, it will round the integer part to the nearest 10^(-places). -// -// Examples: -// -// NewFromFloat(5.45).Round(1).String() // output: "5.4" -// NewFromFloat(545).Round(-1).String() // output: "540" -// NewFromFloat(5.46).Round(1).String() // output: "5.5" -// NewFromFloat(546).Round(-1).String() // output: "550" -// NewFromFloat(5.55).Round(1).String() // output: "5.6" -// NewFromFloat(555).Round(-1).String() // output: "560" -// -func (d Decimal) RoundBank(places int32) Decimal { - - round := d.Round(places) - remainder := d.Sub(round).Abs() - - half := New(5, -places-1) - if remainder.Cmp(half) == 0 && round.value.Bit(0) != 0 { - if round.value.Sign() < 0 { - round.value.Add(round.value, oneInt) - } else { - round.value.Sub(round.value, oneInt) - } - } - - return round -} - -// RoundCash aka Cash/Penny/öre rounding rounds decimal to a specific -// interval. The amount payable for a cash transaction is rounded to the nearest -// multiple of the minimum currency unit available. The following intervals are -// available: 5, 10, 25, 50 and 100; any other number throws a panic. -// 5: 5 cent rounding 3.43 => 3.45 -// 10: 10 cent rounding 3.45 => 3.50 (5 gets rounded up) -// 25: 25 cent rounding 3.41 => 3.50 -// 50: 50 cent rounding 3.75 => 4.00 -// 100: 100 cent rounding 3.50 => 4.00 -// For more details: https://en.wikipedia.org/wiki/Cash_rounding -func (d Decimal) RoundCash(interval uint8) Decimal { - var iVal *big.Int - switch interval { - case 5: - iVal = twentyInt - case 10: - iVal = tenInt - case 25: - iVal = fourInt - case 50: - iVal = twoInt - case 100: - iVal = oneInt - default: - panic(fmt.Sprintf("Decimal does not support this Cash rounding interval `%d`. 
Supported: 5, 10, 25, 50, 100", interval)) - } - dVal := Decimal{ - value: iVal, - } - - // TODO: optimize those calculations to reduce the high allocations (~29 allocs). - return d.Mul(dVal).Round(0).Div(dVal).Truncate(2) -} - -// Floor returns the nearest integer value less than or equal to d. -func (d Decimal) Floor() Decimal { - d.ensureInitialized() - - if d.exp >= 0 { - return d - } - - exp := big.NewInt(10) - - // NOTE(vadim): must negate after casting to prevent int32 overflow - exp.Exp(exp, big.NewInt(-int64(d.exp)), nil) - - z := new(big.Int).Div(d.value, exp) - return Decimal{value: z, exp: 0} -} - -// Ceil returns the nearest integer value greater than or equal to d. -func (d Decimal) Ceil() Decimal { - d.ensureInitialized() - - if d.exp >= 0 { - return d - } - - exp := big.NewInt(10) - - // NOTE(vadim): must negate after casting to prevent int32 overflow - exp.Exp(exp, big.NewInt(-int64(d.exp)), nil) - - z, m := new(big.Int).DivMod(d.value, exp, new(big.Int)) - if m.Cmp(zeroInt) != 0 { - z.Add(z, oneInt) - } - return Decimal{value: z, exp: 0} -} - -// Truncate truncates off digits from the number, without rounding. -// -// NOTE: precision is the last digit that will not be truncated (must be >= 0). -// -// Example: -// -// decimal.NewFromString("123.456").Truncate(2).String() // "123.45" -// -func (d Decimal) Truncate(precision int32) Decimal { - d.ensureInitialized() - if precision >= 0 && -precision > d.exp { - return d.rescale(-precision) - } - return d -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (d *Decimal) UnmarshalJSON(decimalBytes []byte) error { - if string(decimalBytes) == "null" { - return nil - } - - str, err := unquoteIfQuoted(decimalBytes) - if err != nil { - return fmt.Errorf("error decoding string '%s': %s", decimalBytes, err) - } - - decimal, err := NewFromString(str) - *d = decimal - if err != nil { - return fmt.Errorf("error decoding string '%s': %s", str, err) - } - return nil -} - -// MarshalJSON implements the json.Marshaler interface. -func (d Decimal) MarshalJSON() ([]byte, error) { - var str string - if MarshalJSONWithoutQuotes { - str = d.String() - } else { - str = "\"" + d.String() + "\"" - } - return []byte(str), nil -} - -// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. As a string representation -// is already used when encoding to text, this method stores that string as []byte -func (d *Decimal) UnmarshalBinary(data []byte) error { - // Extract the exponent - d.exp = int32(binary.BigEndian.Uint32(data[:4])) - - // Extract the value - d.value = new(big.Int) - return d.value.GobDecode(data[4:]) -} - -// MarshalBinary implements the encoding.BinaryMarshaler interface. -func (d Decimal) MarshalBinary() (data []byte, err error) { - // Write the exponent first since it's a fixed size - v1 := make([]byte, 4) - binary.BigEndian.PutUint32(v1, uint32(d.exp)) - - // Add the value - var v2 []byte - if v2, err = d.value.GobEncode(); err != nil { - return - } - - // Return the byte array - data = append(v1, v2...) - return -} - -// Scan implements the sql.Scanner interface for database deserialization. 
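Reviewer note: because Decimal implements sql.Scanner (the Scan method that follows) and driver.Valuer, values round-trip through database/sql directly. A compile-only sketch; the items table and price column are hypothetical:

```go
package example

import (
	"database/sql"

	"github.com/shopspring/decimal"
)

// itemPrice reads a NUMERIC column straight into a Decimal. Drivers
// typically deliver NUMERIC as []byte or string, which Scan parses exactly.
func itemPrice(db *sql.DB, id int64) (decimal.Decimal, error) {
	var price decimal.Decimal
	err := db.QueryRow("SELECT price FROM items WHERE id = ?", id).Scan(&price)
	return price, err
}
```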
-func (d *Decimal) Scan(value interface{}) error { - // first try to see if the data is stored in database as a Numeric datatype - switch v := value.(type) { - - case float32: - *d = NewFromFloat(float64(v)) - return nil - - case float64: - // numeric in sqlite3 sends us float64 - *d = NewFromFloat(v) - return nil - - case int64: - // at least in sqlite3 when the value is 0 in db, the data is sent - // to us as an int64 instead of a float64 ... - *d = New(v, 0) - return nil - - default: - // default is trying to interpret value stored as string - str, err := unquoteIfQuoted(v) - if err != nil { - return err - } - *d, err = NewFromString(str) - return err - } -} - -// Value implements the driver.Valuer interface for database serialization. -func (d Decimal) Value() (driver.Value, error) { - return d.String(), nil -} - -// UnmarshalText implements the encoding.TextUnmarshaler interface for XML -// deserialization. -func (d *Decimal) UnmarshalText(text []byte) error { - str := string(text) - - dec, err := NewFromString(str) - *d = dec - if err != nil { - return fmt.Errorf("error decoding string '%s': %s", str, err) - } - - return nil -} - -// MarshalText implements the encoding.TextMarshaler interface for XML -// serialization. -func (d Decimal) MarshalText() (text []byte, err error) { - return []byte(d.String()), nil -} - -// GobEncode implements the gob.GobEncoder interface for gob serialization. -func (d Decimal) GobEncode() ([]byte, error) { - return d.MarshalBinary() -} - -// GobDecode implements the gob.GobDecoder interface for gob serialization. -func (d *Decimal) GobDecode(data []byte) error { - return d.UnmarshalBinary(data) -} - -// StringScaled first scales the decimal then calls .String() on it. -// NOTE: buggy, unintuitive, and DEPRECATED! Use StringFixed instead. -func (d Decimal) StringScaled(exp int32) string { - return d.rescale(exp).String() -} - -func (d Decimal) string(trimTrailingZeros bool) string { - if d.exp >= 0 { - return d.rescale(0).value.String() - } - - abs := new(big.Int).Abs(d.value) - str := abs.String() - - var intPart, fractionalPart string - - // NOTE(vadim): this cast to int will cause bugs if d.exp == INT_MIN - // and you are on a 32-bit machine. Won't fix this super-edge case. - dExpInt := int(d.exp) - if len(str) > -dExpInt { - intPart = str[:len(str)+dExpInt] - fractionalPart = str[len(str)+dExpInt:] - } else { - intPart = "0" - - num0s := -dExpInt - len(str) - fractionalPart = strings.Repeat("0", num0s) + str - } - - if trimTrailingZeros { - i := len(fractionalPart) - 1 - for ; i >= 0; i-- { - if fractionalPart[i] != '0' { - break - } - } - fractionalPart = fractionalPart[:i+1] - } - - number := intPart - if len(fractionalPart) > 0 { - number += "." + fractionalPart - } - - if d.value.Sign() < 0 { - return "-" + number - } - - return number -} - -func (d *Decimal) ensureInitialized() { - if d.value == nil { - d.value = new(big.Int) - } -} - -// Min returns the smallest Decimal that was passed in the arguments. -// -// To call this function with an array, you must do: -// -// Min(arr[0], arr[1:]...) -// -// This makes it harder to accidentally call Min with 0 arguments. -func Min(first Decimal, rest ...Decimal) Decimal { - ans := first - for _, item := range rest { - if item.Cmp(ans) < 0 { - ans = item - } - } - return ans -} - -// Max returns the largest Decimal that was passed in the arguments. -// -// To call this function with an array, you must do: -// -// Max(arr[0], arr[1:]...) 
-// -// This makes it harder to accidentally call Max with 0 arguments. -func Max(first Decimal, rest ...Decimal) Decimal { - ans := first - for _, item := range rest { - if item.Cmp(ans) > 0 { - ans = item - } - } - return ans -} - -// Sum returns the combined total of the provided first and rest Decimals -func Sum(first Decimal, rest ...Decimal) Decimal { - total := first - for _, item := range rest { - total = total.Add(item) - } - - return total -} - -// Avg returns the average value of the provided first and rest Decimals -func Avg(first Decimal, rest ...Decimal) Decimal { - count := New(int64(len(rest)+1), 0) - sum := Sum(first, rest...) - return sum.Div(count) -} - -// RescalePair rescales two decimals to common exponential value (minimal exp of both decimals) -func RescalePair(d1 Decimal, d2 Decimal) (Decimal, Decimal) { - d1.ensureInitialized() - d2.ensureInitialized() - - if d1.exp == d2.exp { - return d1, d2 - } - - baseScale := min(d1.exp, d2.exp) - if baseScale != d1.exp { - return d1.rescale(baseScale), d2 - } - return d1, d2.rescale(baseScale) -} - -func min(x, y int32) int32 { - if x >= y { - return y - } - return x -} - -func unquoteIfQuoted(value interface{}) (string, error) { - var bytes []byte - - switch v := value.(type) { - case string: - bytes = []byte(v) - case []byte: - bytes = v - default: - return "", fmt.Errorf("could not convert value '%+v' to byte array of type '%T'", - value, value) - } - - // If the amount is quoted, strip the quotes - if len(bytes) > 2 && bytes[0] == '"' && bytes[len(bytes)-1] == '"' { - bytes = bytes[1 : len(bytes)-1] - } - return string(bytes), nil -} - -// NullDecimal represents a nullable decimal with compatibility for -// scanning null values from the database. -type NullDecimal struct { - Decimal Decimal - Valid bool -} - -// Scan implements the sql.Scanner interface for database deserialization. -func (d *NullDecimal) Scan(value interface{}) error { - if value == nil { - d.Valid = false - return nil - } - d.Valid = true - return d.Decimal.Scan(value) -} - -// Value implements the driver.Valuer interface for database serialization. -func (d NullDecimal) Value() (driver.Value, error) { - if !d.Valid { - return nil, nil - } - return d.Decimal.Value() -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (d *NullDecimal) UnmarshalJSON(decimalBytes []byte) error { - if string(decimalBytes) == "null" { - d.Valid = false - return nil - } - d.Valid = true - return d.Decimal.UnmarshalJSON(decimalBytes) -} - -// MarshalJSON implements the json.Marshaler interface. -func (d NullDecimal) MarshalJSON() ([]byte, error) { - if !d.Valid { - return []byte("null"), nil - } - return d.Decimal.MarshalJSON() -} - -// Trig functions - -// Atan returns the arctangent, in radians, of x. 
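Reviewer note: the NullDecimal wrapper deleted just above distinguishes SQL/JSON null from zero instead of coercing it. A sketch of the JSON side:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/shopspring/decimal"
)

func main() {
	var n decimal.NullDecimal

	_ = json.Unmarshal([]byte(`null`), &n)
	fmt.Println(n.Valid) // false: null is preserved, not coerced to 0

	_ = json.Unmarshal([]byte(`"12.50"`), &n)
	fmt.Println(n.Valid, n.Decimal) // true 12.5
}
```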
-func (d Decimal) Atan() Decimal { - if d.Equal(NewFromFloat(0.0)) { - return d - } - if d.GreaterThan(NewFromFloat(0.0)) { - return d.satan() - } - return d.Neg().satan().Neg() -} - -func (d Decimal) xatan() Decimal { - P0 := NewFromFloat(-8.750608600031904122785e-01) - P1 := NewFromFloat(-1.615753718733365076637e+01) - P2 := NewFromFloat(-7.500855792314704667340e+01) - P3 := NewFromFloat(-1.228866684490136173410e+02) - P4 := NewFromFloat(-6.485021904942025371773e+01) - Q0 := NewFromFloat(2.485846490142306297962e+01) - Q1 := NewFromFloat(1.650270098316988542046e+02) - Q2 := NewFromFloat(4.328810604912902668951e+02) - Q3 := NewFromFloat(4.853903996359136964868e+02) - Q4 := NewFromFloat(1.945506571482613964425e+02) - z := d.Mul(d) - b1 := P0.Mul(z).Add(P1).Mul(z).Add(P2).Mul(z).Add(P3).Mul(z).Add(P4).Mul(z) - b2 := z.Add(Q0).Mul(z).Add(Q1).Mul(z).Add(Q2).Mul(z).Add(Q3).Mul(z).Add(Q4) - z = b1.Div(b2) - z = d.Mul(z).Add(d) - return z -} - -// satan reduces its argument (known to be positive) -// to the range [0, 0.66] and calls xatan. -func (d Decimal) satan() Decimal { - Morebits := NewFromFloat(6.123233995736765886130e-17) // pi/2 = PIO2 + Morebits - Tan3pio8 := NewFromFloat(2.41421356237309504880) // tan(3*pi/8) - pi := NewFromFloat(3.14159265358979323846264338327950288419716939937510582097494459) - - if d.LessThanOrEqual(NewFromFloat(0.66)) { - return d.xatan() - } - if d.GreaterThan(Tan3pio8) { - return pi.Div(NewFromFloat(2.0)).Sub(NewFromFloat(1.0).Div(d).xatan()).Add(Morebits) - } - return pi.Div(NewFromFloat(4.0)).Add((d.Sub(NewFromFloat(1.0)).Div(d.Add(NewFromFloat(1.0)))).xatan()).Add(NewFromFloat(0.5).Mul(Morebits)) -} - -// sin coefficients -var _sin = [...]Decimal{ - NewFromFloat(1.58962301576546568060e-10), // 0x3de5d8fd1fd19ccd - NewFromFloat(-2.50507477628578072866e-8), // 0xbe5ae5e5a9291f5d - NewFromFloat(2.75573136213857245213e-6), // 0x3ec71de3567d48a1 - NewFromFloat(-1.98412698295895385996e-4), // 0xbf2a01a019bfdf03 - NewFromFloat(8.33333333332211858878e-3), // 0x3f8111111110f7d0 - NewFromFloat(-1.66666666666666307295e-1), // 0xbfc5555555555548 -} - -// Sin returns the sine of the radian argument x. 
-func (d Decimal) Sin() Decimal { - PI4A := NewFromFloat(7.85398125648498535156e-1) // 0x3fe921fb40000000, Pi/4 split into three parts - PI4B := NewFromFloat(3.77489470793079817668e-8) // 0x3e64442d00000000, - PI4C := NewFromFloat(2.69515142907905952645e-15) // 0x3ce8469898cc5170, - M4PI := NewFromFloat(1.273239544735162542821171882678754627704620361328125) // 4/pi - - if d.Equal(NewFromFloat(0.0)) { - return d - } - // make argument positive but save the sign - sign := false - if d.LessThan(NewFromFloat(0.0)) { - d = d.Neg() - sign = true - } - - j := d.Mul(M4PI).IntPart() // integer part of x/(Pi/4), as integer for tests on the phase angle - y := NewFromFloat(float64(j)) // integer part of x/(Pi/4), as float - - // map zeros to origin - if j&1 == 1 { - j++ - y = y.Add(NewFromFloat(1.0)) - } - j &= 7 // octant modulo 2Pi radians (360 degrees) - // reflect in x axis - if j > 3 { - sign = !sign - j -= 4 - } - z := d.Sub(y.Mul(PI4A)).Sub(y.Mul(PI4B)).Sub(y.Mul(PI4C)) // Extended precision modular arithmetic - zz := z.Mul(z) - - if j == 1 || j == 2 { - w := zz.Mul(zz).Mul(_cos[0].Mul(zz).Add(_cos[1]).Mul(zz).Add(_cos[2]).Mul(zz).Add(_cos[3]).Mul(zz).Add(_cos[4]).Mul(zz).Add(_cos[5])) - y = NewFromFloat(1.0).Sub(NewFromFloat(0.5).Mul(zz)).Add(w) - } else { - y = z.Add(z.Mul(zz).Mul(_sin[0].Mul(zz).Add(_sin[1]).Mul(zz).Add(_sin[2]).Mul(zz).Add(_sin[3]).Mul(zz).Add(_sin[4]).Mul(zz).Add(_sin[5]))) - } - if sign { - y = y.Neg() - } - return y -} - -// cos coefficients -var _cos = [...]Decimal{ - NewFromFloat(-1.13585365213876817300e-11), // 0xbda8fa49a0861a9b - NewFromFloat(2.08757008419747316778e-9), // 0x3e21ee9d7b4e3f05 - NewFromFloat(-2.75573141792967388112e-7), // 0xbe927e4f7eac4bc6 - NewFromFloat(2.48015872888517045348e-5), // 0x3efa01a019c844f5 - NewFromFloat(-1.38888888888730564116e-3), // 0xbf56c16c16c14f91 - NewFromFloat(4.16666666666665929218e-2), // 0x3fa555555555554b -} - -// Cos returns the cosine of the radian argument x. 
-func (d Decimal) Cos() Decimal { - - PI4A := NewFromFloat(7.85398125648498535156e-1) // 0x3fe921fb40000000, Pi/4 split into three parts - PI4B := NewFromFloat(3.77489470793079817668e-8) // 0x3e64442d00000000, - PI4C := NewFromFloat(2.69515142907905952645e-15) // 0x3ce8469898cc5170, - M4PI := NewFromFloat(1.273239544735162542821171882678754627704620361328125) // 4/pi - - // make argument positive - sign := false - if d.LessThan(NewFromFloat(0.0)) { - d = d.Neg() - } - - j := d.Mul(M4PI).IntPart() // integer part of x/(Pi/4), as integer for tests on the phase angle - y := NewFromFloat(float64(j)) // integer part of x/(Pi/4), as float - - // map zeros to origin - if j&1 == 1 { - j++ - y = y.Add(NewFromFloat(1.0)) - } - j &= 7 // octant modulo 2Pi radians (360 degrees) - // reflect in x axis - if j > 3 { - sign = !sign - j -= 4 - } - if j > 1 { - sign = !sign - } - - z := d.Sub(y.Mul(PI4A)).Sub(y.Mul(PI4B)).Sub(y.Mul(PI4C)) // Extended precision modular arithmetic - zz := z.Mul(z) - - if j == 1 || j == 2 { - y = z.Add(z.Mul(zz).Mul(_sin[0].Mul(zz).Add(_sin[1]).Mul(zz).Add(_sin[2]).Mul(zz).Add(_sin[3]).Mul(zz).Add(_sin[4]).Mul(zz).Add(_sin[5]))) - } else { - w := zz.Mul(zz).Mul(_cos[0].Mul(zz).Add(_cos[1]).Mul(zz).Add(_cos[2]).Mul(zz).Add(_cos[3]).Mul(zz).Add(_cos[4]).Mul(zz).Add(_cos[5])) - y = NewFromFloat(1.0).Sub(NewFromFloat(0.5).Mul(zz)).Add(w) - } - if sign { - y = y.Neg() - } - return y -} - -var _tanP = [...]Decimal{ - NewFromFloat(-1.30936939181383777646e+4), // 0xc0c992d8d24f3f38 - NewFromFloat(1.15351664838587416140e+6), // 0x413199eca5fc9ddd - NewFromFloat(-1.79565251976484877988e+7), // 0xc1711fead3299176 -} -var _tanQ = [...]Decimal{ - NewFromFloat(1.00000000000000000000e+0), - NewFromFloat(1.36812963470692954678e+4), //0x40cab8a5eeb36572 - NewFromFloat(-1.32089234440210967447e+6), //0xc13427bc582abc96 - NewFromFloat(2.50083801823357915839e+7), //0x4177d98fc2ead8ef - NewFromFloat(-5.38695755929454629881e+7), //0xc189afe03cbe5a31 -} - -// Tan returns the tangent of the radian argument x. 
-func (d Decimal) Tan() Decimal { - - PI4A := NewFromFloat(7.85398125648498535156e-1) // 0x3fe921fb40000000, Pi/4 split into three parts - PI4B := NewFromFloat(3.77489470793079817668e-8) // 0x3e64442d00000000, - PI4C := NewFromFloat(2.69515142907905952645e-15) // 0x3ce8469898cc5170, - M4PI := NewFromFloat(1.273239544735162542821171882678754627704620361328125) // 4/pi - - if d.Equal(NewFromFloat(0.0)) { - return d - } - - // make argument positive but save the sign - sign := false - if d.LessThan(NewFromFloat(0.0)) { - d = d.Neg() - sign = true - } - - j := d.Mul(M4PI).IntPart() // integer part of x/(Pi/4), as integer for tests on the phase angle - y := NewFromFloat(float64(j)) // integer part of x/(Pi/4), as float - - // map zeros to origin - if j&1 == 1 { - j++ - y = y.Add(NewFromFloat(1.0)) - } - - z := d.Sub(y.Mul(PI4A)).Sub(y.Mul(PI4B)).Sub(y.Mul(PI4C)) // Extended precision modular arithmetic - zz := z.Mul(z) - - if zz.GreaterThan(NewFromFloat(1e-14)) { - w := zz.Mul(_tanP[0].Mul(zz).Add(_tanP[1]).Mul(zz).Add(_tanP[2])) - x := zz.Add(_tanQ[1]).Mul(zz).Add(_tanQ[2]).Mul(zz).Add(_tanQ[3]).Mul(zz).Add(_tanQ[4]) - y = z.Add(z.Mul(w.Div(x))) - } else { - y = z - } - if j&2 == 2 { - y = NewFromFloat(-1.0).Div(y) - } - if sign { - y = y.Neg() - } - return y -} diff --git a/vendor/github.com/shopspring/decimal/rounding.go b/vendor/github.com/shopspring/decimal/rounding.go deleted file mode 100644 index 8008f55c..00000000 --- a/vendor/github.com/shopspring/decimal/rounding.go +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Multiprecision decimal numbers. -// For floating-point formatting only; not general purpose. -// Only operations are assign and (binary) left/right shift. -// Can do binary floating point in multiprecision decimal precisely -// because 2 divides 10; cannot do decimal floating point -// in multiprecision binary precisely. - -package decimal - -type floatInfo struct { - mantbits uint - expbits uint - bias int -} - -var float32info = floatInfo{23, 8, -127} -var float64info = floatInfo{52, 11, -1023} - -// roundShortest rounds d (= mant * 2^exp) to the shortest number of digits -// that will let the original floating point value be precisely reconstructed. -func roundShortest(d *decimal, mant uint64, exp int, flt *floatInfo) { - // If mantissa is zero, the number is zero; stop now. - if mant == 0 { - d.nd = 0 - return - } - - // Compute upper and lower such that any decimal number - // between upper and lower (possibly inclusive) - // will round to the original floating point number. - - // We may see at once that the number is already shortest. - // - // Suppose d is not denormal, so that 2^exp <= d < 10^dp. - // The closest shorter number is at least 10^(dp-nd) away. - // The lower/upper bounds computed below are at distance - // at most 2^(exp-mantbits). - // - // So the number is already shortest if 10^(dp-nd) > 2^(exp-mantbits), - // or equivalently log2(10)*(dp-nd) > exp-mantbits. - // It is true if 332/100*(dp-nd) >= exp-mantbits (log2(10) > 3.32). - minexp := flt.bias + 1 // minimum possible exponent - if exp > minexp && 332*(d.dp-d.nd) >= 100*(exp-int(flt.mantbits)) { - // The number is already shortest. - return - } - - // d = mant << (exp - mantbits) - // Next highest floating point number is mant+1 << exp-mantbits. - // Our upper bound is halfway between, mant*2+1 << exp-mantbits-1. 
-	upper := new(decimal)
-	upper.Assign(mant*2 + 1)
-	upper.Shift(exp - int(flt.mantbits) - 1)
-
-	// d = mant << (exp - mantbits)
-	// Next lowest floating point number is mant-1 << exp-mantbits,
-	// unless mant-1 drops the significant bit and exp is not the minimum exp,
-	// in which case the next lowest is mant*2-1 << exp-mantbits-1.
-	// Either way, call it mantlo << explo-mantbits.
-	// Our lower bound is halfway between, mantlo*2+1 << explo-mantbits-1.
-	var mantlo uint64
-	var explo int
-	if mant > 1<<flt.mantbits || exp == minexp {
-		mantlo = mant - 1
-		explo = exp
-	} else {
-		mantlo = mant*2 - 1
-		explo = exp - 1
-	}
-	lower := new(decimal)
-	lower.Assign(mantlo*2 + 1)
-	lower.Shift(explo - int(flt.mantbits) - 1)
-
-	// The upper and lower bounds are possible outputs only if
-	// the original mantissa is even, so that IEEE round-to-even
-	// would round to the original mantissa and not the neighbors.
-	inclusive := mant%2 == 0
-
-	// Now we can figure out the minimum number of digits required.
-	// Walk along until d has distinguished itself from upper and lower.
-	for i := 0; i < d.nd; i++ {
-		l := byte('0') // lower digit
-		if i < lower.nd {
-			l = lower.d[i]
-		}
-		m := d.d[i]    // middle digit
-		u := byte('0') // upper digit
-		if i < upper.nd {
-			u = upper.d[i]
-		}
-
-		// Okay to round down (truncate) if lower has a different digit
-		// or if lower is inclusive and is exactly the result of rounding
-		// down (i.e., and we have reached the final digit of lower).
-		okdown := l != m || inclusive && i+1 == lower.nd
-
-		// Okay to round up if upper has a different digit and either upper
-		// is inclusive or upper is bigger than the result of rounding up.
-		okup := m != u && (inclusive || m+1 < u || i+1 < upper.nd)
-
-		// If it's okay to do either, then round to the nearest one.
-		// If it's okay to do only one, do it.
-		switch {
-		case okdown && okup:
-			d.Round(i + 1)
-			return
-		case okdown:
-			d.RoundDown(i + 1)
-			return
-		case okup:
-			d.RoundUp(i + 1)
-			return
-		}
-	}
-}
-			if futureDash && len(anchorName) > 0 {
-				anchorName = append(anchorName, '-')
-			}
-			futureDash = false
-			anchorName = append(anchorName, unicode.ToLower(r))
-		default:
-			futureDash = true
-		}
-	}
-	return string(anchorName)
-}
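The roundShortest code above is the same shortest-round-trip rounding the standard library uses when formatting floats, and it is what lets the vendored decimal package turn a float64 into the fewest digits that still round-trip (the float64info table above feeds it). A rough standalone illustration of the property, using only the standard strconv package rather than the vendored code:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	f := 0.1 // stored as the nearest binary float64, not exactly 0.1

	// Precision -1 requests the fewest digits that still parse back to
	// the identical float64 -- the invariant roundShortest establishes
	// digit by digit between its lower and upper bounds.
	shortest := strconv.FormatFloat(f, 'f', -1, 64)
	fmt.Println(shortest) // 0.1

	// A fixed high precision exposes the underlying binary value instead.
	fmt.Println(strconv.FormatFloat(f, 'f', 20, 64)) // 0.10000000000000000555

	// The shortest form round-trips exactly.
	g, _ := strconv.ParseFloat(shortest, 64)
	fmt.Println(g == f) // true
}
```

This is presumably why `decimal.NewFromFloat(0.1)` stringifies as "0.1" rather than the full binary expansion.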
diff --git a/vendor/github.com/sirupsen/logrus/.gitignore b/vendor/github.com/sirupsen/logrus/.gitignore
deleted file mode 100644
index 1fb13abe..00000000
--- a/vendor/github.com/sirupsen/logrus/.gitignore
+++ /dev/null
@@ -1,4 +0,0 @@
-logrus
-vendor
-
-.idea/
diff --git a/vendor/github.com/sirupsen/logrus/.golangci.yml b/vendor/github.com/sirupsen/logrus/.golangci.yml
deleted file mode 100644
index 65dc2850..00000000
--- a/vendor/github.com/sirupsen/logrus/.golangci.yml
+++ /dev/null
@@ -1,40 +0,0 @@
-run:
-  # do not run on test files yet
-  tests: false
-
-# all available settings of specific linters
-linters-settings:
-  errcheck:
-    # report about not checking errors in type assertions: `a := b.(MyStruct)`;
-    # default is false: such cases aren't reported by default.
-    check-type-assertions: false
-
-    # report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`;
-    # default is false: such cases aren't reported by default.
-    check-blank: false
-
-  lll:
-    line-length: 100
-    tab-width: 4
-
-  prealloc:
-    simple: false
-    range-loops: false
-    for-loops: false
-
-  whitespace:
-    multi-if: false # Enforces newlines (or comments) after every multi-line if statement
-    multi-func: false # Enforces newlines (or comments) after every multi-line function signature
-
-linters:
-  enable:
-    - megacheck
-    - govet
-  disable:
-    - maligned
-    - prealloc
-  disable-all: false
-  presets:
-    - bugs
-    - unused
-  fast: false
diff --git a/vendor/github.com/sirupsen/logrus/.travis.yml b/vendor/github.com/sirupsen/logrus/.travis.yml
deleted file mode 100644
index 5e20aa41..00000000
--- a/vendor/github.com/sirupsen/logrus/.travis.yml
+++ /dev/null
@@ -1,17 +0,0 @@
-language: go
-go_import_path: github.com/sirupsen/logrus
-git:
-  depth: 1
-env:
-  - GO111MODULE=on
-go: [1.13.x, 1.14.x]
-os: [linux, osx]
-install:
-  - ./travis/install.sh
-script:
-  - ./travis/cross_build.sh
-  - ./travis/lint.sh
-  - export GOMAXPROCS=4
-  - export GORACE=halt_on_error=1
-  - go test -race -v ./...
-  - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then go test -race -v -tags appengine ./... ; fi
diff --git a/vendor/github.com/sirupsen/logrus/CHANGELOG.md b/vendor/github.com/sirupsen/logrus/CHANGELOG.md
deleted file mode 100644
index 584026d6..00000000
--- a/vendor/github.com/sirupsen/logrus/CHANGELOG.md
+++ /dev/null
@@ -1,223 +0,0 @@
-# 1.6.0
-Fixes:
- * end of line cleanup
- * revert the entry concurrency bug fix which leads to deadlock under some circumstances
- * update dependency on go-windows-terminal-sequences to fix a crash with go 1.14
-
-Features:
- * add an option to the `TextFormatter` to completely disable fields quoting
-
-# 1.5.0
-Code quality:
- * add golangci linter run on travis
-
-Fixes:
- * add mutex for hooks concurrent access on `Entry` data
- * caller function field for go1.14
- * fix build issue for gopherjs target
-
-Feature:
- * add a hooks/writer sub-package whose goal is to split output to different streams depending on the log level
- * add a `DisableHTMLEscape` option in the `JSONFormatter`
- * add `ForceQuote` and `PadLevelText` options in the `TextFormatter`
-
-# 1.4.2
- * Fixes build break for plan9, nacl, solaris
-# 1.4.1
-This new release introduces:
- * Enhance TextFormatter to not print caller information when it is empty (#944)
- * Remove dependency on golang.org/x/crypto (#932, #943)
-
-Fixes:
- * Fix Entry.WithContext method to return a copy of the initial entry (#941)
-
-# 1.4.0
-This new release introduces:
- * Add `DeferExitHandler`, similar to `RegisterExitHandler` but prepending the handler to the list of handlers (semantically like `defer`) (#848).
- * Add `CallerPrettyfier` to `JSONFormatter` and `TextFormatter` (#909, #911)
- * Add `Entry.WithContext()` and `Entry.Context`, to set a context on entries to be used e.g. in hooks (#919).
-
-Fixes:
- * Fix wrong method calls `Logger.Print` and `Logger.Warningln` (#893).
- * Update `Entry.Logf` to not do string formatting unless the log level is enabled (#903)
- * Fix infinite recursion on unknown `Level.String()` (#907)
- * Fix race condition in `getCaller` (#916).
-
-
-# 1.3.0
-This new release introduces:
- * Log, Logf, Logln functions for Logger and Entry that take a Level
-
-Fixes:
- * Building prometheus node_exporter on AIX (#840)
- * Race condition in TextFormatter (#468)
- * Travis CI import path (#868)
- * Remove coloured output on Windows (#862)
- * Pointer to func as field in JSONFormatter (#870)
- * Properly marshal Levels (#873)
-
-# 1.2.0
-This new release introduces:
- * A new method `SetReportCaller` in the `Logger` to enable the file, line and calling function from which the trace has been issued
- * A new trace level named `Trace` whose level is below `Debug`
- * A configurable exit function to be called upon a Fatal trace
- * The `Level` object now implements `encoding.TextUnmarshaler` interface
-
-# 1.1.1
-This is a bug fix release.
- * fix the build break on Solaris
- * don't drop a whole trace in JSONFormatter when a field param is a function pointer which cannot be serialized
-
-# 1.1.0
-This new release introduces:
- * several fixes:
-   * a fix for a race condition on entry formatting
-   * proper cleanup of previously used entries before putting them back in the pool
-   * removal of the extra newline at the end of the message in the text formatter
- * a new global public API to check if a level is activated: IsLevelEnabled
-   * the following methods have been added to the Logger object
-     * IsLevelEnabled
-     * SetFormatter
-     * SetOutput
-     * ReplaceHooks
- * introduction of go module
- * an indent configuration for the json formatter
- * output colour support for windows
- * the field sort function is now configurable for text formatter
- * the CLICOLOR and CLICOLOR\_FORCE environment variable support in text formatter
-
-# 1.0.6
-
-This new release introduces:
- * a new API WithTime which makes it easy to force the time of the log entry,
-   which is mostly useful for logger wrappers
- * a fix reverting the immutability of the entry given as parameter to the hooks
- * a new configuration field of the json formatter in order to put all the fields
-   in a nested dictionary
- * a new SetOutput method in the Logger
- * a new configuration of the textformatter to configure the name of the default keys
- * a new configuration of the text formatter to disable the level truncation
-
-# 1.0.5
-
-* Fix hooks race (#707)
-* Fix panic deadlock (#695)
-
-# 1.0.4
-
-* Fix race when adding hooks (#612)
-* Fix terminal check in AppEngine (#635)
-
-# 1.0.3
-
-* Replace example files with testable examples
-
-# 1.0.2
-
-* bug: quote non-string values in text formatter (#583)
-* Make (*Logger) SetLevel a public method
-
-# 1.0.1
-
-* bug: fix escaping in text formatter (#575)
-
-# 1.0.0
-
-* Officially changed name to lower-case
-* bug: colors on Windows 10 (#541)
-* bug: fix race in accessing level (#512)
-
-# 0.11.5
-
-* feature: add writer and writerlevel to entry (#372)
-
-# 0.11.4
-
-* bug: fix undefined variable on solaris (#493)
-
-# 0.11.3
-
-* formatter: configure quoting of empty values (#484)
-* formatter: configure quoting character (default is `"`) (#484)
-* bug: fix not importing io correctly in non-linux environments (#481)
-
-# 0.11.2
-
-* bug: fix windows terminal detection (#476)
-
-# 0.11.1
-
-* bug: fix tty detection with custom out (#471)
-
-# 0.11.0
-
-* performance: Use bufferpool to allocate (#370)
-* terminal: terminal detection for app-engine (#343)
-* feature: exit handler (#375)
-
-# 0.10.0
-
-* feature: Add a test hook (#180)
-* feature: `ParseLevel` is now case-insensitive (#326)
-* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308)
-* performance: avoid re-allocations on `WithFields` (#335)
-
-# 0.9.0
-
-* logrus/text_formatter: don't emit empty msg
-* logrus/hooks/airbrake: move out of main repository
-* logrus/hooks/sentry: move out of main repository
-* logrus/hooks/papertrail: move out of main repository
-* logrus/hooks/bugsnag: move out of main repository
-* logrus/core: run tests with `-race`
-* logrus/core: detect TTY based on `stderr`
-* logrus/core: support `WithError` on logger
-* logrus/core: Solaris support
-
-# 0.8.7
-
-* logrus/core: fix possible race (#216)
-* logrus/doc: small typo fixes and doc improvements
-
-
-# 0.8.6
-
-* hooks/raven: allow passing an initialized client
-
-# 0.8.5
-
-* logrus/core: revert #208
-
-# 0.8.4
-
-* formatter/text: fix data race (#218)
-
-# 
0.8.3 - -* logrus/core: fix entry log level (#208) -* logrus/core: improve performance of text formatter by 40% -* logrus/core: expose `LevelHooks` type -* logrus/core: add support for DragonflyBSD and NetBSD -* formatter/text: print structs more verbosely - -# 0.8.2 - -* logrus: fix more Fatal family functions - -# 0.8.1 - -* logrus: fix not exiting on `Fatalf` and `Fatalln` - -# 0.8.0 - -* logrus: defaults to stderr instead of stdout -* hooks/sentry: add special field for `*http.Request` -* formatter/text: ignore Windows for colors - -# 0.7.3 - -* formatter/\*: allow configuration of timestamp layout - -# 0.7.2 - -* formatter/text: Add configuration option for time format (#158) diff --git a/vendor/github.com/sirupsen/logrus/LICENSE b/vendor/github.com/sirupsen/logrus/LICENSE deleted file mode 100644 index f090cb42..00000000 --- a/vendor/github.com/sirupsen/logrus/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Simon Eskildsen - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/sirupsen/logrus/README.md b/vendor/github.com/sirupsen/logrus/README.md deleted file mode 100644 index 5796706d..00000000 --- a/vendor/github.com/sirupsen/logrus/README.md +++ /dev/null @@ -1,513 +0,0 @@ -# Logrus :walrus: [![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus) [![GoDoc](https://godoc.org/github.com/sirupsen/logrus?status.svg)](https://godoc.org/github.com/sirupsen/logrus) - -Logrus is a structured logger for Go (golang), completely API compatible with -the standard library logger. - -**Logrus is in maintenance-mode.** We will not be introducing new features. It's -simply too hard to do in a way that won't break many people's projects, which is -the last thing you want from your Logging library (again...). - -This does not mean Logrus is dead. Logrus will continue to be maintained for -security, (backwards compatible) bug fixes, and performance (where we are -limited by the interface). - -I believe Logrus' biggest contribution is to have played a part in today's -widespread use of structured logging in Golang. There doesn't seem to be a -reason to do a major, breaking iteration into Logrus V2, since the fantastic Go -community has built those independently. Many fantastic alternatives have sprung -up. Logrus would look like those, had it been re-designed with what we know -about structured logging in Go today. Check out, for example, -[Zerolog][zerolog], [Zap][zap], and [Apex][apex]. 
- -[zerolog]: https://github.com/rs/zerolog -[zap]: https://github.com/uber-go/zap -[apex]: https://github.com/apex/log - -**Seeing weird case-sensitive problems?** It's in the past been possible to -import Logrus as both upper- and lower-case. Due to the Go package environment, -this caused issues in the community and we needed a standard. Some environments -experienced problems with the upper-case variant, so the lower-case was decided. -Everything using `logrus` will need to use the lower-case: -`github.com/sirupsen/logrus`. Any package that isn't, should be changed. - -To fix Glide, see [these -comments](https://github.com/sirupsen/logrus/issues/553#issuecomment-306591437). -For an in-depth explanation of the casing issue, see [this -comment](https://github.com/sirupsen/logrus/issues/570#issuecomment-313933276). - -Nicely color-coded in development (when a TTY is attached, otherwise just -plain text): - -![Colored](http://i.imgur.com/PY7qMwd.png) - -With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash -or Splunk: - -```json -{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the -ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"} - -{"level":"warning","msg":"The group's number increased tremendously!", -"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"} - -{"animal":"walrus","level":"info","msg":"A giant walrus appears!", -"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"} - -{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.", -"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"} - -{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true, -"time":"2014-03-10 19:57:38.562543128 -0400 EDT"} -``` - -With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not -attached, the output is compatible with the -[logfmt](http://godoc.org/github.com/kr/logfmt) format: - -```text -time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8 -time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10 -time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true -time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4 -time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009 -time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true -``` -To ensure this behaviour even if a TTY is attached, set your formatter as follows: - -```go - log.SetFormatter(&log.TextFormatter{ - DisableColors: true, - FullTimestamp: true, - }) -``` - -#### Logging Method Name - -If you wish to add the calling method as a field, instruct the logger via: -```go -log.SetReportCaller(true) -``` -This adds the caller as 'method' like so: - -```json -{"animal":"penguin","level":"fatal","method":"github.com/sirupsen/arcticcreatures.migrate","msg":"a penguin swims by", -"time":"2014-03-10 19:57:38.562543129 -0400 EDT"} -``` - -```text -time="2015-03-26T01:27:38-04:00" level=fatal method=github.com/sirupsen/arcticcreatures.migrate msg="a penguin swims by" animal=penguin -``` -Note that this does add measurable overhead - the cost will depend on the version of Go, but is -between 20 and 40% in recent tests with 1.6 and 1.7. 
You can validate this in your -environment via benchmarks: -``` -go test -bench=.*CallerTracing -``` - - -#### Case-sensitivity - -The organization's name was changed to lower-case--and this will not be changed -back. If you are getting import conflicts due to case sensitivity, please use -the lower-case import: `github.com/sirupsen/logrus`. - -#### Example - -The simplest way to use Logrus is simply the package-level exported logger: - -```go -package main - -import ( - log "github.com/sirupsen/logrus" -) - -func main() { - log.WithFields(log.Fields{ - "animal": "walrus", - }).Info("A walrus appears") -} -``` - -Note that it's completely api-compatible with the stdlib logger, so you can -replace your `log` imports everywhere with `log "github.com/sirupsen/logrus"` -and you'll now have the flexibility of Logrus. You can customize it all you -want: - -```go -package main - -import ( - "os" - log "github.com/sirupsen/logrus" -) - -func init() { - // Log as JSON instead of the default ASCII formatter. - log.SetFormatter(&log.JSONFormatter{}) - - // Output to stdout instead of the default stderr - // Can be any io.Writer, see below for File example - log.SetOutput(os.Stdout) - - // Only log the warning severity or above. - log.SetLevel(log.WarnLevel) -} - -func main() { - log.WithFields(log.Fields{ - "animal": "walrus", - "size": 10, - }).Info("A group of walrus emerges from the ocean") - - log.WithFields(log.Fields{ - "omg": true, - "number": 122, - }).Warn("The group's number increased tremendously!") - - log.WithFields(log.Fields{ - "omg": true, - "number": 100, - }).Fatal("The ice breaks!") - - // A common pattern is to re-use fields between logging statements by re-using - // the logrus.Entry returned from WithFields() - contextLogger := log.WithFields(log.Fields{ - "common": "this is a common field", - "other": "I also should be logged always", - }) - - contextLogger.Info("I'll be logged with common and other field") - contextLogger.Info("Me too") -} -``` - -For more advanced usage such as logging to multiple locations from the same -application, you can also create an instance of the `logrus` Logger: - -```go -package main - -import ( - "os" - "github.com/sirupsen/logrus" -) - -// Create a new instance of the logger. You can have any number of instances. -var log = logrus.New() - -func main() { - // The API for setting attributes is a little different than the package level - // exported logger. See Godoc. - log.Out = os.Stdout - - // You could set this to any `io.Writer` such as a file - // file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666) - // if err == nil { - // log.Out = file - // } else { - // log.Info("Failed to log to file, using default stderr") - // } - - log.WithFields(logrus.Fields{ - "animal": "walrus", - "size": 10, - }).Info("A group of walrus emerges from the ocean") -} -``` - -#### Fields - -Logrus encourages careful, structured logging through logging fields instead of -long, unparseable error messages. For example, instead of: `log.Fatalf("Failed -to send event %s to topic %s with key %d")`, you should log the much more -discoverable: - -```go -log.WithFields(log.Fields{ - "event": event, - "topic": topic, - "key": key, -}).Fatal("Failed to send event") -``` - -We've found this API forces you to think about logging in a way that produces -much more useful logging messages. We've been in countless situations where just -a single added field to a log statement that was already there would've saved us -hours. 
The `WithFields` call is optional.
-
-In general, with Logrus, using any of the `printf`-family functions should be
-seen as a hint that you should add a field; however, you can still use the
-`printf`-family functions with Logrus.
-
-#### Default Fields
-
-Often it's helpful to have fields _always_ attached to log statements in an
-application or parts of one. For example, you may want to always log the
-`request_id` and `user_ip` in the context of a request. Instead of writing
-`log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})` on
-every line, you can create a `logrus.Entry` to pass around instead:
-
-```go
-requestLogger := log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})
-requestLogger.Info("something happened on that request") // will log request_id and user_ip
-requestLogger.Warn("something not great happened")
-```
-
-#### Hooks
-
-You can add hooks for logging levels. For example, to send errors to an exception
-tracking service on `Error`, `Fatal` and `Panic`, info to StatsD, or to log to
-multiple places simultaneously, e.g. syslog.
-
-Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
-`init`:
-
-```go
-import (
-	log "github.com/sirupsen/logrus"
-	"gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "airbrake"
-	logrus_syslog "github.com/sirupsen/logrus/hooks/syslog"
-	"log/syslog"
-)
-
-func init() {
-
-	// Use the Airbrake hook to report errors that have Error severity or above to
-	// an exception tracker. You can create custom hooks, see the Hooks section.
-	log.AddHook(airbrake.NewHook(123, "xyz", "production"))
-
-	hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
-	if err != nil {
-		log.Error("Unable to connect to local syslog daemon")
-	} else {
-		log.AddHook(hook)
-	}
-}
-```
-Note: the syslog hook also supports connecting to the local syslog daemon (e.g. "/dev/log", "/var/run/syslog" or "/var/run/log"). For details, please check the [syslog hook README](hooks/syslog/README.md).
-
-A list of currently known service hooks can be found on this wiki [page](https://github.com/sirupsen/logrus/wiki/Hooks)
-
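A custom hook only needs to satisfy logrus's two-method `Hook` interface, `Levels() []Level` and `Fire(*Entry) error`. A minimal sketch of a hook that counts error-level entries; the `errorCounter` type is illustrative, not part of logrus:

```go
package main

import (
	"fmt"

	log "github.com/sirupsen/logrus"
)

// errorCounter counts every entry logged at Error level or above.
type errorCounter struct {
	count int
}

// Levels reports which log levels the hook fires for.
func (h *errorCounter) Levels() []log.Level {
	return []log.Level{log.ErrorLevel, log.FatalLevel, log.PanicLevel}
}

// Fire runs once per matching entry, before the entry is written.
func (h *errorCounter) Fire(entry *log.Entry) error {
	h.count++
	return nil
}

func main() {
	counter := &errorCounter{}
	log.AddHook(counter)

	log.Error("something failed")
	fmt.Println("errors so far:", counter.count) // errors so far: 1
}
```

By default hooks are fired while the logger's mutex is held, so a plain counter like this needs no extra locking.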
-#### Level logging
-
-Logrus has seven logging levels: Trace, Debug, Info, Warning, Error, Fatal and Panic.
-
-```go
-log.Trace("Something very low level.")
-log.Debug("Useful debugging information.")
-log.Info("Something noteworthy happened!")
-log.Warn("You should probably take a look at this.")
-log.Error("Something failed but I'm not quitting.")
-// Calls os.Exit(1) after logging
-log.Fatal("Bye.")
-// Calls panic() after logging
-log.Panic("I'm bailing.")
-```
-
-You can set the logging level on a `Logger`; it will then only log entries with
-that severity or anything above it:
-
-```go
-// Will log anything that is info or above (warn, error, fatal, panic). Default.
-log.SetLevel(log.InfoLevel)
-```
-
-It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose
-environment if your application has that.
-
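When the desired level comes from a flag or environment variable, it can be parsed with `logrus.ParseLevel` (the case-insensitive parser mentioned in the changelog above). A short sketch; the `LOG_LEVEL` variable name is illustrative:

```go
package main

import (
	"os"

	log "github.com/sirupsen/logrus"
)

func main() {
	// LOG_LEVEL is an illustrative variable name, not something logrus reads itself.
	raw := os.Getenv("LOG_LEVEL")
	if raw == "" {
		raw = "info"
	}

	// ParseLevel is case-insensitive: "info", "INFO" and "Info" all work.
	level, err := log.ParseLevel(raw)
	if err != nil {
		log.Fatalf("invalid LOG_LEVEL %q: %v", raw, err)
	}
	log.SetLevel(level)

	log.Debug("only shown when LOG_LEVEL=debug or trace")
	log.Info("shown at the default level")
}
```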
-#### Entries
-
-Besides the fields added with `WithField` or `WithFields`, some fields are
-automatically added to all logging events:
-
-1. `time`. The timestamp when the entry was created.
-2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after
-   the `WithFields` call. E.g. `Failed to send event.`
-3. `level`. The logging level. E.g. `info`.
-
-#### Environments
-
-Logrus has no notion of environment.
-
-If you wish for hooks and formatters to only be used in specific environments,
-you should handle that yourself. For example, if your application has a global
-variable `Environment`, which is a string representation of the environment, you
-could do:
-
-```go
-import (
-	log "github.com/sirupsen/logrus"
-)
-
-func init() {
-	// do something here to set environment depending on an environment variable
-	// or command-line flag
-	if Environment == "production" {
-		log.SetFormatter(&log.JSONFormatter{})
-	} else {
-		// The TextFormatter is default, you don't actually have to do this.
-		log.SetFormatter(&log.TextFormatter{})
-	}
-}
-```
-
-This configuration is how `logrus` was intended to be used, but JSON in
-production is mostly only useful if you do log aggregation with tools like
-Splunk or Logstash.
-
-#### Formatters
-
-The built-in logging formatters are:
-
-* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise
-  without colors.
-  * *Note:* to force colored output when there is no TTY, set the `ForceColors`
-    field to `true`. To force no colored output even if there is a TTY, set the
-    `DisableColors` field to `true`. For Windows, see
-    [github.com/mattn/go-colorable](https://github.com/mattn/go-colorable).
-  * When colors are enabled, levels are truncated to 4 characters by default. To disable
-    truncation, set the `DisableLevelTruncation` field to `true`.
-  * When outputting to a TTY, it's often helpful to visually scan down a column where all the levels are the same width. Setting the `PadLevelText` field to `true` enables this behavior, by adding padding to the level text.
-  * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter).
-* `logrus.JSONFormatter`. Logs fields as JSON.
-  * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter).
-
-Third party logging formatters:
-
-* [`FluentdFormatter`](https://github.com/joonix/log). Formats entries that can be parsed by Kubernetes and Google Container Engine.
-* [`GELF`](https://github.com/fabienm/go-logrus-formatters). Formats entries so they comply with Graylog's [GELF 1.1 specification](http://docs.graylog.org/en/2.4/pages/gelf.html).
-* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events.
-* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout.
-* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the Power of Zalgo.
-* [`nested-logrus-formatter`](https://github.com/antonfisher/nested-logrus-formatter). Converts logrus fields to a nested structure.
-* [`powerful-logrus-formatter`](https://github.com/zput/zxcTool). Gets the file name, line number and latest function name when printing logs; saves logs to files.
-* [`caption-json-formatter`](https://github.com/nolleh/caption_json_formatter). logrus's message json formatter with a human-readable caption added.
-
-You can define your formatter by implementing the `Formatter` interface,
-requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
-`Fields` type (`map[string]interface{}`) with all your fields as well as the
-default ones (see the Entries section above):
-
-```go
-type MyJSONFormatter struct {
-}
-
-func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) {
-	// Note this doesn't include Time, Level and Message which are available on
-	// the Entry. Consult `godoc` on information about those fields or read the
-	// source of the official loggers.
-	serialized, err := json.Marshal(entry.Data)
-	if err != nil {
-		return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
-	}
-	return append(serialized, '\n'), nil
-}
-
-func init() {
-	log.SetFormatter(new(MyJSONFormatter))
-}
-```
-
-#### Logger as an `io.Writer`
-
-Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it.
-
-```go
-w := logger.Writer()
-defer w.Close()
-
-srv := http.Server{
-	// create a stdlib log.Logger that writes to
-	// logrus.Logger.
-	ErrorLog: log.New(w, "", 0),
-}
-```
-
-Each line written to that writer will be printed the usual way, using formatters
-and hooks. The level for those entries is `info`.
-
-This means that we can override the standard library logger easily:
-
-```go
-logger := logrus.New()
-logger.Formatter = &logrus.JSONFormatter{}
-
-// Use logrus for standard log output
-// Note that `log` here references stdlib's log
-// Not logrus imported under the name `log`.
-log.SetOutput(logger.Writer())
-```
-
-#### Rotation
-
-Log rotation is not provided with Logrus. Log rotation should be done by an
-external program (like `logrotate(8)`) that can compress and delete old log
-entries. It should not be a feature of the application-level logger.
-
-#### Tools
-
-| Tool | Description |
-| ---- | ----------- |
-|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus Mate is a tool for Logrus that manages loggers: you can initialize a logger's level, hook and formatter from a config file, and the logger will be generated with different configs for different environments.|
-|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|A helper around Logrus that wraps it with spf13/Viper to load configuration with fangs! It simplifies Logrus configuration using some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) |
-
-#### Testing
-
-Logrus has a built-in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides:
-
-* decorators for an existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just add the `test` hook
-* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any):
-
-```go
-import (
-	"testing"
-
-	"github.com/sirupsen/logrus"
-	"github.com/sirupsen/logrus/hooks/test"
-	"github.com/stretchr/testify/assert"
-)
-
-func TestSomething(t *testing.T) {
-	logger, hook := test.NewNullLogger()
-	logger.Error("Helloerror")
-
-	assert.Equal(t, 1, len(hook.Entries))
-	assert.Equal(t, logrus.ErrorLevel, hook.LastEntry().Level)
-	assert.Equal(t, "Helloerror", hook.LastEntry().Message)
-
-	hook.Reset()
-	assert.Nil(t, hook.LastEntry())
-}
-```
-
-#### Fatal handlers
-
-Logrus can register one or more functions that will be called when any `fatal`
-level message is logged. The registered handlers will be executed before
-logrus performs an `os.Exit(1)`. This behavior may be helpful if callers need
-to gracefully shut down. Unlike a `panic("Something went wrong...")` call, which can be intercepted with a deferred `recover`, a call to `os.Exit(1)` cannot be intercepted.
-
-```
-...
-handler := func() {
-	// gracefully shut down something...
-}
-logrus.RegisterExitHandler(handler)
-...
-```
-
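The handler registration above has an ordering subtlety spelled out by the vendored alt_exit.go removed further down in this diff: `RegisterExitHandler` appends to the handler list while `DeferExitHandler` prepends, and `logrus.Exit` runs the list in order. A small sketch; the handler bodies are illustrative:

```go
package main

import (
	"fmt"

	log "github.com/sirupsen/logrus"
)

func main() {
	// Appended: runs in registration order.
	log.RegisterExitHandler(func() { fmt.Println("flush buffers") })
	// Prepended: runs before previously registered handlers, like defer.
	log.DeferExitHandler(func() { fmt.Println("close connections") })

	// Exit runs every handler, then calls os.Exit(0).
	// A Fatal log would do the same with exit code 1.
	log.Exit(0)
	// Output:
	// close connections
	// flush buffers
}
```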
-#### Thread safety
-
-By default, the Logger is protected by a mutex for concurrent writes. The mutex is held when calling hooks and writing logs.
-If you are sure such locking is not needed, you can call logger.SetNoLock() to disable the locking.
-
-Situations when locking is not needed include:
-
-* You have no hooks registered, or hook calling is already thread-safe.
-
-* Writing to logger.Out is already thread-safe, for example:
-
-  1) logger.Out is protected by locks.
-
-  2) logger.Out is an os.File handler opened with the `O_APPEND` flag, and every write is smaller than 4k. (This allows multi-thread/multi-process writing.)
-
-  (Refer to http://www.notthewizard.com/2014/06/17/are-files-appends-really-atomic/)
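As a rough sketch of the `O_APPEND` pattern from item 2 above; the file path and permissions are illustrative:

```go
package main

import (
	"os"

	log "github.com/sirupsen/logrus"
)

func main() {
	// O_APPEND makes each small write an atomic append on most platforms,
	// which is what makes the lock-free, multi-process pattern above
	// workable for writes smaller than the 4k threshold noted above.
	f, err := os.OpenFile("/var/log/app.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0o644)
	if err != nil {
		log.Fatalf("open log file: %v", err)
	}
	defer f.Close()

	logger := log.New()
	logger.SetOutput(f)
	logger.SetNoLock() // safe only under the conditions listed above

	logger.Info("hello from an append-only file")
}
```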
diff --git a/vendor/github.com/sirupsen/logrus/alt_exit.go b/vendor/github.com/sirupsen/logrus/alt_exit.go
deleted file mode 100644
index 8fd189e1..00000000
--- a/vendor/github.com/sirupsen/logrus/alt_exit.go
+++ /dev/null
@@ -1,76 +0,0 @@
-package logrus
-
-// The following code was sourced and modified from the
-// https://github.com/tebeka/atexit package governed by the following license:
-//
-// Copyright (c) 2012 Miki Tebeka <miki.tebeka@gmail.com>.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy of
-// this software and associated documentation files (the "Software"), to deal in
-// the Software without restriction, including without limitation the rights to
-// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-// the Software, and to permit persons to whom the Software is furnished to do so,
-// subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in all
-// copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-import (
-	"fmt"
-	"os"
-)
-
-var handlers = []func(){}
-
-func runHandler(handler func()) {
-	defer func() {
-		if err := recover(); err != nil {
-			fmt.Fprintln(os.Stderr, "Error: Logrus exit handler error:", err)
-		}
-	}()
-
-	handler()
-}
-
-func runHandlers() {
-	for _, handler := range handlers {
-		runHandler(handler)
-	}
-}
-
-// Exit runs all the Logrus atexit handlers and then terminates the program using os.Exit(code)
-func Exit(code int) {
-	runHandlers()
-	os.Exit(code)
-}
-
-// RegisterExitHandler appends a Logrus Exit handler to the list of handlers;
-// call logrus.Exit to invoke all handlers. The handlers will also be invoked when
-// any Fatal log entry is made.
-//
-// This method is useful when a caller wishes to use logrus to log a fatal
-// message but also needs to gracefully shut down. An example use case could be
-// closing database connections, or sending an alert that the application is
-// closing.
-func RegisterExitHandler(handler func()) {
-	handlers = append(handlers, handler)
-}
-
-// DeferExitHandler prepends a Logrus Exit handler to the list of handlers;
-// call logrus.Exit to invoke all handlers. The handlers will also be invoked when
-// any Fatal log entry is made.
-//
-// This method is useful when a caller wishes to use logrus to log a fatal
-// message but also needs to gracefully shut down. An example use case could be
-// closing database connections, or sending an alert that the application is
-// closing.
-func DeferExitHandler(handler func()) {
-	handlers = append([]func(){handler}, handlers...)
-}
diff --git a/vendor/github.com/sirupsen/logrus/appveyor.yml b/vendor/github.com/sirupsen/logrus/appveyor.yml
deleted file mode 100644
index df9d65c3..00000000
--- a/vendor/github.com/sirupsen/logrus/appveyor.yml
+++ /dev/null
@@ -1,14 +0,0 @@
-version: "{build}"
-platform: x64
-clone_folder: c:\gopath\src\github.com\sirupsen\logrus
-environment:
-  GOPATH: c:\gopath
-branches:
-  only:
-    - master
-install:
-  - set PATH=%GOPATH%\bin;c:\go\bin;%PATH%
-  - go version
-build_script:
-  - go get -t
-  - go test
diff --git a/vendor/github.com/sirupsen/logrus/buffer_pool.go b/vendor/github.com/sirupsen/logrus/buffer_pool.go
deleted file mode 100644
index 4545dec0..00000000
--- a/vendor/github.com/sirupsen/logrus/buffer_pool.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package logrus
-
-import (
-	"bytes"
-	"sync"
-)
-
-var (
-	bufferPool BufferPool
-)
-
-type BufferPool interface {
-	Put(*bytes.Buffer)
-	Get() *bytes.Buffer
-}
-
-type defaultPool struct {
-	pool *sync.Pool
-}
-
-func (p *defaultPool) Put(buf *bytes.Buffer) {
-	p.pool.Put(buf)
-}
-
-func (p *defaultPool) Get() *bytes.Buffer {
-	return p.pool.Get().(*bytes.Buffer)
-}
-
-func getBuffer() *bytes.Buffer {
-	return bufferPool.Get()
-}
-
-func putBuffer(buf *bytes.Buffer) {
-	buf.Reset()
-	bufferPool.Put(buf)
-}
-
-// SetBufferPool allows replacing the default logrus buffer pool
-// to better meet the specific needs of an application.
-func SetBufferPool(bp BufferPool) {
-	bufferPool = bp
-}
-
-func init() {
-	SetBufferPool(&defaultPool{
-		pool: &sync.Pool{
-			New: func() interface{} {
-				return new(bytes.Buffer)
-			},
-		},
-	})
-}
diff --git a/vendor/github.com/sirupsen/logrus/doc.go b/vendor/github.com/sirupsen/logrus/doc.go
deleted file mode 100644
index da67aba0..00000000
--- a/vendor/github.com/sirupsen/logrus/doc.go
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
-Package logrus is a structured logger for Go, completely API compatible with the standard library logger.
-
-
-The simplest way to use Logrus is simply the package-level exported logger:
-
-  package main
-
-  import (
-    log "github.com/sirupsen/logrus"
-  )
-
-  func main() {
-    log.WithFields(log.Fields{
-      "animal": "walrus",
-      "number": 1,
-      "size":   10,
-    }).Info("A walrus appears")
-  }
-
-Output:
-  time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10
-
-For a full guide visit https://github.com/sirupsen/logrus
-*/
-package logrus
diff --git a/vendor/github.com/sirupsen/logrus/entry.go b/vendor/github.com/sirupsen/logrus/entry.go
deleted file mode 100644
index 5a5cbfe7..00000000
--- a/vendor/github.com/sirupsen/logrus/entry.go
+++ /dev/null
@@ -1,422 +0,0 @@
-package logrus
-
-import (
-	"bytes"
-	"context"
-	"fmt"
-	"os"
-	"reflect"
-	"runtime"
-	"strings"
-	"sync"
-	"time"
-)
-
-var (
-
-	// qualified package name, cached at first use
-	logrusPackage string
-
-	// Positions in the call stack when tracing to report the calling method
-	minimumCallerDepth int
-
-	// Used for caller information initialisation
-	callerInitOnce sync.Once
-)
-
-const (
-	maximumCallerDepth int = 25
-	knownLogrusFrames  int = 4
-)
-
-func init() {
-	// start at the bottom of the stack before the package-name cache is primed
-	minimumCallerDepth = 1
-}
-
-// ErrorKey defines the key used when adding errors using WithError.
-var ErrorKey = "error"
-
-// An entry is the final or intermediate Logrus logging entry. It contains all
-// the fields passed with WithField{,s}.
It's finally logged when Trace, Debug, -// Info, Warn, Error, Fatal or Panic is called on it. These objects can be -// reused and passed around as much as you wish to avoid field duplication. -type Entry struct { - Logger *Logger - - // Contains all the fields set by the user. - Data Fields - - // Time at which the log entry was created - Time time.Time - - // Level the log entry was logged at: Trace, Debug, Info, Warn, Error, Fatal or Panic - // This field will be set on entry firing and the value will be equal to the one in Logger struct field. - Level Level - - // Calling method, with package name - Caller *runtime.Frame - - // Message passed to Trace, Debug, Info, Warn, Error, Fatal or Panic - Message string - - // When formatter is called in entry.log(), a Buffer may be set to entry - Buffer *bytes.Buffer - - // Contains the context set by the user. Useful for hook processing etc. - Context context.Context - - // err may contain a field formatting error - err string -} - -func NewEntry(logger *Logger) *Entry { - return &Entry{ - Logger: logger, - // Default is three fields, plus one optional. Give a little extra room. - Data: make(Fields, 6), - } -} - -// Returns the bytes representation of this entry from the formatter. -func (entry *Entry) Bytes() ([]byte, error) { - return entry.Logger.Formatter.Format(entry) -} - -// Returns the string representation from the reader and ultimately the -// formatter. -func (entry *Entry) String() (string, error) { - serialized, err := entry.Bytes() - if err != nil { - return "", err - } - str := string(serialized) - return str, nil -} - -// Add an error as single field (using the key defined in ErrorKey) to the Entry. -func (entry *Entry) WithError(err error) *Entry { - return entry.WithField(ErrorKey, err) -} - -// Add a context to the Entry. -func (entry *Entry) WithContext(ctx context.Context) *Entry { - dataCopy := make(Fields, len(entry.Data)) - for k, v := range entry.Data { - dataCopy[k] = v - } - return &Entry{Logger: entry.Logger, Data: dataCopy, Time: entry.Time, err: entry.err, Context: ctx} -} - -// Add a single field to the Entry. -func (entry *Entry) WithField(key string, value interface{}) *Entry { - return entry.WithFields(Fields{key: value}) -} - -// Add a map of fields to the Entry. -func (entry *Entry) WithFields(fields Fields) *Entry { - data := make(Fields, len(entry.Data)+len(fields)) - for k, v := range entry.Data { - data[k] = v - } - fieldErr := entry.err - for k, v := range fields { - isErrField := false - if t := reflect.TypeOf(v); t != nil { - switch t.Kind() { - case reflect.Func: - isErrField = true - case reflect.Ptr: - isErrField = t.Elem().Kind() == reflect.Func - } - } - if isErrField { - tmp := fmt.Sprintf("can not add field %q", k) - if fieldErr != "" { - fieldErr = entry.err + ", " + tmp - } else { - fieldErr = tmp - } - } else { - data[k] = v - } - } - return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, err: fieldErr, Context: entry.Context} -} - -// Overrides the time of the Entry. -func (entry *Entry) WithTime(t time.Time) *Entry { - dataCopy := make(Fields, len(entry.Data)) - for k, v := range entry.Data { - dataCopy[k] = v - } - return &Entry{Logger: entry.Logger, Data: dataCopy, Time: t, err: entry.err, Context: entry.Context} -} - -// getPackageName reduces a fully qualified function name to the package name -// There really ought to be to be a better way... 
-func getPackageName(f string) string { - for { - lastPeriod := strings.LastIndex(f, ".") - lastSlash := strings.LastIndex(f, "/") - if lastPeriod > lastSlash { - f = f[:lastPeriod] - } else { - break - } - } - - return f -} - -// getCaller retrieves the name of the first non-logrus calling function -func getCaller() *runtime.Frame { - // cache this package's fully-qualified name - callerInitOnce.Do(func() { - pcs := make([]uintptr, maximumCallerDepth) - _ = runtime.Callers(0, pcs) - - // dynamic get the package name and the minimum caller depth - for i := 0; i < maximumCallerDepth; i++ { - funcName := runtime.FuncForPC(pcs[i]).Name() - if strings.Contains(funcName, "getCaller") { - logrusPackage = getPackageName(funcName) - break - } - } - - minimumCallerDepth = knownLogrusFrames - }) - - // Restrict the lookback frames to avoid runaway lookups - pcs := make([]uintptr, maximumCallerDepth) - depth := runtime.Callers(minimumCallerDepth, pcs) - frames := runtime.CallersFrames(pcs[:depth]) - - for f, again := frames.Next(); again; f, again = frames.Next() { - pkg := getPackageName(f.Function) - - // If the caller isn't part of this package, we're done - if pkg != logrusPackage { - return &f //nolint:scopelint - } - } - - // if we got here, we failed to find the caller's context - return nil -} - -func (entry Entry) HasCaller() (has bool) { - return entry.Logger != nil && - entry.Logger.ReportCaller && - entry.Caller != nil -} - -// This function is not declared with a pointer value because otherwise -// race conditions will occur when using multiple goroutines -func (entry Entry) log(level Level, msg string) { - var buffer *bytes.Buffer - - // Default to now, but allow users to override if they want. - // - // We don't have to worry about polluting future calls to Entry#log() - // with this assignment because this function is declared with a - // non-pointer receiver. - if entry.Time.IsZero() { - entry.Time = time.Now() - } - - entry.Level = level - entry.Message = msg - entry.Logger.mu.Lock() - if entry.Logger.ReportCaller { - entry.Caller = getCaller() - } - entry.Logger.mu.Unlock() - - entry.fireHooks() - - buffer = getBuffer() - defer func() { - entry.Buffer = nil - putBuffer(buffer) - }() - buffer.Reset() - entry.Buffer = buffer - - entry.write() - - entry.Buffer = nil - - // To avoid Entry#log() returning a value that only would make sense for - // panic() to use in Entry#Panic(), we avoid the allocation by checking - // directly here. - if level <= PanicLevel { - panic(&entry) - } -} - -func (entry *Entry) fireHooks() { - entry.Logger.mu.Lock() - defer entry.Logger.mu.Unlock() - err := entry.Logger.Hooks.Fire(entry.Level, entry) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err) - } -} - -func (entry *Entry) write() { - entry.Logger.mu.Lock() - defer entry.Logger.mu.Unlock() - serialized, err := entry.Logger.Formatter.Format(entry) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) - return - } - if _, err = entry.Logger.Out.Write(serialized); err != nil { - fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) - } -} - -func (entry *Entry) Log(level Level, args ...interface{}) { - if entry.Logger.IsLevelEnabled(level) { - entry.log(level, fmt.Sprint(args...)) - } -} - -func (entry *Entry) Trace(args ...interface{}) { - entry.Log(TraceLevel, args...) -} - -func (entry *Entry) Debug(args ...interface{}) { - entry.Log(DebugLevel, args...) -} - -func (entry *Entry) Print(args ...interface{}) { - entry.Info(args...) 
-} - -func (entry *Entry) Info(args ...interface{}) { - entry.Log(InfoLevel, args...) -} - -func (entry *Entry) Warn(args ...interface{}) { - entry.Log(WarnLevel, args...) -} - -func (entry *Entry) Warning(args ...interface{}) { - entry.Warn(args...) -} - -func (entry *Entry) Error(args ...interface{}) { - entry.Log(ErrorLevel, args...) -} - -func (entry *Entry) Fatal(args ...interface{}) { - entry.Log(FatalLevel, args...) - entry.Logger.Exit(1) -} - -func (entry *Entry) Panic(args ...interface{}) { - entry.Log(PanicLevel, args...) - panic(fmt.Sprint(args...)) -} - -// Entry Printf family functions - -func (entry *Entry) Logf(level Level, format string, args ...interface{}) { - if entry.Logger.IsLevelEnabled(level) { - entry.Log(level, fmt.Sprintf(format, args...)) - } -} - -func (entry *Entry) Tracef(format string, args ...interface{}) { - entry.Logf(TraceLevel, format, args...) -} - -func (entry *Entry) Debugf(format string, args ...interface{}) { - entry.Logf(DebugLevel, format, args...) -} - -func (entry *Entry) Infof(format string, args ...interface{}) { - entry.Logf(InfoLevel, format, args...) -} - -func (entry *Entry) Printf(format string, args ...interface{}) { - entry.Infof(format, args...) -} - -func (entry *Entry) Warnf(format string, args ...interface{}) { - entry.Logf(WarnLevel, format, args...) -} - -func (entry *Entry) Warningf(format string, args ...interface{}) { - entry.Warnf(format, args...) -} - -func (entry *Entry) Errorf(format string, args ...interface{}) { - entry.Logf(ErrorLevel, format, args...) -} - -func (entry *Entry) Fatalf(format string, args ...interface{}) { - entry.Logf(FatalLevel, format, args...) - entry.Logger.Exit(1) -} - -func (entry *Entry) Panicf(format string, args ...interface{}) { - entry.Logf(PanicLevel, format, args...) -} - -// Entry Println family functions - -func (entry *Entry) Logln(level Level, args ...interface{}) { - if entry.Logger.IsLevelEnabled(level) { - entry.Log(level, entry.sprintlnn(args...)) - } -} - -func (entry *Entry) Traceln(args ...interface{}) { - entry.Logln(TraceLevel, args...) -} - -func (entry *Entry) Debugln(args ...interface{}) { - entry.Logln(DebugLevel, args...) -} - -func (entry *Entry) Infoln(args ...interface{}) { - entry.Logln(InfoLevel, args...) -} - -func (entry *Entry) Println(args ...interface{}) { - entry.Infoln(args...) -} - -func (entry *Entry) Warnln(args ...interface{}) { - entry.Logln(WarnLevel, args...) -} - -func (entry *Entry) Warningln(args ...interface{}) { - entry.Warnln(args...) -} - -func (entry *Entry) Errorln(args ...interface{}) { - entry.Logln(ErrorLevel, args...) -} - -func (entry *Entry) Fatalln(args ...interface{}) { - entry.Logln(FatalLevel, args...) - entry.Logger.Exit(1) -} - -func (entry *Entry) Panicln(args ...interface{}) { - entry.Logln(PanicLevel, args...) -} - -// Sprintlnn => Sprint no newline. This is to get the behavior of how -// fmt.Sprintln where spaces are always added between operands, regardless of -// their type. Instead of vendoring the Sprintln implementation to spare a -// string allocation, we do the simplest thing. -func (entry *Entry) sprintlnn(args ...interface{}) string { - msg := fmt.Sprintln(args...) 
- return msg[:len(msg)-1] -} diff --git a/vendor/github.com/sirupsen/logrus/exported.go b/vendor/github.com/sirupsen/logrus/exported.go deleted file mode 100644 index 017c30ce..00000000 --- a/vendor/github.com/sirupsen/logrus/exported.go +++ /dev/null @@ -1,270 +0,0 @@ -package logrus - -import ( - "context" - "io" - "time" -) - -var ( - // std is the name of the standard logger in stdlib `log` - std = New() -) - -func StandardLogger() *Logger { - return std -} - -// SetOutput sets the standard logger output. -func SetOutput(out io.Writer) { - std.SetOutput(out) -} - -// SetFormatter sets the standard logger formatter. -func SetFormatter(formatter Formatter) { - std.SetFormatter(formatter) -} - -// SetReportCaller sets whether the standard logger will include the calling -// method as a field. -func SetReportCaller(include bool) { - std.SetReportCaller(include) -} - -// SetLevel sets the standard logger level. -func SetLevel(level Level) { - std.SetLevel(level) -} - -// GetLevel returns the standard logger level. -func GetLevel() Level { - return std.GetLevel() -} - -// IsLevelEnabled checks if the log level of the standard logger is greater than the level param -func IsLevelEnabled(level Level) bool { - return std.IsLevelEnabled(level) -} - -// AddHook adds a hook to the standard logger hooks. -func AddHook(hook Hook) { - std.AddHook(hook) -} - -// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key. -func WithError(err error) *Entry { - return std.WithField(ErrorKey, err) -} - -// WithContext creates an entry from the standard logger and adds a context to it. -func WithContext(ctx context.Context) *Entry { - return std.WithContext(ctx) -} - -// WithField creates an entry from the standard logger and adds a field to -// it. If you want multiple fields, use `WithFields`. -// -// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal -// or Panic on the Entry it returns. -func WithField(key string, value interface{}) *Entry { - return std.WithField(key, value) -} - -// WithFields creates an entry from the standard logger and adds multiple -// fields to it. This is simply a helper for `WithField`, invoking it -// once for each field. -// -// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal -// or Panic on the Entry it returns. -func WithFields(fields Fields) *Entry { - return std.WithFields(fields) -} - -// WithTime creates an entry from the standard logger and overrides the time of -// logs generated with it. -// -// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal -// or Panic on the Entry it returns. -func WithTime(t time.Time) *Entry { - return std.WithTime(t) -} - -// Trace logs a message at level Trace on the standard logger. -func Trace(args ...interface{}) { - std.Trace(args...) -} - -// Debug logs a message at level Debug on the standard logger. -func Debug(args ...interface{}) { - std.Debug(args...) -} - -// Print logs a message at level Info on the standard logger. -func Print(args ...interface{}) { - std.Print(args...) -} - -// Info logs a message at level Info on the standard logger. -func Info(args ...interface{}) { - std.Info(args...) -} - -// Warn logs a message at level Warn on the standard logger. -func Warn(args ...interface{}) { - std.Warn(args...) -} - -// Warning logs a message at level Warn on the standard logger. -func Warning(args ...interface{}) { - std.Warning(args...) -} - -// Error logs a message at level Error on the standard logger. 
-func Error(args ...interface{}) { - std.Error(args...) -} - -// Panic logs a message at level Panic on the standard logger. -func Panic(args ...interface{}) { - std.Panic(args...) -} - -// Fatal logs a message at level Fatal on the standard logger then the process will exit with status set to 1. -func Fatal(args ...interface{}) { - std.Fatal(args...) -} - -// TraceFn logs a message from a func at level Trace on the standard logger. -func TraceFn(fn LogFunction) { - std.TraceFn(fn) -} - -// DebugFn logs a message from a func at level Debug on the standard logger. -func DebugFn(fn LogFunction) { - std.DebugFn(fn) -} - -// PrintFn logs a message from a func at level Info on the standard logger. -func PrintFn(fn LogFunction) { - std.PrintFn(fn) -} - -// InfoFn logs a message from a func at level Info on the standard logger. -func InfoFn(fn LogFunction) { - std.InfoFn(fn) -} - -// WarnFn logs a message from a func at level Warn on the standard logger. -func WarnFn(fn LogFunction) { - std.WarnFn(fn) -} - -// WarningFn logs a message from a func at level Warn on the standard logger. -func WarningFn(fn LogFunction) { - std.WarningFn(fn) -} - -// ErrorFn logs a message from a func at level Error on the standard logger. -func ErrorFn(fn LogFunction) { - std.ErrorFn(fn) -} - -// PanicFn logs a message from a func at level Panic on the standard logger. -func PanicFn(fn LogFunction) { - std.PanicFn(fn) -} - -// FatalFn logs a message from a func at level Fatal on the standard logger then the process will exit with status set to 1. -func FatalFn(fn LogFunction) { - std.FatalFn(fn) -} - -// Tracef logs a message at level Trace on the standard logger. -func Tracef(format string, args ...interface{}) { - std.Tracef(format, args...) -} - -// Debugf logs a message at level Debug on the standard logger. -func Debugf(format string, args ...interface{}) { - std.Debugf(format, args...) -} - -// Printf logs a message at level Info on the standard logger. -func Printf(format string, args ...interface{}) { - std.Printf(format, args...) -} - -// Infof logs a message at level Info on the standard logger. -func Infof(format string, args ...interface{}) { - std.Infof(format, args...) -} - -// Warnf logs a message at level Warn on the standard logger. -func Warnf(format string, args ...interface{}) { - std.Warnf(format, args...) -} - -// Warningf logs a message at level Warn on the standard logger. -func Warningf(format string, args ...interface{}) { - std.Warningf(format, args...) -} - -// Errorf logs a message at level Error on the standard logger. -func Errorf(format string, args ...interface{}) { - std.Errorf(format, args...) -} - -// Panicf logs a message at level Panic on the standard logger. -func Panicf(format string, args ...interface{}) { - std.Panicf(format, args...) -} - -// Fatalf logs a message at level Fatal on the standard logger then the process will exit with status set to 1. -func Fatalf(format string, args ...interface{}) { - std.Fatalf(format, args...) -} - -// Traceln logs a message at level Trace on the standard logger. -func Traceln(args ...interface{}) { - std.Traceln(args...) -} - -// Debugln logs a message at level Debug on the standard logger. -func Debugln(args ...interface{}) { - std.Debugln(args...) -} - -// Println logs a message at level Info on the standard logger. -func Println(args ...interface{}) { - std.Println(args...) -} - -// Infoln logs a message at level Info on the standard logger. -func Infoln(args ...interface{}) { - std.Infoln(args...) 
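The exported.go helpers above all delegate to the shared `std` logger created by New(), which is what made the zero-setup, package-level call style work. A minimal sketch of that consumption pattern, illustrative only and using just the APIs visible in the deleted source:

    package main

    import (
        "errors"
        "os"

        log "github.com/sirupsen/logrus"
    )

    func main() {
        // Configure the shared standard logger that exported.go wraps.
        log.SetOutput(os.Stdout)
        log.SetLevel(log.DebugLevel)

        // WithFields returns an *Entry; nothing is emitted until a level
        // method such as Info is called on it, per the doc comments above.
        log.WithFields(log.Fields{"app": "candy", "addr": ":443"}).Info("starting")

        // WithError stores the error on the entry under ErrorKey.
        log.WithError(errors.New("dial timeout")).Warn("retrying")
    }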
-} - -// Warnln logs a message at level Warn on the standard logger. -func Warnln(args ...interface{}) { - std.Warnln(args...) -} - -// Warningln logs a message at level Warn on the standard logger. -func Warningln(args ...interface{}) { - std.Warningln(args...) -} - -// Errorln logs a message at level Error on the standard logger. -func Errorln(args ...interface{}) { - std.Errorln(args...) -} - -// Panicln logs a message at level Panic on the standard logger. -func Panicln(args ...interface{}) { - std.Panicln(args...) -} - -// Fatalln logs a message at level Fatal on the standard logger then the process will exit with status set to 1. -func Fatalln(args ...interface{}) { - std.Fatalln(args...) -} diff --git a/vendor/github.com/sirupsen/logrus/formatter.go b/vendor/github.com/sirupsen/logrus/formatter.go deleted file mode 100644 index 40888377..00000000 --- a/vendor/github.com/sirupsen/logrus/formatter.go +++ /dev/null @@ -1,78 +0,0 @@ -package logrus - -import "time" - -// Default key names for the default fields -const ( - defaultTimestampFormat = time.RFC3339 - FieldKeyMsg = "msg" - FieldKeyLevel = "level" - FieldKeyTime = "time" - FieldKeyLogrusError = "logrus_error" - FieldKeyFunc = "func" - FieldKeyFile = "file" -) - -// The Formatter interface is used to implement a custom Formatter. It takes an -// `Entry`. It exposes all the fields, including the default ones: -// -// * `entry.Data["msg"]`. The message passed from Info, Warn, Error .. -// * `entry.Data["time"]`. The timestamp. -// * `entry.Data["level"]. The level the entry was logged at. -// -// Any additional fields added with `WithField` or `WithFields` are also in -// `entry.Data`. Format is expected to return an array of bytes which are then -// logged to `logger.Out`. -type Formatter interface { - Format(*Entry) ([]byte, error) -} - -// This is to not silently overwrite `time`, `msg`, `func` and `level` fields when -// dumping it. If this code wasn't there doing: -// -// logrus.WithField("level", 1).Info("hello") -// -// Would just silently drop the user provided level. Instead with this code -// it'll logged as: -// -// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."} -// -// It's not exported because it's still using Data in an opinionated way. It's to -// avoid code duplication between the two default formatters. -func prefixFieldClashes(data Fields, fieldMap FieldMap, reportCaller bool) { - timeKey := fieldMap.resolve(FieldKeyTime) - if t, ok := data[timeKey]; ok { - data["fields."+timeKey] = t - delete(data, timeKey) - } - - msgKey := fieldMap.resolve(FieldKeyMsg) - if m, ok := data[msgKey]; ok { - data["fields."+msgKey] = m - delete(data, msgKey) - } - - levelKey := fieldMap.resolve(FieldKeyLevel) - if l, ok := data[levelKey]; ok { - data["fields."+levelKey] = l - delete(data, levelKey) - } - - logrusErrKey := fieldMap.resolve(FieldKeyLogrusError) - if l, ok := data[logrusErrKey]; ok { - data["fields."+logrusErrKey] = l - delete(data, logrusErrKey) - } - - // If reportCaller is not set, 'func' will not conflict. 
- if reportCaller { - funcKey := fieldMap.resolve(FieldKeyFunc) - if l, ok := data[funcKey]; ok { - data["fields."+funcKey] = l - } - fileKey := fieldMap.resolve(FieldKeyFile) - if l, ok := data[fileKey]; ok { - data["fields."+fileKey] = l - } - } -} diff --git a/vendor/github.com/sirupsen/logrus/hooks.go b/vendor/github.com/sirupsen/logrus/hooks.go deleted file mode 100644 index 3f151cdc..00000000 --- a/vendor/github.com/sirupsen/logrus/hooks.go +++ /dev/null @@ -1,34 +0,0 @@ -package logrus - -// A hook to be fired when logging on the logging levels returned from -// `Levels()` on your implementation of the interface. Note that this is not -// fired in a goroutine or a channel with workers, you should handle such -// functionality yourself if your call is non-blocking and you don't wish for -// the logging calls for levels returned from `Levels()` to block. -type Hook interface { - Levels() []Level - Fire(*Entry) error -} - -// Internal type for storing the hooks on a logger instance. -type LevelHooks map[Level][]Hook - -// Add a hook to an instance of logger. This is called with -// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface. -func (hooks LevelHooks) Add(hook Hook) { - for _, level := range hook.Levels() { - hooks[level] = append(hooks[level], hook) - } -} - -// Fire all the hooks for the passed level. Used by `entry.log` to fire -// appropriate hooks for a log entry. -func (hooks LevelHooks) Fire(level Level, entry *Entry) error { - for _, hook := range hooks[level] { - if err := hook.Fire(entry); err != nil { - return err - } - } - - return nil -} diff --git a/vendor/github.com/sirupsen/logrus/json_formatter.go b/vendor/github.com/sirupsen/logrus/json_formatter.go deleted file mode 100644 index ba7f2371..00000000 --- a/vendor/github.com/sirupsen/logrus/json_formatter.go +++ /dev/null @@ -1,125 +0,0 @@ -package logrus - -import ( - "bytes" - "encoding/json" - "fmt" - "runtime" -) - -type fieldKey string - -// FieldMap allows customization of the key names for default fields. -type FieldMap map[fieldKey]string - -func (f FieldMap) resolve(key fieldKey) string { - if k, ok := f[key]; ok { - return k - } - - return string(key) -} - -// JSONFormatter formats logs into parsable json -type JSONFormatter struct { - // TimestampFormat sets the format used for marshaling timestamps. - TimestampFormat string - - // DisableTimestamp allows disabling automatic timestamps in output - DisableTimestamp bool - - // DisableHTMLEscape allows disabling html escaping in output - DisableHTMLEscape bool - - // DataKey allows users to put all the log entry parameters into a nested dictionary at a given key. - DataKey string - - // FieldMap allows users to customize the names of keys for default fields. - // As an example: - // formatter := &JSONFormatter{ - // FieldMap: FieldMap{ - // FieldKeyTime: "@timestamp", - // FieldKeyLevel: "@level", - // FieldKeyMsg: "@message", - // FieldKeyFunc: "@caller", - // }, - // } - FieldMap FieldMap - - // CallerPrettyfier can be set by the user to modify the content - // of the function and file keys in the json data when ReportCaller is - // activated. If any of the returned value is the empty string the - // corresponding key will be removed from json fields. 
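hooks.go above is the whole extension surface of the logger: implement Levels and Fire, then register with AddHook. A sketch of a hook that forwards severe entries out of band (the notify callback is hypothetical):

    package main

    import log "github.com/sirupsen/logrus"

    // alertHook forwards error-and-worse entries to some external notifier.
    type alertHook struct {
        notify func(msg string) // hypothetical transport, e.g. a pager client
    }

    func (h *alertHook) Levels() []log.Level {
        return []log.Level{log.ErrorLevel, log.FatalLevel, log.PanicLevel}
    }

    func (h *alertHook) Fire(e *log.Entry) error {
        h.notify(e.Message) // an error returned here is propagated by LevelHooks.Fire
        return nil
    }

    func main() {
        logger := log.New()
        logger.AddHook(&alertHook{notify: func(string) {}})
        logger.Error("disk full") // matches the hook's Levels, so Fire runs
    }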
- CallerPrettyfier func(*runtime.Frame) (function string, file string) - - // PrettyPrint will indent all json logs - PrettyPrint bool -} - -// Format renders a single log entry -func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { - data := make(Fields, len(entry.Data)+4) - for k, v := range entry.Data { - switch v := v.(type) { - case error: - // Otherwise errors are ignored by `encoding/json` - // https://github.com/sirupsen/logrus/issues/137 - data[k] = v.Error() - default: - data[k] = v - } - } - - if f.DataKey != "" { - newData := make(Fields, 4) - newData[f.DataKey] = data - data = newData - } - - prefixFieldClashes(data, f.FieldMap, entry.HasCaller()) - - timestampFormat := f.TimestampFormat - if timestampFormat == "" { - timestampFormat = defaultTimestampFormat - } - - if entry.err != "" { - data[f.FieldMap.resolve(FieldKeyLogrusError)] = entry.err - } - if !f.DisableTimestamp { - data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat) - } - data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message - data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String() - if entry.HasCaller() { - funcVal := entry.Caller.Function - fileVal := fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line) - if f.CallerPrettyfier != nil { - funcVal, fileVal = f.CallerPrettyfier(entry.Caller) - } - if funcVal != "" { - data[f.FieldMap.resolve(FieldKeyFunc)] = funcVal - } - if fileVal != "" { - data[f.FieldMap.resolve(FieldKeyFile)] = fileVal - } - } - - var b *bytes.Buffer - if entry.Buffer != nil { - b = entry.Buffer - } else { - b = &bytes.Buffer{} - } - - encoder := json.NewEncoder(b) - encoder.SetEscapeHTML(!f.DisableHTMLEscape) - if f.PrettyPrint { - encoder.SetIndent("", " ") - } - if err := encoder.Encode(data); err != nil { - return nil, fmt.Errorf("failed to marshal fields to JSON, %v", err) - } - - return b.Bytes(), nil -} diff --git a/vendor/github.com/sirupsen/logrus/logger.go b/vendor/github.com/sirupsen/logrus/logger.go deleted file mode 100644 index dbf627c9..00000000 --- a/vendor/github.com/sirupsen/logrus/logger.go +++ /dev/null @@ -1,404 +0,0 @@ -package logrus - -import ( - "context" - "io" - "os" - "sync" - "sync/atomic" - "time" -) - -// LogFunction For big messages, it can be more efficient to pass a function -// and only call it if the log level is actually enables rather than -// generating the log message and then checking if the level is enabled -type LogFunction func()[]interface{} - -type Logger struct { - // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a - // file, or leave it default which is `os.Stderr`. You can also set this to - // something more adventurous, such as logging to Kafka. - Out io.Writer - // Hooks for the logger instance. These allow firing events based on logging - // levels and log entries. For example, to send errors to an error tracking - // service, log to StatsD or dump the core on fatal errors. - Hooks LevelHooks - // All log entries pass through the formatter before logged to Out. The - // included formatters are `TextFormatter` and `JSONFormatter` for which - // TextFormatter is the default. In development (when a TTY is attached) it - // logs with colors, but to a file it wouldn't. You can easily implement your - // own that implements the `Formatter` interface, see the `README` or included - // formatters for examples. - Formatter Formatter - - // Flag for whether to log caller info (off by default) - ReportCaller bool - - // The logging level the logger should log at. 
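Format above resolves key names through FieldMap and optionally nests caller-supplied fields under DataKey. A wiring sketch, with key names borrowed from the struct's own doc-comment example; the output line is for orientation:

    package main

    import (
        "time"

        log "github.com/sirupsen/logrus"
    )

    func main() {
        logger := log.New()
        logger.SetFormatter(&log.JSONFormatter{
            TimestampFormat: time.RFC3339, // same value as defaultTimestampFormat
            DataKey:         "payload",    // nest user fields under one key
            FieldMap: log.FieldMap{
                log.FieldKeyTime:  "@timestamp",
                log.FieldKeyLevel: "@level",
                log.FieldKeyMsg:   "@message",
            },
        })
        logger.WithField("user", 42).Info("login")
        // {"@level":"info","@message":"login","@timestamp":"...","payload":{"user":42}}
    }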
This is typically (and defaults - // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be - // logged. - Level Level - // Used to sync writing to the log. Locking is enabled by Default - mu MutexWrap - // Reusable empty entry - entryPool sync.Pool - // Function to exit the application, defaults to `os.Exit()` - ExitFunc exitFunc -} - -type exitFunc func(int) - -type MutexWrap struct { - lock sync.Mutex - disabled bool -} - -func (mw *MutexWrap) Lock() { - if !mw.disabled { - mw.lock.Lock() - } -} - -func (mw *MutexWrap) Unlock() { - if !mw.disabled { - mw.lock.Unlock() - } -} - -func (mw *MutexWrap) Disable() { - mw.disabled = true -} - -// Creates a new logger. Configuration should be set by changing `Formatter`, -// `Out` and `Hooks` directly on the default logger instance. You can also just -// instantiate your own: -// -// var log = &logrus.Logger{ -// Out: os.Stderr, -// Formatter: new(logrus.TextFormatter), -// Hooks: make(logrus.LevelHooks), -// Level: logrus.DebugLevel, -// } -// -// It's recommended to make this a global instance called `log`. -func New() *Logger { - return &Logger{ - Out: os.Stderr, - Formatter: new(TextFormatter), - Hooks: make(LevelHooks), - Level: InfoLevel, - ExitFunc: os.Exit, - ReportCaller: false, - } -} - -func (logger *Logger) newEntry() *Entry { - entry, ok := logger.entryPool.Get().(*Entry) - if ok { - return entry - } - return NewEntry(logger) -} - -func (logger *Logger) releaseEntry(entry *Entry) { - entry.Data = map[string]interface{}{} - logger.entryPool.Put(entry) -} - -// WithField allocates a new entry and adds a field to it. -// Debug, Print, Info, Warn, Error, Fatal or Panic must be then applied to -// this new returned entry. -// If you want multiple fields, use `WithFields`. -func (logger *Logger) WithField(key string, value interface{}) *Entry { - entry := logger.newEntry() - defer logger.releaseEntry(entry) - return entry.WithField(key, value) -} - -// Adds a struct of fields to the log entry. All it does is call `WithField` for -// each `Field`. -func (logger *Logger) WithFields(fields Fields) *Entry { - entry := logger.newEntry() - defer logger.releaseEntry(entry) - return entry.WithFields(fields) -} - -// Add an error as single field to the log entry. All it does is call -// `WithError` for the given `error`. -func (logger *Logger) WithError(err error) *Entry { - entry := logger.newEntry() - defer logger.releaseEntry(entry) - return entry.WithError(err) -} - -// Add a context to the log entry. -func (logger *Logger) WithContext(ctx context.Context) *Entry { - entry := logger.newEntry() - defer logger.releaseEntry(entry) - return entry.WithContext(ctx) -} - -// Overrides the time of the log entry. -func (logger *Logger) WithTime(t time.Time) *Entry { - entry := logger.newEntry() - defer logger.releaseEntry(entry) - return entry.WithTime(t) -} - -func (logger *Logger) Logf(level Level, format string, args ...interface{}) { - if logger.IsLevelEnabled(level) { - entry := logger.newEntry() - entry.Logf(level, format, args...) - logger.releaseEntry(entry) - } -} - -func (logger *Logger) Tracef(format string, args ...interface{}) { - logger.Logf(TraceLevel, format, args...) -} - -func (logger *Logger) Debugf(format string, args ...interface{}) { - logger.Logf(DebugLevel, format, args...) -} - -func (logger *Logger) Infof(format string, args ...interface{}) { - logger.Logf(InfoLevel, format, args...) 
-} - -func (logger *Logger) Printf(format string, args ...interface{}) { - entry := logger.newEntry() - entry.Printf(format, args...) - logger.releaseEntry(entry) -} - -func (logger *Logger) Warnf(format string, args ...interface{}) { - logger.Logf(WarnLevel, format, args...) -} - -func (logger *Logger) Warningf(format string, args ...interface{}) { - logger.Warnf(format, args...) -} - -func (logger *Logger) Errorf(format string, args ...interface{}) { - logger.Logf(ErrorLevel, format, args...) -} - -func (logger *Logger) Fatalf(format string, args ...interface{}) { - logger.Logf(FatalLevel, format, args...) - logger.Exit(1) -} - -func (logger *Logger) Panicf(format string, args ...interface{}) { - logger.Logf(PanicLevel, format, args...) -} - -func (logger *Logger) Log(level Level, args ...interface{}) { - if logger.IsLevelEnabled(level) { - entry := logger.newEntry() - entry.Log(level, args...) - logger.releaseEntry(entry) - } -} - -func (logger *Logger) LogFn(level Level, fn LogFunction) { - if logger.IsLevelEnabled(level) { - entry := logger.newEntry() - entry.Log(level, fn()...) - logger.releaseEntry(entry) - } -} - -func (logger *Logger) Trace(args ...interface{}) { - logger.Log(TraceLevel, args...) -} - -func (logger *Logger) Debug(args ...interface{}) { - logger.Log(DebugLevel, args...) -} - -func (logger *Logger) Info(args ...interface{}) { - logger.Log(InfoLevel, args...) -} - -func (logger *Logger) Print(args ...interface{}) { - entry := logger.newEntry() - entry.Print(args...) - logger.releaseEntry(entry) -} - -func (logger *Logger) Warn(args ...interface{}) { - logger.Log(WarnLevel, args...) -} - -func (logger *Logger) Warning(args ...interface{}) { - logger.Warn(args...) -} - -func (logger *Logger) Error(args ...interface{}) { - logger.Log(ErrorLevel, args...) -} - -func (logger *Logger) Fatal(args ...interface{}) { - logger.Log(FatalLevel, args...) - logger.Exit(1) -} - -func (logger *Logger) Panic(args ...interface{}) { - logger.Log(PanicLevel, args...) -} - -func (logger *Logger) TraceFn(fn LogFunction) { - logger.LogFn(TraceLevel, fn) -} - -func (logger *Logger) DebugFn(fn LogFunction) { - logger.LogFn(DebugLevel, fn) -} - -func (logger *Logger) InfoFn(fn LogFunction) { - logger.LogFn(InfoLevel, fn) -} - -func (logger *Logger) PrintFn(fn LogFunction) { - entry := logger.newEntry() - entry.Print(fn()...) - logger.releaseEntry(entry) -} - -func (logger *Logger) WarnFn(fn LogFunction) { - logger.LogFn(WarnLevel, fn) -} - -func (logger *Logger) WarningFn(fn LogFunction) { - logger.WarnFn(fn) -} - -func (logger *Logger) ErrorFn(fn LogFunction) { - logger.LogFn(ErrorLevel, fn) -} - -func (logger *Logger) FatalFn(fn LogFunction) { - logger.LogFn(FatalLevel, fn) - logger.Exit(1) -} - -func (logger *Logger) PanicFn(fn LogFunction) { - logger.LogFn(PanicLevel, fn) -} - -func (logger *Logger) Logln(level Level, args ...interface{}) { - if logger.IsLevelEnabled(level) { - entry := logger.newEntry() - entry.Logln(level, args...) - logger.releaseEntry(entry) - } -} - -func (logger *Logger) Traceln(args ...interface{}) { - logger.Logln(TraceLevel, args...) -} - -func (logger *Logger) Debugln(args ...interface{}) { - logger.Logln(DebugLevel, args...) -} - -func (logger *Logger) Infoln(args ...interface{}) { - logger.Logln(InfoLevel, args...) -} - -func (logger *Logger) Println(args ...interface{}) { - entry := logger.newEntry() - entry.Println(args...) - logger.releaseEntry(entry) -} - -func (logger *Logger) Warnln(args ...interface{}) { - logger.Logln(WarnLevel, args...) 
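LogFn above only invokes the supplied closure once IsLevelEnabled passes, so expensive log arguments are built lazily. A short sketch; renderHugeDump stands in for any costly call:

    package main

    import log "github.com/sirupsen/logrus"

    func renderHugeDump() string { return "..." } // hypothetical expensive call

    func main() {
        // New() defaults to InfoLevel, so the closure below is never invoked.
        logger := log.New()
        logger.DebugFn(func() []interface{} {
            return []interface{}{"state: ", renderHugeDump()}
        })
    }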
-} - -func (logger *Logger) Warningln(args ...interface{}) { - logger.Warnln(args...) -} - -func (logger *Logger) Errorln(args ...interface{}) { - logger.Logln(ErrorLevel, args...) -} - -func (logger *Logger) Fatalln(args ...interface{}) { - logger.Logln(FatalLevel, args...) - logger.Exit(1) -} - -func (logger *Logger) Panicln(args ...interface{}) { - logger.Logln(PanicLevel, args...) -} - -func (logger *Logger) Exit(code int) { - runHandlers() - if logger.ExitFunc == nil { - logger.ExitFunc = os.Exit - } - logger.ExitFunc(code) -} - -//When file is opened with appending mode, it's safe to -//write concurrently to a file (within 4k message on Linux). -//In these cases user can choose to disable the lock. -func (logger *Logger) SetNoLock() { - logger.mu.Disable() -} - -func (logger *Logger) level() Level { - return Level(atomic.LoadUint32((*uint32)(&logger.Level))) -} - -// SetLevel sets the logger level. -func (logger *Logger) SetLevel(level Level) { - atomic.StoreUint32((*uint32)(&logger.Level), uint32(level)) -} - -// GetLevel returns the logger level. -func (logger *Logger) GetLevel() Level { - return logger.level() -} - -// AddHook adds a hook to the logger hooks. -func (logger *Logger) AddHook(hook Hook) { - logger.mu.Lock() - defer logger.mu.Unlock() - logger.Hooks.Add(hook) -} - -// IsLevelEnabled checks if the log level of the logger is greater than the level param -func (logger *Logger) IsLevelEnabled(level Level) bool { - return logger.level() >= level -} - -// SetFormatter sets the logger formatter. -func (logger *Logger) SetFormatter(formatter Formatter) { - logger.mu.Lock() - defer logger.mu.Unlock() - logger.Formatter = formatter -} - -// SetOutput sets the logger output. -func (logger *Logger) SetOutput(output io.Writer) { - logger.mu.Lock() - defer logger.mu.Unlock() - logger.Out = output -} - -func (logger *Logger) SetReportCaller(reportCaller bool) { - logger.mu.Lock() - defer logger.mu.Unlock() - logger.ReportCaller = reportCaller -} - -// ReplaceHooks replaces the logger hooks and returns the old ones -func (logger *Logger) ReplaceHooks(hooks LevelHooks) LevelHooks { - logger.mu.Lock() - oldHooks := logger.Hooks - logger.Hooks = hooks - logger.mu.Unlock() - return oldHooks -} diff --git a/vendor/github.com/sirupsen/logrus/logrus.go b/vendor/github.com/sirupsen/logrus/logrus.go deleted file mode 100644 index 2f16224c..00000000 --- a/vendor/github.com/sirupsen/logrus/logrus.go +++ /dev/null @@ -1,186 +0,0 @@ -package logrus - -import ( - "fmt" - "log" - "strings" -) - -// Fields type, used to pass to `WithFields`. -type Fields map[string]interface{} - -// Level type -type Level uint32 - -// Convert the Level to a string. E.g. PanicLevel becomes "panic". -func (level Level) String() string { - if b, err := level.MarshalText(); err == nil { - return string(b) - } else { - return "unknown" - } -} - -// ParseLevel takes a string level and returns the Logrus log level constant. -func ParseLevel(lvl string) (Level, error) { - switch strings.ToLower(lvl) { - case "panic": - return PanicLevel, nil - case "fatal": - return FatalLevel, nil - case "error": - return ErrorLevel, nil - case "warn", "warning": - return WarnLevel, nil - case "info": - return InfoLevel, nil - case "debug": - return DebugLevel, nil - case "trace": - return TraceLevel, nil - } - - var l Level - return l, fmt.Errorf("not a valid logrus Level: %q", lvl) -} - -// UnmarshalText implements encoding.TextUnmarshaler. 
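ParseLevel plus the atomic SetLevel/level pair above is the usual bridge from string configuration to runtime verbosity. A sketch; the verbosity string is an assumed input, not something from this repo:

    package main

    import (
        "fmt"

        log "github.com/sirupsen/logrus"
    )

    func main() {
        verbosity := "debug" // e.g. read from a flag or a config file

        lvl, err := log.ParseLevel(verbosity)
        if err != nil {
            lvl = log.InfoLevel // fall back rather than abort
        }

        logger := log.New()
        logger.SetLevel(lvl)           // atomic store, safe while other goroutines log
        fmt.Println(logger.GetLevel()) // "debug" via Level.String()
    }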
-func (level *Level) UnmarshalText(text []byte) error { - l, err := ParseLevel(string(text)) - if err != nil { - return err - } - - *level = l - - return nil -} - -func (level Level) MarshalText() ([]byte, error) { - switch level { - case TraceLevel: - return []byte("trace"), nil - case DebugLevel: - return []byte("debug"), nil - case InfoLevel: - return []byte("info"), nil - case WarnLevel: - return []byte("warning"), nil - case ErrorLevel: - return []byte("error"), nil - case FatalLevel: - return []byte("fatal"), nil - case PanicLevel: - return []byte("panic"), nil - } - - return nil, fmt.Errorf("not a valid logrus level %d", level) -} - -// A constant exposing all logging levels -var AllLevels = []Level{ - PanicLevel, - FatalLevel, - ErrorLevel, - WarnLevel, - InfoLevel, - DebugLevel, - TraceLevel, -} - -// These are the different logging levels. You can set the logging level to log -// on your instance of logger, obtained with `logrus.New()`. -const ( - // PanicLevel level, highest level of severity. Logs and then calls panic with the - // message passed to Debug, Info, ... - PanicLevel Level = iota - // FatalLevel level. Logs and then calls `logger.Exit(1)`. It will exit even if the - // logging level is set to Panic. - FatalLevel - // ErrorLevel level. Logs. Used for errors that should definitely be noted. - // Commonly used for hooks to send errors to an error tracking service. - ErrorLevel - // WarnLevel level. Non-critical entries that deserve eyes. - WarnLevel - // InfoLevel level. General operational entries about what's going on inside the - // application. - InfoLevel - // DebugLevel level. Usually only enabled when debugging. Very verbose logging. - DebugLevel - // TraceLevel level. Designates finer-grained informational events than the Debug. - TraceLevel -) - -// Won't compile if StdLogger can't be realized by a log.Logger -var ( - _ StdLogger = &log.Logger{} - _ StdLogger = &Entry{} - _ StdLogger = &Logger{} -) - -// StdLogger is what your logrus-enabled library should take, that way -// it'll accept a stdlib logger and a logrus logger. There's no standard -// interface, this is the closest we get, unfortunately. 
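Because Level implements encoding.TextMarshaler and TextUnmarshaler (above), it can sit directly in JSON or YAML config structs with no extra glue. A round-trip sketch:

    package main

    import (
        "encoding/json"
        "fmt"

        log "github.com/sirupsen/logrus"
    )

    type config struct {
        Level log.Level `json:"level"`
    }

    func main() {
        var c config
        // ParseLevel accepts both "warn" and "warning".
        _ = json.Unmarshal([]byte(`{"level":"warn"}`), &c)
        fmt.Println(c.Level) // warning

        out, _ := json.Marshal(c)
        fmt.Println(string(out)) // {"level":"warning"}
    }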
-type StdLogger interface { - Print(...interface{}) - Printf(string, ...interface{}) - Println(...interface{}) - - Fatal(...interface{}) - Fatalf(string, ...interface{}) - Fatalln(...interface{}) - - Panic(...interface{}) - Panicf(string, ...interface{}) - Panicln(...interface{}) -} - -// The FieldLogger interface generalizes the Entry and Logger types -type FieldLogger interface { - WithField(key string, value interface{}) *Entry - WithFields(fields Fields) *Entry - WithError(err error) *Entry - - Debugf(format string, args ...interface{}) - Infof(format string, args ...interface{}) - Printf(format string, args ...interface{}) - Warnf(format string, args ...interface{}) - Warningf(format string, args ...interface{}) - Errorf(format string, args ...interface{}) - Fatalf(format string, args ...interface{}) - Panicf(format string, args ...interface{}) - - Debug(args ...interface{}) - Info(args ...interface{}) - Print(args ...interface{}) - Warn(args ...interface{}) - Warning(args ...interface{}) - Error(args ...interface{}) - Fatal(args ...interface{}) - Panic(args ...interface{}) - - Debugln(args ...interface{}) - Infoln(args ...interface{}) - Println(args ...interface{}) - Warnln(args ...interface{}) - Warningln(args ...interface{}) - Errorln(args ...interface{}) - Fatalln(args ...interface{}) - Panicln(args ...interface{}) - - // IsDebugEnabled() bool - // IsInfoEnabled() bool - // IsWarnEnabled() bool - // IsErrorEnabled() bool - // IsFatalEnabled() bool - // IsPanicEnabled() bool -} - -// Ext1FieldLogger (the first extension to FieldLogger) is superfluous, it is -// here for consistancy. Do not use. Use Logger or Entry instead. -type Ext1FieldLogger interface { - FieldLogger - Tracef(format string, args ...interface{}) - Trace(args ...interface{}) - Traceln(args ...interface{}) -} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go b/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go deleted file mode 100644 index 2403de98..00000000 --- a/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build appengine - -package logrus - -import ( - "io" -) - -func checkIfTerminal(w io.Writer) bool { - return true -} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go b/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go deleted file mode 100644 index 49978998..00000000 --- a/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build darwin dragonfly freebsd netbsd openbsd -// +build !js - -package logrus - -import "golang.org/x/sys/unix" - -const ioctlReadTermios = unix.TIOCGETA - -func isTerminal(fd int) bool { - _, err := unix.IoctlGetTermios(fd, ioctlReadTermios) - return err == nil -} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_js.go b/vendor/github.com/sirupsen/logrus/terminal_check_js.go deleted file mode 100644 index ebdae3ec..00000000 --- a/vendor/github.com/sirupsen/logrus/terminal_check_js.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build js - -package logrus - -func isTerminal(fd int) bool { - return false -} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go b/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go deleted file mode 100644 index 97af92c6..00000000 --- a/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build js nacl plan9 - -package logrus - -import ( - "io" -) - -func checkIfTerminal(w io.Writer) bool { - return false -} diff --git 
a/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go b/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go deleted file mode 100644 index 3293fb3c..00000000 --- a/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build !appengine,!js,!windows,!nacl,!plan9 - -package logrus - -import ( - "io" - "os" -) - -func checkIfTerminal(w io.Writer) bool { - switch v := w.(type) { - case *os.File: - return isTerminal(int(v.Fd())) - default: - return false - } -} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_solaris.go b/vendor/github.com/sirupsen/logrus/terminal_check_solaris.go deleted file mode 100644 index f6710b3b..00000000 --- a/vendor/github.com/sirupsen/logrus/terminal_check_solaris.go +++ /dev/null @@ -1,11 +0,0 @@ -package logrus - -import ( - "golang.org/x/sys/unix" -) - -// IsTerminal returns true if the given file descriptor is a terminal. -func isTerminal(fd int) bool { - _, err := unix.IoctlGetTermio(fd, unix.TCGETA) - return err == nil -} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_unix.go b/vendor/github.com/sirupsen/logrus/terminal_check_unix.go deleted file mode 100644 index cc4fe6e3..00000000 --- a/vendor/github.com/sirupsen/logrus/terminal_check_unix.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build linux aix -// +build !js - -package logrus - -import "golang.org/x/sys/unix" - -const ioctlReadTermios = unix.TCGETS - -func isTerminal(fd int) bool { - _, err := unix.IoctlGetTermios(fd, ioctlReadTermios) - return err == nil -} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_windows.go b/vendor/github.com/sirupsen/logrus/terminal_check_windows.go deleted file mode 100644 index 2879eb50..00000000 --- a/vendor/github.com/sirupsen/logrus/terminal_check_windows.go +++ /dev/null @@ -1,27 +0,0 @@ -// +build !appengine,!js,windows - -package logrus - -import ( - "io" - "os" - - "golang.org/x/sys/windows" -) - -func checkIfTerminal(w io.Writer) bool { - switch v := w.(type) { - case *os.File: - handle := windows.Handle(v.Fd()) - var mode uint32 - if err := windows.GetConsoleMode(handle, &mode); err != nil { - return false - } - mode |= windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING - if err := windows.SetConsoleMode(handle, mode); err != nil { - return false - } - return true - } - return false -} diff --git a/vendor/github.com/sirupsen/logrus/text_formatter.go b/vendor/github.com/sirupsen/logrus/text_formatter.go deleted file mode 100644 index 3c28b54c..00000000 --- a/vendor/github.com/sirupsen/logrus/text_formatter.go +++ /dev/null @@ -1,334 +0,0 @@ -package logrus - -import ( - "bytes" - "fmt" - "os" - "runtime" - "sort" - "strconv" - "strings" - "sync" - "time" - "unicode/utf8" -) - -const ( - red = 31 - yellow = 33 - blue = 36 - gray = 37 -) - -var baseTimestamp time.Time - -func init() { - baseTimestamp = time.Now() -} - -// TextFormatter formats logs into text -type TextFormatter struct { - // Set to true to bypass checking for a TTY before outputting colors. - ForceColors bool - - // Force disabling colors. - DisableColors bool - - // Force quoting of all values - ForceQuote bool - - // DisableQuote disables quoting for all values. - // DisableQuote will have a lower priority than ForceQuote. - // If both of them are set to true, quote will be forced on all values. - DisableQuote bool - - // Override coloring based on CLICOLOR and CLICOLOR_FORCE. - https://bixense.com/clicolors/ - EnvironmentOverrideColors bool - - // Disable timestamp logging. 
useful when output is redirected to logging - // system that already adds timestamps. - DisableTimestamp bool - - // Enable logging the full timestamp when a TTY is attached instead of just - // the time passed since beginning of execution. - FullTimestamp bool - - // TimestampFormat to use for display when a full timestamp is printed - TimestampFormat string - - // The fields are sorted by default for a consistent output. For applications - // that log extremely frequently and don't use the JSON formatter this may not - // be desired. - DisableSorting bool - - // The keys sorting function, when uninitialized it uses sort.Strings. - SortingFunc func([]string) - - // Disables the truncation of the level text to 4 characters. - DisableLevelTruncation bool - - // PadLevelText Adds padding the level text so that all the levels output at the same length - // PadLevelText is a superset of the DisableLevelTruncation option - PadLevelText bool - - // QuoteEmptyFields will wrap empty fields in quotes if true - QuoteEmptyFields bool - - // Whether the logger's out is to a terminal - isTerminal bool - - // FieldMap allows users to customize the names of keys for default fields. - // As an example: - // formatter := &TextFormatter{ - // FieldMap: FieldMap{ - // FieldKeyTime: "@timestamp", - // FieldKeyLevel: "@level", - // FieldKeyMsg: "@message"}} - FieldMap FieldMap - - // CallerPrettyfier can be set by the user to modify the content - // of the function and file keys in the data when ReportCaller is - // activated. If any of the returned value is the empty string the - // corresponding key will be removed from fields. - CallerPrettyfier func(*runtime.Frame) (function string, file string) - - terminalInitOnce sync.Once - - // The max length of the level text, generated dynamically on init - levelTextMaxLength int -} - -func (f *TextFormatter) init(entry *Entry) { - if entry.Logger != nil { - f.isTerminal = checkIfTerminal(entry.Logger.Out) - } - // Get the max length of the level text - for _, level := range AllLevels { - levelTextLength := utf8.RuneCount([]byte(level.String())) - if levelTextLength > f.levelTextMaxLength { - f.levelTextMaxLength = levelTextLength - } - } -} - -func (f *TextFormatter) isColored() bool { - isColored := f.ForceColors || (f.isTerminal && (runtime.GOOS != "windows")) - - if f.EnvironmentOverrideColors { - switch force, ok := os.LookupEnv("CLICOLOR_FORCE"); { - case ok && force != "0": - isColored = true - case ok && force == "0", os.Getenv("CLICOLOR") == "0": - isColored = false - } - } - - return isColored && !f.DisableColors -} - -// Format renders a single log entry -func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { - data := make(Fields) - for k, v := range entry.Data { - data[k] = v - } - prefixFieldClashes(data, f.FieldMap, entry.HasCaller()) - keys := make([]string, 0, len(data)) - for k := range data { - keys = append(keys, k) - } - - var funcVal, fileVal string - - fixedKeys := make([]string, 0, 4+len(data)) - if !f.DisableTimestamp { - fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyTime)) - } - fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLevel)) - if entry.Message != "" { - fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyMsg)) - } - if entry.err != "" { - fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLogrusError)) - } - if entry.HasCaller() { - if f.CallerPrettyfier != nil { - funcVal, fileVal = f.CallerPrettyfier(entry.Caller) - } else { - funcVal = entry.Caller.Function - fileVal = fmt.Sprintf("%s:%d", 
entry.Caller.File, entry.Caller.Line) - } - - if funcVal != "" { - fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyFunc)) - } - if fileVal != "" { - fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyFile)) - } - } - - if !f.DisableSorting { - if f.SortingFunc == nil { - sort.Strings(keys) - fixedKeys = append(fixedKeys, keys...) - } else { - if !f.isColored() { - fixedKeys = append(fixedKeys, keys...) - f.SortingFunc(fixedKeys) - } else { - f.SortingFunc(keys) - } - } - } else { - fixedKeys = append(fixedKeys, keys...) - } - - var b *bytes.Buffer - if entry.Buffer != nil { - b = entry.Buffer - } else { - b = &bytes.Buffer{} - } - - f.terminalInitOnce.Do(func() { f.init(entry) }) - - timestampFormat := f.TimestampFormat - if timestampFormat == "" { - timestampFormat = defaultTimestampFormat - } - if f.isColored() { - f.printColored(b, entry, keys, data, timestampFormat) - } else { - - for _, key := range fixedKeys { - var value interface{} - switch { - case key == f.FieldMap.resolve(FieldKeyTime): - value = entry.Time.Format(timestampFormat) - case key == f.FieldMap.resolve(FieldKeyLevel): - value = entry.Level.String() - case key == f.FieldMap.resolve(FieldKeyMsg): - value = entry.Message - case key == f.FieldMap.resolve(FieldKeyLogrusError): - value = entry.err - case key == f.FieldMap.resolve(FieldKeyFunc) && entry.HasCaller(): - value = funcVal - case key == f.FieldMap.resolve(FieldKeyFile) && entry.HasCaller(): - value = fileVal - default: - value = data[key] - } - f.appendKeyValue(b, key, value) - } - } - - b.WriteByte('\n') - return b.Bytes(), nil -} - -func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, data Fields, timestampFormat string) { - var levelColor int - switch entry.Level { - case DebugLevel, TraceLevel: - levelColor = gray - case WarnLevel: - levelColor = yellow - case ErrorLevel, FatalLevel, PanicLevel: - levelColor = red - default: - levelColor = blue - } - - levelText := strings.ToUpper(entry.Level.String()) - if !f.DisableLevelTruncation && !f.PadLevelText { - levelText = levelText[0:4] - } - if f.PadLevelText { - // Generates the format string used in the next line, for example "%-6s" or "%-7s". - // Based on the max level text length. 
- formatString := "%-" + strconv.Itoa(f.levelTextMaxLength) + "s" - // Formats the level text by appending spaces up to the max length, for example: - // - "INFO " - // - "WARNING" - levelText = fmt.Sprintf(formatString, levelText) - } - - // Remove a single newline if it already exists in the message to keep - // the behavior of logrus text_formatter the same as the stdlib log package - entry.Message = strings.TrimSuffix(entry.Message, "\n") - - caller := "" - if entry.HasCaller() { - funcVal := fmt.Sprintf("%s()", entry.Caller.Function) - fileVal := fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line) - - if f.CallerPrettyfier != nil { - funcVal, fileVal = f.CallerPrettyfier(entry.Caller) - } - - if fileVal == "" { - caller = funcVal - } else if funcVal == "" { - caller = fileVal - } else { - caller = fileVal + " " + funcVal - } - } - - switch { - case f.DisableTimestamp: - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m%s %-44s ", levelColor, levelText, caller, entry.Message) - case !f.FullTimestamp: - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d]%s %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), caller, entry.Message) - default: - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s]%s %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), caller, entry.Message) - } - for _, k := range keys { - v := data[k] - fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k) - f.appendValue(b, v) - } -} - -func (f *TextFormatter) needsQuoting(text string) bool { - if f.ForceQuote { - return true - } - if f.QuoteEmptyFields && len(text) == 0 { - return true - } - if f.DisableQuote { - return false - } - for _, ch := range text { - if !((ch >= 'a' && ch <= 'z') || - (ch >= 'A' && ch <= 'Z') || - (ch >= '0' && ch <= '9') || - ch == '-' || ch == '.' || ch == '_' || ch == '/' || ch == '@' || ch == '^' || ch == '+') { - return true - } - } - return false -} - -func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) { - if b.Len() > 0 { - b.WriteByte(' ') - } - b.WriteString(key) - b.WriteByte('=') - f.appendValue(b, value) -} - -func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) { - stringVal, ok := value.(string) - if !ok { - stringVal = fmt.Sprint(value) - } - - if !f.needsQuoting(stringVal) { - b.WriteString(stringVal) - } else { - b.WriteString(fmt.Sprintf("%q", stringVal)) - } -} diff --git a/vendor/github.com/sirupsen/logrus/writer.go b/vendor/github.com/sirupsen/logrus/writer.go deleted file mode 100644 index 72e8e3a1..00000000 --- a/vendor/github.com/sirupsen/logrus/writer.go +++ /dev/null @@ -1,70 +0,0 @@ -package logrus - -import ( - "bufio" - "io" - "runtime" -) - -// Writer at INFO level. See WriterLevel for details. -func (logger *Logger) Writer() *io.PipeWriter { - return logger.WriterLevel(InfoLevel) -} - -// WriterLevel returns an io.Writer that can be used to write arbitrary text to -// the logger at the given log level. Each line written to the writer will be -// printed in the usual way using formatters and hooks. The writer is part of an -// io.Pipe and it is the callers responsibility to close the writer when done. -// This can be used to override the standard library logger easily. 
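The WriterLevel documentation above points at its main use case: funneling the stdlib log package, or any io.Writer consumer, through logrus. A sketch:

    package main

    import (
        stdlog "log"

        log "github.com/sirupsen/logrus"
    )

    func main() {
        logger := log.New()

        w := logger.WriterLevel(log.WarnLevel)
        defer w.Close() // per the docs, the caller owns and must close the pipe writer

        legacy := stdlog.New(w, "", 0)
        legacy.Println("message from a pre-logrus component") // emitted as a warn entry
    }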
-func (logger *Logger) WriterLevel(level Level) *io.PipeWriter { - return NewEntry(logger).WriterLevel(level) -} - -func (entry *Entry) Writer() *io.PipeWriter { - return entry.WriterLevel(InfoLevel) -} - -func (entry *Entry) WriterLevel(level Level) *io.PipeWriter { - reader, writer := io.Pipe() - - var printFunc func(args ...interface{}) - - switch level { - case TraceLevel: - printFunc = entry.Trace - case DebugLevel: - printFunc = entry.Debug - case InfoLevel: - printFunc = entry.Info - case WarnLevel: - printFunc = entry.Warn - case ErrorLevel: - printFunc = entry.Error - case FatalLevel: - printFunc = entry.Fatal - case PanicLevel: - printFunc = entry.Panic - default: - printFunc = entry.Print - } - - go entry.writerScanner(reader, printFunc) - runtime.SetFinalizer(writer, writerFinalizer) - - return writer -} - -func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) { - scanner := bufio.NewScanner(reader) - for scanner.Scan() { - printFunc(scanner.Text()) - } - if err := scanner.Err(); err != nil { - entry.Errorf("Error while reading from Writer: %s", err) - } - reader.Close() -} - -func writerFinalizer(writer *io.PipeWriter) { - writer.Close() -} diff --git a/vendor/github.com/smallstep/certificates/LICENSE b/vendor/github.com/smallstep/certificates/LICENSE deleted file mode 100644 index 3044b170..00000000 --- a/vendor/github.com/smallstep/certificates/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. 
For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2020 Smallstep Labs, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/smallstep/certificates/authority/admin/db.go b/vendor/github.com/smallstep/certificates/authority/admin/db.go deleted file mode 100644 index 15fe6686..00000000 --- a/vendor/github.com/smallstep/certificates/authority/admin/db.go +++ /dev/null @@ -1,179 +0,0 @@ -package admin - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/pkg/errors" - "go.step.sm/linkedca" -) - -const ( - // DefaultAuthorityID is the default AuthorityID. This will be the ID - // of the first Authority created, as well as the default AuthorityID - // if one is not specified in the configuration. - DefaultAuthorityID = "00000000-0000-0000-0000-000000000000" -) - -// ErrNotFound is an error that should be used by the authority.DB interface to -// indicate that an entity does not exist. -var ErrNotFound = errors.New("not found") - -// UnmarshalProvisionerDetails unmarshals details type to the specific provisioner details. 
-func UnmarshalProvisionerDetails(typ linkedca.Provisioner_Type, data []byte) (*linkedca.ProvisionerDetails, error) { - var v linkedca.ProvisionerDetails - switch typ { - case linkedca.Provisioner_JWK: - v.Data = new(linkedca.ProvisionerDetails_JWK) - case linkedca.Provisioner_OIDC: - v.Data = new(linkedca.ProvisionerDetails_OIDC) - case linkedca.Provisioner_GCP: - v.Data = new(linkedca.ProvisionerDetails_GCP) - case linkedca.Provisioner_AWS: - v.Data = new(linkedca.ProvisionerDetails_AWS) - case linkedca.Provisioner_AZURE: - v.Data = new(linkedca.ProvisionerDetails_Azure) - case linkedca.Provisioner_ACME: - v.Data = new(linkedca.ProvisionerDetails_ACME) - case linkedca.Provisioner_X5C: - v.Data = new(linkedca.ProvisionerDetails_X5C) - case linkedca.Provisioner_K8SSA: - v.Data = new(linkedca.ProvisionerDetails_K8SSA) - case linkedca.Provisioner_SSHPOP: - v.Data = new(linkedca.ProvisionerDetails_SSHPOP) - case linkedca.Provisioner_SCEP: - v.Data = new(linkedca.ProvisionerDetails_SCEP) - default: - return nil, fmt.Errorf("unsupported provisioner type %s", typ) - } - - if err := json.Unmarshal(data, v.Data); err != nil { - return nil, err - } - return &linkedca.ProvisionerDetails{Data: v.Data}, nil -} - -// DB is the DB interface expected by the step-ca ACME API. -type DB interface { - CreateProvisioner(ctx context.Context, prov *linkedca.Provisioner) error - GetProvisioner(ctx context.Context, id string) (*linkedca.Provisioner, error) - GetProvisioners(ctx context.Context) ([]*linkedca.Provisioner, error) - UpdateProvisioner(ctx context.Context, prov *linkedca.Provisioner) error - DeleteProvisioner(ctx context.Context, id string) error - - CreateAdmin(ctx context.Context, admin *linkedca.Admin) error - GetAdmin(ctx context.Context, id string) (*linkedca.Admin, error) - GetAdmins(ctx context.Context) ([]*linkedca.Admin, error) - UpdateAdmin(ctx context.Context, admin *linkedca.Admin) error - DeleteAdmin(ctx context.Context, id string) error -} - -// MockDB is an implementation of the DB interface that should only be used as -// a mock in tests. -type MockDB struct { - MockCreateProvisioner func(ctx context.Context, prov *linkedca.Provisioner) error - MockGetProvisioner func(ctx context.Context, id string) (*linkedca.Provisioner, error) - MockGetProvisioners func(ctx context.Context) ([]*linkedca.Provisioner, error) - MockUpdateProvisioner func(ctx context.Context, prov *linkedca.Provisioner) error - MockDeleteProvisioner func(ctx context.Context, id string) error - - MockCreateAdmin func(ctx context.Context, adm *linkedca.Admin) error - MockGetAdmin func(ctx context.Context, id string) (*linkedca.Admin, error) - MockGetAdmins func(ctx context.Context) ([]*linkedca.Admin, error) - MockUpdateAdmin func(ctx context.Context, adm *linkedca.Admin) error - MockDeleteAdmin func(ctx context.Context, id string) error - - MockError error - MockRet1 interface{} -} - -// CreateProvisioner mock. -func (m *MockDB) CreateProvisioner(ctx context.Context, prov *linkedca.Provisioner) error { - if m.MockCreateProvisioner != nil { - return m.MockCreateProvisioner(ctx, prov) - } else if m.MockError != nil { - return m.MockError - } - return m.MockError -} - -// GetProvisioner mock. 
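MockDB above lets tests stub admin.DB method by method, with MockError and MockRet1 as blanket fallbacks for anything left nil. A typical test-side sketch; the test itself is illustrative, not from this repo:

    package admin_test

    import (
        "context"
        "testing"

        "github.com/smallstep/certificates/authority/admin"
        "go.step.sm/linkedca"
    )

    func TestGetProvisionerNotFound(t *testing.T) {
        db := &admin.MockDB{
            MockGetProvisioner: func(ctx context.Context, id string) (*linkedca.Provisioner, error) {
                return nil, admin.ErrNotFound // exercise the not-found path
            },
        }

        if _, err := db.GetProvisioner(context.Background(), "provID"); err != admin.ErrNotFound {
            t.Fatalf("want admin.ErrNotFound, got %v", err)
        }
    }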
-func (m *MockDB) GetProvisioner(ctx context.Context, id string) (*linkedca.Provisioner, error) { - if m.MockGetProvisioner != nil { - return m.MockGetProvisioner(ctx, id) - } else if m.MockError != nil { - return nil, m.MockError - } - return m.MockRet1.(*linkedca.Provisioner), m.MockError -} - -// GetProvisioners mock -func (m *MockDB) GetProvisioners(ctx context.Context) ([]*linkedca.Provisioner, error) { - if m.MockGetProvisioners != nil { - return m.MockGetProvisioners(ctx) - } else if m.MockError != nil { - return nil, m.MockError - } - return m.MockRet1.([]*linkedca.Provisioner), m.MockError -} - -// UpdateProvisioner mock -func (m *MockDB) UpdateProvisioner(ctx context.Context, prov *linkedca.Provisioner) error { - if m.MockUpdateProvisioner != nil { - return m.MockUpdateProvisioner(ctx, prov) - } - return m.MockError -} - -// DeleteProvisioner mock -func (m *MockDB) DeleteProvisioner(ctx context.Context, id string) error { - if m.MockDeleteProvisioner != nil { - return m.MockDeleteProvisioner(ctx, id) - } - return m.MockError -} - -// CreateAdmin mock -func (m *MockDB) CreateAdmin(ctx context.Context, admin *linkedca.Admin) error { - if m.MockCreateAdmin != nil { - return m.MockCreateAdmin(ctx, admin) - } - return m.MockError -} - -// GetAdmin mock. -func (m *MockDB) GetAdmin(ctx context.Context, id string) (*linkedca.Admin, error) { - if m.MockGetAdmin != nil { - return m.MockGetAdmin(ctx, id) - } else if m.MockError != nil { - return nil, m.MockError - } - return m.MockRet1.(*linkedca.Admin), m.MockError -} - -// GetAdmins mock -func (m *MockDB) GetAdmins(ctx context.Context) ([]*linkedca.Admin, error) { - if m.MockGetAdmins != nil { - return m.MockGetAdmins(ctx) - } else if m.MockError != nil { - return nil, m.MockError - } - return m.MockRet1.([]*linkedca.Admin), m.MockError -} - -// UpdateAdmin mock -func (m *MockDB) UpdateAdmin(ctx context.Context, adm *linkedca.Admin) error { - if m.MockUpdateAdmin != nil { - return m.MockUpdateAdmin(ctx, adm) - } - return m.MockError -} - -// DeleteAdmin mock -func (m *MockDB) DeleteAdmin(ctx context.Context, id string) error { - if m.MockDeleteAdmin != nil { - return m.MockDeleteAdmin(ctx, id) - } - return m.MockError -} diff --git a/vendor/github.com/smallstep/certificates/authority/admin/db/nosql/admin.go b/vendor/github.com/smallstep/certificates/authority/admin/db/nosql/admin.go deleted file mode 100644 index 6bb6bdd1..00000000 --- a/vendor/github.com/smallstep/certificates/authority/admin/db/nosql/admin.go +++ /dev/null @@ -1,178 +0,0 @@ -package nosql - -import ( - "context" - "encoding/json" - "time" - - "github.com/pkg/errors" - "github.com/smallstep/certificates/authority/admin" - "github.com/smallstep/nosql" - "go.step.sm/linkedca" - "google.golang.org/protobuf/types/known/timestamppb" -) - -// dbAdmin is the database representation of the Admin type. 
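The MockDB pattern above (one overridable function field per interface method, with MockError/MockRet1 as fallbacks) keeps test doubles cheap: a test overrides only the hook it needs. A minimal sketch against this package's pre-removal API; the test name and expectations are illustrative only:

package admin_test

import (
	"context"
	"testing"

	"github.com/smallstep/certificates/authority/admin"
	"go.step.sm/linkedca"
)

// TestGetAdmin is illustrative: only MockGetAdmin is overridden; every
// other method falls back to MockError / MockRet1.
func TestGetAdmin(t *testing.T) {
	db := &admin.MockDB{
		MockGetAdmin: func(ctx context.Context, id string) (*linkedca.Admin, error) {
			return &linkedca.Admin{Id: id, Subject: "step"}, nil
		},
	}

	adm, err := db.GetAdmin(context.Background(), "admin-1")
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if adm.Subject != "step" {
		t.Fatalf("want subject %q, got %q", "step", adm.Subject)
	}
}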
-type dbAdmin struct { - ID string `json:"id"` - AuthorityID string `json:"authorityID"` - ProvisionerID string `json:"provisionerID"` - Subject string `json:"subject"` - Type linkedca.Admin_Type `json:"type"` - CreatedAt time.Time `json:"createdAt"` - DeletedAt time.Time `json:"deletedAt"` -} - -func (dba *dbAdmin) convert() *linkedca.Admin { - return &linkedca.Admin{ - Id: dba.ID, - AuthorityId: dba.AuthorityID, - ProvisionerId: dba.ProvisionerID, - Subject: dba.Subject, - Type: dba.Type, - CreatedAt: timestamppb.New(dba.CreatedAt), - DeletedAt: timestamppb.New(dba.DeletedAt), - } -} - -func (dba *dbAdmin) clone() *dbAdmin { - u := *dba - return &u -} - -func (db *DB) getDBAdminBytes(ctx context.Context, id string) ([]byte, error) { - data, err := db.db.Get(adminsTable, []byte(id)) - if nosql.IsErrNotFound(err) { - return nil, admin.NewError(admin.ErrorNotFoundType, "admin %s not found", id) - } else if err != nil { - return nil, errors.Wrapf(err, "error loading admin %s", id) - } - return data, nil -} - -func (db *DB) unmarshalDBAdmin(data []byte, id string) (*dbAdmin, error) { - var dba = new(dbAdmin) - if err := json.Unmarshal(data, dba); err != nil { - return nil, errors.Wrapf(err, "error unmarshaling admin %s into dbAdmin", id) - } - if !dba.DeletedAt.IsZero() { - return nil, admin.NewError(admin.ErrorDeletedType, "admin %s is deleted", id) - } - if dba.AuthorityID != db.authorityID { - return nil, admin.NewError(admin.ErrorAuthorityMismatchType, - "admin %s is not owned by authority %s", dba.ID, db.authorityID) - } - return dba, nil -} - -func (db *DB) getDBAdmin(ctx context.Context, id string) (*dbAdmin, error) { - data, err := db.getDBAdminBytes(ctx, id) - if err != nil { - return nil, err - } - dba, err := db.unmarshalDBAdmin(data, id) - if err != nil { - return nil, err - } - return dba, nil -} - -func (db *DB) unmarshalAdmin(data []byte, id string) (*linkedca.Admin, error) { - dba, err := db.unmarshalDBAdmin(data, id) - if err != nil { - return nil, err - } - return dba.convert(), nil -} - -// GetAdmin retrieves and unmarshals a admin from the database. -func (db *DB) GetAdmin(ctx context.Context, id string) (*linkedca.Admin, error) { - data, err := db.getDBAdminBytes(ctx, id) - if err != nil { - return nil, err - } - adm, err := db.unmarshalAdmin(data, id) - if err != nil { - return nil, err - } - - return adm, nil -} - -// GetAdmins retrieves and unmarshals all active (not deleted) admins -// from the database. -// TODO should we be paginating? -func (db *DB) GetAdmins(ctx context.Context) ([]*linkedca.Admin, error) { - dbEntries, err := db.db.List(adminsTable) - if err != nil { - return nil, errors.Wrap(err, "error loading admins") - } - var admins = []*linkedca.Admin{} - for _, entry := range dbEntries { - adm, err := db.unmarshalAdmin(entry.Value, string(entry.Key)) - if err != nil { - switch k := err.(type) { - case *admin.Error: - if k.IsType(admin.ErrorDeletedType) || k.IsType(admin.ErrorAuthorityMismatchType) { - continue - } else { - return nil, err - } - default: - return nil, err - } - } - if adm.AuthorityId != db.authorityID { - continue - } - admins = append(admins, adm) - } - return admins, nil -} - -// CreateAdmin stores a new admin to the database. 
-func (db *DB) CreateAdmin(ctx context.Context, adm *linkedca.Admin) error { - var err error - adm.Id, err = randID() - if err != nil { - return admin.WrapErrorISE(err, "error generating random id for admin") - } - adm.AuthorityId = db.authorityID - - dba := &dbAdmin{ - ID: adm.Id, - AuthorityID: db.authorityID, - ProvisionerID: adm.ProvisionerId, - Subject: adm.Subject, - Type: adm.Type, - CreatedAt: clock.Now(), - } - - return db.save(ctx, dba.ID, dba, nil, "admin", adminsTable) -} - -// UpdateAdmin saves an updated admin to the database. -func (db *DB) UpdateAdmin(ctx context.Context, adm *linkedca.Admin) error { - old, err := db.getDBAdmin(ctx, adm.Id) - if err != nil { - return err - } - - nu := old.clone() - nu.Type = adm.Type - - return db.save(ctx, old.ID, nu, old, "admin", adminsTable) -} - -// DeleteAdmin saves an updated admin to the database. -func (db *DB) DeleteAdmin(ctx context.Context, id string) error { - old, err := db.getDBAdmin(ctx, id) - if err != nil { - return err - } - - nu := old.clone() - nu.DeletedAt = clock.Now() - - return db.save(ctx, old.ID, nu, old, "admin", adminsTable) -} diff --git a/vendor/github.com/smallstep/certificates/authority/admin/db/nosql/nosql.go b/vendor/github.com/smallstep/certificates/authority/admin/db/nosql/nosql.go deleted file mode 100644 index 18599b02..00000000 --- a/vendor/github.com/smallstep/certificates/authority/admin/db/nosql/nosql.go +++ /dev/null @@ -1,88 +0,0 @@ -package nosql - -import ( - "context" - "encoding/json" - "time" - - "github.com/pkg/errors" - nosqlDB "github.com/smallstep/nosql/database" - "go.step.sm/crypto/randutil" -) - -var ( - adminsTable = []byte("admins") - provisionersTable = []byte("provisioners") -) - -// DB is a struct that implements the AdminDB interface. -type DB struct { - db nosqlDB.DB - authorityID string -} - -// New configures and returns a new Authority DB backend implemented using a nosql DB. -func New(db nosqlDB.DB, authorityID string) (*DB, error) { - tables := [][]byte{adminsTable, provisionersTable} - for _, b := range tables { - if err := db.CreateTable(b); err != nil { - return nil, errors.Wrapf(err, "error creating table %s", - string(b)) - } - } - return &DB{db, authorityID}, nil -} - -// save writes the new data to the database, overwriting the old data if it -// existed. -func (db *DB) save(ctx context.Context, id string, nu interface{}, old interface{}, typ string, table []byte) error { - var ( - err error - newB []byte - ) - if nu == nil { - newB = nil - } else { - newB, err = json.Marshal(nu) - if err != nil { - return errors.Wrapf(err, "error marshaling authority type: %s, value: %v", typ, nu) - } - } - var oldB []byte - if old == nil { - oldB = nil - } else { - oldB, err = json.Marshal(old) - if err != nil { - return errors.Wrapf(err, "error marshaling admin type: %s, value: %v", typ, old) - } - } - - _, swapped, err := db.db.CmpAndSwap(table, []byte(id), oldB, newB) - switch { - case err != nil: - return errors.Wrapf(err, "error saving authority %s", typ) - case !swapped: - return errors.Errorf("error saving authority %s; changed since last read", typ) - default: - return nil - } -} - -func randID() (val string, err error) { - val, err = randutil.UUIDv4() - if err != nil { - return "", errors.Wrap(err, "error generating random alphanumeric ID") - } - return val, nil -} - -// Clock that returns time in UTC rounded to seconds. -type Clock struct{} - -// Now returns the UTC time rounded to seconds. 
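The save helper above is what gives every admin and provisioner write its optimistic-locking behavior: the JSON of the last-read value is handed to CmpAndSwap as the expected old value, so a concurrent writer makes swapped come back false instead of silently losing an update. A sketch of the retry loop a caller could build on top (a hypothetical helper, not part of this package; it assumes only the Get/CmpAndSwap signatures used above):

package nosql

import (
	nosqlDB "github.com/smallstep/nosql/database"
)

// updateWithRetry re-reads the current value and re-applies mutate until
// the compare-and-swap succeeds, i.e. until no concurrent write landed
// between our read and our write.
func updateWithRetry(db nosqlDB.DB, table, key []byte, mutate func(old []byte) ([]byte, error)) error {
	for {
		oldB, err := db.Get(table, key)
		if err != nil {
			return err
		}
		newB, err := mutate(oldB)
		if err != nil {
			return err
		}
		_, swapped, err := db.CmpAndSwap(table, key, oldB, newB)
		if err != nil {
			return err
		}
		if swapped {
			return nil // no concurrent write since our read
		}
		// Lost the race: another writer changed the value; loop and retry
		// against fresh data.
	}
}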
-func (c *Clock) Now() time.Time { - return time.Now().UTC().Truncate(time.Second) -} - -var clock = new(Clock) diff --git a/vendor/github.com/smallstep/certificates/authority/admin/db/nosql/provisioner.go b/vendor/github.com/smallstep/certificates/authority/admin/db/nosql/provisioner.go deleted file mode 100644 index 71d9c8d6..00000000 --- a/vendor/github.com/smallstep/certificates/authority/admin/db/nosql/provisioner.go +++ /dev/null @@ -1,211 +0,0 @@ -package nosql - -import ( - "context" - "encoding/json" - "time" - - "github.com/pkg/errors" - "github.com/smallstep/certificates/authority/admin" - "github.com/smallstep/nosql" - "go.step.sm/linkedca" - "google.golang.org/protobuf/types/known/timestamppb" -) - -// dbProvisioner is the database representation of a Provisioner type. -type dbProvisioner struct { - ID string `json:"id"` - AuthorityID string `json:"authorityID"` - Type linkedca.Provisioner_Type `json:"type"` - Name string `json:"name"` - Claims *linkedca.Claims `json:"claims"` - Details []byte `json:"details"` - X509Template *linkedca.Template `json:"x509Template"` - SSHTemplate *linkedca.Template `json:"sshTemplate"` - CreatedAt time.Time `json:"createdAt"` - DeletedAt time.Time `json:"deletedAt"` -} - -func (dbp *dbProvisioner) clone() *dbProvisioner { - u := *dbp - return &u -} - -func (dbp *dbProvisioner) convert2linkedca() (*linkedca.Provisioner, error) { - details, err := admin.UnmarshalProvisionerDetails(dbp.Type, dbp.Details) - if err != nil { - return nil, err - } - - return &linkedca.Provisioner{ - Id: dbp.ID, - AuthorityId: dbp.AuthorityID, - Type: dbp.Type, - Name: dbp.Name, - Claims: dbp.Claims, - Details: details, - X509Template: dbp.X509Template, - SshTemplate: dbp.SSHTemplate, - CreatedAt: timestamppb.New(dbp.CreatedAt), - DeletedAt: timestamppb.New(dbp.DeletedAt), - }, nil -} - -func (db *DB) getDBProvisionerBytes(ctx context.Context, id string) ([]byte, error) { - data, err := db.db.Get(provisionersTable, []byte(id)) - if nosql.IsErrNotFound(err) { - return nil, admin.NewError(admin.ErrorNotFoundType, "provisioner %s not found", id) - } else if err != nil { - return nil, errors.Wrapf(err, "error loading provisioner %s", id) - } - return data, nil -} - -func (db *DB) unmarshalDBProvisioner(data []byte, id string) (*dbProvisioner, error) { - var dbp = new(dbProvisioner) - if err := json.Unmarshal(data, dbp); err != nil { - return nil, errors.Wrapf(err, "error unmarshaling provisioner %s into dbProvisioner", id) - } - if !dbp.DeletedAt.IsZero() { - return nil, admin.NewError(admin.ErrorDeletedType, "provisioner %s is deleted", id) - } - if dbp.AuthorityID != db.authorityID { - return nil, admin.NewError(admin.ErrorAuthorityMismatchType, - "provisioner %s is not owned by authority %s", id, db.authorityID) - } - return dbp, nil -} - -func (db *DB) getDBProvisioner(ctx context.Context, id string) (*dbProvisioner, error) { - data, err := db.getDBProvisionerBytes(ctx, id) - if err != nil { - return nil, err - } - dbp, err := db.unmarshalDBProvisioner(data, id) - if err != nil { - return nil, err - } - return dbp, nil -} - -func (db *DB) unmarshalProvisioner(data []byte, id string) (*linkedca.Provisioner, error) { - dbp, err := db.unmarshalDBProvisioner(data, id) - if err != nil { - return nil, err - } - - return dbp.convert2linkedca() -} - -// GetProvisioner retrieves and unmarshals a provisioner from the database. 
-func (db *DB) GetProvisioner(ctx context.Context, id string) (*linkedca.Provisioner, error) { - data, err := db.getDBProvisionerBytes(ctx, id) - if err != nil { - return nil, err - } - - prov, err := db.unmarshalProvisioner(data, id) - if err != nil { - return nil, err - } - return prov, nil -} - -// GetProvisioners retrieves and unmarshals all active (not deleted) provisioners -// from the database. -func (db *DB) GetProvisioners(ctx context.Context) ([]*linkedca.Provisioner, error) { - dbEntries, err := db.db.List(provisionersTable) - if err != nil { - return nil, errors.Wrap(err, "error loading provisioners") - } - var provs []*linkedca.Provisioner - for _, entry := range dbEntries { - prov, err := db.unmarshalProvisioner(entry.Value, string(entry.Key)) - if err != nil { - switch k := err.(type) { - case *admin.Error: - if k.IsType(admin.ErrorDeletedType) || k.IsType(admin.ErrorAuthorityMismatchType) { - continue - } else { - return nil, err - } - default: - return nil, err - } - } - if prov.AuthorityId != db.authorityID { - continue - } - provs = append(provs, prov) - } - return provs, nil -} - -// CreateProvisioner stores a new provisioner to the database. -func (db *DB) CreateProvisioner(ctx context.Context, prov *linkedca.Provisioner) error { - var err error - prov.Id, err = randID() - if err != nil { - return admin.WrapErrorISE(err, "error generating random id for provisioner") - } - - details, err := json.Marshal(prov.Details.GetData()) - if err != nil { - return admin.WrapErrorISE(err, "error marshaling details when creating provisioner %s", prov.Name) - } - - dbp := &dbProvisioner{ - ID: prov.Id, - AuthorityID: db.authorityID, - Type: prov.Type, - Name: prov.Name, - Claims: prov.Claims, - Details: details, - X509Template: prov.X509Template, - SSHTemplate: prov.SshTemplate, - CreatedAt: clock.Now(), - } - - if err := db.save(ctx, prov.Id, dbp, nil, "provisioner", provisionersTable); err != nil { - return admin.WrapErrorISE(err, "error creating provisioner %s", prov.Name) - } - - return nil -} - -// UpdateProvisioner saves an updated provisioner to the database. -func (db *DB) UpdateProvisioner(ctx context.Context, prov *linkedca.Provisioner) error { - old, err := db.getDBProvisioner(ctx, prov.Id) - if err != nil { - return err - } - - nu := old.clone() - - if old.Type != prov.Type { - return admin.NewError(admin.ErrorBadRequestType, "cannot update provisioner type") - } - nu.Name = prov.Name - nu.Claims = prov.Claims - nu.Details, err = json.Marshal(prov.Details.GetData()) - if err != nil { - return admin.WrapErrorISE(err, "error marshaling details when updating provisioner %s", prov.Name) - } - nu.X509Template = prov.X509Template - nu.SSHTemplate = prov.SshTemplate - - return db.save(ctx, prov.Id, nu, old, "provisioner", provisionersTable) -} - -// DeleteProvisioner saves an updated admin to the database. 
-func (db *DB) DeleteProvisioner(ctx context.Context, id string) error { - old, err := db.getDBProvisioner(ctx, id) - if err != nil { - return err - } - - nu := old.clone() - nu.DeletedAt = clock.Now() - - return db.save(ctx, old.ID, nu, old, "provisioner", provisionersTable) -} diff --git a/vendor/github.com/smallstep/certificates/authority/admin/errors.go b/vendor/github.com/smallstep/certificates/authority/admin/errors.go deleted file mode 100644 index 607093b0..00000000 --- a/vendor/github.com/smallstep/certificates/authority/admin/errors.go +++ /dev/null @@ -1,223 +0,0 @@ -package admin - -import ( - "encoding/json" - "fmt" - "log" - "net/http" - "os" - - "github.com/pkg/errors" - "github.com/smallstep/certificates/errs" - "github.com/smallstep/certificates/logging" -) - -// ProblemType is the type of the Admin problem. -type ProblemType int - -const ( - // ErrorNotFoundType resource not found. - ErrorNotFoundType ProblemType = iota - // ErrorAuthorityMismatchType resource Authority ID does not match the - // context Authority ID. - ErrorAuthorityMismatchType - // ErrorDeletedType resource has been deleted. - ErrorDeletedType - // ErrorBadRequestType bad request. - ErrorBadRequestType - // ErrorNotImplementedType not implemented. - ErrorNotImplementedType - // ErrorUnauthorizedType internal server error. - ErrorUnauthorizedType - // ErrorServerInternalType internal server error. - ErrorServerInternalType -) - -// String returns the string representation of the admin problem type, -// fulfilling the Stringer interface. -func (ap ProblemType) String() string { - switch ap { - case ErrorNotFoundType: - return "notFound" - case ErrorAuthorityMismatchType: - return "authorityMismatch" - case ErrorDeletedType: - return "deleted" - case ErrorBadRequestType: - return "badRequest" - case ErrorNotImplementedType: - return "notImplemented" - case ErrorUnauthorizedType: - return "unauthorized" - case ErrorServerInternalType: - return "internalServerError" - default: - return fmt.Sprintf("unsupported error type '%d'", int(ap)) - } -} - -type errorMetadata struct { - details string - status int - typ string - String string -} - -var ( - errorServerInternalMetadata = errorMetadata{ - typ: ErrorServerInternalType.String(), - details: "the server experienced an internal error", - status: 500, - } - errorMap = map[ProblemType]errorMetadata{ - ErrorNotFoundType: { - typ: ErrorNotFoundType.String(), - details: "resource not found", - status: http.StatusNotFound, - }, - ErrorAuthorityMismatchType: { - typ: ErrorAuthorityMismatchType.String(), - details: "resource not owned by authority", - status: http.StatusUnauthorized, - }, - ErrorDeletedType: { - typ: ErrorDeletedType.String(), - details: "resource is deleted", - status: http.StatusNotFound, - }, - ErrorNotImplementedType: { - typ: ErrorNotImplementedType.String(), - details: "not implemented", - status: http.StatusNotImplemented, - }, - ErrorBadRequestType: { - typ: ErrorBadRequestType.String(), - details: "bad request", - status: http.StatusBadRequest, - }, - ErrorUnauthorizedType: { - typ: ErrorUnauthorizedType.String(), - details: "unauthorized", - status: http.StatusUnauthorized, - }, - ErrorServerInternalType: errorServerInternalMetadata, - } -) - -// Error represents an Admin -type Error struct { - Type string `json:"type"` - Detail string `json:"detail"` - Message string `json:"message"` - Err error `json:"-"` - Status int `json:"-"` -} - -// IsType returns true if the error type matches the input type. 
-func (e *Error) IsType(pt ProblemType) bool { - return pt.String() == e.Type -} - -// NewError creates a new Error type. -func NewError(pt ProblemType, msg string, args ...interface{}) *Error { - return newError(pt, errors.Errorf(msg, args...)) -} - -func newError(pt ProblemType, err error) *Error { - meta, ok := errorMap[pt] - if !ok { - meta = errorServerInternalMetadata - return &Error{ - Type: meta.typ, - Detail: meta.details, - Status: meta.status, - Err: err, - } - } - - return &Error{ - Type: meta.typ, - Detail: meta.details, - Status: meta.status, - Err: err, - } -} - -// NewErrorISE creates a new ErrorServerInternalType Error. -func NewErrorISE(msg string, args ...interface{}) *Error { - return NewError(ErrorServerInternalType, msg, args...) -} - -// WrapError attempts to wrap the internal error. -func WrapError(typ ProblemType, err error, msg string, args ...interface{}) *Error { - switch e := err.(type) { - case nil: - return nil - case *Error: - if e.Err == nil { - e.Err = errors.Errorf(msg+"; "+e.Detail, args...) - } else { - e.Err = errors.Wrapf(e.Err, msg, args...) - } - return e - default: - return newError(typ, errors.Wrapf(err, msg, args...)) - } -} - -// WrapErrorISE shortcut to wrap an internal server error type. -func WrapErrorISE(err error, msg string, args ...interface{}) *Error { - return WrapError(ErrorServerInternalType, err, msg, args...) -} - -// StatusCode returns the status code and implements the StatusCoder interface. -func (e *Error) StatusCode() int { - return e.Status -} - -// Error allows AError to implement the error interface. -func (e *Error) Error() string { - return e.Err.Error() -} - -// Cause returns the internal error and implements the Causer interface. -func (e *Error) Cause() error { - if e.Err == nil { - return errors.New(e.Detail) - } - return e.Err -} - -// ToLog implements the EnableLogger interface. -func (e *Error) ToLog() (interface{}, error) { - b, err := json.Marshal(e) - if err != nil { - return nil, WrapErrorISE(err, "error marshaling authority.Error for logging") - } - return string(b), nil -} - -// WriteError writes to w a JSON representation of the given error. -func WriteError(w http.ResponseWriter, err *Error) { - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(err.StatusCode()) - - err.Message = err.Err.Error() - // Write errors in the response writer - if rl, ok := w.(logging.ResponseLogger); ok { - rl.WithFields(map[string]interface{}{ - "error": err.Err, - }) - if os.Getenv("STEPDEBUG") == "1" { - if e, ok := err.Err.(errs.StackTracer); ok { - rl.WithFields(map[string]interface{}{ - "stack-trace": fmt.Sprintf("%+v", e), - }) - } - } - } - - if err := json.NewEncoder(w).Encode(err); err != nil { - log.Println(err) - } -} diff --git a/vendor/github.com/smallstep/certificates/authority/administrator/collection.go b/vendor/github.com/smallstep/certificates/authority/administrator/collection.go deleted file mode 100644 index ff04a41f..00000000 --- a/vendor/github.com/smallstep/certificates/authority/administrator/collection.go +++ /dev/null @@ -1,243 +0,0 @@ -package administrator - -import ( - "sort" - "sync" - - "github.com/pkg/errors" - "github.com/smallstep/certificates/authority/admin" - "github.com/smallstep/certificates/authority/provisioner" - "go.step.sm/linkedca" -) - -// DefaultAdminLimit is the default limit for listing provisioners. -const DefaultAdminLimit = 20 - -// DefaultAdminMax is the maximum limit for listing provisioners. 
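The Error type above behaves like a small problem-details framework: NewError stamps type, detail, and HTTP status from errorMap, and WriteError renders the result as JSON with that status code. A minimal sketch of a handler wired to it (route, package name, and messages are illustrative, not from this repository):

package api

import (
	"net/http"

	"github.com/smallstep/certificates/authority/admin"
)

// getAdminHandler is a hypothetical handler: NewError picks the status and
// problem type, WriteError serializes it with that status code.
func getAdminHandler(w http.ResponseWriter, r *http.Request) {
	id := r.URL.Query().Get("id")
	if id == "" {
		admin.WriteError(w, admin.NewError(admin.ErrorBadRequestType, "id is required"))
		return
	}
	// ...lookup elided; a miss becomes a 404-typed problem document:
	admin.WriteError(w, admin.NewError(admin.ErrorNotFoundType, "admin %s not found", id))
}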
-const DefaultAdminMax = 100
-
-type adminSlice []*linkedca.Admin
-
-func (p adminSlice) Len() int { return len(p) }
-func (p adminSlice) Less(i, j int) bool { return p[i].Id < p[j].Id }
-func (p adminSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
-
-// Collection is a memory map of admins.
-type Collection struct {
-	byID                    *sync.Map
-	bySubProv               *sync.Map
-	byProv                  *sync.Map
-	sorted                  adminSlice
-	provisioners            *provisioner.Collection
-	superCount              int
-	superCountByProvisioner map[string]int
-}
-
-// NewCollection initializes a collection of admins backed by the given
-// collection of provisioners.
-func NewCollection(provisioners *provisioner.Collection) *Collection {
-	return &Collection{
-		byID:                    new(sync.Map),
-		byProv:                  new(sync.Map),
-		bySubProv:               new(sync.Map),
-		superCountByProvisioner: map[string]int{},
-		provisioners:            provisioners,
-	}
-}
-
-// LoadByID loads an admin by its ID.
-func (c *Collection) LoadByID(id string) (*linkedca.Admin, bool) {
-	return loadAdmin(c.byID, id)
-}
-
-type subProv struct {
-	subject     string
-	provisioner string
-}
-
-func newSubProv(subject, provisioner string) subProv {
-	return subProv{subject, provisioner}
-}
-
-// LoadBySubProv loads an admin by subject and provisioner name.
-func (c *Collection) LoadBySubProv(sub, provName string) (*linkedca.Admin, bool) {
-	return loadAdmin(c.bySubProv, newSubProv(sub, provName))
-}
-
-// LoadByProvisioner loads the list of admins for the given provisioner name.
-func (c *Collection) LoadByProvisioner(provName string) ([]*linkedca.Admin, bool) {
-	val, ok := c.byProv.Load(provName)
-	if !ok {
-		return nil, false
-	}
-	admins, ok := val.([]*linkedca.Admin)
-	if !ok {
-		return nil, false
-	}
-	return admins, true
-}
-
-// Store adds an admin to the collection and enforces the uniqueness of
-// admin IDs and admin subject <-> provisioner name combos.
-func (c *Collection) Store(adm *linkedca.Admin, prov provisioner.Interface) error {
-	// Input validation.
-	if adm.ProvisionerId != prov.GetID() {
-		return admin.NewErrorISE("admin.provisionerId does not match provisioner argument")
-	}
-
-	// Store admin always in byID. ID must be unique.
-	if _, loaded := c.byID.LoadOrStore(adm.Id, adm); loaded {
-		return errors.New("cannot add multiple admins with the same id")
-	}
-
-	provName := prov.GetName()
-	// Store admin always in bySubProv. Subject <-> ProvisionerName must be unique.
-	if _, loaded := c.bySubProv.LoadOrStore(newSubProv(adm.Subject, provName), adm); loaded {
-		c.byID.Delete(adm.Id)
-		return errors.New("cannot add multiple admins with the same subject and provisioner")
-	}
-
-	var isSuper = (adm.Type == linkedca.Admin_SUPER_ADMIN)
-	if admins, ok := c.LoadByProvisioner(provName); ok {
-		c.byProv.Store(provName, append(admins, adm))
-		if isSuper {
-			c.superCountByProvisioner[provName]++
-		}
-	} else {
-		c.byProv.Store(provName, []*linkedca.Admin{adm})
-		if isSuper {
-			c.superCountByProvisioner[provName] = 1
-		}
-	}
-	if isSuper {
-		c.superCount++
-	}
-
-	c.sorted = append(c.sorted, adm)
-	sort.Sort(c.sorted)
-
-	return nil
-}
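Store's uniqueness guarantees hinge on sync.Map.LoadOrStore being atomic: the primary byID insert either wins or reports a duplicate, and a conflict on the secondary bySubProv index rolls the primary insert back so the two maps stay consistent. The same two-index pattern in isolation (hypothetical names, standard library only):

package main

import (
	"errors"
	"fmt"
	"sync"
)

type pair struct{ subject, provisioner string }

// insert mirrors the byID/bySubProv dance in Store above: a duplicate on
// the secondary index undoes the already-committed primary insert.
func insert(byID, bySubProv *sync.Map, id string, key pair, v interface{}) error {
	if _, loaded := byID.LoadOrStore(id, v); loaded {
		return errors.New("duplicate id")
	}
	if _, loaded := bySubProv.LoadOrStore(key, v); loaded {
		byID.Delete(id) // roll back the first index
		return errors.New("duplicate subject/provisioner")
	}
	return nil
}

func main() {
	var byID, bySubProv sync.Map
	fmt.Println(insert(&byID, &bySubProv, "1", pair{"step", "jwk"}, "admin-1")) // <nil>
	fmt.Println(insert(&byID, &bySubProv, "2", pair{"step", "jwk"}, "admin-2")) // duplicate subject/provisioner
}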
-// Remove deletes an admin from all associated collections and lists.
-func (c *Collection) Remove(id string) error {
-	adm, ok := c.LoadByID(id)
-	if !ok {
-		return admin.NewError(admin.ErrorNotFoundType, "admin %s not found", id)
-	}
-	if adm.Type == linkedca.Admin_SUPER_ADMIN && c.SuperCount() == 1 {
-		return admin.NewError(admin.ErrorBadRequestType, "cannot remove the last super admin")
-	}
-	prov, ok := c.provisioners.Load(adm.ProvisionerId)
-	if !ok {
-		return admin.NewError(admin.ErrorNotFoundType,
-			"provisioner %s for admin %s not found", adm.ProvisionerId, id)
-	}
-	provName := prov.GetName()
-	adminsByProv, ok := c.LoadByProvisioner(provName)
-	if !ok {
-		return admin.NewError(admin.ErrorNotFoundType,
-			"admins not found for provisioner %s", provName)
-	}
-
-	// Find index in sorted list.
-	sortedIndex := sort.Search(c.sorted.Len(), func(i int) bool { return c.sorted[i].Id >= adm.Id })
-	if c.sorted[sortedIndex].Id != adm.Id {
-		return admin.NewError(admin.ErrorNotFoundType,
-			"admin %s not found in sorted list", adm.Id)
-	}
-
-	var found bool
-	for i, a := range adminsByProv {
-		if a.Id == adm.Id {
-			// Remove admin from list. https://stackoverflow.com/questions/37334119/how-to-delete-an-element-from-a-slice-in-golang
-			// Order does not matter.
-			adminsByProv[i] = adminsByProv[len(adminsByProv)-1]
-			c.byProv.Store(provName, adminsByProv[:len(adminsByProv)-1])
-			found = true
-		}
-	}
-	if !found {
-		return admin.NewError(admin.ErrorNotFoundType,
-			"admin %s not found in adminsByProvisioner list", adm.Id)
-	}
-
-	// Remove index in sorted list
-	copy(c.sorted[sortedIndex:], c.sorted[sortedIndex+1:]) // Shift a[i+1:] left one index.
-	c.sorted[len(c.sorted)-1] = nil                        // Erase last element (write zero value).
-	c.sorted = c.sorted[:len(c.sorted)-1]                  // Truncate slice.
-
-	c.byID.Delete(adm.Id)
-	c.bySubProv.Delete(newSubProv(adm.Subject, provName))
-
-	if adm.Type == linkedca.Admin_SUPER_ADMIN {
-		c.superCount--
-		c.superCountByProvisioner[provName]--
-	}
-	return nil
-}
-
-// Update updates the given admin in all related lists and collections.
-func (c *Collection) Update(id string, nu *linkedca.Admin) (*linkedca.Admin, error) {
-	adm, ok := c.LoadByID(id)
-	if !ok {
-		return nil, admin.NewError(admin.ErrorNotFoundType, "admin %s not found", id)
-	}
-	if adm.Type == nu.Type {
-		return adm, nil
-	}
-	if adm.Type == linkedca.Admin_SUPER_ADMIN && c.SuperCount() == 1 {
-		return nil, admin.NewError(admin.ErrorBadRequestType, "cannot change role of last super admin")
-	}
-
-	adm.Type = nu.Type
-	return adm, nil
-}
-
-// SuperCount returns the total number of super admins.
-func (c *Collection) SuperCount() int {
-	return c.superCount
-}
-
-// SuperCountByProvisioner returns the number of super admins for the given
-// provisioner name.
-func (c *Collection) SuperCountByProvisioner(provName string) int {
-	if cnt, ok := c.superCountByProvisioner[provName]; ok {
-		return cnt
-	}
-	return 0
-}
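The Find method below returns, alongside each page, the ID of the first admin past that page; callers page through by feeding the cursor back in until it comes back empty, as the GetAdmins wrapper further below does for API consumers. A self-contained sketch of that loop (pageFunc and the fake pager are illustrative; a method value such as a.GetAdmins would satisfy pageFunc):

package main

import (
	"fmt"

	"go.step.sm/linkedca"
)

// pageFunc matches the GetAdmins signature below.
type pageFunc func(cursor string, limit int) ([]*linkedca.Admin, string, error)

// listAll drains every page by feeding the returned cursor back in until
// it comes back empty.
func listAll(get pageFunc) ([]*linkedca.Admin, error) {
	var (
		all    []*linkedca.Admin
		cursor string
	)
	for {
		page, next, err := get(cursor, 20)
		if err != nil {
			return nil, err
		}
		all = append(all, page...)
		if next == "" {
			return all, nil // last page reached
		}
		cursor = next
	}
}

func main() {
	// Fake pager over three admins, capped at two per page, to show the
	// cursor semantics: entries with Id >= cursor are included.
	ids := []string{"a", "b", "c"}
	get := func(cursor string, limit int) ([]*linkedca.Admin, string, error) {
		i := 0
		for i < len(ids) && ids[i] < cursor {
			i++
		}
		var page []*linkedca.Admin
		for len(page) < 2 && i < len(ids) {
			page = append(page, &linkedca.Admin{Id: ids[i]})
			i++
		}
		next := ""
		if i < len(ids) {
			next = ids[i]
		}
		return page, next, nil
	}
	admins, _ := listAll(get)
	fmt.Println(len(admins)) // 3
}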
-// Find implements pagination on a list of sorted admins.
-func (c *Collection) Find(cursor string, limit int) ([]*linkedca.Admin, string) {
-	switch {
-	case limit <= 0:
-		limit = DefaultAdminLimit
-	case limit > DefaultAdminMax:
-		limit = DefaultAdminMax
-	}
-
-	n := c.sorted.Len()
-	i := sort.Search(n, func(i int) bool { return c.sorted[i].Id >= cursor })
-
-	slice := []*linkedca.Admin{}
-	for ; i < n && len(slice) < limit; i++ {
-		slice = append(slice, c.sorted[i])
-	}
-
-	if i < n {
-		return slice, c.sorted[i].Id
-	}
-	return slice, ""
-}
-
-func loadAdmin(m *sync.Map, key interface{}) (*linkedca.Admin, bool) {
-	val, ok := m.Load(key)
-	if !ok {
-		return nil, false
-	}
-	adm, ok := val.(*linkedca.Admin)
-	if !ok {
-		return nil, false
-	}
-	return adm, true
-}
diff --git a/vendor/github.com/smallstep/certificates/authority/admins.go b/vendor/github.com/smallstep/certificates/authority/admins.go
deleted file mode 100644
index dcaf9b49..00000000
--- a/vendor/github.com/smallstep/certificates/authority/admins.go
+++ /dev/null
@@ -1,97 +0,0 @@
-package authority
-
-import (
-	"context"
-
-	"github.com/smallstep/certificates/authority/admin"
-	"github.com/smallstep/certificates/authority/provisioner"
-	"go.step.sm/linkedca"
-)
-
-// LoadAdminByID returns an *linkedca.Admin with the given ID.
-func (a *Authority) LoadAdminByID(id string) (*linkedca.Admin, bool) {
-	a.adminMutex.RLock()
-	defer a.adminMutex.RUnlock()
-	return a.admins.LoadByID(id)
-}
-
-// LoadAdminBySubProv returns an *linkedca.Admin with the given subject and
-// provisioner name.
-func (a *Authority) LoadAdminBySubProv(subject, provisioner string) (*linkedca.Admin, bool) {
-	a.adminMutex.RLock()
-	defer a.adminMutex.RUnlock()
-	return a.admins.LoadBySubProv(subject, provisioner)
-}
-
-// GetAdmins returns a paginated list of admins, along with the cursor for the
-// next page.
-func (a *Authority) GetAdmins(cursor string, limit int) ([]*linkedca.Admin, string, error) {
-	a.adminMutex.RLock()
-	defer a.adminMutex.RUnlock()
-	admins, nextCursor := a.admins.Find(cursor, limit)
-	return admins, nextCursor, nil
-}
-
-// StoreAdmin stores an *linkedca.Admin to the authority.
-func (a *Authority) StoreAdmin(ctx context.Context, adm *linkedca.Admin, prov provisioner.Interface) error {
-	a.adminMutex.Lock()
-	defer a.adminMutex.Unlock()
-
-	if adm.ProvisionerId != prov.GetID() {
-		return admin.NewErrorISE("admin.provisionerId does not match provisioner argument")
-	}
-
-	if _, ok := a.admins.LoadBySubProv(adm.Subject, prov.GetName()); ok {
-		return admin.NewError(admin.ErrorBadRequestType,
-			"admin with subject %s and provisioner %s already exists", adm.Subject, prov.GetName())
-	}
-	// Store to database -- this will set the ID.
-	if err := a.adminDB.CreateAdmin(ctx, adm); err != nil {
-		return admin.WrapErrorISE(err, "error creating admin")
-	}
-	if err := a.admins.Store(adm, prov); err != nil {
-		if err := a.reloadAdminResources(ctx); err != nil {
-			return admin.WrapErrorISE(err, "error reloading admin resources on failed admin store")
-		}
-		return admin.WrapErrorISE(err, "error storing admin in authority cache")
-	}
-	return nil
-}
-
-// UpdateAdmin updates an *linkedca.Admin in the authority's cache and database.
-func (a *Authority) UpdateAdmin(ctx context.Context, id string, nu *linkedca.Admin) (*linkedca.Admin, error) { - a.adminMutex.Lock() - defer a.adminMutex.Unlock() - adm, err := a.admins.Update(id, nu) - if err != nil { - return nil, admin.WrapErrorISE(err, "error updating cached admin %s", id) - } - if err := a.adminDB.UpdateAdmin(ctx, adm); err != nil { - if err := a.reloadAdminResources(ctx); err != nil { - return nil, admin.WrapErrorISE(err, "error reloading admin resources on failed admin update") - } - return nil, admin.WrapErrorISE(err, "error updating admin %s", id) - } - return adm, nil -} - -// RemoveAdmin removes an *linkedca.Admin from the authority. -func (a *Authority) RemoveAdmin(ctx context.Context, id string) error { - a.adminMutex.Lock() - defer a.adminMutex.Unlock() - - return a.removeAdmin(ctx, id) -} - -// removeAdmin helper that assumes lock. -func (a *Authority) removeAdmin(ctx context.Context, id string) error { - if err := a.admins.Remove(id); err != nil { - return admin.WrapErrorISE(err, "error removing admin %s from authority cache", id) - } - if err := a.adminDB.DeleteAdmin(ctx, id); err != nil { - if err := a.reloadAdminResources(ctx); err != nil { - return admin.WrapErrorISE(err, "error reloading admin resources on failed admin remove") - } - return admin.WrapErrorISE(err, "error deleting admin %s", id) - } - return nil -} diff --git a/vendor/github.com/smallstep/certificates/authority/authority.go b/vendor/github.com/smallstep/certificates/authority/authority.go deleted file mode 100644 index 0f171fa7..00000000 --- a/vendor/github.com/smallstep/certificates/authority/authority.go +++ /dev/null @@ -1,558 +0,0 @@ -package authority - -import ( - "context" - "crypto" - "crypto/sha256" - "crypto/x509" - "encoding/hex" - "log" - "sync" - "time" - - "github.com/smallstep/certificates/cas" - "github.com/smallstep/certificates/scep" - "go.step.sm/linkedca" - - "github.com/pkg/errors" - "github.com/smallstep/certificates/authority/admin" - adminDBNosql "github.com/smallstep/certificates/authority/admin/db/nosql" - "github.com/smallstep/certificates/authority/administrator" - "github.com/smallstep/certificates/authority/config" - "github.com/smallstep/certificates/authority/provisioner" - casapi "github.com/smallstep/certificates/cas/apiv1" - "github.com/smallstep/certificates/db" - "github.com/smallstep/certificates/kms" - kmsapi "github.com/smallstep/certificates/kms/apiv1" - "github.com/smallstep/certificates/kms/sshagentkms" - "github.com/smallstep/certificates/templates" - "github.com/smallstep/nosql" - "go.step.sm/crypto/pemutil" - "golang.org/x/crypto/ssh" -) - -// Authority implements the Certificate Authority internal interface. 
-type Authority struct { - config *config.Config - keyManager kms.KeyManager - provisioners *provisioner.Collection - admins *administrator.Collection - db db.AuthDB - adminDB admin.DB - templates *templates.Templates - - // X509 CA - x509CAService cas.CertificateAuthorityService - rootX509Certs []*x509.Certificate - rootX509CertPool *x509.CertPool - federatedX509Certs []*x509.Certificate - certificates *sync.Map - - // SCEP CA - scepService *scep.Service - - // SSH CA - sshCAUserCertSignKey ssh.Signer - sshCAHostCertSignKey ssh.Signer - sshCAUserCerts []ssh.PublicKey - sshCAHostCerts []ssh.PublicKey - sshCAUserFederatedCerts []ssh.PublicKey - sshCAHostFederatedCerts []ssh.PublicKey - - // Do not re-initialize - initOnce bool - startTime time.Time - - // Custom functions - sshBastionFunc func(ctx context.Context, user, hostname string) (*config.Bastion, error) - sshCheckHostFunc func(ctx context.Context, principal string, tok string, roots []*x509.Certificate) (bool, error) - sshGetHostsFunc func(ctx context.Context, cert *x509.Certificate) ([]config.Host, error) - getIdentityFunc provisioner.GetIdentityFunc - - adminMutex sync.RWMutex -} - -// New creates and initiates a new Authority type. -func New(config *config.Config, opts ...Option) (*Authority, error) { - err := config.Validate() - if err != nil { - return nil, err - } - - var a = &Authority{ - config: config, - certificates: new(sync.Map), - } - - // Apply options. - for _, fn := range opts { - if err := fn(a); err != nil { - return nil, err - } - } - - // Initialize authority from options or configuration. - if err := a.init(); err != nil { - return nil, err - } - - return a, nil -} - -// NewEmbedded initializes an authority that can be embedded in a different -// project without the limitations of the config. -func NewEmbedded(opts ...Option) (*Authority, error) { - a := &Authority{ - config: &config.Config{}, - certificates: new(sync.Map), - } - - // Apply options. - for _, fn := range opts { - if err := fn(a); err != nil { - return nil, err - } - } - - // Validate required options - switch { - case a.config == nil: - return nil, errors.New("cannot create an authority without a configuration") - case len(a.rootX509Certs) == 0 && a.config.Root.HasEmpties(): - return nil, errors.New("cannot create an authority without a root certificate") - case a.x509CAService == nil && a.config.IntermediateCert == "": - return nil, errors.New("cannot create an authority without an issuer certificate") - case a.x509CAService == nil && a.config.IntermediateKey == "": - return nil, errors.New("cannot create an authority without an issuer signer") - } - - // Initialize config required fields. - a.config.Init() - - // Initialize authority from options or configuration. - if err := a.init(); err != nil { - return nil, err - } - - return a, nil -} - -// reloadAdminResources reloads admins and provisioners from the DB. 
-func (a *Authority) reloadAdminResources(ctx context.Context) error { - var ( - provList provisioner.List - adminList []*linkedca.Admin - ) - if a.config.AuthorityConfig.EnableAdmin { - provs, err := a.adminDB.GetProvisioners(ctx) - if err != nil { - return admin.WrapErrorISE(err, "error getting provisioners to initialize authority") - } - provList, err = provisionerListToCertificates(provs) - if err != nil { - return admin.WrapErrorISE(err, "error converting provisioner list to certificates") - } - adminList, err = a.adminDB.GetAdmins(ctx) - if err != nil { - return admin.WrapErrorISE(err, "error getting admins to initialize authority") - } - } else { - provList = a.config.AuthorityConfig.Provisioners - adminList = a.config.AuthorityConfig.Admins - } - - provisionerConfig, err := a.generateProvisionerConfig(ctx) - if err != nil { - return admin.WrapErrorISE(err, "error generating provisioner config") - } - - // Create provisioner collection. - provClxn := provisioner.NewCollection(provisionerConfig.Audiences) - for _, p := range provList { - if err := p.Init(*provisionerConfig); err != nil { - return err - } - if err := provClxn.Store(p); err != nil { - return err - } - } - // Create admin collection. - adminClxn := administrator.NewCollection(provClxn) - for _, adm := range adminList { - p, ok := provClxn.Load(adm.ProvisionerId) - if !ok { - return admin.NewErrorISE("provisioner %s not found when loading admin %s", - adm.ProvisionerId, adm.Id) - } - if err := adminClxn.Store(adm, p); err != nil { - return err - } - } - - a.config.AuthorityConfig.Provisioners = provList - a.provisioners = provClxn - a.config.AuthorityConfig.Admins = adminList - a.admins = adminClxn - return nil -} - -// init performs validation and initializes the fields of an Authority struct. -func (a *Authority) init() error { - // Check if handler has already been validated/initialized. - if a.initOnce { - return nil - } - - var err error - - // Initialize step-ca Database if it's not already initialized with WithDB. - // If a.config.DB is nil then a simple, barebones in memory DB will be used. - if a.db == nil { - if a.db, err = db.New(a.config.DB); err != nil { - return err - } - } - - // Initialize key manager if it has not been set in the options. - if a.keyManager == nil { - var options kmsapi.Options - if a.config.KMS != nil { - options = *a.config.KMS - } - a.keyManager, err = kms.New(context.Background(), options) - if err != nil { - return err - } - } - - // Initialize the X.509 CA Service if it has not been set in the options. - if a.x509CAService == nil { - var options casapi.Options - if a.config.AuthorityConfig.Options != nil { - options = *a.config.AuthorityConfig.Options - } - - // Read intermediate and create X509 signer for default CAS. - if options.Is(casapi.SoftCAS) { - options.CertificateChain, err = pemutil.ReadCertificateBundle(a.config.IntermediateCert) - if err != nil { - return err - } - options.Signer, err = a.keyManager.CreateSigner(&kmsapi.CreateSignerRequest{ - SigningKey: a.config.IntermediateKey, - Password: []byte(a.config.Password), - }) - if err != nil { - return err - } - } - - a.x509CAService, err = cas.New(context.Background(), options) - if err != nil { - return err - } - - // Get root certificate from CAS. 
- if srv, ok := a.x509CAService.(casapi.CertificateAuthorityGetter); ok { - resp, err := srv.GetCertificateAuthority(&casapi.GetCertificateAuthorityRequest{ - Name: options.CertificateAuthority, - }) - if err != nil { - return err - } - a.rootX509Certs = append(a.rootX509Certs, resp.RootCertificate) - sum := sha256.Sum256(resp.RootCertificate.Raw) - log.Printf("Using root fingerprint '%s'", hex.EncodeToString(sum[:])) - } - } - - // Read root certificates and store them in the certificates map. - if len(a.rootX509Certs) == 0 { - a.rootX509Certs = make([]*x509.Certificate, len(a.config.Root)) - for i, path := range a.config.Root { - crt, err := pemutil.ReadCertificate(path) - if err != nil { - return err - } - a.rootX509Certs[i] = crt - } - } - for _, crt := range a.rootX509Certs { - sum := sha256.Sum256(crt.Raw) - a.certificates.Store(hex.EncodeToString(sum[:]), crt) - } - - a.rootX509CertPool = x509.NewCertPool() - for _, cert := range a.rootX509Certs { - a.rootX509CertPool.AddCert(cert) - } - - // Read federated certificates and store them in the certificates map. - if len(a.federatedX509Certs) == 0 { - a.federatedX509Certs = make([]*x509.Certificate, len(a.config.FederatedRoots)) - for i, path := range a.config.FederatedRoots { - crt, err := pemutil.ReadCertificate(path) - if err != nil { - return err - } - a.federatedX509Certs[i] = crt - } - } - for _, crt := range a.federatedX509Certs { - sum := sha256.Sum256(crt.Raw) - a.certificates.Store(hex.EncodeToString(sum[:]), crt) - } - - // Decrypt and load SSH keys - var tmplVars templates.Step - if a.config.SSH != nil { - if a.config.SSH.HostKey != "" { - signer, err := a.keyManager.CreateSigner(&kmsapi.CreateSignerRequest{ - SigningKey: a.config.SSH.HostKey, - Password: []byte(a.config.Password), - }) - if err != nil { - return err - } - // If our signer is from sshagentkms, just unwrap it instead of - // wrapping it in another layer, and this prevents crypto from - // erroring out with: ssh: unsupported key type *agent.Key - switch s := signer.(type) { - case *sshagentkms.WrappedSSHSigner: - a.sshCAHostCertSignKey = s.Sshsigner - case crypto.Signer: - a.sshCAHostCertSignKey, err = ssh.NewSignerFromSigner(s) - default: - return errors.Errorf("unsupported signer type %T", signer) - } - if err != nil { - return errors.Wrap(err, "error creating ssh signer") - } - // Append public key to list of host certs - a.sshCAHostCerts = append(a.sshCAHostCerts, a.sshCAHostCertSignKey.PublicKey()) - a.sshCAHostFederatedCerts = append(a.sshCAHostFederatedCerts, a.sshCAHostCertSignKey.PublicKey()) - } - if a.config.SSH.UserKey != "" { - signer, err := a.keyManager.CreateSigner(&kmsapi.CreateSignerRequest{ - SigningKey: a.config.SSH.UserKey, - Password: []byte(a.config.Password), - }) - if err != nil { - return err - } - // If our signer is from sshagentkms, just unwrap it instead of - // wrapping it in another layer, and this prevents crypto from - // erroring out with: ssh: unsupported key type *agent.Key - switch s := signer.(type) { - case *sshagentkms.WrappedSSHSigner: - a.sshCAUserCertSignKey = s.Sshsigner - case crypto.Signer: - a.sshCAUserCertSignKey, err = ssh.NewSignerFromSigner(s) - default: - return errors.Errorf("unsupported signer type %T", signer) - } - if err != nil { - return errors.Wrap(err, "error creating ssh signer") - } - // Append public key to list of user certs - a.sshCAUserCerts = append(a.sshCAUserCerts, a.sshCAUserCertSignKey.PublicKey()) - a.sshCAUserFederatedCerts = append(a.sshCAUserFederatedCerts, 
a.sshCAUserCertSignKey.PublicKey())
-		}
-
-		// Append other public keys
-		for _, key := range a.config.SSH.Keys {
-			switch key.Type {
-			case provisioner.SSHHostCert:
-				if key.Federated {
-					a.sshCAHostFederatedCerts = append(a.sshCAHostFederatedCerts, key.PublicKey())
-				} else {
-					a.sshCAHostCerts = append(a.sshCAHostCerts, key.PublicKey())
-				}
-			case provisioner.SSHUserCert:
-				if key.Federated {
-					a.sshCAUserFederatedCerts = append(a.sshCAUserFederatedCerts, key.PublicKey())
-				} else {
-					a.sshCAUserCerts = append(a.sshCAUserCerts, key.PublicKey())
-				}
-			default:
-				return errors.Errorf("unsupported type %s", key.Type)
-			}
-		}
-
-		// Configure template variables.
-		tmplVars.SSH.HostKey = a.sshCAHostCertSignKey.PublicKey()
-		tmplVars.SSH.UserKey = a.sshCAUserCertSignKey.PublicKey()
-		// On the templates we skip the first one because there's a distinction
-		// between the main key and federated keys.
-		tmplVars.SSH.HostFederatedKeys = append(tmplVars.SSH.HostFederatedKeys, a.sshCAHostFederatedCerts[1:]...)
-		tmplVars.SSH.UserFederatedKeys = append(tmplVars.SSH.UserFederatedKeys, a.sshCAUserFederatedCerts[1:]...)
-	}
-
-	// Check if a KMS with decryption capability is required and available
-	if a.requiresDecrypter() {
-		if _, ok := a.keyManager.(kmsapi.Decrypter); !ok {
-			return errors.New("keymanager doesn't provide crypto.Decrypter")
-		}
-	}
-
-	// TODO: decide if this is a good approach for providing the SCEP functionality
-	// It currently mirrors the logic for the x509CAService
-	if a.requiresSCEPService() && a.scepService == nil {
-		var options scep.Options
-
-		// Read intermediate and create X509 signer and decrypter for default CAS.
-		options.CertificateChain, err = pemutil.ReadCertificateBundle(a.config.IntermediateCert)
-		if err != nil {
-			return err
-		}
-		options.Signer, err = a.keyManager.CreateSigner(&kmsapi.CreateSignerRequest{
-			SigningKey: a.config.IntermediateKey,
-			Password:   []byte(a.config.Password),
-		})
-		if err != nil {
-			return err
-		}
-
-		if km, ok := a.keyManager.(kmsapi.Decrypter); ok {
-			options.Decrypter, err = km.CreateDecrypter(&kmsapi.CreateDecrypterRequest{
-				DecryptionKey: a.config.IntermediateKey,
-				Password:      []byte(a.config.Password),
-			})
-			if err != nil {
-				return err
-			}
-		}
-
-		a.scepService, err = scep.NewService(context.Background(), options)
-		if err != nil {
-			return err
-		}
-
-		// TODO: mimic the x509CAService GetCertificateAuthority here too?
-	}
-
-	if a.config.AuthorityConfig.EnableAdmin {
-		// Initialize step-ca Admin Database if it's not already initialized using
-		// WithAdminDB.
- if a.adminDB == nil { - // Check if AuthConfig already exists - a.adminDB, err = adminDBNosql.New(a.db.(nosql.DB), admin.DefaultAuthorityID) - if err != nil { - return err - } - } - - provs, err := a.adminDB.GetProvisioners(context.Background()) - if err != nil { - return admin.WrapErrorISE(err, "error loading provisioners to initialize authority") - } - if len(provs) == 0 { - // Create First Provisioner - prov, err := CreateFirstProvisioner(context.Background(), a.adminDB, a.config.Password) - if err != nil { - return admin.WrapErrorISE(err, "error creating first provisioner") - } - - // Create first admin - if err := a.adminDB.CreateAdmin(context.Background(), &linkedca.Admin{ - ProvisionerId: prov.Id, - Subject: "step", - Type: linkedca.Admin_SUPER_ADMIN, - }); err != nil { - return admin.WrapErrorISE(err, "error creating first admin") - } - } - } - - // Load Provisioners and Admins - if err := a.reloadAdminResources(context.Background()); err != nil { - return err - } - - // Configure templates, currently only ssh templates are supported. - if a.sshCAHostCertSignKey != nil || a.sshCAUserCertSignKey != nil { - a.templates = a.config.Templates - if a.templates == nil { - a.templates = templates.DefaultTemplates() - } - if a.templates.Data == nil { - a.templates.Data = make(map[string]interface{}) - } - a.templates.Data["Step"] = tmplVars - } - - // JWT numeric dates are seconds. - a.startTime = time.Now().Truncate(time.Second) - // Set flag indicating that initialization has been completed, and should - // not be repeated. - a.initOnce = true - - return nil -} - -// GetDatabase returns the authority database. If the configuration does not -// define a database, GetDatabase will return a db.SimpleDB instance. -func (a *Authority) GetDatabase() db.AuthDB { - return a.db -} - -// GetAdminDatabase returns the admin database, if one exists. -func (a *Authority) GetAdminDatabase() admin.DB { - return a.adminDB -} - -// IsAdminAPIEnabled returns a boolean indicating whether the Admin API has -// been enabled. -func (a *Authority) IsAdminAPIEnabled() bool { - return a.config.AuthorityConfig.EnableAdmin -} - -// Shutdown safely shuts down any clients, databases, etc. held by the Authority. -func (a *Authority) Shutdown() error { - if err := a.keyManager.Close(); err != nil { - log.Printf("error closing the key manager: %v", err) - } - return a.db.Shutdown() -} - -// CloseForReload closes internal services, to allow a safe reload. -func (a *Authority) CloseForReload() { - if err := a.keyManager.Close(); err != nil { - log.Printf("error closing the key manager: %v", err) - } -} - -// requiresDecrypter returns whether the Authority -// requires a KMS that provides a crypto.Decrypter -// Currently this is only required when SCEP is -// enabled. -func (a *Authority) requiresDecrypter() bool { - return a.requiresSCEPService() -} - -// requiresSCEPService iterates over the configured provisioners -// and determines if one of them is a SCEP provisioner. -func (a *Authority) requiresSCEPService() bool { - for _, p := range a.config.AuthorityConfig.Provisioners { - if p.GetType() == provisioner.TypeSCEP { - return true - } - } - return false -} - -// GetSCEPService returns the configured SCEP Service -// TODO: this function is intended to exist temporarily -// in order to make SCEP work more easily. It can be -// made more correct by using the right interfaces/abstractions -// after it works as expected. 
-func (a *Authority) GetSCEPService() *scep.Service { - return a.scepService -} diff --git a/vendor/github.com/smallstep/certificates/authority/authorize.go b/vendor/github.com/smallstep/certificates/authority/authorize.go deleted file mode 100644 index 69ad2a90..00000000 --- a/vendor/github.com/smallstep/certificates/authority/authorize.go +++ /dev/null @@ -1,351 +0,0 @@ -package authority - -import ( - "context" - "crypto/sha256" - "crypto/x509" - "encoding/hex" - "net/http" - "strings" - "time" - - "github.com/smallstep/certificates/authority/admin" - "github.com/smallstep/certificates/authority/provisioner" - "github.com/smallstep/certificates/errs" - "go.step.sm/crypto/jose" - "go.step.sm/linkedca" - "golang.org/x/crypto/ssh" -) - -// Claims extends jose.Claims with step attributes. -type Claims struct { - jose.Claims - SANs []string `json:"sans,omitempty"` - Email string `json:"email,omitempty"` - Nonce string `json:"nonce,omitempty"` -} - -type skipTokenReuseKey struct{} - -// NewContextWithSkipTokenReuse creates a new context from ctx and attaches a -// value to skip the token reuse. -func NewContextWithSkipTokenReuse(ctx context.Context) context.Context { - return context.WithValue(ctx, skipTokenReuseKey{}, true) -} - -// SkipTokenReuseFromContext returns if the token reuse needs to be ignored. -func SkipTokenReuseFromContext(ctx context.Context) bool { - m, _ := ctx.Value(skipTokenReuseKey{}).(bool) - return m -} - -// authorizeToken parses the token and returns the provisioner used to generate -// the token. This method enforces the One-Time use policy (tokens can only be -// used once). -func (a *Authority) authorizeToken(ctx context.Context, token string) (provisioner.Interface, error) { - // Validate payload - tok, err := jose.ParseSigned(token) - if err != nil { - return nil, errs.Wrap(http.StatusUnauthorized, err, "authority.authorizeToken: error parsing token") - } - - // Get claims w/out verification. We need to look up the provisioner - // key in order to verify the claims and we need the issuer from the claims - // before we can look up the provisioner. - var claims Claims - if err = tok.UnsafeClaimsWithoutVerification(&claims); err != nil { - return nil, errs.Wrap(http.StatusUnauthorized, err, "authority.authorizeToken") - } - - // TODO: use new persistence layer abstraction. - // Do not accept tokens issued before the start of the ca. - // This check is meant as a stopgap solution to the current lack of a persistence layer. - if a.config.AuthorityConfig != nil && !a.config.AuthorityConfig.DisableIssuedAtCheck { - if claims.IssuedAt != nil && claims.IssuedAt.Time().Before(a.startTime) { - return nil, errs.Unauthorized("authority.authorizeToken: token issued before the bootstrap of certificate authority") - } - } - - // This method will also validate the audiences for JWK provisioners. - p, ok := a.provisioners.LoadByToken(tok, &claims.Claims) - if !ok { - return nil, errs.Unauthorized("authority.authorizeToken: provisioner "+ - "not found or invalid audience (%s)", strings.Join(claims.Audience, ", ")) - } - - // Store the token to protect against reuse unless it's skipped. - // If we cannot get a token id from the provisioner, just hash the token. - if !SkipTokenReuseFromContext(ctx) { - if err = a.UseToken(token, p); err != nil { - return nil, err - } - } - - return p, nil -} - -// AuthorizeAdminToken authorize an Admin token. 
-func (a *Authority) AuthorizeAdminToken(r *http.Request, token string) (*linkedca.Admin, error) { - jwt, err := jose.ParseSigned(token) - if err != nil { - return nil, admin.WrapError(admin.ErrorUnauthorizedType, err, "adminHandler.authorizeToken; error parsing x5c token") - } - - verifiedChains, err := jwt.Headers[0].Certificates(x509.VerifyOptions{ - Roots: a.rootX509CertPool, - KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, - }) - if err != nil { - return nil, admin.WrapError(admin.ErrorUnauthorizedType, err, - "adminHandler.authorizeToken; error verifying x5c certificate chain in token") - } - leaf := verifiedChains[0][0] - - if leaf.KeyUsage&x509.KeyUsageDigitalSignature == 0 { - return nil, admin.NewError(admin.ErrorUnauthorizedType, "adminHandler.authorizeToken; certificate used to sign x5c token cannot be used for digital signature") - } - - // Using the leaf certificates key to validate the claims accomplishes two - // things: - // 1. Asserts that the private key used to sign the token corresponds - // to the public certificate in the `x5c` header of the token. - // 2. Asserts that the claims are valid - have not been tampered with. - var claims jose.Claims - if err = jwt.Claims(leaf.PublicKey, &claims); err != nil { - return nil, admin.WrapError(admin.ErrorUnauthorizedType, err, "adminHandler.authorizeToken; error parsing x5c claims") - } - - prov, err := a.LoadProvisionerByCertificate(leaf) - if err != nil { - return nil, err - } - - // Check that the token has not been used. - if err = a.UseToken(token, prov); err != nil { - return nil, admin.WrapError(admin.ErrorUnauthorizedType, err, "adminHandler.authorizeToken; error with reuse token") - } - - // According to "rfc7519 JSON Web Token" acceptable skew should be no - // more than a few minutes. - if err = claims.ValidateWithLeeway(jose.Expected{ - Issuer: prov.GetName(), - Time: time.Now().UTC(), - }, time.Minute); err != nil { - return nil, admin.WrapError(admin.ErrorUnauthorizedType, err, "x5c.authorizeToken; invalid x5c claims") - } - - // validate audience: path matches the current path - if r.URL.Path != claims.Audience[0] { - return nil, admin.NewError(admin.ErrorUnauthorizedType, - "x5c.authorizeToken; x5c token has invalid audience "+ - "claim (aud); expected %s, but got %s", r.URL.Path, claims.Audience) - } - - if claims.Subject == "" { - return nil, admin.NewError(admin.ErrorUnauthorizedType, - "x5c.authorizeToken; x5c token subject cannot be empty") - } - - var ( - ok bool - adm *linkedca.Admin - ) - adminFound := false - adminSANs := append([]string{leaf.Subject.CommonName}, leaf.DNSNames...) - adminSANs = append(adminSANs, leaf.EmailAddresses...) - for _, san := range adminSANs { - if adm, ok = a.LoadAdminBySubProv(san, claims.Issuer); ok { - adminFound = true - break - } - } - if !adminFound { - return nil, admin.NewError(admin.ErrorUnauthorizedType, - "adminHandler.authorizeToken; unable to load admin with subject(s) %s and provisioner '%s'", - adminSANs, claims.Issuer) - } - - if strings.HasPrefix(r.URL.Path, "/admin/admins") && (r.Method != "GET") && adm.Type != linkedca.Admin_SUPER_ADMIN { - return nil, admin.NewError(admin.ErrorUnauthorizedType, "must have super admin access to make this request") - } - - return adm, nil -} - -// UseToken stores the token to protect against reuse. -// -// This method currently ignores any error coming from the GetTokenID, but it -// should specifically ignore the error provisioner.ErrAllowTokenReuse. 
-func (a *Authority) UseToken(token string, prov provisioner.Interface) error {
-	if reuseKey, err := prov.GetTokenID(token); err == nil {
-		if reuseKey == "" {
-			sum := sha256.Sum256([]byte(token))
-			reuseKey = strings.ToLower(hex.EncodeToString(sum[:]))
-		}
-		ok, err := a.db.UseToken(reuseKey, token)
-		if err != nil {
-			return errs.Wrap(http.StatusInternalServerError, err,
-				"authority.authorizeToken: failed when attempting to store token")
-		}
-		if !ok {
-			return errs.Unauthorized("authority.authorizeToken: token already used")
-		}
-	}
-	return nil
-}
-
-// Authorize grabs the method from the context and authorizes the request by
-// validating the one-time token.
-func (a *Authority) Authorize(ctx context.Context, token string) ([]provisioner.SignOption, error) {
-	var opts = []interface{}{errs.WithKeyVal("token", token)}
-
-	switch m := provisioner.MethodFromContext(ctx); m {
-	case provisioner.SignMethod:
-		signOpts, err := a.authorizeSign(ctx, token)
-		return signOpts, errs.Wrap(http.StatusInternalServerError, err, "authority.Authorize", opts...)
-	case provisioner.RevokeMethod:
-		return nil, errs.Wrap(http.StatusInternalServerError, a.authorizeRevoke(ctx, token), "authority.Authorize", opts...)
-	case provisioner.SSHSignMethod:
-		if a.sshCAHostCertSignKey == nil && a.sshCAUserCertSignKey == nil {
-			return nil, errs.NotImplemented("authority.Authorize; ssh certificate flows are not enabled", opts...)
-		}
-		signOpts, err := a.authorizeSSHSign(ctx, token)
-		return signOpts, errs.Wrap(http.StatusInternalServerError, err, "authority.Authorize", opts...)
-	case provisioner.SSHRenewMethod:
-		if a.sshCAHostCertSignKey == nil && a.sshCAUserCertSignKey == nil {
-			return nil, errs.NotImplemented("authority.Authorize; ssh certificate flows are not enabled", opts...)
-		}
-		_, err := a.authorizeSSHRenew(ctx, token)
-		return nil, errs.Wrap(http.StatusInternalServerError, err, "authority.Authorize", opts...)
-	case provisioner.SSHRevokeMethod:
-		return nil, errs.Wrap(http.StatusInternalServerError, a.authorizeSSHRevoke(ctx, token), "authority.Authorize", opts...)
-	case provisioner.SSHRekeyMethod:
-		if a.sshCAHostCertSignKey == nil && a.sshCAUserCertSignKey == nil {
-			return nil, errs.NotImplemented("authority.Authorize; ssh certificate flows are not enabled", opts...)
-		}
-		_, signOpts, err := a.authorizeSSHRekey(ctx, token)
-		return signOpts, errs.Wrap(http.StatusInternalServerError, err, "authority.Authorize", opts...)
-	default:
-		return nil, errs.InternalServer("authority.Authorize; method %d is not supported", append([]interface{}{m}, opts...)...)
-	}
-}
-
-// authorizeSign loads the provisioner from the token and calls the provisioner
-// AuthorizeSign method. Returns a list of methods to apply to the signing flow.
-func (a *Authority) authorizeSign(ctx context.Context, token string) ([]provisioner.SignOption, error) {
-	p, err := a.authorizeToken(ctx, token)
-	if err != nil {
-		return nil, errs.Wrap(http.StatusInternalServerError, err, "authority.authorizeSign")
-	}
-	signOpts, err := p.AuthorizeSign(ctx, token)
-	if err != nil {
-		return nil, errs.Wrap(http.StatusInternalServerError, err, "authority.authorizeSign")
-	}
-	return signOpts, nil
-}
-
-// AuthorizeSign authorizes a signature request by validating and authenticating
-// a token that must be sent w/ the request.
-//
-// NOTE: This method is deprecated and should not be used. We make it available
-// in the short term so as not to break existing clients.
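As a small illustration (not part of the vendored file) of the fallback in UseToken above: when a provisioner cannot produce a token ID, the reuse key is simply the lowercase hex SHA-256 of the raw token, so even ID-less tokens can be recorded and rejected on a second use.

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"strings"
)

// reuseKey mirrors the hashing fallback in Authority.UseToken.
// hex.EncodeToString already emits lowercase; the ToLower call matches the
// defensiveness of the original code.
func reuseKey(token string) string {
	sum := sha256.Sum256([]byte(token))
	return strings.ToLower(hex.EncodeToString(sum[:]))
}

func main() {
	fmt.Println(reuseKey("example-one-time-token"))
}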
-func (a *Authority) AuthorizeSign(token string) ([]provisioner.SignOption, error) { - ctx := provisioner.NewContextWithMethod(context.Background(), provisioner.SignMethod) - return a.Authorize(ctx, token) -} - -// authorizeRevoke locates the provisioner used to generate the authenticating -// token and then performs the token validation flow. -func (a *Authority) authorizeRevoke(ctx context.Context, token string) error { - p, err := a.authorizeToken(ctx, token) - if err != nil { - return errs.Wrap(http.StatusInternalServerError, err, "authority.authorizeRevoke") - } - if err = p.AuthorizeRevoke(ctx, token); err != nil { - return errs.Wrap(http.StatusInternalServerError, err, "authority.authorizeRevoke") - } - return nil -} - -// authorizeRenew locates the provisioner (using the provisioner extension in the cert), and checks -// if for the configured provisioner, the renewal is enabled or not. If the -// extra extension cannot be found, authorize the renewal by default. -// -// TODO(mariano): should we authorize by default? -func (a *Authority) authorizeRenew(cert *x509.Certificate) error { - var opts = []interface{}{errs.WithKeyVal("serialNumber", cert.SerialNumber.String())} - - // Check the passive revocation table. - isRevoked, err := a.db.IsRevoked(cert.SerialNumber.String()) - if err != nil { - return errs.Wrap(http.StatusInternalServerError, err, "authority.authorizeRenew", opts...) - } - if isRevoked { - return errs.Unauthorized("authority.authorizeRenew: certificate has been revoked", opts...) - } - - p, ok := a.provisioners.LoadByCertificate(cert) - if !ok { - return errs.Unauthorized("authority.authorizeRenew: provisioner not found", opts...) - } - if err := p.AuthorizeRenew(context.Background(), cert); err != nil { - return errs.Wrap(http.StatusInternalServerError, err, "authority.authorizeRenew", opts...) - } - return nil -} - -// authorizeSSHSign loads the provisioner from the token, checks that it has not -// been used again and calls the provisioner AuthorizeSSHSign method. Returns a -// list of methods to apply to the signing flow. -func (a *Authority) authorizeSSHSign(ctx context.Context, token string) ([]provisioner.SignOption, error) { - p, err := a.authorizeToken(ctx, token) - if err != nil { - return nil, errs.Wrap(http.StatusUnauthorized, err, "authority.authorizeSSHSign") - } - signOpts, err := p.AuthorizeSSHSign(ctx, token) - if err != nil { - return nil, errs.Wrap(http.StatusUnauthorized, err, "authority.authorizeSSHSign") - } - return signOpts, nil -} - -// authorizeSSHRenew authorizes an SSH certificate renewal request, by -// validating the contents of an SSHPOP token. -func (a *Authority) authorizeSSHRenew(ctx context.Context, token string) (*ssh.Certificate, error) { - p, err := a.authorizeToken(ctx, token) - if err != nil { - return nil, errs.Wrap(http.StatusInternalServerError, err, "authority.authorizeSSHRenew") - } - cert, err := p.AuthorizeSSHRenew(ctx, token) - if err != nil { - return nil, errs.Wrap(http.StatusInternalServerError, err, "authority.authorizeSSHRenew") - } - return cert, nil -} - -// authorizeSSHRekey authorizes an SSH certificate rekey request, by -// validating the contents of an SSHPOP token. 
-func (a *Authority) authorizeSSHRekey(ctx context.Context, token string) (*ssh.Certificate, []provisioner.SignOption, error) { - p, err := a.authorizeToken(ctx, token) - if err != nil { - return nil, nil, errs.Wrap(http.StatusInternalServerError, err, "authority.authorizeSSHRekey") - } - cert, signOpts, err := p.AuthorizeSSHRekey(ctx, token) - if err != nil { - return nil, nil, errs.Wrap(http.StatusInternalServerError, err, "authority.authorizeSSHRekey") - } - return cert, signOpts, nil -} - -// authorizeSSHRevoke authorizes an SSH certificate revoke request, by -// validating the contents of an SSHPOP token. -func (a *Authority) authorizeSSHRevoke(ctx context.Context, token string) error { - p, err := a.authorizeToken(ctx, token) - if err != nil { - return errs.Wrap(http.StatusInternalServerError, err, "authority.authorizeSSHRevoke") - } - if err = p.AuthorizeSSHRevoke(ctx, token); err != nil { - return errs.Wrap(http.StatusInternalServerError, err, "authority.authorizeSSHRevoke") - } - return nil -} diff --git a/vendor/github.com/smallstep/certificates/authority/config.go b/vendor/github.com/smallstep/certificates/authority/config.go deleted file mode 100644 index 744ca5e7..00000000 --- a/vendor/github.com/smallstep/certificates/authority/config.go +++ /dev/null @@ -1,46 +0,0 @@ -package authority - -import "github.com/smallstep/certificates/authority/config" - -// Config is an alias to support older APIs. -type Config = config.Config - -// LoadConfiguration is an alias to support older APIs. -var LoadConfiguration = config.LoadConfiguration - -// AuthConfig is an alias to support older APIs. -type AuthConfig = config.AuthConfig - -// TLS - -// ASN1DN is an alias to support older APIs. -type ASN1DN = config.ASN1DN - -// DefaultTLSOptions is an alias to support older APIs. -var DefaultTLSOptions = config.DefaultTLSOptions - -// TLSOptions is an alias to support older APIs. -type TLSOptions = config.TLSOptions - -// CipherSuites is an alias to support older APIs. -type CipherSuites = config.CipherSuites - -// SSH - -// SSHConfig is an alias to support older APIs. -type SSHConfig = config.SSHConfig - -// Bastion is an alias to support older APIs. -type Bastion = config.Bastion - -// HostTag is an alias to support older APIs. -type HostTag = config.HostTag - -// Host is an alias to support older APIs. -type Host = config.Host - -// SSHPublicKey is an alias to support older APIs. -type SSHPublicKey = config.SSHPublicKey - -// SSHKeys is an alias to support older APIs. -type SSHKeys = config.SSHKeys diff --git a/vendor/github.com/smallstep/certificates/authority/config/config.go b/vendor/github.com/smallstep/certificates/authority/config/config.go deleted file mode 100644 index 9ad1ff5f..00000000 --- a/vendor/github.com/smallstep/certificates/authority/config/config.go +++ /dev/null @@ -1,294 +0,0 @@ -package config - -import ( - "encoding/json" - "fmt" - "net" - "os" - "time" - - "github.com/pkg/errors" - "github.com/smallstep/certificates/authority/provisioner" - cas "github.com/smallstep/certificates/cas/apiv1" - "github.com/smallstep/certificates/db" - kms "github.com/smallstep/certificates/kms/apiv1" - "github.com/smallstep/certificates/templates" - "go.step.sm/linkedca" -) - -const ( - legacyAuthority = "step-certificate-authority" -) - -var ( - // DefaultBackdate length of time to backdate certificates to avoid - // clock skew validation issues. - DefaultBackdate = time.Minute - // DefaultDisableRenewal disables renewals per provisioner. 
-	DefaultDisableRenewal = false
-	// DefaultEnableSSHCA enables SSH CA features per provisioner or globally
-	// for all provisioners.
-	DefaultEnableSSHCA = false
-	// GlobalProvisionerClaims default claims for the Authority. Can be overridden
-	// by provisioner specific claims.
-	GlobalProvisionerClaims = provisioner.Claims{
-		MinTLSDur:         &provisioner.Duration{Duration: 5 * time.Minute}, // TLS certs
-		MaxTLSDur:         &provisioner.Duration{Duration: 24 * time.Hour},
-		DefaultTLSDur:     &provisioner.Duration{Duration: 24 * time.Hour},
-		DisableRenewal:    &DefaultDisableRenewal,
-		MinUserSSHDur:     &provisioner.Duration{Duration: 5 * time.Minute}, // User SSH certs
-		MaxUserSSHDur:     &provisioner.Duration{Duration: 24 * time.Hour},
-		DefaultUserSSHDur: &provisioner.Duration{Duration: 16 * time.Hour},
-		MinHostSSHDur:     &provisioner.Duration{Duration: 5 * time.Minute}, // Host SSH certs
-		MaxHostSSHDur:     &provisioner.Duration{Duration: 30 * 24 * time.Hour},
-		DefaultHostSSHDur: &provisioner.Duration{Duration: 30 * 24 * time.Hour},
-		EnableSSHCA:       &DefaultEnableSSHCA,
-	}
-)
-
-// Config represents the CA configuration and it's mapped to a JSON object.
-type Config struct {
-	Root             multiString          `json:"root"`
-	FederatedRoots   []string             `json:"federatedRoots"`
-	IntermediateCert string               `json:"crt"`
-	IntermediateKey  string               `json:"key"`
-	Address          string               `json:"address"`
-	InsecureAddress  string               `json:"insecureAddress"`
-	DNSNames         []string             `json:"dnsNames"`
-	KMS              *kms.Options         `json:"kms,omitempty"`
-	SSH              *SSHConfig           `json:"ssh,omitempty"`
-	Logger           json.RawMessage      `json:"logger,omitempty"`
-	DB               *db.Config           `json:"db,omitempty"`
-	Monitoring       json.RawMessage      `json:"monitoring,omitempty"`
-	AuthorityConfig  *AuthConfig          `json:"authority,omitempty"`
-	TLS              *TLSOptions          `json:"tls,omitempty"`
-	Password         string               `json:"password,omitempty"`
-	Templates        *templates.Templates `json:"templates,omitempty"`
-}
-
-// ASN1DN contains ASN1.DN attributes that are used in Subject and Issuer
-// x509 Certificate blocks.
-type ASN1DN struct {
-	Country            string `json:"country,omitempty"`
-	Organization       string `json:"organization,omitempty"`
-	OrganizationalUnit string `json:"organizationalUnit,omitempty"`
-	Locality           string `json:"locality,omitempty"`
-	Province           string `json:"province,omitempty"`
-	StreetAddress      string `json:"streetAddress,omitempty"`
-	CommonName         string `json:"commonName,omitempty"`
-}
-
-// AuthConfig represents the configuration options for the authority. An
-// underlying registration authority can also be configured using the
-// cas.Options.
-type AuthConfig struct {
-	*cas.Options
-	AuthorityID          string                `json:"authorityID,omitempty"`
-	Provisioners         provisioner.List      `json:"provisioners"`
-	Admins               []*linkedca.Admin     `json:"-"`
-	Template             *ASN1DN               `json:"template,omitempty"`
-	Claims               *provisioner.Claims   `json:"claims,omitempty"`
-	DisableIssuedAtCheck bool                  `json:"disableIssuedAtCheck,omitempty"`
-	Backdate             *provisioner.Duration `json:"backdate,omitempty"`
-	EnableAdmin          bool                  `json:"enableAdmin,omitempty"`
-}
-
-// init initializes the required fields in the AuthConfig if they are not
-// provided.
-func (c *AuthConfig) init() {
-	if c.Provisioners == nil {
-		c.Provisioners = provisioner.List{}
-	}
-	if c.Template == nil {
-		c.Template = &ASN1DN{}
-	}
-	if c.Backdate == nil {
-		c.Backdate = &provisioner.Duration{
-			Duration: DefaultBackdate,
-		}
-	}
-}
-
-// Validate validates the authority configuration.
-func (c *AuthConfig) Validate(audiences provisioner.Audiences) error { - if c == nil { - return errors.New("authority cannot be undefined") - } - - // Initialize required fields. - c.init() - - // Check that only one K8sSA is enabled - var k8sCount int - for _, p := range c.Provisioners { - if p.GetType() == provisioner.TypeK8sSA { - k8sCount++ - } - } - if k8sCount > 1 { - return errors.New("cannot have more than one kubernetes service account provisioner") - } - - if c.Backdate.Duration < 0 { - return errors.New("authority.backdate cannot be less than 0") - } - - return nil -} - -// LoadConfiguration parses the given filename in JSON format and returns the -// configuration struct. -func LoadConfiguration(filename string) (*Config, error) { - f, err := os.Open(filename) - if err != nil { - return nil, errors.Wrapf(err, "error opening %s", filename) - } - defer f.Close() - - var c Config - if err := json.NewDecoder(f).Decode(&c); err != nil { - return nil, errors.Wrapf(err, "error parsing %s", filename) - } - - c.Init() - - return &c, nil -} - -// Init initializes the minimal configuration required to create an authority. This -// is mainly used on embedded authorities. -func (c *Config) Init() { - if c.DNSNames == nil { - c.DNSNames = []string{"localhost", "127.0.0.1", "::1"} - } - if c.TLS == nil { - c.TLS = &DefaultTLSOptions - } - if c.AuthorityConfig == nil { - c.AuthorityConfig = &AuthConfig{} - } - c.AuthorityConfig.init() -} - -// Save saves the configuration to the given filename. -func (c *Config) Save(filename string) error { - f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) - if err != nil { - return errors.Wrapf(err, "error opening %s", filename) - } - defer f.Close() - - enc := json.NewEncoder(f) - enc.SetIndent("", "\t") - return errors.Wrapf(enc.Encode(c), "error writing %s", filename) -} - -// Validate validates the configuration. -func (c *Config) Validate() error { - switch { - case c.Address == "": - return errors.New("address cannot be empty") - - case len(c.DNSNames) == 0: - return errors.New("dnsNames cannot be empty") - } - - // Options holds the RA/CAS configuration. - ra := c.AuthorityConfig.Options - // The default RA/CAS requires root, crt and key. - if ra.Is(cas.SoftCAS) { - switch { - case c.Root.HasEmpties(): - return errors.New("root cannot be empty") - case c.IntermediateCert == "": - return errors.New("crt cannot be empty") - case c.IntermediateKey == "": - return errors.New("key cannot be empty") - } - } - - // Validate address (a port is required) - if _, _, err := net.SplitHostPort(c.Address); err != nil { - return errors.Errorf("invalid address %s", c.Address) - } - - if c.TLS == nil { - c.TLS = &DefaultTLSOptions - } else { - if len(c.TLS.CipherSuites) == 0 { - c.TLS.CipherSuites = DefaultTLSOptions.CipherSuites - } - if c.TLS.MaxVersion == 0 { - c.TLS.MaxVersion = DefaultTLSOptions.MaxVersion - } - if c.TLS.MinVersion == 0 { - c.TLS.MinVersion = c.TLS.MaxVersion - } - if c.TLS.MinVersion > c.TLS.MaxVersion { - return errors.New("tls minVersion cannot exceed tls maxVersion") - } - c.TLS.Renegotiation = c.TLS.Renegotiation || DefaultTLSOptions.Renegotiation - } - - // Validate KMS options, nil is ok. - if err := c.KMS.Validate(); err != nil { - return err - } - - // Validate RA/CAS options, nil is ok. 
- if err := ra.Validate(); err != nil { - return err - } - - // Validate ssh: nil is ok - if err := c.SSH.Validate(); err != nil { - return err - } - - // Validate templates: nil is ok - if err := c.Templates.Validate(); err != nil { - return err - } - - return c.AuthorityConfig.Validate(c.GetAudiences()) -} - -// GetAudiences returns the legacy and possible urls without the ports that will -// be used as the default provisioner audiences. The CA might have proxies in -// front so we cannot rely on the port. -func (c *Config) GetAudiences() provisioner.Audiences { - audiences := provisioner.Audiences{ - Sign: []string{legacyAuthority}, - Revoke: []string{legacyAuthority}, - SSHSign: []string{}, - SSHRevoke: []string{}, - SSHRenew: []string{}, - } - - for _, name := range c.DNSNames { - audiences.Sign = append(audiences.Sign, - fmt.Sprintf("https://%s/1.0/sign", name), - fmt.Sprintf("https://%s/sign", name), - fmt.Sprintf("https://%s/1.0/ssh/sign", name), - fmt.Sprintf("https://%s/ssh/sign", name)) - audiences.Revoke = append(audiences.Revoke, - fmt.Sprintf("https://%s/1.0/revoke", name), - fmt.Sprintf("https://%s/revoke", name)) - audiences.SSHSign = append(audiences.SSHSign, - fmt.Sprintf("https://%s/1.0/ssh/sign", name), - fmt.Sprintf("https://%s/ssh/sign", name), - fmt.Sprintf("https://%s/1.0/sign", name), - fmt.Sprintf("https://%s/sign", name)) - audiences.SSHRevoke = append(audiences.SSHRevoke, - fmt.Sprintf("https://%s/1.0/ssh/revoke", name), - fmt.Sprintf("https://%s/ssh/revoke", name)) - audiences.SSHRenew = append(audiences.SSHRenew, - fmt.Sprintf("https://%s/1.0/ssh/renew", name), - fmt.Sprintf("https://%s/ssh/renew", name)) - audiences.SSHRekey = append(audiences.SSHRekey, - fmt.Sprintf("https://%s/1.0/ssh/rekey", name), - fmt.Sprintf("https://%s/ssh/rekey", name)) - } - - return audiences -} diff --git a/vendor/github.com/smallstep/certificates/authority/config/ssh.go b/vendor/github.com/smallstep/certificates/authority/config/ssh.go deleted file mode 100644 index 4ba1bb38..00000000 --- a/vendor/github.com/smallstep/certificates/authority/config/ssh.go +++ /dev/null @@ -1,94 +0,0 @@ -package config - -import ( - "github.com/pkg/errors" - "github.com/smallstep/certificates/authority/provisioner" - "go.step.sm/crypto/jose" - "golang.org/x/crypto/ssh" -) - -// SSHConfig contains the user and host keys. -type SSHConfig struct { - HostKey string `json:"hostKey"` - UserKey string `json:"userKey"` - Keys []*SSHPublicKey `json:"keys,omitempty"` - AddUserPrincipal string `json:"addUserPrincipal,omitempty"` - AddUserCommand string `json:"addUserCommand,omitempty"` - Bastion *Bastion `json:"bastion,omitempty"` -} - -// Bastion contains the custom properties used on bastion. -type Bastion struct { - Hostname string `json:"hostname"` - User string `json:"user,omitempty"` - Port string `json:"port,omitempty"` - Command string `json:"cmd,omitempty"` - Flags string `json:"flags,omitempty"` -} - -// HostTag are tagged with k,v pairs. These tags are how a user is ultimately -// associated with a host. -type HostTag struct { - ID string - Name string - Value string -} - -// Host defines expected attributes for an ssh host. -type Host struct { - HostID string `json:"hid"` - HostTags []HostTag `json:"host_tags"` - Hostname string `json:"hostname"` -} - -// Validate checks the fields in SSHConfig. 
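To make the GetAudiences expansion above concrete, a tiny sketch (not part of the vendored file) printing the sign audiences produced for a single, purely illustrative DNS name; note that none of the URLs carry a port, precisely because a proxy may sit in front of the CA.

package main

import "fmt"

func main() {
	name := "ca.example.com" // illustrative DNS name
	for _, aud := range []string{
		fmt.Sprintf("https://%s/1.0/sign", name),
		fmt.Sprintf("https://%s/sign", name),
		fmt.Sprintf("https://%s/1.0/ssh/sign", name),
		fmt.Sprintf("https://%s/ssh/sign", name),
	} {
		fmt.Println(aud)
	}
}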
-func (c *SSHConfig) Validate() error {
-	if c == nil {
-		return nil
-	}
-	for _, k := range c.Keys {
-		if err := k.Validate(); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-// SSHPublicKey contains a public key used by federated CAs to keep old signing
-// keys for this CA.
-type SSHPublicKey struct {
-	Type      string          `json:"type"`
-	Federated bool            `json:"federated"`
-	Key       jose.JSONWebKey `json:"key"`
-	publicKey ssh.PublicKey
-}
-
-// Validate checks the fields in SSHPublicKey.
-func (k *SSHPublicKey) Validate() error {
-	switch {
-	case k.Type == "":
-		return errors.New("type cannot be empty")
-	case k.Type != provisioner.SSHHostCert && k.Type != provisioner.SSHUserCert:
-		return errors.Errorf("invalid type %s, it must be user or host", k.Type)
-	case !k.Key.IsPublic():
-		return errors.New("invalid key type, it must be a public key")
-	}
-
-	key, err := ssh.NewPublicKey(k.Key.Key)
-	if err != nil {
-		return errors.Wrap(err, "error creating ssh key")
-	}
-	k.publicKey = key
-	return nil
-}
-
-// PublicKey returns the ssh public key.
-func (k *SSHPublicKey) PublicKey() ssh.PublicKey {
-	return k.publicKey
-}
-
-// SSHKeys represents the SSH User and Host public keys.
-type SSHKeys struct {
-	UserKeys []ssh.PublicKey
-	HostKeys []ssh.PublicKey
-}
diff --git a/vendor/github.com/smallstep/certificates/authority/config/tls_options.go b/vendor/github.com/smallstep/certificates/authority/config/tls_options.go
deleted file mode 100644
index 996b5834..00000000
--- a/vendor/github.com/smallstep/certificates/authority/config/tls_options.go
+++ /dev/null
@@ -1,169 +0,0 @@
-package config
-
-import (
-	"crypto/tls"
-	"fmt"
-
-	"github.com/pkg/errors"
-)
-
-var (
-	// DefaultTLSMinVersion default minimum version of TLS.
-	DefaultTLSMinVersion = TLSVersion(1.2)
-	// DefaultTLSMaxVersion default maximum version of TLS.
-	DefaultTLSMaxVersion = TLSVersion(1.3)
-	// DefaultTLSRenegotiation default TLS connection renegotiation policy.
-	DefaultTLSRenegotiation = false // Never renegotiate.
-	// DefaultTLSCipherSuites specifies default step ciphersuite(s).
-	DefaultTLSCipherSuites = CipherSuites{
-		"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305",
-		"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
-	}
-	// ApprovedTLSCipherSuites smallstep approved ciphersuites.
-	ApprovedTLSCipherSuites = CipherSuites{
-		"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA",
-		"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256",
-		"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
-		"TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA",
-		"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
-		"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305",
-		"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA",
-		"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256",
-		"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
-		"TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA",
-		"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
-		"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
-	}
-	// DefaultTLSOptions represents the default TLS version as well as the cipher
-	// suites used in the TLS certificates.
-	DefaultTLSOptions = TLSOptions{
-		CipherSuites: CipherSuites{
-			"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305",
-			"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
-			"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
-		},
-		MinVersion:    1.2,
-		MaxVersion:    1.2,
-		Renegotiation: false,
-	}
-)
-
-// TLSVersion represents a TLS version number.
-type TLSVersion float64
-
-// Validate implements models.Validator and checks that a TLS version is
-// valid.
-func (v TLSVersion) Validate() error { - if _, ok := tlsVersions[v]; ok { - return nil - } - return errors.Errorf("%f is not a valid tls version", v) -} - -// Value returns the Go constant for the TLSVersion. -func (v TLSVersion) Value() uint16 { - return tlsVersions[v] -} - -// String returns the Go constant for the TLSVersion. -func (v TLSVersion) String() string { - k := v.Value() - switch k { - case tls.VersionTLS10: - return "1.0" - case tls.VersionTLS11: - return "1.1" - case tls.VersionTLS12: - return "1.2" - case tls.VersionTLS13: - return "1.3" - default: - return fmt.Sprintf("unexpected value: %f", v) - } -} - -// tlsVersions has the list of supported tls version. -var tlsVersions = map[TLSVersion]uint16{ - // Defaults to TLS 1.3 - 0: tls.VersionTLS13, - // Options - 1.0: tls.VersionTLS10, - 1.1: tls.VersionTLS11, - 1.2: tls.VersionTLS12, - 1.3: tls.VersionTLS13, -} - -// CipherSuites represents an array of string codes representing the cipher -// suites. -type CipherSuites []string - -// Validate implements models.Validator and checks that a cipher suite is -// valid. -func (c CipherSuites) Validate() error { - for _, s := range c { - if _, ok := cipherSuites[s]; !ok { - return errors.Errorf("%s is not a valid cipher suite", s) - } - } - return nil -} - -// Value returns an []uint16 for the cipher suites. -func (c CipherSuites) Value() []uint16 { - values := make([]uint16, len(c)) - for i, s := range c { - values[i] = cipherSuites[s] - } - return values -} - -// cipherSuites has the list of supported cipher suites. -var cipherSuites = map[string]uint16{ - "TLS_RSA_WITH_RC4_128_SHA": tls.TLS_RSA_WITH_RC4_128_SHA, - "TLS_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, - "TLS_RSA_WITH_AES_128_CBC_SHA": tls.TLS_RSA_WITH_AES_128_CBC_SHA, - "TLS_RSA_WITH_AES_256_CBC_SHA": tls.TLS_RSA_WITH_AES_256_CBC_SHA, - "TLS_RSA_WITH_AES_128_CBC_SHA256": tls.TLS_RSA_WITH_AES_128_CBC_SHA256, - "TLS_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_RSA_WITH_AES_128_GCM_SHA256, - "TLS_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_RSA_WITH_AES_256_GCM_SHA384, - "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, - "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, - "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, - "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, - "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, - "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305": tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, - "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, - "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, - "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, - "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, - "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, - "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, - "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305": tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, -} - -// TLSOptions represents the TLS options that can be specified on *tls.Config -// types to configure HTTPS servers and clients. 
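Tying the two lookup tables above together, a short sketch (not part of the vendored file) of how named cipher suites and float-style versions end up on a *tls.Config, in the spirit of the TLSOptions.TLSConfig method that follows.

package main

import (
	"crypto/tls"
	"fmt"
)

func main() {
	// The same name-to-constant mapping the cipherSuites table performs,
	// trimmed to two entries for brevity.
	suites := map[string]uint16{
		"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305": tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
		"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256":  tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
	}
	cfg := &tls.Config{
		MinVersion: tls.VersionTLS12, // what TLSVersion(1.2).Value() resolves to
		MaxVersion: tls.VersionTLS12,
	}
	for _, id := range suites {
		cfg.CipherSuites = append(cfg.CipherSuites, id)
	}
	fmt.Printf("%d suites, versions %x-%x\n", len(cfg.CipherSuites), cfg.MinVersion, cfg.MaxVersion)
}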
-type TLSOptions struct { - CipherSuites CipherSuites `json:"cipherSuites"` - MinVersion TLSVersion `json:"minVersion"` - MaxVersion TLSVersion `json:"maxVersion"` - Renegotiation bool `json:"renegotiation"` -} - -// TLSConfig returns the tls.Config equivalent of the TLSOptions. -func (t *TLSOptions) TLSConfig() *tls.Config { - var rs tls.RenegotiationSupport - if t.Renegotiation { - rs = tls.RenegotiateFreelyAsClient - } else { - rs = tls.RenegotiateNever - } - - return &tls.Config{ - CipherSuites: t.CipherSuites.Value(), - MinVersion: t.MinVersion.Value(), - MaxVersion: t.MaxVersion.Value(), - Renegotiation: rs, - } -} diff --git a/vendor/github.com/smallstep/certificates/authority/config/types.go b/vendor/github.com/smallstep/certificates/authority/config/types.go deleted file mode 100644 index 6d7b9389..00000000 --- a/vendor/github.com/smallstep/certificates/authority/config/types.go +++ /dev/null @@ -1,74 +0,0 @@ -package config - -import ( - "encoding/json" - - "github.com/pkg/errors" -) - -// multiString represents a type that can be encoded/decoded in JSON as a single -// string or an array of strings. -type multiString []string - -// First returns the first element of a multiString. It will return an empty -// string if the multistring is empty. -func (s multiString) First() string { - if len(s) > 0 { - return s[0] - } - return "" -} - -// HasEmpties returns `true` if any string in the array is empty. -func (s multiString) HasEmpties() bool { - if len(s) == 0 { - return true - } - for _, ss := range s { - if len(ss) == 0 { - return true - } - } - return false -} - -// MarshalJSON marshals the multistring as a string or a slice of strings . With -// 0 elements it will return the empty string, with 1 element a regular string, -// otherwise a slice of strings. -func (s multiString) MarshalJSON() ([]byte, error) { - switch len(s) { - case 0: - return []byte(`""`), nil - case 1: - return json.Marshal(s[0]) - default: - return json.Marshal([]string(s)) - } -} - -// UnmarshalJSON parses a string or a slice and sets it to the multiString. -func (s *multiString) UnmarshalJSON(data []byte) error { - if s == nil { - return errors.New("multiString cannot be nil") - } - if len(data) == 0 { - *s = nil - return nil - } - // Parse string - if data[0] == '"' { - var str string - if err := json.Unmarshal(data, &str); err != nil { - return errors.Wrapf(err, "error unmarshalling %s", data) - } - *s = []string{str} - return nil - } - // Parse array - var ss []string - if err := json.Unmarshal(data, &ss); err != nil { - return errors.Wrapf(err, "error unmarshalling %s", data) - } - *s = ss - return nil -} diff --git a/vendor/github.com/smallstep/certificates/authority/options.go b/vendor/github.com/smallstep/certificates/authority/options.go deleted file mode 100644 index 4e9fbdbc..00000000 --- a/vendor/github.com/smallstep/certificates/authority/options.go +++ /dev/null @@ -1,219 +0,0 @@ -package authority - -import ( - "context" - "crypto" - "crypto/x509" - "encoding/pem" - - "github.com/pkg/errors" - "github.com/smallstep/certificates/authority/admin" - "github.com/smallstep/certificates/authority/config" - "github.com/smallstep/certificates/authority/provisioner" - "github.com/smallstep/certificates/cas" - casapi "github.com/smallstep/certificates/cas/apiv1" - "github.com/smallstep/certificates/db" - "github.com/smallstep/certificates/kms" - "golang.org/x/crypto/ssh" -) - -// Option sets options to the Authority. 
-type Option func(*Authority) error
-
-// WithConfig replaces the current config with the given one. No validation is
-// performed on the given value.
-func WithConfig(config *config.Config) Option {
-	return func(a *Authority) error {
-		a.config = config
-		return nil
-	}
-}
-
-// WithConfigFile reads the given filename as a configuration file and replaces
-// the current one. No validation is performed on the given configuration.
-func WithConfigFile(filename string) Option {
-	return func(a *Authority) (err error) {
-		a.config, err = config.LoadConfiguration(filename)
-		return
-	}
-}
-
-// WithDatabase sets an already initialized authority database to a new
-// authority. This option is intended to be used on graceful reloads.
-func WithDatabase(db db.AuthDB) Option {
-	return func(a *Authority) error {
-		a.db = db
-		return nil
-	}
-}
-
-// WithGetIdentityFunc sets a custom function to retrieve the identity from
-// an external resource.
-func WithGetIdentityFunc(fn func(ctx context.Context, p provisioner.Interface, email string) (*provisioner.Identity, error)) Option {
-	return func(a *Authority) error {
-		a.getIdentityFunc = fn
-		return nil
-	}
-}
-
-// WithSSHBastionFunc sets a custom function to get the bastion for a
-// given user-host pair.
-func WithSSHBastionFunc(fn func(ctx context.Context, user, host string) (*config.Bastion, error)) Option {
-	return func(a *Authority) error {
-		a.sshBastionFunc = fn
-		return nil
-	}
-}
-
-// WithSSHGetHosts sets a custom function to get the hosts for a given
-// certificate.
-func WithSSHGetHosts(fn func(ctx context.Context, cert *x509.Certificate) ([]config.Host, error)) Option {
-	return func(a *Authority) error {
-		a.sshGetHostsFunc = fn
-		return nil
-	}
-}
-
-// WithSSHCheckHost sets a custom function to check whether a given host is
-// step ssh enabled. The token is used to validate the request, while the roots
-// are used to validate the token.
-func WithSSHCheckHost(fn func(ctx context.Context, principal string, tok string, roots []*x509.Certificate) (bool, error)) Option {
-	return func(a *Authority) error {
-		a.sshCheckHostFunc = fn
-		return nil
-	}
-}
-
-// WithKeyManager defines the key manager used to get and create keys, and sign
-// certificates.
-func WithKeyManager(k kms.KeyManager) Option {
-	return func(a *Authority) error {
-		a.keyManager = k
-		return nil
-	}
-}
-
-// WithX509Signer defines the signer used to sign X509 certificates.
-func WithX509Signer(crt *x509.Certificate, s crypto.Signer) Option {
-	return func(a *Authority) error {
-		srv, err := cas.New(context.Background(), casapi.Options{
-			Type:             casapi.SoftCAS,
-			Signer:           s,
-			CertificateChain: []*x509.Certificate{crt},
-		})
-		if err != nil {
-			return err
-		}
-		a.x509CAService = srv
-		return nil
-	}
-}
-
-// WithSSHUserSigner defines the signer used to sign SSH user certificates.
-func WithSSHUserSigner(s crypto.Signer) Option {
-	return func(a *Authority) error {
-		signer, err := ssh.NewSignerFromSigner(s)
-		if err != nil {
-			return errors.Wrap(err, "error creating ssh user signer")
-		}
-		a.sshCAUserCertSignKey = signer
-		// Append public key to list of user certs
-		pub := signer.PublicKey()
-		a.sshCAUserCerts = append(a.sshCAUserCerts, pub)
-		a.sshCAUserFederatedCerts = append(a.sshCAUserFederatedCerts, pub)
-		return nil
-	}
-}
-
-// WithSSHHostSigner defines the signer used to sign SSH host certificates.
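These constructors all follow Go's functional-options pattern; a minimal sketch (not part of the vendored file) of how a constructor consumes such Option values, using a stand-in struct since the real Authority constructor lives in another file.

package main

import "fmt"

type authority struct{ configFile string }

type option func(*authority) error

func withConfigFile(name string) option {
	return func(a *authority) error {
		a.configFile = name
		return nil
	}
}

// newAuthority applies each option in order and stops at the first error.
func newAuthority(opts ...option) (*authority, error) {
	a := &authority{}
	for _, fn := range opts {
		if err := fn(a); err != nil {
			return nil, err
		}
	}
	return a, nil
}

func main() {
	a, err := newAuthority(withConfigFile("ca.json"))
	fmt.Println(a, err)
}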
-func WithSSHHostSigner(s crypto.Signer) Option { - return func(a *Authority) error { - signer, err := ssh.NewSignerFromSigner(s) - if err != nil { - return errors.Wrap(err, "error creating ssh host signer") - } - a.sshCAHostCertSignKey = signer - // Append public key to list of host certs - pub := signer.PublicKey() - a.sshCAHostCerts = append(a.sshCAHostCerts, pub) - a.sshCAHostFederatedCerts = append(a.sshCAHostFederatedCerts, pub) - return nil - } -} - -// WithX509RootCerts is an option that allows to define the list of root -// certificates to use. This option will replace any root certificate defined -// before. -func WithX509RootCerts(rootCerts ...*x509.Certificate) Option { - return func(a *Authority) error { - a.rootX509Certs = rootCerts - return nil - } -} - -// WithX509FederatedCerts is an option that allows to define the list of -// federated certificates. This option will replace any federated certificate -// defined before. -func WithX509FederatedCerts(certs ...*x509.Certificate) Option { - return func(a *Authority) error { - a.federatedX509Certs = certs - return nil - } -} - -// WithX509RootBundle is an option that allows to define the list of root -// certificates. This option will replace any root certificate defined before. -func WithX509RootBundle(pemCerts []byte) Option { - return func(a *Authority) error { - certs, err := readCertificateBundle(pemCerts) - if err != nil { - return err - } - a.rootX509Certs = certs - return nil - } -} - -// WithX509FederatedBundle is an option that allows to define the list of -// federated certificates. This option will replace any federated certificate -// defined before. -func WithX509FederatedBundle(pemCerts []byte) Option { - return func(a *Authority) error { - certs, err := readCertificateBundle(pemCerts) - if err != nil { - return err - } - a.federatedX509Certs = certs - return nil - } -} - -// WithAdminDB is an option to set the database backing the admin APIs. -func WithAdminDB(db admin.DB) Option { - return func(a *Authority) error { - a.adminDB = db - return nil - } -} - -func readCertificateBundle(pemCerts []byte) ([]*x509.Certificate, error) { - var block *pem.Block - var certs []*x509.Certificate - for len(pemCerts) > 0 { - block, pemCerts = pem.Decode(pemCerts) - if block == nil { - break - } - if block.Type != "CERTIFICATE" || len(block.Headers) != 0 { - continue - } - - cert, err := x509.ParseCertificate(block.Bytes) - if err != nil { - return nil, err - } - - certs = append(certs, cert) - } - return certs, nil -} diff --git a/vendor/github.com/smallstep/certificates/authority/provisioner/acme.go b/vendor/github.com/smallstep/certificates/authority/provisioner/acme.go deleted file mode 100644 index d81b0231..00000000 --- a/vendor/github.com/smallstep/certificates/authority/provisioner/acme.go +++ /dev/null @@ -1,111 +0,0 @@ -package provisioner - -import ( - "context" - "crypto/x509" - "time" - - "github.com/pkg/errors" - "github.com/smallstep/certificates/errs" -) - -// ACME is the acme provisioner type, an entity that can authorize the ACME -// provisioning flow. -type ACME struct { - *base - ID string `json:"-"` - Type string `json:"type"` - Name string `json:"name"` - ForceCN bool `json:"forceCN,omitempty"` - Claims *Claims `json:"claims,omitempty"` - Options *Options `json:"options,omitempty"` - claimer *Claimer -} - -// GetID returns the provisioner unique identifier. 
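When only a verification pool is needed rather than the individual certificates, the standard library offers a PEM walk similar to the readCertificateBundle helper above; a brief sketch (not part of the vendored file), with an illustrative file name.

package main

import (
	"crypto/x509"
	"fmt"
	"os"
)

func main() {
	pemBytes, err := os.ReadFile("roots.pem") // illustrative bundle path
	if err != nil {
		fmt.Println(err)
		return
	}
	pool := x509.NewCertPool()
	// AppendCertsFromPEM skips non-certificate blocks, much like the
	// Type != "CERTIFICATE" check in readCertificateBundle.
	if !pool.AppendCertsFromPEM(pemBytes) {
		fmt.Println("no certificates found in bundle")
		return
	}
	fmt.Println("bundle loaded into cert pool")
}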
-func (p ACME) GetID() string {
-	if p.ID != "" {
-		return p.ID
-	}
-	return p.GetIDForToken()
-}
-
-// GetIDForToken returns an identifier that will be used to load the provisioner
-// from a token.
-func (p *ACME) GetIDForToken() string {
-	return "acme/" + p.Name
-}
-
-// GetTokenID returns the identifier of the token.
-func (p *ACME) GetTokenID(ott string) (string, error) {
-	return "", errors.New("acme provisioner does not implement GetTokenID")
-}
-
-// GetName returns the name of the provisioner.
-func (p *ACME) GetName() string {
-	return p.Name
-}
-
-// GetType returns the type of provisioner.
-func (p *ACME) GetType() Type {
-	return TypeACME
-}
-
-// GetEncryptedKey returns the base provisioner encrypted key if it's defined.
-func (p *ACME) GetEncryptedKey() (string, string, bool) {
-	return "", "", false
-}
-
-// GetOptions returns the configured provisioner options.
-func (p *ACME) GetOptions() *Options {
-	return p.Options
-}
-
-// DefaultTLSCertDuration returns the default TLS cert duration enforced by
-// the provisioner.
-func (p *ACME) DefaultTLSCertDuration() time.Duration {
-	return p.claimer.DefaultTLSCertDuration()
-}
-
-// Init initializes and validates the fields of an ACME type.
-func (p *ACME) Init(config Config) (err error) {
-	switch {
-	case p.Type == "":
-		return errors.New("provisioner type cannot be empty")
-	case p.Name == "":
-		return errors.New("provisioner name cannot be empty")
-	}
-
-	// Update claims with global ones
-	if p.claimer, err = NewClaimer(p.Claims, config.Claims); err != nil {
-		return err
-	}
-
-	return err
-}
-
-// AuthorizeSign does not do any validation, because all validation is handled
-// in the ACME protocol. This method returns a list of modifiers / constraints
-// on the resulting certificate.
-func (p *ACME) AuthorizeSign(ctx context.Context, token string) ([]SignOption, error) {
-	return []SignOption{
-		// modifiers / withOptions
-		newProvisionerExtensionOption(TypeACME, p.Name, ""),
-		newForceCNOption(p.ForceCN),
-		profileDefaultDuration(p.claimer.DefaultTLSCertDuration()),
-		// validators
-		defaultPublicKeyValidator{},
-		newValidityValidator(p.claimer.MinTLSCertDuration(), p.claimer.MaxTLSCertDuration()),
-	}, nil
-}
-
-// AuthorizeRenew returns an error if the renewal is disabled.
-// NOTE: This method does not actually validate the certificate or check its
-// revocation status. It just confirms that the provisioner that created the
-// certificate was configured to allow renewals.
-func (p *ACME) AuthorizeRenew(ctx context.Context, cert *x509.Certificate) error {
-	if p.claimer.IsDisableRenewal() {
-		return errs.Unauthorized("acme.AuthorizeRenew; renew is disabled for acme provisioner '%s'", p.GetName())
-	}
-	return nil
-}
diff --git a/vendor/github.com/smallstep/certificates/authority/provisioner/aws.go b/vendor/github.com/smallstep/certificates/authority/provisioner/aws.go
deleted file mode 100644
index c1c77ce5..00000000
--- a/vendor/github.com/smallstep/certificates/authority/provisioner/aws.go
+++ /dev/null
@@ -1,755 +0,0 @@
-package provisioner
-
-import (
-	"context"
-	"crypto/sha256"
-	"crypto/x509"
-	"encoding/base64"
-	"encoding/hex"
-	"encoding/json"
-	"encoding/pem"
-	"fmt"
-	"io/ioutil"
-	"net"
-	"net/http"
-	"strings"
-	"time"
-
-	"github.com/pkg/errors"
-	"github.com/smallstep/certificates/errs"
-	"go.step.sm/crypto/jose"
-	"go.step.sm/crypto/sshutil"
-	"go.step.sm/crypto/x509util"
-)
-
-// awsIssuer is the string used as issuer in the generated tokens.
-const awsIssuer = "ec2.amazonaws.com" - -// awsIdentityURL is the url used to retrieve the instance identity document. -const awsIdentityURL = "http://169.254.169.254/latest/dynamic/instance-identity/document" - -// awsSignatureURL is the url used to retrieve the instance identity signature. -const awsSignatureURL = "http://169.254.169.254/latest/dynamic/instance-identity/signature" - -// awsAPITokenURL is the url used to get the IMDSv2 API token -const awsAPITokenURL = "http://169.254.169.254/latest/api/token" - -// awsAPITokenTTL is the default TTL to use when requesting IMDSv2 API tokens -// -- we keep this short-lived since we get a new token with every call to readURL() -const awsAPITokenTTL = "30" - -// awsMetadataTokenHeader is the header that must be passed with every IMDSv2 request -const awsMetadataTokenHeader = "X-aws-ec2-metadata-token" - -// awsMetadataTokenTTLHeader is the header used to indicate the token TTL requested -const awsMetadataTokenTTLHeader = "X-aws-ec2-metadata-token-ttl-seconds" - -// awsCertificate is the certificate used to validate the instance identity -// signature. -// -// The first certificate is used in: -// ap-northeast-2, ap-south-1, ap-southeast-1, ap-southeast-2 -// eu-central-1, eu-north-1, eu-west-1, eu-west-2, eu-west-3 -// us-east-1, us-east-2, us-west-1, us-west-2 -// ca-central-1, sa-east-1 -// -// The second certificate is used in: -// eu-south-1 -// -// The third certificate is used in: -// ap-east-1 -// -// The fourth certificate is used in: -// af-south-1 -// -// The fifth certificate is used in: -// me-south-1 -const awsCertificate = `-----BEGIN CERTIFICATE----- -MIIDIjCCAougAwIBAgIJAKnL4UEDMN/FMA0GCSqGSIb3DQEBBQUAMGoxCzAJBgNV -BAYTAlVTMRMwEQYDVQQIEwpXYXNoaW5ndG9uMRAwDgYDVQQHEwdTZWF0dGxlMRgw -FgYDVQQKEw9BbWF6b24uY29tIEluYy4xGjAYBgNVBAMTEWVjMi5hbWF6b25hd3Mu -Y29tMB4XDTE0MDYwNTE0MjgwMloXDTI0MDYwNTE0MjgwMlowajELMAkGA1UEBhMC -VVMxEzARBgNVBAgTCldhc2hpbmd0b24xEDAOBgNVBAcTB1NlYXR0bGUxGDAWBgNV -BAoTD0FtYXpvbi5jb20gSW5jLjEaMBgGA1UEAxMRZWMyLmFtYXpvbmF3cy5jb20w -gZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAIe9GN//SRK2knbjySG0ho3yqQM3 -e2TDhWO8D2e8+XZqck754gFSo99AbT2RmXClambI7xsYHZFapbELC4H91ycihvrD -jbST1ZjkLQgga0NE1q43eS68ZeTDccScXQSNivSlzJZS8HJZjgqzBlXjZftjtdJL -XeE4hwvo0sD4f3j9AgMBAAGjgc8wgcwwHQYDVR0OBBYEFCXWzAgVyrbwnFncFFIs -77VBdlE4MIGcBgNVHSMEgZQwgZGAFCXWzAgVyrbwnFncFFIs77VBdlE4oW6kbDBq -MQswCQYDVQQGEwJVUzETMBEGA1UECBMKV2FzaGluZ3RvbjEQMA4GA1UEBxMHU2Vh -dHRsZTEYMBYGA1UEChMPQW1hem9uLmNvbSBJbmMuMRowGAYDVQQDExFlYzIuYW1h -em9uYXdzLmNvbYIJAKnL4UEDMN/FMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEF -BQADgYEAFYcz1OgEhQBXIwIdsgCOS8vEtiJYF+j9uO6jz7VOmJqO+pRlAbRlvY8T -C1haGgSI/A1uZUKs/Zfnph0oEI0/hu1IIJ/SKBDtN5lvmZ/IzbOPIJWirlsllQIQ -7zvWbGd9c9+Rm3p04oTvhup99la7kZqevJK0QRdD/6NpCKsqP/0= ------END CERTIFICATE----- ------BEGIN CERTIFICATE----- -MIICNjCCAZ+gAwIBAgIJAOZ3GEIaDcugMA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV -BAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0 -dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAgFw0xOTEwMjQx -NTE5MDlaGA8yMTk5MDMyOTE1MTkwOVowXDELMAkGA1UEBhMCVVMxGTAXBgNVBAgT -EFdhc2hpbmd0b24gU3RhdGUxEDAOBgNVBAcTB1NlYXR0bGUxIDAeBgNVBAoTF0Ft -YXpvbiBXZWIgU2VydmljZXMgTExDMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKB -gQCjiPgW3vsXRj4JoA16WQDyoPc/eh3QBARaApJEc4nPIGoUolpAXcjFhWplo2O+ -ivgfCsc4AU9OpYdAPha3spLey/bhHPRi1JZHRNqScKP0hzsCNmKhfnZTIEQCFvsp -DRp4zr91/WS06/flJFBYJ6JHhp0KwM81XQG59lV6kkoW7QIDAQABMA0GCSqGSIb3 -DQEBCwUAA4GBAGLLrY3P+HH6C57dYgtJkuGZGT2+rMkk2n81/abzTJvsqRqGRrWv -XRKRXlKdM/dfiuYGokDGxiC0Mg6TYy6wvsR2qRhtXW1OtZkiHWcQCnOttz+8vpew 
-wx8JGMvowtuKB1iMsbwyRqZkFYLcvH+Opfb/Aayi20/ChQLdI6M2R5VU ------END CERTIFICATE----- ------BEGIN CERTIFICATE----- -MIICSzCCAbQCCQDtQvkVxRvK9TANBgkqhkiG9w0BAQsFADBqMQswCQYDVQQGEwJV -UzETMBEGA1UECBMKV2FzaGluZ3RvbjEQMA4GA1UEBxMHU2VhdHRsZTEYMBYGA1UE -ChMPQW1hem9uLmNvbSBJbmMuMRowGAYDVQQDExFlYzIuYW1hem9uYXdzLmNvbTAe -Fw0xOTAyMDMwMzAwMDZaFw0yOTAyMDIwMzAwMDZaMGoxCzAJBgNVBAYTAlVTMRMw -EQYDVQQIEwpXYXNoaW5ndG9uMRAwDgYDVQQHEwdTZWF0dGxlMRgwFgYDVQQKEw9B -bWF6b24uY29tIEluYy4xGjAYBgNVBAMTEWVjMi5hbWF6b25hd3MuY29tMIGfMA0G -CSqGSIb3DQEBAQUAA4GNADCBiQKBgQC1kkHXYTfc7gY5Q55JJhjTieHAgacaQkiR -Pity9QPDE3b+NXDh4UdP1xdIw73JcIIG3sG9RhWiXVCHh6KkuCTqJfPUknIKk8vs -M3RXflUpBe8Pf+P92pxqPMCz1Fr2NehS3JhhpkCZVGxxwLC5gaG0Lr4rFORubjYY -Rh84dK98VwIDAQABMA0GCSqGSIb3DQEBCwUAA4GBAA6xV9f0HMqXjPHuGILDyaNN -dKcvplNFwDTydVg32MNubAGnecoEBtUPtxBsLoVYXCOb+b5/ZMDubPF9tU/vSXuo -TpYM5Bq57gJzDRaBOntQbX9bgHiUxw6XZWaTS/6xjRJDT5p3S1E0mPI3lP/eJv4o -Ezk5zb3eIf10/sqt4756 ------END CERTIFICATE----- ------BEGIN CERTIFICATE----- -MIICNjCCAZ+gAwIBAgIJAKumfZiRrNvHMA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV -BAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0 -dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAgFw0xOTExMjcw -NzE0MDVaGA8yMTk5MDUwMjA3MTQwNVowXDELMAkGA1UEBhMCVVMxGTAXBgNVBAgT -EFdhc2hpbmd0b24gU3RhdGUxEDAOBgNVBAcTB1NlYXR0bGUxIDAeBgNVBAoTF0Ft -YXpvbiBXZWIgU2VydmljZXMgTExDMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKB -gQDFd571nUzVtke3rPyRkYfvs3jh0C0EMzzG72boyUNjnfw1+m0TeFraTLKb9T6F -7TuB/ZEN+vmlYqr2+5Va8U8qLbPF0bRH+FdaKjhgWZdYXxGzQzU3ioy5W5ZM1VyB -7iUsxEAlxsybC3ziPYaHI42UiTkQNahmoroNeqVyHNnBpQIDAQABMA0GCSqGSIb3 -DQEBCwUAA4GBAAJLylWyElEgOpW4B1XPyRVD4pAds8Guw2+krgqkY0HxLCdjosuH -RytGDGN+q75aAoXzW5a7SGpxLxk6Hfv0xp3RjDHsoeP0i1d8MD3hAC5ezxS4oukK -s5gbPOnokhKTMPXbTdRn5ZifCbWlx+bYN/mTYKvxho7b5SVg2o1La9aK ------END CERTIFICATE----- ------BEGIN CERTIFICATE----- -MIIDPDCCAqWgAwIBAgIJAMl6uIV/zqJFMA0GCSqGSIb3DQEBCwUAMHIxCzAJBgNV -BAYTAlVTMRMwEQYDVQQIDApXYXNoaW5ndG9uMRAwDgYDVQQHDAdTZWF0dGxlMSAw -HgYDVQQKDBdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzEaMBgGA1UEAwwRZWMyLmFt -YXpvbmF3cy5jb20wIBcNMTkwNDI2MTQzMjQ3WhgPMjE5ODA5MjkxNDMyNDdaMHIx -CzAJBgNVBAYTAlVTMRMwEQYDVQQIDApXYXNoaW5ndG9uMRAwDgYDVQQHDAdTZWF0 -dGxlMSAwHgYDVQQKDBdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzEaMBgGA1UEAwwR -ZWMyLmFtYXpvbmF3cy5jb20wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBALVN -CDTZEnIeoX1SEYqq6k1BV0ZlpY5y3KnoOreCAE589TwS4MX5+8Fzd6AmACmugeBP -Qk7Hm6b2+g/d4tWycyxLaQlcq81DB1GmXehRkZRgGeRge1ePWd1TUA0I8P/QBT7S -gUePm/kANSFU+P7s7u1NNl+vynyi0wUUrw7/wIZTAgMBAAGjgdcwgdQwHQYDVR0O -BBYEFILtMd+T4YgH1cgc+hVsVOV+480FMIGkBgNVHSMEgZwwgZmAFILtMd+T4YgH -1cgc+hVsVOV+480FoXakdDByMQswCQYDVQQGEwJVUzETMBEGA1UECAwKV2FzaGlu -Z3RvbjEQMA4GA1UEBwwHU2VhdHRsZTEgMB4GA1UECgwXQW1hem9uIFdlYiBTZXJ2 -aWNlcyBMTEMxGjAYBgNVBAMMEWVjMi5hbWF6b25hd3MuY29tggkAyXq4hX/OokUw -DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOBgQBhkNTBIFgWFd+ZhC/LhRUY -4OjEiykmbEp6hlzQ79T0Tfbn5A4NYDI2icBP0+hmf6qSnIhwJF6typyd1yPK5Fqt -NTpxxcXmUKquX+pHmIkK1LKDO8rNE84jqxrxRsfDi6by82fjVYf2pgjJW8R1FAw+ -mL5WQRFexbfB5aXhcMo0AA== ------END CERTIFICATE-----` - -// awsSignatureAlgorithm is the signature algorithm used to verify the identity -// document signature. 
-const awsSignatureAlgorithm = x509.SHA256WithRSA - -type awsConfig struct { - identityURL string - signatureURL string - tokenURL string - tokenTTL string - certificates []*x509.Certificate - signatureAlgorithm x509.SignatureAlgorithm -} - -func newAWSConfig(certPath string) (*awsConfig, error) { - var certBytes []byte - if certPath == "" { - certBytes = []byte(awsCertificate) - } else { - if b, err := ioutil.ReadFile(certPath); err == nil { - certBytes = b - } else { - return nil, errors.Wrapf(err, "error reading %s", certPath) - } - } - - // Read all the certificates. - var certs []*x509.Certificate - for len(certBytes) > 0 { - var block *pem.Block - block, certBytes = pem.Decode(certBytes) - if block == nil { - break - } - if block.Type != "CERTIFICATE" || len(block.Headers) != 0 { - continue - } - cert, err := x509.ParseCertificate(block.Bytes) - if err != nil { - return nil, errors.Wrap(err, "error parsing AWS IID certificate") - } - certs = append(certs, cert) - } - if len(certs) == 0 { - return nil, errors.New("error parsing AWS IID certificate: no certificates found") - } - - return &awsConfig{ - identityURL: awsIdentityURL, - signatureURL: awsSignatureURL, - tokenURL: awsAPITokenURL, - tokenTTL: awsAPITokenTTL, - certificates: certs, - signatureAlgorithm: awsSignatureAlgorithm, - }, nil -} - -type awsPayload struct { - jose.Claims - Amazon awsAmazonPayload `json:"amazon"` - SANs []string `json:"sans"` - document awsInstanceIdentityDocument -} - -type awsAmazonPayload struct { - Document []byte `json:"document"` - Signature []byte `json:"signature"` -} - -type awsInstanceIdentityDocument struct { - AccountID string `json:"accountId"` - Architecture string `json:"architecture"` - AvailabilityZone string `json:"availabilityZone"` - BillingProducts []string `json:"billingProducts"` - DevpayProductCodes []string `json:"devpayProductCodes"` - ImageID string `json:"imageId"` - InstanceID string `json:"instanceId"` - InstanceType string `json:"instanceType"` - KernelID string `json:"kernelId"` - PendingTime time.Time `json:"pendingTime"` - PrivateIP string `json:"privateIp"` - RamdiskID string `json:"ramdiskId"` - Region string `json:"region"` - Version string `json:"version"` -} - -// AWS is the provisioner that supports identity tokens created from the Amazon -// Web Services Instance Identity Documents. -// -// If DisableCustomSANs is true, only the internal DNS and IP will be added as a -// SAN. By default it will accept any SAN in the CSR. -// -// If DisableTrustOnFirstUse is true, multiple sign request for this provisioner -// with the same instance will be accepted. By default only the first request -// will be accepted. -// -// If InstanceAge is set, only the instances with a pendingTime within the given -// period will be accepted. -// -// IIDRoots can be used to specify a path to the certificates used to verify the -// identity certificate signature. 
-// -// Amazon Identity docs are available at -// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-identity-documents.html -type AWS struct { - *base - ID string `json:"-"` - Type string `json:"type"` - Name string `json:"name"` - Accounts []string `json:"accounts"` - DisableCustomSANs bool `json:"disableCustomSANs"` - DisableTrustOnFirstUse bool `json:"disableTrustOnFirstUse"` - IMDSVersions []string `json:"imdsVersions"` - InstanceAge Duration `json:"instanceAge,omitempty"` - IIDRoots string `json:"iidRoots,omitempty"` - Claims *Claims `json:"claims,omitempty"` - Options *Options `json:"options,omitempty"` - claimer *Claimer - config *awsConfig - audiences Audiences -} - -// GetID returns the provisioner unique identifier. -func (p *AWS) GetID() string { - if p.ID != "" { - return p.ID - } - return p.GetIDForToken() -} - -// GetIDForToken returns an identifier that will be used to load the provisioner -// from a token. -func (p *AWS) GetIDForToken() string { - return "aws/" + p.Name -} - -// GetTokenID returns the identifier of the token. -func (p *AWS) GetTokenID(token string) (string, error) { - payload, err := p.authorizeToken(token) - if err != nil { - return "", err - } - // If TOFU is disabled create an ID for the token, so it cannot be reused. - // The timestamps, document and signatures should be mostly unique. - if p.DisableTrustOnFirstUse { - sum := sha256.Sum256([]byte(token)) - return strings.ToLower(hex.EncodeToString(sum[:])), nil - } - - // Use provisioner + instance-id as the identifier. - unique := fmt.Sprintf("%s.%s", p.GetIDForToken(), payload.document.InstanceID) - sum := sha256.Sum256([]byte(unique)) - return strings.ToLower(hex.EncodeToString(sum[:])), nil -} - -// GetName returns the name of the provisioner. -func (p *AWS) GetName() string { - return p.Name -} - -// GetType returns the type of provisioner. -func (p *AWS) GetType() Type { - return TypeAWS -} - -// GetEncryptedKey is not available in an AWS provisioner. -func (p *AWS) GetEncryptedKey() (kid string, key string, ok bool) { - return "", "", false -} - -// GetIdentityToken retrieves the identity document and it's signature and -// generates a token with them. -func (p *AWS) GetIdentityToken(subject, caURL string) (string, error) { - // Initialize the config if this method is used from the cli. - if err := p.assertConfig(); err != nil { - return "", err - } - - var idoc awsInstanceIdentityDocument - doc, err := p.readURL(p.config.identityURL) - if err != nil { - return "", errors.Wrap(err, "error retrieving identity document:\n Are you in an AWS VM?\n Is the metadata service enabled?\n Are you using the proper metadata service version?") - } - if err := json.Unmarshal(doc, &idoc); err != nil { - return "", errors.Wrap(err, "error unmarshaling identity document") - } - sig, err := p.readURL(p.config.signatureURL) - if err != nil { - return "", errors.Wrap(err, "error retrieving identity document:\n Are you in an AWS VM?\n Is the metadata service enabled?\n Are you using the proper metadata service version?") - } - signature, err := base64.StdEncoding.DecodeString(string(sig)) - if err != nil { - return "", errors.Wrap(err, "error decoding identity document signature") - } - if err := p.checkSignature(doc, signature); err != nil { - return "", err - } - - audience, err := generateSignAudience(caURL, p.GetIDForToken()) - if err != nil { - return "", err - } - - // Create unique ID for Trust On First Use (TOFU). 
Only the first instance - // per provisioner is allowed as we don't have a way to trust the given - // sans. - unique := fmt.Sprintf("%s.%s", p.GetIDForToken(), idoc.InstanceID) - sum := sha256.Sum256([]byte(unique)) - - // Create a JWT from the identity document - signer, err := jose.NewSigner( - jose.SigningKey{Algorithm: jose.HS256, Key: signature}, - new(jose.SignerOptions).WithType("JWT"), - ) - if err != nil { - return "", errors.Wrap(err, "error creating signer") - } - - now := time.Now() - payload := awsPayload{ - Claims: jose.Claims{ - Issuer: awsIssuer, - Subject: subject, - Audience: []string{audience}, - Expiry: jose.NewNumericDate(now.Add(5 * time.Minute)), - NotBefore: jose.NewNumericDate(now), - IssuedAt: jose.NewNumericDate(now), - ID: strings.ToLower(hex.EncodeToString(sum[:])), - }, - Amazon: awsAmazonPayload{ - Document: doc, - Signature: signature, - }, - } - - tok, err := jose.Signed(signer).Claims(payload).CompactSerialize() - if err != nil { - return "", errors.Wrap(err, "error serializing token") - } - - return tok, nil -} - -// Init validates and initializes the AWS provisioner. -func (p *AWS) Init(config Config) (err error) { - switch { - case p.Type == "": - return errors.New("provisioner type cannot be empty") - case p.Name == "": - return errors.New("provisioner name cannot be empty") - case p.InstanceAge.Value() < 0: - return errors.New("provisioner instanceAge cannot be negative") - } - // Update claims with global ones - if p.claimer, err = NewClaimer(p.Claims, config.Claims); err != nil { - return err - } - // Add default config - if p.config, err = newAWSConfig(p.IIDRoots); err != nil { - return err - } - p.audiences = config.Audiences.WithFragment(p.GetIDForToken()) - - // validate IMDS versions - if len(p.IMDSVersions) == 0 { - p.IMDSVersions = []string{"v2", "v1"} - } - for _, v := range p.IMDSVersions { - switch v { - case "v1": - // valid - case "v2": - // valid - default: - return errors.Errorf("%s: not a supported AWS Instance Metadata Service version", v) - } - } - - return nil -} - -// AuthorizeSign validates the given token and returns the sign options that -// will be used on certificate creation. -func (p *AWS) AuthorizeSign(ctx context.Context, token string) ([]SignOption, error) { - payload, err := p.authorizeToken(token) - if err != nil { - return nil, errs.Wrap(http.StatusInternalServerError, err, "aws.AuthorizeSign") - } - - doc := payload.document - - // Template options - data := x509util.NewTemplateData() - data.SetCommonName(payload.Claims.Subject) - if v, err := unsafeParseSigned(token); err == nil { - data.SetToken(v) - } - - // Enforce known CN and default DNS and IP if configured. - // By default we'll accept the CN and SANs in the CSR. - // There's no way to trust them other than TOFU. 
-	var so []SignOption
-	if p.DisableCustomSANs {
-		dnsName := fmt.Sprintf("ip-%s.%s.compute.internal", strings.Replace(doc.PrivateIP, ".", "-", -1), doc.Region)
-		so = append(so, dnsNamesValidator([]string{dnsName}))
-		so = append(so, ipAddressesValidator([]net.IP{
-			net.ParseIP(doc.PrivateIP),
-		}))
-		so = append(so, emailAddressesValidator(nil))
-		so = append(so, urisValidator(nil))
-
-		// Template options
-		data.SetSANs([]string{dnsName, doc.PrivateIP})
-	}
-
-	templateOptions, err := CustomTemplateOptions(p.Options, data, x509util.DefaultIIDLeafTemplate)
-	if err != nil {
-		return nil, errs.Wrap(http.StatusInternalServerError, err, "aws.AuthorizeSign")
-	}
-
-	return append(so,
-		templateOptions,
-		// modifiers / withOptions
-		newProvisionerExtensionOption(TypeAWS, p.Name, doc.AccountID, "InstanceID", doc.InstanceID),
-		profileDefaultDuration(p.claimer.DefaultTLSCertDuration()),
-		// validators
-		defaultPublicKeyValidator{},
-		commonNameValidator(payload.Claims.Subject),
-		newValidityValidator(p.claimer.MinTLSCertDuration(), p.claimer.MaxTLSCertDuration()),
-	), nil
-}
-
-// AuthorizeRenew returns an error if the renewal is disabled.
-// NOTE: This method does not actually validate the certificate or check its
-// revocation status. It just confirms that the provisioner that created the
-// certificate was configured to allow renewals.
-func (p *AWS) AuthorizeRenew(ctx context.Context, cert *x509.Certificate) error {
-	if p.claimer.IsDisableRenewal() {
-		return errs.Unauthorized("aws.AuthorizeRenew; renew is disabled for aws provisioner '%s'", p.GetName())
-	}
-	return nil
-}
-
-// assertConfig initializes the config if it has not been initialized
-func (p *AWS) assertConfig() (err error) {
-	if p.config != nil {
-		return
-	}
-	p.config, err = newAWSConfig(p.IIDRoots)
-	return err
-}
-
-// checkSignature returns an error if the signature is not valid.
-func (p *AWS) checkSignature(signed, signature []byte) error {
-	for _, crt := range p.config.certificates {
-		if err := crt.CheckSignature(p.config.signatureAlgorithm, signed, signature); err == nil {
-			return nil
-		}
-	}
-	return errors.New("error validating identity document signature")
-}
-
-// readURL does a GET request to the given url and returns the body. It's not
-// using pkg/errors to avoid verbose errors; the caller should use it and write
-// the appropriate error.
-func (p *AWS) readURL(url string) ([]byte, error) { - var resp *http.Response - var err error - - for _, v := range p.IMDSVersions { - switch v { - case "v1": - resp, err = p.readURLv1(url) - if err == nil && resp.StatusCode < 400 { - return p.readResponseBody(resp) - } - case "v2": - resp, err = p.readURLv2(url) - if err == nil && resp.StatusCode < 400 { - return p.readResponseBody(resp) - } - default: - return nil, fmt.Errorf("%s: not a supported AWS Instance Metadata Service version", v) - } - if resp != nil { - resp.Body.Close() - } - } - - // all versions have been exhausted and we haven't returned successfully yet so pass - // the error on to the caller - if err != nil { - return nil, err - } - return nil, fmt.Errorf("Request for metadata returned non-successful status code %d", - resp.StatusCode) -} - -func (p *AWS) readURLv1(url string) (*http.Response, error) { - client := http.Client{} - - req, err := http.NewRequest(http.MethodGet, url, http.NoBody) - if err != nil { - return nil, err - } - resp, err := client.Do(req) - if err != nil { - return nil, err - } - return resp, nil -} - -func (p *AWS) readURLv2(url string) (*http.Response, error) { - client := http.Client{} - - // first get the token - req, err := http.NewRequest(http.MethodPut, p.config.tokenURL, nil) - if err != nil { - return nil, err - } - req.Header.Set(awsMetadataTokenTTLHeader, p.config.tokenTTL) - resp, err := client.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - if resp.StatusCode >= 400 { - return nil, fmt.Errorf("Request for API token returned non-successful status code %d", resp.StatusCode) - } - token, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, err - } - - // now make the request - req, err = http.NewRequest(http.MethodGet, url, http.NoBody) - if err != nil { - return nil, err - } - req.Header.Set(awsMetadataTokenHeader, string(token)) - resp, err = client.Do(req) - if err != nil { - return nil, err - } - return resp, nil -} - -func (p *AWS) readResponseBody(resp *http.Response) ([]byte, error) { - defer resp.Body.Close() - b, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, err - } - return b, nil -} - -// authorizeToken performs common jwt authorization actions and returns the -// claims for case specific downstream parsing. -// e.g. a Sign request will auth/validate different fields than a Revoke request. 
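The v2 branch above is the usual IMDSv2 handshake: first a PUT for a short-lived session token, then a GET for the metadata with that token attached. A standalone sketch, under the assumption that awsMetadataTokenTTLHeader and awsMetadataTokenHeader resolve to the documented IMDSv2 header names:

package main

import (
	"fmt"
	"io"
	"net/http"
)

// fetchIMDSv2 performs the two-step IMDSv2 exchange: obtain a session token,
// then present it on the actual metadata request.
func fetchIMDSv2(url string) ([]byte, error) {
	client := &http.Client{}

	// Step 1: request a session token with a short TTL.
	req, err := http.NewRequest(http.MethodPut, "http://169.254.169.254/latest/api/token", http.NoBody)
	if err != nil {
		return nil, err
	}
	req.Header.Set("X-aws-ec2-metadata-token-ttl-seconds", "30")
	resp, err := client.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	token, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}

	// Step 2: send the token on the metadata GET.
	req, err = http.NewRequest(http.MethodGet, url, http.NoBody)
	if err != nil {
		return nil, err
	}
	req.Header.Set("X-aws-ec2-metadata-token", string(token))
	resp2, err := client.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp2.Body.Close()
	return io.ReadAll(resp2.Body)
}

func main() {
	b, err := fetchIMDSv2("http://169.254.169.254/latest/dynamic/instance-identity/document")
	fmt.Println(string(b), err)
}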
-func (p *AWS) authorizeToken(token string) (*awsPayload, error) { - jwt, err := jose.ParseSigned(token) - if err != nil { - return nil, errs.Wrapf(http.StatusUnauthorized, err, "aws.authorizeToken; error parsing aws token") - } - if len(jwt.Headers) == 0 { - return nil, errs.InternalServer("aws.authorizeToken; error parsing token, header is missing") - } - - var unsafeClaims awsPayload - if err := jwt.UnsafeClaimsWithoutVerification(&unsafeClaims); err != nil { - return nil, errs.Wrap(http.StatusUnauthorized, err, "aws.authorizeToken; error unmarshaling claims") - } - - var payload awsPayload - if err := jwt.Claims(unsafeClaims.Amazon.Signature, &payload); err != nil { - return nil, errs.Wrap(http.StatusUnauthorized, err, "aws.authorizeToken; error verifying claims") - } - - // Validate identity document signature - if err := p.checkSignature(payload.Amazon.Document, payload.Amazon.Signature); err != nil { - return nil, errs.Wrap(http.StatusUnauthorized, err, "aws.authorizeToken; invalid aws token signature") - } - - var doc awsInstanceIdentityDocument - if err := json.Unmarshal(payload.Amazon.Document, &doc); err != nil { - return nil, errs.Wrap(http.StatusUnauthorized, err, "aws.authorizeToken; error unmarshaling aws identity document") - } - - switch { - case doc.AccountID == "": - return nil, errs.Unauthorized("aws.authorizeToken; aws identity document accountId cannot be empty") - case doc.InstanceID == "": - return nil, errs.Unauthorized("aws.authorizeToken; aws identity document instanceId cannot be empty") - case doc.PrivateIP == "": - return nil, errs.Unauthorized("aws.authorizeToken; aws identity document privateIp cannot be empty") - case doc.Region == "": - return nil, errs.Unauthorized("aws.authorizeToken; aws identity document region cannot be empty") - } - - // According to "rfc7519 JSON Web Token" acceptable skew should be no - // more than a few minutes. - now := time.Now().UTC() - if err = payload.ValidateWithLeeway(jose.Expected{ - Issuer: awsIssuer, - Time: now, - }, time.Minute); err != nil { - return nil, errs.Wrapf(http.StatusUnauthorized, err, "aws.authorizeToken; invalid aws token") - } - - // validate audiences with the defaults - if !matchesAudience(payload.Audience, p.audiences.Sign) { - return nil, errs.Unauthorized("aws.authorizeToken; invalid token - invalid audience claim (aud)") - } - - // Validate subject, it has to be known if disableCustomSANs is enabled - if p.DisableCustomSANs { - if payload.Subject != doc.InstanceID && - payload.Subject != doc.PrivateIP && - payload.Subject != fmt.Sprintf("ip-%s.%s.compute.internal", strings.Replace(doc.PrivateIP, ".", "-", -1), doc.Region) { - return nil, errs.Unauthorized("aws.authorizeToken; invalid token - invalid subject claim (sub)") - } - } - - // validate accounts - if len(p.Accounts) > 0 { - var found bool - for _, sa := range p.Accounts { - if sa == doc.AccountID { - found = true - break - } - } - if !found { - return nil, errs.Unauthorized("aws.authorizeToken; invalid aws identity document - accountId is not valid") - } - } - - // validate instance age - if d := p.InstanceAge.Value(); d > 0 { - if now.Sub(doc.PendingTime) > d { - return nil, errs.Unauthorized("aws.authorizeToken; aws identity document pendingTime is too old") - } - } - - payload.document = doc - return &payload, nil -} - -// AuthorizeSSHSign returns the list of SignOption for a SignSSH request. 
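When disableCustomSANs is set, the subject check in the AWS authorizeToken above reduces to a three-way comparison against values derived from the identity document. A sketch of that predicate (helper name hypothetical):

package main

import (
	"fmt"
	"strings"
)

// validAWSSubject reports whether sub matches one of the values the token
// validation above accepts: instance ID, private IP, or the internal hostname.
func validAWSSubject(sub, instanceID, privateIP, region string) bool {
	hostname := fmt.Sprintf("ip-%s.%s.compute.internal",
		strings.ReplaceAll(privateIP, ".", "-"), region)
	return sub == instanceID || sub == privateIP || sub == hostname
}

func main() {
	fmt.Println(validAWSSubject("i-1234567890abcdef0", "i-1234567890abcdef0", "10.0.12.34", "us-east-1")) // true
}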
-func (p *AWS) AuthorizeSSHSign(ctx context.Context, token string) ([]SignOption, error) { - if !p.claimer.IsSSHCAEnabled() { - return nil, errs.Unauthorized("aws.AuthorizeSSHSign; ssh ca is disabled for aws provisioner '%s'", p.GetName()) - } - claims, err := p.authorizeToken(token) - if err != nil { - return nil, errs.Wrap(http.StatusInternalServerError, err, "aws.AuthorizeSSHSign") - } - - doc := claims.document - signOptions := []SignOption{} - - // Enforce host certificate. - defaults := SignSSHOptions{ - CertType: SSHHostCert, - } - - // Validated principals. - principals := []string{ - doc.PrivateIP, - fmt.Sprintf("ip-%s.%s.compute.internal", strings.Replace(doc.PrivateIP, ".", "-", -1), doc.Region), - } - - // Only enforce known principals if disable custom sans is true. - if p.DisableCustomSANs { - defaults.Principals = principals - } else { - // Check that at least one principal is sent in the request. - signOptions = append(signOptions, &sshCertOptionsRequireValidator{ - Principals: true, - }) - } - - // Certificate templates. - data := sshutil.CreateTemplateData(sshutil.HostCert, doc.InstanceID, principals) - if v, err := unsafeParseSigned(token); err == nil { - data.SetToken(v) - } - - templateOptions, err := CustomSSHTemplateOptions(p.Options, data, sshutil.DefaultIIDTemplate) - if err != nil { - return nil, errs.Wrap(http.StatusInternalServerError, err, "aws.AuthorizeSSHSign") - } - signOptions = append(signOptions, templateOptions) - - return append(signOptions, - // Validate user SignSSHOptions. - sshCertOptionsValidator(defaults), - // Set the validity bounds if not set. - &sshDefaultDuration{p.claimer}, - // Validate public key - &sshDefaultPublicKeyValidator{}, - // Validate the validity period. - &sshCertValidityValidator{p.claimer}, - // Require all the fields in the SSH certificate - &sshCertDefaultValidator{}, - ), nil -} diff --git a/vendor/github.com/smallstep/certificates/authority/provisioner/azure.go b/vendor/github.com/smallstep/certificates/authority/provisioner/azure.go deleted file mode 100644 index fee50658..00000000 --- a/vendor/github.com/smallstep/certificates/authority/provisioner/azure.go +++ /dev/null @@ -1,405 +0,0 @@ -package provisioner - -import ( - "context" - "crypto/sha256" - "crypto/x509" - "encoding/hex" - "encoding/json" - "io/ioutil" - "net/http" - "regexp" - "strings" - "time" - - "github.com/pkg/errors" - "github.com/smallstep/certificates/errs" - "go.step.sm/crypto/jose" - "go.step.sm/crypto/sshutil" - "go.step.sm/crypto/x509util" -) - -// azureOIDCBaseURL is the base discovery url for Microsoft Azure tokens. -const azureOIDCBaseURL = "https://login.microsoftonline.com" - -// azureIdentityTokenURL is the URL to get the identity token for an instance. -const azureIdentityTokenURL = "http://169.254.169.254/metadata/identity/oauth2/token?api-version=2018-02-01&resource=https%3A%2F%2Fmanagement.azure.com%2F" - -// azureDefaultAudience is the default audience used. -const azureDefaultAudience = "https://management.azure.com/" - -// azureXMSMirIDRegExp is the regular expression used to parse the xms_mirid claim. -// Using case insensitive as resourceGroups appears as resourcegroups. 
-var azureXMSMirIDRegExp = regexp.MustCompile(`(?i)^/subscriptions/([^/]+)/resourceGroups/([^/]+)/providers/Microsoft.Compute/virtualMachines/([^/]+)$`)
-
-type azureConfig struct {
-	oidcDiscoveryURL string
-	identityTokenURL string
-}
-
-func newAzureConfig(tenantID string) *azureConfig {
-	return &azureConfig{
-		oidcDiscoveryURL: azureOIDCBaseURL + "/" + tenantID + "/.well-known/openid-configuration",
-		identityTokenURL: azureIdentityTokenURL,
-	}
-}
-
-type azureIdentityToken struct {
-	AccessToken  string `json:"access_token"`
-	RefreshToken string `json:"refresh_token"`
-	ClientID     string `json:"client_id"`
-	ExpiresIn    int64  `json:"expires_in,string"`
-	ExpiresOn    int64  `json:"expires_on,string"`
-	ExtExpiresIn int64  `json:"ext_expires_in,string"`
-	NotBefore    int64  `json:"not_before,string"`
-	Resource     string `json:"resource"`
-	TokenType    string `json:"token_type"`
-}
-
-type azurePayload struct {
-	jose.Claims
-	AppID            string `json:"appid"`
-	AppIDAcr         string `json:"appidacr"`
-	IdentityProvider string `json:"idp"`
-	ObjectID         string `json:"oid"`
-	TenantID         string `json:"tid"`
-	Version          string `json:"ver"`
-	XMSMirID         string `json:"xms_mirid"`
-}
-
-// Azure is the provisioner that supports identity tokens created from the
-// Microsoft Azure Instance Metadata service.
-//
-// The default audience is "https://management.azure.com/".
-//
-// If DisableCustomSANs is true, only the internal DNS and IP will be added as a
-// SAN. By default it will accept any SAN in the CSR.
-//
-// If DisableTrustOnFirstUse is true, multiple sign requests for this provisioner
-// with the same instance will be accepted. By default only the first request
-// will be accepted.
-//
-// Microsoft Azure identity docs are available at
-// https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/how-to-use-vm-token
-// and https://docs.microsoft.com/en-us/azure/virtual-machines/windows/instance-metadata-service
-type Azure struct {
-	*base
-	ID                     string   `json:"-"`
-	Type                   string   `json:"type"`
-	Name                   string   `json:"name"`
-	TenantID               string   `json:"tenantID"`
-	ResourceGroups         []string `json:"resourceGroups"`
-	Audience               string   `json:"audience,omitempty"`
-	DisableCustomSANs      bool     `json:"disableCustomSANs"`
-	DisableTrustOnFirstUse bool     `json:"disableTrustOnFirstUse"`
-	Claims                 *Claims  `json:"claims,omitempty"`
-	Options                *Options `json:"options,omitempty"`
-	claimer                *Claimer
-	config                 *azureConfig
-	oidcConfig             openIDConfiguration
-	keyStore               *keyStore
-}
-
-// GetID returns the provisioner unique identifier.
-func (p *Azure) GetID() string {
-	if p.ID != "" {
-		return p.ID
-	}
-	return p.GetIDForToken()
-}
-
-// GetIDForToken returns an identifier that will be used to load the provisioner
-// from a token.
-func (p *Azure) GetIDForToken() string {
-	return p.TenantID
-}
-
-// GetTokenID returns the identifier of the token. The default value for Azure
-// is the SHA256 of "xms_mirid", but if DisableTrustOnFirstUse is set to true,
-// then it will be the token kid.
-func (p *Azure) GetTokenID(token string) (string, error) {
-	jwt, err := jose.ParseSigned(token)
-	if err != nil {
-		return "", errors.Wrap(err, "error parsing token")
-	}
-
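For the TOFU branch that follows, the token identifier is just a hash of the xms_mirid claim, so a second token from the same VM maps to the same ID. A sketch with a made-up resource ID:

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"strings"
)

func main() {
	// With TOFU enabled, the token ID is the lowercase hex SHA256 of the
	// xms_mirid claim, so a given VM can only use its token once.
	mirid := "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/dev-rg/providers/Microsoft.Compute/virtualMachines/dev-vm"
	sum := sha256.Sum256([]byte(mirid))
	fmt.Println(strings.ToLower(hex.EncodeToString(sum[:])))
}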
-	// Get claims w/out verification. We need to look up the provisioner
-	// key in order to verify the claims and we need the issuer from the claims
-	// before we can look up the provisioner.
-	var claims azurePayload
-	if err = jwt.UnsafeClaimsWithoutVerification(&claims); err != nil {
-		return "", errors.Wrap(err, "error verifying claims")
-	}
-
-	// If TOFU is disabled then allow token re-use. Azure caches the token for
-	// 24h and without allowing the re-use we cannot use it twice.
-	if p.DisableTrustOnFirstUse {
-		return "", ErrAllowTokenReuse
-	}
-
-	sum := sha256.Sum256([]byte(claims.XMSMirID))
-	return strings.ToLower(hex.EncodeToString(sum[:])), nil
-}
-
-// GetName returns the name of the provisioner.
-func (p *Azure) GetName() string {
-	return p.Name
-}
-
-// GetType returns the type of provisioner.
-func (p *Azure) GetType() Type {
-	return TypeAzure
-}
-
-// GetEncryptedKey is not available in an Azure provisioner.
-func (p *Azure) GetEncryptedKey() (kid string, key string, ok bool) {
-	return "", "", false
-}
-
-// GetIdentityToken retrieves from the metadata service the identity token and
-// returns it.
-func (p *Azure) GetIdentityToken(subject, caURL string) (string, error) {
-	// Initialize the config if this method is used from the cli.
-	p.assertConfig()
-
-	req, err := http.NewRequest("GET", p.config.identityTokenURL, http.NoBody)
-	if err != nil {
-		return "", errors.Wrap(err, "error creating request")
-	}
-	req.Header.Set("Metadata", "true")
-	resp, err := http.DefaultClient.Do(req)
-	if err != nil {
-		return "", errors.Wrap(err, "error getting identity token, are you in an Azure VM?")
-	}
-	defer resp.Body.Close()
-
-	b, err := ioutil.ReadAll(resp.Body)
-	if err != nil {
-		return "", errors.Wrap(err, "error reading identity token response")
-	}
-	if resp.StatusCode >= 400 {
-		return "", errors.Errorf("error getting identity token: status=%d, response=%s", resp.StatusCode, b)
-	}
-
-	var identityToken azureIdentityToken
-	if err := json.Unmarshal(b, &identityToken); err != nil {
-		return "", errors.Wrap(err, "error unmarshaling identity token response")
-	}
-
-	return identityToken.AccessToken, nil
-}
-
-// Init validates and initializes the Azure provisioner.
-func (p *Azure) Init(config Config) (err error) {
-	switch {
-	case p.Type == "":
-		return errors.New("provisioner type cannot be empty")
-	case p.Name == "":
-		return errors.New("provisioner name cannot be empty")
-	case p.TenantID == "":
-		return errors.New("provisioner tenantId cannot be empty")
-	case p.Audience == "": // use default audience
-		p.Audience = azureDefaultAudience
-	}
-	// Initialize config
-	p.assertConfig()
-
-	// Update claims with global ones
-	if p.claimer, err = NewClaimer(p.Claims, config.Claims); err != nil {
-		return err
-	}
-
-	// Decode and validate openid-configuration endpoint
-	if err := getAndDecode(p.config.oidcDiscoveryURL, &p.oidcConfig); err != nil {
-		return err
-	}
-	if err := p.oidcConfig.Validate(); err != nil {
-		return errors.Wrapf(err, "error parsing %s", p.config.oidcDiscoveryURL)
-	}
-	// Get JWK key set
-	if p.keyStore, err = newKeyStore(p.oidcConfig.JWKSetURI); err != nil {
-		return err
-	}
-
-	return nil
-}
-
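The xms_mirid claim is where the VM name and resource group are recovered in the authorizeToken below. A quick sketch against the regexp defined above, with a hypothetical resource ID:

package main

import (
	"fmt"
	"regexp"
)

var azureXMSMirIDRegExp = regexp.MustCompile(`(?i)^/subscriptions/([^/]+)/resourceGroups/([^/]+)/providers/Microsoft.Compute/virtualMachines/([^/]+)$`)

func main() {
	// Hypothetical xms_mirid for VM "dev-vm" in resource group "dev-rg"; the
	// (?i) flag matters because Azure emits "resourcegroups" in lowercase.
	mirid := "/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/dev-rg/providers/Microsoft.Compute/virtualMachines/dev-vm"
	if m := azureXMSMirIDRegExp.FindStringSubmatch(mirid); len(m) == 4 {
		fmt.Printf("group=%s name=%s\n", m[2], m[3]) // group=dev-rg name=dev-vm
	}
}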
-// authorizeToken returns the claims, name, group, error.
-func (p *Azure) authorizeToken(token string) (*azurePayload, string, string, error) {
-	jwt, err := jose.ParseSigned(token)
-	if err != nil {
-		return nil, "", "", errs.Wrap(http.StatusUnauthorized, err, "azure.authorizeToken; error parsing azure token")
-	}
-	if len(jwt.Headers) == 0 {
-		return nil, "", "", errs.Unauthorized("azure.authorizeToken; azure token missing header")
-	}
-
-	var found bool
-	var claims azurePayload
-	keys := p.keyStore.Get(jwt.Headers[0].KeyID)
-	for _, key := range keys {
-		if err := jwt.Claims(key.Public(), &claims); err == nil {
-			found = true
-			break
-		}
-	}
-	if !found {
-		return nil, "", "", errs.Unauthorized("azure.authorizeToken; cannot validate azure token")
-	}
-
-	if err := claims.ValidateWithLeeway(jose.Expected{
-		Audience: []string{p.Audience},
-		Issuer:   p.oidcConfig.Issuer,
-		Time:     time.Now(),
-	}, 1*time.Minute); err != nil {
-		return nil, "", "", errs.Wrap(http.StatusUnauthorized, err, "azure.authorizeToken; failed to validate azure token payload")
-	}
-
-	// Validate TenantID
-	if claims.TenantID != p.TenantID {
-		return nil, "", "", errs.Unauthorized("azure.authorizeToken; azure token validation failed - invalid tenant id claim (tid)")
-	}
-
-	re := azureXMSMirIDRegExp.FindStringSubmatch(claims.XMSMirID)
-	if len(re) != 4 {
-		return nil, "", "", errs.Unauthorized("azure.authorizeToken; error parsing xms_mirid claim - %s", claims.XMSMirID)
-	}
-	group, name := re[2], re[3]
-	return &claims, name, group, nil
-}
-
-// AuthorizeSign validates the given token and returns the sign options that
-// will be used on certificate creation.
-func (p *Azure) AuthorizeSign(ctx context.Context, token string) ([]SignOption, error) {
-	_, name, group, err := p.authorizeToken(token)
-	if err != nil {
-		return nil, errs.Wrap(http.StatusInternalServerError, err, "azure.AuthorizeSign")
-	}
-
-	// Filter by resource group
-	if len(p.ResourceGroups) > 0 {
-		var found bool
-		for _, g := range p.ResourceGroups {
-			if g == group {
-				found = true
-				break
-			}
-		}
-		if !found {
-			return nil, errs.Unauthorized("azure.AuthorizeSign; azure token validation failed - invalid resource group")
-		}
-	}
-
-	// Template options
-	data := x509util.NewTemplateData()
-	data.SetCommonName(name)
-	if v, err := unsafeParseSigned(token); err == nil {
-		data.SetToken(v)
-	}
-
-	// Enforce known common name and default DNS if configured.
-	// By default we'll accept the CN and SANs in the CSR.
-	// There's no way to trust them other than TOFU.
-	var so []SignOption
-	if p.DisableCustomSANs {
-		// name will work only inside the virtual network
-		so = append(so, commonNameValidator(name))
-		so = append(so, dnsNamesValidator([]string{name}))
-		so = append(so, ipAddressesValidator(nil))
-		so = append(so, emailAddressesValidator(nil))
-		so = append(so, urisValidator(nil))
-
-		// Enforce SANs in the template.
-		data.SetSANs([]string{name})
-	}
-
-	templateOptions, err := CustomTemplateOptions(p.Options, data, x509util.DefaultIIDLeafTemplate)
-	if err != nil {
-		return nil, errs.Wrap(http.StatusInternalServerError, err, "azure.AuthorizeSign")
-	}
-
-	return append(so,
-		templateOptions,
-		// modifiers / withOptions
-		newProvisionerExtensionOption(TypeAzure, p.Name, p.TenantID),
-		profileDefaultDuration(p.claimer.DefaultTLSCertDuration()),
-		// validators
-		defaultPublicKeyValidator{},
-		newValidityValidator(p.claimer.MinTLSCertDuration(), p.claimer.MaxTLSCertDuration()),
-	), nil
-}
-
-// AuthorizeRenew returns an error if the renewal is disabled.
-// NOTE: This method does not actually validate the certificate or check its
-// revocation status. It just confirms that the provisioner that created the
-// certificate was configured to allow renewals.
-func (p *Azure) AuthorizeRenew(ctx context.Context, cert *x509.Certificate) error {
-	if p.claimer.IsDisableRenewal() {
-		return errs.Unauthorized("azure.AuthorizeRenew; renew is disabled for azure provisioner '%s'", p.GetName())
-	}
-	return nil
-}
-
-// AuthorizeSSHSign returns the list of SignOption for a SignSSH request.
-func (p *Azure) AuthorizeSSHSign(ctx context.Context, token string) ([]SignOption, error) {
-	if !p.claimer.IsSSHCAEnabled() {
-		return nil, errs.Unauthorized("azure.AuthorizeSSHSign; sshCA is disabled for provisioner '%s'", p.GetName())
-	}
-
-	_, name, _, err := p.authorizeToken(token)
-	if err != nil {
-		return nil, errs.Wrap(http.StatusInternalServerError, err, "azure.AuthorizeSSHSign")
-	}
-
-	signOptions := []SignOption{}
-
-	// Enforce host certificate.
-	defaults := SignSSHOptions{
-		CertType: SSHHostCert,
-	}
-
-	// Validated principals.
-	principals := []string{name}
-
-	// Only enforce known principals if disable custom sans is true.
-	if p.DisableCustomSANs {
-		defaults.Principals = principals
-	} else {
-		// Check that at least one principal is sent in the request.
-		signOptions = append(signOptions, &sshCertOptionsRequireValidator{
-			Principals: true,
-		})
-	}
-
-	// Certificate templates.
-	data := sshutil.CreateTemplateData(sshutil.HostCert, name, principals)
-	if v, err := unsafeParseSigned(token); err == nil {
-		data.SetToken(v)
-	}
-
-	templateOptions, err := CustomSSHTemplateOptions(p.Options, data, sshutil.DefaultIIDTemplate)
-	if err != nil {
-		return nil, errs.Wrap(http.StatusInternalServerError, err, "azure.AuthorizeSSHSign")
-	}
-	signOptions = append(signOptions, templateOptions)
-
-	return append(signOptions,
-		// Validate user SignSSHOptions.
-		sshCertOptionsValidator(defaults),
-		// Set the validity bounds if not set.
-		&sshDefaultDuration{p.claimer},
-		// Validate public key
-		&sshDefaultPublicKeyValidator{},
-		// Validate the validity period.
-		&sshCertValidityValidator{p.claimer},
-		// Require all the fields in the SSH certificate
-		&sshCertDefaultValidator{},
-	), nil
-}
-
-// assertConfig initializes the config if it has not been initialized.
-func (p *Azure) assertConfig() {
-	if p.config == nil {
-		p.config = newAzureConfig(p.TenantID)
-	}
-}
diff --git a/vendor/github.com/smallstep/certificates/authority/provisioner/claims.go b/vendor/github.com/smallstep/certificates/authority/provisioner/claims.go
deleted file mode 100644
index 629a313c..00000000
--- a/vendor/github.com/smallstep/certificates/authority/provisioner/claims.go
+++ /dev/null
@@ -1,226 +0,0 @@
-package provisioner
-
-import (
-	"time"
-
-	"github.com/pkg/errors"
-	"golang.org/x/crypto/ssh"
-)
-
-// Claims so that individual provisioners can override global claims.
-type Claims struct { - // TLS CA properties - MinTLSDur *Duration `json:"minTLSCertDuration,omitempty"` - MaxTLSDur *Duration `json:"maxTLSCertDuration,omitempty"` - DefaultTLSDur *Duration `json:"defaultTLSCertDuration,omitempty"` - DisableRenewal *bool `json:"disableRenewal,omitempty"` - // SSH CA properties - MinUserSSHDur *Duration `json:"minUserSSHCertDuration,omitempty"` - MaxUserSSHDur *Duration `json:"maxUserSSHCertDuration,omitempty"` - DefaultUserSSHDur *Duration `json:"defaultUserSSHCertDuration,omitempty"` - MinHostSSHDur *Duration `json:"minHostSSHCertDuration,omitempty"` - MaxHostSSHDur *Duration `json:"maxHostSSHCertDuration,omitempty"` - DefaultHostSSHDur *Duration `json:"defaultHostSSHCertDuration,omitempty"` - EnableSSHCA *bool `json:"enableSSHCA,omitempty"` -} - -// Claimer is the type that controls claims. It provides an interface around the -// current claim and the global one. -type Claimer struct { - global Claims - claims *Claims -} - -// NewClaimer initializes a new claimer with the given claims. -func NewClaimer(claims *Claims, global Claims) (*Claimer, error) { - c := &Claimer{global: global, claims: claims} - return c, c.Validate() -} - -// Claims returns the merge of the inner and global claims. -func (c *Claimer) Claims() Claims { - disableRenewal := c.IsDisableRenewal() - enableSSHCA := c.IsSSHCAEnabled() - return Claims{ - MinTLSDur: &Duration{c.MinTLSCertDuration()}, - MaxTLSDur: &Duration{c.MaxTLSCertDuration()}, - DefaultTLSDur: &Duration{c.DefaultTLSCertDuration()}, - DisableRenewal: &disableRenewal, - MinUserSSHDur: &Duration{c.MinUserSSHCertDuration()}, - MaxUserSSHDur: &Duration{c.MaxUserSSHCertDuration()}, - DefaultUserSSHDur: &Duration{c.DefaultUserSSHCertDuration()}, - MinHostSSHDur: &Duration{c.MinHostSSHCertDuration()}, - MaxHostSSHDur: &Duration{c.MaxHostSSHCertDuration()}, - DefaultHostSSHDur: &Duration{c.DefaultHostSSHCertDuration()}, - EnableSSHCA: &enableSSHCA, - } -} - -// DefaultTLSCertDuration returns the default TLS cert duration for the -// provisioner. If the default is not set within the provisioner, then the global -// default from the authority configuration will be used. -func (c *Claimer) DefaultTLSCertDuration() time.Duration { - if c.claims == nil || c.claims.DefaultTLSDur == nil { - return c.global.DefaultTLSDur.Duration - } - return c.claims.DefaultTLSDur.Duration -} - -// MinTLSCertDuration returns the minimum TLS cert duration for the provisioner. -// If the minimum is not set within the provisioner, then the global -// minimum from the authority configuration will be used. -func (c *Claimer) MinTLSCertDuration() time.Duration { - if c.claims == nil || c.claims.MinTLSDur == nil { - if c.claims != nil && c.claims.DefaultTLSDur != nil && c.claims.DefaultTLSDur.Duration < c.global.MinTLSDur.Duration { - return c.claims.DefaultTLSDur.Duration - } - return c.global.MinTLSDur.Duration - } - return c.claims.MinTLSDur.Duration -} - -// MaxTLSCertDuration returns the maximum TLS cert duration for the provisioner. -// If the maximum is not set within the provisioner, then the global -// maximum from the authority configuration will be used. 
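The getters that follow all share one fallback shape: use the provisioner's value when set, fall back to the global one, and let a provisioner-level default widen the min/max bound when needed. A condensed sketch of that logic with assumed durations (zero standing in for "not set"):

package main

import (
	"fmt"
	"time"
)

// minTLS mirrors the fallback above: an unset provisioner minimum falls back
// to the global one, unless the provisioner's own default is even smaller.
func minTLS(provMin, provDefault, globalMin time.Duration) time.Duration {
	if provMin == 0 { // "not set"
		if provDefault != 0 && provDefault < globalMin {
			return provDefault
		}
		return globalMin
	}
	return provMin
}

func main() {
	fmt.Println(minTLS(0, time.Minute, 5*time.Minute)) // 1m0s: the default lowers the bound
	fmt.Println(minTLS(0, 0, 5*time.Minute))           // 5m0s: plain global fallback
}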
-func (c *Claimer) MaxTLSCertDuration() time.Duration { - if c.claims == nil || c.claims.MaxTLSDur == nil { - if c.claims != nil && c.claims.DefaultTLSDur != nil && c.claims.DefaultTLSDur.Duration > c.global.MaxTLSDur.Duration { - return c.claims.DefaultTLSDur.Duration - } - return c.global.MaxTLSDur.Duration - } - return c.claims.MaxTLSDur.Duration -} - -// IsDisableRenewal returns if the renewal flow is disabled for the -// provisioner. If the property is not set within the provisioner, then the -// global value from the authority configuration will be used. -func (c *Claimer) IsDisableRenewal() bool { - if c.claims == nil || c.claims.DisableRenewal == nil { - return *c.global.DisableRenewal - } - return *c.claims.DisableRenewal -} - -// DefaultSSHCertDuration returns the default SSH certificate duration for the -// given certificate type. -func (c *Claimer) DefaultSSHCertDuration(certType uint32) (time.Duration, error) { - switch certType { - case ssh.UserCert: - return c.DefaultUserSSHCertDuration(), nil - case ssh.HostCert: - return c.DefaultHostSSHCertDuration(), nil - case 0: - return 0, errors.New("ssh certificate type has not been set") - default: - return 0, errors.Errorf("ssh certificate has an unknown type: %d", certType) - } -} - -// DefaultUserSSHCertDuration returns the default SSH user cert duration for the -// provisioner. If the default is not set within the provisioner, then the -// global default from the authority configuration will be used. -func (c *Claimer) DefaultUserSSHCertDuration() time.Duration { - if c.claims == nil || c.claims.DefaultUserSSHDur == nil { - return c.global.DefaultUserSSHDur.Duration - } - return c.claims.DefaultUserSSHDur.Duration -} - -// MinUserSSHCertDuration returns the minimum SSH user cert duration for the -// provisioner. If the minimum is not set within the provisioner, then the -// global minimum from the authority configuration will be used. -func (c *Claimer) MinUserSSHCertDuration() time.Duration { - if c.claims == nil || c.claims.MinUserSSHDur == nil { - if c.claims != nil && c.claims.DefaultUserSSHDur != nil && c.claims.DefaultUserSSHDur.Duration < c.global.MinUserSSHDur.Duration { - return c.claims.DefaultUserSSHDur.Duration - } - return c.global.MinUserSSHDur.Duration - } - return c.claims.MinUserSSHDur.Duration -} - -// MaxUserSSHCertDuration returns the maximum SSH user cert duration for the -// provisioner. If the maximum is not set within the provisioner, then the -// global maximum from the authority configuration will be used. -func (c *Claimer) MaxUserSSHCertDuration() time.Duration { - if c.claims == nil || c.claims.MaxUserSSHDur == nil { - if c.claims != nil && c.claims.DefaultUserSSHDur != nil && c.claims.DefaultUserSSHDur.Duration > c.global.MaxUserSSHDur.Duration { - return c.claims.DefaultUserSSHDur.Duration - } - return c.global.MaxUserSSHDur.Duration - } - return c.claims.MaxUserSSHDur.Duration -} - -// DefaultHostSSHCertDuration returns the default SSH host cert duration for the -// provisioner. If the default is not set within the provisioner, then the -// global default from the authority configuration will be used. -func (c *Claimer) DefaultHostSSHCertDuration() time.Duration { - if c.claims == nil || c.claims.DefaultHostSSHDur == nil { - return c.global.DefaultHostSSHDur.Duration - } - return c.claims.DefaultHostSSHDur.Duration -} - -// MinHostSSHCertDuration returns the minimum SSH host cert duration for the -// provisioner. 
If the minimum is not set within the provisioner, then the -// global minimum from the authority configuration will be used. -func (c *Claimer) MinHostSSHCertDuration() time.Duration { - if c.claims == nil || c.claims.MinHostSSHDur == nil { - if c.claims != nil && c.claims.DefaultHostSSHDur != nil && c.claims.DefaultHostSSHDur.Duration < c.global.MinHostSSHDur.Duration { - return c.claims.DefaultHostSSHDur.Duration - } - return c.global.MinHostSSHDur.Duration - } - return c.claims.MinHostSSHDur.Duration -} - -// MaxHostSSHCertDuration returns the maximum SSH Host cert duration for the -// provisioner. If the maximum is not set within the provisioner, then the -// global maximum from the authority configuration will be used. -func (c *Claimer) MaxHostSSHCertDuration() time.Duration { - if c.claims == nil || c.claims.MaxHostSSHDur == nil { - if c.claims != nil && c.claims.DefaultHostSSHDur != nil && c.claims.DefaultHostSSHDur.Duration > c.global.MaxHostSSHDur.Duration { - return c.claims.DefaultHostSSHDur.Duration - } - return c.global.MaxHostSSHDur.Duration - } - return c.claims.MaxHostSSHDur.Duration -} - -// IsSSHCAEnabled returns if the SSH CA is enabled for the provisioner. If the -// property is not set within the provisioner, then the global value from the -// authority configuration will be used. -func (c *Claimer) IsSSHCAEnabled() bool { - if c.claims == nil || c.claims.EnableSSHCA == nil { - return *c.global.EnableSSHCA - } - return *c.claims.EnableSSHCA -} - -// Validate validates and modifies the Claims with default values. -func (c *Claimer) Validate() error { - var ( - min = c.MinTLSCertDuration() - max = c.MaxTLSCertDuration() - def = c.DefaultTLSCertDuration() - ) - switch { - case min <= 0: - return errors.Errorf("claims: MinTLSCertDuration must be greater than 0") - case max <= 0: - return errors.Errorf("claims: MaxTLSCertDuration must be greater than 0") - case def <= 0: - return errors.Errorf("claims: DefaultTLSCertDuration must be greater than 0") - case max < min: - return errors.Errorf("claims: MaxCertDuration cannot be less "+ - "than MinCertDuration: MaxCertDuration - %v, MinCertDuration - %v", max, min) - case def < min: - return errors.Errorf("claims: DefaultCertDuration cannot be less than MinCertDuration: DefaultCertDuration - %v, MinCertDuration - %v", def, min) - case max < def: - return errors.Errorf("claims: MaxCertDuration cannot be less than DefaultCertDuration: MaxCertDuration - %v, DefaultCertDuration - %v", max, def) - default: - return nil - } -} diff --git a/vendor/github.com/smallstep/certificates/authority/provisioner/collection.go b/vendor/github.com/smallstep/certificates/authority/provisioner/collection.go deleted file mode 100644 index 3ba98a23..00000000 --- a/vendor/github.com/smallstep/certificates/authority/provisioner/collection.go +++ /dev/null @@ -1,351 +0,0 @@ -package provisioner - -import ( - "crypto/sha1" - "crypto/x509" - "encoding/asn1" - "encoding/binary" - "encoding/hex" - "fmt" - "net/url" - "sort" - "strings" - "sync" - - "github.com/smallstep/certificates/authority/admin" - "go.step.sm/crypto/jose" -) - -// DefaultProvisionersLimit is the default limit for listing provisioners. -const DefaultProvisionersLimit = 20 - -// DefaultProvisionersMax is the maximum limit for listing provisioners. 
-const DefaultProvisionersMax = 100
-
-type uidProvisioner struct {
-	provisioner Interface
-	uid         string
-}
-
-type provisionerSlice []uidProvisioner
-
-func (p provisionerSlice) Len() int           { return len(p) }
-func (p provisionerSlice) Less(i, j int) bool { return p[i].uid < p[j].uid }
-func (p provisionerSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
-
-// loadByTokenPayload is a payload used to extract the id used to load the
-// provisioner.
-type loadByTokenPayload struct {
-	jose.Claims
-	AuthorizedParty string `json:"azp"` // OIDC client id
-	TenantID        string `json:"tid"` // Microsoft Azure tenant id
-}
-
-// Collection is a memory map of provisioners.
-type Collection struct {
-	byID      *sync.Map
-	byKey     *sync.Map
-	byName    *sync.Map
-	byTokenID *sync.Map
-	sorted    provisionerSlice
-	audiences Audiences
-}
-
-// NewCollection initializes a collection of provisioners. The given list of
-// audiences are the audiences used by the JWT provisioner.
-func NewCollection(audiences Audiences) *Collection {
-	return &Collection{
-		byID:      new(sync.Map),
-		byKey:     new(sync.Map),
-		byName:    new(sync.Map),
-		byTokenID: new(sync.Map),
-		audiences: audiences,
-	}
-}
-
-// Load a provisioner by the ID.
-func (c *Collection) Load(id string) (Interface, bool) {
-	return loadProvisioner(c.byID, id)
-}
-
-// LoadByName a provisioner by name.
-func (c *Collection) LoadByName(name string) (Interface, bool) {
-	return loadProvisioner(c.byName, name)
-}
-
-// LoadByTokenID a provisioner by identifier found in token.
-// For different provisioner types this identifier may be found in different
-// attributes of the token.
-func (c *Collection) LoadByTokenID(tokenProvisionerID string) (Interface, bool) {
-	return loadProvisioner(c.byTokenID, tokenProvisionerID)
-}
-
-// LoadByToken parses the token claims and loads the provisioner associated.
-func (c *Collection) LoadByToken(token *jose.JSONWebToken, claims *jose.Claims) (Interface, bool) {
-	var audiences []string
-	// Get all audiences with the given fragment
-	fragment := extractFragment(claims.Audience)
-	if fragment == "" {
-		audiences = c.audiences.All()
-	} else {
-		audiences = c.audiences.WithFragment(fragment).All()
-	}
-
-	// match with server audiences
-	if matchesAudience(claims.Audience, audiences) {
-		// Use fragment to get provisioner name (GCP, AWS, SSHPOP)
-		if fragment != "" {
-			return c.LoadByTokenID(fragment)
-		}
-		// If matches with stored audiences it will be a JWT token (default), and
-		// the id would be <issuer>:<kid>.
-		// TODO: is this ok?
-		return c.LoadByTokenID(claims.Issuer + ":" + token.Headers[0].KeyID)
-	}
-
-	// The ID will be just the clientID stored in azp, aud or tid.
-	var payload loadByTokenPayload
-	if err := token.UnsafeClaimsWithoutVerification(&payload); err != nil {
-		return nil, false
-	}
-
-	// Kubernetes Service Account tokens.
-	if payload.Issuer == k8sSAIssuer {
-		if p, ok := c.LoadByTokenID(K8sSAID); ok {
-			return p, ok
-		}
-		// Kubernetes service account provisioner not found
-		return nil, false
-	}
-
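When the audience does not match the CA's own audiences, the code below resolves the provisioner ID from the claims in a fixed order: azp, then tid, then the first aud entry. A compact sketch of that order (helper and values hypothetical):

package main

import "fmt"

// resolveTokenID mirrors the fallback order below: OIDC azp first, then the
// Azure tid, and finally the first audience entry.
func resolveTokenID(azp, tid string, aud []string) string {
	if azp != "" {
		return azp
	}
	if tid != "" {
		return tid
	}
	if len(aud) > 0 {
		return aud[0]
	}
	return ""
}

func main() {
	fmt.Println(resolveTokenID("", "bd9e4b1c-tenant", []string{"https://ca.example.com"})) // bd9e4b1c-tenant
}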
-	// Audience is required for non k8sSA tokens.
-	if len(payload.Audience) == 0 {
-		return nil, false
-	}
-
-	// Try with azp (OIDC)
-	if len(payload.AuthorizedParty) > 0 {
-		if p, ok := c.LoadByTokenID(payload.AuthorizedParty); ok {
-			return p, ok
-		}
-	}
-	// Try with tid (Azure)
-	if payload.TenantID != "" {
-		if p, ok := c.LoadByTokenID(payload.TenantID); ok {
-			return p, ok
-		}
-	}
-	// Fallback to aud
-	return c.LoadByTokenID(payload.Audience[0])
-}
-
-// LoadByCertificate looks for the provisioner extension and extracts the
-// proper id to load the provisioner.
-func (c *Collection) LoadByCertificate(cert *x509.Certificate) (Interface, bool) {
-	for _, e := range cert.Extensions {
-		if e.Id.Equal(stepOIDProvisioner) {
-			var provisioner stepProvisionerASN1
-			if _, err := asn1.Unmarshal(e.Value, &provisioner); err != nil {
-				return nil, false
-			}
-			return c.LoadByName(string(provisioner.Name))
-		}
-	}
-
-	// Default to noop provisioner if an extension is not found. This allows us
-	// to accept a renewal of a cert without the provisioner extension.
-	return &noop{}, true
-}
-
-// LoadEncryptedKey returns an encrypted key indexed by KeyID. At this moment
-// only JWK encrypted keys are indexed by KeyID.
-func (c *Collection) LoadEncryptedKey(keyID string) (string, bool) {
-	p, ok := loadProvisioner(c.byKey, keyID)
-	if !ok {
-		return "", false
-	}
-	_, key, ok := p.GetEncryptedKey()
-	return key, ok
-}
-
-// Store adds a provisioner to the collection and enforces the uniqueness of
-// provisioner IDs.
-func (c *Collection) Store(p Interface) error {
-	// Store provisioner always in byID. ID must be unique.
-	if _, loaded := c.byID.LoadOrStore(p.GetID(), p); loaded {
-		return admin.NewError(admin.ErrorBadRequestType,
-			"cannot add multiple provisioners with the same id")
-	}
-	// Store provisioner always by name.
-	if _, loaded := c.byName.LoadOrStore(p.GetName(), p); loaded {
-		c.byID.Delete(p.GetID())
-		return admin.NewError(admin.ErrorBadRequestType,
-			"cannot add multiple provisioners with the same name")
-	}
-	// Store provisioner always by ID presented in token.
-	if _, loaded := c.byTokenID.LoadOrStore(p.GetIDForToken(), p); loaded {
-		c.byID.Delete(p.GetID())
-		c.byName.Delete(p.GetName())
-		return admin.NewError(admin.ErrorBadRequestType,
-			"cannot add multiple provisioners with the same token identifier")
-	}
-
-	// Store provisioner in byKey if EncryptedKey is defined.
-	if kid, _, ok := p.GetEncryptedKey(); ok {
-		c.byKey.Store(kid, p)
-	}
-
-	// Store sorted provisioners.
-	// Use the first 4 bytes (32bit) of the sum to encode the insertion order.
-	// Using big endian format to get the strings sorted:
-	// 0x00000000, 0x00000001, 0x00000002, ...
-	bi := make([]byte, 4)
-	sum := provisionerSum(p)
-	binary.BigEndian.PutUint32(bi, uint32(c.sorted.Len()))
-	sum[0], sum[1], sum[2], sum[3] = bi[0], bi[1], bi[2], bi[3]
-	c.sorted = append(c.sorted, uidProvisioner{
-		provisioner: p,
-		uid:         hex.EncodeToString(sum),
-	})
-	sort.Sort(c.sorted)
-	return nil
-}
-
-// Remove deletes a provisioner from all associated collections and lists.
-func (c *Collection) Remove(id string) error {
-	prov, ok := c.Load(id)
-	if !ok {
-		return admin.NewError(admin.ErrorNotFoundType, "provisioner %s not found", id)
-	}
-
-	var found bool
-	for i, elem := range c.sorted {
-		if elem.provisioner.GetID() == id {
-			// Remove index in sorted list
-			copy(c.sorted[i:], c.sorted[i+1:])           // Shift a[i+1:] left one index.
-			c.sorted[len(c.sorted)-1] = uidProvisioner{} // Erase last element (write zero value).
-			c.sorted = c.sorted[:len(c.sorted)-1]        // Truncate slice.
-			found = true
-			break
-		}
-	}
-	if !found {
-		return admin.NewError(admin.ErrorNotFoundType, "provisioner %s not found in sorted list", prov.GetName())
-	}
-
-	c.byID.Delete(id)
-	c.byName.Delete(prov.GetName())
-	c.byTokenID.Delete(prov.GetIDForToken())
-	if kid, _, ok := prov.GetEncryptedKey(); ok {
-		c.byKey.Delete(kid)
-	}
-
-	return nil
-}
-
-// Update updates the given provisioner in all related lists and collections.
-func (c *Collection) Update(nu Interface) error {
-	old, ok := c.Load(nu.GetID())
-	if !ok {
-		return admin.NewError(admin.ErrorNotFoundType, "provisioner %s not found", nu.GetID())
-	}
-
-	if old.GetName() != nu.GetName() {
-		if _, ok := c.LoadByName(nu.GetName()); ok {
-			return admin.NewError(admin.ErrorBadRequestType,
-				"provisioner with name %s already exists", nu.GetName())
-		}
-	}
-	if old.GetIDForToken() != nu.GetIDForToken() {
-		if _, ok := c.LoadByTokenID(nu.GetIDForToken()); ok {
-			return admin.NewError(admin.ErrorBadRequestType,
				"provisioner with Token ID %s already exists", nu.GetIDForToken())
-		}
-	}
-
-	if err := c.Remove(old.GetID()); err != nil {
-		return err
-	}
-
-	return c.Store(nu)
-}
-
-// Find implements pagination on a list of sorted provisioners.
-func (c *Collection) Find(cursor string, limit int) (List, string) {
-	switch {
-	case limit <= 0:
-		limit = DefaultProvisionersLimit
-	case limit > DefaultProvisionersMax:
-		limit = DefaultProvisionersMax
-	}
-
-	n := c.sorted.Len()
-	cursor = fmt.Sprintf("%040s", cursor)
-	i := sort.Search(n, func(i int) bool { return c.sorted[i].uid >= cursor })
-
-	slice := List{}
-	for ; i < n && len(slice) < limit; i++ {
-		slice = append(slice, c.sorted[i].provisioner)
-	}
-
-	if i < n {
-		return slice, strings.TrimLeft(c.sorted[i].uid, "0")
-	}
-	return slice, ""
-}
-
-func loadProvisioner(m *sync.Map, key string) (Interface, bool) {
-	i, ok := m.Load(key)
-	if !ok {
-		return nil, false
-	}
-	p, ok := i.(Interface)
-	if !ok {
-		return nil, false
-	}
-	return p, true
-}
-
-// provisionerSum returns the SHA1 of the provisioner's ID. From this we will
-// create the unique and sorted id.
-func provisionerSum(p Interface) []byte {
-	sum := sha1.Sum([]byte(p.GetID()))
-	return sum[:]
-}
-
-// matchesAudience returns true if A and B share at least one element.
-func matchesAudience(as, bs []string) bool {
-	if len(bs) == 0 || len(as) == 0 {
-		return false
-	}
-
-	for _, b := range bs {
-		for _, a := range as {
-			if b == a || stripPort(a) == stripPort(b) {
-				return true
-			}
-		}
-	}
-	return false
-}
-
-// stripPort attempts to strip the port from the given url. If parsing the url
-// produces errors it will just return the passed argument.
-func stripPort(rawurl string) string {
-	u, err := url.Parse(rawurl)
-	if err != nil {
-		return rawurl
-	}
-	u.Host = u.Hostname()
-	return u.String()
-}
-
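The pagination in Find above leans on the 40-character hex uids being lexicographically sortable: the cursor is left-padded with zeros to 40 characters before the binary search, and the returned cursor has the padding trimmed. A small standalone sketch with fabricated uids:

package main

import (
	"fmt"
	"sort"
	"strings"
)

func main() {
	// uids are 40-char hex strings: a 4-byte big-endian insertion index
	// followed by the tail of the SHA1 sum.
	uids := []string{
		"00000000aaaa000000000000000000000000aaaa",
		"00000001bbbb000000000000000000000000bbbb",
		"00000002cccc000000000000000000000000cccc",
	}
	// %040s zero-pads the cursor so it compares correctly against full uids.
	cursor := fmt.Sprintf("%040s", "1bbbb000000000000000000000000bbbb")
	i := sort.Search(len(uids), func(i int) bool { return uids[i] >= cursor })
	fmt.Println(i)                              // 1: resume at the second provisioner
	fmt.Println(strings.TrimLeft(uids[i], "0")) // the next cursor, zero padding removed
}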
-// extractFragment extracts the first fragment of an audience url.
-func extractFragment(audience []string) string {
-	for _, s := range audience {
-		if u, err := url.Parse(s); err == nil && u.Fragment != "" {
-			return u.Fragment
-		}
-	}
-	return ""
-}
diff --git a/vendor/github.com/smallstep/certificates/authority/provisioner/duration.go b/vendor/github.com/smallstep/certificates/authority/provisioner/duration.go
deleted file mode 100644
index 1d8d8f7b..00000000
--- a/vendor/github.com/smallstep/certificates/authority/provisioner/duration.go
+++ /dev/null
@@ -1,63 +0,0 @@
-package provisioner
-
-import (
-	"encoding/json"
-	"time"
-
-	"github.com/pkg/errors"
-)
-
-// Duration is a wrapper around time.Duration to aid with marshal/unmarshal.
-type Duration struct {
-	time.Duration
-}
-
-// NewDuration parses a duration string and returns a Duration type or an error
-// if the given string is not a duration.
-func NewDuration(s string) (*Duration, error) {
-	d, err := time.ParseDuration(s)
-	if err != nil {
-		return nil, errors.Wrapf(err, "error parsing %s as duration", s)
-	}
-	return &Duration{Duration: d}, nil
-}
-
-// MarshalJSON returns the JSON encoding of the duration as a duration string.
-//
-// A duration string is a possibly signed sequence of decimal numbers, each with
-// optional fraction and a unit suffix, such as "300ms", "-1.5h" or "2h45m".
-// Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
-func (d *Duration) MarshalJSON() ([]byte, error) {
-	return json.Marshal(d.Duration.String())
-}
-
-// UnmarshalJSON parses a duration string and sets it to the duration.
-//
-// A duration string is a possibly signed sequence of decimal numbers, each with
-// optional fraction and a unit suffix, such as "300ms", "-1.5h" or "2h45m".
-// Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
-func (d *Duration) UnmarshalJSON(data []byte) (err error) {
-	var (
-		s  string
-		dd time.Duration
-	)
-	if d == nil {
-		return errors.New("duration cannot be nil")
-	}
-	if err = json.Unmarshal(data, &s); err != nil {
-		return errors.Wrapf(err, "error unmarshaling %s", data)
-	}
-	if dd, err = time.ParseDuration(s); err != nil {
-		return errors.Wrapf(err, "error parsing %s as duration", s)
-	}
-	d.Duration = dd
-	return
-}
-
-// Value returns 0 if the duration is nil, the inner duration otherwise.
-func (d *Duration) Value() time.Duration {
-	if d == nil {
-		return 0
-	}
-	return d.Duration
-}
diff --git a/vendor/github.com/smallstep/certificates/authority/provisioner/gcp.go b/vendor/github.com/smallstep/certificates/authority/provisioner/gcp.go
deleted file mode 100644
index 1b599fb3..00000000
--- a/vendor/github.com/smallstep/certificates/authority/provisioner/gcp.go
+++ /dev/null
@@ -1,440 +0,0 @@
-package provisioner
-
-import (
-	"bytes"
-	"context"
-	"crypto/sha256"
-	"crypto/x509"
-	"encoding/hex"
-	"fmt"
-	"io/ioutil"
-	"net/http"
-	"net/url"
-	"strings"
-	"time"
-
-	"github.com/pkg/errors"
-	"github.com/smallstep/certificates/errs"
-	"go.step.sm/crypto/jose"
-	"go.step.sm/crypto/sshutil"
-	"go.step.sm/crypto/x509util"
-)
-
-// gcpCertsURL is the url that serves Google OAuth2 public keys.
-const gcpCertsURL = "https://www.googleapis.com/oauth2/v3/certs"
-
-// gcpIdentityURL is the base url for the identity document in GCP.
-const gcpIdentityURL = "http://metadata/computeMetadata/v1/instance/service-accounts/default/identity"
-
-// gcpPayload extends jwt.Claims with custom GCP attributes.
-type gcpPayload struct {
-	jose.Claims
-	AuthorizedParty string           `json:"azp"`
-	Email           string           `json:"email"`
-	EmailVerified   bool             `json:"email_verified"`
-	Google          gcpGooglePayload `json:"google"`
-}
-
-type gcpGooglePayload struct {
-	ComputeEngine gcpComputeEnginePayload `json:"compute_engine"`
-}
-
-type gcpComputeEnginePayload struct {
-	InstanceID                string            `json:"instance_id"`
-	InstanceName              string            `json:"instance_name"`
-	InstanceCreationTimestamp *jose.NumericDate `json:"instance_creation_timestamp"`
-	ProjectID                 string            `json:"project_id"`
-	ProjectNumber             int64             `json:"project_number"`
-	Zone                      string            `json:"zone"`
-	LicenseID                 []string          `json:"license_id"`
-}
-
-type gcpConfig struct {
-	CertsURL    string
-	IdentityURL string
-}
-
-func newGCPConfig() *gcpConfig {
-	return &gcpConfig{
-		CertsURL:    gcpCertsURL,
-		IdentityURL: gcpIdentityURL,
-	}
-}
-
-// GCP is the provisioner that supports identity tokens created by the Google
-// Cloud Platform metadata API.
-//
-// If DisableCustomSANs is true, only the internal DNS and IP will be added as a
-// SAN. By default it will accept any SAN in the CSR.
-//
-// If DisableTrustOnFirstUse is true, multiple sign requests for this provisioner
-// with the same instance will be accepted. By default only the first request
-// will be accepted.
-//
-// If InstanceAge is set, only the instances with an instance_creation_timestamp
-// within the given period will be accepted.
-//
-// Google Identity docs are available at
-// https://cloud.google.com/compute/docs/instances/verifying-instance-identity
-type GCP struct {
-	*base
-	ID                     string   `json:"-"`
-	Type                   string   `json:"type"`
-	Name                   string   `json:"name"`
-	ServiceAccounts        []string `json:"serviceAccounts"`
-	ProjectIDs             []string `json:"projectIDs"`
-	DisableCustomSANs      bool     `json:"disableCustomSANs"`
-	DisableTrustOnFirstUse bool     `json:"disableTrustOnFirstUse"`
-	InstanceAge            Duration `json:"instanceAge,omitempty"`
-	Claims                 *Claims  `json:"claims,omitempty"`
-	Options                *Options `json:"options,omitempty"`
-	claimer                *Claimer
-	config                 *gcpConfig
-	keyStore               *keyStore
-	audiences              Audiences
-}
-
-// GetID returns the provisioner unique identifier. The name should uniquely
-// identify any GCP provisioner.
-func (p *GCP) GetID() string {
-	if p.ID != "" {
-		return p.ID
-	}
-	return p.GetIDForToken()
-}
-
-// GetIDForToken returns an identifier that will be used to load the provisioner
-// from a token.
-func (p *GCP) GetIDForToken() string {
-	return "gcp/" + p.Name
-}
-
-// GetTokenID returns the identifier of the token. The default value for GCP is
-// the SHA256 of "provisioner_id.instance_id", but if DisableTrustOnFirstUse is
-// set to true, then it will be the SHA256 of the token.
-func (p *GCP) GetTokenID(token string) (string, error) {
-	jwt, err := jose.ParseSigned(token)
-	if err != nil {
-		return "", errors.Wrap(err, "error parsing token")
-	}
-
-	// If TOFU is disabled create an ID for the token, so it cannot be reused.
-	if p.DisableTrustOnFirstUse {
-		sum := sha256.Sum256([]byte(token))
-		return strings.ToLower(hex.EncodeToString(sum[:])), nil
-	}
-
-	// Get claims w/out verification.
-	var claims gcpPayload
-	if err = jwt.UnsafeClaimsWithoutVerification(&claims); err != nil {
-		return "", errors.Wrap(err, "error verifying claims")
-	}
-
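In the TOFU case that follows, the token ID is derived from the provisioner ID and the instance ID, so every token from the same instance deliberately collides. A sketch with hypothetical values:

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"strings"
)

func main() {
	// Hypothetical provisioner "gcp/prod" and instance "5573954026625052959":
	// the reusable-token key is sha256("gcp/prod.5573954026625052959").
	unique := fmt.Sprintf("%s.%s", "gcp/prod", "5573954026625052959")
	sum := sha256.Sum256([]byte(unique))
	fmt.Println(strings.ToLower(hex.EncodeToString(sum[:])))
}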
-	// Create unique ID for Trust On First Use (TOFU). Only the first instance
-	// per provisioner is allowed as we don't have a way to trust the given
-	// SANs.
-	unique := fmt.Sprintf("%s.%s", p.GetIDForToken(), claims.Google.ComputeEngine.InstanceID)
-	sum := sha256.Sum256([]byte(unique))
-	return strings.ToLower(hex.EncodeToString(sum[:])), nil
-}
-
-// GetName returns the name of the provisioner.
-func (p *GCP) GetName() string {
-	return p.Name
-}
-
-// GetType returns the type of provisioner.
-func (p *GCP) GetType() Type {
-	return TypeGCP
-}
-
-// GetEncryptedKey is not available in a GCP provisioner.
-func (p *GCP) GetEncryptedKey() (kid string, key string, ok bool) {
-	return "", "", false
-}
-
-// GetIdentityURL returns the url that generates the GCP token.
-func (p *GCP) GetIdentityURL(audience string) string {
-	// Initialize config if required
-	p.assertConfig()
-
-	q := url.Values{}
-	q.Add("audience", audience)
-	q.Add("format", "full")
-	q.Add("licenses", "FALSE")
-	return fmt.Sprintf("%s?%s", p.config.IdentityURL, q.Encode())
-}
-
-// GetIdentityToken does an HTTP request to the identity url.
-func (p *GCP) GetIdentityToken(subject, caURL string) (string, error) {
-	audience, err := generateSignAudience(caURL, p.GetIDForToken())
-	if err != nil {
-		return "", err
-	}
-
-	req, err := http.NewRequest("GET", p.GetIdentityURL(audience), http.NoBody)
-	if err != nil {
-		return "", errors.Wrap(err, "error creating identity request")
-	}
-	req.Header.Set("Metadata-Flavor", "Google")
-	resp, err := http.DefaultClient.Do(req)
-	if err != nil {
-		return "", errors.Wrap(err, "error doing identity request, are you in a GCP VM?")
-	}
-	defer resp.Body.Close()
-	b, err := ioutil.ReadAll(resp.Body)
-	if err != nil {
-		return "", errors.Wrap(err, "error on identity request")
-	}
-	if resp.StatusCode >= 400 {
-		return "", errors.Errorf("error on identity request: status=%d, response=%s", resp.StatusCode, b)
-	}
-	return string(bytes.TrimSpace(b)), nil
-}
-
-// Init validates and initializes the GCP provisioner.
-func (p *GCP) Init(config Config) error {
-	var err error
-	switch {
-	case p.Type == "":
-		return errors.New("provisioner type cannot be empty")
-	case p.Name == "":
-		return errors.New("provisioner name cannot be empty")
-	case p.InstanceAge.Value() < 0:
-		return errors.New("provisioner instanceAge cannot be negative")
-	}
-	// Initialize config
-	p.assertConfig()
-	// Update claims with global ones
-	if p.claimer, err = NewClaimer(p.Claims, config.Claims); err != nil {
-		return err
-	}
-	// Initialize key store
-	p.keyStore, err = newKeyStore(p.config.CertsURL)
-	if err != nil {
-		return err
-	}
-
-	p.audiences = config.Audiences.WithFragment(p.GetIDForToken())
-	return nil
-}
-
-// AuthorizeSign validates the given token and returns the sign options that
-// will be used on certificate creation.
-func (p *GCP) AuthorizeSign(ctx context.Context, token string) ([]SignOption, error) {
-	claims, err := p.authorizeToken(token)
-	if err != nil {
-		return nil, errs.Wrap(http.StatusInternalServerError, err, "gcp.AuthorizeSign")
-	}
-
-	ce := claims.Google.ComputeEngine
-
-	// Template options
-	data := x509util.NewTemplateData()
-	data.SetCommonName(ce.InstanceName)
-	if v, err := unsafeParseSigned(token); err == nil {
-		data.SetToken(v)
-	}
-
-	// Enforce known common name and default DNS if configured.
-	// By default we'll accept the CN and SANs in the CSR.
-	// There's no way to trust them other than TOFU.
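When disableCustomSANs is set, the validators built below only admit the two GCE-internal DNS names for the instance. A sketch of those name formats with a hypothetical instance, project, and zone:

package main

import "fmt"

func main() {
	// Hypothetical instance "vm-1" in project "acme-prod", zone "us-central1-a":
	// these are the two internal names the validators below will allow.
	name, project, zone := "vm-1", "acme-prod", "us-central1-a"
	fmt.Printf("%s.c.%s.internal\n", name, project)       // vm-1.c.acme-prod.internal
	fmt.Printf("%s.%s.c.%s.internal\n", name, zone, project) // vm-1.us-central1-a.c.acme-prod.internal
}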
- var so []SignOption - if p.DisableCustomSANs { - dnsName1 := fmt.Sprintf("%s.c.%s.internal", ce.InstanceName, ce.ProjectID) - dnsName2 := fmt.Sprintf("%s.%s.c.%s.internal", ce.InstanceName, ce.Zone, ce.ProjectID) - so = append(so, commonNameSliceValidator([]string{ - ce.InstanceName, ce.InstanceID, dnsName1, dnsName2, - })) - so = append(so, dnsNamesValidator([]string{ - dnsName1, dnsName2, - })) - so = append(so, ipAddressesValidator(nil)) - so = append(so, emailAddressesValidator(nil)) - so = append(so, urisValidator(nil)) - - // Template SANs - data.SetSANs([]string{dnsName1, dnsName2}) - } - - templateOptions, err := CustomTemplateOptions(p.Options, data, x509util.DefaultIIDLeafTemplate) - if err != nil { - return nil, errs.Wrap(http.StatusInternalServerError, err, "gcp.AuthorizeSign") - } - - return append(so, - templateOptions, - // modifiers / withOptions - newProvisionerExtensionOption(TypeGCP, p.Name, claims.Subject, "InstanceID", ce.InstanceID, "InstanceName", ce.InstanceName), - profileDefaultDuration(p.claimer.DefaultTLSCertDuration()), - // validators - defaultPublicKeyValidator{}, - newValidityValidator(p.claimer.MinTLSCertDuration(), p.claimer.MaxTLSCertDuration()), - ), nil -} - -// AuthorizeRenew returns an error if the renewal is disabled. -func (p *GCP) AuthorizeRenew(ctx context.Context, cert *x509.Certificate) error { - if p.claimer.IsDisableRenewal() { - return errs.Unauthorized("gcp.AuthorizeRenew; renew is disabled for gcp provisioner '%s'", p.GetName()) - } - return nil -} - -// assertConfig initializes the config if it has not been initialized. -func (p *GCP) assertConfig() { - if p.config == nil { - p.config = newGCPConfig() - } -} - -// authorizeToken performs common jwt authorization actions and returns the -// claims for case specific downstream parsing. -// e.g. a Sign request will auth/validate different fields than a Revoke request. -func (p *GCP) authorizeToken(token string) (*gcpPayload, error) { - jwt, err := jose.ParseSigned(token) - if err != nil { - return nil, errs.Wrap(http.StatusUnauthorized, err, "gcp.authorizeToken; error parsing gcp token") - } - if len(jwt.Headers) == 0 { - return nil, errs.Unauthorized("gcp.authorizeToken; error parsing gcp token - header is missing") - } - - var found bool - var claims gcpPayload - kid := jwt.Headers[0].KeyID - keys := p.keyStore.Get(kid) - for _, key := range keys { - if err := jwt.Claims(key.Public(), &claims); err == nil { - found = true - break - } - } - if !found { - return nil, errs.Unauthorized("gcp.authorizeToken; failed to validate gcp token payload - cannot find key for kid %s", kid) - } - - // According to "rfc7519 JSON Web Token" acceptable skew should be no - // more than a few minutes. 
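The one-minute leeway applied below follows that guidance. A standalone sketch using the go-jose v2 jwt package (assuming it matches the behavior of the vendored jose wrapper):

package main

import (
	"fmt"
	"time"

	"gopkg.in/square/go-jose.v2/jwt"
)

func main() {
	// With one minute of leeway, a token that expired 30 seconds ago still
	// validates, which tolerates small clock skew between client and CA.
	claims := jwt.Claims{
		Issuer: "https://accounts.google.com",
		Expiry: jwt.NewNumericDate(time.Now().Add(-30 * time.Second)),
	}
	err := claims.ValidateWithLeeway(jwt.Expected{
		Issuer: "https://accounts.google.com",
		Time:   time.Now().UTC(),
	}, time.Minute)
	fmt.Println(err) // <nil>
}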
- now := time.Now().UTC() - if err = claims.ValidateWithLeeway(jose.Expected{ - Issuer: "https://accounts.google.com", - Time: now, - }, time.Minute); err != nil { - return nil, errs.Wrap(http.StatusUnauthorized, err, "gcp.authorizeToken; invalid gcp token payload") - } - - // validate audiences with the defaults - if !matchesAudience(claims.Audience, p.audiences.Sign) { - return nil, errs.Unauthorized("gcp.authorizeToken; invalid gcp token - invalid audience claim (aud)") - } - - // validate subject (service account) - if len(p.ServiceAccounts) > 0 { - var found bool - for _, sa := range p.ServiceAccounts { - if sa == claims.Subject || sa == claims.Email { - found = true - break - } - } - if !found { - return nil, errs.Unauthorized("gcp.authorizeToken; invalid gcp token - invalid subject claim") - } - } - - // validate projects - if len(p.ProjectIDs) > 0 { - var found bool - for _, pi := range p.ProjectIDs { - if pi == claims.Google.ComputeEngine.ProjectID { - found = true - break - } - } - if !found { - return nil, errs.Unauthorized("gcp.authorizeToken; invalid gcp token - invalid project id") - } - } - - // validate instance age - if d := p.InstanceAge.Value(); d > 0 { - if now.Sub(claims.Google.ComputeEngine.InstanceCreationTimestamp.Time()) > d { - return nil, errs.Unauthorized("gcp.authorizeToken; token google.compute_engine.instance_creation_timestamp is too old") - } - } - - switch { - case claims.Google.ComputeEngine.InstanceID == "": - return nil, errs.Unauthorized("gcp.authorizeToken; gcp token google.compute_engine.instance_id cannot be empty") - case claims.Google.ComputeEngine.InstanceName == "": - return nil, errs.Unauthorized("gcp.authorizeToken; gcp token google.compute_engine.instance_name cannot be empty") - case claims.Google.ComputeEngine.ProjectID == "": - return nil, errs.Unauthorized("gcp.authorizeToken; gcp token google.compute_engine.project_id cannot be empty") - case claims.Google.ComputeEngine.Zone == "": - return nil, errs.Unauthorized("gcp.authorizeToken; gcp token google.compute_engine.zone cannot be empty") - } - - return &claims, nil -} - -// AuthorizeSSHSign returns the list of SignOption for a SignSSH request. -func (p *GCP) AuthorizeSSHSign(ctx context.Context, token string) ([]SignOption, error) { - if !p.claimer.IsSSHCAEnabled() { - return nil, errs.Unauthorized("gcp.AuthorizeSSHSign; sshCA is disabled for gcp provisioner '%s'", p.GetName()) - } - claims, err := p.authorizeToken(token) - if err != nil { - return nil, errs.Wrap(http.StatusInternalServerError, err, "gcp.AuthorizeSSHSign") - } - - ce := claims.Google.ComputeEngine - signOptions := []SignOption{} - - // Enforce host certificate. - defaults := SignSSHOptions{ - CertType: SSHHostCert, - } - - // Validated principals. - principals := []string{ - fmt.Sprintf("%s.c.%s.internal", ce.InstanceName, ce.ProjectID), - fmt.Sprintf("%s.%s.c.%s.internal", ce.InstanceName, ce.Zone, ce.ProjectID), - } - - // Only enforce known principals if disable custom sans is true. - if p.DisableCustomSANs { - defaults.Principals = principals - } else { - // Check that at least one principal is sent in the request. - signOptions = append(signOptions, &sshCertOptionsRequireValidator{ - Principals: true, - }) - } - - // Certificate templates. 
-	data := sshutil.CreateTemplateData(sshutil.HostCert, ce.InstanceName, principals)
-	if v, err := unsafeParseSigned(token); err == nil {
-		data.SetToken(v)
-	}
-
-	templateOptions, err := CustomSSHTemplateOptions(p.Options, data, sshutil.DefaultIIDTemplate)
-	if err != nil {
-		return nil, errs.Wrap(http.StatusInternalServerError, err, "gcp.AuthorizeSSHSign")
-	}
-	signOptions = append(signOptions, templateOptions)
-
-	return append(signOptions,
-		// Validate user SignSSHOptions.
-		sshCertOptionsValidator(defaults),
-		// Set the validity bounds if not set.
-		&sshDefaultDuration{p.claimer},
-		// Validate public key
-		&sshDefaultPublicKeyValidator{},
-		// Validate the validity period.
-		&sshCertValidityValidator{p.claimer},
-		// Require all the fields in the SSH certificate
-		&sshCertDefaultValidator{},
-	), nil
-}
diff --git a/vendor/github.com/smallstep/certificates/authority/provisioner/jwk.go b/vendor/github.com/smallstep/certificates/authority/provisioner/jwk.go
deleted file mode 100644
index 56768fb7..00000000
--- a/vendor/github.com/smallstep/certificates/authority/provisioner/jwk.go
+++ /dev/null
@@ -1,278 +0,0 @@
-package provisioner
-
-import (
-	"context"
-	"crypto/x509"
-	"net/http"
-	"time"
-
-	"github.com/pkg/errors"
-	"github.com/smallstep/certificates/errs"
-	"go.step.sm/crypto/jose"
-	"go.step.sm/crypto/sshutil"
-	"go.step.sm/crypto/x509util"
-)
-
-// jwtPayload extends jwt.Claims with step attributes.
-type jwtPayload struct {
-	jose.Claims
-	SANs []string     `json:"sans,omitempty"`
-	Step *stepPayload `json:"step,omitempty"`
-}
-
-type stepPayload struct {
-	SSH *SignSSHOptions `json:"ssh,omitempty"`
-}
-
-// JWK is the default provisioner, an entity that can sign tokens necessary for
-// signature requests.
-type JWK struct {
-	*base
-	ID           string           `json:"-"`
-	Type         string           `json:"type"`
-	Name         string           `json:"name"`
-	Key          *jose.JSONWebKey `json:"key"`
-	EncryptedKey string           `json:"encryptedKey,omitempty"`
-	Claims       *Claims          `json:"claims,omitempty"`
-	Options      *Options         `json:"options,omitempty"`
-	claimer      *Claimer
-	audiences    Audiences
-}
-
-// GetID returns the provisioner unique identifier. The name and credential id
-// should uniquely identify any JWK provisioner.
-func (p *JWK) GetID() string {
-	if p.ID != "" {
-		return p.ID
-	}
-	return p.GetIDForToken()
-}
-
-// GetIDForToken returns an identifier that will be used to load the provisioner
-// from a token.
-func (p *JWK) GetIDForToken() string {
-	return p.Name + ":" + p.Key.KeyID
-}
-
-// GetTokenID returns the identifier of the token.
-func (p *JWK) GetTokenID(ott string) (string, error) {
-	// Validate payload
-	token, err := jose.ParseSigned(ott)
-	if err != nil {
-		return "", errors.Wrap(err, "error parsing token")
-	}
-
-	// Get claims w/out verification. We need to look up the provisioner
-	// key in order to verify the claims and we need the issuer from the claims
-	// before we can look up the provisioner.
-	var claims jose.Claims
-	if err = token.UnsafeClaimsWithoutVerification(&claims); err != nil {
-		return "", errors.Wrap(err, "error verifying claims")
-	}
-	return claims.ID, nil
-}
-
-// GetName returns the name of the provisioner.
-func (p *JWK) GetName() string {
-	return p.Name
-}
-
-// GetType returns the type of provisioner.
-func (p *JWK) GetType() Type {
-	return TypeJWK
-}
-
-// GetEncryptedKey returns the base provisioner encrypted key if it's defined.
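GetIDForToken above builds the JWK provisioner's token-lookup identifier by joining name and key ID; for example (values illustrative):

```go
package main

import "fmt"

func main() {
	// Hypothetical provisioner name and JWK key ID, for illustration only.
	name := "admin"
	keyID := "DmAtZt2EhmZr_iTJJ387fr4Md2NbzMXGdXQNW1UWPXk"

	// A JWK provisioner is loaded from a token by the "name:kid" identifier.
	id := name + ":" + keyID
	fmt.Println(id) // admin:DmAtZt2EhmZr_iTJJ387fr4Md2NbzMXGdXQNW1UWPXk
}
```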
-func (p *JWK) GetEncryptedKey() (string, string, bool) {
-	return p.Key.KeyID, p.EncryptedKey, len(p.EncryptedKey) > 0
-}
-
-// Init initializes and validates the fields of a JWK type.
-func (p *JWK) Init(config Config) (err error) {
-	switch {
-	case p.Type == "":
-		return errors.New("provisioner type cannot be empty")
-	case p.Name == "":
-		return errors.New("provisioner name cannot be empty")
-	case p.Key == nil:
-		return errors.New("provisioner key cannot be empty")
-	}
-
-	// Update claims with global ones
-	if p.claimer, err = NewClaimer(p.Claims, config.Claims); err != nil {
-		return err
-	}
-
-	p.audiences = config.Audiences
-	return err
-}
-
-// authorizeToken performs common jwt authorization actions and returns the
-// claims for case specific downstream parsing.
-// e.g. a Sign request will auth/validate different fields than a Revoke request.
-func (p *JWK) authorizeToken(token string, audiences []string) (*jwtPayload, error) {
-	jwt, err := jose.ParseSigned(token)
-	if err != nil {
-		return nil, errs.Wrap(http.StatusUnauthorized, err, "jwk.authorizeToken; error parsing jwk token")
-	}
-
-	var claims jwtPayload
-	if err = jwt.Claims(p.Key, &claims); err != nil {
-		return nil, errs.Wrap(http.StatusUnauthorized, err, "jwk.authorizeToken; error parsing jwk claims")
-	}
-
-	// According to "rfc7519 JSON Web Token" acceptable skew should be no
-	// more than a few minutes.
-	if err = claims.ValidateWithLeeway(jose.Expected{
-		Issuer: p.Name,
-		Time:   time.Now().UTC(),
-	}, time.Minute); err != nil {
-		return nil, errs.Wrapf(http.StatusUnauthorized, err, "jwk.authorizeToken; invalid jwk claims")
-	}
-
-	// validate audiences with the defaults
-	if !matchesAudience(claims.Audience, audiences) {
-		return nil, errs.Unauthorized("jwk.authorizeToken; invalid jwk token audience claim (aud); want %s, but got %s",
-			audiences, claims.Audience)
-	}
-
-	if claims.Subject == "" {
-		return nil, errs.Unauthorized("jwk.authorizeToken; jwk token subject cannot be empty")
-	}
-
-	return &claims, nil
-}
-
-// AuthorizeRevoke returns an error if the provisioner does not have rights to
-// revoke the certificate with serial number in the `sub` property.
-func (p *JWK) AuthorizeRevoke(ctx context.Context, token string) error {
-	_, err := p.authorizeToken(token, p.audiences.Revoke)
-	return errs.Wrap(http.StatusInternalServerError, err, "jwk.AuthorizeRevoke")
-}
-
-// AuthorizeSign validates the given token.
-func (p *JWK) AuthorizeSign(ctx context.Context, token string) ([]SignOption, error) {
-	claims, err := p.authorizeToken(token, p.audiences.Sign)
-	if err != nil {
-		return nil, errs.Wrap(http.StatusInternalServerError, err, "jwk.AuthorizeSign")
-	}
-
-	// NOTE: This is for backwards compatibility with older versions of cli
-	// and certificates. Older versions added the token subject as the only SAN
-	// in a CSR by default.
-	if len(claims.SANs) == 0 {
-		claims.SANs = []string{claims.Subject}
-	}
-
-	// Certificate templates
-	data := x509util.CreateTemplateData(claims.Subject, claims.SANs)
-	if v, err := unsafeParseSigned(token); err == nil {
-		data.SetToken(v)
-	}
-
-	templateOptions, err := TemplateOptions(p.Options, data)
-	if err != nil {
-		return nil, errs.Wrap(http.StatusInternalServerError, err, "jwk.AuthorizeSign")
-	}
-
-	return []SignOption{
-		templateOptions,
-		// modifiers / withOptions
-		newProvisionerExtensionOption(TypeJWK, p.Name, p.Key.KeyID),
-		profileDefaultDuration(p.claimer.DefaultTLSCertDuration()),
-		// validators
-		commonNameValidator(claims.Subject),
-		defaultPublicKeyValidator{},
-		defaultSANsValidator(claims.SANs),
-		newValidityValidator(p.claimer.MinTLSCertDuration(), p.claimer.MaxTLSCertDuration()),
-	}, nil
-}
-
-// AuthorizeRenew returns an error if the renewal is disabled.
-// NOTE: This method does not actually validate the certificate or check its
-// revocation status. It just confirms that the provisioner that created the
-// certificate was configured to allow renewals.
-func (p *JWK) AuthorizeRenew(ctx context.Context, cert *x509.Certificate) error {
-	if p.claimer.IsDisableRenewal() {
-		return errs.Unauthorized("jwk.AuthorizeRenew; renew is disabled for jwk provisioner '%s'", p.GetName())
-	}
-	return nil
-}
-
-// AuthorizeSSHSign returns the list of SignOption for a SignSSH request.
-func (p *JWK) AuthorizeSSHSign(ctx context.Context, token string) ([]SignOption, error) {
-	if !p.claimer.IsSSHCAEnabled() {
-		return nil, errs.Unauthorized("jwk.AuthorizeSSHSign; sshCA is disabled for jwk provisioner '%s'", p.GetName())
-	}
-	claims, err := p.authorizeToken(token, p.audiences.SSHSign)
-	if err != nil {
-		return nil, errs.Wrap(http.StatusInternalServerError, err, "jwk.AuthorizeSSHSign")
-	}
-	if claims.Step == nil || claims.Step.SSH == nil {
-		return nil, errs.Unauthorized("jwk.AuthorizeSSHSign; jwk token must be an SSH provisioning token")
-	}
-
-	opts := claims.Step.SSH
-	signOptions := []SignOption{
-		// validates user's SignSSHOptions with the ones in the token
-		sshCertOptionsValidator(*opts),
-		// validate user's KeyID is the token subject.
-		sshCertOptionsValidator(SignSSHOptions{KeyID: claims.Subject}),
-	}
-
-	// Default template attributes.
-	certType := sshutil.UserCert
-	keyID := claims.Subject
-	principals := []string{claims.Subject}
-
-	// Use options in the token.
-	if opts.CertType != "" {
-		if certType, err = sshutil.CertTypeFromString(opts.CertType); err != nil {
-			return nil, errs.Wrap(http.StatusBadRequest, err, "jwk.AuthorizeSSHSign")
-		}
-	}
-	if opts.KeyID != "" {
-		keyID = opts.KeyID
-	}
-	if len(opts.Principals) > 0 {
-		principals = opts.Principals
-	}
-
-	// Certificate templates.
-	data := sshutil.CreateTemplateData(certType, keyID, principals)
-	if v, err := unsafeParseSigned(token); err == nil {
-		data.SetToken(v)
-	}
-
-	templateOptions, err := TemplateSSHOptions(p.Options, data)
-	if err != nil {
-		return nil, errs.Wrap(http.StatusInternalServerError, err, "jwk.AuthorizeSSHSign")
-	}
-	signOptions = append(signOptions, templateOptions)
-
-	// Add modifiers from custom claims
-	t := now()
-	if !opts.ValidAfter.IsZero() {
-		signOptions = append(signOptions, sshCertValidAfterModifier(opts.ValidAfter.RelativeTime(t).Unix()))
-	}
-	if !opts.ValidBefore.IsZero() {
-		signOptions = append(signOptions, sshCertValidBeforeModifier(opts.ValidBefore.RelativeTime(t).Unix()))
-	}
-
-	return append(signOptions,
-		// Set the validity bounds if not set.
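The ValidAfter/ValidBefore modifiers above turn possibly relative token times into Unix bounds on the certificate; a rough illustration using plain durations (not the package's TimeDuration type):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// A token might carry relative validity offsets such as "5m" and "24h".
	// Resolved against a reference time, they become the certificate's
	// ValidAfter/ValidBefore Unix timestamps.
	now := time.Now()
	validAfter := now.Add(5 * time.Minute).Unix()
	validBefore := now.Add(24 * time.Hour).Unix()
	fmt.Println(validAfter, validBefore)
}
```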
-		&sshDefaultDuration{p.claimer},
-		// Validate public key
-		&sshDefaultPublicKeyValidator{},
-		// Validate the validity period.
-		&sshCertValidityValidator{p.claimer},
-		// Require and validate all the default fields in the SSH certificate.
-		&sshCertDefaultValidator{},
-	), nil
-}
-
-// AuthorizeSSHRevoke returns nil if the token is valid, an error otherwise.
-func (p *JWK) AuthorizeSSHRevoke(ctx context.Context, token string) error {
-	_, err := p.authorizeToken(token, p.audiences.SSHRevoke)
-	return errs.Wrap(http.StatusInternalServerError, err, "jwk.AuthorizeSSHRevoke")
-}
diff --git a/vendor/github.com/smallstep/certificates/authority/provisioner/k8sSA.go b/vendor/github.com/smallstep/certificates/authority/provisioner/k8sSA.go
deleted file mode 100644
index d260f5ec..00000000
--- a/vendor/github.com/smallstep/certificates/authority/provisioner/k8sSA.go
+++ /dev/null
@@ -1,317 +0,0 @@
-package provisioner
-
-import (
-	"context"
-	"crypto/ecdsa"
-	"crypto/ed25519"
-	"crypto/rsa"
-	"crypto/x509"
-	"encoding/pem"
-	"net/http"
-
-	"github.com/pkg/errors"
-	"github.com/smallstep/certificates/errs"
-	"go.step.sm/crypto/jose"
-	"go.step.sm/crypto/pemutil"
-	"go.step.sm/crypto/sshutil"
-	"go.step.sm/crypto/x509util"
-)
-
-// NOTE: There can be at most one kubernetes service account provisioner configured
-// per instance of step-ca. This is due to a lack of distinguishing information
-// contained in kubernetes service account tokens.
-
-const (
-	// K8sSAName is the default name used for kubernetes service account provisioners.
-	K8sSAName = "k8sSA-default"
-	// K8sSAID is the default ID for kubernetes service account provisioners.
-	K8sSAID = "k8ssa/" + K8sSAName
-	k8sSAIssuer = "kubernetes/serviceaccount"
-)
-
-// k8sSAPayload extends jwt.Claims with Kubernetes service account attributes.
-type k8sSAPayload struct {
-	jose.Claims
-	Namespace          string `json:"kubernetes.io/serviceaccount/namespace,omitempty"`
-	SecretName         string `json:"kubernetes.io/serviceaccount/secret.name,omitempty"`
-	ServiceAccountName string `json:"kubernetes.io/serviceaccount/service-account.name,omitempty"`
-	ServiceAccountUID  string `json:"kubernetes.io/serviceaccount/service-account.uid,omitempty"`
-}
-
-// K8sSA represents a Kubernetes ServiceAccount provisioner; an
-// entity trusted to make signature requests.
-type K8sSA struct {
-	*base
-	ID      string   `json:"-"`
-	Type    string   `json:"type"`
-	Name    string   `json:"name"`
-	PubKeys []byte   `json:"publicKeys,omitempty"`
-	Claims  *Claims  `json:"claims,omitempty"`
-	Options *Options `json:"options,omitempty"`
-	claimer   *Claimer
-	audiences Audiences
-	//kauthn kauthn.AuthenticationV1Interface
-	pubKeys []interface{}
-}
-
-// GetID returns the provisioner unique identifier. The name and credential id
-// should uniquely identify any K8sSA provisioner.
-func (p *K8sSA) GetID() string {
-	if p.ID != "" {
-		return p.ID
-	}
-	return p.GetIDForToken()
-}
-
-// GetIDForToken returns an identifier that will be used to load the provisioner
-// from a token.
-func (p *K8sSA) GetIDForToken() string {
-	return K8sSAID
-}
-
-// GetTokenID returns an unimplemented error and does not use the input ott.
-func (p *K8sSA) GetTokenID(ott string) (string, error) {
-	return "", errors.New("not implemented")
-}
-
-// GetName returns the name of the provisioner.
-func (p *K8sSA) GetName() string {
-	return p.Name
-}
-
-// GetType returns the type of provisioner.
-func (p *K8sSA) GetType() Type {
-	return TypeK8sSA
-}
-
-// GetEncryptedKey returns false, because the kubernetes provisioner does not
-// have access to the private key.
-func (p *K8sSA) GetEncryptedKey() (string, string, bool) {
-	return "", "", false
-}
-
-// Init initializes and validates the fields of a K8sSA type.
-func (p *K8sSA) Init(config Config) (err error) {
-	switch {
-	case p.Type == "":
-		return errors.New("provisioner type cannot be empty")
-	case p.Name == "":
-		return errors.New("provisioner name cannot be empty")
-	}
-
-	if p.PubKeys != nil {
-		var (
-			block *pem.Block
-			rest  = p.PubKeys
-		)
-		for rest != nil {
-			block, rest = pem.Decode(rest)
-			if block == nil {
-				break
-			}
-			key, err := pemutil.ParseKey(pem.EncodeToMemory(block))
-			if err != nil {
-				return errors.Wrapf(err, "error parsing public key in provisioner '%s'", p.GetName())
-			}
-			switch q := key.(type) {
-			case *rsa.PublicKey, *ecdsa.PublicKey, ed25519.PublicKey:
-			default:
-				return errors.Errorf("Unexpected public key type %T in provisioner '%s'", q, p.GetName())
-			}
-			p.pubKeys = append(p.pubKeys, key)
-		}
-	} else {
-		// TODO: Use the TokenReview API if no pub keys provided. This will need to
-		// be configured with additional attributes in the K8sSA struct for
-		// connecting to the kubernetes API server.
-		return errors.New("K8s Service Account provisioner cannot be initialized without pub keys")
-	}
-	/*
-		// NOTE: Not sure if we should be doing this initialization here ...
-		// If you have a k8sSA provisioner defined in your config, but you're not
-		// in a kubernetes pod then your CA will fail to startup. Maybe we just postpone
-		// creating the authn until token validation time?
-		if err := checkAccess(k8s.AuthorizationV1()); err != nil {
-			return errors.Wrapf(err, "error verifying access to kubernetes authz service for provisioner %s", p.GetID())
-		}
-
-		p.kauthn = k8s.AuthenticationV1()
-	*/
-
-	// Update claims with global ones
-	if p.claimer, err = NewClaimer(p.Claims, config.Claims); err != nil {
-		return err
-	}
-
-	p.audiences = config.Audiences
-	return err
-}
-
-// authorizeToken performs common jwt authorization actions and returns the
-// claims for case specific downstream parsing.
-// e.g. a Sign request will auth/validate different fields than a Revoke request.
-func (p *K8sSA) authorizeToken(token string, audiences []string) (*k8sSAPayload, error) {
-	jwt, err := jose.ParseSigned(token)
-	if err != nil {
-		return nil, errs.Wrap(http.StatusUnauthorized, err,
-			"k8ssa.authorizeToken; error parsing k8sSA token")
-	}
-
-	var (
-		valid  bool
-		claims k8sSAPayload
-	)
-	if p.pubKeys == nil {
-		return nil, errs.Unauthorized("k8ssa.authorizeToken; k8sSA TokenReview API integration not implemented")
-		/* NOTE: We plan to support the TokenReview API in a future release.
-		   Below is some code that should be useful when we prioritize
-		   this integration.
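Init's PEM loop above decodes a concatenated bundle one block at a time; a runnable sketch of the same loop shape (the block bodies are dummy bytes, not real keys, so it only prints the block type instead of handing it to a key parser such as pemutil.ParseKey):

```go
package main

import (
	"encoding/pem"
	"fmt"
)

func main() {
	// Two concatenated PEM blocks, as a K8sSA provisioner's publicKeys
	// bundle might contain. The base64 payloads are placeholders.
	bundle := []byte(`-----BEGIN PUBLIC KEY-----
AAAA
-----END PUBLIC KEY-----
-----BEGIN PUBLIC KEY-----
AAAA
-----END PUBLIC KEY-----
`)

	// pem.Decode consumes one block and returns the remainder, which is
	// exactly the loop shape Init uses.
	for rest := bundle; rest != nil; {
		var block *pem.Block
		block, rest = pem.Decode(rest)
		if block == nil {
			break
		}
		fmt.Println("found block:", block.Type)
	}
}
```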
-
-			tr := kauthnApi.TokenReview{Spec: kauthnApi.TokenReviewSpec{Token: string(token)}}
-			rvw, err := p.kauthn.TokenReviews().Create(&tr)
-			if err != nil {
-				return nil, errors.Wrap(err, "error using kubernetes TokenReview API")
-			}
-			if rvw.Status.Error != "" {
-				return nil, errors.Errorf("error from kubernetes TokenReviewAPI: %s", rvw.Status.Error)
-			}
-			if !rvw.Status.Authenticated {
-				return nil, errors.New("error from kubernetes TokenReviewAPI: token could not be authenticated")
-			}
-			if err = jwt.UnsafeClaimsWithoutVerification(&claims); err != nil {
-				return nil, errors.Wrap(err, "error parsing claims")
-			}
-		*/
-	}
-	for _, pk := range p.pubKeys {
-		if err = jwt.Claims(pk, &claims); err == nil {
-			valid = true
-			break
-		}
-	}
-	if !valid {
-		return nil, errs.Unauthorized("k8ssa.authorizeToken; error validating k8sSA token and extracting claims")
-	}
-
-	// According to "rfc7519 JSON Web Token" acceptable skew should be no
-	// more than a few minutes.
-	if err = claims.Validate(jose.Expected{
-		Issuer: k8sSAIssuer,
-	}); err != nil {
-		return nil, errs.Wrap(http.StatusUnauthorized, err, "k8ssa.authorizeToken; invalid k8sSA token claims")
-	}
-
-	if claims.Subject == "" {
-		return nil, errs.Unauthorized("k8ssa.authorizeToken; k8sSA token subject cannot be empty")
-	}
-
-	return &claims, nil
-}
-
-// AuthorizeRevoke returns an error if the provisioner does not have rights to
-// revoke the certificate with serial number in the `sub` property.
-func (p *K8sSA) AuthorizeRevoke(ctx context.Context, token string) error {
-	_, err := p.authorizeToken(token, p.audiences.Revoke)
-	return errs.Wrap(http.StatusInternalServerError, err, "k8ssa.AuthorizeRevoke")
-}
-
-// AuthorizeSign validates the given token.
-func (p *K8sSA) AuthorizeSign(ctx context.Context, token string) ([]SignOption, error) {
-	claims, err := p.authorizeToken(token, p.audiences.Sign)
-	if err != nil {
-		return nil, errs.Wrap(http.StatusInternalServerError, err, "k8ssa.AuthorizeSign")
-	}
-
-	// Add some values to use in custom templates.
-	data := x509util.NewTemplateData()
-	data.SetCommonName(claims.ServiceAccountName)
-	if v, err := unsafeParseSigned(token); err == nil {
-		data.SetToken(v)
-	}
-
-	// Certificate templates: on K8sSA the default template is the certificate
-	// request.
-	templateOptions, err := CustomTemplateOptions(p.Options, data, x509util.DefaultAdminLeafTemplate)
-	if err != nil {
-		return nil, errs.Wrap(http.StatusInternalServerError, err, "k8ssa.AuthorizeSign")
-	}
-
-	return []SignOption{
-		templateOptions,
-		// modifiers / withOptions
-		newProvisionerExtensionOption(TypeK8sSA, p.Name, ""),
-		profileDefaultDuration(p.claimer.DefaultTLSCertDuration()),
-		// validators
-		defaultPublicKeyValidator{},
-		newValidityValidator(p.claimer.MinTLSCertDuration(), p.claimer.MaxTLSCertDuration()),
-	}, nil
-}
-
-// AuthorizeRenew returns an error if the renewal is disabled.
-func (p *K8sSA) AuthorizeRenew(ctx context.Context, cert *x509.Certificate) error {
-	if p.claimer.IsDisableRenewal() {
-		return errs.Unauthorized("k8ssa.AuthorizeRenew; renew is disabled for k8sSA provisioner '%s'", p.GetName())
-	}
-	return nil
-}
-
-// AuthorizeSSHSign validates a request for an SSH certificate.
-func (p *K8sSA) AuthorizeSSHSign(ctx context.Context, token string) ([]SignOption, error) {
-	if !p.claimer.IsSSHCAEnabled() {
-		return nil, errs.Unauthorized("k8ssa.AuthorizeSSHSign; sshCA is disabled for k8sSA provisioner '%s'", p.GetName())
-	}
-	claims, err := p.authorizeToken(token, p.audiences.SSHSign)
-	if err != nil {
-		return nil, errs.Wrap(http.StatusInternalServerError, err, "k8ssa.AuthorizeSSHSign")
-	}
-
-	// Certificate templates.
-	// Set some default variables to be used in the templates.
-	data := sshutil.CreateTemplateData(sshutil.HostCert, claims.ServiceAccountName, []string{claims.ServiceAccountName})
-	if v, err := unsafeParseSigned(token); err == nil {
-		data.SetToken(v)
-	}
-
-	templateOptions, err := CustomSSHTemplateOptions(p.Options, data, sshutil.CertificateRequestTemplate)
-	if err != nil {
-		return nil, errs.Wrap(http.StatusInternalServerError, err, "k8ssa.AuthorizeSSHSign")
-	}
-	signOptions := []SignOption{templateOptions}
-
-	return append(signOptions,
-		// Require type, key-id and principals in the SignSSHOptions.
-		&sshCertOptionsRequireValidator{CertType: true, KeyID: true, Principals: true},
-		// Set the validity bounds if not set.
-		&sshDefaultDuration{p.claimer},
-		// Validate public key
-		&sshDefaultPublicKeyValidator{},
-		// Validate the validity period.
-		&sshCertValidityValidator{p.claimer},
-		// Require and validate all the default fields in the SSH certificate.
-		&sshCertDefaultValidator{},
-	), nil
-}
-
-/*
-func checkAccess(authz kauthz.AuthorizationV1Interface) error {
-	r := &kauthzApi.SelfSubjectAccessReview{
-		Spec: kauthzApi.SelfSubjectAccessReviewSpec{
-			ResourceAttributes: &kauthzApi.ResourceAttributes{
-				Group:    "authentication.k8s.io",
-				Version:  "v1",
-				Resource: "tokenreviews",
-				Verb:     "create",
-			},
-		},
-	}
-	rvw, err := authz.SelfSubjectAccessReviews().Create(r)
-	if err != nil {
-		return err
-	}
-	if !rvw.Status.Allowed {
-		return fmt.Errorf("Unable to create kubernetes token reviews: %s", rvw.Status.Reason)
-	}
-
-	return nil
-}
-*/
diff --git a/vendor/github.com/smallstep/certificates/authority/provisioner/keystore.go b/vendor/github.com/smallstep/certificates/authority/provisioner/keystore.go
deleted file mode 100644
index f775e150..00000000
--- a/vendor/github.com/smallstep/certificates/authority/provisioner/keystore.go
+++ /dev/null
@@ -1,147 +0,0 @@
-package provisioner
-
-import (
-	"encoding/json"
-	"math/rand"
-	"net/http"
-	"regexp"
-	"strconv"
-	"sync"
-	"time"
-
-	"github.com/pkg/errors"
-	"go.step.sm/crypto/jose"
-)
-
-const (
-	defaultCacheAge    = 12 * time.Hour
-	defaultCacheJitter = 1 * time.Hour
-)
-
-var maxAgeRegex = regexp.MustCompile("max-age=([0-9]+)")
-
-type keyStore struct {
-	sync.RWMutex
-	uri    string
-	keySet jose.JSONWebKeySet
-	timer  *time.Timer
-	expiry time.Time
-	jitter time.Duration
-}
-
-func newKeyStore(uri string) (*keyStore, error) {
-	keys, age, err := getKeysFromJWKsURI(uri)
-	if err != nil {
-		return nil, err
-	}
-	ks := &keyStore{
-		uri:    uri,
-		keySet: keys,
-		expiry: getExpirationTime(age),
-		jitter: getCacheJitter(age),
-	}
-	next := ks.nextReloadDuration(age)
-	ks.timer = time.AfterFunc(next, ks.reload)
-	return ks, nil
-}
-
-func (ks *keyStore) Close() {
-	ks.timer.Stop()
-}
-
-func (ks *keyStore) Get(kid string) (keys []jose.JSONWebKey) {
-	ks.RLock()
-	// Force reload if expiration has passed
-	if time.Now().After(ks.expiry) {
-		ks.RUnlock()
-		ks.reload()
-		ks.RLock()
-	}
-	keys = ks.keySet.Key(kid)
-	ks.RUnlock()
-	return
-}
-
-func (ks *keyStore) reload() {
-	var next time.Duration
-
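The reload below re-fetches the JWKS and derives the next refresh interval from the response's Cache-Control header; the max-age extraction in isolation:

```go
package main

import (
	"fmt"
	"regexp"
	"strconv"
	"time"
)

var maxAgeRegex = regexp.MustCompile("max-age=([0-9]+)")

func main() {
	// A typical JWKS response header value; the keystore derives its
	// refresh age from the max-age directive.
	header := "public, max-age=86400, must-revalidate"

	if m := maxAgeRegex.FindStringSubmatch(header); len(m) == 2 {
		secs, err := strconv.ParseInt(m[1], 10, 64)
		if err == nil {
			fmt.Println(time.Duration(secs) * time.Second) // 24h0m0s
		}
	}
}
```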
-	keys, age, err := getKeysFromJWKsURI(ks.uri)
-	if err != nil {
-		next = ks.nextReloadDuration(ks.jitter / 2)
-	} else {
-		ks.Lock()
-		ks.keySet = keys
-		ks.expiry = getExpirationTime(age)
-		ks.jitter = getCacheJitter(age)
-		next = ks.nextReloadDuration(age)
-		ks.Unlock()
-	}
-
-	ks.Lock()
-	ks.timer.Reset(next)
-	ks.Unlock()
-}
-
-// nextReloadDuration returns the duration for the next rotation. If age is
-// 0 it will randomly rotate between 0-12 hours, but every call to Get
-// will automatically rotate it.
-func (ks *keyStore) nextReloadDuration(age time.Duration) time.Duration {
-	n := rand.Int63n(int64(ks.jitter))
-	age -= time.Duration(n)
-	return abs(age)
-}
-
-func getKeysFromJWKsURI(uri string) (jose.JSONWebKeySet, time.Duration, error) {
-	var keys jose.JSONWebKeySet
-	resp, err := http.Get(uri)
-	if err != nil {
-		return keys, 0, errors.Wrapf(err, "failed to connect to %s", uri)
-	}
-	defer resp.Body.Close()
-	if err := json.NewDecoder(resp.Body).Decode(&keys); err != nil {
-		return keys, 0, errors.Wrapf(err, "error reading %s", uri)
-	}
-	return keys, getCacheAge(resp.Header.Get("cache-control")), nil
-}
-
-func getCacheAge(cacheControl string) time.Duration {
-	age := defaultCacheAge
-	if len(cacheControl) > 0 {
-		match := maxAgeRegex.FindAllStringSubmatch(cacheControl, -1)
-		if len(match) > 0 {
-			if len(match[0]) == 2 {
-				maxAge := match[0][1]
-				maxAgeInt, err := strconv.ParseInt(maxAge, 10, 64)
-				if err != nil {
-					return defaultCacheAge
-				}
-				age = time.Duration(maxAgeInt) * time.Second
-			}
-		}
-	}
-	return age
-}
-
-func getCacheJitter(age time.Duration) time.Duration {
-	switch {
-	case age > time.Hour:
-		return defaultCacheJitter
-	case age == 0:
-		// Avoids a 0 jitter. The duration is not important as it will rotate
-		// automatically on each Get request.
-		return defaultCacheJitter
-	default:
-		return age / 3
-	}
-}
-
-func getExpirationTime(age time.Duration) time.Time {
-	return time.Now().Truncate(time.Second).Add(age)
-}
-
-// abs returns the absolute value of n.
-func abs(n time.Duration) time.Duration {
-	if n < 0 {
-		return -n
-	}
-	return n
-}
diff --git a/vendor/github.com/smallstep/certificates/authority/provisioner/method.go b/vendor/github.com/smallstep/certificates/authority/provisioner/method.go
deleted file mode 100644
index f5cd5221..00000000
--- a/vendor/github.com/smallstep/certificates/authority/provisioner/method.go
+++ /dev/null
@@ -1,63 +0,0 @@
-package provisioner
-
-import (
-	"context"
-)
-
-// Method indicates the action that we will perform. It is used as part of the
-// context in the call to authorize. It defaults to Sign.
-type Method int
-
-// The key to save the Method in the context.
-type methodKey struct{}
-
-const (
-	// SignMethod is the method used to sign X.509 certificates.
-	SignMethod Method = iota
-	// RevokeMethod is the method used to revoke X.509 certificates.
-	RevokeMethod
-	// RenewMethod is the method used to renew X.509 certificates.
-	RenewMethod
-	// SSHSignMethod is the method used to sign SSH certificates.
-	SSHSignMethod
-	// SSHRenewMethod is the method used to renew SSH certificates.
-	SSHRenewMethod
-	// SSHRevokeMethod is the method used to revoke SSH certificates.
-	SSHRevokeMethod
-	// SSHRekeyMethod is the method used to rekey SSH certificates.
-	SSHRekeyMethod
-)
-
-// String returns a string representation of the context method.
-func (m Method) String() string {
-	switch m {
-	case SignMethod:
-		return "sign-method"
-	case RevokeMethod:
-		return "revoke-method"
-	case RenewMethod:
-		return "renew-method"
-	case SSHSignMethod:
-		return "ssh-sign-method"
-	case SSHRenewMethod:
-		return "ssh-renew-method"
-	case SSHRevokeMethod:
-		return "ssh-revoke-method"
-	case SSHRekeyMethod:
-		return "ssh-rekey-method"
-	default:
-		return "unknown"
-	}
-}
-
-// NewContextWithMethod creates a new context from ctx and attaches method to
-// it.
-func NewContextWithMethod(ctx context.Context, method Method) context.Context {
-	return context.WithValue(ctx, methodKey{}, method)
-}
-
-// MethodFromContext returns the Method saved in ctx.
-func MethodFromContext(ctx context.Context) Method {
-	m, _ := ctx.Value(methodKey{}).(Method)
-	return m
-}
diff --git a/vendor/github.com/smallstep/certificates/authority/provisioner/noop.go b/vendor/github.com/smallstep/certificates/authority/provisioner/noop.go
deleted file mode 100644
index 18a38331..00000000
--- a/vendor/github.com/smallstep/certificates/authority/provisioner/noop.go
+++ /dev/null
@@ -1,66 +0,0 @@
-package provisioner
-
-import (
-	"context"
-	"crypto/x509"
-
-	"golang.org/x/crypto/ssh"
-)
-
-// noop is a provisioner that accepts anything.
-type noop struct{}
-
-func (p *noop) GetID() string {
-	return "noop"
-}
-
-func (p *noop) GetIDForToken() string {
-	return "noop"
-}
-
-func (p *noop) GetTokenID(token string) (string, error) {
-	return "", nil
-}
-
-func (p *noop) GetName() string {
-	return "noop"
-}
-func (p *noop) GetType() Type {
-	return noopType
-}
-
-func (p *noop) GetEncryptedKey() (kid string, key string, ok bool) {
-	return "", "", false
-}
-
-func (p *noop) Init(config Config) error {
-	return nil
-}
-
-func (p *noop) AuthorizeSign(ctx context.Context, token string) ([]SignOption, error) {
-	return []SignOption{}, nil
-}
-
-func (p *noop) AuthorizeRenew(ctx context.Context, cert *x509.Certificate) error {
-	return nil
-}
-
-func (p *noop) AuthorizeRevoke(ctx context.Context, token string) error {
-	return nil
-}
-
-func (p *noop) AuthorizeSSHSign(ctx context.Context, token string) ([]SignOption, error) {
-	return []SignOption{}, nil
-}
-
-func (p *noop) AuthorizeSSHRenew(ctx context.Context, token string) (*ssh.Certificate, error) {
-	return nil, nil
-}
-
-func (p *noop) AuthorizeSSHRevoke(ctx context.Context, token string) error {
-	return nil
-}
-
-func (p *noop) AuthorizeSSHRekey(ctx context.Context, token string) (*ssh.Certificate, []SignOption, error) {
-	return nil, []SignOption{}, nil
-}
diff --git a/vendor/github.com/smallstep/certificates/authority/provisioner/oidc.go b/vendor/github.com/smallstep/certificates/authority/provisioner/oidc.go
deleted file mode 100644
index b6bca872..00000000
--- a/vendor/github.com/smallstep/certificates/authority/provisioner/oidc.go
+++ /dev/null
@@ -1,490 +0,0 @@
-package provisioner
-
-import (
-	"context"
-	"crypto/x509"
-	"encoding/json"
-	"net"
-	"net/http"
-	"net/url"
-	"path"
-	"strings"
-	"time"
-
-	"github.com/pkg/errors"
-	"github.com/smallstep/certificates/errs"
-	"go.step.sm/crypto/jose"
-	"go.step.sm/crypto/sshutil"
-	"go.step.sm/crypto/x509util"
-)
-
-// openIDConfiguration contains the necessary properties in the
-// `/.well-known/openid-configuration` document.
-type openIDConfiguration struct {
-	Issuer    string `json:"issuer"`
-	JWKSetURI string `json:"jwks_uri"`
-}
-
-// Validate validates the values in a well-known OpenID configuration endpoint.
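NewContextWithMethod/MethodFromContext above follow Go's unexported-key context pattern; a self-contained sketch (with a string standing in for the Method type):

```go
package main

import (
	"context"
	"fmt"
)

// An unexported key type keeps the context value collision-free; the
// getter's type assertion degrades to the zero value when nothing is stored.
type methodKey struct{}

func main() {
	ctx := context.WithValue(context.Background(), methodKey{}, "sign-method")
	m, _ := ctx.Value(methodKey{}).(string)
	fmt.Println(m) // sign-method
}
```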
-func (c openIDConfiguration) Validate() error {
-	switch {
-	case c.Issuer == "":
-		return errors.New("issuer cannot be empty")
-	case c.JWKSetURI == "":
-		return errors.New("jwks_uri cannot be empty")
-	default:
-		return nil
-	}
-}
-
-// openIDPayload represents the fields on the id_token JWT payload.
-type openIDPayload struct {
-	jose.Claims
-	AtHash          string   `json:"at_hash"`
-	AuthorizedParty string   `json:"azp"`
-	Email           string   `json:"email"`
-	EmailVerified   bool     `json:"email_verified"`
-	Hd              string   `json:"hd"`
-	Nonce           string   `json:"nonce"`
-	Groups          []string `json:"groups"`
-}
-
-// OIDC represents an OAuth 2.0 OpenID Connect provider.
-//
-// ClientSecret is mandatory, but it can be an empty string.
-type OIDC struct {
-	*base
-	ID                    string   `json:"-"`
-	Type                  string   `json:"type"`
-	Name                  string   `json:"name"`
-	ClientID              string   `json:"clientID"`
-	ClientSecret          string   `json:"clientSecret"`
-	ConfigurationEndpoint string   `json:"configurationEndpoint"`
-	TenantID              string   `json:"tenantID,omitempty"`
-	Admins                []string `json:"admins,omitempty"`
-	Domains               []string `json:"domains,omitempty"`
-	Groups                []string `json:"groups,omitempty"`
-	ListenAddress         string   `json:"listenAddress,omitempty"`
-	Claims                *Claims  `json:"claims,omitempty"`
-	Options               *Options `json:"options,omitempty"`
-	configuration         openIDConfiguration
-	keyStore              *keyStore
-	claimer               *Claimer
-	getIdentityFunc       GetIdentityFunc
-}
-
-// IsAdmin returns true if the given email is in the Admins allowlist, false
-// otherwise.
-func (o *OIDC) IsAdmin(email string) bool {
-	if email != "" {
-		email = sanitizeEmail(email)
-		for _, e := range o.Admins {
-			if email == sanitizeEmail(e) {
-				return true
-			}
-		}
-	}
-	return false
-}
-
-// IsAdminGroup returns true if one of the groups in the given list is in the
-// Admins allowlist, false otherwise.
-func (o *OIDC) IsAdminGroup(groups []string) bool {
-	for _, g := range groups {
-		// The groups and emails can be in the same array for now, but consider
-		// making a specialized option later.
-		for _, gadmin := range o.Admins {
-			if g == gadmin {
-				return true
-			}
-		}
-	}
-	return false
-}
-
-func sanitizeEmail(email string) string {
-	if i := strings.LastIndex(email, "@"); i >= 0 {
-		email = email[:i] + strings.ToLower(email[i:])
-	}
-	return email
-}
-
-// GetID returns the provisioner unique identifier; the OIDC provisioner uses
-// the clientID for this.
-func (o *OIDC) GetID() string {
-	if o.ID != "" {
-		return o.ID
-	}
-	return o.GetIDForToken()
-}
-
-// GetIDForToken returns an identifier that will be used to load the provisioner
-// from a token.
-func (o *OIDC) GetIDForToken() string {
-	return o.ClientID
-}
-
-// GetTokenID returns the identifier of the token; the OIDC provisioner uses
-// the token nonce for this.
-func (o *OIDC) GetTokenID(ott string) (string, error) {
-	// Validate payload
-	token, err := jose.ParseSigned(ott)
-	if err != nil {
-		return "", errors.Wrap(err, "error parsing token")
-	}
-
-	// Get claims w/out verification. We need to look up the provisioner
-	// key in order to verify the claims and we need the issuer from the claims
-	// before we can look up the provisioner.
-	var claims openIDPayload
-	if err = token.UnsafeClaimsWithoutVerification(&claims); err != nil {
-		return "", errors.Wrap(err, "error verifying claims")
-	}
-	return claims.Nonce, nil
-}
-
-// GetName returns the name of the provisioner.
-func (o *OIDC) GetName() string {
-	return o.Name
-}
-
-// GetType returns the type of provisioner.
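sanitizeEmail above lowercases only the domain part, since the local part of an address is case sensitive in principle; the same mapping in isolation:

```go
package main

import (
	"fmt"
	"strings"
)

// Same shape as the sanitizeEmail helper above.
func sanitizeEmail(email string) string {
	if i := strings.LastIndex(email, "@"); i >= 0 {
		email = email[:i] + strings.ToLower(email[i:])
	}
	return email
}

func main() {
	fmt.Println(sanitizeEmail("Jane.Doe@EXAMPLE.COM")) // Jane.Doe@example.com
}
```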
-func (o *OIDC) GetType() Type {
-	return TypeOIDC
-}
-
-// GetEncryptedKey is not available in an OIDC provisioner.
-func (o *OIDC) GetEncryptedKey() (kid string, key string, ok bool) {
-	return "", "", false
-}
-
-// Init validates and initializes the OIDC provider.
-func (o *OIDC) Init(config Config) (err error) {
-	switch {
-	case o.Type == "":
-		return errors.New("type cannot be empty")
-	case o.Name == "":
-		return errors.New("name cannot be empty")
-	case o.ClientID == "":
-		return errors.New("clientID cannot be empty")
-	case o.ConfigurationEndpoint == "":
-		return errors.New("configurationEndpoint cannot be empty")
-	}
-
-	// Validate listenAddress if given
-	if o.ListenAddress != "" {
-		if _, _, err := net.SplitHostPort(o.ListenAddress); err != nil {
-			return errors.Wrap(err, "error parsing listenAddress")
-		}
-	}
-
-	// Update claims with global ones
-	if o.claimer, err = NewClaimer(o.Claims, config.Claims); err != nil {
-		return err
-	}
-
-	// Decode and validate openid-configuration endpoint
-	u, err := url.Parse(o.ConfigurationEndpoint)
-	if err != nil {
-		return errors.Wrapf(err, "error parsing %s", o.ConfigurationEndpoint)
-	}
-	if !strings.Contains(u.Path, "/.well-known/openid-configuration") {
-		u.Path = path.Join(u.Path, "/.well-known/openid-configuration")
-	}
-	if err := getAndDecode(u.String(), &o.configuration); err != nil {
-		return err
-	}
-	if err := o.configuration.Validate(); err != nil {
-		return errors.Wrapf(err, "error parsing %s", o.ConfigurationEndpoint)
-	}
-	// Replace {tenantid} with the configured one
-	if o.TenantID != "" {
-		o.configuration.Issuer = strings.Replace(o.configuration.Issuer, "{tenantid}", o.TenantID, -1)
-	}
-	// Get JWK key set
-	o.keyStore, err = newKeyStore(o.configuration.JWKSetURI)
-	if err != nil {
-		return err
-	}
-
-	// Set the identity getter if it exists, otherwise use the default.
-	if config.GetIdentityFunc == nil {
-		o.getIdentityFunc = DefaultIdentityFunc
-	} else {
-		o.getIdentityFunc = config.GetIdentityFunc
-	}
-	return nil
-}
-
-// ValidatePayload validates the given token payload.
-func (o *OIDC) ValidatePayload(p openIDPayload) error {
-	// According to "rfc7519 JSON Web Token" acceptable skew should be no more
-	// than a few minutes.
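Init normalizes the configuration endpoint before fetching it; the path handling in isolation (issuer value illustrative):

```go
package main

import (
	"fmt"
	"net/url"
	"path"
	"strings"
)

func main() {
	// An issuer URL configured without the discovery suffix; Init appends
	// it when missing, as shown above.
	endpoint := "https://accounts.google.com"

	u, err := url.Parse(endpoint)
	if err != nil {
		panic(err)
	}
	if !strings.Contains(u.Path, "/.well-known/openid-configuration") {
		u.Path = path.Join(u.Path, "/.well-known/openid-configuration")
	}
	fmt.Println(u.String()) // https://accounts.google.com/.well-known/openid-configuration
}
```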
-	if err := p.ValidateWithLeeway(jose.Expected{
-		Issuer:   o.configuration.Issuer,
-		Audience: jose.Audience{o.ClientID},
-		Time:     time.Now().UTC(),
-	}, time.Minute); err != nil {
-		return errs.Wrap(http.StatusUnauthorized, err, "validatePayload: failed to validate oidc token payload")
-	}
-
-	// Validate azp if present
-	if p.AuthorizedParty != "" && p.AuthorizedParty != o.ClientID {
-		return errs.Unauthorized("validatePayload: failed to validate oidc token payload: invalid azp")
-	}
-
-	// Validate domains (case-insensitive)
-	if p.Email != "" && len(o.Domains) > 0 && !o.IsAdmin(p.Email) {
-		email := sanitizeEmail(p.Email)
-		var found bool
-		for _, d := range o.Domains {
-			if strings.HasSuffix(email, "@"+strings.ToLower(d)) {
-				found = true
-				break
-			}
-		}
-		if !found {
-			return errs.Unauthorized("validatePayload: failed to validate oidc token payload: email is not allowed")
-		}
-	}
-
-	// Filter by oidc group claim
-	if len(o.Groups) > 0 {
-		var found bool
-		for _, group := range o.Groups {
-			for _, g := range p.Groups {
-				if g == group {
-					found = true
-					break
-				}
-			}
-		}
-		if !found {
-			return errs.Unauthorized("validatePayload: oidc token payload validation failed: invalid group")
-		}
-	}
-
-	return nil
-}
-
-// authorizeToken applies the most common provisioner authorization claims,
-// leaving the rest to context specific methods.
-func (o *OIDC) authorizeToken(token string) (*openIDPayload, error) {
-	jwt, err := jose.ParseSigned(token)
-	if err != nil {
-		return nil, errs.Wrap(http.StatusUnauthorized, err,
-			"oidc.AuthorizeToken; error parsing oidc token")
-	}
-
-	// Parse claims to get the kid
-	var claims openIDPayload
-	if err := jwt.UnsafeClaimsWithoutVerification(&claims); err != nil {
-		return nil, errs.Wrap(http.StatusUnauthorized, err,
-			"oidc.AuthorizeToken; error parsing oidc token claims")
-	}
-
-	found := false
-	kid := jwt.Headers[0].KeyID
-	keys := o.keyStore.Get(kid)
-	for _, key := range keys {
-		if err := jwt.Claims(key, &claims); err == nil {
-			found = true
-			break
-		}
-	}
-	if !found {
-		return nil, errs.Unauthorized("oidc.AuthorizeToken; cannot validate oidc token")
-	}
-
-	if err := o.ValidatePayload(claims); err != nil {
-		return nil, errs.Wrap(http.StatusInternalServerError, err, "oidc.AuthorizeToken")
-	}
-
-	return &claims, nil
-}
-
-// AuthorizeRevoke returns an error if the provisioner does not have rights to
-// revoke the certificate with serial number in the `sub` property.
-// Only tokens generated by an admin have the right to revoke a certificate.
-func (o *OIDC) AuthorizeRevoke(ctx context.Context, token string) error {
-	claims, err := o.authorizeToken(token)
-	if err != nil {
-		return errs.Wrap(http.StatusInternalServerError, err, "oidc.AuthorizeRevoke")
-	}
-
-	// Only admins can revoke certificates.
-	if o.IsAdmin(claims.Email) {
-		return nil
-	}
-	return errs.Unauthorized("oidc.AuthorizeRevoke; cannot revoke with non-admin oidc token")
-}
-
-// AuthorizeSign validates the given token.
-func (o *OIDC) AuthorizeSign(ctx context.Context, token string) ([]SignOption, error) {
-	claims, err := o.authorizeToken(token)
-	if err != nil {
-		return nil, errs.Wrap(http.StatusInternalServerError, err, "oidc.AuthorizeSign")
-	}
-
-	// Certificate templates
-	sans := []string{}
-	if claims.Email != "" {
-		sans = append(sans, claims.Email)
-	}
-
-	// Add uri SAN with iss#sub if issuer is a URL with scheme.
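The iss#sub URI SAN named in the trailing comment is built by setting the URL fragment to the subject; for example (claim values illustrative):

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Illustrative issuer and subject claims from an id_token.
	issuer := "https://accounts.google.com"
	subject := "110248495921238986420"

	// Setting the fragment to the subject yields the iss#sub URI SAN.
	if iss, err := url.Parse(issuer); err == nil && iss.Scheme != "" {
		iss.Fragment = subject
		fmt.Println(iss.String()) // https://accounts.google.com#110248495921238986420
	}
}
```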
-	//
-	// According to https://openid.net/specs/openid-connect-core-1_0.html the
-	// iss value is a case sensitive URL using the https scheme that contains
-	// scheme, host, and optionally, port number and path components and no
-	// query or fragment components.
-	if iss, err := url.Parse(claims.Issuer); err == nil && iss.Scheme != "" {
-		iss.Fragment = claims.Subject
-		sans = append(sans, iss.String())
-	}
-
-	data := x509util.CreateTemplateData(claims.Subject, sans)
-	if v, err := unsafeParseSigned(token); err == nil {
-		data.SetToken(v)
-	}
-
-	// Use the default template unless no-templates are configured and email is
-	// an admin, in that case we will use the CR template.
-	defaultTemplate := x509util.DefaultLeafTemplate
-	if !o.Options.GetX509Options().HasTemplate() && o.IsAdmin(claims.Email) {
-		defaultTemplate = x509util.DefaultAdminLeafTemplate
-	}
-
-	templateOptions, err := CustomTemplateOptions(o.Options, data, defaultTemplate)
-	if err != nil {
-		return nil, errs.Wrap(http.StatusInternalServerError, err, "oidc.AuthorizeSign")
-	}
-
-	return []SignOption{
-		templateOptions,
-		// modifiers / withOptions
-		newProvisionerExtensionOption(TypeOIDC, o.Name, o.ClientID),
-		profileDefaultDuration(o.claimer.DefaultTLSCertDuration()),
-		// validators
-		defaultPublicKeyValidator{},
-		newValidityValidator(o.claimer.MinTLSCertDuration(), o.claimer.MaxTLSCertDuration()),
-	}, nil
-}
-
-// AuthorizeRenew returns an error if the renewal is disabled.
-// NOTE: This method does not actually validate the certificate or check its
-// revocation status. It just confirms that the provisioner that created the
-// certificate was configured to allow renewals.
-func (o *OIDC) AuthorizeRenew(ctx context.Context, cert *x509.Certificate) error {
-	if o.claimer.IsDisableRenewal() {
-		return errs.Unauthorized("oidc.AuthorizeRenew; renew is disabled for oidc provisioner '%s'", o.GetName())
-	}
-	return nil
-}
-
-// AuthorizeSSHSign returns the list of SignOption for a SignSSH request.
-func (o *OIDC) AuthorizeSSHSign(ctx context.Context, token string) ([]SignOption, error) {
-	if !o.claimer.IsSSHCAEnabled() {
-		return nil, errs.Unauthorized("oidc.AuthorizeSSHSign; sshCA is disabled for oidc provisioner '%s'", o.GetName())
-	}
-	claims, err := o.authorizeToken(token)
-	if err != nil {
-		return nil, errs.Wrap(http.StatusInternalServerError, err, "oidc.AuthorizeSSHSign")
-	}
-	// Enforce an email claim
-	if claims.Email == "" {
-		return nil, errs.Unauthorized("oidc.AuthorizeSSHSign: failed to validate oidc token payload: email not found")
-	}
-
-	// Get the identity using either the default identityFunc or one injected
-	// externally. Note that the PreferredUsername might be empty.
-	// TBD: Would preferred_username present a safety issue here?
-	iden, err := o.getIdentityFunc(ctx, o, claims.Email)
-	if err != nil {
-		return nil, errs.Wrap(http.StatusInternalServerError, err, "oidc.AuthorizeSSHSign")
-	}
-
-	// Certificate templates.
-	data := sshutil.CreateTemplateData(sshutil.UserCert, claims.Email, iden.Usernames)
-	if v, err := unsafeParseSigned(token); err == nil {
-		data.SetToken(v)
-	}
-	// Add custom extensions added in the identity function.
-	for k, v := range iden.Permissions.Extensions {
-		data.AddExtension(k, v)
-	}
-	// Add custom critical options added in the identity function.
-	for k, v := range iden.Permissions.CriticalOptions {
-		data.AddCriticalOption(k, v)
-	}
-
-	// Use the default template unless no-templates are configured and email is
-	// an admin, in that case we will use the parameters in the request.
-	isAdmin := o.IsAdmin(claims.Email)
-	if !isAdmin && len(claims.Groups) > 0 {
-		isAdmin = o.IsAdminGroup(claims.Groups)
-	}
-	defaultTemplate := sshutil.DefaultTemplate
-	if isAdmin && !o.Options.GetSSHOptions().HasTemplate() {
-		defaultTemplate = sshutil.DefaultAdminTemplate
-	}
-
-	templateOptions, err := CustomSSHTemplateOptions(o.Options, data, defaultTemplate)
-	if err != nil {
-		return nil, errs.Wrap(http.StatusInternalServerError, err, "oidc.AuthorizeSSHSign")
-	}
-	signOptions := []SignOption{templateOptions}
-
-	// Admin users can use any principal, and can sign user and host certificates.
-	// Non-admin users can only use principals returned by the identityFunc, and
-	// can only sign user certificates.
-	if isAdmin {
-		signOptions = append(signOptions, &sshCertOptionsRequireValidator{
-			CertType:   true,
-			KeyID:      true,
-			Principals: true,
-		})
-	} else {
-		signOptions = append(signOptions, sshCertOptionsValidator(SignSSHOptions{
-			CertType:   SSHUserCert,
-			Principals: iden.Usernames,
-		}))
-	}
-
-	return append(signOptions,
-		// Set the validity bounds if not set.
-		&sshDefaultDuration{o.claimer},
-		// Validate public key
-		&sshDefaultPublicKeyValidator{},
-		// Validate the validity period.
-		&sshCertValidityValidator{o.claimer},
-		// Require all the fields in the SSH certificate
-		&sshCertDefaultValidator{},
-	), nil
-}
-
-// AuthorizeSSHRevoke returns nil if the token is valid, an error otherwise.
-func (o *OIDC) AuthorizeSSHRevoke(ctx context.Context, token string) error {
-	claims, err := o.authorizeToken(token)
-	if err != nil {
-		return errs.Wrap(http.StatusInternalServerError, err, "oidc.AuthorizeSSHRevoke")
-	}
-
-	// Only admins can revoke certificates.
-	if !o.IsAdmin(claims.Email) {
-		return errs.Unauthorized("oidc.AuthorizeSSHRevoke; cannot revoke with non-admin oidc token")
-	}
-	return nil
-}
-
-func getAndDecode(uri string, v interface{}) error {
-	resp, err := http.Get(uri)
-	if err != nil {
-		return errors.Wrapf(err, "failed to connect to %s", uri)
-	}
-	defer resp.Body.Close()
-	if err := json.NewDecoder(resp.Body).Decode(v); err != nil {
-		return errors.Wrapf(err, "error reading %s", uri)
-	}
-	return nil
-}
diff --git a/vendor/github.com/smallstep/certificates/authority/provisioner/options.go b/vendor/github.com/smallstep/certificates/authority/provisioner/options.go
deleted file mode 100644
index 100aa588..00000000
--- a/vendor/github.com/smallstep/certificates/authority/provisioner/options.go
+++ /dev/null
@@ -1,145 +0,0 @@
-package provisioner
-
-import (
-	"encoding/json"
-	"strings"
-
-	"github.com/pkg/errors"
-	"go.step.sm/crypto/jose"
-	"go.step.sm/crypto/x509util"
-)
-
-// CertificateOptions is an interface that returns a list of options passed when
-// creating a new certificate.
-type CertificateOptions interface {
-	Options(SignOptions) []x509util.Option
-}
-
-type certificateOptionsFunc func(SignOptions) []x509util.Option
-
-func (fn certificateOptionsFunc) Options(so SignOptions) []x509util.Option {
-	return fn(so)
-}
-
-// Options are a collection of custom options that can be added to
-// each provisioner.
-type Options struct {
-	X509 *X509Options `json:"x509,omitempty"`
-	SSH  *SSHOptions  `json:"ssh,omitempty"`
-}
-
-// GetX509Options returns the X.509 options.
-func (o *Options) GetX509Options() *X509Options {
-	if o == nil {
-		return nil
-	}
-	return o.X509
-}
-
-// GetSSHOptions returns the SSH options.
-func (o *Options) GetSSHOptions() *SSHOptions {
-	if o == nil {
-		return nil
-	}
-	return o.SSH
-}
-
-// X509Options contains specific options for X.509 certificates.
-type X509Options struct {
-	// Template contains an X.509 certificate template. It can be a JSON template
-	// escaped in a string, or it can also be encoded in base64.
-	Template string `json:"template,omitempty"`
-
-	// TemplateFile points to a file containing an X.509 certificate template.
-	TemplateFile string `json:"templateFile,omitempty"`
-
-	// TemplateData is a JSON object with variables that can be used in custom
-	// templates.
-	TemplateData json.RawMessage `json:"templateData,omitempty"`
-}
-
-// HasTemplate returns true if a template is defined in the provisioner options.
-func (o *X509Options) HasTemplate() bool {
-	return o != nil && (o.Template != "" || o.TemplateFile != "")
-}
-
-// TemplateOptions generates a CertificateOptions with the template and data
-// defined in the ProvisionerOptions, the provisioner generated data, and the
-// user data provided in the request. If no template has been provided,
-// x509util.DefaultLeafTemplate will be used.
-func TemplateOptions(o *Options, data x509util.TemplateData) (CertificateOptions, error) {
-	return CustomTemplateOptions(o, data, x509util.DefaultLeafTemplate)
-}
-
-// CustomTemplateOptions generates a CertificateOptions with the template, data
-// defined in the ProvisionerOptions, the provisioner generated data and the
-// user data provided in the request. If no template has been provided in the
-// ProvisionerOptions, the given template will be used.
-func CustomTemplateOptions(o *Options, data x509util.TemplateData, defaultTemplate string) (CertificateOptions, error) {
-	opts := o.GetX509Options()
-	if data == nil {
-		data = x509util.NewTemplateData()
-	}
-
-	if opts != nil {
-		// Add template data if any.
-		if len(opts.TemplateData) > 0 && string(opts.TemplateData) != "null" {
-			if err := json.Unmarshal(opts.TemplateData, &data); err != nil {
-				return nil, errors.Wrap(err, "error unmarshaling template data")
-			}
-		}
-	}
-
-	return certificateOptionsFunc(func(so SignOptions) []x509util.Option {
-		// We're not provided user data without custom templates.
-		if !opts.HasTemplate() {
-			return []x509util.Option{
-				x509util.WithTemplate(defaultTemplate, data),
-			}
-		}
-
-		// Add user provided data.
-		if len(so.TemplateData) > 0 {
-			userObject := make(map[string]interface{})
-			if err := json.Unmarshal(so.TemplateData, &userObject); err != nil {
-				data.SetUserData(map[string]interface{}{})
-			} else {
-				data.SetUserData(userObject)
-			}
-		}
-
-		// Load a template from a file if Template is not defined.
-		if opts.Template == "" && opts.TemplateFile != "" {
-			return []x509util.Option{
-				x509util.WithTemplateFile(opts.TemplateFile, data),
-			}
-		}
-
-		// Load a template from the Template fields
-		// 1. As a JSON in a string.
-		template := strings.TrimSpace(opts.Template)
-		if strings.HasPrefix(template, "{") {
-			return []x509util.Option{
-				x509util.WithTemplate(template, data),
-			}
-		}
-		// 2. As a base64 encoded JSON.
-		return []x509util.Option{
-			x509util.WithTemplateBase64(template, data),
-		}
-	}), nil
-}
-
-// unsafeParseSigned parses the given token and returns all the claims without
-// verifying the signature of the token.
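CustomTemplateOptions above dispatches on the template's first character; an illustrative helper (not the package's API) showing the same JSON-vs-base64 detection:

```go
package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

// detectTemplate treats a value starting with "{" as inline JSON and
// anything else as base64-encoded JSON, mirroring the dispatch above.
func detectTemplate(template string) (string, error) {
	template = strings.TrimSpace(template)
	if strings.HasPrefix(template, "{") {
		return template, nil
	}
	b, err := base64.StdEncoding.DecodeString(template)
	if err != nil {
		return "", err
	}
	return string(b), nil
}

func main() {
	inline := `{"subject": {{ toJson .Subject }}}`
	encoded := base64.StdEncoding.EncodeToString([]byte(inline))

	t1, _ := detectTemplate(inline)
	t2, _ := detectTemplate(encoded)
	fmt.Println(t1 == t2) // true
}
```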
-func unsafeParseSigned(s string) (map[string]interface{}, error) {
-	token, err := jose.ParseSigned(s)
-	if err != nil {
-		return nil, err
-	}
-	claims := make(map[string]interface{})
-	if err = token.UnsafeClaimsWithoutVerification(&claims); err != nil {
-		return nil, err
-	}
-	return claims, nil
-}
diff --git a/vendor/github.com/smallstep/certificates/authority/provisioner/provisioner.go b/vendor/github.com/smallstep/certificates/authority/provisioner/provisioner.go
deleted file mode 100644
index 652cb888..00000000
--- a/vendor/github.com/smallstep/certificates/authority/provisioner/provisioner.go
+++ /dev/null
@@ -1,538 +0,0 @@
-package provisioner
-
-import (
-	"context"
-	"crypto/x509"
-	"encoding/json"
-	stderrors "errors"
-	"net/url"
-	"regexp"
-	"strings"
-
-	"github.com/pkg/errors"
-	"github.com/smallstep/certificates/db"
-	"github.com/smallstep/certificates/errs"
-	"golang.org/x/crypto/ssh"
-)
-
-// Interface is the interface that all provisioner types must implement.
-type Interface interface {
-	GetID() string
-	GetIDForToken() string
-	GetTokenID(token string) (string, error)
-	GetName() string
-	GetType() Type
-	GetEncryptedKey() (kid string, key string, ok bool)
-	Init(config Config) error
-	AuthorizeSign(ctx context.Context, token string) ([]SignOption, error)
-	AuthorizeRevoke(ctx context.Context, token string) error
-	AuthorizeRenew(ctx context.Context, cert *x509.Certificate) error
-	AuthorizeSSHSign(ctx context.Context, token string) ([]SignOption, error)
-	AuthorizeSSHRevoke(ctx context.Context, token string) error
-	AuthorizeSSHRenew(ctx context.Context, token string) (*ssh.Certificate, error)
-	AuthorizeSSHRekey(ctx context.Context, token string) (*ssh.Certificate, []SignOption, error)
-}
-
-// ErrAllowTokenReuse is an error that is returned by provisioners that allow
-// the reuse of tokens.
-//
-// This is, for example, returned by the Azure provisioner when
-// DisableTrustOnFirstUse is set to true. Azure caches tokens for up to 24hr and
-// has no mechanism for getting a different token - this can be an issue when
-// rebooting a VM. In contrast, AWS and GCP have facilities for requesting a new
-// token. Therefore, for the Azure provisioner we are enabling token reuse, with
-// the understanding that we are not following security best practices.
-var ErrAllowTokenReuse = stderrors.New("allow token reuse")
-
-// Audiences stores all supported audiences by request type.
-type Audiences struct {
-	Sign      []string
-	Revoke    []string
-	SSHSign   []string
-	SSHRevoke []string
-	SSHRenew  []string
-	SSHRekey  []string
-}
-
-// All returns all supported audiences across all request types in one list.
-func (a Audiences) All() (auds []string) {
-	auds = a.Sign
-	auds = append(auds, a.Revoke...)
-	auds = append(auds, a.SSHSign...)
-	auds = append(auds, a.SSHRevoke...)
-	auds = append(auds, a.SSHRenew...)
-	auds = append(auds, a.SSHRekey...)
-	return
-}
-
-// WithFragment returns a copy of audiences where the URL audiences contain the
-// given fragment.
-func (a Audiences) WithFragment(fragment string) Audiences {
-	ret := Audiences{
-		Sign:      make([]string, len(a.Sign)),
-		Revoke:    make([]string, len(a.Revoke)),
-		SSHSign:   make([]string, len(a.SSHSign)),
-		SSHRevoke: make([]string, len(a.SSHRevoke)),
-		SSHRenew:  make([]string, len(a.SSHRenew)),
-		SSHRekey:  make([]string, len(a.SSHRekey)),
-	}
-	for i, s := range a.Sign {
-		if u, err := url.Parse(s); err == nil {
-			ret.Sign[i] = u.ResolveReference(&url.URL{Fragment: fragment}).String()
-		} else {
-			ret.Sign[i] = s
-		}
-	}
-	for i, s := range a.Revoke {
-		if u, err := url.Parse(s); err == nil {
-			ret.Revoke[i] = u.ResolveReference(&url.URL{Fragment: fragment}).String()
-		} else {
-			ret.Revoke[i] = s
-		}
-	}
-	for i, s := range a.SSHSign {
-		if u, err := url.Parse(s); err == nil {
-			ret.SSHSign[i] = u.ResolveReference(&url.URL{Fragment: fragment}).String()
-		} else {
-			ret.SSHSign[i] = s
-		}
-	}
-	for i, s := range a.SSHRevoke {
-		if u, err := url.Parse(s); err == nil {
-			ret.SSHRevoke[i] = u.ResolveReference(&url.URL{Fragment: fragment}).String()
-		} else {
-			ret.SSHRevoke[i] = s
-		}
-	}
-	for i, s := range a.SSHRenew {
-		if u, err := url.Parse(s); err == nil {
-			ret.SSHRenew[i] = u.ResolveReference(&url.URL{Fragment: fragment}).String()
-		} else {
-			ret.SSHRenew[i] = s
-		}
-	}
-	for i, s := range a.SSHRekey {
-		if u, err := url.Parse(s); err == nil {
-			ret.SSHRekey[i] = u.ResolveReference(&url.URL{Fragment: fragment}).String()
-		} else {
-			ret.SSHRekey[i] = s
-		}
-	}
-	return ret
-}
-
-// generateSignAudience generates a sign audience with the format
-// https://<ca-url>/1.0/sign#provisionerID
-func generateSignAudience(caURL string, provisionerID string) (string, error) {
-	u, err := url.Parse(caURL)
-	if err != nil {
-		return "", errors.Wrapf(err, "error parsing %s", caURL)
-	}
-	return u.ResolveReference(&url.URL{Path: "/1.0/sign", Fragment: provisionerID}).String(), nil
-}
-
-// Type indicates the provisioner Type.
-type Type int
-
-const (
-	noopType Type = 0
-	// TypeJWK is used to indicate the JWK provisioners.
-	TypeJWK Type = 1
-	// TypeOIDC is used to indicate the OIDC provisioners.
-	TypeOIDC Type = 2
-	// TypeGCP is used to indicate the GCP provisioners.
-	TypeGCP Type = 3
-	// TypeAWS is used to indicate the AWS provisioners.
-	TypeAWS Type = 4
-	// TypeAzure is used to indicate the Azure provisioners.
-	TypeAzure Type = 5
-	// TypeACME is used to indicate the ACME provisioners.
-	TypeACME Type = 6
-	// TypeX5C is used to indicate the X5C provisioners.
-	TypeX5C Type = 7
-	// TypeK8sSA is used to indicate the K8sSA provisioners.
-	TypeK8sSA Type = 8
-	// TypeSSHPOP is used to indicate the SSHPOP provisioners.
-	TypeSSHPOP Type = 9
-	// TypeSCEP is used to indicate the SCEP provisioners
-	TypeSCEP Type = 10
-)
-
-// String returns the string representation of the type.
-func (t Type) String() string {
-	switch t {
-	case TypeJWK:
-		return "JWK"
-	case TypeOIDC:
-		return "OIDC"
-	case TypeGCP:
-		return "GCP"
-	case TypeAWS:
-		return "AWS"
-	case TypeAzure:
-		return "Azure"
-	case TypeACME:
-		return "ACME"
-	case TypeX5C:
-		return "X5C"
-	case TypeK8sSA:
-		return "K8sSA"
-	case TypeSSHPOP:
-		return "SSHPOP"
-	case TypeSCEP:
-		return "SCEP"
-	default:
-		return ""
-	}
-}
-
-// SSHKeys represents the SSH User and Host public keys.
-type SSHKeys struct {
-	UserKeys []ssh.PublicKey
-	HostKeys []ssh.PublicKey
-}
-
-// Config defines the default parameters used in the initialization of
-// provisioners.
-type Config struct {
-	// Claims are the default claims.
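generateSignAudience above resolves the path and fragment against the CA URL; for example (CA URL and provisioner ID illustrative):

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Illustrative CA URL and provisioner ID.
	caURL := "https://ca.example.com"
	provisionerID := "admin:DmAtZt2E"

	u, err := url.Parse(caURL)
	if err != nil {
		panic(err)
	}
	// ResolveReference fills in the path and fragment the same way
	// generateSignAudience does.
	aud := u.ResolveReference(&url.URL{Path: "/1.0/sign", Fragment: provisionerID}).String()
	fmt.Println(aud) // https://ca.example.com/1.0/sign#admin:DmAtZt2E
}
```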
-	Claims Claims
-	// Audiences are the audiences used in the default provisioner, (JWK).
-	Audiences Audiences
-	// DB is the interface to the authority DB client.
-	DB db.AuthDB
-	// SSHKeys are the root SSH public keys
-	SSHKeys *SSHKeys
-	// GetIdentityFunc is a function that returns an identity that will be
-	// used by the provisioner to populate certificate attributes.
-	GetIdentityFunc GetIdentityFunc
-}
-
-type provisioner struct {
-	Type string `json:"type"`
-}
-
-// List represents a list of provisioners.
-type List []Interface
-
-// UnmarshalJSON implements json.Unmarshaler and allows unmarshaling a list of
-// interfaces into the right type.
-func (l *List) UnmarshalJSON(data []byte) error {
-	ps := []json.RawMessage{}
-	if err := json.Unmarshal(data, &ps); err != nil {
-		return errors.Wrap(err, "error unmarshaling provisioner list")
-	}
-
-	*l = List{}
-	for _, data := range ps {
-		var typ provisioner
-		if err := json.Unmarshal(data, &typ); err != nil {
-			return errors.Errorf("error unmarshaling provisioner")
-		}
-		var p Interface
-		switch strings.ToLower(typ.Type) {
-		case "jwk":
-			p = &JWK{}
-		case "oidc":
-			p = &OIDC{}
-		case "gcp":
-			p = &GCP{}
-		case "aws":
-			p = &AWS{}
-		case "azure":
-			p = &Azure{}
-		case "acme":
-			p = &ACME{}
-		case "x5c":
-			p = &X5C{}
-		case "k8ssa":
-			p = &K8sSA{}
-		case "sshpop":
-			p = &SSHPOP{}
-		case "scep":
-			p = &SCEP{}
-		default:
-			// Skip unsupported provisioners. A client using this method may be
-			// compiled with a version of smallstep/certificates that does not
-			// support a specific provisioner type. If we don't skip unknown
-			// provisioners, a client encountering an unknown provisioner will
-			// break. Rather than break the client, we skip the provisioner.
-			// TODO: accept a pluggable logger (depending on client) that can
-			// warn the user that an unknown provisioner was found and suggest
-			// that the user update their client's dependency on
-			// step/certificates and recompile.
-			continue
-		}
-		if err := json.Unmarshal(data, p); err != nil {
-			return errors.Wrap(err, "error unmarshaling provisioner")
-		}
-		*l = append(*l, p)
-	}
-
-	return nil
-}
-
-var sshUserRegex = regexp.MustCompile("^[a-z][-a-z0-9_]*$")
-
-// SanitizeSSHUserPrincipal grabs an email or a string with the format
-// local@domain and returns a sanitized version of the local, valid to be used
-// as a user name. If the email starts with a letter between a and z, the
-// resulting string will match the regular expression `^[a-z][-a-z0-9_]*$`.
-func SanitizeSSHUserPrincipal(email string) string {
-	if i := strings.LastIndex(email, "@"); i >= 0 {
-		email = email[:i]
-	}
-	return strings.Map(func(r rune) rune {
-		switch {
-		case r >= 'a' && r <= 'z':
-			return r
-		case r >= '0' && r <= '9':
-			return r
-		case r == '-':
-			return '-'
-		case r == '.': // drop dots
-			return -1
-		default:
-			return '_'
-		}
-	}, strings.ToLower(email))
-}
-
-type base struct{}
-
-// AuthorizeSign returns an unimplemented error. Provisioners should overwrite
-// this method if they will support authorizing tokens for signing x509 Certificates.
-func (b *base) AuthorizeSign(ctx context.Context, token string) ([]SignOption, error) {
-	return nil, errs.Unauthorized("provisioner.AuthorizeSign not implemented")
-}
-
-// AuthorizeRevoke returns an unimplemented error. Provisioners should overwrite
-// this method if they will support authorizing tokens for revoking x509 Certificates.
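SanitizeSSHUserPrincipal's mapping in isolation (sanitize here is a local copy for illustration):

```go
package main

import (
	"fmt"
	"strings"
)

// Same mapping as SanitizeSSHUserPrincipal above: keep [a-z0-9-], drop
// dots, and replace anything else with an underscore after lowercasing.
func sanitize(email string) string {
	if i := strings.LastIndex(email, "@"); i >= 0 {
		email = email[:i]
	}
	return strings.Map(func(r rune) rune {
		switch {
		case r >= 'a' && r <= 'z', r >= '0' && r <= '9', r == '-':
			return r
		case r == '.':
			return -1
		default:
			return '_'
		}
	}, strings.ToLower(email))
}

func main() {
	fmt.Println(sanitize("Jane.Doe@example.com")) // janedoe
	fmt.Println(sanitize("dev+ops@example.com"))  // dev_ops
}
```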
-// AuthorizeRevoke returns an unimplemented error. Provisioners should overwrite
-// this method if they will support authorizing tokens for revoking x509 Certificates.
-func (b *base) AuthorizeRevoke(ctx context.Context, token string) error {
-	return errs.Unauthorized("provisioner.AuthorizeRevoke not implemented")
-}
-
-// AuthorizeRenew returns an unimplemented error. Provisioners should overwrite
-// this method if they will support authorizing tokens for renewing x509 Certificates.
-func (b *base) AuthorizeRenew(ctx context.Context, cert *x509.Certificate) error {
-	return errs.Unauthorized("provisioner.AuthorizeRenew not implemented")
-}
-
-// AuthorizeSSHSign returns an unimplemented error. Provisioners should overwrite
-// this method if they will support authorizing tokens for signing SSH Certificates.
-func (b *base) AuthorizeSSHSign(ctx context.Context, token string) ([]SignOption, error) {
-	return nil, errs.Unauthorized("provisioner.AuthorizeSSHSign not implemented")
-}
-
-// AuthorizeSSHRevoke returns an unimplemented error. Provisioners should overwrite
-// this method if they will support authorizing tokens for revoking SSH Certificates.
-func (b *base) AuthorizeSSHRevoke(ctx context.Context, token string) error {
-	return errs.Unauthorized("provisioner.AuthorizeSSHRevoke not implemented")
-}
-
-// AuthorizeSSHRenew returns an unimplemented error. Provisioners should overwrite
-// this method if they will support authorizing tokens for renewing SSH Certificates.
-func (b *base) AuthorizeSSHRenew(ctx context.Context, token string) (*ssh.Certificate, error) {
-	return nil, errs.Unauthorized("provisioner.AuthorizeSSHRenew not implemented")
-}
-
-// AuthorizeSSHRekey returns an unimplemented error. Provisioners should overwrite
-// this method if they will support authorizing tokens for rekeying SSH Certificates.
-func (b *base) AuthorizeSSHRekey(ctx context.Context, token string) (*ssh.Certificate, []SignOption, error) {
-	return nil, nil, errs.Unauthorized("provisioner.AuthorizeSSHRekey not implemented")
-}
-
-// Identity is the type representing an externally supplied identity that is used
-// by provisioners to populate certificate fields.
-type Identity struct {
-	Usernames   []string `json:"usernames"`
-	Permissions `json:"permissions"`
-}
-
-// Permissions defines extra extensions and critical options to grant to an SSH certificate.
-type Permissions struct {
-	Extensions      map[string]string `json:"extensions"`
-	CriticalOptions map[string]string `json:"criticalOptions"`
-}
-
-// GetIdentityFunc is a function that returns an identity.
-type GetIdentityFunc func(ctx context.Context, p Interface, email string) (*Identity, error)
-
-// DefaultIdentityFunc returns a default identity depending on the provisioner
-// type. For OIDC, email is always present and the usernames might
-// contain empty strings.
-func DefaultIdentityFunc(ctx context.Context, p Interface, email string) (*Identity, error) {
-	switch k := p.(type) {
-	case *OIDC:
-		// OIDC principals would be:
-		// ~~1. Preferred usernames.~~ Note: Under discussion, currently disabled
-		// 2. Sanitized local.
-		// 3. Raw local (if different).
-		// 4. Email address.
- name := SanitizeSSHUserPrincipal(email) - if !sshUserRegex.MatchString(name) { - return nil, errors.Errorf("invalid principal '%s' from email '%s'", name, email) - } - usernames := []string{name} - if i := strings.LastIndex(email, "@"); i >= 0 { - usernames = append(usernames, email[:i]) - } - usernames = append(usernames, email) - return &Identity{ - Usernames: SanitizeStringSlices(usernames), - }, nil - default: - return nil, errors.Errorf("provisioner type '%T' not supported by identity function", k) - } -} - -// SanitizeStringSlices removes duplicated an empty strings. -func SanitizeStringSlices(original []string) []string { - output := []string{} - seen := make(map[string]struct{}) - for _, entry := range original { - if entry == "" { - continue - } - if _, value := seen[entry]; !value { - seen[entry] = struct{}{} - output = append(output, entry) - } - } - return output -} - -// MockProvisioner for testing -type MockProvisioner struct { - Mret1, Mret2, Mret3 interface{} - Merr error - MgetID func() string - MgetIDForToken func() string - MgetTokenID func(string) (string, error) - MgetName func() string - MgetType func() Type - MgetEncryptedKey func() (string, string, bool) - Minit func(Config) error - MauthorizeSign func(ctx context.Context, ott string) ([]SignOption, error) - MauthorizeRenew func(ctx context.Context, cert *x509.Certificate) error - MauthorizeRevoke func(ctx context.Context, ott string) error - MauthorizeSSHSign func(ctx context.Context, ott string) ([]SignOption, error) - MauthorizeSSHRenew func(ctx context.Context, ott string) (*ssh.Certificate, error) - MauthorizeSSHRekey func(ctx context.Context, ott string) (*ssh.Certificate, []SignOption, error) - MauthorizeSSHRevoke func(ctx context.Context, ott string) error -} - -// GetID mock -func (m *MockProvisioner) GetID() string { - if m.MgetID != nil { - return m.MgetID() - } - return m.Mret1.(string) -} - -// GetIDForToken mock -func (m *MockProvisioner) GetIDForToken() string { - if m.MgetIDForToken != nil { - return m.MgetIDForToken() - } - return m.Mret1.(string) -} - -// GetTokenID mock -func (m *MockProvisioner) GetTokenID(token string) (string, error) { - if m.MgetTokenID != nil { - return m.MgetTokenID(token) - } - if m.Mret1 == nil { - return "", m.Merr - } - return m.Mret1.(string), m.Merr -} - -// GetName mock -func (m *MockProvisioner) GetName() string { - if m.MgetName != nil { - return m.MgetName() - } - return m.Mret1.(string) -} - -// GetType mock -func (m *MockProvisioner) GetType() Type { - if m.MgetType != nil { - return m.MgetType() - } - return m.Mret1.(Type) -} - -// GetEncryptedKey mock -func (m *MockProvisioner) GetEncryptedKey() (string, string, bool) { - if m.MgetEncryptedKey != nil { - return m.MgetEncryptedKey() - } - return m.Mret1.(string), m.Mret2.(string), m.Mret3.(bool) -} - -// Init mock -func (m *MockProvisioner) Init(c Config) error { - if m.Minit != nil { - return m.Minit(c) - } - return m.Merr -} - -// AuthorizeSign mock -func (m *MockProvisioner) AuthorizeSign(ctx context.Context, ott string) ([]SignOption, error) { - if m.MauthorizeSign != nil { - return m.MauthorizeSign(ctx, ott) - } - return m.Mret1.([]SignOption), m.Merr -} - -// AuthorizeRevoke mock -func (m *MockProvisioner) AuthorizeRevoke(ctx context.Context, ott string) error { - if m.MauthorizeRevoke != nil { - return m.MauthorizeRevoke(ctx, ott) - } - return m.Merr -} - -// AuthorizeRenew mock -func (m *MockProvisioner) AuthorizeRenew(ctx context.Context, c *x509.Certificate) error { - if m.MauthorizeRenew != nil { - 
return m.MauthorizeRenew(ctx, c)
-	}
-	return m.Merr
-}
-
-// AuthorizeSSHSign mock
-func (m *MockProvisioner) AuthorizeSSHSign(ctx context.Context, ott string) ([]SignOption, error) {
-	if m.MauthorizeSSHSign != nil {
-		return m.MauthorizeSSHSign(ctx, ott)
-	}
-	return m.Mret1.([]SignOption), m.Merr
-}
-
-// AuthorizeSSHRenew mock
-func (m *MockProvisioner) AuthorizeSSHRenew(ctx context.Context, ott string) (*ssh.Certificate, error) {
-	if m.MauthorizeSSHRenew != nil {
-		return m.MauthorizeSSHRenew(ctx, ott)
-	}
-	return m.Mret1.(*ssh.Certificate), m.Merr
-}
-
-// AuthorizeSSHRekey mock
-func (m *MockProvisioner) AuthorizeSSHRekey(ctx context.Context, ott string) (*ssh.Certificate, []SignOption, error) {
-	if m.MauthorizeSSHRekey != nil {
-		return m.MauthorizeSSHRekey(ctx, ott)
-	}
-	return m.Mret1.(*ssh.Certificate), m.Mret2.([]SignOption), m.Merr
-}
-
-// AuthorizeSSHRevoke mock
-func (m *MockProvisioner) AuthorizeSSHRevoke(ctx context.Context, ott string) error {
-	if m.MauthorizeSSHRevoke != nil {
-		return m.MauthorizeSSHRevoke(ctx, ott)
-	}
-	return m.Merr
-}
diff --git a/vendor/github.com/smallstep/certificates/authority/provisioner/scep.go b/vendor/github.com/smallstep/certificates/authority/provisioner/scep.go
deleted file mode 100644
index 145a1920..00000000
--- a/vendor/github.com/smallstep/certificates/authority/provisioner/scep.go
+++ /dev/null
@@ -1,131 +0,0 @@
-package provisioner
-
-import (
-	"context"
-	"time"
-
-	"github.com/pkg/errors"
-)
-
-// SCEP is the SCEP provisioner type, an entity that can authorize the
-// SCEP provisioning flow.
-type SCEP struct {
-	*base
-	ID   string `json:"-"`
-	Type string `json:"type"`
-	Name string `json:"name"`
-
-	ForceCN           bool     `json:"forceCN,omitempty"`
-	ChallengePassword string   `json:"challenge,omitempty"`
-	Capabilities      []string `json:"capabilities,omitempty"`
-	// MinimumPublicKeyLength is the minimum length for public keys in CSRs
-	MinimumPublicKeyLength int      `json:"minimumPublicKeyLength,omitempty"`
-	Options                *Options `json:"options,omitempty"`
-	Claims                 *Claims  `json:"claims,omitempty"`
-	claimer                *Claimer
-
-	secretChallengePassword string
-}
-
-// GetID returns the provisioner unique identifier.
-func (s *SCEP) GetID() string {
-	if s.ID != "" {
-		return s.ID
-	}
-	return s.GetIDForToken()
-}
-
-// GetIDForToken returns an identifier that will be used to load the provisioner
-// from a token.
-func (s *SCEP) GetIDForToken() string {
-	return "scep/" + s.Name
-}
-
-// GetName returns the name of the provisioner.
-func (s *SCEP) GetName() string {
-	return s.Name
-}
-
-// GetType returns the type of provisioner.
-func (s *SCEP) GetType() Type {
-	return TypeSCEP
-}
-
-// GetEncryptedKey returns the base provisioner encrypted key if it's defined.
-func (s *SCEP) GetEncryptedKey() (string, string, bool) {
-	return "", "", false
-}
-
-// GetTokenID returns the identifier of the token.
-func (s *SCEP) GetTokenID(ott string) (string, error) {
-	return "", errors.New("scep provisioner does not implement GetTokenID")
-}
-
-// GetOptions returns the configured provisioner options.
-func (s *SCEP) GetOptions() *Options {
-	return s.Options
-}
-
-// DefaultTLSCertDuration returns the default TLS cert duration enforced by
-// the provisioner.
-func (s *SCEP) DefaultTLSCertDuration() time.Duration {
-	return s.claimer.DefaultTLSCertDuration()
-}
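MockProvisioner above follows a common Go test-double pattern: every method first consults an optional function field for a per-test override and otherwise falls back to canned `Mret*`/`Merr` values. A reduced sketch of the same idea, under hypothetical `Greeter`/`MockGreeter` names:

```go
package main

import "fmt"

type Greeter interface {
	Greet(name string) (string, error)
}

// MockGreeter mirrors the MockProvisioner layout: a function field for
// per-test behavior plus canned fallback values.
type MockGreeter struct {
	Mret1  string
	Merr   error
	Mgreet func(name string) (string, error)
}

func (m *MockGreeter) Greet(name string) (string, error) {
	if m.Mgreet != nil {
		return m.Mgreet(name) // per-test override
	}
	return m.Mret1, m.Merr // canned default
}

func main() {
	var g Greeter = &MockGreeter{Mret1: "hello"}
	s, _ := g.Greet("anyone")
	fmt.Println(s) // hello

	g = &MockGreeter{Mgreet: func(name string) (string, error) {
		return "hi " + name, nil
	}}
	s, _ = g.Greet("bob")
	fmt.Println(s) // hi bob
}
```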
-// Init initializes and validates the fields of a SCEP type.
-func (s *SCEP) Init(config Config) (err error) {
-
-	switch {
-	case s.Type == "":
-		return errors.New("provisioner type cannot be empty")
-	case s.Name == "":
-		return errors.New("provisioner name cannot be empty")
-	}
-
-	// Update claims with global ones
-	if s.claimer, err = NewClaimer(s.Claims, config.Claims); err != nil {
-		return err
-	}
-
-	// Mask the actual challenge value, so it won't be marshaled
-	s.secretChallengePassword = s.ChallengePassword
-	s.ChallengePassword = "*** redacted ***"
-
-	// Default to 2048 bits minimum public key length (for CSRs) if not set
-	if s.MinimumPublicKeyLength == 0 {
-		s.MinimumPublicKeyLength = 2048
-	}
-
-	if s.MinimumPublicKeyLength%8 != 0 {
-		return errors.Errorf("the minimum public key length in bits must be divisible by 8; %d is not", s.MinimumPublicKeyLength)
-	}
-
-	// TODO: add other, SCEP specific, options?
-
-	return err
-}
-
-// AuthorizeSign does not do any verification, because all verification is handled
-// in the SCEP protocol. This method returns a list of modifiers / constraints
-// on the resulting certificate.
-func (s *SCEP) AuthorizeSign(ctx context.Context, token string) ([]SignOption, error) {
-	return []SignOption{
-		// modifiers / withOptions
-		newProvisionerExtensionOption(TypeSCEP, s.Name, ""),
-		newForceCNOption(s.ForceCN),
-		profileDefaultDuration(s.claimer.DefaultTLSCertDuration()),
-		// validators
-		newPublicKeyMinimumLengthValidator(s.MinimumPublicKeyLength),
-		newValidityValidator(s.claimer.MinTLSCertDuration(), s.claimer.MaxTLSCertDuration()),
-	}, nil
-}
-
-// GetChallengePassword returns the challenge password.
-func (s *SCEP) GetChallengePassword() string {
-	return s.secretChallengePassword
-}
-
-// GetCapabilities returns the CA capabilities.
-func (s *SCEP) GetCapabilities() []string {
-	return s.Capabilities
-}
diff --git a/vendor/github.com/smallstep/certificates/authority/provisioner/sign_options.go b/vendor/github.com/smallstep/certificates/authority/provisioner/sign_options.go
deleted file mode 100644
index 764916b6..00000000
--- a/vendor/github.com/smallstep/certificates/authority/provisioner/sign_options.go
+++ /dev/null
@@ -1,488 +0,0 @@
-package provisioner
-
-import (
-	"crypto/ecdsa"
-	"crypto/ed25519"
-	"crypto/rsa"
-	"crypto/x509"
-	"crypto/x509/pkix"
-	"encoding/asn1"
-	"encoding/json"
-	"net"
-	"net/url"
-	"reflect"
-	"time"
-
-	"github.com/pkg/errors"
-	"go.step.sm/crypto/x509util"
-)
-
-// DefaultCertValidity is the default validity for a certificate if none is specified.
-const DefaultCertValidity = 24 * time.Hour
-
-// SignOptions contains the options that can be passed to the Sign method. Backdate
-// is automatically filled and can only be configured in the CA.
-type SignOptions struct {
-	NotAfter     TimeDuration    `json:"notAfter"`
-	NotBefore    TimeDuration    `json:"notBefore"`
-	TemplateData json.RawMessage `json:"templateData"`
-	Backdate     time.Duration   `json:"-"`
-}
-
-// SignOption is the interface used to collect all extra options used in the
-// Sign method.
-type SignOption interface{}
-
-// CertificateValidator is an interface used to validate a given X.509 certificate.
-type CertificateValidator interface {
-	Valid(cert *x509.Certificate, opts SignOptions) error
-}
-
-// CertificateRequestValidator is an interface used to validate a given X.509 certificate request.
-type CertificateRequestValidator interface {
-	Valid(cr *x509.CertificateRequest) error
-}
-
-// CertificateModifier is an interface used to modify a given X.509 certificate.
-// Types implementing this interface will be validated with a
-// CertificateValidator.
-type CertificateModifier interface {
-	Modify(cert *x509.Certificate, opts SignOptions) error
-}
-
-// CertificateEnforcer is an interface used to modify a given X.509 certificate.
-// Types implementing this interface will NOT be validated with a
-// CertificateValidator.
-type CertificateEnforcer interface {
-	Enforce(cert *x509.Certificate) error
-}
-
-// CertificateModifierFunc allows creating simple certificate modifiers with
-// just a function.
-type CertificateModifierFunc func(cert *x509.Certificate, opts SignOptions) error
-
-// Modify implements CertificateModifier and just calls the defined function.
-func (fn CertificateModifierFunc) Modify(cert *x509.Certificate, opts SignOptions) error {
-	return fn(cert, opts)
-}
-
-// CertificateEnforcerFunc allows creating simple certificate enforcers with
-// just a function.
-type CertificateEnforcerFunc func(cert *x509.Certificate) error
-
-// Enforce implements CertificateEnforcer and just calls the defined function.
-func (fn CertificateEnforcerFunc) Enforce(cert *x509.Certificate) error {
-	return fn(cert)
-}
-
-// emailOnlyIdentity is a CertificateRequestValidator that checks that the only
-// SAN provided is the given email address.
-type emailOnlyIdentity string
-
-func (e emailOnlyIdentity) Valid(req *x509.CertificateRequest) error {
-	switch {
-	case len(req.DNSNames) > 0:
-		return errors.New("certificate request cannot contain DNS names")
-	case len(req.IPAddresses) > 0:
-		return errors.New("certificate request cannot contain IP addresses")
-	case len(req.URIs) > 0:
-		return errors.New("certificate request cannot contain URIs")
-	case len(req.EmailAddresses) == 0:
-		return errors.New("certificate request does not contain any email address")
-	case len(req.EmailAddresses) > 1:
-		return errors.New("certificate request contains too many email addresses")
-	case req.EmailAddresses[0] == "":
-		return errors.New("certificate request cannot contain an empty email address")
-	case req.EmailAddresses[0] != string(e):
-		return errors.Errorf("certificate request does not contain the valid email address, got %s, want %s", req.EmailAddresses[0], e)
-	default:
-		return nil
-	}
-}
-
-// defaultPublicKeyValidator validates the public key of a certificate request.
-type defaultPublicKeyValidator struct{}
-
-// Valid checks that the public key in the certificate request meets the
-// minimum requirements: RSA keys must be at least 2048 bits.
-func (v defaultPublicKeyValidator) Valid(req *x509.CertificateRequest) error {
-	switch k := req.PublicKey.(type) {
-	case *rsa.PublicKey:
-		if k.Size() < 256 {
-			return errors.New("rsa key in CSR must be at least 2048 bits (256 bytes)")
-		}
-	case *ecdsa.PublicKey, ed25519.PublicKey:
-	default:
-		return errors.Errorf("unrecognized public key of type '%T' in CSR", k)
-	}
-	return nil
-}
-
-// publicKeyMinimumLengthValidator validates that the length (in bits) of the
-// public key of a certificate request is at least a certain length.
-type publicKeyMinimumLengthValidator struct {
-	length int
-}
-
-// newPublicKeyMinimumLengthValidator creates a new publicKeyMinimumLengthValidator
-// with the given length as its minimum value.
-// TODO: change the defaultPublicKeyValidator to have a configurable length instead?
-func newPublicKeyMinimumLengthValidator(length int) publicKeyMinimumLengthValidator {
-	return publicKeyMinimumLengthValidator{
-		length: length,
-	}
-}
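The byte-versus-bit arithmetic in these validators is the part worth double-checking: `rsa.PublicKey.Size()` returns the modulus length in bytes, so a minimum of `length` bits translates to `length/8` bytes (2048 bits = 256 bytes). A runnable sketch of the same check; the `validateMinKeyLength` helper is illustrative, not part of the deleted API:

```go
package main

import (
	"crypto/ecdsa"
	"crypto/ed25519"
	"crypto/rand"
	"crypto/rsa"
	"fmt"
)

// validateMinKeyLength enforces a minimum RSA modulus size given in bits,
// converting to bytes to match rsa.PublicKey.Size.
func validateMinKeyLength(pub interface{}, bits int) error {
	switch k := pub.(type) {
	case *rsa.PublicKey:
		if k.Size() < bits/8 {
			return fmt.Errorf("rsa key must be at least %d bits (%d bytes)", bits, bits/8)
		}
	case *ecdsa.PublicKey, ed25519.PublicKey:
		// fixed-size ECDSA/Ed25519 keys are accepted as-is
	default:
		return fmt.Errorf("unrecognized public key type '%T'", k)
	}
	return nil
}

func main() {
	weak, _ := rsa.GenerateKey(rand.Reader, 1024)
	strong, _ := rsa.GenerateKey(rand.Reader, 2048)
	fmt.Println(validateMinKeyLength(&weak.PublicKey, 2048))   // error: too short
	fmt.Println(validateMinKeyLength(&strong.PublicKey, 2048)) // <nil>
}
```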
-// Valid checks that the public key in the certificate request meets the
-// configured minimum length.
-func (v publicKeyMinimumLengthValidator) Valid(req *x509.CertificateRequest) error {
-	switch k := req.PublicKey.(type) {
-	case *rsa.PublicKey:
-		minimumLengthInBytes := v.length / 8
-		if k.Size() < minimumLengthInBytes {
-			return errors.Errorf("rsa key in CSR must be at least %d bits (%d bytes)", v.length, minimumLengthInBytes)
-		}
-	case *ecdsa.PublicKey, ed25519.PublicKey:
-	default:
-		return errors.Errorf("unrecognized public key of type '%T' in CSR", k)
-	}
-	return nil
-}
-
-// commonNameValidator validates the common name of a certificate request.
-type commonNameValidator string
-
-// Valid checks that certificate request common name matches the one configured.
-// An empty common name is considered valid.
-func (v commonNameValidator) Valid(req *x509.CertificateRequest) error {
-	if req.Subject.CommonName == "" {
-		return nil
-	}
-	if req.Subject.CommonName != string(v) {
-		return errors.Errorf("certificate request does not contain the valid common name; requested common name = %s, token subject = %s", req.Subject.CommonName, v)
-	}
-	return nil
-}
-
-// commonNameSliceValidator validates that the common name of a certificate
-// request is present in the slice. An empty common name is considered valid.
-type commonNameSliceValidator []string
-
-func (v commonNameSliceValidator) Valid(req *x509.CertificateRequest) error {
-	if req.Subject.CommonName == "" {
-		return nil
-	}
-	for _, cn := range v {
-		if req.Subject.CommonName == cn {
-			return nil
-		}
-	}
-	return errors.Errorf("certificate request does not contain the valid common name, got %s, want %s", req.Subject.CommonName, v)
-}
-
-// dnsNamesValidator validates the DNS names SAN of a certificate request.
-type dnsNamesValidator []string
-
-// Valid checks that certificate request DNS Names match those configured in
-// the bootstrap (token) flow.
-func (v dnsNamesValidator) Valid(req *x509.CertificateRequest) error {
-	if len(req.DNSNames) == 0 {
-		return nil
-	}
-	want := make(map[string]bool)
-	for _, s := range v {
-		want[s] = true
-	}
-	got := make(map[string]bool)
-	for _, s := range req.DNSNames {
-		got[s] = true
-	}
-	if !reflect.DeepEqual(want, got) {
-		return errors.Errorf("certificate request does not contain the valid DNS names - got %v, want %v", req.DNSNames, v)
-	}
-	return nil
-}
-
-// ipAddressesValidator validates the IP addresses SAN of a certificate request.
-type ipAddressesValidator []net.IP
-
-// Valid checks that certificate request IP Addresses match those configured in
-// the bootstrap (token) flow.
-func (v ipAddressesValidator) Valid(req *x509.CertificateRequest) error {
-	if len(req.IPAddresses) == 0 {
-		return nil
-	}
-	want := make(map[string]bool)
-	for _, ip := range v {
-		want[ip.String()] = true
-	}
-	got := make(map[string]bool)
-	for _, ip := range req.IPAddresses {
-		got[ip.String()] = true
-	}
-	if !reflect.DeepEqual(want, got) {
-		return errors.Errorf("IP Addresses claim failed - got %v, want %v", req.IPAddresses, v)
-	}
-	return nil
-}
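The SAN validators above all use the same order-insensitive comparison: build membership maps from both slices and compare them with `reflect.DeepEqual`, so ordering and duplicates are ignored. Distilled into a standalone sketch (`sameSet` is an illustrative name):

```go
package main

import (
	"fmt"
	"reflect"
)

// sameSet reports whether want and got contain the same members,
// ignoring order and duplicates.
func sameSet(want, got []string) bool {
	w := make(map[string]bool, len(want))
	for _, s := range want {
		w[s] = true
	}
	g := make(map[string]bool, len(got))
	for _, s := range got {
		g[s] = true
	}
	return reflect.DeepEqual(w, g)
}

func main() {
	fmt.Println(sameSet(
		[]string{"a.example.com", "b.example.com"},
		[]string{"b.example.com", "a.example.com"})) // true
	fmt.Println(sameSet(
		[]string{"a.example.com"},
		[]string{"a.example.com", "c.example.com"})) // false
}
```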
-// emailAddressesValidator validates the email address SANs of a certificate request.
-type emailAddressesValidator []string
-
-// Valid checks that certificate request email addresses match those configured
-// in the bootstrap (token) flow.
-func (v emailAddressesValidator) Valid(req *x509.CertificateRequest) error {
-	if len(req.EmailAddresses) == 0 {
-		return nil
-	}
-	want := make(map[string]bool)
-	for _, s := range v {
-		want[s] = true
-	}
-	got := make(map[string]bool)
-	for _, s := range req.EmailAddresses {
-		got[s] = true
-	}
-	if !reflect.DeepEqual(want, got) {
-		return errors.Errorf("certificate request does not contain the valid Email Addresses - got %v, want %v", req.EmailAddresses, v)
-	}
-	return nil
-}
-
-// urisValidator validates the URI SANs of a certificate request.
-type urisValidator []*url.URL
-
-// Valid checks that certificate request URIs match those configured in
-// the bootstrap (token) flow.
-func (v urisValidator) Valid(req *x509.CertificateRequest) error {
-	if len(req.URIs) == 0 {
-		return nil
-	}
-	want := make(map[string]bool)
-	for _, u := range v {
-		want[u.String()] = true
-	}
-	got := make(map[string]bool)
-	for _, u := range req.URIs {
-		got[u.String()] = true
-	}
-	if !reflect.DeepEqual(want, got) {
-		return errors.Errorf("URIs claim failed - got %v, want %v", req.URIs, v)
-	}
-	return nil
-}
-
-// defaultSANsValidator stores a set of SANs to eventually validate 1:1 against
-// the SANs in an x509 certificate request.
-type defaultSANsValidator []string
-
-// Valid verifies that the SANs stored in the validator match 1:1 with those
-// requested in the x509 certificate request.
-func (v defaultSANsValidator) Valid(req *x509.CertificateRequest) (err error) {
-	dnsNames, ips, emails, uris := x509util.SplitSANs(v)
-	if err = dnsNamesValidator(dnsNames).Valid(req); err != nil {
-		return
-	} else if err = emailAddressesValidator(emails).Valid(req); err != nil {
-		return
-	} else if err = ipAddressesValidator(ips).Valid(req); err != nil {
-		return
-	} else if err = urisValidator(uris).Valid(req); err != nil {
-		return
-	}
-	return
-}
-
-// profileDefaultDuration is a modifier that sets the certificate
-// duration.
-type profileDefaultDuration time.Duration
-
-func (v profileDefaultDuration) Modify(cert *x509.Certificate, so SignOptions) error {
-	var backdate time.Duration
-	notBefore := so.NotBefore.Time()
-	if notBefore.IsZero() {
-		notBefore = now()
-		backdate = -1 * so.Backdate
-	}
-	notAfter := so.NotAfter.RelativeTime(notBefore)
-	if notAfter.IsZero() {
-		if v != 0 {
-			notAfter = notBefore.Add(time.Duration(v))
-		} else {
-			notAfter = notBefore.Add(DefaultCertValidity)
-		}
-	}
-
-	cert.NotBefore = notBefore.Add(backdate)
-	cert.NotAfter = notAfter
-	return nil
-}
-
-// profileLimitDuration is an x509 profile option that modifies an x509 validity
-// period according to an imposed expiration time.
-type profileLimitDuration struct {
-	def                 time.Duration
-	notBefore, notAfter time.Time
-}
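The interplay of `NotBefore`, `NotAfter`, and `Backdate` in `profileDefaultDuration.Modify` above is subtle: the backdate is applied only when the CA chooses `notBefore` itself, and the default validity is anchored to the un-backdated `notBefore`. A worked sketch of that arithmetic, under an illustrative `validity` helper:

```go
package main

import (
	"fmt"
	"time"
)

// validity mirrors profileDefaultDuration.Modify: backdate only a
// server-chosen notBefore, and default notAfter to notBefore + def.
func validity(reqNotBefore, reqNotAfter time.Time, def, backdate time.Duration) (time.Time, time.Time) {
	var bd time.Duration
	nb := reqNotBefore
	if nb.IsZero() {
		nb = time.Now().UTC()
		bd = -backdate // caller did not pin notBefore: allow for clock skew
	}
	na := reqNotAfter
	if na.IsZero() {
		na = nb.Add(def) // anchored to the un-backdated notBefore
	}
	return nb.Add(bd), na
}

func main() {
	nb, na := validity(time.Time{}, time.Time{}, 24*time.Hour, time.Minute)
	// notBefore sits one minute in the past; notAfter is 24h after the
	// un-backdated notBefore, so the total lifetime is 24h1m.
	fmt.Println(na.Sub(nb)) // 24h1m0s
}
```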
-// Modify caps the certificate validity period to the externally imposed
-// notBefore/notAfter window.
-func (v profileLimitDuration) Modify(cert *x509.Certificate, so SignOptions) error {
-	var backdate time.Duration
-	notBefore := so.NotBefore.Time()
-	if notBefore.IsZero() {
-		notBefore = now()
-		backdate = -1 * so.Backdate
-	}
-	if notBefore.Before(v.notBefore) {
-		return errors.Errorf("requested certificate notBefore (%s) is before "+
-			"the active validity window of the provisioning credential (%s)",
-			notBefore, v.notBefore)
-	}
-
-	notAfter := so.NotAfter.RelativeTime(notBefore)
-	if notAfter.After(v.notAfter) {
-		return errors.Errorf("requested certificate notAfter (%s) is after "+
-			"the expiration of the provisioning credential (%s)",
-			notAfter, v.notAfter)
-	}
-	if notAfter.IsZero() {
-		t := notBefore.Add(v.def)
-		if t.After(v.notAfter) {
-			notAfter = v.notAfter
-		} else {
-			notAfter = t
-		}
-	}
-
-	cert.NotBefore = notBefore.Add(backdate)
-	cert.NotAfter = notAfter
-	return nil
-}
-
-// validityValidator validates the certificate validity settings.
-type validityValidator struct {
-	min time.Duration
-	max time.Duration
-}
-
-// newValidityValidator returns a new validity validator.
-func newValidityValidator(min, max time.Duration) *validityValidator {
-	return &validityValidator{min: min, max: max}
-}
-
-// Valid validates the certificate validity settings (notBefore/notAfter) and
-// total duration.
-func (v *validityValidator) Valid(cert *x509.Certificate, o SignOptions) error {
-	var (
-		na  = cert.NotAfter.Truncate(time.Second)
-		nb  = cert.NotBefore.Truncate(time.Second)
-		now = time.Now().Truncate(time.Second)
-	)
-
-	d := na.Sub(nb)
-
-	if na.Before(now) {
-		return errors.Errorf("notAfter cannot be in the past; na=%v", na)
-	}
-	if na.Before(nb) {
-		return errors.Errorf("notAfter cannot be before notBefore; na=%v, nb=%v", na, nb)
-	}
-	if d < v.min {
-		return errors.Errorf("requested duration of %v is less than the authorized minimum certificate duration of %v",
-			d, v.min)
-	}
-	// NOTE: this check is not "technically correct". We're allowing the max
-	// duration of a cert to be "max + backdate" and not all certificates will
-	// be backdated (e.g. if a user passes the NotBefore value then we do not
-	// apply a backdate). This is good enough.
-	if d > v.max+o.Backdate {
-		return errors.Errorf("requested duration of %v is more than the authorized maximum certificate duration of %v",
-			d, v.max+o.Backdate)
-	}
-	return nil
-}
-
-var (
-	stepOIDRoot        = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 37476, 9000, 64}
-	stepOIDProvisioner = append(asn1.ObjectIdentifier(nil), append(stepOIDRoot, 1)...)
-) - -type stepProvisionerASN1 struct { - Type int - Name []byte - CredentialID []byte - KeyValuePairs []string `asn1:"optional,omitempty"` -} - -type forceCNOption struct { - ForceCN bool -} - -func newForceCNOption(forceCN bool) *forceCNOption { - return &forceCNOption{forceCN} -} - -func (o *forceCNOption) Modify(cert *x509.Certificate, _ SignOptions) error { - if !o.ForceCN { - // Forcing CN is disabled, do nothing to certificate - return nil - } - - if cert.Subject.CommonName == "" { - if len(cert.DNSNames) > 0 { - cert.Subject.CommonName = cert.DNSNames[0] - } else { - return errors.New("Cannot force CN, DNSNames is empty") - } - } - - return nil -} - -type provisionerExtensionOption struct { - Type int - Name string - CredentialID string - KeyValuePairs []string -} - -func newProvisionerExtensionOption(typ Type, name, credentialID string, keyValuePairs ...string) *provisionerExtensionOption { - return &provisionerExtensionOption{ - Type: int(typ), - Name: name, - CredentialID: credentialID, - KeyValuePairs: keyValuePairs, - } -} - -func (o *provisionerExtensionOption) Modify(cert *x509.Certificate, _ SignOptions) error { - ext, err := createProvisionerExtension(o.Type, o.Name, o.CredentialID, o.KeyValuePairs...) - if err != nil { - return err - } - // Prepend the provisioner extension. In the auth.Sign code we will - // force the resulting certificate to only have one extension, the - // first stepOIDProvisioner that is found in the ExtraExtensions. - // A client could pass a csr containing a malicious stepOIDProvisioner - // ExtraExtension. If we were to append (rather than prepend) the correct - // stepOIDProvisioner extension, then the resulting certificate would - // contain the malicious extension, rather than the one applied by step-ca. - cert.ExtraExtensions = append([]pkix.Extension{ext}, cert.ExtraExtensions...) - return nil -} - -func createProvisionerExtension(typ int, name, credentialID string, keyValuePairs ...string) (pkix.Extension, error) { - b, err := asn1.Marshal(stepProvisionerASN1{ - Type: typ, - Name: []byte(name), - CredentialID: []byte(credentialID), - KeyValuePairs: keyValuePairs, - }) - if err != nil { - return pkix.Extension{}, errors.Wrapf(err, "error marshaling provisioner extension") - } - return pkix.Extension{ - Id: stepOIDProvisioner, - Critical: false, - Value: b, - }, nil -} diff --git a/vendor/github.com/smallstep/certificates/authority/provisioner/sign_ssh_options.go b/vendor/github.com/smallstep/certificates/authority/provisioner/sign_ssh_options.go deleted file mode 100644 index a872513e..00000000 --- a/vendor/github.com/smallstep/certificates/authority/provisioner/sign_ssh_options.go +++ /dev/null @@ -1,504 +0,0 @@ -package provisioner - -import ( - "crypto/rsa" - "encoding/binary" - "encoding/json" - "math/big" - "time" - - "github.com/pkg/errors" - "go.step.sm/crypto/keyutil" - "golang.org/x/crypto/ssh" -) - -const ( - // SSHUserCert is the string used to represent ssh.UserCert. - SSHUserCert = "user" - - // SSHHostCert is the string used to represent ssh.HostCert. - SSHHostCert = "host" -) - -// SSHCertModifier is the interface used to change properties in an SSH -// certificate. -type SSHCertModifier interface { - SignOption - Modify(cert *ssh.Certificate, opts SignSSHOptions) error -} - -// SSHCertValidator is the interface used to validate an SSH certificate. 
-type SSHCertValidator interface { - SignOption - Valid(cert *ssh.Certificate, opts SignSSHOptions) error -} - -// SSHCertOptionsValidator is the interface used to validate the custom -// options used to modify the SSH certificate. -type SSHCertOptionsValidator interface { - SignOption - Valid(got SignSSHOptions) error -} - -// SignSSHOptions contains the options that can be passed to the SignSSH method. -type SignSSHOptions struct { - CertType string `json:"certType"` - KeyID string `json:"keyID"` - Principals []string `json:"principals"` - ValidAfter TimeDuration `json:"validAfter,omitempty"` - ValidBefore TimeDuration `json:"validBefore,omitempty"` - TemplateData json.RawMessage `json:"templateData,omitempty"` - Backdate time.Duration `json:"-"` -} - -// Validate validates the given SignSSHOptions. -func (o SignSSHOptions) Validate() error { - if o.CertType != "" && o.CertType != SSHUserCert && o.CertType != SSHHostCert { - return errors.Errorf("unknown certType %s", o.CertType) - } - return nil -} - -// Type returns the uint32 representation of the CertType. -func (o SignSSHOptions) Type() uint32 { - return sshCertTypeUInt32(o.CertType) -} - -// Modify implements SSHCertModifier and sets the SSHOption in the ssh.Certificate. -func (o SignSSHOptions) Modify(cert *ssh.Certificate, _ SignSSHOptions) error { - switch o.CertType { - case "": // ignore - case SSHUserCert: - cert.CertType = ssh.UserCert - case SSHHostCert: - cert.CertType = ssh.HostCert - default: - return errors.Errorf("ssh certificate has an unknown type - %s", o.CertType) - } - - cert.KeyId = o.KeyID - cert.ValidPrincipals = o.Principals - - return o.ModifyValidity(cert) -} - -// ModifyValidity modifies only the ValidAfter and ValidBefore on the given -// ssh.Certificate. -func (o SignSSHOptions) ModifyValidity(cert *ssh.Certificate) error { - t := now() - if !o.ValidAfter.IsZero() { - cert.ValidAfter = uint64(o.ValidAfter.RelativeTime(t).Unix()) - } - if !o.ValidBefore.IsZero() { - cert.ValidBefore = uint64(o.ValidBefore.RelativeTime(t).Unix()) - } - if cert.ValidAfter > 0 && cert.ValidBefore > 0 && cert.ValidAfter > cert.ValidBefore { - return errors.New("ssh certificate valid after cannot be greater than valid before") - } - return nil -} - -// match compares two SSHOptions and return an error if they don't match. It -// ignores zero values. -func (o SignSSHOptions) match(got SignSSHOptions) error { - if o.CertType != "" && got.CertType != "" && o.CertType != got.CertType { - return errors.Errorf("ssh certificate type does not match - got %v, want %v", got.CertType, o.CertType) - } - if len(o.Principals) > 0 && len(got.Principals) > 0 && !containsAllMembers(o.Principals, got.Principals) { - return errors.Errorf("ssh certificate principals does not match - got %v, want %v", got.Principals, o.Principals) - } - if !o.ValidAfter.IsZero() && !got.ValidAfter.IsZero() && !o.ValidAfter.Equal(&got.ValidAfter) { - return errors.Errorf("ssh certificate valid after does not match - got %v, want %v", got.ValidAfter, o.ValidAfter) - } - if !o.ValidBefore.IsZero() && !got.ValidBefore.IsZero() && !o.ValidBefore.Equal(&got.ValidBefore) { - return errors.Errorf("ssh certificate valid before does not match - got %v, want %v", got.ValidBefore, o.ValidBefore) - } - return nil -} - -// sshCertPrincipalsModifier is an SSHCertModifier that sets the -// principals to the SSH certificate. -type sshCertPrincipalsModifier []string - -// Modify the ValidPrincipals value of the cert. 
-func (o sshCertPrincipalsModifier) Modify(cert *ssh.Certificate, _ SignSSHOptions) error { - cert.ValidPrincipals = []string(o) - return nil -} - -// sshCertKeyIDModifier is an SSHCertModifier that sets the given -// Key ID in the SSH certificate. -type sshCertKeyIDModifier string - -func (m sshCertKeyIDModifier) Modify(cert *ssh.Certificate, _ SignSSHOptions) error { - cert.KeyId = string(m) - return nil -} - -// sshCertTypeModifier is an SSHCertModifier that sets the -// certificate type. -type sshCertTypeModifier string - -// Modify sets the CertType for the ssh certificate. -func (m sshCertTypeModifier) Modify(cert *ssh.Certificate, _ SignSSHOptions) error { - cert.CertType = sshCertTypeUInt32(string(m)) - return nil -} - -// sshCertValidAfterModifier is an SSHCertModifier that sets the -// ValidAfter in the SSH certificate. -type sshCertValidAfterModifier uint64 - -func (m sshCertValidAfterModifier) Modify(cert *ssh.Certificate, _ SignSSHOptions) error { - cert.ValidAfter = uint64(m) - return nil -} - -// sshCertValidBeforeModifier is an SSHCertModifier that sets the -// ValidBefore in the SSH certificate. -type sshCertValidBeforeModifier uint64 - -func (m sshCertValidBeforeModifier) Modify(cert *ssh.Certificate, _ SignSSHOptions) error { - cert.ValidBefore = uint64(m) - return nil -} - -// sshCertDefaultsModifier implements a SSHCertModifier that -// modifies the certificate with the given options if they are not set. -type sshCertDefaultsModifier SignSSHOptions - -// Modify implements the SSHCertModifier interface. -func (m sshCertDefaultsModifier) Modify(cert *ssh.Certificate, _ SignSSHOptions) error { - if cert.CertType == 0 { - cert.CertType = sshCertTypeUInt32(m.CertType) - } - if len(cert.ValidPrincipals) == 0 { - cert.ValidPrincipals = m.Principals - } - if cert.ValidAfter == 0 && !m.ValidAfter.IsZero() { - cert.ValidAfter = uint64(m.ValidAfter.Unix()) - } - if cert.ValidBefore == 0 && !m.ValidBefore.IsZero() { - cert.ValidBefore = uint64(m.ValidBefore.Unix()) - } - return nil -} - -// sshDefaultExtensionModifier implements an SSHCertModifier that sets -// the default extensions in an SSH certificate. -type sshDefaultExtensionModifier struct{} - -func (m *sshDefaultExtensionModifier) Modify(cert *ssh.Certificate, _ SignSSHOptions) error { - switch cert.CertType { - // Default to no extensions for HostCert. - case ssh.HostCert: - return nil - case ssh.UserCert: - if cert.Extensions == nil { - cert.Extensions = make(map[string]string) - } - cert.Extensions["permit-X11-forwarding"] = "" - cert.Extensions["permit-agent-forwarding"] = "" - cert.Extensions["permit-port-forwarding"] = "" - cert.Extensions["permit-pty"] = "" - cert.Extensions["permit-user-rc"] = "" - return nil - default: - return errors.New("ssh certificate type has not been set or is invalid") - } -} - -// sshDefaultDuration is an SSHCertModifier that sets the certificate -// ValidAfter and ValidBefore if they have not been set. It will fail if a -// CertType has not been set or is not valid. -type sshDefaultDuration struct { - *Claimer -} - -// Modify implements SSHCertModifier and sets the validity if it has not been -// set, but it always applies the backdate. 
-func (m *sshDefaultDuration) Modify(cert *ssh.Certificate, o SignSSHOptions) error { - d, err := m.DefaultSSHCertDuration(cert.CertType) - if err != nil { - return err - } - - var backdate uint64 - if cert.ValidAfter == 0 { - backdate = uint64(o.Backdate / time.Second) - cert.ValidAfter = uint64(now().Truncate(time.Second).Unix()) - } - if cert.ValidBefore == 0 { - cert.ValidBefore = cert.ValidAfter + uint64(d/time.Second) - } - // Apply backdate safely - if cert.ValidAfter > backdate { - cert.ValidAfter -= backdate - } - return nil -} - -// sshLimitDuration adjusts the duration to min(default, remaining provisioning -// credential duration). E.g. if the default is 12hrs but the remaining validity -// of the provisioning credential is only 4hrs, this option will set the value -// to 4hrs (the min of the two values). It will fail if a CertType has not been -// set or is not valid. -type sshLimitDuration struct { - *Claimer - NotAfter time.Time -} - -// Modify implements SSHCertModifier and modifies the validity of the -// certificate to expire before the configured limit. -func (m *sshLimitDuration) Modify(cert *ssh.Certificate, o SignSSHOptions) error { - if m.NotAfter.IsZero() { - defaultDuration := &sshDefaultDuration{m.Claimer} - return defaultDuration.Modify(cert, o) - } - - // Make sure the duration is within the limits. - d, err := m.DefaultSSHCertDuration(cert.CertType) - if err != nil { - return err - } - - var backdate uint64 - if cert.ValidAfter == 0 { - backdate = uint64(o.Backdate / time.Second) - cert.ValidAfter = uint64(now().Truncate(time.Second).Unix()) - } - - certValidAfter := time.Unix(int64(cert.ValidAfter), 0) - if certValidAfter.After(m.NotAfter) { - return errors.Errorf("provisioning credential expiration (%s) is before requested certificate validAfter (%s)", - m.NotAfter, certValidAfter) - } - - if cert.ValidBefore == 0 { - certValidBefore := certValidAfter.Add(d) - if m.NotAfter.Before(certValidBefore) { - certValidBefore = m.NotAfter - } - cert.ValidBefore = uint64(certValidBefore.Unix()) - } else { - certValidBefore := time.Unix(int64(cert.ValidBefore), 0) - if m.NotAfter.Before(certValidBefore) { - return errors.Errorf("provisioning credential expiration (%s) is before requested certificate validBefore (%s)", - m.NotAfter, certValidBefore) - } - } - - // Apply backdate safely - if cert.ValidAfter > backdate { - cert.ValidAfter -= backdate - } - - return nil -} - -// sshCertOptionsValidator validates the user SSHOptions with the ones -// usually present in the token. -type sshCertOptionsValidator SignSSHOptions - -// Valid implements SSHCertOptionsValidator and returns nil if both -// SSHOptions match. -func (v sshCertOptionsValidator) Valid(got SignSSHOptions) error { - want := SignSSHOptions(v) - return want.match(got) -} - -// sshCertOptionsRequireValidator defines which elements in the SignSSHOptions are required. 
-type sshCertOptionsRequireValidator struct { - CertType bool - KeyID bool - Principals bool -} - -func (v *sshCertOptionsRequireValidator) Valid(got SignSSHOptions) error { - switch { - case v.CertType && got.CertType == "": - return errors.New("ssh certificate certType cannot be empty") - case v.KeyID && got.KeyID == "": - return errors.New("ssh certificate keyID cannot be empty") - case v.Principals && len(got.Principals) == 0: - return errors.New("ssh certificate principals cannot be empty") - default: - return nil - } -} - -type sshCertValidityValidator struct { - *Claimer -} - -func (v *sshCertValidityValidator) Valid(cert *ssh.Certificate, opts SignSSHOptions) error { - switch { - case cert.ValidAfter == 0: - return errors.New("ssh certificate validAfter cannot be 0") - case cert.ValidBefore < uint64(now().Unix()): - return errors.New("ssh certificate validBefore cannot be in the past") - case cert.ValidBefore < cert.ValidAfter: - return errors.New("ssh certificate validBefore cannot be before validAfter") - } - - var min, max time.Duration - switch cert.CertType { - case ssh.UserCert: - min = v.MinUserSSHCertDuration() - max = v.MaxUserSSHCertDuration() - case ssh.HostCert: - min = v.MinHostSSHCertDuration() - max = v.MaxHostSSHCertDuration() - case 0: - return errors.New("ssh certificate type has not been set") - default: - return errors.Errorf("unknown ssh certificate type %d", cert.CertType) - } - - // To not take into account the backdate, time.Now() will be used to - // calculate the duration if ValidAfter is in the past. - dur := time.Duration(cert.ValidBefore-cert.ValidAfter) * time.Second - - switch { - case dur < min: - return errors.Errorf("requested duration of %s is less than minimum "+ - "accepted duration for selected provisioner of %s", dur, min) - case dur > max+opts.Backdate: - return errors.Errorf("requested duration of %s is greater than maximum "+ - "accepted duration for selected provisioner of %s", dur, max+opts.Backdate) - default: - return nil - } -} - -// sshCertDefaultValidator implements a simple validator for all the -// fields in the SSH certificate. -type sshCertDefaultValidator struct{} - -// Valid returns an error if the given certificate does not contain the -// necessary fields. We skip ValidPrincipals and Extensions as with custom -// templates you can set them empty. 
-func (v *sshCertDefaultValidator) Valid(cert *ssh.Certificate, o SignSSHOptions) error {
-	switch {
-	case len(cert.Nonce) == 0:
-		return errors.New("ssh certificate nonce cannot be empty")
-	case cert.Key == nil:
-		return errors.New("ssh certificate key cannot be nil")
-	case cert.Serial == 0:
-		return errors.New("ssh certificate serial cannot be 0")
-	case cert.CertType != ssh.UserCert && cert.CertType != ssh.HostCert:
-		return errors.Errorf("ssh certificate has an unknown type: %d", cert.CertType)
-	case cert.KeyId == "":
-		return errors.New("ssh certificate key id cannot be empty")
-	case cert.ValidAfter == 0:
-		return errors.New("ssh certificate validAfter cannot be 0")
-	case cert.ValidBefore < uint64(now().Unix()):
-		return errors.New("ssh certificate validBefore cannot be in the past")
-	case cert.ValidBefore < cert.ValidAfter:
-		return errors.New("ssh certificate validBefore cannot be before validAfter")
-	case cert.SignatureKey == nil:
-		return errors.New("ssh certificate signature key cannot be nil")
-	case cert.Signature == nil:
-		return errors.New("ssh certificate signature cannot be nil")
-	default:
-		return nil
-	}
-}
-
-// sshDefaultPublicKeyValidator implements a validator for the certificate key.
-type sshDefaultPublicKeyValidator struct{}
-
-// Valid checks that the certificate key meets the minimum requirements for
-// its algorithm: RSA keys must meet the minimum size and DSA keys are rejected.
-func (v sshDefaultPublicKeyValidator) Valid(cert *ssh.Certificate, o SignSSHOptions) error {
-	if cert.Key == nil {
-		return errors.New("ssh certificate key cannot be nil")
-	}
-	switch cert.Key.Type() {
-	case ssh.KeyAlgoRSA:
-		_, in, ok := sshParseString(cert.Key.Marshal())
-		if !ok {
-			return errors.New("ssh certificate key is invalid")
-		}
-		key, err := sshParseRSAPublicKey(in)
-		if err != nil {
-			return err
-		}
-		if key.Size() < keyutil.MinRSAKeyBytes {
-			return errors.Errorf("ssh certificate key must be at least %d bits (%d bytes)",
-				8*keyutil.MinRSAKeyBytes, keyutil.MinRSAKeyBytes)
-		}
-		return nil
-	case ssh.KeyAlgoDSA:
-		return errors.New("ssh certificate key algorithm (DSA) is not supported")
-	default:
-		return nil
-	}
-}
-
-// sshCertTypeUInt32 returns the uint32 representation of the given certificate
-// type string.
-func sshCertTypeUInt32(ct string) uint32 {
-	switch ct {
-	case SSHUserCert:
-		return ssh.UserCert
-	case SSHHostCert:
-		return ssh.HostCert
-	default:
-		return 0
-	}
-}
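`sshDefaultPublicKeyValidator` above peels the algorithm name off the wire-encoded key before parsing the RSA modulus; the helper it uses (`sshParseString`, just below) implements the 4-byte length-prefixed string framing of the SSH wire format (RFC 4251). A standalone sketch of that framing:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// parseString reads one RFC 4251 "string": a big-endian uint32 length
// followed by that many bytes, returning the remainder of the buffer.
func parseString(in []byte) (out, rest []byte, ok bool) {
	if len(in) < 4 {
		return nil, nil, false
	}
	n := binary.BigEndian.Uint32(in)
	in = in[4:]
	if uint32(len(in)) < n {
		return nil, nil, false
	}
	return in[:n], in[n:], true
}

func main() {
	// Two packed strings: "ssh-rsa" (7 bytes) then "hi" (2 bytes).
	buf := []byte{0, 0, 0, 7, 's', 's', 'h', '-', 'r', 's', 'a', 0, 0, 0, 2, 'h', 'i'}
	s1, rest, _ := parseString(buf)
	s2, _, _ := parseString(rest)
	fmt.Printf("%s %s\n", s1, s2) // ssh-rsa hi
}
```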
-// containsAllMembers reports whether all members of subgroup are within group.
-func containsAllMembers(group, subgroup []string) bool {
-	lg, lsg := len(group), len(subgroup)
-	if lsg > lg || (lg > 0 && lsg == 0) {
-		return false
-	}
-	visit := make(map[string]struct{}, lg)
-	for i := 0; i < lg; i++ {
-		visit[group[i]] = struct{}{}
-	}
-	for i := 0; i < lsg; i++ {
-		if _, ok := visit[subgroup[i]]; !ok {
-			return false
-		}
-	}
-	return true
-}
-
-func sshParseString(in []byte) (out, rest []byte, ok bool) {
-	if len(in) < 4 {
-		return
-	}
-	length := binary.BigEndian.Uint32(in)
-	in = in[4:]
-	if uint32(len(in)) < length {
-		return
-	}
-	out = in[:length]
-	rest = in[length:]
-	ok = true
-	return
-}
-
-func sshParseRSAPublicKey(in []byte) (*rsa.PublicKey, error) {
-	var w struct {
-		E    *big.Int
-		N    *big.Int
-		Rest []byte `ssh:"rest"`
-	}
-	if err := ssh.Unmarshal(in, &w); err != nil {
-		return nil, errors.Wrap(err, "error unmarshaling public key")
-	}
-	if w.E.BitLen() > 24 {
-		return nil, errors.New("invalid public key: exponent too large")
-	}
-	e := w.E.Int64()
-	if e < 3 || e&1 == 0 {
-		return nil, errors.New("invalid public key: incorrect exponent")
-	}
-
-	var key rsa.PublicKey
-	key.E = int(e)
-	key.N = w.N
-	return &key, nil
-}
diff --git a/vendor/github.com/smallstep/certificates/authority/provisioner/ssh_options.go b/vendor/github.com/smallstep/certificates/authority/provisioner/ssh_options.go
deleted file mode 100644
index 7ee236d1..00000000
--- a/vendor/github.com/smallstep/certificates/authority/provisioner/ssh_options.go
+++ /dev/null
@@ -1,108 +0,0 @@
-package provisioner
-
-import (
-	"encoding/json"
-	"strings"
-
-	"github.com/pkg/errors"
-	"go.step.sm/crypto/sshutil"
-)
-
-// SSHCertificateOptions is an interface that returns a list of options passed when
-// creating a new certificate.
-type SSHCertificateOptions interface {
-	Options(SignSSHOptions) []sshutil.Option
-}
-
-type sshCertificateOptionsFunc func(SignSSHOptions) []sshutil.Option
-
-func (fn sshCertificateOptionsFunc) Options(so SignSSHOptions) []sshutil.Option {
-	return fn(so)
-}
-
-// SSHOptions are a collection of custom options that can be added to each
-// provisioner.
-type SSHOptions struct {
-	// Template contains an SSH certificate template. It can be a JSON template
-	// escaped in a string or it can be also encoded in base64.
-	Template string `json:"template,omitempty"`
-
-	// TemplateFile points to a file containing a SSH certificate template.
-	TemplateFile string `json:"templateFile,omitempty"`
-
-	// TemplateData is a JSON object with variables that can be used in custom
-	// templates.
-	TemplateData json.RawMessage `json:"templateData,omitempty"`
-}
-
-// HasTemplate returns true if a template is defined in the provisioner options.
-func (o *SSHOptions) HasTemplate() bool {
-	return o != nil && (o.Template != "" || o.TemplateFile != "")
-}
-
-// TemplateSSHOptions generates an SSHCertificateOptions with the template and
-// data defined in the ProvisionerOptions, the provisioner generated data, and
-// the user data provided in the request. If no template has been provided,
-// sshutil.DefaultTemplate will be used.
-func TemplateSSHOptions(o *Options, data sshutil.TemplateData) (SSHCertificateOptions, error) {
-	return CustomSSHTemplateOptions(o, data, sshutil.DefaultTemplate)
-}
-// CustomSSHTemplateOptions generates an SSHCertificateOptions with the template,
-// data defined in the ProvisionerOptions, the provisioner generated data and the
-// user data provided in the request. If no template has been provided in the
-// ProvisionerOptions, the given template will be used.
-func CustomSSHTemplateOptions(o *Options, data sshutil.TemplateData, defaultTemplate string) (SSHCertificateOptions, error) {
-	opts := o.GetSSHOptions()
-	if data == nil {
-		data = sshutil.NewTemplateData()
-	}
-
-	if opts != nil {
-		// Add template data if any.
-		if len(opts.TemplateData) > 0 && string(opts.TemplateData) != "null" {
-			if err := json.Unmarshal(opts.TemplateData, &data); err != nil {
-				return nil, errors.Wrap(err, "error unmarshaling template data")
-			}
-		}
-	}
-
-	return sshCertificateOptionsFunc(func(so SignSSHOptions) []sshutil.Option {
-		// User data is only used with custom templates.
-		if !opts.HasTemplate() {
-			return []sshutil.Option{
-				sshutil.WithTemplate(defaultTemplate, data),
-			}
-		}
-
-		// Add user provided data.
-		if len(so.TemplateData) > 0 {
-			userObject := make(map[string]interface{})
-			if err := json.Unmarshal(so.TemplateData, &userObject); err != nil {
-				data.SetUserData(map[string]interface{}{})
-			} else {
-				data.SetUserData(userObject)
-			}
-		}
-
-		// Load a template from a file if Template is not defined.
-		if opts.Template == "" && opts.TemplateFile != "" {
-			return []sshutil.Option{
-				sshutil.WithTemplateFile(opts.TemplateFile, data),
-			}
-		}
-
-		// Load a template from the Template fields
-		// 1. As a JSON in a string.
-		template := strings.TrimSpace(opts.Template)
-		if strings.HasPrefix(template, "{") {
-			return []sshutil.Option{
-				sshutil.WithTemplate(template, data),
-			}
-		}
-		// 2. As a base64 encoded JSON.
-		return []sshutil.Option{
-			sshutil.WithTemplateBase64(template, data),
-		}
-	}), nil
-}
diff --git a/vendor/github.com/smallstep/certificates/authority/provisioner/sshpop.go b/vendor/github.com/smallstep/certificates/authority/provisioner/sshpop.go
deleted file mode 100644
index 8bc76edf..00000000
--- a/vendor/github.com/smallstep/certificates/authority/provisioner/sshpop.go
+++ /dev/null
@@ -1,283 +0,0 @@
-package provisioner
-
-import (
-	"context"
-	"encoding/base64"
-	"net/http"
-	"strconv"
-	"time"
-
-	"github.com/pkg/errors"
-	"github.com/smallstep/certificates/db"
-	"github.com/smallstep/certificates/errs"
-	"go.step.sm/crypto/jose"
-	"golang.org/x/crypto/ssh"
-)
-
-// sshPOPPayload extends jwt.Claims with step attributes.
-type sshPOPPayload struct {
-	jose.Claims
-	SANs    []string     `json:"sans,omitempty"`
-	Step    *stepPayload `json:"step,omitempty"`
-	sshCert *ssh.Certificate
-}
-
-// SSHPOP is a provisioner that authorizes requests by proving possession of
-// an existing SSH certificate issued by a trusted CA.
-type SSHPOP struct {
-	*base
-	ID         string  `json:"-"`
-	Type       string  `json:"type"`
-	Name       string  `json:"name"`
-	Claims     *Claims `json:"claims,omitempty"`
-	db         db.AuthDB
-	claimer    *Claimer
-	audiences  Audiences
-	sshPubKeys *SSHKeys
-}
-
-// GetID returns the provisioner unique identifier. The name and credential id
-// should uniquely identify any SSH-POP provisioner.
-func (p *SSHPOP) GetID() string {
-	if p.ID != "" {
-		return p.ID
-	}
-	return p.GetIDForToken()
-}
-
-// GetIDForToken returns an identifier that will be used to load the provisioner
-// from a token.
-func (p *SSHPOP) GetIDForToken() string {
-	return "sshpop/" + p.Name
-}
-// GetTokenID returns the identifier of the token.
-func (p *SSHPOP) GetTokenID(ott string) (string, error) {
-	// Validate payload
-	token, err := jose.ParseSigned(ott)
-	if err != nil {
-		return "", errors.Wrap(err, "error parsing token")
-	}
-
-	// Get claims w/out verification. We need to look up the provisioner
-	// key in order to verify the claims and we need the issuer from the claims
-	// before we can look up the provisioner.
-	var claims jose.Claims
-	if err = token.UnsafeClaimsWithoutVerification(&claims); err != nil {
-		return "", errors.Wrap(err, "error verifying claims")
-	}
-	return claims.ID, nil
-}
-
-// GetName returns the name of the provisioner.
-func (p *SSHPOP) GetName() string {
-	return p.Name
-}
-
-// GetType returns the type of provisioner.
-func (p *SSHPOP) GetType() Type {
-	return TypeSSHPOP
-}
-
-// GetEncryptedKey returns the base provisioner encrypted key if it's defined.
-func (p *SSHPOP) GetEncryptedKey() (string, string, bool) {
-	return "", "", false
-}
-
-// Init initializes and validates the fields of an SSHPOP type.
-func (p *SSHPOP) Init(config Config) error {
-	switch {
-	case p.Type == "":
-		return errors.New("provisioner type cannot be empty")
-	case p.Name == "":
-		return errors.New("provisioner name cannot be empty")
-	case config.SSHKeys == nil:
-		return errors.New("provisioner public SSH validation keys cannot be empty")
-	}
-
-	// Update claims with global ones
-	var err error
-	if p.claimer, err = NewClaimer(p.Claims, config.Claims); err != nil {
-		return err
-	}
-
-	p.audiences = config.Audiences.WithFragment(p.GetIDForToken())
-	p.db = config.DB
-	p.sshPubKeys = config.SSHKeys
-	return nil
-}
-
-// authorizeToken performs common jwt authorization actions and returns the
-// claims for case specific downstream parsing.
-// e.g. a Sign request will auth/validate different fields than a Revoke request.
-func (p *SSHPOP) authorizeToken(token string, audiences []string) (*sshPOPPayload, error) {
-	sshCert, jwt, err := ExtractSSHPOPCert(token)
-	if err != nil {
-		return nil, errs.Wrap(http.StatusUnauthorized, err,
-			"sshpop.authorizeToken; error extracting sshpop header from token")
-	}
-
-	// Check for revocation.
-	if isRevoked, err := p.db.IsSSHRevoked(strconv.FormatUint(sshCert.Serial, 10)); err != nil {
-		return nil, errs.Wrap(http.StatusInternalServerError, err,
-			"sshpop.authorizeToken; error checking sshpop cert revocation")
-	} else if isRevoked {
-		return nil, errs.Unauthorized("sshpop.authorizeToken; sshpop certificate is revoked")
-	}
-
-	// Check validity period of the certificate.
-	n := time.Now()
-	if sshCert.ValidAfter != 0 && time.Unix(int64(sshCert.ValidAfter), 0).After(n) {
-		return nil, errs.Unauthorized("sshpop.authorizeToken; sshpop certificate validAfter is in the future")
-	}
-	if sshCert.ValidBefore != 0 && time.Unix(int64(sshCert.ValidBefore), 0).Before(n) {
-		return nil, errs.Unauthorized("sshpop.authorizeToken; sshpop certificate validBefore is in the past")
-	}
-	sshCryptoPubKey, ok := sshCert.Key.(ssh.CryptoPublicKey)
-	if !ok {
-		return nil, errs.InternalServer("sshpop.authorizeToken; sshpop public key could not be cast to ssh CryptoPublicKey")
-	}
-	pubKey := sshCryptoPubKey.CryptoPublicKey()
-
-	var (
-		found bool
-		data  = bytesForSigning(sshCert)
-		keys  []ssh.PublicKey
-	)
-	if sshCert.CertType == ssh.UserCert {
-		keys = p.sshPubKeys.UserKeys
-	} else {
-		keys = p.sshPubKeys.HostKeys
-	}
-	for _, k := range keys {
-		if err = (&ssh.Certificate{Key: k}).Verify(data, sshCert.Signature); err == nil {
-			found = true
-			break
-		}
-	}
-	if !found {
-		return nil, errs.Unauthorized("sshpop.authorizeToken; could not find valid ca signer to verify sshpop certificate")
-	}
-	// Using the ssh certificate's key to validate the claims accomplishes two
-	// things:
-	//   1. Asserts that the private key used to sign the token corresponds
-	//      to the public certificate in the `sshpop` header of the token.
-	//   2. Asserts that the claims are valid - have not been tampered with.
-	var claims sshPOPPayload
-	if err = jwt.Claims(pubKey, &claims); err != nil {
-		return nil, errs.Wrap(http.StatusUnauthorized, err, "sshpop.authorizeToken; error parsing sshpop token claims")
-	}
-
-	// According to "rfc7519 JSON Web Token" acceptable skew should be no
-	// more than a few minutes.
-	if err = claims.ValidateWithLeeway(jose.Expected{
-		Issuer: p.Name,
-		Time:   time.Now().UTC(),
-	}, time.Minute); err != nil {
-		return nil, errs.Wrap(http.StatusUnauthorized, err, "sshpop.authorizeToken; invalid sshpop token")
-	}
-
-	// validate audiences with the defaults
-	if !matchesAudience(claims.Audience, audiences) {
-		return nil, errs.Unauthorized("sshpop.authorizeToken; sshpop token has invalid audience "+
-			"claim (aud): expected %s, but got %s", audiences, claims.Audience)
-	}
-
-	if claims.Subject == "" {
-		return nil, errs.Unauthorized("sshpop.authorizeToken; sshpop token subject cannot be empty")
-	}
-
-	claims.sshCert = sshCert
-	return &claims, nil
-}
-
-// AuthorizeSSHRevoke validates the authorization token and extracts/validates
-// the SSH certificate from the ssh-pop header.
-func (p *SSHPOP) AuthorizeSSHRevoke(ctx context.Context, token string) error {
-	claims, err := p.authorizeToken(token, p.audiences.SSHRevoke)
-	if err != nil {
-		return errs.Wrap(http.StatusInternalServerError, err, "sshpop.AuthorizeSSHRevoke")
-	}
-	if claims.Subject != strconv.FormatUint(claims.sshCert.Serial, 10) {
-		return errs.BadRequest("sshpop.AuthorizeSSHRevoke; sshpop token subject " +
-			"must be equivalent to sshpop certificate serial number")
-	}
-	return nil
-}
-
-// AuthorizeSSHRenew validates the authorization token and extracts/validates
-// the SSH certificate from the ssh-pop header.
-func (p *SSHPOP) AuthorizeSSHRenew(ctx context.Context, token string) (*ssh.Certificate, error) {
-	claims, err := p.authorizeToken(token, p.audiences.SSHRenew)
-	if err != nil {
-		return nil, errs.Wrap(http.StatusInternalServerError, err, "sshpop.AuthorizeSSHRenew")
-	}
-	if claims.sshCert.CertType != ssh.HostCert {
-		return nil, errs.BadRequest("sshpop.AuthorizeSSHRenew; sshpop certificate must be a host ssh certificate")
-	}
-	return claims.sshCert, nil
-}
-
-// AuthorizeSSHRekey validates the authorization token and extracts/validates
-// the SSH certificate from the ssh-pop header.
-func (p *SSHPOP) AuthorizeSSHRekey(ctx context.Context, token string) (*ssh.Certificate, []SignOption, error) {
-	claims, err := p.authorizeToken(token, p.audiences.SSHRekey)
-	if err != nil {
-		return nil, nil, errs.Wrap(http.StatusInternalServerError, err, "sshpop.AuthorizeSSHRekey")
-	}
-	if claims.sshCert.CertType != ssh.HostCert {
-		return nil, nil, errs.BadRequest("sshpop.AuthorizeSSHRekey; sshpop certificate must be a host ssh certificate")
-	}
-	return claims.sshCert, []SignOption{
-		// Validate public key
-		&sshDefaultPublicKeyValidator{},
-		// Validate the validity period.
-		&sshCertValidityValidator{p.claimer},
-		// Require and validate all the default fields in the SSH certificate.
-		&sshCertDefaultValidator{},
-	}, nil
-}
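`authorizeToken` above rejects expired or not-yet-valid certificates before doing any signature or claim checks; SSH certificates carry `ValidAfter`/`ValidBefore` as `uint64` Unix seconds, with 0 meaning unset. A minimal sketch of that window check (`checkWindow` is an illustrative helper):

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// checkWindow applies the same rules as the sshpop authorizeToken checks:
// a zero bound is treated as unset and therefore always passes.
func checkWindow(validAfter, validBefore uint64, now time.Time) error {
	if validAfter != 0 && time.Unix(int64(validAfter), 0).After(now) {
		return errors.New("certificate validAfter is in the future")
	}
	if validBefore != 0 && time.Unix(int64(validBefore), 0).Before(now) {
		return errors.New("certificate validBefore is in the past")
	}
	return nil
}

func main() {
	now := time.Now()
	ok := checkWindow(uint64(now.Add(-time.Hour).Unix()), uint64(now.Add(time.Hour).Unix()), now)
	expired := checkWindow(0, uint64(now.Add(-time.Minute).Unix()), now)
	fmt.Println(ok, expired) // <nil> certificate validBefore is in the past
}
```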
-func ExtractSSHPOPCert(token string) (*ssh.Certificate, *jose.JSONWebToken, error) { - jwt, err := jose.ParseSigned(token) - if err != nil { - return nil, nil, errors.Wrapf(err, "extractSSHPOPCert; error parsing token") - } - - encodedSSHCert, ok := jwt.Headers[0].ExtraHeaders["sshpop"] - if !ok { - return nil, nil, errors.New("extractSSHPOPCert; token missing sshpop header") - } - encodedSSHCertStr, ok := encodedSSHCert.(string) - if !ok { - return nil, nil, errors.Errorf("extractSSHPOPCert; error unexpected type for sshpop header: "+ - "want 'string', but got '%T'", encodedSSHCert) - } - sshCertBytes, err := base64.StdEncoding.DecodeString(encodedSSHCertStr) - if err != nil { - return nil, nil, errors.Wrap(err, "extractSSHPOPCert; error base64 decoding sshpop header") - } - sshPub, err := ssh.ParsePublicKey(sshCertBytes) - if err != nil { - return nil, nil, errors.Wrap(err, "extractSSHPOPCert; error parsing ssh public key") - } - sshCert, ok := sshPub.(*ssh.Certificate) - if !ok { - return nil, nil, errors.New("extractSSHPOPCert; error converting ssh public key to ssh certificate") - } - return sshCert, jwt, nil -} - -func bytesForSigning(cert *ssh.Certificate) []byte { - c2 := *cert - c2.Signature = nil - out := c2.Marshal() - // Drop trailing signature length. - return out[:len(out)-4] -} diff --git a/vendor/github.com/smallstep/certificates/authority/provisioner/timeduration.go b/vendor/github.com/smallstep/certificates/authority/provisioner/timeduration.go deleted file mode 100644 index 7d197217..00000000 --- a/vendor/github.com/smallstep/certificates/authority/provisioner/timeduration.go +++ /dev/null @@ -1,146 +0,0 @@ -package provisioner - -import ( - "encoding/json" - "time" - - "github.com/pkg/errors" -) - -var now = func() time.Time { - return time.Now().UTC() -} - -// TimeDuration is a type that represents a time but the JSON unmarshaling can -// use a time using the RFC 3339 format or a time.Duration string. If a duration -// is used, the time will be set on the first call to TimeDuration.Time. -type TimeDuration struct { - t time.Time - d time.Duration -} - -// NewTimeDuration returns a TimeDuration with the defined time. -func NewTimeDuration(t time.Time) TimeDuration { - return TimeDuration{t: t} -} - -// ParseTimeDuration returns a new TimeDuration parsing the RFC 3339 time or -// time.Duration string. -func ParseTimeDuration(s string) (TimeDuration, error) { - if s == "" { - return TimeDuration{}, nil - } - - // Try to use the unquoted RFC 3339 format - var t time.Time - if err := t.UnmarshalText([]byte(s)); err == nil { - return TimeDuration{t: t.UTC()}, nil - } - - // Try to use the time.Duration string format - if d, err := time.ParseDuration(s); err == nil { - return TimeDuration{d: d}, nil - } - - return TimeDuration{}, errors.Errorf("failed to parse %s", s) -} - -// SetDuration initializes the TimeDuration with the given duration string. If -// the time was set it will re-set to zero. -func (t *TimeDuration) SetDuration(d time.Duration) { - t.t, t.d = time.Time{}, d -} - -// SetTime initializes the TimeDuration with the given time. If the duration is -// set it will be re-set to zero. -func (t *TimeDuration) SetTime(tt time.Time) { - t.t, t.d = tt, 0 -} - -// IsZero returns true the TimeDuration represents the zero value, false -// otherwise. -func (t *TimeDuration) IsZero() bool { - return t.t.IsZero() && t.d == 0 -} - -// Equal returns if t and other are equal. 
-func (t *TimeDuration) Equal(other *TimeDuration) bool {
-	return t.t.Equal(other.t) && t.d == other.d
-}
-
-// MarshalJSON implements the json.Marshaler interface. If the time is set it
-// will return the time in RFC 3339 format; if not, it will return the duration
-// string.
-func (t TimeDuration) MarshalJSON() ([]byte, error) {
-	switch {
-	case t.t.IsZero():
-		if t.d == 0 {
-			return []byte(`""`), nil
-		}
-		return json.Marshal(t.d.String())
-	default:
-		return t.t.MarshalJSON()
-	}
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface. The time is expected
-// to be a quoted string in RFC 3339 format or a quoted time.Duration string.
-func (t *TimeDuration) UnmarshalJSON(data []byte) error {
-	var s string
-	if err := json.Unmarshal(data, &s); err != nil {
-		return errors.Wrapf(err, "error unmarshaling %s", data)
-	}
-
-	// Empty TimeDuration
-	if s == "" {
-		*t = TimeDuration{}
-		return nil
-	}
-
-	// Try to use the unquoted RFC 3339 format
-	var tt time.Time
-	if err := tt.UnmarshalText([]byte(s)); err == nil {
-		*t = TimeDuration{t: tt}
-		return nil
-	}
-
-	// Try to use the time.Duration string format
-	if d, err := time.ParseDuration(s); err == nil {
-		*t = TimeDuration{d: d}
-		return nil
-	}
-
-	return errors.Errorf("failed to parse %s", data)
-}
-
-// Time calculates the time if needed and returns it.
-func (t *TimeDuration) Time() time.Time {
-	return t.RelativeTime(now())
-}
-
-// Unix calculates the time if needed and returns the Unix time in seconds.
-func (t *TimeDuration) Unix() int64 {
-	return t.RelativeTime(now()).Unix()
-}
-
-// RelativeTime returns the embedded time.Time or the base time plus the
-// duration if this is not zero.
-func (t *TimeDuration) RelativeTime(base time.Time) time.Time {
-	switch {
-	case t == nil:
-		return time.Time{}
-	case t.t.IsZero():
-		if t.d == 0 {
-			return time.Time{}
-		}
-		t.t = base.Add(t.d)
-		return t.t.UTC()
-	default:
-		return t.t.UTC()
-	}
-}
-
-// String implements the fmt.Stringer interface.
-func (t *TimeDuration) String() string {
-	return t.Time().String()
-}
diff --git a/vendor/github.com/smallstep/certificates/authority/provisioner/x5c.go b/vendor/github.com/smallstep/certificates/authority/provisioner/x5c.go
deleted file mode 100644
index a05f39c7..00000000
--- a/vendor/github.com/smallstep/certificates/authority/provisioner/x5c.go
+++ /dev/null
@@ -1,315 +0,0 @@
-package provisioner
-
-import (
-	"context"
-	"crypto/x509"
-	"encoding/pem"
-	"net/http"
-	"time"
-
-	"github.com/pkg/errors"
-	"github.com/smallstep/certificates/errs"
-	"go.step.sm/crypto/jose"
-	"go.step.sm/crypto/sshutil"
-	"go.step.sm/crypto/x509util"
-)
-
-// x5cPayload extends jwt.Claims with step attributes.
-type x5cPayload struct {
-	jose.Claims
-	SANs   []string     `json:"sans,omitempty"`
-	Step   *stepPayload `json:"step,omitempty"`
-	chains [][]*x509.Certificate
-}
-
-// X5C is a provisioner that authorizes signing requests with JWTs signed by a
-// leaf certificate that chains up to one of its configured roots.
-type X5C struct {
-	*base
-	ID        string   `json:"-"`
-	Type      string   `json:"type"`
-	Name      string   `json:"name"`
-	Roots     []byte   `json:"roots"`
-	Claims    *Claims  `json:"claims,omitempty"`
-	Options   *Options `json:"options,omitempty"`
-	claimer   *Claimer
-	audiences Audiences
-	rootPool  *x509.CertPool
-}
-
-// GetID returns the provisioner unique identifier. The name and credential id
-// should uniquely identify any X5C provisioner.
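Closing out timeduration.go: the two encodings behave differently only once a base time is supplied, which is easiest to see in a small example. This is a sketch that assumes it sits in the same package as the TimeDuration type above.

func ExampleTimeDuration() {
	base := time.Date(2021, 1, 1, 0, 0, 0, 0, time.UTC)

	// Relative duration: resolved against the base time on first use.
	rel, _ := ParseTimeDuration("5m")
	fmt.Println(rel.RelativeTime(base)) // 2021-01-01 00:05:00 +0000 UTC

	// Absolute RFC 3339 timestamp: returned as-is, the base is ignored.
	abs, _ := ParseTimeDuration("2021-06-01T00:00:00Z")
	fmt.Println(abs.RelativeTime(base)) // 2021-06-01 00:00:00 +0000 UTC
}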
-func (p *X5C) GetID() string { - if p.ID != "" { - return p.ID - } - return p.GetIDForToken() -} - -// GetIDForToken returns an identifier that will be used to load the provisioner -// from a token. -func (p *X5C) GetIDForToken() string { - return "x5c/" + p.Name -} - -// GetTokenID returns the identifier of the token. -func (p *X5C) GetTokenID(ott string) (string, error) { - // Validate payload - token, err := jose.ParseSigned(ott) - if err != nil { - return "", errors.Wrap(err, "error parsing token") - } - - // Get claims w/out verification. We need to look up the provisioner - // key in order to verify the claims and we need the issuer from the claims - // before we can look up the provisioner. - var claims jose.Claims - if err = token.UnsafeClaimsWithoutVerification(&claims); err != nil { - return "", errors.Wrap(err, "error verifying claims") - } - return claims.ID, nil -} - -// GetName returns the name of the provisioner. -func (p *X5C) GetName() string { - return p.Name -} - -// GetType returns the type of provisioner. -func (p *X5C) GetType() Type { - return TypeX5C -} - -// GetEncryptedKey returns the base provisioner encrypted key if it's defined. -func (p *X5C) GetEncryptedKey() (string, string, bool) { - return "", "", false -} - -// Init initializes and validates the fields of a X5C type. -func (p *X5C) Init(config Config) error { - switch { - case p.Type == "": - return errors.New("provisioner type cannot be empty") - case p.Name == "": - return errors.New("provisioner name cannot be empty") - case len(p.Roots) == 0: - return errors.New("provisioner root(s) cannot be empty") - } - - p.rootPool = x509.NewCertPool() - - var ( - block *pem.Block - rest = p.Roots - ) - for rest != nil { - block, rest = pem.Decode(rest) - if block == nil { - break - } - cert, err := x509.ParseCertificate(block.Bytes) - if err != nil { - return errors.Wrap(err, "error parsing x509 certificate from PEM block") - } - p.rootPool.AddCert(cert) - } - - // Verify that at least one root was found. - if len(p.rootPool.Subjects()) == 0 { - return errors.Errorf("no x509 certificates found in roots attribute for provisioner '%s'", p.GetName()) - } - - // Update claims with global ones - var err error - if p.claimer, err = NewClaimer(p.Claims, config.Claims); err != nil { - return err - } - - p.audiences = config.Audiences.WithFragment(p.GetIDForToken()) - return nil -} - -// authorizeToken performs common jwt authorization actions and returns the -// claims for case specific downstream parsing. -// e.g. a Sign request will auth/validate different fields than a Revoke request. -func (p *X5C) authorizeToken(token string, audiences []string) (*x5cPayload, error) { - jwt, err := jose.ParseSigned(token) - if err != nil { - return nil, errs.Wrap(http.StatusUnauthorized, err, "x5c.authorizeToken; error parsing x5c token") - } - - verifiedChains, err := jwt.Headers[0].Certificates(x509.VerifyOptions{ - Roots: p.rootPool, - KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, - }) - if err != nil { - return nil, errs.Wrap(http.StatusUnauthorized, err, - "x5c.authorizeToken; error verifying x5c certificate chain in token") - } - leaf := verifiedChains[0][0] - - if leaf.KeyUsage&x509.KeyUsageDigitalSignature == 0 { - return nil, errs.Unauthorized("x5c.authorizeToken; certificate used to sign x5c token cannot be used for digital signature") - } - - // Using the leaf certificates key to validate the claims accomplishes two - // things: - // 1. 
Asserts that the private key used to sign the token corresponds
-	//      to the public certificate in the `x5c` header of the token.
-	//   2. Asserts that the claims are valid - have not been tampered with.
-	var claims x5cPayload
-	if err = jwt.Claims(leaf.PublicKey, &claims); err != nil {
-		return nil, errs.Wrap(http.StatusUnauthorized, err, "x5c.authorizeToken; error parsing x5c claims")
-	}
-
-	// According to "rfc7519 JSON Web Token" acceptable skew should be no
-	// more than a few minutes.
-	if err = claims.ValidateWithLeeway(jose.Expected{
-		Issuer: p.Name,
-		Time:   time.Now().UTC(),
-	}, time.Minute); err != nil {
-		return nil, errs.Wrapf(http.StatusUnauthorized, err, "x5c.authorizeToken; invalid x5c claims")
-	}
-
-	// validate audiences with the defaults
-	if !matchesAudience(claims.Audience, audiences) {
-		return nil, errs.Unauthorized("x5c.authorizeToken; x5c token has invalid audience "+
-			"claim (aud); expected %s, but got %s", audiences, claims.Audience)
-	}
-
-	if claims.Subject == "" {
-		return nil, errs.Unauthorized("x5c.authorizeToken; x5c token subject cannot be empty")
-	}
-
-	// Save the verified chains on the x5c payload object.
-	claims.chains = verifiedChains
-	return &claims, nil
-}
-
-// AuthorizeRevoke returns an error if the provisioner does not have rights to
-// revoke the certificate with serial number in the `sub` property.
-func (p *X5C) AuthorizeRevoke(ctx context.Context, token string) error {
-	_, err := p.authorizeToken(token, p.audiences.Revoke)
-	return errs.Wrap(http.StatusInternalServerError, err, "x5c.AuthorizeRevoke")
-}
-
-// AuthorizeSign validates the given token.
-func (p *X5C) AuthorizeSign(ctx context.Context, token string) ([]SignOption, error) {
-	claims, err := p.authorizeToken(token, p.audiences.Sign)
-	if err != nil {
-		return nil, errs.Wrap(http.StatusInternalServerError, err, "x5c.AuthorizeSign")
-	}
-
-	// NOTE: This is for backwards compatibility with older versions of cli
-	// and certificates. Older versions added the token subject as the only SAN
-	// in a CSR by default.
-	if len(claims.SANs) == 0 {
-		claims.SANs = []string{claims.Subject}
-	}
-
-	// Certificate templates
-	data := x509util.CreateTemplateData(claims.Subject, claims.SANs)
-	if v, err := unsafeParseSigned(token); err == nil {
-		data.SetToken(v)
-	}
-
-	templateOptions, err := TemplateOptions(p.Options, data)
-	if err != nil {
-		return nil, errs.Wrap(http.StatusInternalServerError, err, "x5c.AuthorizeSign")
-	}
-
-	return []SignOption{
-		templateOptions,
-		// modifiers / withOptions
-		newProvisionerExtensionOption(TypeX5C, p.Name, ""),
-		profileLimitDuration{p.claimer.DefaultTLSCertDuration(),
-			claims.chains[0][0].NotBefore, claims.chains[0][0].NotAfter},
-		// validators
-		commonNameValidator(claims.Subject),
-		defaultSANsValidator(claims.SANs),
-		defaultPublicKeyValidator{},
-		newValidityValidator(p.claimer.MinTLSCertDuration(), p.claimer.MaxTLSCertDuration()),
-	}, nil
-}
-
-// AuthorizeRenew returns an error if the renewal is disabled.
-func (p *X5C) AuthorizeRenew(ctx context.Context, cert *x509.Certificate) error {
-	if p.claimer.IsDisableRenewal() {
-		return errs.Unauthorized("x5c.AuthorizeRenew; renew is disabled for x5c provisioner '%s'", p.GetName())
-	}
-	return nil
-}
-
-// AuthorizeSSHSign returns the list of SignOption for a SignSSH request.
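The same construction works for x5c tokens as for the sshpop sketch earlier, except the certificate chain travels in the standard "x5c" header as an array of base64 DER certificates, which Headers[0].Certificates above parses back out. Another hedged sketch; buildX5CToken and its parameters are illustrative.

package example

import (
	"crypto/ecdsa"
	"crypto/x509"
	"encoding/base64"
	"time"

	jose "gopkg.in/square/go-jose.v2"
	"gopkg.in/square/go-jose.v2/jwt"
)

// buildX5CToken embeds the leaf-first DER chain in the "x5c" protected
// header; authorizeToken above verifies that chain against the provisioner
// roots before trusting the claims.
func buildX5CToken(chain []*x509.Certificate, key *ecdsa.PrivateKey, issuer, subject, audience string) (string, error) {
	// The x5c header is an array of base64 (standard, not URL) DER certs.
	encoded := make([]string, len(chain))
	for i, crt := range chain {
		encoded[i] = base64.StdEncoding.EncodeToString(crt.Raw)
	}
	signer, err := jose.NewSigner(
		jose.SigningKey{Algorithm: jose.ES256, Key: key},
		(&jose.SignerOptions{}).WithHeader("x5c", encoded),
	)
	if err != nil {
		return "", err
	}
	return jwt.Signed(signer).Claims(jwt.Claims{
		Issuer:   issuer,  // the provisioner name
		Subject:  subject, // becomes the default SAN, or the SSH key ID
		Audience: jwt.Audience{audience},
		IssuedAt: jwt.NewNumericDate(time.Now()),
		Expiry:   jwt.NewNumericDate(time.Now().Add(5 * time.Minute)),
	}).CompactSerialize()
}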
-func (p *X5C) AuthorizeSSHSign(ctx context.Context, token string) ([]SignOption, error) { - if !p.claimer.IsSSHCAEnabled() { - return nil, errs.Unauthorized("x5c.AuthorizeSSHSign; sshCA is disabled for x5c provisioner '%s'", p.GetName()) - } - - claims, err := p.authorizeToken(token, p.audiences.SSHSign) - if err != nil { - return nil, errs.Wrap(http.StatusInternalServerError, err, "x5c.AuthorizeSSHSign") - } - - if claims.Step == nil || claims.Step.SSH == nil { - return nil, errs.Unauthorized("x5c.AuthorizeSSHSign; x5c token must be an SSH provisioning token") - } - - opts := claims.Step.SSH - signOptions := []SignOption{ - // validates user's SSHOptions with the ones in the token - sshCertOptionsValidator(*opts), - // validate users's KeyID is the token subject. - sshCertOptionsValidator(SignSSHOptions{KeyID: claims.Subject}), - } - - // Default template attributes. - certType := sshutil.UserCert - keyID := claims.Subject - principals := []string{claims.Subject} - - // Use options in the token. - if opts.CertType != "" { - if certType, err = sshutil.CertTypeFromString(opts.CertType); err != nil { - return nil, errs.Wrap(http.StatusBadRequest, err, "x5c.AuthorizeSSHSign") - } - } - if opts.KeyID != "" { - keyID = opts.KeyID - } - if len(opts.Principals) > 0 { - principals = opts.Principals - } - - // Certificate templates. - data := sshutil.CreateTemplateData(certType, keyID, principals) - if v, err := unsafeParseSigned(token); err == nil { - data.SetToken(v) - } - - templateOptions, err := TemplateSSHOptions(p.Options, data) - if err != nil { - return nil, errs.Wrap(http.StatusInternalServerError, err, "x5c.AuthorizeSSHSign") - } - signOptions = append(signOptions, templateOptions) - - // Add modifiers from custom claims - t := now() - if !opts.ValidAfter.IsZero() { - signOptions = append(signOptions, sshCertValidAfterModifier(opts.ValidAfter.RelativeTime(t).Unix())) - } - if !opts.ValidBefore.IsZero() { - signOptions = append(signOptions, sshCertValidBeforeModifier(opts.ValidBefore.RelativeTime(t).Unix())) - } - - return append(signOptions, - // Checks the validity bounds, and set the validity if has not been set. - &sshLimitDuration{p.claimer, claims.chains[0][0].NotAfter}, - // Validate public key. - &sshDefaultPublicKeyValidator{}, - // Validate the validity period. - &sshCertValidityValidator{p.claimer}, - // Require all the fields in the SSH certificate - &sshCertDefaultValidator{}, - ), nil -} diff --git a/vendor/github.com/smallstep/certificates/authority/provisioners.go b/vendor/github.com/smallstep/certificates/authority/provisioners.go deleted file mode 100644 index d2581e76..00000000 --- a/vendor/github.com/smallstep/certificates/authority/provisioners.go +++ /dev/null @@ -1,601 +0,0 @@ -package authority - -import ( - "context" - "crypto/x509" - "encoding/json" - "fmt" - - "github.com/smallstep/certificates/authority/admin" - "github.com/smallstep/certificates/authority/config" - "github.com/smallstep/certificates/authority/provisioner" - "github.com/smallstep/certificates/errs" - "go.step.sm/crypto/jose" - "go.step.sm/linkedca" - "gopkg.in/square/go-jose.v2/jwt" -) - -// GetEncryptedKey returns the JWE key corresponding to the given kid argument. 
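Both authorizeToken implementations above validate claims with a one-minute leeway, per RFC 7519's allowance for clock skew. What that buys, concretely, is shown in this sketch against gopkg.in/square/go-jose.v2/jwt (which the jose wrapper used above re-exports):

func exampleLeeway() {
	now := time.Now()
	claims := jwt.Claims{
		Issuer: "x5c-prov",
		Expiry: jwt.NewNumericDate(now.Add(-30 * time.Second)), // expired 30s ago
	}
	expected := jwt.Expected{Issuer: "x5c-prov", Time: now}

	fmt.Println(claims.ValidateWithLeeway(expected, time.Minute)) // <nil>: inside the skew allowance

	claims.Expiry = jwt.NewNumericDate(now.Add(-2 * time.Minute))
	fmt.Println(claims.ValidateWithLeeway(expected, time.Minute) != nil) // true: beyond the leeway
}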
-func (a *Authority) GetEncryptedKey(kid string) (string, error) { - a.adminMutex.RLock() - defer a.adminMutex.RUnlock() - key, ok := a.provisioners.LoadEncryptedKey(kid) - if !ok { - return "", errs.NotFound("encrypted key with kid %s was not found", kid) - } - return key, nil -} - -// GetProvisioners returns a map listing each provisioner and the JWK Key Set -// with their public keys. -func (a *Authority) GetProvisioners(cursor string, limit int) (provisioner.List, string, error) { - a.adminMutex.RLock() - defer a.adminMutex.RUnlock() - provisioners, nextCursor := a.provisioners.Find(cursor, limit) - return provisioners, nextCursor, nil -} - -// LoadProvisionerByCertificate returns an interface to the provisioner that -// provisioned the certificate. -func (a *Authority) LoadProvisionerByCertificate(crt *x509.Certificate) (provisioner.Interface, error) { - a.adminMutex.RLock() - defer a.adminMutex.RUnlock() - p, ok := a.provisioners.LoadByCertificate(crt) - if !ok { - return nil, admin.NewError(admin.ErrorNotFoundType, "unable to load provisioner from certificate") - } - return p, nil -} - -// LoadProvisionerByToken returns an interface to the provisioner that -// provisioned the token. -func (a *Authority) LoadProvisionerByToken(token *jwt.JSONWebToken, claims *jwt.Claims) (provisioner.Interface, error) { - a.adminMutex.RLock() - defer a.adminMutex.RUnlock() - p, ok := a.provisioners.LoadByToken(token, claims) - if !ok { - return nil, admin.NewError(admin.ErrorNotFoundType, "unable to load provisioner from token") - } - return p, nil -} - -// LoadProvisionerByID returns an interface to the provisioner with the given ID. -func (a *Authority) LoadProvisionerByID(id string) (provisioner.Interface, error) { - a.adminMutex.RLock() - defer a.adminMutex.RUnlock() - p, ok := a.provisioners.Load(id) - if !ok { - return nil, admin.NewError(admin.ErrorNotFoundType, "provisioner %s not found", id) - } - return p, nil -} - -// LoadProvisionerByName returns an interface to the provisioner with the given Name. -func (a *Authority) LoadProvisionerByName(name string) (provisioner.Interface, error) { - a.adminMutex.RLock() - defer a.adminMutex.RUnlock() - p, ok := a.provisioners.LoadByName(name) - if !ok { - return nil, admin.NewError(admin.ErrorNotFoundType, "provisioner %s not found", name) - } - return p, nil -} - -func (a *Authority) generateProvisionerConfig(ctx context.Context) (*provisioner.Config, error) { - // Merge global and configuration claims - claimer, err := provisioner.NewClaimer(a.config.AuthorityConfig.Claims, config.GlobalProvisionerClaims) - if err != nil { - return nil, err - } - // TODO: should we also be combining the ssh federated roots here? - // If we rotate ssh roots keys, sshpop provisioner will lose ability to - // validate old SSH certificates, unless they are added as federated certs. - sshKeys, err := a.GetSSHRoots(ctx) - if err != nil { - return nil, err - } - return &provisioner.Config{ - Claims: claimer.Claims(), - Audiences: a.config.GetAudiences(), - DB: a.db, - SSHKeys: &provisioner.SSHKeys{ - UserKeys: sshKeys.UserKeys, - HostKeys: sshKeys.HostKeys, - }, - GetIdentityFunc: a.getIdentityFunc, - }, nil - -} - -// StoreProvisioner stores an provisioner.Interface to the authority. 
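An aside on the listing API above: GetProvisioners is cursor-paginated, so a caller drains the collection in pages. A sketch, assuming an empty next cursor marks the last page and that listAllProvisioners lives outside the authority package:

func listAllProvisioners(a *authority.Authority) (provisioner.List, error) {
	var (
		all    provisioner.List
		cursor string
	)
	for {
		page, nextCursor, err := a.GetProvisioners(cursor, 20) // page size is arbitrary
		if err != nil {
			return nil, err
		}
		all = append(all, page...)
		if nextCursor == "" {
			return all, nil
		}
		cursor = nextCursor
	}
}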
-func (a *Authority) StoreProvisioner(ctx context.Context, prov *linkedca.Provisioner) error { - a.adminMutex.Lock() - defer a.adminMutex.Unlock() - - certProv, err := ProvisionerToCertificates(prov) - if err != nil { - return admin.WrapErrorISE(err, - "error converting to certificates provisioner from linkedca provisioner") - } - - if _, ok := a.provisioners.LoadByName(prov.GetName()); ok { - return admin.NewError(admin.ErrorBadRequestType, - "provisioner with name %s already exists", prov.GetName()) - } - if _, ok := a.provisioners.LoadByTokenID(certProv.GetIDForToken()); ok { - return admin.NewError(admin.ErrorBadRequestType, - "provisioner with token ID %s already exists", certProv.GetIDForToken()) - } - - // Store to database -- this will set the ID. - if err := a.adminDB.CreateProvisioner(ctx, prov); err != nil { - return admin.WrapErrorISE(err, "error creating admin") - } - - // We need a new conversion that has the newly set ID. - certProv, err = ProvisionerToCertificates(prov) - if err != nil { - return admin.WrapErrorISE(err, - "error converting to certificates provisioner from linkedca provisioner") - } - - provisionerConfig, err := a.generateProvisionerConfig(ctx) - if err != nil { - return admin.WrapErrorISE(err, "error generating provisioner config") - } - - if err := certProv.Init(*provisionerConfig); err != nil { - return admin.WrapErrorISE(err, "error initializing provisioner %s", prov.Name) - } - - if err := a.provisioners.Store(certProv); err != nil { - if err := a.reloadAdminResources(ctx); err != nil { - return admin.WrapErrorISE(err, "error reloading admin resources on failed provisioner store") - } - return admin.WrapErrorISE(err, "error storing provisioner in authority cache") - } - return nil -} - -// UpdateProvisioner stores an provisioner.Interface to the authority. -func (a *Authority) UpdateProvisioner(ctx context.Context, nu *linkedca.Provisioner) error { - a.adminMutex.Lock() - defer a.adminMutex.Unlock() - - certProv, err := ProvisionerToCertificates(nu) - if err != nil { - return admin.WrapErrorISE(err, - "error converting to certificates provisioner from linkedca provisioner") - } - - provisionerConfig, err := a.generateProvisionerConfig(ctx) - if err != nil { - return admin.WrapErrorISE(err, "error generating provisioner config") - } - - if err := certProv.Init(*provisionerConfig); err != nil { - return admin.WrapErrorISE(err, "error initializing provisioner %s", nu.Name) - } - - if err := a.provisioners.Update(certProv); err != nil { - return admin.WrapErrorISE(err, "error updating provisioner '%s' in authority cache", nu.Name) - } - if err := a.adminDB.UpdateProvisioner(ctx, nu); err != nil { - if err := a.reloadAdminResources(ctx); err != nil { - return admin.WrapErrorISE(err, "error reloading admin resources on failed provisioner update") - } - return admin.WrapErrorISE(err, "error updating provisioner '%s'", nu.Name) - } - return nil -} - -// RemoveProvisioner removes an provisioner.Interface from the authority. -func (a *Authority) RemoveProvisioner(ctx context.Context, id string) error { - a.adminMutex.Lock() - defer a.adminMutex.Unlock() - - p, ok := a.provisioners.Load(id) - if !ok { - return admin.NewError(admin.ErrorBadRequestType, - "provisioner %s not found", id) - } - - provName, provID := p.GetName(), p.GetID() - // Validate - // - Check that there will be SUPER_ADMINs that remain after we - // remove this provisioner. 
- if a.admins.SuperCount() == a.admins.SuperCountByProvisioner(provName) { - return admin.NewError(admin.ErrorBadRequestType, - "cannot remove provisioner %s because no super admins will remain", provName) - } - - // Delete all admins associated with the provisioner. - admins, ok := a.admins.LoadByProvisioner(provName) - if ok { - for _, adm := range admins { - if err := a.removeAdmin(ctx, adm.Id); err != nil { - return admin.WrapErrorISE(err, "error deleting admin %s, as part of provisioner %s deletion", adm.Subject, provName) - } - } - } - - // Remove provisioner from authority caches. - if err := a.provisioners.Remove(provID); err != nil { - return admin.WrapErrorISE(err, "error removing admin from authority cache") - } - // Remove provisioner from database. - if err := a.adminDB.DeleteProvisioner(ctx, provID); err != nil { - if err := a.reloadAdminResources(ctx); err != nil { - return admin.WrapErrorISE(err, "error reloading admin resources on failed provisioner remove") - } - return admin.WrapErrorISE(err, "error deleting provisioner %s", provName) - } - return nil -} - -func CreateFirstProvisioner(ctx context.Context, db admin.DB, password string) (*linkedca.Provisioner, error) { - jwk, jwe, err := jose.GenerateDefaultKeyPair([]byte(password)) - if err != nil { - return nil, admin.WrapErrorISE(err, "error generating JWK key pair") - } - - jwkPubBytes, err := jwk.MarshalJSON() - if err != nil { - return nil, admin.WrapErrorISE(err, "error marshaling JWK") - } - jwePrivStr, err := jwe.CompactSerialize() - if err != nil { - return nil, admin.WrapErrorISE(err, "error serializing JWE") - } - - p := &linkedca.Provisioner{ - Name: "Admin JWK", - Type: linkedca.Provisioner_JWK, - Details: &linkedca.ProvisionerDetails{ - Data: &linkedca.ProvisionerDetails_JWK{ - JWK: &linkedca.JWKProvisioner{ - PublicKey: jwkPubBytes, - EncryptedPrivateKey: []byte(jwePrivStr), - }, - }, - }, - Claims: &linkedca.Claims{ - X509: &linkedca.X509Claims{ - Enabled: true, - Durations: &linkedca.Durations{ - Default: "5m", - }, - }, - }, - } - if err := db.CreateProvisioner(ctx, p); err != nil { - return nil, admin.WrapErrorISE(err, "error creating provisioner") - } - return p, nil -} - -func ValidateClaims(c *linkedca.Claims) error { - if c == nil { - return nil - } - if c.X509 != nil { - if c.X509.Durations != nil { - if err := ValidateDurations(c.X509.Durations); err != nil { - return err - } - } - } - if c.Ssh != nil { - if c.Ssh.UserDurations != nil { - if err := ValidateDurations(c.Ssh.UserDurations); err != nil { - return err - } - } - if c.Ssh.HostDurations != nil { - if err := ValidateDurations(c.Ssh.HostDurations); err != nil { - return err - } - } - } - return nil -} - -func ValidateDurations(d *linkedca.Durations) error { - var ( - err error - min, max, def *provisioner.Duration - ) - - if d.Min != "" { - min, err = provisioner.NewDuration(d.Min) - if err != nil { - return admin.WrapError(admin.ErrorBadRequestType, err, "min duration '%s' is invalid", d.Min) - } - if min.Value() < 0 { - return admin.WrapError(admin.ErrorBadRequestType, err, "min duration '%s' cannot be less than 0", d.Min) - } - } - if d.Max != "" { - max, err = provisioner.NewDuration(d.Max) - if err != nil { - return admin.WrapError(admin.ErrorBadRequestType, err, "max duration '%s' is invalid", d.Max) - } - if max.Value() < 0 { - return admin.WrapError(admin.ErrorBadRequestType, err, "max duration '%s' cannot be less than 0", d.Max) - } - } - if d.Default != "" { - def, err = provisioner.NewDuration(d.Default) - if err != nil { - 
return admin.WrapError(admin.ErrorBadRequestType, err, "default duration '%s' is invalid", d.Default)
-		}
-		if def.Value() < 0 {
-			return admin.WrapError(admin.ErrorBadRequestType, err, "default duration '%s' cannot be less than 0", d.Default)
-		}
-	}
-	if d.Min != "" && d.Max != "" && min.Value() > max.Value() {
-		return admin.NewError(admin.ErrorBadRequestType,
-			"min duration '%s' cannot be greater than max duration '%s'", d.Min, d.Max)
-	}
-	if d.Min != "" && d.Default != "" && min.Value() > def.Value() {
-		return admin.NewError(admin.ErrorBadRequestType,
-			"min duration '%s' cannot be greater than default duration '%s'", d.Min, d.Default)
-	}
-	if d.Default != "" && d.Max != "" && def.Value() > max.Value() {
-		return admin.NewError(admin.ErrorBadRequestType,
-			"default duration '%s' cannot be greater than max duration '%s'", d.Default, d.Max)
-	}
-	return nil
-}
-
-func provisionerListToCertificates(l []*linkedca.Provisioner) (provisioner.List, error) {
-	var nu provisioner.List
-	for _, p := range l {
-		certProv, err := ProvisionerToCertificates(p)
-		if err != nil {
-			return nil, err
-		}
-		nu = append(nu, certProv)
-	}
-	return nu, nil
-}
-
-func optionsToCertificates(p *linkedca.Provisioner) *provisioner.Options {
-	ops := &provisioner.Options{
-		X509: &provisioner.X509Options{},
-		SSH:  &provisioner.SSHOptions{},
-	}
-	if p.X509Template != nil {
-		ops.X509.Template = string(p.X509Template.Template)
-		ops.X509.TemplateData = p.X509Template.Data
-	}
-	if p.SshTemplate != nil {
-		ops.SSH.Template = string(p.SshTemplate.Template)
-		ops.SSH.TemplateData = p.SshTemplate.Data
-	}
-	return ops
-}
-
-func durationsToCertificates(d *linkedca.Durations) (min, max, def *provisioner.Duration, err error) {
-	if len(d.Min) > 0 {
-		min, err = provisioner.NewDuration(d.Min)
-		if err != nil {
-			return nil, nil, nil, admin.WrapErrorISE(err, "error parsing minimum duration '%s'", d.Min)
-		}
-	}
-	if len(d.Max) > 0 {
-		max, err = provisioner.NewDuration(d.Max)
-		if err != nil {
-			return nil, nil, nil, admin.WrapErrorISE(err, "error parsing maximum duration '%s'", d.Max)
-		}
-	}
-	if len(d.Default) > 0 {
-		def, err = provisioner.NewDuration(d.Default)
-		if err != nil {
-			return nil, nil, nil, admin.WrapErrorISE(err, "error parsing default duration '%s'", d.Default)
-		}
-	}
-	return
-}
-
-// claimsToCertificates converts the linkedca provisioner claims type to the
-// certificates claims type.
-func claimsToCertificates(c *linkedca.Claims) (*provisioner.Claims, error) {
-	if c == nil {
-		return nil, nil
-	}
-
-	pc := &provisioner.Claims{
-		DisableRenewal: &c.DisableRenewal,
-	}
-
-	var err error
-
-	if xc := c.X509; xc != nil {
-		if d := xc.Durations; d != nil {
-			pc.MinTLSDur, pc.MaxTLSDur, pc.DefaultTLSDur, err = durationsToCertificates(d)
-			if err != nil {
-				return nil, err
-			}
-		}
-	}
-	if sc := c.Ssh; sc != nil {
-		pc.EnableSSHCA = &sc.Enabled
-		if d := sc.UserDurations; d != nil {
-			pc.MinUserSSHDur, pc.MaxUserSSHDur, pc.DefaultUserSSHDur, err = durationsToCertificates(d)
-			if err != nil {
-				return nil, err
-			}
-		}
-		if d := sc.HostDurations; d != nil {
-			pc.MinHostSSHDur, pc.MaxHostSSHDur, pc.DefaultHostSSHDur, err = durationsToCertificates(d)
-			if err != nil {
-				return nil, err
-			}
-		}
-	}
-
-	return pc, nil
-}
-
-// ProvisionerToCertificates converts the linkedca provisioner type to the certificates provisioner
-// interface.
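ValidateDurations enforces min <= default <= max over the string durations. For instance (a sketch; the linkedca.Durations literals are illustrative):

// Accepted: 5m <= 1h <= 24h.
fmt.Println(ValidateDurations(&linkedca.Durations{Min: "5m", Max: "24h", Default: "1h"})) // <nil>

// Rejected: min 2h is greater than default 1h.
fmt.Println(ValidateDurations(&linkedca.Durations{Min: "2h", Max: "24h", Default: "1h"}) != nil) // true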
-func ProvisionerToCertificates(p *linkedca.Provisioner) (provisioner.Interface, error) { - claims, err := claimsToCertificates(p.Claims) - if err != nil { - return nil, err - } - - details := p.Details.GetData() - if details == nil { - return nil, fmt.Errorf("provisioner does not have any details") - } - - options := optionsToCertificates(p) - - switch d := details.(type) { - case *linkedca.ProvisionerDetails_JWK: - jwk := new(jose.JSONWebKey) - if err := json.Unmarshal(d.JWK.PublicKey, &jwk); err != nil { - return nil, err - } - return &provisioner.JWK{ - ID: p.Id, - Type: p.Type.String(), - Name: p.Name, - Key: jwk, - EncryptedKey: string(d.JWK.EncryptedPrivateKey), - Claims: claims, - Options: options, - }, nil - case *linkedca.ProvisionerDetails_X5C: - var roots []byte - for i, root := range d.X5C.GetRoots() { - if i > 0 { - roots = append(roots, '\n') - } - roots = append(roots, root...) - } - return &provisioner.X5C{ - ID: p.Id, - Type: p.Type.String(), - Name: p.Name, - Roots: roots, - Claims: claims, - Options: options, - }, nil - case *linkedca.ProvisionerDetails_K8SSA: - var publicKeys []byte - for i, k := range d.K8SSA.GetPublicKeys() { - if i > 0 { - publicKeys = append(publicKeys, '\n') - } - publicKeys = append(publicKeys, k...) - } - return &provisioner.K8sSA{ - ID: p.Id, - Type: p.Type.String(), - Name: p.Name, - PubKeys: publicKeys, - Claims: claims, - Options: options, - }, nil - case *linkedca.ProvisionerDetails_SSHPOP: - return &provisioner.SSHPOP{ - ID: p.Id, - Type: p.Type.String(), - Name: p.Name, - Claims: claims, - }, nil - case *linkedca.ProvisionerDetails_ACME: - cfg := d.ACME - return &provisioner.ACME{ - ID: p.Id, - Type: p.Type.String(), - Name: p.Name, - ForceCN: cfg.ForceCn, - Claims: claims, - Options: options, - }, nil - case *linkedca.ProvisionerDetails_OIDC: - cfg := d.OIDC - return &provisioner.OIDC{ - ID: p.Id, - Type: p.Type.String(), - Name: p.Name, - TenantID: cfg.TenantId, - ClientID: cfg.ClientId, - ClientSecret: cfg.ClientSecret, - ConfigurationEndpoint: cfg.ConfigurationEndpoint, - Admins: cfg.Admins, - Domains: cfg.Domains, - Groups: cfg.Groups, - ListenAddress: cfg.ListenAddress, - Claims: claims, - Options: options, - }, nil - case *linkedca.ProvisionerDetails_AWS: - cfg := d.AWS - instanceAge, err := parseInstanceAge(cfg.InstanceAge) - if err != nil { - return nil, err - } - return &provisioner.AWS{ - ID: p.Id, - Type: p.Type.String(), - Name: p.Name, - Accounts: cfg.Accounts, - DisableCustomSANs: cfg.DisableCustomSans, - DisableTrustOnFirstUse: cfg.DisableTrustOnFirstUse, - InstanceAge: instanceAge, - Claims: claims, - Options: options, - }, nil - case *linkedca.ProvisionerDetails_GCP: - cfg := d.GCP - instanceAge, err := parseInstanceAge(cfg.InstanceAge) - if err != nil { - return nil, err - } - return &provisioner.GCP{ - ID: p.Id, - Type: p.Type.String(), - Name: p.Name, - ServiceAccounts: cfg.ServiceAccounts, - ProjectIDs: cfg.ProjectIds, - DisableCustomSANs: cfg.DisableCustomSans, - DisableTrustOnFirstUse: cfg.DisableTrustOnFirstUse, - InstanceAge: instanceAge, - Claims: claims, - Options: options, - }, nil - case *linkedca.ProvisionerDetails_Azure: - cfg := d.Azure - return &provisioner.Azure{ - ID: p.Id, - Type: p.Type.String(), - Name: p.Name, - TenantID: cfg.TenantId, - ResourceGroups: cfg.ResourceGroups, - Audience: cfg.Audience, - DisableCustomSANs: cfg.DisableCustomSans, - DisableTrustOnFirstUse: cfg.DisableTrustOnFirstUse, - Claims: claims, - Options: options, - }, nil - default: - return nil, fmt.Errorf("provisioner %s not 
implemented", p.Type) - } -} - -func parseInstanceAge(age string) (provisioner.Duration, error) { - var instanceAge provisioner.Duration - if age != "" { - iap, err := provisioner.NewDuration(age) - if err != nil { - return instanceAge, err - } - instanceAge = *iap - } - return instanceAge, nil -} diff --git a/vendor/github.com/smallstep/certificates/authority/root.go b/vendor/github.com/smallstep/certificates/authority/root.go deleted file mode 100644 index f391997f..00000000 --- a/vendor/github.com/smallstep/certificates/authority/root.go +++ /dev/null @@ -1,59 +0,0 @@ -package authority - -import ( - "crypto/x509" - - "github.com/smallstep/certificates/errs" -) - -// Root returns the certificate corresponding to the given SHA sum argument. -func (a *Authority) Root(sum string) (*x509.Certificate, error) { - val, ok := a.certificates.Load(sum) - if !ok { - return nil, errs.NotFound("certificate with fingerprint %s was not found", sum) - } - - crt, ok := val.(*x509.Certificate) - if !ok { - return nil, errs.InternalServer("stored value is not a *x509.Certificate") - } - return crt, nil -} - -// GetRootCertificate returns the server root certificate. -func (a *Authority) GetRootCertificate() *x509.Certificate { - return a.rootX509Certs[0] -} - -// GetRootCertificates returns the server root certificates. -// -// In the Authority interface we also have a similar method, GetRoots, at the -// moment the functionality of these two methods are almost identical, but this -// method is intended to be used internally by CA HTTP server to load the roots -// that will be set in the tls.Config while GetRoots will be used by the -// Authority interface and might have extra checks in the future. -func (a *Authority) GetRootCertificates() []*x509.Certificate { - return a.rootX509Certs -} - -// GetRoots returns all the root certificates for this CA. -// This method implements the Authority interface. -func (a *Authority) GetRoots() ([]*x509.Certificate, error) { - return a.rootX509Certs, nil -} - -// GetFederation returns all the root certificates in the federation. -// This method implements the Authority interface. -func (a *Authority) GetFederation() (federation []*x509.Certificate, err error) { - a.certificates.Range(func(k, v interface{}) bool { - crt, ok := v.(*x509.Certificate) - if !ok { - federation = nil - err = errs.InternalServer("stored value is not a *x509.Certificate") - return false - } - federation = append(federation, crt) - return true - }) - return -} diff --git a/vendor/github.com/smallstep/certificates/authority/ssh.go b/vendor/github.com/smallstep/certificates/authority/ssh.go deleted file mode 100644 index 335b6702..00000000 --- a/vendor/github.com/smallstep/certificates/authority/ssh.go +++ /dev/null @@ -1,517 +0,0 @@ -package authority - -import ( - "context" - "crypto/rand" - "crypto/x509" - "encoding/binary" - "net/http" - "strings" - "time" - - "github.com/pkg/errors" - "github.com/smallstep/certificates/authority/config" - "github.com/smallstep/certificates/authority/provisioner" - "github.com/smallstep/certificates/db" - "github.com/smallstep/certificates/errs" - "github.com/smallstep/certificates/templates" - "go.step.sm/crypto/randutil" - "go.step.sm/crypto/sshutil" - "golang.org/x/crypto/ssh" -) - -const ( - // SSHAddUserPrincipal is the principal that will run the add user command. - // Defaults to "provisioner" but it can be changed in the configuration. - SSHAddUserPrincipal = "provisioner" - - // SSHAddUserCommand is the default command to run to add a new user. 
-	// Defaults to "sudo useradd -m <principal>; nc -q0 localhost 22" but it can be changed in the
-	// configuration. The string <principal> will be replaced by the new
-	// principal to add.
-	SSHAddUserCommand = "sudo useradd -m <principal>; nc -q0 localhost 22"
-)
-
-// GetSSHRoots returns the SSH User and Host public keys.
-func (a *Authority) GetSSHRoots(context.Context) (*config.SSHKeys, error) {
-	return &config.SSHKeys{
-		HostKeys: a.sshCAHostCerts,
-		UserKeys: a.sshCAUserCerts,
-	}, nil
-}
-
-// GetSSHFederation returns the public keys for federated SSH signers.
-func (a *Authority) GetSSHFederation(context.Context) (*config.SSHKeys, error) {
-	return &config.SSHKeys{
-		HostKeys: a.sshCAHostFederatedCerts,
-		UserKeys: a.sshCAUserFederatedCerts,
-	}, nil
-}
-
-// GetSSHConfig returns rendered templates for clients (user) or servers (host).
-func (a *Authority) GetSSHConfig(ctx context.Context, typ string, data map[string]string) ([]templates.Output, error) {
-	if a.sshCAUserCertSignKey == nil && a.sshCAHostCertSignKey == nil {
-		return nil, errs.NotFound("getSSHConfig: ssh is not configured")
-	}
-
-	if a.templates == nil {
-		return nil, errs.NotFound("getSSHConfig: ssh templates are not configured")
-	}
-
-	var ts []templates.Template
-	switch typ {
-	case provisioner.SSHUserCert:
-		if a.templates != nil && a.templates.SSH != nil {
-			ts = a.templates.SSH.User
-		}
-	case provisioner.SSHHostCert:
-		if a.templates != nil && a.templates.SSH != nil {
-			ts = a.templates.SSH.Host
-		}
-	default:
-		return nil, errs.BadRequest("getSSHConfig: type %s is not valid", typ)
-	}
-
-	// Merge user and default data
-	var mergedData map[string]interface{}
-
-	if len(data) == 0 {
-		mergedData = a.templates.Data
-	} else {
-		mergedData = make(map[string]interface{}, len(a.templates.Data)+1)
-		mergedData["User"] = data
-		for k, v := range a.templates.Data {
-			mergedData[k] = v
-		}
-	}
-
-	// Render templates
-	output := []templates.Output{}
-	for _, t := range ts {
-		if err := t.Load(); err != nil {
-			return nil, err
-		}
-
-		// Check for required variables.
-		if err := t.ValidateRequiredData(data); err != nil {
-			return nil, errs.BadRequestErr(err, errs.WithMessage("%v, please use `--set <key=value>` flag", err))
-		}
-
-		o, err := t.Output(mergedData)
-		if err != nil {
-			return nil, err
-		}
-		output = append(output, o)
-	}
-	return output, nil
-}
-
-// GetSSHBastion returns the bastion configuration for the given user/hostname
-// pair.
-func (a *Authority) GetSSHBastion(ctx context.Context, user string, hostname string) (*config.Bastion, error) {
-	if a.sshBastionFunc != nil {
-		bs, err := a.sshBastionFunc(ctx, user, hostname)
-		return bs, errs.Wrap(http.StatusInternalServerError, err, "authority.GetSSHBastion")
-	}
-	if a.config.SSH != nil {
-		if a.config.SSH.Bastion != nil && a.config.SSH.Bastion.Hostname != "" {
-			// Do not return a bastion for a bastion host.
-			//
-			// This condition might fail if a different name or IP is used.
-			// Trying to resolve hostnames to IPs and compare them won't be a
-			// complete solution because it depends on the network
-			// configuration of the CA and clients and can also return false
-			// positives. Although not perfect, this simple solution will work
-			// in most cases.
-			if !strings.EqualFold(hostname, a.config.SSH.Bastion.Hostname) {
-				return a.config.SSH.Bastion, nil
-			}
-		}
-		return nil, nil
-	}
-	return nil, errs.NotFound("authority.GetSSHBastion; ssh is not configured")
-}
-
-// SignSSH creates a signed SSH certificate with the given public key and options.
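GetSSHBastion above has one subtlety worth calling out: the bastion is never returned for itself, and the hostname comparison is case-insensitive. A sketch, with an illustrative configuration; the hostnames and JSON shape are assumptions, not taken from this repository:

func exampleBastion(ctx context.Context, a *authority.Authority) {
	// Assuming ca.json configures something like:
	//   "ssh": {"bastion": {"hostname": "bastion.example.com"}}
	b, _ := a.GetSSHBastion(ctx, "alice", "internal.example.com")
	fmt.Println(b != nil) // true: internal hosts are reached through the bastion

	b, _ = a.GetSSHBastion(ctx, "alice", "BASTION.example.com")
	fmt.Println(b == nil) // true: the bastion never proxies itself
}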
-func (a *Authority) SignSSH(ctx context.Context, key ssh.PublicKey, opts provisioner.SignSSHOptions, signOpts ...provisioner.SignOption) (*ssh.Certificate, error) { - var ( - certOptions []sshutil.Option - mods []provisioner.SSHCertModifier - validators []provisioner.SSHCertValidator - ) - - // Validate given options. - if err := opts.Validate(); err != nil { - return nil, errs.Wrap(http.StatusBadRequest, err, "authority.SignSSH") - } - - // Set backdate with the configured value - opts.Backdate = a.config.AuthorityConfig.Backdate.Duration - - for _, op := range signOpts { - switch o := op.(type) { - // add options to NewCertificate - case provisioner.SSHCertificateOptions: - certOptions = append(certOptions, o.Options(opts)...) - - // modify the ssh.Certificate - case provisioner.SSHCertModifier: - mods = append(mods, o) - - // validate the ssh.Certificate - case provisioner.SSHCertValidator: - validators = append(validators, o) - - // validate the given SSHOptions - case provisioner.SSHCertOptionsValidator: - if err := o.Valid(opts); err != nil { - return nil, errs.Wrap(http.StatusForbidden, err, "authority.SignSSH") - } - - default: - return nil, errs.InternalServer("authority.SignSSH: invalid extra option type %T", o) - } - } - - // Simulated certificate request with request options. - cr := sshutil.CertificateRequest{ - Type: opts.CertType, - KeyID: opts.KeyID, - Principals: opts.Principals, - Key: key, - } - - // Create certificate from template. - certificate, err := sshutil.NewCertificate(cr, certOptions...) - if err != nil { - if _, ok := err.(*sshutil.TemplateError); ok { - return nil, errs.NewErr(http.StatusBadRequest, err, - errs.WithMessage(err.Error()), - errs.WithKeyVal("signOptions", signOpts), - ) - } - return nil, errs.Wrap(http.StatusInternalServerError, err, "authority.SignSSH") - } - - // Get actual *ssh.Certificate and continue with provisioner modifiers. - certTpl := certificate.GetCertificate() - - // Use SignSSHOptions to modify the certificate validity. It will be later - // checked or set if not defined. - if err := opts.ModifyValidity(certTpl); err != nil { - return nil, errs.Wrap(http.StatusBadRequest, err, "authority.SignSSH") - } - - // Use provisioner modifiers. - for _, m := range mods { - if err := m.Modify(certTpl, opts); err != nil { - return nil, errs.Wrap(http.StatusForbidden, err, "authority.SignSSH") - } - } - - // Get signer from authority keys - var signer ssh.Signer - switch certTpl.CertType { - case ssh.UserCert: - if a.sshCAUserCertSignKey == nil { - return nil, errs.NotImplemented("authority.SignSSH: user certificate signing is not enabled") - } - signer = a.sshCAUserCertSignKey - case ssh.HostCert: - if a.sshCAHostCertSignKey == nil { - return nil, errs.NotImplemented("authority.SignSSH: host certificate signing is not enabled") - } - signer = a.sshCAHostCertSignKey - default: - return nil, errs.InternalServer("authority.SignSSH: unexpected ssh certificate type: %d", certTpl.CertType) - } - - // Sign certificate. - cert, err := sshutil.CreateCertificate(certTpl, signer) - if err != nil { - return nil, errs.Wrap(http.StatusInternalServerError, err, "authority.SignSSH: error signing certificate") - } - - // User provisioners validators. 
-	for _, v := range validators {
-		if err := v.Valid(cert, opts); err != nil {
-			return nil, errs.Wrap(http.StatusForbidden, err, "authority.SignSSH")
-		}
-	}
-
-	if err = a.db.StoreSSHCertificate(cert); err != nil && err != db.ErrNotImplemented {
-		return nil, errs.Wrap(http.StatusInternalServerError, err, "authority.SignSSH: error storing certificate in db")
-	}
-
-	return cert, nil
-}
-
-// RenewSSH creates a signed SSH certificate using the old SSH certificate as a template.
-func (a *Authority) RenewSSH(ctx context.Context, oldCert *ssh.Certificate) (*ssh.Certificate, error) {
-	if oldCert.ValidAfter == 0 || oldCert.ValidBefore == 0 {
-		return nil, errs.BadRequest("renewSSH: cannot renew certificate without validity period")
-	}
-
-	backdate := a.config.AuthorityConfig.Backdate.Duration
-	duration := time.Duration(oldCert.ValidBefore-oldCert.ValidAfter) * time.Second
-	now := time.Now()
-	va := now.Add(-1 * backdate)
-	vb := now.Add(duration - backdate)
-
-	// Build base certificate with the old key.
-	// Nonce and serial will be automatically generated on signing.
-	certTpl := &ssh.Certificate{
-		Key:             oldCert.Key,
-		CertType:        oldCert.CertType,
-		KeyId:           oldCert.KeyId,
-		ValidPrincipals: oldCert.ValidPrincipals,
-		Permissions:     oldCert.Permissions,
-		Reserved:        oldCert.Reserved,
-		ValidAfter:      uint64(va.Unix()),
-		ValidBefore:     uint64(vb.Unix()),
-	}
-
-	// Get signer from authority keys
-	var signer ssh.Signer
-	switch certTpl.CertType {
-	case ssh.UserCert:
-		if a.sshCAUserCertSignKey == nil {
-			return nil, errs.NotImplemented("renewSSH: user certificate signing is not enabled")
-		}
-		signer = a.sshCAUserCertSignKey
-	case ssh.HostCert:
-		if a.sshCAHostCertSignKey == nil {
-			return nil, errs.NotImplemented("renewSSH: host certificate signing is not enabled")
-		}
-		signer = a.sshCAHostCertSignKey
-	default:
-		return nil, errs.InternalServer("renewSSH: unexpected ssh certificate type: %d", certTpl.CertType)
-	}
-
-	// Sign certificate.
-	cert, err := sshutil.CreateCertificate(certTpl, signer)
-	if err != nil {
-		return nil, errs.Wrap(http.StatusInternalServerError, err, "renewSSH: error signing certificate")
-	}
-
-	if err = a.db.StoreSSHCertificate(cert); err != nil && err != db.ErrNotImplemented {
-		return nil, errs.Wrap(http.StatusInternalServerError, err, "renewSSH: error storing certificate in db")
-	}
-
-	return cert, nil
-}
-
-// RekeySSH creates a signed SSH certificate using the old SSH certificate as a template.
-func (a *Authority) RekeySSH(ctx context.Context, oldCert *ssh.Certificate, pub ssh.PublicKey, signOpts ...provisioner.SignOption) (*ssh.Certificate, error) {
-	var validators []provisioner.SSHCertValidator
-
-	for _, op := range signOpts {
-		switch o := op.(type) {
-		// validate the ssh.Certificate
-		case provisioner.SSHCertValidator:
-			validators = append(validators, o)
-		default:
-			return nil, errs.InternalServer("rekeySSH; invalid extra option type %T", o)
-		}
-	}
-
-	if oldCert.ValidAfter == 0 || oldCert.ValidBefore == 0 {
-		return nil, errs.BadRequest("rekeySSH; cannot rekey certificate without validity period")
-	}
-
-	backdate := a.config.AuthorityConfig.Backdate.Duration
-	duration := time.Duration(oldCert.ValidBefore-oldCert.ValidAfter) * time.Second
-	now := time.Now()
-	va := now.Add(-1 * backdate)
-	vb := now.Add(duration - backdate)
-
-	// Build base certificate with the new key.
-	// Nonce and serial will be automatically generated on signing.
- cert := &ssh.Certificate{ - Key: pub, - CertType: oldCert.CertType, - KeyId: oldCert.KeyId, - ValidPrincipals: oldCert.ValidPrincipals, - Permissions: oldCert.Permissions, - Reserved: oldCert.Reserved, - ValidAfter: uint64(va.Unix()), - ValidBefore: uint64(vb.Unix()), - } - - // Get signer from authority keys - var signer ssh.Signer - switch cert.CertType { - case ssh.UserCert: - if a.sshCAUserCertSignKey == nil { - return nil, errs.NotImplemented("rekeySSH; user certificate signing is not enabled") - } - signer = a.sshCAUserCertSignKey - case ssh.HostCert: - if a.sshCAHostCertSignKey == nil { - return nil, errs.NotImplemented("rekeySSH; host certificate signing is not enabled") - } - signer = a.sshCAHostCertSignKey - default: - return nil, errs.BadRequest("rekeySSH; unexpected ssh certificate type: %d", cert.CertType) - } - - var err error - // Sign certificate. - cert, err = sshutil.CreateCertificate(cert, signer) - if err != nil { - return nil, errs.Wrap(http.StatusInternalServerError, err, "signSSH: error signing certificate") - } - - // Apply validators from provisioner. - for _, v := range validators { - if err := v.Valid(cert, provisioner.SignSSHOptions{Backdate: backdate}); err != nil { - return nil, errs.Wrap(http.StatusForbidden, err, "rekeySSH") - } - } - - if err = a.db.StoreSSHCertificate(cert); err != nil && err != db.ErrNotImplemented { - return nil, errs.Wrap(http.StatusInternalServerError, err, "rekeySSH; error storing certificate in db") - } - - return cert, nil -} - -// IsValidForAddUser checks if a user provisioner certificate can be issued to -// the given certificate. -func IsValidForAddUser(cert *ssh.Certificate) error { - if cert.CertType != ssh.UserCert { - return errors.New("certificate is not a user certificate") - } - - switch len(cert.ValidPrincipals) { - case 0: - return errors.New("certificate does not have any principals") - case 1: - return nil - case 2: - // OIDC provisioners adds a second principal with the email address. - // @ cannot be the first character. - if strings.Index(cert.ValidPrincipals[1], "@") > 0 { - return nil - } - return errors.New("certificate does not have only one principal") - default: - return errors.New("certificate does not have only one principal") - } -} - -// SignSSHAddUser signs a certificate that provisions a new user in a server. 
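IsValidForAddUser accepts exactly one principal, with a carve-out for the OIDC-style second principal that carries an e-mail address. Concretely (a sketch):

cert := &ssh.Certificate{CertType: ssh.UserCert, ValidPrincipals: []string{"alice"}}
fmt.Println(IsValidForAddUser(cert)) // <nil>

cert.ValidPrincipals = []string{"alice", "alice@example.com"}
fmt.Println(IsValidForAddUser(cert)) // <nil>: second principal contains '@' past the first character

cert.ValidPrincipals = []string{"alice", "bob"}
fmt.Println(IsValidForAddUser(cert) != nil) // true: more than one plain principal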
-func (a *Authority) SignSSHAddUser(ctx context.Context, key ssh.PublicKey, subject *ssh.Certificate) (*ssh.Certificate, error) { - if a.sshCAUserCertSignKey == nil { - return nil, errs.NotImplemented("signSSHAddUser: user certificate signing is not enabled") - } - if err := IsValidForAddUser(subject); err != nil { - return nil, errs.Wrap(http.StatusForbidden, err, "signSSHAddUser") - } - - nonce, err := randutil.ASCII(32) - if err != nil { - return nil, errs.Wrap(http.StatusInternalServerError, err, "signSSHAddUser") - } - - var serial uint64 - if err := binary.Read(rand.Reader, binary.BigEndian, &serial); err != nil { - return nil, errs.Wrap(http.StatusInternalServerError, err, "signSSHAddUser: error reading random number") - } - - signer := a.sshCAUserCertSignKey - principal := subject.ValidPrincipals[0] - addUserPrincipal := a.getAddUserPrincipal() - - cert := &ssh.Certificate{ - Nonce: []byte(nonce), - Key: key, - Serial: serial, - CertType: ssh.UserCert, - KeyId: principal + "-" + addUserPrincipal, - ValidPrincipals: []string{addUserPrincipal}, - ValidAfter: subject.ValidAfter, - ValidBefore: subject.ValidBefore, - Permissions: ssh.Permissions{ - CriticalOptions: map[string]string{ - "force-command": a.getAddUserCommand(principal), - }, - }, - SignatureKey: signer.PublicKey(), - } - - // Get bytes for signing trailing the signature length. - data := cert.Marshal() - data = data[:len(data)-4] - - // Sign the certificate - sig, err := signer.Sign(rand.Reader, data) - if err != nil { - return nil, err - } - cert.Signature = sig - - if err = a.db.StoreSSHCertificate(cert); err != nil && err != db.ErrNotImplemented { - return nil, errs.Wrap(http.StatusInternalServerError, err, "signSSHAddUser: error storing certificate in db") - } - - return cert, nil -} - -// CheckSSHHost checks the given principal has been registered before. -func (a *Authority) CheckSSHHost(ctx context.Context, principal string, token string) (bool, error) { - if a.sshCheckHostFunc != nil { - exists, err := a.sshCheckHostFunc(ctx, principal, token, a.GetRootCertificates()) - if err != nil { - return false, errs.Wrap(http.StatusInternalServerError, err, - "checkSSHHost: error from injected checkSSHHost func") - } - return exists, nil - } - exists, err := a.db.IsSSHHost(principal) - if err != nil { - if err == db.ErrNotImplemented { - return false, errs.Wrap(http.StatusNotImplemented, err, - "checkSSHHost: isSSHHost is not implemented") - } - return false, errs.Wrap(http.StatusInternalServerError, err, - "checkSSHHost: error checking if hosts exists") - } - - return exists, nil -} - -// GetSSHHosts returns a list of valid host principals. 
-func (a *Authority) GetSSHHosts(ctx context.Context, cert *x509.Certificate) ([]config.Host, error) {
-	if a.sshGetHostsFunc != nil {
-		hosts, err := a.sshGetHostsFunc(ctx, cert)
-		return hosts, errs.Wrap(http.StatusInternalServerError, err, "getSSHHosts")
-	}
-	hostnames, err := a.db.GetSSHHostPrincipals()
-	if err != nil {
-		return nil, errs.Wrap(http.StatusInternalServerError, err, "getSSHHosts")
-	}
-
-	hosts := make([]config.Host, len(hostnames))
-	for i, hn := range hostnames {
-		hosts[i] = config.Host{Hostname: hn}
-	}
-	return hosts, nil
-}
-
-func (a *Authority) getAddUserPrincipal() (cmd string) {
-	if a.config.SSH.AddUserPrincipal == "" {
-		return SSHAddUserPrincipal
-	}
-	return a.config.SSH.AddUserPrincipal
-}
-
-func (a *Authority) getAddUserCommand(principal string) string {
-	var cmd string
-	if a.config.SSH.AddUserCommand == "" {
-		cmd = SSHAddUserCommand
-	} else {
-		cmd = a.config.SSH.AddUserCommand
-	}
-	return strings.Replace(cmd, "<principal>", principal, -1)
-}
diff --git a/vendor/github.com/smallstep/certificates/authority/tls.go b/vendor/github.com/smallstep/certificates/authority/tls.go
deleted file mode 100644
index a3dd95d3..00000000
--- a/vendor/github.com/smallstep/certificates/authority/tls.go
+++ /dev/null
@@ -1,493 +0,0 @@
-package authority
-
-import (
-	"context"
-	"crypto"
-	"crypto/tls"
-	"crypto/x509"
-	"encoding/asn1"
-	"encoding/base64"
-	"encoding/pem"
-	"net/http"
-	"time"
-
-	"github.com/pkg/errors"
-	"github.com/smallstep/certificates/authority/config"
-	"github.com/smallstep/certificates/authority/provisioner"
-	casapi "github.com/smallstep/certificates/cas/apiv1"
-	"github.com/smallstep/certificates/db"
-	"github.com/smallstep/certificates/errs"
-	"go.step.sm/crypto/jose"
-	"go.step.sm/crypto/keyutil"
-	"go.step.sm/crypto/pemutil"
-	"go.step.sm/crypto/x509util"
-)
-
-// GetTLSOptions returns the tls options configured.
-func (a *Authority) GetTLSOptions() *config.TLSOptions {
-	return a.config.TLS
-}
-
-var oidAuthorityKeyIdentifier = asn1.ObjectIdentifier{2, 5, 29, 35}
-var oidSubjectKeyIdentifier = asn1.ObjectIdentifier{2, 5, 29, 14}
-
-func withDefaultASN1DN(def *config.ASN1DN) provisioner.CertificateModifierFunc {
-	return func(crt *x509.Certificate, opts provisioner.SignOptions) error {
-		if def == nil {
-			return errors.New("default ASN1DN template cannot be nil")
-		}
-
-		if len(crt.Subject.Country) == 0 && def.Country != "" {
-			crt.Subject.Country = append(crt.Subject.Country, def.Country)
-		}
-		if len(crt.Subject.Organization) == 0 && def.Organization != "" {
-			crt.Subject.Organization = append(crt.Subject.Organization, def.Organization)
-		}
-		if len(crt.Subject.OrganizationalUnit) == 0 && def.OrganizationalUnit != "" {
-			crt.Subject.OrganizationalUnit = append(crt.Subject.OrganizationalUnit, def.OrganizationalUnit)
-		}
-		if len(crt.Subject.Locality) == 0 && def.Locality != "" {
-			crt.Subject.Locality = append(crt.Subject.Locality, def.Locality)
-		}
-		if len(crt.Subject.Province) == 0 && def.Province != "" {
-			crt.Subject.Province = append(crt.Subject.Province, def.Province)
-		}
-		if len(crt.Subject.StreetAddress) == 0 && def.StreetAddress != "" {
-			crt.Subject.StreetAddress = append(crt.Subject.StreetAddress, def.StreetAddress)
-		}
-
-		return nil
-	}
-}
-
-// Sign creates a signed certificate from a certificate signing request.
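With the default SSHAddUserCommand, the force-command baked into the add-user certificate for principal "alice" expands like this (a sketch mirroring getAddUserCommand above):

func exampleAddUserCommand() {
	fmt.Println(strings.Replace(SSHAddUserCommand, "<principal>", "alice", -1))
	// sudo useradd -m alice; nc -q0 localhost 22
}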
-func (a *Authority) Sign(csr *x509.CertificateRequest, signOpts provisioner.SignOptions, extraOpts ...provisioner.SignOption) ([]*x509.Certificate, error) { - var ( - certOptions []x509util.Option - certValidators []provisioner.CertificateValidator - certModifiers []provisioner.CertificateModifier - certEnforcers []provisioner.CertificateEnforcer - ) - - opts := []interface{}{errs.WithKeyVal("csr", csr), errs.WithKeyVal("signOptions", signOpts)} - if err := csr.CheckSignature(); err != nil { - return nil, errs.Wrap(http.StatusBadRequest, err, "authority.Sign; invalid certificate request", opts...) - } - - // Set backdate with the configured value - signOpts.Backdate = a.config.AuthorityConfig.Backdate.Duration - - for _, op := range extraOpts { - switch k := op.(type) { - // Adds new options to NewCertificate - case provisioner.CertificateOptions: - certOptions = append(certOptions, k.Options(signOpts)...) - - // Validate the given certificate request. - case provisioner.CertificateRequestValidator: - if err := k.Valid(csr); err != nil { - return nil, errs.Wrap(http.StatusUnauthorized, err, "authority.Sign", opts...) - } - - // Validates the unsigned certificate template. - case provisioner.CertificateValidator: - certValidators = append(certValidators, k) - - // Modifies a certificate before validating it. - case provisioner.CertificateModifier: - certModifiers = append(certModifiers, k) - - // Modifies a certificate after validating it. - case provisioner.CertificateEnforcer: - certEnforcers = append(certEnforcers, k) - - default: - return nil, errs.InternalServer("authority.Sign; invalid extra option type %T", append([]interface{}{k}, opts...)...) - } - } - - cert, err := x509util.NewCertificate(csr, certOptions...) - if err != nil { - if _, ok := err.(*x509util.TemplateError); ok { - return nil, errs.NewErr(http.StatusBadRequest, err, - errs.WithMessage(err.Error()), - errs.WithKeyVal("csr", csr), - errs.WithKeyVal("signOptions", signOpts), - ) - } - return nil, errs.Wrap(http.StatusInternalServerError, err, "authority.Sign", opts...) - } - - // Certificate modifiers before validation - leaf := cert.GetCertificate() - - // Set default subject - if err := withDefaultASN1DN(a.config.AuthorityConfig.Template).Modify(leaf, signOpts); err != nil { - return nil, errs.Wrap(http.StatusUnauthorized, err, "authority.Sign", opts...) - } - - for _, m := range certModifiers { - if err := m.Modify(leaf, signOpts); err != nil { - return nil, errs.Wrap(http.StatusUnauthorized, err, "authority.Sign", opts...) - } - } - - // Certificate validation. - for _, v := range certValidators { - if err := v.Valid(leaf, signOpts); err != nil { - return nil, errs.Wrap(http.StatusUnauthorized, err, "authority.Sign", opts...) - } - } - - // Certificate modifiers after validation - for _, m := range certEnforcers { - if err := m.Enforce(leaf); err != nil { - return nil, errs.Wrap(http.StatusUnauthorized, err, "authority.Sign", opts...) - } - } - - lifetime := leaf.NotAfter.Sub(leaf.NotBefore.Add(signOpts.Backdate)) - resp, err := a.x509CAService.CreateCertificate(&casapi.CreateCertificateRequest{ - Template: leaf, - CSR: csr, - Lifetime: lifetime, - Backdate: signOpts.Backdate, - }) - if err != nil { - return nil, errs.Wrap(http.StatusInternalServerError, err, "authority.Sign; error creating certificate", opts...) - } - - fullchain := append([]*x509.Certificate{resp.Certificate}, resp.CertificateChain...) 
- if err = a.storeCertificate(fullchain); err != nil { - if err != db.ErrNotImplemented { - return nil, errs.Wrap(http.StatusInternalServerError, err, - "authority.Sign; error storing certificate in db", opts...) - } - } - - return fullchain, nil -} - -// Renew creates a new Certificate identical to the old certificate, except -// with a validity window that begins 'now'. -func (a *Authority) Renew(oldCert *x509.Certificate) ([]*x509.Certificate, error) { - return a.Rekey(oldCert, nil) -} - -// Rekey is used for rekeying and renewing based on the public key. -// If the public key is 'nil' then it's assumed that the cert should be renewed -// using the existing public key. If the public key is not 'nil' then it's -// assumed that the cert should be rekeyed. -// For both Rekey and Renew all other attributes of the new certificate should -// match the old certificate. The exceptions are 'AuthorityKeyId' (which may -// have changed), 'SubjectKeyId' (different in case of rekey), and -// 'NotBefore/NotAfter' (the validity duration of the new certificate should be -// equal to the old one, but starting 'now'). -func (a *Authority) Rekey(oldCert *x509.Certificate, pk crypto.PublicKey) ([]*x509.Certificate, error) { - isRekey := (pk != nil) - opts := []interface{}{errs.WithKeyVal("serialNumber", oldCert.SerialNumber.String())} - - // Check step provisioner extensions - if err := a.authorizeRenew(oldCert); err != nil { - return nil, errs.Wrap(http.StatusInternalServerError, err, "authority.Rekey", opts...) - } - - // Durations - backdate := a.config.AuthorityConfig.Backdate.Duration - duration := oldCert.NotAfter.Sub(oldCert.NotBefore) - lifetime := duration - backdate - - // Create new certificate from previous values. - // Issuer, NotBefore, NotAfter and SubjectKeyId will be set by the CAS. - newCert := &x509.Certificate{ - Subject: oldCert.Subject, - KeyUsage: oldCert.KeyUsage, - UnhandledCriticalExtensions: oldCert.UnhandledCriticalExtensions, - ExtKeyUsage: oldCert.ExtKeyUsage, - UnknownExtKeyUsage: oldCert.UnknownExtKeyUsage, - BasicConstraintsValid: oldCert.BasicConstraintsValid, - IsCA: oldCert.IsCA, - MaxPathLen: oldCert.MaxPathLen, - MaxPathLenZero: oldCert.MaxPathLenZero, - OCSPServer: oldCert.OCSPServer, - IssuingCertificateURL: oldCert.IssuingCertificateURL, - PermittedDNSDomainsCritical: oldCert.PermittedDNSDomainsCritical, - PermittedEmailAddresses: oldCert.PermittedEmailAddresses, - DNSNames: oldCert.DNSNames, - EmailAddresses: oldCert.EmailAddresses, - IPAddresses: oldCert.IPAddresses, - URIs: oldCert.URIs, - PermittedDNSDomains: oldCert.PermittedDNSDomains, - ExcludedDNSDomains: oldCert.ExcludedDNSDomains, - PermittedIPRanges: oldCert.PermittedIPRanges, - ExcludedIPRanges: oldCert.ExcludedIPRanges, - ExcludedEmailAddresses: oldCert.ExcludedEmailAddresses, - PermittedURIDomains: oldCert.PermittedURIDomains, - ExcludedURIDomains: oldCert.ExcludedURIDomains, - CRLDistributionPoints: oldCert.CRLDistributionPoints, - PolicyIdentifiers: oldCert.PolicyIdentifiers, - } - - if isRekey { - newCert.PublicKey = pk - } else { - newCert.PublicKey = oldCert.PublicKey - } - - // Copy all extensions except: - // - // 1. Authority Key Identifier - This one might be different if we rotate - // the intermediate certificate and it will cause a TLS bad certificate - // error. - // - // 2. 
Subject Key Identifier, if rekey - For rekey, SubjectKeyIdentifier - // extension will be calculated for the new public key by - // x509util.CreateCertificate() - for _, ext := range oldCert.Extensions { - if ext.Id.Equal(oidAuthorityKeyIdentifier) { - continue - } - if ext.Id.Equal(oidSubjectKeyIdentifier) && isRekey { - newCert.SubjectKeyId = nil - continue - } - newCert.ExtraExtensions = append(newCert.ExtraExtensions, ext) - } - - resp, err := a.x509CAService.RenewCertificate(&casapi.RenewCertificateRequest{ - Template: newCert, - Lifetime: lifetime, - Backdate: backdate, - }) - if err != nil { - return nil, errs.Wrap(http.StatusInternalServerError, err, "authority.Rekey", opts...) - } - - fullchain := append([]*x509.Certificate{resp.Certificate}, resp.CertificateChain...) - if err = a.storeRenewedCertificate(oldCert, fullchain); err != nil { - if err != db.ErrNotImplemented { - return nil, errs.Wrap(http.StatusInternalServerError, err, "authority.Rekey; error storing certificate in db", opts...) - } - } - - return fullchain, nil -} - -// storeCertificate allows to use an extension of the db.AuthDB interface that -// can log the full chain of certificates. -// -// TODO: at some point we should replace the db.AuthDB interface to implement -// `StoreCertificate(...*x509.Certificate) error` instead of just -// `StoreCertificate(*x509.Certificate) error`. -func (a *Authority) storeCertificate(fullchain []*x509.Certificate) error { - if s, ok := a.db.(interface { - StoreCertificateChain(...*x509.Certificate) error - }); ok { - return s.StoreCertificateChain(fullchain...) - } - return a.db.StoreCertificate(fullchain[0]) -} - -// storeRenewedCertificate allows to use an extension of the db.AuthDB interface -// that can log if a certificate has been renewed or rekeyed. -// -// TODO: at some point we should implement this in the standard implementation. -func (a *Authority) storeRenewedCertificate(oldCert *x509.Certificate, fullchain []*x509.Certificate) error { - if s, ok := a.db.(interface { - StoreRenewedCertificate(*x509.Certificate, ...*x509.Certificate) error - }); ok { - return s.StoreRenewedCertificate(oldCert, fullchain...) - } - return a.db.StoreCertificate(fullchain[0]) -} - -// RevokeOptions are the options for the Revoke API. -type RevokeOptions struct { - Serial string - Reason string - ReasonCode int - PassiveOnly bool - MTLS bool - Crt *x509.Certificate - OTT string -} - -// Revoke revokes a certificate. -// -// NOTE: Only supports passive revocation - prevent existing certificates from -// being renewed. -// -// TODO: Add OCSP and CRL support. 
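Editorial note: before Revoke's body, it is worth pausing on Rekey's extension-copy loop above, which is the subtle part of renewal: every extension is carried over verbatim except the Authority Key Identifier (the intermediate may have rotated) and, on rekey only, the Subject Key Identifier (it must be recomputed for the new public key). A standalone sketch of that filter using the same OIDs (carryOver is an illustrative name):

package main

import (
    "crypto/x509/pkix"
    "encoding/asn1"
    "fmt"
)

var (
    oidAKI = asn1.ObjectIdentifier{2, 5, 29, 35} // Authority Key Identifier
    oidSKI = asn1.ObjectIdentifier{2, 5, 29, 14} // Subject Key Identifier
)

// carryOver returns the extensions to copy into a renewed certificate,
// dropping the AKI always and the SKI only when the key changes.
func carryOver(exts []pkix.Extension, isRekey bool) []pkix.Extension {
    var out []pkix.Extension
    for _, ext := range exts {
        if ext.Id.Equal(oidAKI) {
            continue
        }
        if ext.Id.Equal(oidSKI) && isRekey {
            continue
        }
        out = append(out, ext)
    }
    return out
}

func main() {
    san := asn1.ObjectIdentifier{2, 5, 29, 17} // Subject Alternative Name
    exts := []pkix.Extension{{Id: oidAKI}, {Id: oidSKI}, {Id: san}}
    fmt.Println(len(carryOver(exts, true)))  // 1: only the SAN survives a rekey
    fmt.Println(len(carryOver(exts, false))) // 2: SKI is kept on a plain renew
}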
-func (a *Authority) Revoke(ctx context.Context, revokeOpts *RevokeOptions) error { - opts := []interface{}{ - errs.WithKeyVal("serialNumber", revokeOpts.Serial), - errs.WithKeyVal("reasonCode", revokeOpts.ReasonCode), - errs.WithKeyVal("reason", revokeOpts.Reason), - errs.WithKeyVal("passiveOnly", revokeOpts.PassiveOnly), - errs.WithKeyVal("MTLS", revokeOpts.MTLS), - errs.WithKeyVal("context", provisioner.MethodFromContext(ctx).String()), - } - if revokeOpts.MTLS { - opts = append(opts, errs.WithKeyVal("certificate", base64.StdEncoding.EncodeToString(revokeOpts.Crt.Raw))) - } else { - opts = append(opts, errs.WithKeyVal("token", revokeOpts.OTT)) - } - - rci := &db.RevokedCertificateInfo{ - Serial: revokeOpts.Serial, - ReasonCode: revokeOpts.ReasonCode, - Reason: revokeOpts.Reason, - MTLS: revokeOpts.MTLS, - RevokedAt: time.Now().UTC(), - } - - var ( - p provisioner.Interface - err error - ) - // If not mTLS then get the TokenID of the token. - if !revokeOpts.MTLS { - token, err := jose.ParseSigned(revokeOpts.OTT) - if err != nil { - return errs.Wrap(http.StatusUnauthorized, err, - "authority.Revoke; error parsing token", opts...) - } - - // Get claims w/out verification. - var claims Claims - if err = token.UnsafeClaimsWithoutVerification(&claims); err != nil { - return errs.Wrap(http.StatusUnauthorized, err, "authority.Revoke", opts...) - } - - // This method will also validate the audiences for JWK provisioners. - p, err = a.LoadProvisionerByToken(token, &claims.Claims) - if err != nil { - return err - } - rci.ProvisionerID = p.GetID() - rci.TokenID, err = p.GetTokenID(revokeOpts.OTT) - if err != nil && !errors.Is(err, provisioner.ErrAllowTokenReuse) { - return errs.Wrap(http.StatusInternalServerError, err, - "authority.Revoke; could not get ID for token") - } - opts = append(opts, errs.WithKeyVal("provisionerID", rci.ProvisionerID)) - opts = append(opts, errs.WithKeyVal("tokenID", rci.TokenID)) - } else { - // Load the Certificate provisioner if one exists. - if p, err = a.LoadProvisionerByCertificate(revokeOpts.Crt); err == nil { - rci.ProvisionerID = p.GetID() - opts = append(opts, errs.WithKeyVal("provisionerID", rci.ProvisionerID)) - } - } - - if provisioner.MethodFromContext(ctx) == provisioner.SSHRevokeMethod { - err = a.db.RevokeSSH(rci) - } else { - // Revoke an X.509 certificate using CAS. If the certificate is not - // provided we will try to read it from the db. If the read fails we - // won't throw an error as it will be responsibility of the CAS - // implementation to require a certificate. - var revokedCert *x509.Certificate - if revokeOpts.Crt != nil { - revokedCert = revokeOpts.Crt - } else if rci.Serial != "" { - revokedCert, _ = a.db.GetCertificate(rci.Serial) - } - - // CAS operation, note that SoftCAS (default) is a noop. - // The revoke happens when this is stored in the db. - _, err = a.x509CAService.RevokeCertificate(&casapi.RevokeCertificateRequest{ - Certificate: revokedCert, - SerialNumber: rci.Serial, - Reason: rci.Reason, - ReasonCode: rci.ReasonCode, - PassiveOnly: revokeOpts.PassiveOnly, - }) - if err != nil { - return errs.Wrap(http.StatusInternalServerError, err, "authority.Revoke", opts...) - } - - // Save as revoked in the Db. - err = a.db.Revoke(rci) - } - switch err { - case nil: - return nil - case db.ErrNotImplemented: - return errs.NotImplemented("authority.Revoke; no persistence layer configured", opts...) 
- case db.ErrAlreadyExists: - return errs.BadRequest("authority.Revoke; certificate with serial "+ - "number %s has already been revoked", append([]interface{}{rci.Serial}, opts...)...) - default: - return errs.Wrap(http.StatusInternalServerError, err, "authority.Revoke", opts...) - } -} - -// GetTLSCertificate creates a new leaf certificate to be used by the CA HTTPS server. -func (a *Authority) GetTLSCertificate() (*tls.Certificate, error) { - fatal := func(err error) (*tls.Certificate, error) { - return nil, errs.Wrap(http.StatusInternalServerError, err, "authority.GetTLSCertificate") - } - - // Generate default key. - priv, err := keyutil.GenerateDefaultKey() - if err != nil { - return fatal(err) - } - signer, ok := priv.(crypto.Signer) - if !ok { - return fatal(errors.New("private key is not a crypto.Signer")) - } - - // Create initial certificate request. - cr, err := x509util.CreateCertificateRequest("Step Online CA", a.config.DNSNames, signer) - if err != nil { - return fatal(err) - } - - // Generate certificate template directly from the certificate request. - template, err := x509util.NewCertificate(cr) - if err != nil { - return fatal(err) - } - - // Get x509 certificate template, set validity and sign it. - now := time.Now() - certTpl := template.GetCertificate() - certTpl.NotBefore = now.Add(-1 * time.Minute) - certTpl.NotAfter = now.Add(24 * time.Hour) - - resp, err := a.x509CAService.CreateCertificate(&casapi.CreateCertificateRequest{ - Template: certTpl, - CSR: cr, - Lifetime: 24 * time.Hour, - Backdate: 1 * time.Minute, - }) - if err != nil { - return fatal(err) - } - - // Generate PEM blocks to create tls.Certificate - pemBlocks := pem.EncodeToMemory(&pem.Block{ - Type: "CERTIFICATE", - Bytes: resp.Certificate.Raw, - }) - for _, crt := range resp.CertificateChain { - pemBlocks = append(pemBlocks, pem.EncodeToMemory(&pem.Block{ - Type: "CERTIFICATE", - Bytes: crt.Raw, - })...) - } - keyPEM, err := pemutil.Serialize(priv) - if err != nil { - return fatal(err) - } - - tlsCrt, err := tls.X509KeyPair(pemBlocks, pem.EncodeToMemory(keyPEM)) - if err != nil { - return fatal(err) - } - // Set leaf certificate - tlsCrt.Leaf = resp.Certificate - return &tlsCrt, nil -} diff --git a/vendor/github.com/smallstep/certificates/authority/version.go b/vendor/github.com/smallstep/certificates/authority/version.go deleted file mode 100644 index 41e1be80..00000000 --- a/vendor/github.com/smallstep/certificates/authority/version.go +++ /dev/null @@ -1,17 +0,0 @@ -package authority - -// GlobalVersion stores the version information of the server. -var GlobalVersion = Version{ - Version: "0.0.0", -} - -// Version defines the -type Version struct { - Version string - RequireClientAuthentication bool -} - -// Version returns the version information of the server. -func (a *Authority) Version() Version { - return GlobalVersion -} diff --git a/vendor/github.com/smallstep/certificates/cas/apiv1/extension.go b/vendor/github.com/smallstep/certificates/cas/apiv1/extension.go deleted file mode 100644 index bbe2525a..00000000 --- a/vendor/github.com/smallstep/certificates/cas/apiv1/extension.go +++ /dev/null @@ -1,62 +0,0 @@ -package apiv1 - -import ( - "crypto/x509" - "crypto/x509/pkix" - "encoding/asn1" - - "github.com/pkg/errors" -) - -var ( - oidStepRoot = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 37476, 9000, 64} - oidStepCertificateAuthority = append(asn1.ObjectIdentifier(nil), append(oidStepRoot, 2)...) 
-) - -// CertificateAuthorityExtension type is used to encode the certificate -// authority extension. -type CertificateAuthorityExtension struct { - Type string - CertificateID string `asn1:"optional,omitempty"` - KeyValuePairs []string `asn1:"optional,omitempty"` -} - -// CreateCertificateAuthorityExtension returns a X.509 extension that shows the -// CAS type, id and a list of optional key value pairs. -func CreateCertificateAuthorityExtension(typ Type, certificateID string, keyValuePairs ...string) (pkix.Extension, error) { - b, err := asn1.Marshal(CertificateAuthorityExtension{ - Type: typ.String(), - CertificateID: certificateID, - KeyValuePairs: keyValuePairs, - }) - if err != nil { - return pkix.Extension{}, errors.Wrapf(err, "error marshaling certificate id extension") - } - return pkix.Extension{ - Id: oidStepCertificateAuthority, - Critical: false, - Value: b, - }, nil -} - -// FindCertificateAuthorityExtension returns the certificate authority extension -// from a signed certificate. -func FindCertificateAuthorityExtension(cert *x509.Certificate) (pkix.Extension, bool) { - for _, ext := range cert.Extensions { - if ext.Id.Equal(oidStepCertificateAuthority) { - return ext, true - } - } - return pkix.Extension{}, false -} - -// RemoveCertificateAuthorityExtension removes the certificate authority -// extension from a certificate template. -func RemoveCertificateAuthorityExtension(cert *x509.Certificate) { - for i, ext := range cert.ExtraExtensions { - if ext.Id.Equal(oidStepCertificateAuthority) { - cert.ExtraExtensions = append(cert.ExtraExtensions[:i], cert.ExtraExtensions[i+1:]...) - return - } - } -} diff --git a/vendor/github.com/smallstep/certificates/cas/apiv1/options.go b/vendor/github.com/smallstep/certificates/cas/apiv1/options.go deleted file mode 100644 index 61cac9a2..00000000 --- a/vendor/github.com/smallstep/certificates/cas/apiv1/options.go +++ /dev/null @@ -1,90 +0,0 @@ -package apiv1 - -import ( - "crypto" - "crypto/x509" - - "github.com/pkg/errors" - "github.com/smallstep/certificates/kms" -) - -// Options represents the configuration options used to select and configure the -// CertificateAuthorityService (CAS) to use. -type Options struct { - // The type of the CAS to use. - Type string `json:"type"` - - // CertificateAuthority reference: - // In StepCAS the value is the CA url, e.g. "https://ca.smallstep.com:9000". - // In CloudCAS the format is "projects/*/locations/*/certificateAuthorities/*". - CertificateAuthority string `json:"certificateAuthority,omitempty"` - - // CertificateAuthorityFingerprint is the root fingerprint used to - // authenticate the connection to the CA when using StepCAS. - CertificateAuthorityFingerprint string `json:"certificateAuthorityFingerprint,omitempty"` - - // CertificateIssuer contains the configuration used in StepCAS. - CertificateIssuer *CertificateIssuer `json:"certificateIssuer,omitempty"` - - // Path to the credentials file used in CloudCAS. If not defined the default - // authentication mechanism provided by Google SDK will be used. See - // https://cloud.google.com/docs/authentication. - CredentialsFile string `json:"credentialsFile,omitempty"` - - // Certificate and signer are the issuer certificate, along with any other - // bundled certificates to be returned in the chain for consumers, and - // signer used in SoftCAS. They are configured in ca.json crt and key - // properties. 
- CertificateChain []*x509.Certificate `json:"-"` - Signer crypto.Signer `json:"-"` - - // IsCreator is set to true when we're creating a certificate authority. Is - // used to skip some validations when initializing a CertificateAuthority. - IsCreator bool `json:"-"` - - // KeyManager is the KMS used to generate keys in SoftCAS. - KeyManager kms.KeyManager `json:"-"` - - // Project, Location, CaPool and GCSBucket are parameters used in CloudCAS - // to create a new certificate authority. If a CaPool does not exist it will - // be created. GCSBucket is optional, if not provided GCloud will create a - // managed bucket. - Project string `json:"-"` - Location string `json:"-"` - CaPool string `json:"-"` - CaPoolTier string `json:"-"` - GCSBucket string `json:"-"` -} - -// CertificateIssuer contains the properties used to use the StepCAS certificate -// authority service. -type CertificateIssuer struct { - Type string `json:"type"` - Provisioner string `json:"provisioner,omitempty"` - Certificate string `json:"crt,omitempty"` - Key string `json:"key,omitempty"` - Password string `json:"password,omitempty"` -} - -// Validate checks the fields in Options. -func (o *Options) Validate() error { - var typ Type - if o == nil { - typ = Type(SoftCAS) - } else { - typ = Type(o.Type) - } - // Check that the type can be loaded. - if _, ok := LoadCertificateAuthorityServiceNewFunc(typ); !ok { - return errors.Errorf("unsupported cas type %s", typ) - } - return nil -} - -// Is returns if the options have the given type. -func (o *Options) Is(t Type) bool { - if o == nil { - return t.String() == SoftCAS - } - return Type(o.Type).String() == t.String() -} diff --git a/vendor/github.com/smallstep/certificates/cas/apiv1/registry.go b/vendor/github.com/smallstep/certificates/cas/apiv1/registry.go deleted file mode 100644 index 5876e9d7..00000000 --- a/vendor/github.com/smallstep/certificates/cas/apiv1/registry.go +++ /dev/null @@ -1,29 +0,0 @@ -package apiv1 - -import ( - "context" - "sync" -) - -var ( - registry = new(sync.Map) -) - -// CertificateAuthorityServiceNewFunc is the type that represents the method to initialize a new -// CertificateAuthorityService. -type CertificateAuthorityServiceNewFunc func(ctx context.Context, opts Options) (CertificateAuthorityService, error) - -// Register adds to the registry a method to create a KeyManager of type t. -func Register(t Type, fn CertificateAuthorityServiceNewFunc) { - registry.Store(t.String(), fn) -} - -// LoadCertificateAuthorityServiceNewFunc returns the function to initialize a KeyManager. -func LoadCertificateAuthorityServiceNewFunc(t Type) (CertificateAuthorityServiceNewFunc, bool) { - v, ok := registry.Load(t.String()) - if !ok { - return nil, false - } - fn, ok := v.(CertificateAuthorityServiceNewFunc) - return fn, ok -} diff --git a/vendor/github.com/smallstep/certificates/cas/apiv1/requests.go b/vendor/github.com/smallstep/certificates/cas/apiv1/requests.go deleted file mode 100644 index b47a9c13..00000000 --- a/vendor/github.com/smallstep/certificates/cas/apiv1/requests.go +++ /dev/null @@ -1,142 +0,0 @@ -package apiv1 - -import ( - "crypto" - "crypto/x509" - "time" - - "github.com/smallstep/certificates/kms/apiv1" -) - -// CertificateAuthorityType indicates the type of Certificate Authority to -// create. -type CertificateAuthorityType int - -const ( - // RootCA is the type used to create a self-signed certificate suitable for - // use as a root CA. 
- RootCA CertificateAuthorityType = iota + 1 - - // IntermediateCA is the type used to create a subordinated certificate that - // can be used to sign additional leaf certificates. - IntermediateCA -) - -// SignatureAlgorithm used for cryptographic signing. -type SignatureAlgorithm int - -const ( - // Not specified. - UnspecifiedSignAlgorithm SignatureAlgorithm = iota - // RSASSA-PKCS1-v1_5 key and a SHA256 digest. - SHA256WithRSA - // RSASSA-PKCS1-v1_5 key and a SHA384 digest. - SHA384WithRSA - // RSASSA-PKCS1-v1_5 key and a SHA512 digest. - SHA512WithRSA - // RSASSA-PSS key with a SHA256 digest. - SHA256WithRSAPSS - // RSASSA-PSS key with a SHA384 digest. - SHA384WithRSAPSS - // RSASSA-PSS key with a SHA512 digest. - SHA512WithRSAPSS - // ECDSA on the NIST P-256 curve with a SHA256 digest. - ECDSAWithSHA256 - // ECDSA on the NIST P-384 curve with a SHA384 digest. - ECDSAWithSHA384 - // ECDSA on the NIST P-521 curve with a SHA512 digest. - ECDSAWithSHA512 - // EdDSA on Curve25519 with a SHA512 digest. - PureEd25519 -) - -// CreateCertificateRequest is the request used to sign a new certificate. -type CreateCertificateRequest struct { - Template *x509.Certificate - CSR *x509.CertificateRequest - Lifetime time.Duration - Backdate time.Duration - RequestID string -} - -// CreateCertificateResponse is the response to a create certificate request. -type CreateCertificateResponse struct { - Certificate *x509.Certificate - CertificateChain []*x509.Certificate -} - -// RenewCertificateRequest is the request used to re-sign a certificate. -type RenewCertificateRequest struct { - Template *x509.Certificate - CSR *x509.CertificateRequest - Lifetime time.Duration - Backdate time.Duration - RequestID string -} - -// RenewCertificateResponse is the response to a renew certificate request. -type RenewCertificateResponse struct { - Certificate *x509.Certificate - CertificateChain []*x509.Certificate -} - -// RevokeCertificateRequest is the request used to revoke a certificate. -type RevokeCertificateRequest struct { - Certificate *x509.Certificate - SerialNumber string - Reason string - ReasonCode int - PassiveOnly bool - RequestID string -} - -// RevokeCertificateResponse is the response to a revoke certificate request. -type RevokeCertificateResponse struct { - Certificate *x509.Certificate - CertificateChain []*x509.Certificate -} - -// GetCertificateAuthorityRequest is the request used to get the root -// certificate from a CAS. -type GetCertificateAuthorityRequest struct { - Name string -} - -// GetCertificateAuthorityResponse is the response that contains -// the root certificate. -type GetCertificateAuthorityResponse struct { - RootCertificate *x509.Certificate -} - -// CreateCertificateAuthorityRequest is the request used to generate a root or -// intermediate certificate. -type CreateCertificateAuthorityRequest struct { - Name string - Type CertificateAuthorityType - Template *x509.Certificate - Lifetime time.Duration - Backdate time.Duration - RequestID string - Project string - Location string - - // Parent is the signer of the new CertificateAuthority. - Parent *CreateCertificateAuthorityResponse - - // CreateKey defines the KMS CreateKeyRequest to use when creating a new - // CertificateAuthority. If CreateKey is nil, a default algorithm will be - // used. - CreateKey *apiv1.CreateKeyRequest -} - -// CreateCertificateAuthorityResponse is the response for -// CreateCertificateAuthority method and contains the root or intermediate -// certificate generated as well as the CA chain. 
-type CreateCertificateAuthorityResponse struct { - Name string - Certificate *x509.Certificate - CertificateChain []*x509.Certificate - PublicKey crypto.PublicKey - PrivateKey crypto.PrivateKey - Signer crypto.Signer -} diff --git a/vendor/github.com/smallstep/certificates/cas/apiv1/services.go b/vendor/github.com/smallstep/certificates/cas/apiv1/services.go deleted file mode 100644 index d4dd3c8c..00000000 --- a/vendor/github.com/smallstep/certificates/cas/apiv1/services.go +++ /dev/null @@ -1,71 +0,0 @@ -package apiv1 - -import ( - "net/http" - "strings" -) - -// CertificateAuthorityService is the interface implemented to support external -// certificate authorities. -type CertificateAuthorityService interface { - CreateCertificate(req *CreateCertificateRequest) (*CreateCertificateResponse, error) - RenewCertificate(req *RenewCertificateRequest) (*RenewCertificateResponse, error) - RevokeCertificate(req *RevokeCertificateRequest) (*RevokeCertificateResponse, error) -} - -// CertificateAuthorityGetter is an interface implemented by a -// CertificateAuthorityService that has a method to get the root certificate. -type CertificateAuthorityGetter interface { - GetCertificateAuthority(req *GetCertificateAuthorityRequest) (*GetCertificateAuthorityResponse, error) -} - -// CertificateAuthorityCreator is an interface implamented by a -// CertificateAuthorityService that has a method to create a new certificate -// authority. -type CertificateAuthorityCreator interface { - CreateCertificateAuthority(req *CreateCertificateAuthorityRequest) (*CreateCertificateAuthorityResponse, error) -} - -// Type represents the CAS type used. -type Type string - -const ( - // DefaultCAS is a CertificateAuthorityService using software. - DefaultCAS = "" - // SoftCAS is a CertificateAuthorityService using software. - SoftCAS = "softcas" - // CloudCAS is a CertificateAuthorityService using Google Cloud CAS. - CloudCAS = "cloudcas" - // StepCAS is a CertificateAuthorityService using another step-ca instance. - StepCAS = "stepcas" -) - -// String returns a string from the type. It will always return the lower case -// version of the Type, as we need a standard type to compare and use as the -// registry key. -func (t Type) String() string { - if t == "" { - return SoftCAS - } - return strings.ToLower(string(t)) -} - -// ErrNotImplemented is the type of error returned if an operation is not -// implemented. -type ErrNotImplemented struct { - Message string -} - -// ErrNotImplemented implements the error interface. -func (e ErrNotImplemented) Error() string { - if e.Message != "" { - return e.Message - } - return "not implemented" -} - -// StatusCode implements the StatusCoder interface and returns the HTTP 501 -// error. -func (e ErrNotImplemented) StatusCode() int { - return http.StatusNotImplemented -} diff --git a/vendor/github.com/smallstep/certificates/cas/cas.go b/vendor/github.com/smallstep/certificates/cas/cas.go deleted file mode 100644 index b564c8d2..00000000 --- a/vendor/github.com/smallstep/certificates/cas/cas.go +++ /dev/null @@ -1,58 +0,0 @@ -package cas - -import ( - "context" - "strings" - - "github.com/pkg/errors" - "github.com/smallstep/certificates/cas/apiv1" - - // Enable default implementation - _ "github.com/smallstep/certificates/cas/softcas" -) - -// CertificateAuthorityService is the interface implemented by all the CAS. 
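Editorial note: services.go above pairs its error types with an HTTP status: ErrNotImplemented satisfies both error and the StatusCoder contract, so transport code can map errors to response codes without string matching. A sketch of how a caller might consume that contract, assuming the same single-method StatusCoder interface (errNotImplemented and statusFor here are illustrative, not the vendored identifiers):

package main

import (
    "errors"
    "fmt"
    "net/http"
)

// StatusCoder is the contract: any error that knows its HTTP status.
type StatusCoder interface{ StatusCode() int }

type errNotImplemented struct{ msg string }

func (e errNotImplemented) Error() string {
    if e.msg != "" {
        return e.msg
    }
    return "not implemented"
}

func (e errNotImplemented) StatusCode() int { return http.StatusNotImplemented }

// statusFor walks the error chain for a StatusCoder and falls back to 500.
func statusFor(err error) int {
    var sc StatusCoder
    if errors.As(err, &sc) {
        return sc.StatusCode()
    }
    return http.StatusInternalServerError
}

func main() {
    err := fmt.Errorf("revoke: %w", errNotImplemented{})
    fmt.Println(statusFor(err)) // 501
}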
-type CertificateAuthorityService = apiv1.CertificateAuthorityService - -// CertificateAuthorityCreator is the interface implemented by all CAS that can create a new authority. -type CertificateAuthorityCreator = apiv1.CertificateAuthorityCreator - -// New creates a new CertificateAuthorityService using the given options. -func New(ctx context.Context, opts apiv1.Options) (CertificateAuthorityService, error) { - if err := opts.Validate(); err != nil { - return nil, err - } - - t := apiv1.Type(strings.ToLower(opts.Type)) - if t == apiv1.DefaultCAS { - t = apiv1.SoftCAS - } - - fn, ok := apiv1.LoadCertificateAuthorityServiceNewFunc(t) - if !ok { - return nil, errors.Errorf("unsupported cas type '%s'", t) - } - return fn(ctx, opts) -} - -// NewCreator creates a new CertificateAuthorityCreator using the given options. -func NewCreator(ctx context.Context, opts apiv1.Options) (CertificateAuthorityCreator, error) { - opts.IsCreator = true - - t := apiv1.Type(strings.ToLower(opts.Type)) - if t == apiv1.DefaultCAS { - t = apiv1.SoftCAS - } - - svc, err := New(ctx, opts) - if err != nil { - return nil, err - } - - creator, ok := svc.(CertificateAuthorityCreator) - if !ok { - return nil, errors.Errorf("cas type '%s' does not implements CertificateAuthorityCreator", t) - } - - return creator, nil -} diff --git a/vendor/github.com/smallstep/certificates/cas/softcas/softcas.go b/vendor/github.com/smallstep/certificates/cas/softcas/softcas.go deleted file mode 100644 index 21760490..00000000 --- a/vendor/github.com/smallstep/certificates/cas/softcas/softcas.go +++ /dev/null @@ -1,212 +0,0 @@ -package softcas - -import ( - "context" - "crypto" - "crypto/x509" - "time" - - "github.com/pkg/errors" - "github.com/smallstep/certificates/cas/apiv1" - "github.com/smallstep/certificates/kms" - kmsapi "github.com/smallstep/certificates/kms/apiv1" - "go.step.sm/crypto/x509util" -) - -func init() { - apiv1.Register(apiv1.SoftCAS, func(ctx context.Context, opts apiv1.Options) (apiv1.CertificateAuthorityService, error) { - return New(ctx, opts) - }) -} - -var now = func() time.Time { - return time.Now() -} - -// SoftCAS implements a Certificate Authority Service using Golang or KMS -// crypto. This is the default CAS used in step-ca. -type SoftCAS struct { - CertificateChain []*x509.Certificate - Signer crypto.Signer - KeyManager kms.KeyManager -} - -// New creates a new CertificateAuthorityService implementation using Golang or KMS -// crypto. -func New(ctx context.Context, opts apiv1.Options) (*SoftCAS, error) { - if !opts.IsCreator { - switch { - case len(opts.CertificateChain) == 0: - return nil, errors.New("softCAS 'CertificateChain' cannot be nil") - case opts.Signer == nil: - return nil, errors.New("softCAS 'signer' cannot be nil") - } - } - return &SoftCAS{ - CertificateChain: opts.CertificateChain, - Signer: opts.Signer, - KeyManager: opts.KeyManager, - }, nil -} - -// CreateCertificate signs a new certificate using Golang or KMS crypto. -func (c *SoftCAS) CreateCertificate(req *apiv1.CreateCertificateRequest) (*apiv1.CreateCertificateResponse, error) { - switch { - case req.Template == nil: - return nil, errors.New("createCertificateRequest `template` cannot be nil") - case req.Lifetime == 0: - return nil, errors.New("createCertificateRequest `lifetime` cannot be 0") - } - - t := now() - // Provisioners can also set specific values. 
- if req.Template.NotBefore.IsZero() { - req.Template.NotBefore = t.Add(-1 * req.Backdate) - } - if req.Template.NotAfter.IsZero() { - req.Template.NotAfter = t.Add(req.Lifetime) - } - req.Template.Issuer = c.CertificateChain[0].Subject - - cert, err := x509util.CreateCertificate(req.Template, c.CertificateChain[0], req.Template.PublicKey, c.Signer) - if err != nil { - return nil, err - } - - return &apiv1.CreateCertificateResponse{ - Certificate: cert, - CertificateChain: c.CertificateChain, - }, nil -} - -// RenewCertificate signs the given certificate template using Golang or KMS crypto. -func (c *SoftCAS) RenewCertificate(req *apiv1.RenewCertificateRequest) (*apiv1.RenewCertificateResponse, error) { - switch { - case req.Template == nil: - return nil, errors.New("createCertificateRequest `template` cannot be nil") - case req.Lifetime == 0: - return nil, errors.New("createCertificateRequest `lifetime` cannot be 0") - } - - t := now() - req.Template.NotBefore = t.Add(-1 * req.Backdate) - req.Template.NotAfter = t.Add(req.Lifetime) - req.Template.Issuer = c.CertificateChain[0].Subject - - cert, err := x509util.CreateCertificate(req.Template, c.CertificateChain[0], req.Template.PublicKey, c.Signer) - if err != nil { - return nil, err - } - - return &apiv1.RenewCertificateResponse{ - Certificate: cert, - CertificateChain: c.CertificateChain, - }, nil -} - -// RevokeCertificate revokes the given certificate in step-ca. In SoftCAS this -// operation is a no-op as the actual revoke will happen when we store the entry -// in the db. -func (c *SoftCAS) RevokeCertificate(req *apiv1.RevokeCertificateRequest) (*apiv1.RevokeCertificateResponse, error) { - return &apiv1.RevokeCertificateResponse{ - Certificate: req.Certificate, - CertificateChain: c.CertificateChain, - }, nil -} - -// CreateCertificateAuthority creates a root or an intermediate certificate. 
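Editorial note: CreateCertificate and RenewCertificate above fill the validity window from two durations: NotBefore is pushed back by the backdate (absorbing clock skew between CA and clients) and NotAfter is now plus the requested lifetime, so the usable lifetime is unchanged. A worked sketch of that window math (window is an illustrative helper name):

package main

import (
    "fmt"
    "time"
)

// window computes validity bounds the way SoftCAS does when the template
// leaves them zero: backdate shifts the start, not the end.
func window(now time.Time, lifetime, backdate time.Duration) (notBefore, notAfter time.Time) {
    return now.Add(-backdate), now.Add(lifetime)
}

func main() {
    now := time.Date(2021, 9, 1, 12, 0, 0, 0, time.UTC)
    nb, na := window(now, 24*time.Hour, time.Minute)
    fmt.Println(nb) // 2021-09-01 11:59:00 +0000 UTC
    fmt.Println(na) // 2021-09-02 12:00:00 +0000 UTC
    // The authority's Sign recovers the CAS lifetime from a signed leaf as
    // NotAfter - (NotBefore + backdate), the inverse of this calculation.
    fmt.Println(na.Sub(nb.Add(time.Minute))) // 24h0m0s
}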
-func (c *SoftCAS) CreateCertificateAuthority(req *apiv1.CreateCertificateAuthorityRequest) (*apiv1.CreateCertificateAuthorityResponse, error) { - switch { - case req.Template == nil: - return nil, errors.New("createCertificateAuthorityRequest `template` cannot be nil") - case req.Lifetime == 0: - return nil, errors.New("createCertificateAuthorityRequest `lifetime` cannot be 0") - case req.Type == apiv1.IntermediateCA && req.Parent == nil: - return nil, errors.New("createCertificateAuthorityRequest `parent` cannot be nil") - case req.Type == apiv1.IntermediateCA && req.Parent.Certificate == nil: - return nil, errors.New("createCertificateAuthorityRequest `parent.template` cannot be nil") - case req.Type == apiv1.IntermediateCA && req.Parent.Signer == nil: - return nil, errors.New("createCertificateAuthorityRequest `parent.signer` cannot be nil") - } - - key, err := c.createKey(req.CreateKey) - if err != nil { - return nil, err - } - - signer, err := c.createSigner(&key.CreateSignerRequest) - if err != nil { - return nil, err - } - - t := now() - if req.Template.NotBefore.IsZero() { - req.Template.NotBefore = t.Add(-1 * req.Backdate) - } - if req.Template.NotAfter.IsZero() { - req.Template.NotAfter = t.Add(req.Lifetime) - } - - var cert *x509.Certificate - switch req.Type { - case apiv1.RootCA: - cert, err = x509util.CreateCertificate(req.Template, req.Template, signer.Public(), signer) - if err != nil { - return nil, err - } - case apiv1.IntermediateCA: - cert, err = x509util.CreateCertificate(req.Template, req.Parent.Certificate, signer.Public(), req.Parent.Signer) - if err != nil { - return nil, err - } - default: - return nil, errors.Errorf("createCertificateAuthorityRequest `type=%d' is invalid or not supported", req.Type) - } - - // Add the parent - var chain []*x509.Certificate - if req.Parent != nil { - chain = append(chain, req.Parent.Certificate) - chain = append(chain, req.Parent.CertificateChain...) - } - - return &apiv1.CreateCertificateAuthorityResponse{ - Name: cert.Subject.CommonName, - Certificate: cert, - CertificateChain: chain, - PublicKey: key.PublicKey, - PrivateKey: key.PrivateKey, - Signer: signer, - }, nil -} - -// initializeKeyManager initiazes the default key manager if was not given. -func (c *SoftCAS) initializeKeyManager() (err error) { - if c.KeyManager == nil { - c.KeyManager, err = kms.New(context.Background(), kmsapi.Options{ - Type: string(kmsapi.DefaultKMS), - }) - } - return -} - -// createKey uses the configured kms to create a key. 
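Editorial note: in CreateCertificateAuthority above, the only difference between the root and intermediate branches is which certificate and signer do the signing: a root signs itself, an intermediate is signed by its parent. The same distinction expressed directly against the standard library, as a minimal self-signed-root sketch (all names and the ten-year lifetime are illustrative):

package main

import (
    "crypto/ecdsa"
    "crypto/elliptic"
    "crypto/rand"
    "crypto/x509"
    "crypto/x509/pkix"
    "fmt"
    "math/big"
    "time"
)

func main() {
    key, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
    tpl := &x509.Certificate{
        SerialNumber:          big.NewInt(1),
        Subject:               pkix.Name{CommonName: "Example Root CA"},
        NotBefore:             time.Now().Add(-time.Minute),
        NotAfter:              time.Now().Add(10 * 365 * 24 * time.Hour),
        IsCA:                  true,
        BasicConstraintsValid: true,
        KeyUsage:              x509.KeyUsageCertSign | x509.KeyUsageCRLSign,
    }
    // Root: template and parent are the same certificate, signed by its own
    // key. For an intermediate, the parent certificate and the parent's
    // signer would be passed in place of tpl and key.
    der, err := x509.CreateCertificate(rand.Reader, tpl, tpl, &key.PublicKey, key)
    if err != nil {
        panic(err)
    }
    cert, _ := x509.ParseCertificate(der)
    fmt.Println(cert.Subject.CommonName, cert.IsCA) // Example Root CA true
}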
-func (c *SoftCAS) createKey(req *kmsapi.CreateKeyRequest) (*kmsapi.CreateKeyResponse, error) { - if err := c.initializeKeyManager(); err != nil { - return nil, err - } - if req == nil { - req = &kmsapi.CreateKeyRequest{ - SignatureAlgorithm: kmsapi.ECDSAWithSHA256, - } - } - return c.KeyManager.CreateKey(req) -} - -// createSigner uses the configured kms to create a singer -func (c *SoftCAS) createSigner(req *kmsapi.CreateSignerRequest) (crypto.Signer, error) { - if err := c.initializeKeyManager(); err != nil { - return nil, err - } - return c.KeyManager.CreateSigner(req) -} diff --git a/vendor/github.com/smallstep/certificates/db/db.go b/vendor/github.com/smallstep/certificates/db/db.go deleted file mode 100644 index 2643e577..00000000 --- a/vendor/github.com/smallstep/certificates/db/db.go +++ /dev/null @@ -1,505 +0,0 @@ -package db - -import ( - "crypto/x509" - "encoding/json" - "strconv" - "strings" - "time" - - "github.com/pkg/errors" - "github.com/smallstep/nosql" - "github.com/smallstep/nosql/database" - "golang.org/x/crypto/ssh" -) - -var ( - certsTable = []byte("x509_certs") - revokedCertsTable = []byte("revoked_x509_certs") - revokedSSHCertsTable = []byte("revoked_ssh_certs") - usedOTTTable = []byte("used_ott") - sshCertsTable = []byte("ssh_certs") - sshHostsTable = []byte("ssh_hosts") - sshUsersTable = []byte("ssh_users") - sshHostPrincipalsTable = []byte("ssh_host_principals") -) - -// ErrAlreadyExists can be returned if the DB attempts to set a key that has -// been previously set. -var ErrAlreadyExists = errors.New("already exists") - -// Config represents the JSON attributes used for configuring a step-ca DB. -type Config struct { - Type string `json:"type"` - DataSource string `json:"dataSource"` - ValueDir string `json:"valueDir,omitempty"` - Database string `json:"database,omitempty"` - - // BadgerFileLoadingMode can be set to 'FileIO' (instead of the default - // 'MemoryMap') to avoid memory-mapping log files. This can be useful - // in environments with low RAM - BadgerFileLoadingMode string `json:"badgerFileLoadingMode"` -} - -// AuthDB is an interface over an Authority DB client that implements a nosql.DB interface. -type AuthDB interface { - IsRevoked(sn string) (bool, error) - IsSSHRevoked(sn string) (bool, error) - Revoke(rci *RevokedCertificateInfo) error - RevokeSSH(rci *RevokedCertificateInfo) error - GetCertificate(serialNumber string) (*x509.Certificate, error) - StoreCertificate(crt *x509.Certificate) error - UseToken(id, tok string) (bool, error) - IsSSHHost(name string) (bool, error) - StoreSSHCertificate(crt *ssh.Certificate) error - GetSSHHostPrincipals() ([]string, error) - Shutdown() error -} - -// DB is a wrapper over the nosql.DB interface. -type DB struct { - nosql.DB - isUp bool -} - -// New returns a new database client that implements the AuthDB interface. -func New(c *Config) (AuthDB, error) { - if c == nil { - return newSimpleDB(c) - } - - opts := []nosql.Option{nosql.WithDatabase(c.Database), - nosql.WithValueDir(c.ValueDir)} - if len(c.BadgerFileLoadingMode) > 0 { - opts = append(opts, nosql.WithBadgerFileLoadingMode(c.BadgerFileLoadingMode)) - } - - db, err := nosql.New(c.Type, c.DataSource, opts...) 
- if err != nil { - return nil, errors.Wrapf(err, "Error opening database of Type %s with source %s", c.Type, c.DataSource) - } - - tables := [][]byte{ - revokedCertsTable, certsTable, usedOTTTable, - sshCertsTable, sshHostsTable, sshHostPrincipalsTable, sshUsersTable, - revokedSSHCertsTable, - } - for _, b := range tables { - if err := db.CreateTable(b); err != nil { - return nil, errors.Wrapf(err, "error creating table %s", - string(b)) - } - } - - return &DB{db, true}, nil -} - -// RevokedCertificateInfo contains information regarding the certificate -// revocation action. -type RevokedCertificateInfo struct { - Serial string - ProvisionerID string - ReasonCode int - Reason string - RevokedAt time.Time - TokenID string - MTLS bool -} - -// IsRevoked returns whether or not a certificate with the given identifier -// has been revoked. -// In the case of an X509 Certificate the `id` should be the Serial Number of -// the Certificate. -func (db *DB) IsRevoked(sn string) (bool, error) { - // If the DB is nil then act as pass through. - if db == nil { - return false, nil - } - - // If the error is `Not Found` then the certificate has not been revoked. - // Any other error should be propagated to the caller. - if _, err := db.Get(revokedCertsTable, []byte(sn)); err != nil { - if nosql.IsErrNotFound(err) { - return false, nil - } - return false, errors.Wrap(err, "error checking revocation bucket") - } - - // This certificate has been revoked. - return true, nil -} - -// IsSSHRevoked returns whether or not a certificate with the given identifier -// has been revoked. -// In the case of an X509 Certificate the `id` should be the Serial Number of -// the Certificate. -func (db *DB) IsSSHRevoked(sn string) (bool, error) { - // If the DB is nil then act as pass through. - if db == nil { - return false, nil - } - - // If the error is `Not Found` then the certificate has not been revoked. - // Any other error should be propagated to the caller. - if _, err := db.Get(revokedSSHCertsTable, []byte(sn)); err != nil { - if nosql.IsErrNotFound(err) { - return false, nil - } - return false, errors.Wrap(err, "error checking revocation bucket") - } - - // This certificate has been revoked. - return true, nil -} - -// Revoke adds a certificate to the revocation table. -func (db *DB) Revoke(rci *RevokedCertificateInfo) error { - rcib, err := json.Marshal(rci) - if err != nil { - return errors.Wrap(err, "error marshaling revoked certificate info") - } - - _, swapped, err := db.CmpAndSwap(revokedCertsTable, []byte(rci.Serial), nil, rcib) - switch { - case err != nil: - return errors.Wrap(err, "error AuthDB CmpAndSwap") - case !swapped: - return ErrAlreadyExists - default: - return nil - } -} - -// RevokeSSH adds a SSH certificate to the revocation table. -func (db *DB) RevokeSSH(rci *RevokedCertificateInfo) error { - rcib, err := json.Marshal(rci) - if err != nil { - return errors.Wrap(err, "error marshaling revoked certificate info") - } - - _, swapped, err := db.CmpAndSwap(revokedSSHCertsTable, []byte(rci.Serial), nil, rcib) - switch { - case err != nil: - return errors.Wrap(err, "error AuthDB CmpAndSwap") - case !swapped: - return ErrAlreadyExists - default: - return nil - } -} - -// GetCertificate retrieves a certificate by the serial number. 
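Editorial note: Revoke and RevokeSSH above rely on CmpAndSwap with a nil old value, which only writes when the key is absent; a second revocation of the same serial loses the swap and surfaces as ErrAlreadyExists. The nosql layer provides this atomically; a sketch of the same insert-if-absent contract over an in-memory stand-in (kv, setIfAbsent, and revoke are illustrative names):

package main

import (
    "errors"
    "fmt"
    "sync"
)

var ErrAlreadyExists = errors.New("already exists")

type kv struct {
    mu sync.Mutex
    m  map[string][]byte
}

// setIfAbsent mirrors CmpAndSwap with a nil old value: it writes only when
// the key does not exist yet, which is what makes revocation race-safe.
func (s *kv) setIfAbsent(key string, value []byte) bool {
    s.mu.Lock()
    defer s.mu.Unlock()
    if _, ok := s.m[key]; ok {
        return false
    }
    s.m[key] = value
    return true
}

func revoke(s *kv, serial string, info []byte) error {
    if !s.setIfAbsent("revoked_x509_certs/"+serial, info) {
        return ErrAlreadyExists
    }
    return nil
}

func main() {
    s := &kv{m: map[string][]byte{}}
    fmt.Println(revoke(s, "123", []byte(`{"reason":"superseded"}`))) // <nil>
    fmt.Println(revoke(s, "123", []byte(`{"reason":"superseded"}`))) // already exists
}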
-func (db *DB) GetCertificate(serialNumber string) (*x509.Certificate, error) { - asn1Data, err := db.Get(certsTable, []byte(serialNumber)) - if err != nil { - return nil, errors.Wrap(err, "database Get error") - } - cert, err := x509.ParseCertificate(asn1Data) - if err != nil { - return nil, errors.Wrapf(err, "error parsing certificate with serial number %s", serialNumber) - } - return cert, nil -} - -// StoreCertificate stores a certificate PEM. -func (db *DB) StoreCertificate(crt *x509.Certificate) error { - if err := db.Set(certsTable, []byte(crt.SerialNumber.String()), crt.Raw); err != nil { - return errors.Wrap(err, "database Set error") - } - return nil -} - -// UseToken returns true if we were able to successfully store the token for -// for the first time, false otherwise. -func (db *DB) UseToken(id, tok string) (bool, error) { - _, swapped, err := db.CmpAndSwap(usedOTTTable, []byte(id), nil, []byte(tok)) - if err != nil { - return false, errors.Wrapf(err, "error storing used token %s/%s", - string(usedOTTTable), id) - } - return swapped, nil -} - -// IsSSHHost returns if a principal is present in the ssh hosts table. -func (db *DB) IsSSHHost(principal string) (bool, error) { - if _, err := db.Get(sshHostsTable, []byte(strings.ToLower(principal))); err != nil { - if database.IsErrNotFound(err) { - return false, nil - } - return false, errors.Wrap(err, "database Get error") - } - return true, nil -} - -type sshHostPrincipalData struct { - Serial string - Expiry uint64 -} - -// StoreSSHCertificate stores an SSH certificate. -func (db *DB) StoreSSHCertificate(crt *ssh.Certificate) error { - serial := strconv.FormatUint(crt.Serial, 10) - tx := new(database.Tx) - tx.Set(sshCertsTable, []byte(serial), crt.Marshal()) - if crt.CertType == ssh.HostCert { - for _, p := range crt.ValidPrincipals { - hostPrincipalData, err := json.Marshal(sshHostPrincipalData{ - Serial: serial, - Expiry: crt.ValidBefore, - }) - if err != nil { - return err - } - tx.Set(sshHostsTable, []byte(strings.ToLower(p)), []byte(serial)) - tx.Set(sshHostPrincipalsTable, []byte(strings.ToLower(p)), hostPrincipalData) - } - } else { - for _, p := range crt.ValidPrincipals { - tx.Set(sshUsersTable, []byte(strings.ToLower(p)), []byte(serial)) - } - } - if err := db.Update(tx); err != nil { - return errors.Wrap(err, "database Update error") - } - return nil -} - -// GetSSHHostPrincipals gets a list of all valid host principals. -func (db *DB) GetSSHHostPrincipals() ([]string, error) { - entries, err := db.List(sshHostPrincipalsTable) - if err != nil { - return nil, err - } - var principals []string - for _, e := range entries { - var data sshHostPrincipalData - if err := json.Unmarshal(e.Value, &data); err != nil { - return nil, err - } - if time.Unix(int64(data.Expiry), 0).After(time.Now()) { - principals = append(principals, string(e.Key)) - } - } - return principals, nil -} - -// Shutdown sends a shutdown message to the database. -func (db *DB) Shutdown() error { - if db.isUp { - if err := db.Close(); err != nil { - return errors.Wrap(err, "database shutdown error") - } - db.isUp = false - } - return nil -} - -// MockAuthDB mocks the AuthDB interface. 
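Editorial note: GetSSHHostPrincipals above stores each host principal alongside the certificate's ValidBefore and filters expired entries at read time instead of deleting them eagerly. A compact sketch of that read-time filter; the Expiry field follows the sshHostPrincipalData record above, while hostRecord and livePrincipals are illustrative names:

package main

import (
    "fmt"
    "time"
)

type hostRecord struct {
    Principal string
    Expiry    uint64 // ssh.Certificate.ValidBefore: seconds since the Unix epoch
}

// livePrincipals keeps only records whose expiry is still in the future,
// mirroring the filtering in GetSSHHostPrincipals.
func livePrincipals(records []hostRecord, now time.Time) []string {
    var out []string
    for _, r := range records {
        if time.Unix(int64(r.Expiry), 0).After(now) {
            out = append(out, r.Principal)
        }
    }
    return out
}

func main() {
    now := time.Date(2021, 9, 1, 0, 0, 0, 0, time.UTC)
    recs := []hostRecord{
        {"web.internal", uint64(now.Add(time.Hour).Unix())},
        {"old.internal", uint64(now.Add(-time.Hour).Unix())},
    }
    fmt.Println(livePrincipals(recs, now)) // [web.internal]
}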
// -type MockAuthDB struct { - Err error - Ret1 interface{} - MIsRevoked func(string) (bool, error) - MIsSSHRevoked func(string) (bool, error) - MRevoke func(rci *RevokedCertificateInfo) error - MRevokeSSH func(rci *RevokedCertificateInfo) error - MGetCertificate func(serialNumber string) (*x509.Certificate, error) - MStoreCertificate func(crt *x509.Certificate) error - MUseToken func(id, tok string) (bool, error) - MIsSSHHost func(principal string) (bool, error) - MStoreSSHCertificate func(crt *ssh.Certificate) error - MGetSSHHostPrincipals func() ([]string, error) - MShutdown func() error -} - -// IsRevoked mock. -func (m *MockAuthDB) IsRevoked(sn string) (bool, error) { - if m.MIsRevoked != nil { - return m.MIsRevoked(sn) - } - return m.Ret1.(bool), m.Err -} - -// IsSSHRevoked mock. -func (m *MockAuthDB) IsSSHRevoked(sn string) (bool, error) { - if m.MIsSSHRevoked != nil { - return m.MIsSSHRevoked(sn) - } - return m.Ret1.(bool), m.Err -} - -// UseToken mock. -func (m *MockAuthDB) UseToken(id, tok string) (bool, error) { - if m.MUseToken != nil { - return m.MUseToken(id, tok) - } - if m.Ret1 == nil { - return false, m.Err - } - return m.Ret1.(bool), m.Err -} - -// Revoke mock. -func (m *MockAuthDB) Revoke(rci *RevokedCertificateInfo) error { - if m.MRevoke != nil { - return m.MRevoke(rci) - } - return m.Err -} - -// RevokeSSH mock. -func (m *MockAuthDB) RevokeSSH(rci *RevokedCertificateInfo) error { - if m.MRevokeSSH != nil { - return m.MRevokeSSH(rci) - } - return m.Err -} - -// GetCertificate mock. -func (m *MockAuthDB) GetCertificate(serialNumber string) (*x509.Certificate, error) { - if m.MGetCertificate != nil { - return m.MGetCertificate(serialNumber) - } - return m.Ret1.(*x509.Certificate), m.Err -} - -// StoreCertificate mock. -func (m *MockAuthDB) StoreCertificate(crt *x509.Certificate) error { - if m.MStoreCertificate != nil { - return m.MStoreCertificate(crt) - } - return m.Err -} - -// IsSSHHost mock. -func (m *MockAuthDB) IsSSHHost(principal string) (bool, error) { - if m.MIsSSHHost != nil { - return m.MIsSSHHost(principal) - } - return m.Ret1.(bool), m.Err -} - -// StoreSSHCertificate mock. -func (m *MockAuthDB) StoreSSHCertificate(crt *ssh.Certificate) error { - if m.MStoreSSHCertificate != nil { - return m.MStoreSSHCertificate(crt) - } - return m.Err -} - -// GetSSHHostPrincipals mock. -func (m *MockAuthDB) GetSSHHostPrincipals() ([]string, error) { - if m.MGetSSHHostPrincipals != nil { - return m.MGetSSHHostPrincipals() - } - return m.Ret1.([]string), m.Err -} - -// Shutdown mock. 
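Editorial note: MockAuthDB above uses the function-field style of mocking: every interface method checks an optional override and falls back to canned Ret1/Err values, so a test stubs only the behavior it cares about. A minimal sketch of the pattern and its use (Revoker and MockRevoker are illustrative, single-method reductions of AuthDB):

package main

import (
    "errors"
    "fmt"
)

type Revoker interface {
    IsRevoked(serial string) (bool, error)
}

// MockRevoker follows the MockAuthDB convention: an optional per-method
// override plus default return values.
type MockRevoker struct {
    Err        error
    Ret1       bool
    MIsRevoked func(serial string) (bool, error)
}

func (m *MockRevoker) IsRevoked(serial string) (bool, error) {
    if m.MIsRevoked != nil {
        return m.MIsRevoked(serial)
    }
    return m.Ret1, m.Err
}

func main() {
    var db Revoker = &MockRevoker{
        MIsRevoked: func(serial string) (bool, error) {
            if serial == "123" {
                return true, nil
            }
            return false, errors.New("unknown serial")
        },
    }
    fmt.Println(db.IsRevoked("123")) // true <nil>
}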
-func (m *MockAuthDB) Shutdown() error { - if m.MShutdown != nil { - return m.MShutdown() - } - return m.Err -} - -// MockNoSQLDB // -type MockNoSQLDB struct { - Err error - Ret1, Ret2 interface{} - MGet func(bucket, key []byte) ([]byte, error) - MSet func(bucket, key, value []byte) error - MOpen func(dataSourceName string, opt ...database.Option) error - MClose func() error - MCreateTable func(bucket []byte) error - MDeleteTable func(bucket []byte) error - MDel func(bucket, key []byte) error - MList func(bucket []byte) ([]*database.Entry, error) - MUpdate func(tx *database.Tx) error - MCmpAndSwap func(bucket, key, old, newval []byte) ([]byte, bool, error) -} - -// CmpAndSwap mock -func (m *MockNoSQLDB) CmpAndSwap(bucket, key, old, newval []byte) ([]byte, bool, error) { - if m.MCmpAndSwap != nil { - return m.MCmpAndSwap(bucket, key, old, newval) - } - if m.Ret1 == nil { - return nil, false, m.Err - } - return m.Ret1.([]byte), m.Ret2.(bool), m.Err -} - -// Get mock -func (m *MockNoSQLDB) Get(bucket, key []byte) ([]byte, error) { - if m.MGet != nil { - return m.MGet(bucket, key) - } - if m.Ret1 == nil { - return nil, m.Err - } - return m.Ret1.([]byte), m.Err -} - -// Set mock -func (m *MockNoSQLDB) Set(bucket, key, value []byte) error { - if m.MSet != nil { - return m.MSet(bucket, key, value) - } - return m.Err -} - -// Open mock -func (m *MockNoSQLDB) Open(dataSourceName string, opt ...database.Option) error { - if m.MOpen != nil { - return m.MOpen(dataSourceName, opt...) - } - return m.Err -} - -// Close mock -func (m *MockNoSQLDB) Close() error { - if m.MClose != nil { - return m.MClose() - } - return m.Err -} - -// CreateTable mock -func (m *MockNoSQLDB) CreateTable(bucket []byte) error { - if m.MCreateTable != nil { - return m.MCreateTable(bucket) - } - return m.Err -} - -// DeleteTable mock -func (m *MockNoSQLDB) DeleteTable(bucket []byte) error { - if m.MDeleteTable != nil { - return m.MDeleteTable(bucket) - } - return m.Err -} - -// Del mock -func (m *MockNoSQLDB) Del(bucket, key []byte) error { - if m.MDel != nil { - return m.MDel(bucket, key) - } - return m.Err -} - -// List mock -func (m *MockNoSQLDB) List(bucket []byte) ([]*database.Entry, error) { - if m.MList != nil { - return m.MList(bucket) - } - return m.Ret1.([]*database.Entry), m.Err -} - -// Update mock -func (m *MockNoSQLDB) Update(tx *database.Tx) error { - if m.MUpdate != nil { - return m.MUpdate(tx) - } - return m.Err -} diff --git a/vendor/github.com/smallstep/certificates/db/simple.go b/vendor/github.com/smallstep/certificates/db/simple.go deleted file mode 100644 index 0e5426ec..00000000 --- a/vendor/github.com/smallstep/certificates/db/simple.go +++ /dev/null @@ -1,149 +0,0 @@ -package db - -import ( - "crypto/x509" - "sync" - "time" - - "github.com/pkg/errors" - "github.com/smallstep/nosql/database" - "golang.org/x/crypto/ssh" -) - -// ErrNotImplemented is an error returned when an operation is Not Implemented. -var ErrNotImplemented = errors.Errorf("not implemented") - -// SimpleDB is a barebones implementation of the DB interface. It is NOT an -// in memory implementation of the DB, but rather the bare minimum of -// functionality that the CA requires to operate securely. 
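Editorial note: the one piece of real state SimpleDB keeps, shown below, is the used-token map. sync.Map.LoadOrStore both records a token and reports whether it was already present, which is exactly the one-time-use check the CA needs. A sketch of that idiom (tokenStore and useToken are illustrative names):

package main

import (
    "fmt"
    "sync"
)

type tokenStore struct{ used sync.Map }

// useToken returns true the first time an id is presented and false on
// replay; LoadOrStore makes the check-and-record step a single atomic op.
func (s *tokenStore) useToken(id, tok string) bool {
    _, loaded := s.used.LoadOrStore(id, tok)
    return !loaded
}

func main() {
    var s tokenStore
    fmt.Println(s.useToken("jti-1", "eyJ...")) // true: first use succeeds
    fmt.Println(s.useToken("jti-1", "eyJ...")) // false: replayed token rejected
}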
-type SimpleDB struct { - usedTokens *sync.Map -} - -func newSimpleDB(c *Config) (AuthDB, error) { - db := &SimpleDB{} - db.usedTokens = new(sync.Map) - return db, nil -} - -// IsRevoked noop -func (s *SimpleDB) IsRevoked(sn string) (bool, error) { - return false, nil -} - -// IsSSHRevoked noop -func (s *SimpleDB) IsSSHRevoked(sn string) (bool, error) { - return false, nil -} - -// Revoke returns a "NotImplemented" error. -func (s *SimpleDB) Revoke(rci *RevokedCertificateInfo) error { - return ErrNotImplemented -} - -// RevokeSSH returns a "NotImplemented" error. -func (s *SimpleDB) RevokeSSH(rci *RevokedCertificateInfo) error { - return ErrNotImplemented -} - -// GetCertificate returns a "NotImplemented" error. -func (s *SimpleDB) GetCertificate(serialNumber string) (*x509.Certificate, error) { - return nil, ErrNotImplemented -} - -// StoreCertificate returns a "NotImplemented" error. -func (s *SimpleDB) StoreCertificate(crt *x509.Certificate) error { - return ErrNotImplemented -} - -type usedToken struct { - UsedAt int64 `json:"ua,omitempty"` - Token string `json:"tok,omitempty"` -} - -// UseToken returns a "NotImplemented" error. -func (s *SimpleDB) UseToken(id, tok string) (bool, error) { - if _, ok := s.usedTokens.LoadOrStore(id, &usedToken{ - UsedAt: time.Now().Unix(), - Token: tok, - }); ok { - // Token already exists in DB. - return false, nil - } - // Successfully stored token. - return true, nil -} - -// IsSSHHost returns a "NotImplemented" error. -func (s *SimpleDB) IsSSHHost(principal string) (bool, error) { - return false, ErrNotImplemented -} - -// StoreSSHCertificate returns a "NotImplemented" error. -func (s *SimpleDB) StoreSSHCertificate(crt *ssh.Certificate) error { - return ErrNotImplemented -} - -// GetSSHHostPrincipals returns a "NotImplemented" error. -func (s *SimpleDB) GetSSHHostPrincipals() ([]string, error) { - return nil, ErrNotImplemented -} - -// Shutdown returns nil -func (s *SimpleDB) Shutdown() error { - return nil -} - -// nosql.DB interface implementation // - -// Open opens the database available with the given options. -func (s *SimpleDB) Open(dataSourceName string, opt ...database.Option) error { - return ErrNotImplemented -} - -// Close closes the current database. -func (s *SimpleDB) Close() error { - return ErrNotImplemented -} - -// Get returns the value stored in the given table/bucket and key. -func (s *SimpleDB) Get(bucket, key []byte) ([]byte, error) { - return nil, ErrNotImplemented -} - -// Set sets the given value in the given table/bucket and key. -func (s *SimpleDB) Set(bucket, key, value []byte) error { - return ErrNotImplemented -} - -// CmpAndSwap swaps the value at the given bucket and key if the current -// value is equivalent to the oldValue input. Returns 'true' if the -// swap was successful and 'false' otherwise. -func (s *SimpleDB) CmpAndSwap(bucket, key, oldValue, newValue []byte) ([]byte, bool, error) { - return nil, false, ErrNotImplemented -} - -// Del deletes the data in the given table/bucket and key. -func (s *SimpleDB) Del(bucket, key []byte) error { - return ErrNotImplemented -} - -// List returns a list of all the entries in a given table/bucket. -func (s *SimpleDB) List(bucket []byte) ([]*database.Entry, error) { - return nil, ErrNotImplemented -} - -// Update performs a transaction with multiple read-write commands. -func (s *SimpleDB) Update(tx *database.Tx) error { - return ErrNotImplemented -} - -// CreateTable creates a table or a bucket in the database. 
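Editorial note: ErrNotImplemented above is a package-level sentinel, and callers such as the authority's Sign and Rekey treat it as "no persistence configured" rather than a hard failure when storing certificates. A sketch of that tolerant-caller pattern; the vendored code compares with != directly, while errors.Is shown here is the modern equivalent that also handles wrapping:

package main

import (
    "errors"
    "fmt"
)

var ErrNotImplemented = errors.New("not implemented")

func storeCertificate(serial string) error {
    return ErrNotImplemented // SimpleDB-style backend: storage is a no-op
}

// signAndStore succeeds even when the backend cannot persist, but still
// propagates real storage failures.
func signAndStore(serial string) error {
    if err := storeCertificate(serial); err != nil {
        if !errors.Is(err, ErrNotImplemented) {
            return fmt.Errorf("storing certificate: %w", err)
        }
    }
    return nil
}

func main() {
    fmt.Println(signAndStore("123")) // <nil>
}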
-func (s *SimpleDB) CreateTable(bucket []byte) error { - return ErrNotImplemented -} - -// DeleteTable deletes a table or a bucket in the database. -func (s *SimpleDB) DeleteTable(bucket []byte) error { - return ErrNotImplemented -} diff --git a/vendor/github.com/smallstep/certificates/errs/error.go b/vendor/github.com/smallstep/certificates/errs/error.go deleted file mode 100644 index ebcf0894..00000000 --- a/vendor/github.com/smallstep/certificates/errs/error.go +++ /dev/null @@ -1,335 +0,0 @@ -package errs - -import ( - "encoding/json" - "fmt" - "net/http" - - "github.com/pkg/errors" -) - -// StatusCoder interface is used by errors that returns the HTTP response code. -type StatusCoder interface { - StatusCode() int -} - -// StackTracer must be by those errors that return an stack trace. -type StackTracer interface { - StackTrace() errors.StackTrace -} - -// Option modifies the Error type. -type Option func(e *Error) error - -// withDefaultMessage returns an Option that modifies the error by overwriting the -// message only if it is empty. -func withDefaultMessage(format string, args ...interface{}) Option { - return func(e *Error) error { - if len(e.Msg) > 0 { - return e - } - e.Msg = fmt.Sprintf(format, args...) - return e - } -} - -// WithMessage returns an Option that modifies the error by overwriting the -// message only if it is empty. -func WithMessage(format string, args ...interface{}) Option { - return func(e *Error) error { - e.Msg = fmt.Sprintf(format, args...) - return e - } -} - -// WithKeyVal returns an Option that adds the given key-value pair to the -// Error details. This is helpful for debugging errors. -func WithKeyVal(key string, val interface{}) Option { - return func(e *Error) error { - if e.Details == nil { - e.Details = make(map[string]interface{}) - } - e.Details[key] = val - return e - } -} - -// Error represents the CA API errors. -type Error struct { - Status int - Err error - Msg string - Details map[string]interface{} -} - -// ErrorResponse represents an error in JSON format. -type ErrorResponse struct { - Status int `json:"status"` - Message string `json:"message"` -} - -// Cause implements the errors.Causer interface and returns the original error. -func (e *Error) Cause() error { - return e.Err -} - -// Error implements the error interface and returns the error string. -func (e *Error) Error() string { - return e.Err.Error() -} - -// StatusCode implements the StatusCoder interface and returns the HTTP response -// code. -func (e *Error) StatusCode() int { - return e.Status -} - -// Message returns a user friendly error, if one is set. -func (e *Error) Message() string { - if len(e.Msg) > 0 { - return e.Msg - } - return e.Err.Error() -} - -// Wrap returns an error annotating err with a stack trace at the point Wrap is -// called, and the supplied message. If err is nil, Wrap returns nil. -func Wrap(status int, e error, m string, args ...interface{}) error { - if e == nil { - return nil - } - _, opts := splitOptionArgs(args) - if err, ok := e.(*Error); ok { - err.Err = errors.Wrap(err.Err, m) - e = err - } else { - e = errors.Wrap(e, m) - } - return StatusCodeError(status, e, opts...) -} - -// Wrapf returns an error annotating err with a stack trace at the point Wrap is -// called, and the supplied message. If err is nil, Wrap returns nil. 
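Editorial note: the errs package above decorates its Error type through functional options: each Option mutates the error in place, so call sites attach a user-facing message or debugging key-value pairs without widening constructor signatures. A condensed sketch of the pattern (the vendored Option also returns the error; that is dropped here for brevity):

package main

import "fmt"

type Error struct {
    Status  int
    Err     error
    Msg     string
    Details map[string]interface{}
}

func (e *Error) Error() string { return e.Err.Error() }

type Option func(*Error)

func WithMessage(format string, args ...interface{}) Option {
    return func(e *Error) { e.Msg = fmt.Sprintf(format, args...) }
}

func WithKeyVal(key string, val interface{}) Option {
    return func(e *Error) {
        if e.Details == nil {
            e.Details = map[string]interface{}{}
        }
        e.Details[key] = val
    }
}

func newErr(status int, err error, opts ...Option) *Error {
    e := &Error{Status: status, Err: err}
    for _, o := range opts {
        o(e)
    }
    return e
}

func main() {
    e := newErr(400, fmt.Errorf("bad csr"),
        WithMessage("The request could not be completed."),
        WithKeyVal("serialNumber", "123"))
    fmt.Println(e.Status, e.Msg, e.Details["serialNumber"]) // 400 ... 123
}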
-func Wrapf(status int, e error, format string, args ...interface{}) error { - if e == nil { - return nil - } - as, opts := splitOptionArgs(args) - if err, ok := e.(*Error); ok { - err.Err = errors.Wrapf(err.Err, format, args...) - e = err - } else { - e = errors.Wrapf(e, format, as...) - } - return StatusCodeError(status, e, opts...) -} - -// MarshalJSON implements json.Marshaller interface for the Error struct. -func (e *Error) MarshalJSON() ([]byte, error) { - var msg string - if len(e.Msg) > 0 { - msg = e.Msg - } else { - msg = http.StatusText(e.Status) - } - return json.Marshal(&ErrorResponse{Status: e.Status, Message: msg}) -} - -// UnmarshalJSON implements json.Unmarshaler interface for the Error struct. -func (e *Error) UnmarshalJSON(data []byte) error { - var er ErrorResponse - if err := json.Unmarshal(data, &er); err != nil { - return err - } - e.Status = er.Status - e.Err = fmt.Errorf(er.Message) - return nil -} - -// Format implements the fmt.Formatter interface. -func (e *Error) Format(f fmt.State, c rune) { - if err, ok := e.Err.(fmt.Formatter); ok { - err.Format(f, c) - return - } - fmt.Fprint(f, e.Err.Error()) -} - -// Messenger is a friendly message interface that errors can implement. -type Messenger interface { - Message() string -} - -// StatusCodeError selects the proper error based on the status code. -func StatusCodeError(code int, e error, opts ...Option) error { - switch code { - case http.StatusBadRequest: - return BadRequestErr(e, opts...) - case http.StatusUnauthorized: - return UnauthorizedErr(e, opts...) - case http.StatusForbidden: - return ForbiddenErr(e, opts...) - case http.StatusInternalServerError: - return InternalServerErr(e, opts...) - case http.StatusNotImplemented: - return NotImplementedErr(e, opts...) - default: - return UnexpectedErr(code, e, opts...) - } -} - -var ( - seeLogs = "Please see the certificate authority logs for more info." - // BadRequestDefaultMsg 400 default msg - BadRequestDefaultMsg = "The request could not be completed; malformed or missing data. " + seeLogs - // UnauthorizedDefaultMsg 401 default msg - UnauthorizedDefaultMsg = "The request lacked necessary authorization to be completed. " + seeLogs - // ForbiddenDefaultMsg 403 default msg - ForbiddenDefaultMsg = "The request was forbidden by the certificate authority. " + seeLogs - // NotFoundDefaultMsg 404 default msg - NotFoundDefaultMsg = "The requested resource could not be found. " + seeLogs - // InternalServerErrorDefaultMsg 500 default msg - InternalServerErrorDefaultMsg = "The certificate authority encountered an Internal Server Error. " + seeLogs - // NotImplementedDefaultMsg 501 default msg - NotImplementedDefaultMsg = "The requested method is not implemented by the certificate authority. " + seeLogs -) - -// splitOptionArgs splits the variadic length args into string formatting args -// and Option(s) to apply to an Error. -func splitOptionArgs(args []interface{}) ([]interface{}, []Option) { - indexOptionStart := -1 - for i, a := range args { - if _, ok := a.(Option); ok { - indexOptionStart = i - break - } - } - - if indexOptionStart < 0 { - return args, []Option{} - } - opts := []Option{} - // Ignore any non-Option args that come after the first Option. - for _, o := range args[indexOptionStart:] { - if opt, ok := o.(Option); ok { - opts = append(opts, opt) - } - } - return args[:indexOptionStart], opts -} - -// NewErr returns a new Error. If the given error implements the StatusCoder -// interface we will ignore the given status. 
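Editorial note: splitOptionArgs above lets a single variadic parameter carry both printf-style arguments and Options: everything before the first Option formats the message, everything from it onward is applied to the error, and stray non-Options in the tail are ignored. A sketch of the split with a trivialized Option type:

package main

import "fmt"

type Option func()

// splitOptionArgs separates formatting arguments from trailing Options,
// mirroring the helper above: the first Option marks the boundary.
func splitOptionArgs(args []interface{}) ([]interface{}, []Option) {
    for i, a := range args {
        if _, ok := a.(Option); ok {
            var opts []Option
            for _, o := range args[i:] {
                if opt, ok := o.(Option); ok {
                    opts = append(opts, opt)
                }
            }
            return args[:i], opts
        }
    }
    return args, nil
}

func main() {
    noop := Option(func() {})
    fmtArgs, opts := splitOptionArgs([]interface{}{"123", 42, noop, noop})
    fmt.Println(len(fmtArgs), len(opts)) // 2 2
}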
-func NewErr(status int, err error, opts ...Option) error { - var ( - e *Error - ok bool - ) - if e, ok = err.(*Error); !ok { - if sc, ok := err.(StatusCoder); ok { - e = &Error{Status: sc.StatusCode(), Err: err} - } else { - cause := errors.Cause(err) - if sc, ok := cause.(StatusCoder); ok { - e = &Error{Status: sc.StatusCode(), Err: err} - } else { - e = &Error{Status: status, Err: err} - } - } - } - for _, o := range opts { - o(e) - } - return e -} - -// Errorf creates a new error using the given format and status code. -func Errorf(code int, format string, args ...interface{}) error { - as, opts := splitOptionArgs(args) - opts = append(opts, withDefaultMessage(NotImplementedDefaultMsg)) - e := &Error{Status: code, Err: fmt.Errorf(format, as...)} - for _, o := range opts { - o(e) - } - return e -} - -// InternalServer creates a 500 error with the given format and arguments. -func InternalServer(format string, args ...interface{}) error { - args = append(args, withDefaultMessage(InternalServerErrorDefaultMsg)) - return Errorf(http.StatusInternalServerError, format, args...) -} - -// InternalServerErr returns a 500 error with the given error. -func InternalServerErr(err error, opts ...Option) error { - opts = append(opts, withDefaultMessage(InternalServerErrorDefaultMsg)) - return NewErr(http.StatusInternalServerError, err, opts...) -} - -// NotImplemented creates a 501 error with the given format and arguments. -func NotImplemented(format string, args ...interface{}) error { - args = append(args, withDefaultMessage(NotImplementedDefaultMsg)) - return Errorf(http.StatusNotImplemented, format, args...) -} - -// NotImplementedErr returns a 501 error with the given error. -func NotImplementedErr(err error, opts ...Option) error { - opts = append(opts, withDefaultMessage(NotImplementedDefaultMsg)) - return NewErr(http.StatusNotImplemented, err, opts...) -} - -// BadRequest creates a 400 error with the given format and arguments. -func BadRequest(format string, args ...interface{}) error { - args = append(args, withDefaultMessage(BadRequestDefaultMsg)) - return Errorf(http.StatusBadRequest, format, args...) -} - -// BadRequestErr returns an 400 error with the given error. -func BadRequestErr(err error, opts ...Option) error { - opts = append(opts, withDefaultMessage(BadRequestDefaultMsg)) - return NewErr(http.StatusBadRequest, err, opts...) -} - -// Unauthorized creates a 401 error with the given format and arguments. -func Unauthorized(format string, args ...interface{}) error { - args = append(args, withDefaultMessage(UnauthorizedDefaultMsg)) - return Errorf(http.StatusUnauthorized, format, args...) -} - -// UnauthorizedErr returns an 401 error with the given error. -func UnauthorizedErr(err error, opts ...Option) error { - opts = append(opts, withDefaultMessage(UnauthorizedDefaultMsg)) - return NewErr(http.StatusUnauthorized, err, opts...) -} - -// Forbidden creates a 403 error with the given format and arguments. -func Forbidden(format string, args ...interface{}) error { - args = append(args, withDefaultMessage(ForbiddenDefaultMsg)) - return Errorf(http.StatusForbidden, format, args...) -} - -// ForbiddenErr returns an 403 error with the given error. -func ForbiddenErr(err error, opts ...Option) error { - opts = append(opts, withDefaultMessage(ForbiddenDefaultMsg)) - return NewErr(http.StatusForbidden, err, opts...) -} - -// NotFound creates a 404 error with the given format and arguments. 
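One subtlety worth a sketch: thanks to `splitOptionArgs`, format arguments and trailing `Option`s share the same variadic slot in these constructors (the values here are hypothetical):

```go
err := BadRequest("failed to parse CSR for %s", "foo.test",
	WithKeyVal("requestID", "4b3c91")) // trailing Option, split off before Sprintf
```

(`NotFound` continues below.)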
-func NotFound(format string, args ...interface{}) error { - args = append(args, withDefaultMessage(NotFoundDefaultMsg)) - return Errorf(http.StatusNotFound, format, args...) -} - -// NotFoundErr returns an 404 error with the given error. -func NotFoundErr(err error, opts ...Option) error { - opts = append(opts, withDefaultMessage(NotFoundDefaultMsg)) - return NewErr(http.StatusNotFound, err, opts...) -} - -// UnexpectedErr will be used when the certificate authority makes an outgoing -// request and receives an unhandled status code. -func UnexpectedErr(code int, err error, opts ...Option) error { - opts = append(opts, withDefaultMessage("The certificate authority received an "+ - "unexpected HTTP status code - '%d'. "+seeLogs, code)) - return NewErr(code, err, opts...) -} diff --git a/vendor/github.com/smallstep/certificates/kms/apiv1/options.go b/vendor/github.com/smallstep/certificates/kms/apiv1/options.go deleted file mode 100644 index 7cc7f748..00000000 --- a/vendor/github.com/smallstep/certificates/kms/apiv1/options.go +++ /dev/null @@ -1,128 +0,0 @@ -package apiv1 - -import ( - "crypto" - "crypto/x509" - "strings" - - "github.com/pkg/errors" -) - -// KeyManager is the interface implemented by all the KMS. -type KeyManager interface { - GetPublicKey(req *GetPublicKeyRequest) (crypto.PublicKey, error) - CreateKey(req *CreateKeyRequest) (*CreateKeyResponse, error) - CreateSigner(req *CreateSignerRequest) (crypto.Signer, error) - Close() error -} - -// Decrypter is an interface implemented by KMSes that are used -// in operations that require decryption -type Decrypter interface { - CreateDecrypter(req *CreateDecrypterRequest) (crypto.Decrypter, error) -} - -// CertificateManager is the interface implemented by the KMS that can load and -// store x509.Certificates. -type CertificateManager interface { - LoadCertificate(req *LoadCertificateRequest) (*x509.Certificate, error) - StoreCertificate(req *StoreCertificateRequest) error -} - -// ErrNotImplemented is the type of error returned if an operation is not -// implemented. -type ErrNotImplemented struct { - Message string -} - -func (e ErrNotImplemented) Error() string { - if e.Message != "" { - return e.Message - } - return "not implemented" -} - -// ErrAlreadyExists is the type of error returned if a key already exists. This -// is currently only implmented on pkcs11. -type ErrAlreadyExists struct { - Message string -} - -func (e ErrAlreadyExists) Error() string { - if e.Message != "" { - return e.Message - } - return "key already exists" -} - -// Type represents the KMS type used. -type Type string - -const ( - // DefaultKMS is a KMS implementation using software. - DefaultKMS Type = "" - // SoftKMS is a KMS implementation using software. - SoftKMS Type = "softkms" - // CloudKMS is a KMS implementation using Google's Cloud KMS. - CloudKMS Type = "cloudkms" - // AmazonKMS is a KMS implementation using Amazon AWS KMS. - AmazonKMS Type = "awskms" - // PKCS11 is a KMS implementation using the PKCS11 standard. - PKCS11 Type = "pkcs11" - // YubiKey is a KMS implementation using a YubiKey PIV. - YubiKey Type = "yubikey" - // SSHAgentKMS is a KMS implementation using ssh-agent to access keys. - SSHAgentKMS Type = "sshagentkms" -) - -// Options are the KMS options. They represent the kms object in the ca.json. -type Options struct { - // The type of the KMS to use. - Type string `json:"type"` - - // Path to the credentials file used in CloudKMS and AmazonKMS. 
- CredentialsFile string `json:"credentialsFile"` - - // URI is based on the PKCS #11 URI Scheme defined in - // https://tools.ietf.org/html/rfc7512 and represents the configuration used - // to connect to the KMS. - // - // Used by: pkcs11 - URI string `json:"uri"` - - // Pin used to access the PKCS11 module. It can be defined in the URI using - // the pin-value or pin-source properties. - Pin string `json:"pin"` - - // ManagementKey used in YubiKeys. Default management key is the hexadecimal - // string 010203040506070801020304050607080102030405060708: - // []byte{ - // 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, - // 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, - // 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, - // } - ManagementKey string `json:"managementKey"` - - // Region to use in AmazonKMS. - Region string `json:"region"` - - // Profile to use in AmazonKMS. - Profile string `json:"profile"` -} - -// Validate checks the fields in Options. -func (o *Options) Validate() error { - if o == nil { - return nil - } - - switch Type(strings.ToLower(o.Type)) { - case DefaultKMS, SoftKMS: // Go crypto based kms. - case CloudKMS, AmazonKMS, SSHAgentKMS: // Cloud based kms. - case YubiKey, PKCS11: // Hardware based kms. - default: - return errors.Errorf("unsupported kms type %s", o.Type) - } - - return nil -} diff --git a/vendor/github.com/smallstep/certificates/kms/apiv1/registry.go b/vendor/github.com/smallstep/certificates/kms/apiv1/registry.go deleted file mode 100644 index 5a8cf4db..00000000 --- a/vendor/github.com/smallstep/certificates/kms/apiv1/registry.go +++ /dev/null @@ -1,27 +0,0 @@ -package apiv1 - -import ( - "context" - "sync" -) - -var registry = new(sync.Map) - -// KeyManagerNewFunc is the type that represents the method to initialize a new -// KeyManager. -type KeyManagerNewFunc func(ctx context.Context, opts Options) (KeyManager, error) - -// Register adds to the registry a method to create a KeyManager of type t. -func Register(t Type, fn KeyManagerNewFunc) { - registry.Store(t, fn) -} - -// LoadKeyManagerNewFunc returns the function initialize a KayManager. -func LoadKeyManagerNewFunc(t Type) (KeyManagerNewFunc, bool) { - v, ok := registry.Load(t) - if !ok { - return nil, false - } - fn, ok := v.(KeyManagerNewFunc) - return fn, ok -} diff --git a/vendor/github.com/smallstep/certificates/kms/apiv1/requests.go b/vendor/github.com/smallstep/certificates/kms/apiv1/requests.go deleted file mode 100644 index f6fe7dd2..00000000 --- a/vendor/github.com/smallstep/certificates/kms/apiv1/requests.go +++ /dev/null @@ -1,155 +0,0 @@ -package apiv1 - -import ( - "crypto" - "crypto/x509" - "fmt" -) - -// ProtectionLevel specifies on some KMS how cryptographic operations are -// performed. -type ProtectionLevel int - -const ( - // Protection level not specified. - UnspecifiedProtectionLevel ProtectionLevel = iota - // Crypto operations are performed in software. - Software - // Crypto operations are performed in a Hardware Security Module. - HSM -) - -// String returns a string representation of p. -func (p ProtectionLevel) String() string { - switch p { - case UnspecifiedProtectionLevel: - return "unspecified" - case Software: - return "software" - case HSM: - return "hsm" - default: - return fmt.Sprintf("unknown(%d)", p) - } -} - -// SignatureAlgorithm used for cryptographic signing. -type SignatureAlgorithm int - -const ( - // Not specified. - UnspecifiedSignAlgorithm SignatureAlgorithm = iota - // RSASSA-PKCS1-v1_5 key and a SHA256 digest. 
- SHA256WithRSA - // RSASSA-PKCS1-v1_5 key and a SHA384 digest. - SHA384WithRSA - // RSASSA-PKCS1-v1_5 key and a SHA512 digest. - SHA512WithRSA - // RSASSA-PSS key with a SHA256 digest. - SHA256WithRSAPSS - // RSASSA-PSS key with a SHA384 digest. - SHA384WithRSAPSS - // RSASSA-PSS key with a SHA512 digest. - SHA512WithRSAPSS - // ECDSA on the NIST P-256 curve with a SHA256 digest. - ECDSAWithSHA256 - // ECDSA on the NIST P-384 curve with a SHA384 digest. - ECDSAWithSHA384 - // ECDSA on the NIST P-521 curve with a SHA512 digest. - ECDSAWithSHA512 - // EdDSA on Curve25519 with a SHA512 digest. - PureEd25519 -) - -// String returns a string representation of s. -func (s SignatureAlgorithm) String() string { - switch s { - case UnspecifiedSignAlgorithm: - return "unspecified" - case SHA256WithRSA: - return "SHA256-RSA" - case SHA384WithRSA: - return "SHA384-RSA" - case SHA512WithRSA: - return "SHA512-RSA" - case SHA256WithRSAPSS: - return "SHA256-RSAPSS" - case SHA384WithRSAPSS: - return "SHA384-RSAPSS" - case SHA512WithRSAPSS: - return "SHA512-RSAPSS" - case ECDSAWithSHA256: - return "ECDSA-SHA256" - case ECDSAWithSHA384: - return "ECDSA-SHA384" - case ECDSAWithSHA512: - return "ECDSA-SHA512" - case PureEd25519: - return "Ed25519" - default: - return fmt.Sprintf("unknown(%d)", s) - } -} - -// GetPublicKeyRequest is the parameter used in the kms.GetPublicKey method. -type GetPublicKeyRequest struct { - Name string -} - -// CreateKeyRequest is the parameter used in the kms.CreateKey method. -type CreateKeyRequest struct { - // Name represents the key name or label used to identify a key. - // - // Used by: awskms, cloudkms, pkcs11, yubikey. - Name string - - // SignatureAlgorithm represents the type of key to create. - SignatureAlgorithm SignatureAlgorithm - - // Bits is the number of bits on RSA keys. - Bits int - - // ProtectionLevel specifies how cryptographic operations are performed. - // Used by: cloudkms - ProtectionLevel ProtectionLevel -} - -// CreateKeyResponse is the response value of the kms.CreateKey method. -type CreateKeyResponse struct { - Name string - PublicKey crypto.PublicKey - PrivateKey crypto.PrivateKey - CreateSignerRequest CreateSignerRequest -} - -// CreateSignerRequest is the parameter used in the kms.CreateSigner method. -type CreateSignerRequest struct { - Signer crypto.Signer - SigningKey string - SigningKeyPEM []byte - TokenLabel string - PublicKey string - PublicKeyPEM []byte - Password []byte -} - -// CreateDecrypterRequest is the parameter used in the kms.Decrypt method. -type CreateDecrypterRequest struct { - Decrypter crypto.Decrypter - DecryptionKey string - DecryptionKeyPEM []byte - Password []byte -} - -// LoadCertificateRequest is the parameter used in the LoadCertificate method of -// a CertificateManager. -type LoadCertificateRequest struct { - Name string -} - -// StoreCertificateRequest is the parameter used in the StoreCertificate method -// of a CertificateManager. 
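A hypothetical flow tying these request/response types to the `KeyManager` interface defined at the top of this package (the function and key names are illustrative):

```go
func newSigner(km KeyManager) (crypto.Signer, error) {
	resp, err := km.CreateKey(&CreateKeyRequest{
		Name:               "candy-intermediate",
		SignatureAlgorithm: ECDSAWithSHA256,
	})
	if err != nil {
		return nil, err
	}
	// The response embeds a ready-made CreateSignerRequest, so callers stay
	// agnostic about which backend (softkms, cloudkms, ...) produced the key.
	return km.CreateSigner(&resp.CreateSignerRequest)
}
```

(The `StoreCertificateRequest` type follows.)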
-type StoreCertificateRequest struct { - Name string - Certificate *x509.Certificate -} diff --git a/vendor/github.com/smallstep/certificates/kms/kms.go b/vendor/github.com/smallstep/certificates/kms/kms.go deleted file mode 100644 index 3eddca93..00000000 --- a/vendor/github.com/smallstep/certificates/kms/kms.go +++ /dev/null @@ -1,37 +0,0 @@ -package kms - -import ( - "context" - "strings" - - "github.com/pkg/errors" - "github.com/smallstep/certificates/kms/apiv1" - - // Enable default implementation - _ "github.com/smallstep/certificates/kms/softkms" -) - -// KeyManager is the interface implemented by all the KMS. -type KeyManager = apiv1.KeyManager - -// CertificateManager is the interface implemented by the KMS that can load and -// store x509.Certificates. -type CertificateManager = apiv1.CertificateManager - -// New initializes a new KMS from the given type. -func New(ctx context.Context, opts apiv1.Options) (KeyManager, error) { - if err := opts.Validate(); err != nil { - return nil, err - } - - t := apiv1.Type(strings.ToLower(opts.Type)) - if t == apiv1.DefaultKMS { - t = apiv1.SoftKMS - } - - fn, ok := apiv1.LoadKeyManagerNewFunc(t) - if !ok { - return nil, errors.Errorf("unsupported kms type '%s'", t) - } - return fn(ctx, opts) -} diff --git a/vendor/github.com/smallstep/certificates/kms/softkms/softkms.go b/vendor/github.com/smallstep/certificates/kms/softkms/softkms.go deleted file mode 100644 index a2f43c31..00000000 --- a/vendor/github.com/smallstep/certificates/kms/softkms/softkms.go +++ /dev/null @@ -1,183 +0,0 @@ -package softkms - -import ( - "context" - "crypto" - "crypto/ecdsa" - "crypto/ed25519" - "crypto/rsa" - "crypto/x509" - - "github.com/pkg/errors" - "github.com/smallstep/certificates/kms/apiv1" - "go.step.sm/cli-utils/ui" - "go.step.sm/crypto/keyutil" - "go.step.sm/crypto/pemutil" -) - -type algorithmAttributes struct { - Type string - Curve string -} - -// DefaultRSAKeySize is the default size for RSA keys. -const DefaultRSAKeySize = 3072 - -var signatureAlgorithmMapping = map[apiv1.SignatureAlgorithm]algorithmAttributes{ - apiv1.UnspecifiedSignAlgorithm: {"EC", "P-256"}, - apiv1.SHA256WithRSA: {"RSA", ""}, - apiv1.SHA384WithRSA: {"RSA", ""}, - apiv1.SHA512WithRSA: {"RSA", ""}, - apiv1.SHA256WithRSAPSS: {"RSA", ""}, - apiv1.SHA384WithRSAPSS: {"RSA", ""}, - apiv1.SHA512WithRSAPSS: {"RSA", ""}, - apiv1.ECDSAWithSHA256: {"EC", "P-256"}, - apiv1.ECDSAWithSHA384: {"EC", "P-384"}, - apiv1.ECDSAWithSHA512: {"EC", "P-521"}, - apiv1.PureEd25519: {"OKP", "Ed25519"}, -} - -// generateKey is used for testing purposes. -var generateKey = func(kty, crv string, size int) (interface{}, interface{}, error) { - if kty == "RSA" && size == 0 { - size = DefaultRSAKeySize - } - return keyutil.GenerateKeyPair(kty, crv, size) -} - -// SoftKMS is a key manager that uses keys stored in disk. -type SoftKMS struct{} - -// New returns a new SoftKMS. -func New(ctx context.Context, opts apiv1.Options) (*SoftKMS, error) { - return &SoftKMS{}, nil -} - -func init() { - pemutil.PromptPassword = func(msg string) ([]byte, error) { - return ui.PromptPassword(msg) - } - apiv1.Register(apiv1.SoftKMS, func(ctx context.Context, opts apiv1.Options) (apiv1.KeyManager, error) { - return New(ctx, opts) - }) -} - -// Close is a noop that just returns nil. -func (k *SoftKMS) Close() error { - return nil -} - -// CreateSigner returns a new signer configured with the given signing key. 
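Because an empty `Type` falls through to `softkms` (whose registration sits in the `init` above), the zero-value options are enough to get a working key manager — a sketch using the vendored import paths:

```go
km, err := kms.New(context.Background(), apiv1.Options{}) // "" resolves to softkms
if err != nil {
	log.Fatal(err)
}
defer km.Close()
```

(softkms's `CreateSigner` follows.)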
-func (k *SoftKMS) CreateSigner(req *apiv1.CreateSignerRequest) (crypto.Signer, error) { - var opts []pemutil.Options - if req.Password != nil { - opts = append(opts, pemutil.WithPassword(req.Password)) - } - - switch { - case req.Signer != nil: - return req.Signer, nil - case len(req.SigningKeyPEM) != 0: - v, err := pemutil.ParseKey(req.SigningKeyPEM, opts...) - if err != nil { - return nil, err - } - sig, ok := v.(crypto.Signer) - if !ok { - return nil, errors.New("signingKeyPEM is not a crypto.Signer") - } - return sig, nil - case req.SigningKey != "": - v, err := pemutil.Read(req.SigningKey, opts...) - if err != nil { - return nil, err - } - sig, ok := v.(crypto.Signer) - if !ok { - return nil, errors.New("signingKey is not a crypto.Signer") - } - return sig, nil - default: - return nil, errors.New("failed to load softKMS: please define signingKeyPEM or signingKey") - } -} - -// CreateKey generates a new key using Golang crypto and returns both public and -// private key. -func (k *SoftKMS) CreateKey(req *apiv1.CreateKeyRequest) (*apiv1.CreateKeyResponse, error) { - v, ok := signatureAlgorithmMapping[req.SignatureAlgorithm] - if !ok { - return nil, errors.Errorf("softKMS does not support signature algorithm '%s'", req.SignatureAlgorithm) - } - - pub, priv, err := generateKey(v.Type, v.Curve, req.Bits) - if err != nil { - return nil, err - } - signer, ok := priv.(crypto.Signer) - if !ok { - return nil, errors.Errorf("softKMS createKey result is not a crypto.Signer: type %T", priv) - } - - return &apiv1.CreateKeyResponse{ - Name: req.Name, - PublicKey: pub, - PrivateKey: priv, - CreateSignerRequest: apiv1.CreateSignerRequest{ - Signer: signer, - }, - }, nil -} - -// GetPublicKey returns the public key from the file passed in the request name. -func (k *SoftKMS) GetPublicKey(req *apiv1.GetPublicKeyRequest) (crypto.PublicKey, error) { - v, err := pemutil.Read(req.Name) - if err != nil { - return nil, err - } - - switch vv := v.(type) { - case *x509.Certificate: - return vv.PublicKey, nil - case *rsa.PublicKey, *ecdsa.PublicKey, ed25519.PublicKey: - return vv, nil - default: - return nil, errors.Errorf("unsupported public key type %T", v) - } -} - -// CreateDecrypter creates a new crypto.Decrypter backed by disk/software -func (k *SoftKMS) CreateDecrypter(req *apiv1.CreateDecrypterRequest) (crypto.Decrypter, error) { - - var opts []pemutil.Options - if req.Password != nil { - opts = append(opts, pemutil.WithPassword(req.Password)) - } - - switch { - case req.Decrypter != nil: - return req.Decrypter, nil - case len(req.DecryptionKeyPEM) != 0: - v, err := pemutil.ParseKey(req.DecryptionKeyPEM, opts...) - if err != nil { - return nil, err - } - decrypter, ok := v.(crypto.Decrypter) - if !ok { - return nil, errors.New("decryptorKeyPEM is not a crypto.Decrypter") - } - return decrypter, nil - case req.DecryptionKey != "": - v, err := pemutil.Read(req.DecryptionKey, opts...) 
- if err != nil { - return nil, err - } - decrypter, ok := v.(crypto.Decrypter) - if !ok { - return nil, errors.New("decryptionKey is not a crypto.Decrypter") - } - return decrypter, nil - default: - return nil, errors.New("failed to load softKMS: please define decryptionKeyPEM or decryptionKey") - } -} diff --git a/vendor/github.com/smallstep/certificates/kms/sshagentkms/sshagentkms.go b/vendor/github.com/smallstep/certificates/kms/sshagentkms/sshagentkms.go deleted file mode 100644 index b3627a08..00000000 --- a/vendor/github.com/smallstep/certificates/kms/sshagentkms/sshagentkms.go +++ /dev/null @@ -1,206 +0,0 @@ -package sshagentkms - -import ( - "context" - "crypto" - "crypto/ecdsa" - "crypto/ed25519" - "crypto/rsa" - "crypto/x509" - "io" - "net" - "os" - "strings" - - "golang.org/x/crypto/ssh" - "golang.org/x/crypto/ssh/agent" - - "github.com/pkg/errors" - "github.com/smallstep/certificates/kms/apiv1" - - "go.step.sm/crypto/pemutil" -) - -// SSHAgentKMS is a key manager that uses keys provided by ssh-agent -type SSHAgentKMS struct { - agentClient agent.Agent -} - -// New returns a new SSHAgentKMS. -func New(ctx context.Context, opts apiv1.Options) (*SSHAgentKMS, error) { - socket := os.Getenv("SSH_AUTH_SOCK") - conn, err := net.Dial("unix", socket) - if err != nil { - return nil, errors.Wrap(err, "failed to open SSH_AUTH_SOCK") - } - - agentClient := agent.NewClient(conn) - - return &SSHAgentKMS{ - agentClient: agentClient, - }, nil -} - -// NewFromAgent initializes an SSHAgentKMS from a given agent, this method is -// used for testing purposes. -func NewFromAgent(ctx context.Context, opts apiv1.Options, agentClient agent.Agent) (*SSHAgentKMS, error) { - return &SSHAgentKMS{ - agentClient: agentClient, - }, nil -} - -func init() { - apiv1.Register(apiv1.SSHAgentKMS, func(ctx context.Context, opts apiv1.Options) (apiv1.KeyManager, error) { - return New(ctx, opts) - }) -} - -// Close closes the agent. This is a noop for the SSHAgentKMS. -func (k *SSHAgentKMS) Close() error { - return nil -} - -// WrappedSSHSigner is a utility type to wrap a ssh.Signer as a crypto.Signer -type WrappedSSHSigner struct { - Sshsigner ssh.Signer -} - -// Public returns the agent public key. The type of this public key is -// *agent.Key. -func (s *WrappedSSHSigner) Public() crypto.PublicKey { - return s.Sshsigner.PublicKey() -} - -// Sign signs the given digest using the ssh agent and returns the signature. -func (s *WrappedSSHSigner) Sign(rand io.Reader, digest []byte, opts crypto.SignerOpts) (signature []byte, err error) { - sig, err := s.Sshsigner.Sign(rand, digest) - if err != nil { - return nil, err - } - return sig.Blob, nil -} - -// NewWrappedSignerFromSSHSigner returns a new crypto signer wrapping the given -// one. -func NewWrappedSignerFromSSHSigner(signer ssh.Signer) crypto.Signer { - return &WrappedSSHSigner{signer} -} - -func (k *SSHAgentKMS) findKey(signingKey string) (target int, err error) { - if strings.HasPrefix(signingKey, "sshagentkms:") { - var key = strings.TrimPrefix(signingKey, "sshagentkms:") - - l, err := k.agentClient.List() - if err != nil { - return -1, err - } - for i, s := range l { - if s.Comment == key { - return i, nil - } - } - } - - return -1, errors.Errorf("SSHAgentKMS couldn't find %s", signingKey) -} - -// CreateSigner returns a new signer configured with the given signing key. 
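`findKey` above selects a key by its agent comment; the same lookup, stripped down to the underlying `golang.org/x/crypto/ssh/agent` calls, looks like this (a sketch that assumes `SSH_AUTH_SOCK` points at a running agent):

```go
conn, err := net.Dial("unix", os.Getenv("SSH_AUTH_SOCK"))
if err != nil {
	log.Fatal(err)
}
keys, err := agent.NewClient(conn).List()
if err != nil {
	log.Fatal(err)
}
for _, k := range keys {
	fmt.Println(k.Comment) // a "sshagentkms:<comment>" name picks this key
}
```

(sshagentkms's `CreateSigner` follows.)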
-func (k *SSHAgentKMS) CreateSigner(req *apiv1.CreateSignerRequest) (crypto.Signer, error) { - if req.Signer != nil { - return req.Signer, nil - } - if strings.HasPrefix(req.SigningKey, "sshagentkms:") { - target, err := k.findKey(req.SigningKey) - - if err != nil { - return nil, err - } - s, err := k.agentClient.Signers() - if err != nil { - return nil, err - } - return NewWrappedSignerFromSSHSigner(s[target]), nil - } - // OK: We don't actually care about non-ssh certificates, - // but we can't disable it in step-ca so this code is copy-pasted from - // softkms just to keep step-ca happy. - var opts []pemutil.Options - if req.Password != nil { - opts = append(opts, pemutil.WithPassword(req.Password)) - } - switch { - case len(req.SigningKeyPEM) != 0: - v, err := pemutil.ParseKey(req.SigningKeyPEM, opts...) - if err != nil { - return nil, err - } - sig, ok := v.(crypto.Signer) - if !ok { - return nil, errors.New("signingKeyPEM is not a crypto.Signer") - } - return sig, nil - case req.SigningKey != "": - v, err := pemutil.Read(req.SigningKey, opts...) - if err != nil { - return nil, err - } - sig, ok := v.(crypto.Signer) - if !ok { - return nil, errors.New("signingKey is not a crypto.Signer") - } - return sig, nil - default: - return nil, errors.New("failed to load softKMS: please define signingKeyPEM or signingKey") - } -} - -// CreateKey generates a new key and returns both public and private key. -func (k *SSHAgentKMS) CreateKey(req *apiv1.CreateKeyRequest) (*apiv1.CreateKeyResponse, error) { - return nil, errors.Errorf("SSHAgentKMS doesn't support generating keys") -} - -// GetPublicKey returns the public key from the file passed in the request name. -func (k *SSHAgentKMS) GetPublicKey(req *apiv1.GetPublicKeyRequest) (crypto.PublicKey, error) { - var v crypto.PublicKey - if strings.HasPrefix(req.Name, "sshagentkms:") { - target, err := k.findKey(req.Name) - - if err != nil { - return nil, err - } - - s, err := k.agentClient.Signers() - if err != nil { - return nil, err - } - - sshPub := s[target].PublicKey() - - sshPubBytes := sshPub.Marshal() - - parsed, err := ssh.ParsePublicKey(sshPubBytes) - if err != nil { - return nil, err - } - - parsedCryptoKey := parsed.(ssh.CryptoPublicKey) - - // Then, we can call CryptoPublicKey() to get the actual crypto.PublicKey - v = parsedCryptoKey.CryptoPublicKey() - } else { - var err error - v, err = pemutil.Read(req.Name) - if err != nil { - return nil, err - } - } - - switch vv := v.(type) { - case *x509.Certificate: - return vv.PublicKey, nil - case *rsa.PublicKey, *ecdsa.PublicKey, ed25519.PublicKey: - return vv, nil - default: - return nil, errors.Errorf("unsupported public key type %T", v) - } -} diff --git a/vendor/github.com/smallstep/certificates/logging/clf.go b/vendor/github.com/smallstep/certificates/logging/clf.go deleted file mode 100644 index cee6c982..00000000 --- a/vendor/github.com/smallstep/certificates/logging/clf.go +++ /dev/null @@ -1,77 +0,0 @@ -package logging - -import ( - "bytes" - "fmt" - "strconv" - "time" - - "github.com/sirupsen/logrus" -) - -var clfFields = [...]string{ - "request-id", "remote-address", "name", "user-id", "time", "duration", "method", "path", "protocol", "status", "size", -} - -// CommonLogFormat implements the logrus.Formatter interface it writes logrus -// entries using a CLF format prepended by the request-id. -type CommonLogFormat struct{} - -// Format implements the logrus.Formatter interface. It returns the given -// logrus entry as a CLF line with the following format: -//

-	// <div>
-	//     ...
-	// </div>
    - if data[0] == '<' { - if i := p.html(data, true); i > 0 { - data = data[i:] - continue - } - } - - // title block - // - // % stuff - // % more stuff - // % even more stuff - if p.extensions&Titleblock != 0 { - if data[0] == '%' { - if i := p.titleBlock(data, true); i > 0 { - data = data[i:] - continue - } - } - } - - // blank lines. note: returns the # of bytes to skip - if i := p.isEmpty(data); i > 0 { - data = data[i:] - continue - } - - // indented code block: - // - // func max(a, b int) int { - // if a > b { - // return a - // } - // return b - // } - if p.codePrefix(data) > 0 { - data = data[p.code(data):] - continue - } - - // fenced code block: - // - // ``` go - // func fact(n int) int { - // if n <= 1 { - // return n - // } - // return n * fact(n-1) - // } - // ``` - if p.extensions&FencedCode != 0 { - if i := p.fencedCodeBlock(data, true); i > 0 { - data = data[i:] - continue - } - } - - // horizontal rule: - // - // ------ - // or - // ****** - // or - // ______ - if p.isHRule(data) { - p.addBlock(HorizontalRule, nil) - var i int - for i = 0; i < len(data) && data[i] != '\n'; i++ { - } - data = data[i:] - continue - } - - // block quote: - // - // > A big quote I found somewhere - // > on the web - if p.quotePrefix(data) > 0 { - data = data[p.quote(data):] - continue - } - - // table: - // - // Name | Age | Phone - // ------|-----|--------- - // Bob | 31 | 555-1234 - // Alice | 27 | 555-4321 - if p.extensions&Tables != 0 { - if i := p.table(data); i > 0 { - data = data[i:] - continue - } - } - - // an itemized/unordered list: - // - // * Item 1 - // * Item 2 - // - // also works with + or - - if p.uliPrefix(data) > 0 { - data = data[p.list(data, 0):] - continue - } - - // a numbered/ordered list: - // - // 1. Item 1 - // 2. Item 2 - if p.oliPrefix(data) > 0 { - data = data[p.list(data, ListTypeOrdered):] - continue - } - - // definition lists: - // - // Term 1 - // : Definition a - // : Definition b - // - // Term 2 - // : Definition c - if p.extensions&DefinitionLists != 0 { - if p.dliPrefix(data) > 0 { - data = data[p.list(data, ListTypeDefinition):] - continue - } - } - - // anything else must look like a normal paragraph - // note: this finds underlined headings, too - data = data[p.paragraph(data):] - } - - p.nesting-- -} - -func (p *Markdown) addBlock(typ NodeType, content []byte) *Node { - p.closeUnmatchedBlocks() - container := p.addChild(typ, 0) - container.content = content - return container -} - -func (p *Markdown) isPrefixHeading(data []byte) bool { - if data[0] != '#' { - return false - } - - if p.extensions&SpaceHeadings != 0 { - level := 0 - for level < 6 && level < len(data) && data[level] == '#' { - level++ - } - if level == len(data) || data[level] != ' ' { - return false - } - } - return true -} - -func (p *Markdown) prefixHeading(data []byte) int { - level := 0 - for level < 6 && level < len(data) && data[level] == '#' { - level++ - } - i := skipChar(data, level, ' ') - end := skipUntilChar(data, i, '\n') - skip := end - id := "" - if p.extensions&HeadingIDs != 0 { - j, k := 0, 0 - // find start/end of heading id - for j = i; j < end-1 && (data[j] != '{' || data[j+1] != '#'); j++ { - } - for k = j + 1; k < end && data[k] != '}'; k++ { - } - // extract heading id iff found - if j < end && k < end { - id = string(data[j+2 : k]) - end = j - skip = k + 1 - for end > 0 && data[end-1] == ' ' { - end-- - } - } - } - for end > 0 && data[end-1] == '#' { - if isBackslashEscaped(data, end-1) { - break - } - end-- - } - for end > 0 && data[end-1] == ' ' { - 
end-- - } - if end > i { - if id == "" && p.extensions&AutoHeadingIDs != 0 { - id = sanitized_anchor_name.Create(string(data[i:end])) - } - block := p.addBlock(Heading, data[i:end]) - block.HeadingID = id - block.Level = level - } - return skip -} - -func (p *Markdown) isUnderlinedHeading(data []byte) int { - // test of level 1 heading - if data[0] == '=' { - i := skipChar(data, 1, '=') - i = skipChar(data, i, ' ') - if i < len(data) && data[i] == '\n' { - return 1 - } - return 0 - } - - // test of level 2 heading - if data[0] == '-' { - i := skipChar(data, 1, '-') - i = skipChar(data, i, ' ') - if i < len(data) && data[i] == '\n' { - return 2 - } - return 0 - } - - return 0 -} - -func (p *Markdown) titleBlock(data []byte, doRender bool) int { - if data[0] != '%' { - return 0 - } - splitData := bytes.Split(data, []byte("\n")) - var i int - for idx, b := range splitData { - if !bytes.HasPrefix(b, []byte("%")) { - i = idx // - 1 - break - } - } - - data = bytes.Join(splitData[0:i], []byte("\n")) - consumed := len(data) - data = bytes.TrimPrefix(data, []byte("% ")) - data = bytes.Replace(data, []byte("\n% "), []byte("\n"), -1) - block := p.addBlock(Heading, data) - block.Level = 1 - block.IsTitleblock = true - - return consumed -} - -func (p *Markdown) html(data []byte, doRender bool) int { - var i, j int - - // identify the opening tag - if data[0] != '<' { - return 0 - } - curtag, tagfound := p.htmlFindTag(data[1:]) - - // handle special cases - if !tagfound { - // check for an HTML comment - if size := p.htmlComment(data, doRender); size > 0 { - return size - } - - // check for an
<hr> tag
-		if size := p.htmlHr(data, doRender); size > 0 {
-			return size
-		}
-
-		// no special case recognized
-		return 0
-	}
-
-	// look for an unindented matching closing tag
-	// followed by a blank line
-	found := false
-	/*
-		closetag := []byte("\n</" + curtag + ">")
-		j = len(curtag) + 1
-		for !found {
-			// scan for a closing tag at the beginning of a line
-			if skip := bytes.Index(data[j:], closetag); skip >= 0 {
-				j += skip + len(closetag)
-			} else {
-				break
-			}
-
-			// see if it is the only thing on the line
-			if skip := p.isEmpty(data[j:]); skip > 0 {
-				// see if it is followed by a blank line/eof
-				j += skip
-				if j >= len(data) {
-					found = true
-					i = j
-				} else {
-					if skip := p.isEmpty(data[j:]); skip > 0 {
-						j += skip
-						found = true
-						i = j
-					}
-				}
-			}
-		}
-	*/
-
-	// if not found, try a second pass looking for indented match
-	// but not if tag is "ins" or "del" (following original Markdown.pl)
-	if !found && curtag != "ins" && curtag != "del" {
-		i = 1
-		for i < len(data) {
-			i++
-			for i < len(data) && !(data[i-1] == '<' && data[i] == '/') {
-				i++
-			}
-
-			if i+2+len(curtag) >= len(data) {
-				break
-			}
-
-			j = p.htmlFindEnd(curtag, data[i-1:])
-
-			if j > 0 {
-				i += j - 1
-				found = true
-				break
-			}
-		}
-	}
-
-	if !found {
-		return 0
-	}
-
-	// the end of the block has been found
-	if doRender {
-		// trim newlines
-		end := i
-		for end > 0 && data[end-1] == '\n' {
-			end--
-		}
-		finalizeHTMLBlock(p.addBlock(HTMLBlock, data[:end]))
-	}
-
-	return i
-}
-
-func finalizeHTMLBlock(block *Node) {
-	block.Literal = block.content
-	block.content = nil
-}
-
-// HTML comment, lax form
-func (p *Markdown) htmlComment(data []byte, doRender bool) int {
-	i := p.inlineHTMLComment(data)
-	// needs to end with a blank line
-	if j := p.isEmpty(data[i:]); j > 0 {
-		size := i + j
-		if doRender {
-			// trim trailing newlines
-			end := size
-			for end > 0 && data[end-1] == '\n' {
-				end--
-			}
-			block := p.addBlock(HTMLBlock, data[:end])
-			finalizeHTMLBlock(block)
-		}
-		return size
-	}
-	return 0
-}
-
-// HR, which is the only self-closing block tag considered
-func (p *Markdown) htmlHr(data []byte, doRender bool) int {
-	if len(data) < 4 {
-		return 0
-	}
-	if data[0] != '<' || (data[1] != 'h' && data[1] != 'H') || (data[2] != 'r' && data[2] != 'R') {
-		return 0
-	}
-	if data[3] != ' ' && data[3] != '/' && data[3] != '>' {
-		// not an <hr> tag after all; at least not a valid one
-		return 0
-	}
-	i := 3
-	for i < len(data) && data[i] != '>' && data[i] != '\n' {
-		i++
-	}
-	if i < len(data) && data[i] == '>' {
-		i++
-		if j := p.isEmpty(data[i:]); j > 0 {
-			size := i + j
-			if doRender {
-				// trim newlines
-				end := size
-				for end > 0 && data[end-1] == '\n' {
-					end--
-				}
-				finalizeHTMLBlock(p.addBlock(HTMLBlock, data[:end]))
-			}
-			return size
-		}
-	}
-	return 0
-}
-
-func (p *Markdown) htmlFindTag(data []byte) (string, bool) {
-	i := 0
-	for i < len(data) && isalnum(data[i]) {
-		i++
-	}
-	key := string(data[:i])
-	if _, ok := blockTags[key]; ok {
-		return key, true
-	}
-	return "", false
-}
-
-func (p *Markdown) htmlFindEnd(tag string, data []byte) int {
-	// assume data[0] == '<' && data[1] == '/' already tested
-	if tag == "hr" {
-		return 2
-	}
-	// check if tag is a match
-	closetag := []byte("</" + tag + ">")
-	if !bytes.HasPrefix(data, closetag) {
-		return 0
-	}
-	i := len(closetag)
-
-	// check that the rest of the line is blank
-	skip := 0
-	if skip = p.isEmpty(data[i:]); skip == 0 {
-		return 0
-	}
-	i += skip
-	skip = 0
-
-	if i >= len(data) {
-		return i
-	}
-
-	if p.extensions&LaxHTMLBlocks != 0 {
-		return i
-	}
-	if skip = p.isEmpty(data[i:]); skip == 0 {
-		// following line must be blank
-		return 0
-	}
-
-	return i + skip
-}
-
-func (*Markdown) isEmpty(data []byte) int {
-	// it is okay to call isEmpty on an empty buffer
-	if len(data) == 0 {
-		return 0
-	}
-
-	var i int
-	for i = 0; i < len(data) && data[i] != '\n'; i++ {
-		if data[i] != ' ' && data[i] != '\t' {
-			return 0
-		}
-	}
-	if i < len(data) && data[i] == '\n' {
-		i++
-	}
-	return i
-}
-
-func (*Markdown) isHRule(data []byte) bool {
-	i := 0
-
-	// skip up to three spaces
-	for i < 3 && data[i] == ' ' {
-		i++
-	}
-
-	// look at the hrule char
-	if data[i] != '*' && data[i] != '-' && data[i] != '_' {
-		return false
-	}
-	c := data[i]
-
-	// the whole line must be the char or whitespace
-	n := 0
-	for i < len(data) && data[i] != '\n' {
-		switch {
-		case data[i] == c:
-			n++
-		case data[i] != ' ':
-			return false
-		}
-		i++
-	}
-
-	return n >= 3
-}
-
-// isFenceLine checks if there's a fence line (e.g., ``` or ``` go) at the beginning of data,
-// and returns the end index if so, or 0 otherwise. It also returns the marker found.
-// If syntax is not nil, it gets set to the syntax specified in the fence line.
-func isFenceLine(data []byte, syntax *string, oldmarker string) (end int, marker string) {
-	i, size := 0, 0
-
-	// skip up to three spaces
-	for i < len(data) && i < 3 && data[i] == ' ' {
-		i++
-	}
-
-	// check for the marker characters: ~ or `
-	if i >= len(data) {
-		return 0, ""
-	}
-	if data[i] != '~' && data[i] != '`' {
-		return 0, ""
-	}
-
-	c := data[i]
-
-	// the whole line must be the same char or whitespace
-	for i < len(data) && data[i] == c {
-		size++
-		i++
-	}
-
-	// the marker char must occur at least 3 times
-	if size < 3 {
-		return 0, ""
-	}
-	marker = string(data[i-size : i])
-
-	// if this is the end marker, it must match the beginning marker
-	if oldmarker != "" && marker != oldmarker {
-		return 0, ""
-	}
-
-	// TODO(shurcooL): It's probably a good idea to simplify the 2 code paths here
-	// into one, always get the syntax, and discard it if the caller doesn't care.
- if syntax != nil { - syn := 0 - i = skipChar(data, i, ' ') - - if i >= len(data) { - if i == len(data) { - return i, marker - } - return 0, "" - } - - syntaxStart := i - - if data[i] == '{' { - i++ - syntaxStart++ - - for i < len(data) && data[i] != '}' && data[i] != '\n' { - syn++ - i++ - } - - if i >= len(data) || data[i] != '}' { - return 0, "" - } - - // strip all whitespace at the beginning and the end - // of the {} block - for syn > 0 && isspace(data[syntaxStart]) { - syntaxStart++ - syn-- - } - - for syn > 0 && isspace(data[syntaxStart+syn-1]) { - syn-- - } - - i++ - } else { - for i < len(data) && !isspace(data[i]) { - syn++ - i++ - } - } - - *syntax = string(data[syntaxStart : syntaxStart+syn]) - } - - i = skipChar(data, i, ' ') - if i >= len(data) || data[i] != '\n' { - if i == len(data) { - return i, marker - } - return 0, "" - } - return i + 1, marker // Take newline into account. -} - -// fencedCodeBlock returns the end index if data contains a fenced code block at the beginning, -// or 0 otherwise. It writes to out if doRender is true, otherwise it has no side effects. -// If doRender is true, a final newline is mandatory to recognize the fenced code block. -func (p *Markdown) fencedCodeBlock(data []byte, doRender bool) int { - var syntax string - beg, marker := isFenceLine(data, &syntax, "") - if beg == 0 || beg >= len(data) { - return 0 - } - - var work bytes.Buffer - work.Write([]byte(syntax)) - work.WriteByte('\n') - - for { - // safe to assume beg < len(data) - - // check for the end of the code block - fenceEnd, _ := isFenceLine(data[beg:], nil, marker) - if fenceEnd != 0 { - beg += fenceEnd - break - } - - // copy the current line - end := skipUntilChar(data, beg, '\n') + 1 - - // did we reach the end of the buffer without a closing marker? 
- if end >= len(data) { - return 0 - } - - // verbatim copy to the working buffer - if doRender { - work.Write(data[beg:end]) - } - beg = end - } - - if doRender { - block := p.addBlock(CodeBlock, work.Bytes()) // TODO: get rid of temp buffer - block.IsFenced = true - finalizeCodeBlock(block) - } - - return beg -} - -func unescapeChar(str []byte) []byte { - if str[0] == '\\' { - return []byte{str[1]} - } - return []byte(html.UnescapeString(string(str))) -} - -func unescapeString(str []byte) []byte { - if reBackslashOrAmp.Match(str) { - return reEntityOrEscapedChar.ReplaceAllFunc(str, unescapeChar) - } - return str -} - -func finalizeCodeBlock(block *Node) { - if block.IsFenced { - newlinePos := bytes.IndexByte(block.content, '\n') - firstLine := block.content[:newlinePos] - rest := block.content[newlinePos+1:] - block.Info = unescapeString(bytes.Trim(firstLine, "\n")) - block.Literal = rest - } else { - block.Literal = block.content - } - block.content = nil -} - -func (p *Markdown) table(data []byte) int { - table := p.addBlock(Table, nil) - i, columns := p.tableHeader(data) - if i == 0 { - p.tip = table.Parent - table.Unlink() - return 0 - } - - p.addBlock(TableBody, nil) - - for i < len(data) { - pipes, rowStart := 0, i - for ; i < len(data) && data[i] != '\n'; i++ { - if data[i] == '|' { - pipes++ - } - } - - if pipes == 0 { - i = rowStart - break - } - - // include the newline in data sent to tableRow - if i < len(data) && data[i] == '\n' { - i++ - } - p.tableRow(data[rowStart:i], columns, false) - } - - return i -} - -// check if the specified position is preceded by an odd number of backslashes -func isBackslashEscaped(data []byte, i int) bool { - backslashes := 0 - for i-backslashes-1 >= 0 && data[i-backslashes-1] == '\\' { - backslashes++ - } - return backslashes&1 == 1 -} - -func (p *Markdown) tableHeader(data []byte) (size int, columns []CellAlignFlags) { - i := 0 - colCount := 1 - for i = 0; i < len(data) && data[i] != '\n'; i++ { - if data[i] == '|' && !isBackslashEscaped(data, i) { - colCount++ - } - } - - // doesn't look like a table header - if colCount == 1 { - return - } - - // include the newline in the data sent to tableRow - j := i - if j < len(data) && data[j] == '\n' { - j++ - } - header := data[:j] - - // column count ignores pipes at beginning or end of line - if data[0] == '|' { - colCount-- - } - if i > 2 && data[i-1] == '|' && !isBackslashEscaped(data, i-1) { - colCount-- - } - - columns = make([]CellAlignFlags, colCount) - - // move on to the header underline - i++ - if i >= len(data) { - return - } - - if data[i] == '|' && !isBackslashEscaped(data, i) { - i++ - } - i = skipChar(data, i, ' ') - - // each column header is of form: / *:?-+:? 
*|/ with # dashes + # colons >= 3 - // and trailing | optional on last column - col := 0 - for i < len(data) && data[i] != '\n' { - dashes := 0 - - if data[i] == ':' { - i++ - columns[col] |= TableAlignmentLeft - dashes++ - } - for i < len(data) && data[i] == '-' { - i++ - dashes++ - } - if i < len(data) && data[i] == ':' { - i++ - columns[col] |= TableAlignmentRight - dashes++ - } - for i < len(data) && data[i] == ' ' { - i++ - } - if i == len(data) { - return - } - // end of column test is messy - switch { - case dashes < 3: - // not a valid column - return - - case data[i] == '|' && !isBackslashEscaped(data, i): - // marker found, now skip past trailing whitespace - col++ - i++ - for i < len(data) && data[i] == ' ' { - i++ - } - - // trailing junk found after last column - if col >= colCount && i < len(data) && data[i] != '\n' { - return - } - - case (data[i] != '|' || isBackslashEscaped(data, i)) && col+1 < colCount: - // something else found where marker was required - return - - case data[i] == '\n': - // marker is optional for the last column - col++ - - default: - // trailing junk found after last column - return - } - } - if col != colCount { - return - } - - p.addBlock(TableHead, nil) - p.tableRow(header, columns, true) - size = i - if size < len(data) && data[size] == '\n' { - size++ - } - return -} - -func (p *Markdown) tableRow(data []byte, columns []CellAlignFlags, header bool) { - p.addBlock(TableRow, nil) - i, col := 0, 0 - - if data[i] == '|' && !isBackslashEscaped(data, i) { - i++ - } - - for col = 0; col < len(columns) && i < len(data); col++ { - for i < len(data) && data[i] == ' ' { - i++ - } - - cellStart := i - - for i < len(data) && (data[i] != '|' || isBackslashEscaped(data, i)) && data[i] != '\n' { - i++ - } - - cellEnd := i - - // skip the end-of-cell marker, possibly taking us past end of buffer - i++ - - for cellEnd > cellStart && cellEnd-1 < len(data) && data[cellEnd-1] == ' ' { - cellEnd-- - } - - cell := p.addBlock(TableCell, data[cellStart:cellEnd]) - cell.IsHeader = header - cell.Align = columns[col] - } - - // pad it out with empty columns to get the right number - for ; col < len(columns); col++ { - cell := p.addBlock(TableCell, nil) - cell.IsHeader = header - cell.Align = columns[col] - } - - // silently ignore rows with too many cells -} - -// returns blockquote prefix length -func (p *Markdown) quotePrefix(data []byte) int { - i := 0 - for i < 3 && i < len(data) && data[i] == ' ' { - i++ - } - if i < len(data) && data[i] == '>' { - if i+1 < len(data) && data[i+1] == ' ' { - return i + 2 - } - return i + 1 - } - return 0 -} - -// blockquote ends with at least one blank line -// followed by something without a blockquote prefix -func (p *Markdown) terminateBlockquote(data []byte, beg, end int) bool { - if p.isEmpty(data[beg:]) <= 0 { - return false - } - if end >= len(data) { - return true - } - return p.quotePrefix(data[end:]) == 0 && p.isEmpty(data[end:]) == 0 -} - -// parse a blockquote fragment -func (p *Markdown) quote(data []byte) int { - block := p.addBlock(BlockQuote, nil) - var raw bytes.Buffer - beg, end := 0, 0 - for beg < len(data) { - end = beg - // Step over whole lines, collecting them. 
While doing that, check for - // fenced code and if one's found, incorporate it altogether, - // irregardless of any contents inside it - for end < len(data) && data[end] != '\n' { - if p.extensions&FencedCode != 0 { - if i := p.fencedCodeBlock(data[end:], false); i > 0 { - // -1 to compensate for the extra end++ after the loop: - end += i - 1 - break - } - } - end++ - } - if end < len(data) && data[end] == '\n' { - end++ - } - if pre := p.quotePrefix(data[beg:]); pre > 0 { - // skip the prefix - beg += pre - } else if p.terminateBlockquote(data, beg, end) { - break - } - // this line is part of the blockquote - raw.Write(data[beg:end]) - beg = end - } - p.block(raw.Bytes()) - p.finalize(block) - return end -} - -// returns prefix length for block code -func (p *Markdown) codePrefix(data []byte) int { - if len(data) >= 1 && data[0] == '\t' { - return 1 - } - if len(data) >= 4 && data[0] == ' ' && data[1] == ' ' && data[2] == ' ' && data[3] == ' ' { - return 4 - } - return 0 -} - -func (p *Markdown) code(data []byte) int { - var work bytes.Buffer - - i := 0 - for i < len(data) { - beg := i - for i < len(data) && data[i] != '\n' { - i++ - } - if i < len(data) && data[i] == '\n' { - i++ - } - - blankline := p.isEmpty(data[beg:i]) > 0 - if pre := p.codePrefix(data[beg:i]); pre > 0 { - beg += pre - } else if !blankline { - // non-empty, non-prefixed line breaks the pre - i = beg - break - } - - // verbatim copy to the working buffer - if blankline { - work.WriteByte('\n') - } else { - work.Write(data[beg:i]) - } - } - - // trim all the \n off the end of work - workbytes := work.Bytes() - eol := len(workbytes) - for eol > 0 && workbytes[eol-1] == '\n' { - eol-- - } - if eol != len(workbytes) { - work.Truncate(eol) - } - - work.WriteByte('\n') - - block := p.addBlock(CodeBlock, work.Bytes()) // TODO: get rid of temp buffer - block.IsFenced = false - finalizeCodeBlock(block) - - return i -} - -// returns unordered list item prefix -func (p *Markdown) uliPrefix(data []byte) int { - i := 0 - // start with up to 3 spaces - for i < len(data) && i < 3 && data[i] == ' ' { - i++ - } - if i >= len(data)-1 { - return 0 - } - // need one of {'*', '+', '-'} followed by a space or a tab - if (data[i] != '*' && data[i] != '+' && data[i] != '-') || - (data[i+1] != ' ' && data[i+1] != '\t') { - return 0 - } - return i + 2 -} - -// returns ordered list item prefix -func (p *Markdown) oliPrefix(data []byte) int { - i := 0 - - // start with up to 3 spaces - for i < 3 && i < len(data) && data[i] == ' ' { - i++ - } - - // count the digits - start := i - for i < len(data) && data[i] >= '0' && data[i] <= '9' { - i++ - } - if start == i || i >= len(data)-1 { - return 0 - } - - // we need >= 1 digits followed by a dot and a space or a tab - if data[i] != '.' 
|| !(data[i+1] == ' ' || data[i+1] == '\t') { - return 0 - } - return i + 2 -} - -// returns definition list item prefix -func (p *Markdown) dliPrefix(data []byte) int { - if len(data) < 2 { - return 0 - } - i := 0 - // need a ':' followed by a space or a tab - if data[i] != ':' || !(data[i+1] == ' ' || data[i+1] == '\t') { - return 0 - } - for i < len(data) && data[i] == ' ' { - i++ - } - return i + 2 -} - -// parse ordered or unordered list block -func (p *Markdown) list(data []byte, flags ListType) int { - i := 0 - flags |= ListItemBeginningOfList - block := p.addBlock(List, nil) - block.ListFlags = flags - block.Tight = true - - for i < len(data) { - skip := p.listItem(data[i:], &flags) - if flags&ListItemContainsBlock != 0 { - block.ListData.Tight = false - } - i += skip - if skip == 0 || flags&ListItemEndOfList != 0 { - break - } - flags &= ^ListItemBeginningOfList - } - - above := block.Parent - finalizeList(block) - p.tip = above - return i -} - -// Returns true if block ends with a blank line, descending if needed -// into lists and sublists. -func endsWithBlankLine(block *Node) bool { - // TODO: figure this out. Always false now. - for block != nil { - //if block.lastLineBlank { - //return true - //} - t := block.Type - if t == List || t == Item { - block = block.LastChild - } else { - break - } - } - return false -} - -func finalizeList(block *Node) { - block.open = false - item := block.FirstChild - for item != nil { - // check for non-final list item ending with blank line: - if endsWithBlankLine(item) && item.Next != nil { - block.ListData.Tight = false - break - } - // recurse into children of list item, to see if there are spaces - // between any of them: - subItem := item.FirstChild - for subItem != nil { - if endsWithBlankLine(subItem) && (item.Next != nil || subItem.Next != nil) { - block.ListData.Tight = false - break - } - subItem = subItem.Next - } - item = item.Next - } -} - -// Parse a single list item. -// Assumes initial prefix is already removed if this is a sublist. 
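A quick sanity check of the three prefix helpers above — a sketch assuming a zero-value parser is acceptable (these helpers read no parser state):

```go
p := &Markdown{}
fmt.Println(p.uliPrefix([]byte("* item\n")))   // 2: bullet plus the space
fmt.Println(p.oliPrefix([]byte("12. item\n"))) // 4: digits, dot, space
fmt.Println(p.dliPrefix([]byte(": def\n")))    // 2: colon plus the space
```

(`listItem`, introduced by the comment above, follows.)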
-func (p *Markdown) listItem(data []byte, flags *ListType) int { - // keep track of the indentation of the first line - itemIndent := 0 - if data[0] == '\t' { - itemIndent += 4 - } else { - for itemIndent < 3 && data[itemIndent] == ' ' { - itemIndent++ - } - } - - var bulletChar byte = '*' - i := p.uliPrefix(data) - if i == 0 { - i = p.oliPrefix(data) - } else { - bulletChar = data[i-2] - } - if i == 0 { - i = p.dliPrefix(data) - // reset definition term flag - if i > 0 { - *flags &= ^ListTypeTerm - } - } - if i == 0 { - // if in definition list, set term flag and continue - if *flags&ListTypeDefinition != 0 { - *flags |= ListTypeTerm - } else { - return 0 - } - } - - // skip leading whitespace on first line - for i < len(data) && data[i] == ' ' { - i++ - } - - // find the end of the line - line := i - for i > 0 && i < len(data) && data[i-1] != '\n' { - i++ - } - - // get working buffer - var raw bytes.Buffer - - // put the first line into the working buffer - raw.Write(data[line:i]) - line = i - - // process the following lines - containsBlankLine := false - sublist := 0 - lastChunkSize := 0 - -gatherlines: - for line < len(data) { - i++ - - // find the end of this line - for i < len(data) && data[i-1] != '\n' { - i++ - } - - // if it is an empty line, guess that it is part of this item - // and move on to the next line - if p.isEmpty(data[line:i]) > 0 { - containsBlankLine = true - line = i - continue - } - - // calculate the indentation - indent := 0 - indentIndex := 0 - if data[line] == '\t' { - indentIndex++ - indent += 4 - } else { - for indent < 4 && line+indent < i && data[line+indent] == ' ' { - indent++ - indentIndex++ - } - } - - chunk := data[line+indentIndex : i] - - // evaluate how this line fits in - switch { - // is this a nested list item? - case (p.uliPrefix(chunk) > 0 && !p.isHRule(chunk)) || - p.oliPrefix(chunk) > 0 || - p.dliPrefix(chunk) > 0: - - if containsBlankLine { - *flags |= ListItemContainsBlock - } - - // to be a nested list, it must be indented more - // if not, it is the next item in the same list - if indent <= itemIndent { - break gatherlines - } - - // is this the first item in the nested list? - if sublist == 0 { - if p.dliPrefix(chunk) > 0 { - sublist = raw.Len() - lastChunkSize - } else { - sublist = raw.Len() - } - } - - // is this a nested prefix heading? - case p.isPrefixHeading(chunk): - // if the heading is not indented, it is not nested in the list - // and thus ends the list - if containsBlankLine && indent < 4 { - *flags |= ListItemEndOfList - break gatherlines - } - *flags |= ListItemContainsBlock - - // anything following an empty line is only part - // of this item if it is indented 4 spaces - // (regardless of the indentation of the beginning of the item) - case containsBlankLine && indent < 4: - if *flags&ListTypeDefinition != 0 && i < len(data)-1 { - // is the next item still a part of this list? 
- next := i - for next < len(data) && data[next] != '\n' { - next++ - } - for next < len(data)-1 && data[next] == '\n' { - next++ - } - if i < len(data)-1 && data[i] != ':' && data[next] != ':' { - *flags |= ListItemEndOfList - } - } else { - *flags |= ListItemEndOfList - } - break gatherlines - - // a blank line means this should be parsed as a block - case containsBlankLine: - raw.WriteByte('\n') - *flags |= ListItemContainsBlock - } - - // if this line was preceded by one or more blanks, - // re-introduce the blank into the buffer - if containsBlankLine { - containsBlankLine = false - raw.WriteByte('\n') - } - - // add the line into the working buffer without prefix - raw.Write(data[line+indentIndex : i]) - - // remember how much was written into raw, if this turns out to be a - // definition list we'll need this number to know where the sublist starts - lastChunkSize = i - (line + indentIndex) - - line = i - } - - rawBytes := raw.Bytes() - - block := p.addBlock(Item, nil) - block.ListFlags = *flags - block.Tight = false - block.BulletChar = bulletChar - block.Delimiter = '.' // Only '.' is possible in Markdown, but ')' will also be possible in CommonMark - - // render the contents of the list item - if *flags&ListItemContainsBlock != 0 && *flags&ListTypeTerm == 0 { - // intermediate render of block item, except for definition term - if sublist > 0 { - p.block(rawBytes[:sublist]) - p.block(rawBytes[sublist:]) - } else { - p.block(rawBytes) - } - } else { - // intermediate render of inline item - if sublist > 0 { - child := p.addChild(Paragraph, 0) - child.content = rawBytes[:sublist] - p.block(rawBytes[sublist:]) - } else { - child := p.addChild(Paragraph, 0) - child.content = rawBytes - } - } - return line -} - -// render a single paragraph that has already been parsed out -func (p *Markdown) renderParagraph(data []byte) { - if len(data) == 0 { - return - } - - // trim leading spaces - beg := 0 - for data[beg] == ' ' { - beg++ - } - - end := len(data) - // trim trailing newline - if data[len(data)-1] == '\n' { - end-- - } - - // trim trailing spaces - for end > beg && data[end-1] == ' ' { - end-- - } - - p.addBlock(Paragraph, data[beg:end]) -} - -func (p *Markdown) paragraph(data []byte) int { - // prev: index of 1st char of previous line - // line: index of 1st char of current line - // i: index of cursor/end of current line - var prev, line, i int - tabSize := TabSizeDefault - if p.extensions&TabSizeEight != 0 { - tabSize = TabSizeDouble - } - // keep going until we find something to mark the end of the paragraph - for i < len(data) { - // mark the beginning of the current line - prev = line - current := data[i:] - line = i - - // did we find a reference or a footnote? If so, end a paragraph - // preceding it and report that we have consumed up to the end of that - // reference: - if refEnd := isReference(p, current, tabSize); refEnd > 0 { - p.renderParagraph(data[:i]) - return i + refEnd - } - - // did we find a blank line marking the end of the paragraph? - if n := p.isEmpty(current); n > 0 { - // did this blank line followed by a definition list item? 
- if p.extensions&DefinitionLists != 0 { - if i < len(data)-1 && data[i+1] == ':' { - return p.list(data[prev:], ListTypeDefinition) - } - } - - p.renderParagraph(data[:i]) - return i + n - } - - // an underline under some text marks a heading, so our paragraph ended on prev line - if i > 0 { - if level := p.isUnderlinedHeading(current); level > 0 { - // render the paragraph - p.renderParagraph(data[:prev]) - - // ignore leading and trailing whitespace - eol := i - 1 - for prev < eol && data[prev] == ' ' { - prev++ - } - for eol > prev && data[eol-1] == ' ' { - eol-- - } - - id := "" - if p.extensions&AutoHeadingIDs != 0 { - id = sanitized_anchor_name.Create(string(data[prev:eol])) - } - - block := p.addBlock(Heading, data[prev:eol]) - block.Level = level - block.HeadingID = id - - // find the end of the underline - for i < len(data) && data[i] != '\n' { - i++ - } - return i - } - } - - // if the next line starts a block of HTML, then the paragraph ends here - if p.extensions&LaxHTMLBlocks != 0 { - if data[i] == '<' && p.html(current, false) > 0 { - // rewind to before the HTML block - p.renderParagraph(data[:i]) - return i - } - } - - // if there's a prefixed heading or a horizontal rule after this, paragraph is over - if p.isPrefixHeading(current) || p.isHRule(current) { - p.renderParagraph(data[:i]) - return i - } - - // if there's a fenced code block, paragraph is over - if p.extensions&FencedCode != 0 { - if p.fencedCodeBlock(current, false) > 0 { - p.renderParagraph(data[:i]) - return i - } - } - - // if there's a definition list item, prev line is a definition term - if p.extensions&DefinitionLists != 0 { - if p.dliPrefix(current) != 0 { - ret := p.list(data[prev:], ListTypeDefinition) - return ret - } - } - - // if there's a list after this, paragraph is over - if p.extensions&NoEmptyLineBeforeBlock != 0 { - if p.uliPrefix(current) != 0 || - p.oliPrefix(current) != 0 || - p.quotePrefix(current) != 0 || - p.codePrefix(current) != 0 { - p.renderParagraph(data[:i]) - return i - } - } - - // otherwise, scan to the beginning of the next line - nl := bytes.IndexByte(data[i:], '\n') - if nl >= 0 { - i += nl + 1 - } else { - i += len(data[i:]) - } - } - - p.renderParagraph(data[:i]) - return i -} - -func skipChar(data []byte, start int, char byte) int { - i := start - for i < len(data) && data[i] == char { - i++ - } - return i -} - -func skipUntilChar(text []byte, start int, char byte) int { - i := start - for i < len(text) && text[i] != char { - i++ - } - return i -} diff --git a/vendor/github.com/smallstep/cli/pkg/blackfriday/doc.go b/vendor/github.com/smallstep/cli/pkg/blackfriday/doc.go deleted file mode 100644 index 5b3fa987..00000000 --- a/vendor/github.com/smallstep/cli/pkg/blackfriday/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -// Package blackfriday is a markdown processor. -// -// It translates plain text with simple formatting rules into an AST, which can -// then be further processed to HTML (provided by Blackfriday itself) or other -// formats (provided by the community). -// -// The simplest way to invoke Blackfriday is to call the Run function. It will -// take a text input and produce a text output in HTML (or other format). -// -// A slightly more sophisticated way to use Blackfriday is to create a Markdown -// processor and to call Parse, which returns a syntax tree for the input -// document. You can leverage Blackfriday's parsing for content extraction from -// markdown documents. You can assign a custom renderer and set various options -// to the Markdown processor. 
-//
-// If you're interested in calling Blackfriday from command line, see
-// https://github.com/russross/blackfriday-tool.
-package blackfriday
diff --git a/vendor/github.com/smallstep/cli/pkg/blackfriday/esc.go b/vendor/github.com/smallstep/cli/pkg/blackfriday/esc.go
deleted file mode 100644
index 6385f27c..00000000
--- a/vendor/github.com/smallstep/cli/pkg/blackfriday/esc.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package blackfriday
-
-import (
-	"html"
-	"io"
-)
-
-var htmlEscaper = [256][]byte{
-	'&': []byte("&amp;"),
-	'<': []byte("&lt;"),
-	'>': []byte("&gt;"),
-	'"': []byte("&quot;"),
-}
-
-func escapeHTML(w io.Writer, s []byte) {
-	var start, end int
-	for end < len(s) {
-		escSeq := htmlEscaper[s[end]]
-		if escSeq != nil {
-			w.Write(s[start:end])
-			w.Write(escSeq)
-			start = end + 1
-		}
-		end++
-	}
-	if start < len(s) && end <= len(s) {
-		w.Write(s[start:end])
-	}
-}
-
-func escLink(w io.Writer, text []byte) {
-	unesc := html.UnescapeString(string(text))
-	escapeHTML(w, []byte(unesc))
-}
diff --git a/vendor/github.com/smallstep/cli/pkg/blackfriday/html.go b/vendor/github.com/smallstep/cli/pkg/blackfriday/html.go
deleted file mode 100644
index 25fb185e..00000000
--- a/vendor/github.com/smallstep/cli/pkg/blackfriday/html.go
+++ /dev/null
@@ -1,940 +0,0 @@
-//
-// Blackfriday Markdown Processor
-// Available at http://github.com/russross/blackfriday
-//
-// Copyright © 2011 Russ Ross <russ@russross.com>.
-// Distributed under the Simplified BSD License.
-// See README.md for details.
-//
-
-//
-//
-// HTML rendering backend
-//
-//
-
-package blackfriday
-
-import (
-	"bytes"
-	"fmt"
-	"io"
-	"regexp"
-	"strings"
-)
-
-// HTMLFlags control optional behavior of HTML renderer.
-type HTMLFlags int
-
-// HTML renderer configuration options.
-const (
-	HTMLFlagsNone           HTMLFlags = 0
-	SkipHTML                HTMLFlags = 1 << iota // Skip preformatted HTML blocks
-	SkipImages                                    // Skip embedded images
-	SkipLinks                                     // Skip all links
-	Safelink                                      // Only link to trusted protocols
-	NofollowLinks                                 // Only link with rel="nofollow"
-	NoreferrerLinks                               // Only link with rel="noreferrer"
-	HrefTargetBlank                               // Add a blank target
-	CompletePage                                  // Generate a complete HTML page
-	UseXHTML                                      // Generate XHTML output instead of HTML
-	FootnoteReturnLinks                           // Generate a link at the end of a footnote to return to the source
-	Smartypants                                   // Enable smart punctuation substitutions
-	SmartypantsFractions                          // Enable smart fractions (with Smartypants)
-	SmartypantsDashes                             // Enable smart dashes (with Smartypants)
-	SmartypantsLatexDashes                        // Enable LaTeX-style dashes (with Smartypants)
-	SmartypantsAngledQuotes                       // Enable angled double quotes (with Smartypants) for double quotes rendering
-	SmartypantsQuotesNBSP                         // Enable « French guillemets » (with Smartypants)
-	TOC                                           // Generate a table of contents
-)
-
-var (
-	htmlTagRe = regexp.MustCompile("(?i)^" + htmlTag)
-)
-
-const (
-	htmlTag = "(?:" + openTag + "|" + closeTag + "|" + htmlComment + "|" +
-		processingInstruction + "|" + declaration + "|" + cdata + ")"
-	closeTag              = "</" + tagName + "\\s*[>]"
-	openTag               = "<" + tagName + attribute + "*" + "\\s*/?>"
-	attribute             = "(?:" + "\\s+" + attributeName + attributeValueSpec + "?)"
-	attributeValue        = "(?:" + unquotedValue + "|" + singleQuotedValue + "|" + doubleQuotedValue + ")"
-	attributeValueSpec    = "(?:" + "\\s*=" + "\\s*" + attributeValue + ")"
-	attributeName         = "[a-zA-Z_:][a-zA-Z0-9:._-]*"
-	cdata                 = "<!\\[CDATA\\[[\\s\\S]*?\\]\\]>"
-	declaration           = "<![A-Z]+" + "\\s+[^>]*>"
-	doubleQuotedValue     = "\"[^\"]*\""
-	htmlComment           = "<!---->|<!--(?:-?[^>-])(?:-?[^-])*-->"
-	processingInstruction = "[<][?].*?[?][>]"
-	singleQuotedValue     = "'[^']*'"
-	tagName               = "[A-Za-z][A-Za-z0-9-]*"
-	unquotedValue         = "[^\"'=<>`\\x00-\\x20]+"
-)
-
-// HTMLRendererParameters is a collection of supplementary parameters tweaking
-// the behavior of various parts of HTML renderer.
-type HTMLRendererParameters struct {
-	// Prepend this text to each relative URL.
-	AbsolutePrefix string
-	// Add this text to each footnote anchor, to ensure uniqueness.
-	FootnoteAnchorPrefix string
-	// Show this text inside the <a> tag for a footnote return link, if the
-	// HTML_FOOTNOTE_RETURN_LINKS flag is enabled. If blank, the string
-	// <sup>[return]</sup> is used.
-	FootnoteReturnLinkContents string
-	// If set, add this text to the front of each Heading ID, to ensure
-	// uniqueness.
-	HeadingIDPrefix string
-	// If set, add this text to the back of each Heading ID, to ensure uniqueness.
-	HeadingIDSuffix string
-
-	Title string // Document title (used if CompletePage is set)
-	CSS   string // Optional CSS file URL (used if CompletePage is set)
-	Icon  string // Optional icon file URL (used if CompletePage is set)
-
-	Flags HTMLFlags // Flags allow customizing this renderer's behavior
-}
-
-// HTMLRenderer is a type that implements the Renderer interface for HTML output.
-//
-// Do not create this directly, instead use the NewHTMLRenderer function.
-type HTMLRenderer struct {
-	HTMLRendererParameters
-
-	closeTag string // how to end singleton tags: either " />" or ">"
-
-	// Track heading IDs to prevent ID collision in a single generation.
-	headingIDs map[string]int
-
-	lastOutputLen int
-	disableTags   int
-
-	sr *SPRenderer
-}
-
-const (
-	xhtmlClose = " />"
-	htmlClose  = ">"
-)
-
-// NewHTMLRenderer creates and configures an HTMLRenderer object, which
-// satisfies the Renderer interface.
-func NewHTMLRenderer(params HTMLRendererParameters) *HTMLRenderer {
-	// configure the rendering engine
-	closeTag := htmlClose
-	if params.Flags&UseXHTML != 0 {
-		closeTag = xhtmlClose
-	}
-
-	if params.FootnoteReturnLinkContents == "" {
-		params.FootnoteReturnLinkContents = `<sup>[return]</sup>`
-	}
-
-	return &HTMLRenderer{
-		HTMLRendererParameters: params,
-
-		closeTag:   closeTag,
-		headingIDs: make(map[string]int),
-
-		sr: NewSmartypantsRenderer(params.Flags),
-	}
-}
-
-func isHTMLTag(tag []byte, tagname string) bool {
-	found, _ := findHTMLTagPos(tag, tagname)
-	return found
-}
-
-// Look for a character, but ignore it when it's in any kind of quotes, it
-// might be JavaScript
-func skipUntilCharIgnoreQuotes(html []byte, start int, char byte) int {
-	inSingleQuote := false
-	inDoubleQuote := false
-	inGraveQuote := false
-	i := start
-	for i < len(html) {
-		switch {
-		case html[i] == char && !inSingleQuote && !inDoubleQuote && !inGraveQuote:
-			return i
-		case html[i] == '\'':
-			inSingleQuote = !inSingleQuote
-		case html[i] == '"':
-			inDoubleQuote = !inDoubleQuote
-		case html[i] == '`':
-			inGraveQuote = !inGraveQuote
-		}
-		i++
-	}
-	return start
-}
-
-func findHTMLTagPos(tag []byte, tagname string) (bool, int) {
-	i := 0
-	if i < len(tag) && tag[0] != '<' {
-		return false, -1
-	}
-	i++
-	i = skipSpace(tag, i)
-
-	if i < len(tag) && tag[i] == '/' {
-		i++
-	}
-
-	i = skipSpace(tag, i)
-	j := 0
-	for ; i < len(tag); i, j = i+1, j+1 {
-		if j >= len(tagname) {
-			break
-		}
-
-		if strings.ToLower(string(tag[i]))[0] != tagname[j] {
-			return false, -1
-		}
-	}
-
-	if i == len(tag) {
-		return false, -1
-	}
-
-	rightAngle := skipUntilCharIgnoreQuotes(tag, i, '>')
-	if rightAngle >= i {
-		return true, rightAngle
-	}
-
-	return false, -1
-}
-
-func skipSpace(tag []byte, i int) int {
-	for i < len(tag) && isspace(tag[i]) {
-		i++
-	}
-	return i
-}
-
-func isRelativeLink(link []byte) (yes bool) {
-	// a tag begin with '#'
-	if link[0] == '#' {
-		return true
-	}
-
-	// link begin with '/' but not '//', the second maybe a protocol relative link
-	if len(link) >= 2 && link[0] == '/' && link[1] != '/' {
-		return true
-	}
-
-	// only the root '/'
-	if len(link) == 1 && link[0] == '/' {
-		return true
-	}
-
-	// current directory : begin with "./"
-	if bytes.HasPrefix(link, []byte("./")) {
-		return true
-	}
-
-	// parent directory : begin with "../"
-	if bytes.HasPrefix(link, []byte("../")) {
-		return true
-	}
-
-	return false
-}
-
-func (r *HTMLRenderer) ensureUniqueHeadingID(id string) string {
-	for count, found := r.headingIDs[id]; found; count, found = r.headingIDs[id] {
-		tmp := fmt.Sprintf("%s-%d", id, count+1)
-
-		if _, tmpFound := r.headingIDs[tmp]; !tmpFound {
-			r.headingIDs[id] = count + 1
-			id = tmp
-		} else {
-			id = id + "-1"
-		}
-	}
-
-	if _, found := r.headingIDs[id]; !found {
-		r.headingIDs[id] = 0
-	}
-
-	return id
-}
-
-func (r *HTMLRenderer) addAbsPrefix(link []byte) []byte {
-	if r.AbsolutePrefix != "" && isRelativeLink(link) && link[0] != '.' {
-		newDest := r.AbsolutePrefix
-		if link[0] != '/' {
-			newDest += "/"
-		}
-		newDest += string(link)
-		return []byte(newDest)
-	}
-	return link
-}
-
-func appendLinkAttrs(attrs []string, flags HTMLFlags, link []byte) []string {
-	if isRelativeLink(link) {
-		return attrs
-	}
-	val := []string{}
-	if flags&NofollowLinks != 0 {
-		val = append(val, "nofollow")
-	}
-	if flags&NoreferrerLinks != 0 {
-		val = append(val, "noreferrer")
-	}
-	if flags&HrefTargetBlank != 0 {
-		attrs = append(attrs, "target=\"_blank\"")
-	}
-	if len(val) == 0 {
-		return attrs
-	}
-	attr := fmt.Sprintf("rel=%q", strings.Join(val, " "))
-	return append(attrs, attr)
-}
-
-func isMailto(link []byte) bool {
-	return bytes.HasPrefix(link, []byte("mailto:"))
-}
-
-func needSkipLink(flags HTMLFlags, dest []byte) bool {
-	if flags&SkipLinks != 0 {
-		return true
-	}
-	return flags&Safelink != 0 && !isSafeLink(dest) && !isMailto(dest)
-}
-
-func isSmartypantable(node *Node) bool {
-	pt := node.Parent.Type
-	return pt != Link && pt != CodeBlock && pt != Code
-}
-
-func appendLanguageAttr(attrs []string, info []byte) []string {
-	if len(info) == 0 {
-		return attrs
-	}
-	endOfLang := bytes.IndexAny(info, "\t ")
-	if endOfLang < 0 {
-		endOfLang = len(info)
-	}
-	return append(attrs, fmt.Sprintf("class=\"language-%s\"", info[:endOfLang]))
-}
-
-func (r *HTMLRenderer) tag(w io.Writer, name []byte, attrs []string) {
-	w.Write(name)
-	if len(attrs) > 0 {
-		w.Write(spaceBytes)
-		w.Write([]byte(strings.Join(attrs, " ")))
-	}
-	w.Write(gtBytes)
-	r.lastOutputLen = 1
-}
-
-func footnoteRef(prefix string, node *Node) []byte {
-	urlFrag := prefix + string(slugify(node.Destination))
-	anchor := fmt.Sprintf(`<a rel="footnote" href="#fn:%s">%d</a>`, urlFrag, node.NoteID)
-	return []byte(fmt.Sprintf(`<sup class="footnote-ref" id="fnref:%s">%s</sup>`, urlFrag, anchor))
-}
-
-func footnoteItem(prefix string, slug []byte) []byte {
-	return []byte(fmt.Sprintf(`<li id="fn:%s%s">`, prefix, slug))
-}
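Editor's note: NewHTMLRenderer above is the documented constructor for this renderer, with behavior driven by HTMLRendererParameters. A hedged configuration sketch against the upstream v2 API follows; the import path and the particular flag combination are assumptions for illustration, not part of this repository.

```go
package main

import (
	"os"

	blackfriday "github.com/russross/blackfriday/v2" // assumed upstream path
)

func main() {
	r := blackfriday.NewHTMLRenderer(blackfriday.HTMLRendererParameters{
		Title: "Release notes", // used because CompletePage is set
		CSS:   "style.css",     // optional stylesheet URL, also CompletePage-only
		Flags: blackfriday.CommonHTMLFlags | blackfriday.CompletePage | blackfriday.TOC,
	})
	out := blackfriday.Run([]byte("# Notes\n\nHello *world*.\n"),
		blackfriday.WithRenderer(r))
	os.Stdout.Write(out)
}
```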
-
-func footnoteReturnLink(prefix, returnLink string, slug []byte) []byte {
-	const format = ` <a class="footnote-return" href="#fnref:%s%s">%s</a>`
-	return []byte(fmt.Sprintf(format, prefix, slug, returnLink))
-}
-
-func itemOpenCR(node *Node) bool {
-	if node.Prev == nil {
-		return false
-	}
-	ld := node.Parent.ListData
-	return !ld.Tight && ld.ListFlags&ListTypeDefinition == 0
-}
-
-func skipParagraphTags(node *Node) bool {
-	grandparent := node.Parent.Parent
-	if grandparent == nil || grandparent.Type != List {
-		return false
-	}
-	tightOrTerm := grandparent.Tight || node.Parent.ListFlags&ListTypeTerm != 0
-	return grandparent.Type == List && tightOrTerm
-}
-
-func cellAlignment(align CellAlignFlags) string {
-	switch align {
-	case TableAlignmentLeft:
-		return "left"
-	case TableAlignmentRight:
-		return "right"
-	case TableAlignmentCenter:
-		return "center"
-	default:
-		return ""
-	}
-}
-
-func (r *HTMLRenderer) out(w io.Writer, text []byte) {
-	if r.disableTags > 0 {
-		w.Write(htmlTagRe.ReplaceAll(text, []byte{}))
-	} else {
-		w.Write(text)
-	}
-	r.lastOutputLen = len(text)
-}
-
-func (r *HTMLRenderer) cr(w io.Writer) {
-	if r.lastOutputLen > 0 {
-		r.out(w, nlBytes)
-	}
-}
-
-var (
-	nlBytes    = []byte{'\n'}
-	gtBytes    = []byte{'>'}
-	spaceBytes = []byte{' '}
-)
-
-var (
-	brTag              = []byte("<br>")
-	brXHTMLTag         = []byte("<br />")
-	emTag              = []byte("<em>")
-	emCloseTag         = []byte("</em>")
-	strongTag          = []byte("<strong>")
-	strongCloseTag     = []byte("</strong>")
-	delTag             = []byte("<del>")
-	delCloseTag        = []byte("</del>")
-	ttTag              = []byte("<tt>")
-	ttCloseTag         = []byte("</tt>")
-	aTag               = []byte("<a")
-	aCloseTag          = []byte("</a>")
-	preTag             = []byte("<pre>")
-	preCloseTag        = []byte("</pre>")
-	codeTag            = []byte("<code>")
-	codeCloseTag       = []byte("</code>")
-	pTag               = []byte("<p>")
-	pCloseTag          = []byte("</p>")
-	blockquoteTag      = []byte("<blockquote>")
-	blockquoteCloseTag = []byte("</blockquote>")
-	hrTag              = []byte("<hr>")
-	hrXHTMLTag         = []byte("<hr />")
-	ulTag              = []byte("<ul>")
-	ulCloseTag         = []byte("</ul>")
-	olTag              = []byte("<ol>")
-	olCloseTag         = []byte("</ol>")
-	dlTag              = []byte("<dl>")
-	dlCloseTag         = []byte("</dl>")
-	liTag              = []byte("<li>")
-	liCloseTag         = []byte("</li>")
-	ddTag              = []byte("<dd>")
-	ddCloseTag         = []byte("</dd>")
-	dtTag              = []byte("<dt>")
-	dtCloseTag         = []byte("</dt>")
-	tableTag           = []byte("<table>")
-	tableCloseTag      = []byte("</table>")
-	tdTag              = []byte("<td")
-	tdCloseTag         = []byte("</td>")
-	thTag              = []byte("<th")
-	thCloseTag         = []byte("</th>")
-	theadTag           = []byte("<thead>")
-	theadCloseTag      = []byte("</thead>")
-	tbodyTag           = []byte("<tbody>")
-	tbodyCloseTag      = []byte("</tbody>")
-	trTag              = []byte("<tr>")
-	trCloseTag         = []byte("</tr>")
-	h1Tag              = []byte("<h1")
-	h1CloseTag         = []byte("</h1>")
-	h2Tag              = []byte("<h2")
-	h2CloseTag         = []byte("</h2>")
-	h3Tag              = []byte("<h3")
-	h3CloseTag         = []byte("</h3>")
-	h4Tag              = []byte("<h4")
-	h4CloseTag         = []byte("</h4>")
-	h5Tag              = []byte("<h5")
-	h5CloseTag         = []byte("</h5>")
-	h6Tag              = []byte("<h6")
-	h6CloseTag         = []byte("</h6>")
-
-	footnotesDivBytes      = []byte("\n<div class=\"footnotes\">\n\n")
-	footnotesCloseDivBytes = []byte("\n</div>\n
    \n") -) - -func headingTagsFromLevel(level int) ([]byte, []byte) { - switch level { - case 1: - return h1Tag, h1CloseTag - case 2: - return h2Tag, h2CloseTag - case 3: - return h3Tag, h3CloseTag - case 4: - return h4Tag, h4CloseTag - case 5: - return h5Tag, h5CloseTag - default: - return h6Tag, h6CloseTag - } -} - -func (r *HTMLRenderer) outHRTag(w io.Writer) { - if r.Flags&UseXHTML == 0 { - r.out(w, hrTag) - } else { - r.out(w, hrXHTMLTag) - } -} - -// RenderNode is a default renderer of a single node of a syntax tree. For -// block nodes it will be called twice: first time with entering=true, second -// time with entering=false, so that it could know when it's working on an open -// tag and when on close. It writes the result to w. -// -// The return value is a way to tell the calling walker to adjust its walk -// pattern: e.g. it can terminate the traversal by returning Terminate. Or it -// can ask the walker to skip a subtree of this node by returning SkipChildren. -// The typical behavior is to return GoToNext, which asks for the usual -// traversal to the next node. -func (r *HTMLRenderer) RenderNode(w io.Writer, node *Node, entering bool) WalkStatus { - attrs := []string{} - switch node.Type { - case Text: - if r.Flags&Smartypants != 0 { - var tmp bytes.Buffer - escapeHTML(&tmp, node.Literal) - r.sr.Process(w, tmp.Bytes()) - } else { - if node.Parent.Type == Link { - escLink(w, node.Literal) - } else { - escapeHTML(w, node.Literal) - } - } - case Softbreak: - r.cr(w) - // TODO: make it configurable via out(renderer.softbreak) - case Hardbreak: - if r.Flags&UseXHTML == 0 { - r.out(w, brTag) - } else { - r.out(w, brXHTMLTag) - } - r.cr(w) - case Emph: - if entering { - r.out(w, emTag) - } else { - r.out(w, emCloseTag) - } - case Strong: - if entering { - r.out(w, strongTag) - } else { - r.out(w, strongCloseTag) - } - case Del: - if entering { - r.out(w, delTag) - } else { - r.out(w, delCloseTag) - } - case HTMLSpan: - if r.Flags&SkipHTML != 0 { - break - } - r.out(w, node.Literal) - case Link: - // mark it but don't link it if it is not a safe link: no smartypants - dest := node.LinkData.Destination - if needSkipLink(r.Flags, dest) { - if entering { - r.out(w, ttTag) - } else { - r.out(w, ttCloseTag) - } - } else { - if entering { - dest = r.addAbsPrefix(dest) - var hrefBuf bytes.Buffer - hrefBuf.WriteString("href=\"") - escLink(&hrefBuf, dest) - hrefBuf.WriteByte('"') - attrs = append(attrs, hrefBuf.String()) - if node.NoteID != 0 { - r.out(w, footnoteRef(r.FootnoteAnchorPrefix, node)) - break - } - attrs = appendLinkAttrs(attrs, r.Flags, dest) - if len(node.LinkData.Title) > 0 { - var titleBuff bytes.Buffer - titleBuff.WriteString("title=\"") - escapeHTML(&titleBuff, node.LinkData.Title) - titleBuff.WriteByte('"') - attrs = append(attrs, titleBuff.String()) - } - r.tag(w, aTag, attrs) - } else { - if node.NoteID != 0 { - break - } - r.out(w, aCloseTag) - } - } - case Image: - if r.Flags&SkipImages != 0 { - return SkipChildren - } - if entering { - dest := node.LinkData.Destination - dest = r.addAbsPrefix(dest) - if r.disableTags == 0 { - //if options.safe && potentiallyUnsafe(dest) { - //out(w, ``)
-				//} else {
-				r.out(w, []byte(`<img src=`)) - } - } - case Code: - r.out(w, codeTag) - escapeHTML(w, node.Literal) - r.out(w, codeCloseTag) - case Document: - break - case Paragraph: - if skipParagraphTags(node) { - break - } - if entering { - // TODO: untangle this clusterfuck about when the newlines need - // to be added and when not. - if node.Prev != nil { - switch node.Prev.Type { - case HTMLBlock, List, Paragraph, Heading, CodeBlock, BlockQuote, HorizontalRule: - r.cr(w) - } - } - if node.Parent.Type == BlockQuote && node.Prev == nil { - r.cr(w) - } - r.out(w, pTag) - } else { - r.out(w, pCloseTag) - if !(node.Parent.Type == Item && node.Next == nil) { - r.cr(w) - } - } - case BlockQuote: - if entering { - r.cr(w) - r.out(w, blockquoteTag) - } else { - r.out(w, blockquoteCloseTag) - r.cr(w) - } - case HTMLBlock: - if r.Flags&SkipHTML != 0 { - break - } - r.cr(w) - r.out(w, node.Literal) - r.cr(w) - case Heading: - openTag, closeTag := headingTagsFromLevel(node.Level) - if entering { - if node.IsTitleblock { - attrs = append(attrs, `class="title"`) - } - if node.HeadingID != "" { - id := r.ensureUniqueHeadingID(node.HeadingID) - if r.HeadingIDPrefix != "" { - id = r.HeadingIDPrefix + id - } - if r.HeadingIDSuffix != "" { - id = id + r.HeadingIDSuffix - } - attrs = append(attrs, fmt.Sprintf(`id="%s"`, id)) - } - r.cr(w) - r.tag(w, openTag, attrs) - } else { - r.out(w, closeTag) - if !(node.Parent.Type == Item && node.Next == nil) { - r.cr(w) - } - } - case HorizontalRule: - r.cr(w) - r.outHRTag(w) - r.cr(w) - case List: - openTag := ulTag - closeTag := ulCloseTag - if node.ListFlags&ListTypeOrdered != 0 { - openTag = olTag - closeTag = olCloseTag - } - if node.ListFlags&ListTypeDefinition != 0 { - openTag = dlTag - closeTag = dlCloseTag - } - if entering { - if node.IsFootnotesList { - r.out(w, footnotesDivBytes) - r.outHRTag(w) - r.cr(w) - } - r.cr(w) - if node.Parent.Type == Item && node.Parent.Parent.Tight { - r.cr(w) - } - r.tag(w, openTag[:len(openTag)-1], attrs) - r.cr(w) - } else { - r.out(w, closeTag) - //cr(w) - //if node.parent.Type != Item { - // cr(w) - //} - if node.Parent.Type == Item && node.Next != nil { - r.cr(w) - } - if node.Parent.Type == Document || node.Parent.Type == BlockQuote { - r.cr(w) - } - if node.IsFootnotesList { - r.out(w, footnotesCloseDivBytes) - } - } - case Item: - openTag := liTag - closeTag := liCloseTag - if node.ListFlags&ListTypeDefinition != 0 { - openTag = ddTag - closeTag = ddCloseTag - } - if node.ListFlags&ListTypeTerm != 0 { - openTag = dtTag - closeTag = dtCloseTag - } - if entering { - if itemOpenCR(node) { - r.cr(w) - } - if node.ListData.RefLink != nil { - slug := slugify(node.ListData.RefLink) - r.out(w, footnoteItem(r.FootnoteAnchorPrefix, slug)) - break - } - r.out(w, openTag) - } else { - if node.ListData.RefLink != nil { - slug := slugify(node.ListData.RefLink) - if r.Flags&FootnoteReturnLinks != 0 { - r.out(w, footnoteReturnLink(r.FootnoteAnchorPrefix, r.FootnoteReturnLinkContents, slug)) - } - } - r.out(w, closeTag) - r.cr(w) - } - case CodeBlock: - attrs = appendLanguageAttr(attrs, node.Info) - r.cr(w) - r.out(w, preTag) - r.tag(w, codeTag[:len(codeTag)-1], attrs) - escapeHTML(w, node.Literal) - r.out(w, codeCloseTag) - r.out(w, preCloseTag) - if node.Parent.Type != Item { - r.cr(w) - } - case Table: - if entering { - r.cr(w) - r.out(w, tableTag) - } else { - r.out(w, tableCloseTag) - r.cr(w) - } - case TableCell: - openTag := tdTag - closeTag := tdCloseTag - if node.IsHeader { - openTag = thTag - closeTag = thCloseTag - } - if entering 
{ - align := cellAlignment(node.Align) - if align != "" { - attrs = append(attrs, fmt.Sprintf(`align="%s"`, align)) - } - if node.Prev == nil { - r.cr(w) - } - r.tag(w, openTag, attrs) - } else { - r.out(w, closeTag) - r.cr(w) - } - case TableHead: - if entering { - r.cr(w) - r.out(w, theadTag) - } else { - r.out(w, theadCloseTag) - r.cr(w) - } - case TableBody: - if entering { - r.cr(w) - r.out(w, tbodyTag) - // XXX: this is to adhere to a rather silly test. Should fix test. - if node.FirstChild == nil { - r.cr(w) - } - } else { - r.out(w, tbodyCloseTag) - r.cr(w) - } - case TableRow: - if entering { - r.cr(w) - r.out(w, trTag) - } else { - r.out(w, trCloseTag) - r.cr(w) - } - default: - panic("Unknown node type " + node.Type.String()) - } - return GoToNext -} - -// RenderHeader writes HTML document preamble and TOC if requested. -func (r *HTMLRenderer) RenderHeader(w io.Writer, ast *Node) { - r.writeDocumentHeader(w) - if r.Flags&TOC != 0 { - r.writeTOC(w, ast) - } -} - -// RenderFooter writes HTML document footer. -func (r *HTMLRenderer) RenderFooter(w io.Writer, ast *Node) { - if r.Flags&CompletePage == 0 { - return - } - io.WriteString(w, "\n\n\n") -} - -func (r *HTMLRenderer) writeDocumentHeader(w io.Writer) { - if r.Flags&CompletePage == 0 { - return - } - ending := "" - if r.Flags&UseXHTML != 0 { - io.WriteString(w, "\n") - io.WriteString(w, "\n") - ending = " /" - } else { - io.WriteString(w, "\n") - io.WriteString(w, "\n") - } - io.WriteString(w, "\n") - io.WriteString(w, " ") - if r.Flags&Smartypants != 0 { - r.sr.Process(w, []byte(r.Title)) - } else { - escapeHTML(w, []byte(r.Title)) - } - io.WriteString(w, "\n") - io.WriteString(w, " \n") - io.WriteString(w, " \n") - if r.CSS != "" { - io.WriteString(w, " \n") - } - if r.Icon != "" { - io.WriteString(w, " \n") - } - io.WriteString(w, "\n") - io.WriteString(w, "\n\n") -} - -func (r *HTMLRenderer) writeTOC(w io.Writer, ast *Node) { - buf := bytes.Buffer{} - - inHeading := false - tocLevel := 0 - headingCount := 0 - - ast.Walk(func(node *Node, entering bool) WalkStatus { - if node.Type == Heading && !node.HeadingData.IsTitleblock { - inHeading = entering - if entering { - node.HeadingID = fmt.Sprintf("toc_%d", headingCount) - if node.Level == tocLevel { - buf.WriteString("\n\n
  • ") - } else if node.Level < tocLevel { - for node.Level < tocLevel { - tocLevel-- - buf.WriteString("
  • \n") - } - buf.WriteString("\n\n
  • ") - } else { - for node.Level > tocLevel { - tocLevel++ - buf.WriteString("\n") - } - - if buf.Len() > 0 { - io.WriteString(w, "\n") - } - r.lastOutputLen = buf.Len() -} diff --git a/vendor/github.com/smallstep/cli/pkg/blackfriday/inline.go b/vendor/github.com/smallstep/cli/pkg/blackfriday/inline.go deleted file mode 100644 index 3d633106..00000000 --- a/vendor/github.com/smallstep/cli/pkg/blackfriday/inline.go +++ /dev/null @@ -1,1214 +0,0 @@ -// -// Blackfriday Markdown Processor -// Available at http://github.com/russross/blackfriday -// -// Copyright © 2011 Russ Ross . -// Distributed under the Simplified BSD License. -// See README.md for details. -// - -// -// Functions to parse inline elements. -// - -package blackfriday - -import ( - "bytes" - "regexp" - "strconv" -) - -var ( - urlRe = `((https?|ftp):\/\/|\/)[-A-Za-z0-9+&@#\/%?=~_|!:,.;\(\)]+` - anchorRe = regexp.MustCompile(`^(]+")?\s?>` + urlRe + `<\/a>)`) - - // TODO: improve this regexp to catch all possible entities: - htmlEntityRe = regexp.MustCompile(`&[a-z]{2,5};`) -) - -// Functions to parse text within a block -// Each function returns the number of chars taken care of -// data is the complete block being rendered -// offset is the number of valid chars before the current cursor - -func (p *Markdown) inline(currBlock *Node, data []byte) { - // handlers might call us recursively: enforce a maximum depth - if p.nesting >= p.maxNesting || len(data) == 0 { - return - } - p.nesting++ - beg, end := 0, 0 - for end < len(data) { - handler := p.inlineCallback[data[end]] - if handler != nil { - if consumed, node := handler(p, data, end); consumed == 0 { - // No action from the callback. - end++ - } else { - // Copy inactive chars into the output. - currBlock.AppendChild(text(data[beg:end])) - if node != nil { - currBlock.AppendChild(node) - } - // Skip past whatever the callback used. - beg = end + consumed - end = beg - } - } else { - end++ - } - } - if beg < len(data) { - if data[end-1] == '\n' { - end-- - } - currBlock.AppendChild(text(data[beg:end])) - } - p.nesting-- -} - -// single and double emphasis parsing -func emphasis(p *Markdown, data []byte, offset int) (int, *Node) { - data = data[offset:] - c := data[0] - - if len(data) > 2 && data[1] != c { - // whitespace cannot follow an opening emphasis; - // strikethrough only takes two characters '~~' - if c == '~' || isspace(data[1]) { - return 0, nil - } - ret, node := helperEmphasis(p, data[1:], c) - if ret == 0 { - return 0, nil - } - - return ret + 1, node - } - - if len(data) > 3 && data[1] == c && data[2] != c { - if isspace(data[2]) { - return 0, nil - } - ret, node := helperDoubleEmphasis(p, data[2:], c) - if ret == 0 { - return 0, nil - } - - return ret + 2, node - } - - if len(data) > 4 && data[1] == c && data[2] == c && data[3] != c { - if c == '~' || isspace(data[3]) { - return 0, nil - } - ret, node := helperTripleEmphasis(p, data, 3, c) - if ret == 0 { - return 0, nil - } - - return ret + 3, node - } - - return 0, nil -} - -func codeSpan(p *Markdown, data []byte, offset int) (int, *Node) { - data = data[offset:] - - nb := 0 - - // count the number of backticks in the delimiter - for nb < len(data) && data[nb] == '`' { - nb++ - } - - // find the next delimiter - i, end := 0, 0 - for end = nb; end < len(data) && i < nb; end++ { - if data[end] == '`' { - i++ - } else { - i = 0 - } - } - - // no matching delimiter? 
-	if i < nb && end >= len(data) {
-		return 0, nil
-	}
-
-	// trim outside whitespace
-	fBegin := nb
-	for fBegin < end && data[fBegin] == ' ' {
-		fBegin++
-	}
-
-	fEnd := end - nb
-	for fEnd > fBegin && data[fEnd-1] == ' ' {
-		fEnd--
-	}
-
-	// render the code span
-	if fBegin != fEnd {
-		code := NewNode(Code)
-		code.Literal = data[fBegin:fEnd]
-		return end, code
-	}
-
-	return end, nil
-}
-
-// newline preceded by two spaces becomes <br>
    -func maybeLineBreak(p *Markdown, data []byte, offset int) (int, *Node) { - origOffset := offset - for offset < len(data) && data[offset] == ' ' { - offset++ - } - - if offset < len(data) && data[offset] == '\n' { - if offset-origOffset >= 2 { - return offset - origOffset + 1, NewNode(Hardbreak) - } - return offset - origOffset, nil - } - return 0, nil -} - -// newline without two spaces works when HardLineBreak is enabled -func lineBreak(p *Markdown, data []byte, offset int) (int, *Node) { - if p.extensions&HardLineBreak != 0 { - return 1, NewNode(Hardbreak) - } - return 0, nil -} - -type linkType int - -const ( - linkNormal linkType = iota - linkImg - linkDeferredFootnote - linkInlineFootnote -) - -func isReferenceStyleLink(data []byte, pos int, t linkType) bool { - if t == linkDeferredFootnote { - return false - } - return pos < len(data)-1 && data[pos] == '[' && data[pos+1] != '^' -} - -func maybeImage(p *Markdown, data []byte, offset int) (int, *Node) { - if offset < len(data)-1 && data[offset+1] == '[' { - return link(p, data, offset) - } - return 0, nil -} - -func maybeInlineFootnote(p *Markdown, data []byte, offset int) (int, *Node) { - if offset < len(data)-1 && data[offset+1] == '[' { - return link(p, data, offset) - } - return 0, nil -} - -// '[': parse a link or an image or a footnote -func link(p *Markdown, data []byte, offset int) (int, *Node) { - // no links allowed inside regular links, footnote, and deferred footnotes - if p.insideLink && (offset > 0 && data[offset-1] == '[' || len(data)-1 > offset && data[offset+1] == '^') { - return 0, nil - } - - var t linkType - switch { - // special case: ![^text] == deferred footnote (that follows something with - // an exclamation point) - case p.extensions&Footnotes != 0 && len(data)-1 > offset && data[offset+1] == '^': - t = linkDeferredFootnote - // ![alt] == image - case offset >= 0 && data[offset] == '!': - t = linkImg - offset++ - // ^[text] == inline footnote - // [^refId] == deferred footnote - case p.extensions&Footnotes != 0: - if offset >= 0 && data[offset] == '^' { - t = linkInlineFootnote - offset++ - } else if len(data)-1 > offset && data[offset+1] == '^' { - t = linkDeferredFootnote - } - // [text] == regular link - default: - t = linkNormal - } - - data = data[offset:] - - var ( - i = 1 - noteID int - title, link, altContent []byte - textHasNl = false - ) - - if t == linkDeferredFootnote { - i++ - } - - // look for the matching closing bracket - for level := 1; level > 0 && i < len(data); i++ { - switch { - case data[i] == '\n': - textHasNl = true - - case data[i-1] == '\\': - continue - - case data[i] == '[': - level++ - - case data[i] == ']': - level-- - if level <= 0 { - i-- // compensate for extra i++ in for loop - } - } - } - - if i >= len(data) { - return 0, nil - } - - txtE := i - i++ - var footnoteNode *Node - - // skip any amount of whitespace or newline - // (this is much more lax than original markdown syntax) - for i < len(data) && isspace(data[i]) { - i++ - } - - // inline style link - switch { - case i < len(data) && data[i] == '(': - // skip initial whitespace - i++ - - for i < len(data) && isspace(data[i]) { - i++ - } - - linkB := i - - // look for link end: ' " ) - findlinkend: - for i < len(data) { - switch { - case data[i] == '\\': - i += 2 - - case data[i] == ')' || data[i] == '\'' || data[i] == '"': - break findlinkend - - default: - i++ - } - } - - if i >= len(data) { - return 0, nil - } - linkE := i - - // look for title end if present - titleB, titleE := 0, 0 - if data[i] == '\'' || 
data[i] == '"' { - i++ - titleB = i - - findtitleend: - for i < len(data) { - switch { - case data[i] == '\\': - i += 2 - - case data[i] == ')': - break findtitleend - - default: - i++ - } - } - - if i >= len(data) { - return 0, nil - } - - // skip whitespace after title - titleE = i - 1 - for titleE > titleB && isspace(data[titleE]) { - titleE-- - } - - // check for closing quote presence - if data[titleE] != '\'' && data[titleE] != '"' { - titleB, titleE = 0, 0 - linkE = i - } - } - - // remove whitespace at the end of the link - for linkE > linkB && isspace(data[linkE-1]) { - linkE-- - } - - // remove optional angle brackets around the link - if data[linkB] == '<' { - linkB++ - } - if data[linkE-1] == '>' { - linkE-- - } - - // build escaped link and title - if linkE > linkB { - link = data[linkB:linkE] - } - - if titleE > titleB { - title = data[titleB:titleE] - } - - i++ - - // reference style link - case isReferenceStyleLink(data, i, t): - var id []byte - altContentConsidered := false - - // look for the id - i++ - linkB := i - for i < len(data) && data[i] != ']' { - i++ - } - if i >= len(data) { - return 0, nil - } - linkE := i - - // find the reference - if linkB == linkE { - if textHasNl { - var b bytes.Buffer - - for j := 1; j < txtE; j++ { - switch { - case data[j] != '\n': - b.WriteByte(data[j]) - case data[j-1] != ' ': - b.WriteByte(' ') - } - } - - id = b.Bytes() - } else { - id = data[1:txtE] - altContentConsidered = true - } - } else { - id = data[linkB:linkE] - } - - // find the reference with matching id - lr, ok := p.getRef(string(id)) - if !ok { - return 0, nil - } - - // keep link and title from reference - link = lr.link - title = lr.title - if altContentConsidered { - altContent = lr.text - } - i++ - - // shortcut reference style link or reference or inline footnote - default: - var id []byte - - // craft the id - if textHasNl { - var b bytes.Buffer - - for j := 1; j < txtE; j++ { - switch { - case data[j] != '\n': - b.WriteByte(data[j]) - case data[j-1] != ' ': - b.WriteByte(' ') - } - } - - id = b.Bytes() - } else { - if t == linkDeferredFootnote { - id = data[2:txtE] // get rid of the ^ - } else { - id = data[1:txtE] - } - } - - footnoteNode = NewNode(Item) - if t == linkInlineFootnote { - // create a new reference - noteID = len(p.notes) + 1 - - var fragment []byte - if len(id) > 0 { - if len(id) < 16 { - fragment = make([]byte, len(id)) - } else { - fragment = make([]byte, 16) - } - copy(fragment, slugify(id)) - } else { - fragment = append([]byte("footnote-"), []byte(strconv.Itoa(noteID))...) 
- } - - ref := &reference{ - noteID: noteID, - hasBlock: false, - link: fragment, - title: id, - footnote: footnoteNode, - } - - p.notes = append(p.notes, ref) - - link = ref.link - title = ref.title - } else { - // find the reference with matching id - lr, ok := p.getRef(string(id)) - if !ok { - return 0, nil - } - - if t == linkDeferredFootnote { - lr.noteID = len(p.notes) + 1 - lr.footnote = footnoteNode - p.notes = append(p.notes, lr) - } - - // keep link and title from reference - link = lr.link - // if inline footnote, title == footnote contents - title = lr.title - noteID = lr.noteID - } - - // rewind the whitespace - i = txtE + 1 - } - - var uLink []byte - if t == linkNormal || t == linkImg { - if len(link) > 0 { - var uLinkBuf bytes.Buffer - unescapeText(&uLinkBuf, link) - uLink = uLinkBuf.Bytes() - } - - // links need something to click on and somewhere to go - if len(uLink) == 0 || (t == linkNormal && txtE <= 1) { - return 0, nil - } - } - - // call the relevant rendering function - var linkNode *Node - switch t { - case linkNormal: - linkNode = NewNode(Link) - linkNode.Destination = normalizeURI(uLink) - linkNode.Title = title - if len(altContent) > 0 { - linkNode.AppendChild(text(altContent)) - } else { - // links cannot contain other links, so turn off link parsing - // temporarily and recurse - insideLink := p.insideLink - p.insideLink = true - p.inline(linkNode, data[1:txtE]) - p.insideLink = insideLink - } - - case linkImg: - linkNode = NewNode(Image) - linkNode.Destination = uLink - linkNode.Title = title - linkNode.AppendChild(text(data[1:txtE])) - i++ - - case linkInlineFootnote, linkDeferredFootnote: - linkNode = NewNode(Link) - linkNode.Destination = link - linkNode.Title = title - linkNode.NoteID = noteID - linkNode.Footnote = footnoteNode - if t == linkInlineFootnote { - i++ - } - - default: - return 0, nil - } - - return i, linkNode -} - -func (p *Markdown) inlineHTMLComment(data []byte) int { - if len(data) < 5 { - return 0 - } - if data[0] != '<' || data[1] != '!' || data[2] != '-' || data[3] != '-' { - return 0 - } - i := 5 - // scan for an end-of-comment marker, across lines if necessary - for i < len(data) && !(data[i-2] == '-' && data[i-1] == '-' && data[i] == '>') { - i++ - } - // no end-of-comment marker - if i >= len(data) { - return 0 - } - return i + 1 -} - -func stripMailto(link []byte) []byte { - if bytes.HasPrefix(link, []byte("mailto://")) { - return link[9:] - } else if bytes.HasPrefix(link, []byte("mailto:")) { - return link[7:] - } else { - return link - } -} - -// autolinkType specifies a kind of autolink that gets detected. -type autolinkType int - -// These are the possible flag values for the autolink renderer. -const ( - notAutolink autolinkType = iota - normalAutolink - emailAutolink -) - -// '<' when tags or autolinks are allowed -func leftAngle(p *Markdown, data []byte, offset int) (int, *Node) { - data = data[offset:] - altype, end := tagLength(data) - if size := p.inlineHTMLComment(data); size > 0 { - end = size - } - if end > 2 { - if altype != notAutolink { - var uLink bytes.Buffer - unescapeText(&uLink, data[1:end+1-2]) - if uLink.Len() > 0 { - link := uLink.Bytes() - node := NewNode(Link) - node.Destination = link - if altype == emailAutolink { - node.Destination = append([]byte("mailto:"), link...) 
- } - node.AppendChild(text(stripMailto(link))) - return end, node - } - } else { - htmlTag := NewNode(HTMLSpan) - htmlTag.Literal = data[:end] - return end, htmlTag - } - } - - return end, nil -} - -// '\\' backslash escape -var escapeChars = []byte("\\`*_{}[]()#+-.!:|&<>~") - -func escape(p *Markdown, data []byte, offset int) (int, *Node) { - data = data[offset:] - - if len(data) > 1 { - if p.extensions&BackslashLineBreak != 0 && data[1] == '\n' { - return 2, NewNode(Hardbreak) - } - if bytes.IndexByte(escapeChars, data[1]) < 0 { - return 0, nil - } - - return 2, text(data[1:2]) - } - - return 2, nil -} - -func unescapeText(ob *bytes.Buffer, src []byte) { - i := 0 - for i < len(src) { - org := i - for i < len(src) && src[i] != '\\' { - i++ - } - - if i > org { - ob.Write(src[org:i]) - } - - if i+1 >= len(src) { - break - } - - ob.WriteByte(src[i+1]) - i += 2 - } -} - -// '&' escaped when it doesn't belong to an entity -// valid entities are assumed to be anything matching &#?[A-Za-z0-9]+; -func entity(p *Markdown, data []byte, offset int) (int, *Node) { - data = data[offset:] - - end := 1 - - if end < len(data) && data[end] == '#' { - end++ - } - - for end < len(data) && isalnum(data[end]) { - end++ - } - - if end < len(data) && data[end] == ';' { - end++ // real entity - } else { - return 0, nil // lone '&' - } - - ent := data[:end] - // undo & escaping or it will be converted to &amp; by another - // escaper in the renderer - if bytes.Equal(ent, []byte("&")) { - ent = []byte{'&'} - } - - return end, text(ent) -} - -func linkEndsWithEntity(data []byte, linkEnd int) bool { - entityRanges := htmlEntityRe.FindAllIndex(data[:linkEnd], -1) - return entityRanges != nil && entityRanges[len(entityRanges)-1][1] == linkEnd -} - -// hasPrefixCaseInsensitive is a custom implementation of -// strings.HasPrefix(strings.ToLower(s), prefix) -// we rolled our own because ToLower pulls in a huge machinery of lowercasing -// anything from Unicode and that's very slow. Since this func will only be -// used on ASCII protocol prefixes, we can take shortcuts. 
-func hasPrefixCaseInsensitive(s, prefix []byte) bool { - if len(s) < len(prefix) { - return false - } - delta := byte('a' - 'A') - for i, b := range prefix { - if b != s[i] && b != s[i]+delta { - return false - } - } - return true -} - -var protocolPrefixes = [][]byte{ - []byte("http://"), - []byte("https://"), - []byte("ftp://"), - []byte("file://"), - []byte("mailto:"), -} - -const shortestPrefix = 6 // len("ftp://"), the shortest of the above - -func maybeAutoLink(p *Markdown, data []byte, offset int) (int, *Node) { - // quick check to rule out most false hits - if p.insideLink || len(data) < offset+shortestPrefix { - return 0, nil - } - for _, prefix := range protocolPrefixes { - endOfHead := offset + 8 // 8 is the len() of the longest prefix - if endOfHead > len(data) { - endOfHead = len(data) - } - if hasPrefixCaseInsensitive(data[offset:endOfHead], prefix) { - return autoLink(p, data, offset) - } - } - return 0, nil -} - -func autoLink(p *Markdown, data []byte, offset int) (int, *Node) { - // Now a more expensive check to see if we're not inside an anchor element - anchorStart := offset - offsetFromAnchor := 0 - for anchorStart > 0 && data[anchorStart] != '<' { - anchorStart-- - offsetFromAnchor++ - } - - anchorStr := anchorRe.Find(data[anchorStart:]) - if anchorStr != nil { - anchorClose := NewNode(HTMLSpan) - anchorClose.Literal = anchorStr[offsetFromAnchor:] - return len(anchorStr) - offsetFromAnchor, anchorClose - } - - // scan backward for a word boundary - rewind := 0 - for offset-rewind > 0 && rewind <= 7 && isletter(data[offset-rewind-1]) { - rewind++ - } - if rewind > 6 { // longest supported protocol is "mailto" which has 6 letters - return 0, nil - } - - origData := data - data = data[offset-rewind:] - - if !isSafeLink(data) { - return 0, nil - } - - linkEnd := 0 - for linkEnd < len(data) && !isEndOfLink(data[linkEnd]) { - linkEnd++ - } - - // Skip punctuation at the end of the link - if (data[linkEnd-1] == '.' || data[linkEnd-1] == ',') && data[linkEnd-2] != '\\' { - linkEnd-- - } - - // But don't skip semicolon if it's a part of escaped entity: - if data[linkEnd-1] == ';' && data[linkEnd-2] != '\\' && !linkEndsWithEntity(data, linkEnd) { - linkEnd-- - } - - // See if the link finishes with a punctuation sign that can be closed. - var copen byte - switch data[linkEnd-1] { - case '"': - copen = '"' - case '\'': - copen = '\'' - case ')': - copen = '(' - case ']': - copen = '[' - case '}': - copen = '{' - default: - copen = 0 - } - - if copen != 0 { - bufEnd := offset - rewind + linkEnd - 2 - - openDelim := 1 - - /* Try to close the final punctuation sign in this same line; - * if we managed to close it outside of the URL, that means that it's - * not part of the URL. If it closes inside the URL, that means it - * is part of the URL. 
- * - * Examples: - * - * foo http://www.pokemon.com/Pikachu_(Electric) bar - * => http://www.pokemon.com/Pikachu_(Electric) - * - * foo (http://www.pokemon.com/Pikachu_(Electric)) bar - * => http://www.pokemon.com/Pikachu_(Electric) - * - * foo http://www.pokemon.com/Pikachu_(Electric)) bar - * => http://www.pokemon.com/Pikachu_(Electric)) - * - * (foo http://www.pokemon.com/Pikachu_(Electric)) bar - * => foo http://www.pokemon.com/Pikachu_(Electric) - */ - - for bufEnd >= 0 && origData[bufEnd] != '\n' && openDelim != 0 { - if origData[bufEnd] == data[linkEnd-1] { - openDelim++ - } - - if origData[bufEnd] == copen { - openDelim-- - } - - bufEnd-- - } - - if openDelim == 0 { - linkEnd-- - } - } - - var uLink bytes.Buffer - unescapeText(&uLink, data[:linkEnd]) - - if uLink.Len() > 0 { - node := NewNode(Link) - node.Destination = uLink.Bytes() - node.AppendChild(text(uLink.Bytes())) - return linkEnd, node - } - - return linkEnd, nil -} - -func isEndOfLink(char byte) bool { - return isspace(char) || char == '<' -} - -var validUris = [][]byte{[]byte("http://"), []byte("https://"), []byte("ftp://"), []byte("mailto://")} -var validPaths = [][]byte{[]byte("/"), []byte("./"), []byte("../")} - -func isSafeLink(link []byte) bool { - for _, path := range validPaths { - if len(link) >= len(path) && bytes.Equal(link[:len(path)], path) { - if len(link) == len(path) { - return true - } else if isalnum(link[len(path)]) { - return true - } - } - } - - for _, prefix := range validUris { - // TODO: handle unicode here - // case-insensitive prefix test - if len(link) > len(prefix) && bytes.Equal(bytes.ToLower(link[:len(prefix)]), prefix) && isalnum(link[len(prefix)]) { - return true - } - } - - return false -} - -// return the length of the given tag, or 0 is it's not valid -func tagLength(data []byte) (autolink autolinkType, end int) { - var i, j int - - // a valid tag can't be shorter than 3 chars - if len(data) < 3 { - return notAutolink, 0 - } - - // begins with a '<' optionally followed by '/', followed by letter or number - if data[0] != '<' { - return notAutolink, 0 - } - if data[1] == '/' { - i = 2 - } else { - i = 1 - } - - if !isalnum(data[i]) { - return notAutolink, 0 - } - - // scheme test - autolink = notAutolink - - // try to find the beginning of an URI - for i < len(data) && (isalnum(data[i]) || data[i] == '.' 
|| data[i] == '+' || data[i] == '-') { - i++ - } - - if i > 1 && i < len(data) && data[i] == '@' { - if j = isMailtoAutoLink(data[i:]); j != 0 { - return emailAutolink, i + j - } - } - - if i > 2 && i < len(data) && data[i] == ':' { - autolink = normalAutolink - i++ - } - - // complete autolink test: no whitespace or ' or " - switch { - case i >= len(data): - autolink = notAutolink - case autolink != notAutolink: - j = i - - for i < len(data) { - if data[i] == '\\' { - i += 2 - } else if data[i] == '>' || data[i] == '\'' || data[i] == '"' || isspace(data[i]) { - break - } else { - i++ - } - - } - - if i >= len(data) { - return autolink, 0 - } - if i > j && data[i] == '>' { - return autolink, i + 1 - } - - // one of the forbidden chars has been found - autolink = notAutolink - } - i += bytes.IndexByte(data[i:], '>') - if i < 0 { - return autolink, 0 - } - return autolink, i + 1 -} - -// look for the address part of a mail autolink and '>' -// this is less strict than the original markdown e-mail address matching -func isMailtoAutoLink(data []byte) int { - nb := 0 - - // address is assumed to be: [-@._a-zA-Z0-9]+ with exactly one '@' - for i := 0; i < len(data); i++ { - if isalnum(data[i]) { - continue - } - - switch data[i] { - case '@': - nb++ - - case '-', '.', '_': - break - - case '>': - if nb == 1 { - return i + 1 - } - return 0 - default: - return 0 - } - } - - return 0 -} - -// look for the next emph char, skipping other constructs -func helperFindEmphChar(data []byte, c byte) int { - i := 0 - - for i < len(data) { - for i < len(data) && data[i] != c && data[i] != '`' && data[i] != '[' { - i++ - } - if i >= len(data) { - return 0 - } - // do not count escaped chars - if i != 0 && data[i-1] == '\\' { - i++ - continue - } - if data[i] == c { - return i - } - - if data[i] == '`' { - // skip a code span - tmpI := 0 - i++ - for i < len(data) && data[i] != '`' { - if tmpI == 0 && data[i] == c { - tmpI = i - } - i++ - } - if i >= len(data) { - return tmpI - } - i++ - } else if data[i] == '[' { - // skip a link - tmpI := 0 - i++ - for i < len(data) && data[i] != ']' { - if tmpI == 0 && data[i] == c { - tmpI = i - } - i++ - } - i++ - for i < len(data) && (data[i] == ' ' || data[i] == '\n') { - i++ - } - if i >= len(data) { - return tmpI - } - if data[i] != '[' && data[i] != '(' { // not a link - if tmpI > 0 { - return tmpI - } - continue - } - cc := data[i] - i++ - for i < len(data) && data[i] != cc { - if tmpI == 0 && data[i] == c { - return i - } - i++ - } - if i >= len(data) { - return tmpI - } - i++ - } - } - return 0 -} - -func helperEmphasis(p *Markdown, data []byte, c byte) (int, *Node) { - i := 0 - - // skip one symbol if coming from emph3 - if len(data) > 1 && data[0] == c && data[1] == c { - i = 1 - } - - for i < len(data) { - length := helperFindEmphChar(data[i:], c) - if length == 0 { - return 0, nil - } - i += length - if i >= len(data) { - return 0, nil - } - - if i+1 < len(data) && data[i+1] == c { - i++ - continue - } - - if data[i] == c && !isspace(data[i-1]) { - - if p.extensions&NoIntraEmphasis != 0 { - if !(i+1 == len(data) || isspace(data[i+1]) || ispunct(data[i+1])) { - continue - } - } - - emph := NewNode(Emph) - p.inline(emph, data[:i]) - return i + 1, emph - } - } - - return 0, nil -} - -func helperDoubleEmphasis(p *Markdown, data []byte, c byte) (int, *Node) { - i := 0 - - for i < len(data) { - length := helperFindEmphChar(data[i:], c) - if length == 0 { - return 0, nil - } - i += length - - if i+1 < len(data) && data[i] == c && data[i+1] == c && i > 0 && 
!isspace(data[i-1]) { - nodeType := Strong - if c == '~' { - nodeType = Del - } - node := NewNode(nodeType) - p.inline(node, data[:i]) - return i + 2, node - } - i++ - } - return 0, nil -} - -func helperTripleEmphasis(p *Markdown, data []byte, offset int, c byte) (int, *Node) { - i := 0 - origData := data - data = data[offset:] - - for i < len(data) { - length := helperFindEmphChar(data[i:], c) - if length == 0 { - return 0, nil - } - i += length - - // skip whitespace preceded symbols - if data[i] != c || isspace(data[i-1]) { - continue - } - - switch { - case i+2 < len(data) && data[i+1] == c && data[i+2] == c: - // triple symbol found - strong := NewNode(Strong) - em := NewNode(Emph) - strong.AppendChild(em) - p.inline(em, data[:i]) - return i + 3, strong - case (i+1 < len(data) && data[i+1] == c): - // double symbol found, hand over to emph1 - length, node := helperEmphasis(p, origData[offset-2:], c) - if length == 0 { - return 0, nil - } - return length - 2, node - default: - // single symbol found, hand over to emph2 - length, node := helperDoubleEmphasis(p, origData[offset-1:], c) - if length == 0 { - return 0, nil - } - return length - 1, node - } - } - return 0, nil -} - -func text(s []byte) *Node { - node := NewNode(Text) - node.Literal = s - return node -} - -func normalizeURI(s []byte) []byte { - return s // TODO: implement -} diff --git a/vendor/github.com/smallstep/cli/pkg/blackfriday/markdown.go b/vendor/github.com/smallstep/cli/pkg/blackfriday/markdown.go deleted file mode 100644 index 1146a105..00000000 --- a/vendor/github.com/smallstep/cli/pkg/blackfriday/markdown.go +++ /dev/null @@ -1,940 +0,0 @@ -// Blackfriday Markdown Processor -// Available at http://github.com/russross/blackfriday -// -// Copyright © 2011 Russ Ross . -// Distributed under the Simplified BSD License. -// See README.md for details. - -package blackfriday - -import ( - "bytes" - "fmt" - "io" - "strings" - "unicode/utf8" -) - -// -// Markdown parsing and processing -// - -// Version string of the package. Appears in the rendered document when -// CompletePage flag is on. -const Version = "2.0" - -// Extensions is a bitwise or'ed collection of enabled Blackfriday's -// extensions. -type Extensions int - -// These are the supported markdown parsing extensions. -// OR these values together to select multiple extensions. 
-const ( - NoExtensions Extensions = 0 - NoIntraEmphasis Extensions = 1 << iota // Ignore emphasis markers inside words - Tables // Render tables - FencedCode // Render fenced code blocks - Autolink // Detect embedded URLs that are not explicitly marked - Strikethrough // Strikethrough text using ~~test~~ - LaxHTMLBlocks // Loosen up HTML block parsing rules - SpaceHeadings // Be strict about prefix heading rules - HardLineBreak // Translate newlines into line breaks - TabSizeEight // Expand tabs to eight spaces instead of four - Footnotes // Pandoc-style footnotes - NoEmptyLineBeforeBlock // No need to insert an empty line to start a (code, quote, ordered list, unordered list) block - HeadingIDs // specify heading IDs with {#id} - Titleblock // Titleblock ala pandoc - AutoHeadingIDs // Create the heading ID from the text - BackslashLineBreak // Translate trailing backslashes into line breaks - DefinitionLists // Render definition lists - - CommonHTMLFlags HTMLFlags = UseXHTML | Smartypants | - SmartypantsFractions | SmartypantsDashes | SmartypantsLatexDashes - - CommonExtensions Extensions = NoIntraEmphasis | Tables | FencedCode | - Autolink | Strikethrough | SpaceHeadings | HeadingIDs | - BackslashLineBreak | DefinitionLists -) - -// ListType contains bitwise or'ed flags for list and list item objects. -type ListType int - -// These are the possible flag values for the ListItem renderer. -// Multiple flag values may be ORed together. -// These are mostly of interest if you are writing a new output format. -const ( - ListTypeOrdered ListType = 1 << iota - ListTypeDefinition - ListTypeTerm - - ListItemContainsBlock - ListItemBeginningOfList // TODO: figure out if this is of any use now - ListItemEndOfList -) - -// CellAlignFlags holds a type of alignment in a table cell. -type CellAlignFlags int - -// These are the possible flag values for the table cell renderer. -// Only a single one of these values will be used; they are not ORed together. -// These are mostly of interest if you are writing a new output format. -const ( - TableAlignmentLeft CellAlignFlags = 1 << iota - TableAlignmentRight - TableAlignmentCenter = (TableAlignmentLeft | TableAlignmentRight) -) - -// The size of a tab stop. -const ( - TabSizeDefault = 4 - TabSizeDouble = 8 -) - -// blockTags is a set of tags that are recognized as HTML block tags. -// Any of these can be included in markdown text without special escaping. -var blockTags = map[string]struct{}{ - "blockquote": {}, - "del": {}, - "div": {}, - "dl": {}, - "fieldset": {}, - "form": {}, - "h1": {}, - "h2": {}, - "h3": {}, - "h4": {}, - "h5": {}, - "h6": {}, - "iframe": {}, - "ins": {}, - "math": {}, - "noscript": {}, - "ol": {}, - "pre": {}, - "p": {}, - "script": {}, - "style": {}, - "table": {}, - "ul": {}, - - // HTML5 - "address": {}, - "article": {}, - "aside": {}, - "canvas": {}, - "figcaption": {}, - "figure": {}, - "footer": {}, - "header": {}, - "hgroup": {}, - "main": {}, - "nav": {}, - "output": {}, - "progress": {}, - "section": {}, - "video": {}, -} - -// Renderer is the rendering interface. This is mostly of interest if you are -// implementing a new rendering format. -// -// Only an HTML implementation is provided in this repository, see the README -// for external implementations. -type Renderer interface { - // RenderNode is the main rendering method. It will be called once for - // every leaf node and twice for every non-leaf node (first with - // entering=true, then with entering=false). 
The method should write its - // rendition of the node to the supplied writer w. - RenderNode(w io.Writer, node *Node, entering bool) WalkStatus - - // RenderHeader is a method that allows the renderer to produce some - // content preceding the main body of the output document. The header is - // understood in the broad sense here. For example, the default HTML - // renderer will write not only the HTML document preamble, but also the - // table of contents if it was requested. - // - // The method will be passed an entire document tree, in case a particular - // implementation needs to inspect it to produce output. - // - // The output should be written to the supplied writer w. If your - // implementation has no header to write, supply an empty implementation. - RenderHeader(w io.Writer, ast *Node) - - // RenderFooter is a symmetric counterpart of RenderHeader. - RenderFooter(w io.Writer, ast *Node) -} - -// Callback functions for inline parsing. One such function is defined -// for each character that triggers a response when parsing inline data. -type inlineParser func(p *Markdown, data []byte, offset int) (int, *Node) - -// Markdown is a type that holds extensions and the runtime state used by -// Parse, and the renderer. You can not use it directly, construct it with New. -type Markdown struct { - renderer Renderer - referenceOverride ReferenceOverrideFunc - refs map[string]*reference - inlineCallback [256]inlineParser - extensions Extensions - nesting int - maxNesting int - insideLink bool - - // Footnotes need to be ordered as well as available to quickly check for - // presence. If a ref is also a footnote, it's stored both in refs and here - // in notes. Slice is nil if footnotes not enabled. - notes []*reference - - doc *Node - tip *Node // = doc - oldTip *Node - lastMatchedContainer *Node // = doc - allClosed bool -} - -func (p *Markdown) getRef(refid string) (ref *reference, found bool) { - if p.referenceOverride != nil { - r, overridden := p.referenceOverride(refid) - if overridden { - if r == nil { - return nil, false - } - return &reference{ - link: []byte(r.Link), - title: []byte(r.Title), - noteID: 0, - hasBlock: false, - text: []byte(r.Text)}, true - } - } - // refs are case insensitive - ref, found = p.refs[strings.ToLower(refid)] - return ref, found -} - -func (p *Markdown) finalize(block *Node) { - above := block.Parent - block.open = false - p.tip = above -} - -func (p *Markdown) addChild(node NodeType, offset uint32) *Node { - return p.addExistingChild(NewNode(node), offset) -} - -func (p *Markdown) addExistingChild(node *Node, offset uint32) *Node { - for !p.tip.canContain(node.Type) { - p.finalize(p.tip) - } - p.tip.AppendChild(node) - p.tip = node - return node -} - -func (p *Markdown) closeUnmatchedBlocks() { - if !p.allClosed { - for p.oldTip != p.lastMatchedContainer { - parent := p.oldTip.Parent - p.finalize(p.oldTip) - p.oldTip = parent - } - p.allClosed = true - } -} - -// -// -// Public interface -// -// - -// Reference represents the details of a link. -// See the documentation in Options for more details on use-case. -type Reference struct { - // Link is usually the URL the reference points to. - Link string - // Title is the alternate text describing the link in more detail. 
- Title string - // Text is the optional text to override the ref with if the syntax used was - // [refid][] - Text string -} - -// ReferenceOverrideFunc is expected to be called with a reference string and -// return either a valid Reference type that the reference string maps to or -// nil. If overridden is false, the default reference logic will be executed. -// See the documentation in Options for more details on use-case. -type ReferenceOverrideFunc func(reference string) (ref *Reference, overridden bool) - -// New constructs a Markdown processor. You can use the same With* functions as -// for Run() to customize parser's behavior and the renderer. -func New(opts ...Option) *Markdown { - var p Markdown - for _, opt := range opts { - opt(&p) - } - p.refs = make(map[string]*reference) - p.maxNesting = 16 - p.insideLink = false - docNode := NewNode(Document) - p.doc = docNode - p.tip = docNode - p.oldTip = docNode - p.lastMatchedContainer = docNode - p.allClosed = true - // register inline parsers - p.inlineCallback[' '] = maybeLineBreak - p.inlineCallback['*'] = emphasis - p.inlineCallback['_'] = emphasis - if p.extensions&Strikethrough != 0 { - p.inlineCallback['~'] = emphasis - } - p.inlineCallback['`'] = codeSpan - p.inlineCallback['\n'] = lineBreak - p.inlineCallback['['] = link - p.inlineCallback['<'] = leftAngle - p.inlineCallback['\\'] = escape - p.inlineCallback['&'] = entity - p.inlineCallback['!'] = maybeImage - p.inlineCallback['^'] = maybeInlineFootnote - if p.extensions&Autolink != 0 { - p.inlineCallback['h'] = maybeAutoLink - p.inlineCallback['m'] = maybeAutoLink - p.inlineCallback['f'] = maybeAutoLink - p.inlineCallback['H'] = maybeAutoLink - p.inlineCallback['M'] = maybeAutoLink - p.inlineCallback['F'] = maybeAutoLink - } - if p.extensions&Footnotes != 0 { - p.notes = make([]*reference, 0) - } - return &p -} - -// Option customizes the Markdown processor's default behavior. -type Option func(*Markdown) - -// WithRenderer allows you to override the default renderer. -func WithRenderer(r Renderer) Option { - return func(p *Markdown) { - p.renderer = r - } -} - -// WithExtensions allows you to pick some of the many extensions provided by -// Blackfriday. You can bitwise OR them. -func WithExtensions(e Extensions) Option { - return func(p *Markdown) { - p.extensions = e - } -} - -// WithNoExtensions turns off all extensions and custom behavior. -func WithNoExtensions() Option { - return func(p *Markdown) { - p.extensions = NoExtensions - p.renderer = NewHTMLRenderer(HTMLRendererParameters{ - Flags: HTMLFlagsNone, - }) - } -} - -// WithRefOverride sets an optional function callback that is called every -// time a reference is resolved. -// -// In Markdown, the link reference syntax can be made to resolve a link to -// a reference instead of an inline URL, in one of the following ways: -// -// * [link text][refid] -// * [refid][] -// -// Usually, the refid is defined at the bottom of the Markdown document. If -// this override function is provided, the refid is passed to the override -// function first, before consulting the defined refids at the bottom. If -// the override function indicates an override did not occur, the refids at -// the bottom will be used to fill in the link details. -func WithRefOverride(o ReferenceOverrideFunc) Option { - return func(p *Markdown) { - p.referenceOverride = o - } -} - -// Run is the main entry point to Blackfriday. It parses and renders a -// block of markdown-encoded text. 
-// -// The simplest invocation of Run takes one argument, input: -// output := Run(input) -// This will parse the input with CommonExtensions enabled and render it with -// the default HTMLRenderer (with CommonHTMLFlags). -// -// Variadic arguments opts can customize the default behavior. Since Markdown -// type does not contain exported fields, you can not use it directly. Instead, -// use the With* functions. For example, this will call the most basic -// functionality, with no extensions: -// output := Run(input, WithNoExtensions()) -// -// You can use any number of With* arguments, even contradicting ones. They -// will be applied in order of appearance and the latter will override the -// former: -// output := Run(input, WithNoExtensions(), WithExtensions(exts), -// WithRenderer(yourRenderer)) -func Run(input []byte, opts ...Option) []byte { - r := NewHTMLRenderer(HTMLRendererParameters{ - Flags: CommonHTMLFlags, - }) - optList := []Option{WithRenderer(r), WithExtensions(CommonExtensions)} - optList = append(optList, opts...) - parser := New(optList...) - ast := parser.Parse(input) - var buf bytes.Buffer - parser.renderer.RenderHeader(&buf, ast) - ast.Walk(func(node *Node, entering bool) WalkStatus { - return parser.renderer.RenderNode(&buf, node, entering) - }) - parser.renderer.RenderFooter(&buf, ast) - return buf.Bytes() -} - -// Parse is an entry point to the parsing part of Blackfriday. It takes an -// input markdown document and produces a syntax tree for its contents. This -// tree can then be rendered with a default or custom renderer, or -// analyzed/transformed by the caller to whatever non-standard needs they have. -// The return value is the root node of the syntax tree. -func (p *Markdown) Parse(input []byte) *Node { - p.block(input) - // Walk the tree and finish up some of unfinished blocks - for p.tip != nil { - p.finalize(p.tip) - } - // Walk the tree again and process inline markdown in each block - p.doc.Walk(func(node *Node, entering bool) WalkStatus { - if node.Type == Paragraph || node.Type == Heading || node.Type == TableCell { - p.inline(node, node.content) - node.content = nil - } - return GoToNext - }) - p.parseRefsToAST() - return p.doc -} - -func (p *Markdown) parseRefsToAST() { - if p.extensions&Footnotes == 0 || len(p.notes) == 0 { - return - } - p.tip = p.doc - block := p.addBlock(List, nil) - block.IsFootnotesList = true - block.ListFlags = ListTypeOrdered - flags := ListItemBeginningOfList - // Note: this loop is intentionally explicit, not range-form. This is - // because the body of the loop will append nested footnotes to p.notes and - // we need to process those late additions. Range form would only walk over - // the fixed initial set. - for i := 0; i < len(p.notes); i++ { - ref := p.notes[i] - p.addExistingChild(ref.footnote, 0) - block := ref.footnote - block.ListFlags = flags | ListTypeOrdered - block.RefLink = ref.link - if ref.hasBlock { - flags |= ListItemContainsBlock - p.block(ref.title) - } else { - p.inline(block, ref.title) - } - flags &^= ListItemBeginningOfList | ListItemContainsBlock - } - above := block.Parent - finalizeList(block) - p.tip = above - block.Walk(func(node *Node, entering bool) WalkStatus { - if node.Type == Paragraph || node.Type == Heading { - p.inline(node, node.content) - node.content = nil - } - return GoToNext - }) -} - -// -// Link references -// -// This section implements support for references that (usually) appear -// as footnotes in a document, and can be referenced anywhere in the document. 
-// The basic format is: -// -// [1]: http://www.google.com/ "Google" -// [2]: http://www.github.com/ "Github" -// -// Anywhere in the document, the reference can be linked by referring to its -// label, i.e., 1 and 2 in this example, as in: -// -// This library is hosted on [Github][2], a git hosting site. -// -// Actual footnotes as specified in Pandoc and supported by some other Markdown -// libraries such as php-markdown are also taken care of. They look like this: -// -// This sentence needs a bit of further explanation.[^note] -// -// [^note]: This is the explanation. -// -// Footnotes should be placed at the end of the document in an ordered list. -// Inline footnotes such as: -// -// Inline footnotes^[Not supported.] also exist. -// -// are not yet supported. - -// reference holds all information necessary for a reference-style links or -// footnotes. -// -// Consider this markdown with reference-style links: -// -// [link][ref] -// -// [ref]: /url/ "tooltip title" -// -// It will be ultimately converted to this HTML: -// -//

-//     <p><a href="/url/" title="tooltip title">link</a></p>

    -// -// And a reference structure will be populated as follows: -// -// p.refs["ref"] = &reference{ -// link: "/url/", -// title: "tooltip title", -// } -// -// Alternatively, reference can contain information about a footnote. Consider -// this markdown: -// -// Text needing a footnote.[^a] -// -// [^a]: This is the note -// -// A reference structure will be populated as follows: -// -// p.refs["a"] = &reference{ -// link: "a", -// title: "This is the note", -// noteID: , -// } -// -// TODO: As you can see, it begs for splitting into two dedicated structures -// for refs and for footnotes. -type reference struct { - link []byte - title []byte - noteID int // 0 if not a footnote ref - hasBlock bool - footnote *Node // a link to the Item node within a list of footnotes - - text []byte // only gets populated by refOverride feature with Reference.Text -} - -func (r *reference) String() string { - return fmt.Sprintf("{link: %q, title: %q, text: %q, noteID: %d, hasBlock: %v}", - r.link, r.title, r.text, r.noteID, r.hasBlock) -} - -// Check whether or not data starts with a reference link. -// If so, it is parsed and stored in the list of references -// (in the render struct). -// Returns the number of bytes to skip to move past it, -// or zero if the first line is not a reference. -func isReference(p *Markdown, data []byte, tabSize int) int { - // up to 3 optional leading spaces - if len(data) < 4 { - return 0 - } - i := 0 - for i < 3 && data[i] == ' ' { - i++ - } - - noteID := 0 - - // id part: anything but a newline between brackets - if data[i] != '[' { - return 0 - } - i++ - if p.extensions&Footnotes != 0 { - if i < len(data) && data[i] == '^' { - // we can set it to anything here because the proper noteIds will - // be assigned later during the second pass. It just has to be != 0 - noteID = 1 - i++ - } - } - idOffset := i - for i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != ']' { - i++ - } - if i >= len(data) || data[i] != ']' { - return 0 - } - idEnd := i - // footnotes can have empty ID, like this: [^], but a reference can not be - // empty like this: []. Break early if it's not a footnote and there's no ID - if noteID == 0 && idOffset == idEnd { - return 0 - } - // spacer: colon (space | tab)* newline? 
(space | tab)* - i++ - if i >= len(data) || data[i] != ':' { - return 0 - } - i++ - for i < len(data) && (data[i] == ' ' || data[i] == '\t') { - i++ - } - if i < len(data) && (data[i] == '\n' || data[i] == '\r') { - i++ - if i < len(data) && data[i] == '\n' && data[i-1] == '\r' { - i++ - } - } - for i < len(data) && (data[i] == ' ' || data[i] == '\t') { - i++ - } - if i >= len(data) { - return 0 - } - - var ( - linkOffset, linkEnd int - titleOffset, titleEnd int - lineEnd int - raw []byte - hasBlock bool - ) - - if p.extensions&Footnotes != 0 && noteID != 0 { - linkOffset, linkEnd, raw, hasBlock = scanFootnote(p, data, i, tabSize) - lineEnd = linkEnd - } else { - linkOffset, linkEnd, titleOffset, titleEnd, lineEnd = scanLinkRef(p, data, i) - } - if lineEnd == 0 { - return 0 - } - - // a valid ref has been found - - ref := &reference{ - noteID: noteID, - hasBlock: hasBlock, - } - - if noteID > 0 { - // reusing the link field for the id since footnotes don't have links - ref.link = data[idOffset:idEnd] - // if footnote, it's not really a title, it's the contained text - ref.title = raw - } else { - ref.link = data[linkOffset:linkEnd] - ref.title = data[titleOffset:titleEnd] - } - - // id matches are case-insensitive - id := string(bytes.ToLower(data[idOffset:idEnd])) - - p.refs[id] = ref - - return lineEnd -} - -func scanLinkRef(p *Markdown, data []byte, i int) (linkOffset, linkEnd, titleOffset, titleEnd, lineEnd int) { - // link: whitespace-free sequence, optionally between angle brackets - if data[i] == '<' { - i++ - } - linkOffset = i - for i < len(data) && data[i] != ' ' && data[i] != '\t' && data[i] != '\n' && data[i] != '\r' { - i++ - } - linkEnd = i - if data[linkOffset] == '<' && data[linkEnd-1] == '>' { - linkOffset++ - linkEnd-- - } - - // optional spacer: (space | tab)* (newline | '\'' | '"' | '(' ) - for i < len(data) && (data[i] == ' ' || data[i] == '\t') { - i++ - } - if i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != '\'' && data[i] != '"' && data[i] != '(' { - return - } - - // compute end-of-line - if i >= len(data) || data[i] == '\r' || data[i] == '\n' { - lineEnd = i - } - if i+1 < len(data) && data[i] == '\r' && data[i+1] == '\n' { - lineEnd++ - } - - // optional (space|tab)* spacer after a newline - if lineEnd > 0 { - i = lineEnd + 1 - for i < len(data) && (data[i] == ' ' || data[i] == '\t') { - i++ - } - } - - // optional title: any non-newline sequence enclosed in '"() alone on its line - if i+1 < len(data) && (data[i] == '\'' || data[i] == '"' || data[i] == '(') { - i++ - titleOffset = i - - // look for EOL - for i < len(data) && data[i] != '\n' && data[i] != '\r' { - i++ - } - if i+1 < len(data) && data[i] == '\n' && data[i+1] == '\r' { - titleEnd = i + 1 - } else { - titleEnd = i - } - - // step back - i-- - for i > titleOffset && (data[i] == ' ' || data[i] == '\t') { - i-- - } - if i > titleOffset && (data[i] == '\'' || data[i] == '"' || data[i] == ')') { - lineEnd = titleEnd - titleEnd = i - } - } - - return -} - -// The first bit of this logic is the same as Parser.listItem, but the rest -// is much simpler. This function simply finds the entire block and shifts it -// over by one tab if it is indeed a block (just returns the line if it's not). -// blockEnd is the end of the section in the input buffer, and contents is the -// extracted text that was shifted over one tab. It will need to be rendered at -// the end of the document. 
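The reference and footnote grammar described in the comments above is exercised entirely through the public Run API, which this vendored copy shares with upstream russross/blackfriday/v2. A minimal sketch of both syntaxes, assuming the upstream module path rather than this vendored one:

```go
package main

import (
	"fmt"

	"github.com/russross/blackfriday/v2"
)

func main() {
	input := []byte(`This library is hosted on [GitHub][1].

A claim that needs an explanation.[^note]

[1]: https://github.com/ "GitHub"

[^note]: This is the explanation.
`)

	// CommonExtensions does not include Footnotes, so it is OR-ed in explicitly.
	out := blackfriday.Run(input, blackfriday.WithExtensions(
		blackfriday.CommonExtensions|blackfriday.Footnotes))
	fmt.Println(string(out))
}
```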
-func scanFootnote(p *Markdown, data []byte, i, indentSize int) (blockStart, blockEnd int, contents []byte, hasBlock bool) { - if i == 0 || len(data) == 0 { - return - } - - // skip leading whitespace on first line - for i < len(data) && data[i] == ' ' { - i++ - } - - blockStart = i - - // find the end of the line - blockEnd = i - for i < len(data) && data[i-1] != '\n' { - i++ - } - - // get working buffer - var raw bytes.Buffer - - // put the first line into the working buffer - raw.Write(data[blockEnd:i]) - blockEnd = i - - // process the following lines - containsBlankLine := false - -gatherLines: - for blockEnd < len(data) { - i++ - - // find the end of this line - for i < len(data) && data[i-1] != '\n' { - i++ - } - - // if it is an empty line, guess that it is part of this item - // and move on to the next line - if p.isEmpty(data[blockEnd:i]) > 0 { - containsBlankLine = true - blockEnd = i - continue - } - - n := 0 - if n = isIndented(data[blockEnd:i], indentSize); n == 0 { - // this is the end of the block. - // we don't want to include this last line in the index. - break gatherLines - } - - // if there were blank lines before this one, insert a new one now - if containsBlankLine { - raw.WriteByte('\n') - containsBlankLine = false - } - - // get rid of that first tab, write to buffer - raw.Write(data[blockEnd+n : i]) - hasBlock = true - - blockEnd = i - } - - if data[blockEnd-1] != '\n' { - raw.WriteByte('\n') - } - - contents = raw.Bytes() - - return -} - -// -// -// Miscellaneous helper functions -// -// - -// Test if a character is a punctuation symbol. -// Taken from a private function in regexp in the stdlib. -func ispunct(c byte) bool { - for _, r := range []byte("!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~") { - if c == r { - return true - } - } - return false -} - -// Test if a character is a whitespace character. -func isspace(c byte) bool { - return c == ' ' || c == '\t' || c == '\n' || c == '\r' || c == '\f' || c == '\v' -} - -// Test if a character is letter. -func isletter(c byte) bool { - return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') -} - -// Test if a character is a letter or a digit. -// TODO: check when this is looking for ASCII alnum and when it should use unicode -func isalnum(c byte) bool { - return (c >= '0' && c <= '9') || isletter(c) -} - -// Replace tab characters with spaces, aligning to the next TAB_SIZE column. -// always ends output with a newline -func expandTabs(out *bytes.Buffer, line []byte, tabSize int) { - // first, check for common cases: no tabs, or only tabs at beginning of line - i, prefix := 0, 0 - slowcase := false - for i = 0; i < len(line); i++ { - if line[i] == '\t' { - if prefix == i { - prefix++ - } else { - slowcase = true - break - } - } - } - - // no need to decode runes if all tabs are at the beginning of the line - if !slowcase { - for i = 0; i < prefix*tabSize; i++ { - out.WriteByte(' ') - } - out.Write(line[prefix:]) - return - } - - // the slow case: we need to count runes to figure out how - // many spaces to insert for each tab - column := 0 - i = 0 - for i < len(line) { - start := i - for i < len(line) && line[i] != '\t' { - _, size := utf8.DecodeRune(line[i:]) - i += size - column++ - } - - if i > start { - out.Write(line[start:i]) - } - - if i >= len(line) { - break - } - - for { - out.WriteByte(' ') - column++ - if column%tabSize == 0 { - break - } - } - - i++ - } -} - -// Find if a line counts as indented or not. -// Returns number of characters the indent is (0 = not indented). 
-func isIndented(data []byte, indentSize int) int { - if len(data) == 0 { - return 0 - } - if data[0] == '\t' { - return 1 - } - if len(data) < indentSize { - return 0 - } - for i := 0; i < indentSize; i++ { - if data[i] != ' ' { - return 0 - } - } - return indentSize -} - -// Create a url-safe slug for fragments -func slugify(in []byte) []byte { - if len(in) == 0 { - return in - } - out := make([]byte, 0, len(in)) - sym := false - - for _, ch := range in { - if isalnum(ch) { - sym = false - out = append(out, ch) - } else if sym { - continue - } else { - out = append(out, '-') - sym = true - } - } - var a, b int - var ch byte - for a, ch = range out { - if ch != '-' { - break - } - } - for b = len(out) - 1; b > 0; b-- { - if out[b] != '-' { - break - } - } - return out[a : b+1] -} diff --git a/vendor/github.com/smallstep/cli/pkg/blackfriday/node.go b/vendor/github.com/smallstep/cli/pkg/blackfriday/node.go deleted file mode 100644 index 51b9e8c1..00000000 --- a/vendor/github.com/smallstep/cli/pkg/blackfriday/node.go +++ /dev/null @@ -1,354 +0,0 @@ -package blackfriday - -import ( - "bytes" - "fmt" -) - -// NodeType specifies a type of a single node of a syntax tree. Usually one -// node (and its type) corresponds to a single markdown feature, e.g. emphasis -// or code block. -type NodeType int - -// Constants for identifying different types of nodes. See NodeType. -const ( - Document NodeType = iota - BlockQuote - List - Item - Paragraph - Heading - HorizontalRule - Emph - Strong - Del - Link - Image - Text - HTMLBlock - CodeBlock - Softbreak - Hardbreak - Code - HTMLSpan - Table - TableCell - TableHead - TableBody - TableRow -) - -var nodeTypeNames = []string{ - Document: "Document", - BlockQuote: "BlockQuote", - List: "List", - Item: "Item", - Paragraph: "Paragraph", - Heading: "Heading", - HorizontalRule: "HorizontalRule", - Emph: "Emph", - Strong: "Strong", - Del: "Del", - Link: "Link", - Image: "Image", - Text: "Text", - HTMLBlock: "HTMLBlock", - CodeBlock: "CodeBlock", - Softbreak: "Softbreak", - Hardbreak: "Hardbreak", - Code: "Code", - HTMLSpan: "HTMLSpan", - Table: "Table", - TableCell: "TableCell", - TableHead: "TableHead", - TableBody: "TableBody", - TableRow: "TableRow", -} - -func (t NodeType) String() string { - return nodeTypeNames[t] -} - -// ListData contains fields relevant to a List and Item node type. -type ListData struct { - ListFlags ListType - Tight bool // Skip
<p>
    s around list item data if true - BulletChar byte // '*', '+' or '-' in bullet lists - Delimiter byte // '.' or ')' after the number in ordered lists - RefLink []byte // If not nil, turns this list item into a footnote item and triggers different rendering - IsFootnotesList bool // This is a list of footnotes -} - -// LinkData contains fields relevant to a Link node type. -type LinkData struct { - Destination []byte // Destination is what goes into a href - Title []byte // Title is the tooltip thing that goes in a title attribute - NoteID int // NoteID contains a serial number of a footnote, zero if it's not a footnote - Footnote *Node // If it's a footnote, this is a direct link to the footnote Node. Otherwise nil. -} - -// CodeBlockData contains fields relevant to a CodeBlock node type. -type CodeBlockData struct { - IsFenced bool // Specifies whether it's a fenced code block or an indented one - Info []byte // This holds the info string - FenceChar byte - FenceLength int - FenceOffset int -} - -// TableCellData contains fields relevant to a TableCell node type. -type TableCellData struct { - IsHeader bool // This tells if it's under the header row - Align CellAlignFlags // This holds the value for align attribute -} - -// HeadingData contains fields relevant to a Heading node type. -type HeadingData struct { - Level int // This holds the heading level number - HeadingID string // This might hold heading ID, if present - IsTitleblock bool // Specifies whether it's a title block -} - -// Node is a single element in the abstract syntax tree of the parsed document. -// It holds connections to the structurally neighboring nodes and, for certain -// types of nodes, additional information that might be needed when rendering. -type Node struct { - Type NodeType // Determines the type of the node - Parent *Node // Points to the parent - FirstChild *Node // Points to the first child, if any - LastChild *Node // Points to the last child, if any - Prev *Node // Previous sibling; nil if it's the first child - Next *Node // Next sibling; nil if it's the last child - - Literal []byte // Text contents of the leaf nodes - - HeadingData // Populated if Type is Heading - ListData // Populated if Type is List - CodeBlockData // Populated if Type is CodeBlock - LinkData // Populated if Type is Link - TableCellData // Populated if Type is TableCell - - content []byte // Markdown content of the block nodes - open bool // Specifies an open block node that has not been finished to process yet -} - -// NewNode allocates a node of a specified type. -func NewNode(typ NodeType) *Node { - return &Node{ - Type: typ, - open: true, - } -} - -func (n *Node) String() string { - ellipsis := "" - snippet := n.Literal - if len(snippet) > 16 { - snippet = snippet[:16] - ellipsis = "..." - } - return fmt.Sprintf("%s: '%s%s'", n.Type, snippet, ellipsis) -} - -// Unlink removes node 'n' from the tree. -// It panics if the node is nil. -func (n *Node) Unlink() { - if n.Prev != nil { - n.Prev.Next = n.Next - } else if n.Parent != nil { - n.Parent.FirstChild = n.Next - } - if n.Next != nil { - n.Next.Prev = n.Prev - } else if n.Parent != nil { - n.Parent.LastChild = n.Prev - } - n.Parent = nil - n.Next = nil - n.Prev = nil -} - -// AppendChild adds a node 'child' as a child of 'n'. -// It panics if either node is nil. 
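Since Node, NewNode, and AppendChild (implemented next) are all exported, a syntax tree can also be assembled by hand rather than via Parse. A small sketch, again assuming the upstream blackfriday/v2 module:

```go
package main

import (
	"fmt"

	"github.com/russross/blackfriday/v2"
)

func main() {
	// Build a one-paragraph document by hand: Document -> Paragraph -> Text.
	doc := blackfriday.NewNode(blackfriday.Document)
	para := blackfriday.NewNode(blackfriday.Paragraph)
	text := blackfriday.NewNode(blackfriday.Text)
	text.Literal = []byte("hello")

	para.AppendChild(text)
	doc.AppendChild(para)

	fmt.Println(doc.FirstChild == para) // true
	fmt.Println(text.String())          // Text: 'hello'
}
```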
-func (n *Node) AppendChild(child *Node) { - child.Unlink() - child.Parent = n - if n.LastChild != nil { - n.LastChild.Next = child - child.Prev = n.LastChild - n.LastChild = child - } else { - n.FirstChild = child - n.LastChild = child - } -} - -// InsertBefore inserts 'sibling' immediately before 'n'. -// It panics if either node is nil. -func (n *Node) InsertBefore(sibling *Node) { - sibling.Unlink() - sibling.Prev = n.Prev - if sibling.Prev != nil { - sibling.Prev.Next = sibling - } - sibling.Next = n - n.Prev = sibling - sibling.Parent = n.Parent - if sibling.Prev == nil { - sibling.Parent.FirstChild = sibling - } -} - -func (n *Node) isContainer() bool { - switch n.Type { - case Document: - fallthrough - case BlockQuote: - fallthrough - case List: - fallthrough - case Item: - fallthrough - case Paragraph: - fallthrough - case Heading: - fallthrough - case Emph: - fallthrough - case Strong: - fallthrough - case Del: - fallthrough - case Link: - fallthrough - case Image: - fallthrough - case Table: - fallthrough - case TableHead: - fallthrough - case TableBody: - fallthrough - case TableRow: - fallthrough - case TableCell: - return true - default: - return false - } -} - -func (n *Node) canContain(t NodeType) bool { - if n.Type == List { - return t == Item - } - if n.Type == Document || n.Type == BlockQuote || n.Type == Item { - return t != Item - } - if n.Type == Table { - return t == TableHead || t == TableBody - } - if n.Type == TableHead || n.Type == TableBody { - return t == TableRow - } - if n.Type == TableRow { - return t == TableCell - } - return false -} - -// WalkStatus allows NodeVisitor to have some control over the tree traversal. -// It is returned from NodeVisitor and different values allow Node.Walk to -// decide which node to go to next. -type WalkStatus int - -const ( - // GoToNext is the default traversal of every node. - GoToNext WalkStatus = iota - // SkipChildren tells walker to skip all children of current node. - SkipChildren - // Terminate tells walker to terminate the traversal. - Terminate -) - -// NodeVisitor is a callback to be called when traversing the syntax tree. -// Called twice for every node: once with entering=true when the branch is -// first visited, then with entering=false after all the children are done. -type NodeVisitor func(node *Node, entering bool) WalkStatus - -// Walk is a convenience method that instantiates a walker and starts a -// traversal of subtree rooted at n. 
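Walk, defined next, is the same traversal hook the default HTML renderer is driven by in Run. A sketch of a custom visitor that prints node types on entry and prunes heading subtrees, under the same upstream-module assumption:

```go
package main

import (
	"fmt"

	"github.com/russross/blackfriday/v2"
)

func main() {
	md := blackfriday.New(blackfriday.WithExtensions(blackfriday.CommonExtensions))
	doc := md.Parse([]byte("# Title\n\nSome *emphasized* text.\n"))

	doc.Walk(func(node *blackfriday.Node, entering bool) blackfriday.WalkStatus {
		if !entering {
			return blackfriday.GoToNext
		}
		fmt.Println(node.Type)
		if node.Type == blackfriday.Heading {
			// Don't descend into the heading's inline children.
			return blackfriday.SkipChildren
		}
		return blackfriday.GoToNext
	})
}
```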
-func (n *Node) Walk(visitor NodeVisitor) { - w := newNodeWalker(n) - for w.current != nil { - status := visitor(w.current, w.entering) - switch status { - case GoToNext: - w.next() - case SkipChildren: - w.entering = false - w.next() - case Terminate: - return - } - } -} - -type nodeWalker struct { - current *Node - root *Node - entering bool -} - -func newNodeWalker(root *Node) *nodeWalker { - return &nodeWalker{ - current: root, - root: root, - entering: true, - } -} - -func (nw *nodeWalker) next() { - if (!nw.current.isContainer() || !nw.entering) && nw.current == nw.root { - nw.current = nil - return - } - if nw.entering && nw.current.isContainer() { - if nw.current.FirstChild != nil { - nw.current = nw.current.FirstChild - nw.entering = true - } else { - nw.entering = false - } - } else if nw.current.Next == nil { - nw.current = nw.current.Parent - nw.entering = false - } else { - nw.current = nw.current.Next - nw.entering = true - } -} - -func dump(ast *Node) { - fmt.Println(dumpString(ast)) -} - -func dumpR(ast *Node, depth int) string { - if ast == nil { - return "" - } - indent := bytes.Repeat([]byte("\t"), depth) - content := ast.Literal - if content == nil { - content = ast.content - } - result := fmt.Sprintf("%s%s(%q)\n", indent, ast.Type, content) - for n := ast.FirstChild; n != nil; n = n.Next { - result += dumpR(n, depth+1) - } - return result -} - -func dumpString(ast *Node) string { - return dumpR(ast, 0) -} diff --git a/vendor/github.com/smallstep/cli/pkg/blackfriday/smartypants.go b/vendor/github.com/smallstep/cli/pkg/blackfriday/smartypants.go deleted file mode 100644 index 3a220e94..00000000 --- a/vendor/github.com/smallstep/cli/pkg/blackfriday/smartypants.go +++ /dev/null @@ -1,457 +0,0 @@ -// -// Blackfriday Markdown Processor -// Available at http://github.com/russross/blackfriday -// -// Copyright © 2011 Russ Ross . -// Distributed under the Simplified BSD License. -// See README.md for details. -// - -// -// -// SmartyPants rendering -// -// - -package blackfriday - -import ( - "bytes" - "io" -) - -// SPRenderer is a struct containing state of a Smartypants renderer. -type SPRenderer struct { - inSingleQuote bool - inDoubleQuote bool - callbacks [256]smartCallback -} - -func wordBoundary(c byte) bool { - return c == 0 || isspace(c) || ispunct(c) -} - -func tolower(c byte) byte { - if c >= 'A' && c <= 'Z' { - return c - 'A' + 'a' - } - return c -} - -func isdigit(c byte) bool { - return c >= '0' && c <= '9' -} - -func smartQuoteHelper(out *bytes.Buffer, previousChar byte, nextChar byte, quote byte, isOpen *bool, addNBSP bool) bool { - // edge of the buffer is likely to be a tag that we don't get to see, - // so we treat it like text sometimes - - // enumerate all sixteen possibilities for (previousChar, nextChar) - // each can be one of {0, space, punct, other} - switch { - case previousChar == 0 && nextChar == 0: - // context is not any help here, so toggle - *isOpen = !*isOpen - case isspace(previousChar) && nextChar == 0: - // [ "] might be [ "foo...] - *isOpen = true - case ispunct(previousChar) && nextChar == 0: - // [!"] hmm... could be [Run!"] or [("...] - *isOpen = false - case /* isnormal(previousChar) && */ nextChar == 0: - // [a"] is probably a close - *isOpen = false - case previousChar == 0 && isspace(nextChar): - // [" ] might be [...foo" ] - *isOpen = false - case isspace(previousChar) && isspace(nextChar): - // [ " ] context is not any help here, so toggle - *isOpen = !*isOpen - case ispunct(previousChar) && isspace(nextChar): - // [!" 
] is probably a close - *isOpen = false - case /* isnormal(previousChar) && */ isspace(nextChar): - // [a" ] this is one of the easy cases - *isOpen = false - case previousChar == 0 && ispunct(nextChar): - // ["!] hmm... could be ["$1.95] or ["!...] - *isOpen = false - case isspace(previousChar) && ispunct(nextChar): - // [ "!] looks more like [ "$1.95] - *isOpen = true - case ispunct(previousChar) && ispunct(nextChar): - // [!"!] context is not any help here, so toggle - *isOpen = !*isOpen - case /* isnormal(previousChar) && */ ispunct(nextChar): - // [a"!] is probably a close - *isOpen = false - case previousChar == 0 /* && isnormal(nextChar) */ : - // ["a] is probably an open - *isOpen = true - case isspace(previousChar) /* && isnormal(nextChar) */ : - // [ "a] this is one of the easy cases - *isOpen = true - case ispunct(previousChar) /* && isnormal(nextChar) */ : - // [!"a] is probably an open - *isOpen = true - default: - // [a'b] maybe a contraction? - *isOpen = false - } - - // Note that with the limited lookahead, this non-breaking - // space will also be appended to single double quotes. - if addNBSP && !*isOpen { - out.WriteString(" ") - } - - out.WriteByte('&') - if *isOpen { - out.WriteByte('l') - } else { - out.WriteByte('r') - } - out.WriteByte(quote) - out.WriteString("quo;") - - if addNBSP && *isOpen { - out.WriteString(" ") - } - - return true -} - -func (r *SPRenderer) smartSingleQuote(out *bytes.Buffer, previousChar byte, text []byte) int { - if len(text) >= 2 { - t1 := tolower(text[1]) - - if t1 == '\'' { - nextChar := byte(0) - if len(text) >= 3 { - nextChar = text[2] - } - if smartQuoteHelper(out, previousChar, nextChar, 'd', &r.inDoubleQuote, false) { - return 1 - } - } - - if (t1 == 's' || t1 == 't' || t1 == 'm' || t1 == 'd') && (len(text) < 3 || wordBoundary(text[2])) { - out.WriteString("’") - return 0 - } - - if len(text) >= 3 { - t2 := tolower(text[2]) - - if ((t1 == 'r' && t2 == 'e') || (t1 == 'l' && t2 == 'l') || (t1 == 'v' && t2 == 'e')) && - (len(text) < 4 || wordBoundary(text[3])) { - out.WriteString("’") - return 0 - } - } - } - - nextChar := byte(0) - if len(text) > 1 { - nextChar = text[1] - } - if smartQuoteHelper(out, previousChar, nextChar, 's', &r.inSingleQuote, false) { - return 0 - } - - out.WriteByte(text[0]) - return 0 -} - -func (r *SPRenderer) smartParens(out *bytes.Buffer, previousChar byte, text []byte) int { - if len(text) >= 3 { - t1 := tolower(text[1]) - t2 := tolower(text[2]) - - if t1 == 'c' && t2 == ')' { - out.WriteString("©") - return 2 - } - - if t1 == 'r' && t2 == ')' { - out.WriteString("®") - return 2 - } - - if len(text) >= 4 && t1 == 't' && t2 == 'm' && text[3] == ')' { - out.WriteString("™") - return 3 - } - } - - out.WriteByte(text[0]) - return 0 -} - -func (r *SPRenderer) smartDash(out *bytes.Buffer, previousChar byte, text []byte) int { - if len(text) >= 2 { - if text[1] == '-' { - out.WriteString("—") - return 1 - } - - if wordBoundary(previousChar) && wordBoundary(text[1]) { - out.WriteString("–") - return 0 - } - } - - out.WriteByte(text[0]) - return 0 -} - -func (r *SPRenderer) smartDashLatex(out *bytes.Buffer, previousChar byte, text []byte) int { - if len(text) >= 3 && text[1] == '-' && text[2] == '-' { - out.WriteString("—") - return 2 - } - if len(text) >= 2 && text[1] == '-' { - out.WriteString("–") - return 1 - } - - out.WriteByte(text[0]) - return 0 -} - -func (r *SPRenderer) smartAmpVariant(out *bytes.Buffer, previousChar byte, text []byte, quote byte, addNBSP bool) int { - if bytes.HasPrefix(text, []byte(""")) 
{ - nextChar := byte(0) - if len(text) >= 7 { - nextChar = text[6] - } - if smartQuoteHelper(out, previousChar, nextChar, quote, &r.inDoubleQuote, addNBSP) { - return 5 - } - } - - if bytes.HasPrefix(text, []byte("�")) { - return 3 - } - - out.WriteByte('&') - return 0 -} - -func (r *SPRenderer) smartAmp(angledQuotes, addNBSP bool) func(*bytes.Buffer, byte, []byte) int { - var quote byte = 'd' - if angledQuotes { - quote = 'a' - } - - return func(out *bytes.Buffer, previousChar byte, text []byte) int { - return r.smartAmpVariant(out, previousChar, text, quote, addNBSP) - } -} - -func (r *SPRenderer) smartPeriod(out *bytes.Buffer, previousChar byte, text []byte) int { - if len(text) >= 3 && text[1] == '.' && text[2] == '.' { - out.WriteString("…") - return 2 - } - - if len(text) >= 5 && text[1] == ' ' && text[2] == '.' && text[3] == ' ' && text[4] == '.' { - out.WriteString("…") - return 4 - } - - out.WriteByte(text[0]) - return 0 -} - -func (r *SPRenderer) smartBacktick(out *bytes.Buffer, previousChar byte, text []byte) int { - if len(text) >= 2 && text[1] == '`' { - nextChar := byte(0) - if len(text) >= 3 { - nextChar = text[2] - } - if smartQuoteHelper(out, previousChar, nextChar, 'd', &r.inDoubleQuote, false) { - return 1 - } - } - - out.WriteByte(text[0]) - return 0 -} - -func (r *SPRenderer) smartNumberGeneric(out *bytes.Buffer, previousChar byte, text []byte) int { - if wordBoundary(previousChar) && previousChar != '/' && len(text) >= 3 { - // is it of the form digits/digits(word boundary)?, i.e., \d+/\d+\b - // note: check for regular slash (/) or fraction slash (â„, 0x2044, or 0xe2 81 84 in utf-8) - // and avoid changing dates like 1/23/2005 into fractions. - numEnd := 0 - for len(text) > numEnd && isdigit(text[numEnd]) { - numEnd++ - } - if numEnd == 0 { - out.WriteByte(text[0]) - return 0 - } - denStart := numEnd + 1 - if len(text) > numEnd+3 && text[numEnd] == 0xe2 && text[numEnd+1] == 0x81 && text[numEnd+2] == 0x84 { - denStart = numEnd + 3 - } else if len(text) < numEnd+2 || text[numEnd] != '/' { - out.WriteByte(text[0]) - return 0 - } - denEnd := denStart - for len(text) > denEnd && isdigit(text[denEnd]) { - denEnd++ - } - if denEnd == denStart { - out.WriteByte(text[0]) - return 0 - } - if len(text) == denEnd || wordBoundary(text[denEnd]) && text[denEnd] != '/' { - out.WriteString("") - out.Write(text[:numEnd]) - out.WriteString("") - out.Write(text[denStart:denEnd]) - out.WriteString("") - return denEnd - 1 - } - } - - out.WriteByte(text[0]) - return 0 -} - -func (r *SPRenderer) smartNumber(out *bytes.Buffer, previousChar byte, text []byte) int { - if wordBoundary(previousChar) && previousChar != '/' && len(text) >= 3 { - if text[0] == '1' && text[1] == '/' && text[2] == '2' { - if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' { - out.WriteString("½") - return 2 - } - } - - if text[0] == '1' && text[1] == '/' && text[2] == '4' { - if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' || (len(text) >= 5 && tolower(text[3]) == 't' && tolower(text[4]) == 'h') { - out.WriteString("¼") - return 2 - } - } - - if text[0] == '3' && text[1] == '/' && text[2] == '4' { - if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' || (len(text) >= 6 && tolower(text[3]) == 't' && tolower(text[4]) == 'h' && tolower(text[5]) == 's') { - out.WriteString("¾") - return 2 - } - } - } - - out.WriteByte(text[0]) - return 0 -} - -func (r *SPRenderer) smartDoubleQuoteVariant(out *bytes.Buffer, previousChar byte, text []byte, quote byte) int { - nextChar := byte(0) - if len(text) 
> 1 { - nextChar = text[1] - } - if !smartQuoteHelper(out, previousChar, nextChar, quote, &r.inDoubleQuote, false) { - out.WriteString(""") - } - - return 0 -} - -func (r *SPRenderer) smartDoubleQuote(out *bytes.Buffer, previousChar byte, text []byte) int { - return r.smartDoubleQuoteVariant(out, previousChar, text, 'd') -} - -func (r *SPRenderer) smartAngledDoubleQuote(out *bytes.Buffer, previousChar byte, text []byte) int { - return r.smartDoubleQuoteVariant(out, previousChar, text, 'a') -} - -func (r *SPRenderer) smartLeftAngle(out *bytes.Buffer, previousChar byte, text []byte) int { - i := 0 - - for i < len(text) && text[i] != '>' { - i++ - } - - out.Write(text[:i+1]) - return i -} - -type smartCallback func(out *bytes.Buffer, previousChar byte, text []byte) int - -// NewSmartypantsRenderer constructs a Smartypants renderer object. -func NewSmartypantsRenderer(flags HTMLFlags) *SPRenderer { - var ( - r SPRenderer - - smartAmpAngled = r.smartAmp(true, false) - smartAmpAngledNBSP = r.smartAmp(true, true) - smartAmpRegular = r.smartAmp(false, false) - smartAmpRegularNBSP = r.smartAmp(false, true) - - addNBSP = flags&SmartypantsQuotesNBSP != 0 - ) - - if flags&SmartypantsAngledQuotes == 0 { - r.callbacks['"'] = r.smartDoubleQuote - if !addNBSP { - r.callbacks['&'] = smartAmpRegular - } else { - r.callbacks['&'] = smartAmpRegularNBSP - } - } else { - r.callbacks['"'] = r.smartAngledDoubleQuote - if !addNBSP { - r.callbacks['&'] = smartAmpAngled - } else { - r.callbacks['&'] = smartAmpAngledNBSP - } - } - r.callbacks['\''] = r.smartSingleQuote - r.callbacks['('] = r.smartParens - if flags&SmartypantsDashes != 0 { - if flags&SmartypantsLatexDashes == 0 { - r.callbacks['-'] = r.smartDash - } else { - r.callbacks['-'] = r.smartDashLatex - } - } - r.callbacks['.'] = r.smartPeriod - if flags&SmartypantsFractions == 0 { - r.callbacks['1'] = r.smartNumber - r.callbacks['3'] = r.smartNumber - } else { - for ch := '1'; ch <= '9'; ch++ { - r.callbacks[ch] = r.smartNumberGeneric - } - } - r.callbacks['<'] = r.smartLeftAngle - r.callbacks['`'] = r.smartBacktick - return &r -} - -// Process is the entry point of the Smartypants renderer. -func (r *SPRenderer) Process(w io.Writer, text []byte) { - mark := 0 - for i := 0; i < len(text); i++ { - if action := r.callbacks[text[i]]; action != nil { - if i > mark { - w.Write(text[mark:i]) - } - previousChar := byte(0) - if i > 0 { - previousChar = text[i-1] - } - var tmp bytes.Buffer - i += action(&tmp, previousChar, text[i:]) - w.Write(tmp.Bytes()) - mark = i + 1 - } - } - if mark < len(text) { - w.Write(text[mark:]) - } -} diff --git a/vendor/github.com/smallstep/cli/ui/options.go b/vendor/github.com/smallstep/cli/ui/options.go deleted file mode 100644 index 75f730d2..00000000 --- a/vendor/github.com/smallstep/cli/ui/options.go +++ /dev/null @@ -1,163 +0,0 @@ -package ui - -import ( - "fmt" - "regexp" - - "github.com/manifoldco/promptui" -) - -type options struct { - mask rune - defaultValue string - value string - allowEdit bool - printTemplate string - promptTemplates *promptui.PromptTemplates - selectTemplates *promptui.SelectTemplates - validateFunc promptui.ValidateFunc -} - -// apply applies the given options. -func (o *options) apply(opts []Option) *options { - for _, fn := range opts { - fn(o) - } - return o -} - -// valid returns true if the validate function passes on the value. 
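Stepping back to the SmartyPants renderer deleted above: NewSmartypantsRenderer and Process are exported, so the typographic pass can be driven on its own, without the HTML renderer. A minimal sketch (upstream module path assumed; the input string is illustrative):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/russross/blackfriday/v2"
)

func main() {
	r := blackfriday.NewSmartypantsRenderer(
		blackfriday.Smartypants | blackfriday.SmartypantsDashes)

	var buf bytes.Buffer
	r.Process(&buf, []byte(`"quotes" -- and dashes...`))

	// Expected: &ldquo;quotes&rdquo; &mdash; and dashes&hellip;
	fmt.Println(buf.String())
}
```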
-func (o *options) valid() bool { - if o.validateFunc == nil { - return true - } - return o.validateFunc(o.value) == nil -} - -// getValue validates the value and returns it. -func (o *options) getValue() (string, error) { - if o.validateFunc == nil { - return o.value, nil - } - if err := o.validateFunc(o.value); err != nil { - return "", err - } - return o.value, nil -} - -// getValueBytes validates the value and returns it as a byte slice. -func (o *options) getValueBytes() ([]byte, error) { - if o.validateFunc == nil { - return []byte(o.value), nil - } - if err := o.validateFunc(o.value); err != nil { - return nil, err - } - return []byte(o.value), nil -} - -// Option is the type of the functions that modify the prompt options. -type Option func(*options) - -func extractOptions(args []interface{}) (opts []Option, rest []interface{}) { - rest = args[:0] - for _, arg := range args { - if o, ok := arg.(Option); ok { - opts = append(opts, o) - } else { - rest = append(rest, arg) - } - } - return -} - -// WithMask adds a mask to a prompt. -func WithMask(r rune) Option { - return func(o *options) { - o.mask = r - } -} - -// WithDefaultValue adds a custom string as the default value. -func WithDefaultValue(s string) Option { - return func(o *options) { - o.defaultValue = s - } -} - -// WithValue sets a custom string as the result of a prompt. If value is set, -// the prompt won't be displayed. -func WithValue(value string) Option { - return func(o *options) { - o.value = value - } -} - -// WithAllowEdit if true, let's the user edit the default value set. -func WithAllowEdit(b bool) Option { - return func(o *options) { - o.allowEdit = b - } -} - -// WithPrintTemplate sets the template to use on the print methods. -func WithPrintTemplate(template string) Option { - return func(o *options) { - o.printTemplate = template - } -} - -// WithPromptTemplates adds a custom template to a prompt. -func WithPromptTemplates(t *promptui.PromptTemplates) Option { - return func(o *options) { - o.promptTemplates = t - } -} - -// WithSelectTemplates adds a custom template to a select. -func WithSelectTemplates(t *promptui.SelectTemplates) Option { - return func(o *options) { - o.selectTemplates = t - } -} - -// WithValidateFunc adds a custom validation function to a prompt. -func WithValidateFunc(fn func(string) error) Option { - return func(o *options) { - o.validateFunc = fn - } -} - -// WithValidateNotEmpty adds a custom validation function to a prompt that -// checks that the propted string is not empty. -func WithValidateNotEmpty() Option { - return WithValidateFunc(NotEmpty()) -} - -// WithValidateYesNo adds a custom validation function to a prompt for a Yes/No -// prompt. -func WithValidateYesNo() Option { - return WithValidateFunc(YesNo()) -} - -// WithRichPrompt add the template option with rich templates. -func WithRichPrompt() Option { - return WithPromptTemplates(PromptTemplates()) -} - -// WithSimplePrompt add the template option with simple templates. -func WithSimplePrompt() Option { - return WithPromptTemplates(SimplePromptTemplates()) -} - -// WithValidateRegexp checks a prompt answer with a regular expression. If the -// regular expression is not a valid one, the option will panic. 
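These option helpers compose with the prompt functions in ui.go below; for instance, the WithValidateRegexp option defined next can gate a prompt's answer. A sketch assuming the package is imported as github.com/smallstep/cli/ui (the label, default value, and pattern are illustrative):

```go
package main

import (
	"fmt"
	"log"

	"github.com/smallstep/cli/ui"
)

func main() {
	// Prompt interactively; the answer must match the (illustrative) pattern.
	name, err := ui.Prompt("Host name",
		ui.WithDefaultValue("myapp"),
		ui.WithAllowEdit(true),
		ui.WithValidateRegexp(`^[a-z][a-z0-9-]*$`))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("using host name:", name)
}
```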
-func WithValidateRegexp(re string) Option { - rx := regexp.MustCompile(re) - return WithValidateFunc(func(s string) error { - if rx.MatchString(s) { - return nil - } - return fmt.Errorf("%s does not match the regular expresion %s", s, re) - }) -} diff --git a/vendor/github.com/smallstep/cli/ui/templates.go b/vendor/github.com/smallstep/cli/ui/templates.go deleted file mode 100644 index 64daecf8..00000000 --- a/vendor/github.com/smallstep/cli/ui/templates.go +++ /dev/null @@ -1,93 +0,0 @@ -package ui - -import ( - "fmt" - "runtime" - - "github.com/chzyer/readline" - "github.com/manifoldco/promptui" -) - -var ( - // IconInitial is the icon used when starting in prompt mode and the icon next to the label when - // starting in select mode. - IconInitial = promptui.Styler(promptui.FGBlue)("?") - - // IconGood is the icon used when a good answer is entered in prompt mode. - IconGood = promptui.Styler(promptui.FGGreen)("✔") - - // IconWarn is the icon used when a good, but potentially invalid answer is entered in prompt mode. - IconWarn = promptui.Styler(promptui.FGYellow)("âš ") - - // IconBad is the icon used when a bad answer is entered in prompt mode. - IconBad = promptui.Styler(promptui.FGRed)("✗") - - // IconSelect is the icon used to identify the currently selected item in select mode. - IconSelect = promptui.Styler(promptui.FGBold)("â–¸") -) - -func init() { - // Set VT100 characters for windows too - if runtime.GOOS == "windows" { - promptui.KeyEnter = readline.CharEnter - promptui.KeyBackspace = readline.CharBackspace - promptui.KeyPrev = readline.CharPrev - promptui.KeyPrevDisplay = "↑" - promptui.KeyNext = readline.CharNext - promptui.KeyNextDisplay = "↓" - promptui.KeyBackward = readline.CharBackward - promptui.KeyBackwardDisplay = "â†" - promptui.KeyForward = readline.CharForward - promptui.KeyForwardDisplay = "→" - } -} - -// PrintSelectedTemplate returns the default template used in PrintSelected. -func PrintSelectedTemplate() string { - return fmt.Sprintf(`{{ "%s" | green }} {{ .Name | bold }}{{ ":" | bold }} {{ .Value }}`, IconGood) + "\n" -} - -// PromptTemplates is the default style for a prompt. -func PromptTemplates() *promptui.PromptTemplates { - bold := promptui.Styler(promptui.FGBold) - return &promptui.PromptTemplates{ - Prompt: fmt.Sprintf("%s {{ . | bold }}%s ", IconInitial, bold(":")), - Success: fmt.Sprintf("%s {{ . | bold }}%s ", bold(IconGood), bold(":")), - // Confirm: fmt.Sprintf(`{{ "%s" | bold }} {{ . | bold }}? {{ "[]" | faint }} `, IconInitial), - Valid: fmt.Sprintf("%s {{ . | bold }}%s ", bold(IconGood), bold(":")), - Invalid: fmt.Sprintf("%s {{ . | bold }}%s ", bold(IconBad), bold(":")), - } -} - -// SimplePromptTemplates is a prompt with a simple style, used by default on password prompts. -func SimplePromptTemplates() *promptui.PromptTemplates { - return &promptui.PromptTemplates{ - Prompt: "{{ . }}: ", - Success: "{{ . }}: ", - Valid: "{{ . }}: ", - Invalid: "{{ . }}: ", - } -} - -// SelectTemplates returns the default promptui.SelectTemplate for string -// slices. The given name is the prompt of the selected option. -func SelectTemplates(name string) *promptui.SelectTemplates { - return &promptui.SelectTemplates{ - Label: fmt.Sprintf("%s {{ . }}: ", IconInitial), - Active: fmt.Sprintf("%s {{ . | underline }}", IconSelect), - Inactive: " {{ . 
}}", - Selected: fmt.Sprintf(`{{ "%s" | green }} {{ "%s:" | bold }} {{ .Name }}`, IconGood, name), - } -} - -// NamedSelectTemplates returns the default promptui.SelectTemplate for struct -// slices with a name property. The given name is the prompt of the selected -// option. -func NamedSelectTemplates(name string) *promptui.SelectTemplates { - return &promptui.SelectTemplates{ - Label: fmt.Sprintf("%s {{.Name}}: ", IconInitial), - Active: fmt.Sprintf("%s {{ .Name | underline }}", IconSelect), - Inactive: " {{.Name}}", - Selected: fmt.Sprintf(`{{ "%s" | green }} {{ "%s:" | bold }} {{ .Name }}`, IconGood, name), - } -} diff --git a/vendor/github.com/smallstep/cli/ui/ui.go b/vendor/github.com/smallstep/cli/ui/ui.go deleted file mode 100644 index 29300437..00000000 --- a/vendor/github.com/smallstep/cli/ui/ui.go +++ /dev/null @@ -1,322 +0,0 @@ -package ui - -import ( - "fmt" - "os" - "strings" - "text/template" - - "github.com/chzyer/readline" - "github.com/manifoldco/promptui" - "github.com/pkg/errors" - "github.com/smallstep/cli/crypto/randutil" -) - -// stderr implements an io.WriteCloser that skips the terminal bell character -// (ASCII code 7), and writes the rest to os.Stderr. It's used to replace -// readline.Stdout, that is the package used by promptui to display the prompts. -type stderr struct{} - -// Write implements an io.WriterCloser over os.Stderr, but it skips the terminal -// bell character. -func (s *stderr) Write(b []byte) (int, error) { - if len(b) == 1 && b[0] == readline.CharBell { - return 0, nil - } - return os.Stderr.Write(b) -} - -// Close implements an io.WriterCloser over os.Stderr. -func (s *stderr) Close() error { - return os.Stderr.Close() -} - -func init() { - readline.Stdout = &stderr{} -} - -// Print uses templates to print the arguments formated to os.Stderr. -func Print(args ...interface{}) error { - var o options - opts, args := extractOptions(args) - o.apply(opts) - - // Return with a default value. This is useful when we split the question - // and the response in two lines. - if o.value != "" && o.valid() { - return nil - } - - text := fmt.Sprint(args...) - t, err := template.New("Print").Funcs(promptui.FuncMap).Parse(text) - if err != nil { - return errors.Wrap(err, "error parsing template") - } - if err := t.Execute(os.Stderr, nil); err != nil { - return errors.Wrap(err, "error executing template") - } - return nil -} - -// Printf uses templates to print the string formated to os.Stderr. -func Printf(format string, args ...interface{}) error { - var o options - opts, args := extractOptions(args) - o.apply(opts) - - // Return with a default value. This is useful when we split the question - // and the response in two lines. - if o.value != "" && o.valid() { - return nil - } - - text := fmt.Sprintf(format, args...) - t, err := template.New("Printf").Funcs(promptui.FuncMap).Parse(text) - if err != nil { - return errors.Wrap(err, "error parsing template") - } - if err := t.Execute(os.Stderr, nil); err != nil { - return errors.Wrap(err, "error executing template") - } - return nil -} - -// Println uses templates to print the given arguments to os.Stderr -func Println(args ...interface{}) error { - var o options - opts, args := extractOptions(args) - o.apply(opts) - - // Return with a default value. This is useful when we split the question - // and the response in two lines. - if o.value != "" && o.valid() { - return nil - } - - text := fmt.Sprintln(args...) 
- t, err := template.New("Println").Funcs(promptui.FuncMap).Parse(text) - if err != nil { - return errors.Wrap(err, "error parsing template") - } - if err := t.Execute(os.Stderr, nil); err != nil { - return errors.Wrap(err, "error executing template") - } - return nil -} - -// PrintSelected prints the given name and value as if they were selected from a -// promptui.Select. -func PrintSelected(name, value string, opts ...Option) error { - o := &options{ - printTemplate: PrintSelectedTemplate(), - } - o.apply(opts) - - t, err := template.New(name).Funcs(promptui.FuncMap).Parse(o.printTemplate) - if err != nil { - return errors.Wrap(err, "error parsing template") - } - - data := struct { - Name string - Value string - }{name, value} - if err := t.Execute(os.Stderr, data); err != nil { - return errors.Wrap(err, "error executing template") - } - - return nil -} - -// Prompt creates and runs a promptui.Prompt with the given label. -func Prompt(label string, opts ...Option) (string, error) { - o := &options{ - promptTemplates: PromptTemplates(), - } - o.apply(opts) - - // Return value if set - if o.value != "" { - return o.getValue() - } - - // Prompt using the terminal - clean, err := preparePromptTerminal() - if err != nil { - return "", err - } - defer clean() - - prompt := &promptui.Prompt{ - Label: label, - Default: o.defaultValue, - AllowEdit: o.allowEdit, - Validate: o.validateFunc, - Templates: o.promptTemplates, - } - value, err := prompt.Run() - if err != nil { - return "", errors.Wrap(err, "error running prompt") - } - return value, nil -} - -// PromptPassword creates and runs a promptui.Prompt with the given label. This -// prompt will mask the key entries with \r. -func PromptPassword(label string, opts ...Option) ([]byte, error) { - // Using a not printable character as they work better than \r - o := &options{ - mask: 1, - promptTemplates: SimplePromptTemplates(), - } - o.apply(opts) - - // Return value if set - if o.value != "" { - return o.getValueBytes() - } - - // Prompt using the terminal - clean, err := preparePromptTerminal() - if err != nil { - return nil, err - } - defer clean() - - prompt := &promptui.Prompt{ - Label: label, - Mask: o.mask, - Default: o.defaultValue, - AllowEdit: o.allowEdit, - Validate: o.validateFunc, - Templates: o.promptTemplates, - } - pass, err := prompt.Run() - if err != nil { - return nil, errors.Wrap(err, "error reading password") - } - return []byte(pass), nil -} - -// PromptPasswordGenerate creates and runs a promptui.Prompt with the given label. -// This prompt will mask the key entries with \r. If the result password length -// is 0, it will generate a new prompt with a generated password that can be -// edited. -func PromptPasswordGenerate(label string, opts ...Option) ([]byte, error) { - pass, err := PromptPassword(label, opts...) - if err != nil || len(pass) > 0 { - return pass, err - } - passString, err := randutil.ASCII(32) - if err != nil { - return nil, err - } - passString, err = Prompt("Password", WithDefaultValue(passString), WithAllowEdit(true), WithValidateNotEmpty()) - if err != nil { - return nil, err - } - return []byte(passString), nil -} - -// PromptYesNo creates and runs a promptui.Prompt with the given label, and -// returns true if the answer is y/yes and false if the answer is n/no. -func PromptYesNo(label string, opts ...Option) (bool, error) { - opts = append([]Option{WithValidateYesNo()}, opts...) - s, err := Prompt(label, opts...) 
- if err != nil { - return false, err - } - switch strings.ToLower(strings.TrimSpace(s)) { - case "y", "yes": - return true, nil - case "n", "no": - return false, nil - default: - return false, fmt.Errorf("%s is not a valid answer", s) - } -} - -// Select creates and runs a promptui.Select with the given label and items. -func Select(label string, items interface{}, opts ...Option) (int, string, error) { - o := &options{ - selectTemplates: SelectTemplates(label), - } - o.apply(opts) - - clean, err := prepareSelectTerminal() - if err != nil { - return 0, "", err - } - defer clean() - - prompt := &promptui.Select{ - Label: label, - Items: items, - Templates: o.selectTemplates, - } - n, s, err := prompt.Run() - if err != nil { - return 0, "", errors.Wrap(err, "error running prompt") - } - return n, s, nil -} - -func preparePromptTerminal() (func(), error) { - nothing := func() {} - if !readline.DefaultIsTerminal() { - tty, err := os.Open("/dev/tty") - if err != nil { - return nothing, errors.Wrap(err, "error allocating terminal") - } - clean := func() { - tty.Close() - } - - fd := int(tty.Fd()) - state, err := readline.MakeRaw(fd) - if err != nil { - defer clean() - return nothing, errors.Wrap(err, "error making raw terminal") - } - stdin := readline.Stdin - readline.Stdin = tty - clean = func() { - readline.Stdin = stdin - readline.Restore(fd, state) - tty.Close() - } - return clean, nil - } - - return nothing, nil -} - -func prepareSelectTerminal() (func(), error) { - nothing := func() {} - if !readline.DefaultIsTerminal() { - tty, err := os.Open("/dev/tty") - if err != nil { - return nothing, errors.Wrap(err, "error allocating terminal") - } - clean := func() { - tty.Close() - } - - fd := int(tty.Fd()) - state, err := readline.MakeRaw(fd) - if err != nil { - defer clean() - return nothing, errors.Wrap(err, "error making raw terminal") - } - stdin := os.Stdin - os.Stdin = tty - clean = func() { - os.Stdin = stdin - readline.Restore(fd, state) - tty.Close() - } - return clean, nil - } - - return nothing, nil -} diff --git a/vendor/github.com/smallstep/cli/ui/validators.go b/vendor/github.com/smallstep/cli/ui/validators.go deleted file mode 100644 index 5daa9cab..00000000 --- a/vendor/github.com/smallstep/cli/ui/validators.go +++ /dev/null @@ -1,69 +0,0 @@ -package ui - -import ( - "fmt" - "net" - "strings" - - "github.com/manifoldco/promptui" -) - -// NotEmpty is a validation function that checks that the prompted string is not -// empty. -func NotEmpty() promptui.ValidateFunc { - return func(s string) error { - if len(strings.TrimSpace(s)) == 0 { - return fmt.Errorf("value is empty") - } - return nil - } -} - -// Address is a validation function that checks that the prompted string is a -// valid TCP address. -func Address() promptui.ValidateFunc { - return func(s string) error { - if _, _, err := net.SplitHostPort(s); err != nil { - return fmt.Errorf("%s is not an TCP address", s) - } - return nil - } -} - -// IPAddress is validation function that checks that the prompted string is a -// valid IP address. -func IPAddress() promptui.ValidateFunc { - return func(s string) error { - if net.ParseIP(s) == nil { - return fmt.Errorf("%s is not an ip address", s) - } - return nil - } -} - -// DNS is a validation function that changes that the prompted string is a valid -// DNS name. 
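Each validator here (including DNS, which follows) returns a plain promptui.ValidateFunc, so they can also be invoked directly, outside any prompt. A quick sketch:

```go
package main

import (
	"fmt"

	"github.com/smallstep/cli/ui"
)

func main() {
	validate := ui.Address()
	fmt.Println(validate("127.0.0.1:8080")) // <nil>
	fmt.Println(validate("not-an-address")) // not-an-address is not an TCP address

	fmt.Println(ui.IPAddress()("10.0.0.1")) // <nil>
	fmt.Println(ui.YesNo()("maybe"))        // maybe is not a valid answer
}
```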
-func DNS() promptui.ValidateFunc { - return func(s string) error { - if len(strings.TrimSpace(s)) == 0 { - return fmt.Errorf("value is empty") - } - if _, _, err := net.SplitHostPort(s + ":443"); err != nil { - return fmt.Errorf("%s is not a valid DNS name", s) - } - return nil - } -} - -// YesNo is a validation function that checks for a Yes/No answer. -func YesNo() promptui.ValidateFunc { - return func(s string) error { - s = strings.ToLower(strings.TrimSpace(s)) - switch s { - case "y", "yes", "n", "no": - return nil - default: - return fmt.Errorf("%s is not a valid answer", s) - } - } -} diff --git a/vendor/github.com/smallstep/cli/usage/css.go b/vendor/github.com/smallstep/cli/usage/css.go deleted file mode 100644 index e552534c..00000000 --- a/vendor/github.com/smallstep/cli/usage/css.go +++ /dev/null @@ -1,764 +0,0 @@ -package usage - -// CSS code replicating Github style. -// From https://github.com/sindresorhus/github-markdown-css -// MIT license -var css = `@font-face { - font-family: octicons-link; - src: url(data:font/woff;charset=utf-8;base64,d09GRgABAAAAAAZwABAAAAAACFQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABEU0lHAAAGaAAAAAgAAAAIAAAAAUdTVUIAAAZcAAAACgAAAAoAAQAAT1MvMgAAAyQAAABJAAAAYFYEU3RjbWFwAAADcAAAAEUAAACAAJThvmN2dCAAAATkAAAABAAAAAQAAAAAZnBnbQAAA7gAAACyAAABCUM+8IhnYXNwAAAGTAAAABAAAAAQABoAI2dseWYAAAFsAAABPAAAAZwcEq9taGVhZAAAAsgAAAA0AAAANgh4a91oaGVhAAADCAAAABoAAAAkCA8DRGhtdHgAAAL8AAAADAAAAAwGAACfbG9jYQAAAsAAAAAIAAAACABiATBtYXhwAAACqAAAABgAAAAgAA8ASm5hbWUAAAToAAABQgAAAlXu73sOcG9zdAAABiwAAAAeAAAAME3QpOBwcmVwAAAEbAAAAHYAAAB/aFGpk3jaTY6xa8JAGMW/O62BDi0tJLYQincXEypYIiGJjSgHniQ6umTsUEyLm5BV6NDBP8Tpts6F0v+k/0an2i+itHDw3v2+9+DBKTzsJNnWJNTgHEy4BgG3EMI9DCEDOGEXzDADU5hBKMIgNPZqoD3SilVaXZCER3/I7AtxEJLtzzuZfI+VVkprxTlXShWKb3TBecG11rwoNlmmn1P2WYcJczl32etSpKnziC7lQyWe1smVPy/Lt7Kc+0vWY/gAgIIEqAN9we0pwKXreiMasxvabDQMM4riO+qxM2ogwDGOZTXxwxDiycQIcoYFBLj5K3EIaSctAq2kTYiw+ymhce7vwM9jSqO8JyVd5RH9gyTt2+J/yUmYlIR0s04n6+7Vm1ozezUeLEaUjhaDSuXHwVRgvLJn1tQ7xiuVv/ocTRF42mNgZGBgYGbwZOBiAAFGJBIMAAizAFoAAABiAGIAznjaY2BkYGAA4in8zwXi+W2+MjCzMIDApSwvXzC97Z4Ig8N/BxYGZgcgl52BCSQKAA3jCV8CAABfAAAAAAQAAEB42mNgZGBg4f3vACQZQABIMjKgAmYAKEgBXgAAeNpjYGY6wTiBgZWBg2kmUxoDA4MPhGZMYzBi1AHygVLYQUCaawqDA4PChxhmh/8ODDEsvAwHgMKMIDnGL0x7gJQCAwMAJd4MFwAAAHjaY2BgYGaA4DAGRgYQkAHyGMF8NgYrIM3JIAGVYYDT+AEjAwuDFpBmA9KMDEwMCh9i/v8H8sH0/4dQc1iAmAkALaUKLgAAAHjaTY9LDsIgEIbtgqHUPpDi3gPoBVyRTmTddOmqTXThEXqrob2gQ1FjwpDvfwCBdmdXC5AVKFu3e5MfNFJ29KTQT48Ob9/lqYwOGZxeUelN2U2R6+cArgtCJpauW7UQBqnFkUsjAY/kOU1cP+DAgvxwn1chZDwUbd6CFimGXwzwF6tPbFIcjEl+vvmM/byA48e6tWrKArm4ZJlCbdsrxksL1AwWn/yBSJKpYbq8AXaaTb8AAHja28jAwOC00ZrBeQNDQOWO//sdBBgYGRiYWYAEELEwMTE4uzo5Zzo5b2BxdnFOcALxNjA6b2ByTswC8jYwg0VlNuoCTWAMqNzMzsoK1rEhNqByEyerg5PMJlYuVueETKcd/89uBpnpvIEVomeHLoMsAAe1Id4AAAAAAAB42oWQT07CQBTGv0JBhagk7HQzKxca2sJCE1hDt4QF+9JOS0nbaaYDCQfwCJ7Au3AHj+LO13FMmm6cl7785vven0kBjHCBhfpYuNa5Ph1c0e2Xu3jEvWG7UdPDLZ4N92nOm+EBXuAbHmIMSRMs+4aUEd4Nd3CHD8NdvOLTsA2GL8M9PODbcL+hD7C1xoaHeLJSEao0FEW14ckxC+TU8TxvsY6X0eLPmRhry2WVioLpkrbp84LLQPGI7c6sOiUzpWIWS5GzlSgUzzLBSikOPFTOXqly7rqx0Z1Q5BAIoZBSFihQYQOOBEdkCOgXTOHA07HAGjGWiIjaPZNW13/+lm6S9FT7rLHFJ6fQbkATOG1j2OFMucKJJsxIVfQORl+9Jyda6Sl1dUYhSCm1dyClfoeDve4qMYdLEbfqHf3O/AdDumsjAAB42mNgYoAAZQYjBmyAGYQZmdhL8zLdDEydARfoAqIAAAABAAMABwAKABMAB///AA8AAQAAAAAAAAAAAAAAAAABAAAAAA==) format('woff'); -} - -.wrapper { - margin: 0 auto; - max-width: 700px; - padding: 20px 10px; -} - -.markdown-body { - -ms-text-size-adjust: 100%; - -webkit-text-size-adjust: 100%; - line-height: 1.5; - color: #24292e; - font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", 
Helvetica, Arial, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol"; - font-size: 16px; - line-height: 1.5; - word-wrap: break-word; -} - -.markdown-body .pl-c { - color: #6a737d; -} - -.markdown-body .pl-c1, -.markdown-body .pl-s .pl-v { - color: #005cc5; -} - -.markdown-body .pl-e, -.markdown-body .pl-en { - color: #6f42c1; -} - -.markdown-body .pl-smi, -.markdown-body .pl-s .pl-s1 { - color: #24292e; -} - -.markdown-body .pl-ent { - color: #22863a; -} - -.markdown-body .pl-k { - color: #d73a49; -} - -.markdown-body .pl-s, -.markdown-body .pl-pds, -.markdown-body .pl-s .pl-pse .pl-s1, -.markdown-body .pl-sr, -.markdown-body .pl-sr .pl-cce, -.markdown-body .pl-sr .pl-sre, -.markdown-body .pl-sr .pl-sra { - color: #032f62; -} - -.markdown-body .pl-v, -.markdown-body .pl-smw { - color: #e36209; -} - -.markdown-body .pl-bu { - color: #b31d28; -} - -.markdown-body .pl-ii { - color: #fafbfc; - background-color: #b31d28; -} - -.markdown-body .pl-c2 { - color: #fafbfc; - background-color: #d73a49; -} - -.markdown-body .pl-c2::before { - content: "^M"; -} - -.markdown-body .pl-sr .pl-cce { - font-weight: bold; - color: #22863a; -} - -.markdown-body .pl-ml { - color: #735c0f; -} - -.markdown-body .pl-mh, -.markdown-body .pl-mh .pl-en, -.markdown-body .pl-ms { - font-weight: bold; - color: #005cc5; -} - -.markdown-body .pl-mi { - font-style: italic; - color: #24292e; -} - -.markdown-body .pl-mb { - font-weight: bold; - color: #24292e; -} - -.markdown-body .pl-md { - color: #b31d28; - background-color: #ffeef0; -} - -.markdown-body .pl-mi1 { - color: #22863a; - background-color: #f0fff4; -} - -.markdown-body .pl-mc { - color: #e36209; - background-color: #ffebda; -} - -.markdown-body .pl-mi2 { - color: #f6f8fa; - background-color: #005cc5; -} - -.markdown-body .pl-mdr { - font-weight: bold; - color: #6f42c1; -} - -.markdown-body .pl-ba { - color: #586069; -} - -.markdown-body .pl-sg { - color: #959da5; -} - -.markdown-body .pl-corl { - text-decoration: underline; - color: #032f62; -} - -.markdown-body .octicon { - display: inline-block; - vertical-align: text-top; - fill: currentColor; -} - -.markdown-body a { - background-color: transparent; -} - -.markdown-body a:active, -.markdown-body a:hover { - outline-width: 0; -} - -.markdown-body strong { - font-weight: inherit; -} - -.markdown-body strong { - font-weight: bolder; -} - -.markdown-body h1 { - font-size: 2em; - margin: 0.67em 0; -} - -.markdown-body img { - border-style: none; -} - -.markdown-body code, -.markdown-body kbd, -.markdown-body pre { - font-family: monospace, monospace; - font-size: 1em; -} - -.markdown-body hr { - box-sizing: content-box; - height: 0; - overflow: visible; -} - -.markdown-body input { - font: inherit; - margin: 0; -} - -.markdown-body input { - overflow: visible; -} - -.markdown-body [type="checkbox"] { - box-sizing: border-box; - padding: 0; -} - -.markdown-body * { - box-sizing: border-box; -} - -.markdown-body input { - font-family: inherit; - font-size: inherit; - line-height: inherit; -} - -.markdown-body a { - color: #0366d6; - text-decoration: none; -} - -.markdown-body a:hover { - text-decoration: underline; -} - -.markdown-body strong { - font-weight: 600; -} - -.markdown-body hr { - height: 0; - margin: 15px 0; - overflow: hidden; - background: transparent; - border: 0; - border-bottom: 1px solid #dfe2e5; -} - -.markdown-body hr::before { - display: table; - content: ""; -} - -.markdown-body hr::after { - display: table; - clear: both; - content: ""; -} - -.markdown-body table { - 
border-spacing: 0; - border-collapse: collapse; -} - -.markdown-body td, -.markdown-body th { - padding: 0; -} - -.markdown-body h1, -.markdown-body h2, -.markdown-body h3, -.markdown-body h4, -.markdown-body h5, -.markdown-body h6 { - margin-top: 0; - margin-bottom: 0; -} - -.markdown-body h1 { - font-size: 32px; - font-weight: 600; -} - -.markdown-body h2 { - font-size: 24px; - font-weight: 600; -} - -.markdown-body h3 { - font-size: 20px; - font-weight: 600; -} - -.markdown-body h4 { - font-size: 16px; - font-weight: 600; -} - -.markdown-body h5 { - font-size: 14px; - font-weight: 600; -} - -.markdown-body h6 { - font-size: 12px; - font-weight: 600; -} - -.markdown-body p { - margin-top: 0; - margin-bottom: 10px; -} - -.markdown-body blockquote { - margin: 0; -} - -.markdown-body ul, -.markdown-body ol { - padding-left: 0; - margin-top: 0; - margin-bottom: 0; -} - -.markdown-body ol ol, -.markdown-body ul ol { - list-style-type: lower-roman; -} - -.markdown-body ul ul ol, -.markdown-body ul ol ol, -.markdown-body ol ul ol, -.markdown-body ol ol ol { - list-style-type: lower-alpha; -} - -.markdown-body dd { - margin-left: 0; -} - -.markdown-body code { - font-family: "SFMono-Regular", Consolas, "Liberation Mono", Menlo, Courier, monospace; - font-size: 12px; -} - -.markdown-body pre { - margin-top: 0; - margin-bottom: 0; - font-family: "SFMono-Regular", Consolas, "Liberation Mono", Menlo, Courier, monospace; - font-size: 12px; -} - -.markdown-body .octicon { - vertical-align: text-bottom; -} - -.markdown-body .pl-0 { - padding-left: 0 !important; -} - -.markdown-body .pl-1 { - padding-left: 4px !important; -} - -.markdown-body .pl-2 { - padding-left: 8px !important; -} - -.markdown-body .pl-3 { - padding-left: 16px !important; -} - -.markdown-body .pl-4 { - padding-left: 24px !important; -} - -.markdown-body .pl-5 { - padding-left: 32px !important; -} - -.markdown-body .pl-6 { - padding-left: 40px !important; -} - -.markdown-body::before { - display: table; - content: ""; -} - -.markdown-body::after { - display: table; - clear: both; - content: ""; -} - -.markdown-body>*:first-child { - margin-top: 0 !important; -} - -.markdown-body>*:last-child { - margin-bottom: 0 !important; -} - -.markdown-body a:not([href]) { - color: inherit; - text-decoration: none; -} - -.markdown-body .anchor { - float: left; - padding-right: 4px; - margin-left: -20px; - line-height: 1; -} - -.markdown-body .anchor:focus { - outline: none; -} - -.markdown-body p, -.markdown-body blockquote, -.markdown-body ul, -.markdown-body ol, -.markdown-body dl, -.markdown-body table, -.markdown-body pre { - margin-top: 0; - margin-bottom: 16px; -} - -.markdown-body hr { - height: 0.25em; - padding: 0; - margin: 24px 0; - background-color: #e1e4e8; - border: 0; -} - -.markdown-body blockquote { - padding: 0 1em; - color: #6a737d; - border-left: 0.25em solid #dfe2e5; -} - -.markdown-body blockquote>:first-child { - margin-top: 0; -} - -.markdown-body blockquote>:last-child { - margin-bottom: 0; -} - -.markdown-body kbd { - display: inline-block; - padding: 3px 5px; - font-size: 11px; - line-height: 10px; - color: #444d56; - vertical-align: middle; - background-color: #fafbfc; - border: solid 1px #c6cbd1; - border-bottom-color: #959da5; - border-radius: 3px; - box-shadow: inset 0 -1px 0 #959da5; -} - -.markdown-body h1, -.markdown-body h2, -.markdown-body h3, -.markdown-body h4, -.markdown-body h5, -.markdown-body h6 { - margin-top: 24px; - margin-bottom: 16px; - font-weight: 600; - line-height: 1.25; -} - -.markdown-body h1 
.octicon-link, -.markdown-body h2 .octicon-link, -.markdown-body h3 .octicon-link, -.markdown-body h4 .octicon-link, -.markdown-body h5 .octicon-link, -.markdown-body h6 .octicon-link { - color: #1b1f23; - vertical-align: middle; - visibility: hidden; -} - -.markdown-body h1:hover .anchor, -.markdown-body h2:hover .anchor, -.markdown-body h3:hover .anchor, -.markdown-body h4:hover .anchor, -.markdown-body h5:hover .anchor, -.markdown-body h6:hover .anchor { - text-decoration: none; -} - -.markdown-body h1:hover .anchor .octicon-link, -.markdown-body h2:hover .anchor .octicon-link, -.markdown-body h3:hover .anchor .octicon-link, -.markdown-body h4:hover .anchor .octicon-link, -.markdown-body h5:hover .anchor .octicon-link, -.markdown-body h6:hover .anchor .octicon-link { - visibility: visible; -} - -.markdown-body h1 { - padding-bottom: 0.3em; - font-size: 2em; - border-bottom: 1px solid #eaecef; -} - -.markdown-body h2 { - padding-bottom: 0.3em; - font-size: 1.5em; - border-bottom: 1px solid #eaecef; -} - -.markdown-body h3 { - font-size: 1.25em; -} - -.markdown-body h4 { - font-size: 1em; -} - -.markdown-body h5 { - font-size: 0.875em; -} - -.markdown-body h6 { - font-size: 0.85em; - color: #6a737d; -} - -.markdown-body ul, -.markdown-body ol { - padding-left: 2em; -} - -.markdown-body ul ul, -.markdown-body ul ol, -.markdown-body ol ol, -.markdown-body ol ul { - margin-top: 0; - margin-bottom: 0; -} - -.markdown-body li { - word-wrap: break-all; -} - -.markdown-body li>p { - margin-top: 16px; -} - -.markdown-body li+li { - margin-top: 0.25em; -} - -.markdown-body dl { - padding: 0; -} - -.markdown-body dl dt { - padding: 0; - margin-top: 16px; - font-size: 1em; - font-style: italic; - font-weight: 600; -} - -.markdown-body dl dd { - padding: 0 16px; - margin-bottom: 16px; -} - -.markdown-body table { - display: block; - width: 100%; - overflow: auto; -} - -.markdown-body table th { - font-weight: 600; -} - -.markdown-body table th, -.markdown-body table td { - padding: 6px 13px; - border: 1px solid #dfe2e5; -} - -.markdown-body table tr { - background-color: #fff; - border-top: 1px solid #c6cbd1; -} - -.markdown-body table tr:nth-child(2n) { - background-color: #f6f8fa; -} - -.markdown-body img { - max-width: 100%; - box-sizing: content-box; - background-color: #fff; -} - -.markdown-body img[align=right] { - padding-left: 20px; -} - -.markdown-body img[align=left] { - padding-right: 20px; -} - -.markdown-body code { - padding: 0.2em 0.4em; - margin: 0; - font-size: 85%; - background-color: rgba(27,31,35,0.05); - border-radius: 3px; -} - -.markdown-body pre { - word-wrap: normal; -} - -.markdown-body pre>code { - padding: 0; - margin: 0; - font-size: 100%; - word-break: normal; - white-space: pre; - background: transparent; - border: 0; -} - -.markdown-body .highlight { - margin-bottom: 16px; -} - -.markdown-body .highlight pre { - margin-bottom: 0; - word-break: normal; -} - -.markdown-body .highlight pre, -.markdown-body pre { - padding: 16px; - overflow: auto; - font-size: 85%; - line-height: 1.45; - background-color: #f6f8fa; - border-radius: 3px; -} - -.markdown-body pre code { - display: inline; - max-width: auto; - padding: 0; - margin: 0; - overflow: visible; - line-height: inherit; - word-wrap: normal; - background-color: transparent; - border: 0; -} - -.markdown-body .full-commit .btn-outline:not(:disabled):hover { - color: #005cc5; - border-color: #005cc5; -} - -.markdown-body kbd { - display: inline-block; - padding: 3px 5px; - font: 11px "SFMono-Regular", Consolas, 
"Liberation Mono", Menlo, Courier, monospace; - line-height: 10px; - color: #444d56; - vertical-align: middle; - background-color: #fafbfc; - border: solid 1px #d1d5da; - border-bottom-color: #c6cbd1; - border-radius: 3px; - box-shadow: inset 0 -1px 0 #c6cbd1; -} - -.markdown-body :checked+.radio-label { - position: relative; - z-index: 1; - border-color: #0366d6; -} - -.markdown-body .task-list-item { - list-style-type: none; -} - -.markdown-body .task-list-item+.task-list-item { - margin-top: 3px; -} - -.markdown-body .task-list-item input { - margin: 0 0.2em 0.25em -1.6em; - vertical-align: middle; -} - -.markdown-body hr { - border-bottom-color: #eee; -} - -.command { - font-family: "SFMono-Regular", Consolas, "Liberation Mono", Menlo, Courier, - monospace; - font-size: 16px; - padding-left: 40px; - -} - -.command h1 { - border: none; - margin-left: -40px; -} - -.command h2 { - border: none; - margin-top: 2em; - margin-left: -40px; - font-size: 18px; -} - -.command>ul { - padding-left: 0; -} - -.command ul { - list-style-type: none; -} - -.command table { - margin: 2em 0 1em; - display: block; - width: 100%; - overflow: auto; - border-collapse: collapse; -} - -.command table th { - font-weight: 600; -} - -.command table th, -.command table td { - padding: 6px 13px; - border: 1px solid #dfe2e5; -} - -.command table tr { - background-color: #fff; - border-top: 1px solid #c6cbd1; -} - -.command table tr:nth-child(2n) { - background-color: #f6f8fa; -} - - -` diff --git a/vendor/github.com/smallstep/cli/usage/help.go b/vendor/github.com/smallstep/cli/usage/help.go deleted file mode 100644 index 86962105..00000000 --- a/vendor/github.com/smallstep/cli/usage/help.go +++ /dev/null @@ -1,193 +0,0 @@ -package usage - -import ( - "fmt" - "strings" - - "github.com/urfave/cli" -) - -// HelpCommandAction is the action function of the overwritten help command. -var HelpCommandAction = cli.ActionFunc(helpAction) - -// HelpCommand overwrites default urfvafe/cli help command to support one or -// multiple subcommands like: -// step help -// step help crypto -// step help crypto jwt -// step help crypto jwt sign -// ... -func HelpCommand() cli.Command { - return cli.Command{ - Name: "help", - Aliases: []string{"h"}, - Usage: "display help for the specified command or command group", - UsageText: "**step help** ", - Description: `**step help** command displays help for a command or command group. 
- -## EXAMPLES - -Display help for **step ca certificate**: -''' -$ step help ca certificate -''' - -Display help for **step ssh**: -''' -$ step help ssh -'''`, - Action: HelpCommandAction, - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "http", - Usage: "HTTP service address (e.g., ':8080')", - }, - cli.StringFlag{ - Name: "html", - Usage: "The export for HTML docs.", - }, - cli.StringFlag{ - Name: "markdown", - Usage: "The export for Markdown docs.", - }, - cli.BoolFlag{ - Name: "report", - Usage: "Writes a JSON report to the HTML docs directory.", - }, - cli.BoolFlag{ - Name: "hugo", - Usage: "Writes hugo (vs jekyll) compatible markdown files", - }, - }, - } -} - -func helpAction(ctx *cli.Context) error { - // use html version - if ctx.IsSet("http") { - return httpHelpAction(ctx) - } - - if ctx.IsSet("html") { - return htmlHelpAction(ctx) - } - - if ctx.IsSet("markdown") { - return markdownHelpAction(ctx) - } - - args := ctx.Args() - if args.Present() { - last := len(args) - 1 - lastName := args[last] - subcmd := ctx.App.Commands - parent := createParentCommand(ctx) - - for _, name := range args[:last] { - for _, cmd := range subcmd { - if cmd.HasName(name) { - parent = cmd - subcmd = cmd.Subcommands - break - } - } - } - - for _, cmd := range subcmd { - if cmd.HasName(lastName) { - cmd.HelpName = fmt.Sprintf("%s %s", ctx.App.HelpName, strings.Join(args, " ")) - parent.HelpName = fmt.Sprintf("%s %s", ctx.App.HelpName, strings.Join(args[:last], " ")) - - ctx.Command = cmd - if len(cmd.Subcommands) == 0 { - ctx.App = createCliApp(ctx, parent) - return cli.ShowCommandHelp(ctx, lastName) - } - - ctx.App = createCliApp(ctx, cmd) - return cli.ShowCommandHelp(ctx, "") - } - } - - return cli.NewExitError(fmt.Sprintf("No help topic for '%s %s'", ctx.App.Name, strings.Join(args, " ")), 3) - } - - cli.ShowAppHelp(ctx) - return nil -} - -// createParentCommand returns a command representation of the app. -func createParentCommand(ctx *cli.Context) cli.Command { - return cli.Command{ - Name: ctx.App.Name, - HelpName: ctx.App.HelpName, - Usage: ctx.App.Usage, - UsageText: ctx.App.UsageText, - ArgsUsage: ctx.App.ArgsUsage, - Description: ctx.App.Description, - Subcommands: ctx.App.Commands, - Flags: ctx.App.Flags, - } -} - -// createCliApp is re-implementation of urfave/cli method (in command.go): -// -// func (c Command) startApp(ctx *Context) error -// -// It lets us show the subcommands when help is executed like: -// -// step help foo -// step help foo bar -// ... 
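// Aside: the help action above resolves "step help a b c" by walking the
// command tree one name at a time, matching each segment with
// cli.Command.HasName (which also checks aliases). A minimal standalone
// sketch of that lookup, assuming urfave/cli v1 as vendored here; the
// resolve helper is hypothetical and not part of this diff.
package main

import (
	"fmt"

	"github.com/urfave/cli"
)

// resolve follows path through cmds and returns the deepest match, or
// nil when a segment cannot be found.
func resolve(cmds []cli.Command, path []string) *cli.Command {
	for i, name := range path {
		matched := false
		for j := range cmds {
			if cmds[j].HasName(name) {
				if i == len(path)-1 {
					return &cmds[j]
				}
				cmds = cmds[j].Subcommands
				matched = true
				break
			}
		}
		if !matched {
			return nil
		}
	}
	return nil
}

func main() {
	commands := []cli.Command{
		{Name: "crypto", Subcommands: []cli.Command{{Name: "jwt"}}},
	}
	if cmd := resolve(commands, []string{"crypto", "jwt"}); cmd != nil {
		fmt.Println(cmd.Name) // prints: jwt
	}
}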
-func createCliApp(ctx *cli.Context, cmd cli.Command) *cli.App { - app := cli.NewApp() - app.Metadata = ctx.App.Metadata - - // set the name and usage - app.Name = cmd.HelpName - app.HelpName = cmd.HelpName - - app.Usage = cmd.Usage - app.UsageText = cmd.UsageText - app.Description = cmd.Description - app.ArgsUsage = cmd.ArgsUsage - - // set CommandNotFound - app.CommandNotFound = ctx.App.CommandNotFound - app.CustomAppHelpTemplate = cmd.CustomHelpTemplate - - // set the flags and commands - app.Commands = cmd.Subcommands - app.Flags = cmd.Flags - - app.Version = ctx.App.Version - app.Compiled = ctx.App.Compiled - app.Author = ctx.App.Author - app.Email = ctx.App.Email - app.Writer = ctx.App.Writer - app.ErrWriter = ctx.App.ErrWriter - - // Do not show help or version on subcommands - app.HideHelp = true - app.HideVersion = true - - // bash completion - app.EnableBashCompletion = ctx.App.EnableBashCompletion - if cmd.BashComplete != nil { - app.BashComplete = cmd.BashComplete - } - - // set the actions - app.Before = cmd.Before - app.After = cmd.After - - if cmd.Action != nil { - app.Action = cmd.Action - } else { - app.Action = helpAction - } - app.OnUsageError = cmd.OnUsageError - - app.Setup() - return app -} diff --git a/vendor/github.com/smallstep/cli/usage/html.go b/vendor/github.com/smallstep/cli/usage/html.go deleted file mode 100644 index 38875584..00000000 --- a/vendor/github.com/smallstep/cli/usage/html.go +++ /dev/null @@ -1,352 +0,0 @@ -package usage - -import ( - "fmt" - "io/ioutil" - "net/http" - "os" - "path" - "strings" - - "github.com/smallstep/cli/errs" - "github.com/urfave/cli" -) - -func httpHelpAction(ctx *cli.Context) error { - addr := ctx.String("http") - if addr == "" { - return errs.RequiredFlag(ctx, "http") - } - - fmt.Printf("Serving HTTP on %s ...\n", addr) - return http.ListenAndServe(addr, &htmlHelpHandler{ - cliApp: ctx.App, - }) -} - -func markdownHelpAction(ctx *cli.Context) error { - dir := path.Clean(ctx.String("markdown")) - if err := os.MkdirAll(dir, 0755); err != nil { - return errs.FileError(err, dir) - } - - isHugo := ctx.Bool("hugo") - - // app index - index := path.Join(dir, "index.md") - w, err := os.Create(index) - if err != nil { - return errs.FileError(err, index) - } - markdownHelpPrinter(w, mdAppHelpTemplate, "", ctx.App) - if err := w.Close(); err != nil { - return errs.FileError(err, index) - } - - // Subcommands - for _, cmd := range ctx.App.Commands { - if err := markdownHelpCommand(ctx.App, cmd, cmd, path.Join(dir, cmd.Name), isHugo); err != nil { - return err - } - } - return nil -} - -func markdownHelpCommand(app *cli.App, cmd cli.Command, parent cli.Command, base string, isHugo bool) error { - if err := os.MkdirAll(base, 0755); err != nil { - return errs.FileError(err, base) - } - - fileName := "index.md" - // preserve jekyll compatibility for transition period - if isHugo && len(cmd.Subcommands) > 0 { - fileName = "_index.md" - } - - index := path.Join(base, fileName) - w, err := os.Create(index) - if err != nil { - return errs.FileError(err, index) - } - - parentName := parent.HelpName - if cmd.HelpName == parent.HelpName { - parentName = "step" - } - - if len(cmd.Subcommands) == 0 { - markdownHelpPrinter(w, mdCommandHelpTemplate, parentName, cmd) - return errs.FileError(w.Close(), index) - } - - ctx := cli.NewContext(app, nil, nil) - ctx.App = createCliApp(ctx, cmd) - markdownHelpPrinter(w, mdSubcommandHelpTemplate, parentName, ctx.App) - if err := w.Close(); err != nil { - return errs.FileError(err, index) - } - - for _, sub := 
range cmd.Subcommands { - sub.HelpName = fmt.Sprintf("%s %s", cmd.HelpName, sub.Name) - if err := markdownHelpCommand(app, sub, cmd, path.Join(base, sub.Name), isHugo); err != nil { - return err - } - } - - return nil -} - -func htmlHelpAction(ctx *cli.Context) error { - dir := path.Clean(ctx.String("html")) - - if err := os.MkdirAll(dir, 0755); err != nil { - return errs.FileError(err, dir) - } - - // app index - index := path.Join(dir, "index.html") - w, err := os.Create(index) - if err != nil { - return errs.FileError(err, index) - } - - tophelp := htmlHelpPrinter(w, mdAppHelpTemplate, ctx.App) - var report *Report - if ctx.IsSet("report") { - report = NewReport(ctx.App.Name, tophelp) - } - - if err := w.Close(); err != nil { - return errs.FileError(err, index) - } - - // css style - cssFile := path.Join(dir, "style.css") - if err := ioutil.WriteFile(cssFile, []byte(css), 0666); err != nil { - return errs.FileError(err, cssFile) - } - - // Subcommands - for _, cmd := range ctx.App.Commands { - if err := htmlHelpCommand(ctx.App, cmd, path.Join(dir, cmd.Name), report); err != nil { - return err - } - } - - // report - if report != nil { - repjson := path.Join(dir, "report.json") - rjw, err := os.Create(repjson) - if err != nil { - return errs.FileError(err, repjson) - } - - if err := report.Write(rjw); err != nil { - return err - } - - if err := rjw.Close(); err != nil { - return errs.FileError(err, repjson) - } - } - - return nil -} - -func htmlHelpCommand(app *cli.App, cmd cli.Command, base string, report *Report) error { - if err := os.MkdirAll(base, 0755); err != nil { - return errs.FileError(err, base) - } - - index := path.Join(base, "index.html") - w, err := os.Create(index) - if err != nil { - return errs.FileError(err, index) - } - - if len(cmd.Subcommands) == 0 { - cmdhelp := htmlHelpPrinter(w, mdCommandHelpTemplate, cmd) - - if report != nil { - report.Process(cmd.HelpName, cmdhelp) - } - - return errs.FileError(w.Close(), index) - } - - ctx := cli.NewContext(app, nil, nil) - ctx.App = createCliApp(ctx, cmd) - subhelp := htmlHelpPrinter(w, mdSubcommandHelpTemplate, ctx.App) - - if report != nil { - report.Process(cmd.HelpName, subhelp) - } - - if err := w.Close(); err != nil { - return errs.FileError(err, index) - } - - for _, sub := range cmd.Subcommands { - sub.HelpName = fmt.Sprintf("%s %s", cmd.HelpName, sub.Name) - if err := htmlHelpCommand(app, sub, path.Join(base, sub.Name), report); err != nil { - return err - } - } - - return nil -} - -type htmlHelpHandler struct { - cliApp *cli.App -} - -func (h *htmlHelpHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { - ctx := cli.NewContext(h.cliApp, nil, nil) - - // clean request URI - requestURI := path.Clean(req.RequestURI) - if requestURI == "/" { - htmlHelpPrinter(w, mdAppHelpTemplate, ctx.App) - return - } - - if requestURI == "/style.css" { - w.Header().Set("Content-Type", `text/css; charset="utf-8"`) - w.Write([]byte(css)) - return - } - - args := strings.Split(requestURI, "/") - last := len(args) - 1 - lastName := args[last] - subcmd := ctx.App.Commands - parent := createParentCommand(ctx) - for _, name := range args[:last] { - for _, cmd := range subcmd { - if cmd.HasName(name) { - parent = cmd - subcmd = cmd.Subcommands - break - } - } - } - - for _, cmd := range subcmd { - if cmd.HasName(lastName) { - cmd.HelpName = fmt.Sprintf("%s %s", ctx.App.HelpName, strings.Join(args, " ")) - parent.HelpName = fmt.Sprintf("%s %s", ctx.App.HelpName, strings.Join(args[:last], " ")) - - ctx.Command = cmd - if 
len(cmd.Subcommands) == 0 { - htmlHelpPrinter(w, mdCommandHelpTemplate, cmd) - return - } - - ctx.App = createCliApp(ctx, cmd) - htmlHelpPrinter(w, mdSubcommandHelpTemplate, ctx.App) - return - } - } - - http.NotFound(w, req) -} - -// AppHelpTemplate contains the modified template for the main app -var mdAppHelpTemplate = `## NAME -**{{.HelpName}}** -- {{.Usage}} - -## USAGE - -'''raw -{{if .UsageText}}{{.UsageText}}{{else}}**{{.HelpName}}**{{if .Commands}} {{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments]{{end}}{{end}} -''' -{{- if .Description}} - -## DESCRIPTION -{{.Description}}{{end}}{{if .VisibleCommands}} - -## COMMANDS - -{{range .VisibleCategories}}{{if .Name}}{{.Name}}:{{end}} -| Name | Usage | -|---|---|{{range .VisibleCommands}} -| **[{{join .Names ", "}}]({{.Name}}/)** | {{.Usage}} |{{end}} -{{end}}{{if .VisibleFlags}}{{end}} - -## OPTIONS - -{{range $index, $option := .VisibleFlags}}{{if $index}} -{{end}}{{$option}} -{{end}}{{end}}{{if .Copyright}}{{if len .Authors}} - -## AUTHOR{{with $length := len .Authors}}{{if ne 1 $length}}S{{end}}{{end}}: - -{{range $index, $author := .Authors}}{{if $index}} -{{end}}{{$author}}{{end}}{{end}}{{if .Version}}{{if not .HideVersion}} - -## VERSION - -{{.Version}}{{end}}{{end}} - -## COPYRIGHT - -{{.Copyright}} -{{end}} -` - -// SubcommandHelpTemplate contains the modified template for a sub command -// Note that the weird "|||\n|---|---|" syntax sets up a markdown table with empty headers. -var mdSubcommandHelpTemplate = `## NAME -**{{.HelpName}}** -- {{.Usage}} - -## USAGE - -'''raw -{{if .UsageText}}{{.UsageText}}{{else}}**{{.HelpName}}** {{if .VisibleFlags}} [options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments]{{end}}{{end}} -''' -{{- if .Description}} - -## DESCRIPTION - -{{.Description}}{{end}} - -## COMMANDS - -{{range .VisibleCategories}}{{if .Name}}{{.Name}}:{{end}} -| Name | Usage | -|---|---|{{range .VisibleCommands}} -| **[{{join .Names ", "}}]({{.Name}}/)** | {{.Usage}} |{{end}} -{{end}}{{if .VisibleFlags}} - -## OPTIONS - -{{range .VisibleFlags}} -{{.}} -{{end}}{{end}} -` - -// CommandHelpTemplate contains the modified template for a command -var mdCommandHelpTemplate = `## NAME -**{{.HelpName}}** -- {{.Usage}} - -## USAGE - -'''raw -{{if .UsageText}}{{.UsageText}}{{else}}**{{.HelpName}}**{{if .VisibleFlags}} [options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments]{{end}}{{end}} -''' -{{- if .Category}} - -## CATEGORY - -{{.Category}}{{end}}{{if .Description}} - -## DESCRIPTION - -{{.Description}}{{end}}{{if .VisibleFlags}} - -## OPTIONS - -{{range .VisibleFlags}} -{{.}} -{{end}}{{end}} -` diff --git a/vendor/github.com/smallstep/cli/usage/printer.go b/vendor/github.com/smallstep/cli/usage/printer.go deleted file mode 100644 index 52399610..00000000 --- a/vendor/github.com/smallstep/cli/usage/printer.go +++ /dev/null @@ -1,233 +0,0 @@ -package usage - -import ( - "bytes" - "fmt" - "io" - "regexp" - "strings" - "text/template" - "unicode" - - md "github.com/smallstep/cli/pkg/blackfriday" - "github.com/urfave/cli" -) - -var sectionRe = regexp.MustCompile(`(?m:^##)`) -var sectionNameRe = regexp.MustCompile(`(?m:^## [^\n]+)`) -var indentRe = regexp.MustCompile(`(?m:^:[^\n]+)`) -var definitionListRe = regexp.MustCompile(`(?m:^[\t ]+\*\*[^\*]+\*\*[^\n]*\s+:[^\n]+)`) - -//var sectionRe = regexp.MustCompile(`^## [^\n]*$`) - -type frontmatterData struct { - Data interface{} - Parent string - Children []string -} - -// HelpPrinter overwrites cli.HelpPrinter and prints the formatted help to the terminal. 
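// Aside: HelpPrinter below can take effect because urfave/cli v1 exposes
// its help printer as a reassignable package variable. A minimal sketch
// of that hook under the same assumption (urfave/cli v1); the wrapper
// printer here is illustrative only, not smallstep's.
package main

import (
	"fmt"
	"io"
	"os"

	"github.com/urfave/cli"
)

func main() {
	stock := cli.HelpPrinter // keep the default so the override can delegate
	cli.HelpPrinter = func(w io.Writer, templ string, data interface{}) {
		fmt.Fprintln(w, "== custom help ==")
		stock(w, templ, data)
	}

	app := cli.NewApp()
	app.Name = "demo"
	_ = app.Run(os.Args) // with no arguments this prints the (wrapped) help
}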
-func HelpPrinter(w io.Writer, templ string, data interface{}) { - b := helpPreprocessor(w, templ, data, false) - w.Write(Render(b)) -} - -func htmlHelpPrinter(w io.Writer, templ string, data interface{}) []byte { - b := helpPreprocessor(w, templ, data, true) - w.Write([]byte(`<html><head><title>step command line documentation</title>`)) - w.Write([]byte(`<link rel="stylesheet" href="/style.css">`)) - w.Write([]byte(`</head><body><div class="wrapper"><div class="markdown-body">
    `)) - html := md.Run(b) - w.Write(html) - w.Write([]byte(`
    `)) - - return html -} - -func markdownHelpPrinter(w io.Writer, templ string, parent string, data interface{}) { - b := helpPreprocessor(w, templ, data, true) - - frontmatter := frontmatterData{ - Data: data, - Parent: parent, - } - - if app, ok := data.(*cli.App); ok { - for _, cmd := range app.Commands { - frontmatter.Children = append(frontmatter.Children, cmd.Name) - } - } - - var frontMatterTemplate = `--- -layout: auto-doc -title: {{.Data.HelpName}} -menu: - docs: -{{- if .Parent}} - parent: {{.Parent}} -{{- end }} -{{- if .Children }} - children: -{{- range .Children }} - - {{.}} -{{- end }} -{{- end }} ---- - -` - t, err := template.New("frontmatter").Parse(frontMatterTemplate) - if err != nil { - panic(err) - } - err = t.Execute(w, frontmatter) - if err != nil { - panic(err) - } - w.Write(b) -} - -func helpPreprocessor(w io.Writer, templ string, data interface{}, applyRx bool) []byte { - buf := new(bytes.Buffer) - cli.HelpPrinterCustom(buf, templ, data, nil) - //w.Write(buf.Bytes()) - // s := string(markdownify(buf.Bytes())) - s := markdownify(buf) - // Move the OPTIONS section to the right place. urfave puts them at the end - // of the file, we want them to be after POSITIONAL ARGUMENTS, DESCRIPTION, - // USAGE, or NAME (in that order, depending on which sections exist). - optLoc := strings.Index(s, "## OPTIONS") - if optLoc != -1 { - optEnd := findSectionEnd("OPTIONS", s) - if optEnd != -1 { - options := s[optLoc:optEnd] - s = s[:optLoc] + s[optEnd:] - if newLoc := findSectionEnd("POSITIONAL ARGUMENTS", s); newLoc != -1 { - s = s[:newLoc] + options + s[newLoc:] - } else if newLoc := findSectionEnd("DESCRIPTION", s); newLoc != -1 { - s = s[:newLoc] + options + s[newLoc:] - } else if newLoc := findSectionEnd("USAGE", s); newLoc != -1 { - s = s[:newLoc] + options + s[newLoc:] - } else if newLoc := findSectionEnd("NAME", s); newLoc != -1 { - s = s[:newLoc] + options + s[newLoc:] - } else { - // Keep it at the end I guess :/. - s = s + options - } - } - } - - if applyRx { - // Keep capitalized only the first letter in arguments names. - s = sectionNameRe.ReplaceAllStringFunc(s, func(s string) string { - return s[0:4] + strings.ToLower(s[4:]) - }) - // Remove `:` at the start of a line. - s = indentRe.ReplaceAllStringFunc(s, func(s string) string { - return strings.TrimSpace(s[1:]) - }) - // Convert lines like: - // **Foo** - // : Bar zar ... - // To: - // - **Foo**: Bar zar ... 
- s = definitionListRe.ReplaceAllStringFunc(s, func(s string) string { - i := strings.Index(s, "\n") - j := strings.Index(s, ":") - return "- " + strings.TrimSpace(s[:i]) + ": " + strings.TrimSpace(s[j+1:]) - }) - } - - return []byte(s) -} - -func findSectionEnd(h, s string) int { - start := strings.Index(s, fmt.Sprintf("## %s", h)) - if start == -1 { - return start - } - nextSection := sectionRe.FindStringIndex(s[start+2:]) - if nextSection == nil { - return len(s) - } - return start + 2 + nextSection[0] -} - -// Convert some stuff that we can't easily write in help files because -// backticks and raw strings don't mix: -// - "" to "`foo`" -// - "'''" to "```" -func markdownify(r *bytes.Buffer) string { - const escapeByte = byte('\\') - var last byte - var inCode bool - - w := new(bytes.Buffer) - for { - b, err := r.ReadByte() - if err != nil { - return w.String() - } - loop: - switch b { - case '<': - if last != escapeByte && !inCode { - w.WriteByte('`') - } else { - w.WriteByte(b) - } - case '>': - if last != escapeByte && !inCode { - w.WriteByte('`') - } else { - w.WriteByte(b) - } - case '\'': - b1, _ := r.ReadByte() - b2, _ := r.ReadByte() - if b1 == b && b2 == b { - w.WriteString("```") - if !inCode { - if n, _, err := r.ReadRune(); err == nil { - if unicode.IsSpace(n) { - w.WriteString("shell") - } - r.UnreadRune() - } - } - inCode = !inCode - } else { - // We can only unread the last one (b2) - w.WriteByte(b) - r.UnreadByte() - b = b1 - last = b - goto loop - } - case '*': - if inCode { - if b1, _ := r.ReadByte(); b1 != '*' { - w.WriteByte(b) - w.UnreadByte() - } - } else { - w.WriteByte(b) - } - case escapeByte: - if last == escapeByte { - w.WriteByte(escapeByte) - b = 0 - } else { - if n, _, err := r.ReadRune(); err == nil { - if unicode.IsSpace(n) { - w.WriteByte(escapeByte) - } - r.UnreadRune() - } - } - case 0: // probably because io.EOF - default: - w.WriteByte(b) - } - last = b - } -} diff --git a/vendor/github.com/smallstep/cli/usage/renderer.go b/vendor/github.com/smallstep/cli/usage/renderer.go deleted file mode 100644 index cfbc682f..00000000 --- a/vendor/github.com/smallstep/cli/usage/renderer.go +++ /dev/null @@ -1,371 +0,0 @@ -package usage - -import ( - "bufio" - "bytes" - "fmt" - "io" - "regexp" - "strings" - "text/tabwriter" - "unicode" - - "github.com/samfoo/ansi" - md "github.com/smallstep/cli/pkg/blackfriday" -) - -// Render renders the given data with a custom markdown renderer. -func Render(b []byte) []byte { - return md.Run(b, md.WithRenderer(&Renderer{6, 0, nil, nil, false})) -} - -var colorEscapeRe = regexp.MustCompile(`\033\[\d*(;\d*)?m?\]?`) -var maxLineLength = 80 - -func stripColors(b []byte) []byte { - return colorEscapeRe.ReplaceAll(b, []byte("")) -} - -type item struct { - flags md.ListType - term []byte - definitions [][]byte -} - -type list struct { - items []item - flags md.ListType - parent *list -} - -/* TODO: commented because unused -func (l *list) isUnordered() bool { - return !l.isOrdered() && !l.isDefinition() -} - -func (l *list) isOrdered() bool { - return l.flags&md.ListTypeOrdered != 0 -} - -func (l *list) containsBlock() bool { - // TODO: Not sure if we have to check every item or if it gets - // automatically set on the list? - return l.flags&md.ListItemContainsBlock != 0 -} -*/ - -func (l *list) isDefinition() bool { - return l.flags&md.ListTypeDefinition != 0 -} - -type bufqueue struct { - w io.Writer - buf *bytes.Buffer - next *bufqueue - mode RenderMode -} - -// RenderMode enumerates different line breaks modes. 
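// Aside: markdownify above turns ''' fences from help strings into ```
// fences, scanning byte by byte so it can respect escapes and avoid
// rewriting inside code spans. A deliberately naive sketch of the same
// conversion; unlike the real scanner it ignores escaping entirely.
package main

import (
	"fmt"
	"strings"
)

func main() {
	help := "Example:\n'''\n$ step help ssh\n'''\n"
	fmt.Print(strings.ReplaceAll(help, "'''", "```"))
}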
-type RenderMode int - -const ( - // RenderModeKeepBreaks will keep the line breaks in the docs. - RenderModeKeepBreaks RenderMode = iota - // RenderModeBreakLines will automatically wrap the lines. - RenderModeBreakLines -) - -// Renderer implements a custom markdown renderer for blackfriday. -type Renderer struct { - depth int - listdepth int - list *list - out *bufqueue - inpara bool -} - -func (r *Renderer) write(b []byte) { - r.out.w.Write(b) -} - -func (r *Renderer) printf(s string, a ...interface{}) { - fmt.Fprintf(r.out.w, s, a...) -} - -func (r *Renderer) capture(mode RenderMode) { - buf := new(bytes.Buffer) - r.out = &bufqueue{buf, buf, r.out, mode} -} - -func (r *Renderer) finishCapture() *bytes.Buffer { - buf := r.out.buf - r.out = r.out.next - return buf -} - -func (r *Renderer) inParagraph() bool { - return r.inpara -} - -/* TODO: commented because unused -func (r *Renderer) inList() bool { - return r.list != nil -} -*/ - -func (r *Renderer) renderParagraphKeepBreaks(buf *bytes.Buffer) { - scanner := bufio.NewScanner(buf) - for scanner.Scan() { - r.printf(strings.Repeat(" ", r.depth)+"%s\n", scanner.Text()) - } -} - -func (r *Renderer) renderParagraphBreakLines(buf *bytes.Buffer, maxlen int) { - maxlen = maxlen - r.depth - scanner := bufio.NewScanner(buf) - scanner.Split(bufio.ScanWords) - line := []string{} - length := 0 - for scanner.Scan() { - word := scanner.Text() - wordLength := len(stripColors([]byte(word))) - // Print the line if we've got a collection of words over 80 characters, or if - // we have a single word that is over 80 characters on an otherwise empty line. - if length+wordLength > maxlen { - r.printf(strings.Repeat(" ", r.depth)+"%s\n", strings.Join(line, " ")) - line = []string{word} - length = wordLength - } else if length == 0 && wordLength > maxlen { - r.printf(strings.Repeat(" ", r.depth)+"%s\n", word) - } else { - line = append(line, word) - length += wordLength + 1 // Plus one for space - } - } - if len(line) > 0 { - r.printf(strings.Repeat(" ", r.depth)+"%s\n", strings.Join(line, " ")) - } -} - -func (r *Renderer) renderParagraph(buf *bytes.Buffer) { - switch r.out.mode { - case RenderModeKeepBreaks: - r.renderParagraphKeepBreaks(buf) - case RenderModeBreakLines: - r.renderParagraphBreakLines(buf, maxLineLength) - } -} - -// RenderNode implements blackfriday.Renderer interface. -func (r *Renderer) RenderNode(w io.Writer, node *md.Node, entering bool) md.WalkStatus { - if r.out == nil { - r.out = &bufqueue{w, nil, nil, RenderModeBreakLines} - } - - switch node.Type { - case md.Paragraph: - // Alternative idea here: call r.RenderNode() with our new buffer as - // `w`. In the `else` condition here render to the outter buffer and - // always return md.Terminate. So when we enter a paragraph we start - // parsing with a new output buffer and capture the output. - if entering { - if r.inParagraph() { - panic("already in paragraph") - } - r.inpara = true - //r.printf(out, "[paragraph:") - r.capture(r.out.mode) - } else { - r.renderParagraph(r.finishCapture()) - // Write a newline unless the parent node is a definition list term. - if node.Parent.Type != md.Item || node.Parent.ListFlags&md.ListTypeTerm == 0 { - r.printf("\n") - } - r.inpara = false - //r.printf(w, ":paragraph]") - } - case md.Text: - // TODO: is this necessary? I think all text is in a paragraph. 
- if r.inParagraph() { - r.write(node.Literal) - } else { - s := strings.Replace(string(node.Literal), "\n", "\n"+strings.Repeat(" ", r.depth), -1) - r.printf(s) - } - case md.Heading: - if entering { - r.printf(ansi.ColorCode("default+bh")) - } else { - r.printf(ansi.Reset) - r.printf("\n") - } - case md.Link: - if entering { - r.printf(ansi.ColorCode("default+b")) - //r.printf("\033[2m") // Dim - } else { - r.printf(ansi.Reset) - } - case md.Strong: - if entering { - r.printf(ansi.ColorCode("default+bh")) - } else { - r.printf(ansi.Reset) - } - case md.Emph: - if entering { - r.printf(ansi.ColorCode("default+u")) - } else { - r.printf(ansi.Reset) - } - case md.Code: - r.printf(ansi.ColorCode("default+u")) - r.write(node.Literal) - r.printf(ansi.Reset) - case md.List: - if entering { - r.listdepth++ - r.list = &list{[]item{}, node.ListFlags, r.list} - //r.printf("[list (type %s:", node.ListData.ListFlags) - } else { - if r.listdepth > 1 && r.list.isDefinition() { - w := new(tabwriter.Writer) - w.Init(r.out.w, 0, 8, 4, ' ', tabwriter.StripEscape) - for _, item := range r.list.items { - fmt.Fprint(w, strings.TrimRight(string(item.term), " \n")) - fmt.Fprint(w, "\n") - for _, def := range item.definitions { - fmt.Fprint(w, strings.TrimRight(string(def), " \n")) - } - fmt.Fprintf(w, "\n\n") - } - w.Flush() - } else { - ordered := (node.ListFlags&md.ListTypeOrdered != 0) - unordered := (node.ListFlags&md.ListTypeOrdered == 0 && node.ListFlags&md.ListTypeDefinition == 0) - for i, item := range r.list.items { - if ordered || unordered { - p := bytes.IndexFunc(item.term, func(r rune) bool { return !unicode.IsSpace(r) }) - switch { - case ordered: // add numbers on ordered lists - item.term = append(item.term[:p], append([]byte(fmt.Sprintf("%d. ", i+1)), item.term[p:]...)...) - case unordered: // add bullet points on unordered lists - item.term = append(item.term[:p], append([]byte("• "), item.term[p:]...)...) - } - } - - r.write(item.term) - for _, def := range item.definitions { - r.write(def) - } - } - } - r.listdepth-- - r.list = r.list.parent - //r.printf(":list]") - } - case md.Item: - incdepth := 4 - //ltype := "normal" - if node.ListFlags&md.ListTypeTerm != 0 { - // Nested definition list terms get indented two spaces. Non-nested - // definition list terms are not indented. - if r.listdepth > 1 { - incdepth = 2 - } else { - incdepth = 0 - } - //ltype = "dt" - } else if node.ListFlags&md.ListTypeDefinition != 0 { - incdepth = 4 - //ltype = "dd" - } - - if entering { - //fmt.Fprintf(out, "[list item %s:", ltype) - r.depth += incdepth - if r.listdepth > 1 && r.list.isDefinition() { - r.capture(RenderModeKeepBreaks) - } else { - r.capture(RenderModeBreakLines) - } - if !r.list.isDefinition() || node.ListFlags&md.ListTypeTerm != 0 { - r.list.items = append(r.list.items, item{node.ListFlags, nil, nil}) - } - } else { - //fmt.Fprintf(out, ":list item]") - r.depth -= incdepth - buf := r.finishCapture() - if r.list.isDefinition() && node.ListFlags&md.ListTypeTerm == 0 { - i := len(r.list.items) - 1 - r.list.items[i].definitions = append(r.list.items[i].definitions, buf.Bytes()) - } else { - r.list.items[len(r.list.items)-1].term = buf.Bytes() - } - } - case md.Table: - if entering { - r.capture(RenderModeKeepBreaks) - w := new(tabwriter.Writer) - w.Init(r.out.w, 1, 8, 2, ' ', tabwriter.StripEscape) - r.out.w = w - } else { - r.out.w.(*tabwriter.Writer).Flush() - buf := r.finishCapture() - r.renderParagraphKeepBreaks(buf) - r.printf("\n") - } - case md.TableBody: - // Do nothing. 
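// Aside: the definition-list branch above aligns terms and definitions
// with text/tabwriter, passing tabwriter.StripEscape so \xff-escaped
// ANSI color codes are excluded from column-width calculations. A
// standalone sketch of that alignment using the same Init parameters
// (minwidth 0, tabwidth 8, padding 4, space padding).
package main

import (
	"fmt"
	"os"
	"text/tabwriter"
)

func main() {
	w := new(tabwriter.Writer)
	w.Init(os.Stdout, 0, 8, 4, ' ', tabwriter.StripEscape)
	fmt.Fprintln(w, "--kty\tkey type to use")
	fmt.Fprintln(w, "--curve\telliptic curve to use")
	w.Flush() // columns are computed and emitted on Flush
}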
- case md.TableHead: - if entering { - r.capture(r.out.mode) - } else { - // Markdown doens't have a way to create a table without headers. - // We've opted to fix that here by not rendering headers at all if - // they're empty. - result := r.finishCapture().Bytes() - if strings.TrimSpace(string(stripColors(result))) != "" { - parts := strings.Split(strings.TrimRight(string(result), "\t\n"), "\t") - for i := 0; i < len(parts); i++ { - parts[i] = "\xff" + ansi.ColorCode("default+bh") + "\xff" + parts[i] + "\xff" + ansi.Reset + "\xff" - } - r.printf(strings.Join(parts, "\t") + "\t\n") - } - } - case md.TableRow: - if entering { - r.capture(r.out.mode) - } else { - // Escape any colors in the row before writing to the - // tabwriter, otherwise they screw up the width calculations. The - // escape character for tabwriter is \xff. - result := r.finishCapture().Bytes() - result = colorEscapeRe.ReplaceAll(result, []byte("\xff$0\xff")) - r.write(result) - r.printf("\n") - } - case md.TableCell: - if !entering { - r.printf("\t") - } - case md.CodeBlock: - r.depth += 4 - r.renderParagraphKeepBreaks(bytes.NewBuffer(node.Literal)) - r.printf("\n") - r.depth -= 4 - case md.Document: - default: - r.printf("unknown block %s:", node.Type) - r.write(node.Literal) - } - //w.Write([]byte(fmt.Sprintf("node<%s; %t>", node.Type, entering))) - //w.Write(node.Literal) - return md.GoToNext -} - -// RenderHeader implements blackfriday.Renderer interface. -func (r *Renderer) RenderHeader(w io.Writer, ast *md.Node) {} - -// RenderFooter implements blackfriday.Renderer interface. -func (r *Renderer) RenderFooter(w io.Writer, ast *md.Node) {} diff --git a/vendor/github.com/smallstep/cli/usage/report.go b/vendor/github.com/smallstep/cli/usage/report.go deleted file mode 100644 index cd87831b..00000000 --- a/vendor/github.com/smallstep/cli/usage/report.go +++ /dev/null @@ -1,148 +0,0 @@ -package usage - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "regexp" - "strings" - - "golang.org/x/net/html" -) - -// Section keeps track of individual sections -type Section struct { - Command string `json:"command"` - Name string `json:"name"` - Text string `json:"text"` - Words int `json:"words"` - Lines int `json:"lines"` - Sections []*Section `json:"sections"` -} - -// Report holds together a report of sections -type Report struct { - Report []*Section `json:"report"` -} - -// NewReport returns report based on raw -func NewReport(command string, top []byte) *Report { - report := Report{} - report.Process(command, top) - - return &report -} - -// Write serializes the report to json -func (report *Report) Write(w io.Writer) error { - j, err := json.MarshalIndent(report, "", " ") - - if err != nil { - return err - } - - w.Write(j) - - return nil -} - -// Process adds a html based help page to the report -func (report *Report) Process(command string, raw []byte) error { - r := bytes.NewBuffer(raw) - doc, err := html.Parse(r) - - if err != nil { - return err - } - - if doc.FirstChild.Type != html.ElementNode || - doc.FirstChild.Data != "html" || - doc.FirstChild.FirstChild.NextSibling.Data != "body" { - return errors.New("error parsing raw html") - } - - body := doc.FirstChild.FirstChild.NextSibling - - report.addSection(command, body.FirstChild, nil) - - return nil -} - -func (report *Report) addSection(command string, node *html.Node, section *Section) (*html.Node, *Section) { - if node == nil || - node.Type != html.ElementNode || - node.Data != "h2" { - return nil, nil - } - - text, next := 
report.processNode(node) - words := strings.Fields(text) - lines := strings.Split(text, "\n") - - s := Section{ - Command: command, - Name: node.FirstChild.Data, - Text: text, - Words: len(words), - Lines: len(lines), - } - - if section == nil { - report.Report = append(report.Report, &s) - return report.addSection(command, next, &s) - } - - section.Sections = append(section.Sections, &s) - return report.addSection(command, next, section) -} - -func (report *Report) processNode(node *html.Node) (string, *html.Node) { - text := "" - current := node.NextSibling - - r, _ := regexp.Compile("<[^>]*>") - - for current != nil { - var buf bytes.Buffer - w := io.Writer(&buf) - html.Render(w, current) - - notags := r.ReplaceAllString(buf.String(), "") - clean := strings.TrimSpace(notags) - - if len(text) > 0 && len(clean) > 0 { - text = fmt.Sprintf("%s %s", text, clean) - } else if len(clean) > 0 { - text = clean - } - - current = current.NextSibling - if current == nil { - return text, nil - } else if current.Type == html.ElementNode && - current.Data == "h2" { - node = current - current = nil - } - } - - return text, node -} - -// PerHeadline returns all sections across commands/pages with the same headline -func (report *Report) PerHeadline(headline string) []Section { - var results []Section - for _, top := range report.Report { - for _, section := range top.Sections { - if section.Name != headline { - continue - } - - results = append(results, *section) - } - } - - return results -} diff --git a/vendor/github.com/smallstep/cli/usage/usage.go b/vendor/github.com/smallstep/cli/usage/usage.go deleted file mode 100644 index 6a0bae8c..00000000 --- a/vendor/github.com/smallstep/cli/usage/usage.go +++ /dev/null @@ -1,214 +0,0 @@ -package usage - -import ( - "bytes" - "fmt" - "html" - "strconv" - "strings" - "text/template" -) - -var usageTextTempl = " {{.Name}}\n {{.Usage}} {{if .Required}}(Required){{else}}(Optional){{end}}{{if .Multiple}} (Multiple can be specified){{end}}\n" -var templ *template.Template - -func init() { - templ = template.Must(template.New("usageText").Parse(usageTextTempl)) -} - -// Argument specifies the Name, Usage, and whether or not an Argument is -// required or not -type Argument struct { - Required bool - Multiple bool - Name string - Usage string -} - -// Decorate returns the name of an Argument and decorates it with notation to -// indicate whether its required or not -func (a Argument) Decorate() string { - name := a.Name - if a.Multiple { - name = name + "(s)..." 
- } - if a.Required { - return fmt.Sprintf("<%s>", name) - } - - return fmt.Sprintf("[%s]", name) -} - -// Arguments is an array of Argument structs that specify which arguments are -// accepted by a Command -type Arguments []Argument - -// UsageText returns the value of the UsageText property for a cli.Command for -// these arguments -func (args Arguments) UsageText() string { - var buf bytes.Buffer - for _, a := range args { - data := map[string]interface{}{ - "Name": a.Decorate(), - "Multiple": a.Multiple, - "Required": a.Required, - "Usage": a.Usage, - } - - err := templ.Execute(&buf, data) - if err != nil { - panic(fmt.Sprintf("Could not generate args template for %s: %s", a.Name, err)) - } - } - - return "\n\n" + buf.String() -} - -// ArgsUsage returns the value of the ArgsUsage property for a cli.Command for -// these arguments -func (args Arguments) ArgsUsage() string { - out := "" - for i, a := range args { - out += a.Decorate() - if i < len(args)-1 { - out += " " - } - } - - return out -} - -// AppHelpTemplate contains the modified template for the main app -var AppHelpTemplate = `## NAME -**{{.HelpName}}** -- {{.Usage}} - -## USAGE -{{if .UsageText}}{{.UsageText}}{{else}}**{{.HelpName}}**{{if .Commands}} {{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}_[arguments]_{{end}}{{end}}{{if .Description}} - -## DESCRIPTION -{{.Description}}{{end}}{{if .VisibleCommands}} - -## COMMANDS - -{{range .VisibleCategories}}{{if .Name}}{{.Name}}:{{end}} -||| -|---|---|{{range .VisibleCommands}} -| **{{join .Names ", "}}** | {{.Usage}} |{{end}} -{{end}}{{if .VisibleFlags}}{{end}} - -## OPTIONS - -{{range $index, $option := .VisibleFlags}}{{if $index}} -{{end}}{{$option}} -{{end}}{{end}}{{if .Copyright}}{{if len .Authors}} - -## AUTHOR{{with $length := len .Authors}}{{if ne 1 $length}}S{{end}}{{end}}: - -{{range $index, $author := .Authors}}{{if $index}} -{{end}}{{$author}}{{end}}{{end}}{{if .Version}}{{if not .HideVersion}} - -## ONLINE - -This documentation is available online at https://smallstep.com/docs/cli - -## VERSION - -{{.Version}}{{end}}{{end}} - -## COPYRIGHT - -{{.Copyright}} - -## FEEDBACK ` + - html.UnescapeString("&#"+strconv.Itoa(128525)+";") + " " + - html.UnescapeString("&#"+strconv.Itoa(127867)+";") + - ` - -The **step** utility is not instrumented for usage statistics. It does not phone home. -But your feedback is extremely valuable. Any information you can provide regarding how you’re using **step** helps. -Please send us a sentence or two, good or bad: **feedback@smallstep.com** or ask in [GitHub Discussions](https://github.com/smallstep/certificates/discussions). -{{end}} -` - -// SubcommandHelpTemplate contains the modified template for a sub command -// Note that the weird "|||\n|---|---|" syntax sets up a markdown table with empty headers. 
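// Aside: Decorate above wraps required argument names in angle brackets
// and optional ones in square brackets, and ArgsUsage joins the results
// with spaces. A small self-contained sketch of those decoration rules,
// re-implemented locally rather than importing the deleted package.
package main

import (
	"fmt"
	"strings"
)

type argument struct {
	name     string
	required bool
}

func decorate(a argument) string {
	if a.required {
		return fmt.Sprintf("<%s>", a.name)
	}
	return fmt.Sprintf("[%s]", a.name)
}

func main() {
	args := []argument{{"crt-file", true}, {"key-file", false}}
	parts := make([]string, len(args))
	for i, a := range args {
		parts[i] = decorate(a)
	}
	fmt.Println(strings.Join(parts, " ")) // <crt-file> [key-file]
}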
-var SubcommandHelpTemplate = `## NAME -**{{.HelpName}}** -- {{.Usage}} - -## USAGE - -{{if .UsageText}}{{.UsageText}}{{else}}**{{.HelpName}}** {{if .VisibleFlags}} _[options]_{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}_[arguments]_{{end}}{{end}}{{if .Description}} - -## DESCRIPTION - -{{.Description}}{{end}} - -## COMMANDS - -{{range .VisibleCategories}}{{if .Name}}{{.Name}}:{{end}} -||| -|---|---|{{range .VisibleCommands}} -| **{{join .Names ", "}}** | {{.Usage}} |{{end}} -{{end}}{{if .VisibleFlags}} - -## OPTIONS - -{{range .VisibleFlags}} -{{.}} -{{end}}{{end}} -` - -// CommandHelpTemplate contains the modified template for a command -var CommandHelpTemplate = `## NAME -**{{.HelpName}}** -- {{.Usage}} - -## USAGE - -{{if .UsageText}}{{.UsageText}}{{else}}**{{.HelpName}}**{{if .VisibleFlags}} _[options]_{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}_[arguments]_{{end}}{{end}}{{if .Category}} - -## CATEGORY - -{{.Category}}{{end}}{{if .Description}} - -## DESCRIPTION - -{{.Description}}{{end}}{{if .VisibleFlags}} - -## OPTIONS - -{{range .VisibleFlags}} -{{.}} -{{end}}{{end}} -` - -// FlagNamePrefixer converts a full flag name and its placeholder into the help -// message flag prefix. This is used by the default FlagStringer. -// -// This method clones urflave/cli functionality but adds a new line at the end. -func FlagNamePrefixer(fullName, placeholder string) string { - var prefixed string - parts := strings.Split(fullName, ",") - for i, name := range parts { - name = strings.Trim(name, " ") - prefixed += "**" + prefixFor(name) + name + "**" - - if placeholder != "" { - prefixed += "=" + placeholder - } - if i < len(parts)-1 { - prefixed += ", " - } - } - //return "* " + prefixed + "\n" - return prefixed + "\n: " -} - -func prefixFor(name string) (prefix string) { - if len(name) == 1 { - prefix = "-" - } else { - prefix = "--" - } - - return -} diff --git a/vendor/github.com/smallstep/cli/utils/cli.go b/vendor/github.com/smallstep/cli/utils/cli.go deleted file mode 100644 index e3fdfaec..00000000 --- a/vendor/github.com/smallstep/cli/utils/cli.go +++ /dev/null @@ -1,79 +0,0 @@ -package utils - -import ( - "github.com/smallstep/cli/errs" - "github.com/urfave/cli" -) - -// DefaultRSASize sets the default key size for RSA to 2048 bits. -const DefaultRSASize = 2048 - -// DefaultECCurve sets the default curve for EC to P-256. -const DefaultECCurve = "P-256" - -// GetKeyDetailsFromCLI gets the key pair algorithm, curve, and size inputs -// from the CLI context. 
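// Aside: FlagNamePrefixer above renders a flag's comma-separated names
// in bold with the right dash prefix and a trailing "\n: ", so that each
// flag becomes a markdown definition-list entry. A standalone sketch of
// the same formatting, re-implemented locally.
package main

import (
	"fmt"
	"strings"
)

func prefixFlag(fullName, placeholder string) string {
	parts := strings.Split(fullName, ",")
	for i, name := range parts {
		name = strings.TrimSpace(name)
		dash := "--"
		if len(name) == 1 {
			dash = "-" // single-letter flags get a single dash
		}
		parts[i] = "**" + dash + name + "**"
		if placeholder != "" {
			parts[i] += "=" + placeholder
		}
	}
	return strings.Join(parts, ", ") + "\n: "
}

func main() {
	fmt.Printf("%q\n", prefixFlag("kty, k", "<type>"))
	// "**--kty**=<type>, **-k**=<type>\n: "
}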
-func GetKeyDetailsFromCLI(ctx *cli.Context, insecure bool, ktyKey, curveKey, sizeKey string) (string, string, int, error) { - var ( - crv = ctx.String("curve") - size = ctx.Int("size") - kty = ctx.String("kty") - ) - - if ctx.IsSet(ktyKey) { - switch kty { - case "RSA": - if !ctx.IsSet(sizeKey) { - size = DefaultRSASize - } - if ctx.IsSet(curveKey) { - return kty, crv, size, errs.IncompatibleFlagValue(ctx, curveKey, ktyKey, kty) - } - if size < 2048 && !insecure { - return kty, crv, size, errs.MinSizeInsecureFlag(ctx, sizeKey, "2048") - } - if size <= 0 { - return kty, crv, size, errs.MinSizeFlag(ctx, sizeKey, "0") - } - case "EC": - if ctx.IsSet("size") { - return kty, crv, size, errs.IncompatibleFlagValue(ctx, sizeKey, ktyKey, kty) - } - if !ctx.IsSet("curve") { - crv = DefaultECCurve - } - switch crv { - case "P-256", "P-384", "P-521": //ok - default: - return kty, crv, size, errs.IncompatibleFlagValueWithFlagValue(ctx, ktyKey, kty, - curveKey, crv, "P-256, P-384, P-521") - } - case "OKP": - if ctx.IsSet("size") { - return kty, crv, size, errs.IncompatibleFlagValue(ctx, sizeKey, ktyKey, kty) - } - switch crv { - case "Ed25519": //ok - case "": // ok: OKP defaults to Ed25519 - crv = "Ed25519" - default: - return kty, crv, size, errs.IncompatibleFlagValueWithFlagValue(ctx, ktyKey, kty, - curveKey, crv, "Ed25519") - } - default: - return kty, crv, size, errs.InvalidFlagValue(ctx, ktyKey, kty, "RSA, EC, OKP") - } - } else { - if ctx.IsSet(curveKey) { - return kty, crv, size, errs.RequiredWithFlag(ctx, curveKey, ktyKey) - } - if ctx.IsSet("size") { - return kty, crv, size, errs.RequiredWithFlag(ctx, sizeKey, ktyKey) - } - // Set default key type | curve | size. - kty = "EC" - crv = "P-256" - size = 0 - } - return kty, crv, size, nil -} diff --git a/vendor/github.com/smallstep/cli/utils/file.go b/vendor/github.com/smallstep/cli/utils/file.go deleted file mode 100644 index 98b1ccc7..00000000 --- a/vendor/github.com/smallstep/cli/utils/file.go +++ /dev/null @@ -1,99 +0,0 @@ -package utils - -import ( - "os" - - "github.com/smallstep/cli/errs" -) - -// File represents a wrapper on os.File that supports read, write, seek and -// close methods, but they won't be called if an error occurred before. -type File struct { - File *os.File - err error -} - -// OpenFile calls os.OpenFile method and returns the os.File wrapped. -func OpenFile(name string, flag int, perm os.FileMode) (*File, error) { - f, err := os.OpenFile(name, flag, perm) - if err != nil { - return nil, errs.FileError(err, name) - } - return &File{ - File: f, - }, nil -} - -// error writes f.err if it's not set and returns f.err. -func (f *File) error(err error) error { - if f.err == nil && err != nil { - f.err = errs.FileError(err, f.File.Name()) - } - return f.err -} - -// Close wraps `func (*os.File) Close` it will always call Close but the error -// return will be the first error thrown if any. -func (f *File) Close() error { - return f.error(f.File.Close()) -} - -// Read wraps `func (*os.File) Read` but doesn't perform the operation if a -// previous error was thrown. -func (f *File) Read(b []byte) (n int, err error) { - if f.err != nil { - return 0, f.err - } - n, err = f.File.Read(b) - return n, f.error(err) -} - -// ReadAt wraps `func (*os.File) ReadAt` but doesn't perform the operation if a -// previous error was thrown. 
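// Aside: the File wrapper above turns every operation into a no-op once
// an error has been recorded, so callers can chain reads and writes and
// check a single error at Close. A minimal sketch of the same pattern
// with a local sticky-error type (not the deleted utils.File itself).
package main

import (
	"fmt"
	"os"
)

type stickyFile struct {
	f   *os.File
	err error
}

func (s *stickyFile) write(b []byte) {
	if s.err != nil {
		return // an earlier failure short-circuits the operation
	}
	_, s.err = s.f.Write(b)
}

func (s *stickyFile) close() error {
	cerr := s.f.Close()
	if s.err == nil {
		s.err = cerr
	}
	return s.err // the first error wins
}

func main() {
	f, err := os.CreateTemp("", "sticky")
	if err != nil {
		fmt.Println(err)
		return
	}
	s := &stickyFile{f: f}
	s.write([]byte("line 1\n"))
	s.write([]byte("line 2\n")) // skipped if the first write failed
	if err := s.close(); err != nil {
		fmt.Println("write failed:", err)
	}
}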
-func (f *File) ReadAt(b []byte, off int64) (n int, err error) { - if f.err != nil { - return 0, f.err - } - n, err = f.File.ReadAt(b, off) - return n, f.error(err) -} - -// Seek wraps `func (*os.File) Seek` but doesn't perform the operation if a -// previous error was thrown. -func (f *File) Seek(offset int64, whence int) (ret int64, err error) { - if f.err != nil { - return 0, f.err - } - ret, err = f.File.Seek(offset, whence) - return ret, f.error(err) -} - -// Write wraps `func (*os.File) Write` but doesn't perform the operation if a -// previous error was thrown. -func (f *File) Write(b []byte) (n int, err error) { - if f.err != nil { - return 0, f.err - } - n, err = f.File.Write(b) - return n, f.error(err) -} - -// WriteAt wraps `func (*os.File) WriteAt` but doesn't perform the operation if -// a previous error was thrown. -func (f *File) WriteAt(b []byte, off int64) (n int, err error) { - if f.err != nil { - return 0, f.err - } - n, err = f.File.WriteAt(b, off) - return n, f.error(err) -} - -// WriteString wraps `func (*os.File) WriteString` but doesn't perform the -// operation if a previous error was thrown. -func (f *File) WriteString(s string) (n int, err error) { - if f.err != nil { - return 0, f.err - } - n, err = f.File.WriteString(s) - return n, f.error(err) -} diff --git a/vendor/github.com/smallstep/cli/utils/read.go b/vendor/github.com/smallstep/cli/utils/read.go deleted file mode 100644 index 95414e52..00000000 --- a/vendor/github.com/smallstep/cli/utils/read.go +++ /dev/null @@ -1,100 +0,0 @@ -package utils - -import ( - "bufio" - "bytes" - "io" - "io/ioutil" - "os" - "strings" - "unicode" - - "github.com/pkg/errors" - "github.com/smallstep/cli/errs" - "github.com/smallstep/cli/ui" -) - -// In command line utilities, it is a de facto standard that a hyphen "-" -// indicates STDIN as a file to be read. -const stdinFilename = "-" - -// stdin points to os.Stdin. -var stdin = os.Stdin - -// FileExists is a wrapper on os.Stat that returns false if os.Stat returns an -// error, it returns true otherwise. This method does not care if os.Stat -// returns any other kind of errors. -func FileExists(path string) bool { - if path == "" { - return false - } - _, err := os.Stat(path) - return err == nil -} - -// ReadAll returns a slice of bytes with the content of the given reader. -func ReadAll(r io.Reader) ([]byte, error) { - b, err := ioutil.ReadAll(r) - return b, errors.Wrap(err, "error reading data") -} - -// ReadString reads one line from the given io.Reader. -func ReadString(r io.Reader) (string, error) { - br := bufio.NewReader(r) - str, err := br.ReadString('\n') - if err != nil && err != io.EOF { - return "", errors.Wrap(err, "error reading string") - } - return strings.TrimSpace(str), nil -} - -// ReadPasswordFromFile reads and returns the password from the given filename. -// The contents of the file will be trimmed at the right. -func ReadPasswordFromFile(filename string) ([]byte, error) { - password, err := ioutil.ReadFile(filename) - if err != nil { - return nil, errs.FileError(err, filename) - } - password = bytes.TrimRightFunc(password, unicode.IsSpace) - return password, nil -} - -// ReadStringPasswordFromFile reads and returns the password from the given filename. -// The contents of the file will be trimmed at the right. 
-func ReadStringPasswordFromFile(filename string) (string, error) { - b, err := ReadPasswordFromFile(filename) - if err != nil { - return "", err - } - return string(b), nil -} - -// ReadInput from stdin if something is detected or ask the user for an input -// using the given prompt. -func ReadInput(prompt string) ([]byte, error) { - st, err := stdin.Stat() - if err != nil { - return nil, errors.Wrap(err, "error reading data") - } - - if st.Size() == 0 && st.Mode()&os.ModeNamedPipe == 0 { - return ui.PromptPassword(prompt) - } - - return ReadAll(stdin) -} - -// ReadFile returns the contents of the file identified by name. It reads from -// STDIN if name is a hyphen ("-"). -func ReadFile(name string) (b []byte, err error) { - if name == stdinFilename { - name = "/dev/stdin" - b, err = ioutil.ReadAll(stdin) - } else { - b, err = ioutil.ReadFile(name) - } - if err != nil { - return nil, errs.FileError(err, name) - } - return b, nil -} diff --git a/vendor/github.com/smallstep/cli/utils/utils.go b/vendor/github.com/smallstep/cli/utils/utils.go deleted file mode 100644 index 3ce68e98..00000000 --- a/vendor/github.com/smallstep/cli/utils/utils.go +++ /dev/null @@ -1,19 +0,0 @@ -package utils - -import ( - "fmt" - "os" -) - -// Fail prints out the error struct if STEPDEBUG is true otherwise it just -// prints out the error message. Finally, it exits with an error code of 1. -func Fail(err error) { - if err != nil { - if os.Getenv("STEPDEBUG") == "1" { - fmt.Fprintf(os.Stderr, "%+v\n", err) - } else { - fmt.Fprintln(os.Stderr, err) - } - os.Exit(1) - } -} diff --git a/vendor/github.com/smallstep/cli/utils/write.go b/vendor/github.com/smallstep/cli/utils/write.go deleted file mode 100644 index 5ab9c42a..00000000 --- a/vendor/github.com/smallstep/cli/utils/write.go +++ /dev/null @@ -1,160 +0,0 @@ -package utils - -import ( - "bufio" - "bytes" - "fmt" - "io" - "io/ioutil" - "os" - "strings" - "time" - - "github.com/pkg/errors" - "github.com/smallstep/cli/command" - "github.com/smallstep/cli/errs" - "github.com/smallstep/cli/ui" -) - -var ( - // ErrFileExists is the error returned if a file exists. - ErrFileExists = errors.New("file exists") - - // ErrIsDir is the error returned if the file is a directory. - ErrIsDir = errors.New("file is a directory") - - // SnippetHeader is the header of a step generated snippet in a - // configuration file. - SnippetHeader = "# autogenerated by step" - - // SnippetFooter is the header of a step generated snippet in a - // configuration file. - SnippetFooter = "# end" -) - -// WriteFile wraps ioutil.WriteFile with a prompt to overwrite a file if -// the file exists. It returns ErrFileExists if the user picks to not overwrite -// the file. If force is set to true, the prompt will not be presented and the -// file if exists will be overwritten. 
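// Aside: ReadInput above decides between prompting and consuming stdin
// by stat'ing os.Stdin: input is treated as piped when the mode has
// os.ModeNamedPipe set or the reported size is non-zero. A small
// standalone sketch of that detection.
package main

import (
	"fmt"
	"io"
	"os"
)

func main() {
	st, err := os.Stdin.Stat()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	if st.Size() == 0 && st.Mode()&os.ModeNamedPipe == 0 {
		fmt.Println("no piped input; a real CLI would prompt here")
		return
	}
	b, err := io.ReadAll(os.Stdin)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	fmt.Printf("read %d bytes from stdin\n", len(b))
}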
-func WriteFile(filename string, data []byte, perm os.FileMode) error { - if command.IsForce() { - return ioutil.WriteFile(filename, data, perm) - } - - st, err := os.Stat(filename) - if err != nil { - if os.IsNotExist(err) { - return ioutil.WriteFile(filename, data, perm) - } - return errors.Wrapf(err, "error reading information for %s", filename) - } - - if st.IsDir() { - return ErrIsDir - } - - str, err := ui.Prompt(fmt.Sprintf("Would you like to overwrite %s [y/n]", filename), ui.WithValidateYesNo()) - if err != nil { - return err - } - switch strings.ToLower(strings.TrimSpace(str)) { - case "y", "yes": - case "n", "no": - return ErrFileExists - } - - return ioutil.WriteFile(filename, data, perm) -} - -// AppendNewLine appends the given data at the end of the file. If the last -// character of the file does not contain an LF it prepends it to the data. -func AppendNewLine(filename string, data []byte, perm os.FileMode) error { - f, err := OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_APPEND, perm) - if err != nil { - return err - } - // Read last character - if st, err := f.File.Stat(); err == nil && st.Size() != 0 { - last := make([]byte, 1) - f.Seek(-1, 2) - f.Read(last) - if last[0] != '\n' { - f.WriteString("\n") - } - } - f.Write(data) - return f.Close() -} - -// WriteSnippet writes the given data on the given filename. It surrounds the -// data with a header and footer, and it will replace the previous one. -func WriteSnippet(filename string, data []byte, perm os.FileMode) error { - // Get file permissions - if st, err := os.Stat(filename); err == nil { - perm = st.Mode() - } else if !os.IsNotExist(err) { - return errs.FileError(err, filename) - } - - // Read file contents - b, err := ioutil.ReadFile(filename) - if err != nil && !os.IsNotExist(err) { - return errs.FileError(err, filename) - } - - // Detect previous configuration - _, start, end := findConfiguration(bytes.NewReader(b)) - - // Replace previous configuration - f, err := OpenFile(filename, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, perm) - if err != nil { - return errs.FileError(err, filename) - } - if len(b) > 0 { - f.Write(b[:start]) - if start == end { - f.WriteString("\n") - } - } - f.WriteString(fmt.Sprintf("%s @ %s\n", SnippetHeader, time.Now().UTC().Format(time.RFC3339))) - f.Write(data) - if !bytes.HasSuffix(data, []byte("\n")) { - f.WriteString("\n") - } - f.WriteString(SnippetFooter + "\n") - if len(b) > 0 { - f.Write(b[end:]) - } - return f.Close() -} - -type offsetCounter struct { - offset int64 -} - -func (o *offsetCounter) ScanLines(data []byte, atEOF bool) (advance int, token []byte, err error) { - advance, token, err = bufio.ScanLines(data, atEOF) - o.offset += int64(advance) - return -} - -func findConfiguration(r io.Reader) (lines []string, start int64, end int64) { - var inConfig bool - counter := new(offsetCounter) - scanner := bufio.NewScanner(r) - scanner.Split(counter.ScanLines) - for scanner.Scan() { - line := scanner.Text() - switch { - case !inConfig && strings.HasPrefix(line, SnippetHeader): - inConfig = true - start = counter.offset - int64(len(line)+1) - case inConfig && strings.HasPrefix(line, SnippetFooter): - return lines, start, counter.offset - case inConfig: - lines = append(lines, line) - } - } - - return lines, counter.offset, counter.offset -} diff --git a/vendor/github.com/smallstep/nosql/.gitignore b/vendor/github.com/smallstep/nosql/.gitignore deleted file mode 100644 index 06dece20..00000000 --- a/vendor/github.com/smallstep/nosql/.gitignore +++ /dev/null @@ -1,15 +0,0 @@ -# 
Binaries for programs and plugins -*.exe -*.exe~ -*.dll -*.so -*.dylib - -# Test binary, build with `go test -c` -*.test - -# Output of the go coverage tool, specifically when used with LiteIDE -*.out - -# Vendor directory -vendor/* \ No newline at end of file diff --git a/vendor/github.com/smallstep/nosql/.golangci.yml b/vendor/github.com/smallstep/nosql/.golangci.yml deleted file mode 100644 index fcb07f93..00000000 --- a/vendor/github.com/smallstep/nosql/.golangci.yml +++ /dev/null @@ -1,71 +0,0 @@ -linters-settings: - govet: - check-shadowing: true - settings: - printf: - funcs: - - (github.com/golangci/golangci-lint/pkg/logutils.Log).Infof - - (github.com/golangci/golangci-lint/pkg/logutils.Log).Errorf - - (github.com/golangci/golangci-lint/pkg/logutils.Log).Warnf - - (github.com/golangci/golangci-lint/pkg/logutils.Log).Fatalf - golint: - min-confidence: 0 - gocyclo: - min-complexity: 10 - maligned: - suggest-new: true - dupl: - threshold: 100 - goconst: - min-len: 2 - min-occurrences: 2 - depguard: - list-type: blacklist - packages: - # logging is allowed only by logutils.Log, logrus - # is allowed to use only in logutils package - - github.com/sirupsen/logrus - misspell: - locale: US - lll: - line-length: 140 - goimports: - local-prefixes: github.com/golangci/golangci-lint - gocritic: - enabled-tags: - - performance - - style - - experimental - disabled-checks: - - wrapperFunc - - dupImport # https://github.com/go-critic/go-critic/issues/845 - -linters: - disable-all: true - enable: - - gofmt - - golint - - vet - - misspell - - ineffassign - - deadcode - - staticcheck - - unused - - structcheck - -run: - skip-dirs: - - pkg - -issues: - exclude: - - can't lint - - declaration of "err" shadows declaration at line - - should have a package comment, unless it's in another file for this package - - error strings should not be capitalized or end with punctuation or a newline -# golangci.com configuration -# https://github.com/golangci/golangci/wiki/Configuration -service: - golangci-lint-version: 1.18.x # use the fixed version to not introduce new linters unexpectedly - prepare: - - echo "here I can run custom commands, but no preparation needed for this repo" diff --git a/vendor/github.com/smallstep/nosql/.travis.yml b/vendor/github.com/smallstep/nosql/.travis.yml deleted file mode 100644 index ba4355d3..00000000 --- a/vendor/github.com/smallstep/nosql/.travis.yml +++ /dev/null @@ -1,23 +0,0 @@ -language: go -go: -- 1.14.x -services: - - mysql -addons: - apt: - packages: - - debhelper - - fakeroot - - bash-completion -env: - global: - - V=1 -before_script: -- make bootstrap -script: -- TRAVIS=1 make -after_success: -- bash <(curl -s https://codecov.io/bash) || echo "Codecov did - not collect coverage reports" -notifications: - email: false diff --git a/vendor/github.com/smallstep/nosql/LICENSE b/vendor/github.com/smallstep/nosql/LICENSE deleted file mode 100644 index 3476db75..00000000 --- a/vendor/github.com/smallstep/nosql/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright (c) 2019 Smallstep Labs, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/smallstep/nosql/Makefile b/vendor/github.com/smallstep/nosql/Makefile deleted file mode 100644 index 07f448ba..00000000 --- a/vendor/github.com/smallstep/nosql/Makefile +++ /dev/null @@ -1,78 +0,0 @@ -# Set V to 1 for verbose output from the Makefile -Q=$(if $V,,@) -PREFIX?= -SRC=$(shell find . -type f -name '*.go' -not -path "./vendor/*") -GOOS_OVERRIDE ?= -OUTPUT_ROOT=output/ - -all: test lint - -travis: travis-test lint - -.PHONY: all - -######################################### -# Bootstrapping -######################################### - -bootstra%: - $Q GO111MODULE=on go get github.com/golangci/golangci-lint/cmd/golangci-lint@v1.24.0 - -.PHONY: bootstra% - -################################################# -# Determine the type of `push` and `version` -################################################# - -# Version flags to embed in the binaries -VERSION ?= $(shell [ -d .git ] && git describe --tags --always --dirty="-dev") -# If we are not in an active git dir then try reading the version from .VERSION. -# .VERSION contains a slug populated by `git archive`. -VERSION := $(or $(VERSION),$(shell ./.version.sh .VERSION)) -VERSION := $(shell echo $(VERSION) | sed 's/^v//') -NOT_RC := $(shell echo $(VERSION) | grep -v -e -rc) - -# If TRAVIS_TAG is set then we know this ref has been tagged. -ifdef TRAVIS_TAG - ifeq ($(NOT_RC),) - PUSHTYPE=release-candidate - else - PUSHTYPE=release - endif -else - PUSHTYPE=master -endif - -######################################### -# Test -######################################### -test: - $Q $(GOFLAGS) go test -short -coverprofile=coverage.out ./... - -travis-test: - $Q $(GOFLAGS) TRAVIS=1 go test -short -coverprofile=coverage.out ./... - -.PHONY: test travis-test - -######################################### -# Linting -######################################### - -fmt: - $Q gofmt -l -w $(SRC) - -lint: - $Q LOG_LEVEL=error golangci-lint run - -.PHONY: lint fmt - -######################################### -# Clean -######################################### - -clean: -ifneq ($(BINNAME),"") - $Q rm -f bin/$(BINNAME) -endif - -.PHONY: clean diff --git a/vendor/github.com/smallstep/nosql/README.md b/vendor/github.com/smallstep/nosql/README.md deleted file mode 100644 index 6fbd019d..00000000 --- a/vendor/github.com/smallstep/nosql/README.md +++ /dev/null @@ -1,18 +0,0 @@ -# NoSQL - -NoSQL is an abstraction layer for data persistence. - -This project is in development, the API is not stable. - -# Implementations - -The current version comes with a few implementations inlcuding Mysql, Badger, -and BoltDB, but implementations are on the roadmap. - -- [ ] Memory -- [x] [BoltDB](https://github.com/etcd-io/bbolt) etcd fork. -- [x] Badger -- [x] MariaDB/MySQL -- [ ] PostgreSQL -- [ ] Cassandra -- [ ] ... diff --git a/vendor/github.com/smallstep/nosql/badger/v1/badger.go b/vendor/github.com/smallstep/nosql/badger/v1/badger.go deleted file mode 100644 index cbb8f480..00000000 --- a/vendor/github.com/smallstep/nosql/badger/v1/badger.go +++ /dev/null @@ -1,399 +0,0 @@ -package badger - -import ( - "bytes" - "encoding/binary" - "strings" - - "github.com/dgraph-io/badger" - "github.com/dgraph-io/badger/options" - "github.com/pkg/errors" - "github.com/smallstep/nosql/database" -) - -// DB is a wrapper over *badger.DB, -type DB struct { - db *badger.DB -} - -// Open opens or creates a BoltDB database in the given path. 
-func (db *DB) Open(dir string, opt ...database.Option) (err error) {
-	opts := &database.Options{}
-	for _, o := range opt {
-		if err := o(opts); err != nil {
-			return err
-		}
-	}
-
-	bo := badger.DefaultOptions(dir)
-
-	// Set the Table and Value LoadingMode - default is MemoryMap. Low memory/RAM
-	// systems may want to use FileIO.
-	switch strings.ToLower(opts.BadgerFileLoadingMode) {
-	case "", database.BadgerMemoryMap, "memorymap":
-		bo.TableLoadingMode = options.MemoryMap
-		bo.ValueLogLoadingMode = options.MemoryMap
-	case database.BadgerFileIO:
-		bo.TableLoadingMode = options.FileIO
-		bo.ValueLogLoadingMode = options.FileIO
-	default:
-		return badger.ErrInvalidLoadingMode
-	}
-
-	if opts.ValueDir != "" {
-		bo.ValueDir = opts.ValueDir
-	} else {
-		bo.ValueDir = dir
-	}
-
-	db.db, err = badger.Open(bo)
-	return errors.Wrap(err, "error opening Badger database")
-}
-
-// Close closes the DB database.
-func (db *DB) Close() error {
-	return errors.Wrap(db.db.Close(), "error closing Badger database")
-}
-
-// CreateTable creates a token element with the 'bucket' prefix so that there
-// appears to be a table.
-func (db *DB) CreateTable(bucket []byte) error {
-	bk, err := badgerEncode(bucket)
-	if err != nil {
-		return err
-	}
-	return db.db.Update(func(txn *badger.Txn) error {
-		return errors.Wrapf(txn.Set(bk, []byte{}), "failed to create %s/", bucket)
-	})
-}
-
-// DeleteTable deletes a root or embedded bucket. Returns an error if the
-// bucket cannot be found or if the key represents a non-bucket value.
-func (db *DB) DeleteTable(bucket []byte) error {
-	var tableExists bool
-	prefix, err := badgerEncode(bucket)
-	if err != nil {
-		return err
-	}
-	deleteKeys := func(keysForDelete [][]byte) error {
-		if err := db.db.Update(func(txn *badger.Txn) error {
-			for _, key := range keysForDelete {
-				tableExists = true
-				if err := txn.Delete(key); err != nil {
-					return errors.Wrapf(err, "error deleting key %s", key)
-				}
-			}
-			return nil
-		}); err != nil {
-			return errors.Wrapf(err, "update failed")
-		}
-		return nil
-	}
-
-	collectSize := 1000
-	err = db.db.View(func(txn *badger.Txn) error {
-		opts := badger.DefaultIteratorOptions
-		opts.AllVersions = false
-		opts.PrefetchValues = false
-		it := txn.NewIterator(opts)
-		defer it.Close()
-
-		keysForDelete := make([][]byte, collectSize)
-		keysCollected := 0
-		for it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() {
-			key := it.Item().KeyCopy(nil)
-			keysForDelete[keysCollected] = key
-			keysCollected++
-			if keysCollected == collectSize {
-				if err := deleteKeys(keysForDelete); err != nil {
-					return err
-				}
-				keysCollected = 0
-			}
-		}
-		if keysCollected > 0 {
-			if err := deleteKeys(keysForDelete[:keysCollected]); err != nil {
-				return err
-			}
-		}
-		if !tableExists {
-			return errors.Wrapf(database.ErrNotFound, "table %s does not exist", bucket)
-		}
-
-		return nil
-	})
-	return err
-}
-
-// badgerGet is a helper for the Get method.
-func badgerGet(txn *badger.Txn, key []byte) ([]byte, error) {
-	item, err := txn.Get(key)
-	switch {
-	case err == badger.ErrKeyNotFound:
-		return nil, errors.Wrapf(database.ErrNotFound, "key %s not found", key)
-	case err != nil:
-		return nil, errors.Wrapf(err, "failed to get key %s", key)
-	default:
-		val, err := item.ValueCopy(nil)
-		if err != nil {
-			return nil, errors.Wrap(err, "error accessing value returned by database")
-		}
-		return val, nil
-	}
-}
-
-// Get returns the value stored in the given bucket and key.
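badgerGet above maps badger.ErrKeyNotFound onto the package-level database.ErrNotFound, so callers can test for a missing key without importing Badger. A hedged usage sketch (bucket and key are illustrative; the fragment assumes an enclosing function that returns an error), ahead of the Get method below:

    v, err := db.Get([]byte("certs"), []byte("serial-42"))
    if database.IsErrNotFound(err) {
    	// Key absent: treat as a miss rather than a failure.
    } else if err != nil {
    	return err
    }
    _ = v // nil on a miss, the stored value otherwise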
-func (db *DB) Get(bucket, key []byte) (ret []byte, err error) { - bk, err := toBadgerKey(bucket, key) - if err != nil { - return nil, errors.Wrapf(err, "error converting %s/%s to badgerKey", bucket, key) - } - err = db.db.View(func(txn *badger.Txn) error { - ret, err = badgerGet(txn, bk) - return err - }) - return -} - -// Set stores the given value on bucket and key. -func (db *DB) Set(bucket, key, value []byte) error { - bk, err := toBadgerKey(bucket, key) - if err != nil { - return errors.Wrapf(err, "error converting %s/%s to badgerKey", bucket, key) - } - return db.db.Update(func(txn *badger.Txn) error { - return errors.Wrapf(txn.Set(bk, value), "failed to set %s/%s", bucket, key) - }) -} - -// Del deletes the value stored in the given bucked and key. -func (db *DB) Del(bucket, key []byte) error { - bk, err := toBadgerKey(bucket, key) - if err != nil { - return errors.Wrapf(err, "error converting %s/%s to badgerKey", bucket, key) - } - return db.db.Update(func(txn *badger.Txn) error { - return errors.Wrapf(txn.Delete(bk), "failed to delete %s/%s", bucket, key) - }) -} - -// List returns the full list of entries in a bucket. -func (db *DB) List(bucket []byte) ([]*database.Entry, error) { - var ( - entries []*database.Entry - tableExists bool - ) - err := db.db.View(func(txn *badger.Txn) error { - it := txn.NewIterator(badger.DefaultIteratorOptions) - defer it.Close() - prefix, err := badgerEncode(bucket) - if err != nil { - return err - } - for it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() { - tableExists = true - item := it.Item() - bk := item.KeyCopy(nil) - if isBadgerTable(bk) { - continue - } - _bucket, key, err := fromBadgerKey(bk) - if err != nil { - return errors.Wrapf(err, "error converting from badgerKey %s", bk) - } - if !bytes.Equal(_bucket, bucket) { - return errors.Errorf("bucket names do not match; want %v, but got %v", - bucket, _bucket) - } - v, err := item.ValueCopy(nil) - if err != nil { - return errors.Wrap(err, "error retrieving contents from database value") - } - entries = append(entries, &database.Entry{ - Bucket: _bucket, - Key: key, - Value: v, - }) - } - if !tableExists { - return errors.Wrapf(database.ErrNotFound, "bucket %s not found", bucket) - } - return nil - }) - return entries, err -} - -// CmpAndSwap modifies the value at the given bucket and key (to newValue) -// only if the existing (current) value matches oldValue. -func (db *DB) CmpAndSwap(bucket, key, oldValue, newValue []byte) ([]byte, bool, error) { - bk, err := toBadgerKey(bucket, key) - if err != nil { - return nil, false, err - } - - badgerTxn := db.db.NewTransaction(true) - defer badgerTxn.Discard() - - val, swapped, err := cmpAndSwap(badgerTxn, bk, oldValue, newValue) - switch { - case err != nil: - return nil, false, err - case swapped: - if err := badgerTxn.Commit(); err != nil { - return nil, false, errors.Wrapf(err, "failed to commit badger transaction") - } - return val, swapped, nil - default: - return val, swapped, err - } -} - -func cmpAndSwap(badgerTxn *badger.Txn, bk, oldValue, newValue []byte) ([]byte, bool, error) { - current, err := badgerGet(badgerTxn, bk) - // If value does not exist but expected is not nil, then return w/out swapping. 
- if err != nil && !database.IsErrNotFound(err) { - return nil, false, err - } - if !bytes.Equal(current, oldValue) { - return current, false, nil - } - - if err := badgerTxn.Set(bk, newValue); err != nil { - return current, false, errors.Wrapf(err, "failed to set %s", bk) - } - return newValue, true, nil -} - -// Update performs multiple commands on one read-write transaction. -func (db *DB) Update(txn *database.Tx) error { - return db.db.Update(func(badgerTxn *badger.Txn) (err error) { - for _, q := range txn.Operations { - switch q.Cmd { - case database.CreateTable: - if err = db.CreateTable(q.Bucket); err != nil { - return err - } - continue - case database.DeleteTable: - if err = db.DeleteTable(q.Bucket); err != nil { - return err - } - continue - } - bk, err := toBadgerKey(q.Bucket, q.Key) - if err != nil { - return err - } - switch q.Cmd { - case database.Get: - if q.Result, err = badgerGet(badgerTxn, bk); err != nil { - return errors.Wrapf(err, "failed to get %s/%s", q.Bucket, q.Key) - } - case database.Set: - if err := badgerTxn.Set(bk, q.Value); err != nil { - return errors.Wrapf(err, "failed to set %s/%s", q.Bucket, q.Key) - } - case database.Delete: - if err = badgerTxn.Delete(bk); err != nil { - return errors.Wrapf(err, "failed to delete %s/%s", q.Bucket, q.Key) - } - case database.CmpAndSwap: - q.Result, q.Swapped, err = cmpAndSwap(badgerTxn, bk, q.CmpValue, q.Value) - if err != nil { - return errors.Wrapf(err, "failed to CmpAndSwap %s/%s", q.Bucket, q.Key) - } - case database.CmpOrRollback: - return database.ErrOpNotSupported - default: - return database.ErrOpNotSupported - } - } - return nil - }) -} - -// toBadgerKey returns the Badger database key using the following algorithm: -// First 2 bytes are the length of the bucket/table name in little endian format, -// followed by the bucket/table name, -// followed by 2 bytes representing the length of the key in little endian format, -// followed by the key. -func toBadgerKey(bucket, key []byte) ([]byte, error) { - first, err := badgerEncode(bucket) - if err != nil { - return nil, err - } - second, err := badgerEncode(key) - if err != nil { - return nil, err - } - return append(first, second...), nil -} - -// isBadgerTable returns True if the slice is a badgerTable token, false otherwise. -// badgerTable means that the slice contains only the [size|value] of one section -// of a badgerKey and no remainder. A badgerKey is [buket|key], while a badgerTable -// is only the bucket section. -func isBadgerTable(bk []byte) bool { - if k, rest := parseBadgerEncode(bk); len(k) > 0 && len(rest) == 0 { - return true - } - return false -} - -// fromBadgerKey returns the bucket and key encoded in a BadgerKey. -// See documentation for toBadgerKey. -func fromBadgerKey(bk []byte) ([]byte, []byte, error) { - bucket, rest := parseBadgerEncode(bk) - if len(bucket) == 0 || len(rest) == 0 { - return nil, nil, errors.Errorf("invalid badger key: %v", bk) - } - - key, rest2 := parseBadgerEncode(rest) - if len(key) == 0 || len(rest2) != 0 { - return nil, nil, errors.Errorf("invalid badger key: %v", bk) - } - - return bucket, key, nil -} - -// badgerEncode encodes a byte slice into a section of a BadgerKey. -// See documentation for toBadgerKey. 
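The toBadgerKey layout documented above, a little-endian uint16 length before each section, is easiest to see on concrete bytes. A small self-contained illustration (the bucket and key values are arbitrary), ahead of badgerEncode below:

    package main

    import (
    	"bytes"
    	"encoding/binary"
    	"fmt"
    )

    func main() {
    	// Encode bucket "users" and key "abc" as [len|bucket][len|key],
    	// with each length written as a little-endian uint16.
    	// (binary.Write to a bytes.Buffer cannot fail, so the error is ignored.)
    	buf := new(bytes.Buffer)
    	for _, section := range [][]byte{[]byte("users"), []byte("abc")} {
    		binary.Write(buf, binary.LittleEndian, uint16(len(section)))
    		buf.Write(section)
    	}
    	fmt.Printf("% x\n", buf.Bytes())
    	// Output: 05 00 75 73 65 72 73 03 00 61 62 63
    }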
-func badgerEncode(val []byte) ([]byte, error) { - l := len(val) - switch { - case l == 0: - return nil, errors.Errorf("input cannot be empty") - case l > 65535: - return nil, errors.Errorf("length of input cannot be greater than 65535") - default: - lb := new(bytes.Buffer) - if err := binary.Write(lb, binary.LittleEndian, uint16(l)); err != nil { - return nil, errors.Wrap(err, "error doing binary Write") - } - return append(lb.Bytes(), val...), nil - } -} - -func parseBadgerEncode(bk []byte) (value, rest []byte) { - var ( - keyLen uint16 - start = uint16(2) - length = uint16(len(bk)) - ) - if uint16(len(bk)) < start { - return nil, bk - } - // First 2 bytes stores the length of the value. - if err := binary.Read(bytes.NewReader(bk[:2]), binary.LittleEndian, &keyLen); err != nil { - return nil, bk - } - end := start + keyLen - switch { - case length < end: - return nil, bk - case length == end: - return bk[start:end], nil - default: - return bk[start:end], bk[end:] - } -} diff --git a/vendor/github.com/smallstep/nosql/badger/v2/badger.go b/vendor/github.com/smallstep/nosql/badger/v2/badger.go deleted file mode 100644 index c8709b64..00000000 --- a/vendor/github.com/smallstep/nosql/badger/v2/badger.go +++ /dev/null @@ -1,404 +0,0 @@ -package badger - -import ( - "bytes" - "encoding/binary" - "strings" - - "github.com/dgraph-io/badger/v2" - "github.com/dgraph-io/badger/v2/options" - "github.com/pkg/errors" - "github.com/smallstep/nosql/database" -) - -// DB is a wrapper over *badger/v2.DB, -type DB struct { - db *badger.DB -} - -// Open opens or creates a BoltDB database in the given path. -func (db *DB) Open(dir string, opt ...database.Option) (err error) { - opts := &database.Options{} - for _, o := range opt { - if err := o(opts); err != nil { - return err - } - } - - bo := badger.DefaultOptions(dir) - if opts.ValueDir != "" { - bo.ValueDir = opts.ValueDir - } - - // Set the ValueLogLoadingMode - default is MemoryMap. Low memory/RAM - // systems may want to use FileIO. - switch strings.ToLower(opts.BadgerFileLoadingMode) { - case "", database.BadgerMemoryMap, "memorymap": - bo.ValueLogLoadingMode = options.MemoryMap - case database.BadgerFileIO: - bo.ValueLogLoadingMode = options.FileIO - default: - return badger.ErrInvalidLoadingMode - } - - db.db, err = badger.Open(bo) - return errors.Wrap(err, "error opening Badger database") -} - -// Close closes the DB database. -func (db *DB) Close() error { - return errors.Wrap(db.db.Close(), "error closing Badger database") -} - -// CreateTable creates a token element with the 'bucket' prefix so that such -// that their appears to be a table. -func (db *DB) CreateTable(bucket []byte) error { - bk, err := badgerEncode(bucket) - if err != nil { - return err - } - return db.db.Update(func(txn *badger.Txn) error { - return errors.Wrapf(txn.Set(bk, []byte{}), "failed to create %s/", bucket) - }) -} - -// DeleteTable deletes a root or embedded bucket. Returns an error if the -// bucket cannot be found or if the key represents a non-bucket value. 
-func (db *DB) DeleteTable(bucket []byte) error { - var tableExists bool - prefix, err := badgerEncode(bucket) - if err != nil { - return err - } - deleteKeys := func(keysForDelete [][]byte) error { - if err := db.db.Update(func(txn *badger.Txn) error { - for _, key := range keysForDelete { - tableExists = true - if err := txn.Delete(key); err != nil { - return errors.Wrapf(err, "error deleting key %s", key) - } - } - return nil - }); err != nil { - return errors.Wrapf(err, "update failed") - } - return nil - } - - collectSize := 1000 - err = db.db.View(func(txn *badger.Txn) error { - opts := badger.DefaultIteratorOptions - opts.AllVersions = false - opts.PrefetchValues = false - it := txn.NewIterator(opts) - defer it.Close() - - keysForDelete := make([][]byte, collectSize) - keysCollected := 0 - for it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() { - key := it.Item().KeyCopy(nil) - keysForDelete[keysCollected] = key - keysCollected++ - if keysCollected == collectSize { - if err := deleteKeys(keysForDelete); err != nil { - return err - } - keysCollected = 0 - } - } - if keysCollected > 0 { - if err := deleteKeys(keysForDelete[:keysCollected]); err != nil { - return err - } - } - if !tableExists { - return errors.Wrapf(database.ErrNotFound, "table %s does not exist", bucket) - } - - return nil - }) - return err -} - -// badgerGetV2 is a helper for the Get method. -func badgerGetV2(txn *badger.Txn, key []byte) ([]byte, error) { - item, err := txn.Get(key) - switch { - case err == badger.ErrKeyNotFound: - return nil, errors.Wrapf(database.ErrNotFound, "key %s not found", key) - case err != nil: - return nil, errors.Wrapf(err, "failed to get key %s", key) - default: - val, err := item.ValueCopy(nil) - if err != nil { - return nil, errors.Wrap(err, "error accessing value returned by database") - } - - // Make sure to return a copy as val is only valid during the - // transaction. - return cloneBytes(val), nil - } -} - -// Get returns the value stored in the given bucked and key. -func (db *DB) Get(bucket, key []byte) (ret []byte, err error) { - bk, err := toBadgerKey(bucket, key) - if err != nil { - return nil, errors.Wrapf(err, "error converting %s/%s to badgerKey", bucket, key) - } - err = db.db.View(func(txn *badger.Txn) error { - ret, err = badgerGetV2(txn, bk) - return err - }) - return -} - -// Set stores the given value on bucket and key. -func (db *DB) Set(bucket, key, value []byte) error { - bk, err := toBadgerKey(bucket, key) - if err != nil { - return errors.Wrapf(err, "error converting %s/%s to badgerKey", bucket, key) - } - return db.db.Update(func(txn *badger.Txn) error { - return errors.Wrapf(txn.Set(bk, value), "failed to set %s/%s", bucket, key) - }) -} - -// Del deletes the value stored in the given bucked and key. -func (db *DB) Del(bucket, key []byte) error { - bk, err := toBadgerKey(bucket, key) - if err != nil { - return errors.Wrapf(err, "error converting %s/%s to badgerKey", bucket, key) - } - return db.db.Update(func(txn *badger.Txn) error { - return errors.Wrapf(txn.Delete(bk), "failed to delete %s/%s", bucket, key) - }) -} - -// List returns the full list of entries in a bucket. 
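badgerGetV2 above copies values out with ValueCopy plus cloneBytes because a Badger item's value is only valid while its transaction is open. A minimal sketch of that copy-out pattern (db here is a *badger.DB, and key is assumed to exist), ahead of the List implementation below:

    var out []byte
    err := db.View(func(txn *badger.Txn) error {
    	item, err := txn.Get(key)
    	if err != nil {
    		return err
    	}
    	// ValueCopy(nil) allocates a fresh slice, so out remains valid
    	// after the transaction closes.
    	out, err = item.ValueCopy(nil)
    	return err
    })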
-func (db *DB) List(bucket []byte) ([]*database.Entry, error) { - var ( - entries []*database.Entry - tableExists bool - ) - err := db.db.View(func(txn *badger.Txn) error { - it := txn.NewIterator(badger.DefaultIteratorOptions) - defer it.Close() - prefix, err := badgerEncode(bucket) - if err != nil { - return err - } - for it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() { - tableExists = true - item := it.Item() - bk := item.KeyCopy(nil) - if isBadgerTable(bk) { - continue - } - _bucket, key, err := fromBadgerKey(bk) - if err != nil { - return errors.Wrapf(err, "error converting from badgerKey %s", bk) - } - if !bytes.Equal(_bucket, bucket) { - return errors.Errorf("bucket names do not match; want %v, but got %v", - bucket, _bucket) - } - v, err := item.ValueCopy(nil) - if err != nil { - return errors.Wrap(err, "error retrieving contents from database value") - } - entries = append(entries, &database.Entry{ - Bucket: _bucket, - Key: key, - Value: cloneBytes(v), - }) - } - if !tableExists { - return errors.Wrapf(database.ErrNotFound, "bucket %s not found", bucket) - } - return nil - }) - return entries, err -} - -// CmpAndSwap modifies the value at the given bucket and key (to newValue) -// only if the existing (current) value matches oldValue. -func (db *DB) CmpAndSwap(bucket, key, oldValue, newValue []byte) ([]byte, bool, error) { - bk, err := toBadgerKey(bucket, key) - if err != nil { - return nil, false, err - } - - badgerTxn := db.db.NewTransaction(true) - defer badgerTxn.Discard() - - val, swapped, err := cmpAndSwapV2(badgerTxn, bk, oldValue, newValue) - switch { - case err != nil: - return nil, false, err - case swapped: - if err := badgerTxn.Commit(); err != nil { - return nil, false, errors.Wrapf(err, "failed to commit badger transaction") - } - return val, swapped, nil - default: - return val, swapped, err - } -} - -func cmpAndSwapV2(badgerTxn *badger.Txn, bk, oldValue, newValue []byte) ([]byte, bool, error) { - current, err := badgerGetV2(badgerTxn, bk) - // If value does not exist but expected is not nil, then return w/out swapping. - if err != nil && !database.IsErrNotFound(err) { - return nil, false, err - } - if !bytes.Equal(current, oldValue) { - return current, false, nil - } - - if err := badgerTxn.Set(bk, newValue); err != nil { - return current, false, errors.Wrapf(err, "failed to set %s", bk) - } - return newValue, true, nil -} - -// Update performs multiple commands on one read-write transaction. 
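CmpAndSwap above commits its transaction only when the stored value still equals oldValue; otherwise the write is discarded and the current value is handed back. A hedged sketch of that contract from the caller's side (bucket, key, and values are illustrative; the fragment assumes an enclosing function that returns an error), ahead of the Update implementation below:

    val, swapped, err := db.CmpAndSwap(
    	[]byte("locks"), []byte("leader"),
    	[]byte("node-1"), // value expected to be stored now
    	[]byte("node-2"), // replacement to write if it still is
    )
    if err != nil {
    	return err
    }
    if !swapped {
    	_ = val // lost the race; val holds whatever is currently stored
    }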
-func (db *DB) Update(txn *database.Tx) error { - return db.db.Update(func(badgerTxn *badger.Txn) (err error) { - for _, q := range txn.Operations { - switch q.Cmd { - case database.CreateTable: - if err = db.CreateTable(q.Bucket); err != nil { - return err - } - continue - case database.DeleteTable: - if err = db.DeleteTable(q.Bucket); err != nil { - return err - } - continue - } - bk, err := toBadgerKey(q.Bucket, q.Key) - if err != nil { - return err - } - switch q.Cmd { - case database.Get: - if q.Result, err = badgerGetV2(badgerTxn, bk); err != nil { - return errors.Wrapf(err, "failed to get %s/%s", q.Bucket, q.Key) - } - case database.Set: - if err := badgerTxn.Set(bk, q.Value); err != nil { - return errors.Wrapf(err, "failed to set %s/%s", q.Bucket, q.Key) - } - case database.Delete: - if err = badgerTxn.Delete(bk); err != nil { - return errors.Wrapf(err, "failed to delete %s/%s", q.Bucket, q.Key) - } - case database.CmpAndSwap: - q.Result, q.Swapped, err = cmpAndSwapV2(badgerTxn, bk, q.CmpValue, q.Value) - if err != nil { - return errors.Wrapf(err, "failed to CmpAndSwap %s/%s", q.Bucket, q.Key) - } - case database.CmpOrRollback: - return database.ErrOpNotSupported - default: - return database.ErrOpNotSupported - } - } - return nil - }) -} - -// toBadgerKey returns the Badger database key using the following algorithm: -// First 2 bytes are the length of the bucket/table name in little endian format, -// followed by the bucket/table name, -// followed by 2 bytes representing the length of the key in little endian format, -// followed by the key. -func toBadgerKey(bucket, key []byte) ([]byte, error) { - first, err := badgerEncode(bucket) - if err != nil { - return nil, err - } - second, err := badgerEncode(key) - if err != nil { - return nil, err - } - return append(first, second...), nil -} - -// isBadgerTable returns True if the slice is a badgerTable token, false otherwise. -// badgerTable means that the slice contains only the [size|value] of one section -// of a badgerKey and no remainder. A badgerKey is [buket|key], while a badgerTable -// is only the bucket section. -func isBadgerTable(bk []byte) bool { - if k, rest := parseBadgerEncode(bk); len(k) > 0 && len(rest) == 0 { - return true - } - return false -} - -// fromBadgerKey returns the bucket and key encoded in a BadgerKey. -// See documentation for toBadgerKey. -func fromBadgerKey(bk []byte) ([]byte, []byte, error) { - bucket, rest := parseBadgerEncode(bk) - if len(bucket) == 0 || len(rest) == 0 { - return nil, nil, errors.Errorf("invalid badger key: %v", bk) - } - - key, rest2 := parseBadgerEncode(rest) - if len(key) == 0 || len(rest2) != 0 { - return nil, nil, errors.Errorf("invalid badger key: %v", bk) - } - - return bucket, key, nil -} - -// badgerEncode encodes a byte slice into a section of a BadgerKey. -// See documentation for toBadgerKey. 
-func badgerEncode(val []byte) ([]byte, error) { - l := len(val) - switch { - case l == 0: - return nil, errors.Errorf("input cannot be empty") - case l > 65535: - return nil, errors.Errorf("length of input cannot be greater than 65535") - default: - lb := new(bytes.Buffer) - if err := binary.Write(lb, binary.LittleEndian, uint16(l)); err != nil { - return nil, errors.Wrap(err, "error doing binary Write") - } - return append(lb.Bytes(), val...), nil - } -} - -func parseBadgerEncode(bk []byte) (value, rest []byte) { - var ( - keyLen uint16 - start = uint16(2) - length = uint16(len(bk)) - ) - if uint16(len(bk)) < start { - return nil, bk - } - // First 2 bytes stores the length of the value. - if err := binary.Read(bytes.NewReader(bk[:2]), binary.LittleEndian, &keyLen); err != nil { - return nil, bk - } - end := start + keyLen - switch { - case length < end: - return nil, bk - case length == end: - return bk[start:end], nil - default: - return bk[start:end], bk[end:] - } -} - -// cloneBytes returns a copy of a given slice. -func cloneBytes(v []byte) []byte { - var clone = make([]byte, len(v)) - copy(clone, v) - return clone -} diff --git a/vendor/github.com/smallstep/nosql/bolt/bbolt.go b/vendor/github.com/smallstep/nosql/bolt/bbolt.go deleted file mode 100644 index d6d26ea1..00000000 --- a/vendor/github.com/smallstep/nosql/bolt/bbolt.go +++ /dev/null @@ -1,272 +0,0 @@ -package bolt - -import ( - "bytes" - "time" - - "github.com/pkg/errors" - "github.com/smallstep/nosql/database" - bolt "go.etcd.io/bbolt" -) - -var boltDBSep = []byte("/") - -// DB is a wrapper over bolt.DB, -type DB struct { - db *bolt.DB -} - -type boltBucket interface { - Bucket(name []byte) *bolt.Bucket - CreateBucket(name []byte) (*bolt.Bucket, error) - CreateBucketIfNotExists(name []byte) (*bolt.Bucket, error) - DeleteBucket(name []byte) error -} - -// Open opens or creates a DB database in the given path. -func (db *DB) Open(dataSourceName string, opt ...database.Option) (err error) { - opts := &database.Options{} - for _, o := range opt { - if err := o(opts); err != nil { - return err - } - } - db.db, err = bolt.Open(dataSourceName, 0600, &bolt.Options{Timeout: 5 * time.Second}) - return errors.WithStack(err) -} - -// Close closes the DB database. -func (db *DB) Close() error { - return errors.WithStack(db.db.Close()) -} - -// CreateTable creates a bucket or an embedded bucket if it does not exists. -func (db *DB) CreateTable(bucket []byte) error { - return db.db.Update(func(tx *bolt.Tx) error { - return db.createBucket(tx, bucket) - }) -} - -// DeleteTable deletes a root or embedded bucket. Returns an error if the -// bucket cannot be found or if the key represents a non-bucket value. -func (db *DB) DeleteTable(bucket []byte) error { - return db.db.Update(func(tx *bolt.Tx) error { - return db.deleteBucket(tx, bucket) - }) -} - -// Get returns the value stored in the given bucked and key. -func (db *DB) Get(bucket, key []byte) (ret []byte, err error) { - err = db.db.View(func(tx *bolt.Tx) error { - b, err := db.getBucket(tx, bucket) - if err != nil { - return err - } - ret = b.Get(key) - if ret == nil { - return database.ErrNotFound - } - // Make sure to return a copy as ret is only valid during the - // transaction. - ret = cloneBytes(ret) - return nil - }) - return -} - -// Set stores the given value on bucket and key. 
-func (db *DB) Set(bucket, key, value []byte) error { - return db.db.Update(func(tx *bolt.Tx) error { - b, err := db.getBucket(tx, bucket) - if err != nil { - return err - } - return errors.WithStack(b.Put(key, value)) - }) -} - -// Del deletes the value stored in the given bucked and key. -func (db *DB) Del(bucket, key []byte) error { - return db.db.Update(func(tx *bolt.Tx) error { - b, err := db.getBucket(tx, bucket) - if err != nil { - return err - } - return errors.WithStack(b.Delete(key)) - }) -} - -// List returns the full list of entries in a bucket. -func (db *DB) List(bucket []byte) ([]*database.Entry, error) { - var entries []*database.Entry - err := db.db.View(func(tx *bolt.Tx) error { - b, err := db.getBucket(tx, bucket) - if err != nil { - return errors.Wrap(err, "getBucket failed") - } - - c := b.Cursor() - for k, v := c.First(); k != nil; k, v = c.Next() { - entries = append(entries, &database.Entry{ - Bucket: bucket, - Key: cloneBytes(k), - Value: cloneBytes(v), - }) - } - return nil - }) - return entries, err -} - -// CmpAndSwap modifies the value at the given bucket and key (to newValue) -// only if the existing (current) value matches oldValue. -func (db *DB) CmpAndSwap(bucket, key, oldValue, newValue []byte) ([]byte, bool, error) { - boltTx, err := db.db.Begin(true) - if err != nil { - return nil, false, errors.Wrap(err, "error creating Bolt transaction") - } - - boltBucket := boltTx.Bucket(bucket) - if boltBucket == nil { - return nil, false, errors.Errorf("failed to get bucket %s", bucket) - } - - val, swapped, err := cmpAndSwap(boltBucket, key, oldValue, newValue) - switch { - case err != nil: - if err := boltTx.Rollback(); err != nil { - return nil, false, errors.Wrapf(err, "failed to execute CmpAndSwap transaction on %s/%s and failed to rollback transaction", bucket, key) - } - return nil, false, err - case swapped: - if err := boltTx.Commit(); err != nil { - return nil, false, errors.Wrapf(err, "failed to commit badger transaction") - } - return val, swapped, nil - default: - if err := boltTx.Rollback(); err != nil { - return nil, false, errors.Wrapf(err, "failed to rollback read-only CmpAndSwap transaction on %s/%s", bucket, key) - } - return val, swapped, err - } -} - -func cmpAndSwap(boltBucket *bolt.Bucket, key, oldValue, newValue []byte) ([]byte, bool, error) { - current := boltBucket.Get(key) - if !bytes.Equal(current, oldValue) { - return cloneBytes(current), false, nil - } - - if err := boltBucket.Put(key, newValue); err != nil { - return nil, false, errors.Wrapf(err, "failed to set key %s", key) - } - return newValue, true, nil -} - -// Update performs multiple commands on one read-write transaction. 
-func (db *DB) Update(tx *database.Tx) error { - return db.db.Update(func(boltTx *bolt.Tx) (err error) { - var b *bolt.Bucket - for _, q := range tx.Operations { - // create or delete buckets - switch q.Cmd { - case database.CreateTable: - err = db.createBucket(boltTx, q.Bucket) - if err != nil { - return err - } - continue - case database.DeleteTable: - err = db.deleteBucket(boltTx, q.Bucket) - if err != nil { - return err - } - continue - } - - // For other operations, get bucket and perform operation - b = boltTx.Bucket(q.Bucket) - - switch q.Cmd { - case database.Get: - ret := b.Get(q.Key) - if ret == nil { - return errors.WithStack(database.ErrNotFound) - } - q.Result = cloneBytes(ret) - case database.Set: - if err = b.Put(q.Key, q.Value); err != nil { - return errors.WithStack(err) - } - case database.Delete: - if err = b.Delete(q.Key); err != nil { - return errors.WithStack(err) - } - case database.CmpAndSwap: - q.Result, q.Swapped, err = cmpAndSwap(b, q.Key, q.CmpValue, q.Value) - if err != nil { - return errors.Wrapf(err, "failed to execute CmpAndSwap on %s/%s", q.Bucket, q.Key) - } - case database.CmpOrRollback: - return errors.Errorf("operation '%s' is not yet implemented", q.Cmd) - default: - return errors.Errorf("operation '%s' is not supported", q.Cmd) - } - } - return nil - }) -} - -// getBucket returns the bucket supporting nested buckets, nested buckets are -// bucket names separated by '/'. -func (db *DB) getBucket(tx *bolt.Tx, name []byte) (b *bolt.Bucket, err error) { - buckets := bytes.Split(name, boltDBSep) - for i, n := range buckets { - if i == 0 { - b = tx.Bucket(n) - } else { - b = b.Bucket(n) - } - if b == nil { - return nil, database.ErrNotFound - } - } - return -} - -// createBucket creates a bucket or a nested bucket in the given transaction. -func (db *DB) createBucket(tx *bolt.Tx, name []byte) (err error) { - b := boltBucket(tx) - buckets := bytes.Split(name, boltDBSep) - for _, name := range buckets { - b, err = b.CreateBucketIfNotExists(name) - if err != nil { - return errors.WithStack(err) - } - } - return -} - -// deleteBucket deletes a bucket or a nested bucked in the given transaction. -func (db *DB) deleteBucket(tx *bolt.Tx, name []byte) (err error) { - b := boltBucket(tx) - buckets := bytes.Split(name, boltDBSep) - last := len(buckets) - 1 - for i := 0; i < last; i++ { - if buck := b.Bucket(buckets[i]); buck == nil { - return errors.Wrapf(database.ErrNotFound, "bucket %s does not exist", bytes.Join(buckets[0:i+1], boltDBSep)) - } - } - err = b.DeleteBucket(buckets[last]) - if err == bolt.ErrBucketNotFound { - return errors.Wrapf(database.ErrNotFound, "bucket %s does not exist", name) - } - return -} - -// cloneBytes returns a copy of a given slice. -func cloneBytes(v []byte) []byte { - var clone = make([]byte, len(v)) - copy(clone, v) - return clone -} diff --git a/vendor/github.com/smallstep/nosql/database/database.go b/vendor/github.com/smallstep/nosql/database/database.go deleted file mode 100644 index ec64946e..00000000 --- a/vendor/github.com/smallstep/nosql/database/database.go +++ /dev/null @@ -1,259 +0,0 @@ -package database - -import ( - "fmt" - - "errors" -) - -var ( - // ErrNotFound is the type returned on DB implementations if an item does not - // exist. - ErrNotFound = errors.New("not found") - // ErrOpNotSupported is the type returned on DB implementations if an operation - // is not supported. - ErrOpNotSupported = errors.New("operation not supported") -) - -// IsErrNotFound returns true if the cause of the given error is ErrNotFound. 
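getBucket above resolves nested bbolt buckets by splitting the table name on '/', so a single []byte name can address a bucket path of any depth. A brief hedged example (bucket, key, and value names are illustrative; the fragment assumes an enclosing function that returns an error), ahead of the error helpers below:

    // "step/certs" resolves the root bucket "step", then its child "certs".
    if err := db.CreateTable([]byte("step/certs")); err != nil {
    	return err
    }
    if err := db.Set([]byte("step/certs"), []byte("serial-42"), []byte("value")); err != nil {
    	return err
    }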
-func IsErrNotFound(err error) bool { - return err == ErrNotFound || cause(err) == ErrNotFound -} - -// IsErrOpNotSupported returns true if the cause of the given error is ErrOpNotSupported. -func IsErrOpNotSupported(err error) bool { - return err == ErrOpNotSupported || cause(err) == ErrNotFound -} - -// cause (from github.com/pkg/errors) returns the underlying cause of the -// error, if possible. An error value has a cause if it implements the -// following interface: -// -// type causer interface { -// Cause() error -// } -// -// If the error does not implement Cause, the original error will -// be returned. If the error is nil, nil will be returned without further -// investigation. -func cause(err error) error { - type causer interface { - Cause() error - } - - for err != nil { - cause, ok := err.(causer) - if !ok { - break - } - err = cause.Cause() - } - return err -} - -// Options are configuration options for the database. -type Options struct { - Database string - ValueDir string - BadgerFileLoadingMode string -} - -// Option is the modifier type over Options. -type Option func(o *Options) error - -// WithValueDir is a modifier that sets the ValueDir attribute of Options. -func WithValueDir(path string) Option { - return func(o *Options) error { - o.ValueDir = path - return nil - } -} - -// WithDatabase is a modifier that sets the Database attribute of Options. -func WithDatabase(db string) Option { - return func(o *Options) error { - o.Database = db - return nil - } -} - -// WithBadgerFileLoadingMode is a modifier that sets the ValueLogLoadingMode -// of Badger db. -func WithBadgerFileLoadingMode(mode string) Option { - return func(o *Options) error { - o.BadgerFileLoadingMode = mode - return nil - } -} - -// DB is a interface to be implemented by the databases. -type DB interface { - // Open opens the database available with the given options. - Open(dataSourceName string, opt ...Option) error - // Close closes the current database. - Close() error - // Get returns the value stored in the given table/bucket and key. - Get(bucket, key []byte) (ret []byte, err error) - // Set sets the given value in the given table/bucket and key. - Set(bucket, key, value []byte) error - // CmpAndSwap swaps the value at the given bucket and key if the current - // value is equivalent to the oldValue input. Returns 'true' if the - // swap was successful and 'false' otherwise. - CmpAndSwap(bucket, key, oldValue, newValue []byte) ([]byte, bool, error) - // Del deletes the data in the given table/bucket and key. - Del(bucket, key []byte) error - // List returns a list of all the entries in a given table/bucket. - List(bucket []byte) ([]*Entry, error) - // Update performs a transaction with multiple read-write commands. - Update(tx *Tx) error - // CreateTable creates a table or a bucket in the database. - CreateTable(bucket []byte) error - // DeleteTable deletes a table or a bucket in the database. - DeleteTable(bucket []byte) error -} - -// Badger FileLoadingMode constants. -const ( - BadgerMemoryMap = "mmap" - BadgerFileIO = "fileio" -) - -// TxCmd is the type used to represent database command and operations. -type TxCmd int - -const ( - // CreateTable on a TxEntry will represent the creation of a table or - // bucket on the database. - CreateTable TxCmd = iota - // DeleteTable on a TxEntry will represent the deletion of a table or - // bucket on the database. - DeleteTable - // Get on a TxEntry will represent a command to retrieve data from the - // database. 
- Get - // Set on a TxEntry will represent a command to write data on the - // database. - Set - // Delete on a TxEntry represent a command to delete data on the database. - Delete - // CmpAndSwap on a TxEntry will represent a compare and swap operation on - // the database. It will compare the value read and change it if it's - // different. The TxEntry will contain the value read. - CmpAndSwap - // CmpOrRollback on a TxEntry will represent a read transaction that will - // compare the values will the ones passed, and if they don't match the - // transaction will fail - CmpOrRollback -) - -// String implements the fmt.Stringer interface on TxCmd. -func (o TxCmd) String() string { - switch o { - case CreateTable: - return "create-table" - case DeleteTable: - return "delete-table" - case Get: - return "read" - case Set: - return "write" - case Delete: - return "delete" - case CmpAndSwap: - return "compare-and-swap" - case CmpOrRollback: - return "compare-and-rollback" - default: - return fmt.Sprintf("unknown(%d)", o) - } -} - -// Tx represents a transaction and it's list of multiple TxEntry. Each TxEntry -// represents a read or write operation on the database. -type Tx struct { - Operations []*TxEntry -} - -// CreateTable adds a new create query to the transaction. -func (tx *Tx) CreateTable(bucket []byte) { - tx.Operations = append(tx.Operations, &TxEntry{ - Bucket: bucket, - Cmd: CreateTable, - }) -} - -// DeleteTable adds a new create query to the transaction. -func (tx *Tx) DeleteTable(bucket []byte) { - tx.Operations = append(tx.Operations, &TxEntry{ - Bucket: bucket, - Cmd: DeleteTable, - }) -} - -// Get adds a new read query to the transaction. -func (tx *Tx) Get(bucket, key []byte) { - tx.Operations = append(tx.Operations, &TxEntry{ - Bucket: bucket, - Key: key, - Cmd: Get, - }) -} - -// Set adds a new write query to the transaction. -func (tx *Tx) Set(bucket, key, value []byte) { - tx.Operations = append(tx.Operations, &TxEntry{ - Bucket: bucket, - Key: key, - Value: value, - Cmd: Set, - }) -} - -// Del adds a new delete query to the transaction. -func (tx *Tx) Del(bucket, key []byte) { - tx.Operations = append(tx.Operations, &TxEntry{ - Bucket: bucket, - Key: key, - Cmd: Delete, - }) -} - -// Cas adds a new compare-and-swap query to the transaction. -func (tx *Tx) Cas(bucket, key, value []byte) { - tx.Operations = append(tx.Operations, &TxEntry{ - Bucket: bucket, - Key: key, - Value: value, - Cmd: CmpAndSwap, - }) -} - -// Cmp adds a new compare-or-rollback query to the transaction. -func (tx *Tx) Cmp(bucket, key, value []byte) { - tx.Operations = append(tx.Operations, &TxEntry{ - Bucket: bucket, - Key: key, - Value: value, - Cmd: CmpOrRollback, - }) -} - -// TxEntry is the base elements for the transactions, a TxEntry is a read or -// write operation on the database. -type TxEntry struct { - Bucket []byte - Key []byte - Value []byte - CmpValue []byte - // Where the result of Get or CmpAndSwap txns is stored. - Result []byte - Cmd TxCmd - Swapped bool -} - -// Entry is the return value for list commands. 
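The Tx helpers above queue TxEntry operations that a backend's Update method executes in a single read-write transaction, writing Get and CmpAndSwap results back into each entry's Result field. A hedged sketch of composing one (db is any DB implementation from this package; bucket, key, and value are illustrative), ahead of the Entry type below:

    tx := new(database.Tx)
    tx.CreateTable([]byte("users"))
    tx.Set([]byte("users"), []byte("alice"), []byte(`{"admin":true}`))
    tx.Get([]byte("users"), []byte("alice"))
    if err := db.Update(tx); err != nil {
    	return err
    }
    // The third queued operation was the Get; Update stored what it read.
    fmt.Printf("%s\n", tx.Operations[2].Result)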
-type Entry struct { - Bucket []byte - Key []byte - Value []byte -} diff --git a/vendor/github.com/smallstep/nosql/mysql/mysql.go b/vendor/github.com/smallstep/nosql/mysql/mysql.go deleted file mode 100644 index 6acd9510..00000000 --- a/vendor/github.com/smallstep/nosql/mysql/mysql.go +++ /dev/null @@ -1,265 +0,0 @@ -package mysql - -import ( - "bytes" - "database/sql" - "fmt" - "strings" - - // import mysql driver anonymously (just run the init) - _ "github.com/go-sql-driver/mysql" - "github.com/pkg/errors" - "github.com/smallstep/nosql/database" -) - -// DB is a wrapper over *sql.DB, -type DB struct { - db *sql.DB -} - -// Open creates a Driver and connects to the database with the given address -// and access details. -func (db *DB) Open(dataSourceName string, opt ...database.Option) error { - opts := &database.Options{} - for _, o := range opt { - if err := o(opts); err != nil { - return err - } - } - - var err error - _db, err := sql.Open("mysql", dataSourceName) - if err != nil { - return errors.Wrap(err, "error connecting to mysql") - } - _, err = _db.Exec(fmt.Sprintf("CREATE DATABASE IF NOT EXISTS %s", opts.Database)) - if err != nil { - return errors.Wrapf(err, "error creating database %s (if not exists)", opts.Database) - } - db.db, err = sql.Open("mysql", dataSourceName+opts.Database) - if err != nil { - return errors.Wrapf(err, "error connecting to mysql database") - } - - return nil -} - -// Close shutsdown the database driver. -func (db *DB) Close() error { - return errors.WithStack(db.db.Close()) -} - -func getQry(bucket []byte) string { - return fmt.Sprintf("SELECT nvalue FROM `%s` WHERE nkey = ?", bucket) -} - -func insertUpdateQry(bucket []byte) string { - return fmt.Sprintf("INSERT INTO `%s`(nkey, nvalue) VALUES(?,?) ON DUPLICATE KEY UPDATE nvalue = ?", bucket) -} - -func delQry(bucket []byte) string { - return fmt.Sprintf("DELETE FROM `%s` WHERE nkey = ?", bucket) -} - -func createTableQry(bucket []byte) string { - return fmt.Sprintf("CREATE TABLE IF NOT EXISTS `%s`(nkey VARBINARY(255), nvalue BLOB, PRIMARY KEY (nkey));", bucket) -} - -func deleteTableQry(bucket []byte) string { - return fmt.Sprintf("DROP TABLE `%s`", bucket) -} - -// Get retrieves the column/row with given key. -func (db *DB) Get(bucket, key []byte) ([]byte, error) { - var val string - err := db.db.QueryRow(getQry(bucket), key).Scan(&val) - switch { - case err == sql.ErrNoRows: - return nil, errors.Wrapf(database.ErrNotFound, "%s/%s not found", bucket, key) - case err != nil: - return nil, errors.Wrapf(err, "failed to get %s/%s", bucket, key) - default: - return []byte(val), nil - } -} - -// Set inserts the key and value into the given bucket(column). -func (db *DB) Set(bucket, key, value []byte) error { - _, err := db.db.Exec(insertUpdateQry(bucket), key, value, value) - if err != nil { - return errors.Wrapf(err, "failed to set %s/%s", bucket, key) - } - return nil -} - -// Del deletes a row from the database. -func (db *DB) Del(bucket, key []byte) error { - _, err := db.db.Exec(delQry(bucket), key) - return errors.Wrapf(err, "failed to delete %s/%s", bucket, key) -} - -// List returns the full list of entries in a column. 
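insertUpdateQry above renders a MySQL upsert, which is why Set binds the value twice: once for the INSERT and once for the ON DUPLICATE KEY UPDATE branch. Expanded for a bucket named users (the bucket name is illustrative), ahead of the List implementation below:

    // Rendered statement for bucket "users":
    //   INSERT INTO `users`(nkey, nvalue) VALUES(?,?)
    //   ON DUPLICATE KEY UPDATE nvalue = ?
    _, err := db.db.Exec(insertUpdateQry([]byte("users")), key, value, value)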
-// List returns the full list of entries in a column.
-func (db *DB) List(bucket []byte) ([]*database.Entry, error) {
-	rows, err := db.db.Query(fmt.Sprintf("SELECT * FROM `%s`", bucket))
-	if err != nil {
-		estr := err.Error()
-		if strings.HasPrefix(estr, "Error 1146:") {
-			return nil, errors.Wrapf(database.ErrNotFound, estr)
-		}
-		return nil, errors.Wrapf(err, "error querying table %s", bucket)
-	}
-	defer rows.Close()
-	var (
-		key, value string
-		entries    []*database.Entry
-	)
-	for rows.Next() {
-		err := rows.Scan(&key, &value)
-		if err != nil {
-			return nil, errors.Wrap(err, "error getting key and value from row")
-		}
-		entries = append(entries, &database.Entry{
-			Bucket: bucket,
-			Key:    []byte(key),
-			Value:  []byte(value),
-		})
-	}
-	err = rows.Err()
-	if err != nil {
-		return nil, errors.Wrap(err, "error accessing row")
-	}
-	return entries, nil
-}
-
-// CmpAndSwap modifies the value at the given bucket and key (to newValue)
-// only if the existing (current) value matches oldValue.
-func (db *DB) CmpAndSwap(bucket, key, oldValue, newValue []byte) ([]byte, bool, error) {
-	sqlTx, err := db.db.Begin()
-	if err != nil {
-		return nil, false, errors.WithStack(err)
-	}
-
-	val, swapped, err := cmpAndSwap(sqlTx, bucket, key, oldValue, newValue)
-	switch {
-	case err != nil:
-		if err := sqlTx.Rollback(); err != nil {
-			return nil, false, errors.Wrapf(err, "failed to execute CmpAndSwap transaction on %s/%s and failed to rollback transaction", bucket, key)
-		}
-		return nil, false, err
-	case swapped:
-		if err := sqlTx.Commit(); err != nil {
-			return nil, false, errors.Wrapf(err, "failed to commit mysql transaction")
-		}
-		return val, swapped, nil
-	default:
-		if err := sqlTx.Rollback(); err != nil {
-			return nil, false, errors.Wrapf(err, "failed to rollback read-only CmpAndSwap transaction on %s/%s", bucket, key)
-		}
-		return val, swapped, err
-	}
-}
-
-func cmpAndSwap(sqlTx *sql.Tx, bucket, key, oldValue, newValue []byte) ([]byte, bool, error) {
-	var current []byte
-	err := sqlTx.QueryRow(getQry(bucket), key).Scan(&current)
-
-	if err != nil && err != sql.ErrNoRows {
-		return nil, false, err
-	}
-	if !bytes.Equal(current, oldValue) {
-		return current, false, nil
-	}
-
-	if _, err = sqlTx.Exec(insertUpdateQry(bucket), key, newValue, newValue); err != nil {
-		return nil, false, errors.Wrapf(err, "failed to set %s/%s", bucket, key)
-	}
-	return newValue, true, nil
-}
-
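Since CmpAndSwap only commits when the stored value still equals oldValue, callers typically wrap it in an optimistic retry loop. A minimal sketch, assuming db is any implementation of the package's database.DB interface and the counter key was previously initialized to a decimal string:

```go
package counter

import (
	"strconv"

	"github.com/smallstep/nosql/database"
)

// incrementCounter bumps a stored decimal counter with optimistic locking.
func incrementCounter(db database.DB, bucket, key []byte) error {
	for {
		old, err := db.Get(bucket, key)
		if err != nil {
			return err
		}
		n, err := strconv.Atoi(string(old))
		if err != nil {
			return err
		}
		next := []byte(strconv.Itoa(n + 1))
		_, swapped, err := db.CmpAndSwap(bucket, key, old, next)
		if err != nil {
			return err
		}
		if swapped {
			return nil // our write won the race
		}
		// Another writer changed the value between Get and CmpAndSwap; retry.
	}
}
```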
-// Update performs multiple commands on one read-write transaction.
-func (db *DB) Update(tx *database.Tx) error {
-	sqlTx, err := db.db.Begin()
-	if err != nil {
-		return errors.WithStack(err)
-	}
-	rollback := func(err error) error {
-		if rollbackErr := sqlTx.Rollback(); rollbackErr != nil {
-			return errors.Wrap(err, "UPDATE failed, unable to rollback transaction")
-		}
-		return errors.Wrap(err, "UPDATE failed")
-	}
-	for _, q := range tx.Operations {
-		// execute each operation in order
-		switch q.Cmd {
-		case database.CreateTable:
-			_, err := sqlTx.Exec(createTableQry(q.Bucket))
-			if err != nil {
-				return rollback(errors.Wrapf(err, "failed to create table %s", q.Bucket))
-			}
-		case database.DeleteTable:
-			_, err := sqlTx.Exec(deleteTableQry(q.Bucket))
-			if err != nil {
-				estr := err.Error()
-				if strings.HasPrefix(estr, "Error 1051:") {
-					return rollback(errors.Wrapf(database.ErrNotFound, estr))
-				}
-				return rollback(errors.Wrapf(err, "failed to delete table %s", q.Bucket))
-			}
-		case database.Get:
-			var val string
-			err := sqlTx.QueryRow(getQry(q.Bucket), q.Key).Scan(&val)
-			switch {
-			case err == sql.ErrNoRows:
-				return rollback(errors.Wrapf(database.ErrNotFound, "%s/%s not found", q.Bucket, q.Key))
-			case err != nil:
-				return rollback(errors.Wrapf(err, "failed to get %s/%s", q.Bucket, q.Key))
-			default:
-				q.Result = []byte(val)
-			}
-		case database.Set:
-			if _, err = sqlTx.Exec(insertUpdateQry(q.Bucket), q.Key, q.Value, q.Value); err != nil {
-				return rollback(errors.Wrapf(err, "failed to set %s/%s", q.Bucket, q.Key))
-			}
-		case database.Delete:
-			if _, err = sqlTx.Exec(delQry(q.Bucket), q.Key); err != nil {
-				return rollback(errors.Wrapf(err, "failed to delete %s/%s", q.Bucket, q.Key))
-			}
-		case database.CmpAndSwap:
-			q.Result, q.Swapped, err = cmpAndSwap(sqlTx, q.Bucket, q.Key, q.CmpValue, q.Value)
-			if err != nil {
-				return rollback(errors.Wrapf(err, "failed to load-or-store %s/%s", q.Bucket, q.Key))
-			}
-		case database.CmpOrRollback:
-			return rollback(database.ErrOpNotSupported)
-		default:
-			return rollback(database.ErrOpNotSupported)
-		}
-	}
-
-	if err = errors.WithStack(sqlTx.Commit()); err != nil {
-		return rollback(err)
-	}
-	return nil
-}
-
-// CreateTable creates a table in the database.
-func (db *DB) CreateTable(bucket []byte) error {
-	_, err := db.db.Exec(createTableQry(bucket))
-	if err != nil {
-		return errors.Wrapf(err, "failed to create table %s", bucket)
-	}
-	return nil
-}
-
-// DeleteTable deletes a table in the database.
-func (db *DB) DeleteTable(bucket []byte) error {
-	_, err := db.db.Exec(deleteTableQry(bucket))
-	if err != nil {
-		estr := err.Error()
-		if strings.HasPrefix(estr, "Error 1051:") {
-			return errors.Wrapf(database.ErrNotFound, estr)
-		}
-		return errors.Wrapf(err, "failed to delete table %s", bucket)
-	}
-	return nil
-}
diff --git a/vendor/github.com/smallstep/nosql/nosql.go b/vendor/github.com/smallstep/nosql/nosql.go
deleted file mode 100644
index a740b0b9..00000000
--- a/vendor/github.com/smallstep/nosql/nosql.go
+++ /dev/null
@@ -1,69 +0,0 @@
-package nosql
-
-import (
-	"strings"
-
-	"github.com/pkg/errors"
-	badgerV1 "github.com/smallstep/nosql/badger/v1"
-	badgerV2 "github.com/smallstep/nosql/badger/v2"
-	"github.com/smallstep/nosql/bolt"
-	"github.com/smallstep/nosql/database"
-	"github.com/smallstep/nosql/mysql"
-)
-
-// Option is just a wrapper over database.Option.
-type Option = database.Option
-
-// DB is just a wrapper over database.DB.
-type DB = database.DB
-
-var (
-	// WithValueDir is a wrapper over database.WithValueDir.
-	WithValueDir = database.WithValueDir
-	// WithDatabase is a wrapper over database.WithDatabase.
- WithDatabase = database.WithDatabase - // WithBadgerFileLoadingMode is a wrapper over database.WithBadgerFileLoadingMode. - WithBadgerFileLoadingMode = database.WithBadgerFileLoadingMode - // IsErrNotFound is a wrapper over database.IsErrNotFound. - IsErrNotFound = database.IsErrNotFound - // IsErrOpNotSupported is a wrapper over database.IsErrOpNotSupported. - IsErrOpNotSupported = database.IsErrOpNotSupported - - // Available db driver types. // - - // BadgerDriver indicates the default Badger database - currently Badger V1. - BadgerDriver = "badger" - // BadgerV1Driver explicitly selects the Badger V1 driver. - BadgerV1Driver = "badgerv1" - // BadgerV2Driver explicitly selects the Badger V2 driver. - BadgerV2Driver = "badgerv2" - // BBoltDriver indicates the default BBolt database. - BBoltDriver = "bbolt" - // MySQLDriver indicates the default MySQL database. - MySQLDriver = "mysql" - - // Badger FileLoadingMode - - // BadgerMemoryMap indicates the MemoryMap FileLoadingMode option. - BadgerMemoryMap = database.BadgerMemoryMap - // BadgerFileIO indicates the FileIO FileLoadingMode option. - BadgerFileIO = database.BadgerFileIO -) - -// New returns a database with the given driver. -func New(driver, dataSourceName string, opt ...Option) (db database.DB, err error) { - switch strings.ToLower(driver) { - case BadgerDriver, BadgerV1Driver: - db = &badgerV1.DB{} - case BadgerV2Driver: - db = &badgerV2.DB{} - case BBoltDriver: - db = &bolt.DB{} - case MySQLDriver: - db = &mysql.DB{} - default: - return nil, errors.Errorf("%s database not supported", driver) - } - err = db.Open(dataSourceName, opt...) - return -} diff --git a/vendor/github.com/smallstep/truststore/.gitignore b/vendor/github.com/smallstep/truststore/.gitignore deleted file mode 100644 index 930d57c5..00000000 --- a/vendor/github.com/smallstep/truststore/.gitignore +++ /dev/null @@ -1,21 +0,0 @@ -# Binaries for programs and plugins -/bin/truststore -*.exe -*.exe~ -*.dll -*.so -*.dylib - -# Test binary, build with `go test -c` -*.test - -# Output of the go coverage tool, specifically when used with LiteIDE -*.out - -# Others -*.swp -.travis-releases -coverage.txt -output -vendor -step diff --git a/vendor/github.com/smallstep/truststore/LICENSE b/vendor/github.com/smallstep/truststore/LICENSE deleted file mode 100644 index 261eeb9e..00000000 --- a/vendor/github.com/smallstep/truststore/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/smallstep/truststore/Makefile b/vendor/github.com/smallstep/truststore/Makefile
deleted file mode 100644
index cb7f82ad..00000000
--- a/vendor/github.com/smallstep/truststore/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-all:
-	go build -o bin/truststore cmd/truststore/main.go
-
-clean:
-	rm -rf bin
-
-.PHONY: all clean
diff --git a/vendor/github.com/smallstep/truststore/README.md b/vendor/github.com/smallstep/truststore/README.md
deleted file mode 100644
index a95fb30c..00000000
--- a/vendor/github.com/smallstep/truststore/README.md
+++ /dev/null
@@ -1,8 +0,0 @@
-# truststore
-
-[![GoDoc](https://godoc.org/github.com/smallstep/truststore?status.svg)](https://godoc.org/github.com/smallstep/truststore)
-[![Go Report Card](https://goreportcard.com/badge/github.com/smallstep/truststore)](https://goreportcard.com/report/github.com/smallstep/truststore)
-
-Package to locally install development certificates.
-
-Based on https://github.com/FiloSottile/mkcert
diff --git a/vendor/github.com/smallstep/truststore/errors.go b/vendor/github.com/smallstep/truststore/errors.go
deleted file mode 100644
index 13741bae..00000000
--- a/vendor/github.com/smallstep/truststore/errors.go
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright (c) 2018 The truststore Authors. All rights reserved.
-
-package truststore
-
-import (
-	"errors"
-	"fmt"
-	"os/exec"
-	"path/filepath"
-)
-
-var (
-	// ErrNotSupported is the error to indicate that the install of the
-	// certificate is not supported on the system.
-	ErrNotSupported = errors.New("install is not supported on this system")
-
-	// ErrNotFound is the error to indicate that a cert was not found.
-	ErrNotFound = errors.New("no certs found")
-
-	// ErrInvalidCertificate is the error to indicate that a cert contains bad data.
-	ErrInvalidCertificate = errors.New("invalid PEM data")
-
-	// ErrTrustExists is the error returned when a trust already exists.
-	ErrTrustExists = errors.New("trust already exists")
-
-	// ErrTrustNotFound is the error returned when a trust does not exist.
-	ErrTrustNotFound = errors.New("trust does not exist")
-
-	// ErrTrustNotSupported is the error returned when a trust is not supported.
-	ErrTrustNotSupported = errors.New("trust not supported")
-)
-
-// CmdError is the error used when an executable fails.
-type CmdError struct {
-	err error
-	cmd *exec.Cmd
-	out []byte
-}
-
-// NewCmdError creates a new CmdError.
-func NewCmdError(err error, cmd *exec.Cmd, out []byte) *CmdError {
-	return &CmdError{
-		err: err,
-		cmd: cmd,
-		out: out,
-	}
-}
-
-// Error implements the error interface.
-func (e *CmdError) Error() string {
-	name := filepath.Base(e.cmd.Path)
-	return fmt.Sprintf("failed to execute %s: %v", name, e.err)
-}
-
-// Err returns the internal error.
-func (e *CmdError) Err() error {
-	return e.err
-}
-
-// Cmd returns the command executed.
-func (e *CmdError) Cmd() *exec.Cmd {
-	return e.cmd
-}
-
-// Out returns the output of the command.
-func (e *CmdError) Out() []byte {
-	return e.out
-}
-
-func wrapError(err error, msg string) error {
-	if err == nil {
-		return nil
-	}
-	return fmt.Errorf("%s: %s", msg, err)
-}
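A small usage sketch for the CmdError type above: wrap a failed exec invocation so callers can surface both the message and the captured output. The certutil arguments are illustrative only.

```go
package main

import (
	"fmt"
	"os/exec"

	"github.com/smallstep/truststore"
)

func main() {
	// Deliberately point certutil at a database that does not exist.
	cmd := exec.Command("certutil", "-L", "-d", "sql:/nonexistent")
	out, err := cmd.CombinedOutput()
	if err != nil {
		cmdErr := truststore.NewCmdError(err, cmd, out)
		fmt.Println(cmdErr.Error()) // "failed to execute certutil: ..."
		fmt.Printf("%s\n", cmdErr.Out())
	}
}
```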
diff --git a/vendor/github.com/smallstep/truststore/truststore.go b/vendor/github.com/smallstep/truststore/truststore.go
deleted file mode 100644
index 714af038..00000000
--- a/vendor/github.com/smallstep/truststore/truststore.go
+++ /dev/null
@@ -1,248 +0,0 @@
-// Copyright (c) 2018 The truststore Authors. All rights reserved.
-
-package truststore
-
-import (
-	"bytes"
-	"crypto/x509"
-	"encoding/pem"
-	"io"
-	"io/ioutil"
-	"log"
-	"os"
-)
-
-var prefix = ""
-var enableDebug bool
-
-func debug(format string, args ...interface{}) {
-	if enableDebug {
-		log.Printf(format, args...)
-	}
-}
-
-// Trust is the interface that non-system truststores implement to add and
-// remove a certificate from their store. Right now there are two
-// implementations of Trust: NSS (Firefox) and Java.
-type Trust interface {
-	Name() string
-	Install(filename string, cert *x509.Certificate) error
-	Uninstall(filename string, cert *x509.Certificate) error
-	Exists(cert *x509.Certificate) bool
-	PreCheck() error
-}
-
-// Install installs the given certificate into the system truststore, and
-// optionally to the Firefox and Java truststores.
-func Install(cert *x509.Certificate, opts ...Option) error {
-	filename, fn, err := saveTempCert(cert)
-	defer fn()
-	if err != nil {
-		return err
-	}
-	return installCertificate(filename, cert, opts)
-}
-
-// InstallFile will read the certificate in the given file and install it to the
-// system truststore, and optionally to the Firefox and Java truststores.
-func InstallFile(filename string, opts ...Option) error {
-	cert, err := ReadCertificate(filename)
-	if err != nil {
-		return err
-	}
-	return installCertificate(filename, cert, opts)
-}
-
-func installCertificate(filename string, cert *x509.Certificate, opts []Option) error {
-	o := newOptions(opts)
-
-	for _, t := range o.trusts {
-		if err := t.PreCheck(); err != nil {
-			debug(err.Error())
-			continue
-		}
-		if !t.Exists(cert) {
-			if err := t.Install(filename, cert); err != nil {
-				return err
-			}
-		}
-	}
-
-	if o.withNoSystem {
-		return nil
-	}
-
-	return installPlatform(filename, cert)
-}
-
-// Uninstall removes the given certificate from the system truststore, and
-// optionally from the Firefox and Java truststores.
-func Uninstall(cert *x509.Certificate, opts ...Option) error {
-	filename, fn, err := saveTempCert(cert)
-	defer fn()
-	if err != nil {
-		return err
-	}
-	return uninstallCertificate(filename, cert, opts)
-}
-
-// UninstallFile reads the certificate in the given file and removes it from the
-// system truststore, and optionally from the Firefox and Java truststores.
-func UninstallFile(filename string, opts ...Option) error {
-	cert, err := ReadCertificate(filename)
-	if err != nil {
-		return err
-	}
-	return uninstallCertificate(filename, cert, opts)
-}
-
-func uninstallCertificate(filename string, cert *x509.Certificate, opts []Option) error {
-	o := newOptions(opts)
-
-	for _, t := range o.trusts {
-		if err := t.PreCheck(); err != nil {
-			debug(err.Error())
-			continue
-		}
-		if err := t.Uninstall(filename, cert); err != nil {
-			return err
-		}
-	}
-
-	if o.withNoSystem {
-		return nil
-	}
-
-	return uninstallPlatform(filename, cert)
-}
-
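A hedged usage sketch for the API above: install a development root into the system store and, where available, Firefox's NSS databases, while skipping Java. The certificate path is hypothetical.

```go
package main

import (
	"log"

	"github.com/smallstep/truststore"
)

func main() {
	// root_ca.crt is a placeholder path to a PEM- or DER-encoded root.
	err := truststore.InstallFile("root_ca.crt",
		truststore.WithFirefox(), // also add to NSS (Firefox) databases
		truststore.WithDebug(),   // log what each step does
	)
	if err != nil {
		log.Fatal(err)
	}
}
```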
-// ReadCertificate reads a certificate file and returns an x509.Certificate struct.
-func ReadCertificate(filename string) (*x509.Certificate, error) {
-	b, err := ioutil.ReadFile(filename)
-	if err != nil {
-		return nil, err
-	}
-
-	// PEM format
-	if bytes.HasPrefix(b, []byte("-----BEGIN ")) {
-		block, _ := pem.Decode(b)
-		if block == nil || block.Type != "CERTIFICATE" {
-			return nil, ErrInvalidCertificate
-		}
-		b = block.Bytes
-	}
-
-	// DER format (binary)
-	crt, err := x509.ParseCertificate(b)
-	return crt, wrapError(err, "error parsing "+filename)
-}
-
-// SaveCertificate saves the given x509.Certificate with the given filename.
-func SaveCertificate(filename string, cert *x509.Certificate) error {
-	block := &pem.Block{
-		Type:  "CERTIFICATE",
-		Bytes: cert.Raw,
-	}
-	return ioutil.WriteFile(filename, pem.EncodeToMemory(block), 0600)
-}
-
-type options struct {
-	withNoSystem bool
-	trusts       map[string]Trust
-}
-
-func newOptions(opts []Option) *options {
-	o := &options{
-		trusts: make(map[string]Trust),
-	}
-
-	for _, fn := range opts {
-		fn(o)
-	}
-	return o
-}
-
-// Option is the type used to pass custom options.
-type Option func(*options)
-
-// WithTrust enables the given trust.
-func WithTrust(t Trust) Option {
-	return func(o *options) {
-		o.trusts[t.Name()] = t
-	}
-}
-
-// WithJava enables the install or uninstall of a certificate in the Java
-// truststore.
-func WithJava() Option {
-	t, _ := NewJavaTrust()
-	return WithTrust(t)
-}
-
-// WithFirefox enables the install or uninstall of a certificate in the Firefox
-// truststore.
-func WithFirefox() Option {
-	t, _ := NewNSSTrust()
-	return WithTrust(t)
-}
-
-// WithNoSystem disables the install or uninstall of a certificate in the system
-// truststore.
-func WithNoSystem() Option {
-	return func(o *options) {
-		o.withNoSystem = true
-	}
-}
-
-// WithDebug enables debug logging messages.
-func WithDebug() Option {
-	return func(o *options) {
-		enableDebug = true
-	}
-}
-
-// WithPrefix sets a custom prefix for the truststore name.
-func WithPrefix(s string) Option {
-	return func(o *options) {
-		prefix = s
-	}
-}
-
-func uniqueName(cert *x509.Certificate) string {
-	switch {
-	case prefix != "":
-		return prefix + cert.SerialNumber.String()
-	case cert.Subject.CommonName != "":
-		return cert.Subject.CommonName + " " + cert.SerialNumber.String()
-	default:
-		return "Truststore Development CA " + cert.SerialNumber.String()
-	}
-}
-
-func saveTempCert(cert *x509.Certificate) (string, func(), error) {
-	f, err := ioutil.TempFile(os.TempDir(), "truststore.*.pem")
-	if err != nil {
-		return "", func() {}, err
-	}
-	name := f.Name()
-	clean := func() {
-		os.Remove(name)
-	}
-	data := pem.EncodeToMemory(&pem.Block{
-		Type:  "CERTIFICATE",
-		Bytes: cert.Raw,
-	})
-	n, err := f.Write(data)
-	if err == nil && n < len(data) {
-		err = io.ErrShortWrite
-	}
-	if err1 := f.Close(); err == nil {
-		err = err1
-	}
-	return name, clean, err
-}
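To make ReadCertificate and SaveCertificate concrete, a short round-trip sketch; the file names are placeholders, and both PEM and raw DER inputs are accepted.

```go
package main

import (
	"fmt"
	"log"

	"github.com/smallstep/truststore"
)

func main() {
	// "root_ca.crt" is a placeholder; PEM and DER inputs both work.
	cert, err := truststore.ReadCertificate("root_ca.crt")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(cert.Subject.CommonName, cert.SerialNumber)

	// Re-encode to PEM; SaveCertificate always writes PEM with 0600 perms.
	if err := truststore.SaveCertificate("root_ca_copy.pem", cert); err != nil {
		log.Fatal(err)
	}
}
```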
diff --git a/vendor/github.com/smallstep/truststore/truststore_darwin.go b/vendor/github.com/smallstep/truststore/truststore_darwin.go
deleted file mode 100644
index bdc1f6c2..00000000
--- a/vendor/github.com/smallstep/truststore/truststore_darwin.go
+++ /dev/null
@@ -1,133 +0,0 @@
-// Copyright (c) 2018 The truststore Authors. All rights reserved.
-// Copyright (c) 2018 The mkcert Authors. All rights reserved.
-
-package truststore
-
-import (
-	"bytes"
-	"crypto/x509"
-	"encoding/asn1"
-	"fmt"
-	"io/ioutil"
-	"os"
-	"os/exec"
-
-	plist "howett.net/plist"
-)
-
-var (
-	// NSSProfile is the path of the Firefox profiles.
-	NSSProfile = os.Getenv("HOME") + "/Library/Application Support/Firefox/Profiles/*"
-
-	// CertutilInstallHelp is the command to run on macOS to add NSS support.
-	CertutilInstallHelp = "brew install nss"
-)
-
-// https://github.com/golang/go/issues/24652#issuecomment-399826583
-var trustSettings []interface{}
-var _, _ = plist.Unmarshal(trustSettingsData, &trustSettings)
-var trustSettingsData = []byte(`
-<array>
-	<dict>
-		<key>kSecTrustSettingsPolicy</key>
-		<data>
-		KoZIhvdjZAED
-		</data>
-		<key>kSecTrustSettingsPolicyName</key>
-		<string>sslServer</string>
-		<key>kSecTrustSettingsResult</key>
-		<integer>1</integer>
-	</dict>
-	<dict>
-		<key>kSecTrustSettingsPolicy</key>
-		<data>
-		KoZIhvdjZAEC
-		</data>
-		<key>kSecTrustSettingsPolicyName</key>
-		<string>basicX509</string>
-		<key>kSecTrustSettingsResult</key>
-		<integer>1</integer>
-	</dict>
-</array>
-`)
-
-func installPlatform(filename string, cert *x509.Certificate) error {
-	cmd := exec.Command("sudo", "security", "add-trusted-cert", "-d", "-k", "/Library/Keychains/System.keychain", filename)
-	out, err := cmd.CombinedOutput()
-	if err != nil {
-		return NewCmdError(err, cmd, out)
-	}
-
-	// Make trustSettings explicit, as older Go does not know the defaults.
-	// https://github.com/golang/go/issues/24652
-	plistFile, err := ioutil.TempFile("", "trust-settings")
-	if err != nil {
-		return wrapError(err, "failed to create temp file")
-	}
-	defer os.Remove(plistFile.Name())
-
-	cmd = exec.Command("sudo", "security", "trust-settings-export", "-d", plistFile.Name())
-	out, err = cmd.CombinedOutput()
-	if err != nil {
-		return NewCmdError(err, cmd, out)
-	}
-
-	plistData, err := ioutil.ReadFile(plistFile.Name())
-	if err != nil {
-		return wrapError(err, "failed to read trust settings")
-	}
-
-	var plistRoot map[string]interface{}
-	_, err = plist.Unmarshal(plistData, &plistRoot)
-	if err != nil {
-		return wrapError(err, "failed to parse trust settings")
-	}
-	if v, ok := plistRoot["trustVersion"].(uint64); v != 1 || !ok {
-		return fmt.Errorf("unsupported trust settings version: %v", plistRoot["trustVersion"])
-	}
-
-	trustList := plistRoot["trustList"].(map[string]interface{})
-	rootSubjectASN1, _ := asn1.Marshal(cert.Subject.ToRDNSequence())
-	for key := range trustList {
-		entry := trustList[key].(map[string]interface{})
-		if _, ok := entry["issuerName"]; !ok {
-			continue
-		}
-		issuerName := entry["issuerName"].([]byte)
-		if !bytes.Equal(rootSubjectASN1, issuerName) {
-			continue
-		}
-		entry["trustSettings"] = trustSettings
-		break
-	}
-
-	plistData, err = plist.MarshalIndent(plistRoot, plist.XMLFormat, "\t")
-	if err != nil {
-		return wrapError(err, "failed to serialize trust settings")
-	}
-
-	err = ioutil.WriteFile(plistFile.Name(), plistData, 0600)
-	if err != nil {
-		return wrapError(err, "failed to write trust settings")
-	}
-
-	cmd = exec.Command("sudo", "security", "trust-settings-import", "-d", plistFile.Name())
-	out, err = cmd.CombinedOutput()
-	if err != nil {
-		return NewCmdError(err, cmd, out)
-	}
-
-	debug("certificate installed properly in macOS keychain")
-	return nil
-}
-
-func uninstallPlatform(filename string, cert *x509.Certificate) error {
-	cmd := exec.Command("sudo", "security", "remove-trusted-cert", "-d", filename)
-	out, err := cmd.CombinedOutput()
-	if err != nil {
-		return NewCmdError(err, cmd, out)
-	}
-
-	debug("certificate uninstalled properly from macOS keychain")
-	return nil
-}
diff --git a/vendor/github.com/smallstep/truststore/truststore_java.go b/vendor/github.com/smallstep/truststore/truststore_java.go
deleted file mode 100644
index d7bbea6f..00000000
--- a/vendor/github.com/smallstep/truststore/truststore_java.go
+++ /dev/null
@@ -1,162 +0,0 @@
-// Copyright (c) 2018 The truststore Authors. All rights reserved.
-// Copyright (c) 2018 The mkcert Authors. All rights reserved.
-
-package truststore
-
-import (
-	"bytes"
-	"crypto/sha1"
-	"crypto/sha256"
-	"crypto/x509"
-	"encoding/hex"
-	"fmt"
-	"hash"
-	"os"
-	"os/exec"
-	"path/filepath"
-	"runtime"
-	"strings"
-)
-
-// JavaStorePass is the default store password of the keystore.
-var JavaStorePass = "changeit"
-
-// JavaTrust implements a Trust for the Java runtime.
-type JavaTrust struct {
-	keytoolPath string
-	cacertsPath string
-}
-
-// NewJavaTrust initializes a new JavaTrust if the environment has Java installed.
-func NewJavaTrust() (*JavaTrust, error) {
-	home := os.Getenv("JAVA_HOME")
-	if home == "" {
-		return nil, ErrTrustNotFound
-	}
-
-	var keytoolPath, cacertsPath string
-	if runtime.GOOS == "windows" {
-		keytoolPath = filepath.Join(home, "bin", "keytool.exe")
-	} else {
-		keytoolPath = filepath.Join(home, "bin", "keytool")
-	}
-
-	if _, err := os.Stat(keytoolPath); err != nil {
-		return nil, ErrTrustNotFound
-	}
-
-	_, err := os.Stat(filepath.Join(home, "lib", "security", "cacerts"))
-	if err == nil {
-		cacertsPath = filepath.Join(home, "lib", "security", "cacerts")
-	}
-
-	_, err = os.Stat(filepath.Join(home, "jre", "lib", "security", "cacerts"))
-	if err == nil {
-		cacertsPath = filepath.Join(home, "jre", "lib", "security", "cacerts")
-	}
-
-	return &JavaTrust{
-		keytoolPath: keytoolPath,
-		cacertsPath: cacertsPath,
-	}, nil
-}
-
-// Name implements the Trust interface.
-func (t *JavaTrust) Name() string {
-	return "java"
-}
-
-// Install implements the Trust interface.
-func (t *JavaTrust) Install(filename string, cert *x509.Certificate) error {
-	args := []string{
-		"-importcert", "-noprompt",
-		"-keystore", t.cacertsPath,
-		"-storepass", JavaStorePass,
-		"-file", filename,
-		"-alias", uniqueName(cert),
-	}
-
-	cmd := exec.Command(t.keytoolPath, args...)
-	if out, err := execKeytool(cmd); err != nil {
-		return NewCmdError(err, cmd, out)
-	}
-
-	debug("certificate installed properly in Java keystore")
-	return nil
-}
-
-// Uninstall implements the Trust interface.
-func (t *JavaTrust) Uninstall(filename string, cert *x509.Certificate) error {
-	args := []string{
-		"-delete",
-		"-alias", uniqueName(cert),
-		"-keystore", t.cacertsPath,
-		"-storepass", JavaStorePass,
-	}
-
-	cmd := exec.Command(t.keytoolPath, args...)
-	out, err := execKeytool(cmd)
-	if bytes.Contains(out, []byte("does not exist")) {
-		return nil
-	}
-	if err != nil {
-		return NewCmdError(err, cmd, out)
-	}
-
-	debug("certificate uninstalled properly from the Java keystore")
-	return nil
-}
-
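A hedged sketch of using JavaTrust on its own, assuming JAVA_HOME points at a JDK or JRE; the certificate path is a placeholder.

```go
package main

import (
	"log"

	"github.com/smallstep/truststore"
)

func main() {
	// Requires JAVA_HOME; otherwise NewJavaTrust returns ErrTrustNotFound.
	jt, err := truststore.NewJavaTrust()
	if err != nil {
		log.Fatal(err)
	}

	cert, err := truststore.ReadCertificate("root_ca.crt") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	if !jt.Exists(cert) {
		if err := jt.Install("root_ca.crt", cert); err != nil {
			log.Fatal(err)
		}
	}
}
```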
-// Exists implements the Trust interface.
-func (t *JavaTrust) Exists(cert *x509.Certificate) bool {
-	if t == nil {
-		return false
-	}
-
-	// exists returns true if the given x509.Certificate's fingerprint
-	// is in the keytool -list output
-	exists := func(c *x509.Certificate, h hash.Hash, keytoolOutput []byte) bool {
-		h.Write(c.Raw)
-		fp := strings.ToUpper(hex.EncodeToString(h.Sum(nil)))
-		return bytes.Contains(keytoolOutput, []byte(fp))
-	}
-
-	cmd := exec.Command(t.keytoolPath, "-list", "-keystore", t.cacertsPath, "-storepass", JavaStorePass)
-	keytoolOutput, err := cmd.CombinedOutput()
-	if err != nil {
-		debug("failed to execute \"keytool -list\": %s\n\n%s", err, keytoolOutput)
-		return false
-	}
-
-	// keytool outputs SHA1 and SHA256 (Java 9+) certificate fingerprints in
-	// uppercase hex with each octet pair delimited by ":". Drop the colons
-	// from the keytool output before matching.
-	keytoolOutput = bytes.Replace(keytoolOutput, []byte(":"), nil, -1)
-
-	// pre-Java 9 uses SHA1 fingerprints
-	s1, s256 := sha1.New(), sha256.New()
-	return exists(cert, s1, keytoolOutput) || exists(cert, s256, keytoolOutput)
-}
-
-// PreCheck implements the Trust interface.
-func (t *JavaTrust) PreCheck() error {
-	if t != nil {
-		return nil
-	}
-	return fmt.Errorf("define JAVA_HOME environment variable to use the Java trust")
-}
-
-// execKeytool will execute a "keytool" command and if needed re-execute
-// the command wrapped in 'sudo' to work around file permissions.
-func execKeytool(cmd *exec.Cmd) ([]byte, error) {
-	out, err := cmd.CombinedOutput()
-	if err != nil && bytes.Contains(out, []byte("java.io.FileNotFoundException")) && runtime.GOOS != "windows" {
-		origArgs := cmd.Args[1:]
-		cmd = exec.Command("sudo", cmd.Path)
-		cmd.Args = append(cmd.Args, origArgs...)
-		cmd.Env = []string{
-			"JAVA_HOME=" + os.Getenv("JAVA_HOME"),
-		}
-		out, err = cmd.CombinedOutput()
-	}
-	return out, err
-}
diff --git a/vendor/github.com/smallstep/truststore/truststore_linux.go b/vendor/github.com/smallstep/truststore/truststore_linux.go
deleted file mode 100644
index 5110c2a4..00000000
--- a/vendor/github.com/smallstep/truststore/truststore_linux.go
+++ /dev/null
@@ -1,111 +0,0 @@
-// Copyright (c) 2018 The truststore Authors. All rights reserved.
-// Copyright (c) 2018 The mkcert Authors. All rights reserved.
-
-package truststore
-
-import (
-	"bytes"
-	"crypto/x509"
-	"fmt"
-	"io/ioutil"
-	"os"
-	"os/exec"
-	"strings"
-)
-
-var (
-	// NSSProfile is the path of the Firefox profiles.
-	NSSProfile = os.Getenv("HOME") + "/.mozilla/firefox/*"
-
-	// CertutilInstallHelp is the command to run on linux to add NSS support.
-	CertutilInstallHelp = `apt install libnss3-tools" or "yum install nss-tools`
-
-	// SystemTrustFilename is the format used to name the root certificates.
-	SystemTrustFilename string
-
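The fingerprint normalization that Exists relies on can be reproduced in a few lines; a standalone sketch (not part of the package) that hashes a certificate the way keytool prints it, colons already stripped:

```go
package main

import (
	"crypto/sha256"
	"crypto/x509"
	"encoding/hex"
	"fmt"
	"strings"
)

// fingerprintSHA256 returns the uppercase hex SHA-256 of the DER bytes,
// matching keytool's SHA-256 fingerprint once its colons are removed.
func fingerprintSHA256(cert *x509.Certificate) string {
	sum := sha256.Sum256(cert.Raw)
	return strings.ToUpper(hex.EncodeToString(sum[:]))
}

func main() {
	var cert x509.Certificate // placeholder; normally parsed from DER/PEM
	fmt.Println(fingerprintSHA256(&cert))
}
```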
-	// SystemTrustCommand is the command used to update the system truststore.
-	SystemTrustCommand []string
-)
-
-func init() {
-	if pathExists("/etc/pki/ca-trust/source/anchors/") {
-		SystemTrustFilename = "/etc/pki/ca-trust/source/anchors/%s.pem"
-		SystemTrustCommand = []string{"update-ca-trust", "extract"}
-	} else if pathExists("/usr/local/share/ca-certificates/") {
-		SystemTrustFilename = "/usr/local/share/ca-certificates/%s.crt"
-		SystemTrustCommand = []string{"update-ca-certificates"}
-	} else if pathExists("/etc/ca-certificates/trust-source/anchors/") {
-		SystemTrustFilename = "/etc/ca-certificates/trust-source/anchors/%s.crt"
-		SystemTrustCommand = []string{"trust", "extract-compat"}
-	}
-	if SystemTrustCommand != nil {
-		_, err := exec.LookPath(SystemTrustCommand[0])
-		if err != nil {
-			SystemTrustCommand = nil
-		}
-	}
-}
-
-func pathExists(path string) bool {
-	_, err := os.Stat(path)
-	return err == nil
-}
-
-func systemTrustFilename(cert *x509.Certificate) string {
-	return fmt.Sprintf(SystemTrustFilename, strings.Replace(uniqueName(cert), " ", "_", -1))
-}
-
-func installPlatform(filename string, cert *x509.Certificate) error {
-	if SystemTrustCommand == nil {
-		return ErrNotSupported
-	}
-
-	data, err := ioutil.ReadFile(filename)
-	if err != nil {
-		return err
-	}
-
-	cmd := CommandWithSudo("tee", systemTrustFilename(cert))
-	cmd.Stdin = bytes.NewReader(data)
-	out, err := cmd.CombinedOutput()
-	if err != nil {
-		return NewCmdError(err, cmd, out)
-	}
-
-	cmd = CommandWithSudo(SystemTrustCommand...)
-	out, err = cmd.CombinedOutput()
-	if err != nil {
-		return NewCmdError(err, cmd, out)
-	}
-
-	debug("certificate installed properly in linux trusts")
-	return nil
-}
-
-func uninstallPlatform(filename string, cert *x509.Certificate) error {
-	if SystemTrustCommand == nil {
-		return ErrNotSupported
-	}
-
-	cmd := CommandWithSudo("rm", "-f", systemTrustFilename(cert))
-	out, err := cmd.CombinedOutput()
-	if err != nil {
-		return NewCmdError(err, cmd, out)
-	}
-
-	cmd = CommandWithSudo(SystemTrustCommand...)
-	out, err = cmd.CombinedOutput()
-	if err != nil {
-		return NewCmdError(err, cmd, out)
-	}
-
-	debug("certificate uninstalled properly from linux trusts")
-	return nil
-}
-
-// CommandWithSudo wraps the given command in "sudo --" when sudo is
-// available; otherwise it runs the command directly.
-func CommandWithSudo(cmd ...string) *exec.Cmd {
-	if _, err := exec.LookPath("sudo"); err != nil {
-		return exec.Command(cmd[0], cmd[1:]...)
-	}
-	return exec.Command("sudo", append([]string{"--"}, cmd...)...)
-}
diff --git a/vendor/github.com/smallstep/truststore/truststore_nss.go b/vendor/github.com/smallstep/truststore/truststore_nss.go
deleted file mode 100644
index 9bdcbbc8..00000000
--- a/vendor/github.com/smallstep/truststore/truststore_nss.go
+++ /dev/null
@@ -1,158 +0,0 @@
-// Copyright (c) 2018 The truststore Authors. All rights reserved.
-// Copyright (c) 2018 The mkcert Authors. All rights reserved.
-
-package truststore
-
-import (
-	"crypto/x509"
-	"fmt"
-	"os"
-	"os/exec"
-	"path/filepath"
-	"runtime"
-	"strings"
-)
-
-var nssDB = filepath.Join(os.Getenv("HOME"), ".pki/nssdb")
-
-// NSSTrust implements a Trust for Firefox or other NSS based applications.
-type NSSTrust struct {
-	certutilPath string
-}
-
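What the Linux install path boils down to, as a hedged, Linux-only shell-out sketch using the exported helper above; the refresh command shown assumes a Debian-style layout:

```go
package main

import (
	"log"

	"github.com/smallstep/truststore"
)

func main() {
	// Equivalent of the second half of installPlatform on Debian/Ubuntu:
	// refresh the system bundle after dropping a .crt into the anchor dir.
	cmd := truststore.CommandWithSudo("update-ca-certificates")
	out, err := cmd.CombinedOutput()
	if err != nil {
		log.Fatal(truststore.NewCmdError(err, cmd, out))
	}
	log.Printf("%s", out)
}
```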
-// NewNSSTrust creates a new NSSTrust.
-func NewNSSTrust() (*NSSTrust, error) {
-	var err error
-	var certutilPath string
-	switch runtime.GOOS {
-	case "darwin":
-		certutilPath, err = exec.LookPath("certutil")
-		if err != nil {
-			cmd := exec.Command("brew", "--prefix", "nss")
-			out, err1 := cmd.Output()
-			if err1 != nil {
-				return nil, NewCmdError(err1, cmd, out)
-			}
-			certutilPath = filepath.Join(strings.TrimSpace(string(out)), "bin", "certutil")
-			if _, err = os.Stat(certutilPath); err != nil {
-				return nil, err
-			}
-		}
-	case "linux":
-		if certutilPath, err = exec.LookPath("certutil"); err != nil {
-			return nil, err
-		}
-	default:
-		return nil, ErrTrustNotSupported
-	}
-
-	return &NSSTrust{
-		certutilPath: certutilPath,
-	}, nil
-}
-
-// Name implements the Trust interface.
-func (t *NSSTrust) Name() string {
-	return "nss"
-}
-
-// Install implements the Trust interface.
-func (t *NSSTrust) Install(filename string, cert *x509.Certificate) error {
-	// install certificate in all profiles
-	if forEachNSSProfile(func(profile string) {
-		cmd := exec.Command(t.certutilPath, "-A", "-d", profile, "-t", "C,,", "-n", uniqueName(cert), "-i", filename)
-		out, err := cmd.CombinedOutput()
-		if err != nil {
-			debug("failed to execute \"certutil -A\": %s\n\n%s", err, out)
-		}
-	}) == 0 {
-		return fmt.Errorf("no NSS security databases found")
-	}
-
-	// check for the cert in all profiles
-	if !t.Exists(cert) {
-		return fmt.Errorf("certificate cannot be installed in NSS security databases")
-	}
-
-	debug("certificate installed properly in NSS security databases")
-	return nil
-}
-
-// Uninstall implements the Trust interface.
-func (t *NSSTrust) Uninstall(filename string, cert *x509.Certificate) (err error) {
-	forEachNSSProfile(func(profile string) {
-		if err != nil {
-			return
-		}
-		// skip if not found
-		if err := exec.Command(t.certutilPath, "-V", "-d", profile, "-u", "L", "-n", uniqueName(cert)).Run(); err != nil {
-			return
-		}
-		// delete certificate
-		cmd := exec.Command(t.certutilPath, "-D", "-d", profile, "-n", uniqueName(cert))
-		out, err1 := cmd.CombinedOutput()
-		if err1 != nil {
-			err = NewCmdError(err1, cmd, out)
-		}
-	})
-	if err == nil {
-		debug("certificate uninstalled properly from NSS security databases")
-	}
-	return
-}
-
-// Exists implements the Trust interface. Exists checks if the certificate is
-// already installed.
-func (t *NSSTrust) Exists(cert *x509.Certificate) bool {
-	success := true
-	if forEachNSSProfile(func(profile string) {
-		err := exec.Command(t.certutilPath, "-V", "-d", profile, "-u", "L", "-n", uniqueName(cert)).Run()
-		if err != nil {
-			success = false
-		}
-	}) == 0 {
-		success = false
-	}
-	return success
-}
-
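A short sketch of the intended call order for NSSTrust: PreCheck to confirm certutil and at least one profile exist, then Exists to avoid redundant installs. The certificate path is a placeholder.

```go
package main

import (
	"log"

	"github.com/smallstep/truststore"
)

func main() {
	nt, err := truststore.NewNSSTrust()
	if err != nil {
		log.Fatal(err) // certutil missing or unsupported OS
	}
	if err := nt.PreCheck(); err != nil {
		log.Fatal(err) // e.g. no NSS security databases found
	}

	cert, err := truststore.ReadCertificate("root_ca.crt") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	if !nt.Exists(cert) {
		if err := nt.Install("root_ca.crt", cert); err != nil {
			log.Fatal(err)
		}
	}
}
```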
-// PreCheck implements the Trust interface.
-func (t *NSSTrust) PreCheck() error {
-	if t != nil {
-		if forEachNSSProfile(func(_ string) {}) == 0 {
-			return fmt.Errorf("no NSS security databases found")
-		}
-		return nil
-	}
-
-	if CertutilInstallHelp == "" {
-		return fmt.Errorf("Note: NSS support is not available on your platform")
-	}
-
-	return fmt.Errorf(`Warning: "certutil" is not available, install "certutil" with "%s" and try again`, CertutilInstallHelp)
-}
-
-func forEachNSSProfile(f func(profile string)) (found int) {
-	profiles, _ := filepath.Glob(NSSProfile)
-	if _, err := os.Stat(nssDB); err == nil {
-		profiles = append(profiles, nssDB)
-	}
-	if len(profiles) == 0 {
-		return
-	}
-	for _, profile := range profiles {
-		if stat, err := os.Stat(profile); err != nil || !stat.IsDir() {
-			continue
-		}
-		if _, err := os.Stat(filepath.Join(profile, "cert9.db")); err == nil {
-			f("sql:" + profile)
-			found++
-			continue
-		}
-		if _, err := os.Stat(filepath.Join(profile, "cert8.db")); err == nil {
-			f("dbm:" + profile)
-			found++
-		}
-	}
-	return
-}
diff --git a/vendor/github.com/smallstep/truststore/truststore_others.go b/vendor/github.com/smallstep/truststore/truststore_others.go
deleted file mode 100644
index b743fe9f..00000000
--- a/vendor/github.com/smallstep/truststore/truststore_others.go
+++ /dev/null
@@ -1,21 +0,0 @@
-// +build !linux,!darwin,!windows
-
-package truststore
-
-import "crypto/x509"
-
-var (
-	// NSSProfile is the path of the Firefox profiles.
-	NSSProfile = ""
-
-	// CertutilInstallHelp is the command to add NSS support.
-	CertutilInstallHelp = ""
-)
-
-func installPlatform(filename string, cert *x509.Certificate) error {
-	return ErrTrustNotSupported
-}
-
-func uninstallPlatform(filename string, cert *x509.Certificate) error {
-	return ErrTrustNotSupported
-}
diff --git a/vendor/github.com/smallstep/truststore/truststore_windows.go b/vendor/github.com/smallstep/truststore/truststore_windows.go
deleted file mode 100644
index f3f158bc..00000000
--- a/vendor/github.com/smallstep/truststore/truststore_windows.go
+++ /dev/null
@@ -1,137 +0,0 @@
-// Copyright (c) 2018 The truststore Authors. All rights reserved.
-// Copyright (c) 2018 The mkcert Authors. All rights reserved.
-
-package truststore
-
-import (
-	"crypto/x509"
-	"fmt"
-	"math/big"
-	"os"
-	"syscall"
-	"unsafe"
-)
-
-var (
-	// NSSProfile is the path of the Firefox profiles.
-	NSSProfile = os.Getenv("USERPROFILE") + "\\AppData\\Roaming\\Mozilla\\Firefox\\Profiles"
-
-	// CertutilInstallHelp is the command to run on windows to add NSS support.
-	// Certutil is not supported on Windows.
- CertutilInstallHelp = "" -) - -var ( - modcrypt32 = syscall.NewLazyDLL("crypt32.dll") - procCertAddEncodedCertificateToStore = modcrypt32.NewProc("CertAddEncodedCertificateToStore") - procCertCloseStore = modcrypt32.NewProc("CertCloseStore") - procCertDeleteCertificateFromStore = modcrypt32.NewProc("CertDeleteCertificateFromStore") - procCertDuplicateCertificateContext = modcrypt32.NewProc("CertDuplicateCertificateContext") - procCertEnumCertificatesInStore = modcrypt32.NewProc("CertEnumCertificatesInStore") - procCertOpenSystemStoreW = modcrypt32.NewProc("CertOpenSystemStoreW") -) - -func installPlatform(filename string, cert *x509.Certificate) error { - // Open root store - store, err := openWindowsRootStore() - if err != nil { - return wrapError(err, "open root store failed") - } - defer store.close() - - // Add cert - if err := store.addCert(cert.Raw); err != nil { - return wrapError(err, "add cert failed") - } - - debug("certificate installed properly in windows trusts") - return nil -} - -func uninstallPlatform(filename string, cert *x509.Certificate) error { - // We'll just remove all certs with the same serial number - // Open root store - store, err := openWindowsRootStore() - if err != nil { - return wrapError(err, "open root store failed") - } - defer store.close() - - // Do the deletion - deletedAny, err := store.deleteCertsWithSerial(cert.SerialNumber) - if err != nil { - return wrapError(err, "delete cert failed") - } - if !deletedAny { - return ErrNotFound - } - - debug("certificate uninstalled properly from windows trusts") - return nil -} - -type windowsRootStore uintptr - -func openWindowsRootStore() (windowsRootStore, error) { - store, _, err := procCertOpenSystemStoreW.Call(0, uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr("ROOT")))) - if store != 0 { - return windowsRootStore(store), nil - } - return 0, fmt.Errorf("cannot open windows root store: %v", err) -} - -func (w windowsRootStore) close() error { - ret, _, err := procCertCloseStore.Call(uintptr(w), 0) - if ret != 0 { - return nil - } - return fmt.Errorf("cannot close windows root store: %v", err) -} - -func (w windowsRootStore) addCert(cert []byte) error { - // TODO: ok to always overwrite? 
- ret, _, err := procCertAddEncodedCertificateToStore.Call( - uintptr(w), // HCERTSTORE hCertStore - uintptr(syscall.X509_ASN_ENCODING|syscall.PKCS_7_ASN_ENCODING), // DWORD dwCertEncodingType - uintptr(unsafe.Pointer(&cert[0])), // const BYTE *pbCertEncoded - uintptr(len(cert)), // DWORD cbCertEncoded - 3, // DWORD dwAddDisposition (CERT_STORE_ADD_REPLACE_EXISTING is 3) - 0, // PCCERT_CONTEXT *ppCertContext - ) - if ret != 0 { - return nil - } - return fmt.Errorf("Failed adding cert: %v", err) -} - -func (w windowsRootStore) deleteCertsWithSerial(serial *big.Int) (bool, error) { - // Go over each, deleting the ones we find - var cert *syscall.CertContext - deletedAny := false - for { - // Next enum - certPtr, _, err := procCertEnumCertificatesInStore.Call(uintptr(w), uintptr(unsafe.Pointer(cert))) - if cert = (*syscall.CertContext)(unsafe.Pointer(certPtr)); cert == nil { - if errno, ok := err.(syscall.Errno); ok && errno == 0x80092004 { - break - } - return deletedAny, fmt.Errorf("Failed enumerating certs: %v", err) - } - // Parse cert - certBytes := (*[1 << 20]byte)(unsafe.Pointer(cert.EncodedCert))[:cert.Length] - parsedCert, err := x509.ParseCertificate(certBytes) - // We'll just ignore parse failures for now - if err == nil && parsedCert.SerialNumber != nil && parsedCert.SerialNumber.Cmp(serial) == 0 { - // Duplicate the context so it doesn't stop the enum when we delete it - dupCertPtr, _, err := procCertDuplicateCertificateContext.Call(uintptr(unsafe.Pointer(cert))) - if dupCertPtr == 0 { - return deletedAny, fmt.Errorf("Failed duplicating context: %v", err) - } - if ret, _, err := procCertDeleteCertificateFromStore.Call(dupCertPtr); ret == 0 { - return deletedAny, fmt.Errorf("Failed deleting certificate: %v", err) - } - deletedAny = true - } - } - return deletedAny, nil -} diff --git a/vendor/github.com/spf13/afero/.gitignore b/vendor/github.com/spf13/afero/.gitignore deleted file mode 100644 index 9c1d9861..00000000 --- a/vendor/github.com/spf13/afero/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -sftpfs/file1 -sftpfs/test/ diff --git a/vendor/github.com/spf13/afero/.travis.yml b/vendor/github.com/spf13/afero/.travis.yml deleted file mode 100644 index e944f594..00000000 --- a/vendor/github.com/spf13/afero/.travis.yml +++ /dev/null @@ -1,26 +0,0 @@ -sudo: false -language: go -arch: - - amd64 - - ppc64e - -go: - - "1.14" - - "1.15" - - "1.16" - - tip - -os: - - linux - - osx - -matrix: - allow_failures: - - go: tip - fast_finish: true - -script: - - go build -v ./... - - go test -count=1 -cover -race -v ./... - - go vet ./... - - FILES=$(gofmt -s -l . zipfs sftpfs mem tarfs); if [[ -n "${FILES}" ]]; then echo "You have go format errors; gofmt your changes"; exit 1; fi diff --git a/vendor/github.com/spf13/afero/LICENSE.txt b/vendor/github.com/spf13/afero/LICENSE.txt deleted file mode 100644 index 298f0e26..00000000 --- a/vendor/github.com/spf13/afero/LICENSE.txt +++ /dev/null @@ -1,174 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. diff --git a/vendor/github.com/spf13/afero/README.md b/vendor/github.com/spf13/afero/README.md deleted file mode 100644 index fb8eaaf8..00000000 --- a/vendor/github.com/spf13/afero/README.md +++ /dev/null @@ -1,430 +0,0 @@ -![afero logo-sm](https://cloud.githubusercontent.com/assets/173412/11490338/d50e16dc-97a5-11e5-8b12-019a300d0fcb.png) - -A FileSystem Abstraction System for Go - -[![Build Status](https://travis-ci.org/spf13/afero.svg)](https://travis-ci.org/spf13/afero) [![Build status](https://ci.appveyor.com/api/projects/status/github/spf13/afero?branch=master&svg=true)](https://ci.appveyor.com/project/spf13/afero) [![GoDoc](https://godoc.org/github.com/spf13/afero?status.svg)](https://godoc.org/github.com/spf13/afero) [![Join the chat at https://gitter.im/spf13/afero](https://badges.gitter.im/Dev%20Chat.svg)](https://gitter.im/spf13/afero?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) - -# Overview - -Afero is a filesystem framework providing a simple, uniform and universal API -interacting with any filesystem, as an abstraction layer providing interfaces, -types and methods. Afero has an exceptionally clean interface and simple design -without needless constructors or initialization methods. 
- -Afero is also a library providing a base set of interoperable backend -filesystems that make it easy to work with afero while retaining all the power -and benefit of the os and ioutil packages. - -Afero provides significant improvements over using the os package alone, most -notably the ability to create mock and testing filesystems without relying on the disk. - -It is suitable for use in any situation where you would consider using the OS -package as it provides an additional abstraction that makes it easy to use a -memory backed file system during testing. It also adds support for the http -filesystem for full interoperability. - - -## Afero Features - -* A single consistent API for accessing a variety of filesystems -* Interoperation between a variety of file system types -* A set of interfaces to encourage and enforce interoperability between backends -* An atomic cross platform memory backed file system -* Support for compositional (union) file systems by combining multiple file systems acting as one -* Specialized backends which modify existing filesystems (Read Only, Regexp filtered) -* A set of utility functions ported from io, ioutil & hugo to be afero aware -* Wrapper for go 1.16 filesystem abstraction `io/fs.FS` - -# Using Afero - -Afero is easy to use and easier to adopt. - -A few different ways you could use Afero: - -* Use the interfaces alone to define your own file system. -* Wrapper for the OS packages. -* Define different filesystems for different parts of your application. -* Use Afero for mock filesystems while testing. - -## Step 1: Install Afero - -First use go get to install the latest version of the library. - - $ go get github.com/spf13/afero - -Next include Afero in your application. -```go -import "github.com/spf13/afero" -``` - -## Step 2: Declare a backend - -First define a package variable and set it to a pointer to a filesystem. -```go -var AppFs = afero.NewMemMapFs() - -// or - -var AppFs = afero.NewOsFs() -``` -It is important to note that if you repeat the composite literal you -will be using a completely new and isolated filesystem. In the case of -OsFs it will still use the same underlying filesystem but will reduce -the ability to drop in other filesystems as desired. - -## Step 3: Use it like you would the OS package - -Throughout your application use any function and method like you normally -would. - -So if your application previously had: -```go -os.Open("/tmp/foo") -``` -you would replace it with: -```go -AppFs.Open("/tmp/foo") -``` - -`AppFs` being the variable we defined above.
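Putting the three steps together, here is a minimal, self-contained sketch (an illustration added by the editor, not part of the upstream README) that declares a backend and round-trips a file through it using the `afero.WriteFile` and `afero.ReadFile` helpers listed in the next section:

```go
package main

import (
	"fmt"
	"log"

	"github.com/spf13/afero"
)

// AppFs is the package-level filesystem variable from Step 2.
// Swapping in afero.NewOsFs() here would hit the real disk instead.
var AppFs = afero.NewMemMapFs()

func main() {
	// Write a file through the abstraction rather than the os package.
	if err := afero.WriteFile(AppFs, "/tmp/foo", []byte("hello"), 0644); err != nil {
		log.Fatal(err)
	}

	// Read it back the same way.
	b, err := afero.ReadFile(AppFs, "/tmp/foo")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(b)) // prints: hello
}
```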
- - -## List of all available functions - -File System Methods Available: -```go -Chmod(name string, mode os.FileMode) : error -Chown(name string, uid, gid int) : error -Chtimes(name string, atime time.Time, mtime time.Time) : error -Create(name string) : File, error -Mkdir(name string, perm os.FileMode) : error -MkdirAll(path string, perm os.FileMode) : error -Name() : string -Open(name string) : File, error -OpenFile(name string, flag int, perm os.FileMode) : File, error -Remove(name string) : error -RemoveAll(path string) : error -Rename(oldname, newname string) : error -Stat(name string) : os.FileInfo, error -``` -File Interfaces and Methods Available: -```go -io.Closer -io.Reader -io.ReaderAt -io.Seeker -io.Writer -io.WriterAt - -Name() : string -Readdir(count int) : []os.FileInfo, error -Readdirnames(n int) : []string, error -Stat() : os.FileInfo, error -Sync() : error -Truncate(size int64) : error -WriteString(s string) : ret int, err error -``` -In some applications it may make sense to define a new package that -simply exports the file system variable for easy access from anywhere. - -## Using Afero's utility functions - -Afero provides a set of functions to make it easier to use the underlying file systems. -These functions have been primarily ported from io & ioutil, with some developed for Hugo. - -The afero utilities support all afero compatible backends. - -The list of utilities includes: - -```go -DirExists(path string) (bool, error) -Exists(path string) (bool, error) -FileContainsBytes(filename string, subslice []byte) (bool, error) -GetTempDir(subPath string) string -IsDir(path string) (bool, error) -IsEmpty(path string) (bool, error) -ReadDir(dirname string) ([]os.FileInfo, error) -ReadFile(filename string) ([]byte, error) -SafeWriteReader(path string, r io.Reader) (err error) -TempDir(dir, prefix string) (name string, err error) -TempFile(dir, prefix string) (f File, err error) -Walk(root string, walkFn filepath.WalkFunc) error -WriteFile(filename string, data []byte, perm os.FileMode) error -WriteReader(path string, r io.Reader) (err error) -``` -For a complete list see [Afero's GoDoc](https://godoc.org/github.com/spf13/afero). - -They can be used in two different ways: you can either call them directly, -in which case the first parameter of each function is the file -system, or you can declare a new `Afero`, a custom type used to bind these -functions as methods to a given filesystem. - -### Calling utilities directly - -```go -fs := new(afero.MemMapFs) -f, err := afero.TempFile(fs, "", "ioutil-test") -``` - -### Calling via Afero - -```go -fs := afero.NewMemMapFs() -afs := &afero.Afero{Fs: fs} -f, err := afs.TempFile("", "ioutil-test") -``` - -## Using Afero for Testing - -There is a large benefit to using a mock filesystem for testing. It has a -completely blank state every time it is initialized and is easily -reproducible regardless of OS. You could create files to your heart’s content, -and file access would be fast while also saving you from all the annoying -issues with deleting temporary files, Windows file locking, etc. The MemMapFs -backend is perfect for testing. - -* Much faster than performing I/O operations on disk -* Avoids security and permissions issues -* Far more control. 'rm -rf /' with confidence -* Test setup is far easier to do -* No test cleanup needed - -One way to accomplish this is to define a variable as mentioned above.
-In your application this will be set to afero.NewOsFs(); during testing you -can set it to afero.NewMemMapFs(). - -It wouldn't be uncommon to have each test initialize a blank slate memory -backend. To do this I would define my `appFS = afero.NewOsFs()` somewhere -appropriate in my application code. This approach ensures that tests are order -independent, with no test relying on the state left by an earlier test. - -Then in my tests I would initialize a new MemMapFs for each test: -```go -func TestExist(t *testing.T) { - appFS := afero.NewMemMapFs() - // create test files and directories - appFS.MkdirAll("src/a", 0755) - afero.WriteFile(appFS, "src/a/b", []byte("file b"), 0644) - afero.WriteFile(appFS, "src/c", []byte("file c"), 0644) - name := "src/c" - _, err := appFS.Stat(name) - if os.IsNotExist(err) { - t.Errorf("file \"%s\" does not exist.\n", name) - } -} -``` - -# Available Backends - -## Operating System Native - -### OsFs - -OsFs is simply a wrapper around the native OS calls. This makes it -very easy to use, as all of the calls are the same as the existing OS -calls. It also makes it trivial to have your code use the OS during -operation and a mock filesystem during testing or as needed. - -```go -appfs := afero.NewOsFs() -appfs.MkdirAll("src/a", 0755) -``` - -## Memory Backed Storage - -### MemMapFs - -Afero also provides a fully atomic memory backed filesystem, perfect for use in -mocking and for speeding up tests by avoiding unnecessary disk I/O when persistence isn’t -necessary. It is fully concurrent and works safely within goroutines. - -```go -mm := afero.NewMemMapFs() -mm.MkdirAll("src/a", 0755) -``` - -#### InMemoryFile - -As part of MemMapFs, Afero also provides an atomic, fully concurrent memory -backed file implementation. This can be used in other memory backed file -systems with ease. Plans are to add a radix tree memory stored file -system using InMemoryFile. - -## Network Interfaces - -### SftpFs - -Afero has experimental support for the secure file transfer protocol (SFTP), which can -be used to perform file operations over an encrypted channel. - -## Filtering Backends - -### BasePathFs - -The BasePathFs restricts all operations to a given path within an Fs. -The file name given to the operations on this Fs will be prepended with -the base path before calling the source Fs. - -```go -bp := afero.NewBasePathFs(afero.NewOsFs(), "/base/path") -``` - -### ReadOnlyFs - -A thin wrapper around the source Fs providing a read only view. - -```go -fs := afero.NewReadOnlyFs(afero.NewOsFs()) -_, err := fs.Create("/file.txt") -// err = syscall.EPERM -``` - -### RegexpFs - -A filtered view on file names: any file NOT matching -the passed regexp will be treated as non-existing. -Files not matching the regexp provided will not be created. -Directories are not filtered. - -```go -fs := afero.NewRegexpFs(afero.NewMemMapFs(), regexp.MustCompile(`\.txt$`)) -_, err := fs.Create("/file.html") -// err = syscall.ENOENT -``` - -### HttpFs - -Afero provides an http compatible backend which can wrap any of the existing -backends. - -The net/http package requires a slightly different version of Open, one which -returns an http.File type. - -Afero provides an httpFs file system which satisfies this requirement. -Any Afero FileSystem can be used as an httpFs. Note that NewHttpFs wraps an -existing Fs and Dir takes the base path to serve (the values below are illustrative). - -```go -httpFs := afero.NewHttpFs(afero.NewOsFs()) -fileserver := http.FileServer(httpFs.Dir("/static")) -http.Handle("/", fileserver) -``` - -## Composite Backends - -Afero provides the ability to have two (or more) filesystems act as a single -file system.
- -### CacheOnReadFs - -The CacheOnReadFs will lazily make copies of any accessed files from the base -layer into the overlay. Subsequent reads will be pulled from the overlay -directly, provided the request is within the cache duration of when the copy was -created in the overlay. - -If the base filesystem is writeable, any changes to files will be -done first to the base, then to the overlay layer. Write calls to open file -handles, like `Write()` or `Truncate()`, go to the base first as well. - -To write files to the overlay only, you can use the overlay Fs directly (not -via the union Fs). - -Files are cached in the layer for the given time.Duration; a cache duration of 0 -means "forever", meaning the file will never be re-requested from the base. - -A read-only base will make the overlay also read-only, but it will still copy files -from the base to the overlay when they're not present (or outdated) in the -caching layer. - -```go -base := afero.NewOsFs() -layer := afero.NewMemMapFs() -ufs := afero.NewCacheOnReadFs(base, layer, 100 * time.Second) -``` - -### CopyOnWriteFs - -The CopyOnWriteFs is a read only base file system with a potentially -writeable layer on top. - -Read operations will first look in the overlay and, if not found there, will -serve the file from the base. - -Changes to the file system will only be made in the overlay. - -Any attempt to modify a file found only in the base will copy the file to the -overlay layer before modification (including opening a file with a writable -handle). - -Removing and renaming files present only in the base layer is not currently -permitted. If a file is present in the base layer and the overlay, only the -overlay will be removed/renamed. - -```go - base := afero.NewOsFs() - roBase := afero.NewReadOnlyFs(base) - ufs := afero.NewCopyOnWriteFs(roBase, afero.NewMemMapFs()) - - fh, _ := ufs.Create("/home/test/file2.txt") - fh.WriteString("This is a test") - fh.Close() -``` - -In this example all write operations will only occur in memory (MemMapFs), -leaving the base filesystem (OsFs) untouched. - - -## Desired/possible backends - -The following is a short list of possible backends we hope someone will -implement: - -* SSH -* S3 - -# About the project - -## What's in the name - -Afero comes from the Latin roots Ad-Facere. - -**"Ad"** is a prefix meaning "to". - -**"Facere"** is a form of the root "faciō", meaning "make or do". - -The literal meaning of afero is "to make" or "to do", which seems very fitting -for a library that allows one to make files and directories and do things with them. - -The English word that shares the same roots as Afero is "affair". Affair shares -the same concept, but as a noun it means "something that is made or done" or "an -object of a particular type". - -It's also nice that unlike some of my other libraries (hugo, cobra, viper) it -Googles very well. - -## Release Notes - -See the [Releases Page](https://github.com/spf13/afero/releases). - -## Contributing - -1. Fork it -2. Create your feature branch (`git checkout -b my-new-feature`) -3. Commit your changes (`git commit -am 'Add some feature'`) -4. Push to the branch (`git push origin my-new-feature`) -5. Create a new Pull Request - -## Contributors - -Names in no particular order: - -* [spf13](https://github.com/spf13) -* [jaqx0r](https://github.com/jaqx0r) -* [mbertschler](https://github.com/mbertschler) -* [xor-gate](https://github.com/xor-gate) - -## License - -Afero is released under the Apache 2.0 license.
See -[LICENSE.txt](https://github.com/spf13/afero/blob/master/LICENSE.txt) diff --git a/vendor/github.com/spf13/afero/afero.go b/vendor/github.com/spf13/afero/afero.go deleted file mode 100644 index 469ff7d2..00000000 --- a/vendor/github.com/spf13/afero/afero.go +++ /dev/null @@ -1,111 +0,0 @@ -// Copyright © 2014 Steve Francia . -// Copyright 2013 tsuru authors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package afero provides types and methods for interacting with the filesystem, -// as an abstraction layer. - -// Afero also provides a few implementations that are mostly interoperable. One that -// uses the operating system filesystem, one that uses memory to store files -// (cross platform) and an interface that should be implemented if you want to -// provide your own filesystem. - -package afero - -import ( - "errors" - "io" - "os" - "time" -) - -type Afero struct { - Fs -} - -// File represents a file in the filesystem. -type File interface { - io.Closer - io.Reader - io.ReaderAt - io.Seeker - io.Writer - io.WriterAt - - Name() string - Readdir(count int) ([]os.FileInfo, error) - Readdirnames(n int) ([]string, error) - Stat() (os.FileInfo, error) - Sync() error - Truncate(size int64) error - WriteString(s string) (ret int, err error) -} - -// Fs is the filesystem interface. -// -// Any simulated or real filesystem should implement this interface. -type Fs interface { - // Create creates a file in the filesystem, returning the file and an - // error, if any happens. - Create(name string) (File, error) - - // Mkdir creates a directory in the filesystem, return an error if any - // happens. - Mkdir(name string, perm os.FileMode) error - - // MkdirAll creates a directory path and all parents that does not exist - // yet. - MkdirAll(path string, perm os.FileMode) error - - // Open opens a file, returning it or an error, if any happens. - Open(name string) (File, error) - - // OpenFile opens a file using the given flags and the given mode. - OpenFile(name string, flag int, perm os.FileMode) (File, error) - - // Remove removes a file identified by name, returning an error, if any - // happens. - Remove(name string) error - - // RemoveAll removes a directory path and any children it contains. It - // does not fail if the path does not exist (return nil). - RemoveAll(path string) error - - // Rename renames a file. - Rename(oldname, newname string) error - - // Stat returns a FileInfo describing the named file, or an error, if any - // happens. - Stat(name string) (os.FileInfo, error) - - // The name of this FileSystem - Name() string - - // Chmod changes the mode of the named file to mode. - Chmod(name string, mode os.FileMode) error - - // Chown changes the uid and gid of the named file. 
- Chown(name string, uid, gid int) error - - //Chtimes changes the access and modification times of the named file - Chtimes(name string, atime time.Time, mtime time.Time) error -} - -var ( - ErrFileClosed = errors.New("File is closed") - ErrOutOfRange = errors.New("Out of range") - ErrTooLarge = errors.New("Too large") - ErrFileNotFound = os.ErrNotExist - ErrFileExists = os.ErrExist - ErrDestinationExists = os.ErrExist -) diff --git a/vendor/github.com/spf13/afero/appveyor.yml b/vendor/github.com/spf13/afero/appveyor.yml deleted file mode 100644 index 5d2f34bf..00000000 --- a/vendor/github.com/spf13/afero/appveyor.yml +++ /dev/null @@ -1,15 +0,0 @@ -version: '{build}' -clone_folder: C:\gopath\src\github.com\spf13\afero -environment: - GOPATH: C:\gopath -build_script: -- cmd: >- - go version - - go env - - go get -v github.com/spf13/afero/... - - go build -v github.com/spf13/afero/... -test_script: -- cmd: go test -count=1 -cover -race -v github.com/spf13/afero/... diff --git a/vendor/github.com/spf13/afero/basepath.go b/vendor/github.com/spf13/afero/basepath.go deleted file mode 100644 index 4f983282..00000000 --- a/vendor/github.com/spf13/afero/basepath.go +++ /dev/null @@ -1,211 +0,0 @@ -package afero - -import ( - "os" - "path/filepath" - "runtime" - "strings" - "time" -) - -var _ Lstater = (*BasePathFs)(nil) - -// The BasePathFs restricts all operations to a given path within an Fs. -// The given file name to the operations on this Fs will be prepended with -// the base path before calling the base Fs. -// Any file name (after filepath.Clean()) outside this base path will be -// treated as non existing file. -// -// Note that it does not clean the error messages on return, so you may -// reveal the real path on errors. -type BasePathFs struct { - source Fs - path string -} - -type BasePathFile struct { - File - path string -} - -func (f *BasePathFile) Name() string { - sourcename := f.File.Name() - return strings.TrimPrefix(sourcename, filepath.Clean(f.path)) -} - -func NewBasePathFs(source Fs, path string) Fs { - return &BasePathFs{source: source, path: path} -} - -// on a file outside the base path it returns the given file name and an error, -// else the given file with the base path prepended -func (b *BasePathFs) RealPath(name string) (path string, err error) { - if err := validateBasePathName(name); err != nil { - return name, err - } - - bpath := filepath.Clean(b.path) - path = filepath.Clean(filepath.Join(bpath, name)) - if !strings.HasPrefix(path, bpath) { - return name, os.ErrNotExist - } - - return path, nil -} - -func validateBasePathName(name string) error { - if runtime.GOOS != "windows" { - // Not much to do here; - // the virtual file paths all look absolute on *nix. - return nil - } - - // On Windows a common mistake would be to provide an absolute OS path - // We could strip out the base part, but that would not be very portable. 
- if filepath.IsAbs(name) { - return os.ErrNotExist - } - - return nil -} - -func (b *BasePathFs) Chtimes(name string, atime, mtime time.Time) (err error) { - if name, err = b.RealPath(name); err != nil { - return &os.PathError{Op: "chtimes", Path: name, Err: err} - } - return b.source.Chtimes(name, atime, mtime) -} - -func (b *BasePathFs) Chmod(name string, mode os.FileMode) (err error) { - if name, err = b.RealPath(name); err != nil { - return &os.PathError{Op: "chmod", Path: name, Err: err} - } - return b.source.Chmod(name, mode) -} - -func (b *BasePathFs) Chown(name string, uid, gid int) (err error) { - if name, err = b.RealPath(name); err != nil { - return &os.PathError{Op: "chown", Path: name, Err: err} - } - return b.source.Chown(name, uid, gid) -} - -func (b *BasePathFs) Name() string { - return "BasePathFs" -} - -func (b *BasePathFs) Stat(name string) (fi os.FileInfo, err error) { - if name, err = b.RealPath(name); err != nil { - return nil, &os.PathError{Op: "stat", Path: name, Err: err} - } - return b.source.Stat(name) -} - -func (b *BasePathFs) Rename(oldname, newname string) (err error) { - if oldname, err = b.RealPath(oldname); err != nil { - return &os.PathError{Op: "rename", Path: oldname, Err: err} - } - if newname, err = b.RealPath(newname); err != nil { - return &os.PathError{Op: "rename", Path: newname, Err: err} - } - return b.source.Rename(oldname, newname) -} - -func (b *BasePathFs) RemoveAll(name string) (err error) { - if name, err = b.RealPath(name); err != nil { - return &os.PathError{Op: "remove_all", Path: name, Err: err} - } - return b.source.RemoveAll(name) -} - -func (b *BasePathFs) Remove(name string) (err error) { - if name, err = b.RealPath(name); err != nil { - return &os.PathError{Op: "remove", Path: name, Err: err} - } - return b.source.Remove(name) -} - -func (b *BasePathFs) OpenFile(name string, flag int, mode os.FileMode) (f File, err error) { - if name, err = b.RealPath(name); err != nil { - return nil, &os.PathError{Op: "openfile", Path: name, Err: err} - } - sourcef, err := b.source.OpenFile(name, flag, mode) - if err != nil { - return nil, err - } - return &BasePathFile{sourcef, b.path}, nil -} - -func (b *BasePathFs) Open(name string) (f File, err error) { - if name, err = b.RealPath(name); err != nil { - return nil, &os.PathError{Op: "open", Path: name, Err: err} - } - sourcef, err := b.source.Open(name) - if err != nil { - return nil, err - } - return &BasePathFile{File: sourcef, path: b.path}, nil -} - -func (b *BasePathFs) Mkdir(name string, mode os.FileMode) (err error) { - if name, err = b.RealPath(name); err != nil { - return &os.PathError{Op: "mkdir", Path: name, Err: err} - } - return b.source.Mkdir(name, mode) -} - -func (b *BasePathFs) MkdirAll(name string, mode os.FileMode) (err error) { - if name, err = b.RealPath(name); err != nil { - return &os.PathError{Op: "mkdir", Path: name, Err: err} - } - return b.source.MkdirAll(name, mode) -} - -func (b *BasePathFs) Create(name string) (f File, err error) { - if name, err = b.RealPath(name); err != nil { - return nil, &os.PathError{Op: "create", Path: name, Err: err} - } - sourcef, err := b.source.Create(name) - if err != nil { - return nil, err - } - return &BasePathFile{File: sourcef, path: b.path}, nil -} - -func (b *BasePathFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { - name, err := b.RealPath(name) - if err != nil { - return nil, false, &os.PathError{Op: "lstat", Path: name, Err: err} - } - if lstater, ok := b.source.(Lstater); ok { - return 
lstater.LstatIfPossible(name) - } - fi, err := b.source.Stat(name) - return fi, false, err -} - -func (b *BasePathFs) SymlinkIfPossible(oldname, newname string) error { - oldname, err := b.RealPath(oldname) - if err != nil { - return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: err} - } - newname, err = b.RealPath(newname) - if err != nil { - return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: err} - } - if linker, ok := b.source.(Linker); ok { - return linker.SymlinkIfPossible(oldname, newname) - } - return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: ErrNoSymlink} -} - -func (b *BasePathFs) ReadlinkIfPossible(name string) (string, error) { - name, err := b.RealPath(name) - if err != nil { - return "", &os.PathError{Op: "readlink", Path: name, Err: err} - } - if reader, ok := b.source.(LinkReader); ok { - return reader.ReadlinkIfPossible(name) - } - return "", &os.PathError{Op: "readlink", Path: name, Err: ErrNoReadlink} -} diff --git a/vendor/github.com/spf13/afero/cacheOnReadFs.go b/vendor/github.com/spf13/afero/cacheOnReadFs.go deleted file mode 100644 index 71471aa2..00000000 --- a/vendor/github.com/spf13/afero/cacheOnReadFs.go +++ /dev/null @@ -1,311 +0,0 @@ -package afero - -import ( - "os" - "syscall" - "time" -) - -// If the cache duration is 0, cache time will be unlimited, i.e. once -// a file is in the layer, the base will never be read again for this file. -// -// For cache times greater than 0, the modification time of a file is -// checked. Note that a lot of file system implementations only allow a -// resolution of a second for timestamps... or as the godoc for os.Chtimes() -// states: "The underlying filesystem may truncate or round the values to a -// less precise time unit." -// -// This caching union will forward all write calls also to the base file -// system first. To prevent writing to the base Fs, wrap it in a read-only -// filter - Note: this will also make the overlay read-only, for writing files -// in the overlay, use the overlay Fs directly, not via the union Fs. 
-type CacheOnReadFs struct { - base Fs - layer Fs - cacheTime time.Duration -} - -func NewCacheOnReadFs(base Fs, layer Fs, cacheTime time.Duration) Fs { - return &CacheOnReadFs{base: base, layer: layer, cacheTime: cacheTime} -} - -type cacheState int - -const ( - // not present in the overlay, unknown if it exists in the base: - cacheMiss cacheState = iota - // present in the overlay and in base, base file is newer: - cacheStale - // present in the overlay - with cache time == 0 it may exist in the base, - // with cacheTime > 0 it exists in the base and is same age or newer in the - // overlay - cacheHit - // happens if someone writes directly to the overlay without - // going through this union - cacheLocal -) - -func (u *CacheOnReadFs) cacheStatus(name string) (state cacheState, fi os.FileInfo, err error) { - var lfi, bfi os.FileInfo - lfi, err = u.layer.Stat(name) - if err == nil { - if u.cacheTime == 0 { - return cacheHit, lfi, nil - } - if lfi.ModTime().Add(u.cacheTime).Before(time.Now()) { - bfi, err = u.base.Stat(name) - if err != nil { - return cacheLocal, lfi, nil - } - if bfi.ModTime().After(lfi.ModTime()) { - return cacheStale, bfi, nil - } - } - return cacheHit, lfi, nil - } - - if err == syscall.ENOENT || os.IsNotExist(err) { - return cacheMiss, nil, nil - } - - return cacheMiss, nil, err -} - -func (u *CacheOnReadFs) copyToLayer(name string) error { - return copyToLayer(u.base, u.layer, name) -} - -func (u *CacheOnReadFs) Chtimes(name string, atime, mtime time.Time) error { - st, _, err := u.cacheStatus(name) - if err != nil { - return err - } - switch st { - case cacheLocal: - case cacheHit: - err = u.base.Chtimes(name, atime, mtime) - case cacheStale, cacheMiss: - if err := u.copyToLayer(name); err != nil { - return err - } - err = u.base.Chtimes(name, atime, mtime) - } - if err != nil { - return err - } - return u.layer.Chtimes(name, atime, mtime) -} - -func (u *CacheOnReadFs) Chmod(name string, mode os.FileMode) error { - st, _, err := u.cacheStatus(name) - if err != nil { - return err - } - switch st { - case cacheLocal: - case cacheHit: - err = u.base.Chmod(name, mode) - case cacheStale, cacheMiss: - if err := u.copyToLayer(name); err != nil { - return err - } - err = u.base.Chmod(name, mode) - } - if err != nil { - return err - } - return u.layer.Chmod(name, mode) -} - -func (u *CacheOnReadFs) Chown(name string, uid, gid int) error { - st, _, err := u.cacheStatus(name) - if err != nil { - return err - } - switch st { - case cacheLocal: - case cacheHit: - err = u.base.Chown(name, uid, gid) - case cacheStale, cacheMiss: - if err := u.copyToLayer(name); err != nil { - return err - } - err = u.base.Chown(name, uid, gid) - } - if err != nil { - return err - } - return u.layer.Chown(name, uid, gid) -} - -func (u *CacheOnReadFs) Stat(name string) (os.FileInfo, error) { - st, fi, err := u.cacheStatus(name) - if err != nil { - return nil, err - } - switch st { - case cacheMiss: - return u.base.Stat(name) - default: // cacheStale has base, cacheHit and cacheLocal the layer os.FileInfo - return fi, nil - } -} - -func (u *CacheOnReadFs) Rename(oldname, newname string) error { - st, _, err := u.cacheStatus(oldname) - if err != nil { - return err - } - switch st { - case cacheLocal: - case cacheHit: - err = u.base.Rename(oldname, newname) - case cacheStale, cacheMiss: - if err := u.copyToLayer(oldname); err != nil { - return err - } - err = u.base.Rename(oldname, newname) - } - if err != nil { - return err - } - return u.layer.Rename(oldname, newname) -} - -func (u *CacheOnReadFs) 
Remove(name string) error { - st, _, err := u.cacheStatus(name) - if err != nil { - return err - } - switch st { - case cacheLocal: - case cacheHit, cacheStale, cacheMiss: - err = u.base.Remove(name) - } - if err != nil { - return err - } - return u.layer.Remove(name) -} - -func (u *CacheOnReadFs) RemoveAll(name string) error { - st, _, err := u.cacheStatus(name) - if err != nil { - return err - } - switch st { - case cacheLocal: - case cacheHit, cacheStale, cacheMiss: - err = u.base.RemoveAll(name) - } - if err != nil { - return err - } - return u.layer.RemoveAll(name) -} - -func (u *CacheOnReadFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - st, _, err := u.cacheStatus(name) - if err != nil { - return nil, err - } - switch st { - case cacheLocal, cacheHit: - default: - if err := u.copyToLayer(name); err != nil { - return nil, err - } - } - if flag&(os.O_WRONLY|syscall.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 { - bfi, err := u.base.OpenFile(name, flag, perm) - if err != nil { - return nil, err - } - lfi, err := u.layer.OpenFile(name, flag, perm) - if err != nil { - bfi.Close() // oops, what if O_TRUNC was set and file opening in the layer failed...? - return nil, err - } - return &UnionFile{Base: bfi, Layer: lfi}, nil - } - return u.layer.OpenFile(name, flag, perm) -} - -func (u *CacheOnReadFs) Open(name string) (File, error) { - st, fi, err := u.cacheStatus(name) - if err != nil { - return nil, err - } - - switch st { - case cacheLocal: - return u.layer.Open(name) - - case cacheMiss: - bfi, err := u.base.Stat(name) - if err != nil { - return nil, err - } - if bfi.IsDir() { - return u.base.Open(name) - } - if err := u.copyToLayer(name); err != nil { - return nil, err - } - return u.layer.Open(name) - - case cacheStale: - if !fi.IsDir() { - if err := u.copyToLayer(name); err != nil { - return nil, err - } - return u.layer.Open(name) - } - case cacheHit: - if !fi.IsDir() { - return u.layer.Open(name) - } - } - // the dirs from cacheHit, cacheStale fall down here: - bfile, _ := u.base.Open(name) - lfile, err := u.layer.Open(name) - if err != nil && bfile == nil { - return nil, err - } - return &UnionFile{Base: bfile, Layer: lfile}, nil -} - -func (u *CacheOnReadFs) Mkdir(name string, perm os.FileMode) error { - err := u.base.Mkdir(name, perm) - if err != nil { - return err - } - return u.layer.MkdirAll(name, perm) // yes, MkdirAll... we cannot assume it exists in the cache -} - -func (u *CacheOnReadFs) Name() string { - return "CacheOnReadFs" -} - -func (u *CacheOnReadFs) MkdirAll(name string, perm os.FileMode) error { - err := u.base.MkdirAll(name, perm) - if err != nil { - return err - } - return u.layer.MkdirAll(name, perm) -} - -func (u *CacheOnReadFs) Create(name string) (File, error) { - bfh, err := u.base.Create(name) - if err != nil { - return nil, err - } - lfh, err := u.layer.Create(name) - if err != nil { - // oops, see comment about OS_TRUNC above, should we remove? then we have to - // remember if the file did not exist before - bfh.Close() - return nil, err - } - return &UnionFile{Base: bfh, Layer: lfh}, nil -} diff --git a/vendor/github.com/spf13/afero/const_bsds.go b/vendor/github.com/spf13/afero/const_bsds.go deleted file mode 100644 index 18b45824..00000000 --- a/vendor/github.com/spf13/afero/const_bsds.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright © 2016 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build aix darwin openbsd freebsd netbsd dragonfly - -package afero - -import ( - "syscall" -) - -const BADFD = syscall.EBADF diff --git a/vendor/github.com/spf13/afero/const_win_unix.go b/vendor/github.com/spf13/afero/const_win_unix.go deleted file mode 100644 index 2b850e4d..00000000 --- a/vendor/github.com/spf13/afero/const_win_unix.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright © 2016 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// +build !darwin -// +build !openbsd -// +build !freebsd -// +build !dragonfly -// +build !netbsd -// +build !aix - -package afero - -import ( - "syscall" -) - -const BADFD = syscall.EBADFD diff --git a/vendor/github.com/spf13/afero/copyOnWriteFs.go b/vendor/github.com/spf13/afero/copyOnWriteFs.go deleted file mode 100644 index 6ff8f309..00000000 --- a/vendor/github.com/spf13/afero/copyOnWriteFs.go +++ /dev/null @@ -1,326 +0,0 @@ -package afero - -import ( - "fmt" - "os" - "path/filepath" - "syscall" - "time" -) - -var _ Lstater = (*CopyOnWriteFs)(nil) - -// The CopyOnWriteFs is a union filesystem: a read only base file system with -// a possibly writeable layer on top. Changes to the file system will only -// be made in the overlay: Changing an existing file in the base layer which -// is not present in the overlay will copy the file to the overlay ("changing" -// includes also calls to e.g. Chtimes(), Chmod() and Chown()). -// -// Reading directories is currently only supported via Open(), not OpenFile(). 
-type CopyOnWriteFs struct { - base Fs - layer Fs -} - -func NewCopyOnWriteFs(base Fs, layer Fs) Fs { - return &CopyOnWriteFs{base: base, layer: layer} -} - -// Returns true if the file is not in the overlay -func (u *CopyOnWriteFs) isBaseFile(name string) (bool, error) { - if _, err := u.layer.Stat(name); err == nil { - return false, nil - } - _, err := u.base.Stat(name) - if err != nil { - if oerr, ok := err.(*os.PathError); ok { - if oerr.Err == os.ErrNotExist || oerr.Err == syscall.ENOENT || oerr.Err == syscall.ENOTDIR { - return false, nil - } - } - if err == syscall.ENOENT { - return false, nil - } - } - return true, err -} - -func (u *CopyOnWriteFs) copyToLayer(name string) error { - return copyToLayer(u.base, u.layer, name) -} - -func (u *CopyOnWriteFs) Chtimes(name string, atime, mtime time.Time) error { - b, err := u.isBaseFile(name) - if err != nil { - return err - } - if b { - if err := u.copyToLayer(name); err != nil { - return err - } - } - return u.layer.Chtimes(name, atime, mtime) -} - -func (u *CopyOnWriteFs) Chmod(name string, mode os.FileMode) error { - b, err := u.isBaseFile(name) - if err != nil { - return err - } - if b { - if err := u.copyToLayer(name); err != nil { - return err - } - } - return u.layer.Chmod(name, mode) -} - -func (u *CopyOnWriteFs) Chown(name string, uid, gid int) error { - b, err := u.isBaseFile(name) - if err != nil { - return err - } - if b { - if err := u.copyToLayer(name); err != nil { - return err - } - } - return u.layer.Chown(name, uid, gid) -} - -func (u *CopyOnWriteFs) Stat(name string) (os.FileInfo, error) { - fi, err := u.layer.Stat(name) - if err != nil { - isNotExist := u.isNotExist(err) - if isNotExist { - return u.base.Stat(name) - } - return nil, err - } - return fi, nil -} - -func (u *CopyOnWriteFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { - llayer, ok1 := u.layer.(Lstater) - lbase, ok2 := u.base.(Lstater) - - if ok1 { - fi, b, err := llayer.LstatIfPossible(name) - if err == nil { - return fi, b, nil - } - - if !u.isNotExist(err) { - return nil, b, err - } - } - - if ok2 { - fi, b, err := lbase.LstatIfPossible(name) - if err == nil { - return fi, b, nil - } - if !u.isNotExist(err) { - return nil, b, err - } - } - - fi, err := u.Stat(name) - - return fi, false, err -} - -func (u *CopyOnWriteFs) SymlinkIfPossible(oldname, newname string) error { - if slayer, ok := u.layer.(Linker); ok { - return slayer.SymlinkIfPossible(oldname, newname) - } - - return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: ErrNoSymlink} -} - -func (u *CopyOnWriteFs) ReadlinkIfPossible(name string) (string, error) { - if rlayer, ok := u.layer.(LinkReader); ok { - return rlayer.ReadlinkIfPossible(name) - } - - if rbase, ok := u.base.(LinkReader); ok { - return rbase.ReadlinkIfPossible(name) - } - - return "", &os.PathError{Op: "readlink", Path: name, Err: ErrNoReadlink} -} - -func (u *CopyOnWriteFs) isNotExist(err error) bool { - if e, ok := err.(*os.PathError); ok { - err = e.Err - } - if err == os.ErrNotExist || err == syscall.ENOENT || err == syscall.ENOTDIR { - return true - } - return false -} - -// Renaming files present only in the base layer is not permitted -func (u *CopyOnWriteFs) Rename(oldname, newname string) error { - b, err := u.isBaseFile(oldname) - if err != nil { - return err - } - if b { - return syscall.EPERM - } - return u.layer.Rename(oldname, newname) -} - -// Removing files present only in the base layer is not permitted. 
If -// a file is present in the base layer and the overlay, only the overlay -// will be removed. -func (u *CopyOnWriteFs) Remove(name string) error { - err := u.layer.Remove(name) - switch err { - case syscall.ENOENT: - _, err = u.base.Stat(name) - if err == nil { - return syscall.EPERM - } - return syscall.ENOENT - default: - return err - } -} - -func (u *CopyOnWriteFs) RemoveAll(name string) error { - err := u.layer.RemoveAll(name) - switch err { - case syscall.ENOENT: - _, err = u.base.Stat(name) - if err == nil { - return syscall.EPERM - } - return syscall.ENOENT - default: - return err - } -} - -func (u *CopyOnWriteFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - b, err := u.isBaseFile(name) - if err != nil { - return nil, err - } - - if flag&(os.O_WRONLY|os.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 { - if b { - if err = u.copyToLayer(name); err != nil { - return nil, err - } - return u.layer.OpenFile(name, flag, perm) - } - - dir := filepath.Dir(name) - isaDir, err := IsDir(u.base, dir) - if err != nil && !os.IsNotExist(err) { - return nil, err - } - if isaDir { - if err = u.layer.MkdirAll(dir, 0777); err != nil { - return nil, err - } - return u.layer.OpenFile(name, flag, perm) - } - - isaDir, err = IsDir(u.layer, dir) - if err != nil { - return nil, err - } - if isaDir { - return u.layer.OpenFile(name, flag, perm) - } - - return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOTDIR} // ...or os.ErrNotExist? - } - if b { - return u.base.OpenFile(name, flag, perm) - } - return u.layer.OpenFile(name, flag, perm) -} - -// This function handles the 9 different possibilities caused -// by the union which are the intersection of the following... -// layer: doesn't exist, exists as a file, and exists as a directory -// base: doesn't exist, exists as a file, and exists as a directory -func (u *CopyOnWriteFs) Open(name string) (File, error) { - // Since the overlay overrides the base we check that first - b, err := u.isBaseFile(name) - if err != nil { - return nil, err - } - - // If overlay doesn't exist, return the base (base state irrelevant) - if b { - return u.base.Open(name) - } - - // If overlay is a file, return it (base state irrelevant) - dir, err := IsDir(u.layer, name) - if err != nil { - return nil, err - } - if !dir { - return u.layer.Open(name) - } - - // Overlay is a directory, base state now matters. - // Base state has 3 states to check but 2 outcomes: - // A. It's a file or non-readable in the base (return just the overlay) - // B. It's an accessible directory in the base (return a UnionFile) - - // If base is file or nonreadable, return overlay - dir, err = IsDir(u.base, name) - if !dir || err != nil { - return u.layer.Open(name) - } - - // Both base & layer are directories - // Return union file (if opens are without error) - bfile, bErr := u.base.Open(name) - lfile, lErr := u.layer.Open(name) - - // If either have errors at this point something is very wrong. 
Return nil and the errors - if bErr != nil || lErr != nil { - return nil, fmt.Errorf("BaseErr: %v\nOverlayErr: %v", bErr, lErr) - } - - return &UnionFile{Base: bfile, Layer: lfile}, nil -} - -func (u *CopyOnWriteFs) Mkdir(name string, perm os.FileMode) error { - dir, err := IsDir(u.base, name) - if err != nil { - return u.layer.MkdirAll(name, perm) - } - if dir { - return ErrFileExists - } - return u.layer.MkdirAll(name, perm) -} - -func (u *CopyOnWriteFs) Name() string { - return "CopyOnWriteFs" -} - -func (u *CopyOnWriteFs) MkdirAll(name string, perm os.FileMode) error { - dir, err := IsDir(u.base, name) - if err != nil { - return u.layer.MkdirAll(name, perm) - } - if dir { - // This is in line with how os.MkdirAll behaves. - return nil - } - return u.layer.MkdirAll(name, perm) -} - -func (u *CopyOnWriteFs) Create(name string) (File, error) { - return u.OpenFile(name, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0666) -} diff --git a/vendor/github.com/spf13/afero/httpFs.go b/vendor/github.com/spf13/afero/httpFs.go deleted file mode 100644 index 2b86e30d..00000000 --- a/vendor/github.com/spf13/afero/httpFs.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright © 2014 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "errors" - "net/http" - "os" - "path" - "path/filepath" - "strings" - "time" -) - -type httpDir struct { - basePath string - fs HttpFs -} - -func (d httpDir) Open(name string) (http.File, error) { - if filepath.Separator != '/' && strings.IndexRune(name, filepath.Separator) >= 0 || - strings.Contains(name, "\x00") { - return nil, errors.New("http: invalid character in file path") - } - dir := string(d.basePath) - if dir == "" { - dir = "." 
- } - - f, err := d.fs.Open(filepath.Join(dir, filepath.FromSlash(path.Clean("/"+name)))) - if err != nil { - return nil, err - } - return f, nil -} - -type HttpFs struct { - source Fs -} - -func NewHttpFs(source Fs) *HttpFs { - return &HttpFs{source: source} -} - -func (h HttpFs) Dir(s string) *httpDir { - return &httpDir{basePath: s, fs: h} -} - -func (h HttpFs) Name() string { return "h HttpFs" } - -func (h HttpFs) Create(name string) (File, error) { - return h.source.Create(name) -} - -func (h HttpFs) Chmod(name string, mode os.FileMode) error { - return h.source.Chmod(name, mode) -} - -func (h HttpFs) Chown(name string, uid, gid int) error { - return h.source.Chown(name, uid, gid) -} - -func (h HttpFs) Chtimes(name string, atime time.Time, mtime time.Time) error { - return h.source.Chtimes(name, atime, mtime) -} - -func (h HttpFs) Mkdir(name string, perm os.FileMode) error { - return h.source.Mkdir(name, perm) -} - -func (h HttpFs) MkdirAll(path string, perm os.FileMode) error { - return h.source.MkdirAll(path, perm) -} - -func (h HttpFs) Open(name string) (http.File, error) { - f, err := h.source.Open(name) - if err == nil { - if httpfile, ok := f.(http.File); ok { - return httpfile, nil - } - } - return nil, err -} - -func (h HttpFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - return h.source.OpenFile(name, flag, perm) -} - -func (h HttpFs) Remove(name string) error { - return h.source.Remove(name) -} - -func (h HttpFs) RemoveAll(path string) error { - return h.source.RemoveAll(path) -} - -func (h HttpFs) Rename(oldname, newname string) error { - return h.source.Rename(oldname, newname) -} - -func (h HttpFs) Stat(name string) (os.FileInfo, error) { - return h.source.Stat(name) -} diff --git a/vendor/github.com/spf13/afero/iofs.go b/vendor/github.com/spf13/afero/iofs.go deleted file mode 100644 index c8034553..00000000 --- a/vendor/github.com/spf13/afero/iofs.go +++ /dev/null @@ -1,288 +0,0 @@ -// +build go1.16 - -package afero - -import ( - "io" - "io/fs" - "os" - "path" - "time" -) - -// IOFS adopts afero.Fs to stdlib io/fs.FS -type IOFS struct { - Fs -} - -func NewIOFS(fs Fs) IOFS { - return IOFS{Fs: fs} -} - -var ( - _ fs.FS = IOFS{} - _ fs.GlobFS = IOFS{} - _ fs.ReadDirFS = IOFS{} - _ fs.ReadFileFS = IOFS{} - _ fs.StatFS = IOFS{} - _ fs.SubFS = IOFS{} -) - -func (iofs IOFS) Open(name string) (fs.File, error) { - const op = "open" - - // by convention for fs.FS implementations we should perform this check - if !fs.ValidPath(name) { - return nil, iofs.wrapError(op, name, fs.ErrInvalid) - } - - file, err := iofs.Fs.Open(name) - if err != nil { - return nil, iofs.wrapError(op, name, err) - } - - // file should implement fs.ReadDirFile - if _, ok := file.(fs.ReadDirFile); !ok { - file = readDirFile{file} - } - - return file, nil -} - -func (iofs IOFS) Glob(pattern string) ([]string, error) { - const op = "glob" - - // afero.Glob does not perform this check but it's required for implementations - if _, err := path.Match(pattern, ""); err != nil { - return nil, iofs.wrapError(op, pattern, err) - } - - items, err := Glob(iofs.Fs, pattern) - if err != nil { - return nil, iofs.wrapError(op, pattern, err) - } - - return items, nil -} - -func (iofs IOFS) ReadDir(name string) ([]fs.DirEntry, error) { - items, err := ReadDir(iofs.Fs, name) - if err != nil { - return nil, iofs.wrapError("readdir", name, err) - } - - ret := make([]fs.DirEntry, len(items)) - for i := range items { - ret[i] = dirEntry{items[i]} - } - - return ret, nil -} - -func (iofs IOFS) ReadFile(name 
string) ([]byte, error) { - const op = "readfile" - - if !fs.ValidPath(name) { - return nil, iofs.wrapError(op, name, fs.ErrInvalid) - } - - bytes, err := ReadFile(iofs.Fs, name) - if err != nil { - return nil, iofs.wrapError(op, name, err) - } - - return bytes, nil -} - -func (iofs IOFS) Sub(dir string) (fs.FS, error) { return IOFS{NewBasePathFs(iofs.Fs, dir)}, nil } - -func (IOFS) wrapError(op, path string, err error) error { - if _, ok := err.(*fs.PathError); ok { - return err // don't need to wrap again - } - - return &fs.PathError{ - Op: op, - Path: path, - Err: err, - } -} - -// dirEntry provides adapter from os.FileInfo to fs.DirEntry -type dirEntry struct { - fs.FileInfo -} - -var _ fs.DirEntry = dirEntry{} - -func (d dirEntry) Type() fs.FileMode { return d.FileInfo.Mode().Type() } - -func (d dirEntry) Info() (fs.FileInfo, error) { return d.FileInfo, nil } - -// readDirFile provides adapter from afero.File to fs.ReadDirFile needed for correct Open -type readDirFile struct { - File -} - -var _ fs.ReadDirFile = readDirFile{} - -func (r readDirFile) ReadDir(n int) ([]fs.DirEntry, error) { - items, err := r.File.Readdir(n) - if err != nil { - return nil, err - } - - ret := make([]fs.DirEntry, len(items)) - for i := range items { - ret[i] = dirEntry{items[i]} - } - - return ret, nil -} - -// FromIOFS adopts io/fs.FS to use it as afero.Fs -// Note that io/fs.FS is read-only so all mutating methods will return fs.PathError with fs.ErrPermission -// To store modifications you may use afero.CopyOnWriteFs -type FromIOFS struct { - fs.FS -} - -var _ Fs = FromIOFS{} - -func (f FromIOFS) Create(name string) (File, error) { return nil, notImplemented("create", name) } - -func (f FromIOFS) Mkdir(name string, perm os.FileMode) error { return notImplemented("mkdir", name) } - -func (f FromIOFS) MkdirAll(path string, perm os.FileMode) error { - return notImplemented("mkdirall", path) -} - -func (f FromIOFS) Open(name string) (File, error) { - file, err := f.FS.Open(name) - if err != nil { - return nil, err - } - - return fromIOFSFile{File: file, name: name}, nil -} - -func (f FromIOFS) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - return f.Open(name) -} - -func (f FromIOFS) Remove(name string) error { - return notImplemented("remove", name) -} - -func (f FromIOFS) RemoveAll(path string) error { - return notImplemented("removeall", path) -} - -func (f FromIOFS) Rename(oldname, newname string) error { - return notImplemented("rename", oldname) -} - -func (f FromIOFS) Stat(name string) (os.FileInfo, error) { return fs.Stat(f.FS, name) } - -func (f FromIOFS) Name() string { return "fromiofs" } - -func (f FromIOFS) Chmod(name string, mode os.FileMode) error { - return notImplemented("chmod", name) -} - -func (f FromIOFS) Chown(name string, uid, gid int) error { - return notImplemented("chown", name) -} - -func (f FromIOFS) Chtimes(name string, atime time.Time, mtime time.Time) error { - return notImplemented("chtimes", name) -} - -type fromIOFSFile struct { - fs.File - name string -} - -func (f fromIOFSFile) ReadAt(p []byte, off int64) (n int, err error) { - readerAt, ok := f.File.(io.ReaderAt) - if !ok { - return -1, notImplemented("readat", f.name) - } - - return readerAt.ReadAt(p, off) -} - -func (f fromIOFSFile) Seek(offset int64, whence int) (int64, error) { - seeker, ok := f.File.(io.Seeker) - if !ok { - return -1, notImplemented("seek", f.name) - } - - return seeker.Seek(offset, whence) -} - -func (f fromIOFSFile) Write(p []byte) (n int, err error) { - return -1, 
notImplemented("write", f.name) -} - -func (f fromIOFSFile) WriteAt(p []byte, off int64) (n int, err error) { - return -1, notImplemented("writeat", f.name) -} - -func (f fromIOFSFile) Name() string { return f.name } - -func (f fromIOFSFile) Readdir(count int) ([]os.FileInfo, error) { - rdfile, ok := f.File.(fs.ReadDirFile) - if !ok { - return nil, notImplemented("readdir", f.name) - } - - entries, err := rdfile.ReadDir(count) - if err != nil { - return nil, err - } - - ret := make([]os.FileInfo, len(entries)) - for i := range entries { - ret[i], err = entries[i].Info() - - if err != nil { - return nil, err - } - } - - return ret, nil -} - -func (f fromIOFSFile) Readdirnames(n int) ([]string, error) { - rdfile, ok := f.File.(fs.ReadDirFile) - if !ok { - return nil, notImplemented("readdir", f.name) - } - - entries, err := rdfile.ReadDir(n) - if err != nil { - return nil, err - } - - ret := make([]string, len(entries)) - for i := range entries { - ret[i] = entries[i].Name() - } - - return ret, nil -} - -func (f fromIOFSFile) Sync() error { return nil } - -func (f fromIOFSFile) Truncate(size int64) error { - return notImplemented("truncate", f.name) -} - -func (f fromIOFSFile) WriteString(s string) (ret int, err error) { - return -1, notImplemented("writestring", f.name) -} - -func notImplemented(op, path string) error { - return &fs.PathError{Op: op, Path: path, Err: fs.ErrPermission} -} diff --git a/vendor/github.com/spf13/afero/ioutil.go b/vendor/github.com/spf13/afero/ioutil.go deleted file mode 100644 index a403133e..00000000 --- a/vendor/github.com/spf13/afero/ioutil.go +++ /dev/null @@ -1,240 +0,0 @@ -// Copyright ©2015 The Go Authors -// Copyright ©2015 Steve Francia -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "bytes" - "io" - "os" - "path/filepath" - "sort" - "strconv" - "strings" - "sync" - "time" -) - -// byName implements sort.Interface. -type byName []os.FileInfo - -func (f byName) Len() int { return len(f) } -func (f byName) Less(i, j int) bool { return f[i].Name() < f[j].Name() } -func (f byName) Swap(i, j int) { f[i], f[j] = f[j], f[i] } - -// ReadDir reads the directory named by dirname and returns -// a list of sorted directory entries. -func (a Afero) ReadDir(dirname string) ([]os.FileInfo, error) { - return ReadDir(a.Fs, dirname) -} - -func ReadDir(fs Fs, dirname string) ([]os.FileInfo, error) { - f, err := fs.Open(dirname) - if err != nil { - return nil, err - } - list, err := f.Readdir(-1) - f.Close() - if err != nil { - return nil, err - } - sort.Sort(byName(list)) - return list, nil -} - -// ReadFile reads the file named by filename and returns the contents. -// A successful call returns err == nil, not err == EOF. Because ReadFile -// reads the whole file, it does not treat an EOF from Read as an error -// to be reported. 
-func (a Afero) ReadFile(filename string) ([]byte, error) { - return ReadFile(a.Fs, filename) -} - -func ReadFile(fs Fs, filename string) ([]byte, error) { - f, err := fs.Open(filename) - if err != nil { - return nil, err - } - defer f.Close() - // It's a good but not certain bet that FileInfo will tell us exactly how much to - // read, so let's try it but be prepared for the answer to be wrong. - var n int64 - - if fi, err := f.Stat(); err == nil { - // Don't preallocate a huge buffer, just in case. - if size := fi.Size(); size < 1e9 { - n = size - } - } - // As initial capacity for readAll, use n + a little extra in case Size is zero, - // and to avoid another allocation after Read has filled the buffer. The readAll - // call will read into its allocated internal buffer cheaply. If the size was - // wrong, we'll either waste some space off the end or reallocate as needed, but - // in the overwhelmingly common case we'll get it just right. - return readAll(f, n+bytes.MinRead) -} - -// readAll reads from r until an error or EOF and returns the data it read -// from the internal buffer allocated with a specified capacity. -func readAll(r io.Reader, capacity int64) (b []byte, err error) { - buf := bytes.NewBuffer(make([]byte, 0, capacity)) - // If the buffer overflows, we will get bytes.ErrTooLarge. - // Return that as an error. Any other panic remains. - defer func() { - e := recover() - if e == nil { - return - } - if panicErr, ok := e.(error); ok && panicErr == bytes.ErrTooLarge { - err = panicErr - } else { - panic(e) - } - }() - _, err = buf.ReadFrom(r) - return buf.Bytes(), err -} - -// ReadAll reads from r until an error or EOF and returns the data it read. -// A successful call returns err == nil, not err == EOF. Because ReadAll is -// defined to read from src until EOF, it does not treat an EOF from Read -// as an error to be reported. -func ReadAll(r io.Reader) ([]byte, error) { - return readAll(r, bytes.MinRead) -} - -// WriteFile writes data to a file named by filename. -// If the file does not exist, WriteFile creates it with permissions perm; -// otherwise WriteFile truncates it before writing. -func (a Afero) WriteFile(filename string, data []byte, perm os.FileMode) error { - return WriteFile(a.Fs, filename, data, perm) -} - -func WriteFile(fs Fs, filename string, data []byte, perm os.FileMode) error { - f, err := fs.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) - if err != nil { - return err - } - n, err := f.Write(data) - if err == nil && n < len(data) { - err = io.ErrShortWrite - } - if err1 := f.Close(); err == nil { - err = err1 - } - return err -} - -// Random number state. -// We generate random temporary file names so that there's a good -// chance the file doesn't exist yet - keeps the number of tries in -// TempFile to a minimum. -var rand uint32 -var randmu sync.Mutex - -func reseed() uint32 { - return uint32(time.Now().UnixNano() + int64(os.Getpid())) -} - -func nextRandom() string { - randmu.Lock() - r := rand - if r == 0 { - r = reseed() - } - r = r*1664525 + 1013904223 // constants from Numerical Recipes - rand = r - randmu.Unlock() - return strconv.Itoa(int(1e9 + r%1e9))[1:] -} - -// TempFile creates a new temporary file in the directory dir, -// opens the file for reading and writing, and returns the resulting *os.File. -// The filename is generated by taking pattern and adding a random -// string to the end. If pattern includes a "*", the random string -// replaces the last "*". 
-// If dir is the empty string, TempFile uses the default directory -// for temporary files (see os.TempDir). -// Multiple programs calling TempFile simultaneously -// will not choose the same file. The caller can use f.Name() -// to find the pathname of the file. It is the caller's responsibility -// to remove the file when no longer needed. -func (a Afero) TempFile(dir, pattern string) (f File, err error) { - return TempFile(a.Fs, dir, pattern) -} - -func TempFile(fs Fs, dir, pattern string) (f File, err error) { - if dir == "" { - dir = os.TempDir() - } - - var prefix, suffix string - if pos := strings.LastIndex(pattern, "*"); pos != -1 { - prefix, suffix = pattern[:pos], pattern[pos+1:] - } else { - prefix = pattern - } - - nconflict := 0 - for i := 0; i < 10000; i++ { - name := filepath.Join(dir, prefix+nextRandom()+suffix) - f, err = fs.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) - if os.IsExist(err) { - if nconflict++; nconflict > 10 { - randmu.Lock() - rand = reseed() - randmu.Unlock() - } - continue - } - break - } - return -} - -// TempDir creates a new temporary directory in the directory dir -// with a name beginning with prefix and returns the path of the -// new directory. If dir is the empty string, TempDir uses the -// default directory for temporary files (see os.TempDir). -// Multiple programs calling TempDir simultaneously -// will not choose the same directory. It is the caller's responsibility -// to remove the directory when no longer needed. -func (a Afero) TempDir(dir, prefix string) (name string, err error) { - return TempDir(a.Fs, dir, prefix) -} -func TempDir(fs Fs, dir, prefix string) (name string, err error) { - if dir == "" { - dir = os.TempDir() - } - - nconflict := 0 - for i := 0; i < 10000; i++ { - try := filepath.Join(dir, prefix+nextRandom()) - err = fs.Mkdir(try, 0700) - if os.IsExist(err) { - if nconflict++; nconflict > 10 { - randmu.Lock() - rand = reseed() - randmu.Unlock() - } - continue - } - if err == nil { - name = try - } - break - } - return -} diff --git a/vendor/github.com/spf13/afero/lstater.go b/vendor/github.com/spf13/afero/lstater.go deleted file mode 100644 index 89c1bfc0..00000000 --- a/vendor/github.com/spf13/afero/lstater.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright © 2018 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "os" -) - -// Lstater is an optional interface in Afero. It is only implemented by the -// filesystems saying so. -// It will call Lstat if the filesystem iself is, or it delegates to, the os filesystem. -// Else it will call Stat. -// In addtion to the FileInfo, it will return a boolean telling whether Lstat was called or not. 
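// [Hypothetical illustration, not part of the deleted sources] The
// optional-interface pattern the Lstater comment above describes: type-assert
// for the extra capability and fall back to plain Stat when it is absent.
// This mirrors the lstatIfPossible helper deleted from path.go further below.
package example

import (
	"os"

	"github.com/spf13/afero"
)

func statPreferringLstat(fsys afero.Fs, name string) (os.FileInfo, bool, error) {
	if lfs, ok := fsys.(afero.Lstater); ok {
		// The boolean reports whether Lstat was actually used.
		return lfs.LstatIfPossible(name)
	}
	fi, err := fsys.Stat(name)
	return fi, false, err
}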
-type Lstater interface { - LstatIfPossible(name string) (os.FileInfo, bool, error) -} diff --git a/vendor/github.com/spf13/afero/match.go b/vendor/github.com/spf13/afero/match.go deleted file mode 100644 index 7db4b7de..00000000 --- a/vendor/github.com/spf13/afero/match.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright © 2014 Steve Francia . -// Copyright 2009 The Go Authors. All rights reserved. - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "path/filepath" - "sort" - "strings" -) - -// Glob returns the names of all files matching pattern or nil -// if there is no matching file. The syntax of patterns is the same -// as in Match. The pattern may describe hierarchical names such as -// /usr/*/bin/ed (assuming the Separator is '/'). -// -// Glob ignores file system errors such as I/O errors reading directories. -// The only possible returned error is ErrBadPattern, when pattern -// is malformed. -// -// This was adapted from (http://golang.org/pkg/path/filepath) and uses several -// built-ins from that package. -func Glob(fs Fs, pattern string) (matches []string, err error) { - if !hasMeta(pattern) { - // Lstat not supported by a ll filesystems. - if _, err = lstatIfPossible(fs, pattern); err != nil { - return nil, nil - } - return []string{pattern}, nil - } - - dir, file := filepath.Split(pattern) - switch dir { - case "": - dir = "." - case string(filepath.Separator): - // nothing - default: - dir = dir[0 : len(dir)-1] // chop off trailing separator - } - - if !hasMeta(dir) { - return glob(fs, dir, file, nil) - } - - var m []string - m, err = Glob(fs, dir) - if err != nil { - return - } - for _, d := range m { - matches, err = glob(fs, d, file, matches) - if err != nil { - return - } - } - return -} - -// glob searches for files matching pattern in the directory dir -// and appends them to matches. If the directory cannot be -// opened, it returns the existing matches. New matches are -// added in lexicographical order. -func glob(fs Fs, dir, pattern string, matches []string) (m []string, e error) { - m = matches - fi, err := fs.Stat(dir) - if err != nil { - return - } - if !fi.IsDir() { - return - } - d, err := fs.Open(dir) - if err != nil { - return - } - defer d.Close() - - names, _ := d.Readdirnames(-1) - sort.Strings(names) - - for _, n := range names { - matched, err := filepath.Match(pattern, n) - if err != nil { - return m, err - } - if matched { - m = append(m, filepath.Join(dir, n)) - } - } - return -} - -// hasMeta reports whether path contains any of the magic characters -// recognized by Match. -func hasMeta(path string) bool { - // TODO(niemeyer): Should other magic characters be added here? - return strings.ContainsAny(path, "*?[") -} diff --git a/vendor/github.com/spf13/afero/mem/dir.go b/vendor/github.com/spf13/afero/mem/dir.go deleted file mode 100644 index e104013f..00000000 --- a/vendor/github.com/spf13/afero/mem/dir.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright © 2014 Steve Francia . 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mem - -type Dir interface { - Len() int - Names() []string - Files() []*FileData - Add(*FileData) - Remove(*FileData) -} - -func RemoveFromMemDir(dir *FileData, f *FileData) { - dir.memDir.Remove(f) -} - -func AddToMemDir(dir *FileData, f *FileData) { - dir.memDir.Add(f) -} - -func InitializeDir(d *FileData) { - if d.memDir == nil { - d.dir = true - d.memDir = &DirMap{} - } -} diff --git a/vendor/github.com/spf13/afero/mem/dirmap.go b/vendor/github.com/spf13/afero/mem/dirmap.go deleted file mode 100644 index 03a57ee5..00000000 --- a/vendor/github.com/spf13/afero/mem/dirmap.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright © 2015 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mem - -import "sort" - -type DirMap map[string]*FileData - -func (m DirMap) Len() int { return len(m) } -func (m DirMap) Add(f *FileData) { m[f.name] = f } -func (m DirMap) Remove(f *FileData) { delete(m, f.name) } -func (m DirMap) Files() (files []*FileData) { - for _, f := range m { - files = append(files, f) - } - sort.Sort(filesSorter(files)) - return files -} - -// implement sort.Interface for []*FileData -type filesSorter []*FileData - -func (s filesSorter) Len() int { return len(s) } -func (s filesSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s filesSorter) Less(i, j int) bool { return s[i].name < s[j].name } - -func (m DirMap) Names() (names []string) { - for x := range m { - names = append(names, x) - } - return names -} diff --git a/vendor/github.com/spf13/afero/mem/file.go b/vendor/github.com/spf13/afero/mem/file.go deleted file mode 100644 index 5a20730c..00000000 --- a/vendor/github.com/spf13/afero/mem/file.go +++ /dev/null @@ -1,338 +0,0 @@ -// Copyright © 2015 Steve Francia . -// Copyright 2013 tsuru authors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
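// [Hypothetical illustration, not part of the deleted sources] The DirMap
// technique above in miniature: a map gives O(1) add/remove of entries, and
// names are sorted on the way out so directory listings stay deterministic
// despite Go's randomized map iteration order.
package example

import "sort"

func sortedNames(entries map[string]struct{}) []string {
	names := make([]string, 0, len(entries))
	for name := range entries {
		names = append(names, name)
	}
	sort.Strings(names) // stable, lexical listing order
	return names
}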
- -package mem - -import ( - "bytes" - "errors" - "io" - "os" - "path/filepath" - "sync" - "sync/atomic" - "time" -) - -const FilePathSeparator = string(filepath.Separator) - -type File struct { - // atomic requires 64-bit alignment for struct field access - at int64 - readDirCount int64 - closed bool - readOnly bool - fileData *FileData -} - -func NewFileHandle(data *FileData) *File { - return &File{fileData: data} -} - -func NewReadOnlyFileHandle(data *FileData) *File { - return &File{fileData: data, readOnly: true} -} - -func (f File) Data() *FileData { - return f.fileData -} - -type FileData struct { - sync.Mutex - name string - data []byte - memDir Dir - dir bool - mode os.FileMode - modtime time.Time - uid int - gid int -} - -func (d *FileData) Name() string { - d.Lock() - defer d.Unlock() - return d.name -} - -func CreateFile(name string) *FileData { - return &FileData{name: name, mode: os.ModeTemporary, modtime: time.Now()} -} - -func CreateDir(name string) *FileData { - return &FileData{name: name, memDir: &DirMap{}, dir: true} -} - -func ChangeFileName(f *FileData, newname string) { - f.Lock() - f.name = newname - f.Unlock() -} - -func SetMode(f *FileData, mode os.FileMode) { - f.Lock() - f.mode = mode - f.Unlock() -} - -func SetModTime(f *FileData, mtime time.Time) { - f.Lock() - setModTime(f, mtime) - f.Unlock() -} - -func setModTime(f *FileData, mtime time.Time) { - f.modtime = mtime -} - -func SetUID(f *FileData, uid int) { - f.Lock() - f.uid = uid - f.Unlock() -} - -func SetGID(f *FileData, gid int) { - f.Lock() - f.gid = gid - f.Unlock() -} - -func GetFileInfo(f *FileData) *FileInfo { - return &FileInfo{f} -} - -func (f *File) Open() error { - atomic.StoreInt64(&f.at, 0) - atomic.StoreInt64(&f.readDirCount, 0) - f.fileData.Lock() - f.closed = false - f.fileData.Unlock() - return nil -} - -func (f *File) Close() error { - f.fileData.Lock() - f.closed = true - if !f.readOnly { - setModTime(f.fileData, time.Now()) - } - f.fileData.Unlock() - return nil -} - -func (f *File) Name() string { - return f.fileData.Name() -} - -func (f *File) Stat() (os.FileInfo, error) { - return &FileInfo{f.fileData}, nil -} - -func (f *File) Sync() error { - return nil -} - -func (f *File) Readdir(count int) (res []os.FileInfo, err error) { - if !f.fileData.dir { - return nil, &os.PathError{Op: "readdir", Path: f.fileData.name, Err: errors.New("not a dir")} - } - var outLength int64 - - f.fileData.Lock() - files := f.fileData.memDir.Files()[f.readDirCount:] - if count > 0 { - if len(files) < count { - outLength = int64(len(files)) - } else { - outLength = int64(count) - } - if len(files) == 0 { - err = io.EOF - } - } else { - outLength = int64(len(files)) - } - f.readDirCount += outLength - f.fileData.Unlock() - - res = make([]os.FileInfo, outLength) - for i := range res { - res[i] = &FileInfo{files[i]} - } - - return res, err -} - -func (f *File) Readdirnames(n int) (names []string, err error) { - fi, err := f.Readdir(n) - names = make([]string, len(fi)) - for i, f := range fi { - _, names[i] = filepath.Split(f.Name()) - } - return names, err -} - -func (f *File) Read(b []byte) (n int, err error) { - f.fileData.Lock() - defer f.fileData.Unlock() - if f.closed == true { - return 0, ErrFileClosed - } - if len(b) > 0 && int(f.at) == len(f.fileData.data) { - return 0, io.EOF - } - if int(f.at) > len(f.fileData.data) { - return 0, io.ErrUnexpectedEOF - } - if len(f.fileData.data)-int(f.at) >= len(b) { - n = len(b) - } else { - n = len(f.fileData.data) - int(f.at) - } - copy(b, 
f.fileData.data[f.at:f.at+int64(n)]) - atomic.AddInt64(&f.at, int64(n)) - return -} - -func (f *File) ReadAt(b []byte, off int64) (n int, err error) { - prev := atomic.LoadInt64(&f.at) - atomic.StoreInt64(&f.at, off) - n, err = f.Read(b) - atomic.StoreInt64(&f.at, prev) - return -} - -func (f *File) Truncate(size int64) error { - if f.closed == true { - return ErrFileClosed - } - if f.readOnly { - return &os.PathError{Op: "truncate", Path: f.fileData.name, Err: errors.New("file handle is read only")} - } - if size < 0 { - return ErrOutOfRange - } - f.fileData.Lock() - defer f.fileData.Unlock() - if size > int64(len(f.fileData.data)) { - diff := size - int64(len(f.fileData.data)) - f.fileData.data = append(f.fileData.data, bytes.Repeat([]byte{00}, int(diff))...) - } else { - f.fileData.data = f.fileData.data[0:size] - } - setModTime(f.fileData, time.Now()) - return nil -} - -func (f *File) Seek(offset int64, whence int) (int64, error) { - if f.closed == true { - return 0, ErrFileClosed - } - switch whence { - case io.SeekStart: - atomic.StoreInt64(&f.at, offset) - case io.SeekCurrent: - atomic.AddInt64(&f.at, offset) - case io.SeekEnd: - atomic.StoreInt64(&f.at, int64(len(f.fileData.data))+offset) - } - return f.at, nil -} - -func (f *File) Write(b []byte) (n int, err error) { - if f.closed == true { - return 0, ErrFileClosed - } - if f.readOnly { - return 0, &os.PathError{Op: "write", Path: f.fileData.name, Err: errors.New("file handle is read only")} - } - n = len(b) - cur := atomic.LoadInt64(&f.at) - f.fileData.Lock() - defer f.fileData.Unlock() - diff := cur - int64(len(f.fileData.data)) - var tail []byte - if n+int(cur) < len(f.fileData.data) { - tail = f.fileData.data[n+int(cur):] - } - if diff > 0 { - f.fileData.data = append(f.fileData.data, append(bytes.Repeat([]byte{00}, int(diff)), b...)...) - f.fileData.data = append(f.fileData.data, tail...) - } else { - f.fileData.data = append(f.fileData.data[:cur], b...) - f.fileData.data = append(f.fileData.data, tail...) - } - setModTime(f.fileData, time.Now()) - - atomic.AddInt64(&f.at, int64(n)) - return -} - -func (f *File) WriteAt(b []byte, off int64) (n int, err error) { - atomic.StoreInt64(&f.at, off) - return f.Write(b) -} - -func (f *File) WriteString(s string) (ret int, err error) { - return f.Write([]byte(s)) -} - -func (f *File) Info() *FileInfo { - return &FileInfo{f.fileData} -} - -type FileInfo struct { - *FileData -} - -// Implements os.FileInfo -func (s *FileInfo) Name() string { - s.Lock() - _, name := filepath.Split(s.name) - s.Unlock() - return name -} -func (s *FileInfo) Mode() os.FileMode { - s.Lock() - defer s.Unlock() - return s.mode -} -func (s *FileInfo) ModTime() time.Time { - s.Lock() - defer s.Unlock() - return s.modtime -} -func (s *FileInfo) IsDir() bool { - s.Lock() - defer s.Unlock() - return s.dir -} -func (s *FileInfo) Sys() interface{} { return nil } -func (s *FileInfo) Size() int64 { - if s.IsDir() { - return int64(42) - } - s.Lock() - defer s.Unlock() - return int64(len(s.data)) -} - -var ( - ErrFileClosed = errors.New("File is closed") - ErrOutOfRange = errors.New("Out of range") - ErrTooLarge = errors.New("Too large") - ErrFileNotFound = os.ErrNotExist - ErrFileExists = os.ErrExist - ErrDestinationExists = os.ErrExist -) diff --git a/vendor/github.com/spf13/afero/memmap.go b/vendor/github.com/spf13/afero/memmap.go deleted file mode 100644 index 5c265f92..00000000 --- a/vendor/github.com/spf13/afero/memmap.go +++ /dev/null @@ -1,404 +0,0 @@ -// Copyright © 2014 Steve Francia . 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "fmt" - "log" - "os" - "path/filepath" - "strings" - "sync" - "time" - - "github.com/spf13/afero/mem" -) - -const chmodBits = os.ModePerm | os.ModeSetuid | os.ModeSetgid | os.ModeSticky // Only a subset of bits are allowed to be changed. Documented under os.Chmod() - -type MemMapFs struct { - mu sync.RWMutex - data map[string]*mem.FileData - init sync.Once -} - -func NewMemMapFs() Fs { - return &MemMapFs{} -} - -func (m *MemMapFs) getData() map[string]*mem.FileData { - m.init.Do(func() { - m.data = make(map[string]*mem.FileData) - // Root should always exist, right? - // TODO: what about windows? - root := mem.CreateDir(FilePathSeparator) - mem.SetMode(root, os.ModeDir|0755) - m.data[FilePathSeparator] = root - }) - return m.data -} - -func (*MemMapFs) Name() string { return "MemMapFS" } - -func (m *MemMapFs) Create(name string) (File, error) { - name = normalizePath(name) - m.mu.Lock() - file := mem.CreateFile(name) - m.getData()[name] = file - m.registerWithParent(file, 0) - m.mu.Unlock() - return mem.NewFileHandle(file), nil -} - -func (m *MemMapFs) unRegisterWithParent(fileName string) error { - f, err := m.lockfreeOpen(fileName) - if err != nil { - return err - } - parent := m.findParent(f) - if parent == nil { - log.Panic("parent of ", f.Name(), " is nil") - } - - parent.Lock() - mem.RemoveFromMemDir(parent, f) - parent.Unlock() - return nil -} - -func (m *MemMapFs) findParent(f *mem.FileData) *mem.FileData { - pdir, _ := filepath.Split(f.Name()) - pdir = filepath.Clean(pdir) - pfile, err := m.lockfreeOpen(pdir) - if err != nil { - return nil - } - return pfile -} - -func (m *MemMapFs) registerWithParent(f *mem.FileData, perm os.FileMode) { - if f == nil { - return - } - parent := m.findParent(f) - if parent == nil { - pdir := filepath.Dir(filepath.Clean(f.Name())) - err := m.lockfreeMkdir(pdir, perm) - if err != nil { - //log.Println("Mkdir error:", err) - return - } - parent, err = m.lockfreeOpen(pdir) - if err != nil { - //log.Println("Open after Mkdir error:", err) - return - } - } - - parent.Lock() - mem.InitializeDir(parent) - mem.AddToMemDir(parent, f) - parent.Unlock() -} - -func (m *MemMapFs) lockfreeMkdir(name string, perm os.FileMode) error { - name = normalizePath(name) - x, ok := m.getData()[name] - if ok { - // Only return ErrFileExists if it's a file, not a directory. 
- i := mem.FileInfo{FileData: x} - if !i.IsDir() { - return ErrFileExists - } - } else { - item := mem.CreateDir(name) - mem.SetMode(item, os.ModeDir|perm) - m.getData()[name] = item - m.registerWithParent(item, perm) - } - return nil -} - -func (m *MemMapFs) Mkdir(name string, perm os.FileMode) error { - perm &= chmodBits - name = normalizePath(name) - - m.mu.RLock() - _, ok := m.getData()[name] - m.mu.RUnlock() - if ok { - return &os.PathError{Op: "mkdir", Path: name, Err: ErrFileExists} - } - - m.mu.Lock() - item := mem.CreateDir(name) - mem.SetMode(item, os.ModeDir|perm) - m.getData()[name] = item - m.registerWithParent(item, perm) - m.mu.Unlock() - - return m.setFileMode(name, perm|os.ModeDir) -} - -func (m *MemMapFs) MkdirAll(path string, perm os.FileMode) error { - err := m.Mkdir(path, perm) - if err != nil { - if err.(*os.PathError).Err == ErrFileExists { - return nil - } - return err - } - return nil -} - -// Handle some relative paths -func normalizePath(path string) string { - path = filepath.Clean(path) - - switch path { - case ".": - return FilePathSeparator - case "..": - return FilePathSeparator - default: - return path - } -} - -func (m *MemMapFs) Open(name string) (File, error) { - f, err := m.open(name) - if f != nil { - return mem.NewReadOnlyFileHandle(f), err - } - return nil, err -} - -func (m *MemMapFs) openWrite(name string) (File, error) { - f, err := m.open(name) - if f != nil { - return mem.NewFileHandle(f), err - } - return nil, err -} - -func (m *MemMapFs) open(name string) (*mem.FileData, error) { - name = normalizePath(name) - - m.mu.RLock() - f, ok := m.getData()[name] - m.mu.RUnlock() - if !ok { - return nil, &os.PathError{Op: "open", Path: name, Err: ErrFileNotFound} - } - return f, nil -} - -func (m *MemMapFs) lockfreeOpen(name string) (*mem.FileData, error) { - name = normalizePath(name) - f, ok := m.getData()[name] - if ok { - return f, nil - } else { - return nil, ErrFileNotFound - } -} - -func (m *MemMapFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - perm &= chmodBits - chmod := false - file, err := m.openWrite(name) - if err == nil && (flag&os.O_EXCL > 0) { - return nil, &os.PathError{Op: "open", Path: name, Err: ErrFileExists} - } - if os.IsNotExist(err) && (flag&os.O_CREATE > 0) { - file, err = m.Create(name) - chmod = true - } - if err != nil { - return nil, err - } - if flag == os.O_RDONLY { - file = mem.NewReadOnlyFileHandle(file.(*mem.File).Data()) - } - if flag&os.O_APPEND > 0 { - _, err = file.Seek(0, os.SEEK_END) - if err != nil { - file.Close() - return nil, err - } - } - if flag&os.O_TRUNC > 0 && flag&(os.O_RDWR|os.O_WRONLY) > 0 { - err = file.Truncate(0) - if err != nil { - file.Close() - return nil, err - } - } - if chmod { - return file, m.setFileMode(name, perm) - } - return file, nil -} - -func (m *MemMapFs) Remove(name string) error { - name = normalizePath(name) - - m.mu.Lock() - defer m.mu.Unlock() - - if _, ok := m.getData()[name]; ok { - err := m.unRegisterWithParent(name) - if err != nil { - return &os.PathError{Op: "remove", Path: name, Err: err} - } - delete(m.getData(), name) - } else { - return &os.PathError{Op: "remove", Path: name, Err: os.ErrNotExist} - } - return nil -} - -func (m *MemMapFs) RemoveAll(path string) error { - path = normalizePath(path) - m.mu.Lock() - m.unRegisterWithParent(path) - m.mu.Unlock() - - m.mu.RLock() - defer m.mu.RUnlock() - - for p := range m.getData() { - if strings.HasPrefix(p, path) { - m.mu.RUnlock() - m.mu.Lock() - delete(m.getData(), p) - m.mu.Unlock() - 
m.mu.RLock() - } - } - return nil -} - -func (m *MemMapFs) Rename(oldname, newname string) error { - oldname = normalizePath(oldname) - newname = normalizePath(newname) - - if oldname == newname { - return nil - } - - m.mu.RLock() - defer m.mu.RUnlock() - if _, ok := m.getData()[oldname]; ok { - m.mu.RUnlock() - m.mu.Lock() - m.unRegisterWithParent(oldname) - fileData := m.getData()[oldname] - delete(m.getData(), oldname) - mem.ChangeFileName(fileData, newname) - m.getData()[newname] = fileData - m.registerWithParent(fileData, 0) - m.mu.Unlock() - m.mu.RLock() - } else { - return &os.PathError{Op: "rename", Path: oldname, Err: ErrFileNotFound} - } - return nil -} - -func (m *MemMapFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { - fileInfo, err := m.Stat(name) - return fileInfo, false, err -} - -func (m *MemMapFs) Stat(name string) (os.FileInfo, error) { - f, err := m.Open(name) - if err != nil { - return nil, err - } - fi := mem.GetFileInfo(f.(*mem.File).Data()) - return fi, nil -} - -func (m *MemMapFs) Chmod(name string, mode os.FileMode) error { - mode &= chmodBits - - m.mu.RLock() - f, ok := m.getData()[name] - m.mu.RUnlock() - if !ok { - return &os.PathError{Op: "chmod", Path: name, Err: ErrFileNotFound} - } - prevOtherBits := mem.GetFileInfo(f).Mode() & ^chmodBits - - mode = prevOtherBits | mode - return m.setFileMode(name, mode) -} - -func (m *MemMapFs) setFileMode(name string, mode os.FileMode) error { - name = normalizePath(name) - - m.mu.RLock() - f, ok := m.getData()[name] - m.mu.RUnlock() - if !ok { - return &os.PathError{Op: "chmod", Path: name, Err: ErrFileNotFound} - } - - m.mu.Lock() - mem.SetMode(f, mode) - m.mu.Unlock() - - return nil -} - -func (m *MemMapFs) Chown(name string, uid, gid int) error { - name = normalizePath(name) - - m.mu.RLock() - f, ok := m.getData()[name] - m.mu.RUnlock() - if !ok { - return &os.PathError{Op: "chown", Path: name, Err: ErrFileNotFound} - } - - mem.SetUID(f, uid) - mem.SetGID(f, gid) - - return nil -} - -func (m *MemMapFs) Chtimes(name string, atime time.Time, mtime time.Time) error { - name = normalizePath(name) - - m.mu.RLock() - f, ok := m.getData()[name] - m.mu.RUnlock() - if !ok { - return &os.PathError{Op: "chtimes", Path: name, Err: ErrFileNotFound} - } - - m.mu.Lock() - mem.SetModTime(f, mtime) - m.mu.Unlock() - - return nil -} - -func (m *MemMapFs) List() { - for _, x := range m.data { - y := mem.FileInfo{FileData: x} - fmt.Println(x.Name(), y.Size()) - } -} diff --git a/vendor/github.com/spf13/afero/os.go b/vendor/github.com/spf13/afero/os.go deleted file mode 100644 index f1366321..00000000 --- a/vendor/github.com/spf13/afero/os.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright © 2014 Steve Francia . -// Copyright 2013 tsuru authors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "os" - "time" -) - -var _ Lstater = (*OsFs)(nil) - -// OsFs is a Fs implementation that uses functions provided by the os package. 
-// -// For details in any method, check the documentation of the os package -// (http://golang.org/pkg/os/). -type OsFs struct{} - -func NewOsFs() Fs { - return &OsFs{} -} - -func (OsFs) Name() string { return "OsFs" } - -func (OsFs) Create(name string) (File, error) { - f, e := os.Create(name) - if f == nil { - // while this looks strange, we need to return a bare nil (of type nil) not - // a nil value of type *os.File or nil won't be nil - return nil, e - } - return f, e -} - -func (OsFs) Mkdir(name string, perm os.FileMode) error { - return os.Mkdir(name, perm) -} - -func (OsFs) MkdirAll(path string, perm os.FileMode) error { - return os.MkdirAll(path, perm) -} - -func (OsFs) Open(name string) (File, error) { - f, e := os.Open(name) - if f == nil { - // while this looks strange, we need to return a bare nil (of type nil) not - // a nil value of type *os.File or nil won't be nil - return nil, e - } - return f, e -} - -func (OsFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - f, e := os.OpenFile(name, flag, perm) - if f == nil { - // while this looks strange, we need to return a bare nil (of type nil) not - // a nil value of type *os.File or nil won't be nil - return nil, e - } - return f, e -} - -func (OsFs) Remove(name string) error { - return os.Remove(name) -} - -func (OsFs) RemoveAll(path string) error { - return os.RemoveAll(path) -} - -func (OsFs) Rename(oldname, newname string) error { - return os.Rename(oldname, newname) -} - -func (OsFs) Stat(name string) (os.FileInfo, error) { - return os.Stat(name) -} - -func (OsFs) Chmod(name string, mode os.FileMode) error { - return os.Chmod(name, mode) -} - -func (OsFs) Chown(name string, uid, gid int) error { - return os.Chown(name, uid, gid) -} - -func (OsFs) Chtimes(name string, atime time.Time, mtime time.Time) error { - return os.Chtimes(name, atime, mtime) -} - -func (OsFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { - fi, err := os.Lstat(name) - return fi, true, err -} - -func (OsFs) SymlinkIfPossible(oldname, newname string) error { - return os.Symlink(oldname, newname) -} - -func (OsFs) ReadlinkIfPossible(name string) (string, error) { - return os.Readlink(name) -} diff --git a/vendor/github.com/spf13/afero/path.go b/vendor/github.com/spf13/afero/path.go deleted file mode 100644 index 18f60a0f..00000000 --- a/vendor/github.com/spf13/afero/path.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright ©2015 The Go Authors -// Copyright ©2015 Steve Francia -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "os" - "path/filepath" - "sort" -) - -// readDirNames reads the directory named by dirname and returns -// a sorted list of directory entries. 
-// adapted from https://golang.org/src/path/filepath/path.go -func readDirNames(fs Fs, dirname string) ([]string, error) { - f, err := fs.Open(dirname) - if err != nil { - return nil, err - } - names, err := f.Readdirnames(-1) - f.Close() - if err != nil { - return nil, err - } - sort.Strings(names) - return names, nil -} - -// walk recursively descends path, calling walkFn -// adapted from https://golang.org/src/path/filepath/path.go -func walk(fs Fs, path string, info os.FileInfo, walkFn filepath.WalkFunc) error { - err := walkFn(path, info, nil) - if err != nil { - if info.IsDir() && err == filepath.SkipDir { - return nil - } - return err - } - - if !info.IsDir() { - return nil - } - - names, err := readDirNames(fs, path) - if err != nil { - return walkFn(path, info, err) - } - - for _, name := range names { - filename := filepath.Join(path, name) - fileInfo, err := lstatIfPossible(fs, filename) - if err != nil { - if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir { - return err - } - } else { - err = walk(fs, filename, fileInfo, walkFn) - if err != nil { - if !fileInfo.IsDir() || err != filepath.SkipDir { - return err - } - } - } - } - return nil -} - -// if the filesystem supports it, use Lstat, else use fs.Stat -func lstatIfPossible(fs Fs, path string) (os.FileInfo, error) { - if lfs, ok := fs.(Lstater); ok { - fi, _, err := lfs.LstatIfPossible(path) - return fi, err - } - return fs.Stat(path) -} - -// Walk walks the file tree rooted at root, calling walkFn for each file or -// directory in the tree, including root. All errors that arise visiting files -// and directories are filtered by walkFn. The files are walked in lexical -// order, which makes the output deterministic but means that for very -// large directories Walk can be inefficient. -// Walk does not follow symbolic links. 
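// [Hypothetical illustration, not part of the deleted sources] Since Go 1.16
// the standard library offers the same lexical-order, symlink-free walk for
// any io/fs.FS via fs.WalkDir — one reason a vendored Walk helper is no
// longer needed.
package example

import (
	"fmt"
	"io/fs"
	"testing/fstest"
)

func walkExample() error {
	fsys := fstest.MapFS{
		"a.txt":     &fstest.MapFile{Data: []byte("hi")},
		"dir/b.txt": &fstest.MapFile{Data: []byte("there")},
	}
	return fs.WalkDir(fsys, ".", func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		fmt.Println(path) // visits ".", "a.txt", "dir", "dir/b.txt" in lexical order
		return nil
	})
}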
- -func (a Afero) Walk(root string, walkFn filepath.WalkFunc) error { - return Walk(a.Fs, root, walkFn) -} - -func Walk(fs Fs, root string, walkFn filepath.WalkFunc) error { - info, err := lstatIfPossible(fs, root) - if err != nil { - return walkFn(root, nil, err) - } - return walk(fs, root, info, walkFn) -} diff --git a/vendor/github.com/spf13/afero/readonlyfs.go b/vendor/github.com/spf13/afero/readonlyfs.go deleted file mode 100644 index bd8f9264..00000000 --- a/vendor/github.com/spf13/afero/readonlyfs.go +++ /dev/null @@ -1,96 +0,0 @@ -package afero - -import ( - "os" - "syscall" - "time" -) - -var _ Lstater = (*ReadOnlyFs)(nil) - -type ReadOnlyFs struct { - source Fs -} - -func NewReadOnlyFs(source Fs) Fs { - return &ReadOnlyFs{source: source} -} - -func (r *ReadOnlyFs) ReadDir(name string) ([]os.FileInfo, error) { - return ReadDir(r.source, name) -} - -func (r *ReadOnlyFs) Chtimes(n string, a, m time.Time) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) Chmod(n string, m os.FileMode) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) Chown(n string, uid, gid int) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) Name() string { - return "ReadOnlyFilter" -} - -func (r *ReadOnlyFs) Stat(name string) (os.FileInfo, error) { - return r.source.Stat(name) -} - -func (r *ReadOnlyFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { - if lsf, ok := r.source.(Lstater); ok { - return lsf.LstatIfPossible(name) - } - fi, err := r.Stat(name) - return fi, false, err -} - -func (r *ReadOnlyFs) SymlinkIfPossible(oldname, newname string) error { - return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: ErrNoSymlink} -} - -func (r *ReadOnlyFs) ReadlinkIfPossible(name string) (string, error) { - if srdr, ok := r.source.(LinkReader); ok { - return srdr.ReadlinkIfPossible(name) - } - - return "", &os.PathError{Op: "readlink", Path: name, Err: ErrNoReadlink} -} - -func (r *ReadOnlyFs) Rename(o, n string) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) RemoveAll(p string) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) Remove(n string) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - if flag&(os.O_WRONLY|syscall.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 { - return nil, syscall.EPERM - } - return r.source.OpenFile(name, flag, perm) -} - -func (r *ReadOnlyFs) Open(n string) (File, error) { - return r.source.Open(n) -} - -func (r *ReadOnlyFs) Mkdir(n string, p os.FileMode) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) MkdirAll(n string, p os.FileMode) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) Create(n string) (File, error) { - return nil, syscall.EPERM -} diff --git a/vendor/github.com/spf13/afero/regexpfs.go b/vendor/github.com/spf13/afero/regexpfs.go deleted file mode 100644 index ac359c62..00000000 --- a/vendor/github.com/spf13/afero/regexpfs.go +++ /dev/null @@ -1,224 +0,0 @@ -package afero - -import ( - "os" - "regexp" - "syscall" - "time" -) - -// The RegexpFs filters files (not directories) by regular expression. Only -// files matching the given regexp will be allowed, all others get a ENOENT error ( -// "No such file or directory"). 
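// [Hypothetical illustration, not part of the deleted sources] Usage of the
// RegexpFs filter described above: only names matching the pattern are
// visible, everything else errors with ENOENT, and directories always pass
// through. NewRegexpFs and NewMemMapFs are the constructors shown in this diff.
package example

import (
	"regexp"

	"github.com/spf13/afero"
)

func textFilesOnly() afero.Fs {
	return afero.NewRegexpFs(afero.NewMemMapFs(), regexp.MustCompile(`\.txt$`))
}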
-// -type RegexpFs struct { - re *regexp.Regexp - source Fs -} - -func NewRegexpFs(source Fs, re *regexp.Regexp) Fs { - return &RegexpFs{source: source, re: re} -} - -type RegexpFile struct { - f File - re *regexp.Regexp -} - -func (r *RegexpFs) matchesName(name string) error { - if r.re == nil { - return nil - } - if r.re.MatchString(name) { - return nil - } - return syscall.ENOENT -} - -func (r *RegexpFs) dirOrMatches(name string) error { - dir, err := IsDir(r.source, name) - if err != nil { - return err - } - if dir { - return nil - } - return r.matchesName(name) -} - -func (r *RegexpFs) Chtimes(name string, a, m time.Time) error { - if err := r.dirOrMatches(name); err != nil { - return err - } - return r.source.Chtimes(name, a, m) -} - -func (r *RegexpFs) Chmod(name string, mode os.FileMode) error { - if err := r.dirOrMatches(name); err != nil { - return err - } - return r.source.Chmod(name, mode) -} - -func (r *RegexpFs) Chown(name string, uid, gid int) error { - if err := r.dirOrMatches(name); err != nil { - return err - } - return r.source.Chown(name, uid, gid) -} - -func (r *RegexpFs) Name() string { - return "RegexpFs" -} - -func (r *RegexpFs) Stat(name string) (os.FileInfo, error) { - if err := r.dirOrMatches(name); err != nil { - return nil, err - } - return r.source.Stat(name) -} - -func (r *RegexpFs) Rename(oldname, newname string) error { - dir, err := IsDir(r.source, oldname) - if err != nil { - return err - } - if dir { - return nil - } - if err := r.matchesName(oldname); err != nil { - return err - } - if err := r.matchesName(newname); err != nil { - return err - } - return r.source.Rename(oldname, newname) -} - -func (r *RegexpFs) RemoveAll(p string) error { - dir, err := IsDir(r.source, p) - if err != nil { - return err - } - if !dir { - if err := r.matchesName(p); err != nil { - return err - } - } - return r.source.RemoveAll(p) -} - -func (r *RegexpFs) Remove(name string) error { - if err := r.dirOrMatches(name); err != nil { - return err - } - return r.source.Remove(name) -} - -func (r *RegexpFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - if err := r.dirOrMatches(name); err != nil { - return nil, err - } - return r.source.OpenFile(name, flag, perm) -} - -func (r *RegexpFs) Open(name string) (File, error) { - dir, err := IsDir(r.source, name) - if err != nil { - return nil, err - } - if !dir { - if err := r.matchesName(name); err != nil { - return nil, err - } - } - f, err := r.source.Open(name) - if err != nil { - return nil, err - } - return &RegexpFile{f: f, re: r.re}, nil -} - -func (r *RegexpFs) Mkdir(n string, p os.FileMode) error { - return r.source.Mkdir(n, p) -} - -func (r *RegexpFs) MkdirAll(n string, p os.FileMode) error { - return r.source.MkdirAll(n, p) -} - -func (r *RegexpFs) Create(name string) (File, error) { - if err := r.matchesName(name); err != nil { - return nil, err - } - return r.source.Create(name) -} - -func (f *RegexpFile) Close() error { - return f.f.Close() -} - -func (f *RegexpFile) Read(s []byte) (int, error) { - return f.f.Read(s) -} - -func (f *RegexpFile) ReadAt(s []byte, o int64) (int, error) { - return f.f.ReadAt(s, o) -} - -func (f *RegexpFile) Seek(o int64, w int) (int64, error) { - return f.f.Seek(o, w) -} - -func (f *RegexpFile) Write(s []byte) (int, error) { - return f.f.Write(s) -} - -func (f *RegexpFile) WriteAt(s []byte, o int64) (int, error) { - return f.f.WriteAt(s, o) -} - -func (f *RegexpFile) Name() string { - return f.f.Name() -} - -func (f *RegexpFile) Readdir(c int) (fi []os.FileInfo, err 
error) { - var rfi []os.FileInfo - rfi, err = f.f.Readdir(c) - if err != nil { - return nil, err - } - for _, i := range rfi { - if i.IsDir() || f.re.MatchString(i.Name()) { - fi = append(fi, i) - } - } - return fi, nil -} - -func (f *RegexpFile) Readdirnames(c int) (n []string, err error) { - fi, err := f.Readdir(c) - if err != nil { - return nil, err - } - for _, s := range fi { - n = append(n, s.Name()) - } - return n, nil -} - -func (f *RegexpFile) Stat() (os.FileInfo, error) { - return f.f.Stat() -} - -func (f *RegexpFile) Sync() error { - return f.f.Sync() -} - -func (f *RegexpFile) Truncate(s int64) error { - return f.f.Truncate(s) -} - -func (f *RegexpFile) WriteString(s string) (int, error) { - return f.f.WriteString(s) -} diff --git a/vendor/github.com/spf13/afero/symlink.go b/vendor/github.com/spf13/afero/symlink.go deleted file mode 100644 index d1c6ea53..00000000 --- a/vendor/github.com/spf13/afero/symlink.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright © 2018 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "errors" -) - -// Symlinker is an optional interface in Afero. It is only implemented by the -// filesystems saying so. -// It indicates support for 3 symlink related interfaces that implement the -// behaviors of the os methods: -// - Lstat -// - Symlink, and -// - Readlink -type Symlinker interface { - Lstater - Linker - LinkReader -} - -// Linker is an optional interface in Afero. It is only implemented by the -// filesystems saying so. -// It will call Symlink if the filesystem itself is, or it delegates to, the os filesystem, -// or the filesystem otherwise supports Symlink's. -type Linker interface { - SymlinkIfPossible(oldname, newname string) error -} - -// ErrNoSymlink is the error that will be wrapped in an os.LinkError if a file system -// does not support Symlink's either directly or through its delegated filesystem. -// As expressed by support for the Linker interface. -var ErrNoSymlink = errors.New("symlink not supported") - -// LinkReader is an optional interface in Afero. It is only implemented by the -// filesystems saying so. -type LinkReader interface { - ReadlinkIfPossible(name string) (string, error) -} - -// ErrNoReadlink is the error that will be wrapped in an os.Path if a file system -// does not support the readlink operation either directly or through its delegated filesystem. -// As expressed by support for the LinkReader interface. -var ErrNoReadlink = errors.New("readlink not supported") diff --git a/vendor/github.com/spf13/afero/unionFile.go b/vendor/github.com/spf13/afero/unionFile.go deleted file mode 100644 index 985363ee..00000000 --- a/vendor/github.com/spf13/afero/unionFile.go +++ /dev/null @@ -1,317 +0,0 @@ -package afero - -import ( - "io" - "os" - "path/filepath" - "syscall" -) - -// The UnionFile implements the afero.File interface and will be returned -// when reading a directory present at least in the overlay or opening a file -// for writing. 
-// -// The calls to -// Readdir() and Readdirnames() merge the file os.FileInfo / names from the -// base and the overlay - for files present in both layers, only those -// from the overlay will be used. -// -// When opening files for writing (Create() / OpenFile() with the right flags) -// the operations will be done in both layers, starting with the overlay. A -// successful read in the overlay will move the cursor position in the base layer -// by the number of bytes read. -type UnionFile struct { - Base File - Layer File - Merger DirsMerger - off int - files []os.FileInfo -} - -func (f *UnionFile) Close() error { - // first close base, so we have a newer timestamp in the overlay. If we'd close - // the overlay first, we'd get a cacheStale the next time we access this file - // -> cache would be useless ;-) - if f.Base != nil { - f.Base.Close() - } - if f.Layer != nil { - return f.Layer.Close() - } - return BADFD -} - -func (f *UnionFile) Read(s []byte) (int, error) { - if f.Layer != nil { - n, err := f.Layer.Read(s) - if (err == nil || err == io.EOF) && f.Base != nil { - // advance the file position also in the base file, the next - // call may be a write at this position (or a seek with SEEK_CUR) - if _, seekErr := f.Base.Seek(int64(n), os.SEEK_CUR); seekErr != nil { - // only overwrite err in case the seek fails: we need to - // report an eventual io.EOF to the caller - err = seekErr - } - } - return n, err - } - if f.Base != nil { - return f.Base.Read(s) - } - return 0, BADFD -} - -func (f *UnionFile) ReadAt(s []byte, o int64) (int, error) { - if f.Layer != nil { - n, err := f.Layer.ReadAt(s, o) - if (err == nil || err == io.EOF) && f.Base != nil { - _, err = f.Base.Seek(o+int64(n), os.SEEK_SET) - } - return n, err - } - if f.Base != nil { - return f.Base.ReadAt(s, o) - } - return 0, BADFD -} - -func (f *UnionFile) Seek(o int64, w int) (pos int64, err error) { - if f.Layer != nil { - pos, err = f.Layer.Seek(o, w) - if (err == nil || err == io.EOF) && f.Base != nil { - _, err = f.Base.Seek(o, w) - } - return pos, err - } - if f.Base != nil { - return f.Base.Seek(o, w) - } - return 0, BADFD -} - -func (f *UnionFile) Write(s []byte) (n int, err error) { - if f.Layer != nil { - n, err = f.Layer.Write(s) - if err == nil && f.Base != nil { // hmm, do we have fixed size files where a write may hit the EOF mark? - _, err = f.Base.Write(s) - } - return n, err - } - if f.Base != nil { - return f.Base.Write(s) - } - return 0, BADFD -} - -func (f *UnionFile) WriteAt(s []byte, o int64) (n int, err error) { - if f.Layer != nil { - n, err = f.Layer.WriteAt(s, o) - if err == nil && f.Base != nil { - _, err = f.Base.WriteAt(s, o) - } - return n, err - } - if f.Base != nil { - return f.Base.WriteAt(s, o) - } - return 0, BADFD -} - -func (f *UnionFile) Name() string { - if f.Layer != nil { - return f.Layer.Name() - } - return f.Base.Name() -} - -// DirsMerger is how UnionFile weaves two directories together. -// It takes the FileInfo slices from the layer and the base and returns a -// single view. 
-type DirsMerger func(lofi, bofi []os.FileInfo) ([]os.FileInfo, error) - -var defaultUnionMergeDirsFn = func(lofi, bofi []os.FileInfo) ([]os.FileInfo, error) { - var files = make(map[string]os.FileInfo) - - for _, fi := range lofi { - files[fi.Name()] = fi - } - - for _, fi := range bofi { - if _, exists := files[fi.Name()]; !exists { - files[fi.Name()] = fi - } - } - - rfi := make([]os.FileInfo, len(files)) - - i := 0 - for _, fi := range files { - rfi[i] = fi - i++ - } - - return rfi, nil - -} - -// Readdir will weave the two directories together and -// return a single view of the overlayed directories. -// At the end of the directory view, the error is io.EOF if c > 0. -func (f *UnionFile) Readdir(c int) (ofi []os.FileInfo, err error) { - var merge DirsMerger = f.Merger - if merge == nil { - merge = defaultUnionMergeDirsFn - } - - if f.off == 0 { - var lfi []os.FileInfo - if f.Layer != nil { - lfi, err = f.Layer.Readdir(-1) - if err != nil { - return nil, err - } - } - - var bfi []os.FileInfo - if f.Base != nil { - bfi, err = f.Base.Readdir(-1) - if err != nil { - return nil, err - } - - } - merged, err := merge(lfi, bfi) - if err != nil { - return nil, err - } - f.files = append(f.files, merged...) - } - files := f.files[f.off:] - - if c <= 0 { - return files, nil - } - - if len(files) == 0 { - return nil, io.EOF - } - - if c > len(files) { - c = len(files) - } - - defer func() { f.off += c }() - return files[:c], nil -} - -func (f *UnionFile) Readdirnames(c int) ([]string, error) { - rfi, err := f.Readdir(c) - if err != nil { - return nil, err - } - var names []string - for _, fi := range rfi { - names = append(names, fi.Name()) - } - return names, nil -} - -func (f *UnionFile) Stat() (os.FileInfo, error) { - if f.Layer != nil { - return f.Layer.Stat() - } - if f.Base != nil { - return f.Base.Stat() - } - return nil, BADFD -} - -func (f *UnionFile) Sync() (err error) { - if f.Layer != nil { - err = f.Layer.Sync() - if err == nil && f.Base != nil { - err = f.Base.Sync() - } - return err - } - if f.Base != nil { - return f.Base.Sync() - } - return BADFD -} - -func (f *UnionFile) Truncate(s int64) (err error) { - if f.Layer != nil { - err = f.Layer.Truncate(s) - if err == nil && f.Base != nil { - err = f.Base.Truncate(s) - } - return err - } - if f.Base != nil { - return f.Base.Truncate(s) - } - return BADFD -} - -func (f *UnionFile) WriteString(s string) (n int, err error) { - if f.Layer != nil { - n, err = f.Layer.WriteString(s) - if err == nil && f.Base != nil { - _, err = f.Base.WriteString(s) - } - return n, err - } - if f.Base != nil { - return f.Base.WriteString(s) - } - return 0, BADFD -} - -func copyToLayer(base Fs, layer Fs, name string) error { - bfh, err := base.Open(name) - if err != nil { - return err - } - defer bfh.Close() - - // First make sure the directory exists - exists, err := Exists(layer, filepath.Dir(name)) - if err != nil { - return err - } - if !exists { - err = layer.MkdirAll(filepath.Dir(name), 0777) // FIXME? 
- if err != nil { - return err - } - } - - // Create the file on the overlay - lfh, err := layer.Create(name) - if err != nil { - return err - } - n, err := io.Copy(lfh, bfh) - if err != nil { - // If anything fails, clean up the file - layer.Remove(name) - lfh.Close() - return err - } - - bfi, err := bfh.Stat() - if err != nil || bfi.Size() != n { - layer.Remove(name) - lfh.Close() - return syscall.EIO - } - - err = lfh.Close() - if err != nil { - layer.Remove(name) - lfh.Close() - return err - } - return layer.Chtimes(name, bfi.ModTime(), bfi.ModTime()) -} diff --git a/vendor/github.com/spf13/afero/util.go b/vendor/github.com/spf13/afero/util.go deleted file mode 100644 index 4f253f48..00000000 --- a/vendor/github.com/spf13/afero/util.go +++ /dev/null @@ -1,330 +0,0 @@ -// Copyright ©2015 Steve Francia -// Portions Copyright ©2015 The Hugo Authors -// Portions Copyright 2016-present Bjørn Erik Pedersen -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "bytes" - "fmt" - "io" - "os" - "path/filepath" - "strings" - "unicode" - - "golang.org/x/text/transform" - "golang.org/x/text/unicode/norm" -) - -// Filepath separator defined by os.Separator. -const FilePathSeparator = string(filepath.Separator) - -// Takes a reader and a path and writes the content -func (a Afero) WriteReader(path string, r io.Reader) (err error) { - return WriteReader(a.Fs, path, r) -} - -func WriteReader(fs Fs, path string, r io.Reader) (err error) { - dir, _ := filepath.Split(path) - ospath := filepath.FromSlash(dir) - - if ospath != "" { - err = fs.MkdirAll(ospath, 0777) // rwx, rw, r - if err != nil { - if err != os.ErrExist { - return err - } - } - } - - file, err := fs.Create(path) - if err != nil { - return - } - defer file.Close() - - _, err = io.Copy(file, r) - return -} - -// Same as WriteReader but checks to see if file/directory already exists. 
-func (a Afero) SafeWriteReader(path string, r io.Reader) (err error) { - return SafeWriteReader(a.Fs, path, r) -} - -func SafeWriteReader(fs Fs, path string, r io.Reader) (err error) { - dir, _ := filepath.Split(path) - ospath := filepath.FromSlash(dir) - - if ospath != "" { - err = fs.MkdirAll(ospath, 0777) // rwx, rw, r - if err != nil { - return - } - } - - exists, err := Exists(fs, path) - if err != nil { - return - } - if exists { - return fmt.Errorf("%v already exists", path) - } - - file, err := fs.Create(path) - if err != nil { - return - } - defer file.Close() - - _, err = io.Copy(file, r) - return -} - -func (a Afero) GetTempDir(subPath string) string { - return GetTempDir(a.Fs, subPath) -} - -// GetTempDir returns the default temp directory with trailing slash -// if subPath is not empty then it will be created recursively with mode 777 rwx rwx rwx -func GetTempDir(fs Fs, subPath string) string { - addSlash := func(p string) string { - if FilePathSeparator != p[len(p)-1:] { - p = p + FilePathSeparator - } - return p - } - dir := addSlash(os.TempDir()) - - if subPath != "" { - // preserve windows backslash :-( - if FilePathSeparator == "\\" { - subPath = strings.Replace(subPath, "\\", "____", -1) - } - dir = dir + UnicodeSanitize((subPath)) - if FilePathSeparator == "\\" { - dir = strings.Replace(dir, "____", "\\", -1) - } - - if exists, _ := Exists(fs, dir); exists { - return addSlash(dir) - } - - err := fs.MkdirAll(dir, 0777) - if err != nil { - panic(err) - } - dir = addSlash(dir) - } - return dir -} - -// Rewrite string to remove non-standard path characters -func UnicodeSanitize(s string) string { - source := []rune(s) - target := make([]rune, 0, len(source)) - - for _, r := range source { - if unicode.IsLetter(r) || - unicode.IsDigit(r) || - unicode.IsMark(r) || - r == '.' || - r == '/' || - r == '\\' || - r == '_' || - r == '-' || - r == '%' || - r == ' ' || - r == '#' { - target = append(target, r) - } - } - - return string(target) -} - -// Transform characters with accents into plain forms. -func NeuterAccents(s string) string { - t := transform.Chain(norm.NFD, transform.RemoveFunc(isMn), norm.NFC) - result, _, _ := transform.String(t, string(s)) - - return result -} - -func isMn(r rune) bool { - return unicode.Is(unicode.Mn, r) // Mn: nonspacing marks -} - -func (a Afero) FileContainsBytes(filename string, subslice []byte) (bool, error) { - return FileContainsBytes(a.Fs, filename, subslice) -} - -// Check if a file contains a specified byte slice. -func FileContainsBytes(fs Fs, filename string, subslice []byte) (bool, error) { - f, err := fs.Open(filename) - if err != nil { - return false, err - } - defer f.Close() - - return readerContainsAny(f, subslice), nil -} - -func (a Afero) FileContainsAnyBytes(filename string, subslices [][]byte) (bool, error) { - return FileContainsAnyBytes(a.Fs, filename, subslices) -} - -// Check if a file contains any of the specified byte slices. -func FileContainsAnyBytes(fs Fs, filename string, subslices [][]byte) (bool, error) { - f, err := fs.Open(filename) - if err != nil { - return false, err - } - defer f.Close() - - return readerContainsAny(f, subslices...), nil -} - -// readerContains reports whether any of the subslices is within r. 
-func readerContainsAny(r io.Reader, subslices ...[]byte) bool { - - if r == nil || len(subslices) == 0 { - return false - } - - largestSlice := 0 - - for _, sl := range subslices { - if len(sl) > largestSlice { - largestSlice = len(sl) - } - } - - if largestSlice == 0 { - return false - } - - bufflen := largestSlice * 4 - halflen := bufflen / 2 - buff := make([]byte, bufflen) - var err error - var n, i int - - for { - i++ - if i == 1 { - n, err = io.ReadAtLeast(r, buff[:halflen], halflen) - } else { - if i != 2 { - // shift left to catch overlapping matches - copy(buff[:], buff[halflen:]) - } - n, err = io.ReadAtLeast(r, buff[halflen:], halflen) - } - - if n > 0 { - for _, sl := range subslices { - if bytes.Contains(buff, sl) { - return true - } - } - } - - if err != nil { - break - } - } - return false -} - -func (a Afero) DirExists(path string) (bool, error) { - return DirExists(a.Fs, path) -} - -// DirExists checks if a path exists and is a directory. -func DirExists(fs Fs, path string) (bool, error) { - fi, err := fs.Stat(path) - if err == nil && fi.IsDir() { - return true, nil - } - if os.IsNotExist(err) { - return false, nil - } - return false, err -} - -func (a Afero) IsDir(path string) (bool, error) { - return IsDir(a.Fs, path) -} - -// IsDir checks if a given path is a directory. -func IsDir(fs Fs, path string) (bool, error) { - fi, err := fs.Stat(path) - if err != nil { - return false, err - } - return fi.IsDir(), nil -} - -func (a Afero) IsEmpty(path string) (bool, error) { - return IsEmpty(a.Fs, path) -} - -// IsEmpty checks if a given file or directory is empty. -func IsEmpty(fs Fs, path string) (bool, error) { - if b, _ := Exists(fs, path); !b { - return false, fmt.Errorf("%q path does not exist", path) - } - fi, err := fs.Stat(path) - if err != nil { - return false, err - } - if fi.IsDir() { - f, err := fs.Open(path) - if err != nil { - return false, err - } - defer f.Close() - list, err := f.Readdir(-1) - return len(list) == 0, nil - } - return fi.Size() == 0, nil -} - -func (a Afero) Exists(path string) (bool, error) { - return Exists(a.Fs, path) -} - -// Check if a file or directory exists. 
-func Exists(fs Fs, path string) (bool, error) { - _, err := fs.Stat(path) - if err == nil { - return true, nil - } - if os.IsNotExist(err) { - return false, nil - } - return false, err -} - -func FullBaseFsPath(basePathFs *BasePathFs, relativePath string) string { - combinedPath := filepath.Join(basePathFs.path, relativePath) - if parent, ok := basePathFs.source.(*BasePathFs); ok { - return FullBaseFsPath(parent, combinedPath) - } - - return combinedPath -} diff --git a/vendor/github.com/spf13/cast/.gitignore b/vendor/github.com/spf13/cast/.gitignore deleted file mode 100644 index 53053a8a..00000000 --- a/vendor/github.com/spf13/cast/.gitignore +++ /dev/null @@ -1,25 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test - -*.bench diff --git a/vendor/github.com/spf13/cast/LICENSE b/vendor/github.com/spf13/cast/LICENSE deleted file mode 100644 index 4527efb9..00000000 --- a/vendor/github.com/spf13/cast/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Steve Francia - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/spf13/cast/Makefile b/vendor/github.com/spf13/cast/Makefile deleted file mode 100644 index f01a5dbb..00000000 --- a/vendor/github.com/spf13/cast/Makefile +++ /dev/null @@ -1,40 +0,0 @@ -GOVERSION := $(shell go version | cut -d ' ' -f 3 | cut -d '.' -f 2) - -.PHONY: check fmt lint test test-race vet test-cover-html help -.DEFAULT_GOAL := help - -check: test-race fmt vet lint ## Run tests and linters - -test: ## Run tests - go test ./... - -test-race: ## Run tests with race detector - go test -race ./... - -fmt: ## Run gofmt linter -ifeq "$(GOVERSION)" "12" - @for d in `go list` ; do \ - if [ "`gofmt -l -s $$GOPATH/src/$$d | tee /dev/stderr`" ]; then \ - echo "^ improperly formatted go files" && echo && exit 1; \ - fi \ - done -endif - -lint: ## Run golint linter - @for d in `go list` ; do \ - if [ "`golint $$d | tee /dev/stderr`" ]; then \ - echo "^ golint errors!" && echo && exit 1; \ - fi \ - done - -vet: ## Run go vet linter - @if [ "`go vet | tee /dev/stderr`" ]; then \ - echo "^ go vet errors!" 
&& echo && exit 1; \ - fi - -test-cover-html: ## Generate test coverage report - go test -coverprofile=coverage.out -covermode=count - go tool cover -func=coverage.out - -help: - @grep -E '^[a-zA-Z0-9_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' diff --git a/vendor/github.com/spf13/cast/README.md b/vendor/github.com/spf13/cast/README.md deleted file mode 100644 index 120a5734..00000000 --- a/vendor/github.com/spf13/cast/README.md +++ /dev/null @@ -1,75 +0,0 @@ -cast -==== -[![GoDoc](https://godoc.org/github.com/spf13/cast?status.svg)](https://godoc.org/github.com/spf13/cast) -[![Build Status](https://github.com/spf13/cast/actions/workflows/go.yml/badge.svg)](https://github.com/spf13/cast/actions/workflows/go.yml) -[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/cast)](https://goreportcard.com/report/github.com/spf13/cast) - -Easy and safe casting from one type to another in Go - -Don’t Panic! ... Cast - -## What is Cast? - -Cast is a library to convert between different go types in a consistent and easy way. - -Cast provides simple functions to easily convert a number to a string, an -interface into a bool, etc. Cast does this intelligently when an obvious -conversion is possible. It doesn’t make any attempts to guess what you meant, -for example you can only convert a string to an int when it is a string -representation of an int such as “8”. Cast was developed for use in -[Hugo](http://hugo.spf13.com), a website engine which uses YAML, TOML or JSON -for meta data. - -## Why use Cast? - -When working with dynamic data in Go you often need to cast or convert the data -from one type into another. Cast goes beyond just using type assertion (though -it uses that when possible) to provide a very straightforward and convenient -library. - -If you are working with interfaces to handle things like dynamic content -you’ll need an easy way to convert an interface into a given type. This -is the library for you. - -If you are taking in data from YAML, TOML or JSON or other formats which lack -full types, then Cast is the library for you. - -## Usage - -Cast provides a handful of To_____ methods. These methods will always return -the desired type. **If input is provided that will not convert to that type, the -0 or nil value for that type will be returned**. - -Cast also provides identical methods To_____E. These return the same result as -the To_____ methods, plus an additional error which tells you if it successfully -converted. Using these methods you can tell the difference between when the -input matched the zero value or when the conversion failed and the zero value -was returned. - -The following examples are merely a sample of what is available. Please review -the code for a complete set.
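The zero-value caveat in the README above is exactly why the `To_____E` variants exist, though the deleted README stops short of showing one in use. A minimal sketch of the distinction, assuming the vendored `spf13/cast` API quoted in this diff (`ToInt`/`ToIntE`):

```go
package main

import (
	"fmt"

	"github.com/spf13/cast"
)

func main() {
	// ToInt alone cannot distinguish "input was zero" from "cast failed":
	fmt.Println(cast.ToInt("0"))   // 0
	fmt.Println(cast.ToInt("abc")) // also 0

	// ToIntE surfaces the failure as an error, so the two cases separate cleanly.
	if v, err := cast.ToIntE("abc"); err != nil {
		fmt.Println("conversion failed:", err) // unable to cast "abc" of type string to int
	} else {
		fmt.Println("converted:", v)
	}
}
```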
- -### Example ‘ToString’: - - cast.ToString("mayonegg") // "mayonegg" - cast.ToString(8) // "8" - cast.ToString(8.31) // "8.31" - cast.ToString([]byte("one time")) // "one time" - cast.ToString(nil) // "" - - var foo interface{} = "one more time" - cast.ToString(foo) // "one more time" - - -### Example ‘ToInt’: - - cast.ToInt(8) // 8 - cast.ToInt(8.31) // 8 - cast.ToInt("8") // 8 - cast.ToInt(true) // 1 - cast.ToInt(false) // 0 - - var eight interface{} = 8 - cast.ToInt(eight) // 8 - cast.ToInt(nil) // 0 - diff --git a/vendor/github.com/spf13/cast/cast.go b/vendor/github.com/spf13/cast/cast.go deleted file mode 100644 index 0cfe9418..00000000 --- a/vendor/github.com/spf13/cast/cast.go +++ /dev/null @@ -1,176 +0,0 @@ -// Copyright © 2014 Steve Francia . -// -// Use of this source code is governed by an MIT-style -// license that can be found in the LICENSE file. - -// Package cast provides easy and safe casting in Go. -package cast - -import "time" - -// ToBool casts an interface to a bool type. -func ToBool(i interface{}) bool { - v, _ := ToBoolE(i) - return v -} - -// ToTime casts an interface to a time.Time type. -func ToTime(i interface{}) time.Time { - v, _ := ToTimeE(i) - return v -} - -func ToTimeInDefaultLocation(i interface{}, location *time.Location) time.Time { - v, _ := ToTimeInDefaultLocationE(i, location) - return v -} - -// ToDuration casts an interface to a time.Duration type. -func ToDuration(i interface{}) time.Duration { - v, _ := ToDurationE(i) - return v -} - -// ToFloat64 casts an interface to a float64 type. -func ToFloat64(i interface{}) float64 { - v, _ := ToFloat64E(i) - return v -} - -// ToFloat32 casts an interface to a float32 type. -func ToFloat32(i interface{}) float32 { - v, _ := ToFloat32E(i) - return v -} - -// ToInt64 casts an interface to an int64 type. -func ToInt64(i interface{}) int64 { - v, _ := ToInt64E(i) - return v -} - -// ToInt32 casts an interface to an int32 type. -func ToInt32(i interface{}) int32 { - v, _ := ToInt32E(i) - return v -} - -// ToInt16 casts an interface to an int16 type. -func ToInt16(i interface{}) int16 { - v, _ := ToInt16E(i) - return v -} - -// ToInt8 casts an interface to an int8 type. -func ToInt8(i interface{}) int8 { - v, _ := ToInt8E(i) - return v -} - -// ToInt casts an interface to an int type. -func ToInt(i interface{}) int { - v, _ := ToIntE(i) - return v -} - -// ToUint casts an interface to a uint type. -func ToUint(i interface{}) uint { - v, _ := ToUintE(i) - return v -} - -// ToUint64 casts an interface to a uint64 type. -func ToUint64(i interface{}) uint64 { - v, _ := ToUint64E(i) - return v -} - -// ToUint32 casts an interface to a uint32 type. -func ToUint32(i interface{}) uint32 { - v, _ := ToUint32E(i) - return v -} - -// ToUint16 casts an interface to a uint16 type. -func ToUint16(i interface{}) uint16 { - v, _ := ToUint16E(i) - return v -} - -// ToUint8 casts an interface to a uint8 type. -func ToUint8(i interface{}) uint8 { - v, _ := ToUint8E(i) - return v -} - -// ToString casts an interface to a string type. -func ToString(i interface{}) string { - v, _ := ToStringE(i) - return v -} - -// ToStringMapString casts an interface to a map[string]string type. -func ToStringMapString(i interface{}) map[string]string { - v, _ := ToStringMapStringE(i) - return v -} - -// ToStringMapStringSlice casts an interface to a map[string][]string type. 
-func ToStringMapStringSlice(i interface{}) map[string][]string { - v, _ := ToStringMapStringSliceE(i) - return v -} - -// ToStringMapBool casts an interface to a map[string]bool type. -func ToStringMapBool(i interface{}) map[string]bool { - v, _ := ToStringMapBoolE(i) - return v -} - -// ToStringMapInt casts an interface to a map[string]int type. -func ToStringMapInt(i interface{}) map[string]int { - v, _ := ToStringMapIntE(i) - return v -} - -// ToStringMapInt64 casts an interface to a map[string]int64 type. -func ToStringMapInt64(i interface{}) map[string]int64 { - v, _ := ToStringMapInt64E(i) - return v -} - -// ToStringMap casts an interface to a map[string]interface{} type. -func ToStringMap(i interface{}) map[string]interface{} { - v, _ := ToStringMapE(i) - return v -} - -// ToSlice casts an interface to a []interface{} type. -func ToSlice(i interface{}) []interface{} { - v, _ := ToSliceE(i) - return v -} - -// ToBoolSlice casts an interface to a []bool type. -func ToBoolSlice(i interface{}) []bool { - v, _ := ToBoolSliceE(i) - return v -} - -// ToStringSlice casts an interface to a []string type. -func ToStringSlice(i interface{}) []string { - v, _ := ToStringSliceE(i) - return v -} - -// ToIntSlice casts an interface to a []int type. -func ToIntSlice(i interface{}) []int { - v, _ := ToIntSliceE(i) - return v -} - -// ToDurationSlice casts an interface to a []time.Duration type. -func ToDurationSlice(i interface{}) []time.Duration { - v, _ := ToDurationSliceE(i) - return v -} diff --git a/vendor/github.com/spf13/cast/caste.go b/vendor/github.com/spf13/cast/caste.go deleted file mode 100644 index c04af6a9..00000000 --- a/vendor/github.com/spf13/cast/caste.go +++ /dev/null @@ -1,1337 +0,0 @@ -// Copyright © 2014 Steve Francia . -// -// Use of this source code is governed by an MIT-style -// license that can be found in the LICENSE file. - -package cast - -import ( - "encoding/json" - "errors" - "fmt" - "html/template" - "reflect" - "strconv" - "strings" - "time" -) - -var errNegativeNotAllowed = errors.New("unable to cast negative value") - -// ToTimeE casts an interface to a time.Time type. -func ToTimeE(i interface{}) (tim time.Time, err error) { - return ToTimeInDefaultLocationE(i, time.UTC) -} - -// ToTimeInDefaultLocationE casts an empty interface to time.Time, -// interpreting inputs without a timezone to be in the given location, -// or the local timezone if nil. -func ToTimeInDefaultLocationE(i interface{}, location *time.Location) (tim time.Time, err error) { - i = indirect(i) - - switch v := i.(type) { - case time.Time: - return v, nil - case string: - return StringToDateInDefaultLocation(v, location) - case int: - return time.Unix(int64(v), 0), nil - case int64: - return time.Unix(v, 0), nil - case int32: - return time.Unix(int64(v), 0), nil - case uint: - return time.Unix(int64(v), 0), nil - case uint64: - return time.Unix(int64(v), 0), nil - case uint32: - return time.Unix(int64(v), 0), nil - default: - return time.Time{}, fmt.Errorf("unable to cast %#v of type %T to Time", i, i) - } -} - -// ToDurationE casts an interface to a time.Duration type. 
-func ToDurationE(i interface{}) (d time.Duration, err error) { - i = indirect(i) - - switch s := i.(type) { - case time.Duration: - return s, nil - case int, int64, int32, int16, int8, uint, uint64, uint32, uint16, uint8: - d = time.Duration(ToInt64(s)) - return - case float32, float64: - d = time.Duration(ToFloat64(s)) - return - case string: - if strings.ContainsAny(s, "nsuµmh") { - d, err = time.ParseDuration(s) - } else { - d, err = time.ParseDuration(s + "ns") - } - return - default: - err = fmt.Errorf("unable to cast %#v of type %T to Duration", i, i) - return - } -} - -// ToBoolE casts an interface to a bool type. -func ToBoolE(i interface{}) (bool, error) { - i = indirect(i) - - switch b := i.(type) { - case bool: - return b, nil - case nil: - return false, nil - case int: - if i.(int) != 0 { - return true, nil - } - return false, nil - case string: - return strconv.ParseBool(i.(string)) - default: - return false, fmt.Errorf("unable to cast %#v of type %T to bool", i, i) - } -} - -// ToFloat64E casts an interface to a float64 type. -func ToFloat64E(i interface{}) (float64, error) { - i = indirect(i) - - switch s := i.(type) { - case float64: - return s, nil - case float32: - return float64(s), nil - case int: - return float64(s), nil - case int64: - return float64(s), nil - case int32: - return float64(s), nil - case int16: - return float64(s), nil - case int8: - return float64(s), nil - case uint: - return float64(s), nil - case uint64: - return float64(s), nil - case uint32: - return float64(s), nil - case uint16: - return float64(s), nil - case uint8: - return float64(s), nil - case string: - v, err := strconv.ParseFloat(s, 64) - if err == nil { - return v, nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i) - case bool: - if s { - return 1, nil - } - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i) - } -} - -// ToFloat32E casts an interface to a float32 type. -func ToFloat32E(i interface{}) (float32, error) { - i = indirect(i) - - switch s := i.(type) { - case float64: - return float32(s), nil - case float32: - return s, nil - case int: - return float32(s), nil - case int64: - return float32(s), nil - case int32: - return float32(s), nil - case int16: - return float32(s), nil - case int8: - return float32(s), nil - case uint: - return float32(s), nil - case uint64: - return float32(s), nil - case uint32: - return float32(s), nil - case uint16: - return float32(s), nil - case uint8: - return float32(s), nil - case string: - v, err := strconv.ParseFloat(s, 32) - if err == nil { - return float32(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i) - case bool: - if s { - return 1, nil - } - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i) - } -} - -// ToInt64E casts an interface to an int64 type. 
-func ToInt64E(i interface{}) (int64, error) { - i = indirect(i) - - switch s := i.(type) { - case int: - return int64(s), nil - case int64: - return s, nil - case int32: - return int64(s), nil - case int16: - return int64(s), nil - case int8: - return int64(s), nil - case uint: - return int64(s), nil - case uint64: - return int64(s), nil - case uint32: - return int64(s), nil - case uint16: - return int64(s), nil - case uint8: - return int64(s), nil - case float64: - return int64(s), nil - case float32: - return int64(s), nil - case string: - v, err := strconv.ParseInt(s, 0, 0) - if err == nil { - return v, nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to int64", i, i) - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to int64", i, i) - } -} - -// ToInt32E casts an interface to an int32 type. -func ToInt32E(i interface{}) (int32, error) { - i = indirect(i) - - switch s := i.(type) { - case int: - return int32(s), nil - case int64: - return int32(s), nil - case int32: - return s, nil - case int16: - return int32(s), nil - case int8: - return int32(s), nil - case uint: - return int32(s), nil - case uint64: - return int32(s), nil - case uint32: - return int32(s), nil - case uint16: - return int32(s), nil - case uint8: - return int32(s), nil - case float64: - return int32(s), nil - case float32: - return int32(s), nil - case string: - v, err := strconv.ParseInt(s, 0, 0) - if err == nil { - return int32(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to int32", i, i) - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to int32", i, i) - } -} - -// ToInt16E casts an interface to an int16 type. -func ToInt16E(i interface{}) (int16, error) { - i = indirect(i) - - switch s := i.(type) { - case int: - return int16(s), nil - case int64: - return int16(s), nil - case int32: - return int16(s), nil - case int16: - return s, nil - case int8: - return int16(s), nil - case uint: - return int16(s), nil - case uint64: - return int16(s), nil - case uint32: - return int16(s), nil - case uint16: - return int16(s), nil - case uint8: - return int16(s), nil - case float64: - return int16(s), nil - case float32: - return int16(s), nil - case string: - v, err := strconv.ParseInt(s, 0, 0) - if err == nil { - return int16(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to int16", i, i) - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to int16", i, i) - } -} - -// ToInt8E casts an interface to an int8 type. 
-func ToInt8E(i interface{}) (int8, error) { - i = indirect(i) - - switch s := i.(type) { - case int: - return int8(s), nil - case int64: - return int8(s), nil - case int32: - return int8(s), nil - case int16: - return int8(s), nil - case int8: - return s, nil - case uint: - return int8(s), nil - case uint64: - return int8(s), nil - case uint32: - return int8(s), nil - case uint16: - return int8(s), nil - case uint8: - return int8(s), nil - case float64: - return int8(s), nil - case float32: - return int8(s), nil - case string: - v, err := strconv.ParseInt(s, 0, 0) - if err == nil { - return int8(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to int8", i, i) - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to int8", i, i) - } -} - -// ToIntE casts an interface to an int type. -func ToIntE(i interface{}) (int, error) { - i = indirect(i) - - switch s := i.(type) { - case int: - return s, nil - case int64: - return int(s), nil - case int32: - return int(s), nil - case int16: - return int(s), nil - case int8: - return int(s), nil - case uint: - return int(s), nil - case uint64: - return int(s), nil - case uint32: - return int(s), nil - case uint16: - return int(s), nil - case uint8: - return int(s), nil - case float64: - return int(s), nil - case float32: - return int(s), nil - case string: - v, err := strconv.ParseInt(s, 0, 0) - if err == nil { - return int(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to int", i, i) - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to int", i, i) - } -} - -// ToUintE casts an interface to a uint type. -func ToUintE(i interface{}) (uint, error) { - i = indirect(i) - - switch s := i.(type) { - case string: - v, err := strconv.ParseUint(s, 0, 0) - if err == nil { - return uint(v), nil - } - return 0, fmt.Errorf("unable to cast %#v to uint: %s", i, err) - case int: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint(s), nil - case int64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint(s), nil - case int32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint(s), nil - case int16: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint(s), nil - case int8: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint(s), nil - case uint: - return s, nil - case uint64: - return uint(s), nil - case uint32: - return uint(s), nil - case uint16: - return uint(s), nil - case uint8: - return uint(s), nil - case float64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint(s), nil - case float32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint(s), nil - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to uint", i, i) - } -} - -// ToUint64E casts an interface to a uint64 type. 
-func ToUint64E(i interface{}) (uint64, error) { - i = indirect(i) - - switch s := i.(type) { - case string: - v, err := strconv.ParseUint(s, 0, 64) - if err == nil { - return v, nil - } - return 0, fmt.Errorf("unable to cast %#v to uint64: %s", i, err) - case int: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint64(s), nil - case int64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint64(s), nil - case int32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint64(s), nil - case int16: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint64(s), nil - case int8: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint64(s), nil - case uint: - return uint64(s), nil - case uint64: - return s, nil - case uint32: - return uint64(s), nil - case uint16: - return uint64(s), nil - case uint8: - return uint64(s), nil - case float32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint64(s), nil - case float64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint64(s), nil - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to uint64", i, i) - } -} - -// ToUint32E casts an interface to a uint32 type. -func ToUint32E(i interface{}) (uint32, error) { - i = indirect(i) - - switch s := i.(type) { - case string: - v, err := strconv.ParseUint(s, 0, 32) - if err == nil { - return uint32(v), nil - } - return 0, fmt.Errorf("unable to cast %#v to uint32: %s", i, err) - case int: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint32(s), nil - case int64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint32(s), nil - case int32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint32(s), nil - case int16: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint32(s), nil - case int8: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint32(s), nil - case uint: - return uint32(s), nil - case uint64: - return uint32(s), nil - case uint32: - return s, nil - case uint16: - return uint32(s), nil - case uint8: - return uint32(s), nil - case float64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint32(s), nil - case float32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint32(s), nil - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to uint32", i, i) - } -} - -// ToUint16E casts an interface to a uint16 type. 
-func ToUint16E(i interface{}) (uint16, error) { - i = indirect(i) - - switch s := i.(type) { - case string: - v, err := strconv.ParseUint(s, 0, 16) - if err == nil { - return uint16(v), nil - } - return 0, fmt.Errorf("unable to cast %#v to uint16: %s", i, err) - case int: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint16(s), nil - case int64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint16(s), nil - case int32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint16(s), nil - case int16: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint16(s), nil - case int8: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint16(s), nil - case uint: - return uint16(s), nil - case uint64: - return uint16(s), nil - case uint32: - return uint16(s), nil - case uint16: - return s, nil - case uint8: - return uint16(s), nil - case float64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint16(s), nil - case float32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint16(s), nil - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to uint16", i, i) - } -} - -// ToUint8E casts an interface to a uint type. -func ToUint8E(i interface{}) (uint8, error) { - i = indirect(i) - - switch s := i.(type) { - case string: - v, err := strconv.ParseUint(s, 0, 8) - if err == nil { - return uint8(v), nil - } - return 0, fmt.Errorf("unable to cast %#v to uint8: %s", i, err) - case int: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint8(s), nil - case int64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint8(s), nil - case int32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint8(s), nil - case int16: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint8(s), nil - case int8: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint8(s), nil - case uint: - return uint8(s), nil - case uint64: - return uint8(s), nil - case uint32: - return uint8(s), nil - case uint16: - return uint8(s), nil - case uint8: - return s, nil - case float64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint8(s), nil - case float32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint8(s), nil - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to uint8", i, i) - } -} - -// From html/template/content.go -// Copyright 2011 The Go Authors. All rights reserved. -// indirect returns the value, after dereferencing as many times -// as necessary to reach the base type (or nil). -func indirect(a interface{}) interface{} { - if a == nil { - return nil - } - if t := reflect.TypeOf(a); t.Kind() != reflect.Ptr { - // Avoid creating a reflect.Value if it's not a pointer. - return a - } - v := reflect.ValueOf(a) - for v.Kind() == reflect.Ptr && !v.IsNil() { - v = v.Elem() - } - return v.Interface() -} - -// From html/template/content.go -// Copyright 2011 The Go Authors. All rights reserved. 
-// indirectToStringerOrError returns the value, after dereferencing as many times -// as necessary to reach the base type (or nil) or an implementation of fmt.Stringer -// or error, -func indirectToStringerOrError(a interface{}) interface{} { - if a == nil { - return nil - } - - var errorType = reflect.TypeOf((*error)(nil)).Elem() - var fmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem() - - v := reflect.ValueOf(a) - for !v.Type().Implements(fmtStringerType) && !v.Type().Implements(errorType) && v.Kind() == reflect.Ptr && !v.IsNil() { - v = v.Elem() - } - return v.Interface() -} - -// ToStringE casts an interface to a string type. -func ToStringE(i interface{}) (string, error) { - i = indirectToStringerOrError(i) - - switch s := i.(type) { - case string: - return s, nil - case bool: - return strconv.FormatBool(s), nil - case float64: - return strconv.FormatFloat(s, 'f', -1, 64), nil - case float32: - return strconv.FormatFloat(float64(s), 'f', -1, 32), nil - case int: - return strconv.Itoa(s), nil - case int64: - return strconv.FormatInt(s, 10), nil - case int32: - return strconv.Itoa(int(s)), nil - case int16: - return strconv.FormatInt(int64(s), 10), nil - case int8: - return strconv.FormatInt(int64(s), 10), nil - case uint: - return strconv.FormatUint(uint64(s), 10), nil - case uint64: - return strconv.FormatUint(uint64(s), 10), nil - case uint32: - return strconv.FormatUint(uint64(s), 10), nil - case uint16: - return strconv.FormatUint(uint64(s), 10), nil - case uint8: - return strconv.FormatUint(uint64(s), 10), nil - case []byte: - return string(s), nil - case template.HTML: - return string(s), nil - case template.URL: - return string(s), nil - case template.JS: - return string(s), nil - case template.CSS: - return string(s), nil - case template.HTMLAttr: - return string(s), nil - case nil: - return "", nil - case fmt.Stringer: - return s.String(), nil - case error: - return s.Error(), nil - default: - return "", fmt.Errorf("unable to cast %#v of type %T to string", i, i) - } -} - -// ToStringMapStringE casts an interface to a map[string]string type. -func ToStringMapStringE(i interface{}) (map[string]string, error) { - var m = map[string]string{} - - switch v := i.(type) { - case map[string]string: - return v, nil - case map[string]interface{}: - for k, val := range v { - m[ToString(k)] = ToString(val) - } - return m, nil - case map[interface{}]string: - for k, val := range v { - m[ToString(k)] = ToString(val) - } - return m, nil - case map[interface{}]interface{}: - for k, val := range v { - m[ToString(k)] = ToString(val) - } - return m, nil - case string: - err := jsonStringToObject(v, &m) - return m, err - default: - return m, fmt.Errorf("unable to cast %#v of type %T to map[string]string", i, i) - } -} - -// ToStringMapStringSliceE casts an interface to a map[string][]string type. 
-func ToStringMapStringSliceE(i interface{}) (map[string][]string, error) { - var m = map[string][]string{} - - switch v := i.(type) { - case map[string][]string: - return v, nil - case map[string][]interface{}: - for k, val := range v { - m[ToString(k)] = ToStringSlice(val) - } - return m, nil - case map[string]string: - for k, val := range v { - m[ToString(k)] = []string{val} - } - case map[string]interface{}: - for k, val := range v { - switch vt := val.(type) { - case []interface{}: - m[ToString(k)] = ToStringSlice(vt) - case []string: - m[ToString(k)] = vt - default: - m[ToString(k)] = []string{ToString(val)} - } - } - return m, nil - case map[interface{}][]string: - for k, val := range v { - m[ToString(k)] = ToStringSlice(val) - } - return m, nil - case map[interface{}]string: - for k, val := range v { - m[ToString(k)] = ToStringSlice(val) - } - return m, nil - case map[interface{}][]interface{}: - for k, val := range v { - m[ToString(k)] = ToStringSlice(val) - } - return m, nil - case map[interface{}]interface{}: - for k, val := range v { - key, err := ToStringE(k) - if err != nil { - return m, fmt.Errorf("unable to cast %#v of type %T to map[string][]string", i, i) - } - value, err := ToStringSliceE(val) - if err != nil { - return m, fmt.Errorf("unable to cast %#v of type %T to map[string][]string", i, i) - } - m[key] = value - } - case string: - err := jsonStringToObject(v, &m) - return m, err - default: - return m, fmt.Errorf("unable to cast %#v of type %T to map[string][]string", i, i) - } - return m, nil -} - -// ToStringMapBoolE casts an interface to a map[string]bool type. -func ToStringMapBoolE(i interface{}) (map[string]bool, error) { - var m = map[string]bool{} - - switch v := i.(type) { - case map[interface{}]interface{}: - for k, val := range v { - m[ToString(k)] = ToBool(val) - } - return m, nil - case map[string]interface{}: - for k, val := range v { - m[ToString(k)] = ToBool(val) - } - return m, nil - case map[string]bool: - return v, nil - case string: - err := jsonStringToObject(v, &m) - return m, err - default: - return m, fmt.Errorf("unable to cast %#v of type %T to map[string]bool", i, i) - } -} - -// ToStringMapE casts an interface to a map[string]interface{} type. -func ToStringMapE(i interface{}) (map[string]interface{}, error) { - var m = map[string]interface{}{} - - switch v := i.(type) { - case map[interface{}]interface{}: - for k, val := range v { - m[ToString(k)] = val - } - return m, nil - case map[string]interface{}: - return v, nil - case string: - err := jsonStringToObject(v, &m) - return m, err - default: - return m, fmt.Errorf("unable to cast %#v of type %T to map[string]interface{}", i, i) - } -} - -// ToStringMapIntE casts an interface to a map[string]int{} type. 
-func ToStringMapIntE(i interface{}) (map[string]int, error) { - var m = map[string]int{} - if i == nil { - return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int", i, i) - } - - switch v := i.(type) { - case map[interface{}]interface{}: - for k, val := range v { - m[ToString(k)] = ToInt(val) - } - return m, nil - case map[string]interface{}: - for k, val := range v { - m[k] = ToInt(val) - } - return m, nil - case map[string]int: - return v, nil - case string: - err := jsonStringToObject(v, &m) - return m, err - } - - if reflect.TypeOf(i).Kind() != reflect.Map { - return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int", i, i) - } - - mVal := reflect.ValueOf(m) - v := reflect.ValueOf(i) - for _, keyVal := range v.MapKeys() { - val, err := ToIntE(v.MapIndex(keyVal).Interface()) - if err != nil { - return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int", i, i) - } - mVal.SetMapIndex(keyVal, reflect.ValueOf(val)) - } - return m, nil -} - -// ToStringMapInt64E casts an interface to a map[string]int64{} type. -func ToStringMapInt64E(i interface{}) (map[string]int64, error) { - var m = map[string]int64{} - if i == nil { - return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int64", i, i) - } - - switch v := i.(type) { - case map[interface{}]interface{}: - for k, val := range v { - m[ToString(k)] = ToInt64(val) - } - return m, nil - case map[string]interface{}: - for k, val := range v { - m[k] = ToInt64(val) - } - return m, nil - case map[string]int64: - return v, nil - case string: - err := jsonStringToObject(v, &m) - return m, err - } - - if reflect.TypeOf(i).Kind() != reflect.Map { - return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int64", i, i) - } - mVal := reflect.ValueOf(m) - v := reflect.ValueOf(i) - for _, keyVal := range v.MapKeys() { - val, err := ToInt64E(v.MapIndex(keyVal).Interface()) - if err != nil { - return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int64", i, i) - } - mVal.SetMapIndex(keyVal, reflect.ValueOf(val)) - } - return m, nil -} - -// ToSliceE casts an interface to a []interface{} type. -func ToSliceE(i interface{}) ([]interface{}, error) { - var s []interface{} - - switch v := i.(type) { - case []interface{}: - return append(s, v...), nil - case []map[string]interface{}: - for _, u := range v { - s = append(s, u) - } - return s, nil - default: - return s, fmt.Errorf("unable to cast %#v of type %T to []interface{}", i, i) - } -} - -// ToBoolSliceE casts an interface to a []bool type. -func ToBoolSliceE(i interface{}) ([]bool, error) { - if i == nil { - return []bool{}, fmt.Errorf("unable to cast %#v of type %T to []bool", i, i) - } - - switch v := i.(type) { - case []bool: - return v, nil - } - - kind := reflect.TypeOf(i).Kind() - switch kind { - case reflect.Slice, reflect.Array: - s := reflect.ValueOf(i) - a := make([]bool, s.Len()) - for j := 0; j < s.Len(); j++ { - val, err := ToBoolE(s.Index(j).Interface()) - if err != nil { - return []bool{}, fmt.Errorf("unable to cast %#v of type %T to []bool", i, i) - } - a[j] = val - } - return a, nil - default: - return []bool{}, fmt.Errorf("unable to cast %#v of type %T to []bool", i, i) - } -} - -// ToStringSliceE casts an interface to a []string type. 
-func ToStringSliceE(i interface{}) ([]string, error) { - var a []string - - switch v := i.(type) { - case []interface{}: - for _, u := range v { - a = append(a, ToString(u)) - } - return a, nil - case []string: - return v, nil - case []int8: - for _, u := range v { - a = append(a, ToString(u)) - } - return a, nil - case []int: - for _, u := range v { - a = append(a, ToString(u)) - } - return a, nil - case []int32: - for _, u := range v { - a = append(a, ToString(u)) - } - return a, nil - case []int64: - for _, u := range v { - a = append(a, ToString(u)) - } - return a, nil - case []float32: - for _, u := range v { - a = append(a, ToString(u)) - } - return a, nil - case []float64: - for _, u := range v { - a = append(a, ToString(u)) - } - return a, nil - case string: - return strings.Fields(v), nil - case []error: - for _, err := range i.([]error) { - a = append(a, err.Error()) - } - return a, nil - case interface{}: - str, err := ToStringE(v) - if err != nil { - return a, fmt.Errorf("unable to cast %#v of type %T to []string", i, i) - } - return []string{str}, nil - default: - return a, fmt.Errorf("unable to cast %#v of type %T to []string", i, i) - } -} - -// ToIntSliceE casts an interface to a []int type. -func ToIntSliceE(i interface{}) ([]int, error) { - if i == nil { - return []int{}, fmt.Errorf("unable to cast %#v of type %T to []int", i, i) - } - - switch v := i.(type) { - case []int: - return v, nil - } - - kind := reflect.TypeOf(i).Kind() - switch kind { - case reflect.Slice, reflect.Array: - s := reflect.ValueOf(i) - a := make([]int, s.Len()) - for j := 0; j < s.Len(); j++ { - val, err := ToIntE(s.Index(j).Interface()) - if err != nil { - return []int{}, fmt.Errorf("unable to cast %#v of type %T to []int", i, i) - } - a[j] = val - } - return a, nil - default: - return []int{}, fmt.Errorf("unable to cast %#v of type %T to []int", i, i) - } -} - -// ToDurationSliceE casts an interface to a []time.Duration type. -func ToDurationSliceE(i interface{}) ([]time.Duration, error) { - if i == nil { - return []time.Duration{}, fmt.Errorf("unable to cast %#v of type %T to []time.Duration", i, i) - } - - switch v := i.(type) { - case []time.Duration: - return v, nil - } - - kind := reflect.TypeOf(i).Kind() - switch kind { - case reflect.Slice, reflect.Array: - s := reflect.ValueOf(i) - a := make([]time.Duration, s.Len()) - for j := 0; j < s.Len(); j++ { - val, err := ToDurationE(s.Index(j).Interface()) - if err != nil { - return []time.Duration{}, fmt.Errorf("unable to cast %#v of type %T to []time.Duration", i, i) - } - a[j] = val - } - return a, nil - default: - return []time.Duration{}, fmt.Errorf("unable to cast %#v of type %T to []time.Duration", i, i) - } -} - -// StringToDate attempts to parse a string into a time.Time type using a -// predefined list of formats. If no suitable format is found, an error is -// returned. -func StringToDate(s string) (time.Time, error) { - return parseDateWith(s, time.UTC, timeFormats) -} - -// StringToDateInDefaultLocation casts an empty interface to a time.Time, -// interpreting inputs without a timezone to be in the given location, -// or the local timezone if nil. 
-func StringToDateInDefaultLocation(s string, location *time.Location) (time.Time, error) { - return parseDateWith(s, location, timeFormats) -} - -type timeFormatType int - -const ( - timeFormatNoTimezone timeFormatType = iota - timeFormatNamedTimezone - timeFormatNumericTimezone - timeFormatNumericAndNamedTimezone - timeFormatTimeOnly -) - -type timeFormat struct { - format string - typ timeFormatType -} - -func (f timeFormat) hasTimezone() bool { - // We don't include the formats with only named timezones, see - // https://github.com/golang/go/issues/19694#issuecomment-289103522 - return f.typ >= timeFormatNumericTimezone && f.typ <= timeFormatNumericAndNamedTimezone -} - -var ( - timeFormats = []timeFormat{ - timeFormat{time.RFC3339, timeFormatNumericTimezone}, - timeFormat{"2006-01-02T15:04:05", timeFormatNoTimezone}, // iso8601 without timezone - timeFormat{time.RFC1123Z, timeFormatNumericTimezone}, - timeFormat{time.RFC1123, timeFormatNamedTimezone}, - timeFormat{time.RFC822Z, timeFormatNumericTimezone}, - timeFormat{time.RFC822, timeFormatNamedTimezone}, - timeFormat{time.RFC850, timeFormatNamedTimezone}, - timeFormat{"2006-01-02 15:04:05.999999999 -0700 MST", timeFormatNumericAndNamedTimezone}, // Time.String() - timeFormat{"2006-01-02T15:04:05-0700", timeFormatNumericTimezone}, // RFC3339 without timezone hh:mm colon - timeFormat{"2006-01-02 15:04:05Z0700", timeFormatNumericTimezone}, // RFC3339 without T or timezone hh:mm colon - timeFormat{"2006-01-02 15:04:05", timeFormatNoTimezone}, - timeFormat{time.ANSIC, timeFormatNoTimezone}, - timeFormat{time.UnixDate, timeFormatNamedTimezone}, - timeFormat{time.RubyDate, timeFormatNumericTimezone}, - timeFormat{"2006-01-02 15:04:05Z07:00", timeFormatNumericTimezone}, - timeFormat{"2006-01-02", timeFormatNoTimezone}, - timeFormat{"02 Jan 2006", timeFormatNoTimezone}, - timeFormat{"2006-01-02 15:04:05 -07:00", timeFormatNumericTimezone}, - timeFormat{"2006-01-02 15:04:05 -0700", timeFormatNumericTimezone}, - timeFormat{time.Kitchen, timeFormatTimeOnly}, - timeFormat{time.Stamp, timeFormatTimeOnly}, - timeFormat{time.StampMilli, timeFormatTimeOnly}, - timeFormat{time.StampMicro, timeFormatTimeOnly}, - timeFormat{time.StampNano, timeFormatTimeOnly}, - } -) - -func parseDateWith(s string, location *time.Location, formats []timeFormat) (d time.Time, e error) { - - for _, format := range formats { - if d, e = time.Parse(format.format, s); e == nil { - - // Some time formats have a zone name, but no offset, so it gets - // put in that zone name (not the default one passed in to us), but - // without that zone's offset. So set the location manually. - if format.typ <= timeFormatNamedTimezone { - if location == nil { - location = time.Local - } - year, month, day := d.Date() - hour, min, sec := d.Clock() - d = time.Date(year, month, day, hour, min, sec, d.Nanosecond(), location) - } - - return - } - } - return d, fmt.Errorf("unable to parse date: %s", s) -} - -// jsonStringToObject attempts to unmarshall a string as JSON into -// the object passed as pointer. -func jsonStringToObject(s string, v interface{}) error { - data := []byte(s) - return json.Unmarshal(data, v) -} diff --git a/vendor/github.com/spf13/cast/timeformattype_string.go b/vendor/github.com/spf13/cast/timeformattype_string.go deleted file mode 100644 index 1524fc82..00000000 --- a/vendor/github.com/spf13/cast/timeformattype_string.go +++ /dev/null @@ -1,27 +0,0 @@ -// Code generated by "stringer -type timeFormatType"; DO NOT EDIT. 
- -package cast - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[timeFormatNoTimezone-0] - _ = x[timeFormatNamedTimezone-1] - _ = x[timeFormatNumericTimezone-2] - _ = x[timeFormatNumericAndNamedTimezone-3] - _ = x[timeFormatTimeOnly-4] -} - -const _timeFormatType_name = "timeFormatNoTimezonetimeFormatNamedTimezonetimeFormatNumericTimezonetimeFormatNumericAndNamedTimezonetimeFormatTimeOnly" - -var _timeFormatType_index = [...]uint8{0, 20, 43, 68, 101, 119} - -func (i timeFormatType) String() string { - if i < 0 || i >= timeFormatType(len(_timeFormatType_index)-1) { - return "timeFormatType(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _timeFormatType_name[_timeFormatType_index[i]:_timeFormatType_index[i+1]] -} diff --git a/vendor/github.com/spf13/cobra/.gitignore b/vendor/github.com/spf13/cobra/.gitignore deleted file mode 100644 index c7b459e4..00000000 --- a/vendor/github.com/spf13/cobra/.gitignore +++ /dev/null @@ -1,39 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -# Vim files https://github.com/github/gitignore/blob/master/Global/Vim.gitignore -# swap -[._]*.s[a-w][a-z] -[._]s[a-w][a-z] -# session -Session.vim -# temporary -.netrwhist -*~ -# auto-generated tag files -tags - -*.exe -cobra.test -bin - -.idea/ -*.iml diff --git a/vendor/github.com/spf13/cobra/.golangci.yml b/vendor/github.com/spf13/cobra/.golangci.yml deleted file mode 100644 index 0d6e6179..00000000 --- a/vendor/github.com/spf13/cobra/.golangci.yml +++ /dev/null @@ -1,48 +0,0 @@ -run: - deadline: 5m - -linters: - disable-all: true - enable: - #- bodyclose - - deadcode - #- depguard - #- dogsled - #- dupl - - errcheck - #- exhaustive - #- funlen - - gas - #- gochecknoinits - - goconst - #- gocritic - #- gocyclo - #- gofmt - - goimports - - golint - #- gomnd - #- goprintffuncname - #- gosec - #- gosimple - - govet - - ineffassign - - interfacer - #- lll - - maligned - - megacheck - #- misspell - #- nakedret - #- noctx - #- nolintlint - #- rowserrcheck - #- scopelint - #- staticcheck - - structcheck - #- stylecheck - #- typecheck - - unconvert - #- unparam - #- unused - - varcheck - #- whitespace - fast: false diff --git a/vendor/github.com/spf13/cobra/.mailmap b/vendor/github.com/spf13/cobra/.mailmap deleted file mode 100644 index 94ec5306..00000000 --- a/vendor/github.com/spf13/cobra/.mailmap +++ /dev/null @@ -1,3 +0,0 @@ -Steve Francia -Bjørn Erik Pedersen -Fabiano Franz diff --git a/vendor/github.com/spf13/cobra/CHANGELOG.md b/vendor/github.com/spf13/cobra/CHANGELOG.md deleted file mode 100644 index 8a23b4f8..00000000 --- a/vendor/github.com/spf13/cobra/CHANGELOG.md +++ /dev/null @@ -1,51 +0,0 @@ -# Cobra Changelog - -## v1.1.3 - -* **Fix:** release-branch.cobra1.1 only: Revert "Deprecate Go < 1.14" to maintain backward compatibility - -## v1.1.2 - -### Notable Changes - -* Bump license year to 2021 in golden files (#1309) @Bowbaq -* Enhance PowerShell completion with custom comp (#1208) @Luap99 -* Update gopkg.in/yaml.v2 to v2.4.0: The previous breaking change in yaml.v2 v2.3.0 has been reverted, see go-yaml/yaml#670 -* Documentation readability improvements (#1228 etc.) @zaataylor etc. 
-* Use golangci-lint: Repair warnings and errors resulting from linting (#1044) @umarcor - -## v1.1.1 - -* **Fix:** yaml.v2 2.3.0 contained a unintended breaking change. This release reverts to yaml.v2 v2.2.8 which has recent critical CVE fixes, but does not have the breaking changes. See https://github.com/spf13/cobra/pull/1259 for context. -* **Fix:** correct internal formatting for go-md2man v2 (which caused man page generation to be broken). See https://github.com/spf13/cobra/issues/1049 for context. - -## v1.1.0 - -### Notable Changes - -* Extend Go completions and revamp zsh comp (#1070) -* Fix man page doc generation - no auto generated tag when `cmd.DisableAutoGenTag = true` (#1104) @jpmcb -* Add completion for help command (#1136) -* Complete subcommands when TraverseChildren is set (#1171) -* Fix stderr printing functions (#894) -* fix: fish output redirection (#1247) - -## v1.0.0 - -Announcing v1.0.0 of Cobra. 🎉 - -### Notable Changes -* Fish completion (including support for Go custom completion) @marckhouzam -* API (urgent): Rename BashCompDirectives to ShellCompDirectives @marckhouzam -* Remove/replace SetOutput on Command - deprecated @jpmcb -* add support for autolabel stale PR @xchapter7x -* Add Labeler Actions @xchapter7x -* Custom completions coded in Go (instead of Bash) @marckhouzam -* Partial Revert of #922 @jharshman -* Add Makefile to project @jharshman -* Correct documentation for InOrStdin @desponda -* Apply formatting to templates @jharshman -* Revert change so help is printed on stdout again @marckhouzam -* Update md2man to v2.0.0 @pdf -* update viper to v1.4.0 @umarcor -* Update cmd/root.go example in README.md @jharshman diff --git a/vendor/github.com/spf13/cobra/CONDUCT.md b/vendor/github.com/spf13/cobra/CONDUCT.md deleted file mode 100644 index 9d16f88f..00000000 --- a/vendor/github.com/spf13/cobra/CONDUCT.md +++ /dev/null @@ -1,37 +0,0 @@ -## Cobra User Contract - -### Versioning -Cobra will follow a steady release cadence. Non breaking changes will be released as minor versions quarterly. Patch bug releases are at the discretion of the maintainers. Users can expect security patch fixes to be released within relatively short order of a CVE becoming known. For more information on security patch fixes see the CVE section below. Releases will follow [Semantic Versioning](https://semver.org/). Users tracking the Master branch should expect unpredictable breaking changes as the project continues to move forward. For stability, it is highly recommended to use a release. - -### Backward Compatibility -We will maintain two major releases in a moving window. The N-1 release will only receive bug fixes and security updates and will be dropped once N+1 is released. - -### Deprecation -Deprecation of Go versions or dependent packages will only occur in major releases. To reduce the change of this taking users by surprise, any large deprecation will be preceded by an announcement in the [#cobra slack channel](https://gophers.slack.com/archives/CD3LP1199) and an Issue on Github. - -### CVE -Maintainers will make every effort to release security patches in the case of a medium to high severity CVE directly impacting the library. The speed in which these patches reach a release is up to the discretion of the maintainers. A low severity CVE may be a lower priority than a high severity one. 
- -### Communication -Cobra maintainers will use GitHub issues and the [#cobra slack channel](https://gophers.slack.com/archives/CD3LP1199) as the primary means of communication with the community. This is to foster open communication with all users and contributors. - -### Breaking Changes -Breaking changes are generally allowed in the master branch, as this is the branch used to develop the next release of Cobra. - -There may be times, however, when master is closed for breaking changes. This is likely to happen as we near the release of a new version. - -Breaking changes are not allowed in release branches, as these represent minor versions that have already been released. These version have consumers who expect the APIs, behaviors, etc, to remain stable during the lifetime of the patch stream for the minor release. - -Examples of breaking changes include: -- Removing or renaming exported constant, variable, type, or function. -- Updating the version of critical libraries such as `spf13/pflag`, `spf13/viper` etc... - - Some version updates may be acceptable for picking up bug fixes, but maintainers must exercise caution when reviewing. - -There may, at times, need to be exceptions where breaking changes are allowed in release branches. These are at the discretion of the project's maintainers, and must be carefully considered before merging. - -### CI Testing -Maintainers will ensure the Cobra test suite utilizes the current supported versions of Golang. - -### Disclaimer -Changes to this document and the contents therein are at the discretion of the maintainers. -None of the contents of this document are legally binding in any way to the maintainers or the users. diff --git a/vendor/github.com/spf13/cobra/CONTRIBUTING.md b/vendor/github.com/spf13/cobra/CONTRIBUTING.md deleted file mode 100644 index 6f356e6a..00000000 --- a/vendor/github.com/spf13/cobra/CONTRIBUTING.md +++ /dev/null @@ -1,50 +0,0 @@ -# Contributing to Cobra - -Thank you so much for contributing to Cobra. We appreciate your time and help. -Here are some guidelines to help you get started. - -## Code of Conduct - -Be kind and respectful to the members of the community. Take time to educate -others who are seeking help. Harassment of any kind will not be tolerated. - -## Questions - -If you have questions regarding Cobra, feel free to ask it in the community -[#cobra Slack channel][cobra-slack] - -## Filing a bug or feature - -1. Before filing an issue, please check the existing issues to see if a - similar one was already opened. If there is one already opened, feel free - to comment on it. -1. If you believe you've found a bug, please provide detailed steps of - reproduction, the version of Cobra and anything else you believe will be - useful to help troubleshoot it (e.g. OS environment, environment variables, - etc...). Also state the current behavior vs. the expected behavior. -1. If you'd like to see a feature or an enhancement please open an issue with - a clear title and description of what the feature is and why it would be - beneficial to the project and its users. - -## Submitting changes - -1. CLA: Upon submitting a Pull Request (PR), contributors will be prompted to - sign a CLA. Please sign the CLA :slightly_smiling_face: -1. Tests: If you are submitting code, please ensure you have adequate tests - for the feature. Tests can be run via `go test ./...` or `make test`. -1. Since this is golang project, ensure the new code is properly formatted to - ensure code consistency. Run `make all`. 
- -### Quick steps to contribute - -1. Fork the project. -1. Download your fork to your PC (`git clone https://github.com/your_username/cobra && cd cobra`) -1. Create your feature branch (`git checkout -b my-new-feature`) -1. Make changes and run tests (`make test`) -1. Add them to staging (`git add .`) -1. Commit your changes (`git commit -m 'Add some feature'`) -1. Push to the branch (`git push origin my-new-feature`) -1. Create new pull request - - -[cobra-slack]: https://gophers.slack.com/archives/CD3LP1199 diff --git a/vendor/github.com/spf13/cobra/LICENSE.txt b/vendor/github.com/spf13/cobra/LICENSE.txt deleted file mode 100644 index 298f0e26..00000000 --- a/vendor/github.com/spf13/cobra/LICENSE.txt +++ /dev/null @@ -1,174 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
diff --git a/vendor/github.com/spf13/cobra/Makefile b/vendor/github.com/spf13/cobra/Makefile deleted file mode 100644 index 472c73bf..00000000 --- a/vendor/github.com/spf13/cobra/Makefile +++ /dev/null @@ -1,40 +0,0 @@ -BIN="./bin" -SRC=$(shell find . -name "*.go") - -ifeq (, $(shell which golangci-lint)) -$(warning "could not find golangci-lint in $(PATH), run: curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh") -endif - -ifeq (, $(shell which richgo)) -$(warning "could not find richgo in $(PATH), run: go get github.com/kyoh86/richgo") -endif - -.PHONY: fmt lint test cobra_generator install_deps clean - -default: all - -all: fmt test cobra_generator - -fmt: - $(info ******************** checking formatting ********************) - @test -z $(shell gofmt -l $(SRC)) || (gofmt -d $(SRC); exit 1) - -lint: - $(info ******************** running lint tools ********************) - golangci-lint run -v - -test: install_deps lint - $(info ******************** running tests ********************) - richgo test -v ./... - -cobra_generator: install_deps - $(info ******************** building generator ********************) - mkdir -p $(BIN) - make -C cobra all - -install_deps: - $(info ******************** downloading dependencies ********************) - go get -v ./... - -clean: - rm -rf $(BIN) diff --git a/vendor/github.com/spf13/cobra/README.md b/vendor/github.com/spf13/cobra/README.md deleted file mode 100644 index 074e3979..00000000 --- a/vendor/github.com/spf13/cobra/README.md +++ /dev/null @@ -1,125 +0,0 @@ -![cobra logo](https://cloud.githubusercontent.com/assets/173412/10886352/ad566232-814f-11e5-9cd0-aa101788c117.png) - -Cobra is both a library for creating powerful modern CLI applications as well as a program to generate applications and command files. - -Cobra is used in many Go projects such as [Kubernetes](http://kubernetes.io/), -[Hugo](https://gohugo.io), and [Github CLI](https://github.com/cli/cli) to -name a few. [This list](./projects_using_cobra.md) contains a more extensive list of projects using Cobra. 
- -[![](https://img.shields.io/github/workflow/status/spf13/cobra/Test?longCache=tru&label=Test&logo=github%20actions&logoColor=fff)](https://github.com/spf13/cobra/actions?query=workflow%3ATest) -[![GoDoc](https://godoc.org/github.com/spf13/cobra?status.svg)](https://godoc.org/github.com/spf13/cobra) -[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/cobra)](https://goreportcard.com/report/github.com/spf13/cobra) -[![Slack](https://img.shields.io/badge/Slack-cobra-brightgreen)](https://gophers.slack.com/archives/CD3LP1199) - -# Table of Contents - -- [Overview](#overview) -- [Concepts](#concepts) - * [Commands](#commands) - * [Flags](#flags) -- [Installing](#installing) -- [Usage](#usage) - * [Using the Cobra Generator](user_guide.md#using-the-cobra-generator) - * [Using the Cobra Library](user_guide.md#using-the-cobra-library) - * [Working with Flags](user_guide.md#working-with-flags) - * [Positional and Custom Arguments](user_guide.md#positional-and-custom-arguments) - * [Example](user_guide.md#example) - * [Help Command](user_guide.md#help-command) - * [Usage Message](user_guide.md#usage-message) - * [PreRun and PostRun Hooks](user_guide.md#prerun-and-postrun-hooks) - * [Suggestions when "unknown command" happens](user_guide.md#suggestions-when-unknown-command-happens) - * [Generating documentation for your command](user_guide.md#generating-documentation-for-your-command) - * [Generating shell completions](user_guide.md#generating-shell-completions) - [Contributing](CONTRIBUTING.md) -- [License](#license) - -# Overview - -Cobra is a library providing a simple interface to create powerful modern CLI -interfaces similar to the git & go tools. - -Cobra is also an application that generates application scaffolding so you can rapidly -develop a Cobra-based application. - -Cobra provides: -* Easy subcommand-based CLIs: `app server`, `app fetch`, etc. -* Fully POSIX-compliant flags (including short & long versions) -* Nested subcommands -* Global, local and cascading flags -* Easy generation of applications & commands with `cobra init appname` & `cobra add cmdname` -* Intelligent suggestions (`app srver`... did you mean `app server`?) -* Automatic help generation for commands and flags -* Automatic help flag recognition of `-h`, `--help`, etc. -* Automatically generated shell autocomplete for your application (bash, zsh, fish, powershell) -* Automatically generated man pages for your application -* Command aliases so you can change things without breaking them -* The flexibility to define your own help, usage, etc. -* Optional tight integration with [viper](http://github.com/spf13/viper) for 12-factor apps - -# Concepts - -Cobra is built on a structure of commands, arguments & flags. - -**Commands** represent actions, **Args** are things, and **Flags** are modifiers for those actions. - -The best applications read like sentences when used, and as a result, users -intuitively know how to interact with them. - -The pattern to follow is -`APPNAME VERB NOUN --ADJECTIVE` - or -`APPNAME COMMAND ARG --FLAG` - -A few good real-world examples may better illustrate this point. - -In the following example, 'server' is a command, and 'port' is a flag: - - hugo server --port=1313 - -In this command, we are telling Git to clone the URL bare. - - git clone URL --bare - -## Commands - -Command is the central point of the application. Each interaction that -the application supports will be contained in a Command. A command can -have child commands and optionally run an action.
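To make the command/flag split concrete, here is a minimal sketch of how the `hugo server --port=1313` shape maps onto the library. The command and flag names are illustrative only, not taken from Hugo itself:

```go
package main

import "github.com/spf13/cobra"

func main() {
	// The root command is the application itself (APPNAME).
	rootCmd := &cobra.Command{Use: "hugo"}

	// "server" is a command (the VERB); "port" is a flag (the ADJECTIVE).
	var port int
	serverCmd := &cobra.Command{
		Use:   "server",
		Short: "Start the web server",
		Run: func(cmd *cobra.Command, args []string) {
			cmd.Printf("serving on port %d\n", port)
		},
	}
	serverCmd.Flags().IntVar(&port, "port", 1313, "port to listen on")

	rootCmd.AddCommand(serverCmd)
	cobra.CheckErr(rootCmd.Execute())
}
```

Running `hugo server --port=8080` parses the flag and then invokes the `Run` function of the `server` command.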
- -In the example above, 'server' is the command. - -[More about cobra.Command](https://godoc.org/github.com/spf13/cobra#Command) - -## Flags - -A flag is a way to modify the behavior of a command. Cobra supports -fully POSIX-compliant flags as well as the Go [flag package](https://golang.org/pkg/flag/). -A Cobra command can define flags that persist through to children commands -and flags that are only available to that command. - -In the example above, 'port' is the flag. - -Flag functionality is provided by the [pflag -library](https://github.com/spf13/pflag), a fork of the flag standard library -which maintains the same interface while adding POSIX compliance. - -# Installing -Using Cobra is easy. First, use `go get` to install the latest version -of the library. This command will install the `cobra` generator executable -along with the library and its dependencies: - - go get -u github.com/spf13/cobra - -Next, include Cobra in your application: - -```go -import "github.com/spf13/cobra" -``` - -# Usage - -See [User Guide](user_guide.md). - -# License - -Cobra is released under the Apache 2.0 license. See [LICENSE.txt](https://github.com/spf13/cobra/blob/master/LICENSE.txt) diff --git a/vendor/github.com/spf13/cobra/args.go b/vendor/github.com/spf13/cobra/args.go deleted file mode 100644 index 70e9b262..00000000 --- a/vendor/github.com/spf13/cobra/args.go +++ /dev/null @@ -1,109 +0,0 @@ -package cobra - -import ( - "fmt" - "strings" -) - -type PositionalArgs func(cmd *Command, args []string) error - -// Legacy arg validation has the following behaviour: -// - root commands with no subcommands can take arbitrary arguments -// - root commands with subcommands will do subcommand validity checking -// - subcommands will always accept arbitrary arguments -func legacyArgs(cmd *Command, args []string) error { - // no subcommand, always take args - if !cmd.HasSubCommands() { - return nil - } - - // root command with subcommands, do subcommand checking. - if !cmd.HasParent() && len(args) > 0 { - return fmt.Errorf("unknown command %q for %q%s", args[0], cmd.CommandPath(), cmd.findSuggestions(args[0])) - } - return nil -} - -// NoArgs returns an error if any args are included. -func NoArgs(cmd *Command, args []string) error { - if len(args) > 0 { - return fmt.Errorf("unknown command %q for %q", args[0], cmd.CommandPath()) - } - return nil -} - -// OnlyValidArgs returns an error if any args are not in the list of ValidArgs. -func OnlyValidArgs(cmd *Command, args []string) error { - if len(cmd.ValidArgs) > 0 { - // Remove any description that may be included in ValidArgs. - // A description is following a tab character. - var validArgs []string - for _, v := range cmd.ValidArgs { - validArgs = append(validArgs, strings.Split(v, "\t")[0]) - } - - for _, v := range args { - if !stringInSlice(v, validArgs) { - return fmt.Errorf("invalid argument %q for %q%s", v, cmd.CommandPath(), cmd.findSuggestions(args[0])) - } - } - } - return nil -} - -// ArbitraryArgs never returns an error. -func ArbitraryArgs(cmd *Command, args []string) error { - return nil -} - -// MinimumNArgs returns an error if there is not at least N args. -func MinimumNArgs(n int) PositionalArgs { - return func(cmd *Command, args []string) error { - if len(args) < n { - return fmt.Errorf("requires at least %d arg(s), only received %d", n, len(args)) - } - return nil - } -} - -// MaximumNArgs returns an error if there are more than N args. 
-func MaximumNArgs(n int) PositionalArgs { - return func(cmd *Command, args []string) error { - if len(args) > n { - return fmt.Errorf("accepts at most %d arg(s), received %d", n, len(args)) - } - return nil - } -} - -// ExactArgs returns an error if there are not exactly n args. -func ExactArgs(n int) PositionalArgs { - return func(cmd *Command, args []string) error { - if len(args) != n { - return fmt.Errorf("accepts %d arg(s), received %d", n, len(args)) - } - return nil - } -} - -// ExactValidArgs returns an error if -// there are not exactly N positional args OR -// there are any positional args that are not in the `ValidArgs` field of `Command` -func ExactValidArgs(n int) PositionalArgs { - return func(cmd *Command, args []string) error { - if err := ExactArgs(n)(cmd, args); err != nil { - return err - } - return OnlyValidArgs(cmd, args) - } -} - -// RangeArgs returns an error if the number of args is not within the expected range. -func RangeArgs(min int, max int) PositionalArgs { - return func(cmd *Command, args []string) error { - if len(args) < min || len(args) > max { - return fmt.Errorf("accepts between %d and %d arg(s), received %d", min, max, len(args)) - } - return nil - } -} diff --git a/vendor/github.com/spf13/cobra/bash_completions.go b/vendor/github.com/spf13/cobra/bash_completions.go deleted file mode 100644 index 733f4d12..00000000 --- a/vendor/github.com/spf13/cobra/bash_completions.go +++ /dev/null @@ -1,685 +0,0 @@ -package cobra - -import ( - "bytes" - "fmt" - "io" - "os" - "sort" - "strings" - - "github.com/spf13/pflag" -) - -// Annotations for Bash completion. -const ( - BashCompFilenameExt = "cobra_annotation_bash_completion_filename_extensions" - BashCompCustom = "cobra_annotation_bash_completion_custom" - BashCompOneRequiredFlag = "cobra_annotation_bash_completion_one_required_flag" - BashCompSubdirsInDir = "cobra_annotation_bash_completion_subdirs_in_dir" -) - -func writePreamble(buf io.StringWriter, name string) { - WriteStringAndCheck(buf, fmt.Sprintf("# bash completion for %-36s -*- shell-script -*-\n", name)) - WriteStringAndCheck(buf, fmt.Sprintf(` -__%[1]s_debug() -{ - if [[ -n ${BASH_COMP_DEBUG_FILE} ]]; then - echo "$*" >> "${BASH_COMP_DEBUG_FILE}" - fi -} - -# Homebrew on Macs have version 1.3 of bash-completion which doesn't include -# _init_completion. This is a very minimal version of that function. -__%[1]s_init_completion() -{ - COMPREPLY=() - _get_comp_words_by_ref "$@" cur prev words cword -} - -__%[1]s_index_of_word() -{ - local w word=$1 - shift - index=0 - for w in "$@"; do - [[ $w = "$word" ]] && return - index=$((index+1)) - done - index=-1 -} - -__%[1]s_contains_word() -{ - local w word=$1; shift - for w in "$@"; do - [[ $w = "$word" ]] && return - done - return 1 -} - -__%[1]s_handle_go_custom_completion() -{ - __%[1]s_debug "${FUNCNAME[0]}: cur is ${cur}, words[*] is ${words[*]}, #words[@] is ${#words[@]}" - - local shellCompDirectiveError=%[3]d - local shellCompDirectiveNoSpace=%[4]d - local shellCompDirectiveNoFileComp=%[5]d - local shellCompDirectiveFilterFileExt=%[6]d - local shellCompDirectiveFilterDirs=%[7]d - - local out requestComp lastParam lastChar comp directive args - - # Prepare the command to request completions for the program. 
- # Calling ${words[0]} instead of directly %[1]s allows to handle aliases - args=("${words[@]:1}") - requestComp="${words[0]} %[2]s ${args[*]}" - - lastParam=${words[$((${#words[@]}-1))]} - lastChar=${lastParam:$((${#lastParam}-1)):1} - __%[1]s_debug "${FUNCNAME[0]}: lastParam ${lastParam}, lastChar ${lastChar}" - - if [ -z "${cur}" ] && [ "${lastChar}" != "=" ]; then - # If the last parameter is complete (there is a space following it) - # We add an extra empty parameter so we can indicate this to the go method. - __%[1]s_debug "${FUNCNAME[0]}: Adding extra empty parameter" - requestComp="${requestComp} \"\"" - fi - - __%[1]s_debug "${FUNCNAME[0]}: calling ${requestComp}" - # Use eval to handle any environment variables and such - out=$(eval "${requestComp}" 2>/dev/null) - - # Extract the directive integer at the very end of the output following a colon (:) - directive=${out##*:} - # Remove the directive - out=${out%%:*} - if [ "${directive}" = "${out}" ]; then - # There is not directive specified - directive=0 - fi - __%[1]s_debug "${FUNCNAME[0]}: the completion directive is: ${directive}" - __%[1]s_debug "${FUNCNAME[0]}: the completions are: ${out[*]}" - - if [ $((directive & shellCompDirectiveError)) -ne 0 ]; then - # Error code. No completion. - __%[1]s_debug "${FUNCNAME[0]}: received error from custom completion go code" - return - else - if [ $((directive & shellCompDirectiveNoSpace)) -ne 0 ]; then - if [[ $(type -t compopt) = "builtin" ]]; then - __%[1]s_debug "${FUNCNAME[0]}: activating no space" - compopt -o nospace - fi - fi - if [ $((directive & shellCompDirectiveNoFileComp)) -ne 0 ]; then - if [[ $(type -t compopt) = "builtin" ]]; then - __%[1]s_debug "${FUNCNAME[0]}: activating no file completion" - compopt +o default - fi - fi - fi - - if [ $((directive & shellCompDirectiveFilterFileExt)) -ne 0 ]; then - # File extension filtering - local fullFilter filter filteringCmd - # Do not use quotes around the $out variable or else newline - # characters will be kept. - for filter in ${out[*]}; do - fullFilter+="$filter|" - done - - filteringCmd="_filedir $fullFilter" - __%[1]s_debug "File filtering command: $filteringCmd" - $filteringCmd - elif [ $((directive & shellCompDirectiveFilterDirs)) -ne 0 ]; then - # File completion for directories only - local subDir - # Use printf to strip any trailing newline - subdir=$(printf "%%s" "${out[0]}") - if [ -n "$subdir" ]; then - __%[1]s_debug "Listing directories in $subdir" - __%[1]s_handle_subdirs_in_dir_flag "$subdir" - else - __%[1]s_debug "Listing directories in ." 
- _filedir -d - fi - else - while IFS='' read -r comp; do - COMPREPLY+=("$comp") - done < <(compgen -W "${out[*]}" -- "$cur") - fi -} - -__%[1]s_handle_reply() -{ - __%[1]s_debug "${FUNCNAME[0]}" - local comp - case $cur in - -*) - if [[ $(type -t compopt) = "builtin" ]]; then - compopt -o nospace - fi - local allflags - if [ ${#must_have_one_flag[@]} -ne 0 ]; then - allflags=("${must_have_one_flag[@]}") - else - allflags=("${flags[*]} ${two_word_flags[*]}") - fi - while IFS='' read -r comp; do - COMPREPLY+=("$comp") - done < <(compgen -W "${allflags[*]}" -- "$cur") - if [[ $(type -t compopt) = "builtin" ]]; then - [[ "${COMPREPLY[0]}" == *= ]] || compopt +o nospace - fi - - # complete after --flag=abc - if [[ $cur == *=* ]]; then - if [[ $(type -t compopt) = "builtin" ]]; then - compopt +o nospace - fi - - local index flag - flag="${cur%%=*}" - __%[1]s_index_of_word "${flag}" "${flags_with_completion[@]}" - COMPREPLY=() - if [[ ${index} -ge 0 ]]; then - PREFIX="" - cur="${cur#*=}" - ${flags_completion[${index}]} - if [ -n "${ZSH_VERSION}" ]; then - # zsh completion needs --flag= prefix - eval "COMPREPLY=( \"\${COMPREPLY[@]/#/${flag}=}\" )" - fi - fi - fi - return 0; - ;; - esac - - # check if we are handling a flag with special work handling - local index - __%[1]s_index_of_word "${prev}" "${flags_with_completion[@]}" - if [[ ${index} -ge 0 ]]; then - ${flags_completion[${index}]} - return - fi - - # we are parsing a flag and don't have a special handler, no completion - if [[ ${cur} != "${words[cword]}" ]]; then - return - fi - - local completions - completions=("${commands[@]}") - if [[ ${#must_have_one_noun[@]} -ne 0 ]]; then - completions+=("${must_have_one_noun[@]}") - elif [[ -n "${has_completion_function}" ]]; then - # if a go completion function is provided, defer to that function - __%[1]s_handle_go_custom_completion - fi - if [[ ${#must_have_one_flag[@]} -ne 0 ]]; then - completions+=("${must_have_one_flag[@]}") - fi - while IFS='' read -r comp; do - COMPREPLY+=("$comp") - done < <(compgen -W "${completions[*]}" -- "$cur") - - if [[ ${#COMPREPLY[@]} -eq 0 && ${#noun_aliases[@]} -gt 0 && ${#must_have_one_noun[@]} -ne 0 ]]; then - while IFS='' read -r comp; do - COMPREPLY+=("$comp") - done < <(compgen -W "${noun_aliases[*]}" -- "$cur") - fi - - if [[ ${#COMPREPLY[@]} -eq 0 ]]; then - if declare -F __%[1]s_custom_func >/dev/null; then - # try command name qualified custom func - __%[1]s_custom_func - else - # otherwise fall back to unqualified for compatibility - declare -F __custom_func >/dev/null && __custom_func - fi - fi - - # available in bash-completion >= 2, not always present on macOS - if declare -F __ltrim_colon_completions >/dev/null; then - __ltrim_colon_completions "$cur" - fi - - # If there is only 1 completion and it is a flag with an = it will be completed - # but we don't want a space after the = - if [[ "${#COMPREPLY[@]}" -eq "1" ]] && [[ $(type -t compopt) = "builtin" ]] && [[ "${COMPREPLY[0]}" == --*= ]]; then - compopt -o nospace - fi -} - -# The arguments should be in the form "ext1|ext2|extn" -__%[1]s_handle_filename_extension_flag() -{ - local ext="$1" - _filedir "@(${ext})" -} - -__%[1]s_handle_subdirs_in_dir_flag() -{ - local dir="$1" - pushd "${dir}" >/dev/null 2>&1 && _filedir -d && popd >/dev/null 2>&1 || return -} - -__%[1]s_handle_flag() -{ - __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" - - # if a command required a flag, and we found it, unset must_have_one_flag() - local flagname=${words[c]} - local flagvalue - # if the word 
contained an = - if [[ ${words[c]} == *"="* ]]; then - flagvalue=${flagname#*=} # take in as flagvalue after the = - flagname=${flagname%%=*} # strip everything after the = - flagname="${flagname}=" # but put the = back - fi - __%[1]s_debug "${FUNCNAME[0]}: looking for ${flagname}" - if __%[1]s_contains_word "${flagname}" "${must_have_one_flag[@]}"; then - must_have_one_flag=() - fi - - # if you set a flag which only applies to this command, don't show subcommands - if __%[1]s_contains_word "${flagname}" "${local_nonpersistent_flags[@]}"; then - commands=() - fi - - # keep flag value with flagname as flaghash - # flaghash variable is an associative array which is only supported in bash > 3. - if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then - if [ -n "${flagvalue}" ] ; then - flaghash[${flagname}]=${flagvalue} - elif [ -n "${words[ $((c+1)) ]}" ] ; then - flaghash[${flagname}]=${words[ $((c+1)) ]} - else - flaghash[${flagname}]="true" # pad "true" for bool flag - fi - fi - - # skip the argument to a two word flag - if [[ ${words[c]} != *"="* ]] && __%[1]s_contains_word "${words[c]}" "${two_word_flags[@]}"; then - __%[1]s_debug "${FUNCNAME[0]}: found a flag ${words[c]}, skip the next argument" - c=$((c+1)) - # if we are looking for a flags value, don't show commands - if [[ $c -eq $cword ]]; then - commands=() - fi - fi - - c=$((c+1)) - -} - -__%[1]s_handle_noun() -{ - __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" - - if __%[1]s_contains_word "${words[c]}" "${must_have_one_noun[@]}"; then - must_have_one_noun=() - elif __%[1]s_contains_word "${words[c]}" "${noun_aliases[@]}"; then - must_have_one_noun=() - fi - - nouns+=("${words[c]}") - c=$((c+1)) -} - -__%[1]s_handle_command() -{ - __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" - - local next_command - if [[ -n ${last_command} ]]; then - next_command="_${last_command}_${words[c]//:/__}" - else - if [[ $c -eq 0 ]]; then - next_command="_%[1]s_root_command" - else - next_command="_${words[c]//:/__}" - fi - fi - c=$((c+1)) - __%[1]s_debug "${FUNCNAME[0]}: looking for ${next_command}" - declare -F "$next_command" >/dev/null && $next_command -} - -__%[1]s_handle_word() -{ - if [[ $c -ge $cword ]]; then - __%[1]s_handle_reply - return - fi - __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" - if [[ "${words[c]}" == -* ]]; then - __%[1]s_handle_flag - elif __%[1]s_contains_word "${words[c]}" "${commands[@]}"; then - __%[1]s_handle_command - elif [[ $c -eq 0 ]]; then - __%[1]s_handle_command - elif __%[1]s_contains_word "${words[c]}" "${command_aliases[@]}"; then - # aliashash variable is an associative array which is only supported in bash > 3. 
- if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then - words[c]=${aliashash[${words[c]}]} - __%[1]s_handle_command - else - __%[1]s_handle_noun - fi - else - __%[1]s_handle_noun - fi - __%[1]s_handle_word -} - -`, name, ShellCompNoDescRequestCmd, - ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp, - ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs)) -} - -func writePostscript(buf io.StringWriter, name string) { - name = strings.Replace(name, ":", "__", -1) - WriteStringAndCheck(buf, fmt.Sprintf("__start_%s()\n", name)) - WriteStringAndCheck(buf, fmt.Sprintf(`{ - local cur prev words cword split - declare -A flaghash 2>/dev/null || : - declare -A aliashash 2>/dev/null || : - if declare -F _init_completion >/dev/null 2>&1; then - _init_completion -s || return - else - __%[1]s_init_completion -n "=" || return - fi - - local c=0 - local flags=() - local two_word_flags=() - local local_nonpersistent_flags=() - local flags_with_completion=() - local flags_completion=() - local commands=("%[1]s") - local command_aliases=() - local must_have_one_flag=() - local must_have_one_noun=() - local has_completion_function - local last_command - local nouns=() - local noun_aliases=() - - __%[1]s_handle_word -} - -`, name)) - WriteStringAndCheck(buf, fmt.Sprintf(`if [[ $(type -t compopt) = "builtin" ]]; then - complete -o default -F __start_%s %s -else - complete -o default -o nospace -F __start_%s %s -fi - -`, name, name, name, name)) - WriteStringAndCheck(buf, "# ex: ts=4 sw=4 et filetype=sh\n") -} - -func writeCommands(buf io.StringWriter, cmd *Command) { - WriteStringAndCheck(buf, " commands=()\n") - for _, c := range cmd.Commands() { - if !c.IsAvailableCommand() && c != cmd.helpCommand { - continue - } - WriteStringAndCheck(buf, fmt.Sprintf(" commands+=(%q)\n", c.Name())) - writeCmdAliases(buf, c) - } - WriteStringAndCheck(buf, "\n") -} - -func writeFlagHandler(buf io.StringWriter, name string, annotations map[string][]string, cmd *Command) { - for key, value := range annotations { - switch key { - case BashCompFilenameExt: - WriteStringAndCheck(buf, fmt.Sprintf(" flags_with_completion+=(%q)\n", name)) - - var ext string - if len(value) > 0 { - ext = fmt.Sprintf("__%s_handle_filename_extension_flag ", cmd.Root().Name()) + strings.Join(value, "|") - } else { - ext = "_filedir" - } - WriteStringAndCheck(buf, fmt.Sprintf(" flags_completion+=(%q)\n", ext)) - case BashCompCustom: - WriteStringAndCheck(buf, fmt.Sprintf(" flags_with_completion+=(%q)\n", name)) - - if len(value) > 0 { - handlers := strings.Join(value, "; ") - WriteStringAndCheck(buf, fmt.Sprintf(" flags_completion+=(%q)\n", handlers)) - } else { - WriteStringAndCheck(buf, " flags_completion+=(:)\n") - } - case BashCompSubdirsInDir: - WriteStringAndCheck(buf, fmt.Sprintf(" flags_with_completion+=(%q)\n", name)) - - var ext string - if len(value) == 1 { - ext = fmt.Sprintf("__%s_handle_subdirs_in_dir_flag ", cmd.Root().Name()) + value[0] - } else { - ext = "_filedir -d" - } - WriteStringAndCheck(buf, fmt.Sprintf(" flags_completion+=(%q)\n", ext)) - } - } -} - -const cbn = "\")\n" - -func writeShortFlag(buf io.StringWriter, flag *pflag.Flag, cmd *Command) { - name := flag.Shorthand - format := " " - if len(flag.NoOptDefVal) == 0 { - format += "two_word_" - } - format += "flags+=(\"-%s" + cbn - WriteStringAndCheck(buf, fmt.Sprintf(format, name)) - writeFlagHandler(buf, "-"+name, flag.Annotations, cmd) -} - -func writeFlag(buf io.StringWriter, flag *pflag.Flag, cmd *Command) { - name := 
flag.Name - format := " flags+=(\"--%s" - if len(flag.NoOptDefVal) == 0 { - format += "=" - } - format += cbn - WriteStringAndCheck(buf, fmt.Sprintf(format, name)) - if len(flag.NoOptDefVal) == 0 { - format = " two_word_flags+=(\"--%s" + cbn - WriteStringAndCheck(buf, fmt.Sprintf(format, name)) - } - writeFlagHandler(buf, "--"+name, flag.Annotations, cmd) -} - -func writeLocalNonPersistentFlag(buf io.StringWriter, flag *pflag.Flag) { - name := flag.Name - format := " local_nonpersistent_flags+=(\"--%[1]s" + cbn - if len(flag.NoOptDefVal) == 0 { - format += " local_nonpersistent_flags+=(\"--%[1]s=" + cbn - } - WriteStringAndCheck(buf, fmt.Sprintf(format, name)) - if len(flag.Shorthand) > 0 { - WriteStringAndCheck(buf, fmt.Sprintf(" local_nonpersistent_flags+=(\"-%s\")\n", flag.Shorthand)) - } -} - -// Setup annotations for go completions for registered flags -func prepareCustomAnnotationsForFlags(cmd *Command) { - flagCompletionMutex.RLock() - defer flagCompletionMutex.RUnlock() - for flag := range flagCompletionFunctions { - // Make sure the completion script calls the __*_go_custom_completion function for - // every registered flag. We need to do this here (and not when the flag was registered - // for completion) so that we can know the root command name for the prefix - // of ___go_custom_completion - if flag.Annotations == nil { - flag.Annotations = map[string][]string{} - } - flag.Annotations[BashCompCustom] = []string{fmt.Sprintf("__%[1]s_handle_go_custom_completion", cmd.Root().Name())} - } -} - -func writeFlags(buf io.StringWriter, cmd *Command) { - prepareCustomAnnotationsForFlags(cmd) - WriteStringAndCheck(buf, ` flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - -`) - localNonPersistentFlags := cmd.LocalNonPersistentFlags() - cmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) { - if nonCompletableFlag(flag) { - return - } - writeFlag(buf, flag, cmd) - if len(flag.Shorthand) > 0 { - writeShortFlag(buf, flag, cmd) - } - // localNonPersistentFlags are used to stop the completion of subcommands when one is set - // if TraverseChildren is true we should allow to complete subcommands - if localNonPersistentFlags.Lookup(flag.Name) != nil && !cmd.Root().TraverseChildren { - writeLocalNonPersistentFlag(buf, flag) - } - }) - cmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) { - if nonCompletableFlag(flag) { - return - } - writeFlag(buf, flag, cmd) - if len(flag.Shorthand) > 0 { - writeShortFlag(buf, flag, cmd) - } - }) - - WriteStringAndCheck(buf, "\n") -} - -func writeRequiredFlag(buf io.StringWriter, cmd *Command) { - WriteStringAndCheck(buf, " must_have_one_flag=()\n") - flags := cmd.NonInheritedFlags() - flags.VisitAll(func(flag *pflag.Flag) { - if nonCompletableFlag(flag) { - return - } - for key := range flag.Annotations { - switch key { - case BashCompOneRequiredFlag: - format := " must_have_one_flag+=(\"--%s" - if flag.Value.Type() != "bool" { - format += "=" - } - format += cbn - WriteStringAndCheck(buf, fmt.Sprintf(format, flag.Name)) - - if len(flag.Shorthand) > 0 { - WriteStringAndCheck(buf, fmt.Sprintf(" must_have_one_flag+=(\"-%s"+cbn, flag.Shorthand)) - } - } - } - }) -} - -func writeRequiredNouns(buf io.StringWriter, cmd *Command) { - WriteStringAndCheck(buf, " must_have_one_noun=()\n") - sort.Strings(cmd.ValidArgs) - for _, value := range cmd.ValidArgs { - // Remove any description that may be included following a tab character. - // Descriptions are not supported by bash completion. 
- value = strings.Split(value, "\t")[0] - WriteStringAndCheck(buf, fmt.Sprintf(" must_have_one_noun+=(%q)\n", value)) - } - if cmd.ValidArgsFunction != nil { - WriteStringAndCheck(buf, " has_completion_function=1\n") - } -} - -func writeCmdAliases(buf io.StringWriter, cmd *Command) { - if len(cmd.Aliases) == 0 { - return - } - - sort.Strings(cmd.Aliases) - - WriteStringAndCheck(buf, fmt.Sprint(` if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then`, "\n")) - for _, value := range cmd.Aliases { - WriteStringAndCheck(buf, fmt.Sprintf(" command_aliases+=(%q)\n", value)) - WriteStringAndCheck(buf, fmt.Sprintf(" aliashash[%q]=%q\n", value, cmd.Name())) - } - WriteStringAndCheck(buf, ` fi`) - WriteStringAndCheck(buf, "\n") -} -func writeArgAliases(buf io.StringWriter, cmd *Command) { - WriteStringAndCheck(buf, " noun_aliases=()\n") - sort.Strings(cmd.ArgAliases) - for _, value := range cmd.ArgAliases { - WriteStringAndCheck(buf, fmt.Sprintf(" noun_aliases+=(%q)\n", value)) - } -} - -func gen(buf io.StringWriter, cmd *Command) { - for _, c := range cmd.Commands() { - if !c.IsAvailableCommand() && c != cmd.helpCommand { - continue - } - gen(buf, c) - } - commandName := cmd.CommandPath() - commandName = strings.Replace(commandName, " ", "_", -1) - commandName = strings.Replace(commandName, ":", "__", -1) - - if cmd.Root() == cmd { - WriteStringAndCheck(buf, fmt.Sprintf("_%s_root_command()\n{\n", commandName)) - } else { - WriteStringAndCheck(buf, fmt.Sprintf("_%s()\n{\n", commandName)) - } - - WriteStringAndCheck(buf, fmt.Sprintf(" last_command=%q\n", commandName)) - WriteStringAndCheck(buf, "\n") - WriteStringAndCheck(buf, " command_aliases=()\n") - WriteStringAndCheck(buf, "\n") - - writeCommands(buf, cmd) - writeFlags(buf, cmd) - writeRequiredFlag(buf, cmd) - writeRequiredNouns(buf, cmd) - writeArgAliases(buf, cmd) - WriteStringAndCheck(buf, "}\n\n") -} - -// GenBashCompletion generates bash completion file and writes to the passed writer. -func (c *Command) GenBashCompletion(w io.Writer) error { - buf := new(bytes.Buffer) - writePreamble(buf, c.Name()) - if len(c.BashCompletionFunction) > 0 { - buf.WriteString(c.BashCompletionFunction + "\n") - } - gen(buf, c) - writePostscript(buf, c.Name()) - - _, err := buf.WriteTo(w) - return err -} - -func nonCompletableFlag(flag *pflag.Flag) bool { - return flag.Hidden || len(flag.Deprecated) > 0 -} - -// GenBashCompletionFile generates bash completion file. -func (c *Command) GenBashCompletionFile(filename string) error { - outFile, err := os.Create(filename) - if err != nil { - return err - } - defer outFile.Close() - - return c.GenBashCompletion(outFile) -} diff --git a/vendor/github.com/spf13/cobra/bash_completions.md b/vendor/github.com/spf13/cobra/bash_completions.md deleted file mode 100644 index 52919b2f..00000000 --- a/vendor/github.com/spf13/cobra/bash_completions.md +++ /dev/null @@ -1,93 +0,0 @@ -# Generating Bash Completions For Your cobra.Command - -Please refer to [Shell Completions](shell_completions.md) for details. - -## Bash legacy dynamic completions - -For backward compatibility, Cobra still supports its legacy dynamic completion solution (described below). Unlike the `ValidArgsFunction` solution, the legacy solution will only work for Bash shell-completion and not for other shells. This legacy solution can be used along-side `ValidArgsFunction` and `RegisterFlagCompletionFunc()`, as long as both solutions are not used for the same command. 
This provides a path to gradually migrate from the legacy solution to the new solution. - -**Note**: Cobra's default `completion` command uses bash completion V2. If you are currently using Cobra's legacy dynamic completion solution, you should not use the default `completion` command but continue using your own. - -The legacy solution allows you to inject bash functions into the bash completion script. Those bash functions are responsible for providing the completion choices for your own completions. - -Some code that works in Kubernetes: - -```bash -const ( - bash_completion_func = `__kubectl_parse_get() -{ - local kubectl_output out - if kubectl_output=$(kubectl get --no-headers "$1" 2>/dev/null); then - out=($(echo "${kubectl_output}" | awk '{print $1}')) - COMPREPLY=( $( compgen -W "${out[*]}" -- "$cur" ) ) - fi -} - -__kubectl_get_resource() -{ - if [[ ${#nouns[@]} -eq 0 ]]; then - return 1 - fi - __kubectl_parse_get ${nouns[${#nouns[@]} -1]} - if [[ $? -eq 0 ]]; then - return 0 - fi -} - -__kubectl_custom_func() { - case ${last_command} in - kubectl_get | kubectl_describe | kubectl_delete | kubectl_stop) - __kubectl_get_resource - return - ;; - *) - ;; - esac -} -`) -``` - -And then I set that in my command definition: - -```go -cmds := &cobra.Command{ - Use: "kubectl", - Short: "kubectl controls the Kubernetes cluster manager", - Long: `kubectl controls the Kubernetes cluster manager. - -Find more information at https://github.com/GoogleCloudPlatform/kubernetes.`, - Run: runHelp, - BashCompletionFunction: bash_completion_func, -} -``` - -The `BashCompletionFunction` option is really only valid/useful on the root command. Doing the above will cause `__kubectl_custom_func()` (`__custom_func()`) to be called when the built-in processor is unable to find a solution. In the case of Kubernetes, a valid command might look something like `kubectl get pod [mypod]`. If you type `kubectl get pod [tab][tab]` the `__kubectl_custom_func()` will run because the cobra.Command only understood "kubectl" and "get." `__kubectl_custom_func()` will see that the cobra.Command is "kubectl_get" and will thus call another helper `__kubectl_get_resource()`. `__kubectl_get_resource` will look at the 'nouns' collected. In our example the only noun will be `pod`. So it will call `__kubectl_parse_get pod`. `__kubectl_parse_get` will actually call out to Kubernetes and get any pods. It will then set `COMPREPLY` to valid pods!
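For contrast, the `ValidArgsFunction` mechanism mentioned above can cover the same `kubectl get pod [tab][tab]` case in Go rather than in injected bash. The following is a rough, hypothetical sketch (the kubectl invocation and the `getCmd`/`podNames` names are assumptions for illustration, not the actual kubectl implementation):

```go
import (
	"os/exec"
	"strings"

	"github.com/spf13/cobra"
)

// podNames plays the role of __kubectl_parse_get above, but in Go.
// Hypothetical: the exact kubectl flags used here are illustrative only.
func podNames(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
	out, err := exec.Command("kubectl", "get", "pods", "--no-headers",
		"-o", "custom-columns=:metadata.name").Output()
	if err != nil {
		return nil, cobra.ShellCompDirectiveError
	}
	// Return bare pod names; NoFileComp suppresses fallback to file completion.
	return strings.Split(strings.TrimSpace(string(out)), "\n"), cobra.ShellCompDirectiveNoFileComp
}

// Wiring: getCmd.ValidArgsFunction = podNames
```

Returning `ShellCompDirectiveNoFileComp` keeps the shell from falling back to filename completion when no pods match.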
- -Similarly, for flags: - -```go - annotation := make(map[string][]string) - annotation[cobra.BashCompCustom] = []string{"__kubectl_get_namespaces"} - - flag := &pflag.Flag{ - Name: "namespace", - Usage: usage, - Annotations: annotation, - } - cmd.Flags().AddFlag(flag) -``` - -In addition add the `__kubectl_get_namespaces` implementation in the `BashCompletionFunction` -value, e.g.: - -```bash -__kubectl_get_namespaces() -{ - local template - template="{{ range .items }}{{ .metadata.name }} {{ end }}" - local kubectl_out - if kubectl_out=$(kubectl get -o template --template="${template}" namespace 2>/dev/null); then - COMPREPLY=( $( compgen -W "${kubectl_out}[*]" -- "$cur" ) ) - fi -} -``` diff --git a/vendor/github.com/spf13/cobra/bash_completionsV2.go b/vendor/github.com/spf13/cobra/bash_completionsV2.go deleted file mode 100644 index 8859b57c..00000000 --- a/vendor/github.com/spf13/cobra/bash_completionsV2.go +++ /dev/null @@ -1,302 +0,0 @@ -package cobra - -import ( - "bytes" - "fmt" - "io" - "os" -) - -func (c *Command) genBashCompletion(w io.Writer, includeDesc bool) error { - buf := new(bytes.Buffer) - genBashComp(buf, c.Name(), includeDesc) - _, err := buf.WriteTo(w) - return err -} - -func genBashComp(buf io.StringWriter, name string, includeDesc bool) { - compCmd := ShellCompRequestCmd - if !includeDesc { - compCmd = ShellCompNoDescRequestCmd - } - - WriteStringAndCheck(buf, fmt.Sprintf(`# bash completion V2 for %-36[1]s -*- shell-script -*- - -__%[1]s_debug() -{ - if [[ -n ${BASH_COMP_DEBUG_FILE:-} ]]; then - echo "$*" >> "${BASH_COMP_DEBUG_FILE}" - fi -} - -# Macs have bash3 for which the bash-completion package doesn't include -# _init_completion. This is a minimal version of that function. -__%[1]s_init_completion() -{ - COMPREPLY=() - _get_comp_words_by_ref "$@" cur prev words cword -} - -# This function calls the %[1]s program to obtain the completion -# results and the directive. It fills the 'out' and 'directive' vars. -__%[1]s_get_completion_results() { - local requestComp lastParam lastChar args - - # Prepare the command to request completions for the program. - # Calling ${words[0]} instead of directly %[1]s allows to handle aliases - args=("${words[@]:1}") - requestComp="${words[0]} %[2]s ${args[*]}" - - lastParam=${words[$((${#words[@]}-1))]} - lastChar=${lastParam:$((${#lastParam}-1)):1} - __%[1]s_debug "lastParam ${lastParam}, lastChar ${lastChar}" - - if [ -z "${cur}" ] && [ "${lastChar}" != "=" ]; then - # If the last parameter is complete (there is a space following it) - # We add an extra empty parameter so we can indicate this to the go method. 
- __%[1]s_debug "Adding extra empty parameter" - requestComp="${requestComp} ''" - fi - - # When completing a flag with an = (e.g., %[1]s -n=) - # bash focuses on the part after the =, so we need to remove - # the flag part from $cur - if [[ "${cur}" == -*=* ]]; then - cur="${cur#*=}" - fi - - __%[1]s_debug "Calling ${requestComp}" - # Use eval to handle any environment variables and such - out=$(eval "${requestComp}" 2>/dev/null) - - # Extract the directive integer at the very end of the output following a colon (:) - directive=${out##*:} - # Remove the directive - out=${out%%:*} - if [ "${directive}" = "${out}" ]; then - # There is not directive specified - directive=0 - fi - __%[1]s_debug "The completion directive is: ${directive}" - __%[1]s_debug "The completions are: ${out[*]}" -} - -__%[1]s_process_completion_results() { - local shellCompDirectiveError=%[3]d - local shellCompDirectiveNoSpace=%[4]d - local shellCompDirectiveNoFileComp=%[5]d - local shellCompDirectiveFilterFileExt=%[6]d - local shellCompDirectiveFilterDirs=%[7]d - - if [ $((directive & shellCompDirectiveError)) -ne 0 ]; then - # Error code. No completion. - __%[1]s_debug "Received error from custom completion go code" - return - else - if [ $((directive & shellCompDirectiveNoSpace)) -ne 0 ]; then - if [[ $(type -t compopt) = "builtin" ]]; then - __%[1]s_debug "Activating no space" - compopt -o nospace - else - __%[1]s_debug "No space directive not supported in this version of bash" - fi - fi - if [ $((directive & shellCompDirectiveNoFileComp)) -ne 0 ]; then - if [[ $(type -t compopt) = "builtin" ]]; then - __%[1]s_debug "Activating no file completion" - compopt +o default - else - __%[1]s_debug "No file completion directive not supported in this version of bash" - fi - fi - fi - - if [ $((directive & shellCompDirectiveFilterFileExt)) -ne 0 ]; then - # File extension filtering - local fullFilter filter filteringCmd - - # Do not use quotes around the $out variable or else newline - # characters will be kept. - for filter in ${out[*]}; do - fullFilter+="$filter|" - done - - filteringCmd="_filedir $fullFilter" - __%[1]s_debug "File filtering command: $filteringCmd" - $filteringCmd - elif [ $((directive & shellCompDirectiveFilterDirs)) -ne 0 ]; then - # File completion for directories only - - # Use printf to strip any trailing newline - local subdir - subdir=$(printf "%%s" "${out[0]}") - if [ -n "$subdir" ]; then - __%[1]s_debug "Listing directories in $subdir" - pushd "$subdir" >/dev/null 2>&1 && _filedir -d && popd >/dev/null 2>&1 || return - else - __%[1]s_debug "Listing directories in ." 
- _filedir -d - fi - else - __%[1]s_handle_standard_completion_case - fi - - __%[1]s_handle_special_char "$cur" : - __%[1]s_handle_special_char "$cur" = -} - -__%[1]s_handle_standard_completion_case() { - local tab comp - tab=$(printf '\t') - - local longest=0 - # Look for the longest completion so that we can format things nicely - while IFS='' read -r comp; do - # Strip any description before checking the length - comp=${comp%%%%$tab*} - # Only consider the completions that match - comp=$(compgen -W "$comp" -- "$cur") - if ((${#comp}>longest)); then - longest=${#comp} - fi - done < <(printf "%%s\n" "${out[@]}") - - local completions=() - while IFS='' read -r comp; do - if [ -z "$comp" ]; then - continue - fi - - __%[1]s_debug "Original comp: $comp" - comp="$(__%[1]s_format_comp_descriptions "$comp" "$longest")" - __%[1]s_debug "Final comp: $comp" - completions+=("$comp") - done < <(printf "%%s\n" "${out[@]}") - - while IFS='' read -r comp; do - COMPREPLY+=("$comp") - done < <(compgen -W "${completions[*]}" -- "$cur") - - # If there is a single completion left, remove the description text - if [ ${#COMPREPLY[*]} -eq 1 ]; then - __%[1]s_debug "COMPREPLY[0]: ${COMPREPLY[0]}" - comp="${COMPREPLY[0]%%%% *}" - __%[1]s_debug "Removed description from single completion, which is now: ${comp}" - COMPREPLY=() - COMPREPLY+=("$comp") - fi -} - -__%[1]s_handle_special_char() -{ - local comp="$1" - local char=$2 - if [[ "$comp" == *${char}* && "$COMP_WORDBREAKS" == *${char}* ]]; then - local word=${comp%%"${comp##*${char}}"} - local idx=${#COMPREPLY[*]} - while [[ $((--idx)) -ge 0 ]]; do - COMPREPLY[$idx]=${COMPREPLY[$idx]#"$word"} - done - fi -} - -__%[1]s_format_comp_descriptions() -{ - local tab - tab=$(printf '\t') - local comp="$1" - local longest=$2 - - # Properly format the description string which follows a tab character if there is one - if [[ "$comp" == *$tab* ]]; then - desc=${comp#*$tab} - comp=${comp%%%%$tab*} - - # $COLUMNS stores the current shell width. - # Remove an extra 4 because we add 2 spaces and 2 parentheses. - maxdesclength=$(( COLUMNS - longest - 4 )) - - # Make sure we can fit a description of at least 8 characters - # if we are to align the descriptions. - if [[ $maxdesclength -gt 8 ]]; then - # Add the proper number of spaces to align the descriptions - for ((i = ${#comp} ; i < longest ; i++)); do - comp+=" " - done - else - # Don't pad the descriptions so we can fit more text after the completion - maxdesclength=$(( COLUMNS - ${#comp} - 4 )) - fi - - # If there is enough space for any description text, - # truncate the descriptions that are too long for the shell width - if [ $maxdesclength -gt 0 ]; then - if [ ${#desc} -gt $maxdesclength ]; then - desc=${desc:0:$(( maxdesclength - 1 ))} - desc+="…" - fi - comp+=" ($desc)" - fi - fi - - # Must use printf to escape all special characters - printf "%%q" "${comp}" -} - -__start_%[1]s() -{ - local cur prev words cword split - - COMPREPLY=() - - # Call _init_completion from the bash-completion package - # to prepare the arguments properly - if declare -F _init_completion >/dev/null 2>&1; then - _init_completion -n "=:" || return - else - __%[1]s_init_completion -n "=:" || return - fi - - __%[1]s_debug - __%[1]s_debug "========= starting completion logic ==========" - __%[1]s_debug "cur is ${cur}, words[*] is ${words[*]}, #words[@] is ${#words[@]}, cword is $cword" - - # The user could have moved the cursor backwards on the command-line. 
- # We need to trigger completion from the $cword location, so we need - # to truncate the command-line ($words) up to the $cword location. - words=("${words[@]:0:$cword+1}") - __%[1]s_debug "Truncated words[*]: ${words[*]}," - - local out directive - __%[1]s_get_completion_results - __%[1]s_process_completion_results -} - -if [[ $(type -t compopt) = "builtin" ]]; then - complete -o default -F __start_%[1]s %[1]s -else - complete -o default -o nospace -F __start_%[1]s %[1]s -fi - -# ex: ts=4 sw=4 et filetype=sh -`, name, compCmd, - ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp, - ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs)) -} - -// GenBashCompletionFileV2 generates Bash completion version 2. -func (c *Command) GenBashCompletionFileV2(filename string, includeDesc bool) error { - outFile, err := os.Create(filename) - if err != nil { - return err - } - defer outFile.Close() - - return c.GenBashCompletionV2(outFile, includeDesc) -} - -// GenBashCompletionV2 generates Bash completion file version 2 -// and writes it to the passed writer. -func (c *Command) GenBashCompletionV2(w io.Writer, includeDesc bool) error { - return c.genBashCompletion(w, includeDesc) -} diff --git a/vendor/github.com/spf13/cobra/cobra.go b/vendor/github.com/spf13/cobra/cobra.go deleted file mode 100644 index d6cbfd71..00000000 --- a/vendor/github.com/spf13/cobra/cobra.go +++ /dev/null @@ -1,222 +0,0 @@ -// Copyright © 2013 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Commands similar to git, go tools and other modern CLI tools -// inspired by go, go-Commander, gh and subcommand - -package cobra - -import ( - "fmt" - "io" - "os" - "reflect" - "strconv" - "strings" - "text/template" - "time" - "unicode" -) - -var templateFuncs = template.FuncMap{ - "trim": strings.TrimSpace, - "trimRightSpace": trimRightSpace, - "trimTrailingWhitespaces": trimRightSpace, - "appendIfNotPresent": appendIfNotPresent, - "rpad": rpad, - "gt": Gt, - "eq": Eq, -} - -var initializers []func() - -// EnablePrefixMatching allows to set automatic prefix matching. Automatic prefix matching can be a dangerous thing -// to automatically enable in CLI tools. -// Set this to true to enable it. -var EnablePrefixMatching = false - -// EnableCommandSorting controls sorting of the slice of commands, which is turned on by default. -// To disable sorting, set it to false. -var EnableCommandSorting = true - -// MousetrapHelpText enables an information splash screen on Windows -// if the CLI is started from explorer.exe. -// To disable the mousetrap, just set this variable to blank string (""). -// Works only on Microsoft Windows. -var MousetrapHelpText = `This is a command line tool. - -You need to open cmd.exe and run it from there. -` - -// MousetrapDisplayDuration controls how long the MousetrapHelpText message is displayed on Windows -// if the CLI is started from explorer.exe. Set to 0 to wait for the return key to be pressed. 
-// To disable the mousetrap, just set MousetrapHelpText to blank string (""). -// Works only on Microsoft Windows. -var MousetrapDisplayDuration = 5 * time.Second - -// AddTemplateFunc adds a template function that's available to Usage and Help -// template generation. -func AddTemplateFunc(name string, tmplFunc interface{}) { - templateFuncs[name] = tmplFunc -} - -// AddTemplateFuncs adds multiple template functions that are available to Usage and -// Help template generation. -func AddTemplateFuncs(tmplFuncs template.FuncMap) { - for k, v := range tmplFuncs { - templateFuncs[k] = v - } -} - -// OnInitialize sets the passed functions to be run when each command's -// Execute method is called. -func OnInitialize(y ...func()) { - initializers = append(initializers, y...) -} - -// FIXME Gt is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra. - -// Gt takes two types and checks whether the first type is greater than the second. In case of types Arrays, Chans, -// Maps and Slices, Gt will compare their lengths. Ints are compared directly while strings are first parsed as -// ints and then compared. -func Gt(a interface{}, b interface{}) bool { - var left, right int64 - av := reflect.ValueOf(a) - - switch av.Kind() { - case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: - left = int64(av.Len()) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - left = av.Int() - case reflect.String: - left, _ = strconv.ParseInt(av.String(), 10, 64) - } - - bv := reflect.ValueOf(b) - - switch bv.Kind() { - case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: - right = int64(bv.Len()) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - right = bv.Int() - case reflect.String: - right, _ = strconv.ParseInt(bv.String(), 10, 64) - } - - return left > right -} - -// FIXME Eq is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra. - -// Eq takes two types and checks whether they are equal. Supported types are int and string. Unsupported types will panic. -func Eq(a interface{}, b interface{}) bool { - av := reflect.ValueOf(a) - bv := reflect.ValueOf(b) - - switch av.Kind() { - case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: - panic("Eq called on unsupported type") - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return av.Int() == bv.Int() - case reflect.String: - return av.String() == bv.String() - } - return false -} - -func trimRightSpace(s string) string { - return strings.TrimRightFunc(s, unicode.IsSpace) -} - -// FIXME appendIfNotPresent is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra. - -// appendIfNotPresent will append stringToAppend to the end of s, but only if it's not yet present in s. -func appendIfNotPresent(s, stringToAppend string) string { - if strings.Contains(s, stringToAppend) { - return s - } - return s + " " + stringToAppend -} - -// rpad adds padding to the right of a string. -func rpad(s string, padding int) string { - template := fmt.Sprintf("%%-%ds", padding) - return fmt.Sprintf(template, s) -} - -// tmpl executes the given template text on data, writing the result to w. 
-func tmpl(w io.Writer, text string, data interface{}) error { - t := template.New("top") - t.Funcs(templateFuncs) - template.Must(t.Parse(text)) - return t.Execute(w, data) -} - -// ld compares two strings and returns the levenshtein distance between them. -func ld(s, t string, ignoreCase bool) int { - if ignoreCase { - s = strings.ToLower(s) - t = strings.ToLower(t) - } - d := make([][]int, len(s)+1) - for i := range d { - d[i] = make([]int, len(t)+1) - } - for i := range d { - d[i][0] = i - } - for j := range d[0] { - d[0][j] = j - } - for j := 1; j <= len(t); j++ { - for i := 1; i <= len(s); i++ { - if s[i-1] == t[j-1] { - d[i][j] = d[i-1][j-1] - } else { - min := d[i-1][j] - if d[i][j-1] < min { - min = d[i][j-1] - } - if d[i-1][j-1] < min { - min = d[i-1][j-1] - } - d[i][j] = min + 1 - } - } - - } - return d[len(s)][len(t)] -} - -func stringInSlice(a string, list []string) bool { - for _, b := range list { - if b == a { - return true - } - } - return false -} - -// CheckErr prints the msg with the prefix 'Error:' and exits with error code 1. If the msg is nil, it does nothing. -func CheckErr(msg interface{}) { - if msg != nil { - fmt.Fprintln(os.Stderr, "Error:", msg) - os.Exit(1) - } -} - -// WriteStringAndCheck writes a string into a buffer, and checks if the error is not nil. -func WriteStringAndCheck(b io.StringWriter, s string) { - _, err := b.WriteString(s) - CheckErr(err) -} diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go deleted file mode 100644 index 2cc18891..00000000 --- a/vendor/github.com/spf13/cobra/command.go +++ /dev/null @@ -1,1680 +0,0 @@ -// Copyright © 2013 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package cobra is a commander providing a simple interface to create powerful modern CLI interfaces. -// In addition to providing an interface, Cobra simultaneously provides a controller to organize your application code. -package cobra - -import ( - "bytes" - "context" - "fmt" - "io" - "os" - "path/filepath" - "sort" - "strings" - - flag "github.com/spf13/pflag" -) - -// FParseErrWhitelist configures Flag parse errors to be ignored -type FParseErrWhitelist flag.ParseErrorsWhitelist - -// Command is just that, a command for your application. -// E.g. 'go run ...' - 'run' is the command. Cobra requires -// you to define the usage and description as part of your command -// definition to ensure usability. -type Command struct { - // Use is the one-line usage message. - // Recommended syntax is as follow: - // [ ] identifies an optional argument. Arguments that are not enclosed in brackets are required. - // ... indicates that you can specify multiple values for the previous argument. - // | indicates mutually exclusive information. You can use the argument to the left of the separator or the - // argument to the right of the separator. You cannot use both arguments in a single use of the command. - // { } delimits a set of mutually exclusive arguments when one of the arguments is required. 
If the arguments are - // optional, they are enclosed in brackets ([ ]). - // Example: add [-F file | -D dir]... [-f format] profile - Use string - - // Aliases is an array of aliases that can be used instead of the first word in Use. - Aliases []string - - // SuggestFor is an array of command names for which this command will be suggested - - // similar to aliases but only suggests. - SuggestFor []string - - // Short is the short description shown in the 'help' output. - Short string - - // Long is the long message shown in the 'help ' output. - Long string - - // Example is examples of how to use the command. - Example string - - // ValidArgs is list of all valid non-flag arguments that are accepted in shell completions - ValidArgs []string - // ValidArgsFunction is an optional function that provides valid non-flag arguments for shell completion. - // It is a dynamic version of using ValidArgs. - // Only one of ValidArgs and ValidArgsFunction can be used for a command. - ValidArgsFunction func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) - - // Expected arguments - Args PositionalArgs - - // ArgAliases is List of aliases for ValidArgs. - // These are not suggested to the user in the shell completion, - // but accepted if entered manually. - ArgAliases []string - - // BashCompletionFunction is custom bash functions used by the legacy bash autocompletion generator. - // For portability with other shells, it is recommended to instead use ValidArgsFunction - BashCompletionFunction string - - // Deprecated defines, if this command is deprecated and should print this string when used. - Deprecated string - - // Annotations are key/value pairs that can be used by applications to identify or - // group commands. - Annotations map[string]string - - // Version defines the version for this command. If this value is non-empty and the command does not - // define a "version" flag, a "version" boolean flag will be added to the command and, if specified, - // will print content of the "Version" variable. A shorthand "v" flag will also be added if the - // command does not define one. - Version string - - // The *Run functions are executed in the following order: - // * PersistentPreRun() - // * PreRun() - // * Run() - // * PostRun() - // * PersistentPostRun() - // All functions get the same args, the arguments after the command name. - // - // PersistentPreRun: children of this command will inherit and execute. - PersistentPreRun func(cmd *Command, args []string) - // PersistentPreRunE: PersistentPreRun but returns an error. - PersistentPreRunE func(cmd *Command, args []string) error - // PreRun: children of this command will not inherit. - PreRun func(cmd *Command, args []string) - // PreRunE: PreRun but returns an error. - PreRunE func(cmd *Command, args []string) error - // Run: Typically the actual work function. Most commands will only implement this. - Run func(cmd *Command, args []string) - // RunE: Run but returns an error. - RunE func(cmd *Command, args []string) error - // PostRun: run after the Run command. - PostRun func(cmd *Command, args []string) - // PostRunE: PostRun but returns an error. - PostRunE func(cmd *Command, args []string) error - // PersistentPostRun: children of this command will inherit and execute after PostRun. - PersistentPostRun func(cmd *Command, args []string) - // PersistentPostRunE: PersistentPostRun but returns an error. - PersistentPostRunE func(cmd *Command, args []string) error - - // args is actual args parsed from flags. 
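// [Editorial aside] A runnable sketch of the *Run ordering documented above;
// note the child inherits the root's persistent hooks:
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	root := &cobra.Command{
		Use:               "root",
		PersistentPreRun:  func(c *cobra.Command, a []string) { fmt.Println("1. PersistentPreRun") },
		PersistentPostRun: func(c *cobra.Command, a []string) { fmt.Println("5. PersistentPostRun") },
	}
	child := &cobra.Command{
		Use:     "child",
		PreRun:  func(c *cobra.Command, a []string) { fmt.Println("2. PreRun") },
		Run:     func(c *cobra.Command, a []string) { fmt.Println("3. Run") },
		PostRun: func(c *cobra.Command, a []string) { fmt.Println("4. PostRun") },
	}
	root.AddCommand(child)
	root.SetArgs([]string{"child"})
	_ = root.Execute() // prints steps 1 through 5 in order
}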
- args []string - // flagErrorBuf contains all error messages from pflag. - flagErrorBuf *bytes.Buffer - // flags is full set of flags. - flags *flag.FlagSet - // pflags contains persistent flags. - pflags *flag.FlagSet - // lflags contains local flags. - lflags *flag.FlagSet - // iflags contains inherited flags. - iflags *flag.FlagSet - // parentsPflags is all persistent flags of cmd's parents. - parentsPflags *flag.FlagSet - // globNormFunc is the global normalization function - // that we can use on every pflag set and children commands - globNormFunc func(f *flag.FlagSet, name string) flag.NormalizedName - - // usageFunc is usage func defined by user. - usageFunc func(*Command) error - // usageTemplate is usage template defined by user. - usageTemplate string - // flagErrorFunc is func defined by user and it's called when the parsing of - // flags returns an error. - flagErrorFunc func(*Command, error) error - // helpTemplate is help template defined by user. - helpTemplate string - // helpFunc is help func defined by user. - helpFunc func(*Command, []string) - // helpCommand is command with usage 'help'. If it's not defined by user, - // cobra uses default help command. - helpCommand *Command - // versionTemplate is the version template defined by user. - versionTemplate string - - // inReader is a reader defined by the user that replaces stdin - inReader io.Reader - // outWriter is a writer defined by the user that replaces stdout - outWriter io.Writer - // errWriter is a writer defined by the user that replaces stderr - errWriter io.Writer - - //FParseErrWhitelist flag parse errors to be ignored - FParseErrWhitelist FParseErrWhitelist - - // CompletionOptions is a set of options to control the handling of shell completion - CompletionOptions CompletionOptions - - // commandsAreSorted defines, if command slice are sorted or not. - commandsAreSorted bool - // commandCalledAs is the name or alias value used to call this command. - commandCalledAs struct { - name string - called bool - } - - ctx context.Context - - // commands is the list of commands supported by this program. - commands []*Command - // parent is a parent command for this command. - parent *Command - // Max lengths of commands' string lengths for use in padding. - commandsMaxUseLen int - commandsMaxCommandPathLen int - commandsMaxNameLen int - - // TraverseChildren parses flags on all parents before executing child command. - TraverseChildren bool - - // Hidden defines, if this command is hidden and should NOT show up in the list of available commands. - Hidden bool - - // SilenceErrors is an option to quiet errors down stream. - SilenceErrors bool - - // SilenceUsage is an option to silence usage when an error occurs. - SilenceUsage bool - - // DisableFlagParsing disables the flag parsing. - // If this is true all flags will be passed to the command as arguments. - DisableFlagParsing bool - - // DisableAutoGenTag defines, if gen tag ("Auto generated by spf13/cobra...") - // will be printed by generating docs for this command. - DisableAutoGenTag bool - - // DisableFlagsInUseLine will disable the addition of [flags] to the usage - // line of a command when printing help or generating docs - DisableFlagsInUseLine bool - - // DisableSuggestions disables the suggestions based on Levenshtein distance - // that go along with 'unknown command' messages. - DisableSuggestions bool - - // SuggestionsMinimumDistance defines minimum levenshtein distance to display suggestions. - // Must be > 0. 
- SuggestionsMinimumDistance int -} - -// Context returns underlying command context. If command wasn't -// executed with ExecuteContext Context returns Background context. -func (c *Command) Context() context.Context { - return c.ctx -} - -// SetArgs sets arguments for the command. It is set to os.Args[1:] by default, if desired, can be overridden -// particularly useful when testing. -func (c *Command) SetArgs(a []string) { - c.args = a -} - -// SetOutput sets the destination for usage and error messages. -// If output is nil, os.Stderr is used. -// Deprecated: Use SetOut and/or SetErr instead -func (c *Command) SetOutput(output io.Writer) { - c.outWriter = output - c.errWriter = output -} - -// SetOut sets the destination for usage messages. -// If newOut is nil, os.Stdout is used. -func (c *Command) SetOut(newOut io.Writer) { - c.outWriter = newOut -} - -// SetErr sets the destination for error messages. -// If newErr is nil, os.Stderr is used. -func (c *Command) SetErr(newErr io.Writer) { - c.errWriter = newErr -} - -// SetIn sets the source for input data -// If newIn is nil, os.Stdin is used. -func (c *Command) SetIn(newIn io.Reader) { - c.inReader = newIn -} - -// SetUsageFunc sets usage function. Usage can be defined by application. -func (c *Command) SetUsageFunc(f func(*Command) error) { - c.usageFunc = f -} - -// SetUsageTemplate sets usage template. Can be defined by Application. -func (c *Command) SetUsageTemplate(s string) { - c.usageTemplate = s -} - -// SetFlagErrorFunc sets a function to generate an error when flag parsing -// fails. -func (c *Command) SetFlagErrorFunc(f func(*Command, error) error) { - c.flagErrorFunc = f -} - -// SetHelpFunc sets help function. Can be defined by Application. -func (c *Command) SetHelpFunc(f func(*Command, []string)) { - c.helpFunc = f -} - -// SetHelpCommand sets help command. -func (c *Command) SetHelpCommand(cmd *Command) { - c.helpCommand = cmd -} - -// SetHelpTemplate sets help template to be used. Application can use it to set custom template. -func (c *Command) SetHelpTemplate(s string) { - c.helpTemplate = s -} - -// SetVersionTemplate sets version template to be used. Application can use it to set custom template. -func (c *Command) SetVersionTemplate(s string) { - c.versionTemplate = s -} - -// SetGlobalNormalizationFunc sets a normalization function to all flag sets and also to child commands. -// The user should not have a cyclic dependency on commands. -func (c *Command) SetGlobalNormalizationFunc(n func(f *flag.FlagSet, name string) flag.NormalizedName) { - c.Flags().SetNormalizeFunc(n) - c.PersistentFlags().SetNormalizeFunc(n) - c.globNormFunc = n - - for _, command := range c.commands { - command.SetGlobalNormalizationFunc(n) - } -} - -// OutOrStdout returns output to stdout. 
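// [Editorial aside] The Set* hooks above exist largely so commands can be
// tested without touching os.Stdout or os.Args; a hypothetical test sketch:
package demo_test

import (
	"bytes"
	"testing"

	"github.com/spf13/cobra"
)

func TestGreet(t *testing.T) {
	cmd := &cobra.Command{
		Use: "greet",
		Run: func(c *cobra.Command, args []string) { c.Println("hello", args[0]) },
	}
	out := new(bytes.Buffer)
	cmd.SetOut(out)                 // c.Println now writes into the buffer
	cmd.SetArgs([]string{"gopher"}) // replaces the default os.Args[1:]
	if err := cmd.Execute(); err != nil {
		t.Fatal(err)
	}
	if got := out.String(); got != "hello gopher\n" {
		t.Fatalf("unexpected output: %q", got)
	}
}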
-func (c *Command) OutOrStdout() io.Writer { - return c.getOut(os.Stdout) -} - -// OutOrStderr returns output to stderr -func (c *Command) OutOrStderr() io.Writer { - return c.getOut(os.Stderr) -} - -// ErrOrStderr returns output to stderr -func (c *Command) ErrOrStderr() io.Writer { - return c.getErr(os.Stderr) -} - -// InOrStdin returns input to stdin -func (c *Command) InOrStdin() io.Reader { - return c.getIn(os.Stdin) -} - -func (c *Command) getOut(def io.Writer) io.Writer { - if c.outWriter != nil { - return c.outWriter - } - if c.HasParent() { - return c.parent.getOut(def) - } - return def -} - -func (c *Command) getErr(def io.Writer) io.Writer { - if c.errWriter != nil { - return c.errWriter - } - if c.HasParent() { - return c.parent.getErr(def) - } - return def -} - -func (c *Command) getIn(def io.Reader) io.Reader { - if c.inReader != nil { - return c.inReader - } - if c.HasParent() { - return c.parent.getIn(def) - } - return def -} - -// UsageFunc returns either the function set by SetUsageFunc for this command -// or a parent, or it returns a default usage function. -func (c *Command) UsageFunc() (f func(*Command) error) { - if c.usageFunc != nil { - return c.usageFunc - } - if c.HasParent() { - return c.Parent().UsageFunc() - } - return func(c *Command) error { - c.mergePersistentFlags() - err := tmpl(c.OutOrStderr(), c.UsageTemplate(), c) - if err != nil { - c.PrintErrln(err) - } - return err - } -} - -// Usage puts out the usage for the command. -// Used when a user provides invalid input. -// Can be defined by user by overriding UsageFunc. -func (c *Command) Usage() error { - return c.UsageFunc()(c) -} - -// HelpFunc returns either the function set by SetHelpFunc for this command -// or a parent, or it returns a function with default help behavior. -func (c *Command) HelpFunc() func(*Command, []string) { - if c.helpFunc != nil { - return c.helpFunc - } - if c.HasParent() { - return c.Parent().HelpFunc() - } - return func(c *Command, a []string) { - c.mergePersistentFlags() - // The help should be sent to stdout - // See https://github.com/spf13/cobra/issues/1002 - err := tmpl(c.OutOrStdout(), c.HelpTemplate(), c) - if err != nil { - c.PrintErrln(err) - } - } -} - -// Help puts out the help for the command. -// Used when a user calls help [command]. -// Can be defined by user by overriding HelpFunc. -func (c *Command) Help() error { - c.HelpFunc()(c, []string{}) - return nil -} - -// UsageString returns usage string. -func (c *Command) UsageString() string { - // Storing normal writers - tmpOutput := c.outWriter - tmpErr := c.errWriter - - bb := new(bytes.Buffer) - c.outWriter = bb - c.errWriter = bb - - CheckErr(c.Usage()) - - // Setting things back to normal - c.outWriter = tmpOutput - c.errWriter = tmpErr - - return bb.String() -} - -// FlagErrorFunc returns either the function set by SetFlagErrorFunc for this -// command or a parent, or it returns a function which returns the original -// error. -func (c *Command) FlagErrorFunc() (f func(*Command, error) error) { - if c.flagErrorFunc != nil { - return c.flagErrorFunc - } - - if c.HasParent() { - return c.parent.FlagErrorFunc() - } - return func(c *Command, err error) error { - return err - } -} - -var minUsagePadding = 25 - -// UsagePadding return padding for the usage. 
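// [Editorial aside] The UsageTemplate/HelpTemplate/VersionTemplate accessors
// just below return plain text/template bodies, so customizing output is a
// string swap; a minimal sketch (the template text here is invented):
package main

import "github.com/spf13/cobra"

func main() {
	cmd := &cobra.Command{
		Use:     "demo",
		Version: "1.2.3",
		Run:     func(c *cobra.Command, a []string) {},
	}
	// Uses the same {{.Name}}/{{.Version}} pipelines as the default template.
	cmd.SetVersionTemplate("{{.Name}} {{.Version}} (custom build)\n")
	cmd.SetArgs([]string{"--version"})
	_ = cmd.Execute() // prints: demo 1.2.3 (custom build)
}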
-func (c *Command) UsagePadding() int { - if c.parent == nil || minUsagePadding > c.parent.commandsMaxUseLen { - return minUsagePadding - } - return c.parent.commandsMaxUseLen -} - -var minCommandPathPadding = 11 - -// CommandPathPadding return padding for the command path. -func (c *Command) CommandPathPadding() int { - if c.parent == nil || minCommandPathPadding > c.parent.commandsMaxCommandPathLen { - return minCommandPathPadding - } - return c.parent.commandsMaxCommandPathLen -} - -var minNamePadding = 11 - -// NamePadding returns padding for the name. -func (c *Command) NamePadding() int { - if c.parent == nil || minNamePadding > c.parent.commandsMaxNameLen { - return minNamePadding - } - return c.parent.commandsMaxNameLen -} - -// UsageTemplate returns usage template for the command. -func (c *Command) UsageTemplate() string { - if c.usageTemplate != "" { - return c.usageTemplate - } - - if c.HasParent() { - return c.parent.UsageTemplate() - } - return `Usage:{{if .Runnable}} - {{.UseLine}}{{end}}{{if .HasAvailableSubCommands}} - {{.CommandPath}} [command]{{end}}{{if gt (len .Aliases) 0}} - -Aliases: - {{.NameAndAliases}}{{end}}{{if .HasExample}} - -Examples: -{{.Example}}{{end}}{{if .HasAvailableSubCommands}} - -Available Commands:{{range .Commands}}{{if (or .IsAvailableCommand (eq .Name "help"))}} - {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableLocalFlags}} - -Flags: -{{.LocalFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasAvailableInheritedFlags}} - -Global Flags: -{{.InheritedFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasHelpSubCommands}} - -Additional help topics:{{range .Commands}}{{if .IsAdditionalHelpTopicCommand}} - {{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableSubCommands}} - -Use "{{.CommandPath}} [command] --help" for more information about a command.{{end}} -` -} - -// HelpTemplate return help template for the command. -func (c *Command) HelpTemplate() string { - if c.helpTemplate != "" { - return c.helpTemplate - } - - if c.HasParent() { - return c.parent.HelpTemplate() - } - return `{{with (or .Long .Short)}}{{. | trimTrailingWhitespaces}} - -{{end}}{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}` -} - -// VersionTemplate return version template for the command. -func (c *Command) VersionTemplate() string { - if c.versionTemplate != "" { - return c.versionTemplate - } - - if c.HasParent() { - return c.parent.VersionTemplate() - } - return `{{with .Name}}{{printf "%s " .}}{{end}}{{printf "version %s" .Version}} -` -} - -func hasNoOptDefVal(name string, fs *flag.FlagSet) bool { - flag := fs.Lookup(name) - if flag == nil { - return false - } - return flag.NoOptDefVal != "" -} - -func shortHasNoOptDefVal(name string, fs *flag.FlagSet) bool { - if len(name) == 0 { - return false - } - - flag := fs.ShorthandLookup(name[:1]) - if flag == nil { - return false - } - return flag.NoOptDefVal != "" -} - -func stripFlags(args []string, c *Command) []string { - if len(args) == 0 { - return args - } - c.mergePersistentFlags() - - commands := []string{} - flags := c.Flags() - -Loop: - for len(args) > 0 { - s := args[0] - args = args[1:] - switch { - case s == "--": - // "--" terminates the flags - break Loop - case strings.HasPrefix(s, "--") && !strings.Contains(s, "=") && !hasNoOptDefVal(s[2:], flags): - // If '--flag arg' then - // delete arg from args. 
- fallthrough // (do the same as below) - case strings.HasPrefix(s, "-") && !strings.Contains(s, "=") && len(s) == 2 && !shortHasNoOptDefVal(s[1:], flags): - // If '-f arg' then - // delete 'arg' from args or break the loop if len(args) <= 1. - if len(args) <= 1 { - break Loop - } else { - args = args[1:] - continue - } - case s != "" && !strings.HasPrefix(s, "-"): - commands = append(commands, s) - } - } - - return commands -} - -// argsMinusFirstX removes only the first x from args. Otherwise, commands that look like -// openshift admin policy add-role-to-user admin my-user, lose the admin argument (arg[4]). -func argsMinusFirstX(args []string, x string) []string { - for i, y := range args { - if x == y { - ret := []string{} - ret = append(ret, args[:i]...) - ret = append(ret, args[i+1:]...) - return ret - } - } - return args -} - -func isFlagArg(arg string) bool { - return ((len(arg) >= 3 && arg[1] == '-') || - (len(arg) >= 2 && arg[0] == '-' && arg[1] != '-')) -} - -// Find the target command given the args and command tree -// Meant to be run on the highest node. Only searches down. -func (c *Command) Find(args []string) (*Command, []string, error) { - var innerfind func(*Command, []string) (*Command, []string) - - innerfind = func(c *Command, innerArgs []string) (*Command, []string) { - argsWOflags := stripFlags(innerArgs, c) - if len(argsWOflags) == 0 { - return c, innerArgs - } - nextSubCmd := argsWOflags[0] - - cmd := c.findNext(nextSubCmd) - if cmd != nil { - return innerfind(cmd, argsMinusFirstX(innerArgs, nextSubCmd)) - } - return c, innerArgs - } - - commandFound, a := innerfind(c, args) - if commandFound.Args == nil { - return commandFound, a, legacyArgs(commandFound, stripFlags(a, commandFound)) - } - return commandFound, a, nil -} - -func (c *Command) findSuggestions(arg string) string { - if c.DisableSuggestions { - return "" - } - if c.SuggestionsMinimumDistance <= 0 { - c.SuggestionsMinimumDistance = 2 - } - suggestionsString := "" - if suggestions := c.SuggestionsFor(arg); len(suggestions) > 0 { - suggestionsString += "\n\nDid you mean this?\n" - for _, s := range suggestions { - suggestionsString += fmt.Sprintf("\t%v\n", s) - } - } - return suggestionsString -} - -func (c *Command) findNext(next string) *Command { - matches := make([]*Command, 0) - for _, cmd := range c.commands { - if cmd.Name() == next || cmd.HasAlias(next) { - cmd.commandCalledAs.name = next - return cmd - } - if EnablePrefixMatching && cmd.hasNameOrAliasPrefix(next) { - matches = append(matches, cmd) - } - } - - if len(matches) == 1 { - return matches[0] - } - - return nil -} - -// Traverse the command tree to find the command, and parse args for -// each parent. 
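// [Editorial aside] findSuggestions above leans on the unexported ld helper, a
// textbook dynamic-programming Levenshtein distance; this self-contained
// mirror makes the "Did you mean this?" cutoff easy to reason about:
package main

import "fmt"

// levenshtein reproduces ld's recurrence (the case-folding option is omitted).
func levenshtein(s, t string) int {
	d := make([][]int, len(s)+1)
	for i := range d {
		d[i] = make([]int, len(t)+1)
		d[i][0] = i // deleting i characters from s
	}
	for j := range d[0] {
		d[0][j] = j // inserting j characters into an empty s
	}
	for i := 1; i <= len(s); i++ {
		for j := 1; j <= len(t); j++ {
			if s[i-1] == t[j-1] {
				d[i][j] = d[i-1][j-1]
				continue
			}
			min := d[i-1][j] // deletion
			if d[i][j-1] < min {
				min = d[i][j-1] // insertion
			}
			if d[i-1][j-1] < min {
				min = d[i-1][j-1] // substitution
			}
			d[i][j] = min + 1
		}
	}
	return d[len(s)][len(t)]
}

func main() {
	// With the default SuggestionsMinimumDistance of 2, a typo like "stauts"
	// (distance 2 from "status") still triggers a suggestion.
	fmt.Println(levenshtein("stauts", "status")) // 2
}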
-func (c *Command) Traverse(args []string) (*Command, []string, error) { - flags := []string{} - inFlag := false - - for i, arg := range args { - switch { - // A long flag with a space separated value - case strings.HasPrefix(arg, "--") && !strings.Contains(arg, "="): - // TODO: this isn't quite right, we should really check ahead for 'true' or 'false' - inFlag = !hasNoOptDefVal(arg[2:], c.Flags()) - flags = append(flags, arg) - continue - // A short flag with a space separated value - case strings.HasPrefix(arg, "-") && !strings.Contains(arg, "=") && len(arg) == 2 && !shortHasNoOptDefVal(arg[1:], c.Flags()): - inFlag = true - flags = append(flags, arg) - continue - // The value for a flag - case inFlag: - inFlag = false - flags = append(flags, arg) - continue - // A flag without a value, or with an `=` separated value - case isFlagArg(arg): - flags = append(flags, arg) - continue - } - - cmd := c.findNext(arg) - if cmd == nil { - return c, args, nil - } - - if err := c.ParseFlags(flags); err != nil { - return nil, args, err - } - return cmd.Traverse(args[i+1:]) - } - return c, args, nil -} - -// SuggestionsFor provides suggestions for the typedName. -func (c *Command) SuggestionsFor(typedName string) []string { - suggestions := []string{} - for _, cmd := range c.commands { - if cmd.IsAvailableCommand() { - levenshteinDistance := ld(typedName, cmd.Name(), true) - suggestByLevenshtein := levenshteinDistance <= c.SuggestionsMinimumDistance - suggestByPrefix := strings.HasPrefix(strings.ToLower(cmd.Name()), strings.ToLower(typedName)) - if suggestByLevenshtein || suggestByPrefix { - suggestions = append(suggestions, cmd.Name()) - } - for _, explicitSuggestion := range cmd.SuggestFor { - if strings.EqualFold(typedName, explicitSuggestion) { - suggestions = append(suggestions, cmd.Name()) - } - } - } - } - return suggestions -} - -// VisitParents visits all parents of the command and invokes fn on each parent. -func (c *Command) VisitParents(fn func(*Command)) { - if c.HasParent() { - fn(c.Parent()) - c.Parent().VisitParents(fn) - } -} - -// Root finds root command. -func (c *Command) Root() *Command { - if c.HasParent() { - return c.Parent().Root() - } - return c -} - -// ArgsLenAtDash will return the length of c.Flags().Args at the moment -// when a -- was found during args parsing. -func (c *Command) ArgsLenAtDash() int { - return c.Flags().ArgsLenAtDash() -} - -func (c *Command) execute(a []string) (err error) { - if c == nil { - return fmt.Errorf("Called Execute() on a nil Command") - } - - if len(c.Deprecated) > 0 { - c.Printf("Command %q is deprecated, %s\n", c.Name(), c.Deprecated) - } - - // initialize help and version flag at the last point possible to allow for user - // overriding - c.InitDefaultHelpFlag() - c.InitDefaultVersionFlag() - - err = c.ParseFlags(a) - if err != nil { - return c.FlagErrorFunc()(c, err) - } - - // If help is called, regardless of other flags, return we want help. - // Also say we need help if the command isn't runnable. - helpVal, err := c.Flags().GetBool("help") - if err != nil { - // should be impossible to get here as we always declare a help - // flag in InitDefaultHelpFlag() - c.Println("\"help\" flag declared as non-bool. Please correct your code") - return err - } - - if helpVal { - return flag.ErrHelp - } - - // for back-compat, only add version flag behavior if version is defined - if c.Version != "" { - versionVal, err := c.Flags().GetBool("version") - if err != nil { - c.Println("\"version\" flag declared as non-bool. 
Please correct your code") - return err - } - if versionVal { - err := tmpl(c.OutOrStdout(), c.VersionTemplate(), c) - if err != nil { - c.Println(err) - } - return err - } - } - - if !c.Runnable() { - return flag.ErrHelp - } - - c.preRun() - - argWoFlags := c.Flags().Args() - if c.DisableFlagParsing { - argWoFlags = a - } - - if err := c.ValidateArgs(argWoFlags); err != nil { - return err - } - - for p := c; p != nil; p = p.Parent() { - if p.PersistentPreRunE != nil { - if err := p.PersistentPreRunE(c, argWoFlags); err != nil { - return err - } - break - } else if p.PersistentPreRun != nil { - p.PersistentPreRun(c, argWoFlags) - break - } - } - if c.PreRunE != nil { - if err := c.PreRunE(c, argWoFlags); err != nil { - return err - } - } else if c.PreRun != nil { - c.PreRun(c, argWoFlags) - } - - if err := c.validateRequiredFlags(); err != nil { - return err - } - if c.RunE != nil { - if err := c.RunE(c, argWoFlags); err != nil { - return err - } - } else { - c.Run(c, argWoFlags) - } - if c.PostRunE != nil { - if err := c.PostRunE(c, argWoFlags); err != nil { - return err - } - } else if c.PostRun != nil { - c.PostRun(c, argWoFlags) - } - for p := c; p != nil; p = p.Parent() { - if p.PersistentPostRunE != nil { - if err := p.PersistentPostRunE(c, argWoFlags); err != nil { - return err - } - break - } else if p.PersistentPostRun != nil { - p.PersistentPostRun(c, argWoFlags) - break - } - } - - return nil -} - -func (c *Command) preRun() { - for _, x := range initializers { - x() - } -} - -// ExecuteContext is the same as Execute(), but sets the ctx on the command. -// Retrieve ctx by calling cmd.Context() inside your *Run lifecycle or ValidArgs -// functions. -func (c *Command) ExecuteContext(ctx context.Context) error { - c.ctx = ctx - return c.Execute() -} - -// Execute uses the args (os.Args[1:] by default) -// and run through the command tree finding appropriate matches -// for commands and then corresponding flags. -func (c *Command) Execute() error { - _, err := c.ExecuteC() - return err -} - -// ExecuteContextC is the same as ExecuteC(), but sets the ctx on the command. -// Retrieve ctx by calling cmd.Context() inside your *Run lifecycle or ValidArgs -// functions. -func (c *Command) ExecuteContextC(ctx context.Context) (*Command, error) { - c.ctx = ctx - return c.ExecuteC() -} - -// ExecuteC executes the command. 
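// [Editorial aside] ExecuteContext threads a context through the command tree,
// and cmd.Context() hands it back inside any *Run hook; a sketch wiring it to
// SIGINT (assumes Go 1.16+ for signal.NotifyContext):
package main

import (
	"context"
	"fmt"
	"os"
	"os/signal"

	"github.com/spf13/cobra"
)

func main() {
	root := &cobra.Command{
		Use: "serve",
		RunE: func(cmd *cobra.Command, args []string) error {
			<-cmd.Context().Done() // unblocks on Ctrl-C
			fmt.Println("shutting down")
			return nil
		},
	}
	ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt)
	defer stop()
	if err := root.ExecuteContext(ctx); err != nil {
		os.Exit(1)
	}
}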
-func (c *Command) ExecuteC() (cmd *Command, err error) { - if c.ctx == nil { - c.ctx = context.Background() - } - - // Regardless of what command execute is called on, run on Root only - if c.HasParent() { - return c.Root().ExecuteC() - } - - // windows hook - if preExecHookFn != nil { - preExecHookFn(c) - } - - // initialize help at the last point to allow for user overriding - c.InitDefaultHelpCmd() - // initialize completion at the last point to allow for user overriding - c.initDefaultCompletionCmd() - - args := c.args - - // Workaround FAIL with "go test -v" or "cobra.test -test.v", see #155 - if c.args == nil && filepath.Base(os.Args[0]) != "cobra.test" { - args = os.Args[1:] - } - - // initialize the hidden command to be used for shell completion - c.initCompleteCmd(args) - - var flags []string - if c.TraverseChildren { - cmd, flags, err = c.Traverse(args) - } else { - cmd, flags, err = c.Find(args) - } - if err != nil { - // If found parse to a subcommand and then failed, talk about the subcommand - if cmd != nil { - c = cmd - } - if !c.SilenceErrors { - c.PrintErrln("Error:", err.Error()) - c.PrintErrf("Run '%v --help' for usage.\n", c.CommandPath()) - } - return c, err - } - - cmd.commandCalledAs.called = true - if cmd.commandCalledAs.name == "" { - cmd.commandCalledAs.name = cmd.Name() - } - - // We have to pass global context to children command - // if context is present on the parent command. - if cmd.ctx == nil { - cmd.ctx = c.ctx - } - - err = cmd.execute(flags) - if err != nil { - // Always show help if requested, even if SilenceErrors is in - // effect - if err == flag.ErrHelp { - cmd.HelpFunc()(cmd, args) - return cmd, nil - } - - // If root command has SilenceErrors flagged, - // all subcommands should respect it - if !cmd.SilenceErrors && !c.SilenceErrors { - c.PrintErrln("Error:", err.Error()) - } - - // If root command has SilenceUsage flagged, - // all subcommands should respect it - if !cmd.SilenceUsage && !c.SilenceUsage { - c.Println(cmd.UsageString()) - } - } - return cmd, err -} - -func (c *Command) ValidateArgs(args []string) error { - if c.Args == nil { - return nil - } - return c.Args(c, args) -} - -func (c *Command) validateRequiredFlags() error { - if c.DisableFlagParsing { - return nil - } - - flags := c.Flags() - missingFlagNames := []string{} - flags.VisitAll(func(pflag *flag.Flag) { - requiredAnnotation, found := pflag.Annotations[BashCompOneRequiredFlag] - if !found { - return - } - if (requiredAnnotation[0] == "true") && !pflag.Changed { - missingFlagNames = append(missingFlagNames, pflag.Name) - } - }) - - if len(missingFlagNames) > 0 { - return fmt.Errorf(`required flag(s) "%s" not set`, strings.Join(missingFlagNames, `", "`)) - } - return nil -} - -// InitDefaultHelpFlag adds default help flag to c. -// It is called automatically by executing the c or by calling help and usage. -// If c already has help flag, it will do nothing. -func (c *Command) InitDefaultHelpFlag() { - c.mergePersistentFlags() - if c.Flags().Lookup("help") == nil { - usage := "help for " - if c.Name() == "" { - usage += "this command" - } else { - usage += c.Name() - } - c.Flags().BoolP("help", "h", false, usage) - } -} - -// InitDefaultVersionFlag adds default version flag to c. -// It is called automatically by executing the c. -// If c already has a version flag, it will do nothing. -// If c.Version is empty, it will do nothing. 
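// [Editorial aside] validateRequiredFlags above reads an annotation that
// MarkFlagRequired (defined elsewhere in the package) sets, so a missing
// required flag fails before Run is ever invoked; a hypothetical sketch:
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	cmd := &cobra.Command{
		Use:          "push",
		SilenceUsage: true, // see the SilenceUsage handling in ExecuteC above
		RunE:         func(c *cobra.Command, args []string) error { return nil },
	}
	cmd.Flags().String("remote", "", "target remote")
	_ = cmd.MarkFlagRequired("remote")

	cmd.SetArgs([]string{}) // no --remote supplied
	err := cmd.Execute()
	fmt.Println(err) // required flag(s) "remote" not set
}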
-func (c *Command) InitDefaultVersionFlag() { - if c.Version == "" { - return - } - - c.mergePersistentFlags() - if c.Flags().Lookup("version") == nil { - usage := "version for " - if c.Name() == "" { - usage += "this command" - } else { - usage += c.Name() - } - if c.Flags().ShorthandLookup("v") == nil { - c.Flags().BoolP("version", "v", false, usage) - } else { - c.Flags().Bool("version", false, usage) - } - } -} - -// InitDefaultHelpCmd adds default help command to c. -// It is called automatically by executing the c or by calling help and usage. -// If c already has help command or c has no subcommands, it will do nothing. -func (c *Command) InitDefaultHelpCmd() { - if !c.HasSubCommands() { - return - } - - if c.helpCommand == nil { - c.helpCommand = &Command{ - Use: "help [command]", - Short: "Help about any command", - Long: `Help provides help for any command in the application. -Simply type ` + c.Name() + ` help [path to command] for full details.`, - ValidArgsFunction: func(c *Command, args []string, toComplete string) ([]string, ShellCompDirective) { - var completions []string - cmd, _, e := c.Root().Find(args) - if e != nil { - return nil, ShellCompDirectiveNoFileComp - } - if cmd == nil { - // Root help command. - cmd = c.Root() - } - for _, subCmd := range cmd.Commands() { - if subCmd.IsAvailableCommand() || subCmd == cmd.helpCommand { - if strings.HasPrefix(subCmd.Name(), toComplete) { - completions = append(completions, fmt.Sprintf("%s\t%s", subCmd.Name(), subCmd.Short)) - } - } - } - return completions, ShellCompDirectiveNoFileComp - }, - Run: func(c *Command, args []string) { - cmd, _, e := c.Root().Find(args) - if cmd == nil || e != nil { - c.Printf("Unknown help topic %#q\n", args) - CheckErr(c.Root().Usage()) - } else { - cmd.InitDefaultHelpFlag() // make possible 'help' flag to be shown - CheckErr(cmd.Help()) - } - }, - } - } - c.RemoveCommand(c.helpCommand) - c.AddCommand(c.helpCommand) -} - -// ResetCommands delete parent, subcommand and help command from c. -func (c *Command) ResetCommands() { - c.parent = nil - c.commands = nil - c.helpCommand = nil - c.parentsPflags = nil -} - -// Sorts commands by their names. -type commandSorterByName []*Command - -func (c commandSorterByName) Len() int { return len(c) } -func (c commandSorterByName) Swap(i, j int) { c[i], c[j] = c[j], c[i] } -func (c commandSorterByName) Less(i, j int) bool { return c[i].Name() < c[j].Name() } - -// Commands returns a sorted slice of child commands. -func (c *Command) Commands() []*Command { - // do not sort commands if it already sorted or sorting was disabled - if EnableCommandSorting && !c.commandsAreSorted { - sort.Sort(commandSorterByName(c.commands)) - c.commandsAreSorted = true - } - return c.commands -} - -// AddCommand adds one or more commands to this parent command. 
-func (c *Command) AddCommand(cmds ...*Command) { - for i, x := range cmds { - if cmds[i] == c { - panic("Command can't be a child of itself") - } - cmds[i].parent = c - // update max lengths - usageLen := len(x.Use) - if usageLen > c.commandsMaxUseLen { - c.commandsMaxUseLen = usageLen - } - commandPathLen := len(x.CommandPath()) - if commandPathLen > c.commandsMaxCommandPathLen { - c.commandsMaxCommandPathLen = commandPathLen - } - nameLen := len(x.Name()) - if nameLen > c.commandsMaxNameLen { - c.commandsMaxNameLen = nameLen - } - // If global normalization function exists, update all children - if c.globNormFunc != nil { - x.SetGlobalNormalizationFunc(c.globNormFunc) - } - c.commands = append(c.commands, x) - c.commandsAreSorted = false - } -} - -// RemoveCommand removes one or more commands from a parent command. -func (c *Command) RemoveCommand(cmds ...*Command) { - commands := []*Command{} -main: - for _, command := range c.commands { - for _, cmd := range cmds { - if command == cmd { - command.parent = nil - continue main - } - } - commands = append(commands, command) - } - c.commands = commands - // recompute all lengths - c.commandsMaxUseLen = 0 - c.commandsMaxCommandPathLen = 0 - c.commandsMaxNameLen = 0 - for _, command := range c.commands { - usageLen := len(command.Use) - if usageLen > c.commandsMaxUseLen { - c.commandsMaxUseLen = usageLen - } - commandPathLen := len(command.CommandPath()) - if commandPathLen > c.commandsMaxCommandPathLen { - c.commandsMaxCommandPathLen = commandPathLen - } - nameLen := len(command.Name()) - if nameLen > c.commandsMaxNameLen { - c.commandsMaxNameLen = nameLen - } - } -} - -// Print is a convenience method to Print to the defined output, fallback to Stderr if not set. -func (c *Command) Print(i ...interface{}) { - fmt.Fprint(c.OutOrStderr(), i...) -} - -// Println is a convenience method to Println to the defined output, fallback to Stderr if not set. -func (c *Command) Println(i ...interface{}) { - c.Print(fmt.Sprintln(i...)) -} - -// Printf is a convenience method to Printf to the defined output, fallback to Stderr if not set. -func (c *Command) Printf(format string, i ...interface{}) { - c.Print(fmt.Sprintf(format, i...)) -} - -// PrintErr is a convenience method to Print to the defined Err output, fallback to Stderr if not set. -func (c *Command) PrintErr(i ...interface{}) { - fmt.Fprint(c.ErrOrStderr(), i...) -} - -// PrintErrln is a convenience method to Println to the defined Err output, fallback to Stderr if not set. -func (c *Command) PrintErrln(i ...interface{}) { - c.PrintErr(fmt.Sprintln(i...)) -} - -// PrintErrf is a convenience method to Printf to the defined Err output, fallback to Stderr if not set. -func (c *Command) PrintErrf(format string, i ...interface{}) { - c.PrintErr(fmt.Sprintf(format, i...)) -} - -// CommandPath returns the full path to this command. -func (c *Command) CommandPath() string { - if c.HasParent() { - return c.Parent().CommandPath() + " " + c.Name() - } - return c.Name() -} - -// UseLine puts out the full usage for a given command (including parents). -func (c *Command) UseLine() string { - var useline string - if c.HasParent() { - useline = c.parent.CommandPath() + " " + c.Use - } else { - useline = c.Use - } - if c.DisableFlagsInUseLine { - return useline - } - if c.HasAvailableFlags() && !strings.Contains(useline, "[flags]") { - useline += " [flags]" - } - return useline -} - -// DebugFlags used to determine which flags have been assigned to which commands -// and which persist. 
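// [Editorial aside] CommandPath and UseLine below are derived from the tree
// built by AddCommand above, which is why parenting matters for help output;
// the command names are invented:
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	root := &cobra.Command{Use: "app"}
	sync := &cobra.Command{Use: "sync [remote]", Run: func(c *cobra.Command, a []string) {}}
	root.AddCommand(sync)

	fmt.Println(sync.Name())        // sync
	fmt.Println(sync.CommandPath()) // app sync
	fmt.Println(sync.UseLine())     // app sync [remote]
}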
-func (c *Command) DebugFlags() { - c.Println("DebugFlags called on", c.Name()) - var debugflags func(*Command) - - debugflags = func(x *Command) { - if x.HasFlags() || x.HasPersistentFlags() { - c.Println(x.Name()) - } - if x.HasFlags() { - x.flags.VisitAll(func(f *flag.Flag) { - if x.HasPersistentFlags() && x.persistentFlag(f.Name) != nil { - c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [LP]") - } else { - c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [L]") - } - }) - } - if x.HasPersistentFlags() { - x.pflags.VisitAll(func(f *flag.Flag) { - if x.HasFlags() { - if x.flags.Lookup(f.Name) == nil { - c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [P]") - } - } else { - c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [P]") - } - }) - } - c.Println(x.flagErrorBuf) - if x.HasSubCommands() { - for _, y := range x.commands { - debugflags(y) - } - } - } - - debugflags(c) -} - -// Name returns the command's name: the first word in the use line. -func (c *Command) Name() string { - name := c.Use - i := strings.Index(name, " ") - if i >= 0 { - name = name[:i] - } - return name -} - -// HasAlias determines if a given string is an alias of the command. -func (c *Command) HasAlias(s string) bool { - for _, a := range c.Aliases { - if a == s { - return true - } - } - return false -} - -// CalledAs returns the command name or alias that was used to invoke -// this command or an empty string if the command has not been called. -func (c *Command) CalledAs() string { - if c.commandCalledAs.called { - return c.commandCalledAs.name - } - return "" -} - -// hasNameOrAliasPrefix returns true if the Name or any of aliases start -// with prefix -func (c *Command) hasNameOrAliasPrefix(prefix string) bool { - if strings.HasPrefix(c.Name(), prefix) { - c.commandCalledAs.name = c.Name() - return true - } - for _, alias := range c.Aliases { - if strings.HasPrefix(alias, prefix) { - c.commandCalledAs.name = alias - return true - } - } - return false -} - -// NameAndAliases returns a list of the command name and all aliases -func (c *Command) NameAndAliases() string { - return strings.Join(append([]string{c.Name()}, c.Aliases...), ", ") -} - -// HasExample determines if the command has example. -func (c *Command) HasExample() bool { - return len(c.Example) > 0 -} - -// Runnable determines if the command is itself runnable. -func (c *Command) Runnable() bool { - return c.Run != nil || c.RunE != nil -} - -// HasSubCommands determines if the command has children commands. -func (c *Command) HasSubCommands() bool { - return len(c.commands) > 0 -} - -// IsAvailableCommand determines if a command is available as a non-help command -// (this includes all non deprecated/hidden commands). -func (c *Command) IsAvailableCommand() bool { - if len(c.Deprecated) != 0 || c.Hidden { - return false - } - - if c.HasParent() && c.Parent().helpCommand == c { - return false - } - - if c.Runnable() || c.HasAvailableSubCommands() { - return true - } - - return false -} - -// IsAdditionalHelpTopicCommand determines if a command is an additional -// help topic command; additional help topic command is determined by the -// fact that it is NOT runnable/hidden/deprecated, and has no sub commands that -// are runnable/hidden/deprecated. -// Concrete example: https://github.com/spf13/cobra/issues/393#issuecomment-282741924. 
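// [Editorial aside] HasAlias and CalledAs above let a command react to the
// name it was invoked under; a small sketch:
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	root := &cobra.Command{Use: "app"}
	root.AddCommand(&cobra.Command{
		Use:     "remove",
		Aliases: []string{"rm", "delete"},
		Run: func(c *cobra.Command, a []string) {
			fmt.Println("invoked as:", c.CalledAs()) // invoked as: rm
		},
	})
	root.SetArgs([]string{"rm"})
	_ = root.Execute()
}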
-func (c *Command) IsAdditionalHelpTopicCommand() bool { - // if a command is runnable, deprecated, or hidden it is not a 'help' command - if c.Runnable() || len(c.Deprecated) != 0 || c.Hidden { - return false - } - - // if any non-help sub commands are found, the command is not a 'help' command - for _, sub := range c.commands { - if !sub.IsAdditionalHelpTopicCommand() { - return false - } - } - - // the command either has no sub commands, or no non-help sub commands - return true -} - -// HasHelpSubCommands determines if a command has any available 'help' sub commands -// that need to be shown in the usage/help default template under 'additional help -// topics'. -func (c *Command) HasHelpSubCommands() bool { - // return true on the first found available 'help' sub command - for _, sub := range c.commands { - if sub.IsAdditionalHelpTopicCommand() { - return true - } - } - - // the command either has no sub commands, or no available 'help' sub commands - return false -} - -// HasAvailableSubCommands determines if a command has available sub commands that -// need to be shown in the usage/help default template under 'available commands'. -func (c *Command) HasAvailableSubCommands() bool { - // return true on the first found available (non deprecated/help/hidden) - // sub command - for _, sub := range c.commands { - if sub.IsAvailableCommand() { - return true - } - } - - // the command either has no sub commands, or no available (non deprecated/help/hidden) - // sub commands - return false -} - -// HasParent determines if the command is a child command. -func (c *Command) HasParent() bool { - return c.parent != nil -} - -// GlobalNormalizationFunc returns the global normalization function or nil if it doesn't exist. -func (c *Command) GlobalNormalizationFunc() func(f *flag.FlagSet, name string) flag.NormalizedName { - return c.globNormFunc -} - -// Flags returns the complete FlagSet that applies -// to this command (local and persistent declared here and by all parents). -func (c *Command) Flags() *flag.FlagSet { - if c.flags == nil { - c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) - if c.flagErrorBuf == nil { - c.flagErrorBuf = new(bytes.Buffer) - } - c.flags.SetOutput(c.flagErrorBuf) - } - - return c.flags -} - -// LocalNonPersistentFlags are flags specific to this command which will NOT persist to subcommands. -func (c *Command) LocalNonPersistentFlags() *flag.FlagSet { - persistentFlags := c.PersistentFlags() - - out := flag.NewFlagSet(c.Name(), flag.ContinueOnError) - c.LocalFlags().VisitAll(func(f *flag.Flag) { - if persistentFlags.Lookup(f.Name) == nil { - out.AddFlag(f) - } - }) - return out -} - -// LocalFlags returns the local FlagSet specifically set in the current command. -func (c *Command) LocalFlags() *flag.FlagSet { - c.mergePersistentFlags() - - if c.lflags == nil { - c.lflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) - if c.flagErrorBuf == nil { - c.flagErrorBuf = new(bytes.Buffer) - } - c.lflags.SetOutput(c.flagErrorBuf) - } - c.lflags.SortFlags = c.Flags().SortFlags - if c.globNormFunc != nil { - c.lflags.SetNormalizeFunc(c.globNormFunc) - } - - addToLocal := func(f *flag.Flag) { - if c.lflags.Lookup(f.Name) == nil && c.parentsPflags.Lookup(f.Name) == nil { - c.lflags.AddFlag(f) - } - } - c.Flags().VisitAll(addToLocal) - c.PersistentFlags().VisitAll(addToLocal) - return c.lflags -} - -// InheritedFlags returns all flags which were inherited from parent commands. 
-func (c *Command) InheritedFlags() *flag.FlagSet { - c.mergePersistentFlags() - - if c.iflags == nil { - c.iflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) - if c.flagErrorBuf == nil { - c.flagErrorBuf = new(bytes.Buffer) - } - c.iflags.SetOutput(c.flagErrorBuf) - } - - local := c.LocalFlags() - if c.globNormFunc != nil { - c.iflags.SetNormalizeFunc(c.globNormFunc) - } - - c.parentsPflags.VisitAll(func(f *flag.Flag) { - if c.iflags.Lookup(f.Name) == nil && local.Lookup(f.Name) == nil { - c.iflags.AddFlag(f) - } - }) - return c.iflags -} - -// NonInheritedFlags returns all flags which were not inherited from parent commands. -func (c *Command) NonInheritedFlags() *flag.FlagSet { - return c.LocalFlags() -} - -// PersistentFlags returns the persistent FlagSet specifically set in the current command. -func (c *Command) PersistentFlags() *flag.FlagSet { - if c.pflags == nil { - c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) - if c.flagErrorBuf == nil { - c.flagErrorBuf = new(bytes.Buffer) - } - c.pflags.SetOutput(c.flagErrorBuf) - } - return c.pflags -} - -// ResetFlags deletes all flags from command. -func (c *Command) ResetFlags() { - c.flagErrorBuf = new(bytes.Buffer) - c.flagErrorBuf.Reset() - c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) - c.flags.SetOutput(c.flagErrorBuf) - c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) - c.pflags.SetOutput(c.flagErrorBuf) - - c.lflags = nil - c.iflags = nil - c.parentsPflags = nil -} - -// HasFlags checks if the command contains any flags (local plus persistent from the entire structure). -func (c *Command) HasFlags() bool { - return c.Flags().HasFlags() -} - -// HasPersistentFlags checks if the command contains persistent flags. -func (c *Command) HasPersistentFlags() bool { - return c.PersistentFlags().HasFlags() -} - -// HasLocalFlags checks if the command has flags specifically declared locally. -func (c *Command) HasLocalFlags() bool { - return c.LocalFlags().HasFlags() -} - -// HasInheritedFlags checks if the command has flags inherited from its parent command. -func (c *Command) HasInheritedFlags() bool { - return c.InheritedFlags().HasFlags() -} - -// HasAvailableFlags checks if the command contains any flags (local plus persistent from the entire -// structure) which are not hidden or deprecated. -func (c *Command) HasAvailableFlags() bool { - return c.Flags().HasAvailableFlags() -} - -// HasAvailablePersistentFlags checks if the command contains persistent flags which are not hidden or deprecated. -func (c *Command) HasAvailablePersistentFlags() bool { - return c.PersistentFlags().HasAvailableFlags() -} - -// HasAvailableLocalFlags checks if the command has flags specifically declared locally which are not hidden -// or deprecated. -func (c *Command) HasAvailableLocalFlags() bool { - return c.LocalFlags().HasAvailableFlags() -} - -// HasAvailableInheritedFlags checks if the command has flags inherited from its parent command which are -// not hidden or deprecated. -func (c *Command) HasAvailableInheritedFlags() bool { - return c.InheritedFlags().HasAvailableFlags() -} - -// Flag climbs up the command tree looking for matching flag. -func (c *Command) Flag(name string) (flag *flag.Flag) { - flag = c.Flags().Lookup(name) - - if flag == nil { - flag = c.persistentFlag(name) - } - - return -} - -// Recursively find matching persistent flag. 
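// [Editorial aside] Persistent flags declared on a parent surface on children
// through the merging done by mergePersistentFlags/updateParentsPflags further
// down; a sketch:
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	root := &cobra.Command{Use: "app"}
	root.PersistentFlags().Bool("verbose", false, "enable verbose output")

	child := &cobra.Command{
		Use: "sync",
		Run: func(c *cobra.Command, a []string) {
			v, _ := c.Flags().GetBool("verbose") // resolved via the parent's persistent flags
			fmt.Println("verbose:", v)
		},
	}
	root.AddCommand(child)
	root.SetArgs([]string{"sync", "--verbose"})
	_ = root.Execute() // verbose: true
}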
-func (c *Command) persistentFlag(name string) (flag *flag.Flag) { - if c.HasPersistentFlags() { - flag = c.PersistentFlags().Lookup(name) - } - - if flag == nil { - c.updateParentsPflags() - flag = c.parentsPflags.Lookup(name) - } - return -} - -// ParseFlags parses persistent flag tree and local flags. -func (c *Command) ParseFlags(args []string) error { - if c.DisableFlagParsing { - return nil - } - - if c.flagErrorBuf == nil { - c.flagErrorBuf = new(bytes.Buffer) - } - beforeErrorBufLen := c.flagErrorBuf.Len() - c.mergePersistentFlags() - - // do it here after merging all flags and just before parse - c.Flags().ParseErrorsWhitelist = flag.ParseErrorsWhitelist(c.FParseErrWhitelist) - - err := c.Flags().Parse(args) - // Print warnings if they occurred (e.g. deprecated flag messages). - if c.flagErrorBuf.Len()-beforeErrorBufLen > 0 && err == nil { - c.Print(c.flagErrorBuf.String()) - } - - return err -} - -// Parent returns a commands parent command. -func (c *Command) Parent() *Command { - return c.parent -} - -// mergePersistentFlags merges c.PersistentFlags() to c.Flags() -// and adds missing persistent flags of all parents. -func (c *Command) mergePersistentFlags() { - c.updateParentsPflags() - c.Flags().AddFlagSet(c.PersistentFlags()) - c.Flags().AddFlagSet(c.parentsPflags) -} - -// updateParentsPflags updates c.parentsPflags by adding -// new persistent flags of all parents. -// If c.parentsPflags == nil, it makes new. -func (c *Command) updateParentsPflags() { - if c.parentsPflags == nil { - c.parentsPflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) - c.parentsPflags.SetOutput(c.flagErrorBuf) - c.parentsPflags.SortFlags = false - } - - if c.globNormFunc != nil { - c.parentsPflags.SetNormalizeFunc(c.globNormFunc) - } - - c.Root().PersistentFlags().AddFlagSet(flag.CommandLine) - - c.VisitParents(func(parent *Command) { - c.parentsPflags.AddFlagSet(parent.PersistentFlags()) - }) -} diff --git a/vendor/github.com/spf13/cobra/command_notwin.go b/vendor/github.com/spf13/cobra/command_notwin.go deleted file mode 100644 index 6159c1cc..00000000 --- a/vendor/github.com/spf13/cobra/command_notwin.go +++ /dev/null @@ -1,5 +0,0 @@ -// +build !windows - -package cobra - -var preExecHookFn func(*Command) diff --git a/vendor/github.com/spf13/cobra/command_win.go b/vendor/github.com/spf13/cobra/command_win.go deleted file mode 100644 index 8768b173..00000000 --- a/vendor/github.com/spf13/cobra/command_win.go +++ /dev/null @@ -1,26 +0,0 @@ -// +build windows - -package cobra - -import ( - "fmt" - "os" - "time" - - "github.com/inconshreveable/mousetrap" -) - -var preExecHookFn = preExecHook - -func preExecHook(c *Command) { - if MousetrapHelpText != "" && mousetrap.StartedByExplorer() { - c.Print(MousetrapHelpText) - if MousetrapDisplayDuration > 0 { - time.Sleep(MousetrapDisplayDuration) - } else { - c.Println("Press return to continue...") - fmt.Scanln() - } - os.Exit(1) - } -} diff --git a/vendor/github.com/spf13/cobra/completions.go b/vendor/github.com/spf13/cobra/completions.go deleted file mode 100644 index b849b9c8..00000000 --- a/vendor/github.com/spf13/cobra/completions.go +++ /dev/null @@ -1,781 +0,0 @@ -package cobra - -import ( - "fmt" - "os" - "strings" - "sync" - - "github.com/spf13/pflag" -) - -const ( - // ShellCompRequestCmd is the name of the hidden command that is used to request - // completion results from the program. It is used by the shell completion scripts. 
- ShellCompRequestCmd = "__complete" - // ShellCompNoDescRequestCmd is the name of the hidden command that is used to request - // completion results without their description. It is used by the shell completion scripts. - ShellCompNoDescRequestCmd = "__completeNoDesc" -) - -// Global map of flag completion functions. Make sure to use flagCompletionMutex before you try to read and write from it. -var flagCompletionFunctions = map[*pflag.Flag]func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective){} - -// lock for reading and writing from flagCompletionFunctions -var flagCompletionMutex = &sync.RWMutex{} - -// ShellCompDirective is a bit map representing the different behaviors the shell -// can be instructed to have once completions have been provided. -type ShellCompDirective int - -type flagCompError struct { - subCommand string - flagName string -} - -func (e *flagCompError) Error() string { - return "Subcommand '" + e.subCommand + "' does not support flag '" + e.flagName + "'" -} - -const ( - // ShellCompDirectiveError indicates an error occurred and completions should be ignored. - ShellCompDirectiveError ShellCompDirective = 1 << iota - - // ShellCompDirectiveNoSpace indicates that the shell should not add a space - // after the completion even if there is a single completion provided. - ShellCompDirectiveNoSpace - - // ShellCompDirectiveNoFileComp indicates that the shell should not provide - // file completion even when no completion is provided. - ShellCompDirectiveNoFileComp - - // ShellCompDirectiveFilterFileExt indicates that the provided completions - // should be used as file extension filters. - // For flags, using Command.MarkFlagFilename() and Command.MarkPersistentFlagFilename() - // is a shortcut to using this directive explicitly. The BashCompFilenameExt - // annotation can also be used to obtain the same behavior for flags. - ShellCompDirectiveFilterFileExt - - // ShellCompDirectiveFilterDirs indicates that only directory names should - // be provided in file completion. To request directory names within another - // directory, the returned completions should specify the directory within - // which to search. The BashCompSubdirsInDir annotation can be used to - // obtain the same behavior but only for flags. - ShellCompDirectiveFilterDirs - - // =========================================================================== - - // All directives using iota should be above this one. - // For internal use. - shellCompDirectiveMaxValue - - // ShellCompDirectiveDefault indicates to let the shell perform its default - // behavior after completions have been provided. - // This one must be last to avoid messing up the iota count. 
- ShellCompDirectiveDefault ShellCompDirective = 0 -) - -const ( - // Constants for the completion command - compCmdName = "completion" - compCmdNoDescFlagName = "no-descriptions" - compCmdNoDescFlagDesc = "disable completion descriptions" - compCmdNoDescFlagDefault = false -) - -// CompletionOptions are the options to control shell completion -type CompletionOptions struct { - // DisableDefaultCmd prevents Cobra from creating a default 'completion' command - DisableDefaultCmd bool - // DisableNoDescFlag prevents Cobra from creating the '--no-descriptions' flag - // for shells that support completion descriptions - DisableNoDescFlag bool - // DisableDescriptions turns off all completion descriptions for shells - // that support them - DisableDescriptions bool -} - -// NoFileCompletions can be used to disable file completion for commands that should -// not trigger file completions. -func NoFileCompletions(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) { - return nil, ShellCompDirectiveNoFileComp -} - -// RegisterFlagCompletionFunc should be called to register a function to provide completion for a flag. -func (c *Command) RegisterFlagCompletionFunc(flagName string, f func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective)) error { - flag := c.Flag(flagName) - if flag == nil { - return fmt.Errorf("RegisterFlagCompletionFunc: flag '%s' does not exist", flagName) - } - flagCompletionMutex.Lock() - defer flagCompletionMutex.Unlock() - - if _, exists := flagCompletionFunctions[flag]; exists { - return fmt.Errorf("RegisterFlagCompletionFunc: flag '%s' already registered", flagName) - } - flagCompletionFunctions[flag] = f - return nil -} - -// Returns a string listing the different directive enabled in the specified parameter -func (d ShellCompDirective) string() string { - var directives []string - if d&ShellCompDirectiveError != 0 { - directives = append(directives, "ShellCompDirectiveError") - } - if d&ShellCompDirectiveNoSpace != 0 { - directives = append(directives, "ShellCompDirectiveNoSpace") - } - if d&ShellCompDirectiveNoFileComp != 0 { - directives = append(directives, "ShellCompDirectiveNoFileComp") - } - if d&ShellCompDirectiveFilterFileExt != 0 { - directives = append(directives, "ShellCompDirectiveFilterFileExt") - } - if d&ShellCompDirectiveFilterDirs != 0 { - directives = append(directives, "ShellCompDirectiveFilterDirs") - } - if len(directives) == 0 { - directives = append(directives, "ShellCompDirectiveDefault") - } - - if d >= shellCompDirectiveMaxValue { - return fmt.Sprintf("ERROR: unexpected ShellCompDirective value: %d", d) - } - return strings.Join(directives, ", ") -} - -// Adds a special hidden command that can be used to request custom completions. 
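// [Editorial aside] A sketch of the dynamic flag completion registered through
// RegisterFlagCompletionFunc above; the flag and value names are invented:
package main

import "github.com/spf13/cobra"

func main() {
	cmd := &cobra.Command{Use: "deploy", Run: func(c *cobra.Command, a []string) {}}
	cmd.Flags().String("env", "", "target environment")

	_ = cmd.RegisterFlagCompletionFunc("env",
		func(c *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
			// A completion may carry a tab-separated description; it is
			// stripped when the shell asks via __completeNoDesc.
			return []string{"staging\tpre-production", "production\tlive traffic"},
				cobra.ShellCompDirectiveNoFileComp
		})
	_ = cmd.Execute()
}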
-func (c *Command) initCompleteCmd(args []string) { - completeCmd := &Command{ - Use: fmt.Sprintf("%s [command-line]", ShellCompRequestCmd), - Aliases: []string{ShellCompNoDescRequestCmd}, - DisableFlagsInUseLine: true, - Hidden: true, - DisableFlagParsing: true, - Args: MinimumNArgs(1), - Short: "Request shell completion choices for the specified command-line", - Long: fmt.Sprintf("%[2]s is a special command that is used by the shell completion logic\n%[1]s", - "to request completion choices for the specified command-line.", ShellCompRequestCmd), - Run: func(cmd *Command, args []string) { - finalCmd, completions, directive, err := cmd.getCompletions(args) - if err != nil { - CompErrorln(err.Error()) - // Keep going for multiple reasons: - // 1- There could be some valid completions even though there was an error - // 2- Even without completions, we need to print the directive - } - - noDescriptions := (cmd.CalledAs() == ShellCompNoDescRequestCmd) - for _, comp := range completions { - if noDescriptions { - // Remove any description that may be included following a tab character. - comp = strings.Split(comp, "\t")[0] - } - - // Make sure we only write the first line to the output. - // This is needed if a description contains a linebreak. - // Otherwise the shell scripts will interpret the other lines as new flags - // and could therefore provide a wrong completion. - comp = strings.Split(comp, "\n")[0] - - // Finally trim the completion. This is especially important to get rid - // of a trailing tab when there are no description following it. - // For example, a sub-command without a description should not be completed - // with a tab at the end (or else zsh will show a -- following it - // although there is no description). - comp = strings.TrimSpace(comp) - - // Print each possible completion to stdout for the completion script to consume. - fmt.Fprintln(finalCmd.OutOrStdout(), comp) - } - - // As the last printout, print the completion directive for the completion script to parse. - // The directive integer must be that last character following a single colon (:). - // The completion script expects : - fmt.Fprintf(finalCmd.OutOrStdout(), ":%d\n", directive) - - // Print some helpful info to stderr for the user to understand. - // Output from stderr must be ignored by the completion script. - fmt.Fprintf(finalCmd.ErrOrStderr(), "Completion ended with directive: %s\n", directive.string()) - }, - } - c.AddCommand(completeCmd) - subCmd, _, err := c.Find(args) - if err != nil || subCmd.Name() != ShellCompRequestCmd { - // Only create this special command if it is actually being called. - // This reduces possible side-effects of creating such a command; - // for example, having this command would cause problems to a - // cobra program that only consists of the root command, since this - // command would cause the root command to suddenly have a subcommand. 
- c.RemoveCommand(completeCmd) - } -} - -func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDirective, error) { - // The last argument, which is not completely typed by the user, - // should not be part of the list of arguments - toComplete := args[len(args)-1] - trimmedArgs := args[:len(args)-1] - - var finalCmd *Command - var finalArgs []string - var err error - // Find the real command for which completion must be performed - // check if we need to traverse here to parse local flags on parent commands - if c.Root().TraverseChildren { - finalCmd, finalArgs, err = c.Root().Traverse(trimmedArgs) - } else { - finalCmd, finalArgs, err = c.Root().Find(trimmedArgs) - } - if err != nil { - // Unable to find the real command. E.g., someInvalidCmd - return c, []string{}, ShellCompDirectiveDefault, fmt.Errorf("Unable to find a command for arguments: %v", trimmedArgs) - } - finalCmd.ctx = c.ctx - - // Check if we are doing flag value completion before parsing the flags. - // This is important because if we are completing a flag value, we need to also - // remove the flag name argument from the list of finalArgs or else the parsing - // could fail due to an invalid value (incomplete) for the flag. - flag, finalArgs, toComplete, flagErr := checkIfFlagCompletion(finalCmd, finalArgs, toComplete) - - // Check if interspersed is false or -- was set on a previous arg. - // This works by counting the arguments. Normally -- is not counted as arg but - // if -- was already set or interspersed is false and there is already one arg then - // the extra added -- is counted as arg. - flagCompletion := true - _ = finalCmd.ParseFlags(append(finalArgs, "--")) - newArgCount := finalCmd.Flags().NArg() - - // Parse the flags early so we can check if required flags are set - if err = finalCmd.ParseFlags(finalArgs); err != nil { - return finalCmd, []string{}, ShellCompDirectiveDefault, fmt.Errorf("Error while parsing flags from args %v: %s", finalArgs, err.Error()) - } - - realArgCount := finalCmd.Flags().NArg() - if newArgCount > realArgCount { - // don't do flag completion (see above) - flagCompletion = false - } - // Error while attempting to parse flags - if flagErr != nil { - // If error type is flagCompError and we don't want flagCompletion we should ignore the error - if _, ok := flagErr.(*flagCompError); !(ok && !flagCompletion) { - return finalCmd, []string{}, ShellCompDirectiveDefault, flagErr - } - } - - if flag != nil && flagCompletion { - // Check if we are completing a flag value subject to annotations - if validExts, present := flag.Annotations[BashCompFilenameExt]; present { - if len(validExts) != 0 { - // File completion filtered by extensions - return finalCmd, validExts, ShellCompDirectiveFilterFileExt, nil - } - - // The annotation requests simple file completion. There is no reason to do - // that since it is the default behavior anyway. Let's ignore this annotation - // in case the program also registered a completion function for this flag. - // Even though it is a mistake on the program's side, let's be nice when we can. - } - - if subDir, present := flag.Annotations[BashCompSubdirsInDir]; present { - if len(subDir) == 1 { - // Directory completion from within a directory - return finalCmd, subDir, ShellCompDirectiveFilterDirs, nil - } - // Directory completion - return finalCmd, []string{}, ShellCompDirectiveFilterDirs, nil - } - } - - // When doing completion of a flag name, as soon as an argument starts with - // a '-' we know it is a flag. 
We cannot use isFlagArg() here as it requires - // the flag name to be complete - if flag == nil && len(toComplete) > 0 && toComplete[0] == '-' && !strings.Contains(toComplete, "=") && flagCompletion { - var completions []string - - // First check for required flags - completions = completeRequireFlags(finalCmd, toComplete) - - // If we have not found any required flags, only then can we show regular flags - if len(completions) == 0 { - doCompleteFlags := func(flag *pflag.Flag) { - if !flag.Changed || - strings.Contains(flag.Value.Type(), "Slice") || - strings.Contains(flag.Value.Type(), "Array") { - // If the flag is not already present, or if it can be specified multiple times (Array or Slice) - // we suggest it as a completion - completions = append(completions, getFlagNameCompletions(flag, toComplete)...) - } - } - - // We cannot use finalCmd.Flags() because we may not have called ParsedFlags() for commands - // that have set DisableFlagParsing; it is ParseFlags() that merges the inherited and - // non-inherited flags. - finalCmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) { - doCompleteFlags(flag) - }) - finalCmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) { - doCompleteFlags(flag) - }) - } - - directive := ShellCompDirectiveNoFileComp - if len(completions) == 1 && strings.HasSuffix(completions[0], "=") { - // If there is a single completion, the shell usually adds a space - // after the completion. We don't want that if the flag ends with an = - directive = ShellCompDirectiveNoSpace - } - return finalCmd, completions, directive, nil - } - - // We only remove the flags from the arguments if DisableFlagParsing is not set. - // This is important for commands which have requested to do their own flag completion. - if !finalCmd.DisableFlagParsing { - finalArgs = finalCmd.Flags().Args() - } - - var completions []string - directive := ShellCompDirectiveDefault - if flag == nil { - foundLocalNonPersistentFlag := false - // If TraverseChildren is true on the root command we don't check for - // local flags because we can use a local flag on a parent command - if !finalCmd.Root().TraverseChildren { - // Check if there are any local, non-persistent flags on the command-line - localNonPersistentFlags := finalCmd.LocalNonPersistentFlags() - finalCmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) { - if localNonPersistentFlags.Lookup(flag.Name) != nil && flag.Changed { - foundLocalNonPersistentFlag = true - } - }) - } - - // Complete subcommand names, including the help command - if len(finalArgs) == 0 && !foundLocalNonPersistentFlag { - // We only complete sub-commands if: - // - there are no arguments on the command-line and - // - there are no local, non-persistent flags on the command-line or TraverseChildren is true - for _, subCmd := range finalCmd.Commands() { - if subCmd.IsAvailableCommand() || subCmd == finalCmd.helpCommand { - if strings.HasPrefix(subCmd.Name(), toComplete) { - completions = append(completions, fmt.Sprintf("%s\t%s", subCmd.Name(), subCmd.Short)) - } - directive = ShellCompDirectiveNoFileComp - } - } - } - - // Complete required flags even without the '-' prefix - completions = append(completions, completeRequireFlags(finalCmd, toComplete)...) - - // Always complete ValidArgs, even if we are completing a subcommand name. - // This is for commands that have both subcommands and ValidArgs. 
- if len(finalCmd.ValidArgs) > 0 { - if len(finalArgs) == 0 { - // ValidArgs are only for the first argument - for _, validArg := range finalCmd.ValidArgs { - if strings.HasPrefix(validArg, toComplete) { - completions = append(completions, validArg) - } - } - directive = ShellCompDirectiveNoFileComp - - // If no completions were found within commands or ValidArgs, - // see if there are any ArgAliases that should be completed. - if len(completions) == 0 { - for _, argAlias := range finalCmd.ArgAliases { - if strings.HasPrefix(argAlias, toComplete) { - completions = append(completions, argAlias) - } - } - } - } - - // If there are ValidArgs specified (even if they don't match), we stop completion. - // Only one of ValidArgs or ValidArgsFunction can be used for a single command. - return finalCmd, completions, directive, nil - } - - // Let the logic continue so as to add any ValidArgsFunction completions, - // even if we already found sub-commands. - // This is for commands that have subcommands but also specify a ValidArgsFunction. - } - - // Find the completion function for the flag or command - var completionFn func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) - if flag != nil && flagCompletion { - flagCompletionMutex.RLock() - completionFn = flagCompletionFunctions[flag] - flagCompletionMutex.RUnlock() - } else { - completionFn = finalCmd.ValidArgsFunction - } - if completionFn != nil { - // Go custom completion defined for this flag or command. - // Call the registered completion function to get the completions. - var comps []string - comps, directive = completionFn(finalCmd, finalArgs, toComplete) - completions = append(completions, comps...) - } - - return finalCmd, completions, directive, nil -} - -func getFlagNameCompletions(flag *pflag.Flag, toComplete string) []string { - if nonCompletableFlag(flag) { - return []string{} - } - - var completions []string - flagName := "--" + flag.Name - if strings.HasPrefix(flagName, toComplete) { - // Flag without the = - completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage)) - - // Why suggest both long forms: --flag and --flag= ? - // This forces the user to *always* have to type either an = or a space after the flag name. - // Let's be nice and avoid making users have to do that. - // Since boolean flags and shortname flags don't show the = form, let's go that route and never show it. - // The = form will still work, we just won't suggest it. - // This also makes the list of suggested flags shorter as we avoid all the = forms. - // - // if len(flag.NoOptDefVal) == 0 { - // // Flag requires a value, so it can be suffixed with = - // flagName += "=" - // completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage)) - // } - } - - flagName = "-" + flag.Shorthand - if len(flag.Shorthand) > 0 && strings.HasPrefix(flagName, toComplete) { - completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage)) - } - - return completions -} - -func completeRequireFlags(finalCmd *Command, toComplete string) []string { - var completions []string - - doCompleteRequiredFlags := func(flag *pflag.Flag) { - if _, present := flag.Annotations[BashCompOneRequiredFlag]; present { - if !flag.Changed { - // If the flag is not already present, we suggest it as a completion - completions = append(completions, getFlagNameCompletions(flag, toComplete)...) 
- } - } - } - - // We cannot use finalCmd.Flags() because we may not have called ParsedFlags() for commands - // that have set DisableFlagParsing; it is ParseFlags() that merges the inherited and - // non-inherited flags. - finalCmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) { - doCompleteRequiredFlags(flag) - }) - finalCmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) { - doCompleteRequiredFlags(flag) - }) - - return completions -} - -func checkIfFlagCompletion(finalCmd *Command, args []string, lastArg string) (*pflag.Flag, []string, string, error) { - if finalCmd.DisableFlagParsing { - // We only do flag completion if we are allowed to parse flags - // This is important for commands which have requested to do their own flag completion. - return nil, args, lastArg, nil - } - - var flagName string - trimmedArgs := args - flagWithEqual := false - orgLastArg := lastArg - - // When doing completion of a flag name, as soon as an argument starts with - // a '-' we know it is a flag. We cannot use isFlagArg() here as that function - // requires the flag name to be complete - if len(lastArg) > 0 && lastArg[0] == '-' { - if index := strings.Index(lastArg, "="); index >= 0 { - // Flag with an = - if strings.HasPrefix(lastArg[:index], "--") { - // Flag has full name - flagName = lastArg[2:index] - } else { - // Flag is shorthand - // We have to get the last shorthand flag name - // e.g. `-asd` => d to provide the correct completion - // https://github.com/spf13/cobra/issues/1257 - flagName = lastArg[index-1 : index] - } - lastArg = lastArg[index+1:] - flagWithEqual = true - } else { - // Normal flag completion - return nil, args, lastArg, nil - } - } - - if len(flagName) == 0 { - if len(args) > 0 { - prevArg := args[len(args)-1] - if isFlagArg(prevArg) { - // Only consider the case where the flag does not contain an =. - // If the flag contains an = it means it has already been fully processed, - // so we don't need to deal with it here. - if index := strings.Index(prevArg, "="); index < 0 { - if strings.HasPrefix(prevArg, "--") { - // Flag has full name - flagName = prevArg[2:] - } else { - // Flag is shorthand - // We have to get the last shorthand flag name - // e.g. `-asd` => d to provide the correct completion - // https://github.com/spf13/cobra/issues/1257 - flagName = prevArg[len(prevArg)-1:] - } - // Remove the uncompleted flag or else there could be an error created - // for an invalid value for that flag - trimmedArgs = args[:len(args)-1] - } - } - } - } - - if len(flagName) == 0 { - // Not doing flag completion - return nil, trimmedArgs, lastArg, nil - } - - flag := findFlag(finalCmd, flagName) - if flag == nil { - // Flag not supported by this command, the interspersed option might be set so return the original args - return nil, args, orgLastArg, &flagCompError{subCommand: finalCmd.Name(), flagName: flagName} - } - - if !flagWithEqual { - if len(flag.NoOptDefVal) != 0 { - // We had assumed dealing with a two-word flag but the flag is a boolean flag. - // In that case, there is no value following it, so we are not really doing flag completion. - // Reset everything to do noun completion. - trimmedArgs = args - flag = nil - } - } - - return flag, trimmedArgs, lastArg, nil -} - -// initDefaultCompletionCmd adds a default 'completion' command to c. 
-// This function will do nothing if any of the following is true: -// 1- the feature has been explicitly disabled by the program, -// 2- c has no subcommands (to avoid creating one), -// 3- c already has a 'completion' command provided by the program. -func (c *Command) initDefaultCompletionCmd() { - if c.CompletionOptions.DisableDefaultCmd || !c.HasSubCommands() { - return - } - - for _, cmd := range c.commands { - if cmd.Name() == compCmdName || cmd.HasAlias(compCmdName) { - // A completion command is already available - return - } - } - - haveNoDescFlag := !c.CompletionOptions.DisableNoDescFlag && !c.CompletionOptions.DisableDescriptions - - completionCmd := &Command{ - Use: compCmdName, - Short: "generate the autocompletion script for the specified shell", - Long: fmt.Sprintf(` -Generate the autocompletion script for %[1]s for the specified shell. -See each sub-command's help for details on how to use the generated script. -`, c.Root().Name()), - Args: NoArgs, - ValidArgsFunction: NoFileCompletions, - } - c.AddCommand(completionCmd) - - out := c.OutOrStdout() - noDesc := c.CompletionOptions.DisableDescriptions - shortDesc := "generate the autocompletion script for %s" - bash := &Command{ - Use: "bash", - Short: fmt.Sprintf(shortDesc, "bash"), - Long: fmt.Sprintf(` -Generate the autocompletion script for the bash shell. - -This script depends on the 'bash-completion' package. -If it is not installed already, you can install it via your OS's package manager. - -To load completions in your current shell session: -$ source <(%[1]s completion bash) - -To load completions for every new session, execute once: -Linux: - $ %[1]s completion bash > /etc/bash_completion.d/%[1]s -MacOS: - $ %[1]s completion bash > /usr/local/etc/bash_completion.d/%[1]s - -You will need to start a new shell for this setup to take effect. - `, c.Root().Name()), - Args: NoArgs, - DisableFlagsInUseLine: true, - ValidArgsFunction: NoFileCompletions, - RunE: func(cmd *Command, args []string) error { - return cmd.Root().GenBashCompletionV2(out, !noDesc) - }, - } - if haveNoDescFlag { - bash.Flags().BoolVar(&noDesc, compCmdNoDescFlagName, compCmdNoDescFlagDefault, compCmdNoDescFlagDesc) - } - - zsh := &Command{ - Use: "zsh", - Short: fmt.Sprintf(shortDesc, "zsh"), - Long: fmt.Sprintf(` -Generate the autocompletion script for the zsh shell. - -If shell completion is not already enabled in your environment you will need -to enable it. You can execute the following once: - -$ echo "autoload -U compinit; compinit" >> ~/.zshrc - -To load completions for every new session, execute once: -# Linux: -$ %[1]s completion zsh > "${fpath[1]}/_%[1]s" -# macOS: -$ %[1]s completion zsh > /usr/local/share/zsh/site-functions/_%[1]s - -You will need to start a new shell for this setup to take effect. -`, c.Root().Name()), - Args: NoArgs, - ValidArgsFunction: NoFileCompletions, - RunE: func(cmd *Command, args []string) error { - if noDesc { - return cmd.Root().GenZshCompletionNoDesc(out) - } - return cmd.Root().GenZshCompletion(out) - }, - } - if haveNoDescFlag { - zsh.Flags().BoolVar(&noDesc, compCmdNoDescFlagName, compCmdNoDescFlagDefault, compCmdNoDescFlagDesc) - } - - fish := &Command{ - Use: "fish", - Short: fmt.Sprintf(shortDesc, "fish"), - Long: fmt.Sprintf(` -Generate the autocompletion script for the fish shell. 
- -To load completions in your current shell session: -$ %[1]s completion fish | source - -To load completions for every new session, execute once: -$ %[1]s completion fish > ~/.config/fish/completions/%[1]s.fish - -You will need to start a new shell for this setup to take effect. -`, c.Root().Name()), - Args: NoArgs, - ValidArgsFunction: NoFileCompletions, - RunE: func(cmd *Command, args []string) error { - return cmd.Root().GenFishCompletion(out, !noDesc) - }, - } - if haveNoDescFlag { - fish.Flags().BoolVar(&noDesc, compCmdNoDescFlagName, compCmdNoDescFlagDefault, compCmdNoDescFlagDesc) - } - - powershell := &Command{ - Use: "powershell", - Short: fmt.Sprintf(shortDesc, "powershell"), - Long: fmt.Sprintf(` -Generate the autocompletion script for powershell. - -To load completions in your current shell session: -PS C:\> %[1]s completion powershell | Out-String | Invoke-Expression - -To load completions for every new session, add the output of the above command -to your powershell profile. -`, c.Root().Name()), - Args: NoArgs, - ValidArgsFunction: NoFileCompletions, - RunE: func(cmd *Command, args []string) error { - if noDesc { - return cmd.Root().GenPowerShellCompletion(out) - } - return cmd.Root().GenPowerShellCompletionWithDesc(out) - - }, - } - if haveNoDescFlag { - powershell.Flags().BoolVar(&noDesc, compCmdNoDescFlagName, compCmdNoDescFlagDefault, compCmdNoDescFlagDesc) - } - - completionCmd.AddCommand(bash, zsh, fish, powershell) -} - -func findFlag(cmd *Command, name string) *pflag.Flag { - flagSet := cmd.Flags() - if len(name) == 1 { - // First convert the short flag into a long flag - // as the cmd.Flag() search only accepts long flags - if short := flagSet.ShorthandLookup(name); short != nil { - name = short.Name - } else { - set := cmd.InheritedFlags() - if short = set.ShorthandLookup(name); short != nil { - name = short.Name - } else { - return nil - } - } - } - return cmd.Flag(name) -} - -// CompDebug prints the specified string to the same file as where the -// completion script prints its logs. -// Note that completion printouts should never be on stdout as they would -// be wrongly interpreted as actual completion choices by the completion script. -func CompDebug(msg string, printToStdErr bool) { - msg = fmt.Sprintf("[Debug] %s", msg) - - // Such logs are only printed when the user has set the environment - // variable BASH_COMP_DEBUG_FILE to the path of some file to be used. - if path := os.Getenv("BASH_COMP_DEBUG_FILE"); path != "" { - f, err := os.OpenFile(path, - os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) - if err == nil { - defer f.Close() - WriteStringAndCheck(f, msg) - } - } - - if printToStdErr { - // Must print to stderr for this not to be read by the completion script. - fmt.Fprint(os.Stderr, msg) - } -} - -// CompDebugln prints the specified string with a newline at the end -// to the same file as where the completion script prints its logs. -// Such logs are only printed when the user has set the environment -// variable BASH_COMP_DEBUG_FILE to the path of some file to be used. -func CompDebugln(msg string, printToStdErr bool) { - CompDebug(fmt.Sprintf("%s\n", msg), printToStdErr) -} - -// CompError prints the specified completion message to stderr. -func CompError(msg string) { - msg = fmt.Sprintf("[Error] %s", msg) - CompDebug(msg, true) -} - -// CompErrorln prints the specified completion message to stderr with a newline at the end. 
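Since stdout is reserved for the completion protocol, the `CompDebug` family above is the only safe way to trace completion code. A sketch of a completion callback instrumented with it; `listReleases` is a hypothetical stand-in:

```go
package app

import "github.com/spf13/cobra"

// listReleases is a hypothetical lookup standing in for real work.
func listReleases(prefix string) ([]string, error) {
	return []string{"harbor", "notary", "rook", "thanos"}, nil
}

// releaseCompletion routes diagnostics through CompDebugln/CompErrorln so they
// land in BASH_COMP_DEBUG_FILE (or stderr) instead of stdout, which the
// generated scripts parse as completion choices.
func releaseCompletion(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
	cobra.CompDebugln("completing release, prefix: "+toComplete, false)
	releases, err := listReleases(toComplete)
	if err != nil {
		cobra.CompErrorln(err.Error())
		return nil, cobra.ShellCompDirectiveError
	}
	return releases, cobra.ShellCompDirectiveNoFileComp
}
```

Setting `BASH_COMP_DEBUG_FILE=/tmp/comp.log` before triggering completion captures the traces.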
-func CompErrorln(msg string) { - CompError(fmt.Sprintf("%s\n", msg)) -} diff --git a/vendor/github.com/spf13/cobra/fish_completions.go b/vendor/github.com/spf13/cobra/fish_completions.go deleted file mode 100644 index bb57fd56..00000000 --- a/vendor/github.com/spf13/cobra/fish_completions.go +++ /dev/null @@ -1,219 +0,0 @@ -package cobra - -import ( - "bytes" - "fmt" - "io" - "os" - "strings" -) - -func genFishComp(buf io.StringWriter, name string, includeDesc bool) { - // Variables should not contain a '-' or ':' character - nameForVar := name - nameForVar = strings.Replace(nameForVar, "-", "_", -1) - nameForVar = strings.Replace(nameForVar, ":", "_", -1) - - compCmd := ShellCompRequestCmd - if !includeDesc { - compCmd = ShellCompNoDescRequestCmd - } - WriteStringAndCheck(buf, fmt.Sprintf("# fish completion for %-36s -*- shell-script -*-\n", name)) - WriteStringAndCheck(buf, fmt.Sprintf(` -function __%[1]s_debug - set -l file "$BASH_COMP_DEBUG_FILE" - if test -n "$file" - echo "$argv" >> $file - end -end - -function __%[1]s_perform_completion - __%[1]s_debug "Starting __%[1]s_perform_completion" - - # Extract all args except the last one - set -l args (commandline -opc) - # Extract the last arg and escape it in case it is a space - set -l lastArg (string escape -- (commandline -ct)) - - __%[1]s_debug "args: $args" - __%[1]s_debug "last arg: $lastArg" - - set -l requestComp "$args[1] %[3]s $args[2..-1] $lastArg" - - __%[1]s_debug "Calling $requestComp" - set -l results (eval $requestComp 2> /dev/null) - - # Some programs may output extra empty lines after the directive. - # Let's ignore them or else it will break completion. - # Ref: https://github.com/spf13/cobra/issues/1279 - for line in $results[-1..1] - if test (string trim -- $line) = "" - # Found an empty line, remove it - set results $results[1..-2] - else - # Found non-empty line, we have our proper output - break - end - end - - set -l comps $results[1..-2] - set -l directiveLine $results[-1] - - # For Fish, when completing a flag with an = (e.g., -n=) - # completions must be prefixed with the flag - set -l flagPrefix (string match -r -- '-.*=' "$lastArg") - - __%[1]s_debug "Comps: $comps" - __%[1]s_debug "DirectiveLine: $directiveLine" - __%[1]s_debug "flagPrefix: $flagPrefix" - - for comp in $comps - printf "%%s%%s\n" "$flagPrefix" "$comp" - end - - printf "%%s\n" "$directiveLine" -end - -# This function does two things: -# - Obtain the completions and store them in the global __%[1]s_comp_results -# - Return false if file completion should be performed -function __%[1]s_prepare_completions - __%[1]s_debug "" - __%[1]s_debug "========= starting completion logic ==========" - - # Start fresh - set --erase __%[1]s_comp_results - - set -l results (__%[1]s_perform_completion) - __%[1]s_debug "Completion results: $results" - - if test -z "$results" - __%[1]s_debug "No completion, probably due to a failure" - # Might as well do file completion, in case it helps - return 1 - end - - set -l directive (string sub --start 2 $results[-1]) - set --global __%[1]s_comp_results $results[1..-2] - - __%[1]s_debug "Completions are: $__%[1]s_comp_results" - __%[1]s_debug "Directive is: $directive" - - set -l shellCompDirectiveError %[4]d - set -l shellCompDirectiveNoSpace %[5]d - set -l shellCompDirectiveNoFileComp %[6]d - set -l shellCompDirectiveFilterFileExt %[7]d - set -l shellCompDirectiveFilterDirs %[8]d - - if test -z "$directive" - set directive 0 - end - - set -l compErr (math (math --scale 0 $directive / $shellCompDirectiveError) %% 2) 
- if test $compErr -eq 1 - __%[1]s_debug "Received error directive: aborting." - # Might as well do file completion, in case it helps - return 1 - end - - set -l filefilter (math (math --scale 0 $directive / $shellCompDirectiveFilterFileExt) %% 2) - set -l dirfilter (math (math --scale 0 $directive / $shellCompDirectiveFilterDirs) %% 2) - if test $filefilter -eq 1; or test $dirfilter -eq 1 - __%[1]s_debug "File extension filtering or directory filtering not supported" - # Do full file completion instead - return 1 - end - - set -l nospace (math (math --scale 0 $directive / $shellCompDirectiveNoSpace) %% 2) - set -l nofiles (math (math --scale 0 $directive / $shellCompDirectiveNoFileComp) %% 2) - - __%[1]s_debug "nospace: $nospace, nofiles: $nofiles" - - # If we want to prevent a space, or if file completion is NOT disabled, - # we need to count the number of valid completions. - # To do so, we will filter on prefix as the completions we have received - # may not already be filtered so as to allow fish to match on different - # criteria than the prefix. - if test $nospace -ne 0; or test $nofiles -eq 0 - set -l prefix (commandline -t | string escape --style=regex) - __%[1]s_debug "prefix: $prefix" - - set -l completions (string match -r -- "^$prefix.*" $__%[1]s_comp_results) - set --global __%[1]s_comp_results $completions - __%[1]s_debug "Filtered completions are: $__%[1]s_comp_results" - - # Important not to quote the variable for count to work - set -l numComps (count $__%[1]s_comp_results) - __%[1]s_debug "numComps: $numComps" - - if test $numComps -eq 1; and test $nospace -ne 0 - # We must first split on \t to get rid of the descriptions to be - # able to check what the actual completion will be. - # We don't need descriptions anyway since there is only a single - # real completion which the shell will expand immediately. - set -l split (string split --max 1 \t $__%[1]s_comp_results[1]) - - # Fish won't add a space if the completion ends with any - # of the following characters: @=/:., - set -l lastChar (string sub -s -1 -- $split) - if not string match -r -q "[@=/:.,]" -- "$lastChar" - # In other cases, to support the "nospace" directive we trick the shell - # by outputting an extra, longer completion. - __%[1]s_debug "Adding second completion to perform nospace directive" - set --global __%[1]s_comp_results $split[1] $split[1]. - __%[1]s_debug "Completions are now: $__%[1]s_comp_results" - end - end - - if test $numComps -eq 0; and test $nofiles -eq 0 - # To be consistent with bash and zsh, we only trigger file - # completion when there are no other completions - __%[1]s_debug "Requesting file completion" - return 1 - end - end - - return 0 -end - -# Since Fish completions are only loaded once the user triggers them, we trigger them ourselves -# so we can properly delete any completions provided by another script. -# Only do this if the program can be found, or else fish may print some errors; besides, -# the existing completions will only be loaded if the program can be found. -if type -q "%[2]s" - # The space after the program name is essential to trigger completion for the program - # and not completion of the program name itself. - # Also, we use '> /dev/null 2>&1' since '&>' is not supported in older versions of fish. - complete --do-complete "%[2]s " > /dev/null 2>&1 -end - -# Remove any pre-existing completions for the program since we will be handling all of them. 
-complete -c %[2]s -e - -# The call to __%[1]s_prepare_completions will set up __%[1]s_comp_results -# which provides the program's completion choices. -complete -c %[2]s -n '__%[1]s_prepare_completions' -f -a '$__%[1]s_comp_results' - -`, nameForVar, name, compCmd, - ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp, - ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs)) -} - -// GenFishCompletion generates a fish completion file and writes to the passed writer. -func (c *Command) GenFishCompletion(w io.Writer, includeDesc bool) error { - buf := new(bytes.Buffer) - genFishComp(buf, c.Name(), includeDesc) - _, err := buf.WriteTo(w) - return err -} - -// GenFishCompletionFile generates a fish completion file. -func (c *Command) GenFishCompletionFile(filename string, includeDesc bool) error { - outFile, err := os.Create(filename) - if err != nil { - return err - } - defer outFile.Close() - - return c.GenFishCompletion(outFile, includeDesc) -} diff --git a/vendor/github.com/spf13/cobra/fish_completions.md b/vendor/github.com/spf13/cobra/fish_completions.md deleted file mode 100644 index 19b2ed12..00000000 --- a/vendor/github.com/spf13/cobra/fish_completions.md +++ /dev/null @@ -1,4 +0,0 @@ -## Generating Fish Completions For Your cobra.Command - -Please refer to [Shell Completions](shell_completions.md) for details. - diff --git a/vendor/github.com/spf13/cobra/powershell_completions.go b/vendor/github.com/spf13/cobra/powershell_completions.go deleted file mode 100644 index 59234c09..00000000 --- a/vendor/github.com/spf13/cobra/powershell_completions.go +++ /dev/null @@ -1,285 +0,0 @@ -// The generated scripts require PowerShell v5.0+ (which comes with Windows 10, but -// can be downloaded separately for Windows 7 or 8.1). - -package cobra - -import ( - "bytes" - "fmt" - "io" - "os" -) - -func genPowerShellComp(buf io.StringWriter, name string, includeDesc bool) { - compCmd := ShellCompRequestCmd - if !includeDesc { - compCmd = ShellCompNoDescRequestCmd - } - WriteStringAndCheck(buf, fmt.Sprintf(`# powershell completion for %-36[1]s -*- shell-script -*- - -function __%[1]s_debug { - if ($env:BASH_COMP_DEBUG_FILE) { - "$args" | Out-File -Append -FilePath "$env:BASH_COMP_DEBUG_FILE" - } -} - -filter __%[1]s_escapeStringWithSpecialChars { -`+" $_ -replace '\\s|#|@|\\$|;|,|''|\\{|\\}|\\(|\\)|\"|`|\\||<|>|&','`$&'"+` -} - -Register-ArgumentCompleter -CommandName '%[1]s' -ScriptBlock { - param( - $WordToComplete, - $CommandAst, - $CursorPosition - ) - - # Get the current command line and convert into a string - $Command = $CommandAst.CommandElements - $Command = "$Command" - - __%[1]s_debug "" - __%[1]s_debug "========= starting completion logic ==========" - __%[1]s_debug "WordToComplete: $WordToComplete Command: $Command CursorPosition: $CursorPosition" - - # The user could have moved the cursor backwards on the command-line. - # We need to trigger completion from the $CursorPosition location, so we need - # to truncate the command-line ($Command) up to the $CursorPosition location. - # Make sure the $Command is longer than the $CursorPosition before we truncate. - # This happens because the $Command does not include the last space.
- if ($Command.Length -gt $CursorPosition) { - $Command=$Command.Substring(0,$CursorPosition) - } - __%[1]s_debug "Truncated command: $Command" - - $ShellCompDirectiveError=%[3]d - $ShellCompDirectiveNoSpace=%[4]d - $ShellCompDirectiveNoFileComp=%[5]d - $ShellCompDirectiveFilterFileExt=%[6]d - $ShellCompDirectiveFilterDirs=%[7]d - - # Prepare the command to request completions for the program. - # Split the command at the first space to separate the program and arguments. - $Program,$Arguments = $Command.Split(" ",2) - $RequestComp="$Program %[2]s $Arguments" - __%[1]s_debug "RequestComp: $RequestComp" - - # we cannot use $WordToComplete because it - # has the wrong values if the cursor was moved - # so use the last argument - if ($WordToComplete -ne "" ) { - $WordToComplete = $Arguments.Split(" ")[-1] - } - __%[1]s_debug "New WordToComplete: $WordToComplete" - - - # Check for flag with equal sign - $IsEqualFlag = ($WordToComplete -Like "--*=*" ) - if ( $IsEqualFlag ) { - __%[1]s_debug "Completing equal sign flag" - # Remove the flag part - $Flag,$WordToComplete = $WordToComplete.Split("=",2) - } - - if ( $WordToComplete -eq "" -And ( -Not $IsEqualFlag )) { - # If the last parameter is complete (there is a space following it) - # We add an extra empty parameter so we can indicate this to the go method. - __%[1]s_debug "Adding extra empty parameter" -`+" # We need to use `\"`\" to pass an empty argument a \"\" or '' does not work!!!"+` -`+" $RequestComp=\"$RequestComp\" + ' `\"`\"'"+` - } - - __%[1]s_debug "Calling $RequestComp" - #call the command store the output in $out and redirect stderr and stdout to null - # $Out is an array contains each line per element - Invoke-Expression -OutVariable out "$RequestComp" 2>&1 | Out-Null - - - # get directive from last line - [int]$Directive = $Out[-1].TrimStart(':') - if ($Directive -eq "") { - # There is no directive specified - $Directive = 0 - } - __%[1]s_debug "The completion directive is: $Directive" - - # remove directive (last element) from out - $Out = $Out | Where-Object { $_ -ne $Out[-1] } - __%[1]s_debug "The completions are: $Out" - - if (($Directive -band $ShellCompDirectiveError) -ne 0 ) { - # Error code. No completion. - __%[1]s_debug "Received error from custom completion go code" - return - } - - $Longest = 0 - $Values = $Out | ForEach-Object { - #Split the output in name and description -`+" $Name, $Description = $_.Split(\"`t\",2)"+` - __%[1]s_debug "Name: $Name Description: $Description" - - # Look for the longest completion so that we can format things nicely - if ($Longest -lt $Name.Length) { - $Longest = $Name.Length - } - - # Set the description to a one space string if there is none set. 
- # This is needed because the CompletionResult does not accept an empty string as argument - if (-Not $Description) { - $Description = " " - } - @{Name="$Name";Description="$Description"} - } - - - $Space = " " - if (($Directive -band $ShellCompDirectiveNoSpace) -ne 0 ) { - # remove the space here - __%[1]s_debug "ShellCompDirectiveNoSpace is called" - $Space = "" - } - - if ((($Directive -band $ShellCompDirectiveFilterFileExt) -ne 0 ) -or - (($Directive -band $ShellCompDirectiveFilterDirs) -ne 0 )) { - __%[1]s_debug "ShellCompDirectiveFilterFileExt ShellCompDirectiveFilterDirs are not supported" - - # return here to prevent the completion of the extensions - return - } - - $Values = $Values | Where-Object { - # filter the result - $_.Name -like "$WordToComplete*" - - # Join the flag back if we have an equal sign flag - if ( $IsEqualFlag ) { - __%[1]s_debug "Join the equal sign flag back to the completion value" - $_.Name = $Flag + "=" + $_.Name - } - } - - if (($Directive -band $ShellCompDirectiveNoFileComp) -ne 0 ) { - __%[1]s_debug "ShellCompDirectiveNoFileComp is called" - - if ($Values.Length -eq 0) { - # Just print an empty string here so the - # shell does not start to complete paths. - # We cannot use CompletionResult here because - # it does not accept an empty string as argument. - "" - return - } - } - - # Get the current mode - $Mode = (Get-PSReadLineKeyHandler | Where-Object {$_.Key -eq "Tab" }).Function - __%[1]s_debug "Mode: $Mode" - - $Values | ForEach-Object { - - # store temporarily because switch will overwrite $_ - $comp = $_ - - # PowerShell supports three different completion modes - # - TabCompleteNext (default Windows style - on each key press the next option is displayed) - # - Complete (works like bash) - # - MenuComplete (works like zsh) - # You set the mode with Set-PSReadLineKeyHandler -Key Tab -Function <mode> - - # CompletionResult Arguments: - # 1) CompletionText text to be used as the auto completion result - # 2) ListItemText text to be displayed in the suggestion list - # 3) ResultType type of completion result - # 4) ToolTip text for the tooltip with details about the object - - switch ($Mode) { - - # bash like - "Complete" { - - if ($Values.Length -eq 1) { - __%[1]s_debug "Only one completion left" - - # insert space after value - [System.Management.Automation.CompletionResult]::new($($comp.Name | __%[1]s_escapeStringWithSpecialChars) + $Space, "$($comp.Name)", 'ParameterValue', "$($comp.Description)") - - } else { - # Add the proper number of spaces to align the descriptions - while($comp.Name.Length -lt $Longest) { - $comp.Name = $comp.Name + " " - } - - # Check for empty description and only add parentheses if needed - if ($($comp.Description) -eq " " ) { - $Description = "" - } else { - $Description = " ($($comp.Description))" - } - - [System.Management.Automation.CompletionResult]::new("$($comp.Name)$Description", "$($comp.Name)$Description", 'ParameterValue', "$($comp.Description)") - } - } - - # zsh like - "MenuComplete" { - # insert space after value - # MenuComplete will automatically show the ToolTip of - # the highlighted value at the bottom of the suggestions. - [System.Management.Automation.CompletionResult]::new($($comp.Name | __%[1]s_escapeStringWithSpecialChars) + $Space, "$($comp.Name)", 'ParameterValue', "$($comp.Description)") - } - - # TabCompleteNext and in case we get something unknown - Default { - # Like MenuComplete but we don't want to add a space here because - # the user needs to press space anyway to get the completion.
- # Description will not be shown because that's not possible with TabCompleteNext - [System.Management.Automation.CompletionResult]::new($($comp.Name | __%[1]s_escapeStringWithSpecialChars), "$($comp.Name)", 'ParameterValue', "$($comp.Description)") - } - } - - } -} -`, name, compCmd, - ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp, - ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs)) -} - -func (c *Command) genPowerShellCompletion(w io.Writer, includeDesc bool) error { - buf := new(bytes.Buffer) - genPowerShellComp(buf, c.Name(), includeDesc) - _, err := buf.WriteTo(w) - return err -} - -func (c *Command) genPowerShellCompletionFile(filename string, includeDesc bool) error { - outFile, err := os.Create(filename) - if err != nil { - return err - } - defer outFile.Close() - - return c.genPowerShellCompletion(outFile, includeDesc) -} - -// GenPowerShellCompletionFile generates a powershell completion file without descriptions. -func (c *Command) GenPowerShellCompletionFile(filename string) error { - return c.genPowerShellCompletionFile(filename, false) -} - -// GenPowerShellCompletion generates a powershell completion file without descriptions -// and writes it to the passed writer. -func (c *Command) GenPowerShellCompletion(w io.Writer) error { - return c.genPowerShellCompletion(w, false) -} - -// GenPowerShellCompletionFileWithDesc generates a powershell completion file with descriptions. -func (c *Command) GenPowerShellCompletionFileWithDesc(filename string) error { - return c.genPowerShellCompletionFile(filename, true) -} - -// GenPowerShellCompletionWithDesc generates a powershell completion file with descriptions -// and writes it to the passed writer. -func (c *Command) GenPowerShellCompletionWithDesc(w io.Writer) error { - return c.genPowerShellCompletion(w, true) -} diff --git a/vendor/github.com/spf13/cobra/powershell_completions.md b/vendor/github.com/spf13/cobra/powershell_completions.md deleted file mode 100644 index c449f1e5..00000000 --- a/vendor/github.com/spf13/cobra/powershell_completions.md +++ /dev/null @@ -1,3 +0,0 @@ -# Generating PowerShell Completions For Your Own cobra.Command - -Please refer to [Shell Completions](shell_completions.md#powershell-completions) for details.
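Before Cobra grew a default `completion` command, programs typically wired these exported generators into a hand-rolled subcommand. A sketch under that assumption (the `completion-powershell` name and `--no-descriptions` flag are invented):

```go
package app

import (
	"os"

	"github.com/spf13/cobra"
)

// newPowerShellCompletionCmd prints the completion script to stdout so users
// can pipe it into Invoke-Expression or save it into their profile.
func newPowerShellCompletionCmd() *cobra.Command {
	var noDesc bool
	cmd := &cobra.Command{
		Use:  "completion-powershell",
		Args: cobra.NoArgs,
		RunE: func(cmd *cobra.Command, args []string) error {
			if noDesc {
				return cmd.Root().GenPowerShellCompletion(os.Stdout)
			}
			return cmd.Root().GenPowerShellCompletionWithDesc(os.Stdout)
		},
	}
	cmd.Flags().BoolVar(&noDesc, "no-descriptions", false, "disable completion descriptions")
	return cmd
}
```

Users would then run `app completion-powershell | Out-String | Invoke-Expression` in their session, or persist the output in their PowerShell profile.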
diff --git a/vendor/github.com/spf13/cobra/projects_using_cobra.md b/vendor/github.com/spf13/cobra/projects_using_cobra.md deleted file mode 100644 index d98a71e3..00000000 --- a/vendor/github.com/spf13/cobra/projects_using_cobra.md +++ /dev/null @@ -1,38 +0,0 @@ -## Projects using Cobra - -- [Arduino CLI](https://github.com/arduino/arduino-cli) -- [Bleve](http://www.blevesearch.com/) -- [CockroachDB](http://www.cockroachlabs.com/) -- [Cosmos SDK](https://github.com/cosmos/cosmos-sdk) -- [Delve](https://github.com/derekparker/delve) -- [Docker (distribution)](https://github.com/docker/distribution) -- [Etcd](https://etcd.io/) -- [Gardener](https://github.com/gardener/gardenctl) -- [Giant Swarm's gsctl](https://github.com/giantswarm/gsctl) -- [Git Bump](https://github.com/erdaltsksn/git-bump) -- [Github CLI](https://github.com/cli/cli) -- [GitHub Labeler](https://github.com/erdaltsksn/gh-label) -- [Golangci-lint](https://golangci-lint.run) -- [GopherJS](http://www.gopherjs.org/) -- [Helm](https://helm.sh) -- [Hugo](https://gohugo.io) -- [Istio](https://istio.io) -- [Kool](https://github.com/kool-dev/kool) -- [Kubernetes](http://kubernetes.io/) -- [Linkerd](https://linkerd.io/) -- [Mattermost-server](https://github.com/mattermost/mattermost-server) -- [Metal Stack CLI](https://github.com/metal-stack/metalctl) -- [Moby (former Docker)](https://github.com/moby/moby) -- [Nanobox](https://github.com/nanobox-io/nanobox)/[Nanopack](https://github.com/nanopack) -- [OpenShift](https://www.openshift.com/) -- [Ory Hydra](https://github.com/ory/hydra) -- [Ory Kratos](https://github.com/ory/kratos) -- [Pouch](https://github.com/alibaba/pouch) -- [ProjectAtomic (enterprise)](http://www.projectatomic.io/) -- [Prototool](https://github.com/uber/prototool) -- [Random](https://github.com/erdaltsksn/random) -- [Rclone](https://rclone.org/) -- [Skaffold](https://skaffold.dev/) -- [Tendermint](https://github.com/tendermint/tendermint) -- [Twitch CLI](https://github.com/twitchdev/twitch-cli) -- [Werf](https://werf.io/) diff --git a/vendor/github.com/spf13/cobra/shell_completions.go b/vendor/github.com/spf13/cobra/shell_completions.go deleted file mode 100644 index d99bf91e..00000000 --- a/vendor/github.com/spf13/cobra/shell_completions.go +++ /dev/null @@ -1,84 +0,0 @@ -package cobra - -import ( - "github.com/spf13/pflag" -) - -// MarkFlagRequired instructs the various shell completion implementations to -// prioritize the named flag when performing completion, -// and causes your command to report an error if invoked without the flag. -func (c *Command) MarkFlagRequired(name string) error { - return MarkFlagRequired(c.Flags(), name) -} - -// MarkPersistentFlagRequired instructs the various shell completion implementations to -// prioritize the named persistent flag when performing completion, -// and causes your command to report an error if invoked without the flag. -func (c *Command) MarkPersistentFlagRequired(name string) error { - return MarkFlagRequired(c.PersistentFlags(), name) -} - -// MarkFlagRequired instructs the various shell completion implementations to -// prioritize the named flag when performing completion, -// and causes your command to report an error if invoked without the flag. -func MarkFlagRequired(flags *pflag.FlagSet, name string) error { - return flags.SetAnnotation(name, BashCompOneRequiredFlag, []string{"true"}) -} - -// MarkFlagFilename instructs the various shell completion implementations to -// limit completions for the named flag to the specified file extensions. 
-func (c *Command) MarkFlagFilename(name string, extensions ...string) error { - return MarkFlagFilename(c.Flags(), name, extensions...) -} - -// MarkFlagCustom adds the BashCompCustom annotation to the named flag, if it exists. -// The bash completion script will call the bash function f for the flag. -// -// This will only work for bash completion. -// It is recommended to instead use c.RegisterFlagCompletionFunc(...) which allows -// to register a Go function which will work across all shells. -func (c *Command) MarkFlagCustom(name string, f string) error { - return MarkFlagCustom(c.Flags(), name, f) -} - -// MarkPersistentFlagFilename instructs the various shell completion -// implementations to limit completions for the named persistent flag to the -// specified file extensions. -func (c *Command) MarkPersistentFlagFilename(name string, extensions ...string) error { - return MarkFlagFilename(c.PersistentFlags(), name, extensions...) -} - -// MarkFlagFilename instructs the various shell completion implementations to -// limit completions for the named flag to the specified file extensions. -func MarkFlagFilename(flags *pflag.FlagSet, name string, extensions ...string) error { - return flags.SetAnnotation(name, BashCompFilenameExt, extensions) -} - -// MarkFlagCustom adds the BashCompCustom annotation to the named flag, if it exists. -// The bash completion script will call the bash function f for the flag. -// -// This will only work for bash completion. -// It is recommended to instead use c.RegisterFlagCompletionFunc(...) which allows -// to register a Go function which will work across all shells. -func MarkFlagCustom(flags *pflag.FlagSet, name string, f string) error { - return flags.SetAnnotation(name, BashCompCustom, []string{f}) -} - -// MarkFlagDirname instructs the various shell completion implementations to -// limit completions for the named flag to directory names. -func (c *Command) MarkFlagDirname(name string) error { - return MarkFlagDirname(c.Flags(), name) -} - -// MarkPersistentFlagDirname instructs the various shell completion -// implementations to limit completions for the named persistent flag to -// directory names. -func (c *Command) MarkPersistentFlagDirname(name string) error { - return MarkFlagDirname(c.PersistentFlags(), name) -} - -// MarkFlagDirname instructs the various shell completion implementations to -// limit completions for the named flag to directory names. -func MarkFlagDirname(flags *pflag.FlagSet, name string) error { - return flags.SetAnnotation(name, BashCompSubdirsInDir, []string{}) -} diff --git a/vendor/github.com/spf13/cobra/shell_completions.md b/vendor/github.com/spf13/cobra/shell_completions.md deleted file mode 100644 index 4ba06a11..00000000 --- a/vendor/github.com/spf13/cobra/shell_completions.md +++ /dev/null @@ -1,546 +0,0 @@ -# Generating shell completions - -Cobra can generate shell completions for multiple shells. -The currently supported shells are: -- Bash -- Zsh -- fish -- PowerShell - -Cobra will automatically provide your program with a fully functional `completion` command, -similarly to how it provides the `help` command. - -## Creating your own completion command - -If you do not wish to use the default `completion` command, you can choose to -provide your own, which will take precedence over the default one. (This also provides -backwards-compatibility with programs that already have their own `completion` command.) 
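The `Mark*` helpers deleted above reduce to flag annotations (`BashCompOneRequiredFlag`, `BashCompFilenameExt`, `BashCompSubdirsInDir`) that the completion logic in completions.go consumes. A sketch with invented flag names:

```go
package app

import "github.com/spf13/cobra"

// newApplyCmd applies the three annotation helpers to invented flags:
// required (prioritized in completion), extension-filtered file names,
// and directory-only completion.
func newApplyCmd() *cobra.Command {
	cmd := &cobra.Command{
		Use:  "apply",
		RunE: func(cmd *cobra.Command, args []string) error { return nil },
	}
	cmd.Flags().StringP("filename", "f", "", "manifest to apply")
	cmd.Flags().String("cache-dir", "", "directory for cached state")

	_ = cmd.MarkFlagRequired("filename")                 // BashCompOneRequiredFlag
	_ = cmd.MarkFlagFilename("filename", "yaml", "json") // BashCompFilenameExt: *.yaml, *.json
	_ = cmd.MarkFlagDirname("cache-dir")                 // BashCompSubdirsInDir
	return cmd
}
```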
- -If you are using the generator, you can create a completion command by running - -```bash -cobra add completion -``` -and then modifying the generated `cmd/completion.go` file to look something like this -(writing the shell script to stdout allows the most flexible use): - -```go -var completionCmd = &cobra.Command{ - Use: "completion [bash|zsh|fish|powershell]", - Short: "Generate completion script", - Long: `To load completions: - -Bash: - - $ source <(yourprogram completion bash) - - # To load completions for each session, execute once: - # Linux: - $ yourprogram completion bash > /etc/bash_completion.d/yourprogram - # macOS: - $ yourprogram completion bash > /usr/local/etc/bash_completion.d/yourprogram - -Zsh: - - # If shell completion is not already enabled in your environment, - # you will need to enable it. You can execute the following once: - - $ echo "autoload -U compinit; compinit" >> ~/.zshrc - - # To load completions for each session, execute once: - $ yourprogram completion zsh > "${fpath[1]}/_yourprogram" - - # You will need to start a new shell for this setup to take effect. - -fish: - - $ yourprogram completion fish | source - - # To load completions for each session, execute once: - $ yourprogram completion fish > ~/.config/fish/completions/yourprogram.fish - -PowerShell: - - PS> yourprogram completion powershell | Out-String | Invoke-Expression - - # To load completions for every new session, run: - PS> yourprogram completion powershell > yourprogram.ps1 - # and source this file from your PowerShell profile. -`, - DisableFlagsInUseLine: true, - ValidArgs: []string{"bash", "zsh", "fish", "powershell"}, - Args: cobra.ExactValidArgs(1), - Run: func(cmd *cobra.Command, args []string) { - switch args[0] { - case "bash": - cmd.Root().GenBashCompletion(os.Stdout) - case "zsh": - cmd.Root().GenZshCompletion(os.Stdout) - case "fish": - cmd.Root().GenFishCompletion(os.Stdout, true) - case "powershell": - cmd.Root().GenPowerShellCompletionWithDesc(os.Stdout) - } - }, -} -``` - -**Note:** The cobra generator may include messages printed to stdout, for example, if the config file is loaded; this will break the auto-completion script so must be removed. - -## Adapting the default completion command - -Cobra provides a few options for the default `completion` command. To configure such options you must set -the `CompletionOptions` field on the *root* command. - -To tell Cobra *not* to provide the default `completion` command: -``` -rootCmd.CompletionOptions.DisableDefaultCmd = true -``` - -To tell Cobra *not* to provide the user with the `--no-descriptions` flag to the completion sub-commands: -``` -rootCmd.CompletionOptions.DisableNoDescFlag = true -``` - -To tell Cobra to completely disable descriptions for completions: -``` -rootCmd.CompletionOptions.DisableDescriptions = true -``` - -# Customizing completions - -The generated completion scripts will automatically handle completing commands and flags. However, you can make your completions much more powerful by providing information to complete your program's nouns and flag values. - -## Completion of nouns - -### Static completion of nouns - -Cobra allows you to provide a pre-defined list of completion choices for your nouns using the `ValidArgs` field. -For example, if you want `kubectl get [tab][tab]` to show a list of valid "nouns" you have to set them. 
-Some simplified code from `kubectl get` looks like: - -```go -validArgs := []string{"pod", "node", "service", "replicationcontroller"} - -cmd := &cobra.Command{ - Use: "get [(-o|--output=)json|yaml|template|...] (RESOURCE [NAME] | RESOURCE/NAME ...)", - Short: "Display one or many resources", - Long: get_long, - Example: get_example, - Run: func(cmd *cobra.Command, args []string) { - cobra.CheckErr(RunGet(f, out, cmd, args)) - }, - ValidArgs: validArgs, -} -``` - -Notice we put the `ValidArgs` field on the `get` sub-command. Doing so will give results like: - -```bash -$ kubectl get [tab][tab] -node pod replicationcontroller service -``` - -#### Aliases for nouns - -If your nouns have aliases, you can define them alongside `ValidArgs` using `ArgAliases`: - -```go -argAliases := []string{"pods", "nodes", "services", "svc", "replicationcontrollers", "rc"} - -cmd := &cobra.Command{ - ... - ValidArgs: validArgs, - ArgAliases: argAliases, -} -``` - -The aliases are not shown to the user on tab completion, but they are accepted as valid nouns by -the completion algorithm if entered manually, e.g. in: - -```bash -$ kubectl get rc [tab][tab] -backend frontend database -``` - -Note that without declaring `rc` as an alias, the completion algorithm would not know to show the list of -replication controllers following `rc`. - -### Dynamic completion of nouns - -In some cases it is not possible to provide a list of completions in advance. Instead, the list of completions must be determined at execution-time. In a similar fashion as for static completions, you can use the `ValidArgsFunction` field to provide a Go function that Cobra will execute when it needs the list of completion choices for the nouns of a command. Note that either `ValidArgs` or `ValidArgsFunction` can be used for a single cobra command, but not both. -Simplified code from `helm status` looks like: - -```go -cmd := &cobra.Command{ - Use: "status RELEASE_NAME", - Short: "Display the status of the named release", - Long: status_long, - RunE: func(cmd *cobra.Command, args []string) error { - return RunGet(args[0]) - }, - ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - if len(args) != 0 { - return nil, cobra.ShellCompDirectiveNoFileComp - } - return getReleasesFromCluster(toComplete), cobra.ShellCompDirectiveNoFileComp - }, -} -``` -Where `getReleasesFromCluster()` is a Go function that obtains the list of current Helm releases running on the Kubernetes cluster. -Notice we put the `ValidArgsFunction` on the `status` sub-command. Let's assume the Helm releases on the cluster are `harbor`, `notary`, `rook` and `thanos`; then this dynamic completion will give results like: - -```bash -$ helm status [tab][tab] -harbor notary rook thanos -``` -You may have noticed the use of `cobra.ShellCompDirective`. These directives are bit fields that allow you to control some shell completion behaviors for your particular completion. You can combine them with the bit-or operator such as `cobra.ShellCompDirectiveNoSpace | cobra.ShellCompDirectiveNoFileComp` -```go -// Indicates that the shell will perform its default behavior after completions -// have been provided (this implies none of the other directives). -ShellCompDirectiveDefault - -// Indicates an error occurred and completions should be ignored. -ShellCompDirectiveError - -// Indicates that the shell should not add a space after the completion, -// even if there is a single completion provided.
-ShellCompDirectiveNoSpace - -// Indicates that the shell should not provide file completion even when -// no completion is provided. -ShellCompDirectiveNoFileComp - -// Indicates that the returned completions should be used as file extension filters. -// For example, to complete only files of the form *.json or *.yaml: -// return []string{"yaml", "json"}, ShellCompDirectiveFilterFileExt -// For flags, using MarkFlagFilename() and MarkPersistentFlagFilename() -// is a shortcut to using this directive explicitly. -// -ShellCompDirectiveFilterFileExt - -// Indicates that only directory names should be provided in file completion. -// For example: -// return nil, ShellCompDirectiveFilterDirs -// For flags, using MarkFlagDirname() is a shortcut to using this directive explicitly. -// -// To request directory names within another directory, the returned completions -// should specify a single directory name within which to search. For example, -// to complete directories within "themes/": -// return []string{"themes"}, ShellCompDirectiveFilterDirs -// -ShellCompDirectiveFilterDirs -``` - -***Note***: When using the `ValidArgsFunction`, Cobra will call your registered function after having parsed all flags and arguments provided in the command-line. You therefore don't need to do this parsing yourself. For example, when a user calls `helm status --namespace my-rook-ns [tab][tab]`, Cobra will call your registered `ValidArgsFunction` after having parsed the `--namespace` flag, as it would have done when calling the `RunE` function. - -#### Debugging - -Cobra achieves dynamic completion through the use of a hidden command called by the completion script. To debug your Go completion code, you can call this hidden command directly: -```bash -$ helm __complete status har -harbor -:4 -Completion ended with directive: ShellCompDirectiveNoFileComp # This is on stderr -``` -***Important:*** If the noun to complete is empty (when the user has not yet typed any letters of that noun), you must pass an empty parameter to the `__complete` command: -```bash -$ helm __complete status "" -harbor -notary -rook -thanos -:4 -Completion ended with directive: ShellCompDirectiveNoFileComp # This is on stderr -``` -Calling the `__complete` command directly allows you to run the Go debugger to troubleshoot your code. You can also add printouts to your code; Cobra provides the following functions to use for printouts in Go completion code: -```go -// Prints to the completion script debug file (if BASH_COMP_DEBUG_FILE -// is set to a file path) and optionally prints to stderr. -cobra.CompDebug(msg string, printToStdErr bool) -cobra.CompDebugln(msg string, printToStdErr bool) - -// Prints to the completion script debug file (if BASH_COMP_DEBUG_FILE -// is set to a file path) and to stderr. -cobra.CompError(msg string) -cobra.CompErrorln(msg string) -``` -***Important:*** You should **not** leave traces that print directly to stdout in your completion code as they will be interpreted as completion choices by the completion script. Instead, use the cobra-provided debugging traces functions mentioned above. - -## Completions for flags - -### Mark flags as required - -Most of the time completions will only show sub-commands. But if a flag is required to make a sub-command work, you probably want it to show up when the user types [tab][tab].
You can mark a flag as 'Required' like so: - -```go -cmd.MarkFlagRequired("pod") -cmd.MarkFlagRequired("container") -``` - -and you'll get something like - -```bash -$ kubectl exec [tab][tab] --c --container= -p --pod= -``` - -### Specify dynamic flag completion - -As for nouns, Cobra provides a way of defining dynamic completion of flags. To provide a Go function that Cobra will execute when it needs the list of completion choices for a flag, you must register the function using the `command.RegisterFlagCompletionFunc()` function. - -```go -flagName := "output" -cmd.RegisterFlagCompletionFunc(flagName, func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - return []string{"json", "table", "yaml"}, cobra.ShellCompDirectiveDefault -}) -``` -Notice that calling `RegisterFlagCompletionFunc()` is done through the `command` with which the flag is associated. In our example this dynamic completion will give results like so: - -```bash -$ helm status --output [tab][tab] -json table yaml -``` - -#### Debugging - -You can also easily debug your Go completion code for flags: -```bash -$ helm __complete status --output "" -json -table -yaml -:4 -Completion ended with directive: ShellCompDirectiveNoFileComp # This is on stderr -``` -***Important:*** You should **not** leave traces that print to stdout in your completion code as they will be interpreted as completion choices by the completion script. Instead, use the cobra-provided debugging traces functions mentioned further above. - -### Specify valid filename extensions for flags that take a filename - -To limit completions of flag values to file names with certain extensions you can either use the different `MarkFlagFilename()` functions or a combination of `RegisterFlagCompletionFunc()` and `ShellCompDirectiveFilterFileExt`, like so: -```go -flagName := "output" -cmd.MarkFlagFilename(flagName, "yaml", "json") -``` -or -```go -flagName := "output" -cmd.RegisterFlagCompletionFunc(flagName, func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - return []string{"yaml", "json"}, ShellCompDirectiveFilterFileExt}) -``` - -### Limit flag completions to directory names - -To limit completions of flag values to directory names you can either use the `MarkFlagDirname()` functions or a combination of `RegisterFlagCompletionFunc()` and `ShellCompDirectiveFilterDirs`, like so: -```go -flagName := "output" -cmd.MarkFlagDirname(flagName) -``` -or -```go -flagName := "output" -cmd.RegisterFlagCompletionFunc(flagName, func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - return nil, cobra.ShellCompDirectiveFilterDirs -}) -``` -To limit completions of flag values to directory names *within another directory* you can use a combination of `RegisterFlagCompletionFunc()` and `ShellCompDirectiveFilterDirs` like so: -```go -flagName := "output" -cmd.RegisterFlagCompletionFunc(flagName, func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - return []string{"themes"}, cobra.ShellCompDirectiveFilterDirs -}) -``` -### Descriptions for completions - -Cobra provides support for completion descriptions. Such descriptions are supported for each shell -(however, for bash, it is only available in the [completion V2 version](#bash-completion-v2)). -For commands and flags, Cobra will provide the descriptions automatically, based on usage information. 
-For example, using zsh: -``` -$ helm s[tab] -search -- search for a keyword in charts -show -- show information of a chart -status -- displays the status of the named release -``` -while using fish: -``` -$ helm s[tab] -search (search for a keyword in charts) show (show information of a chart) status (displays the status of the named release) -``` - -Cobra allows you to add descriptions to your own completions. Simply add the description text after each completion, following a `\t` separator. This technique applies to completions returned by `ValidArgs`, `ValidArgsFunction` and `RegisterFlagCompletionFunc()`. For example: -```go -ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - return []string{"harbor\tAn image registry", "thanos\tLong-term metrics"}, cobra.ShellCompDirectiveNoFileComp -}} -``` -or -```go -ValidArgs: []string{"bash\tCompletions for bash", "zsh\tCompletions for zsh"} -``` -## Bash completions - -### Dependencies - -The bash completion script generated by Cobra requires the `bash_completion` package. You should update the help text of your completion command to show how to install the `bash_completion` package ([Kubectl docs](https://kubernetes.io/docs/tasks/tools/install-kubectl/#enabling-shell-autocompletion)) - -### Aliases - -You can also configure `bash` aliases for your program and they will also support completions. - -```bash -alias aliasname=origcommand -complete -o default -F __start_origcommand aliasname - -# and now when you run `aliasname` completion will make -# suggestions as it did for `origcommand`. - -$ aliasname -completion firstcommand secondcommand -``` -### Bash legacy dynamic completions - -For backward compatibility, Cobra still supports its bash legacy dynamic completion solution. -Please refer to [Bash Completions](bash_completions.md) for details. - -### Bash completion V2 - -Cobra provides two versions for bash completion. The original bash completion (which started it all!) can be used by calling -`GenBashCompletion()` or `GenBashCompletionFile()`. - -A new V2 bash completion version is also available. This version can be used by calling `GenBashCompletionV2()` or -`GenBashCompletionFileV2()`. The V2 version does **not** support the legacy dynamic completion -(see [Bash Completions](bash_completions.md)) but instead works only with the Go dynamic completion -solution described in this document. -Unless your program already uses the legacy dynamic completion solution, it is recommended that you use the bash -completion V2 solution which provides the following extra features: -- Supports completion descriptions (like the other shells) -- Small completion script of less than 300 lines (v1 generates scripts of thousands of lines; `kubectl` for example has a bash v1 completion script of over 13K lines) -- Streamlined user experience thanks to a completion behavior aligned with the other shells - -`Bash` completion V2 supports descriptions for completions. When calling `GenBashCompletionV2()` or `GenBashCompletionFileV2()` -you must provide these functions with a parameter indicating if the completions should be annotated with a description; Cobra -will provide the description automatically based on usage information. You can choose to make this option configurable by -your users. 
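-
-As a rough sketch of how a program might expose the V2 script, the wiring below is illustrative only (the `completion` subcommand and the `includeDesc` toggle are assumptions, not Cobra API; only `GenBashCompletionV2()` comes from Cobra):
-
-```go
-package main
-
-import (
-	"os"
-
-	"github.com/spf13/cobra"
-)
-
-func main() {
-	rootCmd := &cobra.Command{Use: "helm"}
-
-	// Illustrative toggle; could be driven by a flag on the command.
-	includeDesc := true
-
-	completionCmd := &cobra.Command{
-		Use:   "completion",
-		Short: "Generate the bash completion script (V2)",
-		RunE: func(cmd *cobra.Command, args []string) error {
-			// Writes the V2 script; the boolean controls descriptions.
-			return cmd.Root().GenBashCompletionV2(os.Stdout, includeDesc)
-		},
-	}
-	rootCmd.AddCommand(completionCmd)
-
-	_ = rootCmd.Execute()
-}
-```
-The sample output below shows both modes.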
-
-```
-# With descriptions
-$ helm s[tab][tab]
-search (search for a keyword in charts) status (display the status of the named release)
-show (show information of a chart)
-
-# Without descriptions
-$ helm s[tab][tab]
-search show status
-```
-**Note**: Cobra's default `completion` command uses bash completion V2. If for some reason you need to use bash completion V1, you will need to implement your own `completion` command.
-## Zsh completions
-
-Cobra supports native zsh completion generated from the root `cobra.Command`.
-The generated completion script should be put somewhere in your `$fpath` and be named
-`_<yourProgram>`. You will need to start a new shell for the completions to become available.
-
-Zsh supports descriptions for completions. Cobra will provide the description automatically,
-based on usage information. Cobra provides a way to completely disable such descriptions by
-using `GenZshCompletionNoDesc()` or `GenZshCompletionFileNoDesc()`. You can choose to make
-this a configurable option for your users.
-```
-# With descriptions
-$ helm s[tab]
-search -- search for a keyword in charts
-show -- show information of a chart
-status -- displays the status of the named release
-
-# Without descriptions
-$ helm s[tab]
-search show status
-```
-*Note*: Because of backward-compatibility requirements, we were forced to have a different API to disable completion descriptions between `zsh` and `fish`.
-
-### Limitations
-
-* Custom completions implemented in Bash scripting (legacy) are not supported and will be ignored for `zsh` (including the use of the `BashCompCustom` flag annotation).
-  * You should instead use `ValidArgsFunction` and `RegisterFlagCompletionFunc()` which are portable to the different shells (`bash`, `zsh`, `fish`, `powershell`).
-* The function `MarkFlagCustom()` is not supported and will be ignored for `zsh`.
-  * You should instead use `RegisterFlagCompletionFunc()`.
-
-### Zsh completions standardization
-
-Cobra 1.1 standardized its zsh completion support to align it with its other shell completions. Although the API was kept backward-compatible, some small changes in behavior were introduced.
-Please refer to [Zsh Completions](zsh_completions.md) for details.
-
-## fish completions
-
-Cobra supports native fish completions generated from the root `cobra.Command`. You can use the `command.GenFishCompletion()` or `command.GenFishCompletionFile()` functions. You must provide these functions with a parameter indicating if the completions should be annotated with a description; Cobra will provide the description automatically based on usage information. You can choose to make this option configurable by your users.
-```
-# With descriptions
-$ helm s[tab]
-search (search for a keyword in charts) show (show information of a chart) status (displays the status of the named release)
-
-# Without descriptions
-$ helm s[tab]
-search show status
-```
-*Note*: Because of backward-compatibility requirements, we were forced to have a different API to disable completion descriptions between `zsh` and `fish`.
-
-### Limitations
-
-* Custom completions implemented in bash scripting (legacy) are not supported and will be ignored for `fish` (including the use of the `BashCompCustom` flag annotation).
-  * You should instead use `ValidArgsFunction` and `RegisterFlagCompletionFunc()` which are portable to the different shells (`bash`, `zsh`, `fish`, `powershell`).
-* The function `MarkFlagCustom()` is not supported and will be ignored for `fish`.
-  * You should instead use `RegisterFlagCompletionFunc()`.
-* The following flag completion annotations are not supported and will be ignored for `fish`:
-  * `BashCompFilenameExt` (filtering by file extension)
-  * `BashCompSubdirsInDir` (filtering by directory)
-* The functions corresponding to the above annotations are consequently not supported and will be ignored for `fish`:
-  * `MarkFlagFilename()` and `MarkPersistentFlagFilename()` (filtering by file extension)
-  * `MarkFlagDirname()` and `MarkPersistentFlagDirname()` (filtering by directory)
-* Similarly, the following completion directives are not supported and will be ignored for `fish`:
-  * `ShellCompDirectiveFilterFileExt` (filtering by file extension)
-  * `ShellCompDirectiveFilterDirs` (filtering by directory)
-
-## PowerShell completions
-
-Cobra supports native PowerShell completions generated from the root `cobra.Command`. You can use the `command.GenPowerShellCompletion()` or `command.GenPowerShellCompletionFile()` functions. To include descriptions use `command.GenPowerShellCompletionWithDesc()` and `command.GenPowerShellCompletionFileWithDesc()`. Cobra will provide the description automatically based on usage information. You can choose to make this option configurable by your users.
-
-The script is designed to support all three PowerShell completion modes:
-
-* TabCompleteNext (default Windows style - on each key press the next option is displayed)
-* Complete (works like bash)
-* MenuComplete (works like zsh)
-
-You set the mode with `Set-PSReadLineKeyHandler -Key Tab -Function <mode>`. Descriptions are only displayed when using the `Complete` or `MenuComplete` mode.
-
-Users need PowerShell version 5.0 or above, which comes with Windows 10 and can be downloaded separately for Windows 7 or 8.1. They can then write the completions to a file and source this file from their PowerShell profile, which is referenced by the `$Profile` automatic variable. See `Get-Help about_Profiles` for more info about PowerShell profiles.
-
-```
-# With descriptions and Mode 'Complete'
-$ helm s[tab]
-search (search for a keyword in charts) show (show information of a chart) status (displays the status of the named release)
-
-# With descriptions and Mode 'MenuComplete'. The description of the currently selected value will be displayed below the suggestions.
-$ helm s[tab]
-search show status
-
-search for a keyword in charts
-
-# Without descriptions
-$ helm s[tab]
-search show status
-```
-
-### Limitations
-
-* Custom completions implemented in bash scripting (legacy) are not supported and will be ignored for `powershell` (including the use of the `BashCompCustom` flag annotation).
-  * You should instead use `ValidArgsFunction` and `RegisterFlagCompletionFunc()` which are portable to the different shells (`bash`, `zsh`, `fish`, `powershell`).
-* The function `MarkFlagCustom()` is not supported and will be ignored for `powershell`.
-  * You should instead use `RegisterFlagCompletionFunc()`.
-* The following flag completion annotations are not supported and will be ignored for `powershell`: - * `BashCompFilenameExt` (filtering by file extension) - * `BashCompSubdirsInDir` (filtering by directory) -* The functions corresponding to the above annotations are consequently not supported and will be ignored for `powershell`: - * `MarkFlagFilename()` and `MarkPersistentFlagFilename()` (filtering by file extension) - * `MarkFlagDirname()` and `MarkPersistentFlagDirname()` (filtering by directory) -* Similarly, the following completion directives are not supported and will be ignored for `powershell`: - * `ShellCompDirectiveFilterFileExt` (filtering by file extension) - * `ShellCompDirectiveFilterDirs` (filtering by directory) diff --git a/vendor/github.com/spf13/cobra/user_guide.md b/vendor/github.com/spf13/cobra/user_guide.md deleted file mode 100644 index 311abce2..00000000 --- a/vendor/github.com/spf13/cobra/user_guide.md +++ /dev/null @@ -1,637 +0,0 @@ -# User Guide - -While you are welcome to provide your own organization, typically a Cobra-based -application will follow the following organizational structure: - -``` - â–¾ appName/ - â–¾ cmd/ - add.go - your.go - commands.go - here.go - main.go -``` - -In a Cobra app, typically the main.go file is very bare. It serves one purpose: initializing Cobra. - -```go -package main - -import ( - "{pathToYourApp}/cmd" -) - -func main() { - cmd.Execute() -} -``` - -## Using the Cobra Generator - -Cobra provides its own program that will create your application and add any -commands you want. It's the easiest way to incorporate Cobra into your application. - -[Here](https://github.com/spf13/cobra/blob/master/cobra/README.md) you can find more information about it. - -## Using the Cobra Library - -To manually implement Cobra you need to create a bare main.go file and a rootCmd file. -You will optionally provide additional commands as you see fit. - -### Create rootCmd - -Cobra doesn't require any special constructors. Simply create your commands. - -Ideally you place this in app/cmd/root.go: - -```go -var rootCmd = &cobra.Command{ - Use: "hugo", - Short: "Hugo is a very fast static site generator", - Long: `A Fast and Flexible Static Site Generator built with - love by spf13 and friends in Go. - Complete documentation is available at http://hugo.spf13.com`, - Run: func(cmd *cobra.Command, args []string) { - // Do Stuff Here - }, -} - -func Execute() { - if err := rootCmd.Execute(); err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } -} -``` - -You will additionally define flags and handle configuration in your init() function. - -For example cmd/root.go: - -```go -package cmd - -import ( - "fmt" - "os" - - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -var ( - // Used for flags. - cfgFile string - userLicense string - - rootCmd = &cobra.Command{ - Use: "cobra", - Short: "A generator for Cobra based Applications", - Long: `Cobra is a CLI library for Go that empowers applications. -This application is a tool to generate the needed files -to quickly create a Cobra application.`, - } -) - -// Execute executes the root command. 
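-// It is called by main.main() (see the main.go example above) and only
-// needs to happen once, on the root command.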
-func Execute() error {
-	return rootCmd.Execute()
-}
-
-func init() {
-	cobra.OnInitialize(initConfig)
-
-	rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.cobra.yaml)")
-	rootCmd.PersistentFlags().StringP("author", "a", "YOUR NAME", "author name for copyright attribution")
-	rootCmd.PersistentFlags().StringVarP(&userLicense, "license", "l", "", "name of license for the project")
-	rootCmd.PersistentFlags().Bool("viper", true, "use Viper for configuration")
-	viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author"))
-	viper.BindPFlag("useViper", rootCmd.PersistentFlags().Lookup("viper"))
-	viper.SetDefault("author", "NAME HERE <EMAIL ADDRESS>")
-	viper.SetDefault("license", "apache")
-
-	rootCmd.AddCommand(addCmd)
-	rootCmd.AddCommand(initCmd)
-}
-
-func initConfig() {
-	if cfgFile != "" {
-		// Use config file from the flag.
-		viper.SetConfigFile(cfgFile)
-	} else {
-		// Find home directory.
-		home, err := os.UserHomeDir()
-		cobra.CheckErr(err)
-
-		// Search config in home directory with name ".cobra" (without extension).
-		viper.AddConfigPath(home)
-		viper.SetConfigType("yaml")
-		viper.SetConfigName(".cobra")
-	}
-
-	viper.AutomaticEnv()
-
-	if err := viper.ReadInConfig(); err == nil {
-		fmt.Println("Using config file:", viper.ConfigFileUsed())
-	}
-}
-```
-
-### Create your main.go
-
-With the root command you need to have your main function execute it.
-Execute should be run on the root for clarity, though it can be called on any command.
-
-In a Cobra app, typically the main.go file is very bare. It serves one purpose: to initialize Cobra.
-
-```go
-package main
-
-import (
-	"{pathToYourApp}/cmd"
-)
-
-func main() {
-	cmd.Execute()
-}
-```
-
-### Create additional commands
-
-Additional commands can be defined and typically are each given their own file
-inside of the cmd/ directory.
-
-If you wanted to create a version command you would create cmd/version.go and
-populate it with the following:
-
-```go
-package cmd
-
-import (
-	"fmt"
-
-	"github.com/spf13/cobra"
-)
-
-func init() {
-	rootCmd.AddCommand(versionCmd)
-}
-
-var versionCmd = &cobra.Command{
-	Use:   "version",
-	Short: "Print the version number of Hugo",
-	Long:  `All software has versions. This is Hugo's`,
-	Run: func(cmd *cobra.Command, args []string) {
-		fmt.Println("Hugo Static Site Generator v0.9 -- HEAD")
-	},
-}
-```
-
-### Returning and handling errors
-
-If you wish to return an error to the caller of a command, `RunE` can be used.
-
-```go
-package cmd
-
-import (
-	"github.com/spf13/cobra"
-)
-
-func init() {
-	rootCmd.AddCommand(tryCmd)
-}
-
-var tryCmd = &cobra.Command{
-	Use:   "try",
-	Short: "Try and possibly fail at something",
-	RunE: func(cmd *cobra.Command, args []string) error {
-		if err := someFunc(); err != nil {
-			return err
-		}
-		return nil
-	},
-}
-```
-
-The error can then be caught at the execute function call.
-
-## Working with Flags
-
-Flags provide modifiers to control how the action command operates.
-
-### Assign flags to a command
-
-Since the flags are defined and used in different locations, we need to
-define a variable outside with the correct scope to assign the flag to
-work with.
-
-```go
-var Verbose bool
-var Source string
-```
-
-There are two different approaches to assign a flag.
-
-### Persistent Flags
-
-A flag can be 'persistent', meaning that this flag will be available to the
-command it's assigned to as well as every command under that command. For
-global flags, assign a flag as a persistent flag on the root.
- -```go -rootCmd.PersistentFlags().BoolVarP(&Verbose, "verbose", "v", false, "verbose output") -``` - -### Local Flags - -A flag can also be assigned locally, which will only apply to that specific command. - -```go -localCmd.Flags().StringVarP(&Source, "source", "s", "", "Source directory to read from") -``` - -### Local Flag on Parent Commands - -By default, Cobra only parses local flags on the target command, and any local flags on -parent commands are ignored. By enabling `Command.TraverseChildren`, Cobra will -parse local flags on each command before executing the target command. - -```go -command := cobra.Command{ - Use: "print [OPTIONS] [COMMANDS]", - TraverseChildren: true, -} -``` - -### Bind Flags with Config - -You can also bind your flags with [viper](https://github.com/spf13/viper): -```go -var author string - -func init() { - rootCmd.PersistentFlags().StringVar(&author, "author", "YOUR NAME", "Author name for copyright attribution") - viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author")) -} -``` - -In this example, the persistent flag `author` is bound with `viper`. -**Note**: the variable `author` will not be set to the value from config, -when the `--author` flag is not provided by user. - -More in [viper documentation](https://github.com/spf13/viper#working-with-flags). - -### Required flags - -Flags are optional by default. If instead you wish your command to report an error -when a flag has not been set, mark it as required: -```go -rootCmd.Flags().StringVarP(&Region, "region", "r", "", "AWS region (required)") -rootCmd.MarkFlagRequired("region") -``` - -Or, for persistent flags: -```go -rootCmd.PersistentFlags().StringVarP(&Region, "region", "r", "", "AWS region (required)") -rootCmd.MarkPersistentFlagRequired("region") -``` - -## Positional and Custom Arguments - -Validation of positional arguments can be specified using the `Args` field -of `Command`. - -The following validators are built in: - -- `NoArgs` - the command will report an error if there are any positional args. -- `ArbitraryArgs` - the command will accept any args. -- `OnlyValidArgs` - the command will report an error if there are any positional args that are not in the `ValidArgs` field of `Command`. -- `MinimumNArgs(int)` - the command will report an error if there are not at least N positional args. -- `MaximumNArgs(int)` - the command will report an error if there are more than N positional args. -- `ExactArgs(int)` - the command will report an error if there are not exactly N positional args. -- `ExactValidArgs(int)` - the command will report an error if there are not exactly N positional args OR if there are any positional args that are not in the `ValidArgs` field of `Command` -- `RangeArgs(min, max)` - the command will report an error if the number of args is not between the minimum and maximum number of expected args. - -An example of setting the custom validator: - -```go -var cmd = &cobra.Command{ - Short: "hello", - Args: func(cmd *cobra.Command, args []string) error { - if len(args) < 1 { - return errors.New("requires a color argument") - } - if myapp.IsValidColor(args[0]) { - return nil - } - return fmt.Errorf("invalid color specified: %s", args[0]) - }, - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Hello, World!") - }, -} -``` - -## Example - -In the example below, we have defined three commands. Two are at the top level -and one (cmdTimes) is a child of one of the top commands. 
In this case the root
-is not executable, meaning that a subcommand is required. This is accomplished
-by not providing a 'Run' for the 'rootCmd'.
-
-We have only defined one flag for a single command.
-
-More documentation about flags is available at https://github.com/spf13/pflag
-
-```go
-package main
-
-import (
-	"fmt"
-	"strings"
-
-	"github.com/spf13/cobra"
-)
-
-func main() {
-	var echoTimes int
-
-	var cmdPrint = &cobra.Command{
-		Use:   "print [string to print]",
-		Short: "Print anything to the screen",
-		Long: `print is for printing anything back to the screen.
-For many years people have printed back to the screen.`,
-		Args: cobra.MinimumNArgs(1),
-		Run: func(cmd *cobra.Command, args []string) {
-			fmt.Println("Print: " + strings.Join(args, " "))
-		},
-	}
-
-	var cmdEcho = &cobra.Command{
-		Use:   "echo [string to echo]",
-		Short: "Echo anything to the screen",
-		Long: `echo is for echoing anything back.
-Echo works a lot like print, except it has a child command.`,
-		Args: cobra.MinimumNArgs(1),
-		Run: func(cmd *cobra.Command, args []string) {
-			fmt.Println("Echo: " + strings.Join(args, " "))
-		},
-	}
-
-	var cmdTimes = &cobra.Command{
-		Use:   "times [string to echo]",
-		Short: "Echo anything to the screen more times",
-		Long: `echo things multiple times back to the user by providing
-a count and a string.`,
-		Args: cobra.MinimumNArgs(1),
-		Run: func(cmd *cobra.Command, args []string) {
-			for i := 0; i < echoTimes; i++ {
-				fmt.Println("Echo: " + strings.Join(args, " "))
-			}
-		},
-	}
-
-	cmdTimes.Flags().IntVarP(&echoTimes, "times", "t", 1, "times to echo the input")
-
-	var rootCmd = &cobra.Command{Use: "app"}
-	rootCmd.AddCommand(cmdPrint, cmdEcho)
-	cmdEcho.AddCommand(cmdTimes)
-	rootCmd.Execute()
-}
-```
-
-For a more complete example of a larger application, please check out [Hugo](http://gohugo.io/).
-
-## Help Command
-
-Cobra automatically adds a help command to your application when you have subcommands.
-This will be called when a user runs 'app help'. Additionally, help will also
-support all other commands as input. Say, for instance, you have a command called
-'create' without any additional configuration; Cobra will work when 'app help
-create' is called. Every command will automatically have the '--help' flag added.
-
-### Example
-
-The following output is automatically generated by Cobra. Nothing beyond the
-command and flag definitions is needed.
-
-    $ cobra help
-
-    Cobra is a CLI library for Go that empowers applications.
-    This application is a tool to generate the needed files
-    to quickly create a Cobra application.
-
-    Usage:
-      cobra [command]
-
-    Available Commands:
-      add         Add a command to a Cobra Application
-      help        Help about any command
-      init        Initialize a Cobra Application
-
-    Flags:
-      -a, --author string    author name for copyright attribution (default "YOUR NAME")
-          --config string    config file (default is $HOME/.cobra.yaml)
-      -h, --help             help for cobra
-      -l, --license string   name of license for the project
-          --viper            use Viper for configuration (default true)
-
-    Use "cobra [command] --help" for more information about a command.
-
-
-Help is just a command like any other. There is no special logic or behavior
-around it. In fact, you can provide your own if you want.
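-
-As a rough sketch of doing so (assuming an existing `rootCmd`; the banner text is illustrative), a custom help function might look like this, using the `SetHelpFunc` API described in the next section:
-
-```go
-package main
-
-import (
-	"fmt"
-
-	"github.com/spf13/cobra"
-)
-
-func main() {
-	rootCmd := &cobra.Command{Use: "app"}
-
-	// Print a banner, then fall back to the standard usage text.
-	rootCmd.SetHelpFunc(func(cmd *cobra.Command, args []string) {
-		fmt.Fprintf(cmd.OutOrStdout(), "== %s help ==\n\n", cmd.Name())
-		fmt.Fprint(cmd.OutOrStdout(), cmd.UsageString())
-	})
-
-	rootCmd.SetArgs([]string{"--help"})
-	_ = rootCmd.Execute()
-}
-```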
-
-### Defining your own help
-
-You can provide your own Help command or your own template for the default command to use
-with the following functions:
-
-```go
-cmd.SetHelpCommand(cmd *Command)
-cmd.SetHelpFunc(f func(*Command, []string))
-cmd.SetHelpTemplate(s string)
-```
-
-The latter two will also apply to any child commands.
-
-## Usage Message
-
-When the user provides an invalid flag or invalid command, Cobra responds by
-showing the user the 'usage'.
-
-### Example
-You may recognize this from the help above. That's because the default help
-embeds the usage as part of its output.
-
-    $ cobra --invalid
-    Error: unknown flag: --invalid
-    Usage:
-      cobra [command]
-
-    Available Commands:
-      add         Add a command to a Cobra Application
-      help        Help about any command
-      init        Initialize a Cobra Application
-
-    Flags:
-      -a, --author string    author name for copyright attribution (default "YOUR NAME")
-          --config string    config file (default is $HOME/.cobra.yaml)
-      -h, --help             help for cobra
-      -l, --license string   name of license for the project
-          --viper            use Viper for configuration (default true)
-
-    Use "cobra [command] --help" for more information about a command.
-
-### Defining your own usage
-You can provide your own usage function or template for Cobra to use.
-Like help, the function and template are overridable through public methods:
-
-```go
-cmd.SetUsageFunc(f func(*Command) error)
-cmd.SetUsageTemplate(s string)
-```
-
-## Version Flag
-
-Cobra adds a top-level '--version' flag if the Version field is set on the root command.
-Running an application with the '--version' flag will print the version to stdout using
-the version template. The template can be customized using the
-`cmd.SetVersionTemplate(s string)` function.
-
-## PreRun and PostRun Hooks
-
-It is possible to run functions before or after the main `Run` function of your command. The `PersistentPreRun` and `PreRun` functions will be executed before `Run`. `PersistentPostRun` and `PostRun` will be executed after `Run`. The `Persistent*Run` functions will be inherited by children if they do not declare their own. These functions are run in the following order:
-
-- `PersistentPreRun`
-- `PreRun`
-- `Run`
-- `PostRun`
-- `PersistentPostRun`
-
-An example of two commands which use all of these features is below.
When the subcommand is executed, it will run the root command's `PersistentPreRun` but not the root command's `PersistentPostRun`: - -```go -package main - -import ( - "fmt" - - "github.com/spf13/cobra" -) - -func main() { - - var rootCmd = &cobra.Command{ - Use: "root [sub]", - Short: "My root command", - PersistentPreRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PersistentPreRun with args: %v\n", args) - }, - PreRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PreRun with args: %v\n", args) - }, - Run: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd Run with args: %v\n", args) - }, - PostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PostRun with args: %v\n", args) - }, - PersistentPostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PersistentPostRun with args: %v\n", args) - }, - } - - var subCmd = &cobra.Command{ - Use: "sub [no options!]", - Short: "My subcommand", - PreRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd PreRun with args: %v\n", args) - }, - Run: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd Run with args: %v\n", args) - }, - PostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd PostRun with args: %v\n", args) - }, - PersistentPostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd PersistentPostRun with args: %v\n", args) - }, - } - - rootCmd.AddCommand(subCmd) - - rootCmd.SetArgs([]string{""}) - rootCmd.Execute() - fmt.Println() - rootCmd.SetArgs([]string{"sub", "arg1", "arg2"}) - rootCmd.Execute() -} -``` - -Output: -``` -Inside rootCmd PersistentPreRun with args: [] -Inside rootCmd PreRun with args: [] -Inside rootCmd Run with args: [] -Inside rootCmd PostRun with args: [] -Inside rootCmd PersistentPostRun with args: [] - -Inside rootCmd PersistentPreRun with args: [arg1 arg2] -Inside subCmd PreRun with args: [arg1 arg2] -Inside subCmd Run with args: [arg1 arg2] -Inside subCmd PostRun with args: [arg1 arg2] -Inside subCmd PersistentPostRun with args: [arg1 arg2] -``` - -## Suggestions when "unknown command" happens - -Cobra will print automatic suggestions when "unknown command" errors happen. This allows Cobra to behave similarly to the `git` command when a typo happens. For example: - -``` -$ hugo srever -Error: unknown command "srever" for "hugo" - -Did you mean this? - server - -Run 'hugo --help' for usage. -``` - -Suggestions are automatic based on every subcommand registered and use an implementation of [Levenshtein distance](http://en.wikipedia.org/wiki/Levenshtein_distance). Every registered command that matches a minimum distance of 2 (ignoring case) will be displayed as a suggestion. - -If you need to disable suggestions or tweak the string distance in your command, use: - -```go -command.DisableSuggestions = true -``` - -or - -```go -command.SuggestionsMinimumDistance = 1 -``` - -You can also explicitly set names for which a given command will be suggested using the `SuggestFor` attribute. This allows suggestions for strings that are not close in terms of string distance, but makes sense in your set of commands and for some which you don't want aliases. Example: - -``` -$ kubectl remove -Error: unknown command "remove" for "kubectl" - -Did you mean this? - delete - -Run 'kubectl help' for usage. -``` - -## Generating documentation for your command - -Cobra can generate documentation based on subcommands, flags, etc. 
Read more about it in the [docs generation documentation](doc/README.md). - -## Generating shell completions - -Cobra can generate a shell-completion file for the following shells: bash, zsh, fish, PowerShell. If you add more information to your commands, these completions can be amazingly powerful and flexible. Read more about it in [Shell Completions](shell_completions.md). diff --git a/vendor/github.com/spf13/cobra/zsh_completions.go b/vendor/github.com/spf13/cobra/zsh_completions.go deleted file mode 100644 index 1afec30e..00000000 --- a/vendor/github.com/spf13/cobra/zsh_completions.go +++ /dev/null @@ -1,258 +0,0 @@ -package cobra - -import ( - "bytes" - "fmt" - "io" - "os" -) - -// GenZshCompletionFile generates zsh completion file including descriptions. -func (c *Command) GenZshCompletionFile(filename string) error { - return c.genZshCompletionFile(filename, true) -} - -// GenZshCompletion generates zsh completion file including descriptions -// and writes it to the passed writer. -func (c *Command) GenZshCompletion(w io.Writer) error { - return c.genZshCompletion(w, true) -} - -// GenZshCompletionFileNoDesc generates zsh completion file without descriptions. -func (c *Command) GenZshCompletionFileNoDesc(filename string) error { - return c.genZshCompletionFile(filename, false) -} - -// GenZshCompletionNoDesc generates zsh completion file without descriptions -// and writes it to the passed writer. -func (c *Command) GenZshCompletionNoDesc(w io.Writer) error { - return c.genZshCompletion(w, false) -} - -// MarkZshCompPositionalArgumentFile only worked for zsh and its behavior was -// not consistent with Bash completion. It has therefore been disabled. -// Instead, when no other completion is specified, file completion is done by -// default for every argument. One can disable file completion on a per-argument -// basis by using ValidArgsFunction and ShellCompDirectiveNoFileComp. -// To achieve file extension filtering, one can use ValidArgsFunction and -// ShellCompDirectiveFilterFileExt. -// -// Deprecated -func (c *Command) MarkZshCompPositionalArgumentFile(argPosition int, patterns ...string) error { - return nil -} - -// MarkZshCompPositionalArgumentWords only worked for zsh. It has therefore -// been disabled. -// To achieve the same behavior across all shells, one can use -// ValidArgs (for the first argument only) or ValidArgsFunction for -// any argument (can include the first one also). 
-// -// Deprecated -func (c *Command) MarkZshCompPositionalArgumentWords(argPosition int, words ...string) error { - return nil -} - -func (c *Command) genZshCompletionFile(filename string, includeDesc bool) error { - outFile, err := os.Create(filename) - if err != nil { - return err - } - defer outFile.Close() - - return c.genZshCompletion(outFile, includeDesc) -} - -func (c *Command) genZshCompletion(w io.Writer, includeDesc bool) error { - buf := new(bytes.Buffer) - genZshComp(buf, c.Name(), includeDesc) - _, err := buf.WriteTo(w) - return err -} - -func genZshComp(buf io.StringWriter, name string, includeDesc bool) { - compCmd := ShellCompRequestCmd - if !includeDesc { - compCmd = ShellCompNoDescRequestCmd - } - WriteStringAndCheck(buf, fmt.Sprintf(`#compdef _%[1]s %[1]s - -# zsh completion for %-36[1]s -*- shell-script -*- - -__%[1]s_debug() -{ - local file="$BASH_COMP_DEBUG_FILE" - if [[ -n ${file} ]]; then - echo "$*" >> "${file}" - fi -} - -_%[1]s() -{ - local shellCompDirectiveError=%[3]d - local shellCompDirectiveNoSpace=%[4]d - local shellCompDirectiveNoFileComp=%[5]d - local shellCompDirectiveFilterFileExt=%[6]d - local shellCompDirectiveFilterDirs=%[7]d - - local lastParam lastChar flagPrefix requestComp out directive comp lastComp noSpace - local -a completions - - __%[1]s_debug "\n========= starting completion logic ==========" - __%[1]s_debug "CURRENT: ${CURRENT}, words[*]: ${words[*]}" - - # The user could have moved the cursor backwards on the command-line. - # We need to trigger completion from the $CURRENT location, so we need - # to truncate the command-line ($words) up to the $CURRENT location. - # (We cannot use $CURSOR as its value does not work when a command is an alias.) - words=("${=words[1,CURRENT]}") - __%[1]s_debug "Truncated words[*]: ${words[*]}," - - lastParam=${words[-1]} - lastChar=${lastParam[-1]} - __%[1]s_debug "lastParam: ${lastParam}, lastChar: ${lastChar}" - - # For zsh, when completing a flag with an = (e.g., %[1]s -n=) - # completions must be prefixed with the flag - setopt local_options BASH_REMATCH - if [[ "${lastParam}" =~ '-.*=' ]]; then - # We are dealing with a flag with an = - flagPrefix="-P ${BASH_REMATCH}" - fi - - # Prepare the command to obtain completions - requestComp="${words[1]} %[2]s ${words[2,-1]}" - if [ "${lastChar}" = "" ]; then - # If the last parameter is complete (there is a space following it) - # We add an extra empty parameter so we can indicate this to the go completion code. - __%[1]s_debug "Adding extra empty parameter" - requestComp="${requestComp} \"\"" - fi - - __%[1]s_debug "About to call: eval ${requestComp}" - - # Use eval to handle any environment variables and such - out=$(eval ${requestComp} 2>/dev/null) - __%[1]s_debug "completion output: ${out}" - - # Extract the directive integer following a : from the last line - local lastLine - while IFS='\n' read -r line; do - lastLine=${line} - done < <(printf "%%s\n" "${out[@]}") - __%[1]s_debug "last line: ${lastLine}" - - if [ "${lastLine[1]}" = : ]; then - directive=${lastLine[2,-1]} - # Remove the directive including the : and the newline - local suffix - (( suffix=${#lastLine}+2)) - out=${out[1,-$suffix]} - else - # There is no directive specified. Leave $out as is. - __%[1]s_debug "No directive found. 
Setting to default"
-        directive=0
-    fi
-
-    __%[1]s_debug "directive: ${directive}"
-    __%[1]s_debug "completions: ${out}"
-    __%[1]s_debug "flagPrefix: ${flagPrefix}"
-
-    if [ $((directive & shellCompDirectiveError)) -ne 0 ]; then
-        __%[1]s_debug "Completion received error. Ignoring completions."
-        return
-    fi
-
-    while IFS='\n' read -r comp; do
-        if [ -n "$comp" ]; then
-            # If requested, completions are returned with a description.
-            # The description is preceded by a TAB character.
-            # For zsh's _describe, we need to use a : instead of a TAB.
-            # We first need to escape any : as part of the completion itself.
-            comp=${comp//:/\\:}
-
-            local tab=$(printf '\t')
-            comp=${comp//$tab/:}
-
-            __%[1]s_debug "Adding completion: ${comp}"
-            completions+=${comp}
-            lastComp=$comp
-        fi
-    done < <(printf "%%s\n" "${out[@]}")
-
-    if [ $((directive & shellCompDirectiveNoSpace)) -ne 0 ]; then
-        __%[1]s_debug "Activating nospace."
-        noSpace="-S ''"
-    fi
-
-    if [ $((directive & shellCompDirectiveFilterFileExt)) -ne 0 ]; then
-        # File extension filtering
-        local filteringCmd
-        filteringCmd='_files'
-        for filter in ${completions[@]}; do
-            if [ ${filter[1]} != '*' ]; then
-                # zsh requires a glob pattern to do file filtering
-                filter="\*.$filter"
-            fi
-            filteringCmd+=" -g $filter"
-        done
-        filteringCmd+=" ${flagPrefix}"
-
-        __%[1]s_debug "File filtering command: $filteringCmd"
-        _arguments '*:filename:'"$filteringCmd"
-    elif [ $((directive & shellCompDirectiveFilterDirs)) -ne 0 ]; then
-        # File completion for directories only
-        local subdir
-        subdir="${completions[1]}"
-        if [ -n "$subdir" ]; then
-            __%[1]s_debug "Listing directories in $subdir"
-            pushd "${subdir}" >/dev/null 2>&1
-        else
-            __%[1]s_debug "Listing directories in ."
-        fi
-
-        local result
-        _arguments '*:dirname:_files -/'" ${flagPrefix}"
-        result=$?
-        if [ -n "$subdir" ]; then
-            popd >/dev/null 2>&1
-        fi
-        return $result
-    else
-        __%[1]s_debug "Calling _describe"
-        if eval _describe "completions" completions $flagPrefix $noSpace; then
-            __%[1]s_debug "_describe found some completions"
-
-            # Return the success of having called _describe
-            return 0
-        else
-            __%[1]s_debug "_describe did not find completions."
-            __%[1]s_debug "Checking if we should do file completion."
-            if [ $((directive & shellCompDirectiveNoFileComp)) -ne 0 ]; then
-                __%[1]s_debug "deactivating file completion"
-
-                # We must return an error code here to let zsh know that there were no
-                # completions found by _describe; this is what will trigger other
-                # matching algorithms to attempt to find completions.
-                # For example zsh can match letters in the middle of words.
-                return 1
-            else
-                # Perform file completion
-                __%[1]s_debug "Activating file completion"
-
-                # We must return the result of this command, so it must be the
-                # last command, or else we must store its result to return it.
- _arguments '*:filename:_files'" ${flagPrefix}" - fi - fi - fi -} - -# don't run the completion function when being source-ed or eval-ed -if [ "$funcstack[1]" = "_%[1]s" ]; then - _%[1]s -fi -`, name, compCmd, - ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp, - ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs)) -} diff --git a/vendor/github.com/spf13/cobra/zsh_completions.md b/vendor/github.com/spf13/cobra/zsh_completions.md deleted file mode 100644 index 7cff6178..00000000 --- a/vendor/github.com/spf13/cobra/zsh_completions.md +++ /dev/null @@ -1,48 +0,0 @@ -## Generating Zsh Completion For Your cobra.Command - -Please refer to [Shell Completions](shell_completions.md) for details. - -## Zsh completions standardization - -Cobra 1.1 standardized its zsh completion support to align it with its other shell completions. Although the API was kept backwards-compatible, some small changes in behavior were introduced. - -### Deprecation summary - -See further below for more details on these deprecations. - -* `cmd.MarkZshCompPositionalArgumentFile(pos, []string{})` is no longer needed. It is therefore **deprecated** and silently ignored. -* `cmd.MarkZshCompPositionalArgumentFile(pos, glob[])` is **deprecated** and silently ignored. - * Instead use `ValidArgsFunction` with `ShellCompDirectiveFilterFileExt`. -* `cmd.MarkZshCompPositionalArgumentWords()` is **deprecated** and silently ignored. - * Instead use `ValidArgsFunction`. - -### Behavioral changes - -**Noun completion** -|Old behavior|New behavior| -|---|---| -|No file completion by default (opposite of bash)|File completion by default; use `ValidArgsFunction` with `ShellCompDirectiveNoFileComp` to turn off file completion on a per-argument basis| -|Completion of flag names without the `-` prefix having been typed|Flag names are only completed if the user has typed the first `-`| -`cmd.MarkZshCompPositionalArgumentFile(pos, []string{})` used to turn on file completion on a per-argument position basis|File completion for all arguments by default; `cmd.MarkZshCompPositionalArgumentFile()` is **deprecated** and silently ignored| -|`cmd.MarkZshCompPositionalArgumentFile(pos, glob[])` used to turn on file completion **with glob filtering** on a per-argument position basis (zsh-specific)|`cmd.MarkZshCompPositionalArgumentFile()` is **deprecated** and silently ignored; use `ValidArgsFunction` with `ShellCompDirectiveFilterFileExt` for file **extension** filtering (not full glob filtering)| -|`cmd.MarkZshCompPositionalArgumentWords(pos, words[])` used to provide completion choices on a per-argument position basis (zsh-specific)|`cmd.MarkZshCompPositionalArgumentWords()` is **deprecated** and silently ignored; use `ValidArgsFunction` to achieve the same behavior| - -**Flag-value completion** - -|Old behavior|New behavior| -|---|---| -|No file completion by default (opposite of bash)|File completion by default; use `RegisterFlagCompletionFunc()` with `ShellCompDirectiveNoFileComp` to turn off file completion| -|`cmd.MarkFlagFilename(flag, []string{})` and similar used to turn on file completion|File completion by default; `cmd.MarkFlagFilename(flag, []string{})` no longer needed in this context and silently ignored| -|`cmd.MarkFlagFilename(flag, glob[])` used to turn on file completion **with glob filtering** (syntax of `[]string{"*.yaml", "*.yml"}` incompatible with bash)|Will continue to work, however, support for bash syntax is added and should be used instead so as to work for all shells 
(`[]string{"yaml", "yml"}`)| -|`cmd.MarkFlagDirname(flag)` only completes directories (zsh-specific)|Has been added for all shells| -|Completion of a flag name does not repeat, unless flag is of type `*Array` or `*Slice` (not supported by bash)|Retained for `zsh` and added to `fish`| -|Completion of a flag name does not provide the `=` form (unlike bash)|Retained for `zsh` and added to `fish`| - -**Improvements** - -* Custom completion support (`ValidArgsFunction` and `RegisterFlagCompletionFunc()`) -* File completion by default if no other completions found -* Handling of required flags -* File extension filtering no longer mutually exclusive with bash usage -* Completion of directory names *within* another directory -* Support for `=` form of flags diff --git a/vendor/github.com/spf13/jwalterweatherman/.gitignore b/vendor/github.com/spf13/jwalterweatherman/.gitignore deleted file mode 100644 index a71f88af..00000000 --- a/vendor/github.com/spf13/jwalterweatherman/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.bench -go.sum \ No newline at end of file diff --git a/vendor/github.com/spf13/jwalterweatherman/LICENSE b/vendor/github.com/spf13/jwalterweatherman/LICENSE deleted file mode 100644 index 4527efb9..00000000 --- a/vendor/github.com/spf13/jwalterweatherman/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Steve Francia - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/spf13/jwalterweatherman/README.md b/vendor/github.com/spf13/jwalterweatherman/README.md deleted file mode 100644 index 932a23fc..00000000 --- a/vendor/github.com/spf13/jwalterweatherman/README.md +++ /dev/null @@ -1,148 +0,0 @@ -jWalterWeatherman -================= - -Seamless printing to the terminal (stdout) and logging to a io.Writer -(file) that’s as easy to use as fmt.Println. - -![and_that__s_why_you_always_leave_a_note_by_jonnyetc-d57q7um](https://cloud.githubusercontent.com/assets/173412/11002937/ccd01654-847d-11e5-828e-12ebaf582eaf.jpg) -Graphic by [JonnyEtc](http://jonnyetc.deviantart.com/art/And-That-s-Why-You-Always-Leave-a-Note-315311422) - -JWW is primarily a wrapper around the excellent standard log library. 
It
-provides a few advantages over using the standard log library alone.
-
-1. Ready to go out of the box.
-2. One library for both printing to the terminal and logging (to files).
-3. Really easy to log to either a temp file or a file you specify.
-
-
-I really wanted a very straightforward library that could seamlessly do
-the following things.
-
-1. Replace all the println, printf, etc statements throughout my code with
-   something more useful
-2. Allow the user to easily control what levels are printed to stdout
-3. Allow the user to easily control what levels are logged
-4. Provide an easy mechanism (like fmt.Println) to print info to the user
-   which can be easily logged as well
-5. Due to 2 & 3 provide easy verbose mode for output and logs
-6. Not have any unnecessary initialization cruft. Just use it.
-
-# Usage
-
-## Step 1. Use it
-Put calls throughout your source based on type of feedback.
-No initialization or setup needs to happen. Just start calling things.
-
-Available Loggers are:
-
- * TRACE
- * DEBUG
- * INFO
- * WARN
- * ERROR
- * CRITICAL
- * FATAL
-
-These are each loggers based on the standard log library and follow the
-standard usage. E.g.:
-
-```go
-    import (
-        jww "github.com/spf13/jwalterweatherman"
-    )
-
-    ...
-
-    if err != nil {
-
-        // This is a pretty serious error and the user should know about
-        // it. It will be printed to the terminal as well as logged under the
-        // default thresholds.
-
-        jww.ERROR.Println(err)
-    }
-
-    if err2 != nil {
-        // This error isn’t going to materially change the behavior of the
-        // application, but it’s something that may not be what the user
-        // expects. Under the default thresholds, Warn will be logged, but
-        // not printed to the terminal.
-
-        jww.WARN.Println(err2)
-    }
-
-    // Information that’s relevant to what’s happening, but not very
-    // important for the user. Under the default thresholds this will be
-    // discarded.
-
-    jww.INFO.Printf("information %q", response)
-
-```
-
-NOTE: You can also use the library in a non-global setting by creating an instance of a Notepad:
-
-```go
-notepad = jww.NewNotepad(jww.LevelInfo, jww.LevelTrace, os.Stdout, ioutil.Discard, "", log.Ldate|log.Ltime)
-notepad.WARN.Println("Some warning")
-```
-
-_Why 7 levels?_
-
-Maybe you think that 7 levels are too much for any application... and you
-are probably correct. Just because there are seven levels doesn’t mean
-that you should be using all 7 levels. Pick the right set for your needs.
-Remember they only have to mean something to your project.
-
-## Step 2. Optionally configure JWW
-
-Under the default thresholds:
-
- * Debug, Trace & Info go to /dev/null
- * Warn and above is logged (when a log file/io.Writer is provided)
- * Error and above is printed to the terminal (stdout)
-
-### Changing the thresholds
-
-The threshold can be changed at any time, but will only affect calls that
-execute after the change was made.
-
-This is very useful if your application has a verbose mode. Of course you
-can decide what verbose means to you or even have multiple levels of
-verbosity.
-
-
-```go
-    import (
-        jww "github.com/spf13/jwalterweatherman"
-    )
-
-    if Verbose {
-        jww.SetLogThreshold(jww.LevelTrace)
-        jww.SetStdoutThreshold(jww.LevelInfo)
-    }
-```
-
-Note that JWW's own internal output uses log levels as well, so set the log
-level before making any other calls if you want to see what it's up to.
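-
-Putting the pieces above together, a small runnable sketch (the `verbose` variable is illustrative):
-
-```go
-package main
-
-import (
-	jww "github.com/spf13/jwalterweatherman"
-)
-
-func main() {
-	verbose := true
-
-	// Under the default thresholds only ERROR and above reaches stdout.
-	jww.ERROR.Println("always printed")
-
-	if verbose {
-		// Lower the stdout threshold so INFO becomes visible too.
-		jww.SetStdoutThreshold(jww.LevelInfo)
-	}
-	jww.INFO.Println("printed only in verbose mode")
-}
-```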
-
-
-### Setting a log file
-
-JWW can log to any `io.Writer`:
-
-
-```go
-
-    jww.SetLogOutput(customWriter)
-
-```
-
-
-# More information
-
-This is an early release. I’ve been using it for a while and this is the
-third interface I’ve tried. I like this one pretty well, but no guarantees
-that it won’t change a bit.
-
-I wrote this for use in [hugo](https://gohugo.io). If you are looking
-for a static website engine that’s super fast please check out Hugo.
diff --git a/vendor/github.com/spf13/jwalterweatherman/default_notepad.go b/vendor/github.com/spf13/jwalterweatherman/default_notepad.go
deleted file mode 100644
index a018c15c..00000000
--- a/vendor/github.com/spf13/jwalterweatherman/default_notepad.go
+++ /dev/null
@@ -1,111 +0,0 @@
-// Copyright © 2016 Steve Francia <spf@spf13.com>.
-//
-// Use of this source code is governed by an MIT-style
-// license that can be found in the LICENSE file.
-
-package jwalterweatherman
-
-import (
-	"io"
-	"io/ioutil"
-	"log"
-	"os"
-)
-
-var (
-	TRACE    *log.Logger
-	DEBUG    *log.Logger
-	INFO     *log.Logger
-	WARN     *log.Logger
-	ERROR    *log.Logger
-	CRITICAL *log.Logger
-	FATAL    *log.Logger
-
-	LOG      *log.Logger
-	FEEDBACK *Feedback
-
-	defaultNotepad *Notepad
-)
-
-func reloadDefaultNotepad() {
-	TRACE = defaultNotepad.TRACE
-	DEBUG = defaultNotepad.DEBUG
-	INFO = defaultNotepad.INFO
-	WARN = defaultNotepad.WARN
-	ERROR = defaultNotepad.ERROR
-	CRITICAL = defaultNotepad.CRITICAL
-	FATAL = defaultNotepad.FATAL
-
-	LOG = defaultNotepad.LOG
-	FEEDBACK = defaultNotepad.FEEDBACK
-}
-
-func init() {
-	defaultNotepad = NewNotepad(LevelError, LevelWarn, os.Stdout, ioutil.Discard, "", log.Ldate|log.Ltime)
-	reloadDefaultNotepad()
-}
-
-// SetLogThreshold sets the log threshold for the default notepad. Warn by default.
-func SetLogThreshold(threshold Threshold) {
-	defaultNotepad.SetLogThreshold(threshold)
-	reloadDefaultNotepad()
-}
-
-// SetLogOutput sets the log output for the default notepad. Discarded by default.
-func SetLogOutput(handle io.Writer) {
-	defaultNotepad.SetLogOutput(handle)
-	reloadDefaultNotepad()
-}
-
-// SetStdoutThreshold sets the standard output threshold for the default notepad.
-// Error by default.
-func SetStdoutThreshold(threshold Threshold) {
-	defaultNotepad.SetStdoutThreshold(threshold)
-	reloadDefaultNotepad()
-}
-
-// SetStdoutOutput sets the stdout output for the default notepad. Default is stdout.
-func SetStdoutOutput(handle io.Writer) {
-	defaultNotepad.outHandle = handle
-	defaultNotepad.init()
-	reloadDefaultNotepad()
-}
-
-// SetPrefix sets the prefix for the default logger. Empty by default.
-func SetPrefix(prefix string) {
-	defaultNotepad.SetPrefix(prefix)
-	reloadDefaultNotepad()
-}
-
-// SetFlags sets the flags for the default logger. "log.Ldate | log.Ltime" by default.
-func SetFlags(flags int) {
-	defaultNotepad.SetFlags(flags)
-	reloadDefaultNotepad()
-}
-
-// SetLogListeners configures the default logger with one or more log listeners.
-func SetLogListeners(l ...LogListener) {
-	defaultNotepad.logListeners = l
-	defaultNotepad.init()
-	reloadDefaultNotepad()
-}
-
-// LogThreshold returns the current global log threshold.
-func LogThreshold() Threshold {
-	return defaultNotepad.logThreshold
-}
-
-// StdoutThreshold returns the current global output threshold.
-func StdoutThreshold() Threshold {
-	return defaultNotepad.stdoutThreshold
-}
-
-// GetLogThreshold returns the Threshold for the log logger.
-func GetLogThreshold() Threshold {
-	return defaultNotepad.GetLogThreshold()
-}
-
-// GetStdoutThreshold returns the Threshold for the stdout logger.
-func GetStdoutThreshold() Threshold {
-	return defaultNotepad.GetStdoutThreshold()
-}
diff --git a/vendor/github.com/spf13/jwalterweatherman/log_counter.go b/vendor/github.com/spf13/jwalterweatherman/log_counter.go
deleted file mode 100644
index 41285f3d..00000000
--- a/vendor/github.com/spf13/jwalterweatherman/log_counter.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright © 2016 Steve Francia <spf@spf13.com>.
-//
-// Use of this source code is governed by an MIT-style
-// license that can be found in the LICENSE file.
-
-package jwalterweatherman
-
-import (
-	"io"
-	"sync/atomic"
-)
-
-// Counter is an io.Writer that increments a counter on Write.
-type Counter struct {
-	count uint64
-}
-
-func (c *Counter) incr() {
-	atomic.AddUint64(&c.count, 1)
-}
-
-// Reset resets the counter.
-func (c *Counter) Reset() {
-	atomic.StoreUint64(&c.count, 0)
-}
-
-// Count returns the current count.
-func (c *Counter) Count() uint64 {
-	return atomic.LoadUint64(&c.count)
-}
-
-func (c *Counter) Write(p []byte) (n int, err error) {
-	c.incr()
-	return len(p), nil
-}
-
-// LogCounter creates a LogListener that counts log statements >= the given threshold.
-func LogCounter(counter *Counter, t1 Threshold) LogListener {
-	return func(t2 Threshold) io.Writer {
-		if t2 < t1 {
-			// Not interested in this threshold.
-			return nil
-		}
-		return counter
-	}
-}
diff --git a/vendor/github.com/spf13/jwalterweatherman/notepad.go b/vendor/github.com/spf13/jwalterweatherman/notepad.go
deleted file mode 100644
index cc7957bf..00000000
--- a/vendor/github.com/spf13/jwalterweatherman/notepad.go
+++ /dev/null
@@ -1,225 +0,0 @@
-// Copyright © 2016 Steve Francia <spf@spf13.com>.
-//
-// Use of this source code is governed by an MIT-style
-// license that can be found in the LICENSE file.
-
-package jwalterweatherman
-
-import (
-	"fmt"
-	"io"
-	"io/ioutil"
-	"log"
-)
-
-type Threshold int
-
-func (t Threshold) String() string {
-	return prefixes[t]
-}
-
-const (
-	LevelTrace Threshold = iota
-	LevelDebug
-	LevelInfo
-	LevelWarn
-	LevelError
-	LevelCritical
-	LevelFatal
-)
-
-var prefixes map[Threshold]string = map[Threshold]string{
-	LevelTrace:    "TRACE",
-	LevelDebug:    "DEBUG",
-	LevelInfo:     "INFO",
-	LevelWarn:     "WARN",
-	LevelError:    "ERROR",
-	LevelCritical: "CRITICAL",
-	LevelFatal:    "FATAL",
-}
-
-// Notepad is where you leave a note!
-type Notepad struct {
-	TRACE    *log.Logger
-	DEBUG    *log.Logger
-	INFO     *log.Logger
-	WARN     *log.Logger
-	ERROR    *log.Logger
-	CRITICAL *log.Logger
-	FATAL    *log.Logger
-
-	LOG      *log.Logger
-	FEEDBACK *Feedback
-
-	loggers         [7]**log.Logger
-	logHandle       io.Writer
-	outHandle       io.Writer
-	logThreshold    Threshold
-	stdoutThreshold Threshold
-	prefix          string
-	flags           int
-
-	logListeners []LogListener
-}
-
-// A LogListener can be supplied to a Notepad to listen on log writes for a given
-// threshold. This can be used to capture log events in unit tests and similar.
-// Note that this function will be invoked once for each log threshold. If
-// the given threshold is not of interest to you, return nil.
-// Note that these listeners will receive log events for a given threshold, even
-// if the current configuration says not to log it. That way you can count ERRORs even
-// if you don't print them to the console.
-type LogListener func(t Threshold) io.Writer
-
-// NewNotepad creates a new Notepad.
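-// outThreshold controls what is echoed to outHandle and logThreshold what is
-// written to logHandle; prefix and flags are passed through to the underlying
-// standard-library loggers.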
-func NewNotepad(
-	outThreshold Threshold,
-	logThreshold Threshold,
-	outHandle, logHandle io.Writer,
-	prefix string, flags int,
-	logListeners ...LogListener,
-) *Notepad {
-
-	n := &Notepad{logListeners: logListeners}
-
-	n.loggers = [7]**log.Logger{&n.TRACE, &n.DEBUG, &n.INFO, &n.WARN, &n.ERROR, &n.CRITICAL, &n.FATAL}
-	n.outHandle = outHandle
-	n.logHandle = logHandle
-	n.stdoutThreshold = outThreshold
-	n.logThreshold = logThreshold
-
-	if len(prefix) != 0 {
-		n.prefix = "[" + prefix + "] "
-	} else {
-		n.prefix = ""
-	}
-
-	n.flags = flags
-
-	n.LOG = log.New(n.logHandle,
-		"LOG: ",
-		n.flags)
-	n.FEEDBACK = &Feedback{out: log.New(outHandle, "", 0), log: n.LOG}
-
-	n.init()
-	return n
-}
-
-// init creates the loggers for each level depending on the notepad thresholds.
-func (n *Notepad) init() {
-	logAndOut := io.MultiWriter(n.outHandle, n.logHandle)
-
-	for t, logger := range n.loggers {
-		threshold := Threshold(t)
-		prefix := n.prefix + threshold.String() + " "
-
-		switch {
-		case threshold >= n.logThreshold && threshold >= n.stdoutThreshold:
-			*logger = log.New(n.createLogWriters(threshold, logAndOut), prefix, n.flags)
-
-		case threshold >= n.logThreshold:
-			*logger = log.New(n.createLogWriters(threshold, n.logHandle), prefix, n.flags)
-
-		case threshold >= n.stdoutThreshold:
-			*logger = log.New(n.createLogWriters(threshold, n.outHandle), prefix, n.flags)
-
-		default:
-			*logger = log.New(n.createLogWriters(threshold, ioutil.Discard), prefix, n.flags)
-		}
-	}
-}
-
-func (n *Notepad) createLogWriters(t Threshold, handle io.Writer) io.Writer {
-	if len(n.logListeners) == 0 {
-		return handle
-	}
-	writers := []io.Writer{handle}
-	for _, l := range n.logListeners {
-		w := l(t)
-		if w != nil {
-			writers = append(writers, w)
-		}
-	}
-
-	if len(writers) == 1 {
-		return handle
-	}
-
-	return io.MultiWriter(writers...)
-}
-
-// SetLogThreshold changes the threshold at or above which messages are written to the
-// log file.
-func (n *Notepad) SetLogThreshold(threshold Threshold) {
-	n.logThreshold = threshold
-	n.init()
-}
-
-// SetLogOutput changes the file where log messages are written.
-func (n *Notepad) SetLogOutput(handle io.Writer) {
-	n.logHandle = handle
-	n.init()
-}
-
-// GetLogThreshold returns the Threshold for the log logger.
-func (n *Notepad) GetLogThreshold() Threshold {
-	return n.logThreshold
-}
-
-// SetStdoutThreshold changes the threshold at or above which messages are written to the
-// standard output.
-func (n *Notepad) SetStdoutThreshold(threshold Threshold) {
-	n.stdoutThreshold = threshold
-	n.init()
-}
-
-// GetStdoutThreshold returns the Threshold for the stdout logger.
-func (n *Notepad) GetStdoutThreshold() Threshold {
-	return n.stdoutThreshold
-}
-
-// SetPrefix changes the prefix used by the notepad. Prefixes are displayed between
-// brackets at the beginning of the line. An empty prefix won't be displayed at all.
-func (n *Notepad) SetPrefix(prefix string) {
-	if len(prefix) != 0 {
-		n.prefix = "[" + prefix + "] "
-	} else {
-		n.prefix = ""
-	}
-	n.init()
-}
-
-// SetFlags chooses which flags the logger will display (after prefix and message
-// level). See the package log for more information on this.
-func (n *Notepad) SetFlags(flags int) {
-	n.flags = flags
-	n.init()
-}
-
-// Feedback writes plainly to the outHandle while
-// logging with the standard extra information (date, file, etc).
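-// A Feedback is exposed through the package-level FEEDBACK variable in default_notepad.go.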
-type Feedback struct { - out *log.Logger - log *log.Logger -} - -func (fb *Feedback) Println(v ...interface{}) { - fb.output(fmt.Sprintln(v...)) -} - -func (fb *Feedback) Printf(format string, v ...interface{}) { - fb.output(fmt.Sprintf(format, v...)) -} - -func (fb *Feedback) Print(v ...interface{}) { - fb.output(fmt.Sprint(v...)) -} - -func (fb *Feedback) output(s string) { - if fb.out != nil { - fb.out.Output(2, s) - } - if fb.log != nil { - fb.log.Output(2, s) - } -} diff --git a/vendor/github.com/spf13/pflag/.gitignore b/vendor/github.com/spf13/pflag/.gitignore deleted file mode 100644 index c3da2901..00000000 --- a/vendor/github.com/spf13/pflag/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -.idea/* - diff --git a/vendor/github.com/spf13/pflag/.travis.yml b/vendor/github.com/spf13/pflag/.travis.yml deleted file mode 100644 index 00d04cb9..00000000 --- a/vendor/github.com/spf13/pflag/.travis.yml +++ /dev/null @@ -1,22 +0,0 @@ -sudo: false - -language: go - -go: - - 1.9.x - - 1.10.x - - 1.11.x - - tip - -matrix: - allow_failures: - - go: tip - -install: - - go get golang.org/x/lint/golint - - export PATH=$GOPATH/bin:$PATH - - go install ./... - -script: - - verify/all.sh -v - - go test ./... diff --git a/vendor/github.com/spf13/pflag/LICENSE b/vendor/github.com/spf13/pflag/LICENSE deleted file mode 100644 index 63ed1cfe..00000000 --- a/vendor/github.com/spf13/pflag/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2012 Alex Ogier. All rights reserved. -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
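The jwalterweatherman deletion above drops its leveled-logging notepad. For reviewers unfamiliar with the package, here is a minimal, illustrative sketch of how its Notepad and LogCounter were typically wired together, based only on the signatures visible in the deleted sources; the thresholds, prefix, and variable names are arbitrary:

```go
package main

import (
	"bytes"
	"fmt"
	"os"

	jww "github.com/spf13/jwalterweatherman"
)

func main() {
	// A Counter is an io.Writer; LogCounter turns it into a LogListener
	// that only attaches to thresholds >= LevelError.
	var errCount jww.Counter

	var logBuf bytes.Buffer
	n := jww.NewNotepad(
		jww.LevelInfo,  // stdout threshold
		jww.LevelTrace, // log threshold
		os.Stdout, &logBuf, // out and log handles
		"candy", 0, // prefix and log flags
		jww.LogCounter(&errCount, jww.LevelError),
	)

	n.INFO.Println("written to stdout and the log buffer")
	n.ERROR.Println("also incremented the counter")

	fmt.Println("errors logged:", errCount.Count())
}
```

Listeners receive writes even at thresholds the notepad is configured to discard, which is why the deleted comments suggest them for counting ERRORs in tests.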
diff --git a/vendor/github.com/spf13/pflag/README.md b/vendor/github.com/spf13/pflag/README.md deleted file mode 100644 index 7eacc5bd..00000000 --- a/vendor/github.com/spf13/pflag/README.md +++ /dev/null @@ -1,296 +0,0 @@ -[![Build Status](https://travis-ci.org/spf13/pflag.svg?branch=master)](https://travis-ci.org/spf13/pflag) -[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/pflag)](https://goreportcard.com/report/github.com/spf13/pflag) -[![GoDoc](https://godoc.org/github.com/spf13/pflag?status.svg)](https://godoc.org/github.com/spf13/pflag) - -## Description - -pflag is a drop-in replacement for Go's flag package, implementing -POSIX/GNU-style --flags. - -pflag is compatible with the [GNU extensions to the POSIX recommendations -for command-line options][1]. For a more precise description, see the -"Command-line flag syntax" section below. - -[1]: http://www.gnu.org/software/libc/manual/html_node/Argument-Syntax.html - -pflag is available under the same style of BSD license as the Go language, -which can be found in the LICENSE file. - -## Installation - -pflag is available using the standard `go get` command. - -Install by running: - - go get github.com/spf13/pflag - -Run tests by running: - - go test github.com/spf13/pflag - -## Usage - -pflag is a drop-in replacement of Go's native flag package. If you import -pflag under the name "flag" then all code should continue to function -with no changes. - -``` go -import flag "github.com/spf13/pflag" -``` - -There is one exception to this: if you directly instantiate the Flag struct -there is one more field "Shorthand" that you will need to set. -Most code never instantiates this struct directly, and instead uses -functions such as String(), BoolVar(), and Var(), and is therefore -unaffected. - -Define flags using flag.String(), Bool(), Int(), etc. - -This declares an integer flag, -flagname, stored in the pointer ip, with type *int. - -``` go -var ip *int = flag.Int("flagname", 1234, "help message for flagname") -``` - -If you like, you can bind the flag to a variable using the Var() functions. - -``` go -var flagvar int -func init() { - flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname") -} -``` - -Or you can create custom flags that satisfy the Value interface (with -pointer receivers) and couple them to flag parsing by - -``` go -flag.Var(&flagVal, "name", "help message for flagname") -``` - -For such flags, the default value is just the initial value of the variable. - -After all flags are defined, call - -``` go -flag.Parse() -``` - -to parse the command line into the defined flags. - -Flags may then be used directly. If you're using the flags themselves, -they are all pointers; if you bind to variables, they're values. - -``` go -fmt.Println("ip has value ", *ip) -fmt.Println("flagvar has value ", flagvar) -``` - -There are helper functions available to get the value stored in a Flag if you have a FlagSet but find -it difficult to keep up with all of the pointers in your code. -If you have a pflag.FlagSet with a flag called 'flagname' of type int you -can use GetInt() to get the int value. But notice that 'flagname' must exist -and it must be an int. GetString("flagname") will fail. - -``` go -i, err := flagset.GetInt("flagname") -``` - -After parsing, the arguments after the flag are available as the -slice flag.Args() or individually as flag.Arg(i). -The arguments are indexed from 0 through flag.NArg()-1. 
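The README section above covers basic flag definition, parsing, and the Get* helpers before moving on to shorthands below. As a quick consolidation, this illustrative sketch (using only the pflag API quoted in the deleted text) defines an int flag on a dedicated FlagSet, parses a fixed argument list, and reads the value back with GetInt:

```go
package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
)

func main() {
	fs := flag.NewFlagSet("example", flag.ContinueOnError)
	fs.Int("flagname", 1234, "help message for flagname")

	// A real program would pass os.Args[1:] here.
	if err := fs.Parse([]string{"--flagname=1357", "positional"}); err != nil {
		fmt.Println("parse error:", err)
		return
	}

	// GetInt succeeds because "flagname" exists and is an int;
	// GetString("flagname") would return an error instead.
	i, _ := fs.GetInt("flagname")
	fmt.Println("flagname:", i) // 1357
	fmt.Println("args:", fs.Args())
}
```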
- -The pflag package also defines some new functions that are not in flag, -that give one-letter shorthands for flags. You can use these by appending -'P' to the name of any function that defines a flag. - -``` go -var ip = flag.IntP("flagname", "f", 1234, "help message") -var flagvar bool -func init() { - flag.BoolVarP(&flagvar, "boolname", "b", true, "help message") -} -flag.VarP(&flagVal, "varname", "v", "help message") -``` - -Shorthand letters can be used with single dashes on the command line. -Boolean shorthand flags can be combined with other shorthand flags. - -The default set of command-line flags is controlled by -top-level functions. The FlagSet type allows one to define -independent sets of flags, such as to implement subcommands -in a command-line interface. The methods of FlagSet are -analogous to the top-level functions for the command-line -flag set. - -## Setting no option default values for flags - -After you create a flag it is possible to set the pflag.NoOptDefVal for -the given flag. Doing this changes the meaning of the flag slightly. If -a flag has a NoOptDefVal and the flag is set on the command line without -an option the flag will be set to the NoOptDefVal. For example given: - -``` go -var ip = flag.IntP("flagname", "f", 1234, "help message") -flag.Lookup("flagname").NoOptDefVal = "4321" -``` - -Would result in something like - -| Parsed Arguments | Resulting Value | -| ------------- | ------------- | -| --flagname=1357 | ip=1357 | -| --flagname | ip=4321 | -| [nothing] | ip=1234 | - -## Command line flag syntax - -``` ---flag // boolean flags, or flags with no option default values ---flag x // only on flags without a default value ---flag=x -``` - -Unlike the flag package, a single dash before an option means something -different than a double dash. Single dashes signify a series of shorthand -letters for flags. All but the last shorthand letter must be boolean flags -or a flag with a default value - -``` -// boolean or flags where the 'no option default value' is set --f --f=true --abc -but --b true is INVALID - -// non-boolean and flags without a 'no option default value' --n 1234 --n=1234 --n1234 - -// mixed --abcs "hello" --absd="hello" --abcs1234 -``` - -Flag parsing stops after the terminator "--". Unlike the flag package, -flags can be interspersed with arguments anywhere on the command line -before this terminator. - -Integer flags accept 1234, 0664, 0x1234 and may be negative. -Boolean flags (in their long form) accept 1, 0, t, f, true, false, -TRUE, FALSE, True, False. -Duration flags accept any input valid for time.ParseDuration. - -## Mutating or "Normalizing" Flag names - -It is possible to set a custom flag name 'normalization function.' It allows flag names to be mutated both when created in the code and when used on the command line to some 'normalized' form. The 'normalized' form is used for comparison. Two examples of using the custom normalization func follow. - -**Example #1**: You want -, _, and . in flags to compare the same. aka --my-flag == --my_flag == --my.flag - -``` go -func wordSepNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName { - from := []string{"-", "_"} - to := "." - for _, sep := range from { - name = strings.Replace(name, sep, to, -1) - } - return pflag.NormalizedName(name) -} - -myFlagSet.SetNormalizeFunc(wordSepNormalizeFunc) -``` - -**Example #2**: You want to alias two flags. 
aka --old-flag-name == --new-flag-name - -``` go -func aliasNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName { - switch name { - case "old-flag-name": - name = "new-flag-name" - break - } - return pflag.NormalizedName(name) -} - -myFlagSet.SetNormalizeFunc(aliasNormalizeFunc) -``` - -## Deprecating a flag or its shorthand -It is possible to deprecate a flag, or just its shorthand. Deprecating a flag/shorthand hides it from help text and prints a usage message when the deprecated flag/shorthand is used. - -**Example #1**: You want to deprecate a flag named "badflag" as well as inform the users what flag they should use instead. -```go -// deprecate a flag by specifying its name and a usage message -flags.MarkDeprecated("badflag", "please use --good-flag instead") -``` -This hides "badflag" from help text, and prints `Flag --badflag has been deprecated, please use --good-flag instead` when "badflag" is used. - -**Example #2**: You want to keep a flag name "noshorthandflag" but deprecate its shortname "n". -```go -// deprecate a flag shorthand by specifying its flag name and a usage message -flags.MarkShorthandDeprecated("noshorthandflag", "please use --noshorthandflag only") -``` -This hides the shortname "n" from help text, and prints `Flag shorthand -n has been deprecated, please use --noshorthandflag only` when the shorthand "n" is used. - -Note that the usage message is essential here, and it should not be empty. - -## Hidden flags -It is possible to mark a flag as hidden, meaning it will still function as normal; however, it will not show up in usage/help text. - -**Example**: You have a flag named "secretFlag" that you need for internal use only and don't want it showing up in help text, or for its usage text to be available. -```go -// hide a flag by specifying its name -flags.MarkHidden("secretFlag") -``` - -## Disable sorting of flags -`pflag` allows you to disable sorting of flags for help and usage messages. - -**Example**: -```go -flags.BoolP("verbose", "v", false, "verbose output") -flags.String("coolflag", "yeaah", "it's really cool flag") -flags.Int("usefulflag", 777, "sometimes it's very useful") -flags.SortFlags = false -flags.PrintDefaults() -``` -**Output**: -``` - -v, --verbose verbose output - --coolflag string it's really cool flag (default "yeaah") - --usefulflag int sometimes it's very useful (default 777) -``` - - -## Supporting Go flags when using pflag -In order to support flags defined using Go's `flag` package, they must be added to the `pflag` flagset. This is usually necessary -to support flags defined by third-party dependencies (e.g. `golang/glog`). - -**Example**: You want to add the Go flags to the `CommandLine` flagset -```go -import ( - goflag "flag" - flag "github.com/spf13/pflag" -) - -var ip *int = flag.Int("flagname", 1234, "help message for flagname") - -func main() { - flag.CommandLine.AddGoFlagSet(goflag.CommandLine) - flag.Parse() -} -``` - -## More info - -You can see the full reference documentation of the pflag package -[at godoc.org][3], or through go's standard documentation system by -running `godoc -http=:6060` and browsing to -[http://localhost:6060/pkg/github.com/spf13/pflag][2] after -installation.
- -[2]: http://localhost:6060/pkg/github.com/spf13/pflag -[3]: http://godoc.org/github.com/spf13/pflag diff --git a/vendor/github.com/spf13/pflag/bool.go b/vendor/github.com/spf13/pflag/bool.go deleted file mode 100644 index c4c5c0bf..00000000 --- a/vendor/github.com/spf13/pflag/bool.go +++ /dev/null @@ -1,94 +0,0 @@ -package pflag - -import "strconv" - -// optional interface to indicate boolean flags that can be -// supplied without "=value" text -type boolFlag interface { - Value - IsBoolFlag() bool -} - -// -- bool Value -type boolValue bool - -func newBoolValue(val bool, p *bool) *boolValue { - *p = val - return (*boolValue)(p) -} - -func (b *boolValue) Set(s string) error { - v, err := strconv.ParseBool(s) - *b = boolValue(v) - return err -} - -func (b *boolValue) Type() string { - return "bool" -} - -func (b *boolValue) String() string { return strconv.FormatBool(bool(*b)) } - -func (b *boolValue) IsBoolFlag() bool { return true } - -func boolConv(sval string) (interface{}, error) { - return strconv.ParseBool(sval) -} - -// GetBool return the bool value of a flag with the given name -func (f *FlagSet) GetBool(name string) (bool, error) { - val, err := f.getFlagType(name, "bool", boolConv) - if err != nil { - return false, err - } - return val.(bool), nil -} - -// BoolVar defines a bool flag with specified name, default value, and usage string. -// The argument p points to a bool variable in which to store the value of the flag. -func (f *FlagSet) BoolVar(p *bool, name string, value bool, usage string) { - f.BoolVarP(p, name, "", value, usage) -} - -// BoolVarP is like BoolVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) BoolVarP(p *bool, name, shorthand string, value bool, usage string) { - flag := f.VarPF(newBoolValue(value, p), name, shorthand, usage) - flag.NoOptDefVal = "true" -} - -// BoolVar defines a bool flag with specified name, default value, and usage string. -// The argument p points to a bool variable in which to store the value of the flag. -func BoolVar(p *bool, name string, value bool, usage string) { - BoolVarP(p, name, "", value, usage) -} - -// BoolVarP is like BoolVar, but accepts a shorthand letter that can be used after a single dash. -func BoolVarP(p *bool, name, shorthand string, value bool, usage string) { - flag := CommandLine.VarPF(newBoolValue(value, p), name, shorthand, usage) - flag.NoOptDefVal = "true" -} - -// Bool defines a bool flag with specified name, default value, and usage string. -// The return value is the address of a bool variable that stores the value of the flag. -func (f *FlagSet) Bool(name string, value bool, usage string) *bool { - return f.BoolP(name, "", value, usage) -} - -// BoolP is like Bool, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) BoolP(name, shorthand string, value bool, usage string) *bool { - p := new(bool) - f.BoolVarP(p, name, shorthand, value, usage) - return p -} - -// Bool defines a bool flag with specified name, default value, and usage string. -// The return value is the address of a bool variable that stores the value of the flag. -func Bool(name string, value bool, usage string) *bool { - return BoolP(name, "", value, usage) -} - -// BoolP is like Bool, but accepts a shorthand letter that can be used after a single dash. 
-func BoolP(name, shorthand string, value bool, usage string) *bool { - b := CommandLine.BoolP(name, shorthand, value, usage) - return b -} diff --git a/vendor/github.com/spf13/pflag/bool_slice.go b/vendor/github.com/spf13/pflag/bool_slice.go deleted file mode 100644 index 3731370d..00000000 --- a/vendor/github.com/spf13/pflag/bool_slice.go +++ /dev/null @@ -1,185 +0,0 @@ -package pflag - -import ( - "io" - "strconv" - "strings" -) - -// -- boolSlice Value -type boolSliceValue struct { - value *[]bool - changed bool -} - -func newBoolSliceValue(val []bool, p *[]bool) *boolSliceValue { - bsv := new(boolSliceValue) - bsv.value = p - *bsv.value = val - return bsv -} - -// Set converts, and assigns, the comma-separated boolean argument string representation as the []bool value of this flag. -// If Set is called on a flag that already has a []bool assigned, the newly converted values will be appended. -func (s *boolSliceValue) Set(val string) error { - - // remove all quote characters - rmQuote := strings.NewReplacer(`"`, "", `'`, "", "`", "") - - // read flag arguments with CSV parser - boolStrSlice, err := readAsCSV(rmQuote.Replace(val)) - if err != nil && err != io.EOF { - return err - } - - // parse boolean values into slice - out := make([]bool, 0, len(boolStrSlice)) - for _, boolStr := range boolStrSlice { - b, err := strconv.ParseBool(strings.TrimSpace(boolStr)) - if err != nil { - return err - } - out = append(out, b) - } - - if !s.changed { - *s.value = out - } else { - *s.value = append(*s.value, out...) - } - - s.changed = true - - return nil -} - -// Type returns a string that uniquely represents this flag's type. -func (s *boolSliceValue) Type() string { - return "boolSlice" -} - -// String defines a "native" format for this boolean slice flag value. -func (s *boolSliceValue) String() string { - - boolStrSlice := make([]string, len(*s.value)) - for i, b := range *s.value { - boolStrSlice[i] = strconv.FormatBool(b) - } - - out, _ := writeAsCSV(boolStrSlice) - - return "[" + out + "]" -} - -func (s *boolSliceValue) fromString(val string) (bool, error) { - return strconv.ParseBool(val) -} - -func (s *boolSliceValue) toString(val bool) string { - return strconv.FormatBool(val) -} - -func (s *boolSliceValue) Append(val string) error { - i, err := s.fromString(val) - if err != nil { - return err - } - *s.value = append(*s.value, i) - return nil -} - -func (s *boolSliceValue) Replace(val []string) error { - out := make([]bool, len(val)) - for i, d := range val { - var err error - out[i], err = s.fromString(d) - if err != nil { - return err - } - } - *s.value = out - return nil -} - -func (s *boolSliceValue) GetSlice() []string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = s.toString(d) - } - return out -} - -func boolSliceConv(val string) (interface{}, error) { - val = strings.Trim(val, "[]") - // Empty string would cause a slice with one (empty) entry - if len(val) == 0 { - return []bool{}, nil - } - ss := strings.Split(val, ",") - out := make([]bool, len(ss)) - for i, t := range ss { - var err error - out[i], err = strconv.ParseBool(t) - if err != nil { - return nil, err - } - } - return out, nil -} - -// GetBoolSlice returns the []bool value of a flag with the given name. 
-func (f *FlagSet) GetBoolSlice(name string) ([]bool, error) { - val, err := f.getFlagType(name, "boolSlice", boolSliceConv) - if err != nil { - return []bool{}, err - } - return val.([]bool), nil -} - -// BoolSliceVar defines a boolSlice flag with specified name, default value, and usage string. -// The argument p points to a []bool variable in which to store the value of the flag. -func (f *FlagSet) BoolSliceVar(p *[]bool, name string, value []bool, usage string) { - f.VarP(newBoolSliceValue(value, p), name, "", usage) -} - -// BoolSliceVarP is like BoolSliceVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) BoolSliceVarP(p *[]bool, name, shorthand string, value []bool, usage string) { - f.VarP(newBoolSliceValue(value, p), name, shorthand, usage) -} - -// BoolSliceVar defines a []bool flag with specified name, default value, and usage string. -// The argument p points to a []bool variable in which to store the value of the flag. -func BoolSliceVar(p *[]bool, name string, value []bool, usage string) { - CommandLine.VarP(newBoolSliceValue(value, p), name, "", usage) -} - -// BoolSliceVarP is like BoolSliceVar, but accepts a shorthand letter that can be used after a single dash. -func BoolSliceVarP(p *[]bool, name, shorthand string, value []bool, usage string) { - CommandLine.VarP(newBoolSliceValue(value, p), name, shorthand, usage) -} - -// BoolSlice defines a []bool flag with specified name, default value, and usage string. -// The return value is the address of a []bool variable that stores the value of the flag. -func (f *FlagSet) BoolSlice(name string, value []bool, usage string) *[]bool { - p := []bool{} - f.BoolSliceVarP(&p, name, "", value, usage) - return &p -} - -// BoolSliceP is like BoolSlice, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) BoolSliceP(name, shorthand string, value []bool, usage string) *[]bool { - p := []bool{} - f.BoolSliceVarP(&p, name, shorthand, value, usage) - return &p -} - -// BoolSlice defines a []bool flag with specified name, default value, and usage string. -// The return value is the address of a []bool variable that stores the value of the flag. -func BoolSlice(name string, value []bool, usage string) *[]bool { - return CommandLine.BoolSliceP(name, "", value, usage) -} - -// BoolSliceP is like BoolSlice, but accepts a shorthand letter that can be used after a single dash. -func BoolSliceP(name, shorthand string, value []bool, usage string) *[]bool { - return CommandLine.BoolSliceP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/bytes.go b/vendor/github.com/spf13/pflag/bytes.go deleted file mode 100644 index 67d53045..00000000 --- a/vendor/github.com/spf13/pflag/bytes.go +++ /dev/null @@ -1,209 +0,0 @@ -package pflag - -import ( - "encoding/base64" - "encoding/hex" - "fmt" - "strings" -) - -// BytesHex adapts []byte for use as a flag. Value of flag is HEX encoded -type bytesHexValue []byte - -// String implements pflag.Value.String. -func (bytesHex bytesHexValue) String() string { - return fmt.Sprintf("%X", []byte(bytesHex)) -} - -// Set implements pflag.Value.Set. -func (bytesHex *bytesHexValue) Set(value string) error { - bin, err := hex.DecodeString(strings.TrimSpace(value)) - - if err != nil { - return err - } - - *bytesHex = bin - - return nil -} - -// Type implements pflag.Value.Type. 
-func (*bytesHexValue) Type() string { - return "bytesHex" -} - -func newBytesHexValue(val []byte, p *[]byte) *bytesHexValue { - *p = val - return (*bytesHexValue)(p) -} - -func bytesHexConv(sval string) (interface{}, error) { - - bin, err := hex.DecodeString(sval) - - if err == nil { - return bin, nil - } - - return nil, fmt.Errorf("invalid string being converted to Bytes: %s %s", sval, err) -} - -// GetBytesHex return the []byte value of a flag with the given name -func (f *FlagSet) GetBytesHex(name string) ([]byte, error) { - val, err := f.getFlagType(name, "bytesHex", bytesHexConv) - - if err != nil { - return []byte{}, err - } - - return val.([]byte), nil -} - -// BytesHexVar defines an []byte flag with specified name, default value, and usage string. -// The argument p points to an []byte variable in which to store the value of the flag. -func (f *FlagSet) BytesHexVar(p *[]byte, name string, value []byte, usage string) { - f.VarP(newBytesHexValue(value, p), name, "", usage) -} - -// BytesHexVarP is like BytesHexVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) BytesHexVarP(p *[]byte, name, shorthand string, value []byte, usage string) { - f.VarP(newBytesHexValue(value, p), name, shorthand, usage) -} - -// BytesHexVar defines an []byte flag with specified name, default value, and usage string. -// The argument p points to an []byte variable in which to store the value of the flag. -func BytesHexVar(p *[]byte, name string, value []byte, usage string) { - CommandLine.VarP(newBytesHexValue(value, p), name, "", usage) -} - -// BytesHexVarP is like BytesHexVar, but accepts a shorthand letter that can be used after a single dash. -func BytesHexVarP(p *[]byte, name, shorthand string, value []byte, usage string) { - CommandLine.VarP(newBytesHexValue(value, p), name, shorthand, usage) -} - -// BytesHex defines an []byte flag with specified name, default value, and usage string. -// The return value is the address of an []byte variable that stores the value of the flag. -func (f *FlagSet) BytesHex(name string, value []byte, usage string) *[]byte { - p := new([]byte) - f.BytesHexVarP(p, name, "", value, usage) - return p -} - -// BytesHexP is like BytesHex, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) BytesHexP(name, shorthand string, value []byte, usage string) *[]byte { - p := new([]byte) - f.BytesHexVarP(p, name, shorthand, value, usage) - return p -} - -// BytesHex defines an []byte flag with specified name, default value, and usage string. -// The return value is the address of an []byte variable that stores the value of the flag. -func BytesHex(name string, value []byte, usage string) *[]byte { - return CommandLine.BytesHexP(name, "", value, usage) -} - -// BytesHexP is like BytesHex, but accepts a shorthand letter that can be used after a single dash. -func BytesHexP(name, shorthand string, value []byte, usage string) *[]byte { - return CommandLine.BytesHexP(name, shorthand, value, usage) -} - -// BytesBase64 adapts []byte for use as a flag. Value of flag is Base64 encoded -type bytesBase64Value []byte - -// String implements pflag.Value.String. -func (bytesBase64 bytesBase64Value) String() string { - return base64.StdEncoding.EncodeToString([]byte(bytesBase64)) -} - -// Set implements pflag.Value.Set. 
-func (bytesBase64 *bytesBase64Value) Set(value string) error { - bin, err := base64.StdEncoding.DecodeString(strings.TrimSpace(value)) - - if err != nil { - return err - } - - *bytesBase64 = bin - - return nil -} - -// Type implements pflag.Value.Type. -func (*bytesBase64Value) Type() string { - return "bytesBase64" -} - -func newBytesBase64Value(val []byte, p *[]byte) *bytesBase64Value { - *p = val - return (*bytesBase64Value)(p) -} - -func bytesBase64ValueConv(sval string) (interface{}, error) { - - bin, err := base64.StdEncoding.DecodeString(sval) - if err == nil { - return bin, nil - } - - return nil, fmt.Errorf("invalid string being converted to Bytes: %s %s", sval, err) -} - -// GetBytesBase64 return the []byte value of a flag with the given name -func (f *FlagSet) GetBytesBase64(name string) ([]byte, error) { - val, err := f.getFlagType(name, "bytesBase64", bytesBase64ValueConv) - - if err != nil { - return []byte{}, err - } - - return val.([]byte), nil -} - -// BytesBase64Var defines an []byte flag with specified name, default value, and usage string. -// The argument p points to an []byte variable in which to store the value of the flag. -func (f *FlagSet) BytesBase64Var(p *[]byte, name string, value []byte, usage string) { - f.VarP(newBytesBase64Value(value, p), name, "", usage) -} - -// BytesBase64VarP is like BytesBase64Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) BytesBase64VarP(p *[]byte, name, shorthand string, value []byte, usage string) { - f.VarP(newBytesBase64Value(value, p), name, shorthand, usage) -} - -// BytesBase64Var defines an []byte flag with specified name, default value, and usage string. -// The argument p points to an []byte variable in which to store the value of the flag. -func BytesBase64Var(p *[]byte, name string, value []byte, usage string) { - CommandLine.VarP(newBytesBase64Value(value, p), name, "", usage) -} - -// BytesBase64VarP is like BytesBase64Var, but accepts a shorthand letter that can be used after a single dash. -func BytesBase64VarP(p *[]byte, name, shorthand string, value []byte, usage string) { - CommandLine.VarP(newBytesBase64Value(value, p), name, shorthand, usage) -} - -// BytesBase64 defines an []byte flag with specified name, default value, and usage string. -// The return value is the address of an []byte variable that stores the value of the flag. -func (f *FlagSet) BytesBase64(name string, value []byte, usage string) *[]byte { - p := new([]byte) - f.BytesBase64VarP(p, name, "", value, usage) - return p -} - -// BytesBase64P is like BytesBase64, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) BytesBase64P(name, shorthand string, value []byte, usage string) *[]byte { - p := new([]byte) - f.BytesBase64VarP(p, name, shorthand, value, usage) - return p -} - -// BytesBase64 defines an []byte flag with specified name, default value, and usage string. -// The return value is the address of an []byte variable that stores the value of the flag. -func BytesBase64(name string, value []byte, usage string) *[]byte { - return CommandLine.BytesBase64P(name, "", value, usage) -} - -// BytesBase64P is like BytesBase64, but accepts a shorthand letter that can be used after a single dash. 
-func BytesBase64P(name, shorthand string, value []byte, usage string) *[]byte { - return CommandLine.BytesBase64P(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/count.go b/vendor/github.com/spf13/pflag/count.go deleted file mode 100644 index a0b2679f..00000000 --- a/vendor/github.com/spf13/pflag/count.go +++ /dev/null @@ -1,96 +0,0 @@ -package pflag - -import "strconv" - -// -- count Value -type countValue int - -func newCountValue(val int, p *int) *countValue { - *p = val - return (*countValue)(p) -} - -func (i *countValue) Set(s string) error { - // "+1" means that no specific value was passed, so increment - if s == "+1" { - *i = countValue(*i + 1) - return nil - } - v, err := strconv.ParseInt(s, 0, 0) - *i = countValue(v) - return err -} - -func (i *countValue) Type() string { - return "count" -} - -func (i *countValue) String() string { return strconv.Itoa(int(*i)) } - -func countConv(sval string) (interface{}, error) { - i, err := strconv.Atoi(sval) - if err != nil { - return nil, err - } - return i, nil -} - -// GetCount returns the int value of a flag with the given name -func (f *FlagSet) GetCount(name string) (int, error) { - val, err := f.getFlagType(name, "count", countConv) - if err != nil { - return 0, err - } - return val.(int), nil -} - -// CountVar defines a count flag with specified name, default value, and usage string. -// The argument p points to an int variable in which to store the value of the flag. -// A count flag will add 1 to its value every time it is found on the command line -func (f *FlagSet) CountVar(p *int, name string, usage string) { - f.CountVarP(p, name, "", usage) -} - -// CountVarP is like CountVar only takes a shorthand for the flag name. -func (f *FlagSet) CountVarP(p *int, name, shorthand string, usage string) { - flag := f.VarPF(newCountValue(0, p), name, shorthand, usage) - flag.NoOptDefVal = "+1" -} - -// CountVar is like CountVar only the flag is placed on the CommandLine instead of a given flag set -func CountVar(p *int, name string, usage string) { - CommandLine.CountVar(p, name, usage) -} - -// CountVarP is like CountVar only takes a shorthand for the flag name. -func CountVarP(p *int, name, shorthand string, usage string) { - CommandLine.CountVarP(p, name, shorthand, usage) -} - -// Count defines a count flag with specified name, default value, and usage string. -// The return value is the address of an int variable that stores the value of the flag. -// A count flag will add 1 to its value every time it is found on the command line -func (f *FlagSet) Count(name string, usage string) *int { - p := new(int) - f.CountVarP(p, name, "", usage) - return p -} - -// CountP is like Count only takes a shorthand for the flag name. -func (f *FlagSet) CountP(name, shorthand string, usage string) *int { - p := new(int) - f.CountVarP(p, name, shorthand, usage) - return p -} - -// Count defines a count flag with specified name, default value, and usage string. -// The return value is the address of an int variable that stores the value of the flag. -// A count flag will add 1 to its value every time it is found on the command line -func Count(name string, usage string) *int { - return CommandLine.CountP(name, "", usage) -} - -// CountP is like Count only takes a shorthand for the flag name.
-func CountP(name, shorthand string, usage string) *int { - return CommandLine.CountP(name, shorthand, usage) -} diff --git a/vendor/github.com/spf13/pflag/duration.go b/vendor/github.com/spf13/pflag/duration.go deleted file mode 100644 index e9debef8..00000000 --- a/vendor/github.com/spf13/pflag/duration.go +++ /dev/null @@ -1,86 +0,0 @@ -package pflag - -import ( - "time" -) - -// -- time.Duration Value -type durationValue time.Duration - -func newDurationValue(val time.Duration, p *time.Duration) *durationValue { - *p = val - return (*durationValue)(p) -} - -func (d *durationValue) Set(s string) error { - v, err := time.ParseDuration(s) - *d = durationValue(v) - return err -} - -func (d *durationValue) Type() string { - return "duration" -} - -func (d *durationValue) String() string { return (*time.Duration)(d).String() } - -func durationConv(sval string) (interface{}, error) { - return time.ParseDuration(sval) -} - -// GetDuration return the duration value of a flag with the given name -func (f *FlagSet) GetDuration(name string) (time.Duration, error) { - val, err := f.getFlagType(name, "duration", durationConv) - if err != nil { - return 0, err - } - return val.(time.Duration), nil -} - -// DurationVar defines a time.Duration flag with specified name, default value, and usage string. -// The argument p points to a time.Duration variable in which to store the value of the flag. -func (f *FlagSet) DurationVar(p *time.Duration, name string, value time.Duration, usage string) { - f.VarP(newDurationValue(value, p), name, "", usage) -} - -// DurationVarP is like DurationVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) DurationVarP(p *time.Duration, name, shorthand string, value time.Duration, usage string) { - f.VarP(newDurationValue(value, p), name, shorthand, usage) -} - -// DurationVar defines a time.Duration flag with specified name, default value, and usage string. -// The argument p points to a time.Duration variable in which to store the value of the flag. -func DurationVar(p *time.Duration, name string, value time.Duration, usage string) { - CommandLine.VarP(newDurationValue(value, p), name, "", usage) -} - -// DurationVarP is like DurationVar, but accepts a shorthand letter that can be used after a single dash. -func DurationVarP(p *time.Duration, name, shorthand string, value time.Duration, usage string) { - CommandLine.VarP(newDurationValue(value, p), name, shorthand, usage) -} - -// Duration defines a time.Duration flag with specified name, default value, and usage string. -// The return value is the address of a time.Duration variable that stores the value of the flag. -func (f *FlagSet) Duration(name string, value time.Duration, usage string) *time.Duration { - p := new(time.Duration) - f.DurationVarP(p, name, "", value, usage) - return p -} - -// DurationP is like Duration, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) DurationP(name, shorthand string, value time.Duration, usage string) *time.Duration { - p := new(time.Duration) - f.DurationVarP(p, name, shorthand, value, usage) - return p -} - -// Duration defines a time.Duration flag with specified name, default value, and usage string. -// The return value is the address of a time.Duration variable that stores the value of the flag. 
-func Duration(name string, value time.Duration, usage string) *time.Duration { - return CommandLine.DurationP(name, "", value, usage) -} - -// DurationP is like Duration, but accepts a shorthand letter that can be used after a single dash. -func DurationP(name, shorthand string, value time.Duration, usage string) *time.Duration { - return CommandLine.DurationP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/duration_slice.go b/vendor/github.com/spf13/pflag/duration_slice.go deleted file mode 100644 index badadda5..00000000 --- a/vendor/github.com/spf13/pflag/duration_slice.go +++ /dev/null @@ -1,166 +0,0 @@ -package pflag - -import ( - "fmt" - "strings" - "time" -) - -// -- durationSlice Value -type durationSliceValue struct { - value *[]time.Duration - changed bool -} - -func newDurationSliceValue(val []time.Duration, p *[]time.Duration) *durationSliceValue { - dsv := new(durationSliceValue) - dsv.value = p - *dsv.value = val - return dsv -} - -func (s *durationSliceValue) Set(val string) error { - ss := strings.Split(val, ",") - out := make([]time.Duration, len(ss)) - for i, d := range ss { - var err error - out[i], err = time.ParseDuration(d) - if err != nil { - return err - } - - } - if !s.changed { - *s.value = out - } else { - *s.value = append(*s.value, out...) - } - s.changed = true - return nil -} - -func (s *durationSliceValue) Type() string { - return "durationSlice" -} - -func (s *durationSliceValue) String() string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = fmt.Sprintf("%s", d) - } - return "[" + strings.Join(out, ",") + "]" -} - -func (s *durationSliceValue) fromString(val string) (time.Duration, error) { - return time.ParseDuration(val) -} - -func (s *durationSliceValue) toString(val time.Duration) string { - return fmt.Sprintf("%s", val) -} - -func (s *durationSliceValue) Append(val string) error { - i, err := s.fromString(val) - if err != nil { - return err - } - *s.value = append(*s.value, i) - return nil -} - -func (s *durationSliceValue) Replace(val []string) error { - out := make([]time.Duration, len(val)) - for i, d := range val { - var err error - out[i], err = s.fromString(d) - if err != nil { - return err - } - } - *s.value = out - return nil -} - -func (s *durationSliceValue) GetSlice() []string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = s.toString(d) - } - return out -} - -func durationSliceConv(val string) (interface{}, error) { - val = strings.Trim(val, "[]") - // Empty string would cause a slice with one (empty) entry - if len(val) == 0 { - return []time.Duration{}, nil - } - ss := strings.Split(val, ",") - out := make([]time.Duration, len(ss)) - for i, d := range ss { - var err error - out[i], err = time.ParseDuration(d) - if err != nil { - return nil, err - } - - } - return out, nil -} - -// GetDurationSlice returns the []time.Duration value of a flag with the given name -func (f *FlagSet) GetDurationSlice(name string) ([]time.Duration, error) { - val, err := f.getFlagType(name, "durationSlice", durationSliceConv) - if err != nil { - return []time.Duration{}, err - } - return val.([]time.Duration), nil -} - -// DurationSliceVar defines a durationSlice flag with specified name, default value, and usage string. -// The argument p points to a []time.Duration variable in which to store the value of the flag. 
-func (f *FlagSet) DurationSliceVar(p *[]time.Duration, name string, value []time.Duration, usage string) { - f.VarP(newDurationSliceValue(value, p), name, "", usage) -} - -// DurationSliceVarP is like DurationSliceVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) DurationSliceVarP(p *[]time.Duration, name, shorthand string, value []time.Duration, usage string) { - f.VarP(newDurationSliceValue(value, p), name, shorthand, usage) -} - -// DurationSliceVar defines a duration[] flag with specified name, default value, and usage string. -// The argument p points to a duration[] variable in which to store the value of the flag. -func DurationSliceVar(p *[]time.Duration, name string, value []time.Duration, usage string) { - CommandLine.VarP(newDurationSliceValue(value, p), name, "", usage) -} - -// DurationSliceVarP is like DurationSliceVar, but accepts a shorthand letter that can be used after a single dash. -func DurationSliceVarP(p *[]time.Duration, name, shorthand string, value []time.Duration, usage string) { - CommandLine.VarP(newDurationSliceValue(value, p), name, shorthand, usage) -} - -// DurationSlice defines a []time.Duration flag with specified name, default value, and usage string. -// The return value is the address of a []time.Duration variable that stores the value of the flag. -func (f *FlagSet) DurationSlice(name string, value []time.Duration, usage string) *[]time.Duration { - p := []time.Duration{} - f.DurationSliceVarP(&p, name, "", value, usage) - return &p -} - -// DurationSliceP is like DurationSlice, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) DurationSliceP(name, shorthand string, value []time.Duration, usage string) *[]time.Duration { - p := []time.Duration{} - f.DurationSliceVarP(&p, name, shorthand, value, usage) - return &p -} - -// DurationSlice defines a []time.Duration flag with specified name, default value, and usage string. -// The return value is the address of a []time.Duration variable that stores the value of the flag. -func DurationSlice(name string, value []time.Duration, usage string) *[]time.Duration { - return CommandLine.DurationSliceP(name, "", value, usage) -} - -// DurationSliceP is like DurationSlice, but accepts a shorthand letter that can be used after a single dash. -func DurationSliceP(name, shorthand string, value []time.Duration, usage string) *[]time.Duration { - return CommandLine.DurationSliceP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/flag.go b/vendor/github.com/spf13/pflag/flag.go deleted file mode 100644 index 24a5036e..00000000 --- a/vendor/github.com/spf13/pflag/flag.go +++ /dev/null @@ -1,1239 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package pflag is a drop-in replacement for Go's flag package, implementing -POSIX/GNU-style --flags. - -pflag is compatible with the GNU extensions to the POSIX recommendations -for command-line options. See -http://www.gnu.org/software/libc/manual/html_node/Argument-Syntax.html - -Usage: - -pflag is a drop-in replacement of Go's native flag package. If you import -pflag under the name "flag" then all code should continue to function -with no changes. - - import flag "github.com/spf13/pflag" - -There is one exception to this: if you directly instantiate the Flag struct -there is one more field "Shorthand" that you will need to set. 
-Most code never instantiates this struct directly, and instead uses -functions such as String(), BoolVar(), and Var(), and is therefore -unaffected. - -Define flags using flag.String(), Bool(), Int(), etc. - -This declares an integer flag, -flagname, stored in the pointer ip, with type *int. - var ip = flag.Int("flagname", 1234, "help message for flagname") -If you like, you can bind the flag to a variable using the Var() functions. - var flagvar int - func init() { - flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname") - } -Or you can create custom flags that satisfy the Value interface (with -pointer receivers) and couple them to flag parsing by - flag.Var(&flagVal, "name", "help message for flagname") -For such flags, the default value is just the initial value of the variable. - -After all flags are defined, call - flag.Parse() -to parse the command line into the defined flags. - -Flags may then be used directly. If you're using the flags themselves, -they are all pointers; if you bind to variables, they're values. - fmt.Println("ip has value ", *ip) - fmt.Println("flagvar has value ", flagvar) - -After parsing, the arguments after the flag are available as the -slice flag.Args() or individually as flag.Arg(i). -The arguments are indexed from 0 through flag.NArg()-1. - -The pflag package also defines some new functions that are not in flag, -that give one-letter shorthands for flags. You can use these by appending -'P' to the name of any function that defines a flag. - var ip = flag.IntP("flagname", "f", 1234, "help message") - var flagvar bool - func init() { - flag.BoolVarP(&flagvar, "boolname", "b", true, "help message") - } - flag.VarP(&flagval, "varname", "v", "help message") -Shorthand letters can be used with single dashes on the command line. -Boolean shorthand flags can be combined with other shorthand flags. - -Command line flag syntax: - --flag // boolean flags only - --flag=x - -Unlike the flag package, a single dash before an option means something -different than a double dash. Single dashes signify a series of shorthand -letters for flags. All but the last shorthand letter must be boolean flags. - // boolean flags - -f - -abc - // non-boolean flags - -n 1234 - -Ifile - // mixed - -abcs "hello" - -abcn1234 - -Flag parsing stops after the terminator "--". Unlike the flag package, -flags can be interspersed with arguments anywhere on the command line -before this terminator. - -Integer flags accept 1234, 0664, 0x1234 and may be negative. -Boolean flags (in their long form) accept 1, 0, t, f, true, false, -TRUE, FALSE, True, False. -Duration flags accept any input valid for time.ParseDuration. - -The default set of command-line flags is controlled by -top-level functions. The FlagSet type allows one to define -independent sets of flags, such as to implement subcommands -in a command-line interface. The methods of FlagSet are -analogous to the top-level functions for the command-line -flag set. -*/ -package pflag - -import ( - "bytes" - "errors" - goflag "flag" - "fmt" - "io" - "os" - "sort" - "strings" -) - -// ErrHelp is the error returned if the flag -help is invoked but no such flag is defined. -var ErrHelp = errors.New("pflag: help requested") - -// ErrorHandling defines how to handle flag parsing errors. 
-type ErrorHandling int - -const ( - // ContinueOnError will return an err from Parse() if an error is found - ContinueOnError ErrorHandling = iota - // ExitOnError will call os.Exit(2) if an error is found when parsing - ExitOnError - // PanicOnError will panic() if an error is found when parsing flags - PanicOnError -) - -// ParseErrorsWhitelist defines the parsing errors that can be ignored -type ParseErrorsWhitelist struct { - // UnknownFlags will ignore unknown flags errors and continue parsing rest of the flags - UnknownFlags bool -} - -// NormalizedName is a flag name that has been normalized according to rules -// for the FlagSet (e.g. making '-' and '_' equivalent). -type NormalizedName string - -// A FlagSet represents a set of defined flags. -type FlagSet struct { - // Usage is the function called when an error occurs while parsing flags. - // The field is a function (not a method) that may be changed to point to - // a custom error handler. - Usage func() - - // SortFlags is used to indicate whether the user wants to have sorted flags in - // help/usage messages. - SortFlags bool - - // ParseErrorsWhitelist is used to configure a whitelist of errors - ParseErrorsWhitelist ParseErrorsWhitelist - - name string - parsed bool - actual map[NormalizedName]*Flag - orderedActual []*Flag - sortedActual []*Flag - formal map[NormalizedName]*Flag - orderedFormal []*Flag - sortedFormal []*Flag - shorthands map[byte]*Flag - args []string // arguments after flags - argsLenAtDash int // len(args) when a '--' was located when parsing, or -1 if no -- - errorHandling ErrorHandling - output io.Writer // nil means stderr; use out() accessor - interspersed bool // allow interspersed option/non-option args - normalizeNameFunc func(f *FlagSet, name string) NormalizedName - - addedGoFlagSets []*goflag.FlagSet -} - -// A Flag represents the state of a flag. -type Flag struct { - Name string // name as it appears on command line - Shorthand string // one-letter abbreviated flag - Usage string // help message - Value Value // value as set - DefValue string // default value (as text); for usage message - Changed bool // If the user set the value (or if left to default) - NoOptDefVal string // default value (as text); if the flag is on the command line without any options - Deprecated string // If this flag is deprecated, this string is the new or now thing to use - Hidden bool // used by cobra.Command to allow flags to be hidden from help/usage text - ShorthandDeprecated string // If the shorthand of this flag is deprecated, this string is the new or now thing to use - Annotations map[string][]string // used by cobra.Command bash autocomplete code -} - -// Value is the interface to the dynamic value stored in a flag. -// (The default value is represented as a string.) -type Value interface { - String() string - Set(string) error - Type() string -} - -// SliceValue is a secondary interface to all flags which hold a list -// of values. This allows full control over the value of list flags, -// and avoids complicated marshalling and unmarshalling to csv. -type SliceValue interface { - // Append adds the specified value to the end of the flag value list. - Append(string) error - // Replace will fully overwrite any data currently in the flag value list. - Replace([]string) error - // GetSlice returns the flag value list as an array of strings. - GetSlice() []string -} - -// sortFlags returns the flags as a slice in lexicographical sorted order.
-func sortFlags(flags map[NormalizedName]*Flag) []*Flag { - list := make(sort.StringSlice, len(flags)) - i := 0 - for k := range flags { - list[i] = string(k) - i++ - } - list.Sort() - result := make([]*Flag, len(list)) - for i, name := range list { - result[i] = flags[NormalizedName(name)] - } - return result -} - -// SetNormalizeFunc allows you to add a function which can translate flag names. -// Flags added to the FlagSet will be translated and then when anything tries to -// look up the flag that will also be translated. So it would be possible to create -// a flag named "getURL" and have it translated to "geturl". A user could then pass -// "--getUrl" which may also be translated to "geturl" and everything will work. -func (f *FlagSet) SetNormalizeFunc(n func(f *FlagSet, name string) NormalizedName) { - f.normalizeNameFunc = n - f.sortedFormal = f.sortedFormal[:0] - for fname, flag := range f.formal { - nname := f.normalizeFlagName(flag.Name) - if fname == nname { - continue - } - flag.Name = string(nname) - delete(f.formal, fname) - f.formal[nname] = flag - if _, set := f.actual[fname]; set { - delete(f.actual, fname) - f.actual[nname] = flag - } - } -} - -// GetNormalizeFunc returns the previously set NormalizeFunc or a function which -// does no translation, if not set previously. -func (f *FlagSet) GetNormalizeFunc() func(f *FlagSet, name string) NormalizedName { - if f.normalizeNameFunc != nil { - return f.normalizeNameFunc - } - return func(f *FlagSet, name string) NormalizedName { return NormalizedName(name) } -} - -func (f *FlagSet) normalizeFlagName(name string) NormalizedName { - n := f.GetNormalizeFunc() - return n(f, name) -} - -func (f *FlagSet) out() io.Writer { - if f.output == nil { - return os.Stderr - } - return f.output -} - -// SetOutput sets the destination for usage and error messages. -// If output is nil, os.Stderr is used. -func (f *FlagSet) SetOutput(output io.Writer) { - f.output = output -} - -// VisitAll visits the flags in lexicographical order or -// in primordial order if f.SortFlags is false, calling fn for each. -// It visits all flags, even those not set. -func (f *FlagSet) VisitAll(fn func(*Flag)) { - if len(f.formal) == 0 { - return - } - - var flags []*Flag - if f.SortFlags { - if len(f.formal) != len(f.sortedFormal) { - f.sortedFormal = sortFlags(f.formal) - } - flags = f.sortedFormal - } else { - flags = f.orderedFormal - } - - for _, flag := range flags { - fn(flag) - } -} - -// HasFlags returns a bool to indicate if the FlagSet has any flags defined. -func (f *FlagSet) HasFlags() bool { - return len(f.formal) > 0 -} - -// HasAvailableFlags returns a bool to indicate if the FlagSet has any flags -// that are not hidden. -func (f *FlagSet) HasAvailableFlags() bool { - for _, flag := range f.formal { - if !flag.Hidden { - return true - } - } - return false -} - -// VisitAll visits the command-line flags in lexicographical order or -// in primordial order if f.SortFlags is false, calling fn for each. -// It visits all flags, even those not set. -func VisitAll(fn func(*Flag)) { - CommandLine.VisitAll(fn) -} - -// Visit visits the flags in lexicographical order or -// in primordial order if f.SortFlags is false, calling fn for each. -// It visits only those flags that have been set.
-func (f *FlagSet) Visit(fn func(*Flag)) { - if len(f.actual) == 0 { - return - } - - var flags []*Flag - if f.SortFlags { - if len(f.actual) != len(f.sortedActual) { - f.sortedActual = sortFlags(f.actual) - } - flags = f.sortedActual - } else { - flags = f.orderedActual - } - - for _, flag := range flags { - fn(flag) - } -} - -// Visit visits the command-line flags in lexicographical order or -// in primordial order if f.SortFlags is false, calling fn for each. -// It visits only those flags that have been set. -func Visit(fn func(*Flag)) { - CommandLine.Visit(fn) -} - -// Lookup returns the Flag structure of the named flag, returning nil if none exists. -func (f *FlagSet) Lookup(name string) *Flag { - return f.lookup(f.normalizeFlagName(name)) -} - -// ShorthandLookup returns the Flag structure of the short handed flag, -// returning nil if none exists. -// It panics if len(name) > 1. -func (f *FlagSet) ShorthandLookup(name string) *Flag { - if name == "" { - return nil - } - if len(name) > 1 { - msg := fmt.Sprintf("can not look up shorthand which is more than one ASCII character: %q", name) - fmt.Fprintf(f.out(), msg) - panic(msg) - } - c := name[0] - return f.shorthands[c] -} - -// lookup returns the Flag structure of the named flag, returning nil if none exists. -func (f *FlagSet) lookup(name NormalizedName) *Flag { - return f.formal[name] -} - -// func to return a given type for a given flag name -func (f *FlagSet) getFlagType(name string, ftype string, convFunc func(sval string) (interface{}, error)) (interface{}, error) { - flag := f.Lookup(name) - if flag == nil { - err := fmt.Errorf("flag accessed but not defined: %s", name) - return nil, err - } - - if flag.Value.Type() != ftype { - err := fmt.Errorf("trying to get %s value of flag of type %s", ftype, flag.Value.Type()) - return nil, err - } - - sval := flag.Value.String() - result, err := convFunc(sval) - if err != nil { - return nil, err - } - return result, nil -} - -// ArgsLenAtDash will return the length of f.Args at the moment when a -- was -// found during arg parsing. This allows your program to know which args were -// before the -- and which came after. -func (f *FlagSet) ArgsLenAtDash() int { - return f.argsLenAtDash -} - -// MarkDeprecated indicates that a flag is deprecated in your program. It will -// continue to function but will not show up in help or usage messages. Using -// this flag will also print the given usageMessage. -func (f *FlagSet) MarkDeprecated(name string, usageMessage string) error { - flag := f.Lookup(name) - if flag == nil { - return fmt.Errorf("flag %q does not exist", name) - } - if usageMessage == "" { - return fmt.Errorf("deprecated message for flag %q must be set", name) - } - flag.Deprecated = usageMessage - flag.Hidden = true - return nil -} - -// MarkShorthandDeprecated will mark the shorthand of a flag deprecated in your -// program. It will continue to function but will not show up in help or usage -// messages. Using this flag will also print the given usageMessage. -func (f *FlagSet) MarkShorthandDeprecated(name string, usageMessage string) error { - flag := f.Lookup(name) - if flag == nil { - return fmt.Errorf("flag %q does not exist", name) - } - if usageMessage == "" { - return fmt.Errorf("deprecated message for flag %q must be set", name) - } - flag.ShorthandDeprecated = usageMessage - return nil -} - -// MarkHidden sets a flag to 'hidden' in your program. It will continue to -// function but will not show up in help or usage messages.
-func (f *FlagSet) MarkHidden(name string) error { - flag := f.Lookup(name) - if flag == nil { - return fmt.Errorf("flag %q does not exist", name) - } - flag.Hidden = true - return nil -} - -// Lookup returns the Flag structure of the named command-line flag, -// returning nil if none exists. -func Lookup(name string) *Flag { - return CommandLine.Lookup(name) -} - -// ShorthandLookup returns the Flag structure of the short handed flag, -// returning nil if none exists. -func ShorthandLookup(name string) *Flag { - return CommandLine.ShorthandLookup(name) -} - -// Set sets the value of the named flag. -func (f *FlagSet) Set(name, value string) error { - normalName := f.normalizeFlagName(name) - flag, ok := f.formal[normalName] - if !ok { - return fmt.Errorf("no such flag -%v", name) - } - - err := flag.Value.Set(value) - if err != nil { - var flagName string - if flag.Shorthand != "" && flag.ShorthandDeprecated == "" { - flagName = fmt.Sprintf("-%s, --%s", flag.Shorthand, flag.Name) - } else { - flagName = fmt.Sprintf("--%s", flag.Name) - } - return fmt.Errorf("invalid argument %q for %q flag: %v", value, flagName, err) - } - - if !flag.Changed { - if f.actual == nil { - f.actual = make(map[NormalizedName]*Flag) - } - f.actual[normalName] = flag - f.orderedActual = append(f.orderedActual, flag) - - flag.Changed = true - } - - if flag.Deprecated != "" { - fmt.Fprintf(f.out(), "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated) - } - return nil -} - -// SetAnnotation allows one to set arbitrary annotations on a flag in the FlagSet. -// This is sometimes used by spf13/cobra programs which want to generate additional -// bash completion information. -func (f *FlagSet) SetAnnotation(name, key string, values []string) error { - normalName := f.normalizeFlagName(name) - flag, ok := f.formal[normalName] - if !ok { - return fmt.Errorf("no such flag -%v", name) - } - if flag.Annotations == nil { - flag.Annotations = map[string][]string{} - } - flag.Annotations[key] = values - return nil -} - -// Changed returns true if the flag was explicitly set during Parse() and false -// otherwise -func (f *FlagSet) Changed(name string) bool { - flag := f.Lookup(name) - // If a flag doesn't exist, it wasn't changed.... - if flag == nil { - return false - } - return flag.Changed -} - -// Set sets the value of the named command-line flag. -func Set(name, value string) error { - return CommandLine.Set(name, value) -} - -// PrintDefaults prints, to standard error unless configured -// otherwise, the default values of all defined flags in the set. -func (f *FlagSet) PrintDefaults() { - usages := f.FlagUsages() - fmt.Fprint(f.out(), usages) -} - -// defaultIsZeroValue returns true if the default value for this flag represents -// a zero value. 
-func (f *Flag) defaultIsZeroValue() bool {
-	switch f.Value.(type) {
-	case boolFlag:
-		return f.DefValue == "false"
-	case *durationValue:
-		// Beginning in Go 1.7, duration zero values are "0s"
-		return f.DefValue == "0" || f.DefValue == "0s"
-	case *intValue, *int8Value, *int32Value, *int64Value, *uintValue, *uint8Value, *uint16Value, *uint32Value, *uint64Value, *countValue, *float32Value, *float64Value:
-		return f.DefValue == "0"
-	case *stringValue:
-		return f.DefValue == ""
-	case *ipValue, *ipMaskValue, *ipNetValue:
-		return f.DefValue == ""
-	case *intSliceValue, *stringSliceValue, *stringArrayValue:
-		return f.DefValue == "[]"
-	default:
-		switch f.Value.String() {
-		case "false":
-			return true
-		case "<nil>":
-			return true
-		case "":
-			return true
-		case "0":
-			return true
-		}
-		return false
-	}
-}
-
-// UnquoteUsage extracts a back-quoted name from the usage
-// string for a flag and returns it and the un-quoted usage.
-// Given "a `name` to show" it returns ("name", "a name to show").
-// If there are no back quotes, the name is an educated guess of the
-// type of the flag's value, or the empty string if the flag is boolean.
-func UnquoteUsage(flag *Flag) (name string, usage string) {
-	// Look for a back-quoted name, but avoid the strings package.
-	usage = flag.Usage
-	for i := 0; i < len(usage); i++ {
-		if usage[i] == '`' {
-			for j := i + 1; j < len(usage); j++ {
-				if usage[j] == '`' {
-					name = usage[i+1 : j]
-					usage = usage[:i] + name + usage[j+1:]
-					return name, usage
-				}
-			}
-			break // Only one back quote; use type name.
-		}
-	}
-
-	name = flag.Value.Type()
-	switch name {
-	case "bool":
-		name = ""
-	case "float64":
-		name = "float"
-	case "int64":
-		name = "int"
-	case "uint64":
-		name = "uint"
-	case "stringSlice":
-		name = "strings"
-	case "intSlice":
-		name = "ints"
-	case "uintSlice":
-		name = "uints"
-	case "boolSlice":
-		name = "bools"
-	}
-
-	return
-}
-
-// Splits the string `s` on whitespace into an initial substring up to
-// `i` runes in length and the remainder. Will go `slop` over `i` if
-// that encompasses the entire string (which allows the caller to
-// avoid short orphan words on the final line).
-func wrapN(i, slop int, s string) (string, string) {
-	if i+slop > len(s) {
-		return s, ""
-	}
-
-	w := strings.LastIndexAny(s[:i], " \t\n")
-	if w <= 0 {
-		return s, ""
-	}
-	nlPos := strings.LastIndex(s[:i], "\n")
-	if nlPos > 0 && nlPos < w {
-		return s[:nlPos], s[nlPos+1:]
-	}
-	return s[:w], s[w+1:]
-}
-
-// Wraps the string `s` to a maximum width `w` with leading indent
-// `i`. The first line is not indented (this is assumed to be done by
-// caller). Pass `w` == 0 to do no wrapping
-func wrap(i, w int, s string) string {
-	if w == 0 {
-		return strings.Replace(s, "\n", "\n"+strings.Repeat(" ", i), -1)
-	}
-
-	// space between indent i and end of line width w into which
-	// we should wrap the text.
-	wrap := w - i
-
-	var r, l string
-
-	// Not enough space for sensible wrapping. Wrap as a block on
-	// the next line instead.
-	if wrap < 24 {
-		i = 16
-		wrap = w - i
-		r += "\n" + strings.Repeat(" ", i)
-	}
-	// If still not enough space then don't even try to wrap.
-	if wrap < 24 {
-		return strings.Replace(s, "\n", r, -1)
-	}
-
-	// Try to avoid short orphan words on the final line, by
-	// allowing wrapN to go a bit over if that would fit in the
-	// remainder of the line.
- slop := 5 - wrap = wrap - slop - - // Handle first line, which is indented by the caller (or the - // special case above) - l, s = wrapN(wrap, slop, s) - r = r + strings.Replace(l, "\n", "\n"+strings.Repeat(" ", i), -1) - - // Now wrap the rest - for s != "" { - var t string - - t, s = wrapN(wrap, slop, s) - r = r + "\n" + strings.Repeat(" ", i) + strings.Replace(t, "\n", "\n"+strings.Repeat(" ", i), -1) - } - - return r - -} - -// FlagUsagesWrapped returns a string containing the usage information -// for all flags in the FlagSet. Wrapped to `cols` columns (0 for no -// wrapping) -func (f *FlagSet) FlagUsagesWrapped(cols int) string { - buf := new(bytes.Buffer) - - lines := make([]string, 0, len(f.formal)) - - maxlen := 0 - f.VisitAll(func(flag *Flag) { - if flag.Hidden { - return - } - - line := "" - if flag.Shorthand != "" && flag.ShorthandDeprecated == "" { - line = fmt.Sprintf(" -%s, --%s", flag.Shorthand, flag.Name) - } else { - line = fmt.Sprintf(" --%s", flag.Name) - } - - varname, usage := UnquoteUsage(flag) - if varname != "" { - line += " " + varname - } - if flag.NoOptDefVal != "" { - switch flag.Value.Type() { - case "string": - line += fmt.Sprintf("[=\"%s\"]", flag.NoOptDefVal) - case "bool": - if flag.NoOptDefVal != "true" { - line += fmt.Sprintf("[=%s]", flag.NoOptDefVal) - } - case "count": - if flag.NoOptDefVal != "+1" { - line += fmt.Sprintf("[=%s]", flag.NoOptDefVal) - } - default: - line += fmt.Sprintf("[=%s]", flag.NoOptDefVal) - } - } - - // This special character will be replaced with spacing once the - // correct alignment is calculated - line += "\x00" - if len(line) > maxlen { - maxlen = len(line) - } - - line += usage - if !flag.defaultIsZeroValue() { - if flag.Value.Type() == "string" { - line += fmt.Sprintf(" (default %q)", flag.DefValue) - } else { - line += fmt.Sprintf(" (default %s)", flag.DefValue) - } - } - if len(flag.Deprecated) != 0 { - line += fmt.Sprintf(" (DEPRECATED: %s)", flag.Deprecated) - } - - lines = append(lines, line) - }) - - for _, line := range lines { - sidx := strings.Index(line, "\x00") - spacing := strings.Repeat(" ", maxlen-sidx) - // maxlen + 2 comes from + 1 for the \x00 and + 1 for the (deliberate) off-by-one in maxlen-sidx - fmt.Fprintln(buf, line[:sidx], spacing, wrap(maxlen+2, cols, line[sidx+1:])) - } - - return buf.String() -} - -// FlagUsages returns a string containing the usage information for all flags in -// the FlagSet -func (f *FlagSet) FlagUsages() string { - return f.FlagUsagesWrapped(0) -} - -// PrintDefaults prints to standard error the default values of all defined command-line flags. -func PrintDefaults() { - CommandLine.PrintDefaults() -} - -// defaultUsage is the default function to print a usage message. -func defaultUsage(f *FlagSet) { - fmt.Fprintf(f.out(), "Usage of %s:\n", f.name) - f.PrintDefaults() -} - -// NOTE: Usage is not just defaultUsage(CommandLine) -// because it serves (via godoc flag Usage) as the example -// for how to write your own usage function. - -// Usage prints to standard error a usage message documenting all defined command-line flags. -// The function is a variable that may be changed to point to a custom function. -// By default it prints a simple header and calls PrintDefaults; for details about the -// format of the output and how to control it, see the documentation for PrintDefaults. -var Usage = func() { - fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0]) - PrintDefaults() -} - -// NFlag returns the number of flags that have been set. 
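The formatting helpers above cooperate: `UnquoteUsage` extracts a back-quoted placeholder from the usage string, `wrap`/`wrapN` handle line breaking, and `FlagUsagesWrapped` assembles the aligned listing. For reference (this sketch is not part of the diff; the width of 48 is arbitrary):

```go
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	fs.StringP("config", "c", "", "path to the configuration `file`")
	fs.Int("retries", 3, "how many times to retry a failed request")

	// UnquoteUsage turns `file` into the value placeholder; 48 is an
	// arbitrary wrap width (0 would disable wrapping entirely).
	fmt.Print(fs.FlagUsagesWrapped(48))
}
```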
-func (f *FlagSet) NFlag() int { return len(f.actual) } - -// NFlag returns the number of command-line flags that have been set. -func NFlag() int { return len(CommandLine.actual) } - -// Arg returns the i'th argument. Arg(0) is the first remaining argument -// after flags have been processed. -func (f *FlagSet) Arg(i int) string { - if i < 0 || i >= len(f.args) { - return "" - } - return f.args[i] -} - -// Arg returns the i'th command-line argument. Arg(0) is the first remaining argument -// after flags have been processed. -func Arg(i int) string { - return CommandLine.Arg(i) -} - -// NArg is the number of arguments remaining after flags have been processed. -func (f *FlagSet) NArg() int { return len(f.args) } - -// NArg is the number of arguments remaining after flags have been processed. -func NArg() int { return len(CommandLine.args) } - -// Args returns the non-flag arguments. -func (f *FlagSet) Args() []string { return f.args } - -// Args returns the non-flag command-line arguments. -func Args() []string { return CommandLine.args } - -// Var defines a flag with the specified name and usage string. The type and -// value of the flag are represented by the first argument, of type Value, which -// typically holds a user-defined implementation of Value. For instance, the -// caller could create a flag that turns a comma-separated string into a slice -// of strings by giving the slice the methods of Value; in particular, Set would -// decompose the comma-separated string into the slice. -func (f *FlagSet) Var(value Value, name string, usage string) { - f.VarP(value, name, "", usage) -} - -// VarPF is like VarP, but returns the flag created -func (f *FlagSet) VarPF(value Value, name, shorthand, usage string) *Flag { - // Remember the default value as a string; it won't change. - flag := &Flag{ - Name: name, - Shorthand: shorthand, - Usage: usage, - Value: value, - DefValue: value.String(), - } - f.AddFlag(flag) - return flag -} - -// VarP is like Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) VarP(value Value, name, shorthand, usage string) { - f.VarPF(value, name, shorthand, usage) -} - -// AddFlag will add the flag to the FlagSet -func (f *FlagSet) AddFlag(flag *Flag) { - normalizedFlagName := f.normalizeFlagName(flag.Name) - - _, alreadyThere := f.formal[normalizedFlagName] - if alreadyThere { - msg := fmt.Sprintf("%s flag redefined: %s", f.name, flag.Name) - fmt.Fprintln(f.out(), msg) - panic(msg) // Happens only if flags are declared with identical names - } - if f.formal == nil { - f.formal = make(map[NormalizedName]*Flag) - } - - flag.Name = string(normalizedFlagName) - f.formal[normalizedFlagName] = flag - f.orderedFormal = append(f.orderedFormal, flag) - - if flag.Shorthand == "" { - return - } - if len(flag.Shorthand) > 1 { - msg := fmt.Sprintf("%q shorthand is more than one ASCII character", flag.Shorthand) - fmt.Fprintf(f.out(), msg) - panic(msg) - } - if f.shorthands == nil { - f.shorthands = make(map[byte]*Flag) - } - c := flag.Shorthand[0] - used, alreadyThere := f.shorthands[c] - if alreadyThere { - msg := fmt.Sprintf("unable to redefine %q shorthand in %q flagset: it's already used for %q flag", c, f.name, used.Name) - fmt.Fprintf(f.out(), msg) - panic(msg) - } - f.shorthands[c] = flag -} - -// AddFlagSet adds one FlagSet to another. If a flag is already present in f -// the flag from newSet will be ignored. 
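`Var`/`VarP`/`AddFlag` above are the extension point: any type implementing `Value` (`String`, `Set`, `Type`) can be registered as a flag. An illustrative, hypothetical custom `Value` (not from this repository):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/spf13/pflag"
)

// hostList is a hypothetical pflag.Value that accumulates repeated values.
type hostList []string

func (h *hostList) String() string { return "[" + strings.Join(*h, ",") + "]" }
func (h *hostList) Type() string   { return "hostList" }
func (h *hostList) Set(v string) error {
	*h = append(*h, v)
	return nil
}

func main() {
	var hosts hostList
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	fs.VarP(&hosts, "host", "H", "backend `host` (repeatable)")

	_ = fs.Parse([]string{"-H", "a.example", "--host=b.example"})
	fmt.Println(hosts) // [a.example b.example]
}
```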
-func (f *FlagSet) AddFlagSet(newSet *FlagSet) { - if newSet == nil { - return - } - newSet.VisitAll(func(flag *Flag) { - if f.Lookup(flag.Name) == nil { - f.AddFlag(flag) - } - }) -} - -// Var defines a flag with the specified name and usage string. The type and -// value of the flag are represented by the first argument, of type Value, which -// typically holds a user-defined implementation of Value. For instance, the -// caller could create a flag that turns a comma-separated string into a slice -// of strings by giving the slice the methods of Value; in particular, Set would -// decompose the comma-separated string into the slice. -func Var(value Value, name string, usage string) { - CommandLine.VarP(value, name, "", usage) -} - -// VarP is like Var, but accepts a shorthand letter that can be used after a single dash. -func VarP(value Value, name, shorthand, usage string) { - CommandLine.VarP(value, name, shorthand, usage) -} - -// failf prints to standard error a formatted error and usage message and -// returns the error. -func (f *FlagSet) failf(format string, a ...interface{}) error { - err := fmt.Errorf(format, a...) - if f.errorHandling != ContinueOnError { - fmt.Fprintln(f.out(), err) - f.usage() - } - return err -} - -// usage calls the Usage method for the flag set, or the usage function if -// the flag set is CommandLine. -func (f *FlagSet) usage() { - if f == CommandLine { - Usage() - } else if f.Usage == nil { - defaultUsage(f) - } else { - f.Usage() - } -} - -//--unknown (args will be empty) -//--unknown --next-flag ... (args will be --next-flag ...) -//--unknown arg ... (args will be arg ...) -func stripUnknownFlagValue(args []string) []string { - if len(args) == 0 { - //--unknown - return args - } - - first := args[0] - if len(first) > 0 && first[0] == '-' { - //--unknown --next-flag ... - return args - } - - //--unknown arg ... (args will be arg ...) - if len(args) > 1 { - return args[1:] - } - return nil -} - -func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []string, err error) { - a = args - name := s[2:] - if len(name) == 0 || name[0] == '-' || name[0] == '=' { - err = f.failf("bad flag syntax: %s", s) - return - } - - split := strings.SplitN(name, "=", 2) - name = split[0] - flag, exists := f.formal[f.normalizeFlagName(name)] - - if !exists { - switch { - case name == "help": - f.usage() - return a, ErrHelp - case f.ParseErrorsWhitelist.UnknownFlags: - // --unknown=unknownval arg ... 
- // we do not want to lose arg in this case - if len(split) >= 2 { - return a, nil - } - - return stripUnknownFlagValue(a), nil - default: - err = f.failf("unknown flag: --%s", name) - return - } - } - - var value string - if len(split) == 2 { - // '--flag=arg' - value = split[1] - } else if flag.NoOptDefVal != "" { - // '--flag' (arg was optional) - value = flag.NoOptDefVal - } else if len(a) > 0 { - // '--flag arg' - value = a[0] - a = a[1:] - } else { - // '--flag' (arg was required) - err = f.failf("flag needs an argument: %s", s) - return - } - - err = fn(flag, value) - if err != nil { - f.failf(err.Error()) - } - return -} - -func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parseFunc) (outShorts string, outArgs []string, err error) { - outArgs = args - - if strings.HasPrefix(shorthands, "test.") { - return - } - - outShorts = shorthands[1:] - c := shorthands[0] - - flag, exists := f.shorthands[c] - if !exists { - switch { - case c == 'h': - f.usage() - err = ErrHelp - return - case f.ParseErrorsWhitelist.UnknownFlags: - // '-f=arg arg ...' - // we do not want to lose arg in this case - if len(shorthands) > 2 && shorthands[1] == '=' { - outShorts = "" - return - } - - outArgs = stripUnknownFlagValue(outArgs) - return - default: - err = f.failf("unknown shorthand flag: %q in -%s", c, shorthands) - return - } - } - - var value string - if len(shorthands) > 2 && shorthands[1] == '=' { - // '-f=arg' - value = shorthands[2:] - outShorts = "" - } else if flag.NoOptDefVal != "" { - // '-f' (arg was optional) - value = flag.NoOptDefVal - } else if len(shorthands) > 1 { - // '-farg' - value = shorthands[1:] - outShorts = "" - } else if len(args) > 0 { - // '-f arg' - value = args[0] - outArgs = args[1:] - } else { - // '-f' (arg was required) - err = f.failf("flag needs an argument: %q in -%s", c, shorthands) - return - } - - if flag.ShorthandDeprecated != "" { - fmt.Fprintf(f.out(), "Flag shorthand -%s has been deprecated, %s\n", flag.Shorthand, flag.ShorthandDeprecated) - } - - err = fn(flag, value) - if err != nil { - f.failf(err.Error()) - } - return -} - -func (f *FlagSet) parseShortArg(s string, args []string, fn parseFunc) (a []string, err error) { - a = args - shorthands := s[1:] - - // "shorthands" can be a series of shorthand letters of flags (e.g. "-vvv"). - for len(shorthands) > 0 { - shorthands, a, err = f.parseSingleShortArg(shorthands, args, fn) - if err != nil { - return - } - } - - return -} - -func (f *FlagSet) parseArgs(args []string, fn parseFunc) (err error) { - for len(args) > 0 { - s := args[0] - args = args[1:] - if len(s) == 0 || s[0] != '-' || len(s) == 1 { - if !f.interspersed { - f.args = append(f.args, s) - f.args = append(f.args, args...) - return nil - } - f.args = append(f.args, s) - continue - } - - if s[1] == '-' { - if len(s) == 2 { // "--" terminates the flags - f.argsLenAtDash = len(f.args) - f.args = append(f.args, args...) - break - } - args, err = f.parseLongArg(s, args, fn) - } else { - args, err = f.parseShortArg(s, args, fn) - } - if err != nil { - return - } - } - return -} - -// Parse parses flag definitions from the argument list, which should not -// include the command name. Must be called after all flags in the FlagSet -// are defined and before flags are accessed by the program. -// The return value will be ErrHelp if -help was set but not defined. 
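The parsing hunks above implement the `ParseErrorsWhitelist.UnknownFlags` escape hatch: unknown flags are skipped, and `stripUnknownFlagValue` also drops a detached value so it is not mistaken for a positional argument. A sketch of the behavior (flag names are made up):

```go
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	fs.ParseErrorsWhitelist.UnknownFlags = true // tolerate flags we did not define
	verbose := fs.Bool("verbose", false, "known flag")

	// "--color auto" is unknown: stripUnknownFlagValue drops "auto" too,
	// so it does not end up in fs.Args().
	err := fs.Parse([]string{"--color", "auto", "--verbose", "positional"})
	fmt.Println(err, *verbose, fs.Args()) // <nil> true [positional]
}
```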
-func (f *FlagSet) Parse(arguments []string) error { - if f.addedGoFlagSets != nil { - for _, goFlagSet := range f.addedGoFlagSets { - goFlagSet.Parse(nil) - } - } - f.parsed = true - - if len(arguments) < 0 { - return nil - } - - f.args = make([]string, 0, len(arguments)) - - set := func(flag *Flag, value string) error { - return f.Set(flag.Name, value) - } - - err := f.parseArgs(arguments, set) - if err != nil { - switch f.errorHandling { - case ContinueOnError: - return err - case ExitOnError: - fmt.Println(err) - os.Exit(2) - case PanicOnError: - panic(err) - } - } - return nil -} - -type parseFunc func(flag *Flag, value string) error - -// ParseAll parses flag definitions from the argument list, which should not -// include the command name. The arguments for fn are flag and value. Must be -// called after all flags in the FlagSet are defined and before flags are -// accessed by the program. The return value will be ErrHelp if -help was set -// but not defined. -func (f *FlagSet) ParseAll(arguments []string, fn func(flag *Flag, value string) error) error { - f.parsed = true - f.args = make([]string, 0, len(arguments)) - - err := f.parseArgs(arguments, fn) - if err != nil { - switch f.errorHandling { - case ContinueOnError: - return err - case ExitOnError: - os.Exit(2) - case PanicOnError: - panic(err) - } - } - return nil -} - -// Parsed reports whether f.Parse has been called. -func (f *FlagSet) Parsed() bool { - return f.parsed -} - -// Parse parses the command-line flags from os.Args[1:]. Must be called -// after all flags are defined and before flags are accessed by the program. -func Parse() { - // Ignore errors; CommandLine is set for ExitOnError. - CommandLine.Parse(os.Args[1:]) -} - -// ParseAll parses the command-line flags from os.Args[1:] and called fn for each. -// The arguments for fn are flag and value. Must be called after all flags are -// defined and before flags are accessed by the program. -func ParseAll(fn func(flag *Flag, value string) error) { - // Ignore errors; CommandLine is set for ExitOnError. - CommandLine.ParseAll(os.Args[1:], fn) -} - -// SetInterspersed sets whether to support interspersed option/non-option arguments. -func SetInterspersed(interspersed bool) { - CommandLine.SetInterspersed(interspersed) -} - -// Parsed returns true if the command-line flags have been parsed. -func Parsed() bool { - return CommandLine.Parsed() -} - -// CommandLine is the default set of command-line flags, parsed from os.Args. -var CommandLine = NewFlagSet(os.Args[0], ExitOnError) - -// NewFlagSet returns a new, empty flag set with the specified name, -// error handling property and SortFlags set to true. -func NewFlagSet(name string, errorHandling ErrorHandling) *FlagSet { - f := &FlagSet{ - name: name, - errorHandling: errorHandling, - argsLenAtDash: -1, - interspersed: true, - SortFlags: true, - } - return f -} - -// SetInterspersed sets whether to support interspersed option/non-option arguments. -func (f *FlagSet) SetInterspersed(interspersed bool) { - f.interspersed = interspersed -} - -// Init sets the name and error handling property for a flag set. -// By default, the zero FlagSet uses an empty name and the -// ContinueOnError error handling policy. 
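Two details of the removed `Parse` are worth noting: with `ContinueOnError` the error is returned rather than passed to `os.Exit(2)`, and a bare `--` stops flag parsing while `ArgsLenAtDash` records where it occurred. For reference (not part of the diff):

```go
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)

	// ContinueOnError: Parse returns the error instead of exiting.
	if err := fs.Parse([]string{"--no-such-flag"}); err != nil {
		fmt.Println(err) // unknown flag: --no-such-flag
	}

	// "--" terminates flag parsing; ArgsLenAtDash records its position.
	fs2 := pflag.NewFlagSet("demo2", pflag.ContinueOnError)
	_ = fs2.Parse([]string{"a", "--", "b", "c"})
	fmt.Println(fs2.Args(), fs2.ArgsLenAtDash()) // [a b c] 1
}
```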
-func (f *FlagSet) Init(name string, errorHandling ErrorHandling) { - f.name = name - f.errorHandling = errorHandling - f.argsLenAtDash = -1 -} diff --git a/vendor/github.com/spf13/pflag/float32.go b/vendor/github.com/spf13/pflag/float32.go deleted file mode 100644 index a243f81f..00000000 --- a/vendor/github.com/spf13/pflag/float32.go +++ /dev/null @@ -1,88 +0,0 @@ -package pflag - -import "strconv" - -// -- float32 Value -type float32Value float32 - -func newFloat32Value(val float32, p *float32) *float32Value { - *p = val - return (*float32Value)(p) -} - -func (f *float32Value) Set(s string) error { - v, err := strconv.ParseFloat(s, 32) - *f = float32Value(v) - return err -} - -func (f *float32Value) Type() string { - return "float32" -} - -func (f *float32Value) String() string { return strconv.FormatFloat(float64(*f), 'g', -1, 32) } - -func float32Conv(sval string) (interface{}, error) { - v, err := strconv.ParseFloat(sval, 32) - if err != nil { - return 0, err - } - return float32(v), nil -} - -// GetFloat32 return the float32 value of a flag with the given name -func (f *FlagSet) GetFloat32(name string) (float32, error) { - val, err := f.getFlagType(name, "float32", float32Conv) - if err != nil { - return 0, err - } - return val.(float32), nil -} - -// Float32Var defines a float32 flag with specified name, default value, and usage string. -// The argument p points to a float32 variable in which to store the value of the flag. -func (f *FlagSet) Float32Var(p *float32, name string, value float32, usage string) { - f.VarP(newFloat32Value(value, p), name, "", usage) -} - -// Float32VarP is like Float32Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Float32VarP(p *float32, name, shorthand string, value float32, usage string) { - f.VarP(newFloat32Value(value, p), name, shorthand, usage) -} - -// Float32Var defines a float32 flag with specified name, default value, and usage string. -// The argument p points to a float32 variable in which to store the value of the flag. -func Float32Var(p *float32, name string, value float32, usage string) { - CommandLine.VarP(newFloat32Value(value, p), name, "", usage) -} - -// Float32VarP is like Float32Var, but accepts a shorthand letter that can be used after a single dash. -func Float32VarP(p *float32, name, shorthand string, value float32, usage string) { - CommandLine.VarP(newFloat32Value(value, p), name, shorthand, usage) -} - -// Float32 defines a float32 flag with specified name, default value, and usage string. -// The return value is the address of a float32 variable that stores the value of the flag. -func (f *FlagSet) Float32(name string, value float32, usage string) *float32 { - p := new(float32) - f.Float32VarP(p, name, "", value, usage) - return p -} - -// Float32P is like Float32, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Float32P(name, shorthand string, value float32, usage string) *float32 { - p := new(float32) - f.Float32VarP(p, name, shorthand, value, usage) - return p -} - -// Float32 defines a float32 flag with specified name, default value, and usage string. -// The return value is the address of a float32 variable that stores the value of the flag. -func Float32(name string, value float32, usage string) *float32 { - return CommandLine.Float32P(name, "", value, usage) -} - -// Float32P is like Float32, but accepts a shorthand letter that can be used after a single dash. 
-func Float32P(name, shorthand string, value float32, usage string) *float32 { - return CommandLine.Float32P(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/float32_slice.go b/vendor/github.com/spf13/pflag/float32_slice.go deleted file mode 100644 index caa35274..00000000 --- a/vendor/github.com/spf13/pflag/float32_slice.go +++ /dev/null @@ -1,174 +0,0 @@ -package pflag - -import ( - "fmt" - "strconv" - "strings" -) - -// -- float32Slice Value -type float32SliceValue struct { - value *[]float32 - changed bool -} - -func newFloat32SliceValue(val []float32, p *[]float32) *float32SliceValue { - isv := new(float32SliceValue) - isv.value = p - *isv.value = val - return isv -} - -func (s *float32SliceValue) Set(val string) error { - ss := strings.Split(val, ",") - out := make([]float32, len(ss)) - for i, d := range ss { - var err error - var temp64 float64 - temp64, err = strconv.ParseFloat(d, 32) - if err != nil { - return err - } - out[i] = float32(temp64) - - } - if !s.changed { - *s.value = out - } else { - *s.value = append(*s.value, out...) - } - s.changed = true - return nil -} - -func (s *float32SliceValue) Type() string { - return "float32Slice" -} - -func (s *float32SliceValue) String() string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = fmt.Sprintf("%f", d) - } - return "[" + strings.Join(out, ",") + "]" -} - -func (s *float32SliceValue) fromString(val string) (float32, error) { - t64, err := strconv.ParseFloat(val, 32) - if err != nil { - return 0, err - } - return float32(t64), nil -} - -func (s *float32SliceValue) toString(val float32) string { - return fmt.Sprintf("%f", val) -} - -func (s *float32SliceValue) Append(val string) error { - i, err := s.fromString(val) - if err != nil { - return err - } - *s.value = append(*s.value, i) - return nil -} - -func (s *float32SliceValue) Replace(val []string) error { - out := make([]float32, len(val)) - for i, d := range val { - var err error - out[i], err = s.fromString(d) - if err != nil { - return err - } - } - *s.value = out - return nil -} - -func (s *float32SliceValue) GetSlice() []string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = s.toString(d) - } - return out -} - -func float32SliceConv(val string) (interface{}, error) { - val = strings.Trim(val, "[]") - // Empty string would cause a slice with one (empty) entry - if len(val) == 0 { - return []float32{}, nil - } - ss := strings.Split(val, ",") - out := make([]float32, len(ss)) - for i, d := range ss { - var err error - var temp64 float64 - temp64, err = strconv.ParseFloat(d, 32) - if err != nil { - return nil, err - } - out[i] = float32(temp64) - - } - return out, nil -} - -// GetFloat32Slice return the []float32 value of a flag with the given name -func (f *FlagSet) GetFloat32Slice(name string) ([]float32, error) { - val, err := f.getFlagType(name, "float32Slice", float32SliceConv) - if err != nil { - return []float32{}, err - } - return val.([]float32), nil -} - -// Float32SliceVar defines a float32Slice flag with specified name, default value, and usage string. -// The argument p points to a []float32 variable in which to store the value of the flag. -func (f *FlagSet) Float32SliceVar(p *[]float32, name string, value []float32, usage string) { - f.VarP(newFloat32SliceValue(value, p), name, "", usage) -} - -// Float32SliceVarP is like Float32SliceVar, but accepts a shorthand letter that can be used after a single dash. 
-func (f *FlagSet) Float32SliceVarP(p *[]float32, name, shorthand string, value []float32, usage string) { - f.VarP(newFloat32SliceValue(value, p), name, shorthand, usage) -} - -// Float32SliceVar defines a float32[] flag with specified name, default value, and usage string. -// The argument p points to a float32[] variable in which to store the value of the flag. -func Float32SliceVar(p *[]float32, name string, value []float32, usage string) { - CommandLine.VarP(newFloat32SliceValue(value, p), name, "", usage) -} - -// Float32SliceVarP is like Float32SliceVar, but accepts a shorthand letter that can be used after a single dash. -func Float32SliceVarP(p *[]float32, name, shorthand string, value []float32, usage string) { - CommandLine.VarP(newFloat32SliceValue(value, p), name, shorthand, usage) -} - -// Float32Slice defines a []float32 flag with specified name, default value, and usage string. -// The return value is the address of a []float32 variable that stores the value of the flag. -func (f *FlagSet) Float32Slice(name string, value []float32, usage string) *[]float32 { - p := []float32{} - f.Float32SliceVarP(&p, name, "", value, usage) - return &p -} - -// Float32SliceP is like Float32Slice, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Float32SliceP(name, shorthand string, value []float32, usage string) *[]float32 { - p := []float32{} - f.Float32SliceVarP(&p, name, shorthand, value, usage) - return &p -} - -// Float32Slice defines a []float32 flag with specified name, default value, and usage string. -// The return value is the address of a []float32 variable that stores the value of the flag. -func Float32Slice(name string, value []float32, usage string) *[]float32 { - return CommandLine.Float32SliceP(name, "", value, usage) -} - -// Float32SliceP is like Float32Slice, but accepts a shorthand letter that can be used after a single dash. -func Float32SliceP(name, shorthand string, value []float32, usage string) *[]float32 { - return CommandLine.Float32SliceP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/float64.go b/vendor/github.com/spf13/pflag/float64.go deleted file mode 100644 index 04b5492a..00000000 --- a/vendor/github.com/spf13/pflag/float64.go +++ /dev/null @@ -1,84 +0,0 @@ -package pflag - -import "strconv" - -// -- float64 Value -type float64Value float64 - -func newFloat64Value(val float64, p *float64) *float64Value { - *p = val - return (*float64Value)(p) -} - -func (f *float64Value) Set(s string) error { - v, err := strconv.ParseFloat(s, 64) - *f = float64Value(v) - return err -} - -func (f *float64Value) Type() string { - return "float64" -} - -func (f *float64Value) String() string { return strconv.FormatFloat(float64(*f), 'g', -1, 64) } - -func float64Conv(sval string) (interface{}, error) { - return strconv.ParseFloat(sval, 64) -} - -// GetFloat64 return the float64 value of a flag with the given name -func (f *FlagSet) GetFloat64(name string) (float64, error) { - val, err := f.getFlagType(name, "float64", float64Conv) - if err != nil { - return 0, err - } - return val.(float64), nil -} - -// Float64Var defines a float64 flag with specified name, default value, and usage string. -// The argument p points to a float64 variable in which to store the value of the flag. 
-func (f *FlagSet) Float64Var(p *float64, name string, value float64, usage string) { - f.VarP(newFloat64Value(value, p), name, "", usage) -} - -// Float64VarP is like Float64Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Float64VarP(p *float64, name, shorthand string, value float64, usage string) { - f.VarP(newFloat64Value(value, p), name, shorthand, usage) -} - -// Float64Var defines a float64 flag with specified name, default value, and usage string. -// The argument p points to a float64 variable in which to store the value of the flag. -func Float64Var(p *float64, name string, value float64, usage string) { - CommandLine.VarP(newFloat64Value(value, p), name, "", usage) -} - -// Float64VarP is like Float64Var, but accepts a shorthand letter that can be used after a single dash. -func Float64VarP(p *float64, name, shorthand string, value float64, usage string) { - CommandLine.VarP(newFloat64Value(value, p), name, shorthand, usage) -} - -// Float64 defines a float64 flag with specified name, default value, and usage string. -// The return value is the address of a float64 variable that stores the value of the flag. -func (f *FlagSet) Float64(name string, value float64, usage string) *float64 { - p := new(float64) - f.Float64VarP(p, name, "", value, usage) - return p -} - -// Float64P is like Float64, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Float64P(name, shorthand string, value float64, usage string) *float64 { - p := new(float64) - f.Float64VarP(p, name, shorthand, value, usage) - return p -} - -// Float64 defines a float64 flag with specified name, default value, and usage string. -// The return value is the address of a float64 variable that stores the value of the flag. -func Float64(name string, value float64, usage string) *float64 { - return CommandLine.Float64P(name, "", value, usage) -} - -// Float64P is like Float64, but accepts a shorthand letter that can be used after a single dash. -func Float64P(name, shorthand string, value float64, usage string) *float64 { - return CommandLine.Float64P(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/float64_slice.go b/vendor/github.com/spf13/pflag/float64_slice.go deleted file mode 100644 index 85bf3073..00000000 --- a/vendor/github.com/spf13/pflag/float64_slice.go +++ /dev/null @@ -1,166 +0,0 @@ -package pflag - -import ( - "fmt" - "strconv" - "strings" -) - -// -- float64Slice Value -type float64SliceValue struct { - value *[]float64 - changed bool -} - -func newFloat64SliceValue(val []float64, p *[]float64) *float64SliceValue { - isv := new(float64SliceValue) - isv.value = p - *isv.value = val - return isv -} - -func (s *float64SliceValue) Set(val string) error { - ss := strings.Split(val, ",") - out := make([]float64, len(ss)) - for i, d := range ss { - var err error - out[i], err = strconv.ParseFloat(d, 64) - if err != nil { - return err - } - - } - if !s.changed { - *s.value = out - } else { - *s.value = append(*s.value, out...) 
- } - s.changed = true - return nil -} - -func (s *float64SliceValue) Type() string { - return "float64Slice" -} - -func (s *float64SliceValue) String() string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = fmt.Sprintf("%f", d) - } - return "[" + strings.Join(out, ",") + "]" -} - -func (s *float64SliceValue) fromString(val string) (float64, error) { - return strconv.ParseFloat(val, 64) -} - -func (s *float64SliceValue) toString(val float64) string { - return fmt.Sprintf("%f", val) -} - -func (s *float64SliceValue) Append(val string) error { - i, err := s.fromString(val) - if err != nil { - return err - } - *s.value = append(*s.value, i) - return nil -} - -func (s *float64SliceValue) Replace(val []string) error { - out := make([]float64, len(val)) - for i, d := range val { - var err error - out[i], err = s.fromString(d) - if err != nil { - return err - } - } - *s.value = out - return nil -} - -func (s *float64SliceValue) GetSlice() []string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = s.toString(d) - } - return out -} - -func float64SliceConv(val string) (interface{}, error) { - val = strings.Trim(val, "[]") - // Empty string would cause a slice with one (empty) entry - if len(val) == 0 { - return []float64{}, nil - } - ss := strings.Split(val, ",") - out := make([]float64, len(ss)) - for i, d := range ss { - var err error - out[i], err = strconv.ParseFloat(d, 64) - if err != nil { - return nil, err - } - - } - return out, nil -} - -// GetFloat64Slice return the []float64 value of a flag with the given name -func (f *FlagSet) GetFloat64Slice(name string) ([]float64, error) { - val, err := f.getFlagType(name, "float64Slice", float64SliceConv) - if err != nil { - return []float64{}, err - } - return val.([]float64), nil -} - -// Float64SliceVar defines a float64Slice flag with specified name, default value, and usage string. -// The argument p points to a []float64 variable in which to store the value of the flag. -func (f *FlagSet) Float64SliceVar(p *[]float64, name string, value []float64, usage string) { - f.VarP(newFloat64SliceValue(value, p), name, "", usage) -} - -// Float64SliceVarP is like Float64SliceVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Float64SliceVarP(p *[]float64, name, shorthand string, value []float64, usage string) { - f.VarP(newFloat64SliceValue(value, p), name, shorthand, usage) -} - -// Float64SliceVar defines a float64[] flag with specified name, default value, and usage string. -// The argument p points to a float64[] variable in which to store the value of the flag. -func Float64SliceVar(p *[]float64, name string, value []float64, usage string) { - CommandLine.VarP(newFloat64SliceValue(value, p), name, "", usage) -} - -// Float64SliceVarP is like Float64SliceVar, but accepts a shorthand letter that can be used after a single dash. -func Float64SliceVarP(p *[]float64, name, shorthand string, value []float64, usage string) { - CommandLine.VarP(newFloat64SliceValue(value, p), name, shorthand, usage) -} - -// Float64Slice defines a []float64 flag with specified name, default value, and usage string. -// The return value is the address of a []float64 variable that stores the value of the flag. 
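As the `Set` implementation above shows, slice flags replace the default on first use and append on every later use. A sketch using the `Float64Slice` constructor from the next hunk (values are made up):

```go
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	weights := fs.Float64Slice("weight", []float64{1}, "comma-separated weights")

	// The first use replaces the default; every later use appends.
	_ = fs.Parse([]string{"--weight=0.5,0.25", "--weight=0.125"})
	fmt.Println(*weights) // [0.5 0.25 0.125]
}
```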
-func (f *FlagSet) Float64Slice(name string, value []float64, usage string) *[]float64 { - p := []float64{} - f.Float64SliceVarP(&p, name, "", value, usage) - return &p -} - -// Float64SliceP is like Float64Slice, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Float64SliceP(name, shorthand string, value []float64, usage string) *[]float64 { - p := []float64{} - f.Float64SliceVarP(&p, name, shorthand, value, usage) - return &p -} - -// Float64Slice defines a []float64 flag with specified name, default value, and usage string. -// The return value is the address of a []float64 variable that stores the value of the flag. -func Float64Slice(name string, value []float64, usage string) *[]float64 { - return CommandLine.Float64SliceP(name, "", value, usage) -} - -// Float64SliceP is like Float64Slice, but accepts a shorthand letter that can be used after a single dash. -func Float64SliceP(name, shorthand string, value []float64, usage string) *[]float64 { - return CommandLine.Float64SliceP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/golangflag.go b/vendor/github.com/spf13/pflag/golangflag.go deleted file mode 100644 index d3dd72b7..00000000 --- a/vendor/github.com/spf13/pflag/golangflag.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package pflag - -import ( - goflag "flag" - "reflect" - "strings" -) - -// flagValueWrapper implements pflag.Value around a flag.Value. The main -// difference here is the addition of the Type method that returns a string -// name of the type. As this is generally unknown, we approximate that with -// reflection. -type flagValueWrapper struct { - inner goflag.Value - flagType string -} - -// We are just copying the boolFlag interface out of goflag as that is what -// they use to decide if a flag should get "true" when no arg is given. -type goBoolFlag interface { - goflag.Value - IsBoolFlag() bool -} - -func wrapFlagValue(v goflag.Value) Value { - // If the flag.Value happens to also be a pflag.Value, just use it directly. - if pv, ok := v.(Value); ok { - return pv - } - - pv := &flagValueWrapper{ - inner: v, - } - - t := reflect.TypeOf(v) - if t.Kind() == reflect.Interface || t.Kind() == reflect.Ptr { - t = t.Elem() - } - - pv.flagType = strings.TrimSuffix(t.Name(), "Value") - return pv -} - -func (v *flagValueWrapper) String() string { - return v.inner.String() -} - -func (v *flagValueWrapper) Set(s string) error { - return v.inner.Set(s) -} - -func (v *flagValueWrapper) Type() string { - return v.flagType -} - -// PFlagFromGoFlag will return a *pflag.Flag given a *flag.Flag -// If the *flag.Flag.Name was a single character (ex: `v`) it will be accessiblei -// with both `-v` and `--v` in flags. If the golang flag was more than a single -// character (ex: `verbose`) it will only be accessible via `--verbose` -func PFlagFromGoFlag(goflag *goflag.Flag) *Flag { - // Remember the default value as a string; it won't change. 
- flag := &Flag{ - Name: goflag.Name, - Usage: goflag.Usage, - Value: wrapFlagValue(goflag.Value), - // Looks like golang flags don't set DefValue correctly :-( - //DefValue: goflag.DefValue, - DefValue: goflag.Value.String(), - } - // Ex: if the golang flag was -v, allow both -v and --v to work - if len(flag.Name) == 1 { - flag.Shorthand = flag.Name - } - if fv, ok := goflag.Value.(goBoolFlag); ok && fv.IsBoolFlag() { - flag.NoOptDefVal = "true" - } - return flag -} - -// AddGoFlag will add the given *flag.Flag to the pflag.FlagSet -func (f *FlagSet) AddGoFlag(goflag *goflag.Flag) { - if f.Lookup(goflag.Name) != nil { - return - } - newflag := PFlagFromGoFlag(goflag) - f.AddFlag(newflag) -} - -// AddGoFlagSet will add the given *flag.FlagSet to the pflag.FlagSet -func (f *FlagSet) AddGoFlagSet(newSet *goflag.FlagSet) { - if newSet == nil { - return - } - newSet.VisitAll(func(goflag *goflag.Flag) { - f.AddGoFlag(goflag) - }) - if f.addedGoFlagSets == nil { - f.addedGoFlagSets = make([]*goflag.FlagSet, 0) - } - f.addedGoFlagSets = append(f.addedGoFlagSets, newSet) -} diff --git a/vendor/github.com/spf13/pflag/int.go b/vendor/github.com/spf13/pflag/int.go deleted file mode 100644 index 1474b89d..00000000 --- a/vendor/github.com/spf13/pflag/int.go +++ /dev/null @@ -1,84 +0,0 @@ -package pflag - -import "strconv" - -// -- int Value -type intValue int - -func newIntValue(val int, p *int) *intValue { - *p = val - return (*intValue)(p) -} - -func (i *intValue) Set(s string) error { - v, err := strconv.ParseInt(s, 0, 64) - *i = intValue(v) - return err -} - -func (i *intValue) Type() string { - return "int" -} - -func (i *intValue) String() string { return strconv.Itoa(int(*i)) } - -func intConv(sval string) (interface{}, error) { - return strconv.Atoi(sval) -} - -// GetInt return the int value of a flag with the given name -func (f *FlagSet) GetInt(name string) (int, error) { - val, err := f.getFlagType(name, "int", intConv) - if err != nil { - return 0, err - } - return val.(int), nil -} - -// IntVar defines an int flag with specified name, default value, and usage string. -// The argument p points to an int variable in which to store the value of the flag. -func (f *FlagSet) IntVar(p *int, name string, value int, usage string) { - f.VarP(newIntValue(value, p), name, "", usage) -} - -// IntVarP is like IntVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) IntVarP(p *int, name, shorthand string, value int, usage string) { - f.VarP(newIntValue(value, p), name, shorthand, usage) -} - -// IntVar defines an int flag with specified name, default value, and usage string. -// The argument p points to an int variable in which to store the value of the flag. -func IntVar(p *int, name string, value int, usage string) { - CommandLine.VarP(newIntValue(value, p), name, "", usage) -} - -// IntVarP is like IntVar, but accepts a shorthand letter that can be used after a single dash. -func IntVarP(p *int, name, shorthand string, value int, usage string) { - CommandLine.VarP(newIntValue(value, p), name, shorthand, usage) -} - -// Int defines an int flag with specified name, default value, and usage string. -// The return value is the address of an int variable that stores the value of the flag. -func (f *FlagSet) Int(name string, value int, usage string) *int { - p := new(int) - f.IntVarP(p, name, "", value, usage) - return p -} - -// IntP is like Int, but accepts a shorthand letter that can be used after a single dash. 
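The `golangflag.go` hunk above bridges the standard library: `AddGoFlagSet` wraps each `flag.Flag` via `PFlagFromGoFlag`, and the removed `Parse` also calls the go `FlagSet.Parse(nil)` so the stdlib set is marked as parsed. A reference sketch (not part of the diff):

```go
package main

import (
	goflag "flag"
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	// A flag defined with the standard library...
	level := goflag.Int("level", 0, "defined via the stdlib flag package")

	// ...is wrapped by PFlagFromGoFlag and becomes a pflag flag.
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	fs.AddGoFlagSet(goflag.CommandLine)

	_ = fs.Parse([]string{"--level=3"})
	fmt.Println(*level) // 3
}
```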
-func (f *FlagSet) IntP(name, shorthand string, value int, usage string) *int { - p := new(int) - f.IntVarP(p, name, shorthand, value, usage) - return p -} - -// Int defines an int flag with specified name, default value, and usage string. -// The return value is the address of an int variable that stores the value of the flag. -func Int(name string, value int, usage string) *int { - return CommandLine.IntP(name, "", value, usage) -} - -// IntP is like Int, but accepts a shorthand letter that can be used after a single dash. -func IntP(name, shorthand string, value int, usage string) *int { - return CommandLine.IntP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/int16.go b/vendor/github.com/spf13/pflag/int16.go deleted file mode 100644 index f1a01d05..00000000 --- a/vendor/github.com/spf13/pflag/int16.go +++ /dev/null @@ -1,88 +0,0 @@ -package pflag - -import "strconv" - -// -- int16 Value -type int16Value int16 - -func newInt16Value(val int16, p *int16) *int16Value { - *p = val - return (*int16Value)(p) -} - -func (i *int16Value) Set(s string) error { - v, err := strconv.ParseInt(s, 0, 16) - *i = int16Value(v) - return err -} - -func (i *int16Value) Type() string { - return "int16" -} - -func (i *int16Value) String() string { return strconv.FormatInt(int64(*i), 10) } - -func int16Conv(sval string) (interface{}, error) { - v, err := strconv.ParseInt(sval, 0, 16) - if err != nil { - return 0, err - } - return int16(v), nil -} - -// GetInt16 returns the int16 value of a flag with the given name -func (f *FlagSet) GetInt16(name string) (int16, error) { - val, err := f.getFlagType(name, "int16", int16Conv) - if err != nil { - return 0, err - } - return val.(int16), nil -} - -// Int16Var defines an int16 flag with specified name, default value, and usage string. -// The argument p points to an int16 variable in which to store the value of the flag. -func (f *FlagSet) Int16Var(p *int16, name string, value int16, usage string) { - f.VarP(newInt16Value(value, p), name, "", usage) -} - -// Int16VarP is like Int16Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Int16VarP(p *int16, name, shorthand string, value int16, usage string) { - f.VarP(newInt16Value(value, p), name, shorthand, usage) -} - -// Int16Var defines an int16 flag with specified name, default value, and usage string. -// The argument p points to an int16 variable in which to store the value of the flag. -func Int16Var(p *int16, name string, value int16, usage string) { - CommandLine.VarP(newInt16Value(value, p), name, "", usage) -} - -// Int16VarP is like Int16Var, but accepts a shorthand letter that can be used after a single dash. -func Int16VarP(p *int16, name, shorthand string, value int16, usage string) { - CommandLine.VarP(newInt16Value(value, p), name, shorthand, usage) -} - -// Int16 defines an int16 flag with specified name, default value, and usage string. -// The return value is the address of an int16 variable that stores the value of the flag. -func (f *FlagSet) Int16(name string, value int16, usage string) *int16 { - p := new(int16) - f.Int16VarP(p, name, "", value, usage) - return p -} - -// Int16P is like Int16, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Int16P(name, shorthand string, value int16, usage string) *int16 { - p := new(int16) - f.Int16VarP(p, name, shorthand, value, usage) - return p -} - -// Int16 defines an int16 flag with specified name, default value, and usage string. 
-// The return value is the address of an int16 variable that stores the value of the flag. -func Int16(name string, value int16, usage string) *int16 { - return CommandLine.Int16P(name, "", value, usage) -} - -// Int16P is like Int16, but accepts a shorthand letter that can be used after a single dash. -func Int16P(name, shorthand string, value int16, usage string) *int16 { - return CommandLine.Int16P(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/int32.go b/vendor/github.com/spf13/pflag/int32.go deleted file mode 100644 index 9b95944f..00000000 --- a/vendor/github.com/spf13/pflag/int32.go +++ /dev/null @@ -1,88 +0,0 @@ -package pflag - -import "strconv" - -// -- int32 Value -type int32Value int32 - -func newInt32Value(val int32, p *int32) *int32Value { - *p = val - return (*int32Value)(p) -} - -func (i *int32Value) Set(s string) error { - v, err := strconv.ParseInt(s, 0, 32) - *i = int32Value(v) - return err -} - -func (i *int32Value) Type() string { - return "int32" -} - -func (i *int32Value) String() string { return strconv.FormatInt(int64(*i), 10) } - -func int32Conv(sval string) (interface{}, error) { - v, err := strconv.ParseInt(sval, 0, 32) - if err != nil { - return 0, err - } - return int32(v), nil -} - -// GetInt32 return the int32 value of a flag with the given name -func (f *FlagSet) GetInt32(name string) (int32, error) { - val, err := f.getFlagType(name, "int32", int32Conv) - if err != nil { - return 0, err - } - return val.(int32), nil -} - -// Int32Var defines an int32 flag with specified name, default value, and usage string. -// The argument p points to an int32 variable in which to store the value of the flag. -func (f *FlagSet) Int32Var(p *int32, name string, value int32, usage string) { - f.VarP(newInt32Value(value, p), name, "", usage) -} - -// Int32VarP is like Int32Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Int32VarP(p *int32, name, shorthand string, value int32, usage string) { - f.VarP(newInt32Value(value, p), name, shorthand, usage) -} - -// Int32Var defines an int32 flag with specified name, default value, and usage string. -// The argument p points to an int32 variable in which to store the value of the flag. -func Int32Var(p *int32, name string, value int32, usage string) { - CommandLine.VarP(newInt32Value(value, p), name, "", usage) -} - -// Int32VarP is like Int32Var, but accepts a shorthand letter that can be used after a single dash. -func Int32VarP(p *int32, name, shorthand string, value int32, usage string) { - CommandLine.VarP(newInt32Value(value, p), name, shorthand, usage) -} - -// Int32 defines an int32 flag with specified name, default value, and usage string. -// The return value is the address of an int32 variable that stores the value of the flag. -func (f *FlagSet) Int32(name string, value int32, usage string) *int32 { - p := new(int32) - f.Int32VarP(p, name, "", value, usage) - return p -} - -// Int32P is like Int32, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Int32P(name, shorthand string, value int32, usage string) *int32 { - p := new(int32) - f.Int32VarP(p, name, shorthand, value, usage) - return p -} - -// Int32 defines an int32 flag with specified name, default value, and usage string. -// The return value is the address of an int32 variable that stores the value of the flag. 
-func Int32(name string, value int32, usage string) *int32 { - return CommandLine.Int32P(name, "", value, usage) -} - -// Int32P is like Int32, but accepts a shorthand letter that can be used after a single dash. -func Int32P(name, shorthand string, value int32, usage string) *int32 { - return CommandLine.Int32P(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/int32_slice.go b/vendor/github.com/spf13/pflag/int32_slice.go deleted file mode 100644 index ff128ff0..00000000 --- a/vendor/github.com/spf13/pflag/int32_slice.go +++ /dev/null @@ -1,174 +0,0 @@ -package pflag - -import ( - "fmt" - "strconv" - "strings" -) - -// -- int32Slice Value -type int32SliceValue struct { - value *[]int32 - changed bool -} - -func newInt32SliceValue(val []int32, p *[]int32) *int32SliceValue { - isv := new(int32SliceValue) - isv.value = p - *isv.value = val - return isv -} - -func (s *int32SliceValue) Set(val string) error { - ss := strings.Split(val, ",") - out := make([]int32, len(ss)) - for i, d := range ss { - var err error - var temp64 int64 - temp64, err = strconv.ParseInt(d, 0, 32) - if err != nil { - return err - } - out[i] = int32(temp64) - - } - if !s.changed { - *s.value = out - } else { - *s.value = append(*s.value, out...) - } - s.changed = true - return nil -} - -func (s *int32SliceValue) Type() string { - return "int32Slice" -} - -func (s *int32SliceValue) String() string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = fmt.Sprintf("%d", d) - } - return "[" + strings.Join(out, ",") + "]" -} - -func (s *int32SliceValue) fromString(val string) (int32, error) { - t64, err := strconv.ParseInt(val, 0, 32) - if err != nil { - return 0, err - } - return int32(t64), nil -} - -func (s *int32SliceValue) toString(val int32) string { - return fmt.Sprintf("%d", val) -} - -func (s *int32SliceValue) Append(val string) error { - i, err := s.fromString(val) - if err != nil { - return err - } - *s.value = append(*s.value, i) - return nil -} - -func (s *int32SliceValue) Replace(val []string) error { - out := make([]int32, len(val)) - for i, d := range val { - var err error - out[i], err = s.fromString(d) - if err != nil { - return err - } - } - *s.value = out - return nil -} - -func (s *int32SliceValue) GetSlice() []string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = s.toString(d) - } - return out -} - -func int32SliceConv(val string) (interface{}, error) { - val = strings.Trim(val, "[]") - // Empty string would cause a slice with one (empty) entry - if len(val) == 0 { - return []int32{}, nil - } - ss := strings.Split(val, ",") - out := make([]int32, len(ss)) - for i, d := range ss { - var err error - var temp64 int64 - temp64, err = strconv.ParseInt(d, 0, 32) - if err != nil { - return nil, err - } - out[i] = int32(temp64) - - } - return out, nil -} - -// GetInt32Slice return the []int32 value of a flag with the given name -func (f *FlagSet) GetInt32Slice(name string) ([]int32, error) { - val, err := f.getFlagType(name, "int32Slice", int32SliceConv) - if err != nil { - return []int32{}, err - } - return val.([]int32), nil -} - -// Int32SliceVar defines a int32Slice flag with specified name, default value, and usage string. -// The argument p points to a []int32 variable in which to store the value of the flag. 
-func (f *FlagSet) Int32SliceVar(p *[]int32, name string, value []int32, usage string) { - f.VarP(newInt32SliceValue(value, p), name, "", usage) -} - -// Int32SliceVarP is like Int32SliceVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Int32SliceVarP(p *[]int32, name, shorthand string, value []int32, usage string) { - f.VarP(newInt32SliceValue(value, p), name, shorthand, usage) -} - -// Int32SliceVar defines a int32[] flag with specified name, default value, and usage string. -// The argument p points to a int32[] variable in which to store the value of the flag. -func Int32SliceVar(p *[]int32, name string, value []int32, usage string) { - CommandLine.VarP(newInt32SliceValue(value, p), name, "", usage) -} - -// Int32SliceVarP is like Int32SliceVar, but accepts a shorthand letter that can be used after a single dash. -func Int32SliceVarP(p *[]int32, name, shorthand string, value []int32, usage string) { - CommandLine.VarP(newInt32SliceValue(value, p), name, shorthand, usage) -} - -// Int32Slice defines a []int32 flag with specified name, default value, and usage string. -// The return value is the address of a []int32 variable that stores the value of the flag. -func (f *FlagSet) Int32Slice(name string, value []int32, usage string) *[]int32 { - p := []int32{} - f.Int32SliceVarP(&p, name, "", value, usage) - return &p -} - -// Int32SliceP is like Int32Slice, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Int32SliceP(name, shorthand string, value []int32, usage string) *[]int32 { - p := []int32{} - f.Int32SliceVarP(&p, name, shorthand, value, usage) - return &p -} - -// Int32Slice defines a []int32 flag with specified name, default value, and usage string. -// The return value is the address of a []int32 variable that stores the value of the flag. -func Int32Slice(name string, value []int32, usage string) *[]int32 { - return CommandLine.Int32SliceP(name, "", value, usage) -} - -// Int32SliceP is like Int32Slice, but accepts a shorthand letter that can be used after a single dash. -func Int32SliceP(name, shorthand string, value []int32, usage string) *[]int32 { - return CommandLine.Int32SliceP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/int64.go b/vendor/github.com/spf13/pflag/int64.go deleted file mode 100644 index 0026d781..00000000 --- a/vendor/github.com/spf13/pflag/int64.go +++ /dev/null @@ -1,84 +0,0 @@ -package pflag - -import "strconv" - -// -- int64 Value -type int64Value int64 - -func newInt64Value(val int64, p *int64) *int64Value { - *p = val - return (*int64Value)(p) -} - -func (i *int64Value) Set(s string) error { - v, err := strconv.ParseInt(s, 0, 64) - *i = int64Value(v) - return err -} - -func (i *int64Value) Type() string { - return "int64" -} - -func (i *int64Value) String() string { return strconv.FormatInt(int64(*i), 10) } - -func int64Conv(sval string) (interface{}, error) { - return strconv.ParseInt(sval, 0, 64) -} - -// GetInt64 return the int64 value of a flag with the given name -func (f *FlagSet) GetInt64(name string) (int64, error) { - val, err := f.getFlagType(name, "int64", int64Conv) - if err != nil { - return 0, err - } - return val.(int64), nil -} - -// Int64Var defines an int64 flag with specified name, default value, and usage string. -// The argument p points to an int64 variable in which to store the value of the flag. 
-func (f *FlagSet) Int64Var(p *int64, name string, value int64, usage string) { - f.VarP(newInt64Value(value, p), name, "", usage) -} - -// Int64VarP is like Int64Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Int64VarP(p *int64, name, shorthand string, value int64, usage string) { - f.VarP(newInt64Value(value, p), name, shorthand, usage) -} - -// Int64Var defines an int64 flag with specified name, default value, and usage string. -// The argument p points to an int64 variable in which to store the value of the flag. -func Int64Var(p *int64, name string, value int64, usage string) { - CommandLine.VarP(newInt64Value(value, p), name, "", usage) -} - -// Int64VarP is like Int64Var, but accepts a shorthand letter that can be used after a single dash. -func Int64VarP(p *int64, name, shorthand string, value int64, usage string) { - CommandLine.VarP(newInt64Value(value, p), name, shorthand, usage) -} - -// Int64 defines an int64 flag with specified name, default value, and usage string. -// The return value is the address of an int64 variable that stores the value of the flag. -func (f *FlagSet) Int64(name string, value int64, usage string) *int64 { - p := new(int64) - f.Int64VarP(p, name, "", value, usage) - return p -} - -// Int64P is like Int64, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Int64P(name, shorthand string, value int64, usage string) *int64 { - p := new(int64) - f.Int64VarP(p, name, shorthand, value, usage) - return p -} - -// Int64 defines an int64 flag with specified name, default value, and usage string. -// The return value is the address of an int64 variable that stores the value of the flag. -func Int64(name string, value int64, usage string) *int64 { - return CommandLine.Int64P(name, "", value, usage) -} - -// Int64P is like Int64, but accepts a shorthand letter that can be used after a single dash. -func Int64P(name, shorthand string, value int64, usage string) *int64 { - return CommandLine.Int64P(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/int64_slice.go b/vendor/github.com/spf13/pflag/int64_slice.go deleted file mode 100644 index 25464638..00000000 --- a/vendor/github.com/spf13/pflag/int64_slice.go +++ /dev/null @@ -1,166 +0,0 @@ -package pflag - -import ( - "fmt" - "strconv" - "strings" -) - -// -- int64Slice Value -type int64SliceValue struct { - value *[]int64 - changed bool -} - -func newInt64SliceValue(val []int64, p *[]int64) *int64SliceValue { - isv := new(int64SliceValue) - isv.value = p - *isv.value = val - return isv -} - -func (s *int64SliceValue) Set(val string) error { - ss := strings.Split(val, ",") - out := make([]int64, len(ss)) - for i, d := range ss { - var err error - out[i], err = strconv.ParseInt(d, 0, 64) - if err != nil { - return err - } - - } - if !s.changed { - *s.value = out - } else { - *s.value = append(*s.value, out...) 
- } - s.changed = true - return nil -} - -func (s *int64SliceValue) Type() string { - return "int64Slice" -} - -func (s *int64SliceValue) String() string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = fmt.Sprintf("%d", d) - } - return "[" + strings.Join(out, ",") + "]" -} - -func (s *int64SliceValue) fromString(val string) (int64, error) { - return strconv.ParseInt(val, 0, 64) -} - -func (s *int64SliceValue) toString(val int64) string { - return fmt.Sprintf("%d", val) -} - -func (s *int64SliceValue) Append(val string) error { - i, err := s.fromString(val) - if err != nil { - return err - } - *s.value = append(*s.value, i) - return nil -} - -func (s *int64SliceValue) Replace(val []string) error { - out := make([]int64, len(val)) - for i, d := range val { - var err error - out[i], err = s.fromString(d) - if err != nil { - return err - } - } - *s.value = out - return nil -} - -func (s *int64SliceValue) GetSlice() []string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = s.toString(d) - } - return out -} - -func int64SliceConv(val string) (interface{}, error) { - val = strings.Trim(val, "[]") - // Empty string would cause a slice with one (empty) entry - if len(val) == 0 { - return []int64{}, nil - } - ss := strings.Split(val, ",") - out := make([]int64, len(ss)) - for i, d := range ss { - var err error - out[i], err = strconv.ParseInt(d, 0, 64) - if err != nil { - return nil, err - } - - } - return out, nil -} - -// GetInt64Slice return the []int64 value of a flag with the given name -func (f *FlagSet) GetInt64Slice(name string) ([]int64, error) { - val, err := f.getFlagType(name, "int64Slice", int64SliceConv) - if err != nil { - return []int64{}, err - } - return val.([]int64), nil -} - -// Int64SliceVar defines a int64Slice flag with specified name, default value, and usage string. -// The argument p points to a []int64 variable in which to store the value of the flag. -func (f *FlagSet) Int64SliceVar(p *[]int64, name string, value []int64, usage string) { - f.VarP(newInt64SliceValue(value, p), name, "", usage) -} - -// Int64SliceVarP is like Int64SliceVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Int64SliceVarP(p *[]int64, name, shorthand string, value []int64, usage string) { - f.VarP(newInt64SliceValue(value, p), name, shorthand, usage) -} - -// Int64SliceVar defines a int64[] flag with specified name, default value, and usage string. -// The argument p points to a int64[] variable in which to store the value of the flag. -func Int64SliceVar(p *[]int64, name string, value []int64, usage string) { - CommandLine.VarP(newInt64SliceValue(value, p), name, "", usage) -} - -// Int64SliceVarP is like Int64SliceVar, but accepts a shorthand letter that can be used after a single dash. -func Int64SliceVarP(p *[]int64, name, shorthand string, value []int64, usage string) { - CommandLine.VarP(newInt64SliceValue(value, p), name, shorthand, usage) -} - -// Int64Slice defines a []int64 flag with specified name, default value, and usage string. -// The return value is the address of a []int64 variable that stores the value of the flag. -func (f *FlagSet) Int64Slice(name string, value []int64, usage string) *[]int64 { - p := []int64{} - f.Int64SliceVarP(&p, name, "", value, usage) - return &p -} - -// Int64SliceP is like Int64Slice, but accepts a shorthand letter that can be used after a single dash. 
-func (f *FlagSet) Int64SliceP(name, shorthand string, value []int64, usage string) *[]int64 { - p := []int64{} - f.Int64SliceVarP(&p, name, shorthand, value, usage) - return &p -} - -// Int64Slice defines a []int64 flag with specified name, default value, and usage string. -// The return value is the address of a []int64 variable that stores the value of the flag. -func Int64Slice(name string, value []int64, usage string) *[]int64 { - return CommandLine.Int64SliceP(name, "", value, usage) -} - -// Int64SliceP is like Int64Slice, but accepts a shorthand letter that can be used after a single dash. -func Int64SliceP(name, shorthand string, value []int64, usage string) *[]int64 { - return CommandLine.Int64SliceP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/int8.go b/vendor/github.com/spf13/pflag/int8.go deleted file mode 100644 index 4da92228..00000000 --- a/vendor/github.com/spf13/pflag/int8.go +++ /dev/null @@ -1,88 +0,0 @@ -package pflag - -import "strconv" - -// -- int8 Value -type int8Value int8 - -func newInt8Value(val int8, p *int8) *int8Value { - *p = val - return (*int8Value)(p) -} - -func (i *int8Value) Set(s string) error { - v, err := strconv.ParseInt(s, 0, 8) - *i = int8Value(v) - return err -} - -func (i *int8Value) Type() string { - return "int8" -} - -func (i *int8Value) String() string { return strconv.FormatInt(int64(*i), 10) } - -func int8Conv(sval string) (interface{}, error) { - v, err := strconv.ParseInt(sval, 0, 8) - if err != nil { - return 0, err - } - return int8(v), nil -} - -// GetInt8 return the int8 value of a flag with the given name -func (f *FlagSet) GetInt8(name string) (int8, error) { - val, err := f.getFlagType(name, "int8", int8Conv) - if err != nil { - return 0, err - } - return val.(int8), nil -} - -// Int8Var defines an int8 flag with specified name, default value, and usage string. -// The argument p points to an int8 variable in which to store the value of the flag. -func (f *FlagSet) Int8Var(p *int8, name string, value int8, usage string) { - f.VarP(newInt8Value(value, p), name, "", usage) -} - -// Int8VarP is like Int8Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Int8VarP(p *int8, name, shorthand string, value int8, usage string) { - f.VarP(newInt8Value(value, p), name, shorthand, usage) -} - -// Int8Var defines an int8 flag with specified name, default value, and usage string. -// The argument p points to an int8 variable in which to store the value of the flag. -func Int8Var(p *int8, name string, value int8, usage string) { - CommandLine.VarP(newInt8Value(value, p), name, "", usage) -} - -// Int8VarP is like Int8Var, but accepts a shorthand letter that can be used after a single dash. -func Int8VarP(p *int8, name, shorthand string, value int8, usage string) { - CommandLine.VarP(newInt8Value(value, p), name, shorthand, usage) -} - -// Int8 defines an int8 flag with specified name, default value, and usage string. -// The return value is the address of an int8 variable that stores the value of the flag. -func (f *FlagSet) Int8(name string, value int8, usage string) *int8 { - p := new(int8) - f.Int8VarP(p, name, "", value, usage) - return p -} - -// Int8P is like Int8, but accepts a shorthand letter that can be used after a single dash. 
-func (f *FlagSet) Int8P(name, shorthand string, value int8, usage string) *int8 { - p := new(int8) - f.Int8VarP(p, name, shorthand, value, usage) - return p -} - -// Int8 defines an int8 flag with specified name, default value, and usage string. -// The return value is the address of an int8 variable that stores the value of the flag. -func Int8(name string, value int8, usage string) *int8 { - return CommandLine.Int8P(name, "", value, usage) -} - -// Int8P is like Int8, but accepts a shorthand letter that can be used after a single dash. -func Int8P(name, shorthand string, value int8, usage string) *int8 { - return CommandLine.Int8P(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/int_slice.go b/vendor/github.com/spf13/pflag/int_slice.go deleted file mode 100644 index e71c39d9..00000000 --- a/vendor/github.com/spf13/pflag/int_slice.go +++ /dev/null @@ -1,158 +0,0 @@ -package pflag - -import ( - "fmt" - "strconv" - "strings" -) - -// -- intSlice Value -type intSliceValue struct { - value *[]int - changed bool -} - -func newIntSliceValue(val []int, p *[]int) *intSliceValue { - isv := new(intSliceValue) - isv.value = p - *isv.value = val - return isv -} - -func (s *intSliceValue) Set(val string) error { - ss := strings.Split(val, ",") - out := make([]int, len(ss)) - for i, d := range ss { - var err error - out[i], err = strconv.Atoi(d) - if err != nil { - return err - } - - } - if !s.changed { - *s.value = out - } else { - *s.value = append(*s.value, out...) - } - s.changed = true - return nil -} - -func (s *intSliceValue) Type() string { - return "intSlice" -} - -func (s *intSliceValue) String() string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = fmt.Sprintf("%d", d) - } - return "[" + strings.Join(out, ",") + "]" -} - -func (s *intSliceValue) Append(val string) error { - i, err := strconv.Atoi(val) - if err != nil { - return err - } - *s.value = append(*s.value, i) - return nil -} - -func (s *intSliceValue) Replace(val []string) error { - out := make([]int, len(val)) - for i, d := range val { - var err error - out[i], err = strconv.Atoi(d) - if err != nil { - return err - } - } - *s.value = out - return nil -} - -func (s *intSliceValue) GetSlice() []string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = strconv.Itoa(d) - } - return out -} - -func intSliceConv(val string) (interface{}, error) { - val = strings.Trim(val, "[]") - // Empty string would cause a slice with one (empty) entry - if len(val) == 0 { - return []int{}, nil - } - ss := strings.Split(val, ",") - out := make([]int, len(ss)) - for i, d := range ss { - var err error - out[i], err = strconv.Atoi(d) - if err != nil { - return nil, err - } - - } - return out, nil -} - -// GetIntSlice return the []int value of a flag with the given name -func (f *FlagSet) GetIntSlice(name string) ([]int, error) { - val, err := f.getFlagType(name, "intSlice", intSliceConv) - if err != nil { - return []int{}, err - } - return val.([]int), nil -} - -// IntSliceVar defines a intSlice flag with specified name, default value, and usage string. -// The argument p points to a []int variable in which to store the value of the flag. -func (f *FlagSet) IntSliceVar(p *[]int, name string, value []int, usage string) { - f.VarP(newIntSliceValue(value, p), name, "", usage) -} - -// IntSliceVarP is like IntSliceVar, but accepts a shorthand letter that can be used after a single dash. 
-func (f *FlagSet) IntSliceVarP(p *[]int, name, shorthand string, value []int, usage string) { - f.VarP(newIntSliceValue(value, p), name, shorthand, usage) -} - -// IntSliceVar defines a int[] flag with specified name, default value, and usage string. -// The argument p points to a int[] variable in which to store the value of the flag. -func IntSliceVar(p *[]int, name string, value []int, usage string) { - CommandLine.VarP(newIntSliceValue(value, p), name, "", usage) -} - -// IntSliceVarP is like IntSliceVar, but accepts a shorthand letter that can be used after a single dash. -func IntSliceVarP(p *[]int, name, shorthand string, value []int, usage string) { - CommandLine.VarP(newIntSliceValue(value, p), name, shorthand, usage) -} - -// IntSlice defines a []int flag with specified name, default value, and usage string. -// The return value is the address of a []int variable that stores the value of the flag. -func (f *FlagSet) IntSlice(name string, value []int, usage string) *[]int { - p := []int{} - f.IntSliceVarP(&p, name, "", value, usage) - return &p -} - -// IntSliceP is like IntSlice, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) IntSliceP(name, shorthand string, value []int, usage string) *[]int { - p := []int{} - f.IntSliceVarP(&p, name, shorthand, value, usage) - return &p -} - -// IntSlice defines a []int flag with specified name, default value, and usage string. -// The return value is the address of a []int variable that stores the value of the flag. -func IntSlice(name string, value []int, usage string) *[]int { - return CommandLine.IntSliceP(name, "", value, usage) -} - -// IntSliceP is like IntSlice, but accepts a shorthand letter that can be used after a single dash. -func IntSliceP(name, shorthand string, value []int, usage string) *[]int { - return CommandLine.IntSliceP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/ip.go b/vendor/github.com/spf13/pflag/ip.go deleted file mode 100644 index 3d414ba6..00000000 --- a/vendor/github.com/spf13/pflag/ip.go +++ /dev/null @@ -1,94 +0,0 @@ -package pflag - -import ( - "fmt" - "net" - "strings" -) - -// -- net.IP value -type ipValue net.IP - -func newIPValue(val net.IP, p *net.IP) *ipValue { - *p = val - return (*ipValue)(p) -} - -func (i *ipValue) String() string { return net.IP(*i).String() } -func (i *ipValue) Set(s string) error { - ip := net.ParseIP(strings.TrimSpace(s)) - if ip == nil { - return fmt.Errorf("failed to parse IP: %q", s) - } - *i = ipValue(ip) - return nil -} - -func (i *ipValue) Type() string { - return "ip" -} - -func ipConv(sval string) (interface{}, error) { - ip := net.ParseIP(sval) - if ip != nil { - return ip, nil - } - return nil, fmt.Errorf("invalid string being converted to IP address: %s", sval) -} - -// GetIP return the net.IP value of a flag with the given name -func (f *FlagSet) GetIP(name string) (net.IP, error) { - val, err := f.getFlagType(name, "ip", ipConv) - if err != nil { - return nil, err - } - return val.(net.IP), nil -} - -// IPVar defines an net.IP flag with specified name, default value, and usage string. -// The argument p points to an net.IP variable in which to store the value of the flag. -func (f *FlagSet) IPVar(p *net.IP, name string, value net.IP, usage string) { - f.VarP(newIPValue(value, p), name, "", usage) -} - -// IPVarP is like IPVar, but accepts a shorthand letter that can be used after a single dash. 
-func (f *FlagSet) IPVarP(p *net.IP, name, shorthand string, value net.IP, usage string) { - f.VarP(newIPValue(value, p), name, shorthand, usage) -} - -// IPVar defines an net.IP flag with specified name, default value, and usage string. -// The argument p points to an net.IP variable in which to store the value of the flag. -func IPVar(p *net.IP, name string, value net.IP, usage string) { - CommandLine.VarP(newIPValue(value, p), name, "", usage) -} - -// IPVarP is like IPVar, but accepts a shorthand letter that can be used after a single dash. -func IPVarP(p *net.IP, name, shorthand string, value net.IP, usage string) { - CommandLine.VarP(newIPValue(value, p), name, shorthand, usage) -} - -// IP defines an net.IP flag with specified name, default value, and usage string. -// The return value is the address of an net.IP variable that stores the value of the flag. -func (f *FlagSet) IP(name string, value net.IP, usage string) *net.IP { - p := new(net.IP) - f.IPVarP(p, name, "", value, usage) - return p -} - -// IPP is like IP, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) IPP(name, shorthand string, value net.IP, usage string) *net.IP { - p := new(net.IP) - f.IPVarP(p, name, shorthand, value, usage) - return p -} - -// IP defines an net.IP flag with specified name, default value, and usage string. -// The return value is the address of an net.IP variable that stores the value of the flag. -func IP(name string, value net.IP, usage string) *net.IP { - return CommandLine.IPP(name, "", value, usage) -} - -// IPP is like IP, but accepts a shorthand letter that can be used after a single dash. -func IPP(name, shorthand string, value net.IP, usage string) *net.IP { - return CommandLine.IPP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/ip_slice.go b/vendor/github.com/spf13/pflag/ip_slice.go deleted file mode 100644 index 775faae4..00000000 --- a/vendor/github.com/spf13/pflag/ip_slice.go +++ /dev/null @@ -1,186 +0,0 @@ -package pflag - -import ( - "fmt" - "io" - "net" - "strings" -) - -// -- ipSlice Value -type ipSliceValue struct { - value *[]net.IP - changed bool -} - -func newIPSliceValue(val []net.IP, p *[]net.IP) *ipSliceValue { - ipsv := new(ipSliceValue) - ipsv.value = p - *ipsv.value = val - return ipsv -} - -// Set converts, and assigns, the comma-separated IP argument string representation as the []net.IP value of this flag. -// If Set is called on a flag that already has a []net.IP assigned, the newly converted values will be appended. -func (s *ipSliceValue) Set(val string) error { - - // remove all quote characters - rmQuote := strings.NewReplacer(`"`, "", `'`, "", "`", "") - - // read flag arguments with CSV parser - ipStrSlice, err := readAsCSV(rmQuote.Replace(val)) - if err != nil && err != io.EOF { - return err - } - - // parse ip values into slice - out := make([]net.IP, 0, len(ipStrSlice)) - for _, ipStr := range ipStrSlice { - ip := net.ParseIP(strings.TrimSpace(ipStr)) - if ip == nil { - return fmt.Errorf("invalid string being converted to IP address: %s", ipStr) - } - out = append(out, ip) - } - - if !s.changed { - *s.value = out - } else { - *s.value = append(*s.value, out...) - } - - s.changed = true - - return nil -} - -// Type returns a string that uniquely represents this flag's type. -func (s *ipSliceValue) Type() string { - return "ipSlice" -} - -// String defines a "native" format for this net.IP slice flag value. 
-func (s *ipSliceValue) String() string { - - ipStrSlice := make([]string, len(*s.value)) - for i, ip := range *s.value { - ipStrSlice[i] = ip.String() - } - - out, _ := writeAsCSV(ipStrSlice) - - return "[" + out + "]" -} - -func (s *ipSliceValue) fromString(val string) (net.IP, error) { - return net.ParseIP(strings.TrimSpace(val)), nil -} - -func (s *ipSliceValue) toString(val net.IP) string { - return val.String() -} - -func (s *ipSliceValue) Append(val string) error { - i, err := s.fromString(val) - if err != nil { - return err - } - *s.value = append(*s.value, i) - return nil -} - -func (s *ipSliceValue) Replace(val []string) error { - out := make([]net.IP, len(val)) - for i, d := range val { - var err error - out[i], err = s.fromString(d) - if err != nil { - return err - } - } - *s.value = out - return nil -} - -func (s *ipSliceValue) GetSlice() []string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = s.toString(d) - } - return out -} - -func ipSliceConv(val string) (interface{}, error) { - val = strings.Trim(val, "[]") - // Empty string would cause a slice with one (empty) entry - if len(val) == 0 { - return []net.IP{}, nil - } - ss := strings.Split(val, ",") - out := make([]net.IP, len(ss)) - for i, sval := range ss { - ip := net.ParseIP(strings.TrimSpace(sval)) - if ip == nil { - return nil, fmt.Errorf("invalid string being converted to IP address: %s", sval) - } - out[i] = ip - } - return out, nil -} - -// GetIPSlice returns the []net.IP value of a flag with the given name -func (f *FlagSet) GetIPSlice(name string) ([]net.IP, error) { - val, err := f.getFlagType(name, "ipSlice", ipSliceConv) - if err != nil { - return []net.IP{}, err - } - return val.([]net.IP), nil -} - -// IPSliceVar defines a ipSlice flag with specified name, default value, and usage string. -// The argument p points to a []net.IP variable in which to store the value of the flag. -func (f *FlagSet) IPSliceVar(p *[]net.IP, name string, value []net.IP, usage string) { - f.VarP(newIPSliceValue(value, p), name, "", usage) -} - -// IPSliceVarP is like IPSliceVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) IPSliceVarP(p *[]net.IP, name, shorthand string, value []net.IP, usage string) { - f.VarP(newIPSliceValue(value, p), name, shorthand, usage) -} - -// IPSliceVar defines a []net.IP flag with specified name, default value, and usage string. -// The argument p points to a []net.IP variable in which to store the value of the flag. -func IPSliceVar(p *[]net.IP, name string, value []net.IP, usage string) { - CommandLine.VarP(newIPSliceValue(value, p), name, "", usage) -} - -// IPSliceVarP is like IPSliceVar, but accepts a shorthand letter that can be used after a single dash. -func IPSliceVarP(p *[]net.IP, name, shorthand string, value []net.IP, usage string) { - CommandLine.VarP(newIPSliceValue(value, p), name, shorthand, usage) -} - -// IPSlice defines a []net.IP flag with specified name, default value, and usage string. -// The return value is the address of a []net.IP variable that stores the value of that flag. -func (f *FlagSet) IPSlice(name string, value []net.IP, usage string) *[]net.IP { - p := []net.IP{} - f.IPSliceVarP(&p, name, "", value, usage) - return &p -} - -// IPSliceP is like IPSlice, but accepts a shorthand letter that can be used after a single dash. 
-func (f *FlagSet) IPSliceP(name, shorthand string, value []net.IP, usage string) *[]net.IP { - p := []net.IP{} - f.IPSliceVarP(&p, name, shorthand, value, usage) - return &p -} - -// IPSlice defines a []net.IP flag with specified name, default value, and usage string. -// The return value is the address of a []net.IP variable that stores the value of the flag. -func IPSlice(name string, value []net.IP, usage string) *[]net.IP { - return CommandLine.IPSliceP(name, "", value, usage) -} - -// IPSliceP is like IPSlice, but accepts a shorthand letter that can be used after a single dash. -func IPSliceP(name, shorthand string, value []net.IP, usage string) *[]net.IP { - return CommandLine.IPSliceP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/ipmask.go b/vendor/github.com/spf13/pflag/ipmask.go deleted file mode 100644 index 5bd44bd2..00000000 --- a/vendor/github.com/spf13/pflag/ipmask.go +++ /dev/null @@ -1,122 +0,0 @@ -package pflag - -import ( - "fmt" - "net" - "strconv" -) - -// -- net.IPMask value -type ipMaskValue net.IPMask - -func newIPMaskValue(val net.IPMask, p *net.IPMask) *ipMaskValue { - *p = val - return (*ipMaskValue)(p) -} - -func (i *ipMaskValue) String() string { return net.IPMask(*i).String() } -func (i *ipMaskValue) Set(s string) error { - ip := ParseIPv4Mask(s) - if ip == nil { - return fmt.Errorf("failed to parse IP mask: %q", s) - } - *i = ipMaskValue(ip) - return nil -} - -func (i *ipMaskValue) Type() string { - return "ipMask" -} - -// ParseIPv4Mask written in IP form (e.g. 255.255.255.0). -// This function should really belong to the net package. -func ParseIPv4Mask(s string) net.IPMask { - mask := net.ParseIP(s) - if mask == nil { - if len(s) != 8 { - return nil - } - // net.IPMask.String() actually outputs things like ffffff00 - // so write a horrible parser for that as well :-( - m := []int{} - for i := 0; i < 4; i++ { - b := "0x" + s[2*i:2*i+2] - d, err := strconv.ParseInt(b, 0, 0) - if err != nil { - return nil - } - m = append(m, int(d)) - } - s := fmt.Sprintf("%d.%d.%d.%d", m[0], m[1], m[2], m[3]) - mask = net.ParseIP(s) - if mask == nil { - return nil - } - } - return net.IPv4Mask(mask[12], mask[13], mask[14], mask[15]) -} - -func parseIPv4Mask(sval string) (interface{}, error) { - mask := ParseIPv4Mask(sval) - if mask == nil { - return nil, fmt.Errorf("unable to parse %s as net.IPMask", sval) - } - return mask, nil -} - -// GetIPv4Mask return the net.IPv4Mask value of a flag with the given name -func (f *FlagSet) GetIPv4Mask(name string) (net.IPMask, error) { - val, err := f.getFlagType(name, "ipMask", parseIPv4Mask) - if err != nil { - return nil, err - } - return val.(net.IPMask), nil -} - -// IPMaskVar defines an net.IPMask flag with specified name, default value, and usage string. -// The argument p points to an net.IPMask variable in which to store the value of the flag. -func (f *FlagSet) IPMaskVar(p *net.IPMask, name string, value net.IPMask, usage string) { - f.VarP(newIPMaskValue(value, p), name, "", usage) -} - -// IPMaskVarP is like IPMaskVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) IPMaskVarP(p *net.IPMask, name, shorthand string, value net.IPMask, usage string) { - f.VarP(newIPMaskValue(value, p), name, shorthand, usage) -} - -// IPMaskVar defines an net.IPMask flag with specified name, default value, and usage string. -// The argument p points to an net.IPMask variable in which to store the value of the flag. 
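The ip.go and ip_slice.go hunks just above delete pflag's net.IP flag helpers; the ipmask.go deletion continues below. A short sketch of their documented use (illustrative, not part of the patch; NewFlagSet/Parse assumed from upstream pflag):

    package main

    import (
    	"fmt"
    	"net"

    	"github.com/spf13/pflag"
    )

    func main() {
    	fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
    	addr := fs.IP("addr", net.ParseIP("127.0.0.1"), "bind address")
    	peers := fs.IPSlice("peers", nil, "peer addresses")

    	// IP slice flags accept comma-separated addresses and error on anything unparseable.
    	_ = fs.Parse([]string{"--addr=10.0.0.1", "--peers=10.0.0.2,10.0.0.3"})
    	fmt.Println(*addr, *peers) // 10.0.0.1 [10.0.0.2 10.0.0.3]
    }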
-func IPMaskVar(p *net.IPMask, name string, value net.IPMask, usage string) { - CommandLine.VarP(newIPMaskValue(value, p), name, "", usage) -} - -// IPMaskVarP is like IPMaskVar, but accepts a shorthand letter that can be used after a single dash. -func IPMaskVarP(p *net.IPMask, name, shorthand string, value net.IPMask, usage string) { - CommandLine.VarP(newIPMaskValue(value, p), name, shorthand, usage) -} - -// IPMask defines an net.IPMask flag with specified name, default value, and usage string. -// The return value is the address of an net.IPMask variable that stores the value of the flag. -func (f *FlagSet) IPMask(name string, value net.IPMask, usage string) *net.IPMask { - p := new(net.IPMask) - f.IPMaskVarP(p, name, "", value, usage) - return p -} - -// IPMaskP is like IPMask, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) IPMaskP(name, shorthand string, value net.IPMask, usage string) *net.IPMask { - p := new(net.IPMask) - f.IPMaskVarP(p, name, shorthand, value, usage) - return p -} - -// IPMask defines an net.IPMask flag with specified name, default value, and usage string. -// The return value is the address of an net.IPMask variable that stores the value of the flag. -func IPMask(name string, value net.IPMask, usage string) *net.IPMask { - return CommandLine.IPMaskP(name, "", value, usage) -} - -// IPMaskP is like IP, but accepts a shorthand letter that can be used after a single dash. -func IPMaskP(name, shorthand string, value net.IPMask, usage string) *net.IPMask { - return CommandLine.IPMaskP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/ipnet.go b/vendor/github.com/spf13/pflag/ipnet.go deleted file mode 100644 index e2c1b8bc..00000000 --- a/vendor/github.com/spf13/pflag/ipnet.go +++ /dev/null @@ -1,98 +0,0 @@ -package pflag - -import ( - "fmt" - "net" - "strings" -) - -// IPNet adapts net.IPNet for use as a flag. -type ipNetValue net.IPNet - -func (ipnet ipNetValue) String() string { - n := net.IPNet(ipnet) - return n.String() -} - -func (ipnet *ipNetValue) Set(value string) error { - _, n, err := net.ParseCIDR(strings.TrimSpace(value)) - if err != nil { - return err - } - *ipnet = ipNetValue(*n) - return nil -} - -func (*ipNetValue) Type() string { - return "ipNet" -} - -func newIPNetValue(val net.IPNet, p *net.IPNet) *ipNetValue { - *p = val - return (*ipNetValue)(p) -} - -func ipNetConv(sval string) (interface{}, error) { - _, n, err := net.ParseCIDR(strings.TrimSpace(sval)) - if err == nil { - return *n, nil - } - return nil, fmt.Errorf("invalid string being converted to IPNet: %s", sval) -} - -// GetIPNet return the net.IPNet value of a flag with the given name -func (f *FlagSet) GetIPNet(name string) (net.IPNet, error) { - val, err := f.getFlagType(name, "ipNet", ipNetConv) - if err != nil { - return net.IPNet{}, err - } - return val.(net.IPNet), nil -} - -// IPNetVar defines an net.IPNet flag with specified name, default value, and usage string. -// The argument p points to an net.IPNet variable in which to store the value of the flag. -func (f *FlagSet) IPNetVar(p *net.IPNet, name string, value net.IPNet, usage string) { - f.VarP(newIPNetValue(value, p), name, "", usage) -} - -// IPNetVarP is like IPNetVar, but accepts a shorthand letter that can be used after a single dash. 
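ipmask.go, removed above, also exported ParseIPv4Mask, which per the deleted implementation accepts either a dotted-quad mask or the eight-hex-digit form that net.IPMask.String() emits (the ipnet.go deletion continues below). A sketch of that round trip, illustrative only:

    package main

    import (
    	"fmt"

    	"github.com/spf13/pflag"
    )

    func main() {
    	// Both spellings yield the same 4-byte mask; net.IPMask prints the hex form.
    	fmt.Println(pflag.ParseIPv4Mask("255.255.255.0")) // ffffff00
    	fmt.Println(pflag.ParseIPv4Mask("ffffff00"))      // ffffff00
    }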
-func (f *FlagSet) IPNetVarP(p *net.IPNet, name, shorthand string, value net.IPNet, usage string) { - f.VarP(newIPNetValue(value, p), name, shorthand, usage) -} - -// IPNetVar defines an net.IPNet flag with specified name, default value, and usage string. -// The argument p points to an net.IPNet variable in which to store the value of the flag. -func IPNetVar(p *net.IPNet, name string, value net.IPNet, usage string) { - CommandLine.VarP(newIPNetValue(value, p), name, "", usage) -} - -// IPNetVarP is like IPNetVar, but accepts a shorthand letter that can be used after a single dash. -func IPNetVarP(p *net.IPNet, name, shorthand string, value net.IPNet, usage string) { - CommandLine.VarP(newIPNetValue(value, p), name, shorthand, usage) -} - -// IPNet defines an net.IPNet flag with specified name, default value, and usage string. -// The return value is the address of an net.IPNet variable that stores the value of the flag. -func (f *FlagSet) IPNet(name string, value net.IPNet, usage string) *net.IPNet { - p := new(net.IPNet) - f.IPNetVarP(p, name, "", value, usage) - return p -} - -// IPNetP is like IPNet, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) IPNetP(name, shorthand string, value net.IPNet, usage string) *net.IPNet { - p := new(net.IPNet) - f.IPNetVarP(p, name, shorthand, value, usage) - return p -} - -// IPNet defines an net.IPNet flag with specified name, default value, and usage string. -// The return value is the address of an net.IPNet variable that stores the value of the flag. -func IPNet(name string, value net.IPNet, usage string) *net.IPNet { - return CommandLine.IPNetP(name, "", value, usage) -} - -// IPNetP is like IPNet, but accepts a shorthand letter that can be used after a single dash. -func IPNetP(name, shorthand string, value net.IPNet, usage string) *net.IPNet { - return CommandLine.IPNetP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/string.go b/vendor/github.com/spf13/pflag/string.go deleted file mode 100644 index 04e0a26f..00000000 --- a/vendor/github.com/spf13/pflag/string.go +++ /dev/null @@ -1,80 +0,0 @@ -package pflag - -// -- string Value -type stringValue string - -func newStringValue(val string, p *string) *stringValue { - *p = val - return (*stringValue)(p) -} - -func (s *stringValue) Set(val string) error { - *s = stringValue(val) - return nil -} -func (s *stringValue) Type() string { - return "string" -} - -func (s *stringValue) String() string { return string(*s) } - -func stringConv(sval string) (interface{}, error) { - return sval, nil -} - -// GetString return the string value of a flag with the given name -func (f *FlagSet) GetString(name string) (string, error) { - val, err := f.getFlagType(name, "string", stringConv) - if err != nil { - return "", err - } - return val.(string), nil -} - -// StringVar defines a string flag with specified name, default value, and usage string. -// The argument p points to a string variable in which to store the value of the flag. -func (f *FlagSet) StringVar(p *string, name string, value string, usage string) { - f.VarP(newStringValue(value, p), name, "", usage) -} - -// StringVarP is like StringVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) StringVarP(p *string, name, shorthand string, value string, usage string) { - f.VarP(newStringValue(value, p), name, shorthand, usage) -} - -// StringVar defines a string flag with specified name, default value, and usage string. 
-// The argument p points to a string variable in which to store the value of the flag. -func StringVar(p *string, name string, value string, usage string) { - CommandLine.VarP(newStringValue(value, p), name, "", usage) -} - -// StringVarP is like StringVar, but accepts a shorthand letter that can be used after a single dash. -func StringVarP(p *string, name, shorthand string, value string, usage string) { - CommandLine.VarP(newStringValue(value, p), name, shorthand, usage) -} - -// String defines a string flag with specified name, default value, and usage string. -// The return value is the address of a string variable that stores the value of the flag. -func (f *FlagSet) String(name string, value string, usage string) *string { - p := new(string) - f.StringVarP(p, name, "", value, usage) - return p -} - -// StringP is like String, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) StringP(name, shorthand string, value string, usage string) *string { - p := new(string) - f.StringVarP(p, name, shorthand, value, usage) - return p -} - -// String defines a string flag with specified name, default value, and usage string. -// The return value is the address of a string variable that stores the value of the flag. -func String(name string, value string, usage string) *string { - return CommandLine.StringP(name, "", value, usage) -} - -// StringP is like String, but accepts a shorthand letter that can be used after a single dash. -func StringP(name, shorthand string, value string, usage string) *string { - return CommandLine.StringP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/string_array.go b/vendor/github.com/spf13/pflag/string_array.go deleted file mode 100644 index 4894af81..00000000 --- a/vendor/github.com/spf13/pflag/string_array.go +++ /dev/null @@ -1,129 +0,0 @@ -package pflag - -// -- stringArray Value -type stringArrayValue struct { - value *[]string - changed bool -} - -func newStringArrayValue(val []string, p *[]string) *stringArrayValue { - ssv := new(stringArrayValue) - ssv.value = p - *ssv.value = val - return ssv -} - -func (s *stringArrayValue) Set(val string) error { - if !s.changed { - *s.value = []string{val} - s.changed = true - } else { - *s.value = append(*s.value, val) - } - return nil -} - -func (s *stringArrayValue) Append(val string) error { - *s.value = append(*s.value, val) - return nil -} - -func (s *stringArrayValue) Replace(val []string) error { - out := make([]string, len(val)) - for i, d := range val { - var err error - out[i] = d - if err != nil { - return err - } - } - *s.value = out - return nil -} - -func (s *stringArrayValue) GetSlice() []string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = d - } - return out -} - -func (s *stringArrayValue) Type() string { - return "stringArray" -} - -func (s *stringArrayValue) String() string { - str, _ := writeAsCSV(*s.value) - return "[" + str + "]" -} - -func stringArrayConv(sval string) (interface{}, error) { - sval = sval[1 : len(sval)-1] - // An empty string would cause a array with one (empty) string - if len(sval) == 0 { - return []string{}, nil - } - return readAsCSV(sval) -} - -// GetStringArray return the []string value of a flag with the given name -func (f *FlagSet) GetStringArray(name string) ([]string, error) { - val, err := f.getFlagType(name, "stringArray", stringArrayConv) - if err != nil { - return []string{}, err - } - return val.([]string), nil -} - -// StringArrayVar defines a string flag with 
specified name, default value, and usage string. -// The argument p points to a []string variable in which to store the values of the multiple flags. -// The value of each argument will not try to be separated by comma. Use a StringSlice for that. -func (f *FlagSet) StringArrayVar(p *[]string, name string, value []string, usage string) { - f.VarP(newStringArrayValue(value, p), name, "", usage) -} - -// StringArrayVarP is like StringArrayVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) StringArrayVarP(p *[]string, name, shorthand string, value []string, usage string) { - f.VarP(newStringArrayValue(value, p), name, shorthand, usage) -} - -// StringArrayVar defines a string flag with specified name, default value, and usage string. -// The argument p points to a []string variable in which to store the value of the flag. -// The value of each argument will not try to be separated by comma. Use a StringSlice for that. -func StringArrayVar(p *[]string, name string, value []string, usage string) { - CommandLine.VarP(newStringArrayValue(value, p), name, "", usage) -} - -// StringArrayVarP is like StringArrayVar, but accepts a shorthand letter that can be used after a single dash. -func StringArrayVarP(p *[]string, name, shorthand string, value []string, usage string) { - CommandLine.VarP(newStringArrayValue(value, p), name, shorthand, usage) -} - -// StringArray defines a string flag with specified name, default value, and usage string. -// The return value is the address of a []string variable that stores the value of the flag. -// The value of each argument will not try to be separated by comma. Use a StringSlice for that. -func (f *FlagSet) StringArray(name string, value []string, usage string) *[]string { - p := []string{} - f.StringArrayVarP(&p, name, "", value, usage) - return &p -} - -// StringArrayP is like StringArray, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) StringArrayP(name, shorthand string, value []string, usage string) *[]string { - p := []string{} - f.StringArrayVarP(&p, name, shorthand, value, usage) - return &p -} - -// StringArray defines a string flag with specified name, default value, and usage string. -// The return value is the address of a []string variable that stores the value of the flag. -// The value of each argument will not try to be separated by comma. Use a StringSlice for that. -func StringArray(name string, value []string, usage string) *[]string { - return CommandLine.StringArrayP(name, "", value, usage) -} - -// StringArrayP is like StringArray, but accepts a shorthand letter that can be used after a single dash. 
-func StringArrayP(name, shorthand string, value []string, usage string) *[]string { - return CommandLine.StringArrayP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/string_slice.go b/vendor/github.com/spf13/pflag/string_slice.go deleted file mode 100644 index 3cb2e69d..00000000 --- a/vendor/github.com/spf13/pflag/string_slice.go +++ /dev/null @@ -1,163 +0,0 @@ -package pflag - -import ( - "bytes" - "encoding/csv" - "strings" -) - -// -- stringSlice Value -type stringSliceValue struct { - value *[]string - changed bool -} - -func newStringSliceValue(val []string, p *[]string) *stringSliceValue { - ssv := new(stringSliceValue) - ssv.value = p - *ssv.value = val - return ssv -} - -func readAsCSV(val string) ([]string, error) { - if val == "" { - return []string{}, nil - } - stringReader := strings.NewReader(val) - csvReader := csv.NewReader(stringReader) - return csvReader.Read() -} - -func writeAsCSV(vals []string) (string, error) { - b := &bytes.Buffer{} - w := csv.NewWriter(b) - err := w.Write(vals) - if err != nil { - return "", err - } - w.Flush() - return strings.TrimSuffix(b.String(), "\n"), nil -} - -func (s *stringSliceValue) Set(val string) error { - v, err := readAsCSV(val) - if err != nil { - return err - } - if !s.changed { - *s.value = v - } else { - *s.value = append(*s.value, v...) - } - s.changed = true - return nil -} - -func (s *stringSliceValue) Type() string { - return "stringSlice" -} - -func (s *stringSliceValue) String() string { - str, _ := writeAsCSV(*s.value) - return "[" + str + "]" -} - -func (s *stringSliceValue) Append(val string) error { - *s.value = append(*s.value, val) - return nil -} - -func (s *stringSliceValue) Replace(val []string) error { - *s.value = val - return nil -} - -func (s *stringSliceValue) GetSlice() []string { - return *s.value -} - -func stringSliceConv(sval string) (interface{}, error) { - sval = sval[1 : len(sval)-1] - // An empty string would cause a slice with one (empty) string - if len(sval) == 0 { - return []string{}, nil - } - return readAsCSV(sval) -} - -// GetStringSlice return the []string value of a flag with the given name -func (f *FlagSet) GetStringSlice(name string) ([]string, error) { - val, err := f.getFlagType(name, "stringSlice", stringSliceConv) - if err != nil { - return []string{}, err - } - return val.([]string), nil -} - -// StringSliceVar defines a string flag with specified name, default value, and usage string. -// The argument p points to a []string variable in which to store the value of the flag. -// Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly. -// For example: -// --ss="v1,v2" --ss="v3" -// will result in -// []string{"v1", "v2", "v3"} -func (f *FlagSet) StringSliceVar(p *[]string, name string, value []string, usage string) { - f.VarP(newStringSliceValue(value, p), name, "", usage) -} - -// StringSliceVarP is like StringSliceVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) StringSliceVarP(p *[]string, name, shorthand string, value []string, usage string) { - f.VarP(newStringSliceValue(value, p), name, shorthand, usage) -} - -// StringSliceVar defines a string flag with specified name, default value, and usage string. -// The argument p points to a []string variable in which to store the value of the flag. -// Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly. 
-// For example: -// --ss="v1,v2" --ss="v3" -// will result in -// []string{"v1", "v2", "v3"} -func StringSliceVar(p *[]string, name string, value []string, usage string) { - CommandLine.VarP(newStringSliceValue(value, p), name, "", usage) -} - -// StringSliceVarP is like StringSliceVar, but accepts a shorthand letter that can be used after a single dash. -func StringSliceVarP(p *[]string, name, shorthand string, value []string, usage string) { - CommandLine.VarP(newStringSliceValue(value, p), name, shorthand, usage) -} - -// StringSlice defines a string flag with specified name, default value, and usage string. -// The return value is the address of a []string variable that stores the value of the flag. -// Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly. -// For example: -// --ss="v1,v2" --ss="v3" -// will result in -// []string{"v1", "v2", "v3"} -func (f *FlagSet) StringSlice(name string, value []string, usage string) *[]string { - p := []string{} - f.StringSliceVarP(&p, name, "", value, usage) - return &p -} - -// StringSliceP is like StringSlice, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) StringSliceP(name, shorthand string, value []string, usage string) *[]string { - p := []string{} - f.StringSliceVarP(&p, name, shorthand, value, usage) - return &p -} - -// StringSlice defines a string flag with specified name, default value, and usage string. -// The return value is the address of a []string variable that stores the value of the flag. -// Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly. -// For example: -// --ss="v1,v2" --ss="v3" -// will result in -// []string{"v1", "v2", "v3"} -func StringSlice(name string, value []string, usage string) *[]string { - return CommandLine.StringSliceP(name, "", value, usage) -} - -// StringSliceP is like StringSlice, but accepts a shorthand letter that can be used after a single dash. 
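The doc comments above spell out the one behavioral difference between the string_array.go file removed earlier and the string_slice.go file being removed here: StringSlice flags CSV-split each argument, StringArray flags keep it verbatim. A sketch contrasting the two (illustrative, not part of the patch; the string_slice.go hunk continues below):

    package main

    import (
    	"fmt"

    	"github.com/spf13/pflag"
    )

    func main() {
    	fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
    	ss := fs.StringSlice("ss", nil, "values, split on commas")
    	sa := fs.StringArray("sa", nil, "values, kept verbatim")

    	_ = fs.Parse([]string{"--ss=v1,v2", "--ss=v3", "--sa=v1,v2", "--sa=v3"})
    	fmt.Println(*ss) // [v1 v2 v3]
    	fmt.Println(*sa) // [v1,v2 v3]
    }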
-func StringSliceP(name, shorthand string, value []string, usage string) *[]string { - return CommandLine.StringSliceP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/string_to_int.go b/vendor/github.com/spf13/pflag/string_to_int.go deleted file mode 100644 index 5ceda396..00000000 --- a/vendor/github.com/spf13/pflag/string_to_int.go +++ /dev/null @@ -1,149 +0,0 @@ -package pflag - -import ( - "bytes" - "fmt" - "strconv" - "strings" -) - -// -- stringToInt Value -type stringToIntValue struct { - value *map[string]int - changed bool -} - -func newStringToIntValue(val map[string]int, p *map[string]int) *stringToIntValue { - ssv := new(stringToIntValue) - ssv.value = p - *ssv.value = val - return ssv -} - -// Format: a=1,b=2 -func (s *stringToIntValue) Set(val string) error { - ss := strings.Split(val, ",") - out := make(map[string]int, len(ss)) - for _, pair := range ss { - kv := strings.SplitN(pair, "=", 2) - if len(kv) != 2 { - return fmt.Errorf("%s must be formatted as key=value", pair) - } - var err error - out[kv[0]], err = strconv.Atoi(kv[1]) - if err != nil { - return err - } - } - if !s.changed { - *s.value = out - } else { - for k, v := range out { - (*s.value)[k] = v - } - } - s.changed = true - return nil -} - -func (s *stringToIntValue) Type() string { - return "stringToInt" -} - -func (s *stringToIntValue) String() string { - var buf bytes.Buffer - i := 0 - for k, v := range *s.value { - if i > 0 { - buf.WriteRune(',') - } - buf.WriteString(k) - buf.WriteRune('=') - buf.WriteString(strconv.Itoa(v)) - i++ - } - return "[" + buf.String() + "]" -} - -func stringToIntConv(val string) (interface{}, error) { - val = strings.Trim(val, "[]") - // An empty string would cause an empty map - if len(val) == 0 { - return map[string]int{}, nil - } - ss := strings.Split(val, ",") - out := make(map[string]int, len(ss)) - for _, pair := range ss { - kv := strings.SplitN(pair, "=", 2) - if len(kv) != 2 { - return nil, fmt.Errorf("%s must be formatted as key=value", pair) - } - var err error - out[kv[0]], err = strconv.Atoi(kv[1]) - if err != nil { - return nil, err - } - } - return out, nil -} - -// GetStringToInt return the map[string]int value of a flag with the given name -func (f *FlagSet) GetStringToInt(name string) (map[string]int, error) { - val, err := f.getFlagType(name, "stringToInt", stringToIntConv) - if err != nil { - return map[string]int{}, err - } - return val.(map[string]int), nil -} - -// StringToIntVar defines a string flag with specified name, default value, and usage string. -// The argument p points to a map[string]int variable in which to store the values of the multiple flags. -// The value of each argument will not try to be separated by comma -func (f *FlagSet) StringToIntVar(p *map[string]int, name string, value map[string]int, usage string) { - f.VarP(newStringToIntValue(value, p), name, "", usage) -} - -// StringToIntVarP is like StringToIntVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) StringToIntVarP(p *map[string]int, name, shorthand string, value map[string]int, usage string) { - f.VarP(newStringToIntValue(value, p), name, shorthand, usage) -} - -// StringToIntVar defines a string flag with specified name, default value, and usage string. -// The argument p points to a map[string]int variable in which to store the value of the flag. 
-// The value of each argument will not try to be separated by comma -func StringToIntVar(p *map[string]int, name string, value map[string]int, usage string) { - CommandLine.VarP(newStringToIntValue(value, p), name, "", usage) -} - -// StringToIntVarP is like StringToIntVar, but accepts a shorthand letter that can be used after a single dash. -func StringToIntVarP(p *map[string]int, name, shorthand string, value map[string]int, usage string) { - CommandLine.VarP(newStringToIntValue(value, p), name, shorthand, usage) -} - -// StringToInt defines a string flag with specified name, default value, and usage string. -// The return value is the address of a map[string]int variable that stores the value of the flag. -// The value of each argument will not try to be separated by comma -func (f *FlagSet) StringToInt(name string, value map[string]int, usage string) *map[string]int { - p := map[string]int{} - f.StringToIntVarP(&p, name, "", value, usage) - return &p -} - -// StringToIntP is like StringToInt, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) StringToIntP(name, shorthand string, value map[string]int, usage string) *map[string]int { - p := map[string]int{} - f.StringToIntVarP(&p, name, shorthand, value, usage) - return &p -} - -// StringToInt defines a string flag with specified name, default value, and usage string. -// The return value is the address of a map[string]int variable that stores the value of the flag. -// The value of each argument will not try to be separated by comma -func StringToInt(name string, value map[string]int, usage string) *map[string]int { - return CommandLine.StringToIntP(name, "", value, usage) -} - -// StringToIntP is like StringToInt, but accepts a shorthand letter that can be used after a single dash. 
-func StringToIntP(name, shorthand string, value map[string]int, usage string) *map[string]int { - return CommandLine.StringToIntP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/string_to_int64.go b/vendor/github.com/spf13/pflag/string_to_int64.go deleted file mode 100644 index a807a04a..00000000 --- a/vendor/github.com/spf13/pflag/string_to_int64.go +++ /dev/null @@ -1,149 +0,0 @@ -package pflag - -import ( - "bytes" - "fmt" - "strconv" - "strings" -) - -// -- stringToInt64 Value -type stringToInt64Value struct { - value *map[string]int64 - changed bool -} - -func newStringToInt64Value(val map[string]int64, p *map[string]int64) *stringToInt64Value { - ssv := new(stringToInt64Value) - ssv.value = p - *ssv.value = val - return ssv -} - -// Format: a=1,b=2 -func (s *stringToInt64Value) Set(val string) error { - ss := strings.Split(val, ",") - out := make(map[string]int64, len(ss)) - for _, pair := range ss { - kv := strings.SplitN(pair, "=", 2) - if len(kv) != 2 { - return fmt.Errorf("%s must be formatted as key=value", pair) - } - var err error - out[kv[0]], err = strconv.ParseInt(kv[1], 10, 64) - if err != nil { - return err - } - } - if !s.changed { - *s.value = out - } else { - for k, v := range out { - (*s.value)[k] = v - } - } - s.changed = true - return nil -} - -func (s *stringToInt64Value) Type() string { - return "stringToInt64" -} - -func (s *stringToInt64Value) String() string { - var buf bytes.Buffer - i := 0 - for k, v := range *s.value { - if i > 0 { - buf.WriteRune(',') - } - buf.WriteString(k) - buf.WriteRune('=') - buf.WriteString(strconv.FormatInt(v, 10)) - i++ - } - return "[" + buf.String() + "]" -} - -func stringToInt64Conv(val string) (interface{}, error) { - val = strings.Trim(val, "[]") - // An empty string would cause an empty map - if len(val) == 0 { - return map[string]int64{}, nil - } - ss := strings.Split(val, ",") - out := make(map[string]int64, len(ss)) - for _, pair := range ss { - kv := strings.SplitN(pair, "=", 2) - if len(kv) != 2 { - return nil, fmt.Errorf("%s must be formatted as key=value", pair) - } - var err error - out[kv[0]], err = strconv.ParseInt(kv[1], 10, 64) - if err != nil { - return nil, err - } - } - return out, nil -} - -// GetStringToInt64 return the map[string]int64 value of a flag with the given name -func (f *FlagSet) GetStringToInt64(name string) (map[string]int64, error) { - val, err := f.getFlagType(name, "stringToInt64", stringToInt64Conv) - if err != nil { - return map[string]int64{}, err - } - return val.(map[string]int64), nil -} - -// StringToInt64Var defines a string flag with specified name, default value, and usage string. -// The argument p point64s to a map[string]int64 variable in which to store the values of the multiple flags. -// The value of each argument will not try to be separated by comma -func (f *FlagSet) StringToInt64Var(p *map[string]int64, name string, value map[string]int64, usage string) { - f.VarP(newStringToInt64Value(value, p), name, "", usage) -} - -// StringToInt64VarP is like StringToInt64Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) StringToInt64VarP(p *map[string]int64, name, shorthand string, value map[string]int64, usage string) { - f.VarP(newStringToInt64Value(value, p), name, shorthand, usage) -} - -// StringToInt64Var defines a string flag with specified name, default value, and usage string. -// The argument p point64s to a map[string]int64 variable in which to store the value of the flag. 
-// The value of each argument will not try to be separated by comma -func StringToInt64Var(p *map[string]int64, name string, value map[string]int64, usage string) { - CommandLine.VarP(newStringToInt64Value(value, p), name, "", usage) -} - -// StringToInt64VarP is like StringToInt64Var, but accepts a shorthand letter that can be used after a single dash. -func StringToInt64VarP(p *map[string]int64, name, shorthand string, value map[string]int64, usage string) { - CommandLine.VarP(newStringToInt64Value(value, p), name, shorthand, usage) -} - -// StringToInt64 defines a string flag with specified name, default value, and usage string. -// The return value is the address of a map[string]int64 variable that stores the value of the flag. -// The value of each argument will not try to be separated by comma -func (f *FlagSet) StringToInt64(name string, value map[string]int64, usage string) *map[string]int64 { - p := map[string]int64{} - f.StringToInt64VarP(&p, name, "", value, usage) - return &p -} - -// StringToInt64P is like StringToInt64, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) StringToInt64P(name, shorthand string, value map[string]int64, usage string) *map[string]int64 { - p := map[string]int64{} - f.StringToInt64VarP(&p, name, shorthand, value, usage) - return &p -} - -// StringToInt64 defines a string flag with specified name, default value, and usage string. -// The return value is the address of a map[string]int64 variable that stores the value of the flag. -// The value of each argument will not try to be separated by comma -func StringToInt64(name string, value map[string]int64, usage string) *map[string]int64 { - return CommandLine.StringToInt64P(name, "", value, usage) -} - -// StringToInt64P is like StringToInt64, but accepts a shorthand letter that can be used after a single dash. 
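string_to_int.go above and string_to_int64.go here provide map-valued flags whose arguments parse as key=value pairs in the "a=1,b=2" format noted in their Set methods; repeated flags merge into one map, with later values winning. A sketch (illustrative, not part of the patch; the hunk continues below):

    package main

    import (
    	"fmt"

    	"github.com/spf13/pflag"
    )

    func main() {
    	fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
    	limits := fs.StringToInt("limits", nil, "per-key limits as key=value pairs")

    	// Repeats merge into the same map; a later key overwrites an earlier one.
    	_ = fs.Parse([]string{"--limits=a=1,b=2", "--limits=b=3"})
    	fmt.Println((*limits)["a"], (*limits)["b"]) // 1 3
    }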
-func StringToInt64P(name, shorthand string, value map[string]int64, usage string) *map[string]int64 { - return CommandLine.StringToInt64P(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/string_to_string.go b/vendor/github.com/spf13/pflag/string_to_string.go deleted file mode 100644 index 890a01af..00000000 --- a/vendor/github.com/spf13/pflag/string_to_string.go +++ /dev/null @@ -1,160 +0,0 @@ -package pflag - -import ( - "bytes" - "encoding/csv" - "fmt" - "strings" -) - -// -- stringToString Value -type stringToStringValue struct { - value *map[string]string - changed bool -} - -func newStringToStringValue(val map[string]string, p *map[string]string) *stringToStringValue { - ssv := new(stringToStringValue) - ssv.value = p - *ssv.value = val - return ssv -} - -// Format: a=1,b=2 -func (s *stringToStringValue) Set(val string) error { - var ss []string - n := strings.Count(val, "=") - switch n { - case 0: - return fmt.Errorf("%s must be formatted as key=value", val) - case 1: - ss = append(ss, strings.Trim(val, `"`)) - default: - r := csv.NewReader(strings.NewReader(val)) - var err error - ss, err = r.Read() - if err != nil { - return err - } - } - - out := make(map[string]string, len(ss)) - for _, pair := range ss { - kv := strings.SplitN(pair, "=", 2) - if len(kv) != 2 { - return fmt.Errorf("%s must be formatted as key=value", pair) - } - out[kv[0]] = kv[1] - } - if !s.changed { - *s.value = out - } else { - for k, v := range out { - (*s.value)[k] = v - } - } - s.changed = true - return nil -} - -func (s *stringToStringValue) Type() string { - return "stringToString" -} - -func (s *stringToStringValue) String() string { - records := make([]string, 0, len(*s.value)>>1) - for k, v := range *s.value { - records = append(records, k+"="+v) - } - - var buf bytes.Buffer - w := csv.NewWriter(&buf) - if err := w.Write(records); err != nil { - panic(err) - } - w.Flush() - return "[" + strings.TrimSpace(buf.String()) + "]" -} - -func stringToStringConv(val string) (interface{}, error) { - val = strings.Trim(val, "[]") - // An empty string would cause an empty map - if len(val) == 0 { - return map[string]string{}, nil - } - r := csv.NewReader(strings.NewReader(val)) - ss, err := r.Read() - if err != nil { - return nil, err - } - out := make(map[string]string, len(ss)) - for _, pair := range ss { - kv := strings.SplitN(pair, "=", 2) - if len(kv) != 2 { - return nil, fmt.Errorf("%s must be formatted as key=value", pair) - } - out[kv[0]] = kv[1] - } - return out, nil -} - -// GetStringToString return the map[string]string value of a flag with the given name -func (f *FlagSet) GetStringToString(name string) (map[string]string, error) { - val, err := f.getFlagType(name, "stringToString", stringToStringConv) - if err != nil { - return map[string]string{}, err - } - return val.(map[string]string), nil -} - -// StringToStringVar defines a string flag with specified name, default value, and usage string. -// The argument p points to a map[string]string variable in which to store the values of the multiple flags. -// The value of each argument will not try to be separated by comma -func (f *FlagSet) StringToStringVar(p *map[string]string, name string, value map[string]string, usage string) { - f.VarP(newStringToStringValue(value, p), name, "", usage) -} - -// StringToStringVarP is like StringToStringVar, but accepts a shorthand letter that can be used after a single dash. 
-func (f *FlagSet) StringToStringVarP(p *map[string]string, name, shorthand string, value map[string]string, usage string) { - f.VarP(newStringToStringValue(value, p), name, shorthand, usage) -} - -// StringToStringVar defines a string flag with specified name, default value, and usage string. -// The argument p points to a map[string]string variable in which to store the value of the flag. -// The value of each argument will not try to be separated by comma -func StringToStringVar(p *map[string]string, name string, value map[string]string, usage string) { - CommandLine.VarP(newStringToStringValue(value, p), name, "", usage) -} - -// StringToStringVarP is like StringToStringVar, but accepts a shorthand letter that can be used after a single dash. -func StringToStringVarP(p *map[string]string, name, shorthand string, value map[string]string, usage string) { - CommandLine.VarP(newStringToStringValue(value, p), name, shorthand, usage) -} - -// StringToString defines a string flag with specified name, default value, and usage string. -// The return value is the address of a map[string]string variable that stores the value of the flag. -// The value of each argument will not try to be separated by comma -func (f *FlagSet) StringToString(name string, value map[string]string, usage string) *map[string]string { - p := map[string]string{} - f.StringToStringVarP(&p, name, "", value, usage) - return &p -} - -// StringToStringP is like StringToString, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) StringToStringP(name, shorthand string, value map[string]string, usage string) *map[string]string { - p := map[string]string{} - f.StringToStringVarP(&p, name, shorthand, value, usage) - return &p -} - -// StringToString defines a string flag with specified name, default value, and usage string. -// The return value is the address of a map[string]string variable that stores the value of the flag. -// The value of each argument will not try to be separated by comma -func StringToString(name string, value map[string]string, usage string) *map[string]string { - return CommandLine.StringToStringP(name, "", value, usage) -} - -// StringToStringP is like StringToString, but accepts a shorthand letter that can be used after a single dash. 
-func StringToStringP(name, shorthand string, value map[string]string, usage string) *map[string]string { - return CommandLine.StringToStringP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/uint.go b/vendor/github.com/spf13/pflag/uint.go deleted file mode 100644 index dcbc2b75..00000000 --- a/vendor/github.com/spf13/pflag/uint.go +++ /dev/null @@ -1,88 +0,0 @@ -package pflag - -import "strconv" - -// -- uint Value -type uintValue uint - -func newUintValue(val uint, p *uint) *uintValue { - *p = val - return (*uintValue)(p) -} - -func (i *uintValue) Set(s string) error { - v, err := strconv.ParseUint(s, 0, 64) - *i = uintValue(v) - return err -} - -func (i *uintValue) Type() string { - return "uint" -} - -func (i *uintValue) String() string { return strconv.FormatUint(uint64(*i), 10) } - -func uintConv(sval string) (interface{}, error) { - v, err := strconv.ParseUint(sval, 0, 0) - if err != nil { - return 0, err - } - return uint(v), nil -} - -// GetUint return the uint value of a flag with the given name -func (f *FlagSet) GetUint(name string) (uint, error) { - val, err := f.getFlagType(name, "uint", uintConv) - if err != nil { - return 0, err - } - return val.(uint), nil -} - -// UintVar defines a uint flag with specified name, default value, and usage string. -// The argument p points to a uint variable in which to store the value of the flag. -func (f *FlagSet) UintVar(p *uint, name string, value uint, usage string) { - f.VarP(newUintValue(value, p), name, "", usage) -} - -// UintVarP is like UintVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) UintVarP(p *uint, name, shorthand string, value uint, usage string) { - f.VarP(newUintValue(value, p), name, shorthand, usage) -} - -// UintVar defines a uint flag with specified name, default value, and usage string. -// The argument p points to a uint variable in which to store the value of the flag. -func UintVar(p *uint, name string, value uint, usage string) { - CommandLine.VarP(newUintValue(value, p), name, "", usage) -} - -// UintVarP is like UintVar, but accepts a shorthand letter that can be used after a single dash. -func UintVarP(p *uint, name, shorthand string, value uint, usage string) { - CommandLine.VarP(newUintValue(value, p), name, shorthand, usage) -} - -// Uint defines a uint flag with specified name, default value, and usage string. -// The return value is the address of a uint variable that stores the value of the flag. -func (f *FlagSet) Uint(name string, value uint, usage string) *uint { - p := new(uint) - f.UintVarP(p, name, "", value, usage) - return p -} - -// UintP is like Uint, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) UintP(name, shorthand string, value uint, usage string) *uint { - p := new(uint) - f.UintVarP(p, name, shorthand, value, usage) - return p -} - -// Uint defines a uint flag with specified name, default value, and usage string. -// The return value is the address of a uint variable that stores the value of the flag. -func Uint(name string, value uint, usage string) *uint { - return CommandLine.UintP(name, "", value, usage) -} - -// UintP is like Uint, but accepts a shorthand letter that can be used after a single dash. 
-func UintP(name, shorthand string, value uint, usage string) *uint { - return CommandLine.UintP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/uint16.go b/vendor/github.com/spf13/pflag/uint16.go deleted file mode 100644 index 7e9914ed..00000000 --- a/vendor/github.com/spf13/pflag/uint16.go +++ /dev/null @@ -1,88 +0,0 @@ -package pflag - -import "strconv" - -// -- uint16 value -type uint16Value uint16 - -func newUint16Value(val uint16, p *uint16) *uint16Value { - *p = val - return (*uint16Value)(p) -} - -func (i *uint16Value) Set(s string) error { - v, err := strconv.ParseUint(s, 0, 16) - *i = uint16Value(v) - return err -} - -func (i *uint16Value) Type() string { - return "uint16" -} - -func (i *uint16Value) String() string { return strconv.FormatUint(uint64(*i), 10) } - -func uint16Conv(sval string) (interface{}, error) { - v, err := strconv.ParseUint(sval, 0, 16) - if err != nil { - return 0, err - } - return uint16(v), nil -} - -// GetUint16 return the uint16 value of a flag with the given name -func (f *FlagSet) GetUint16(name string) (uint16, error) { - val, err := f.getFlagType(name, "uint16", uint16Conv) - if err != nil { - return 0, err - } - return val.(uint16), nil -} - -// Uint16Var defines a uint flag with specified name, default value, and usage string. -// The argument p points to a uint variable in which to store the value of the flag. -func (f *FlagSet) Uint16Var(p *uint16, name string, value uint16, usage string) { - f.VarP(newUint16Value(value, p), name, "", usage) -} - -// Uint16VarP is like Uint16Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Uint16VarP(p *uint16, name, shorthand string, value uint16, usage string) { - f.VarP(newUint16Value(value, p), name, shorthand, usage) -} - -// Uint16Var defines a uint flag with specified name, default value, and usage string. -// The argument p points to a uint variable in which to store the value of the flag. -func Uint16Var(p *uint16, name string, value uint16, usage string) { - CommandLine.VarP(newUint16Value(value, p), name, "", usage) -} - -// Uint16VarP is like Uint16Var, but accepts a shorthand letter that can be used after a single dash. -func Uint16VarP(p *uint16, name, shorthand string, value uint16, usage string) { - CommandLine.VarP(newUint16Value(value, p), name, shorthand, usage) -} - -// Uint16 defines a uint flag with specified name, default value, and usage string. -// The return value is the address of a uint variable that stores the value of the flag. -func (f *FlagSet) Uint16(name string, value uint16, usage string) *uint16 { - p := new(uint16) - f.Uint16VarP(p, name, "", value, usage) - return p -} - -// Uint16P is like Uint16, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Uint16P(name, shorthand string, value uint16, usage string) *uint16 { - p := new(uint16) - f.Uint16VarP(p, name, shorthand, value, usage) - return p -} - -// Uint16 defines a uint flag with specified name, default value, and usage string. -// The return value is the address of a uint variable that stores the value of the flag. -func Uint16(name string, value uint16, usage string) *uint16 { - return CommandLine.Uint16P(name, "", value, usage) -} - -// Uint16P is like Uint16, but accepts a shorthand letter that can be used after a single dash. 
-func Uint16P(name, shorthand string, value uint16, usage string) *uint16 { - return CommandLine.Uint16P(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/uint32.go b/vendor/github.com/spf13/pflag/uint32.go deleted file mode 100644 index d8024539..00000000 --- a/vendor/github.com/spf13/pflag/uint32.go +++ /dev/null @@ -1,88 +0,0 @@ -package pflag - -import "strconv" - -// -- uint32 value -type uint32Value uint32 - -func newUint32Value(val uint32, p *uint32) *uint32Value { - *p = val - return (*uint32Value)(p) -} - -func (i *uint32Value) Set(s string) error { - v, err := strconv.ParseUint(s, 0, 32) - *i = uint32Value(v) - return err -} - -func (i *uint32Value) Type() string { - return "uint32" -} - -func (i *uint32Value) String() string { return strconv.FormatUint(uint64(*i), 10) } - -func uint32Conv(sval string) (interface{}, error) { - v, err := strconv.ParseUint(sval, 0, 32) - if err != nil { - return 0, err - } - return uint32(v), nil -} - -// GetUint32 return the uint32 value of a flag with the given name -func (f *FlagSet) GetUint32(name string) (uint32, error) { - val, err := f.getFlagType(name, "uint32", uint32Conv) - if err != nil { - return 0, err - } - return val.(uint32), nil -} - -// Uint32Var defines a uint32 flag with specified name, default value, and usage string. -// The argument p points to a uint32 variable in which to store the value of the flag. -func (f *FlagSet) Uint32Var(p *uint32, name string, value uint32, usage string) { - f.VarP(newUint32Value(value, p), name, "", usage) -} - -// Uint32VarP is like Uint32Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Uint32VarP(p *uint32, name, shorthand string, value uint32, usage string) { - f.VarP(newUint32Value(value, p), name, shorthand, usage) -} - -// Uint32Var defines a uint32 flag with specified name, default value, and usage string. -// The argument p points to a uint32 variable in which to store the value of the flag. -func Uint32Var(p *uint32, name string, value uint32, usage string) { - CommandLine.VarP(newUint32Value(value, p), name, "", usage) -} - -// Uint32VarP is like Uint32Var, but accepts a shorthand letter that can be used after a single dash. -func Uint32VarP(p *uint32, name, shorthand string, value uint32, usage string) { - CommandLine.VarP(newUint32Value(value, p), name, shorthand, usage) -} - -// Uint32 defines a uint32 flag with specified name, default value, and usage string. -// The return value is the address of a uint32 variable that stores the value of the flag. -func (f *FlagSet) Uint32(name string, value uint32, usage string) *uint32 { - p := new(uint32) - f.Uint32VarP(p, name, "", value, usage) - return p -} - -// Uint32P is like Uint32, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Uint32P(name, shorthand string, value uint32, usage string) *uint32 { - p := new(uint32) - f.Uint32VarP(p, name, shorthand, value, usage) - return p -} - -// Uint32 defines a uint32 flag with specified name, default value, and usage string. -// The return value is the address of a uint32 variable that stores the value of the flag. -func Uint32(name string, value uint32, usage string) *uint32 { - return CommandLine.Uint32P(name, "", value, usage) -} - -// Uint32P is like Uint32, but accepts a shorthand letter that can be used after a single dash. 
-func Uint32P(name, shorthand string, value uint32, usage string) *uint32 { - return CommandLine.Uint32P(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/uint64.go b/vendor/github.com/spf13/pflag/uint64.go deleted file mode 100644 index f62240f2..00000000 --- a/vendor/github.com/spf13/pflag/uint64.go +++ /dev/null @@ -1,88 +0,0 @@ -package pflag - -import "strconv" - -// -- uint64 Value -type uint64Value uint64 - -func newUint64Value(val uint64, p *uint64) *uint64Value { - *p = val - return (*uint64Value)(p) -} - -func (i *uint64Value) Set(s string) error { - v, err := strconv.ParseUint(s, 0, 64) - *i = uint64Value(v) - return err -} - -func (i *uint64Value) Type() string { - return "uint64" -} - -func (i *uint64Value) String() string { return strconv.FormatUint(uint64(*i), 10) } - -func uint64Conv(sval string) (interface{}, error) { - v, err := strconv.ParseUint(sval, 0, 64) - if err != nil { - return 0, err - } - return uint64(v), nil -} - -// GetUint64 return the uint64 value of a flag with the given name -func (f *FlagSet) GetUint64(name string) (uint64, error) { - val, err := f.getFlagType(name, "uint64", uint64Conv) - if err != nil { - return 0, err - } - return val.(uint64), nil -} - -// Uint64Var defines a uint64 flag with specified name, default value, and usage string. -// The argument p points to a uint64 variable in which to store the value of the flag. -func (f *FlagSet) Uint64Var(p *uint64, name string, value uint64, usage string) { - f.VarP(newUint64Value(value, p), name, "", usage) -} - -// Uint64VarP is like Uint64Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Uint64VarP(p *uint64, name, shorthand string, value uint64, usage string) { - f.VarP(newUint64Value(value, p), name, shorthand, usage) -} - -// Uint64Var defines a uint64 flag with specified name, default value, and usage string. -// The argument p points to a uint64 variable in which to store the value of the flag. -func Uint64Var(p *uint64, name string, value uint64, usage string) { - CommandLine.VarP(newUint64Value(value, p), name, "", usage) -} - -// Uint64VarP is like Uint64Var, but accepts a shorthand letter that can be used after a single dash. -func Uint64VarP(p *uint64, name, shorthand string, value uint64, usage string) { - CommandLine.VarP(newUint64Value(value, p), name, shorthand, usage) -} - -// Uint64 defines a uint64 flag with specified name, default value, and usage string. -// The return value is the address of a uint64 variable that stores the value of the flag. -func (f *FlagSet) Uint64(name string, value uint64, usage string) *uint64 { - p := new(uint64) - f.Uint64VarP(p, name, "", value, usage) - return p -} - -// Uint64P is like Uint64, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Uint64P(name, shorthand string, value uint64, usage string) *uint64 { - p := new(uint64) - f.Uint64VarP(p, name, shorthand, value, usage) - return p -} - -// Uint64 defines a uint64 flag with specified name, default value, and usage string. -// The return value is the address of a uint64 variable that stores the value of the flag. -func Uint64(name string, value uint64, usage string) *uint64 { - return CommandLine.Uint64P(name, "", value, usage) -} - -// Uint64P is like Uint64, but accepts a shorthand letter that can be used after a single dash. 
-func Uint64P(name, shorthand string, value uint64, usage string) *uint64 { - return CommandLine.Uint64P(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/uint8.go b/vendor/github.com/spf13/pflag/uint8.go deleted file mode 100644 index bb0e83c1..00000000 --- a/vendor/github.com/spf13/pflag/uint8.go +++ /dev/null @@ -1,88 +0,0 @@ -package pflag - -import "strconv" - -// -- uint8 Value -type uint8Value uint8 - -func newUint8Value(val uint8, p *uint8) *uint8Value { - *p = val - return (*uint8Value)(p) -} - -func (i *uint8Value) Set(s string) error { - v, err := strconv.ParseUint(s, 0, 8) - *i = uint8Value(v) - return err -} - -func (i *uint8Value) Type() string { - return "uint8" -} - -func (i *uint8Value) String() string { return strconv.FormatUint(uint64(*i), 10) } - -func uint8Conv(sval string) (interface{}, error) { - v, err := strconv.ParseUint(sval, 0, 8) - if err != nil { - return 0, err - } - return uint8(v), nil -} - -// GetUint8 return the uint8 value of a flag with the given name -func (f *FlagSet) GetUint8(name string) (uint8, error) { - val, err := f.getFlagType(name, "uint8", uint8Conv) - if err != nil { - return 0, err - } - return val.(uint8), nil -} - -// Uint8Var defines a uint8 flag with specified name, default value, and usage string. -// The argument p points to a uint8 variable in which to store the value of the flag. -func (f *FlagSet) Uint8Var(p *uint8, name string, value uint8, usage string) { - f.VarP(newUint8Value(value, p), name, "", usage) -} - -// Uint8VarP is like Uint8Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Uint8VarP(p *uint8, name, shorthand string, value uint8, usage string) { - f.VarP(newUint8Value(value, p), name, shorthand, usage) -} - -// Uint8Var defines a uint8 flag with specified name, default value, and usage string. -// The argument p points to a uint8 variable in which to store the value of the flag. -func Uint8Var(p *uint8, name string, value uint8, usage string) { - CommandLine.VarP(newUint8Value(value, p), name, "", usage) -} - -// Uint8VarP is like Uint8Var, but accepts a shorthand letter that can be used after a single dash. -func Uint8VarP(p *uint8, name, shorthand string, value uint8, usage string) { - CommandLine.VarP(newUint8Value(value, p), name, shorthand, usage) -} - -// Uint8 defines a uint8 flag with specified name, default value, and usage string. -// The return value is the address of a uint8 variable that stores the value of the flag. -func (f *FlagSet) Uint8(name string, value uint8, usage string) *uint8 { - p := new(uint8) - f.Uint8VarP(p, name, "", value, usage) - return p -} - -// Uint8P is like Uint8, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Uint8P(name, shorthand string, value uint8, usage string) *uint8 { - p := new(uint8) - f.Uint8VarP(p, name, shorthand, value, usage) - return p -} - -// Uint8 defines a uint8 flag with specified name, default value, and usage string. -// The return value is the address of a uint8 variable that stores the value of the flag. -func Uint8(name string, value uint8, usage string) *uint8 { - return CommandLine.Uint8P(name, "", value, usage) -} - -// Uint8P is like Uint8, but accepts a shorthand letter that can be used after a single dash. 
-func Uint8P(name, shorthand string, value uint8, usage string) *uint8 { - return CommandLine.Uint8P(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/uint_slice.go b/vendor/github.com/spf13/pflag/uint_slice.go deleted file mode 100644 index 5fa92483..00000000 --- a/vendor/github.com/spf13/pflag/uint_slice.go +++ /dev/null @@ -1,168 +0,0 @@ -package pflag - -import ( - "fmt" - "strconv" - "strings" -) - -// -- uintSlice Value -type uintSliceValue struct { - value *[]uint - changed bool -} - -func newUintSliceValue(val []uint, p *[]uint) *uintSliceValue { - uisv := new(uintSliceValue) - uisv.value = p - *uisv.value = val - return uisv -} - -func (s *uintSliceValue) Set(val string) error { - ss := strings.Split(val, ",") - out := make([]uint, len(ss)) - for i, d := range ss { - u, err := strconv.ParseUint(d, 10, 0) - if err != nil { - return err - } - out[i] = uint(u) - } - if !s.changed { - *s.value = out - } else { - *s.value = append(*s.value, out...) - } - s.changed = true - return nil -} - -func (s *uintSliceValue) Type() string { - return "uintSlice" -} - -func (s *uintSliceValue) String() string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = fmt.Sprintf("%d", d) - } - return "[" + strings.Join(out, ",") + "]" -} - -func (s *uintSliceValue) fromString(val string) (uint, error) { - t, err := strconv.ParseUint(val, 10, 0) - if err != nil { - return 0, err - } - return uint(t), nil -} - -func (s *uintSliceValue) toString(val uint) string { - return fmt.Sprintf("%d", val) -} - -func (s *uintSliceValue) Append(val string) error { - i, err := s.fromString(val) - if err != nil { - return err - } - *s.value = append(*s.value, i) - return nil -} - -func (s *uintSliceValue) Replace(val []string) error { - out := make([]uint, len(val)) - for i, d := range val { - var err error - out[i], err = s.fromString(d) - if err != nil { - return err - } - } - *s.value = out - return nil -} - -func (s *uintSliceValue) GetSlice() []string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = s.toString(d) - } - return out -} - -func uintSliceConv(val string) (interface{}, error) { - val = strings.Trim(val, "[]") - // Empty string would cause a slice with one (empty) entry - if len(val) == 0 { - return []uint{}, nil - } - ss := strings.Split(val, ",") - out := make([]uint, len(ss)) - for i, d := range ss { - u, err := strconv.ParseUint(d, 10, 0) - if err != nil { - return nil, err - } - out[i] = uint(u) - } - return out, nil -} - -// GetUintSlice returns the []uint value of a flag with the given name. -func (f *FlagSet) GetUintSlice(name string) ([]uint, error) { - val, err := f.getFlagType(name, "uintSlice", uintSliceConv) - if err != nil { - return []uint{}, err - } - return val.([]uint), nil -} - -// UintSliceVar defines a uintSlice flag with specified name, default value, and usage string. -// The argument p points to a []uint variable in which to store the value of the flag. -func (f *FlagSet) UintSliceVar(p *[]uint, name string, value []uint, usage string) { - f.VarP(newUintSliceValue(value, p), name, "", usage) -} - -// UintSliceVarP is like UintSliceVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) UintSliceVarP(p *[]uint, name, shorthand string, value []uint, usage string) { - f.VarP(newUintSliceValue(value, p), name, shorthand, usage) -} - -// UintSliceVar defines a uint[] flag with specified name, default value, and usage string. 
-// The argument p points to a uint[] variable in which to store the value of the flag. -func UintSliceVar(p *[]uint, name string, value []uint, usage string) { - CommandLine.VarP(newUintSliceValue(value, p), name, "", usage) -} - -// UintSliceVarP is like the UintSliceVar, but accepts a shorthand letter that can be used after a single dash. -func UintSliceVarP(p *[]uint, name, shorthand string, value []uint, usage string) { - CommandLine.VarP(newUintSliceValue(value, p), name, shorthand, usage) -} - -// UintSlice defines a []uint flag with specified name, default value, and usage string. -// The return value is the address of a []uint variable that stores the value of the flag. -func (f *FlagSet) UintSlice(name string, value []uint, usage string) *[]uint { - p := []uint{} - f.UintSliceVarP(&p, name, "", value, usage) - return &p -} - -// UintSliceP is like UintSlice, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) UintSliceP(name, shorthand string, value []uint, usage string) *[]uint { - p := []uint{} - f.UintSliceVarP(&p, name, shorthand, value, usage) - return &p -} - -// UintSlice defines a []uint flag with specified name, default value, and usage string. -// The return value is the address of a []uint variable that stores the value of the flag. -func UintSlice(name string, value []uint, usage string) *[]uint { - return CommandLine.UintSliceP(name, "", value, usage) -} - -// UintSliceP is like UintSlice, but accepts a shorthand letter that can be used after a single dash. -func UintSliceP(name, shorthand string, value []uint, usage string) *[]uint { - return CommandLine.UintSliceP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/viper/.editorconfig b/vendor/github.com/spf13/viper/.editorconfig deleted file mode 100644 index 63afcbcd..00000000 --- a/vendor/github.com/spf13/viper/.editorconfig +++ /dev/null @@ -1,15 +0,0 @@ -root = true - -[*] -charset = utf-8 -end_of_line = lf -indent_size = 4 -indent_style = space -insert_final_newline = true -trim_trailing_whitespace = true - -[*.go] -indent_style = tab - -[{Makefile, *.mk}] -indent_style = tab diff --git a/vendor/github.com/spf13/viper/.gitignore b/vendor/github.com/spf13/viper/.gitignore deleted file mode 100644 index 89625083..00000000 --- a/vendor/github.com/spf13/viper/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -/.idea/ -/bin/ -/build/ -/var/ -/vendor/ diff --git a/vendor/github.com/spf13/viper/.golangci.yml b/vendor/github.com/spf13/viper/.golangci.yml deleted file mode 100644 index 4f970acb..00000000 --- a/vendor/github.com/spf13/viper/.golangci.yml +++ /dev/null @@ -1,93 +0,0 @@ -run: - timeout: 5m - -linters-settings: - gci: - local-prefixes: github.com/spf13/viper - golint: - min-confidence: 0 - goimports: - local-prefixes: github.com/spf13/viper - -linters: - disable-all: true - enable: - - bodyclose - - deadcode - - dogsled - - dupl - - durationcheck - - exhaustive - - exportloopref - - gci - - goconst - - gofmt - - gofumpt - - goimports - - gomoddirectives - - goprintffuncname - - govet - - importas - - ineffassign - - makezero - - misspell - - nakedret - - nilerr - - noctx - - nolintlint - - prealloc - - predeclared - - revive - - rowserrcheck - - sqlclosecheck - - staticcheck - - structcheck - - stylecheck - - tparallel - - typecheck - - unconvert - - unparam - - unused - - varcheck - - wastedassign - - whitespace - - # fixme - # - cyclop - # - errcheck - # - errorlint - # - exhaustivestruct - # - forbidigo - # - forcetypeassert - # - gochecknoglobals - # - 
gochecknoinits - # - gocognit - # - gocritic - # - gocyclo - # - godot - # - gosec - # - gosimple - # - ifshort - # - lll - # - nlreturn - # - paralleltest - # - scopelint - # - thelper - # - wrapcheck - - # unused - # - depguard - # - goheader - # - gomodguard - - # don't enable: - # - asciicheck - # - funlen - # - godox - # - goerr113 - # - gomnd - # - interfacer - # - maligned - # - nestif - # - testpackage - # - wsl diff --git a/vendor/github.com/spf13/viper/LICENSE b/vendor/github.com/spf13/viper/LICENSE deleted file mode 100644 index 4527efb9..00000000 --- a/vendor/github.com/spf13/viper/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Steve Francia - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/spf13/viper/Makefile b/vendor/github.com/spf13/viper/Makefile deleted file mode 100644 index b0f9acf2..00000000 --- a/vendor/github.com/spf13/viper/Makefile +++ /dev/null @@ -1,76 +0,0 @@ -# A Self-Documenting Makefile: http://marmelab.com/blog/2016/02/29/auto-documented-makefile.html - -OS = $(shell uname | tr A-Z a-z) -export PATH := $(abspath bin/):${PATH} - -# Build variables -BUILD_DIR ?= build -export CGO_ENABLED ?= 0 -export GOOS = $(shell go env GOOS) -ifeq (${VERBOSE}, 1) -ifeq ($(filter -v,${GOARGS}),) - GOARGS += -v -endif -TEST_FORMAT = short-verbose -endif - -# Dependency versions -GOTESTSUM_VERSION = 1.6.4 -GOLANGCI_VERSION = 1.40.1 - -# Add the ability to override some variables -# Use with care --include override.mk - -.PHONY: clear -clear: ## Clear the working area and the project - rm -rf bin/ - -.PHONY: check -check: test lint ## Run tests and linters - -bin/gotestsum: bin/gotestsum-${GOTESTSUM_VERSION} - @ln -sf gotestsum-${GOTESTSUM_VERSION} bin/gotestsum -bin/gotestsum-${GOTESTSUM_VERSION}: - @mkdir -p bin - curl -L https://github.com/gotestyourself/gotestsum/releases/download/v${GOTESTSUM_VERSION}/gotestsum_${GOTESTSUM_VERSION}_${OS}_amd64.tar.gz | tar -zOxf - gotestsum > ./bin/gotestsum-${GOTESTSUM_VERSION} && chmod +x ./bin/gotestsum-${GOTESTSUM_VERSION} - -TEST_PKGS ?= ./... -.PHONY: test -test: TEST_FORMAT ?= short -test: SHELL = /bin/bash -test: export CGO_ENABLED=1 -test: bin/gotestsum ## Run tests - @mkdir -p ${BUILD_DIR} - bin/gotestsum --no-summary=skipped --junitfile ${BUILD_DIR}/coverage.xml --format ${TEST_FORMAT} -- -race -coverprofile=${BUILD_DIR}/coverage.txt -covermode=atomic $(filter-out -v,${GOARGS}) $(if ${TEST_PKGS},${TEST_PKGS},./...) 
- -bin/golangci-lint: bin/golangci-lint-${GOLANGCI_VERSION} - @ln -sf golangci-lint-${GOLANGCI_VERSION} bin/golangci-lint -bin/golangci-lint-${GOLANGCI_VERSION}: - @mkdir -p bin - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | bash -s -- -b ./bin/ v${GOLANGCI_VERSION} - @mv bin/golangci-lint "$@" - -.PHONY: lint -lint: bin/golangci-lint ## Run linter - bin/golangci-lint run - -.PHONY: fix -fix: bin/golangci-lint ## Fix lint violations - bin/golangci-lint run --fix - -# Add custom targets here --include custom.mk - -.PHONY: list -list: ## List all make targets - @${MAKE} -pRrn : -f $(MAKEFILE_LIST) 2>/dev/null | awk -v RS= -F: '/^# File/,/^# Finished Make data base/ {if ($$1 !~ "^[#.]") {print $$1}}' | egrep -v -e '^[^[:alnum:]]' -e '^$@$$' | sort - -.PHONY: help -.DEFAULT_GOAL := help -help: - @grep -h -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' - -# Variable outputting/exporting rules -var-%: ; @echo $($*) -varexport-%: ; @echo $*=$($*) diff --git a/vendor/github.com/spf13/viper/README.md b/vendor/github.com/spf13/viper/README.md deleted file mode 100644 index 9712e705..00000000 --- a/vendor/github.com/spf13/viper/README.md +++ /dev/null @@ -1,874 +0,0 @@ -> ## Viper v2 feedback -> Viper is heading towards v2 and we would love to hear what _**you**_ would like to see in it. Share your thoughts here: https://forms.gle/R6faU74qPRPAzchZ9 -> -> **Thank you!** - -![Viper](.github/logo.png?raw=true) - -[![Mentioned in Awesome Go](https://awesome.re/mentioned-badge-flat.svg)](https://github.com/avelino/awesome-go#configuration) -[![run on repl.it](https://repl.it/badge/github/sagikazarmark/Viper-example)](https://repl.it/@sagikazarmark/Viper-example#main.go) - -[![GitHub Workflow Status](https://img.shields.io/github/workflow/status/spf13/viper/CI?style=flat-square)](https://github.com/spf13/viper/actions?query=workflow%3ACI) -[![Join the chat at https://gitter.im/spf13/viper](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/spf13/viper?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) -[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/viper?style=flat-square)](https://goreportcard.com/report/github.com/spf13/viper) -![Go Version](https://img.shields.io/badge/go%20version-%3E=1.14-61CFDD.svg?style=flat-square) -[![PkgGoDev](https://pkg.go.dev/badge/mod/github.com/spf13/viper)](https://pkg.go.dev/mod/github.com/spf13/viper) - -**Go configuration with fangs!** - -Many Go projects are built using Viper including: - -* [Hugo](http://gohugo.io) -* [EMC RexRay](http://rexray.readthedocs.org/en/stable/) -* [Imgur’s Incus](https://github.com/Imgur/incus) -* [Nanobox](https://github.com/nanobox-io/nanobox)/[Nanopack](https://github.com/nanopack) -* [Docker Notary](https://github.com/docker/Notary) -* [BloomApi](https://www.bloomapi.com/) -* [doctl](https://github.com/digitalocean/doctl) -* [Clairctl](https://github.com/jgsqware/clairctl) -* [Mercure](https://mercure.rocks) - - -## Install - -```shell -go get github.com/spf13/viper -``` - -**Note:** Viper uses [Go Modules](https://github.com/golang/go/wiki/Modules) to manage dependencies. - - -## What is Viper? - -Viper is a complete configuration solution for Go applications including 12-Factor apps. It is designed -to work within an application, and can handle all types of configuration needs -and formats. 
It supports: - -* setting defaults -* reading from JSON, TOML, YAML, HCL, envfile and Java properties config files -* live watching and re-reading of config files (optional) -* reading from environment variables -* reading from remote config systems (etcd or Consul), and watching changes -* reading from command line flags -* reading from buffer -* setting explicit values - -Viper can be thought of as a registry for all of your application's configuration needs. - - -## Why Viper? - -When building a modern application, you don’t want to worry about -configuration file formats; you want to focus on building awesome software. -Viper is here to help with that. - -Viper does the following for you: - -1. Find, load, and unmarshal a configuration file in JSON, TOML, YAML, HCL, INI, envfile or Java properties formats. -2. Provide a mechanism to set default values for your different configuration options. -3. Provide a mechanism to set override values for options specified through command line flags. -4. Provide an alias system to easily rename parameters without breaking existing code. -5. Make it easy to tell the difference between when a user has explicitly provided a value via command line or config file and when that value merely matches the default. - -Viper uses the following precedence order. Each item takes precedence over the item below it: - - * explicit call to `Set` - * flag - * env - * config - * key/value store - * default - -**Important:** Viper configuration keys are case insensitive. -There are ongoing discussions about making that optional. - - -## Putting Values into Viper - -### Establishing Defaults - -A good configuration system will support default values. A default value is not -required for a key, but it’s useful in the event that a key hasn't been set via -config file, environment variable, remote configuration or flag. - -Examples: - -```go -viper.SetDefault("ContentDir", "content") -viper.SetDefault("LayoutDir", "layouts") -viper.SetDefault("Taxonomies", map[string]string{"tag": "tags", "category": "categories"}) -``` - -### Reading Config Files - -Viper requires minimal configuration so it knows where to look for config files. -Viper supports JSON, TOML, YAML, HCL, INI, envfile and Java Properties files. Viper can search multiple paths, but -currently a single Viper instance only supports a single configuration file. -Viper does not default to any configuration search path, leaving that decision -to the application. - -Here is an example of how to use Viper to search for and read a configuration file. -None of the specific paths are required, but at least one path should be provided -where a configuration file is expected.
- -```go -viper.SetConfigName("config") // name of config file (without extension) -viper.SetConfigType("yaml") // REQUIRED if the config file does not have the extension in the name -viper.AddConfigPath("/etc/appname/") // path to look for the config file in -viper.AddConfigPath("$HOME/.appname") // call multiple times to add many search paths -viper.AddConfigPath(".") // optionally look for config in the working directory -err := viper.ReadInConfig() // Find and read the config file -if err != nil { // Handle errors reading the config file - panic(fmt.Errorf("Fatal error config file: %w \n", err)) -} -``` - -You can handle the specific case where no config file is found like this: - -```go -if err := viper.ReadInConfig(); err != nil { - if _, ok := err.(viper.ConfigFileNotFoundError); ok { - // Config file not found; ignore error if desired - } else { - // Config file was found but another error was produced - } -} - -// Config file found and successfully parsed -``` - -*NOTE [since 1.6]:* You can also have a file without an extension and specify the format programmatically. This is useful for configuration files, such as `.bashrc`, that lie in the user's home directory without any extension. - -### Writing Config Files - -Reading from config files is useful, but at times you want to store all modifications made at run time. -For that, a bunch of commands are available, each with its own purpose: - -* WriteConfig - writes the current viper configuration to the predefined path, if exists. Errors if no predefined path. Will overwrite the current config file, if it exists. -* SafeWriteConfig - writes the current viper configuration to the predefined path. Errors if no predefined path. Will not overwrite the current config file, if it exists. -* WriteConfigAs - writes the current viper configuration to the given filepath. Will overwrite the given file, if it exists. -* SafeWriteConfigAs - writes the current viper configuration to the given filepath. Will not overwrite the given file, if it exists. - -As a rule of thumb, everything marked safe won't overwrite any existing file, but will create one if it does not exist, whilst the default behavior is to create or truncate. - -A few small examples: - -```go -viper.WriteConfig() // writes current config to predefined path set by 'viper.AddConfigPath()' and 'viper.SetConfigName' -viper.SafeWriteConfig() -viper.WriteConfigAs("/path/to/my/.config") -viper.SafeWriteConfigAs("/path/to/my/.config") // will error since it has already been written -viper.SafeWriteConfigAs("/path/to/my/.other_config") -``` - -### Watching and re-reading config files - -Viper supports the ability to have your application live read a config file while running. - -Gone are the days of needing to restart a server to have a config take effect; -viper-powered applications can read an update to a config file while running and -not miss a beat. - -Simply call `WatchConfig()` on the viper instance. -Optionally you can provide a function for Viper to run each time a change occurs. - -**Make sure you add all of the configPaths prior to calling `WatchConfig()`** - -```go -viper.OnConfigChange(func(e fsnotify.Event) { - fmt.Println("Config file changed:", e.Name) -}) -viper.WatchConfig() -``` - -### Reading Config from io.Reader - -Viper predefines many configuration sources such as files, environment -variables, flags, and remote K/V store, but you are not bound to them. You can -also implement your own required configuration source and feed it to viper.
- -```go -viper.SetConfigType("yaml") // or viper.SetConfigType("YAML") - -// any approach to require this configuration into your program. -var yamlExample = []byte(` -Hacker: true -name: steve -hobbies: -- skateboarding -- snowboarding -- go -clothing: - jacket: leather - trousers: denim -age: 35 -eyes : brown -beard: true -`) - -viper.ReadConfig(bytes.NewBuffer(yamlExample)) - -viper.Get("name") // this would be "steve" -``` - -### Setting Overrides - -These could be from a command line flag, or from your own application logic. - -```go -viper.Set("Verbose", true) -viper.Set("LogFile", LogFile) -``` - -### Registering and Using Aliases - -Aliases permit a single value to be referenced by multiple keys - -```go -viper.RegisterAlias("loud", "Verbose") - -viper.Set("verbose", true) // same result as next line -viper.Set("loud", true) // same result as prior line - -viper.GetBool("loud") // true -viper.GetBool("verbose") // true -``` - -### Working with Environment Variables - -Viper has full support for environment variables. This enables 12 factor -applications out of the box. There are five methods that exist to aid working -with ENV: - - * `AutomaticEnv()` - * `BindEnv(string...) : error` - * `SetEnvPrefix(string)` - * `SetEnvKeyReplacer(string...) *strings.Replacer` - * `AllowEmptyEnv(bool)` - -_When working with ENV variables, it’s important to recognize that Viper -treats ENV variables as case sensitive._ - -Viper provides a mechanism to try to ensure that ENV variables are unique. By -using `SetEnvPrefix`, you can tell Viper to use a prefix while reading from -the environment variables. Both `BindEnv` and `AutomaticEnv` will use this -prefix. - -`BindEnv` takes one or more parameters. The first parameter is the key name, the -rest are the name of the environment variables to bind to this key. If more than -one are provided, they will take precedence in the specified order. The name of -the environment variable is case sensitive. If the ENV variable name is not provided, then -Viper will automatically assume that the ENV variable matches the following format: prefix + "_" + the key name in ALL CAPS. When you explicitly provide the ENV variable name (the second parameter), -it **does not** automatically add the prefix. For example if the second parameter is "id", -Viper will look for the ENV variable "ID". - -One important thing to recognize when working with ENV variables is that the -value will be read each time it is accessed. Viper does not fix the value when -the `BindEnv` is called. - -`AutomaticEnv` is a powerful helper especially when combined with -`SetEnvPrefix`. When called, Viper will check for an environment variable any -time a `viper.Get` request is made. It will apply the following rules. It will -check for an environment variable with a name matching the key uppercased and -prefixed with the `EnvPrefix` if set. - -`SetEnvKeyReplacer` allows you to use a `strings.Replacer` object to rewrite Env -keys to an extent. This is useful if you want to use `-` or something in your -`Get()` calls, but want your environmental variables to use `_` delimiters. An -example of using it can be found in `viper_test.go`. - -Alternatively, you can use `EnvKeyReplacer` with `NewWithOptions` factory function. -Unlike `SetEnvKeyReplacer`, it accepts a `StringReplacer` interface allowing you to write custom string replacing logic. - -By default empty environment variables are considered unset and will fall back to -the next configuration source. 
To treat empty environment variables as set, use -the `AllowEmptyEnv` method. - -#### Env example - -```go -SetEnvPrefix("spf") // will be uppercased automatically -BindEnv("id") - -os.Setenv("SPF_ID", "13") // typically done outside of the app - -id := Get("id") // 13 -``` - -### Working with Flags - -Viper has the ability to bind to flags. Specifically, Viper supports `Pflags` -as used in the [Cobra](https://github.com/spf13/cobra) library. - -Like `BindEnv`, the value is not set when the binding method is called, but when -it is accessed. This means you can bind as early as you want, even in an -`init()` function. - -For individual flags, the `BindPFlag()` method provides this functionality. - -Example: - -```go -serverCmd.Flags().Int("port", 1138, "Port to run Application server on") -viper.BindPFlag("port", serverCmd.Flags().Lookup("port")) -``` - -You can also bind an existing set of pflags (pflag.FlagSet): - -Example: - -```go -pflag.Int("flagname", 1234, "help message for flagname") - -pflag.Parse() -viper.BindPFlags(pflag.CommandLine) - -i := viper.GetInt("flagname") // retrieve values from viper instead of pflag -``` - -The use of [pflag](https://github.com/spf13/pflag/) in Viper does not preclude -the use of other packages that use the [flag](https://golang.org/pkg/flag/) -package from the standard library. The pflag package can handle the flags -defined for the flag package by importing these flags. This is accomplished -by calling a convenience function provided by the pflag package called -`AddGoFlagSet()`. - -Example: - -```go -package main - -import ( - "flag" - "github.com/spf13/pflag" - "github.com/spf13/viper" -) - -func main() { - - // using standard library "flag" package - flag.Int("flagname", 1234, "help message for flagname") - - pflag.CommandLine.AddGoFlagSet(flag.CommandLine) - pflag.Parse() - viper.BindPFlags(pflag.CommandLine) - - i := viper.GetInt("flagname") // retrieve value from viper - - // ... -} -``` - -#### Flag interfaces - -Viper provides two Go interfaces to bind other flag systems if you don’t use `Pflags`. - -`FlagValue` represents a single flag. This is a very simple example of how to implement this interface: - -```go -type myFlag struct {} -func (f myFlag) HasChanged() bool { return false } -func (f myFlag) Name() string { return "my-flag-name" } -func (f myFlag) ValueString() string { return "my-flag-value" } -func (f myFlag) ValueType() string { return "string" } -``` - -Once your flag implements this interface, you can simply tell Viper to bind it: - -```go -viper.BindFlagValue("my-flag-name", myFlag{}) -``` - -`FlagValueSet` represents a group of flags. This is a very simple example of how to implement this interface: - -```go -type myFlagSet struct { - flags []myFlag -} - -func (f myFlagSet) VisitAll(fn func(FlagValue)) { - for _, flag := range f.flags { - fn(flag) - } -} -``` - -Once your flag set implements this interface, you can simply tell Viper to bind it: - -```go -fSet := myFlagSet{ - flags: []myFlag{myFlag{}, myFlag{}}, -} -viper.BindFlagValues("my-flags", fSet) -``` - -### Remote Key/Value Store Support - -To enable remote support in Viper, do a blank import of the `viper/remote` -package: - -`import _ "github.com/spf13/viper/remote"` - -Viper will read a config string (as JSON, TOML, YAML, HCL or envfile) retrieved from a path -in a Key/Value store such as etcd or Consul. These values take precedence over -default values, but are overridden by configuration values retrieved from disk, -flags, or environment variables.
- -Viper uses [crypt](https://github.com/bketelsen/crypt) to retrieve -configuration from the K/V store, which means that you can store your -configuration values encrypted and have them automatically decrypted if you have -the correct gpg keyring. Encryption is optional. - -You can use remote configuration in conjunction with local configuration, or -independently of it. - -`crypt` has a command-line helper that you can use to put configurations in your -K/V store. `crypt` defaults to etcd on http://127.0.0.1:4001. - -```bash -$ go get github.com/bketelsen/crypt/bin/crypt -$ crypt set -plaintext /config/hugo.json /Users/hugo/settings/config.json -``` - -Confirm that your value was set: - -```bash -$ crypt get -plaintext /config/hugo.json -``` - -See the `crypt` documentation for examples of how to set encrypted values, or -how to use Consul. - -### Remote Key/Value Store Example - Unencrypted - -#### etcd -```go -viper.AddRemoteProvider("etcd", "http://127.0.0.1:4001","/config/hugo.json") -viper.SetConfigType("json") // because there is no file extension in a stream of bytes, supported extensions are "json", "toml", "yaml", "yml", "properties", "props", "prop", "env", "dotenv" -err := viper.ReadRemoteConfig() -``` - -#### Consul -You need to set a key to Consul key/value storage with JSON value containing your desired config. -For example, create a Consul key/value store key `MY_CONSUL_KEY` with value: - -```json -{ - "port": 8080, - "hostname": "myhostname.com" -} -``` - -```go -viper.AddRemoteProvider("consul", "localhost:8500", "MY_CONSUL_KEY") -viper.SetConfigType("json") // Need to explicitly set this to json -err := viper.ReadRemoteConfig() - -fmt.Println(viper.Get("port")) // 8080 -fmt.Println(viper.Get("hostname")) // myhostname.com -``` - -#### Firestore - -```go -viper.AddRemoteProvider("firestore", "google-cloud-project-id", "collection/document") -viper.SetConfigType("json") // Config's format: "json", "toml", "yaml", "yml" -err := viper.ReadRemoteConfig() -``` - -Of course, you're allowed to use `SecureRemoteProvider` also - -### Remote Key/Value Store Example - Encrypted - -```go -viper.AddSecureRemoteProvider("etcd","http://127.0.0.1:4001","/config/hugo.json","/etc/secrets/mykeyring.gpg") -viper.SetConfigType("json") // because there is no file extension in a stream of bytes, supported extensions are "json", "toml", "yaml", "yml", "properties", "props", "prop", "env", "dotenv" -err := viper.ReadRemoteConfig() -``` - -### Watching Changes in etcd - Unencrypted - -```go -// alternatively, you can create a new viper instance. -var runtime_viper = viper.New() - -runtime_viper.AddRemoteProvider("etcd", "http://127.0.0.1:4001", "/config/hugo.yml") -runtime_viper.SetConfigType("yaml") // because there is no file extension in a stream of bytes, supported extensions are "json", "toml", "yaml", "yml", "properties", "props", "prop", "env", "dotenv" - -// read from remote config the first time. -err := runtime_viper.ReadRemoteConfig() - -// unmarshal config -runtime_viper.Unmarshal(&runtime_conf) - -// open a goroutine to watch remote changes forever -go func(){ - for { - time.Sleep(time.Second * 5) // delay after each request - - // currently, only tested with etcd support - err := runtime_viper.WatchRemoteConfig() - if err != nil { - log.Errorf("unable to read remote config: %v", err) - continue - } - - // unmarshal new config into our runtime config struct. 
you can also use channel - // to implement a signal to notify the system of the changes - runtime_viper.Unmarshal(&runtime_conf) - } -}() -``` - -## Getting Values From Viper - -In Viper, there are a few ways to get a value depending on the value’s type. -The following functions and methods exist: - - * `Get(key string) : interface{}` - * `GetBool(key string) : bool` - * `GetFloat64(key string) : float64` - * `GetInt(key string) : int` - * `GetIntSlice(key string) : []int` - * `GetString(key string) : string` - * `GetStringMap(key string) : map[string]interface{}` - * `GetStringMapString(key string) : map[string]string` - * `GetStringSlice(key string) : []string` - * `GetTime(key string) : time.Time` - * `GetDuration(key string) : time.Duration` - * `IsSet(key string) : bool` - * `AllSettings() : map[string]interface{}` - -One important thing to recognize is that each Get function will return a zero -value if it’s not found. To check if a given key exists, the `IsSet()` method -has been provided. - -Example: -```go -viper.GetString("logfile") // case-insensitive Setting & Getting -if viper.GetBool("verbose") { - fmt.Println("verbose enabled") -} -``` -### Accessing nested keys - -The accessor methods also accept formatted paths to deeply nested keys. For -example, if the following JSON file is loaded: - -```json -{ - "host": { - "address": "localhost", - "port": 5799 - }, - "datastore": { - "metric": { - "host": "127.0.0.1", - "port": 3099 - }, - "warehouse": { - "host": "198.0.0.1", - "port": 2112 - } - } -} - -``` - -Viper can access a nested field by passing a `.` delimited path of keys: - -```go -GetString("datastore.metric.host") // (returns "127.0.0.1") -``` - -This obeys the precedence rules established above; the search for the path -will cascade through the remaining configuration registries until found. - -For example, given this configuration file, both `datastore.metric.host` and -`datastore.metric.port` are already defined (and may be overridden). If in addition -`datastore.metric.protocol` was defined in the defaults, Viper would also find it. - -However, if `datastore.metric` was overridden (by a flag, an environment variable, -the `Set()` method, …) with an immediate value, then all sub-keys of -`datastore.metric` become undefined; they are “shadowed” by the higher-priority -configuration level. - -Viper can access array indices by using numbers in the path. For example: - -```json -{ - "host": { - "address": "localhost", - "ports": [ - 5799, - 6029 - ] - }, - "datastore": { - "metric": { - "host": "127.0.0.1", - "port": 3099 - }, - "warehouse": { - "host": "198.0.0.1", - "port": 2112 - } - } -} - -GetInt("host.ports.1") // returns 6029 - -``` - -Lastly, if there exists a key that matches the delimited key path, its value -will be returned instead. E.g. - -```json -{ - "datastore.metric.host": "0.0.0.0", - "host": { - "address": "localhost", - "port": 5799 - }, - "datastore": { - "metric": { - "host": "127.0.0.1", - "port": 3099 - }, - "warehouse": { - "host": "198.0.0.1", - "port": 2112 - } - } -} - -GetString("datastore.metric.host") // returns "0.0.0.0" -``` - -### Extracting a sub-tree - -When developing reusable modules, it's often useful to extract a subset of the configuration -and pass it to a module. This way the module can be instantiated more than once, with different configurations.
- -For example, an application might use multiple different cache stores for different purposes: - -```yaml -cache: - cache1: - max-items: 100 - item-size: 64 - cache2: - max-items: 200 - item-size: 80 -``` - -We could pass the cache name to a module (eg. `NewCache("cache1")`), -but it would require weird concatenation for accessing config keys and would be less separated from the global config. - -So instead of doing that let's pass a Viper instance to the constructor that represents a subset of the configuration: - -```go -cache1Config := viper.Sub("cache.cache1") -if cache1Config == nil { // Sub returns nil if the key cannot be found - panic("cache configuration not found") -} - -cache1 := NewCache(cache1Config) -``` - -**Note:** Always check the return value of `Sub`. It returns `nil` if a key cannot be found. - -Internally, the `NewCache` function can address `max-items` and `item-size` keys directly: - -```go -func NewCache(v *Viper) *Cache { - return &Cache{ - MaxItems: v.GetInt("max-items"), - ItemSize: v.GetInt("item-size"), - } -} -``` - -The resulting code is easy to test, since it's decoupled from the main config structure, -and easier to reuse (for the same reason). - - -### Unmarshaling - -You also have the option of Unmarshaling all or a specific value to a struct, map, -etc. - -There are two methods to do this: - - * `Unmarshal(rawVal interface{}) : error` - * `UnmarshalKey(key string, rawVal interface{}) : error` - -Example: - -```go -type config struct { - Port int - Name string - PathMap string `mapstructure:"path_map"` -} - -var C config - -err := viper.Unmarshal(&C) -if err != nil { - t.Fatalf("unable to decode into struct, %v", err) -} -``` - -If you want to unmarshal configuration where the keys themselves contain dot (the default key delimiter), -you have to change the delimiter: - -```go -v := viper.NewWithOptions(viper.KeyDelimiter("::")) - -v.SetDefault("chart::values", map[string]interface{}{ - "ingress": map[string]interface{}{ - "annotations": map[string]interface{}{ - "traefik.frontend.rule.type": "PathPrefix", - "traefik.ingress.kubernetes.io/ssl-redirect": "true", - }, - }, -}) - -type config struct { - Chart struct{ - Values map[string]interface{} - } -} - -var C config - -v.Unmarshal(&C) -``` - -Viper also supports unmarshaling into embedded structs: - -```go -/* -Example config: - -module: - enabled: true - token: 89h3f98hbwf987h3f98wenf89ehf -*/ -type config struct { - Module struct { - Enabled bool - - moduleConfig `mapstructure:",squash"` - } -} - -// moduleConfig could be in a module specific package -type moduleConfig struct { - Token string -} - -var C config - -err := viper.Unmarshal(&C) -if err != nil { - t.Fatalf("unable to decode into struct, %v", err) -} -``` - -Viper uses [github.com/mitchellh/mapstructure](https://github.com/mitchellh/mapstructure) under the hood for unmarshaling values which uses `mapstructure` tags by default. - -### Decoding custom formats - -A frequently requested feature for Viper is adding more value formats and decoders. -For example, parsing character (dot, comma, semicolon, etc) separated strings into slices. - -This is already available in Viper using mapstructure decode hooks. - -Read more about the details in [this blog post](https://sagikazarmark.hu/blog/decoding-custom-formats-with-viper/). - -### Marshalling to string - -You may need to marshal all the settings held in viper into a string rather than write them to a file. 
-You can use your favorite format's marshaller with the config returned by `AllSettings()`. - -```go -import ( - yaml "gopkg.in/yaml.v2" - // ... -) - -func yamlStringSettings() string { - c := viper.AllSettings() - bs, err := yaml.Marshal(c) - if err != nil { - log.Fatalf("unable to marshal config to YAML: %v", err) - } - return string(bs) -} -``` - -## Viper or Vipers? - -Viper comes ready to use out of the box. There is no configuration or -initialization needed to begin using Viper. Since most applications will want -to use a single central repository for their configuration, the viper package -provides this. It is similar to a singleton. - -All of the examples above demonstrate using viper in its singleton -style approach. - -### Working with multiple vipers - -You can also create many different vipers for use in your application. Each will -have its own unique set of configurations and values. Each can read from a -different config file, key value store, etc. All of the functions that the viper -package supports are mirrored as methods on a viper. - -Example: - -```go -x := viper.New() -y := viper.New() - -x.SetDefault("ContentDir", "content") -y.SetDefault("ContentDir", "foobar") - -//... -``` - -When working with multiple vipers, it is up to the user to keep track of the -different vipers. - - -## Q & A - -### Why is it called “Viper”? - -A: Viper is designed to be a [companion](http://en.wikipedia.org/wiki/Viper_(G.I._Joe)) -to [Cobra](https://github.com/spf13/cobra). While both can operate completely -independently, together they make a powerful pair to handle much of your -application foundation needs. - -### Why is it called “Cobra”? - -Is there a better name for a [commander](http://en.wikipedia.org/wiki/Cobra_Commander)? - -### Does Viper support case sensitive keys? - -**tl;dr:** No. - -Viper merges configuration from various sources, many of which are either case insensitive or use different casing than the rest of the sources (eg. env vars). -In order to provide the best experience when using multiple sources, the decision has been made to make all keys case insensitive. - -There have been several attempts to implement case sensitivity, but unfortunately it's not that trivial. We might take a stab at implementing it in [Viper v2](https://github.com/spf13/viper/issues/772), but despite the initial noise, it does not seem to be requested that much. - -You can vote for case sensitivity by filling out this feedback form: https://forms.gle/R6faU74qPRPAzchZ9 - -### Is it safe to concurrently read and write to a viper? - -No, you will need to synchronize access to the viper yourself (for example by using the `sync` package). Concurrent reads and writes can cause a panic. - -## Troubleshooting - -See [TROUBLESHOOTING.md](TROUBLESHOOTING.md). diff --git a/vendor/github.com/spf13/viper/TROUBLESHOOTING.md b/vendor/github.com/spf13/viper/TROUBLESHOOTING.md deleted file mode 100644 index 096277af..00000000 --- a/vendor/github.com/spf13/viper/TROUBLESHOOTING.md +++ /dev/null @@ -1,23 +0,0 @@ -# Troubleshooting - -## Unmarshaling doesn't work - -The most common reason for this issue is improper use of struct tags (eg. `yaml` or `json`). Viper uses [github.com/mitchellh/mapstructure](https://github.com/mitchellh/mapstructure) under the hood for unmarshaling values which uses `mapstructure` tags by default. Please refer to the library's documentation for using other struct tags.
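To make the struct-tag point above concrete, here is a minimal, self-contained sketch (the `server_port` key and the struct are illustrative, not taken from this repository): a field tagged only for `yaml` or `json` is matched by its field name alone, while a `mapstructure` tag is what viper's unmarshaler actually reads.

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/spf13/viper"
)

type serverConfig struct {
	// Matched by field name only: the yaml tag is ignored by viper's
	// unmarshaler, so this field stays at its zero value.
	PortFromYAMLTag int `yaml:"server_port"`
	// Matched via the mapstructure tag, which viper honors by default.
	Port int `mapstructure:"server_port"`
}

func main() {
	v := viper.New()
	v.SetConfigType("yaml")
	if err := v.ReadConfig(bytes.NewBufferString("server_port: 8080\n")); err != nil {
		panic(err)
	}

	var c serverConfig
	if err := v.Unmarshal(&c); err != nil {
		panic(err)
	}
	fmt.Println(c.PortFromYAMLTag, c.Port) // 0 8080
}
```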
-
-## Cannot find package
-
-Viper installation seems to fail a lot lately with the following (or a similar) error:
-
-```
-cannot find package "github.com/hashicorp/hcl/tree/hcl1" in any of:
-/usr/local/Cellar/go/1.15.7_1/libexec/src/github.com/hashicorp/hcl/tree/hcl1 (from $GOROOT)
-/Users/user/go/src/github.com/hashicorp/hcl/tree/hcl1 (from $GOPATH)
-```
-
-As the error message suggests, Go tries to look up dependencies in `GOPATH` mode (as it's commonly called) from the `GOPATH`.
-Viper opted to use [Go Modules](https://github.com/golang/go/wiki/Modules) to manage its dependencies. While in many cases the two methods are interchangeable, once a dependency releases new (major) versions, `GOPATH` mode is no longer able to decide which version to use, so it'll either use one that's already present or pick a version (usually the `master` branch).
-
-The solution is easy: switch to using Go Modules.
-Please refer to the [wiki](https://github.com/golang/go/wiki/Modules) on how to do that.
-
-**tl;dr:** `export GO111MODULE=on`
diff --git a/vendor/github.com/spf13/viper/flags.go b/vendor/github.com/spf13/viper/flags.go
deleted file mode 100644
index b5ddbf5d..00000000
--- a/vendor/github.com/spf13/viper/flags.go
+++ /dev/null
@@ -1,57 +0,0 @@
-package viper
-
-import "github.com/spf13/pflag"
-
-// FlagValueSet is an interface that users can implement
-// to bind a set of flags to viper.
-type FlagValueSet interface {
-	VisitAll(fn func(FlagValue))
-}
-
-// FlagValue is an interface that users can implement
-// to bind different flags to viper.
-type FlagValue interface {
-	HasChanged() bool
-	Name() string
-	ValueString() string
-	ValueType() string
-}
-
-// pflagValueSet is a wrapper around *pflag.FlagSet
-// that implements FlagValueSet.
-type pflagValueSet struct {
-	flags *pflag.FlagSet
-}
-
-// VisitAll iterates over all *pflag.Flag inside the *pflag.FlagSet.
-func (p pflagValueSet) VisitAll(fn func(flag FlagValue)) {
-	p.flags.VisitAll(func(flag *pflag.Flag) {
-		fn(pflagValue{flag})
-	})
-}
-
-// pflagValue is a wrapper around *pflag.Flag
-// that implements FlagValue.
-type pflagValue struct {
-	flag *pflag.Flag
-}
-
-// HasChanged returns whether the flag has been changed or not.
-func (p pflagValue) HasChanged() bool {
-	return p.flag.Changed
-}
-
-// Name returns the name of the flag.
-func (p pflagValue) Name() string {
-	return p.flag.Name
-}
-
-// ValueString returns the value of the flag as a string.
-func (p pflagValue) ValueString() string {
-	return p.flag.Value.String()
-}
-
-// ValueType returns the type of the flag as a string.
-func (p pflagValue) ValueType() string {
-	return p.flag.Value.Type()
-}
diff --git a/vendor/github.com/spf13/viper/internal/encoding/decoder.go b/vendor/github.com/spf13/viper/internal/encoding/decoder.go
deleted file mode 100644
index 08b1bb66..00000000
--- a/vendor/github.com/spf13/viper/internal/encoding/decoder.go
+++ /dev/null
@@ -1,61 +0,0 @@
-package encoding
-
-import (
-	"sync"
-)
-
-// Decoder decodes the contents of b into a v representation.
-// It's primarily used for decoding contents of a file into a map[string]interface{}.
-type Decoder interface {
-	Decode(b []byte, v interface{}) error
-}
-
-const (
-	// ErrDecoderNotFound is returned when there is no decoder registered for a format.
-	ErrDecoderNotFound = encodingError("decoder not found for this format")
-
-	// ErrDecoderFormatAlreadyRegistered is returned when a decoder is already registered for a format.
- ErrDecoderFormatAlreadyRegistered = encodingError("decoder already registered for this format") -) - -// DecoderRegistry can choose an appropriate Decoder based on the provided format. -type DecoderRegistry struct { - decoders map[string]Decoder - - mu sync.RWMutex -} - -// NewDecoderRegistry returns a new, initialized DecoderRegistry. -func NewDecoderRegistry() *DecoderRegistry { - return &DecoderRegistry{ - decoders: make(map[string]Decoder), - } -} - -// RegisterDecoder registers a Decoder for a format. -// Registering a Decoder for an already existing format is not supported. -func (e *DecoderRegistry) RegisterDecoder(format string, enc Decoder) error { - e.mu.Lock() - defer e.mu.Unlock() - - if _, ok := e.decoders[format]; ok { - return ErrDecoderFormatAlreadyRegistered - } - - e.decoders[format] = enc - - return nil -} - -// Decode calls the underlying Decoder based on the format. -func (e *DecoderRegistry) Decode(format string, b []byte, v interface{}) error { - e.mu.RLock() - decoder, ok := e.decoders[format] - e.mu.RUnlock() - - if !ok { - return ErrDecoderNotFound - } - - return decoder.Decode(b, v) -} diff --git a/vendor/github.com/spf13/viper/internal/encoding/encoder.go b/vendor/github.com/spf13/viper/internal/encoding/encoder.go deleted file mode 100644 index 82c7996c..00000000 --- a/vendor/github.com/spf13/viper/internal/encoding/encoder.go +++ /dev/null @@ -1,60 +0,0 @@ -package encoding - -import ( - "sync" -) - -// Encoder encodes the contents of v into a byte representation. -// It's primarily used for encoding a map[string]interface{} into a file format. -type Encoder interface { - Encode(v interface{}) ([]byte, error) -} - -const ( - // ErrEncoderNotFound is returned when there is no encoder registered for a format. - ErrEncoderNotFound = encodingError("encoder not found for this format") - - // ErrEncoderFormatAlreadyRegistered is returned when an encoder is already registered for a format. - ErrEncoderFormatAlreadyRegistered = encodingError("encoder already registered for this format") -) - -// EncoderRegistry can choose an appropriate Encoder based on the provided format. -type EncoderRegistry struct { - encoders map[string]Encoder - - mu sync.RWMutex -} - -// NewEncoderRegistry returns a new, initialized EncoderRegistry. -func NewEncoderRegistry() *EncoderRegistry { - return &EncoderRegistry{ - encoders: make(map[string]Encoder), - } -} - -// RegisterEncoder registers an Encoder for a format. -// Registering a Encoder for an already existing format is not supported. 
-func (e *EncoderRegistry) RegisterEncoder(format string, enc Encoder) error { - e.mu.Lock() - defer e.mu.Unlock() - - if _, ok := e.encoders[format]; ok { - return ErrEncoderFormatAlreadyRegistered - } - - e.encoders[format] = enc - - return nil -} - -func (e *EncoderRegistry) Encode(format string, v interface{}) ([]byte, error) { - e.mu.RLock() - encoder, ok := e.encoders[format] - e.mu.RUnlock() - - if !ok { - return nil, ErrEncoderNotFound - } - - return encoder.Encode(v) -} diff --git a/vendor/github.com/spf13/viper/internal/encoding/error.go b/vendor/github.com/spf13/viper/internal/encoding/error.go deleted file mode 100644 index e4cde02d..00000000 --- a/vendor/github.com/spf13/viper/internal/encoding/error.go +++ /dev/null @@ -1,7 +0,0 @@ -package encoding - -type encodingError string - -func (e encodingError) Error() string { - return string(e) -} diff --git a/vendor/github.com/spf13/viper/internal/encoding/hcl/codec.go b/vendor/github.com/spf13/viper/internal/encoding/hcl/codec.go deleted file mode 100644 index f3e4ab12..00000000 --- a/vendor/github.com/spf13/viper/internal/encoding/hcl/codec.go +++ /dev/null @@ -1,40 +0,0 @@ -package hcl - -import ( - "bytes" - "encoding/json" - - "github.com/hashicorp/hcl" - "github.com/hashicorp/hcl/hcl/printer" -) - -// Codec implements the encoding.Encoder and encoding.Decoder interfaces for HCL encoding. -// TODO: add printer config to the codec? -type Codec struct{} - -func (Codec) Encode(v interface{}) ([]byte, error) { - b, err := json.Marshal(v) - if err != nil { - return nil, err - } - - // TODO: use printer.Format? Is the trailing newline an issue? - - ast, err := hcl.Parse(string(b)) - if err != nil { - return nil, err - } - - var buf bytes.Buffer - - err = printer.Fprint(&buf, ast.Node) - if err != nil { - return nil, err - } - - return buf.Bytes(), nil -} - -func (Codec) Decode(b []byte, v interface{}) error { - return hcl.Unmarshal(b, v) -} diff --git a/vendor/github.com/spf13/viper/internal/encoding/json/codec.go b/vendor/github.com/spf13/viper/internal/encoding/json/codec.go deleted file mode 100644 index dff9ec98..00000000 --- a/vendor/github.com/spf13/viper/internal/encoding/json/codec.go +++ /dev/null @@ -1,17 +0,0 @@ -package json - -import ( - "encoding/json" -) - -// Codec implements the encoding.Encoder and encoding.Decoder interfaces for JSON encoding. -type Codec struct{} - -func (Codec) Encode(v interface{}) ([]byte, error) { - // TODO: expose prefix and indent in the Codec as setting? - return json.MarshalIndent(v, "", " ") -} - -func (Codec) Decode(b []byte, v interface{}) error { - return json.Unmarshal(b, v) -} diff --git a/vendor/github.com/spf13/viper/internal/encoding/toml/codec.go b/vendor/github.com/spf13/viper/internal/encoding/toml/codec.go deleted file mode 100644 index c043802b..00000000 --- a/vendor/github.com/spf13/viper/internal/encoding/toml/codec.go +++ /dev/null @@ -1,45 +0,0 @@ -package toml - -import ( - "github.com/pelletier/go-toml" -) - -// Codec implements the encoding.Encoder and encoding.Decoder interfaces for TOML encoding. 
-type Codec struct{} - -func (Codec) Encode(v interface{}) ([]byte, error) { - if m, ok := v.(map[string]interface{}); ok { - t, err := toml.TreeFromMap(m) - if err != nil { - return nil, err - } - - s, err := t.ToTomlString() - if err != nil { - return nil, err - } - - return []byte(s), nil - } - - return toml.Marshal(v) -} - -func (Codec) Decode(b []byte, v interface{}) error { - tree, err := toml.LoadBytes(b) - if err != nil { - return err - } - - if m, ok := v.(*map[string]interface{}); ok { - vmap := *m - tmap := tree.ToMap() - for k, v := range tmap { - vmap[k] = v - } - - return nil - } - - return tree.Unmarshal(v) -} diff --git a/vendor/github.com/spf13/viper/internal/encoding/yaml/codec.go b/vendor/github.com/spf13/viper/internal/encoding/yaml/codec.go deleted file mode 100644 index f94b2699..00000000 --- a/vendor/github.com/spf13/viper/internal/encoding/yaml/codec.go +++ /dev/null @@ -1,14 +0,0 @@ -package yaml - -import "gopkg.in/yaml.v2" - -// Codec implements the encoding.Encoder and encoding.Decoder interfaces for YAML encoding. -type Codec struct{} - -func (Codec) Encode(v interface{}) ([]byte, error) { - return yaml.Marshal(v) -} - -func (Codec) Decode(b []byte, v interface{}) error { - return yaml.Unmarshal(b, v) -} diff --git a/vendor/github.com/spf13/viper/util.go b/vendor/github.com/spf13/viper/util.go deleted file mode 100644 index 09d051a2..00000000 --- a/vendor/github.com/spf13/viper/util.go +++ /dev/null @@ -1,218 +0,0 @@ -// Copyright © 2014 Steve Francia . -// -// Use of this source code is governed by an MIT-style -// license that can be found in the LICENSE file. - -// Viper is a application configuration system. -// It believes that applications can be configured a variety of ways -// via flags, ENVIRONMENT variables, configuration files retrieved -// from the file system, or a remote key/value store. - -package viper - -import ( - "fmt" - "os" - "path/filepath" - "runtime" - "strings" - "unicode" - - "github.com/spf13/afero" - "github.com/spf13/cast" - jww "github.com/spf13/jwalterweatherman" -) - -// ConfigParseError denotes failing to parse configuration file. -type ConfigParseError struct { - err error -} - -// Error returns the formatted configuration error. -func (pe ConfigParseError) Error() string { - return fmt.Sprintf("While parsing config: %s", pe.err.Error()) -} - -// toCaseInsensitiveValue checks if the value is a map; -// if so, create a copy and lower-case the keys recursively. -func toCaseInsensitiveValue(value interface{}) interface{} { - switch v := value.(type) { - case map[interface{}]interface{}: - value = copyAndInsensitiviseMap(cast.ToStringMap(v)) - case map[string]interface{}: - value = copyAndInsensitiviseMap(v) - } - - return value -} - -// copyAndInsensitiviseMap behaves like insensitiviseMap, but creates a copy of -// any map it makes case insensitive. 
-func copyAndInsensitiviseMap(m map[string]interface{}) map[string]interface{} { - nm := make(map[string]interface{}) - - for key, val := range m { - lkey := strings.ToLower(key) - switch v := val.(type) { - case map[interface{}]interface{}: - nm[lkey] = copyAndInsensitiviseMap(cast.ToStringMap(v)) - case map[string]interface{}: - nm[lkey] = copyAndInsensitiviseMap(v) - default: - nm[lkey] = v - } - } - - return nm -} - -func insensitiviseMap(m map[string]interface{}) { - for key, val := range m { - switch val.(type) { - case map[interface{}]interface{}: - // nested map: cast and recursively insensitivise - val = cast.ToStringMap(val) - insensitiviseMap(val.(map[string]interface{})) - case map[string]interface{}: - // nested map: recursively insensitivise - insensitiviseMap(val.(map[string]interface{})) - } - - lower := strings.ToLower(key) - if key != lower { - // remove old key (not lower-cased) - delete(m, key) - } - // update map - m[lower] = val - } -} - -func absPathify(inPath string) string { - jww.INFO.Println("Trying to resolve absolute path to", inPath) - - if inPath == "$HOME" || strings.HasPrefix(inPath, "$HOME"+string(os.PathSeparator)) { - inPath = userHomeDir() + inPath[5:] - } - - inPath = os.ExpandEnv(inPath) - - if filepath.IsAbs(inPath) { - return filepath.Clean(inPath) - } - - p, err := filepath.Abs(inPath) - if err == nil { - return filepath.Clean(p) - } - - jww.ERROR.Println("Couldn't discover absolute path") - jww.ERROR.Println(err) - return "" -} - -// Check if file Exists -func exists(fs afero.Fs, path string) (bool, error) { - stat, err := fs.Stat(path) - if err == nil { - return !stat.IsDir(), nil - } - if os.IsNotExist(err) { - return false, nil - } - return false, err -} - -func stringInSlice(a string, list []string) bool { - for _, b := range list { - if b == a { - return true - } - } - return false -} - -func userHomeDir() string { - if runtime.GOOS == "windows" { - home := os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH") - if home == "" { - home = os.Getenv("USERPROFILE") - } - return home - } - return os.Getenv("HOME") -} - -func safeMul(a, b uint) uint { - c := a * b - if a > 1 && b > 1 && c/b != a { - return 0 - } - return c -} - -// parseSizeInBytes converts strings like 1GB or 12 mb into an unsigned integer number of bytes -func parseSizeInBytes(sizeStr string) uint { - sizeStr = strings.TrimSpace(sizeStr) - lastChar := len(sizeStr) - 1 - multiplier := uint(1) - - if lastChar > 0 { - if sizeStr[lastChar] == 'b' || sizeStr[lastChar] == 'B' { - if lastChar > 1 { - switch unicode.ToLower(rune(sizeStr[lastChar-1])) { - case 'k': - multiplier = 1 << 10 - sizeStr = strings.TrimSpace(sizeStr[:lastChar-1]) - case 'm': - multiplier = 1 << 20 - sizeStr = strings.TrimSpace(sizeStr[:lastChar-1]) - case 'g': - multiplier = 1 << 30 - sizeStr = strings.TrimSpace(sizeStr[:lastChar-1]) - default: - multiplier = 1 - sizeStr = strings.TrimSpace(sizeStr[:lastChar]) - } - } - } - } - - size := cast.ToInt(sizeStr) - if size < 0 { - size = 0 - } - - return safeMul(uint(size), multiplier) -} - -// deepSearch scans deep maps, following the key indexes listed in the -// sequence "path". -// The last value is expected to be another map, and is returned. -// -// In case intermediate keys do not exist, or map to a non-map value, -// a new map is created and inserted, and the search continues from there: -// the initial map "m" may be modified! 
-func deepSearch(m map[string]interface{}, path []string) map[string]interface{} { - for _, k := range path { - m2, ok := m[k] - if !ok { - // intermediate key does not exist - // => create it and continue from there - m3 := make(map[string]interface{}) - m[k] = m3 - m = m3 - continue - } - m3, ok := m2.(map[string]interface{}) - if !ok { - // intermediate key is a value - // => replace with a new map - m3 = make(map[string]interface{}) - m[k] = m3 - } - // continue search from here - m = m3 - } - return m -} diff --git a/vendor/github.com/spf13/viper/viper.go b/vendor/github.com/spf13/viper/viper.go deleted file mode 100644 index 9e2e3537..00000000 --- a/vendor/github.com/spf13/viper/viper.go +++ /dev/null @@ -1,2156 +0,0 @@ -// Copyright © 2014 Steve Francia . -// -// Use of this source code is governed by an MIT-style -// license that can be found in the LICENSE file. - -// Viper is an application configuration system. -// It believes that applications can be configured a variety of ways -// via flags, ENVIRONMENT variables, configuration files retrieved -// from the file system, or a remote key/value store. - -// Each item takes precedence over the item below it: - -// overrides -// flag -// env -// config -// key/value store -// default - -package viper - -import ( - "bytes" - "encoding/csv" - "errors" - "fmt" - "io" - "log" - "os" - "path/filepath" - "reflect" - "strconv" - "strings" - "sync" - "time" - - "github.com/fsnotify/fsnotify" - "github.com/magiconair/properties" - "github.com/mitchellh/mapstructure" - "github.com/spf13/afero" - "github.com/spf13/cast" - jww "github.com/spf13/jwalterweatherman" - "github.com/spf13/pflag" - "github.com/subosito/gotenv" - "gopkg.in/ini.v1" - - "github.com/spf13/viper/internal/encoding" - "github.com/spf13/viper/internal/encoding/hcl" - "github.com/spf13/viper/internal/encoding/json" - "github.com/spf13/viper/internal/encoding/toml" - "github.com/spf13/viper/internal/encoding/yaml" -) - -// ConfigMarshalError happens when failing to marshal the configuration. -type ConfigMarshalError struct { - err error -} - -// Error returns the formatted configuration error. 
-func (e ConfigMarshalError) Error() string { - return fmt.Sprintf("While marshaling config: %s", e.err.Error()) -} - -var v *Viper - -type RemoteResponse struct { - Value []byte - Error error -} - -var ( - encoderRegistry = encoding.NewEncoderRegistry() - decoderRegistry = encoding.NewDecoderRegistry() -) - -func init() { - v = New() - - { - codec := yaml.Codec{} - - encoderRegistry.RegisterEncoder("yaml", codec) - decoderRegistry.RegisterDecoder("yaml", codec) - - encoderRegistry.RegisterEncoder("yml", codec) - decoderRegistry.RegisterDecoder("yml", codec) - } - - { - codec := json.Codec{} - - encoderRegistry.RegisterEncoder("json", codec) - decoderRegistry.RegisterDecoder("json", codec) - } - - { - codec := toml.Codec{} - - encoderRegistry.RegisterEncoder("toml", codec) - decoderRegistry.RegisterDecoder("toml", codec) - } - - { - codec := hcl.Codec{} - - encoderRegistry.RegisterEncoder("hcl", codec) - decoderRegistry.RegisterDecoder("hcl", codec) - - encoderRegistry.RegisterEncoder("tfvars", codec) - decoderRegistry.RegisterDecoder("tfvars", codec) - } -} - -type remoteConfigFactory interface { - Get(rp RemoteProvider) (io.Reader, error) - Watch(rp RemoteProvider) (io.Reader, error) - WatchChannel(rp RemoteProvider) (<-chan *RemoteResponse, chan bool) -} - -// RemoteConfig is optional, see the remote package -var RemoteConfig remoteConfigFactory - -// UnsupportedConfigError denotes encountering an unsupported -// configuration filetype. -type UnsupportedConfigError string - -// Error returns the formatted configuration error. -func (str UnsupportedConfigError) Error() string { - return fmt.Sprintf("Unsupported Config Type %q", string(str)) -} - -// UnsupportedRemoteProviderError denotes encountering an unsupported remote -// provider. Currently only etcd and Consul are supported. -type UnsupportedRemoteProviderError string - -// Error returns the formatted remote provider error. -func (str UnsupportedRemoteProviderError) Error() string { - return fmt.Sprintf("Unsupported Remote Provider Type %q", string(str)) -} - -// RemoteConfigError denotes encountering an error while trying to -// pull the configuration from the remote provider. -type RemoteConfigError string - -// Error returns the formatted remote provider error -func (rce RemoteConfigError) Error() string { - return fmt.Sprintf("Remote Configurations Error: %s", string(rce)) -} - -// ConfigFileNotFoundError denotes failing to find configuration file. -type ConfigFileNotFoundError struct { - name, locations string -} - -// Error returns the formatted configuration error. -func (fnfe ConfigFileNotFoundError) Error() string { - return fmt.Sprintf("Config File %q Not Found in %q", fnfe.name, fnfe.locations) -} - -// ConfigFileAlreadyExistsError denotes failure to write new configuration file. -type ConfigFileAlreadyExistsError string - -// Error returns the formatted error when configuration already exists. 
-func (faee ConfigFileAlreadyExistsError) Error() string { - return fmt.Sprintf("Config File %q Already Exists", string(faee)) -} - -// A DecoderConfigOption can be passed to viper.Unmarshal to configure -// mapstructure.DecoderConfig options -type DecoderConfigOption func(*mapstructure.DecoderConfig) - -// DecodeHook returns a DecoderConfigOption which overrides the default -// DecoderConfig.DecodeHook value, the default is: -// -// mapstructure.ComposeDecodeHookFunc( -// mapstructure.StringToTimeDurationHookFunc(), -// mapstructure.StringToSliceHookFunc(","), -// ) -func DecodeHook(hook mapstructure.DecodeHookFunc) DecoderConfigOption { - return func(c *mapstructure.DecoderConfig) { - c.DecodeHook = hook - } -} - -// Viper is a prioritized configuration registry. It -// maintains a set of configuration sources, fetches -// values to populate those, and provides them according -// to the source's priority. -// The priority of the sources is the following: -// 1. overrides -// 2. flags -// 3. env. variables -// 4. config file -// 5. key/value store -// 6. defaults -// -// For example, if values from the following sources were loaded: -// -// Defaults : { -// "secret": "", -// "user": "default", -// "endpoint": "https://localhost" -// } -// Config : { -// "user": "root" -// "secret": "defaultsecret" -// } -// Env : { -// "secret": "somesecretkey" -// } -// -// The resulting config will have the following values: -// -// { -// "secret": "somesecretkey", -// "user": "root", -// "endpoint": "https://localhost" -// } -// -// Note: Vipers are not safe for concurrent Get() and Set() operations. -type Viper struct { - // Delimiter that separates a list of keys - // used to access a nested value in one go - keyDelim string - - // A set of paths to look for the config file in - configPaths []string - - // The filesystem to read config from. - fs afero.Fs - - // A set of remote providers to search for the configuration - remoteProviders []*defaultRemoteProvider - - // Name of file to look for inside the path - configName string - configFile string - configType string - configPermissions os.FileMode - envPrefix string - - // Specific commands for ini parsing - iniLoadOptions ini.LoadOptions - - automaticEnvApplied bool - envKeyReplacer StringReplacer - allowEmptyEnv bool - - config map[string]interface{} - override map[string]interface{} - defaults map[string]interface{} - kvstore map[string]interface{} - pflags map[string]FlagValue - env map[string][]string - aliases map[string]string - typeByDefValue bool - - // Store read properties on the object so that we can write back in order with comments. - // This will only be used if the configuration read is a properties file. - properties *properties.Properties - - onConfigChange func(fsnotify.Event) -} - -// New returns an initialized Viper instance. -func New() *Viper { - v := new(Viper) - v.keyDelim = "." - v.configName = "config" - v.configPermissions = os.FileMode(0644) - v.fs = afero.NewOsFs() - v.config = make(map[string]interface{}) - v.override = make(map[string]interface{}) - v.defaults = make(map[string]interface{}) - v.kvstore = make(map[string]interface{}) - v.pflags = make(map[string]FlagValue) - v.env = make(map[string][]string) - v.aliases = make(map[string]string) - v.typeByDefValue = false - - return v -} - -// Option configures Viper using the functional options paradigm popularized by Rob Pike and Dave Cheney. 
-// If you're unfamiliar with this style, -// see https://commandcenter.blogspot.com/2014/01/self-referential-functions-and-design.html and -// https://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis. -type Option interface { - apply(v *Viper) -} - -type optionFunc func(v *Viper) - -func (fn optionFunc) apply(v *Viper) { - fn(v) -} - -// KeyDelimiter sets the delimiter used for determining key parts. -// By default it's value is ".". -func KeyDelimiter(d string) Option { - return optionFunc(func(v *Viper) { - v.keyDelim = d - }) -} - -// StringReplacer applies a set of replacements to a string. -type StringReplacer interface { - // Replace returns a copy of s with all replacements performed. - Replace(s string) string -} - -// EnvKeyReplacer sets a replacer used for mapping environment variables to internal keys. -func EnvKeyReplacer(r StringReplacer) Option { - return optionFunc(func(v *Viper) { - v.envKeyReplacer = r - }) -} - -// NewWithOptions creates a new Viper instance. -func NewWithOptions(opts ...Option) *Viper { - v := New() - - for _, opt := range opts { - opt.apply(v) - } - - return v -} - -// Reset is intended for testing, will reset all to default settings. -// In the public interface for the viper package so applications -// can use it in their testing as well. -func Reset() { - v = New() - SupportedExts = []string{"json", "toml", "yaml", "yml", "properties", "props", "prop", "hcl", "tfvars", "dotenv", "env", "ini"} - SupportedRemoteProviders = []string{"etcd", "consul", "firestore"} -} - -type defaultRemoteProvider struct { - provider string - endpoint string - path string - secretKeyring string -} - -func (rp defaultRemoteProvider) Provider() string { - return rp.provider -} - -func (rp defaultRemoteProvider) Endpoint() string { - return rp.endpoint -} - -func (rp defaultRemoteProvider) Path() string { - return rp.path -} - -func (rp defaultRemoteProvider) SecretKeyring() string { - return rp.secretKeyring -} - -// RemoteProvider stores the configuration necessary -// to connect to a remote key/value store. -// Optional secretKeyring to unencrypt encrypted values -// can be provided. -type RemoteProvider interface { - Provider() string - Endpoint() string - Path() string - SecretKeyring() string -} - -// SupportedExts are universally supported extensions. -var SupportedExts = []string{"json", "toml", "yaml", "yml", "properties", "props", "prop", "hcl", "tfvars", "dotenv", "env", "ini"} - -// SupportedRemoteProviders are universally supported remote providers. 
-var SupportedRemoteProviders = []string{"etcd", "consul", "firestore"} - -func OnConfigChange(run func(in fsnotify.Event)) { v.OnConfigChange(run) } -func (v *Viper) OnConfigChange(run func(in fsnotify.Event)) { - v.onConfigChange = run -} - -func WatchConfig() { v.WatchConfig() } - -func (v *Viper) WatchConfig() { - initWG := sync.WaitGroup{} - initWG.Add(1) - go func() { - watcher, err := newWatcher() - if err != nil { - log.Fatal(err) - } - defer watcher.Close() - // we have to watch the entire directory to pick up renames/atomic saves in a cross-platform way - filename, err := v.getConfigFile() - if err != nil { - log.Printf("error: %v\n", err) - initWG.Done() - return - } - - configFile := filepath.Clean(filename) - configDir, _ := filepath.Split(configFile) - realConfigFile, _ := filepath.EvalSymlinks(filename) - - eventsWG := sync.WaitGroup{} - eventsWG.Add(1) - go func() { - for { - select { - case event, ok := <-watcher.Events: - if !ok { // 'Events' channel is closed - eventsWG.Done() - return - } - currentConfigFile, _ := filepath.EvalSymlinks(filename) - // we only care about the config file with the following cases: - // 1 - if the config file was modified or created - // 2 - if the real path to the config file changed (eg: k8s ConfigMap replacement) - const writeOrCreateMask = fsnotify.Write | fsnotify.Create - if (filepath.Clean(event.Name) == configFile && - event.Op&writeOrCreateMask != 0) || - (currentConfigFile != "" && currentConfigFile != realConfigFile) { - realConfigFile = currentConfigFile - err := v.ReadInConfig() - if err != nil { - log.Printf("error reading config file: %v\n", err) - } - if v.onConfigChange != nil { - v.onConfigChange(event) - } - } else if filepath.Clean(event.Name) == configFile && - event.Op&fsnotify.Remove&fsnotify.Remove != 0 { - eventsWG.Done() - return - } - - case err, ok := <-watcher.Errors: - if ok { // 'Errors' channel is not closed - log.Printf("watcher error: %v\n", err) - } - eventsWG.Done() - return - } - } - }() - watcher.Add(configDir) - initWG.Done() // done initializing the watch in this go routine, so the parent routine can move on... - eventsWG.Wait() // now, wait for event loop to end in this go-routine... - }() - initWG.Wait() // make sure that the go routine above fully ended before returning -} - -// SetConfigFile explicitly defines the path, name and extension of the config file. -// Viper will use this and not check any of the config paths. -func SetConfigFile(in string) { v.SetConfigFile(in) } - -func (v *Viper) SetConfigFile(in string) { - if in != "" { - v.configFile = in - } -} - -// SetEnvPrefix defines a prefix that ENVIRONMENT variables will use. -// E.g. if your prefix is "spf", the env registry will look for env -// variables that start with "SPF_". -func SetEnvPrefix(in string) { v.SetEnvPrefix(in) } - -func (v *Viper) SetEnvPrefix(in string) { - if in != "" { - v.envPrefix = in - } -} - -func (v *Viper) mergeWithEnvPrefix(in string) string { - if v.envPrefix != "" { - return strings.ToUpper(v.envPrefix + "_" + in) - } - - return strings.ToUpper(in) -} - -// AllowEmptyEnv tells Viper to consider set, -// but empty environment variables as valid values instead of falling back. -// For backward compatibility reasons this is false by default. -func AllowEmptyEnv(allowEmptyEnv bool) { v.AllowEmptyEnv(allowEmptyEnv) } - -func (v *Viper) AllowEmptyEnv(allowEmptyEnv bool) { - v.allowEmptyEnv = allowEmptyEnv -} - -// TODO: should getEnv logic be moved into find(). 
Can generalize the use of -// rewriting keys many things, Ex: Get('someKey') -> some_key -// (camel case to snake case for JSON keys perhaps) - -// getEnv is a wrapper around os.Getenv which replaces characters in the original -// key. This allows env vars which have different keys than the config object -// keys. -func (v *Viper) getEnv(key string) (string, bool) { - if v.envKeyReplacer != nil { - key = v.envKeyReplacer.Replace(key) - } - - val, ok := os.LookupEnv(key) - - return val, ok && (v.allowEmptyEnv || val != "") -} - -// ConfigFileUsed returns the file used to populate the config registry. -func ConfigFileUsed() string { return v.ConfigFileUsed() } -func (v *Viper) ConfigFileUsed() string { return v.configFile } - -// AddConfigPath adds a path for Viper to search for the config file in. -// Can be called multiple times to define multiple search paths. -func AddConfigPath(in string) { v.AddConfigPath(in) } - -func (v *Viper) AddConfigPath(in string) { - if in != "" { - absin := absPathify(in) - jww.INFO.Println("adding", absin, "to paths to search") - if !stringInSlice(absin, v.configPaths) { - v.configPaths = append(v.configPaths, absin) - } - } -} - -// AddRemoteProvider adds a remote configuration source. -// Remote Providers are searched in the order they are added. -// provider is a string value: "etcd", "consul" or "firestore" are currently supported. -// endpoint is the url. etcd requires http://ip:port consul requires ip:port -// path is the path in the k/v store to retrieve configuration -// To retrieve a config file called myapp.json from /configs/myapp.json -// you should set path to /configs and set config name (SetConfigName()) to -// "myapp" -func AddRemoteProvider(provider, endpoint, path string) error { - return v.AddRemoteProvider(provider, endpoint, path) -} - -func (v *Viper) AddRemoteProvider(provider, endpoint, path string) error { - if !stringInSlice(provider, SupportedRemoteProviders) { - return UnsupportedRemoteProviderError(provider) - } - if provider != "" && endpoint != "" { - jww.INFO.Printf("adding %s:%s to remote provider list", provider, endpoint) - rp := &defaultRemoteProvider{ - endpoint: endpoint, - provider: provider, - path: path, - } - if !v.providerPathExists(rp) { - v.remoteProviders = append(v.remoteProviders, rp) - } - } - return nil -} - -// AddSecureRemoteProvider adds a remote configuration source. -// Secure Remote Providers are searched in the order they are added. -// provider is a string value: "etcd", "consul" or "firestore" are currently supported. -// endpoint is the url. etcd requires http://ip:port consul requires ip:port -// secretkeyring is the filepath to your openpgp secret keyring. e.g. 
/etc/secrets/myring.gpg -// path is the path in the k/v store to retrieve configuration -// To retrieve a config file called myapp.json from /configs/myapp.json -// you should set path to /configs and set config name (SetConfigName()) to -// "myapp" -// Secure Remote Providers are implemented with github.com/bketelsen/crypt -func AddSecureRemoteProvider(provider, endpoint, path, secretkeyring string) error { - return v.AddSecureRemoteProvider(provider, endpoint, path, secretkeyring) -} - -func (v *Viper) AddSecureRemoteProvider(provider, endpoint, path, secretkeyring string) error { - if !stringInSlice(provider, SupportedRemoteProviders) { - return UnsupportedRemoteProviderError(provider) - } - if provider != "" && endpoint != "" { - jww.INFO.Printf("adding %s:%s to remote provider list", provider, endpoint) - rp := &defaultRemoteProvider{ - endpoint: endpoint, - provider: provider, - path: path, - secretKeyring: secretkeyring, - } - if !v.providerPathExists(rp) { - v.remoteProviders = append(v.remoteProviders, rp) - } - } - return nil -} - -func (v *Viper) providerPathExists(p *defaultRemoteProvider) bool { - for _, y := range v.remoteProviders { - if reflect.DeepEqual(y, p) { - return true - } - } - return false -} - -// searchMap recursively searches for a value for path in source map. -// Returns nil if not found. -// Note: This assumes that the path entries and map keys are lower cased. -func (v *Viper) searchMap(source map[string]interface{}, path []string) interface{} { - if len(path) == 0 { - return source - } - - next, ok := source[path[0]] - if ok { - // Fast path - if len(path) == 1 { - return next - } - - // Nested case - switch next.(type) { - case map[interface{}]interface{}: - return v.searchMap(cast.ToStringMap(next), path[1:]) - case map[string]interface{}: - // Type assertion is safe here since it is only reached - // if the type of `next` is the same as the type being asserted - return v.searchMap(next.(map[string]interface{}), path[1:]) - default: - // got a value but nested key expected, return "nil" for not found - return nil - } - } - return nil -} - -// searchIndexableWithPathPrefixes recursively searches for a value for path in source map/slice. -// -// While searchMap() considers each path element as a single map key or slice index, this -// function searches for, and prioritizes, merged path elements. -// e.g., if in the source, "foo" is defined with a sub-key "bar", and "foo.bar" -// is also defined, this latter value is returned for path ["foo", "bar"]. -// -// This should be useful only at config level (other maps may not contain dots -// in their keys). -// -// Note: This assumes that the path entries and map keys are lower cased. 
-func (v *Viper) searchIndexableWithPathPrefixes(source interface{}, path []string) interface{} { - if len(path) == 0 { - return source - } - - // search for path prefixes, starting from the longest one - for i := len(path); i > 0; i-- { - prefixKey := strings.ToLower(strings.Join(path[0:i], v.keyDelim)) - - var val interface{} - switch sourceIndexable := source.(type) { - case []interface{}: - val = v.searchSliceWithPathPrefixes(sourceIndexable, prefixKey, i, path) - case map[string]interface{}: - val = v.searchMapWithPathPrefixes(sourceIndexable, prefixKey, i, path) - } - if val != nil { - return val - } - } - - // not found - return nil -} - -// searchSliceWithPathPrefixes searches for a value for path in sourceSlice -// -// This function is part of the searchIndexableWithPathPrefixes recurring search and -// should not be called directly from functions other than searchIndexableWithPathPrefixes. -func (v *Viper) searchSliceWithPathPrefixes( - sourceSlice []interface{}, - prefixKey string, - pathIndex int, - path []string, -) interface{} { - // if the prefixKey is not a number or it is out of bounds of the slice - index, err := strconv.Atoi(prefixKey) - if err != nil || len(sourceSlice) <= index { - return nil - } - - next := sourceSlice[index] - - // Fast path - if pathIndex == len(path) { - return next - } - - switch n := next.(type) { - case map[interface{}]interface{}: - return v.searchIndexableWithPathPrefixes(cast.ToStringMap(n), path[pathIndex:]) - case map[string]interface{}, []interface{}: - return v.searchIndexableWithPathPrefixes(n, path[pathIndex:]) - default: - // got a value but nested key expected, do nothing and look for next prefix - } - - // not found - return nil -} - -// searchMapWithPathPrefixes searches for a value for path in sourceMap -// -// This function is part of the searchIndexableWithPathPrefixes recurring search and -// should not be called directly from functions other than searchIndexableWithPathPrefixes. -func (v *Viper) searchMapWithPathPrefixes( - sourceMap map[string]interface{}, - prefixKey string, - pathIndex int, - path []string, -) interface{} { - next, ok := sourceMap[prefixKey] - if !ok { - return nil - } - - // Fast path - if pathIndex == len(path) { - return next - } - - // Nested case - switch n := next.(type) { - case map[interface{}]interface{}: - return v.searchIndexableWithPathPrefixes(cast.ToStringMap(n), path[pathIndex:]) - case map[string]interface{}, []interface{}: - return v.searchIndexableWithPathPrefixes(n, path[pathIndex:]) - default: - // got a value but nested key expected, do nothing and look for next prefix - } - - // not found - return nil -} - -// isPathShadowedInDeepMap makes sure the given path is not shadowed somewhere -// on its path in the map. -// e.g., if "foo.bar" has a value in the given map, it “shadows†-// "foo.bar.baz" in a lower-priority map -func (v *Viper) isPathShadowedInDeepMap(path []string, m map[string]interface{}) string { - var parentVal interface{} - for i := 1; i < len(path); i++ { - parentVal = v.searchMap(m, path[0:i]) - if parentVal == nil { - // not found, no need to add more path elements - return "" - } - switch parentVal.(type) { - case map[interface{}]interface{}: - continue - case map[string]interface{}: - continue - default: - // parentVal is a regular value which shadows "path" - return strings.Join(path[0:i], v.keyDelim) - } - } - return "" -} - -// isPathShadowedInFlatMap makes sure the given path is not shadowed somewhere -// in a sub-path of the map. 
-// e.g., if "foo.bar" has a value in the given map, it “shadows†-// "foo.bar.baz" in a lower-priority map -func (v *Viper) isPathShadowedInFlatMap(path []string, mi interface{}) string { - // unify input map - var m map[string]interface{} - switch mi.(type) { - case map[string]string, map[string]FlagValue: - m = cast.ToStringMap(mi) - default: - return "" - } - - // scan paths - var parentKey string - for i := 1; i < len(path); i++ { - parentKey = strings.Join(path[0:i], v.keyDelim) - if _, ok := m[parentKey]; ok { - return parentKey - } - } - return "" -} - -// isPathShadowedInAutoEnv makes sure the given path is not shadowed somewhere -// in the environment, when automatic env is on. -// e.g., if "foo.bar" has a value in the environment, it “shadows†-// "foo.bar.baz" in a lower-priority map -func (v *Viper) isPathShadowedInAutoEnv(path []string) string { - var parentKey string - for i := 1; i < len(path); i++ { - parentKey = strings.Join(path[0:i], v.keyDelim) - if _, ok := v.getEnv(v.mergeWithEnvPrefix(parentKey)); ok { - return parentKey - } - } - return "" -} - -// SetTypeByDefaultValue enables or disables the inference of a key value's -// type when the Get function is used based upon a key's default value as -// opposed to the value returned based on the normal fetch logic. -// -// For example, if a key has a default value of []string{} and the same key -// is set via an environment variable to "a b c", a call to the Get function -// would return a string slice for the key if the key's type is inferred by -// the default value and the Get function would return: -// -// []string {"a", "b", "c"} -// -// Otherwise the Get function would return: -// -// "a b c" -func SetTypeByDefaultValue(enable bool) { v.SetTypeByDefaultValue(enable) } - -func (v *Viper) SetTypeByDefaultValue(enable bool) { - v.typeByDefValue = enable -} - -// GetViper gets the global Viper instance. -func GetViper() *Viper { - return v -} - -// Get can retrieve any value given the key to use. -// Get is case-insensitive for a key. -// Get has the behavior of returning the value associated with the first -// place from where it is set. Viper will check in the following order: -// override, flag, env, config file, key/value store, default -// -// Get returns an interface. For a specific value use one of the Get____ methods. -func Get(key string) interface{} { return v.Get(key) } - -func (v *Viper) Get(key string) interface{} { - lcaseKey := strings.ToLower(key) - val := v.find(lcaseKey, true) - if val == nil { - return nil - } - - if v.typeByDefValue { - // TODO(bep) this branch isn't covered by a single test. - valType := val - path := strings.Split(lcaseKey, v.keyDelim) - defVal := v.searchMap(v.defaults, path) - if defVal != nil { - valType = defVal - } - - switch valType.(type) { - case bool: - return cast.ToBool(val) - case string: - return cast.ToString(val) - case int32, int16, int8, int: - return cast.ToInt(val) - case uint: - return cast.ToUint(val) - case uint32: - return cast.ToUint32(val) - case uint64: - return cast.ToUint64(val) - case int64: - return cast.ToInt64(val) - case float64, float32: - return cast.ToFloat64(val) - case time.Time: - return cast.ToTime(val) - case time.Duration: - return cast.ToDuration(val) - case []string: - return cast.ToStringSlice(val) - case []int: - return cast.ToIntSlice(val) - } - } - - return val -} - -// Sub returns new Viper instance representing a sub tree of this instance. -// Sub is case-insensitive for a key. 
-func Sub(key string) *Viper { return v.Sub(key) } - -func (v *Viper) Sub(key string) *Viper { - subv := New() - data := v.Get(key) - if data == nil { - return nil - } - - if reflect.TypeOf(data).Kind() == reflect.Map { - subv.config = cast.ToStringMap(data) - return subv - } - return nil -} - -// GetString returns the value associated with the key as a string. -func GetString(key string) string { return v.GetString(key) } - -func (v *Viper) GetString(key string) string { - return cast.ToString(v.Get(key)) -} - -// GetBool returns the value associated with the key as a boolean. -func GetBool(key string) bool { return v.GetBool(key) } - -func (v *Viper) GetBool(key string) bool { - return cast.ToBool(v.Get(key)) -} - -// GetInt returns the value associated with the key as an integer. -func GetInt(key string) int { return v.GetInt(key) } - -func (v *Viper) GetInt(key string) int { - return cast.ToInt(v.Get(key)) -} - -// GetInt32 returns the value associated with the key as an integer. -func GetInt32(key string) int32 { return v.GetInt32(key) } - -func (v *Viper) GetInt32(key string) int32 { - return cast.ToInt32(v.Get(key)) -} - -// GetInt64 returns the value associated with the key as an integer. -func GetInt64(key string) int64 { return v.GetInt64(key) } - -func (v *Viper) GetInt64(key string) int64 { - return cast.ToInt64(v.Get(key)) -} - -// GetUint returns the value associated with the key as an unsigned integer. -func GetUint(key string) uint { return v.GetUint(key) } - -func (v *Viper) GetUint(key string) uint { - return cast.ToUint(v.Get(key)) -} - -// GetUint32 returns the value associated with the key as an unsigned integer. -func GetUint32(key string) uint32 { return v.GetUint32(key) } - -func (v *Viper) GetUint32(key string) uint32 { - return cast.ToUint32(v.Get(key)) -} - -// GetUint64 returns the value associated with the key as an unsigned integer. -func GetUint64(key string) uint64 { return v.GetUint64(key) } - -func (v *Viper) GetUint64(key string) uint64 { - return cast.ToUint64(v.Get(key)) -} - -// GetFloat64 returns the value associated with the key as a float64. -func GetFloat64(key string) float64 { return v.GetFloat64(key) } - -func (v *Viper) GetFloat64(key string) float64 { - return cast.ToFloat64(v.Get(key)) -} - -// GetTime returns the value associated with the key as time. -func GetTime(key string) time.Time { return v.GetTime(key) } - -func (v *Viper) GetTime(key string) time.Time { - return cast.ToTime(v.Get(key)) -} - -// GetDuration returns the value associated with the key as a duration. -func GetDuration(key string) time.Duration { return v.GetDuration(key) } - -func (v *Viper) GetDuration(key string) time.Duration { - return cast.ToDuration(v.Get(key)) -} - -// GetIntSlice returns the value associated with the key as a slice of int values. -func GetIntSlice(key string) []int { return v.GetIntSlice(key) } - -func (v *Viper) GetIntSlice(key string) []int { - return cast.ToIntSlice(v.Get(key)) -} - -// GetStringSlice returns the value associated with the key as a slice of strings. -func GetStringSlice(key string) []string { return v.GetStringSlice(key) } - -func (v *Viper) GetStringSlice(key string) []string { - return cast.ToStringSlice(v.Get(key)) -} - -// GetStringMap returns the value associated with the key as a map of interfaces. 
-func GetStringMap(key string) map[string]interface{} { return v.GetStringMap(key) } - -func (v *Viper) GetStringMap(key string) map[string]interface{} { - return cast.ToStringMap(v.Get(key)) -} - -// GetStringMapString returns the value associated with the key as a map of strings. -func GetStringMapString(key string) map[string]string { return v.GetStringMapString(key) } - -func (v *Viper) GetStringMapString(key string) map[string]string { - return cast.ToStringMapString(v.Get(key)) -} - -// GetStringMapStringSlice returns the value associated with the key as a map to a slice of strings. -func GetStringMapStringSlice(key string) map[string][]string { return v.GetStringMapStringSlice(key) } - -func (v *Viper) GetStringMapStringSlice(key string) map[string][]string { - return cast.ToStringMapStringSlice(v.Get(key)) -} - -// GetSizeInBytes returns the size of the value associated with the given key -// in bytes. -func GetSizeInBytes(key string) uint { return v.GetSizeInBytes(key) } - -func (v *Viper) GetSizeInBytes(key string) uint { - sizeStr := cast.ToString(v.Get(key)) - return parseSizeInBytes(sizeStr) -} - -// UnmarshalKey takes a single key and unmarshals it into a Struct. -func UnmarshalKey(key string, rawVal interface{}, opts ...DecoderConfigOption) error { - return v.UnmarshalKey(key, rawVal, opts...) -} - -func (v *Viper) UnmarshalKey(key string, rawVal interface{}, opts ...DecoderConfigOption) error { - return decode(v.Get(key), defaultDecoderConfig(rawVal, opts...)) -} - -// Unmarshal unmarshals the config into a Struct. Make sure that the tags -// on the fields of the structure are properly set. -func Unmarshal(rawVal interface{}, opts ...DecoderConfigOption) error { - return v.Unmarshal(rawVal, opts...) -} - -func (v *Viper) Unmarshal(rawVal interface{}, opts ...DecoderConfigOption) error { - return decode(v.AllSettings(), defaultDecoderConfig(rawVal, opts...)) -} - -// defaultDecoderConfig returns default mapsstructure.DecoderConfig with suppot -// of time.Duration values & string slices -func defaultDecoderConfig(output interface{}, opts ...DecoderConfigOption) *mapstructure.DecoderConfig { - c := &mapstructure.DecoderConfig{ - Metadata: nil, - Result: output, - WeaklyTypedInput: true, - DecodeHook: mapstructure.ComposeDecodeHookFunc( - mapstructure.StringToTimeDurationHookFunc(), - mapstructure.StringToSliceHookFunc(","), - ), - } - for _, opt := range opts { - opt(c) - } - return c -} - -// A wrapper around mapstructure.Decode that mimics the WeakDecode functionality -func decode(input interface{}, config *mapstructure.DecoderConfig) error { - decoder, err := mapstructure.NewDecoder(config) - if err != nil { - return err - } - return decoder.Decode(input) -} - -// UnmarshalExact unmarshals the config into a Struct, erroring if a field is nonexistent -// in the destination struct. -func UnmarshalExact(rawVal interface{}, opts ...DecoderConfigOption) error { - return v.UnmarshalExact(rawVal, opts...) -} - -func (v *Viper) UnmarshalExact(rawVal interface{}, opts ...DecoderConfigOption) error { - config := defaultDecoderConfig(rawVal, opts...) - config.ErrorUnused = true - - return decode(v.AllSettings(), config) -} - -// BindPFlags binds a full flag set to the configuration, using each flag's long -// name as the config key. -func BindPFlags(flags *pflag.FlagSet) error { return v.BindPFlags(flags) } - -func (v *Viper) BindPFlags(flags *pflag.FlagSet) error { - return v.BindFlagValues(pflagValueSet{flags}) -} - -// BindPFlag binds a specific key to a pflag (as used by cobra). 
-// Example (where serverCmd is a Cobra instance): -// -// serverCmd.Flags().Int("port", 1138, "Port to run Application server on") -// Viper.BindPFlag("port", serverCmd.Flags().Lookup("port")) -// -func BindPFlag(key string, flag *pflag.Flag) error { return v.BindPFlag(key, flag) } - -func (v *Viper) BindPFlag(key string, flag *pflag.Flag) error { - if flag == nil { - return fmt.Errorf("flag for %q is nil", key) - } - return v.BindFlagValue(key, pflagValue{flag}) -} - -// BindFlagValues binds a full FlagValue set to the configuration, using each flag's long -// name as the config key. -func BindFlagValues(flags FlagValueSet) error { return v.BindFlagValues(flags) } - -func (v *Viper) BindFlagValues(flags FlagValueSet) (err error) { - flags.VisitAll(func(flag FlagValue) { - if err = v.BindFlagValue(flag.Name(), flag); err != nil { - return - } - }) - return nil -} - -// BindFlagValue binds a specific key to a FlagValue. -func BindFlagValue(key string, flag FlagValue) error { return v.BindFlagValue(key, flag) } - -func (v *Viper) BindFlagValue(key string, flag FlagValue) error { - if flag == nil { - return fmt.Errorf("flag for %q is nil", key) - } - v.pflags[strings.ToLower(key)] = flag - return nil -} - -// BindEnv binds a Viper key to a ENV variable. -// ENV variables are case sensitive. -// If only a key is provided, it will use the env key matching the key, uppercased. -// If more arguments are provided, they will represent the env variable names that -// should bind to this key and will be taken in the specified order. -// EnvPrefix will be used when set when env name is not provided. -func BindEnv(input ...string) error { return v.BindEnv(input...) } - -func (v *Viper) BindEnv(input ...string) error { - if len(input) == 0 { - return fmt.Errorf("missing key to bind to") - } - - key := strings.ToLower(input[0]) - - if len(input) == 1 { - v.env[key] = append(v.env[key], v.mergeWithEnvPrefix(key)) - } else { - v.env[key] = append(v.env[key], input[1:]...) - } - - return nil -} - -// Given a key, find the value. -// -// Viper will check to see if an alias exists first. -// Viper will then check in the following order: -// flag, env, config file, key/value store. -// Lastly, if no value was found and flagDefault is true, and if the key -// corresponds to a flag, the flag's default value is returned. -// -// Note: this assumes a lower-cased key given. 
-func (v *Viper) find(lcaseKey string, flagDefault bool) interface{} { - var ( - val interface{} - exists bool - path = strings.Split(lcaseKey, v.keyDelim) - nested = len(path) > 1 - ) - - // compute the path through the nested maps to the nested value - if nested && v.isPathShadowedInDeepMap(path, castMapStringToMapInterface(v.aliases)) != "" { - return nil - } - - // if the requested key is an alias, then return the proper key - lcaseKey = v.realKey(lcaseKey) - path = strings.Split(lcaseKey, v.keyDelim) - nested = len(path) > 1 - - // Set() override first - val = v.searchMap(v.override, path) - if val != nil { - return val - } - if nested && v.isPathShadowedInDeepMap(path, v.override) != "" { - return nil - } - - // PFlag override next - flag, exists := v.pflags[lcaseKey] - if exists && flag.HasChanged() { - switch flag.ValueType() { - case "int", "int8", "int16", "int32", "int64": - return cast.ToInt(flag.ValueString()) - case "bool": - return cast.ToBool(flag.ValueString()) - case "stringSlice", "stringArray": - s := strings.TrimPrefix(flag.ValueString(), "[") - s = strings.TrimSuffix(s, "]") - res, _ := readAsCSV(s) - return res - case "intSlice": - s := strings.TrimPrefix(flag.ValueString(), "[") - s = strings.TrimSuffix(s, "]") - res, _ := readAsCSV(s) - return cast.ToIntSlice(res) - case "stringToString": - return stringToStringConv(flag.ValueString()) - default: - return flag.ValueString() - } - } - if nested && v.isPathShadowedInFlatMap(path, v.pflags) != "" { - return nil - } - - // Env override next - if v.automaticEnvApplied { - // even if it hasn't been registered, if automaticEnv is used, - // check any Get request - if val, ok := v.getEnv(v.mergeWithEnvPrefix(lcaseKey)); ok { - return val - } - if nested && v.isPathShadowedInAutoEnv(path) != "" { - return nil - } - } - envkeys, exists := v.env[lcaseKey] - if exists { - for _, envkey := range envkeys { - if val, ok := v.getEnv(envkey); ok { - return val - } - } - } - if nested && v.isPathShadowedInFlatMap(path, v.env) != "" { - return nil - } - - // Config file next - val = v.searchIndexableWithPathPrefixes(v.config, path) - if val != nil { - return val - } - if nested && v.isPathShadowedInDeepMap(path, v.config) != "" { - return nil - } - - // K/V store next - val = v.searchMap(v.kvstore, path) - if val != nil { - return val - } - if nested && v.isPathShadowedInDeepMap(path, v.kvstore) != "" { - return nil - } - - // Default next - val = v.searchMap(v.defaults, path) - if val != nil { - return val - } - if nested && v.isPathShadowedInDeepMap(path, v.defaults) != "" { - return nil - } - - if flagDefault { - // last chance: if no value is found and a flag does exist for the key, - // get the flag's default value even if the flag's value has not been set. 
- if flag, exists := v.pflags[lcaseKey]; exists { - switch flag.ValueType() { - case "int", "int8", "int16", "int32", "int64": - return cast.ToInt(flag.ValueString()) - case "bool": - return cast.ToBool(flag.ValueString()) - case "stringSlice", "stringArray": - s := strings.TrimPrefix(flag.ValueString(), "[") - s = strings.TrimSuffix(s, "]") - res, _ := readAsCSV(s) - return res - case "intSlice": - s := strings.TrimPrefix(flag.ValueString(), "[") - s = strings.TrimSuffix(s, "]") - res, _ := readAsCSV(s) - return cast.ToIntSlice(res) - case "stringToString": - return stringToStringConv(flag.ValueString()) - default: - return flag.ValueString() - } - } - // last item, no need to check shadowing - } - - return nil -} - -func readAsCSV(val string) ([]string, error) { - if val == "" { - return []string{}, nil - } - stringReader := strings.NewReader(val) - csvReader := csv.NewReader(stringReader) - return csvReader.Read() -} - -// mostly copied from pflag's implementation of this operation here https://github.com/spf13/pflag/blob/master/string_to_string.go#L79 -// alterations are: errors are swallowed, map[string]interface{} is returned in order to enable cast.ToStringMap -func stringToStringConv(val string) interface{} { - val = strings.Trim(val, "[]") - // An empty string would cause an empty map - if len(val) == 0 { - return map[string]interface{}{} - } - r := csv.NewReader(strings.NewReader(val)) - ss, err := r.Read() - if err != nil { - return nil - } - out := make(map[string]interface{}, len(ss)) - for _, pair := range ss { - kv := strings.SplitN(pair, "=", 2) - if len(kv) != 2 { - return nil - } - out[kv[0]] = kv[1] - } - return out -} - -// IsSet checks to see if the key has been set in any of the data locations. -// IsSet is case-insensitive for a key. -func IsSet(key string) bool { return v.IsSet(key) } - -func (v *Viper) IsSet(key string) bool { - lcaseKey := strings.ToLower(key) - val := v.find(lcaseKey, false) - return val != nil -} - -// AutomaticEnv makes Viper check if environment variables match any of the existing keys -// (config, default or flags). If matching env vars are found, they are loaded into Viper. -func AutomaticEnv() { v.AutomaticEnv() } - -func (v *Viper) AutomaticEnv() { - v.automaticEnvApplied = true -} - -// SetEnvKeyReplacer sets the strings.Replacer on the viper object -// Useful for mapping an environmental variable to a key that does -// not match it. -func SetEnvKeyReplacer(r *strings.Replacer) { v.SetEnvKeyReplacer(r) } - -func (v *Viper) SetEnvKeyReplacer(r *strings.Replacer) { - v.envKeyReplacer = r -} - -// RegisterAlias creates an alias that provides another accessor for the same key. -// This enables one to change a name without breaking the application. -func RegisterAlias(alias string, key string) { v.RegisterAlias(alias, key) } - -func (v *Viper) RegisterAlias(alias string, key string) { - v.registerAlias(alias, strings.ToLower(key)) -} - -func (v *Viper) registerAlias(alias string, key string) { - alias = strings.ToLower(alias) - if alias != key && alias != v.realKey(key) { - _, exists := v.aliases[alias] - - if !exists { - // if we alias something that exists in one of the maps to another - // name, we'll never be able to get that value using the original - // name, so move the config value to the new realkey. 
- if val, ok := v.config[alias]; ok { - delete(v.config, alias) - v.config[key] = val - } - if val, ok := v.kvstore[alias]; ok { - delete(v.kvstore, alias) - v.kvstore[key] = val - } - if val, ok := v.defaults[alias]; ok { - delete(v.defaults, alias) - v.defaults[key] = val - } - if val, ok := v.override[alias]; ok { - delete(v.override, alias) - v.override[key] = val - } - v.aliases[alias] = key - } - } else { - jww.WARN.Println("Creating circular reference alias", alias, key, v.realKey(key)) - } -} - -func (v *Viper) realKey(key string) string { - newkey, exists := v.aliases[key] - if exists { - jww.DEBUG.Println("Alias", key, "to", newkey) - return v.realKey(newkey) - } - return key -} - -// InConfig checks to see if the given key (or an alias) is in the config file. -func InConfig(key string) bool { return v.InConfig(key) } - -func (v *Viper) InConfig(key string) bool { - lcaseKey := strings.ToLower(key) - - // if the requested key is an alias, then return the proper key - lcaseKey = v.realKey(lcaseKey) - path := strings.Split(lcaseKey, v.keyDelim) - - return v.searchIndexableWithPathPrefixes(v.config, path) != nil -} - -// SetDefault sets the default value for this key. -// SetDefault is case-insensitive for a key. -// Default only used when no value is provided by the user via flag, config or ENV. -func SetDefault(key string, value interface{}) { v.SetDefault(key, value) } - -func (v *Viper) SetDefault(key string, value interface{}) { - // If alias passed in, then set the proper default - key = v.realKey(strings.ToLower(key)) - value = toCaseInsensitiveValue(value) - - path := strings.Split(key, v.keyDelim) - lastKey := strings.ToLower(path[len(path)-1]) - deepestMap := deepSearch(v.defaults, path[0:len(path)-1]) - - // set innermost value - deepestMap[lastKey] = value -} - -// Set sets the value for the key in the override register. -// Set is case-insensitive for a key. -// Will be used instead of values obtained via -// flags, config file, ENV, default, or key/value store. -func Set(key string, value interface{}) { v.Set(key, value) } - -func (v *Viper) Set(key string, value interface{}) { - // If alias passed in, then set the proper override - key = v.realKey(strings.ToLower(key)) - value = toCaseInsensitiveValue(value) - - path := strings.Split(key, v.keyDelim) - lastKey := strings.ToLower(path[len(path)-1]) - deepestMap := deepSearch(v.override, path[0:len(path)-1]) - - // set innermost value - deepestMap[lastKey] = value -} - -// ReadInConfig will discover and load the configuration file from disk -// and key/value stores, searching in one of the defined paths. -func ReadInConfig() error { return v.ReadInConfig() } - -func (v *Viper) ReadInConfig() error { - jww.INFO.Println("Attempting to read in config file") - filename, err := v.getConfigFile() - if err != nil { - return err - } - - if !stringInSlice(v.getConfigType(), SupportedExts) { - return UnsupportedConfigError(v.getConfigType()) - } - - jww.DEBUG.Println("Reading file: ", filename) - file, err := afero.ReadFile(v.fs, filename) - if err != nil { - return err - } - - config := make(map[string]interface{}) - - err = v.unmarshalReader(bytes.NewReader(file), config) - if err != nil { - return err - } - - v.config = config - return nil -} - -// MergeInConfig merges a new configuration with an existing config. 
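`ReadInConfig` above leans on the search-path machinery (`findConfigFile` and `searchInPath`, later in this file) to locate a file. A hedged sketch of the usual discovery setup; paths and names are examples, and `AddConfigPath` is the standard Viper setter for search paths:

```go
package main

import (
	"errors"
	"log"

	"github.com/spf13/viper"
)

func main() {
	v := viper.New()
	v.SetConfigName("config") // matched against SupportedExts by searchInPath
	v.AddConfigPath("/etc/myapp")
	v.AddConfigPath(".")

	if err := v.ReadInConfig(); err != nil {
		var notFound viper.ConfigFileNotFoundError
		if errors.As(err, &notFound) {
			log.Println("no config file found, continuing with defaults")
		} else {
			log.Fatal(err) // unreadable file or parse error
		}
	}
}
```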
-func MergeInConfig() error { return v.MergeInConfig() } - -func (v *Viper) MergeInConfig() error { - jww.INFO.Println("Attempting to merge in config file") - filename, err := v.getConfigFile() - if err != nil { - return err - } - - if !stringInSlice(v.getConfigType(), SupportedExts) { - return UnsupportedConfigError(v.getConfigType()) - } - - file, err := afero.ReadFile(v.fs, filename) - if err != nil { - return err - } - - return v.MergeConfig(bytes.NewReader(file)) -} - -// ReadConfig will read a configuration file, setting existing keys to nil if the -// key does not exist in the file. -func ReadConfig(in io.Reader) error { return v.ReadConfig(in) } - -func (v *Viper) ReadConfig(in io.Reader) error { - v.config = make(map[string]interface{}) - return v.unmarshalReader(in, v.config) -} - -// MergeConfig merges a new configuration with an existing config. -func MergeConfig(in io.Reader) error { return v.MergeConfig(in) } - -func (v *Viper) MergeConfig(in io.Reader) error { - cfg := make(map[string]interface{}) - if err := v.unmarshalReader(in, cfg); err != nil { - return err - } - return v.MergeConfigMap(cfg) -} - -// MergeConfigMap merges the configuration from the map given with an existing config. -// Note that the map given may be modified. -func MergeConfigMap(cfg map[string]interface{}) error { return v.MergeConfigMap(cfg) } - -func (v *Viper) MergeConfigMap(cfg map[string]interface{}) error { - if v.config == nil { - v.config = make(map[string]interface{}) - } - insensitiviseMap(cfg) - mergeMaps(cfg, v.config, nil) - return nil -} - -// WriteConfig writes the current configuration to a file. -func WriteConfig() error { return v.WriteConfig() } - -func (v *Viper) WriteConfig() error { - filename, err := v.getConfigFile() - if err != nil { - return err - } - return v.writeConfig(filename, true) -} - -// SafeWriteConfig writes current configuration to file only if the file does not exist. -func SafeWriteConfig() error { return v.SafeWriteConfig() } - -func (v *Viper) SafeWriteConfig() error { - if len(v.configPaths) < 1 { - return errors.New("missing configuration for 'configPath'") - } - return v.SafeWriteConfigAs(filepath.Join(v.configPaths[0], v.configName+"."+v.configType)) -} - -// WriteConfigAs writes current configuration to a given filename. -func WriteConfigAs(filename string) error { return v.WriteConfigAs(filename) } - -func (v *Viper) WriteConfigAs(filename string) error { - return v.writeConfig(filename, true) -} - -// SafeWriteConfigAs writes current configuration to a given filename if it does not exist. 
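The contrast between `ReadConfig` (replace the config register) and `MergeConfig` (overlay onto it) above is easiest to see side by side. A minimal sketch with JSON readers; keys are illustrative:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/spf13/viper"
)

func main() {
	v := viper.New()
	v.SetConfigType("json")

	// ReadConfig replaces the whole config register.
	_ = v.ReadConfig(strings.NewReader(`{"log": {"level": "info", "json": true}}`))

	// MergeConfig only overlays the keys present in the new source.
	_ = v.MergeConfig(strings.NewReader(`{"log": {"level": "debug"}}`))

	fmt.Println(v.GetString("log.level")) // debug
	fmt.Println(v.GetBool("log.json"))    // true (preserved by the merge)
}
```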
-func SafeWriteConfigAs(filename string) error { return v.SafeWriteConfigAs(filename) } - -func (v *Viper) SafeWriteConfigAs(filename string) error { - alreadyExists, err := afero.Exists(v.fs, filename) - if alreadyExists && err == nil { - return ConfigFileAlreadyExistsError(filename) - } - return v.writeConfig(filename, false) -} - -func (v *Viper) writeConfig(filename string, force bool) error { - jww.INFO.Println("Attempting to write configuration to file.") - var configType string - - ext := filepath.Ext(filename) - if ext != "" && ext != filepath.Base(filename) { - configType = ext[1:] - } else { - configType = v.configType - } - if configType == "" { - return fmt.Errorf("config type could not be determined for %s", filename) - } - - if !stringInSlice(configType, SupportedExts) { - return UnsupportedConfigError(configType) - } - if v.config == nil { - v.config = make(map[string]interface{}) - } - flags := os.O_CREATE | os.O_TRUNC | os.O_WRONLY - if !force { - flags |= os.O_EXCL - } - f, err := v.fs.OpenFile(filename, flags, v.configPermissions) - if err != nil { - return err - } - defer f.Close() - - if err := v.marshalWriter(f, configType); err != nil { - return err - } - - return f.Sync() -} - -// Unmarshal a Reader into a map. -// Should probably be an unexported function. -func unmarshalReader(in io.Reader, c map[string]interface{}) error { - return v.unmarshalReader(in, c) -} - -func (v *Viper) unmarshalReader(in io.Reader, c map[string]interface{}) error { - buf := new(bytes.Buffer) - buf.ReadFrom(in) - - switch format := strings.ToLower(v.getConfigType()); format { - case "yaml", "yml", "json", "toml", "hcl", "tfvars": - err := decoderRegistry.Decode(format, buf.Bytes(), &c) - if err != nil { - return ConfigParseError{err} - } - - case "dotenv", "env": - env, err := gotenv.StrictParse(buf) - if err != nil { - return ConfigParseError{err} - } - for k, v := range env { - c[k] = v - } - - case "properties", "props", "prop": - v.properties = properties.NewProperties() - var err error - if v.properties, err = properties.Load(buf.Bytes(), properties.UTF8); err != nil { - return ConfigParseError{err} - } - for _, key := range v.properties.Keys() { - value, _ := v.properties.Get(key) - // recursively build nested maps - path := strings.Split(key, ".") - lastKey := strings.ToLower(path[len(path)-1]) - deepestMap := deepSearch(c, path[0:len(path)-1]) - // set innermost value - deepestMap[lastKey] = value - } - - case "ini": - cfg := ini.Empty(v.iniLoadOptions) - err := cfg.Append(buf.Bytes()) - if err != nil { - return ConfigParseError{err} - } - sections := cfg.Sections() - for i := 0; i < len(sections); i++ { - section := sections[i] - keys := section.Keys() - for j := 0; j < len(keys); j++ { - key := keys[j] - value := cfg.Section(section.Name()).Key(key.Name()).String() - c[section.Name()+"."+key.Name()] = value - } - } - } - - insensitiviseMap(c) - return nil -} - -// Marshal a map into Writer. 
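A short sketch of the write path implemented by `writeConfig` above: the file extension selects the encoder, and the `Safe` variants add `os.O_EXCL` so an existing file is never clobbered. Paths are illustrative:

```go
package main

import (
	"fmt"
	"log"

	"github.com/spf13/viper"
)

func main() {
	v := viper.New()
	v.Set("server.port", 8080)

	// The ".yaml" extension picks the encoder in writeConfig.
	if err := v.WriteConfigAs("/tmp/app.yaml"); err != nil {
		log.Fatal(err)
	}

	// Without force, the open uses os.O_EXCL and refuses to overwrite.
	if err := v.SafeWriteConfigAs("/tmp/app.yaml"); err != nil {
		fmt.Println(err) // ConfigFileAlreadyExistsError
	}
}
```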
-func (v *Viper) marshalWriter(f afero.File, configType string) error { - c := v.AllSettings() - switch configType { - case "yaml", "yml", "json", "toml", "hcl", "tfvars": - b, err := encoderRegistry.Encode(configType, c) - if err != nil { - return ConfigMarshalError{err} - } - - _, err = f.WriteString(string(b)) - if err != nil { - return ConfigMarshalError{err} - } - - case "prop", "props", "properties": - if v.properties == nil { - v.properties = properties.NewProperties() - } - p := v.properties - for _, key := range v.AllKeys() { - _, _, err := p.Set(key, v.GetString(key)) - if err != nil { - return ConfigMarshalError{err} - } - } - _, err := p.WriteComment(f, "#", properties.UTF8) - if err != nil { - return ConfigMarshalError{err} - } - - case "dotenv", "env": - lines := []string{} - for _, key := range v.AllKeys() { - envName := strings.ToUpper(strings.Replace(key, ".", "_", -1)) - val := v.Get(key) - lines = append(lines, fmt.Sprintf("%v=%v", envName, val)) - } - s := strings.Join(lines, "\n") - if _, err := f.WriteString(s); err != nil { - return ConfigMarshalError{err} - } - - case "ini": - keys := v.AllKeys() - cfg := ini.Empty() - ini.PrettyFormat = false - for i := 0; i < len(keys); i++ { - key := keys[i] - lastSep := strings.LastIndex(key, ".") - sectionName := key[:(lastSep)] - keyName := key[(lastSep + 1):] - if sectionName == "default" { - sectionName = "" - } - cfg.Section(sectionName).Key(keyName).SetValue(v.GetString(key)) - } - cfg.WriteTo(f) - } - return nil -} - -func keyExists(k string, m map[string]interface{}) string { - lk := strings.ToLower(k) - for mk := range m { - lmk := strings.ToLower(mk) - if lmk == lk { - return mk - } - } - return "" -} - -func castToMapStringInterface( - src map[interface{}]interface{}) map[string]interface{} { - tgt := map[string]interface{}{} - for k, v := range src { - tgt[fmt.Sprintf("%v", k)] = v - } - return tgt -} - -func castMapStringSliceToMapInterface(src map[string][]string) map[string]interface{} { - tgt := map[string]interface{}{} - for k, v := range src { - tgt[k] = v - } - return tgt -} - -func castMapStringToMapInterface(src map[string]string) map[string]interface{} { - tgt := map[string]interface{}{} - for k, v := range src { - tgt[k] = v - } - return tgt -} - -func castMapFlagToMapInterface(src map[string]FlagValue) map[string]interface{} { - tgt := map[string]interface{}{} - for k, v := range src { - tgt[k] = v - } - return tgt -} - -// mergeMaps merges two maps. The `itgt` parameter is for handling go-yaml's -// insistence on parsing nested structures as `map[interface{}]interface{}` -// instead of using a `string` as the key for nest structures beyond one level -// deep. Both map types are supported as there is a go-yaml fork that uses -// `map[string]interface{}` instead. 
-func mergeMaps( - src, tgt map[string]interface{}, itgt map[interface{}]interface{}) { - for sk, sv := range src { - tk := keyExists(sk, tgt) - if tk == "" { - jww.TRACE.Printf("tk=\"\", tgt[%s]=%v", sk, sv) - tgt[sk] = sv - if itgt != nil { - itgt[sk] = sv - } - continue - } - - tv, ok := tgt[tk] - if !ok { - jww.TRACE.Printf("tgt[%s] != ok, tgt[%s]=%v", tk, sk, sv) - tgt[sk] = sv - if itgt != nil { - itgt[sk] = sv - } - continue - } - - svType := reflect.TypeOf(sv) - tvType := reflect.TypeOf(tv) - if tvType != nil && svType != tvType { // Allow for the target to be nil - jww.ERROR.Printf( - "svType != tvType; key=%s, st=%v, tt=%v, sv=%v, tv=%v", - sk, svType, tvType, sv, tv) - continue - } - - jww.TRACE.Printf("processing key=%s, st=%v, tt=%v, sv=%v, tv=%v", - sk, svType, tvType, sv, tv) - - switch ttv := tv.(type) { - case map[interface{}]interface{}: - jww.TRACE.Printf("merging maps (must convert)") - tsv := sv.(map[interface{}]interface{}) - ssv := castToMapStringInterface(tsv) - stv := castToMapStringInterface(ttv) - mergeMaps(ssv, stv, ttv) - case map[string]interface{}: - jww.TRACE.Printf("merging maps") - mergeMaps(sv.(map[string]interface{}), ttv, nil) - default: - jww.TRACE.Printf("setting value") - tgt[tk] = sv - if itgt != nil { - itgt[tk] = sv - } - } - } -} - -// ReadRemoteConfig attempts to get configuration from a remote source -// and read it in the remote configuration registry. -func ReadRemoteConfig() error { return v.ReadRemoteConfig() } - -func (v *Viper) ReadRemoteConfig() error { - return v.getKeyValueConfig() -} - -func WatchRemoteConfig() error { return v.WatchRemoteConfig() } -func (v *Viper) WatchRemoteConfig() error { - return v.watchKeyValueConfig() -} - -func (v *Viper) WatchRemoteConfigOnChannel() error { - return v.watchKeyValueConfigOnChannel() -} - -// Retrieve the first found remote configuration. -func (v *Viper) getKeyValueConfig() error { - if RemoteConfig == nil { - return RemoteConfigError("Enable the remote features by doing a blank import of the viper/remote package: '_ github.com/spf13/viper/remote'") - } - - for _, rp := range v.remoteProviders { - val, err := v.getRemoteConfig(rp) - if err != nil { - jww.ERROR.Printf("get remote config: %s", err) - - continue - } - - v.kvstore = val - - return nil - } - return RemoteConfigError("No Files Found") -} - -func (v *Viper) getRemoteConfig(provider RemoteProvider) (map[string]interface{}, error) { - reader, err := RemoteConfig.Get(provider) - if err != nil { - return nil, err - } - err = v.unmarshalReader(reader, v.kvstore) - return v.kvstore, err -} - -// Retrieve the first found remote configuration. -func (v *Viper) watchKeyValueConfigOnChannel() error { - for _, rp := range v.remoteProviders { - respc, _ := RemoteConfig.WatchChannel(rp) - // Todo: Add quit channel - go func(rc <-chan *RemoteResponse) { - for { - b := <-rc - reader := bytes.NewReader(b.Value) - v.unmarshalReader(reader, v.kvstore) - } - }(respc) - return nil - } - return RemoteConfigError("No Files Found") -} - -// Retrieve the first found remote configuration. 
-func (v *Viper) watchKeyValueConfig() error { - for _, rp := range v.remoteProviders { - val, err := v.watchRemoteConfig(rp) - if err != nil { - continue - } - v.kvstore = val - return nil - } - return RemoteConfigError("No Files Found") -} - -func (v *Viper) watchRemoteConfig(provider RemoteProvider) (map[string]interface{}, error) { - reader, err := RemoteConfig.Watch(provider) - if err != nil { - return nil, err - } - err = v.unmarshalReader(reader, v.kvstore) - return v.kvstore, err -} - -// AllKeys returns all keys holding a value, regardless of where they are set. -// Nested keys are returned with a v.keyDelim separator -func AllKeys() []string { return v.AllKeys() } - -func (v *Viper) AllKeys() []string { - m := map[string]bool{} - // add all paths, by order of descending priority to ensure correct shadowing - m = v.flattenAndMergeMap(m, castMapStringToMapInterface(v.aliases), "") - m = v.flattenAndMergeMap(m, v.override, "") - m = v.mergeFlatMap(m, castMapFlagToMapInterface(v.pflags)) - m = v.mergeFlatMap(m, castMapStringSliceToMapInterface(v.env)) - m = v.flattenAndMergeMap(m, v.config, "") - m = v.flattenAndMergeMap(m, v.kvstore, "") - m = v.flattenAndMergeMap(m, v.defaults, "") - - // convert set of paths to list - a := make([]string, 0, len(m)) - for x := range m { - a = append(a, x) - } - return a -} - -// flattenAndMergeMap recursively flattens the given map into a map[string]bool -// of key paths (used as a set, easier to manipulate than a []string): -// - each path is merged into a single key string, delimited with v.keyDelim -// - if a path is shadowed by an earlier value in the initial shadow map, -// it is skipped. -// The resulting set of paths is merged to the given shadow set at the same time. -func (v *Viper) flattenAndMergeMap(shadow map[string]bool, m map[string]interface{}, prefix string) map[string]bool { - if shadow != nil && prefix != "" && shadow[prefix] { - // prefix is shadowed => nothing more to flatten - return shadow - } - if shadow == nil { - shadow = make(map[string]bool) - } - - var m2 map[string]interface{} - if prefix != "" { - prefix += v.keyDelim - } - for k, val := range m { - fullKey := prefix + k - switch val.(type) { - case map[string]interface{}: - m2 = val.(map[string]interface{}) - case map[interface{}]interface{}: - m2 = cast.ToStringMap(val) - default: - // immediate value - shadow[strings.ToLower(fullKey)] = true - continue - } - // recursively merge to shadow map - shadow = v.flattenAndMergeMap(shadow, m2, fullKey) - } - return shadow -} - -// mergeFlatMap merges the given maps, excluding values of the second map -// shadowed by values from the first map. -func (v *Viper) mergeFlatMap(shadow map[string]bool, m map[string]interface{}) map[string]bool { - // scan keys -outer: - for k := range m { - path := strings.Split(k, v.keyDelim) - // scan intermediate paths - var parentKey string - for i := 1; i < len(path); i++ { - parentKey = strings.Join(path[0:i], v.keyDelim) - if shadow[parentKey] { - // path is shadowed, continue - continue outer - } - } - // add key - shadow[strings.ToLower(k)] = true - } - return shadow -} - -// AllSettings merges all settings and returns them as a map[string]interface{}. 
-func AllSettings() map[string]interface{} { return v.AllSettings() } - -func (v *Viper) AllSettings() map[string]interface{} { - m := map[string]interface{}{} - // start from the list of keys, and construct the map one value at a time - for _, k := range v.AllKeys() { - value := v.Get(k) - if value == nil { - // should not happen, since AllKeys() returns only keys holding a value, - // check just in case anything changes - continue - } - path := strings.Split(k, v.keyDelim) - lastKey := strings.ToLower(path[len(path)-1]) - deepestMap := deepSearch(m, path[0:len(path)-1]) - // set innermost value - deepestMap[lastKey] = value - } - return m -} - -// SetFs sets the filesystem to use to read configuration. -func SetFs(fs afero.Fs) { v.SetFs(fs) } - -func (v *Viper) SetFs(fs afero.Fs) { - v.fs = fs -} - -// SetConfigName sets name for the config file. -// Does not include extension. -func SetConfigName(in string) { v.SetConfigName(in) } - -func (v *Viper) SetConfigName(in string) { - if in != "" { - v.configName = in - v.configFile = "" - } -} - -// SetConfigType sets the type of the configuration returned by the -// remote source, e.g. "json". -func SetConfigType(in string) { v.SetConfigType(in) } - -func (v *Viper) SetConfigType(in string) { - if in != "" { - v.configType = in - } -} - -// SetConfigPermissions sets the permissions for the config file. -func SetConfigPermissions(perm os.FileMode) { v.SetConfigPermissions(perm) } - -func (v *Viper) SetConfigPermissions(perm os.FileMode) { - v.configPermissions = perm.Perm() -} - -// IniLoadOptions sets the load options for ini parsing. -func IniLoadOptions(in ini.LoadOptions) Option { - return optionFunc(func(v *Viper) { - v.iniLoadOptions = in - }) -} - -func (v *Viper) getConfigType() string { - if v.configType != "" { - return v.configType - } - - cf, err := v.getConfigFile() - if err != nil { - return "" - } - - ext := filepath.Ext(cf) - - if len(ext) > 1 { - return ext[1:] - } - - return "" -} - -func (v *Viper) getConfigFile() (string, error) { - if v.configFile == "" { - cf, err := v.findConfigFile() - if err != nil { - return "", err - } - v.configFile = cf - } - return v.configFile, nil -} - -func (v *Viper) searchInPath(in string) (filename string) { - jww.DEBUG.Println("Searching for config in ", in) - for _, ext := range SupportedExts { - jww.DEBUG.Println("Checking for", filepath.Join(in, v.configName+"."+ext)) - if b, _ := exists(v.fs, filepath.Join(in, v.configName+"."+ext)); b { - jww.DEBUG.Println("Found: ", filepath.Join(in, v.configName+"."+ext)) - return filepath.Join(in, v.configName+"."+ext) - } - } - - if v.configType != "" { - if b, _ := exists(v.fs, filepath.Join(in, v.configName)); b { - return filepath.Join(in, v.configName) - } - } - - return "" -} - -// Search all configPaths for any config file. -// Returns the first path that exists (and is a config file). -func (v *Viper) findConfigFile() (string, error) { - jww.INFO.Println("Searching for config in ", v.configPaths) - - for _, cp := range v.configPaths { - file := v.searchInPath(cp) - if file != "" { - return file, nil - } - } - return "", ConfigFileNotFoundError{v.configName, fmt.Sprintf("%s", v.configPaths)} -} - -// Debug prints all configuration registries for debugging -// purposes. 
-func Debug() { v.Debug() } - -func (v *Viper) Debug() { - fmt.Printf("Aliases:\n%#v\n", v.aliases) - fmt.Printf("Override:\n%#v\n", v.override) - fmt.Printf("PFlags:\n%#v\n", v.pflags) - fmt.Printf("Env:\n%#v\n", v.env) - fmt.Printf("Key/Value Store:\n%#v\n", v.kvstore) - fmt.Printf("Config:\n%#v\n", v.config) - fmt.Printf("Defaults:\n%#v\n", v.defaults) -} diff --git a/vendor/github.com/spf13/viper/watch.go b/vendor/github.com/spf13/viper/watch.go deleted file mode 100644 index c433a8fa..00000000 --- a/vendor/github.com/spf13/viper/watch.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build !js - -package viper - -import "github.com/fsnotify/fsnotify" - -type watcher = fsnotify.Watcher - -func newWatcher() (*watcher, error) { - return fsnotify.NewWatcher() -} diff --git a/vendor/github.com/spf13/viper/watch_wasm.go b/vendor/github.com/spf13/viper/watch_wasm.go deleted file mode 100644 index 8e47e6a9..00000000 --- a/vendor/github.com/spf13/viper/watch_wasm.go +++ /dev/null @@ -1,30 +0,0 @@ -// +build js,wasm - -package viper - -import ( - "errors" - - "github.com/fsnotify/fsnotify" -) - -type watcher struct { - Events chan fsnotify.Event - Errors chan error -} - -func (*watcher) Close() error { - return nil -} - -func (*watcher) Add(name string) error { - return nil -} - -func (*watcher) Remove(name string) error { - return nil -} - -func newWatcher() (*watcher, error) { - return &watcher{}, errors.New("fsnotify is not supported on WASM") -} diff --git a/vendor/github.com/stoewer/go-strcase/.gitignore b/vendor/github.com/stoewer/go-strcase/.gitignore deleted file mode 100644 index db5247b9..00000000 --- a/vendor/github.com/stoewer/go-strcase/.gitignore +++ /dev/null @@ -1,17 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -vendor -doc - -# Temporary files -*~ -*.swp - -# Editor and IDE config -.idea -*.iml -.vscode diff --git a/vendor/github.com/stoewer/go-strcase/.golangci.yml b/vendor/github.com/stoewer/go-strcase/.golangci.yml deleted file mode 100644 index 7f98d55c..00000000 --- a/vendor/github.com/stoewer/go-strcase/.golangci.yml +++ /dev/null @@ -1,26 +0,0 @@ -run: - deadline: 10m - -linters: - enable: - - dupl - - goconst - - gocyclo - - godox - - gosec - - interfacer - - lll - - maligned - - misspell - - prealloc - - stylecheck - - unconvert - - unparam - - errcheck - - golint - - gofmt - disable: [] - fast: false - -issues: - exclude-use-default: false diff --git a/vendor/github.com/stoewer/go-strcase/LICENSE b/vendor/github.com/stoewer/go-strcase/LICENSE deleted file mode 100644 index a105a381..00000000 --- a/vendor/github.com/stoewer/go-strcase/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2017, Adrian Stoewer - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/stoewer/go-strcase/README.md b/vendor/github.com/stoewer/go-strcase/README.md deleted file mode 100644 index 0e8635d8..00000000 --- a/vendor/github.com/stoewer/go-strcase/README.md +++ /dev/null @@ -1,50 +0,0 @@ -[![CircleCI](https://circleci.com/gh/stoewer/go-strcase/tree/master.svg?style=svg)](https://circleci.com/gh/stoewer/go-strcase/tree/master) -[![codecov](https://codecov.io/gh/stoewer/go-strcase/branch/master/graph/badge.svg)](https://codecov.io/gh/stoewer/go-strcase) -[![GoDoc](https://godoc.org/github.com/stoewer/go-strcase?status.svg)](https://pkg.go.dev/github.com/stoewer/go-strcase) ---- - -Go strcase -========== - -The package `strcase` converts between different kinds of naming formats such as camel case -(`CamelCase`), snake case (`snake_case`) or kebab case (`kebab-case`). -The package is designed to work only with strings consisting of standard ASCII letters. -Unicode is currently not supported. - -Versioning and stability ------------------------- - -Although the master branch is supposed to remain always backward compatible, the repository -contains version tags in order to support vendoring tools. -The tag names follow semantic versioning conventions and have the following format `v1.0.0`. -This package supports Go modules introduced with version 1.11. - -Example -------- - -```go -import "github.com/stoewer/go-strcase" - -var snake = strcase.SnakeCase("CamelCase") -``` - -Dependencies ------------- - -### Build dependencies - -* none - -### Test dependencies - -* `github.com/stretchr/testify` - -Run linters and unit tests --------------------------- - -To run the static code analysis, linters and tests use the following commands: - -``` -golangci-lint run --config .golangci.yml ./... -go test ./... -``` diff --git a/vendor/github.com/stoewer/go-strcase/camel.go b/vendor/github.com/stoewer/go-strcase/camel.go deleted file mode 100644 index 5c233cc8..00000000 --- a/vendor/github.com/stoewer/go-strcase/camel.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright (c) 2017, A. Stoewer -// All rights reserved. - -package strcase - -import ( - "strings" -) - -// UpperCamelCase converts a string into camel case starting with a upper case letter. -func UpperCamelCase(s string) string { - return camelCase(s, true) -} - -// LowerCamelCase converts a string into camel case starting with a lower case letter. -func LowerCamelCase(s string) string { - return camelCase(s, false) -} - -func camelCase(s string, upper bool) string { - s = strings.TrimSpace(s) - buffer := make([]rune, 0, len(s)) - - stringIter(s, func(prev, curr, next rune) { - if !isDelimiter(curr) { - if isDelimiter(prev) || (upper && prev == 0) { - buffer = append(buffer, toUpper(curr)) - } else if isLower(prev) { - buffer = append(buffer, curr) - } else { - buffer = append(buffer, toLower(curr)) - } - } - }) - - return string(buffer) -} diff --git a/vendor/github.com/stoewer/go-strcase/doc.go b/vendor/github.com/stoewer/go-strcase/doc.go deleted file mode 100644 index 3e441ca3..00000000 --- a/vendor/github.com/stoewer/go-strcase/doc.go +++ /dev/null @@ -1,8 +0,0 @@ -// Copyright (c) 2017, A. Stoewer -// All rights reserved. 
- -// Package strcase converts between different kinds of naming formats such as camel case -// (CamelCase), snake case (snake_case) or kebab case (kebab-case). The package is designed -// to work only with strings consisting of standard ASCII letters. Unicode is currently not -// supported. -package strcase diff --git a/vendor/github.com/stoewer/go-strcase/helper.go b/vendor/github.com/stoewer/go-strcase/helper.go deleted file mode 100644 index ecad5891..00000000 --- a/vendor/github.com/stoewer/go-strcase/helper.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright (c) 2017, A. Stoewer -// All rights reserved. - -package strcase - -// isLower checks if a character is lower case. More precisely it evaluates if it is -// in the range of ASCII character 'a' to 'z'. -func isLower(ch rune) bool { - return ch >= 'a' && ch <= 'z' -} - -// toLower converts a character in the range of ASCII characters 'A' to 'Z' to its lower -// case counterpart. Other characters remain the same. -func toLower(ch rune) rune { - if ch >= 'A' && ch <= 'Z' { - return ch + 32 - } - return ch -} - -// isLower checks if a character is upper case. More precisely it evaluates if it is -// in the range of ASCII characters 'A' to 'Z'. -func isUpper(ch rune) bool { - return ch >= 'A' && ch <= 'Z' -} - -// toLower converts a character in the range of ASCII characters 'a' to 'z' to its lower -// case counterpart. Other characters remain the same. -func toUpper(ch rune) rune { - if ch >= 'a' && ch <= 'z' { - return ch - 32 - } - return ch -} - -// isSpace checks if a character is some kind of whitespace. -func isSpace(ch rune) bool { - return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' -} - -// isDelimiter checks if a character is some kind of whitespace or '_' or '-'. -func isDelimiter(ch rune) bool { - return ch == '-' || ch == '_' || isSpace(ch) -} - -// iterFunc is a callback that is called fro a specific position in a string. Its arguments are the -// rune at the respective string position as well as the previous and the next rune. If curr is at the -// first position of the string prev is zero. If curr is at the end of the string next is zero. -type iterFunc func(prev, curr, next rune) - -// stringIter iterates over a string, invoking the callback for every single rune in the string. -func stringIter(s string, callback iterFunc) { - var prev rune - var curr rune - for _, next := range s { - if curr == 0 { - prev = curr - curr = next - continue - } - - callback(prev, curr, next) - - prev = curr - curr = next - } - - if len(s) > 0 { - callback(prev, curr, 0) - } -} diff --git a/vendor/github.com/stoewer/go-strcase/kebab.go b/vendor/github.com/stoewer/go-strcase/kebab.go deleted file mode 100644 index e9a64875..00000000 --- a/vendor/github.com/stoewer/go-strcase/kebab.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright (c) 2017, A. Stoewer -// All rights reserved. - -package strcase - -// KebabCase converts a string into kebab case. -func KebabCase(s string) string { - return delimiterCase(s, '-', false) -} - -// UpperKebabCase converts a string into kebab case with capital letters. -func UpperKebabCase(s string) string { - return delimiterCase(s, '-', true) -} diff --git a/vendor/github.com/stoewer/go-strcase/snake.go b/vendor/github.com/stoewer/go-strcase/snake.go deleted file mode 100644 index 1b216e20..00000000 --- a/vendor/github.com/stoewer/go-strcase/snake.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright (c) 2017, A. Stoewer -// All rights reserved. 
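The ASCII-only helpers above drive every conversion in this package. A few illustrative inputs and the outputs the rules produce, as a self-contained sketch:

```go
package main

import (
	"fmt"

	"github.com/stoewer/go-strcase"
)

func main() {
	fmt.Println(strcase.SnakeCase("HTTPServer"))       // http_server
	fmt.Println(strcase.UpperKebabCase("myVariable"))  // MY-VARIABLE
	fmt.Println(strcase.UpperCamelCase("my_variable")) // MyVariable
}
```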
- -package strcase - -import ( - "strings" -) - -// SnakeCase converts a string into snake case. -func SnakeCase(s string) string { - return delimiterCase(s, '_', false) -} - -// UpperSnakeCase converts a string into snake case with capital letters. -func UpperSnakeCase(s string) string { - return delimiterCase(s, '_', true) -} - -// delimiterCase converts a string into snake_case or kebab-case depending on the delimiter passed -// as second argument. When upperCase is true the result will be UPPER_SNAKE_CASE or UPPER-KEBAB-CASE. -func delimiterCase(s string, delimiter rune, upperCase bool) string { - s = strings.TrimSpace(s) - buffer := make([]rune, 0, len(s)+3) - - adjustCase := toLower - if upperCase { - adjustCase = toUpper - } - - var prev rune - var curr rune - for _, next := range s { - if isDelimiter(curr) { - if !isDelimiter(prev) { - buffer = append(buffer, delimiter) - } - } else if isUpper(curr) { - if isLower(prev) || (isUpper(prev) && isLower(next)) { - buffer = append(buffer, delimiter) - } - buffer = append(buffer, adjustCase(curr)) - } else if curr != 0 { - buffer = append(buffer, adjustCase(curr)) - } - prev = curr - curr = next - } - - if len(s) > 0 { - if isUpper(curr) && isLower(prev) && prev != 0 { - buffer = append(buffer, delimiter) - } - buffer = append(buffer, adjustCase(curr)) - } - - return string(buffer) -} diff --git a/vendor/github.com/subosito/gotenv/.env b/vendor/github.com/subosito/gotenv/.env deleted file mode 100644 index 6405eca7..00000000 --- a/vendor/github.com/subosito/gotenv/.env +++ /dev/null @@ -1 +0,0 @@ -HELLO=world diff --git a/vendor/github.com/subosito/gotenv/.env.invalid b/vendor/github.com/subosito/gotenv/.env.invalid deleted file mode 100644 index 016d5e0c..00000000 --- a/vendor/github.com/subosito/gotenv/.env.invalid +++ /dev/null @@ -1 +0,0 @@ -lol$wut diff --git a/vendor/github.com/subosito/gotenv/.gitignore b/vendor/github.com/subosito/gotenv/.gitignore deleted file mode 100644 index 2b8d4561..00000000 --- a/vendor/github.com/subosito/gotenv/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -*.test -*.out -annotate.json diff --git a/vendor/github.com/subosito/gotenv/.travis.yml b/vendor/github.com/subosito/gotenv/.travis.yml deleted file mode 100644 index 3370d5f4..00000000 --- a/vendor/github.com/subosito/gotenv/.travis.yml +++ /dev/null @@ -1,10 +0,0 @@ -language: go -go: - - 1.x -os: - - linux - - osx -script: - - go test -test.v -coverprofile=coverage.out -covermode=count -after_success: - - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/subosito/gotenv/CHANGELOG.md b/vendor/github.com/subosito/gotenv/CHANGELOG.md deleted file mode 100644 index 67f68738..00000000 --- a/vendor/github.com/subosito/gotenv/CHANGELOG.md +++ /dev/null @@ -1,47 +0,0 @@ -# Changelog - -## [1.2.0] - 2019-08-03 - -### Added - -- Add `Must` helper to raise an error as panic. It can be used with `Load` and `OverLoad`. -- Add more tests to be 100% coverage. -- Add CHANGELOG -- Add more OS for the test: OSX and Windows - -### Changed - -- Reduce complexity and improve source code for having `A+` score in [goreportcard](https://goreportcard.com/report/github.com/subosito/gotenv). -- Updated README with mentions to all available functions - -### Removed - -- Remove `ErrFormat` -- Remove `MustLoad` and `MustOverload`, replaced with `Must` helper. 
- -## [1.1.1] - 2018-06-05 - -### Changed - -- Replace `os.Getenv` with `os.LookupEnv` to ensure that the environment variable is not set, by [radding](https://github.com/radding) - -## [1.1.0] - 2017-03-20 - -### Added - -- Supports carriage return in env -- Handle files with UTF-8 BOM - -### Changed - -- Whitespace handling - -### Fixed - -- Incorrect variable expansion -- Handling escaped '$' characters - -## [1.0.0] - 2014-10-05 - -First stable release. - diff --git a/vendor/github.com/subosito/gotenv/LICENSE b/vendor/github.com/subosito/gotenv/LICENSE deleted file mode 100644 index f64ccaed..00000000 --- a/vendor/github.com/subosito/gotenv/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013 Alif Rachmawadi - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/subosito/gotenv/README.md b/vendor/github.com/subosito/gotenv/README.md deleted file mode 100644 index d610cdf0..00000000 --- a/vendor/github.com/subosito/gotenv/README.md +++ /dev/null @@ -1,131 +0,0 @@ -# gotenv - -[![Build Status](https://travis-ci.org/subosito/gotenv.svg?branch=master)](https://travis-ci.org/subosito/gotenv) -[![Build status](https://ci.appveyor.com/api/projects/status/wb2e075xkfl0m0v2/branch/master?svg=true)](https://ci.appveyor.com/project/subosito/gotenv/branch/master) -[![Coverage Status](https://badgen.net/codecov/c/github/subosito/gotenv)](https://codecov.io/gh/subosito/gotenv) -[![Go Report Card](https://goreportcard.com/badge/github.com/subosito/gotenv)](https://goreportcard.com/report/github.com/subosito/gotenv) -[![GoDoc](https://godoc.org/github.com/subosito/gotenv?status.svg)](https://godoc.org/github.com/subosito/gotenv) - -Load environment variables dynamically in Go. - -## Usage - -Put the gotenv package on your `import` statement: - -```go -import "github.com/subosito/gotenv" -``` - -To modify your app environment variables, `gotenv` expose 2 main functions: - -- `gotenv.Load` -- `gotenv.Apply` - -By default, `gotenv.Load` will look for a file called `.env` in the current working directory. - -Behind the scene, it will then load `.env` file and export the valid variables to the environment variables. Make sure you call the method as soon as possible to ensure it loads all variables, say, put it on `init()` function. - -Once loaded you can use `os.Getenv()` to get the value of the variable. 
- -Let's say you have `.env` file: - -``` -APP_ID=1234567 -APP_SECRET=abcdef -``` - -Here's the example of your app: - -```go -package main - -import ( - "github.com/subosito/gotenv" - "log" - "os" -) - -func init() { - gotenv.Load() -} - -func main() { - log.Println(os.Getenv("APP_ID")) // "1234567" - log.Println(os.Getenv("APP_SECRET")) // "abcdef" -} -``` - -You can also load other than `.env` file if you wish. Just supply filenames when calling `Load()`. It will load them in order and the first value set for a variable will win.: - -```go -gotenv.Load(".env.production", "credentials") -``` - -While `gotenv.Load` loads entries from `.env` file, `gotenv.Apply` allows you to use any `io.Reader`: - -```go -gotenv.Apply(strings.NewReader("APP_ID=1234567")) - -log.Println(os.Getenv("APP_ID")) -// Output: "1234567" -``` - -Both `gotenv.Load` and `gotenv.Apply` **DO NOT** overrides existing environment variables. If you want to override existing ones, you can see section below. - -### Environment Overrides - -Besides above functions, `gotenv` also provides another functions that overrides existing: - -- `gotenv.OverLoad` -- `gotenv.OverApply` - - -Here's the example of this overrides behavior: - -```go -os.Setenv("HELLO", "world") - -// NOTE: using Apply existing value will be reserved -gotenv.Apply(strings.NewReader("HELLO=universe")) -fmt.Println(os.Getenv("HELLO")) -// Output: "world" - -// NOTE: using OverApply existing value will be overridden -gotenv.OverApply(strings.NewReader("HELLO=universe")) -fmt.Println(os.Getenv("HELLO")) -// Output: "universe" -``` - -### Throw a Panic - -Both `gotenv.Load` and `gotenv.OverLoad` returns an error on something wrong occurred, like your env file is not exist, and so on. To make it easier to use, `gotenv` also provides `gotenv.Must` helper, to let it panic when an error returned. - -```go -err := gotenv.Load(".env-is-not-exist") -fmt.Println("error", err) -// error: open .env-is-not-exist: no such file or directory - -gotenv.Must(gotenv.Load, ".env-is-not-exist") -// it will throw a panic -// panic: open .env-is-not-exist: no such file or directory -``` - -### Another Scenario - -Just in case you want to parse environment variables from any `io.Reader`, gotenv keeps its `Parse` and `StrictParse` function as public API so you can use that. - -```go -// import "strings" - -pairs := gotenv.Parse(strings.NewReader("FOO=test\nBAR=$FOO")) -// gotenv.Env{"FOO": "test", "BAR": "test"} - -err, pairs = gotenv.StrictParse(strings.NewReader(`FOO="bar"`)) -// gotenv.Env{"FOO": "bar"} -``` - -`Parse` ignores invalid lines and returns `Env` of valid environment variables, while `StrictParse` returns an error for invalid lines. - -## Notes - -The gotenv package is a Go port of [`dotenv`](https://github.com/bkeepers/dotenv) project with some additions made for Go. For general features, it aims to be compatible as close as possible. 
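For completeness, a self-contained sketch of the parse API as declared in `gotenv.go` below; note that `StrictParse` returns `(Env, error)` in that order, and `lol$wut` is the invalid-line fixture shipped with the package:

```go
package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/subosito/gotenv"
)

func main() {
	// Parse swallows invalid lines; StrictParse reports them.
	pairs := gotenv.Parse(strings.NewReader("FOO=test\nBAR=$FOO"))
	fmt.Println(pairs["BAR"]) // test ($FOO expanded from the parsed pairs)

	strict, err := gotenv.StrictParse(strings.NewReader("lol$wut"))
	if err != nil {
		log.Printf("invalid line rejected: %v (parsed so far: %v)", err, strict)
	}
}
```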
diff --git a/vendor/github.com/subosito/gotenv/appveyor.yml b/vendor/github.com/subosito/gotenv/appveyor.yml deleted file mode 100644 index 33b4c404..00000000 --- a/vendor/github.com/subosito/gotenv/appveyor.yml +++ /dev/null @@ -1,9 +0,0 @@ -build: off -clone_folder: c:\gopath\src\github.com\subosito\gotenv -environment: - GOPATH: c:\gopath -stack: go 1.10 -before_test: - - go get -t -test_script: - - go test -v -cover -race diff --git a/vendor/github.com/subosito/gotenv/gotenv.go b/vendor/github.com/subosito/gotenv/gotenv.go deleted file mode 100644 index 745a3448..00000000 --- a/vendor/github.com/subosito/gotenv/gotenv.go +++ /dev/null @@ -1,265 +0,0 @@ -// Package gotenv provides functionality to dynamically load the environment variables -package gotenv - -import ( - "bufio" - "fmt" - "io" - "os" - "regexp" - "strings" -) - -const ( - // Pattern for detecting valid line format - linePattern = `\A\s*(?:export\s+)?([\w\.]+)(?:\s*=\s*|:\s+?)('(?:\'|[^'])*'|"(?:\"|[^"])*"|[^#\n]+)?\s*(?:\s*\#.*)?\z` - - // Pattern for detecting valid variable within a value - variablePattern = `(\\)?(\$)(\{?([A-Z0-9_]+)?\}?)` -) - -// Env holds key/value pair of valid environment variable -type Env map[string]string - -/* -Load is a function to load a file or multiple files and then export the valid variables into environment variables if they do not exist. -When it's called with no argument, it will load `.env` file on the current path and set the environment variables. -Otherwise, it will loop over the filenames parameter and set the proper environment variables. -*/ -func Load(filenames ...string) error { - return loadenv(false, filenames...) -} - -/* -OverLoad is a function to load a file or multiple files and then export and override the valid variables into environment variables. -*/ -func OverLoad(filenames ...string) error { - return loadenv(true, filenames...) -} - -/* -Must is wrapper function that will panic when supplied function returns an error. -*/ -func Must(fn func(filenames ...string) error, filenames ...string) { - if err := fn(filenames...); err != nil { - panic(err.Error()) - } -} - -/* -Apply is a function to load an io Reader then export the valid variables into environment variables if they do not exist. -*/ -func Apply(r io.Reader) error { - return parset(r, false) -} - -/* -OverApply is a function to load an io Reader then export and override the valid variables into environment variables. -*/ -func OverApply(r io.Reader) error { - return parset(r, true) -} - -func loadenv(override bool, filenames ...string) error { - if len(filenames) == 0 { - filenames = []string{".env"} - } - - for _, filename := range filenames { - f, err := os.Open(filename) - if err != nil { - return err - } - - err = parset(f, override) - if err != nil { - return err - } - - f.Close() - } - - return nil -} - -// parse and set :) -func parset(r io.Reader, override bool) error { - env, err := StrictParse(r) - if err != nil { - return err - } - - for key, val := range env { - setenv(key, val, override) - } - - return nil -} - -func setenv(key, val string, override bool) { - if override { - os.Setenv(key, val) - } else { - if _, present := os.LookupEnv(key); !present { - os.Setenv(key, val) - } - } -} - -// Parse is a function to parse line by line any io.Reader supplied and returns the valid Env key/value pair of valid variables. -// It expands the value of a variable from the environment variable but does not set the value to the environment itself. 
-// This function is skipping any invalid lines and only processing the valid one. -func Parse(r io.Reader) Env { - env, _ := StrictParse(r) - return env -} - -// StrictParse is a function to parse line by line any io.Reader supplied and returns the valid Env key/value pair of valid variables. -// It expands the value of a variable from the environment variable but does not set the value to the environment itself. -// This function is returning an error if there are any invalid lines. -func StrictParse(r io.Reader) (Env, error) { - env := make(Env) - scanner := bufio.NewScanner(r) - - i := 1 - bom := string([]byte{239, 187, 191}) - - for scanner.Scan() { - line := scanner.Text() - - if i == 1 { - line = strings.TrimPrefix(line, bom) - } - - i++ - - err := parseLine(line, env) - if err != nil { - return env, err - } - } - - return env, nil -} - -func parseLine(s string, env Env) error { - rl := regexp.MustCompile(linePattern) - rm := rl.FindStringSubmatch(s) - - if len(rm) == 0 { - return checkFormat(s, env) - } - - key := rm[1] - val := rm[2] - - // determine if string has quote prefix - hdq := strings.HasPrefix(val, `"`) - - // determine if string has single quote prefix - hsq := strings.HasPrefix(val, `'`) - - // trim whitespace - val = strings.Trim(val, " ") - - // remove quotes '' or "" - rq := regexp.MustCompile(`\A(['"])(.*)(['"])\z`) - val = rq.ReplaceAllString(val, "$2") - - if hdq { - val = strings.Replace(val, `\n`, "\n", -1) - val = strings.Replace(val, `\r`, "\r", -1) - - // Unescape all characters except $ so variables can be escaped properly - re := regexp.MustCompile(`\\([^$])`) - val = re.ReplaceAllString(val, "$1") - } - - rv := regexp.MustCompile(variablePattern) - fv := func(s string) string { - return varReplacement(s, hsq, env) - } - - val = rv.ReplaceAllStringFunc(val, fv) - val = parseVal(val, env) - - env[key] = val - return nil -} - -func parseExport(st string, env Env) error { - if strings.HasPrefix(st, "export") { - vs := strings.SplitN(st, " ", 2) - - if len(vs) > 1 { - if _, ok := env[vs[1]]; !ok { - return fmt.Errorf("line `%s` has an unset variable", st) - } - } - } - - return nil -} - -func varReplacement(s string, hsq bool, env Env) string { - if strings.HasPrefix(s, "\\") { - return strings.TrimPrefix(s, "\\") - } - - if hsq { - return s - } - - sn := `(\$)(\{?([A-Z0-9_]+)\}?)` - rn := regexp.MustCompile(sn) - mn := rn.FindStringSubmatch(s) - - if len(mn) == 0 { - return s - } - - v := mn[3] - - replace, ok := env[v] - if !ok { - replace = os.Getenv(v) - } - - return replace -} - -func checkFormat(s string, env Env) error { - st := strings.TrimSpace(s) - - if (st == "") || strings.HasPrefix(st, "#") { - return nil - } - - if err := parseExport(st, env); err != nil { - return err - } - - return fmt.Errorf("line `%s` doesn't match format", s) -} - -func parseVal(val string, env Env) string { - if strings.Contains(val, "=") { - if !(val == "\n" || val == "\r") { - kv := strings.Split(val, "\n") - - if len(kv) == 1 { - kv = strings.Split(val, "\r") - } - - if len(kv) > 1 { - val = kv[0] - - for i := 1; i < len(kv); i++ { - parseLine(kv[i], env) - } - } - } - } - - return val -} diff --git a/vendor/github.com/urfave/cli/.flake8 b/vendor/github.com/urfave/cli/.flake8 deleted file mode 100644 index 6deafc26..00000000 --- a/vendor/github.com/urfave/cli/.flake8 +++ /dev/null @@ -1,2 +0,0 @@ -[flake8] -max-line-length = 120 diff --git a/vendor/github.com/urfave/cli/.gitignore b/vendor/github.com/urfave/cli/.gitignore deleted file mode 100644 index 9c250603..00000000 
--- a/vendor/github.com/urfave/cli/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -*.coverprofile -node_modules/ -vendor -.idea \ No newline at end of file diff --git a/vendor/github.com/urfave/cli/CODE_OF_CONDUCT.md b/vendor/github.com/urfave/cli/CODE_OF_CONDUCT.md deleted file mode 100644 index 41ba294f..00000000 --- a/vendor/github.com/urfave/cli/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,74 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, gender identity and expression, level of experience, -education, socio-economic status, nationality, personal appearance, race, -religion, or sexual identity and orientation. - -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or - advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an appointed -representative at an online or offline event. Representation of a project may be -further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting Dan Buch at dan@meatballhat.com. All complaints will be -reviewed and investigated and will result in a response that is deemed necessary -and appropriate to the circumstances. The project team is obligated to maintain -confidentiality with regard to the reporter of an incident. Further details of -specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. 
- -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html - -[homepage]: https://www.contributor-covenant.org - diff --git a/vendor/github.com/urfave/cli/LICENSE b/vendor/github.com/urfave/cli/LICENSE deleted file mode 100644 index 42a597e2..00000000 --- a/vendor/github.com/urfave/cli/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2016 Jeremy Saenz & Contributors - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/urfave/cli/README.md b/vendor/github.com/urfave/cli/README.md deleted file mode 100644 index b2abbcf9..00000000 --- a/vendor/github.com/urfave/cli/README.md +++ /dev/null @@ -1,70 +0,0 @@ -cli -=== - -[![Build Status](https://travis-ci.org/urfave/cli.svg?branch=master)](https://travis-ci.org/urfave/cli) -[![Windows Build Status](https://ci.appveyor.com/api/projects/status/rtgk5xufi932pb2v?svg=true)](https://ci.appveyor.com/project/urfave/cli) - -[![GoDoc](https://godoc.org/github.com/urfave/cli?status.svg)](https://godoc.org/github.com/urfave/cli) -[![codebeat](https://codebeat.co/badges/0a8f30aa-f975-404b-b878-5fab3ae1cc5f)](https://codebeat.co/projects/github-com-urfave-cli) -[![Go Report Card](https://goreportcard.com/badge/urfave/cli)](https://goreportcard.com/report/urfave/cli) -[![codecov](https://codecov.io/gh/urfave/cli/branch/master/graph/badge.svg)](https://codecov.io/gh/urfave/cli) - -cli is a simple, fast, and fun package for building command line apps in Go. The -goal is to enable developers to write fast and distributable command line -applications in an expressive way. - -## Usage Documentation - -Usage documentation exists for each major version - -- `v1` - [./docs/v1/manual.md](./docs/v1/manual.md) -- `v2` - 🚧 documentation for `v2` is WIP 🚧 - -## Installation - -Make sure you have a working Go environment. Go version 1.10+ is supported. [See -the install instructions for Go](http://golang.org/doc/install.html). - -### GOPATH - -Make sure your `PATH` includes the `$GOPATH/bin` directory so your commands can -be easily used: -``` -export PATH=$PATH:$GOPATH/bin -``` - -### Supported platforms - -cli is tested against multiple versions of Go on Linux, and against the latest -released version of Go on OS X and Windows. For full details, see -[`./.travis.yml`](./.travis.yml) and [`./appveyor.yml`](./appveyor.yml). - -### Using `v1` releases - -``` -$ go get github.com/urfave/cli -``` - -```go -... 
-import ( - "github.com/urfave/cli" -) -... -``` - -### Using `v2` releases - -**Warning**: `v2` is in a pre-release state. - -``` -$ go get github.com/urfave/cli.v2 -``` - -```go -... -import ( - "github.com/urfave/cli.v2" // imports as package "cli" -) -... -``` diff --git a/vendor/github.com/urfave/cli/app.go b/vendor/github.com/urfave/cli/app.go deleted file mode 100644 index 382f238f..00000000 --- a/vendor/github.com/urfave/cli/app.go +++ /dev/null @@ -1,531 +0,0 @@ -package cli - -import ( - "flag" - "fmt" - "io" - "os" - "path/filepath" - "sort" - "time" -) - -var ( - changeLogURL = "https://github.com/urfave/cli/blob/master/CHANGELOG.md" - appActionDeprecationURL = fmt.Sprintf("%s#deprecated-cli-app-action-signature", changeLogURL) - // unused variable. commented for now. will remove in future if agreed upon by everyone - //runAndExitOnErrorDeprecationURL = fmt.Sprintf("%s#deprecated-cli-app-runandexitonerror", changeLogURL) - - contactSysadmin = "This is an error in the application. Please contact the distributor of this application if this is not you." - - errInvalidActionType = NewExitError("ERROR invalid Action type. "+ - fmt.Sprintf("Must be `func(*Context`)` or `func(*Context) error). %s", contactSysadmin)+ - fmt.Sprintf("See %s", appActionDeprecationURL), 2) -) - -// App is the main structure of a cli application. It is recommended that -// an app be created with the cli.NewApp() function -type App struct { - // The name of the program. Defaults to path.Base(os.Args[0]) - Name string - // Full name of command for help, defaults to Name - HelpName string - // Description of the program. - Usage string - // Text to override the USAGE section of help - UsageText string - // Description of the program argument format. - ArgsUsage string - // Version of the program - Version string - // Description of the program - Description string - // List of commands to execute - Commands []Command - // List of flags to parse - Flags []Flag - // Boolean to enable bash completion commands - EnableBashCompletion bool - // Boolean to hide built-in help command - HideHelp bool - // Boolean to hide built-in version flag and the VERSION section of help - HideVersion bool - // Populate on app startup, only gettable through method Categories() - categories CommandCategories - // An action to execute when the bash-completion flag is set - BashComplete BashCompleteFunc - // An action to execute before any subcommands are run, but after the context is ready - // If a non-nil error is returned, no subcommands are run - Before BeforeFunc - // An action to execute after any subcommands are run, but after the subcommand has finished - // It is run even if Action() panics - After AfterFunc - - // The action to execute when no subcommands are specified - // Expects a `cli.ActionFunc` but will accept the *deprecated* signature of `func(*cli.Context) {}` - // *Note*: support for the deprecated `Action` signature will be removed in a future version - Action interface{} - - // Execute this function if the proper command cannot be found - CommandNotFound CommandNotFoundFunc - // Execute this function if an usage error occurs - OnUsageError OnUsageErrorFunc - // Compilation date - Compiled time.Time - // List of all authors who contributed - Authors []Author - // Copyright of the binary if any - Copyright string - // Name of Author (Note: Use App.Authors, this is deprecated) - Author string - // Email of Author (Note: Use App.Authors, this is deprecated) - Email string - // Writer writer to write output to - 
Writer io.Writer - // ErrWriter writes error output - ErrWriter io.Writer - // Execute this function to handle ExitErrors. If not provided, HandleExitCoder is provided to - // function as a default, so this is optional. - ExitErrHandler ExitErrHandlerFunc - // Other custom info - Metadata map[string]interface{} - // Carries a function which returns app specific info. - ExtraInfo func() map[string]string - // CustomAppHelpTemplate is the text template for app help topic. - // cli.go uses text/template to render templates. You can - // render custom help text by setting this variable. - CustomAppHelpTemplate string - // Boolean to enable short-option handling so user can combine several - // single-character bool arguments into one - // i.e. foobar -o -v -> foobar -ov - UseShortOptionHandling bool - - didSetup bool -} - -// Tries to find out when this binary was compiled. -// Returns the current time if it fails to find it. -func compileTime() time.Time { - info, err := os.Stat(os.Args[0]) - if err != nil { - return time.Now() - } - return info.ModTime() -} - -// NewApp creates a new cli Application with some reasonable defaults for Name, -// Usage, Version and Action. -func NewApp() *App { - return &App{ - Name: filepath.Base(os.Args[0]), - HelpName: filepath.Base(os.Args[0]), - Usage: "A new cli application", - UsageText: "", - BashComplete: DefaultAppComplete, - Action: helpCommand.Action, - Compiled: compileTime(), - Writer: os.Stdout, - } -} - -// Setup runs initialization code to ensure all data structures are ready for -// `Run` or inspection prior to `Run`. It is internally called by `Run`, but -// will return early if setup has already happened. -func (a *App) Setup() { - if a.didSetup { - return - } - - a.didSetup = true - - if a.Author != "" || a.Email != "" { - a.Authors = append(a.Authors, Author{Name: a.Author, Email: a.Email}) - } - - var newCmds []Command - for _, c := range a.Commands { - if c.HelpName == "" { - c.HelpName = fmt.Sprintf("%s %s", a.HelpName, c.Name) - } - newCmds = append(newCmds, c) - } - a.Commands = newCmds - - if a.Command(helpCommand.Name) == nil && !a.HideHelp { - a.Commands = append(a.Commands, helpCommand) - if (HelpFlag != BoolFlag{}) { - a.appendFlag(HelpFlag) - } - } - - if a.Version == "" { - a.HideVersion = true - } - - if !a.HideVersion { - a.appendFlag(VersionFlag) - } - - a.categories = CommandCategories{} - for _, command := range a.Commands { - a.categories = a.categories.AddCommand(command.Category, command) - } - sort.Sort(a.categories) - - if a.Metadata == nil { - a.Metadata = make(map[string]interface{}) - } - - if a.Writer == nil { - a.Writer = os.Stdout - } -} - -func (a *App) newFlagSet() (*flag.FlagSet, error) { - return flagSet(a.Name, a.Flags) -} - -func (a *App) useShortOptionHandling() bool { - return a.UseShortOptionHandling -} - -// Run is the entry point to the cli app. Parses the arguments slice and routes -// to the proper flag/args combination -func (a *App) Run(arguments []string) (err error) { - a.Setup() - - // handle the completion flag separately from the flagset since - // completion could be attempted after a flag, but before its value was put - // on the command line.
this causes the flagset to interpret the completion - // flag name as the value of the flag before it which is undesirable - // note that we can only do this because the shell autocomplete function - // always appends the completion flag at the end of the command - shellComplete, arguments := checkShellCompleteFlag(a, arguments) - - set, err := a.newFlagSet() - if err != nil { - return err - } - - err = parseIter(set, a, arguments[1:], shellComplete) - nerr := normalizeFlags(a.Flags, set) - context := NewContext(a, set, nil) - if nerr != nil { - _, _ = fmt.Fprintln(a.Writer, nerr) - _ = ShowAppHelp(context) - return nerr - } - context.shellComplete = shellComplete - - if checkCompletions(context) { - return nil - } - - if err != nil { - if a.OnUsageError != nil { - err := a.OnUsageError(context, err, false) - a.handleExitCoder(context, err) - return err - } - _, _ = fmt.Fprintf(a.Writer, "%s %s\n\n", "Incorrect Usage.", err.Error()) - _ = ShowAppHelp(context) - return err - } - - if !a.HideHelp && checkHelp(context) { - _ = ShowAppHelp(context) - return nil - } - - if !a.HideVersion && checkVersion(context) { - ShowVersion(context) - return nil - } - - cerr := checkRequiredFlags(a.Flags, context) - if cerr != nil { - _ = ShowAppHelp(context) - return cerr - } - - if a.After != nil { - defer func() { - if afterErr := a.After(context); afterErr != nil { - if err != nil { - err = NewMultiError(err, afterErr) - } else { - err = afterErr - } - } - }() - } - - if a.Before != nil { - beforeErr := a.Before(context) - if beforeErr != nil { - a.handleExitCoder(context, beforeErr) - err = beforeErr - return err - } - } - - args := context.Args() - if args.Present() { - name := args.First() - c := a.Command(name) - if c != nil { - return c.Run(context) - } - } - - if a.Action == nil { - a.Action = helpCommand.Action - } - - // Run default Action - err = HandleAction(a.Action, context) - - a.handleExitCoder(context, err) - return err -} - -// RunAndExitOnError calls .Run() and exits non-zero if an error was returned -// -// Deprecated: instead you should return an error that fulfills cli.ExitCoder -// to cli.App.Run. 
This will cause the application to exit with the given error -code in the cli.ExitCoder -func (a *App) RunAndExitOnError() { - if err := a.Run(os.Args); err != nil { - _, _ = fmt.Fprintln(a.errWriter(), err) - OsExiter(1) - } -} - -// RunAsSubcommand invokes the subcommand given the context, parses ctx.Args() to -// generate command-specific flags -func (a *App) RunAsSubcommand(ctx *Context) (err error) { - // append help to commands - if len(a.Commands) > 0 { - if a.Command(helpCommand.Name) == nil && !a.HideHelp { - a.Commands = append(a.Commands, helpCommand) - if (HelpFlag != BoolFlag{}) { - a.appendFlag(HelpFlag) - } - } - } - - newCmds := []Command{} - for _, c := range a.Commands { - if c.HelpName == "" { - c.HelpName = fmt.Sprintf("%s %s", a.HelpName, c.Name) - } - newCmds = append(newCmds, c) - } - a.Commands = newCmds - - set, err := a.newFlagSet() - if err != nil { - return err - } - - err = parseIter(set, a, ctx.Args().Tail(), ctx.shellComplete) - nerr := normalizeFlags(a.Flags, set) - context := NewContext(a, set, ctx) - - if nerr != nil { - _, _ = fmt.Fprintln(a.Writer, nerr) - _, _ = fmt.Fprintln(a.Writer) - if len(a.Commands) > 0 { - _ = ShowSubcommandHelp(context) - } else { - _ = ShowCommandHelp(ctx, context.Args().First()) - } - return nerr - } - - if checkCompletions(context) { - return nil - } - - if err != nil { - if a.OnUsageError != nil { - err = a.OnUsageError(context, err, true) - a.handleExitCoder(context, err) - return err - } - _, _ = fmt.Fprintf(a.Writer, "%s %s\n\n", "Incorrect Usage.", err.Error()) - _ = ShowSubcommandHelp(context) - return err - } - - if len(a.Commands) > 0 { - if checkSubcommandHelp(context) { - return nil - } - } else { - if checkCommandHelp(ctx, context.Args().First()) { - return nil - } - } - - cerr := checkRequiredFlags(a.Flags, context) - if cerr != nil { - _ = ShowSubcommandHelp(context) - return cerr - } - - if a.After != nil { - defer func() { - afterErr := a.After(context) - if afterErr != nil { - a.handleExitCoder(context, err) - if err != nil { - err = NewMultiError(err, afterErr) - } else { - err = afterErr - } - } - }() - } - - if a.Before != nil { - beforeErr := a.Before(context) - if beforeErr != nil { - a.handleExitCoder(context, beforeErr) - err = beforeErr - return err - } - } - - args := context.Args() - if args.Present() { - name := args.First() - c := a.Command(name) - if c != nil { - return c.Run(context) - } - } - - // Run default Action - err = HandleAction(a.Action, context) - - a.handleExitCoder(context, err) - return err -} - -// Command returns the named command on App.
Returns nil if the command does not exist -func (a *App) Command(name string) *Command { - for _, c := range a.Commands { - if c.HasName(name) { - return &c - } - } - - return nil -} - -// Categories returns a slice containing all the categories with the commands they contain -func (a *App) Categories() CommandCategories { - return a.categories -} - -// VisibleCategories returns a slice of categories and commands that are -// Hidden=false -func (a *App) VisibleCategories() []*CommandCategory { - ret := []*CommandCategory{} - for _, category := range a.categories { - if visible := func() *CommandCategory { - for _, command := range category.Commands { - if !command.Hidden { - return category - } - } - return nil - }(); visible != nil { - ret = append(ret, visible) - } - } - return ret -} - -// VisibleCommands returns a slice of the Commands with Hidden=false -func (a *App) VisibleCommands() []Command { - var ret []Command - for _, command := range a.Commands { - if !command.Hidden { - ret = append(ret, command) - } - } - return ret -} - -// VisibleFlags returns a slice of the Flags with Hidden=false -func (a *App) VisibleFlags() []Flag { - return visibleFlags(a.Flags) -} - -func (a *App) hasFlag(flag Flag) bool { - for _, f := range a.Flags { - if flag == f { - return true - } - } - - return false -} - -func (a *App) errWriter() io.Writer { - // When the app ErrWriter is nil use the package level one. - if a.ErrWriter == nil { - return ErrWriter - } - - return a.ErrWriter -} - -func (a *App) appendFlag(flag Flag) { - if !a.hasFlag(flag) { - a.Flags = append(a.Flags, flag) - } -} - -func (a *App) handleExitCoder(context *Context, err error) { - if a.ExitErrHandler != nil { - a.ExitErrHandler(context, err) - } else { - HandleExitCoder(err) - } -} - -// Author represents someone who has contributed to a cli project. -type Author struct { - Name string // The Authors name - Email string // The Authors email -} - -// String makes Author comply to the Stringer interface, to allow an easy print in the templating process -func (a Author) String() string { - e := "" - if a.Email != "" { - e = " <" + a.Email + ">" - } - - return fmt.Sprintf("%v%v", a.Name, e) -} - -// HandleAction attempts to figure out which Action signature was used. If -// it's an ActionFunc or a func with the legacy signature for Action, the func -// is run! 
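Aside: the `HandleAction` dispatcher described above (and defined just below) is what lets this vendored v1 API accept more than one `Action` shape. A minimal sketch against the upstream `github.com/urfave/cli` v1 import; the app name and log lines here are invented:

```go
package main

import (
	"log"
	"os"

	"github.com/urfave/cli"
)

func main() {
	app := cli.NewApp()
	app.Name = "demo"

	// Preferred signature: func(*cli.Context) error.
	app.Action = func(c *cli.Context) error {
		log.Printf("args: %v", c.Args())
		return nil
	}

	// The deprecated func(*cli.Context) signature (no error) is also
	// dispatched by HandleAction; any other type yields
	// errInvalidActionType, which exits with code 2.

	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}
```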
-func HandleAction(action interface{}, context *Context) (err error) { - switch a := action.(type) { - case ActionFunc: - return a(context) - case func(*Context) error: - return a(context) - case func(*Context): // deprecated function signature - a(context) - return nil - } - - return errInvalidActionType -} diff --git a/vendor/github.com/urfave/cli/appveyor.yml b/vendor/github.com/urfave/cli/appveyor.yml deleted file mode 100644 index 8ef2fea1..00000000 --- a/vendor/github.com/urfave/cli/appveyor.yml +++ /dev/null @@ -1,28 +0,0 @@ -version: "{build}" - -os: Windows Server 2016 - -image: Visual Studio 2017 - -clone_folder: c:\gopath\src\github.com\urfave\cli - -cache: - - node_modules - -environment: - GOPATH: C:\gopath - GOVERSION: 1.11.x - GO111MODULE: on - GOPROXY: https://proxy.golang.org - -install: - - set PATH=%GOPATH%\bin;C:\go\bin;%PATH% - - go version - - go env - - go get github.com/urfave/gfmrun/cmd/gfmrun - - go mod vendor - -build_script: - - go run build.go vet - - go run build.go test - - go run build.go gfmrun docs/v1/manual.md diff --git a/vendor/github.com/urfave/cli/category.go b/vendor/github.com/urfave/cli/category.go deleted file mode 100644 index bf3c73c5..00000000 --- a/vendor/github.com/urfave/cli/category.go +++ /dev/null @@ -1,44 +0,0 @@ -package cli - -// CommandCategories is a slice of *CommandCategory. -type CommandCategories []*CommandCategory - -// CommandCategory is a category containing commands. -type CommandCategory struct { - Name string - Commands Commands -} - -func (c CommandCategories) Less(i, j int) bool { - return lexicographicLess(c[i].Name, c[j].Name) -} - -func (c CommandCategories) Len() int { - return len(c) -} - -func (c CommandCategories) Swap(i, j int) { - c[i], c[j] = c[j], c[i] -} - -// AddCommand adds a command to a category. -func (c CommandCategories) AddCommand(category string, command Command) CommandCategories { - for _, commandCategory := range c { - if commandCategory.Name == category { - commandCategory.Commands = append(commandCategory.Commands, command) - return c - } - } - return append(c, &CommandCategory{Name: category, Commands: []Command{command}}) -} - -// VisibleCommands returns a slice of the Commands with Hidden=false -func (c *CommandCategory) VisibleCommands() []Command { - ret := []Command{} - for _, command := range c.Commands { - if !command.Hidden { - ret = append(ret, command) - } - } - return ret -} diff --git a/vendor/github.com/urfave/cli/cli.go b/vendor/github.com/urfave/cli/cli.go deleted file mode 100644 index 4bd25083..00000000 --- a/vendor/github.com/urfave/cli/cli.go +++ /dev/null @@ -1,22 +0,0 @@ -// Package cli provides a minimal framework for creating and organizing command line -// Go applications. 
cli is designed to be easy to understand and write; the simplest -// cli application can be written as follows: -// func main() { -// cli.NewApp().Run(os.Args) -// } -// -// Of course this application does not do much, so let's make this an actual application: -// func main() { -// app := cli.NewApp() -// app.Name = "greet" -// app.Usage = "say a greeting" -// app.Action = func(c *cli.Context) error { -// println("Greetings") -// return nil -// } -// -// app.Run(os.Args) -// } -package cli - -//go:generate go run flag-gen/main.go flag-gen/assets_vfsdata.go diff --git a/vendor/github.com/urfave/cli/command.go b/vendor/github.com/urfave/cli/command.go deleted file mode 100644 index f02d3589..00000000 --- a/vendor/github.com/urfave/cli/command.go +++ /dev/null @@ -1,378 +0,0 @@ -package cli - -import ( - "flag" - "fmt" - "sort" - "strings" -) - -// Command is a subcommand for a cli.App. -type Command struct { - // The name of the command - Name string - // short name of the command. Typically one character (deprecated, use `Aliases`) - ShortName string - // A list of aliases for the command - Aliases []string - // A short description of the usage of this command - Usage string - // Custom text to show on USAGE section of help - UsageText string - // A longer explanation of how the command works - Description string - // A short description of the arguments of this command - ArgsUsage string - // The category the command is part of - Category string - // The function to call when checking for bash command completions - BashComplete BashCompleteFunc - // An action to execute before any sub-subcommands are run, but after the context is ready - // If a non-nil error is returned, no sub-subcommands are run - Before BeforeFunc - // An action to execute after any subcommands are run, but after the subcommand has finished - // It is run even if Action() panics - After AfterFunc - // The function to call when this command is invoked - Action interface{} - // TODO: replace `Action: interface{}` with `Action: ActionFunc` once some kind - // of deprecation period has passed, maybe? - - // Execute this function if a usage error occurs. - OnUsageError OnUsageErrorFunc - // List of child commands - Subcommands Commands - // List of flags to parse - Flags []Flag - // Treat all flags as normal arguments if true - SkipFlagParsing bool - // Skip argument reordering which attempts to move flags before arguments, - // but only works if all flags appear after all arguments. This behavior was - // removed in version 2 since it only works under specific conditions so we - // backport here by exposing it as an option for compatibility. - SkipArgReorder bool - // Boolean to hide built-in help command - HideHelp bool - // Boolean to hide this command from help or completion - Hidden bool - // Boolean to enable short-option handling so user can combine several - // single-character bool arguments into one - // i.e. foobar -o -v -> foobar -ov - UseShortOptionHandling bool - - // Full name of command for help, defaults to full command name, including parent commands. - HelpName string - commandNamePath []string - - // CustomHelpTemplate is the text template for the command help topic. - // cli.go uses text/template to render templates. You can - // render custom help text by setting this variable.
- CustomHelpTemplate string -} - -type CommandsByName []Command - -func (c CommandsByName) Len() int { - return len(c) -} - -func (c CommandsByName) Less(i, j int) bool { - return lexicographicLess(c[i].Name, c[j].Name) -} - -func (c CommandsByName) Swap(i, j int) { - c[i], c[j] = c[j], c[i] -} - -// FullName returns the full name of the command. -// For subcommands this ensures that parent commands are part of the command path -func (c Command) FullName() string { - if c.commandNamePath == nil { - return c.Name - } - return strings.Join(c.commandNamePath, " ") -} - -// Commands is a slice of Command -type Commands []Command - -// Run invokes the command given the context, parses ctx.Args() to generate command-specific flags -func (c Command) Run(ctx *Context) (err error) { - if len(c.Subcommands) > 0 { - return c.startApp(ctx) - } - - if !c.HideHelp && (HelpFlag != BoolFlag{}) { - // append help to flags - c.Flags = append( - c.Flags, - HelpFlag, - ) - } - - if ctx.App.UseShortOptionHandling { - c.UseShortOptionHandling = true - } - - set, err := c.parseFlags(ctx.Args().Tail(), ctx.shellComplete) - - context := NewContext(ctx.App, set, ctx) - context.Command = c - if checkCommandCompletions(context, c.Name) { - return nil - } - - if err != nil { - if c.OnUsageError != nil { - err := c.OnUsageError(context, err, false) - context.App.handleExitCoder(context, err) - return err - } - _, _ = fmt.Fprintln(context.App.Writer, "Incorrect Usage:", err.Error()) - _, _ = fmt.Fprintln(context.App.Writer) - _ = ShowCommandHelp(context, c.Name) - return err - } - - if checkCommandHelp(context, c.Name) { - return nil - } - - cerr := checkRequiredFlags(c.Flags, context) - if cerr != nil { - _ = ShowCommandHelp(context, c.Name) - return cerr - } - - if c.After != nil { - defer func() { - afterErr := c.After(context) - if afterErr != nil { - context.App.handleExitCoder(context, err) - if err != nil { - err = NewMultiError(err, afterErr) - } else { - err = afterErr - } - } - }() - } - - if c.Before != nil { - err = c.Before(context) - if err != nil { - context.App.handleExitCoder(context, err) - return err - } - } - - if c.Action == nil { - c.Action = helpSubcommand.Action - } - - err = HandleAction(c.Action, context) - - if err != nil { - context.App.handleExitCoder(context, err) - } - return err -} - -func (c *Command) parseFlags(args Args, shellComplete bool) (*flag.FlagSet, error) { - if c.SkipFlagParsing { - set, err := c.newFlagSet() - if err != nil { - return nil, err - } - - return set, set.Parse(append([]string{"--"}, args...)) - } - - if !c.SkipArgReorder { - args = reorderArgs(c.Flags, args) - } - - set, err := c.newFlagSet() - if err != nil { - return nil, err - } - - err = parseIter(set, c, args, shellComplete) - if err != nil { - return nil, err - } - - err = normalizeFlags(c.Flags, set) - if err != nil { - return nil, err - } - - return set, nil -} - -func (c *Command) newFlagSet() (*flag.FlagSet, error) { - return flagSet(c.Name, c.Flags) -} - -func (c *Command) useShortOptionHandling() bool { - return c.UseShortOptionHandling -} - -// reorderArgs moves all flags (via reorderedArgs) before the rest of -// the arguments (remainingArgs) as this is what flag expects. 
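Aside: the reordering that `SkipArgReorder` opts out of is easiest to see with a concrete input. A hypothetical in-package test for the `reorderArgs` helper defined just below; the flag name and argument vector are invented:

```go
package cli

import (
	"reflect"
	"testing"
)

// Sketch: flags and their values are moved ahead of positional arguments,
// which is the layout the standard flag package expects.
func TestReorderArgsSketch(t *testing.T) {
	flags := []Flag{StringFlag{Name: "name"}}

	got := reorderArgs(flags, []string{"positional", "--name", "candy"})
	want := []string{"--name", "candy", "positional"}

	if !reflect.DeepEqual(got, want) {
		t.Fatalf("got %v, want %v", got, want)
	}
}
```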
-func reorderArgs(commandFlags []Flag, args []string) []string { - var remainingArgs, reorderedArgs []string - - nextIndexMayContainValue := false - for i, arg := range args { - - // dont reorder any args after a -- - // read about -- here: - // https://unix.stackexchange.com/questions/11376/what-does-double-dash-mean-also-known-as-bare-double-dash - if arg == "--" { - remainingArgs = append(remainingArgs, args[i:]...) - break - - // checks if this arg is a value that should be re-ordered next to its associated flag - } else if nextIndexMayContainValue && !strings.HasPrefix(arg, "-") { - nextIndexMayContainValue = false - reorderedArgs = append(reorderedArgs, arg) - - // checks if this is an arg that should be re-ordered - } else if argIsFlag(commandFlags, arg) { - // we have determined that this is a flag that we should re-order - reorderedArgs = append(reorderedArgs, arg) - // if this arg does not contain a "=", then the next index may contain the value for this flag - nextIndexMayContainValue = !strings.Contains(arg, "=") - - // simply append any remaining args - } else { - remainingArgs = append(remainingArgs, arg) - } - } - - return append(reorderedArgs, remainingArgs...) -} - -// argIsFlag checks if an arg is one of our command flags -func argIsFlag(commandFlags []Flag, arg string) bool { - // checks if this is just a `-`, and so definitely not a flag - if arg == "-" { - return false - } - // flags always start with a - - if !strings.HasPrefix(arg, "-") { - return false - } - // this line turns `--flag` into `flag` - if strings.HasPrefix(arg, "--") { - arg = strings.Replace(arg, "-", "", 2) - } - // this line turns `-flag` into `flag` - if strings.HasPrefix(arg, "-") { - arg = strings.Replace(arg, "-", "", 1) - } - // this line turns `flag=value` into `flag` - arg = strings.Split(arg, "=")[0] - // look through all the flags, to see if the `arg` is one of our flags - for _, flag := range commandFlags { - for _, key := range strings.Split(flag.GetName(), ",") { - key := strings.TrimSpace(key) - if key == arg { - return true - } - } - } - // return false if this arg was not one of our flags - return false -} - -// Names returns the names including short names and aliases. -func (c Command) Names() []string { - names := []string{c.Name} - - if c.ShortName != "" { - names = append(names, c.ShortName) - } - - return append(names, c.Aliases...) 
-} - -// HasName returns true if Command.Name or Command.ShortName matches given name -func (c Command) HasName(name string) bool { - for _, n := range c.Names() { - if n == name { - return true - } - } - return false -} - -func (c Command) startApp(ctx *Context) error { - app := NewApp() - app.Metadata = ctx.App.Metadata - app.ExitErrHandler = ctx.App.ExitErrHandler - // set the name and usage - app.Name = fmt.Sprintf("%s %s", ctx.App.Name, c.Name) - if c.HelpName == "" { - app.HelpName = app.Name - } else { - app.HelpName = c.HelpName - } - - app.Usage = c.Usage - app.Description = c.Description - app.ArgsUsage = c.ArgsUsage - - // set CommandNotFound - app.CommandNotFound = ctx.App.CommandNotFound - app.CustomAppHelpTemplate = c.CustomHelpTemplate - - // set the flags and commands - app.Commands = c.Subcommands - app.Flags = c.Flags - app.HideHelp = c.HideHelp - - app.Version = ctx.App.Version - app.HideVersion = ctx.App.HideVersion - app.Compiled = ctx.App.Compiled - app.Author = ctx.App.Author - app.Email = ctx.App.Email - app.Writer = ctx.App.Writer - app.ErrWriter = ctx.App.ErrWriter - app.UseShortOptionHandling = ctx.App.UseShortOptionHandling - - app.categories = CommandCategories{} - for _, command := range c.Subcommands { - app.categories = app.categories.AddCommand(command.Category, command) - } - - sort.Sort(app.categories) - - // bash completion - app.EnableBashCompletion = ctx.App.EnableBashCompletion - if c.BashComplete != nil { - app.BashComplete = c.BashComplete - } - - // set the actions - app.Before = c.Before - app.After = c.After - if c.Action != nil { - app.Action = c.Action - } else { - app.Action = helpSubcommand.Action - } - app.OnUsageError = c.OnUsageError - - for index, cc := range app.Commands { - app.Commands[index].commandNamePath = []string{c.Name, cc.Name} - } - - return app.RunAsSubcommand(ctx) -} - -// VisibleFlags returns a slice of the Flags with Hidden=false -func (c Command) VisibleFlags() []Flag { - return visibleFlags(c.Flags) -} diff --git a/vendor/github.com/urfave/cli/context.go b/vendor/github.com/urfave/cli/context.go deleted file mode 100644 index 3adf37e7..00000000 --- a/vendor/github.com/urfave/cli/context.go +++ /dev/null @@ -1,348 +0,0 @@ -package cli - -import ( - "errors" - "flag" - "fmt" - "os" - "reflect" - "strings" - "syscall" -) - -// Context is a type that is passed through to -// each Handler action in a cli application. Context -// can be used to retrieve context-specific Args and -// parsed command-line options. -type Context struct { - App *App - Command Command - shellComplete bool - flagSet *flag.FlagSet - setFlags map[string]bool - parentContext *Context -} - -// NewContext creates a new context. For use when invoking an App or Command action. -func NewContext(app *App, set *flag.FlagSet, parentCtx *Context) *Context { - c := &Context{App: app, flagSet: set, parentContext: parentCtx} - - if parentCtx != nil { - c.shellComplete = parentCtx.shellComplete - } - - return c -} - -// NumFlags returns the number of flags set -func (c *Context) NumFlags() int { - return c.flagSet.NFlag() -} - -// Set sets a context flag to a value.
-func (c *Context) Set(name, value string) error { - c.setFlags = nil - return c.flagSet.Set(name, value) -} - -// GlobalSet sets a context flag to a value on the global flagset -func (c *Context) GlobalSet(name, value string) error { - globalContext(c).setFlags = nil - return globalContext(c).flagSet.Set(name, value) -} - -// IsSet determines if the flag was actually set -func (c *Context) IsSet(name string) bool { - if c.setFlags == nil { - c.setFlags = make(map[string]bool) - - c.flagSet.Visit(func(f *flag.Flag) { - c.setFlags[f.Name] = true - }) - - c.flagSet.VisitAll(func(f *flag.Flag) { - if _, ok := c.setFlags[f.Name]; ok { - return - } - c.setFlags[f.Name] = false - }) - - // XXX hack to support IsSet for flags with EnvVar - // - // There isn't an easy way to do this with the current implementation since - // whether a flag was set via an environment variable is very difficult to - // determine here. Instead, we intend to introduce a backwards incompatible - // change in version 2 to add `IsSet` to the Flag interface to push the - // responsibility closer to where the information required to determine - // whether a flag is set by non-standard means such as environment - // variables is available. - // - // See https://github.com/urfave/cli/issues/294 for additional discussion - flags := c.Command.Flags - if c.Command.Name == "" { // cannot == Command{} since it contains slice types - if c.App != nil { - flags = c.App.Flags - } - } - for _, f := range flags { - eachName(f.GetName(), func(name string) { - if isSet, ok := c.setFlags[name]; isSet || !ok { - // Check if a flag is set - if isSet { - // If the flag is set, also set its other aliases - eachName(f.GetName(), func(name string) { - c.setFlags[name] = true - }) - } - - return - } - - val := reflect.ValueOf(f) - if val.Kind() == reflect.Ptr { - val = val.Elem() - } - - filePathValue := val.FieldByName("FilePath") - if filePathValue.IsValid() { - eachName(filePathValue.String(), func(filePath string) { - if _, err := os.Stat(filePath); err == nil { - c.setFlags[name] = true - return - } - }) - } - - envVarValue := val.FieldByName("EnvVar") - if envVarValue.IsValid() { - eachName(envVarValue.String(), func(envVar string) { - envVar = strings.TrimSpace(envVar) - if _, ok := syscall.Getenv(envVar); ok { - c.setFlags[name] = true - return - } - }) - } - }) - } - } - - return c.setFlags[name] -} - -// GlobalIsSet determines if the global flag was actually set -func (c *Context) GlobalIsSet(name string) bool { - ctx := c - if ctx.parentContext != nil { - ctx = ctx.parentContext - } - - for ; ctx != nil; ctx = ctx.parentContext { - if ctx.IsSet(name) { - return true - } - } - return false -} - -// FlagNames returns a slice of flag names used in this context. -func (c *Context) FlagNames() (names []string) { - for _, f := range c.Command.Flags { - name := strings.Split(f.GetName(), ",")[0] - if name == "help" { - continue - } - names = append(names, name) - } - return -} - -// GlobalFlagNames returns a slice of global flag names used by the app. 
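Aside: the `IsSet`/`GlobalIsSet` pair above is easy to misread; `IsSet` consults only the current context's flag set, while `GlobalIsSet` walks parent contexts as well. A small sketch against the v1 API, with flag and command names invented:

```go
package main

import (
	"fmt"
	"os"

	"github.com/urfave/cli"
)

func main() {
	app := cli.NewApp()
	app.Flags = []cli.Flag{
		cli.BoolFlag{Name: "debug"}, // app-level (global) flag
	}
	app.Commands = []cli.Command{
		{
			Name: "run",
			Action: func(c *cli.Context) error {
				// Invoked as `demo --debug run`: IsSet sees only the
				// command's own flag set, GlobalIsSet walks parents.
				fmt.Println(c.IsSet("debug"))       // false
				fmt.Println(c.GlobalIsSet("debug")) // true
				return nil
			},
		},
	}
	_ = app.Run(os.Args)
}
```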
-func (c *Context) GlobalFlagNames() (names []string) { - for _, f := range c.App.Flags { - name := strings.Split(f.GetName(), ",")[0] - if name == "help" || name == "version" { - continue - } - names = append(names, name) - } - return -} - -// Parent returns the parent context, if any -func (c *Context) Parent() *Context { - return c.parentContext -} - -// value returns the value of the flag corresponding to `name` -func (c *Context) value(name string) interface{} { - return c.flagSet.Lookup(name).Value.(flag.Getter).Get() -} - -// Args contains the app's console arguments -type Args []string - -// Args returns the command line arguments associated with the context. -func (c *Context) Args() Args { - args := Args(c.flagSet.Args()) - return args -} - -// NArg returns the number of the command line arguments. -func (c *Context) NArg() int { - return len(c.Args()) -} - -// Get returns the nth argument, or else a blank string -func (a Args) Get(n int) string { - if len(a) > n { - return a[n] - } - return "" -} - -// First returns the first argument, or else a blank string -func (a Args) First() string { - return a.Get(0) -} - -// Tail returns the rest of the arguments (not the first one) -// or else an empty string slice -func (a Args) Tail() []string { - if len(a) >= 2 { - return []string(a)[1:] - } - return []string{} -} - -// Present checks if there are any arguments present -func (a Args) Present() bool { - return len(a) != 0 -} - -// Swap swaps arguments at the given indexes -func (a Args) Swap(from, to int) error { - if from >= len(a) || to >= len(a) { - return errors.New("index out of range") - } - a[from], a[to] = a[to], a[from] - return nil -} - -func globalContext(ctx *Context) *Context { - if ctx == nil { - return nil - } - - for { - if ctx.parentContext == nil { - return ctx - } - ctx = ctx.parentContext - } -} - -func lookupGlobalFlagSet(name string, ctx *Context) *flag.FlagSet { - if ctx.parentContext != nil { - ctx = ctx.parentContext - } - for ; ctx != nil; ctx = ctx.parentContext { - if f := ctx.flagSet.Lookup(name); f != nil { - return ctx.flagSet - } - } - return nil -} - -func copyFlag(name string, ff *flag.Flag, set *flag.FlagSet) { - switch ff.Value.(type) { - case *StringSlice: - default: - _ = set.Set(name, ff.Value.String()) - } -} - -func normalizeFlags(flags []Flag, set *flag.FlagSet) error { - visited := make(map[string]bool) - set.Visit(func(f *flag.Flag) { - visited[f.Name] = true - }) - for _, f := range flags { - parts := strings.Split(f.GetName(), ",") - if len(parts) == 1 { - continue - } - var ff *flag.Flag - for _, name := range parts { - name = strings.Trim(name, " ") - if visited[name] { - if ff != nil { - return errors.New("Cannot use two forms of the same flag: " + name + " " + ff.Name) - } - ff = set.Lookup(name) - } - } - if ff == nil { - continue - } - for _, name := range parts { - name = strings.Trim(name, " ") - if !visited[name] { - copyFlag(name, ff, set) - } - } - } - return nil -} - -type requiredFlagsErr interface { - error - getMissingFlags() []string -} - -type errRequiredFlags struct { - missingFlags []string -} - -func (e *errRequiredFlags) Error() string { - numberOfMissingFlags := len(e.missingFlags) - if numberOfMissingFlags == 1 { - return fmt.Sprintf("Required flag %q not set", e.missingFlags[0]) - } - joinedMissingFlags := strings.Join(e.missingFlags, ", ") - return fmt.Sprintf("Required flags %q not set", joinedMissingFlags) -} - -func (e *errRequiredFlags) getMissingFlags() []string { - return e.missingFlags -} - -func
checkRequiredFlags(flags []Flag, context *Context) requiredFlagsErr { - var missingFlags []string - for _, f := range flags { - if rf, ok := f.(RequiredFlag); ok && rf.IsRequired() { - var flagPresent bool - var flagName string - for _, key := range strings.Split(f.GetName(), ",") { - key = strings.TrimSpace(key) - if len(key) > 1 { - flagName = key - } - - if context.IsSet(key) { - flagPresent = true - } - } - - if !flagPresent && flagName != "" { - missingFlags = append(missingFlags, flagName) - } - } - } - - if len(missingFlags) != 0 { - return &errRequiredFlags{missingFlags: missingFlags} - } - - return nil -} diff --git a/vendor/github.com/urfave/cli/docs.go b/vendor/github.com/urfave/cli/docs.go deleted file mode 100644 index 5b945661..00000000 --- a/vendor/github.com/urfave/cli/docs.go +++ /dev/null @@ -1,148 +0,0 @@ -package cli - -import ( - "bytes" - "fmt" - "io" - "sort" - "strings" - "text/template" - - "github.com/cpuguy83/go-md2man/v2/md2man" -) - -// ToMarkdown creates a markdown string for the `*App` -// The function errors if either parsing or writing of the string fails. -func (a *App) ToMarkdown() (string, error) { - var w bytes.Buffer - if err := a.writeDocTemplate(&w); err != nil { - return "", err - } - return w.String(), nil -} - -// ToMan creates a man page string for the `*App` -// The function errors if either parsing or writing of the string fails. -func (a *App) ToMan() (string, error) { - var w bytes.Buffer - if err := a.writeDocTemplate(&w); err != nil { - return "", err - } - man := md2man.Render(w.Bytes()) - return string(man), nil -} - -type cliTemplate struct { - App *App - Commands []string - GlobalArgs []string - SynopsisArgs []string -} - -func (a *App) writeDocTemplate(w io.Writer) error { - const name = "cli" - t, err := template.New(name).Parse(MarkdownDocTemplate) - if err != nil { - return err - } - return t.ExecuteTemplate(w, name, &cliTemplate{ - App: a, - Commands: prepareCommands(a.Commands, 0), - GlobalArgs: prepareArgsWithValues(a.Flags), - SynopsisArgs: prepareArgsSynopsis(a.Flags), - }) -} - -func prepareCommands(commands []Command, level int) []string { - coms := []string{} - for i := range commands { - command := &commands[i] - if command.Hidden { - continue - } - usage := "" - if command.Usage != "" { - usage = command.Usage - } - - prepared := fmt.Sprintf("%s %s\n\n%s\n", - strings.Repeat("#", level+2), - strings.Join(command.Names(), ", "), - usage, - ) - - flags := prepareArgsWithValues(command.Flags) - if len(flags) > 0 { - prepared += fmt.Sprintf("\n%s", strings.Join(flags, "\n")) - } - - coms = append(coms, prepared) - - // recursevly iterate subcommands - if len(command.Subcommands) > 0 { - coms = append( - coms, - prepareCommands(command.Subcommands, level+1)..., - ) - } - } - - return coms -} - -func prepareArgsWithValues(flags []Flag) []string { - return prepareFlags(flags, ", ", "**", "**", `""`, true) -} - -func prepareArgsSynopsis(flags []Flag) []string { - return prepareFlags(flags, "|", "[", "]", "[value]", false) -} - -func prepareFlags( - flags []Flag, - sep, opener, closer, value string, - addDetails bool, -) []string { - args := []string{} - for _, f := range flags { - flag, ok := f.(DocGenerationFlag) - if !ok { - continue - } - modifiedArg := opener - for _, s := range strings.Split(flag.GetName(), ",") { - trimmed := strings.TrimSpace(s) - if len(modifiedArg) > len(opener) { - modifiedArg += sep - } - if len(trimmed) > 1 { - modifiedArg += fmt.Sprintf("--%s", trimmed) - } else { - modifiedArg += fmt.Sprintf("-%s", 
trimmed) - } - } - modifiedArg += closer - if flag.TakesValue() { - modifiedArg += fmt.Sprintf("=%s", value) - } - - if addDetails { - modifiedArg += flagDetails(flag) - } - - args = append(args, modifiedArg+"\n") - - } - sort.Strings(args) - return args -} - -// flagDetails returns a string containing the flags metadata -func flagDetails(flag DocGenerationFlag) string { - description := flag.GetUsage() - value := flag.GetValue() - if value != "" { - description += " (default: " + value + ")" - } - return ": " + description -} diff --git a/vendor/github.com/urfave/cli/errors.go b/vendor/github.com/urfave/cli/errors.go deleted file mode 100644 index 562b2953..00000000 --- a/vendor/github.com/urfave/cli/errors.go +++ /dev/null @@ -1,115 +0,0 @@ -package cli - -import ( - "fmt" - "io" - "os" - "strings" -) - -// OsExiter is the function used when the app exits. If not set defaults to os.Exit. -var OsExiter = os.Exit - -// ErrWriter is used to write errors to the user. This can be anything -// implementing the io.Writer interface and defaults to os.Stderr. -var ErrWriter io.Writer = os.Stderr - -// MultiError is an error that wraps multiple errors. -type MultiError struct { - Errors []error -} - -// NewMultiError creates a new MultiError. Pass in one or more errors. -func NewMultiError(err ...error) MultiError { - return MultiError{Errors: err} -} - -// Error implements the error interface. -func (m MultiError) Error() string { - errs := make([]string, len(m.Errors)) - for i, err := range m.Errors { - errs[i] = err.Error() - } - - return strings.Join(errs, "\n") -} - -type ErrorFormatter interface { - Format(s fmt.State, verb rune) -} - -// ExitCoder is the interface checked by `App` and `Command` for a custom exit -// code -type ExitCoder interface { - error - ExitCode() int -} - -// ExitError fulfills both the builtin `error` interface and `ExitCoder` -type ExitError struct { - exitCode int - message interface{} -} - -// NewExitError makes a new *ExitError -func NewExitError(message interface{}, exitCode int) *ExitError { - return &ExitError{ - exitCode: exitCode, - message: message, - } -} - -// Error returns the string message, fulfilling the interface required by -// `error` -func (ee *ExitError) Error() string { - return fmt.Sprintf("%v", ee.message) -} - -// ExitCode returns the exit code, fulfilling the interface required by -// `ExitCoder` -func (ee *ExitError) ExitCode() int { - return ee.exitCode -} - -// HandleExitCoder checks if the error fulfills the ExitCoder interface, and if -// so prints the error to stderr (if it is non-empty) and calls OsExiter with the -// given exit code. If the given error is a MultiError, then this func is -// called on all members of the Errors slice and calls OsExiter with the last exit code. 
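Aside: `HandleExitCoder` (next) is the plumbing that turns an `ExitCoder` returned from an action into a process exit status. A minimal sketch, assuming the upstream v1 import; the message and exit code are arbitrary:

```go
package main

import (
	"os"

	"github.com/urfave/cli"
)

func main() {
	app := cli.NewApp()
	app.Action = func(c *cli.Context) error {
		// NewExitError satisfies ExitCoder, so app.Run prints the
		// message to ErrWriter and then calls OsExiter(3).
		return cli.NewExitError("something went wrong", 3)
	}
	_ = app.Run(os.Args)
}
```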
-func HandleExitCoder(err error) { - if err == nil { - return - } - - if exitErr, ok := err.(ExitCoder); ok { - if err.Error() != "" { - if _, ok := exitErr.(ErrorFormatter); ok { - fmt.Fprintf(ErrWriter, "%+v\n", err) - } else { - fmt.Fprintln(ErrWriter, err) - } - } - OsExiter(exitErr.ExitCode()) - return - } - - if multiErr, ok := err.(MultiError); ok { - code := handleMultiError(multiErr) - OsExiter(code) - return - } -} - -func handleMultiError(multiErr MultiError) int { - code := 1 - for _, merr := range multiErr.Errors { - if multiErr2, ok := merr.(MultiError); ok { - code = handleMultiError(multiErr2) - } else { - fmt.Fprintln(ErrWriter, merr) - if exitErr, ok := merr.(ExitCoder); ok { - code = exitErr.ExitCode() - } - } - } - return code -} diff --git a/vendor/github.com/urfave/cli/fish.go b/vendor/github.com/urfave/cli/fish.go deleted file mode 100644 index cf183af6..00000000 --- a/vendor/github.com/urfave/cli/fish.go +++ /dev/null @@ -1,194 +0,0 @@ -package cli - -import ( - "bytes" - "fmt" - "io" - "strings" - "text/template" -) - -// ToFishCompletion creates a fish completion string for the `*App` -// The function errors if either parsing or writing of the string fails. -func (a *App) ToFishCompletion() (string, error) { - var w bytes.Buffer - if err := a.writeFishCompletionTemplate(&w); err != nil { - return "", err - } - return w.String(), nil -} - -type fishCompletionTemplate struct { - App *App - Completions []string - AllCommands []string -} - -func (a *App) writeFishCompletionTemplate(w io.Writer) error { - const name = "cli" - t, err := template.New(name).Parse(FishCompletionTemplate) - if err != nil { - return err - } - allCommands := []string{} - - // Add global flags - completions := a.prepareFishFlags(a.VisibleFlags(), allCommands) - - // Add help flag - if !a.HideHelp { - completions = append( - completions, - a.prepareFishFlags([]Flag{HelpFlag}, allCommands)..., - ) - } - - // Add version flag - if !a.HideVersion { - completions = append( - completions, - a.prepareFishFlags([]Flag{VersionFlag}, allCommands)..., - ) - } - - // Add commands and their flags - completions = append( - completions, - a.prepareFishCommands(a.VisibleCommands(), &allCommands, []string{})..., - ) - - return t.ExecuteTemplate(w, name, &fishCompletionTemplate{ - App: a, - Completions: completions, - AllCommands: allCommands, - }) -} - -func (a *App) prepareFishCommands(commands []Command, allCommands *[]string, previousCommands []string) []string { - completions := []string{} - for i := range commands { - command := &commands[i] - - if command.Hidden { - continue - } - - var completion strings.Builder - completion.WriteString(fmt.Sprintf( - "complete -r -c %s -n '%s' -a '%s'", - a.Name, - a.fishSubcommandHelper(previousCommands), - strings.Join(command.Names(), " "), - )) - - if command.Usage != "" { - completion.WriteString(fmt.Sprintf(" -d '%s'", - escapeSingleQuotes(command.Usage))) - } - - if !command.HideHelp { - completions = append( - completions, - a.prepareFishFlags([]Flag{HelpFlag}, command.Names())..., - ) - } - - *allCommands = append(*allCommands, command.Names()...) 
- completions = append(completions, completion.String()) - completions = append( - completions, - a.prepareFishFlags(command.Flags, command.Names())..., - ) - - // recursevly iterate subcommands - if len(command.Subcommands) > 0 { - completions = append( - completions, - a.prepareFishCommands( - command.Subcommands, allCommands, command.Names(), - )..., - ) - } - } - - return completions -} - -func (a *App) prepareFishFlags(flags []Flag, previousCommands []string) []string { - completions := []string{} - for _, f := range flags { - flag, ok := f.(DocGenerationFlag) - if !ok { - continue - } - - completion := &strings.Builder{} - completion.WriteString(fmt.Sprintf( - "complete -c %s -n '%s'", - a.Name, - a.fishSubcommandHelper(previousCommands), - )) - - fishAddFileFlag(f, completion) - - for idx, opt := range strings.Split(flag.GetName(), ",") { - if idx == 0 { - completion.WriteString(fmt.Sprintf( - " -l %s", strings.TrimSpace(opt), - )) - } else { - completion.WriteString(fmt.Sprintf( - " -s %s", strings.TrimSpace(opt), - )) - - } - } - - if flag.TakesValue() { - completion.WriteString(" -r") - } - - if flag.GetUsage() != "" { - completion.WriteString(fmt.Sprintf(" -d '%s'", - escapeSingleQuotes(flag.GetUsage()))) - } - - completions = append(completions, completion.String()) - } - - return completions -} - -func fishAddFileFlag(flag Flag, completion *strings.Builder) { - switch f := flag.(type) { - case GenericFlag: - if f.TakesFile { - return - } - case StringFlag: - if f.TakesFile { - return - } - case StringSliceFlag: - if f.TakesFile { - return - } - } - completion.WriteString(" -f") -} - -func (a *App) fishSubcommandHelper(allCommands []string) string { - fishHelper := fmt.Sprintf("__fish_%s_no_subcommand", a.Name) - if len(allCommands) > 0 { - fishHelper = fmt.Sprintf( - "__fish_seen_subcommand_from %s", - strings.Join(allCommands, " "), - ) - } - return fishHelper - -} - -func escapeSingleQuotes(input string) string { - return strings.Replace(input, `'`, `\'`, -1) -} diff --git a/vendor/github.com/urfave/cli/flag.go b/vendor/github.com/urfave/cli/flag.go deleted file mode 100644 index 1cfa1cdb..00000000 --- a/vendor/github.com/urfave/cli/flag.go +++ /dev/null @@ -1,346 +0,0 @@ -package cli - -import ( - "flag" - "fmt" - "io/ioutil" - "reflect" - "runtime" - "strconv" - "strings" - "syscall" -) - -const defaultPlaceholder = "value" - -// BashCompletionFlag enables bash-completion for all commands and subcommands -var BashCompletionFlag Flag = BoolFlag{ - Name: "generate-bash-completion", - Hidden: true, -} - -// VersionFlag prints the version for the application -var VersionFlag Flag = BoolFlag{ - Name: "version, v", - Usage: "print the version", -} - -// HelpFlag prints the help for all commands and subcommands -// Set to the zero value (BoolFlag{}) to disable flag -- keeps subcommand -// unless HideHelp is set to true) -var HelpFlag Flag = BoolFlag{ - Name: "help, h", - Usage: "show help", -} - -// FlagStringer converts a flag definition to a string. This is used by help -// to display a flag. -var FlagStringer FlagStringFunc = stringifyFlag - -// FlagNamePrefixer converts a full flag name and its placeholder into the help -// message flag prefix. This is used by the default FlagStringer. -var FlagNamePrefixer FlagNamePrefixFunc = prefixedNames - -// FlagEnvHinter annotates flag help message with the environment variable -// details. This is used by the default FlagStringer. 
-var FlagEnvHinter FlagEnvHintFunc = withEnvHint - -// FlagFileHinter annotates flag help message with the file path -// details. This is used by the default FlagStringer. -var FlagFileHinter FlagFileHintFunc = withFileHint - -// FlagsByName is a slice of Flag. -type FlagsByName []Flag - -func (f FlagsByName) Len() int { - return len(f) -} - -func (f FlagsByName) Less(i, j int) bool { - return lexicographicLess(f[i].GetName(), f[j].GetName()) -} - -func (f FlagsByName) Swap(i, j int) { - f[i], f[j] = f[j], f[i] -} - -// Flag is a common interface related to parsing flags in cli. -// For more advanced flag parsing techniques, it is recommended that -// this interface be implemented. -type Flag interface { - fmt.Stringer - // Apply Flag settings to the given flag set - Apply(*flag.FlagSet) - GetName() string -} - -// RequiredFlag is an interface that allows us to mark flags as required -// it allows required flags to be backwards compatible with the Flag interface -type RequiredFlag interface { - Flag - - IsRequired() bool -} - -// DocGenerationFlag is an interface that allows documentation generation for the flag -type DocGenerationFlag interface { - Flag - - // TakesValue returns true if the flag takes a value, otherwise false - TakesValue() bool - - // GetUsage returns the usage string for the flag - GetUsage() string - - // GetValue returns the flags value as string representation and an empty - // string if the flag takes no value at all. - GetValue() string -} - -// errorableFlag is an interface that allows us to return errors during apply -// it allows flags defined in this library to return errors in a fashion backwards compatible -// TODO remove in v2 and modify the existing Flag interface to return errors -type errorableFlag interface { - Flag - - ApplyWithError(*flag.FlagSet) error -} - -func flagSet(name string, flags []Flag) (*flag.FlagSet, error) { - set := flag.NewFlagSet(name, flag.ContinueOnError) - - for _, f := range flags { - //TODO remove in v2 when errorableFlag is removed - if ef, ok := f.(errorableFlag); ok { - if err := ef.ApplyWithError(set); err != nil { - return nil, err - } - } else { - f.Apply(set) - } - } - set.SetOutput(ioutil.Discard) - return set, nil -} - -func eachName(longName string, fn func(string)) { - parts := strings.Split(longName, ",") - for _, name := range parts { - name = strings.Trim(name, " ") - fn(name) - } -} - -func visibleFlags(fl []Flag) []Flag { - var visible []Flag - for _, f := range fl { - field := flagValue(f).FieldByName("Hidden") - if !field.IsValid() || !field.Bool() { - visible = append(visible, f) - } - } - return visible -} - -func prefixFor(name string) (prefix string) { - if len(name) == 1 { - prefix = "-" - } else { - prefix = "--" - } - - return -} - -// Returns the placeholder, if any, and the unquoted usage string.
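Aside: `unquoteUsage` (defined next) implements the backquote convention for usage strings; the backquoted word becomes the value placeholder in help output. A hypothetical flag definition showing the convention:

```go
package main

import (
	"fmt"
	"os"

	"github.com/urfave/cli"
)

func main() {
	app := cli.NewApp()
	app.Flags = []cli.Flag{
		cli.StringFlag{
			Name: "config",
			// `FILE` is extracted by unquoteUsage, so help renders:
			//   --config FILE  load configuration from FILE
			Usage: "load configuration from `FILE`",
		},
	}
	app.Action = func(c *cli.Context) error {
		fmt.Println("config:", c.String("config"))
		return nil
	}
	_ = app.Run(os.Args)
}
```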
-func unquoteUsage(usage string) (string, string) { - for i := 0; i < len(usage); i++ { - if usage[i] == '`' { - for j := i + 1; j < len(usage); j++ { - if usage[j] == '`' { - name := usage[i+1 : j] - usage = usage[:i] + name + usage[j+1:] - return name, usage - } - } - break - } - } - return "", usage -} - -func prefixedNames(fullName, placeholder string) string { - var prefixed string - parts := strings.Split(fullName, ",") - for i, name := range parts { - name = strings.Trim(name, " ") - prefixed += prefixFor(name) + name - if placeholder != "" { - prefixed += " " + placeholder - } - if i < len(parts)-1 { - prefixed += ", " - } - } - return prefixed -} - -func withEnvHint(envVar, str string) string { - envText := "" - if envVar != "" { - prefix := "$" - suffix := "" - sep := ", $" - if runtime.GOOS == "windows" { - prefix = "%" - suffix = "%" - sep = "%, %" - } - envText = " [" + prefix + strings.Join(strings.Split(envVar, ","), sep) + suffix + "]" - } - return str + envText -} - -func withFileHint(filePath, str string) string { - fileText := "" - if filePath != "" { - fileText = fmt.Sprintf(" [%s]", filePath) - } - return str + fileText -} - -func flagValue(f Flag) reflect.Value { - fv := reflect.ValueOf(f) - for fv.Kind() == reflect.Ptr { - fv = reflect.Indirect(fv) - } - return fv -} - -func stringifyFlag(f Flag) string { - fv := flagValue(f) - - switch f.(type) { - case IntSliceFlag: - return FlagFileHinter( - fv.FieldByName("FilePath").String(), - FlagEnvHinter( - fv.FieldByName("EnvVar").String(), - stringifyIntSliceFlag(f.(IntSliceFlag)), - ), - ) - case Int64SliceFlag: - return FlagFileHinter( - fv.FieldByName("FilePath").String(), - FlagEnvHinter( - fv.FieldByName("EnvVar").String(), - stringifyInt64SliceFlag(f.(Int64SliceFlag)), - ), - ) - case StringSliceFlag: - return FlagFileHinter( - fv.FieldByName("FilePath").String(), - FlagEnvHinter( - fv.FieldByName("EnvVar").String(), - stringifyStringSliceFlag(f.(StringSliceFlag)), - ), - ) - } - - placeholder, usage := unquoteUsage(fv.FieldByName("Usage").String()) - - needsPlaceholder := false - defaultValueString := "" - - if val := fv.FieldByName("Value"); val.IsValid() { - needsPlaceholder = true - defaultValueString = fmt.Sprintf(" (default: %v)", val.Interface()) - - if val.Kind() == reflect.String && val.String() != "" { - defaultValueString = fmt.Sprintf(" (default: %q)", val.String()) - } - } - - if defaultValueString == " (default: )" { - defaultValueString = "" - } - - if needsPlaceholder && placeholder == "" { - placeholder = defaultPlaceholder - } - - usageWithDefault := strings.TrimSpace(usage + defaultValueString) - - return FlagFileHinter( - fv.FieldByName("FilePath").String(), - FlagEnvHinter( - fv.FieldByName("EnvVar").String(), - FlagNamePrefixer(fv.FieldByName("Name").String(), placeholder)+"\t"+usageWithDefault, - ), - ) -} - -func stringifyIntSliceFlag(f IntSliceFlag) string { - var defaultVals []string - if f.Value != nil && len(f.Value.Value()) > 0 { - for _, i := range f.Value.Value() { - defaultVals = append(defaultVals, strconv.Itoa(i)) - } - } - - return stringifySliceFlag(f.Usage, f.Name, defaultVals) -} - -func stringifyInt64SliceFlag(f Int64SliceFlag) string { - var defaultVals []string - if f.Value != nil && len(f.Value.Value()) > 0 { - for _, i := range f.Value.Value() { - defaultVals = append(defaultVals, strconv.FormatInt(i, 10)) - } - } - - return stringifySliceFlag(f.Usage, f.Name, defaultVals) -} - -func stringifyStringSliceFlag(f StringSliceFlag) string { - var defaultVals []string - if f.Value 
!= nil && len(f.Value.Value()) > 0 { - for _, s := range f.Value.Value() { - if len(s) > 0 { - defaultVals = append(defaultVals, strconv.Quote(s)) - } - } - } - - return stringifySliceFlag(f.Usage, f.Name, defaultVals) -} - -func stringifySliceFlag(usage, name string, defaultVals []string) string { - placeholder, usage := unquoteUsage(usage) - if placeholder == "" { - placeholder = defaultPlaceholder - } - - defaultVal := "" - if len(defaultVals) > 0 { - defaultVal = fmt.Sprintf(" (default: %s)", strings.Join(defaultVals, ", ")) - } - - usageWithDefault := strings.TrimSpace(usage + defaultVal) - return FlagNamePrefixer(name, placeholder) + "\t" + usageWithDefault -} - -func flagFromFileEnv(filePath, envName string) (val string, ok bool) { - for _, envVar := range strings.Split(envName, ",") { - envVar = strings.TrimSpace(envVar) - if envVal, ok := syscall.Getenv(envVar); ok { - return envVal, true - } - } - for _, fileVar := range strings.Split(filePath, ",") { - if data, err := ioutil.ReadFile(fileVar); err == nil { - return string(data), true - } - } - return "", false -} diff --git a/vendor/github.com/urfave/cli/flag_bool.go b/vendor/github.com/urfave/cli/flag_bool.go deleted file mode 100644 index 2499b0b5..00000000 --- a/vendor/github.com/urfave/cli/flag_bool.go +++ /dev/null @@ -1,109 +0,0 @@ -package cli - -import ( - "flag" - "fmt" - "strconv" -) - -// BoolFlag is a flag with type bool -type BoolFlag struct { - Name string - Usage string - EnvVar string - FilePath string - Required bool - Hidden bool - Destination *bool -} - -// String returns a readable representation of this value -// (for usage defaults) -func (f BoolFlag) String() string { - return FlagStringer(f) -} - -// GetName returns the name of the flag -func (f BoolFlag) GetName() string { - return f.Name -} - -// IsRequired returns whether or not the flag is required -func (f BoolFlag) IsRequired() bool { - return f.Required -} - -// TakesValue returns true of the flag takes a value, otherwise false -func (f BoolFlag) TakesValue() bool { - return false -} - -// GetUsage returns the usage string for the flag -func (f BoolFlag) GetUsage() string { - return f.Usage -} - -// GetValue returns the flags value as string representation and an empty -// string if the flag takes no value at all. 
-func (f BoolFlag) GetValue() string { - return "" -} - -// Bool looks up the value of a local BoolFlag, returns -// false if not found -func (c *Context) Bool(name string) bool { - return lookupBool(name, c.flagSet) -} - -// GlobalBool looks up the value of a global BoolFlag, returns -// false if not found -func (c *Context) GlobalBool(name string) bool { - if fs := lookupGlobalFlagSet(name, c); fs != nil { - return lookupBool(name, fs) - } - return false -} - -// Apply populates the flag given the flag set and environment -// Ignores errors -func (f BoolFlag) Apply(set *flag.FlagSet) { - _ = f.ApplyWithError(set) -} - -// ApplyWithError populates the flag given the flag set and environment -func (f BoolFlag) ApplyWithError(set *flag.FlagSet) error { - val := false - if envVal, ok := flagFromFileEnv(f.FilePath, f.EnvVar); ok { - if envVal == "" { - val = false - } else { - envValBool, err := strconv.ParseBool(envVal) - if err != nil { - return fmt.Errorf("could not parse %s as bool value for flag %s: %s", envVal, f.Name, err) - } - val = envValBool - } - } - - eachName(f.Name, func(name string) { - if f.Destination != nil { - set.BoolVar(f.Destination, name, val, f.Usage) - return - } - set.Bool(name, val, f.Usage) - }) - - return nil -} - -func lookupBool(name string, set *flag.FlagSet) bool { - f := set.Lookup(name) - if f != nil { - parsed, err := strconv.ParseBool(f.Value.String()) - if err != nil { - return false - } - return parsed - } - return false -} diff --git a/vendor/github.com/urfave/cli/flag_bool_t.go b/vendor/github.com/urfave/cli/flag_bool_t.go deleted file mode 100644 index cd0888fa..00000000 --- a/vendor/github.com/urfave/cli/flag_bool_t.go +++ /dev/null @@ -1,110 +0,0 @@ -package cli - -import ( - "flag" - "fmt" - "strconv" -) - -// BoolTFlag is a flag with type bool that is true by default -type BoolTFlag struct { - Name string - Usage string - EnvVar string - FilePath string - Required bool - Hidden bool - Destination *bool -} - -// String returns a readable representation of this value -// (for usage defaults) -func (f BoolTFlag) String() string { - return FlagStringer(f) -} - -// GetName returns the name of the flag -func (f BoolTFlag) GetName() string { - return f.Name -} - -// IsRequired returns whether or not the flag is required -func (f BoolTFlag) IsRequired() bool { - return f.Required -} - -// TakesValue returns true of the flag takes a value, otherwise false -func (f BoolTFlag) TakesValue() bool { - return false -} - -// GetUsage returns the usage string for the flag -func (f BoolTFlag) GetUsage() string { - return f.Usage -} - -// GetValue returns the flags value as string representation and an empty -// string if the flag takes no value at all. 
-func (f BoolTFlag) GetValue() string { - return "" -} - -// BoolT looks up the value of a local BoolTFlag, returns -// false if not found -func (c *Context) BoolT(name string) bool { - return lookupBoolT(name, c.flagSet) -} - -// GlobalBoolT looks up the value of a global BoolTFlag, returns -// false if not found -func (c *Context) GlobalBoolT(name string) bool { - if fs := lookupGlobalFlagSet(name, c); fs != nil { - return lookupBoolT(name, fs) - } - return false -} - -// Apply populates the flag given the flag set and environment -// Ignores errors -func (f BoolTFlag) Apply(set *flag.FlagSet) { - _ = f.ApplyWithError(set) -} - -// ApplyWithError populates the flag given the flag set and environment -func (f BoolTFlag) ApplyWithError(set *flag.FlagSet) error { - val := true - - if envVal, ok := flagFromFileEnv(f.FilePath, f.EnvVar); ok { - if envVal == "" { - val = false - } else { - envValBool, err := strconv.ParseBool(envVal) - if err != nil { - return fmt.Errorf("could not parse %s as bool value for flag %s: %s", envVal, f.Name, err) - } - val = envValBool - } - } - - eachName(f.Name, func(name string) { - if f.Destination != nil { - set.BoolVar(f.Destination, name, val, f.Usage) - return - } - set.Bool(name, val, f.Usage) - }) - - return nil -} - -func lookupBoolT(name string, set *flag.FlagSet) bool { - f := set.Lookup(name) - if f != nil { - parsed, err := strconv.ParseBool(f.Value.String()) - if err != nil { - return false - } - return parsed - } - return false -} diff --git a/vendor/github.com/urfave/cli/flag_duration.go b/vendor/github.com/urfave/cli/flag_duration.go deleted file mode 100644 index df4ade58..00000000 --- a/vendor/github.com/urfave/cli/flag_duration.go +++ /dev/null @@ -1,106 +0,0 @@ -package cli - -import ( - "flag" - "fmt" - "time" -) - -// DurationFlag is a flag with type time.Duration (see https://golang.org/pkg/time/#ParseDuration) -type DurationFlag struct { - Name string - Usage string - EnvVar string - FilePath string - Required bool - Hidden bool - Value time.Duration - Destination *time.Duration -} - -// String returns a readable representation of this value -// (for usage defaults) -func (f DurationFlag) String() string { - return FlagStringer(f) -} - -// GetName returns the name of the flag -func (f DurationFlag) GetName() string { - return f.Name -} - -// IsRequired returns whether or not the flag is required -func (f DurationFlag) IsRequired() bool { - return f.Required -} - -// TakesValue returns true of the flag takes a value, otherwise false -func (f DurationFlag) TakesValue() bool { - return true -} - -// GetUsage returns the usage string for the flag -func (f DurationFlag) GetUsage() string { - return f.Usage -} - -// GetValue returns the flags value as string representation and an empty -// string if the flag takes no value at all. 
-func (f DurationFlag) GetValue() string { - return f.Value.String() -} - -// Duration looks up the value of a local DurationFlag, returns -// 0 if not found -func (c *Context) Duration(name string) time.Duration { - return lookupDuration(name, c.flagSet) -} - -// GlobalDuration looks up the value of a global DurationFlag, returns -// 0 if not found -func (c *Context) GlobalDuration(name string) time.Duration { - if fs := lookupGlobalFlagSet(name, c); fs != nil { - return lookupDuration(name, fs) - } - return 0 -} - -// Apply populates the flag given the flag set and environment -// Ignores errors -func (f DurationFlag) Apply(set *flag.FlagSet) { - _ = f.ApplyWithError(set) -} - -// ApplyWithError populates the flag given the flag set and environment -func (f DurationFlag) ApplyWithError(set *flag.FlagSet) error { - if envVal, ok := flagFromFileEnv(f.FilePath, f.EnvVar); ok { - envValDuration, err := time.ParseDuration(envVal) - if err != nil { - return fmt.Errorf("could not parse %s as duration for flag %s: %s", envVal, f.Name, err) - } - - f.Value = envValDuration - } - - eachName(f.Name, func(name string) { - if f.Destination != nil { - set.DurationVar(f.Destination, name, f.Value, f.Usage) - return - } - set.Duration(name, f.Value, f.Usage) - }) - - return nil -} - -func lookupDuration(name string, set *flag.FlagSet) time.Duration { - f := set.Lookup(name) - if f != nil { - parsed, err := time.ParseDuration(f.Value.String()) - if err != nil { - return 0 - } - return parsed - } - return 0 -} diff --git a/vendor/github.com/urfave/cli/flag_float64.go b/vendor/github.com/urfave/cli/flag_float64.go deleted file mode 100644 index 65398d3b..00000000 --- a/vendor/github.com/urfave/cli/flag_float64.go +++ /dev/null @@ -1,106 +0,0 @@ -package cli - -import ( - "flag" - "fmt" - "strconv" -) - -// Float64Flag is a flag with type float64 -type Float64Flag struct { - Name string - Usage string - EnvVar string - FilePath string - Required bool - Hidden bool - Value float64 - Destination *float64 -} - -// String returns a readable representation of this value -// (for usage defaults) -func (f Float64Flag) String() string { - return FlagStringer(f) -} - -// GetName returns the name of the flag -func (f Float64Flag) GetName() string { - return f.Name -} - -// IsRequired returns whether or not the flag is required -func (f Float64Flag) IsRequired() bool { - return f.Required -} - -// TakesValue returns true of the flag takes a value, otherwise false -func (f Float64Flag) TakesValue() bool { - return true -} - -// GetUsage returns the usage string for the flag -func (f Float64Flag) GetUsage() string { - return f.Usage -} - -// GetValue returns the flags value as string representation and an empty -// string if the flag takes no value at all. 
-func (f Float64Flag) GetValue() string { - return fmt.Sprintf("%f", f.Value) -} - -// Float64 looks up the value of a local Float64Flag, returns -// 0 if not found -func (c *Context) Float64(name string) float64 { - return lookupFloat64(name, c.flagSet) -} - -// GlobalFloat64 looks up the value of a global Float64Flag, returns -// 0 if not found -func (c *Context) GlobalFloat64(name string) float64 { - if fs := lookupGlobalFlagSet(name, c); fs != nil { - return lookupFloat64(name, fs) - } - return 0 -} - -// Apply populates the flag given the flag set and environment -// Ignores errors -func (f Float64Flag) Apply(set *flag.FlagSet) { - _ = f.ApplyWithError(set) -} - -// ApplyWithError populates the flag given the flag set and environment -func (f Float64Flag) ApplyWithError(set *flag.FlagSet) error { - if envVal, ok := flagFromFileEnv(f.FilePath, f.EnvVar); ok { - envValFloat, err := strconv.ParseFloat(envVal, 10) - if err != nil { - return fmt.Errorf("could not parse %s as float64 value for flag %s: %s", envVal, f.Name, err) - } - - f.Value = envValFloat - } - - eachName(f.Name, func(name string) { - if f.Destination != nil { - set.Float64Var(f.Destination, name, f.Value, f.Usage) - return - } - set.Float64(name, f.Value, f.Usage) - }) - - return nil -} - -func lookupFloat64(name string, set *flag.FlagSet) float64 { - f := set.Lookup(name) - if f != nil { - parsed, err := strconv.ParseFloat(f.Value.String(), 64) - if err != nil { - return 0 - } - return parsed - } - return 0 -} diff --git a/vendor/github.com/urfave/cli/flag_generic.go b/vendor/github.com/urfave/cli/flag_generic.go deleted file mode 100644 index c43dae7d..00000000 --- a/vendor/github.com/urfave/cli/flag_generic.go +++ /dev/null @@ -1,110 +0,0 @@ -package cli - -import ( - "flag" - "fmt" -) - -// Generic is a generic parseable type identified by a specific flag -type Generic interface { - Set(value string) error - String() string -} - -// GenericFlag is a flag with type Generic -type GenericFlag struct { - Name string - Usage string - EnvVar string - FilePath string - Required bool - Hidden bool - TakesFile bool - Value Generic -} - -// String returns a readable representation of this value -// (for usage defaults) -func (f GenericFlag) String() string { - return FlagStringer(f) -} - -// GetName returns the name of the flag -func (f GenericFlag) GetName() string { - return f.Name -} - -// IsRequired returns whether or not the flag is required -func (f GenericFlag) IsRequired() bool { - return f.Required -} - -// TakesValue returns true of the flag takes a value, otherwise false -func (f GenericFlag) TakesValue() bool { - return true -} - -// GetUsage returns the usage string for the flag -func (f GenericFlag) GetUsage() string { - return f.Usage -} - -// GetValue returns the flags value as string representation and an empty -// string if the flag takes no value at all. 
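> Editorial note: one quirk in the `Float64Flag` code above is that `ApplyWithError` passes `10` as the `bitSize` to `strconv.ParseFloat`, while `lookupFloat64` correctly passes `64`. As far as I can tell, Go's implementation only special-cases `bitSize == 32` and treats any other value as 64, so the call still parses correctly, but `64` is what was presumably intended. A sketch of the intended call:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	envVal := "3.14" // stand-in for a value read from the environment
	f, err := strconv.ParseFloat(envVal, 64) // bitSize should be 32 or 64
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println(f)
}
```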
-func (f GenericFlag) GetValue() string { - if f.Value != nil { - return f.Value.String() - } - return "" -} - -// Apply takes the flagset and calls Set on the generic flag with the value -// provided by the user for parsing by the flag -// Ignores parsing errors -func (f GenericFlag) Apply(set *flag.FlagSet) { - _ = f.ApplyWithError(set) -} - -// ApplyWithError takes the flagset and calls Set on the generic flag with the value -// provided by the user for parsing by the flag -func (f GenericFlag) ApplyWithError(set *flag.FlagSet) error { - val := f.Value - if fileEnvVal, ok := flagFromFileEnv(f.FilePath, f.EnvVar); ok { - if err := val.Set(fileEnvVal); err != nil { - return fmt.Errorf("could not parse %s as value for flag %s: %s", fileEnvVal, f.Name, err) - } - } - - eachName(f.Name, func(name string) { - set.Var(f.Value, name, f.Usage) - }) - - return nil -} - -// Generic looks up the value of a local GenericFlag, returns -// nil if not found -func (c *Context) Generic(name string) interface{} { - return lookupGeneric(name, c.flagSet) -} - -// GlobalGeneric looks up the value of a global GenericFlag, returns -// nil if not found -func (c *Context) GlobalGeneric(name string) interface{} { - if fs := lookupGlobalFlagSet(name, c); fs != nil { - return lookupGeneric(name, fs) - } - return nil -} - -func lookupGeneric(name string, set *flag.FlagSet) interface{} { - f := set.Lookup(name) - if f != nil { - parsed, err := f.Value, error(nil) - if err != nil { - return nil - } - return parsed - } - return nil -} diff --git a/vendor/github.com/urfave/cli/flag_int.go b/vendor/github.com/urfave/cli/flag_int.go deleted file mode 100644 index bae32e28..00000000 --- a/vendor/github.com/urfave/cli/flag_int.go +++ /dev/null @@ -1,105 +0,0 @@ -package cli - -import ( - "flag" - "fmt" - "strconv" -) - -// IntFlag is a flag with type int -type IntFlag struct { - Name string - Usage string - EnvVar string - FilePath string - Required bool - Hidden bool - Value int - Destination *int -} - -// String returns a readable representation of this value -// (for usage defaults) -func (f IntFlag) String() string { - return FlagStringer(f) -} - -// GetName returns the name of the flag -func (f IntFlag) GetName() string { - return f.Name -} - -// IsRequired returns whether or not the flag is required -func (f IntFlag) IsRequired() bool { - return f.Required -} - -// TakesValue returns true of the flag takes a value, otherwise false -func (f IntFlag) TakesValue() bool { - return true -} - -// GetUsage returns the usage string for the flag -func (f IntFlag) GetUsage() string { - return f.Usage -} - -// GetValue returns the flags value as string representation and an empty -// string if the flag takes no value at all. 
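> Editorial note: the integer flag types in this family parse environment values with `strconv.ParseInt(envVal, 0, 64)` (and `ParseUint` for the unsigned variants), as seen in the `ApplyWithError` bodies below. The base argument of `0` is easy to miss: it makes Go infer the base from the usual literal prefixes, so hex and octal environment values are accepted. A quick, self-contained illustration:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	// With base 0, ParseInt infers the base from the prefix:
	// plain decimal, 0x hex, 0o/leading-zero octal, 0b binary.
	for _, s := range []string{"42", "0x2a", "0o52", "052", "0b101010"} {
		n, err := strconv.ParseInt(s, 0, 64)
		fmt.Println(s, "=>", n, err)
	}
}
```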
-func (f IntFlag) GetValue() string { - return fmt.Sprintf("%d", f.Value) -} - -// Apply populates the flag given the flag set and environment -// Ignores errors -func (f IntFlag) Apply(set *flag.FlagSet) { - _ = f.ApplyWithError(set) -} - -// ApplyWithError populates the flag given the flag set and environment -func (f IntFlag) ApplyWithError(set *flag.FlagSet) error { - if envVal, ok := flagFromFileEnv(f.FilePath, f.EnvVar); ok { - envValInt, err := strconv.ParseInt(envVal, 0, 64) - if err != nil { - return fmt.Errorf("could not parse %s as int value for flag %s: %s", envVal, f.Name, err) - } - f.Value = int(envValInt) - } - - eachName(f.Name, func(name string) { - if f.Destination != nil { - set.IntVar(f.Destination, name, f.Value, f.Usage) - return - } - set.Int(name, f.Value, f.Usage) - }) - - return nil -} - -// Int looks up the value of a local IntFlag, returns -// 0 if not found -func (c *Context) Int(name string) int { - return lookupInt(name, c.flagSet) -} - -// GlobalInt looks up the value of a global IntFlag, returns -// 0 if not found -func (c *Context) GlobalInt(name string) int { - if fs := lookupGlobalFlagSet(name, c); fs != nil { - return lookupInt(name, fs) - } - return 0 -} - -func lookupInt(name string, set *flag.FlagSet) int { - f := set.Lookup(name) - if f != nil { - parsed, err := strconv.ParseInt(f.Value.String(), 0, 64) - if err != nil { - return 0 - } - return int(parsed) - } - return 0 -} diff --git a/vendor/github.com/urfave/cli/flag_int64.go b/vendor/github.com/urfave/cli/flag_int64.go deleted file mode 100644 index aaafbe9d..00000000 --- a/vendor/github.com/urfave/cli/flag_int64.go +++ /dev/null @@ -1,106 +0,0 @@ -package cli - -import ( - "flag" - "fmt" - "strconv" -) - -// Int64Flag is a flag with type int64 -type Int64Flag struct { - Name string - Usage string - EnvVar string - FilePath string - Required bool - Hidden bool - Value int64 - Destination *int64 -} - -// String returns a readable representation of this value -// (for usage defaults) -func (f Int64Flag) String() string { - return FlagStringer(f) -} - -// GetName returns the name of the flag -func (f Int64Flag) GetName() string { - return f.Name -} - -// IsRequired returns whether or not the flag is required -func (f Int64Flag) IsRequired() bool { - return f.Required -} - -// TakesValue returns true of the flag takes a value, otherwise false -func (f Int64Flag) TakesValue() bool { - return true -} - -// GetUsage returns the usage string for the flag -func (f Int64Flag) GetUsage() string { - return f.Usage -} - -// GetValue returns the flags value as string representation and an empty -// string if the flag takes no value at all. 
-func (f Int64Flag) GetValue() string { - return fmt.Sprintf("%d", f.Value) -} - -// Apply populates the flag given the flag set and environment -// Ignores errors -func (f Int64Flag) Apply(set *flag.FlagSet) { - _ = f.ApplyWithError(set) -} - -// ApplyWithError populates the flag given the flag set and environment -func (f Int64Flag) ApplyWithError(set *flag.FlagSet) error { - if envVal, ok := flagFromFileEnv(f.FilePath, f.EnvVar); ok { - envValInt, err := strconv.ParseInt(envVal, 0, 64) - if err != nil { - return fmt.Errorf("could not parse %s as int value for flag %s: %s", envVal, f.Name, err) - } - - f.Value = envValInt - } - - eachName(f.Name, func(name string) { - if f.Destination != nil { - set.Int64Var(f.Destination, name, f.Value, f.Usage) - return - } - set.Int64(name, f.Value, f.Usage) - }) - - return nil -} - -// Int64 looks up the value of a local Int64Flag, returns -// 0 if not found -func (c *Context) Int64(name string) int64 { - return lookupInt64(name, c.flagSet) -} - -// GlobalInt64 looks up the value of a global Int64Flag, returns -// 0 if not found -func (c *Context) GlobalInt64(name string) int64 { - if fs := lookupGlobalFlagSet(name, c); fs != nil { - return lookupInt64(name, fs) - } - return 0 -} - -func lookupInt64(name string, set *flag.FlagSet) int64 { - f := set.Lookup(name) - if f != nil { - parsed, err := strconv.ParseInt(f.Value.String(), 0, 64) - if err != nil { - return 0 - } - return parsed - } - return 0 -} diff --git a/vendor/github.com/urfave/cli/flag_int64_slice.go b/vendor/github.com/urfave/cli/flag_int64_slice.go deleted file mode 100644 index 80772e7c..00000000 --- a/vendor/github.com/urfave/cli/flag_int64_slice.go +++ /dev/null @@ -1,199 +0,0 @@ -package cli - -import ( - "flag" - "fmt" - "strconv" - "strings" -) - -// Int64Slice is an opaque type for []int to satisfy flag.Value and flag.Getter -type Int64Slice []int64 - -// Set parses the value into an integer and appends it to the list of values -func (f *Int64Slice) Set(value string) error { - tmp, err := strconv.ParseInt(value, 10, 64) - if err != nil { - return err - } - *f = append(*f, tmp) - return nil -} - -// String returns a readable representation of this value (for usage defaults) -func (f *Int64Slice) String() string { - slice := make([]string, len(*f)) - for i, v := range *f { - slice[i] = strconv.FormatInt(v, 10) - } - - return strings.Join(slice, ",") -} - -// Value returns the slice of ints set by this flag -func (f *Int64Slice) Value() []int64 { - return *f -} - -// Get returns the slice of ints set by this flag -func (f *Int64Slice) Get() interface{} { - return *f -} - -// Int64SliceFlag is a flag with type *Int64Slice -type Int64SliceFlag struct { - Name string - Usage string - EnvVar string - FilePath string - Required bool - Hidden bool - Value *Int64Slice -} - -// String returns a readable representation of this value -// (for usage defaults) -func (f Int64SliceFlag) String() string { - return FlagStringer(f) -} - -// GetName returns the name of the flag -func (f Int64SliceFlag) GetName() string { - return f.Name -} - -// IsRequired returns whether or not the flag is required -func (f Int64SliceFlag) IsRequired() bool { - return f.Required -} - -// TakesValue returns true of the flag takes a value, otherwise false -func (f Int64SliceFlag) TakesValue() bool { - return true -} - -// GetUsage returns the usage string for the flag -func (f Int64SliceFlag) GetUsage() string { - return f.Usage -} - -// GetValue returns the flags value as string representation and an empty -// string 
if the flag takes no value at all. -func (f Int64SliceFlag) GetValue() string { - if f.Value != nil { - return f.Value.String() - } - return "" -} - -// Apply populates the flag given the flag set and environment -// Ignores errors -func (f Int64SliceFlag) Apply(set *flag.FlagSet) { - _ = f.ApplyWithError(set) -} - -// ApplyWithError populates the flag given the flag set and environment -func (f Int64SliceFlag) ApplyWithError(set *flag.FlagSet) error { - if envVal, ok := flagFromFileEnv(f.FilePath, f.EnvVar); ok { - newVal := &Int64Slice{} - for _, s := range strings.Split(envVal, ",") { - s = strings.TrimSpace(s) - if err := newVal.Set(s); err != nil { - return fmt.Errorf("could not parse %s as int64 slice value for flag %s: %s", envVal, f.Name, err) - } - } - if f.Value == nil { - f.Value = newVal - } else { - *f.Value = *newVal - } - } - - eachName(f.Name, func(name string) { - if f.Value == nil { - f.Value = &Int64Slice{} - } - set.Var(f.Value, name, f.Usage) - }) - - return nil -} - -// Int64Slice looks up the value of a local Int64SliceFlag, returns -// nil if not found -func (c *Context) Int64Slice(name string) []int64 { - return lookupInt64Slice(name, c.flagSet) -} - -// GlobalInt64Slice looks up the value of a global Int64SliceFlag, returns -// nil if not found -func (c *Context) GlobalInt64Slice(name string) []int64 { - if fs := lookupGlobalFlagSet(name, c); fs != nil { - return lookupInt64Slice(name, fs) - } - return nil -} - -func lookupInt64Slice(name string, set *flag.FlagSet) []int64 { - f := set.Lookup(name) - if f != nil { - value, ok := f.Value.(*Int64Slice) - if !ok { - return nil - } - - // extract the slice from asserted value - parsed := value.Value() - - // extract default value from the flag - var defaultVal []int64 - for _, v := range strings.Split(f.DefValue, ",") { - if v != "" { - int64Value, err := strconv.ParseInt(v, 10, 64) - if err != nil { - panic(err) - } - defaultVal = append(defaultVal, int64Value) - } - } - // if the current value is not equal to the default value - // remove the default values from the flag - if !isInt64SliceEqual(parsed, defaultVal) { - for _, v := range defaultVal { - parsed = removeFromInt64Slice(parsed, v) - } - } - return parsed - } - return nil -} - -func removeFromInt64Slice(slice []int64, val int64) []int64 { - for i, v := range slice { - if v == val { - ret := append([]int64{}, slice[:i]...) - ret = append(ret, slice[i+1:]...) - return ret - } - } - return slice -} - -func isInt64SliceEqual(newValue, defaultValue []int64) bool { - // If one is nil, the other must also be nil. 
- if (newValue == nil) != (defaultValue == nil) { - return false - } - - if len(newValue) != len(defaultValue) { - return false - } - - for i, v := range newValue { - if v != defaultValue[i] { - return false - } - } - - return true -} diff --git a/vendor/github.com/urfave/cli/flag_int_slice.go b/vendor/github.com/urfave/cli/flag_int_slice.go deleted file mode 100644 index af6d582d..00000000 --- a/vendor/github.com/urfave/cli/flag_int_slice.go +++ /dev/null @@ -1,198 +0,0 @@ -package cli - -import ( - "flag" - "fmt" - "strconv" - "strings" -) - -// IntSlice is an opaque type for []int to satisfy flag.Value and flag.Getter -type IntSlice []int - -// Set parses the value into an integer and appends it to the list of values -func (f *IntSlice) Set(value string) error { - tmp, err := strconv.Atoi(value) - if err != nil { - return err - } - *f = append(*f, tmp) - return nil -} - -// String returns a readable representation of this value (for usage defaults) -func (f *IntSlice) String() string { - slice := make([]string, len(*f)) - for i, v := range *f { - slice[i] = strconv.Itoa(v) - } - - return strings.Join(slice, ",") -} - -// Value returns the slice of ints set by this flag -func (f *IntSlice) Value() []int { - return *f -} - -// Get returns the slice of ints set by this flag -func (f *IntSlice) Get() interface{} { - return *f -} - -// IntSliceFlag is a flag with type *IntSlice -type IntSliceFlag struct { - Name string - Usage string - EnvVar string - FilePath string - Required bool - Hidden bool - Value *IntSlice -} - -// String returns a readable representation of this value -// (for usage defaults) -func (f IntSliceFlag) String() string { - return FlagStringer(f) -} - -// GetName returns the name of the flag -func (f IntSliceFlag) GetName() string { - return f.Name -} - -// IsRequired returns whether or not the flag is required -func (f IntSliceFlag) IsRequired() bool { - return f.Required -} - -// TakesValue returns true of the flag takes a value, otherwise false -func (f IntSliceFlag) TakesValue() bool { - return true -} - -// GetUsage returns the usage string for the flag -func (f IntSliceFlag) GetUsage() string { - return f.Usage -} - -// GetValue returns the flags value as string representation and an empty -// string if the flag takes no value at all. 
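> Editorial note: there is a behavioral subtlety in `lookupInt64Slice` above (and in its `IntSlice` twin below). Because v1 slice flags append user-supplied values onto the flag's existing `Value`, the declared defaults would otherwise leak into the result; the lookup therefore strips the defaults once the parsed slice differs from them. A sketch of the observable effect (the flag name and the 8080 default are made up for illustration):

```go
package main

import (
	"fmt"
	"os"

	"github.com/urfave/cli"
)

func main() {
	app := cli.NewApp()
	app.Flags = []cli.Flag{
		cli.Int64SliceFlag{Name: "port", Value: &cli.Int64Slice{8080}},
	}
	app.Action = func(c *cli.Context) error {
		// $ app                          -> [8080]
		// $ app --port 9090 --port 9091  -> [9090 9091], not [8080 9090 9091]
		fmt.Println(c.Int64Slice("port"))
		return nil
	}
	_ = app.Run(os.Args)
}
```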
-func (f IntSliceFlag) GetValue() string { - if f.Value != nil { - return f.Value.String() - } - return "" -} - -// Apply populates the flag given the flag set and environment -// Ignores errors -func (f IntSliceFlag) Apply(set *flag.FlagSet) { - _ = f.ApplyWithError(set) -} - -// ApplyWithError populates the flag given the flag set and environment -func (f IntSliceFlag) ApplyWithError(set *flag.FlagSet) error { - if envVal, ok := flagFromFileEnv(f.FilePath, f.EnvVar); ok { - newVal := &IntSlice{} - for _, s := range strings.Split(envVal, ",") { - s = strings.TrimSpace(s) - if err := newVal.Set(s); err != nil { - return fmt.Errorf("could not parse %s as int slice value for flag %s: %s", envVal, f.Name, err) - } - } - if f.Value == nil { - f.Value = newVal - } else { - *f.Value = *newVal - } - } - - eachName(f.Name, func(name string) { - if f.Value == nil { - f.Value = &IntSlice{} - } - set.Var(f.Value, name, f.Usage) - }) - - return nil -} - -// IntSlice looks up the value of a local IntSliceFlag, returns -// nil if not found -func (c *Context) IntSlice(name string) []int { - return lookupIntSlice(name, c.flagSet) -} - -// GlobalIntSlice looks up the value of a global IntSliceFlag, returns -// nil if not found -func (c *Context) GlobalIntSlice(name string) []int { - if fs := lookupGlobalFlagSet(name, c); fs != nil { - return lookupIntSlice(name, fs) - } - return nil -} - -func lookupIntSlice(name string, set *flag.FlagSet) []int { - f := set.Lookup(name) - if f != nil { - value, ok := f.Value.(*IntSlice) - if !ok { - return nil - } - // extract the slice from asserted value - slice := value.Value() - - // extract default value from the flag - var defaultVal []int - for _, v := range strings.Split(f.DefValue, ",") { - if v != "" { - intValue, err := strconv.Atoi(v) - if err != nil { - panic(err) - } - defaultVal = append(defaultVal, intValue) - } - } - // if the current value is not equal to the default value - // remove the default values from the flag - if !isIntSliceEqual(slice, defaultVal) { - for _, v := range defaultVal { - slice = removeFromIntSlice(slice, v) - } - } - return slice - } - return nil -} - -func removeFromIntSlice(slice []int, val int) []int { - for i, v := range slice { - if v == val { - ret := append([]int{}, slice[:i]...) - ret = append(ret, slice[i+1:]...) - return ret - } - } - return slice -} - -func isIntSliceEqual(newValue, defaultValue []int) bool { - // If one is nil, the other must also be nil. 
- if (newValue == nil) != (defaultValue == nil) { - return false - } - - if len(newValue) != len(defaultValue) { - return false - } - - for i, v := range newValue { - if v != defaultValue[i] { - return false - } - } - - return true -} diff --git a/vendor/github.com/urfave/cli/flag_string.go b/vendor/github.com/urfave/cli/flag_string.go deleted file mode 100644 index 9f29da40..00000000 --- a/vendor/github.com/urfave/cli/flag_string.go +++ /dev/null @@ -1,98 +0,0 @@ -package cli - -import "flag" - -// StringFlag is a flag with type string -type StringFlag struct { - Name string - Usage string - EnvVar string - FilePath string - Required bool - Hidden bool - TakesFile bool - Value string - Destination *string -} - -// String returns a readable representation of this value -// (for usage defaults) -func (f StringFlag) String() string { - return FlagStringer(f) -} - -// GetName returns the name of the flag -func (f StringFlag) GetName() string { - return f.Name -} - -// IsRequired returns whether or not the flag is required -func (f StringFlag) IsRequired() bool { - return f.Required -} - -// TakesValue returns true of the flag takes a value, otherwise false -func (f StringFlag) TakesValue() bool { - return true -} - -// GetUsage returns the usage string for the flag -func (f StringFlag) GetUsage() string { - return f.Usage -} - -// GetValue returns the flags value as string representation and an empty -// string if the flag takes no value at all. -func (f StringFlag) GetValue() string { - return f.Value -} - -// Apply populates the flag given the flag set and environment -// Ignores errors -func (f StringFlag) Apply(set *flag.FlagSet) { - _ = f.ApplyWithError(set) -} - -// ApplyWithError populates the flag given the flag set and environment -func (f StringFlag) ApplyWithError(set *flag.FlagSet) error { - if envVal, ok := flagFromFileEnv(f.FilePath, f.EnvVar); ok { - f.Value = envVal - } - - eachName(f.Name, func(name string) { - if f.Destination != nil { - set.StringVar(f.Destination, name, f.Value, f.Usage) - return - } - set.String(name, f.Value, f.Usage) - }) - - return nil -} - -// String looks up the value of a local StringFlag, returns -// "" if not found -func (c *Context) String(name string) string { - return lookupString(name, c.flagSet) -} - -// GlobalString looks up the value of a global StringFlag, returns -// "" if not found -func (c *Context) GlobalString(name string) string { - if fs := lookupGlobalFlagSet(name, c); fs != nil { - return lookupString(name, fs) - } - return "" -} - -func lookupString(name string, set *flag.FlagSet) string { - f := set.Lookup(name) - if f != nil { - parsed, err := f.Value.String(), error(nil) - if err != nil { - return "" - } - return parsed - } - return "" -} diff --git a/vendor/github.com/urfave/cli/flag_string_slice.go b/vendor/github.com/urfave/cli/flag_string_slice.go deleted file mode 100644 index a7c71e9d..00000000 --- a/vendor/github.com/urfave/cli/flag_string_slice.go +++ /dev/null @@ -1,184 +0,0 @@ -package cli - -import ( - "flag" - "fmt" - "strings" -) - -// StringSlice is an opaque type for []string to satisfy flag.Value and flag.Getter -type StringSlice []string - -// Set appends the string value to the list of values -func (f *StringSlice) Set(value string) error { - *f = append(*f, value) - return nil -} - -// String returns a readable representation of this value (for usage defaults) -func (f *StringSlice) String() string { - return strings.Join(*f, ",") -} - -// Value returns the slice of strings set by this flag -func (f 
*StringSlice) Value() []string { - return *f -} - -// Get returns the slice of strings set by this flag -func (f *StringSlice) Get() interface{} { - return *f -} - -// StringSliceFlag is a flag with type *StringSlice -type StringSliceFlag struct { - Name string - Usage string - EnvVar string - FilePath string - Required bool - Hidden bool - TakesFile bool - Value *StringSlice -} - -// String returns a readable representation of this value -// (for usage defaults) -func (f StringSliceFlag) String() string { - return FlagStringer(f) -} - -// GetName returns the name of the flag -func (f StringSliceFlag) GetName() string { - return f.Name -} - -// IsRequired returns whether or not the flag is required -func (f StringSliceFlag) IsRequired() bool { - return f.Required -} - -// TakesValue returns true of the flag takes a value, otherwise false -func (f StringSliceFlag) TakesValue() bool { - return true -} - -// GetUsage returns the usage string for the flag -func (f StringSliceFlag) GetUsage() string { - return f.Usage -} - -// GetValue returns the flags value as string representation and an empty -// string if the flag takes no value at all. -func (f StringSliceFlag) GetValue() string { - if f.Value != nil { - return f.Value.String() - } - return "" -} - -// Apply populates the flag given the flag set and environment -// Ignores errors -func (f StringSliceFlag) Apply(set *flag.FlagSet) { - _ = f.ApplyWithError(set) -} - -// ApplyWithError populates the flag given the flag set and environment -func (f StringSliceFlag) ApplyWithError(set *flag.FlagSet) error { - if envVal, ok := flagFromFileEnv(f.FilePath, f.EnvVar); ok { - newVal := &StringSlice{} - for _, s := range strings.Split(envVal, ",") { - s = strings.TrimSpace(s) - if err := newVal.Set(s); err != nil { - return fmt.Errorf("could not parse %s as string value for flag %s: %s", envVal, f.Name, err) - } - } - if f.Value == nil { - f.Value = newVal - } else { - *f.Value = *newVal - } - } - - eachName(f.Name, func(name string) { - if f.Value == nil { - f.Value = &StringSlice{} - } - set.Var(f.Value, name, f.Usage) - }) - - return nil -} - -// StringSlice looks up the value of a local StringSliceFlag, returns -// nil if not found -func (c *Context) StringSlice(name string) []string { - return lookupStringSlice(name, c.flagSet) -} - -// GlobalStringSlice looks up the value of a global StringSliceFlag, returns -// nil if not found -func (c *Context) GlobalStringSlice(name string) []string { - if fs := lookupGlobalFlagSet(name, c); fs != nil { - return lookupStringSlice(name, fs) - } - return nil -} - -func lookupStringSlice(name string, set *flag.FlagSet) []string { - f := set.Lookup(name) - if f != nil { - value, ok := f.Value.(*StringSlice) - if !ok { - return nil - } - // extract the slice from asserted value - slice := value.Value() - - // extract default value from the flag - var defaultVal []string - for _, v := range strings.Split(f.DefValue, ",") { - defaultVal = append(defaultVal, v) - } - - // if the current value is not equal to the default value - // remove the default values from the flag - if !isStringSliceEqual(slice, defaultVal) { - for _, v := range defaultVal { - slice = removeFromStringSlice(slice, v) - } - } - return slice - } - return nil -} - -func removeFromStringSlice(slice []string, val string) []string { - for i, v := range slice { - if v == val { - ret := append([]string{}, slice[:i]...) - ret = append(ret, slice[i+1:]...) 
- return ret - } - } - return slice -} - -func isStringSliceEqual(newValue, defaultValue []string) bool { - // If one is nil, the other must also be nil. - if (newValue == nil) != (defaultValue == nil) { - return false - } - - if len(newValue) != len(defaultValue) { - return false - } - - for i, v := range newValue { - if v != defaultValue[i] { - return false - } - } - - return true -} diff --git a/vendor/github.com/urfave/cli/flag_uint.go b/vendor/github.com/urfave/cli/flag_uint.go deleted file mode 100644 index d6a04f40..00000000 --- a/vendor/github.com/urfave/cli/flag_uint.go +++ /dev/null @@ -1,106 +0,0 @@ -package cli - -import ( - "flag" - "fmt" - "strconv" -) - -// UintFlag is a flag with type uint -type UintFlag struct { - Name string - Usage string - EnvVar string - FilePath string - Required bool - Hidden bool - Value uint - Destination *uint -} - -// String returns a readable representation of this value -// (for usage defaults) -func (f UintFlag) String() string { - return FlagStringer(f) -} - -// GetName returns the name of the flag -func (f UintFlag) GetName() string { - return f.Name -} - -// IsRequired returns whether or not the flag is required -func (f UintFlag) IsRequired() bool { - return f.Required -} - -// TakesValue returns true of the flag takes a value, otherwise false -func (f UintFlag) TakesValue() bool { - return true -} - -// GetUsage returns the usage string for the flag -func (f UintFlag) GetUsage() string { - return f.Usage -} - -// Apply populates the flag given the flag set and environment -// Ignores errors -func (f UintFlag) Apply(set *flag.FlagSet) { - _ = f.ApplyWithError(set) -} - -// ApplyWithError populates the flag given the flag set and environment -func (f UintFlag) ApplyWithError(set *flag.FlagSet) error { - if envVal, ok := flagFromFileEnv(f.FilePath, f.EnvVar); ok { - envValInt, err := strconv.ParseUint(envVal, 0, 64) - if err != nil { - return fmt.Errorf("could not parse %s as uint value for flag %s: %s", envVal, f.Name, err) - } - - f.Value = uint(envValInt) - } - - eachName(f.Name, func(name string) { - if f.Destination != nil { - set.UintVar(f.Destination, name, f.Value, f.Usage) - return - } - set.Uint(name, f.Value, f.Usage) - }) - - return nil -} - -// GetValue returns the flags value as string representation and an empty -// string if the flag takes no value at all. 
-func (f UintFlag) GetValue() string { - return fmt.Sprintf("%d", f.Value) -} - -// Uint looks up the value of a local UintFlag, returns -// 0 if not found -func (c *Context) Uint(name string) uint { - return lookupUint(name, c.flagSet) -} - -// GlobalUint looks up the value of a global UintFlag, returns -// 0 if not found -func (c *Context) GlobalUint(name string) uint { - if fs := lookupGlobalFlagSet(name, c); fs != nil { - return lookupUint(name, fs) - } - return 0 -} - -func lookupUint(name string, set *flag.FlagSet) uint { - f := set.Lookup(name) - if f != nil { - parsed, err := strconv.ParseUint(f.Value.String(), 0, 64) - if err != nil { - return 0 - } - return uint(parsed) - } - return 0 -} diff --git a/vendor/github.com/urfave/cli/flag_uint64.go b/vendor/github.com/urfave/cli/flag_uint64.go deleted file mode 100644 index ea6493a8..00000000 --- a/vendor/github.com/urfave/cli/flag_uint64.go +++ /dev/null @@ -1,106 +0,0 @@ -package cli - -import ( - "flag" - "fmt" - "strconv" -) - -// Uint64Flag is a flag with type uint64 -type Uint64Flag struct { - Name string - Usage string - EnvVar string - FilePath string - Required bool - Hidden bool - Value uint64 - Destination *uint64 -} - -// String returns a readable representation of this value -// (for usage defaults) -func (f Uint64Flag) String() string { - return FlagStringer(f) -} - -// GetName returns the name of the flag -func (f Uint64Flag) GetName() string { - return f.Name -} - -// IsRequired returns whether or not the flag is required -func (f Uint64Flag) IsRequired() bool { - return f.Required -} - -// TakesValue returns true of the flag takes a value, otherwise false -func (f Uint64Flag) TakesValue() bool { - return true -} - -// GetUsage returns the usage string for the flag -func (f Uint64Flag) GetUsage() string { - return f.Usage -} - -// GetValue returns the flags value as string representation and an empty -// string if the flag takes no value at all. 
-func (f Uint64Flag) GetValue() string { - return fmt.Sprintf("%d", f.Value) -} - -// Apply populates the flag given the flag set and environment -// Ignores errors -func (f Uint64Flag) Apply(set *flag.FlagSet) { - _ = f.ApplyWithError(set) -} - -// ApplyWithError populates the flag given the flag set and environment -func (f Uint64Flag) ApplyWithError(set *flag.FlagSet) error { - if envVal, ok := flagFromFileEnv(f.FilePath, f.EnvVar); ok { - envValInt, err := strconv.ParseUint(envVal, 0, 64) - if err != nil { - return fmt.Errorf("could not parse %s as uint64 value for flag %s: %s", envVal, f.Name, err) - } - - f.Value = envValInt - } - - eachName(f.Name, func(name string) { - if f.Destination != nil { - set.Uint64Var(f.Destination, name, f.Value, f.Usage) - return - } - set.Uint64(name, f.Value, f.Usage) - }) - - return nil -} - -// Uint64 looks up the value of a local Uint64Flag, returns -// 0 if not found -func (c *Context) Uint64(name string) uint64 { - return lookupUint64(name, c.flagSet) -} - -// GlobalUint64 looks up the value of a global Uint64Flag, returns -// 0 if not found -func (c *Context) GlobalUint64(name string) uint64 { - if fs := lookupGlobalFlagSet(name, c); fs != nil { - return lookupUint64(name, fs) - } - return 0 -} - -func lookupUint64(name string, set *flag.FlagSet) uint64 { - f := set.Lookup(name) - if f != nil { - parsed, err := strconv.ParseUint(f.Value.String(), 0, 64) - if err != nil { - return 0 - } - return parsed - } - return 0 -} diff --git a/vendor/github.com/urfave/cli/funcs.go b/vendor/github.com/urfave/cli/funcs.go deleted file mode 100644 index 0036b113..00000000 --- a/vendor/github.com/urfave/cli/funcs.go +++ /dev/null @@ -1,44 +0,0 @@ -package cli - -// BashCompleteFunc is an action to execute when the bash-completion flag is set -type BashCompleteFunc func(*Context) - -// BeforeFunc is an action to execute before any subcommands are run, but after -// the context is ready if a non-nil error is returned, no subcommands are run -type BeforeFunc func(*Context) error - -// AfterFunc is an action to execute after any subcommands are run, but after the -// subcommand has finished it is run even if Action() panics -type AfterFunc func(*Context) error - -// ActionFunc is the action to execute when no subcommands are specified -type ActionFunc func(*Context) error - -// CommandNotFoundFunc is executed if the proper command cannot be found -type CommandNotFoundFunc func(*Context, string) - -// OnUsageErrorFunc is executed if an usage error occurs. This is useful for displaying -// customized usage error messages. This function is able to replace the -// original error messages. If this function is not set, the "Incorrect usage" -// is displayed and the execution is interrupted. -type OnUsageErrorFunc func(context *Context, err error, isSubcommand bool) error - -// ExitErrHandlerFunc is executed if provided in order to handle ExitError values -// returned by Actions and Before/After functions. -type ExitErrHandlerFunc func(context *Context, err error) - -// FlagStringFunc is used by the help generation to display a flag, which is -// expected to be a single line. -type FlagStringFunc func(Flag) string - -// FlagNamePrefixFunc is used by the default FlagStringFunc to create prefix -// text for a flag's full name. -type FlagNamePrefixFunc func(fullName, placeholder string) string - -// FlagEnvHintFunc is used by the default FlagStringFunc to annotate flag help -// with the environment variable details. 
-type FlagEnvHintFunc func(envVar, str string) string - -// FlagFileHintFunc is used by the default FlagStringFunc to annotate flag help -// with the file path details. -type FlagFileHintFunc func(filePath, str string) string diff --git a/vendor/github.com/urfave/cli/help.go b/vendor/github.com/urfave/cli/help.go deleted file mode 100644 index 2280e338..00000000 --- a/vendor/github.com/urfave/cli/help.go +++ /dev/null @@ -1,363 +0,0 @@ -package cli - -import ( - "fmt" - "io" - "os" - "strings" - "text/tabwriter" - "text/template" - "unicode/utf8" -) - -var helpCommand = Command{ - Name: "help", - Aliases: []string{"h"}, - Usage: "Shows a list of commands or help for one command", - ArgsUsage: "[command]", - Action: func(c *Context) error { - args := c.Args() - if args.Present() { - return ShowCommandHelp(c, args.First()) - } - - _ = ShowAppHelp(c) - return nil - }, -} - -var helpSubcommand = Command{ - Name: "help", - Aliases: []string{"h"}, - Usage: "Shows a list of commands or help for one command", - ArgsUsage: "[command]", - Action: func(c *Context) error { - args := c.Args() - if args.Present() { - return ShowCommandHelp(c, args.First()) - } - - return ShowSubcommandHelp(c) - }, -} - -// Prints help for the App or Command -type helpPrinter func(w io.Writer, templ string, data interface{}) - -// Prints help for the App or Command with custom template function. -type helpPrinterCustom func(w io.Writer, templ string, data interface{}, customFunc map[string]interface{}) - -// HelpPrinter is a function that writes the help output. If not set explicitly, -// this calls HelpPrinterCustom using only the default template functions. -// -// If custom logic for printing help is required, this function can be -// overridden. If the ExtraInfo field is defined on an App, this function -// should not be modified, as HelpPrinterCustom will be used directly in order -// to capture the extra information. -var HelpPrinter helpPrinter = printHelp - -// HelpPrinterCustom is a function that writes the help output. It is used as -// the default implementation of HelpPrinter, and may be called directly if -// the ExtraInfo field is set on an App. -var HelpPrinterCustom helpPrinterCustom = printHelpCustom - -// VersionPrinter prints the version for the App -var VersionPrinter = printVersion - -// ShowAppHelpAndExit - Prints the list of subcommands for the app and exits with exit code. -func ShowAppHelpAndExit(c *Context, exitCode int) { - _ = ShowAppHelp(c) - os.Exit(exitCode) -} - -// ShowAppHelp is an action that displays the help. 
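> Editorial note: the `HelpPrinter` package variable deleted in this hunk was the documented hook for taking over help rendering in v1 ("If custom logic for printing help is required, this function can be overridden"). A minimal sketch of that override, assuming the v1 package-level variable shown above (the app name is hypothetical):

```go
package main

import (
	"fmt"
	"io"
	"os"

	"github.com/urfave/cli"
)

func main() {
	// Replace the default template renderer: ignore the template text
	// and emit our own help output instead.
	cli.HelpPrinter = func(w io.Writer, templ string, data interface{}) {
		fmt.Fprintln(w, "usage: myapp [flags]  (custom help)")
	}
	app := cli.NewApp()
	app.Name = "myapp"
	_ = app.Run(os.Args)
}
```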
-func ShowAppHelp(c *Context) error { - template := c.App.CustomAppHelpTemplate - if template == "" { - template = AppHelpTemplate - } - - if c.App.ExtraInfo == nil { - HelpPrinter(c.App.Writer, template, c.App) - return nil - } - - customAppData := func() map[string]interface{} { - return map[string]interface{}{ - "ExtraInfo": c.App.ExtraInfo, - } - } - HelpPrinterCustom(c.App.Writer, template, c.App, customAppData()) - - return nil -} - -// DefaultAppComplete prints the list of subcommands as the default app completion method -func DefaultAppComplete(c *Context) { - DefaultCompleteWithFlags(nil)(c) -} - -func printCommandSuggestions(commands []Command, writer io.Writer) { - for _, command := range commands { - if command.Hidden { - continue - } - if os.Getenv("_CLI_ZSH_AUTOCOMPLETE_HACK") == "1" { - for _, name := range command.Names() { - _, _ = fmt.Fprintf(writer, "%s:%s\n", name, command.Usage) - } - } else { - for _, name := range command.Names() { - _, _ = fmt.Fprintf(writer, "%s\n", name) - } - } - } -} - -func cliArgContains(flagName string) bool { - for _, name := range strings.Split(flagName, ",") { - name = strings.TrimSpace(name) - count := utf8.RuneCountInString(name) - if count > 2 { - count = 2 - } - flag := fmt.Sprintf("%s%s", strings.Repeat("-", count), name) - for _, a := range os.Args { - if a == flag { - return true - } - } - } - return false -} - -func printFlagSuggestions(lastArg string, flags []Flag, writer io.Writer) { - cur := strings.TrimPrefix(lastArg, "-") - cur = strings.TrimPrefix(cur, "-") - for _, flag := range flags { - if bflag, ok := flag.(BoolFlag); ok && bflag.Hidden { - continue - } - for _, name := range strings.Split(flag.GetName(), ",") { - name = strings.TrimSpace(name) - // this will get total count utf8 letters in flag name - count := utf8.RuneCountInString(name) - if count > 2 { - count = 2 // resuse this count to generate single - or -- in flag completion - } - // if flag name has more than one utf8 letter and last argument in cli has -- prefix then - // skip flag completion for short flags example -v or -x - if strings.HasPrefix(lastArg, "--") && count == 1 { - continue - } - // match if last argument matches this flag and it is not repeated - if strings.HasPrefix(name, cur) && cur != name && !cliArgContains(flag.GetName()) { - flagCompletion := fmt.Sprintf("%s%s", strings.Repeat("-", count), name) - _, _ = fmt.Fprintln(writer, flagCompletion) - } - } - } -} - -func DefaultCompleteWithFlags(cmd *Command) func(c *Context) { - return func(c *Context) { - if len(os.Args) > 2 { - lastArg := os.Args[len(os.Args)-2] - if strings.HasPrefix(lastArg, "-") { - printFlagSuggestions(lastArg, c.App.Flags, c.App.Writer) - if cmd != nil { - printFlagSuggestions(lastArg, cmd.Flags, c.App.Writer) - } - return - } - } - if cmd != nil { - printCommandSuggestions(cmd.Subcommands, c.App.Writer) - } else { - printCommandSuggestions(c.App.Commands, c.App.Writer) - } - } -} - -// ShowCommandHelpAndExit - exits with code after showing help -func ShowCommandHelpAndExit(c *Context, command string, code int) { - _ = ShowCommandHelp(c, command) - os.Exit(code) -} - -// ShowCommandHelp prints help for the given command -func ShowCommandHelp(ctx *Context, command string) error { - // show the subcommand help for a command with subcommands - if command == "" { - HelpPrinter(ctx.App.Writer, SubcommandHelpTemplate, ctx.App) - return nil - } - - for _, c := range ctx.App.Commands { - if c.HasName(command) { - templ := c.CustomHelpTemplate - if templ == "" { - templ = 
CommandHelpTemplate - } - - HelpPrinter(ctx.App.Writer, templ, c) - - return nil - } - } - - if ctx.App.CommandNotFound == nil { - return NewExitError(fmt.Sprintf("No help topic for '%v'", command), 3) - } - - ctx.App.CommandNotFound(ctx, command) - return nil -} - -// ShowSubcommandHelp prints help for the given subcommand -func ShowSubcommandHelp(c *Context) error { - return ShowCommandHelp(c, c.Command.Name) -} - -// ShowVersion prints the version number of the App -func ShowVersion(c *Context) { - VersionPrinter(c) -} - -func printVersion(c *Context) { - _, _ = fmt.Fprintf(c.App.Writer, "%v version %v\n", c.App.Name, c.App.Version) -} - -// ShowCompletions prints the lists of commands within a given context -func ShowCompletions(c *Context) { - a := c.App - if a != nil && a.BashComplete != nil { - a.BashComplete(c) - } -} - -// ShowCommandCompletions prints the custom completions for a given command -func ShowCommandCompletions(ctx *Context, command string) { - c := ctx.App.Command(command) - if c != nil { - if c.BashComplete != nil { - c.BashComplete(ctx) - } else { - DefaultCompleteWithFlags(c)(ctx) - } - } - -} - -// printHelpCustom is the default implementation of HelpPrinterCustom. -// -// The customFuncs map will be combined with a default template.FuncMap to -// allow using arbitrary functions in template rendering. -func printHelpCustom(out io.Writer, templ string, data interface{}, customFuncs map[string]interface{}) { - funcMap := template.FuncMap{ - "join": strings.Join, - } - for key, value := range customFuncs { - funcMap[key] = value - } - - w := tabwriter.NewWriter(out, 1, 8, 2, ' ', 0) - t := template.Must(template.New("help").Funcs(funcMap).Parse(templ)) - err := t.Execute(w, data) - if err != nil { - // If the writer is closed, t.Execute will fail, and there's nothing - // we can do to recover. 
- if os.Getenv("CLI_TEMPLATE_ERROR_DEBUG") != "" { - _, _ = fmt.Fprintf(ErrWriter, "CLI TEMPLATE ERROR: %#v\n", err) - } - return - } - _ = w.Flush() -} - -func printHelp(out io.Writer, templ string, data interface{}) { - HelpPrinterCustom(out, templ, data, nil) -} - -func checkVersion(c *Context) bool { - found := false - if VersionFlag.GetName() != "" { - eachName(VersionFlag.GetName(), func(name string) { - if c.GlobalBool(name) || c.Bool(name) { - found = true - } - }) - } - return found -} - -func checkHelp(c *Context) bool { - found := false - if HelpFlag.GetName() != "" { - eachName(HelpFlag.GetName(), func(name string) { - if c.GlobalBool(name) || c.Bool(name) { - found = true - } - }) - } - return found -} - -func checkCommandHelp(c *Context, name string) bool { - if c.Bool("h") || c.Bool("help") { - _ = ShowCommandHelp(c, name) - return true - } - - return false -} - -func checkSubcommandHelp(c *Context) bool { - if c.Bool("h") || c.Bool("help") { - _ = ShowSubcommandHelp(c) - return true - } - - return false -} - -func checkShellCompleteFlag(a *App, arguments []string) (bool, []string) { - if !a.EnableBashCompletion { - return false, arguments - } - - pos := len(arguments) - 1 - lastArg := arguments[pos] - - if lastArg != "--"+BashCompletionFlag.GetName() { - return false, arguments - } - - return true, arguments[:pos] -} - -func checkCompletions(c *Context) bool { - if !c.shellComplete { - return false - } - - if args := c.Args(); args.Present() { - name := args.First() - if cmd := c.App.Command(name); cmd != nil { - // let the command handle the completion - return false - } - } - - ShowCompletions(c) - return true -} - -func checkCommandCompletions(c *Context, name string) bool { - if !c.shellComplete { - return false - } - - ShowCommandCompletions(c, name) - return true -} diff --git a/vendor/github.com/urfave/cli/parse.go b/vendor/github.com/urfave/cli/parse.go deleted file mode 100644 index 7df17296..00000000 --- a/vendor/github.com/urfave/cli/parse.go +++ /dev/null @@ -1,94 +0,0 @@ -package cli - -import ( - "flag" - "strings" -) - -type iterativeParser interface { - newFlagSet() (*flag.FlagSet, error) - useShortOptionHandling() bool -} - -// To enable short-option handling (e.g., "-it" vs "-i -t") we have to -// iteratively catch parsing errors. This way we achieve LR parsing without -// transforming any arguments. Otherwise, there is no way we can discriminate -// combined short options from common arguments that should be left untouched. -// Pass `shellComplete` to continue parsing options on failure during shell -// completion when, the user-supplied options may be incomplete. -func parseIter(set *flag.FlagSet, ip iterativeParser, args []string, shellComplete bool) error { - for { - err := set.Parse(args) - if !ip.useShortOptionHandling() || err == nil { - if shellComplete { - return nil - } - return err - } - - errStr := err.Error() - trimmed := strings.TrimPrefix(errStr, "flag provided but not defined: -") - if errStr == trimmed { - return err - } - - // regenerate the initial args with the split short opts - argsWereSplit := false - for i, arg := range args { - // skip args that are not part of the error message - if name := strings.TrimLeft(arg, "-"); name != trimmed { - continue - } - - // if we can't split, the error was accurate - shortOpts := splitShortOptions(set, arg) - if len(shortOpts) == 1 { - return err - } - - // swap current argument with the split version - args = append(args[:i], append(shortOpts, args[i+1:]...)...) 
- argsWereSplit = true - break - } - - // This should be an impossible to reach code path, but in case the arg - // splitting failed to happen, this will prevent infinite loops - if !argsWereSplit { - return err - } - - // Since custom parsing failed, replace the flag set before retrying - newSet, err := ip.newFlagSet() - if err != nil { - return err - } - *set = *newSet - } -} - -func splitShortOptions(set *flag.FlagSet, arg string) []string { - shortFlagsExist := func(s string) bool { - for _, c := range s[1:] { - if f := set.Lookup(string(c)); f == nil { - return false - } - } - return true - } - - if !isSplittable(arg) || !shortFlagsExist(arg) { - return []string{arg} - } - - separated := make([]string, 0, len(arg)-1) - for _, flagChar := range arg[1:] { - separated = append(separated, "-"+string(flagChar)) - } - - return separated -} - -func isSplittable(flagArg string) bool { - return strings.HasPrefix(flagArg, "-") && !strings.HasPrefix(flagArg, "--") && len(flagArg) > 2 -} diff --git a/vendor/github.com/urfave/cli/sort.go b/vendor/github.com/urfave/cli/sort.go deleted file mode 100644 index 23d1c2f7..00000000 --- a/vendor/github.com/urfave/cli/sort.go +++ /dev/null @@ -1,29 +0,0 @@ -package cli - -import "unicode" - -// lexicographicLess compares strings alphabetically considering case. -func lexicographicLess(i, j string) bool { - iRunes := []rune(i) - jRunes := []rune(j) - - lenShared := len(iRunes) - if lenShared > len(jRunes) { - lenShared = len(jRunes) - } - - for index := 0; index < lenShared; index++ { - ir := iRunes[index] - jr := jRunes[index] - - if lir, ljr := unicode.ToLower(ir), unicode.ToLower(jr); lir != ljr { - return lir < ljr - } - - if ir != jr { - return ir < jr - } - } - - return i < j -} diff --git a/vendor/github.com/urfave/cli/template.go b/vendor/github.com/urfave/cli/template.go deleted file mode 100644 index c631fb97..00000000 --- a/vendor/github.com/urfave/cli/template.go +++ /dev/null @@ -1,121 +0,0 @@ -package cli - -// AppHelpTemplate is the text template for the Default help topic. -// cli.go uses text/template to render templates. You can -// render custom help text by setting this variable. -var AppHelpTemplate = `NAME: - {{.Name}}{{if .Usage}} - {{.Usage}}{{end}} - -USAGE: - {{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}} {{if .VisibleFlags}}[global options]{{end}}{{if .Commands}} command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}{{if .Version}}{{if not .HideVersion}} - -VERSION: - {{.Version}}{{end}}{{end}}{{if .Description}} - -DESCRIPTION: - {{.Description}}{{end}}{{if len .Authors}} - -AUTHOR{{with $length := len .Authors}}{{if ne 1 $length}}S{{end}}{{end}}: - {{range $index, $author := .Authors}}{{if $index}} - {{end}}{{$author}}{{end}}{{end}}{{if .VisibleCommands}} - -COMMANDS:{{range .VisibleCategories}}{{if .Name}} - - {{.Name}}:{{range .VisibleCommands}} - {{join .Names ", "}}{{"\t"}}{{.Usage}}{{end}}{{else}}{{range .VisibleCommands}} - {{join .Names ", "}}{{"\t"}}{{.Usage}}{{end}}{{end}}{{end}}{{end}}{{if .VisibleFlags}} - -GLOBAL OPTIONS: - {{range $index, $option := .VisibleFlags}}{{if $index}} - {{end}}{{$option}}{{end}}{{end}}{{if .Copyright}} - -COPYRIGHT: - {{.Copyright}}{{end}} -` - -// CommandHelpTemplate is the text template for the command help topic. -// cli.go uses text/template to render templates. You can -// render custom help text by setting this variable. 
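> Editorial note, stepping back to `parse.go` above: `parseIter` is what made combined short options (`-it` instead of `-i -t`) work, by catching the "flag provided but not defined" error, splitting the offending argument via `splitShortOptions`, and reparsing. It only kicks in when `useShortOptionHandling()` reports true, which (if I recall the v1.22-era API correctly; treat this as an assumption) was driven by a `UseShortOptionHandling` field on the app or command. A sketch with hypothetical `-i`/`-t` flags:

```go
package main

import (
	"fmt"
	"os"

	"github.com/urfave/cli"
)

func main() {
	app := cli.NewApp()
	// Opt in; without this, "-it" is rejected as one unknown flag.
	app.UseShortOptionHandling = true
	app.Flags = []cli.Flag{
		cli.BoolFlag{Name: "i"},
		cli.BoolFlag{Name: "t"},
	}
	app.Action = func(c *cli.Context) error {
		// $ app -it   -> parseIter splits the argument into -i -t and retries
		fmt.Println(c.Bool("i"), c.Bool("t"))
		return nil
	}
	_ = app.Run(os.Args)
}
```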
-var CommandHelpTemplate = `NAME: - {{.HelpName}} - {{.Usage}} - -USAGE: - {{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}}{{if .VisibleFlags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}{{if .Category}} - -CATEGORY: - {{.Category}}{{end}}{{if .Description}} - -DESCRIPTION: - {{.Description}}{{end}}{{if .VisibleFlags}} - -OPTIONS: - {{range .VisibleFlags}}{{.}} - {{end}}{{end}} -` - -// SubcommandHelpTemplate is the text template for the subcommand help topic. -// cli.go uses text/template to render templates. You can -// render custom help text by setting this variable. -var SubcommandHelpTemplate = `NAME: - {{.HelpName}} - {{if .Description}}{{.Description}}{{else}}{{.Usage}}{{end}} - -USAGE: - {{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}} command{{if .VisibleFlags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}} - -COMMANDS:{{range .VisibleCategories}}{{if .Name}} - - {{.Name}}:{{range .VisibleCommands}} - {{join .Names ", "}}{{"\t"}}{{.Usage}}{{end}}{{else}}{{range .VisibleCommands}} - {{join .Names ", "}}{{"\t"}}{{.Usage}}{{end}}{{end}}{{end}}{{if .VisibleFlags}} - -OPTIONS: - {{range .VisibleFlags}}{{.}} - {{end}}{{end}} -` - -var MarkdownDocTemplate = `% {{ .App.Name }}(8) {{ .App.Description }} - -% {{ .App.Author }} - -# NAME - -{{ .App.Name }}{{ if .App.Usage }} - {{ .App.Usage }}{{ end }} - -# SYNOPSIS - -{{ .App.Name }} -{{ if .SynopsisArgs }} -` + "```" + ` -{{ range $v := .SynopsisArgs }}{{ $v }}{{ end }}` + "```" + ` -{{ end }}{{ if .App.UsageText }} -# DESCRIPTION - -{{ .App.UsageText }} -{{ end }} -**Usage**: - -` + "```" + ` -{{ .App.Name }} [GLOBAL OPTIONS] command [COMMAND OPTIONS] [ARGUMENTS...] -` + "```" + ` -{{ if .GlobalArgs }} -# GLOBAL OPTIONS -{{ range $v := .GlobalArgs }} -{{ $v }}{{ end }} -{{ end }}{{ if .Commands }} -# COMMANDS -{{ range $v := .Commands }} -{{ $v }}{{ end }}{{ end }}` - -var FishCompletionTemplate = `# {{ .App.Name }} fish shell completion - -function __fish_{{ .App.Name }}_no_subcommand --description 'Test if there has been any subcommand yet' - for i in (commandline -opc) - if contains -- $i{{ range $v := .AllCommands }} {{ $v }}{{ end }} - return 1 - end - end - return 0 -end - -{{ range $v := .Completions }}{{ $v }} -{{ end }}` diff --git a/vendor/go.etcd.io/bbolt/.gitignore b/vendor/go.etcd.io/bbolt/.gitignore deleted file mode 100644 index 18312f00..00000000 --- a/vendor/go.etcd.io/bbolt/.gitignore +++ /dev/null @@ -1,7 +0,0 @@ -*.prof -*.test -*.swp -/bin/ -cover.out -/.idea -*.iml diff --git a/vendor/go.etcd.io/bbolt/.travis.yml b/vendor/go.etcd.io/bbolt/.travis.yml deleted file mode 100644 index 452601e4..00000000 --- a/vendor/go.etcd.io/bbolt/.travis.yml +++ /dev/null @@ -1,18 +0,0 @@ -language: go -go_import_path: go.etcd.io/bbolt - -sudo: false - -go: -- 1.15 - -before_install: -- go get -v golang.org/x/sys/unix -- go get -v honnef.co/go/tools/... 
-- go get -v github.com/kisielk/errcheck - -script: -- make fmt -- make test -- make race -# - make errcheck diff --git a/vendor/go.etcd.io/bbolt/LICENSE b/vendor/go.etcd.io/bbolt/LICENSE deleted file mode 100644 index 004e77fe..00000000 --- a/vendor/go.etcd.io/bbolt/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013 Ben Johnson - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/go.etcd.io/bbolt/Makefile b/vendor/go.etcd.io/bbolt/Makefile deleted file mode 100644 index 21ecf48f..00000000 --- a/vendor/go.etcd.io/bbolt/Makefile +++ /dev/null @@ -1,36 +0,0 @@ -BRANCH=`git rev-parse --abbrev-ref HEAD` -COMMIT=`git rev-parse --short HEAD` -GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)" - -race: - @TEST_FREELIST_TYPE=hashmap go test -v -race -test.run="TestSimulate_(100op|1000op)" - @echo "array freelist test" - @TEST_FREELIST_TYPE=array go test -v -race -test.run="TestSimulate_(100op|1000op)" - -fmt: - !(gofmt -l -s -d $(shell find . -name \*.go) | grep '[a-z]') - -# go get honnef.co/go/tools/simple -gosimple: - gosimple ./... - -# go get honnef.co/go/tools/unused -unused: - unused ./... 
- -# go get github.com/kisielk/errcheck -errcheck: - @errcheck -ignorepkg=bytes -ignore=os:Remove go.etcd.io/bbolt - -test: - TEST_FREELIST_TYPE=hashmap go test -timeout 20m -v -coverprofile cover.out -covermode atomic - # Note: gets "program not an importable package" in out of path builds - TEST_FREELIST_TYPE=hashmap go test -v ./cmd/bbolt - - @echo "array freelist test" - - @TEST_FREELIST_TYPE=array go test -timeout 20m -v -coverprofile cover.out -covermode atomic - # Note: gets "program not an importable package" in out of path builds - @TEST_FREELIST_TYPE=array go test -v ./cmd/bbolt - -.PHONY: race fmt errcheck test gosimple unused diff --git a/vendor/go.etcd.io/bbolt/README.md b/vendor/go.etcd.io/bbolt/README.md deleted file mode 100644 index f1b4a7b2..00000000 --- a/vendor/go.etcd.io/bbolt/README.md +++ /dev/null @@ -1,958 +0,0 @@ -bbolt -===== - -[![Go Report Card](https://goreportcard.com/badge/github.com/etcd-io/bbolt?style=flat-square)](https://goreportcard.com/report/github.com/etcd-io/bbolt) -[![Coverage](https://codecov.io/gh/etcd-io/bbolt/branch/master/graph/badge.svg)](https://codecov.io/gh/etcd-io/bbolt) -[![Build Status Travis](https://img.shields.io/travis/etcd-io/bboltlabs.svg?style=flat-square&&branch=master)](https://travis-ci.com/etcd-io/bbolt) -[![Godoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/github.com/etcd-io/bbolt) -[![Releases](https://img.shields.io/github/release/etcd-io/bbolt/all.svg?style=flat-square)](https://github.com/etcd-io/bbolt/releases) -[![LICENSE](https://img.shields.io/github/license/etcd-io/bbolt.svg?style=flat-square)](https://github.com/etcd-io/bbolt/blob/master/LICENSE) - -bbolt is a fork of [Ben Johnson's][gh_ben] [Bolt][bolt] key/value -store. The purpose of this fork is to provide the Go community with an active -maintenance and development target for Bolt; the goal is improved reliability -and stability. bbolt includes bug fixes, performance enhancements, and features -not found in Bolt while preserving backwards compatibility with the Bolt API. - -Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas] -[LMDB project][lmdb]. The goal of the project is to provide a simple, -fast, and reliable database for projects that don't require a full database -server such as Postgres or MySQL. - -Since Bolt is meant to be used as such a low-level piece of functionality, -simplicity is key. The API will be small and only focus on getting values -and setting values. That's it. - -[gh_ben]: https://github.com/benbjohnson -[bolt]: https://github.com/boltdb/bolt -[hyc_symas]: https://twitter.com/hyc_symas -[lmdb]: http://symas.com/mdb/ - -## Project Status - -Bolt is stable, the API is fixed, and the file format is fixed. Full unit -test coverage and randomized black box testing are used to ensure database -consistency and thread safety. Bolt is currently used in high-load production -environments serving databases as large as 1TB. Many companies such as -Shopify and Heroku use Bolt-backed services every day. - -## Project versioning - -bbolt uses [semantic versioning](http://semver.org). -API should not change between patch and minor releases. -New minor versions may add additional features to the API. 
-
-## Table of Contents
-
-- [Getting Started](#getting-started)
-  - [Installing](#installing)
-  - [Opening a database](#opening-a-database)
-  - [Transactions](#transactions)
-    - [Read-write transactions](#read-write-transactions)
-    - [Read-only transactions](#read-only-transactions)
-    - [Batch read-write transactions](#batch-read-write-transactions)
-    - [Managing transactions manually](#managing-transactions-manually)
-  - [Using buckets](#using-buckets)
-  - [Using key/value pairs](#using-keyvalue-pairs)
-  - [Autoincrementing integer for the bucket](#autoincrementing-integer-for-the-bucket)
-  - [Iterating over keys](#iterating-over-keys)
-    - [Prefix scans](#prefix-scans)
-    - [Range scans](#range-scans)
-    - [ForEach()](#foreach)
-  - [Nested buckets](#nested-buckets)
-  - [Database backups](#database-backups)
-  - [Statistics](#statistics)
-  - [Read-Only Mode](#read-only-mode)
-  - [Mobile Use (iOS/Android)](#mobile-use-iosandroid)
-- [Resources](#resources)
-- [Comparison with other databases](#comparison-with-other-databases)
-  - [Postgres, MySQL, & other relational databases](#postgres-mysql--other-relational-databases)
-  - [LevelDB, RocksDB](#leveldb-rocksdb)
-  - [LMDB](#lmdb)
-- [Caveats & Limitations](#caveats--limitations)
-- [Reading the Source](#reading-the-source)
-- [Other Projects Using Bolt](#other-projects-using-bolt)
-
-## Getting Started
-
-### Installing
-
-To start using Bolt, install Go and run `go get`:
-
-```sh
-$ go get go.etcd.io/bbolt/...
-```
-
-This will retrieve the library and install the `bolt` command line utility into
-your `$GOBIN` path.
-
-
-### Importing bbolt
-
-To use bbolt as an embedded key-value store, import as:
-
-```go
-import bolt "go.etcd.io/bbolt"
-
-db, err := bolt.Open(path, 0666, nil)
-if err != nil {
-	return err
-}
-defer db.Close()
-```
-
-
-### Opening a database
-
-The top-level object in Bolt is a `DB`. It is represented as a single file on
-your disk and represents a consistent snapshot of your data.
-
-To open your database, simply use the `bolt.Open()` function:
-
-```go
-package main
-
-import (
-	"log"
-
-	bolt "go.etcd.io/bbolt"
-)
-
-func main() {
-	// Open the my.db data file in your current directory.
-	// It will be created if it doesn't exist.
-	db, err := bolt.Open("my.db", 0600, nil)
-	if err != nil {
-		log.Fatal(err)
-	}
-	defer db.Close()
-
-	...
-}
-```
-
-Please note that Bolt obtains a file lock on the data file so multiple processes
-cannot open the same database at the same time. Opening an already open Bolt
-database will cause it to hang until the other process closes it. To prevent
-an indefinite wait you can pass a timeout option to the `Open()` function:
-
-```go
-db, err := bolt.Open("my.db", 0600, &bolt.Options{Timeout: 1 * time.Second})
-```
-
-
-### Transactions
-
-Bolt allows only one read-write transaction at a time but allows as many
-read-only transactions as you want at a time. Each transaction has a consistent
-view of the data as it existed when the transaction started.
-
-Individual transactions and all objects created from them (e.g. buckets, keys)
-are not thread safe. To work with data in multiple goroutines you must start
-a transaction for each one or use locking to ensure only one goroutine accesses
-a transaction at a time. Creating a transaction from the `DB` is thread safe.
-
-Transactions should not depend on one another and generally shouldn't be opened
-simultaneously in the same goroutine.
-This can cause a deadlock as the read-write
-transaction needs to periodically re-map the data file but it cannot do so while
-any read-only transaction is open. Even a nested read-only transaction can cause
-a deadlock, as the child transaction can block the parent transaction from releasing
-its resources.
-
-#### Read-write transactions
-
-To start a read-write transaction, you can use the `DB.Update()` function:
-
-```go
-err := db.Update(func(tx *bolt.Tx) error {
-	...
-	return nil
-})
-```
-
-Inside the closure, you have a consistent view of the database. You commit the
-transaction by returning `nil` at the end. You can also rollback the transaction
-at any point by returning an error. All database operations are allowed inside
-a read-write transaction.
-
-Always check the return error as it will report any disk failures that can cause
-your transaction to not complete. If you return an error within your closure
-it will be passed through.
-
-
-#### Read-only transactions
-
-To start a read-only transaction, you can use the `DB.View()` function:
-
-```go
-err := db.View(func(tx *bolt.Tx) error {
-	...
-	return nil
-})
-```
-
-You also get a consistent view of the database within this closure, however,
-no mutating operations are allowed within a read-only transaction. You can only
-retrieve buckets, retrieve values, and copy the database within a read-only
-transaction.
-
-
-#### Batch read-write transactions
-
-Each `DB.Update()` waits for disk to commit the writes. This overhead
-can be minimized by combining multiple updates with the `DB.Batch()`
-function:
-
-```go
-err := db.Batch(func(tx *bolt.Tx) error {
-	...
-	return nil
-})
-```
-
-Concurrent Batch calls are opportunistically combined into larger
-transactions. Batch is only useful when there are multiple goroutines
-calling it.
-
-The trade-off is that `Batch` can call the given
-function multiple times, if parts of the transaction fail. The
-function must be idempotent and side effects must take effect only
-after a successful return from `DB.Batch()`.
-
-For example: don't display messages from inside the function, instead
-set variables in the enclosing scope:
-
-```go
-var id uint64
-err := db.Batch(func(tx *bolt.Tx) error {
-	// Find last key in bucket, decode as bigendian uint64, increment
-	// by one, encode back to []byte, and add new key.
-	...
-	id = newValue
-	return nil
-})
-if err != nil {
-	return ...
-}
-fmt.Printf("Allocated ID %d\n", id)
-```
-
-
-#### Managing transactions manually
-
-The `DB.View()` and `DB.Update()` functions are wrappers around the `DB.Begin()`
-function. These helper functions will start the transaction, execute a function,
-and then safely close your transaction if an error is returned. This is the
-recommended way to use Bolt transactions.
-
-However, sometimes you may want to manually start and end your transactions.
-You can use the `DB.Begin()` function directly but **please** be sure to close
-the transaction.
-
-```go
-// Start a writable transaction.
-tx, err := db.Begin(true)
-if err != nil {
-	return err
-}
-defer tx.Rollback()
-
-// Use the transaction...
-_, err = tx.CreateBucket([]byte("MyBucket"))
-if err != nil {
-	return err
-}
-
-// Commit the transaction and check for error.
-if err := tx.Commit(); err != nil {
-	return err
-}
-```
-
-The first argument to `DB.Begin()` is a boolean stating if the transaction
-should be writable.
-
-
-### Using buckets
-
-Buckets are collections of key/value pairs within the database. All keys in a
-bucket must be unique.
-You can create a bucket using the `Tx.CreateBucket()`
-function:
-
-```go
-db.Update(func(tx *bolt.Tx) error {
-	b, err := tx.CreateBucket([]byte("MyBucket"))
-	if err != nil {
-		return fmt.Errorf("create bucket: %s", err)
-	}
-	return nil
-})
-```
-
-You can also create a bucket only if it doesn't exist by using the
-`Tx.CreateBucketIfNotExists()` function. It's a common pattern to call this
-function for all your top-level buckets after you open your database so you can
-guarantee that they exist for future transactions.
-
-To delete a bucket, simply call the `Tx.DeleteBucket()` function.
-
-
-### Using key/value pairs
-
-To save a key/value pair to a bucket, use the `Bucket.Put()` function:
-
-```go
-db.Update(func(tx *bolt.Tx) error {
-	b := tx.Bucket([]byte("MyBucket"))
-	err := b.Put([]byte("answer"), []byte("42"))
-	return err
-})
-```
-
-This will set the value of the `"answer"` key to `"42"` in the `MyBucket`
-bucket. To retrieve this value, we can use the `Bucket.Get()` function:
-
-```go
-db.View(func(tx *bolt.Tx) error {
-	b := tx.Bucket([]byte("MyBucket"))
-	v := b.Get([]byte("answer"))
-	fmt.Printf("The answer is: %s\n", v)
-	return nil
-})
-```
-
-The `Get()` function does not return an error because its operation is
-guaranteed to work (unless there is some kind of system failure). If the key
-exists then it will return its byte slice value. If it doesn't exist then it
-will return `nil`. It's important to note that you can have a zero-length value
-set to a key, which is different from the key not existing.
-
-Use the `Bucket.Delete()` function to delete a key from the bucket.
-
-Please note that values returned from `Get()` are only valid while the
-transaction is open. If you need to use a value outside of the transaction
-then you must use `copy()` to copy it to another byte slice.
-
-
-### Autoincrementing integer for the bucket
-By using the `NextSequence()` function, you can let Bolt determine a sequence
-which can be used as the unique identifier for your key/value pairs. See the
-example below.
-
-```go
-// CreateUser saves u to the store. The new user ID is set on u once the data is persisted.
-func (s *Store) CreateUser(u *User) error {
-	return s.db.Update(func(tx *bolt.Tx) error {
-		// Retrieve the users bucket.
-		// This should be created when the DB is first opened.
-		b := tx.Bucket([]byte("users"))
-
-		// Generate ID for the user.
-		// This returns an error only if the Tx is closed or not writeable.
-		// That can't happen in an Update() call so I ignore the error check.
-		id, _ := b.NextSequence()
-		u.ID = int(id)
-
-		// Marshal user data into bytes.
-		buf, err := json.Marshal(u)
-		if err != nil {
-			return err
-		}
-
-		// Persist bytes to users bucket.
-		return b.Put(itob(u.ID), buf)
-	})
-}
-
-// itob returns an 8-byte big endian representation of v.
-func itob(v int) []byte {
-	b := make([]byte, 8)
-	binary.BigEndian.PutUint64(b, uint64(v))
-	return b
-}
-
-type User struct {
-	ID int
-	...
-}
-```
-
-### Iterating over keys
-
-Bolt stores its keys in byte-sorted order within a bucket. This makes sequential
-iteration over these keys extremely fast.
-To iterate over keys we'll use a `Cursor`:
-
-```go
-db.View(func(tx *bolt.Tx) error {
-	// Assume bucket exists and has keys
-	b := tx.Bucket([]byte("MyBucket"))
-
-	c := b.Cursor()
-
-	for k, v := c.First(); k != nil; k, v = c.Next() {
-		fmt.Printf("key=%s, value=%s\n", k, v)
-	}
-
-	return nil
-})
-```
-
-The cursor allows you to move to a specific point in the list of keys and move
-forward or backward through the keys one at a time.
-
-The following functions are available on the cursor:
-
-```
-First()  Move to the first key.
-Last()   Move to the last key.
-Seek()   Move to a specific key.
-Next()   Move to the next key.
-Prev()   Move to the previous key.
-```
-
-Each of those functions has a return signature of `(key []byte, value []byte)`.
-When you have iterated to the end of the cursor then `Next()` will return a
-`nil` key. You must seek to a position using `First()`, `Last()`, or `Seek()`
-before calling `Next()` or `Prev()`. If you do not seek to a position then
-these functions will return a `nil` key.
-
-During iteration, if the key is non-`nil` but the value is `nil`, that means
-the key refers to a bucket rather than a value. Use `Bucket.Bucket()` to
-access the sub-bucket.
-
-
-#### Prefix scans
-
-To iterate over a key prefix, you can combine `Seek()` and `bytes.HasPrefix()`:
-
-```go
-db.View(func(tx *bolt.Tx) error {
-	// Assume bucket exists and has keys
-	c := tx.Bucket([]byte("MyBucket")).Cursor()
-
-	prefix := []byte("1234")
-	for k, v := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = c.Next() {
-		fmt.Printf("key=%s, value=%s\n", k, v)
-	}
-
-	return nil
-})
-```
-
-#### Range scans
-
-Another common use case is scanning over a range such as a time range. If you
-use a sortable time encoding such as RFC3339 then you can query a specific
-date range like this:
-
-```go
-db.View(func(tx *bolt.Tx) error {
-	// Assume our events bucket exists and has RFC3339 encoded time keys.
-	c := tx.Bucket([]byte("Events")).Cursor()
-
-	// Our time range spans the 90's decade.
-	min := []byte("1990-01-01T00:00:00Z")
-	max := []byte("2000-01-01T00:00:00Z")
-
-	// Iterate over the 90's.
-	for k, v := c.Seek(min); k != nil && bytes.Compare(k, max) <= 0; k, v = c.Next() {
-		fmt.Printf("%s: %s\n", k, v)
-	}
-
-	return nil
-})
-```
-
-Note that, while RFC3339 is sortable, the Golang implementation of RFC3339Nano does not use a fixed number of digits after the decimal point and is therefore not sortable.
-
-
-#### ForEach()
-
-You can also use the function `ForEach()` if you know you'll be iterating over
-all the keys in a bucket:
-
-```go
-db.View(func(tx *bolt.Tx) error {
-	// Assume bucket exists and has keys
-	b := tx.Bucket([]byte("MyBucket"))
-
-	b.ForEach(func(k, v []byte) error {
-		fmt.Printf("key=%s, value=%s\n", k, v)
-		return nil
-	})
-	return nil
-})
-```
-
-Please note that keys and values in `ForEach()` are only valid while
-the transaction is open. If you need to use a key or value outside of
-the transaction, you must use `copy()` to copy it to another byte
-slice.
-
-### Nested buckets
-
-You can also store a bucket in a key to create nested buckets. The API is the
-same as the bucket management API on the `DB` object:
-
-```go
-func (*Bucket) CreateBucket(key []byte) (*Bucket, error)
-func (*Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error)
-func (*Bucket) DeleteBucket(key []byte) error
-```
-
-Say you had a multi-tenant application where the root level bucket was the account bucket.
-Inside of this bucket was a sequence of accounts, which are themselves buckets.
-And inside the sequence bucket you could have many buckets pertaining to the
-account itself (Users, Notes, etc.) isolating the information into logical
-groupings.
-
-```go
-
-// createUser creates a new user in the given account.
-func createUser(accountID int, u *User) error {
-	// Start the transaction.
-	tx, err := db.Begin(true)
-	if err != nil {
-		return err
-	}
-	defer tx.Rollback()
-
-	// Retrieve the root bucket for the account.
-	// Assume this has already been created when the account was set up.
-	root := tx.Bucket([]byte(strconv.Itoa(accountID)))
-
-	// Set up the users bucket.
-	bkt, err := root.CreateBucketIfNotExists([]byte("USERS"))
-	if err != nil {
-		return err
-	}
-
-	// Generate an ID for the new user.
-	userID, err := bkt.NextSequence()
-	if err != nil {
-		return err
-	}
-	u.ID = userID
-
-	// Marshal and save the encoded user.
-	if buf, err := json.Marshal(u); err != nil {
-		return err
-	} else if err := bkt.Put([]byte(strconv.FormatUint(u.ID, 10)), buf); err != nil {
-		return err
-	}
-
-	// Commit the transaction.
-	if err := tx.Commit(); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-```
-
-
-### Database backups
-
-Bolt is a single file so it's easy to back up. You can use the `Tx.WriteTo()`
-function to write a consistent view of the database to a writer. If you call
-this from a read-only transaction, it will perform a hot backup and not block
-your other database reads and writes.
-
-By default, it will use a regular file handle which will utilize the operating
-system's page cache. See the [`Tx`](https://godoc.org/go.etcd.io/bbolt#Tx)
-documentation for information about optimizing for larger-than-RAM datasets.
-
-One common use case is to back up over HTTP so you can use tools like `cURL` to
-do database backups:
-
-```go
-func BackupHandleFunc(w http.ResponseWriter, req *http.Request) {
-	err := db.View(func(tx *bolt.Tx) error {
-		w.Header().Set("Content-Type", "application/octet-stream")
-		w.Header().Set("Content-Disposition", `attachment; filename="my.db"`)
-		w.Header().Set("Content-Length", strconv.Itoa(int(tx.Size())))
-		_, err := tx.WriteTo(w)
-		return err
-	})
-	if err != nil {
-		http.Error(w, err.Error(), http.StatusInternalServerError)
-	}
-}
-```
-
-Then you can back up using this command:
-
-```sh
-$ curl http://localhost/backup > my.db
-```
-
-Or you can open your browser to `http://localhost/backup` and it will download
-automatically.
-
-If you want to back up to another file you can use the `Tx.CopyFile()` helper
-function.
-
-
-### Statistics
-
-The database keeps a running count of many of the internal operations it
-performs so you can better understand what's going on. By grabbing a snapshot
-of these stats at two points in time we can see what operations were performed
-in that time range.
-
-For example, we could start a goroutine to log stats every 10 seconds:
-
-```go
-go func() {
-	// Grab the initial stats.
-	prev := db.Stats()
-
-	for {
-		// Wait for 10s.
-		time.Sleep(10 * time.Second)
-
-		// Grab the current stats and diff them.
-		stats := db.Stats()
-		diff := stats.Sub(&prev)
-
-		// Encode stats to JSON and print to STDERR.
-		json.NewEncoder(os.Stderr).Encode(diff)
-
-		// Save stats for the next loop.
-		prev = stats
-	}
-}()
-```
-
-It's also useful to pipe these stats to a service such as statsd for monitoring
-or to provide an HTTP endpoint that will perform a fixed-length sample.
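-
-A minimal sketch of such an endpoint (this example is not part of the original
-README; the ten-second window, the handler name, and the package-level `db`
-variable are illustrative assumptions, reusing the same setup as the backup
-example above):
-
-```go
-// StatsHandleFunc responds with the stats delta observed over a fixed
-// ten-second window, encoded as JSON.
-func StatsHandleFunc(w http.ResponseWriter, req *http.Request) {
-	// Snapshot, wait, snapshot again, then diff the two.
-	prev := db.Stats()
-	time.Sleep(10 * time.Second)
-	cur := db.Stats()
-	diff := cur.Sub(&prev)
-
-	w.Header().Set("Content-Type", "application/json")
-	if err := json.NewEncoder(w).Encode(diff); err != nil {
-		http.Error(w, err.Error(), http.StatusInternalServerError)
-	}
-}
-```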
-
-
-### Read-Only Mode
-
-Sometimes it is useful to create a shared, read-only Bolt database. To do this,
-set the `Options.ReadOnly` flag when opening your database. Read-only mode
-uses a shared lock to allow multiple processes to read from the database but
-it will block any processes from opening the database in read-write mode.
-
-```go
-db, err := bolt.Open("my.db", 0666, &bolt.Options{ReadOnly: true})
-if err != nil {
-	log.Fatal(err)
-}
-```
-
-### Mobile Use (iOS/Android)
-
-Bolt is able to run on mobile devices by leveraging the binding feature of the
-[gomobile](https://github.com/golang/mobile) tool. Create a struct that will
-contain your database logic and a reference to a `*bolt.DB` with an initializing
-constructor that takes in a filepath where the database file will be stored.
-Neither Android nor iOS require extra permissions or cleanup from using this method.
-
-```go
-func NewBoltDB(filepath string) *BoltDB {
-	db, err := bolt.Open(filepath+"/demo.db", 0600, nil)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return &BoltDB{db}
-}
-
-type BoltDB struct {
-	db *bolt.DB
-	...
-}
-
-func (b *BoltDB) Path() string {
-	return b.db.Path()
-}
-
-func (b *BoltDB) Close() {
-	b.db.Close()
-}
-```
-
-Database logic should be defined as methods on this wrapper struct.
-
-To initialize this struct from the native language (both platforms now sync
-their local storage to the cloud; these snippets disable that functionality for
-the database file):
-
-#### Android
-
-```java
-String path;
-if (android.os.Build.VERSION.SDK_INT >= android.os.Build.VERSION_CODES.LOLLIPOP) {
-    path = getNoBackupFilesDir().getAbsolutePath();
-} else {
-    path = getFilesDir().getAbsolutePath();
-}
-Boltmobiledemo.BoltDB boltDB = Boltmobiledemo.NewBoltDB(path);
-```
-
-#### iOS
-
-```objc
-- (void)demo {
-    NSString* path = [NSSearchPathForDirectoriesInDomains(NSLibraryDirectory,
-                                                          NSUserDomainMask,
-                                                          YES) objectAtIndex:0];
-    GoBoltmobiledemoBoltDB * demo = GoBoltmobiledemoNewBoltDB(path);
-    [self addSkipBackupAttributeToItemAtPath:demo.path];
-    //Some DB Logic would go here
-    [demo close];
-}
-
-- (BOOL)addSkipBackupAttributeToItemAtPath:(NSString *) filePathString
-{
-    NSURL* URL= [NSURL fileURLWithPath: filePathString];
-    assert([[NSFileManager defaultManager] fileExistsAtPath: [URL path]]);
-
-    NSError *error = nil;
-    BOOL success = [URL setResourceValue: [NSNumber numberWithBool: YES]
-                                  forKey: NSURLIsExcludedFromBackupKey error: &error];
-    if (!success) {
-        NSLog(@"Error excluding %@ from backup %@", [URL lastPathComponent], error);
-    }
-    return success;
-}
-```
-
-## Resources
-
-For more information on getting started with Bolt, check out the following articles:
-
-* [Intro to BoltDB: Painless Performant Persistence](http://npf.io/2014/07/intro-to-boltdb-painless-performant-persistence/) by [Nate Finch](https://github.com/natefinch).
-* [Bolt -- an embedded key/value database for Go](https://www.progville.com/go/bolt-embedded-db-golang/) by Progville
-
-
-## Comparison with other databases
-
-### Postgres, MySQL, & other relational databases
-
-Relational databases structure data into rows and are only accessible through
-the use of SQL. This approach provides flexibility in how you store and query
-your data but also incurs overhead in parsing and planning SQL statements. Bolt
-accesses all data by a byte slice key. This makes Bolt fast to read and write
-data by key but provides no built-in support for joining values together.
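-
-For illustration (this sketch is not from the original README), a "join" in
-Bolt is simply a second lookup you perform by hand inside one transaction; the
-`Users` and `Orders` buckets and the convention that an order value begins
-with an 8-byte user key are assumptions made up for this example:
-
-```go
-db.View(func(tx *bolt.Tx) error {
-	orders := tx.Bucket([]byte("Orders"))
-	users := tx.Bucket([]byte("Users"))
-
-	// Fetch an order, then follow the embedded user key manually.
-	order := orders.Get([]byte("order-1001"))
-	if order == nil || len(order) < 8 {
-		return nil
-	}
-	user := users.Get(order[:8]) // the "join": a plain key lookup
-	fmt.Printf("user record: %s\n", user)
-	return nil
-})
-```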
-
-Most relational databases (with the exception of SQLite) are standalone servers
-that run separately from your application. This gives your systems
-flexibility to connect multiple application servers to a single database
-server but also adds overhead in serializing and transporting data over the
-network. Bolt runs as a library included in your application so all data access
-has to go through your application's process. This brings data closer to your
-application but limits multi-process access to the data.
-
-
-### LevelDB, RocksDB
-
-LevelDB and its derivatives (RocksDB, HyperLevelDB) are similar to Bolt in that
-they are libraries bundled into the application, however, their underlying
-structure is a log-structured merge-tree (LSM tree). An LSM tree optimizes
-random writes by using a write-ahead log and multi-tiered, sorted files called
-SSTables. Bolt uses a B+tree internally and only a single file. Both approaches
-have trade-offs.
-
-If you require a high random write throughput (>10,000 w/sec) or you need to use
-spinning disks then LevelDB could be a good choice. If your application is
-read-heavy or does a lot of range scans then Bolt could be a good choice.
-
-One other important consideration is that LevelDB does not have transactions.
-It supports batch writing of key/value pairs and it supports read snapshots
-but it will not give you the ability to do a compare-and-swap operation safely.
-Bolt supports fully serializable ACID transactions.
-
-
-### LMDB
-
-Bolt was originally a port of LMDB so it is architecturally similar. Both use
-a B+tree, have ACID semantics with fully serializable transactions, and support
-lock-free MVCC using a single writer and multiple readers.
-
-The two projects have somewhat diverged. LMDB heavily focuses on raw performance
-while Bolt has focused on simplicity and ease of use. For example, LMDB allows
-several unsafe actions such as direct writes for the sake of performance. Bolt
-opts to disallow actions which can leave the database in a corrupted state. The
-only exception to this in Bolt is `DB.NoSync`.
-
-There are also a few differences in API. LMDB requires a maximum mmap size when
-opening an `mdb_env` whereas Bolt will handle incremental mmap resizing
-automatically. LMDB overloads the getter and setter functions with multiple
-flags whereas Bolt splits these specialized cases into their own functions.
-
-
-## Caveats & Limitations
-
-It's important to pick the right tool for the job and Bolt is no exception.
-Here are a few things to note when evaluating and using Bolt:
-
-* Bolt is good for read intensive workloads. Sequential write performance is
-  also fast but random writes can be slow. You can use `DB.Batch()` or add a
-  write-ahead log to help mitigate this issue.
-
-* Bolt uses a B+tree internally so there can be a lot of random page access.
-  SSDs provide a significant performance boost over spinning disks.
-
-* Try to avoid long running read transactions. Bolt uses copy-on-write so
-  old pages cannot be reclaimed while an old transaction is using them.
-
-* Byte slices returned from Bolt are only valid during a transaction. Once the
-  transaction has been committed or rolled back then the memory they point to
-  can be reused by a new page or can be unmapped from virtual memory and you'll
-  see an `unexpected fault address` panic when accessing it.
-
-* Bolt uses an exclusive write lock on the database file so it cannot be
-  shared by multiple processes.
-
-* Be careful when using `Bucket.FillPercent`.
-  Setting a high fill percent for buckets that have random inserts will cause
-  your database to have very poor page utilization.
-
-* Use larger buckets in general. Smaller buckets cause poor page utilization
-  once they become larger than the page size (typically 4KB).
-
-* Bulk loading a lot of random writes into a new bucket can be slow as the
-  page will not split until the transaction is committed. Randomly inserting
-  more than 100,000 key/value pairs into a single new bucket in a single
-  transaction is not advised.
-
-* Bolt uses a memory-mapped file so the underlying operating system handles the
-  caching of the data. Typically, the OS will cache as much of the file as it
-  can in memory and will release memory as needed to other processes. This means
-  that Bolt can show very high memory usage when working with large databases.
-  However, this is expected and the OS will release memory as needed. Bolt can
-  handle databases much larger than the available physical RAM, provided its
-  memory-map fits in the process virtual address space. It may be problematic
-  on 32-bit systems.
-
-* The data structures in the Bolt database are memory mapped so the data file
-  will be endian specific. This means that you cannot copy a Bolt file from a
-  little endian machine to a big endian machine and have it work. For most
-  users this is not a concern since most modern CPUs are little endian.
-
-* Because of the way pages are laid out on disk, Bolt cannot truncate data files
-  and return free pages back to the disk. Instead, Bolt maintains a free list
-  of unused pages within its data file. These free pages can be reused by later
-  transactions. This works well for many use cases as databases generally tend
-  to grow. However, it's important to note that deleting large chunks of data
-  will not allow you to reclaim that space on disk.
-
-  For more information on page allocation, [see this comment][page-allocation].
-
-[page-allocation]: https://github.com/boltdb/bolt/issues/308#issuecomment-74811638
-
-
-## Reading the Source
-
-Bolt is a relatively small code base (<5KLOC) for an embedded, serializable,
-transactional key/value database so it can be a good starting point for people
-interested in how databases work.
-
-The best places to start are the main entry points into Bolt:
-
-- `Open()` - Initializes the reference to the database. It's responsible for
-  creating the database if it doesn't exist, obtaining an exclusive lock on the
-  file, reading the meta pages, & memory-mapping the file.
-
-- `DB.Begin()` - Starts a read-only or read-write transaction depending on the
-  value of the `writable` argument. This requires briefly obtaining the "meta"
-  lock to keep track of open transactions. Only one read-write transaction can
-  exist at a time so the "rwlock" is acquired during the life of a read-write
-  transaction.
-
-- `Bucket.Put()` - Writes a key/value pair into a bucket. After validating the
-  arguments, a cursor is used to traverse the B+tree to the page and position
-  where the key & value will be written. Once the position is found, the bucket
-  materializes the underlying page and the page's parent pages into memory as
-  "nodes". These nodes are where mutations occur during read-write transactions.
-  These changes get flushed to disk during commit.
-
-- `Bucket.Get()` - Retrieves a key/value pair from a bucket. This uses a cursor
-  to move to the page & position of a key/value pair.
-  During a read-only transaction, the key and value data is returned as a
-  direct reference to the underlying mmap file so there's no allocation
-  overhead. For read-write transactions, this data may reference the mmap file
-  or one of the in-memory node values.
-
-- `Cursor` - This object is simply for traversing the B+tree of on-disk pages
-  or in-memory nodes. It can seek to a specific key, move to the first or last
-  value, or it can move forward or backward. The cursor handles the movement up
-  and down the B+tree transparently to the end user.
-
-- `Tx.Commit()` - Converts the in-memory dirty nodes and the list of free pages
-  into pages to be written to disk. Writing to disk then occurs in two phases.
-  First, the dirty pages are written to disk and an `fsync()` occurs. Second, a
-  new meta page with an incremented transaction ID is written and another
-  `fsync()` occurs. This two phase write ensures that partially written data
-  pages are ignored in the event of a crash since the meta page pointing to them
-  is never written. Partially written meta pages are invalidated because they
-  are written with a checksum.
-
-If you have additional notes that could be helpful for others, please submit
-them via pull request.
-
-
-## Other Projects Using Bolt
-
-Below is a list of public, open source projects that use Bolt:
-
-* [Algernon](https://github.com/xyproto/algernon) - An HTTP/2 web server with built-in support for Lua. Uses BoltDB as the default database backend.
-* [Bazil](https://bazil.org/) - A file system that lets your data reside where it is most convenient for it to reside.
-* [bolter](https://github.com/hasit/bolter) - Command-line app for viewing BoltDB file in your terminal.
-* [boltcli](https://github.com/spacewander/boltcli) - the redis-cli for boltdb with Lua script support.
-* [BoltHold](https://github.com/timshannon/bolthold) - An embeddable NoSQL store for Go types built on BoltDB
-* [BoltStore](https://github.com/yosssi/boltstore) - Session store using Bolt.
-* [Boltdb Boilerplate](https://github.com/bobintornado/boltdb-boilerplate) - Boilerplate wrapper around bolt aiming to make simple calls one-liners.
-* [BoltDbWeb](https://github.com/evnix/boltdbweb) - A web based GUI for BoltDB files.
-* [BoltDB Viewer](https://github.com/zc310/rich_boltdb) - A BoltDB viewer that can run on Windows, Linux, and Android systems.
-* [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend.
-* [btcwallet](https://github.com/btcsuite/btcwallet) - A bitcoin wallet.
-* [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining
-  simple tx and key scans.
-* [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend.
-* [ChainStore](https://github.com/pressly/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations.
-* [🌰 Chestnut](https://github.com/jrapoport/chestnut) - Chestnut is encrypted storage for Go.
-* [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware.
-* [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as optional storage engine and testing it against Basho-tuned leveldb.
-* [dcrwallet](https://github.com/decred/dcrwallet) - A wallet for the Decred cryptocurrency.
-* [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems.
-* [event-shuttle](https://github.com/sclasen/event-shuttle) - A Unix system service to collect and reliably deliver messages to Kafka.
-* [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and lightweight platform for your files and data.
-* [Go Report Card](https://goreportcard.com/) - Go code quality report cards as a (free and open source) service.
-* [GoWebApp](https://github.com/josephspurrier/gowebapp) - A basic MVC web application in Go using BoltDB.
-* [GoShort](https://github.com/pankajkhairnar/goShort) - GoShort is a URL shortener written in Golang, using BoltDB for persistent key/value storage and the high-performance HTTPRouter for routing.
-* [gopherpit](https://github.com/gopherpit/gopherpit) - A web service to manage Go remote import paths with custom domains
-* [gokv](https://github.com/philippgille/gokv) - Simple key-value store abstraction and implementations for Go (Redis, Consul, etcd, bbolt, BadgerDB, LevelDB, Memcached, DynamoDB, S3, PostgreSQL, MongoDB, CockroachDB and many more)
-* [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, peer-to-peer Git repositories aka "Git meets Bitcoin".
-* [InfluxDB](https://influxdata.com) - Scalable datastore for metrics, events, and real-time analytics.
-* [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast ip-geo-location-server using bolt with bloom filters.
-* [ipxed](https://github.com/kelseyhightower/ipxed) - Web interface and api for ipxed.
-* [Ironsmith](https://github.com/timshannon/ironsmith) - A simple, script-driven continuous integration (build -> test -> release) tool, with no external dependencies
-* [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It is persistent, has a JSON-over-HTTP API, supports ISO 8601 duration notation, and dependent jobs.
-* [Key Value Access Language (KVAL)](https://github.com/kval-access-language) - A proposed grammar for key-value datastores offering a bbolt binding.
-* [LedisDB](https://github.com/siddontang/ledisdb) - A high performance NoSQL, using Bolt as optional storage.
-* [lru](https://github.com/crowdriff/lru) - Easy to use Bolt-backed Least-Recently-Used (LRU) read-through cache with chainable remote stores.
-* [mbuckets](https://github.com/abhigupta912/mbuckets) - A Bolt wrapper that allows easy operations on multi level (nested) buckets.
-* [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite.
-* [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files.
-* [NATS](https://github.com/nats-io/nats-streaming-server) - NATS Streaming uses bbolt for message and metadata storage.
-* [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system.
-* [Rain](https://github.com/cenkalti/rain) - BitTorrent client and library.
-* [reef-pi](https://github.com/reef-pi/reef-pi) - reef-pi is an award-winning, modular, DIY reef tank controller using easy to learn electronics based on a Raspberry Pi.
-* [Request Baskets](https://github.com/darklynx/request-baskets) - A web service to collect arbitrary HTTP requests and inspect them via REST API or simple web UI, similar to [RequestBin](http://requestb.in/) service -* [Seaweed File System](https://github.com/chrislusf/seaweedfs) - Highly scalable distributed key~file system with O(1) disk read. -* [stow](https://github.com/djherbis/stow) - a persistence manager for objects - backed by boltdb. -* [Storm](https://github.com/asdine/storm) - Simple and powerful ORM for BoltDB. -* [SimpleBolt](https://github.com/xyproto/simplebolt) - A simple way to use BoltDB. Deals mainly with strings. -* [Skybox Analytics](https://github.com/skybox/skybox) - A standalone funnel analysis tool for web analytics. -* [Scuttlebutt](https://github.com/benbjohnson/scuttlebutt) - Uses Bolt to store and process all Twitter mentions of GitHub projects. -* [tentacool](https://github.com/optiflows/tentacool) - REST api server to manage system stuff (IP, DNS, Gateway...) on a linux server. -* [torrent](https://github.com/anacrolix/torrent) - Full-featured BitTorrent client package and utilities in Go. BoltDB is a storage backend in development. -* [Wiki](https://github.com/peterhellberg/wiki) - A tiny wiki using Goji, BoltDB and Blackfriday. - -If you are using Bolt in a project please send a pull request to add it to the list. diff --git a/vendor/go.etcd.io/bbolt/bolt_386.go b/vendor/go.etcd.io/bbolt/bolt_386.go deleted file mode 100644 index aee25960..00000000 --- a/vendor/go.etcd.io/bbolt/bolt_386.go +++ /dev/null @@ -1,7 +0,0 @@ -package bbolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0x7FFFFFFF // 2GB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0xFFFFFFF diff --git a/vendor/go.etcd.io/bbolt/bolt_amd64.go b/vendor/go.etcd.io/bbolt/bolt_amd64.go deleted file mode 100644 index 5dd8f3f2..00000000 --- a/vendor/go.etcd.io/bbolt/bolt_amd64.go +++ /dev/null @@ -1,7 +0,0 @@ -package bbolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0xFFFFFFFFFFFF // 256TB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0x7FFFFFFF diff --git a/vendor/go.etcd.io/bbolt/bolt_arm.go b/vendor/go.etcd.io/bbolt/bolt_arm.go deleted file mode 100644 index aee25960..00000000 --- a/vendor/go.etcd.io/bbolt/bolt_arm.go +++ /dev/null @@ -1,7 +0,0 @@ -package bbolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0x7FFFFFFF // 2GB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0xFFFFFFF diff --git a/vendor/go.etcd.io/bbolt/bolt_arm64.go b/vendor/go.etcd.io/bbolt/bolt_arm64.go deleted file mode 100644 index 810dfd55..00000000 --- a/vendor/go.etcd.io/bbolt/bolt_arm64.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build arm64 - -package bbolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0xFFFFFFFFFFFF // 256TB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0x7FFFFFFF diff --git a/vendor/go.etcd.io/bbolt/bolt_linux.go b/vendor/go.etcd.io/bbolt/bolt_linux.go deleted file mode 100644 index 7707bcac..00000000 --- a/vendor/go.etcd.io/bbolt/bolt_linux.go +++ /dev/null @@ -1,10 +0,0 @@ -package bbolt - -import ( - "syscall" -) - -// fdatasync flushes written data to a file descriptor. 
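-// On Linux this calls fdatasync(2), which is cheaper than a full fsync(2):
-// it flushes the file data but skips metadata (such as mtime) unless that
-// metadata is required to read the data back correctly.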
-func fdatasync(db *DB) error { - return syscall.Fdatasync(int(db.file.Fd())) -} diff --git a/vendor/go.etcd.io/bbolt/bolt_mips64x.go b/vendor/go.etcd.io/bbolt/bolt_mips64x.go deleted file mode 100644 index dd8ffe12..00000000 --- a/vendor/go.etcd.io/bbolt/bolt_mips64x.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build mips64 mips64le - -package bbolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0x8000000000 // 512GB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0x7FFFFFFF diff --git a/vendor/go.etcd.io/bbolt/bolt_mipsx.go b/vendor/go.etcd.io/bbolt/bolt_mipsx.go deleted file mode 100644 index a669703a..00000000 --- a/vendor/go.etcd.io/bbolt/bolt_mipsx.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build mips mipsle - -package bbolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0x40000000 // 1GB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0xFFFFFFF diff --git a/vendor/go.etcd.io/bbolt/bolt_openbsd.go b/vendor/go.etcd.io/bbolt/bolt_openbsd.go deleted file mode 100644 index d7f50358..00000000 --- a/vendor/go.etcd.io/bbolt/bolt_openbsd.go +++ /dev/null @@ -1,27 +0,0 @@ -package bbolt - -import ( - "syscall" - "unsafe" -) - -const ( - msAsync = 1 << iota // perform asynchronous writes - msSync // perform synchronous writes - msInvalidate // invalidate cached data -) - -func msync(db *DB) error { - _, _, errno := syscall.Syscall(syscall.SYS_MSYNC, uintptr(unsafe.Pointer(db.data)), uintptr(db.datasz), msInvalidate) - if errno != 0 { - return errno - } - return nil -} - -func fdatasync(db *DB) error { - if db.data != nil { - return msync(db) - } - return db.file.Sync() -} diff --git a/vendor/go.etcd.io/bbolt/bolt_ppc.go b/vendor/go.etcd.io/bbolt/bolt_ppc.go deleted file mode 100644 index 84e545ef..00000000 --- a/vendor/go.etcd.io/bbolt/bolt_ppc.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build ppc - -package bbolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0x7FFFFFFF // 2GB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0xFFFFFFF diff --git a/vendor/go.etcd.io/bbolt/bolt_ppc64.go b/vendor/go.etcd.io/bbolt/bolt_ppc64.go deleted file mode 100644 index a7612090..00000000 --- a/vendor/go.etcd.io/bbolt/bolt_ppc64.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build ppc64 - -package bbolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0xFFFFFFFFFFFF // 256TB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0x7FFFFFFF diff --git a/vendor/go.etcd.io/bbolt/bolt_ppc64le.go b/vendor/go.etcd.io/bbolt/bolt_ppc64le.go deleted file mode 100644 index c830f2fc..00000000 --- a/vendor/go.etcd.io/bbolt/bolt_ppc64le.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build ppc64le - -package bbolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0xFFFFFFFFFFFF // 256TB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0x7FFFFFFF diff --git a/vendor/go.etcd.io/bbolt/bolt_riscv64.go b/vendor/go.etcd.io/bbolt/bolt_riscv64.go deleted file mode 100644 index c967613b..00000000 --- a/vendor/go.etcd.io/bbolt/bolt_riscv64.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build riscv64 - -package bbolt - -// maxMapSize represents the largest mmap size supported by Bolt. 
-const maxMapSize = 0xFFFFFFFFFFFF // 256TB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0x7FFFFFFF diff --git a/vendor/go.etcd.io/bbolt/bolt_s390x.go b/vendor/go.etcd.io/bbolt/bolt_s390x.go deleted file mode 100644 index ff2a5609..00000000 --- a/vendor/go.etcd.io/bbolt/bolt_s390x.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build s390x - -package bbolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0xFFFFFFFFFFFF // 256TB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0x7FFFFFFF diff --git a/vendor/go.etcd.io/bbolt/bolt_unix.go b/vendor/go.etcd.io/bbolt/bolt_unix.go deleted file mode 100644 index 4e5f65cc..00000000 --- a/vendor/go.etcd.io/bbolt/bolt_unix.go +++ /dev/null @@ -1,86 +0,0 @@ -// +build !windows,!plan9,!solaris,!aix - -package bbolt - -import ( - "fmt" - "syscall" - "time" - "unsafe" - - "golang.org/x/sys/unix" -) - -// flock acquires an advisory lock on a file descriptor. -func flock(db *DB, exclusive bool, timeout time.Duration) error { - var t time.Time - if timeout != 0 { - t = time.Now() - } - fd := db.file.Fd() - flag := syscall.LOCK_NB - if exclusive { - flag |= syscall.LOCK_EX - } else { - flag |= syscall.LOCK_SH - } - for { - // Attempt to obtain an exclusive lock. - err := syscall.Flock(int(fd), flag) - if err == nil { - return nil - } else if err != syscall.EWOULDBLOCK { - return err - } - - // If we timed out then return an error. - if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout { - return ErrTimeout - } - - // Wait for a bit and try again. - time.Sleep(flockRetryTimeout) - } -} - -// funlock releases an advisory lock on a file descriptor. -func funlock(db *DB) error { - return syscall.Flock(int(db.file.Fd()), syscall.LOCK_UN) -} - -// mmap memory maps a DB's data file. -func mmap(db *DB, sz int) error { - // Map the data file to memory. - b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags) - if err != nil { - return err - } - - // Advise the kernel that the mmap is accessed randomly. - err = unix.Madvise(b, syscall.MADV_RANDOM) - if err != nil && err != syscall.ENOSYS { - // Ignore not implemented error in kernel because it still works. - return fmt.Errorf("madvise: %s", err) - } - - // Save the original byte slice and convert to a byte array pointer. - db.dataref = b - db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0])) - db.datasz = sz - return nil -} - -// munmap unmaps a DB's data file from memory. -func munmap(db *DB) error { - // Ignore the unmap if we have no mapped data. - if db.dataref == nil { - return nil - } - - // Unmap using the original byte slice. - err := unix.Munmap(db.dataref) - db.dataref = nil - db.data = nil - db.datasz = 0 - return err -} diff --git a/vendor/go.etcd.io/bbolt/bolt_unix_aix.go b/vendor/go.etcd.io/bbolt/bolt_unix_aix.go deleted file mode 100644 index a64c16f5..00000000 --- a/vendor/go.etcd.io/bbolt/bolt_unix_aix.go +++ /dev/null @@ -1,90 +0,0 @@ -// +build aix - -package bbolt - -import ( - "fmt" - "syscall" - "time" - "unsafe" - - "golang.org/x/sys/unix" -) - -// flock acquires an advisory lock on a file descriptor. -func flock(db *DB, exclusive bool, timeout time.Duration) error { - var t time.Time - if timeout != 0 { - t = time.Now() - } - fd := db.file.Fd() - var lockType int16 - if exclusive { - lockType = syscall.F_WRLCK - } else { - lockType = syscall.F_RDLCK - } - for { - // Attempt to obtain an exclusive lock. 
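-		// (Despite the comment above, the requested lock is shared rather
-		// than exclusive when lockType is F_RDLCK.)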
- lock := syscall.Flock_t{Type: lockType} - err := syscall.FcntlFlock(fd, syscall.F_SETLK, &lock) - if err == nil { - return nil - } else if err != syscall.EAGAIN { - return err - } - - // If we timed out then return an error. - if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout { - return ErrTimeout - } - - // Wait for a bit and try again. - time.Sleep(flockRetryTimeout) - } -} - -// funlock releases an advisory lock on a file descriptor. -func funlock(db *DB) error { - var lock syscall.Flock_t - lock.Start = 0 - lock.Len = 0 - lock.Type = syscall.F_UNLCK - lock.Whence = 0 - return syscall.FcntlFlock(uintptr(db.file.Fd()), syscall.F_SETLK, &lock) -} - -// mmap memory maps a DB's data file. -func mmap(db *DB, sz int) error { - // Map the data file to memory. - b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags) - if err != nil { - return err - } - - // Advise the kernel that the mmap is accessed randomly. - if err := unix.Madvise(b, syscall.MADV_RANDOM); err != nil { - return fmt.Errorf("madvise: %s", err) - } - - // Save the original byte slice and convert to a byte array pointer. - db.dataref = b - db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0])) - db.datasz = sz - return nil -} - -// munmap unmaps a DB's data file from memory. -func munmap(db *DB) error { - // Ignore the unmap if we have no mapped data. - if db.dataref == nil { - return nil - } - - // Unmap using the original byte slice. - err := unix.Munmap(db.dataref) - db.dataref = nil - db.data = nil - db.datasz = 0 - return err -} diff --git a/vendor/go.etcd.io/bbolt/bolt_unix_solaris.go b/vendor/go.etcd.io/bbolt/bolt_unix_solaris.go deleted file mode 100644 index babad657..00000000 --- a/vendor/go.etcd.io/bbolt/bolt_unix_solaris.go +++ /dev/null @@ -1,88 +0,0 @@ -package bbolt - -import ( - "fmt" - "syscall" - "time" - "unsafe" - - "golang.org/x/sys/unix" -) - -// flock acquires an advisory lock on a file descriptor. -func flock(db *DB, exclusive bool, timeout time.Duration) error { - var t time.Time - if timeout != 0 { - t = time.Now() - } - fd := db.file.Fd() - var lockType int16 - if exclusive { - lockType = syscall.F_WRLCK - } else { - lockType = syscall.F_RDLCK - } - for { - // Attempt to obtain an exclusive lock. - lock := syscall.Flock_t{Type: lockType} - err := syscall.FcntlFlock(fd, syscall.F_SETLK, &lock) - if err == nil { - return nil - } else if err != syscall.EAGAIN { - return err - } - - // If we timed out then return an error. - if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout { - return ErrTimeout - } - - // Wait for a bit and try again. - time.Sleep(flockRetryTimeout) - } -} - -// funlock releases an advisory lock on a file descriptor. -func funlock(db *DB) error { - var lock syscall.Flock_t - lock.Start = 0 - lock.Len = 0 - lock.Type = syscall.F_UNLCK - lock.Whence = 0 - return syscall.FcntlFlock(uintptr(db.file.Fd()), syscall.F_SETLK, &lock) -} - -// mmap memory maps a DB's data file. -func mmap(db *DB, sz int) error { - // Map the data file to memory. - b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags) - if err != nil { - return err - } - - // Advise the kernel that the mmap is accessed randomly. - if err := unix.Madvise(b, syscall.MADV_RANDOM); err != nil { - return fmt.Errorf("madvise: %s", err) - } - - // Save the original byte slice and convert to a byte array pointer. 
-	db.dataref = b
-	db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0]))
-	db.datasz = sz
-	return nil
-}
-
-// munmap unmaps a DB's data file from memory.
-func munmap(db *DB) error {
-	// Ignore the unmap if we have no mapped data.
-	if db.dataref == nil {
-		return nil
-	}
-
-	// Unmap using the original byte slice.
-	err := unix.Munmap(db.dataref)
-	db.dataref = nil
-	db.data = nil
-	db.datasz = 0
-	return err
-}
diff --git a/vendor/go.etcd.io/bbolt/bolt_windows.go b/vendor/go.etcd.io/bbolt/bolt_windows.go
deleted file mode 100644
index fca178bd..00000000
--- a/vendor/go.etcd.io/bbolt/bolt_windows.go
+++ /dev/null
@@ -1,141 +0,0 @@
-package bbolt
-
-import (
-	"fmt"
-	"os"
-	"syscall"
-	"time"
-	"unsafe"
-)
-
-// LockFileEx code derived from golang build filemutex_windows.go @ v1.5.1
-var (
-	modkernel32      = syscall.NewLazyDLL("kernel32.dll")
-	procLockFileEx   = modkernel32.NewProc("LockFileEx")
-	procUnlockFileEx = modkernel32.NewProc("UnlockFileEx")
-)
-
-const (
-	// see https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx
-	flagLockExclusive       = 2
-	flagLockFailImmediately = 1
-
-	// see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx
-	errLockViolation syscall.Errno = 0x21
-)
-
-func lockFileEx(h syscall.Handle, flags, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) {
-	r, _, err := procLockFileEx.Call(uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)))
-	if r == 0 {
-		return err
-	}
-	return nil
-}
-
-func unlockFileEx(h syscall.Handle, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) {
-	r, _, err := procUnlockFileEx.Call(uintptr(h), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)), 0)
-	if r == 0 {
-		return err
-	}
-	return nil
-}
-
-// fdatasync flushes written data to a file descriptor.
-func fdatasync(db *DB) error {
-	return db.file.Sync()
-}
-
-// flock acquires an advisory lock on a file descriptor.
-func flock(db *DB, exclusive bool, timeout time.Duration) error {
-	var t time.Time
-	if timeout != 0 {
-		t = time.Now()
-	}
-	var flag uint32 = flagLockFailImmediately
-	if exclusive {
-		flag |= flagLockExclusive
-	}
-	for {
-		// Fix for https://github.com/etcd-io/bbolt/issues/121. Use byte-range
-		// -1..0 as the lock on the database file.
-		var m1 uint32 = (1 << 32) - 1 // -1 in a uint32
-		err := lockFileEx(syscall.Handle(db.file.Fd()), flag, 0, 1, 0, &syscall.Overlapped{
-			Offset:     m1,
-			OffsetHigh: m1,
-		})
-
-		if err == nil {
-			return nil
-		} else if err != errLockViolation {
-			return err
-		}
-
-		// If we timed out then return an error.
-		if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout {
-			return ErrTimeout
-		}
-
-		// Wait for a bit and try again.
-		time.Sleep(flockRetryTimeout)
-	}
-}
-
-// funlock releases an advisory lock on a file descriptor.
-func funlock(db *DB) error {
-	var m1 uint32 = (1 << 32) - 1 // -1 in a uint32
-	err := unlockFileEx(syscall.Handle(db.file.Fd()), 0, 1, 0, &syscall.Overlapped{
-		Offset:     m1,
-		OffsetHigh: m1,
-	})
-	return err
-}
-
-// mmap memory maps a DB's data file.
-// Based on: https://github.com/edsrzf/mmap-go
-func mmap(db *DB, sz int) error {
-	if !db.readOnly {
-		// Truncate the database to the size of the mmap.
-		if err := db.file.Truncate(int64(sz)); err != nil {
-			return fmt.Errorf("truncate: %s", err)
-		}
-	}
-
-	// Open a file mapping handle.
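-	// (Note the swapped-looking names below: sizelo ends up holding the high
-	// 32 bits of sz and sizehi the low 32 bits. They are passed to
-	// CreateFileMapping as its maxSizeHigh and maxSizeLow arguments, in that
-	// order, so the call is nonetheless correct.)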
- sizelo := uint32(sz >> 32) - sizehi := uint32(sz) & 0xffffffff - h, errno := syscall.CreateFileMapping(syscall.Handle(db.file.Fd()), nil, syscall.PAGE_READONLY, sizelo, sizehi, nil) - if h == 0 { - return os.NewSyscallError("CreateFileMapping", errno) - } - - // Create the memory map. - addr, errno := syscall.MapViewOfFile(h, syscall.FILE_MAP_READ, 0, 0, uintptr(sz)) - if addr == 0 { - return os.NewSyscallError("MapViewOfFile", errno) - } - - // Close mapping handle. - if err := syscall.CloseHandle(syscall.Handle(h)); err != nil { - return os.NewSyscallError("CloseHandle", err) - } - - // Convert to a byte array. - db.data = ((*[maxMapSize]byte)(unsafe.Pointer(addr))) - db.datasz = sz - - return nil -} - -// munmap unmaps a pointer from a file. -// Based on: https://github.com/edsrzf/mmap-go -func munmap(db *DB) error { - if db.data == nil { - return nil - } - - addr := (uintptr)(unsafe.Pointer(&db.data[0])) - if err := syscall.UnmapViewOfFile(addr); err != nil { - return os.NewSyscallError("UnmapViewOfFile", err) - } - return nil -} diff --git a/vendor/go.etcd.io/bbolt/boltsync_unix.go b/vendor/go.etcd.io/bbolt/boltsync_unix.go deleted file mode 100644 index 9587afef..00000000 --- a/vendor/go.etcd.io/bbolt/boltsync_unix.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !windows,!plan9,!linux,!openbsd - -package bbolt - -// fdatasync flushes written data to a file descriptor. -func fdatasync(db *DB) error { - return db.file.Sync() -} diff --git a/vendor/go.etcd.io/bbolt/bucket.go b/vendor/go.etcd.io/bbolt/bucket.go deleted file mode 100644 index d8750b14..00000000 --- a/vendor/go.etcd.io/bbolt/bucket.go +++ /dev/null @@ -1,777 +0,0 @@ -package bbolt - -import ( - "bytes" - "fmt" - "unsafe" -) - -const ( - // MaxKeySize is the maximum length of a key, in bytes. - MaxKeySize = 32768 - - // MaxValueSize is the maximum length of a value, in bytes. - MaxValueSize = (1 << 31) - 2 -) - -const bucketHeaderSize = int(unsafe.Sizeof(bucket{})) - -const ( - minFillPercent = 0.1 - maxFillPercent = 1.0 -) - -// DefaultFillPercent is the percentage that split pages are filled. -// This value can be changed by setting Bucket.FillPercent. -const DefaultFillPercent = 0.5 - -// Bucket represents a collection of key/value pairs inside the database. -type Bucket struct { - *bucket - tx *Tx // the associated transaction - buckets map[string]*Bucket // subbucket cache - page *page // inline page reference - rootNode *node // materialized node for the root page. - nodes map[pgid]*node // node cache - - // Sets the threshold for filling nodes when they split. By default, - // the bucket will fill to 50% but it can be useful to increase this - // amount if you know that your write workloads are mostly append-only. - // - // This is non-persisted across transactions so it must be set in every Tx. - FillPercent float64 -} - -// bucket represents the on-file representation of a bucket. -// This is stored as the "value" of a bucket key. If the bucket is small enough, -// then its root page can be stored inline in the "value", after the bucket -// header. In the case of inline buckets, the "root" will be 0. -type bucket struct { - root pgid // page id of the bucket's root-level page - sequence uint64 // monotonically incrementing, used by NextSequence() -} - -// newBucket returns a new bucket associated with a transaction. 
-func newBucket(tx *Tx) Bucket { - var b = Bucket{tx: tx, FillPercent: DefaultFillPercent} - if tx.writable { - b.buckets = make(map[string]*Bucket) - b.nodes = make(map[pgid]*node) - } - return b -} - -// Tx returns the tx of the bucket. -func (b *Bucket) Tx() *Tx { - return b.tx -} - -// Root returns the root of the bucket. -func (b *Bucket) Root() pgid { - return b.root -} - -// Writable returns whether the bucket is writable. -func (b *Bucket) Writable() bool { - return b.tx.writable -} - -// Cursor creates a cursor associated with the bucket. -// The cursor is only valid as long as the transaction is open. -// Do not use a cursor after the transaction is closed. -func (b *Bucket) Cursor() *Cursor { - // Update transaction statistics. - b.tx.stats.CursorCount++ - - // Allocate and return a cursor. - return &Cursor{ - bucket: b, - stack: make([]elemRef, 0), - } -} - -// Bucket retrieves a nested bucket by name. -// Returns nil if the bucket does not exist. -// The bucket instance is only valid for the lifetime of the transaction. -func (b *Bucket) Bucket(name []byte) *Bucket { - if b.buckets != nil { - if child := b.buckets[string(name)]; child != nil { - return child - } - } - - // Move cursor to key. - c := b.Cursor() - k, v, flags := c.seek(name) - - // Return nil if the key doesn't exist or it is not a bucket. - if !bytes.Equal(name, k) || (flags&bucketLeafFlag) == 0 { - return nil - } - - // Otherwise create a bucket and cache it. - var child = b.openBucket(v) - if b.buckets != nil { - b.buckets[string(name)] = child - } - - return child -} - -// Helper method that re-interprets a sub-bucket value -// from a parent into a Bucket -func (b *Bucket) openBucket(value []byte) *Bucket { - var child = newBucket(b.tx) - - // Unaligned access requires a copy to be made. - const unalignedMask = unsafe.Alignof(struct { - bucket - page - }{}) - 1 - unaligned := uintptr(unsafe.Pointer(&value[0]))&unalignedMask != 0 - if unaligned { - value = cloneBytes(value) - } - - // If this is a writable transaction then we need to copy the bucket entry. - // Read-only transactions can point directly at the mmap entry. - if b.tx.writable && !unaligned { - child.bucket = &bucket{} - *child.bucket = *(*bucket)(unsafe.Pointer(&value[0])) - } else { - child.bucket = (*bucket)(unsafe.Pointer(&value[0])) - } - - // Save a reference to the inline page if the bucket is inline. - if child.root == 0 { - child.page = (*page)(unsafe.Pointer(&value[bucketHeaderSize])) - } - - return &child -} - -// CreateBucket creates a new bucket at the given key and returns the new bucket. -// Returns an error if the key already exists, if the bucket name is blank, or if the bucket name is too long. -// The bucket instance is only valid for the lifetime of the transaction. -func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) { - if b.tx.db == nil { - return nil, ErrTxClosed - } else if !b.tx.writable { - return nil, ErrTxNotWritable - } else if len(key) == 0 { - return nil, ErrBucketNameRequired - } - - // Move cursor to correct position. - c := b.Cursor() - k, _, flags := c.seek(key) - - // Return an error if there is an existing key. - if bytes.Equal(key, k) { - if (flags & bucketLeafFlag) != 0 { - return nil, ErrBucketExists - } - return nil, ErrIncompatibleValue - } - - // Create empty, inline bucket. - var bucket = Bucket{ - bucket: &bucket{}, - rootNode: &node{isLeaf: true}, - FillPercent: DefaultFillPercent, - } - var value = bucket.write() - - // Insert into node. 
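-	// The key is cloned before insertion because the node keeps a reference
-	// to it for the remainder of the transaction, and the caller's slice may
-	// be reused or mutated after this call returns.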
- key = cloneBytes(key) - c.node().put(key, key, value, 0, bucketLeafFlag) - - // Since subbuckets are not allowed on inline buckets, we need to - // dereference the inline page, if it exists. This will cause the bucket - // to be treated as a regular, non-inline bucket for the rest of the tx. - b.page = nil - - return b.Bucket(key), nil -} - -// CreateBucketIfNotExists creates a new bucket if it doesn't already exist and returns a reference to it. -// Returns an error if the bucket name is blank, or if the bucket name is too long. -// The bucket instance is only valid for the lifetime of the transaction. -func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) { - child, err := b.CreateBucket(key) - if err == ErrBucketExists { - return b.Bucket(key), nil - } else if err != nil { - return nil, err - } - return child, nil -} - -// DeleteBucket deletes a bucket at the given key. -// Returns an error if the bucket does not exist, or if the key represents a non-bucket value. -func (b *Bucket) DeleteBucket(key []byte) error { - if b.tx.db == nil { - return ErrTxClosed - } else if !b.Writable() { - return ErrTxNotWritable - } - - // Move cursor to correct position. - c := b.Cursor() - k, _, flags := c.seek(key) - - // Return an error if bucket doesn't exist or is not a bucket. - if !bytes.Equal(key, k) { - return ErrBucketNotFound - } else if (flags & bucketLeafFlag) == 0 { - return ErrIncompatibleValue - } - - // Recursively delete all child buckets. - child := b.Bucket(key) - err := child.ForEach(func(k, v []byte) error { - if _, _, childFlags := child.Cursor().seek(k); (childFlags & bucketLeafFlag) != 0 { - if err := child.DeleteBucket(k); err != nil { - return fmt.Errorf("delete bucket: %s", err) - } - } - return nil - }) - if err != nil { - return err - } - - // Remove cached copy. - delete(b.buckets, string(key)) - - // Release all bucket pages to freelist. - child.nodes = nil - child.rootNode = nil - child.free() - - // Delete the node if we have a matching key. - c.node().del(key) - - return nil -} - -// Get retrieves the value for a key in the bucket. -// Returns a nil value if the key does not exist or if the key is a nested bucket. -// The returned value is only valid for the life of the transaction. -func (b *Bucket) Get(key []byte) []byte { - k, v, flags := b.Cursor().seek(key) - - // Return nil if this is a bucket. - if (flags & bucketLeafFlag) != 0 { - return nil - } - - // If our target node isn't the same key as what's passed in then return nil. - if !bytes.Equal(key, k) { - return nil - } - return v -} - -// Put sets the value for a key in the bucket. -// If the key exist then its previous value will be overwritten. -// Supplied value must remain valid for the life of the transaction. -// Returns an error if the bucket was created from a read-only transaction, if the key is blank, if the key is too large, or if the value is too large. -func (b *Bucket) Put(key []byte, value []byte) error { - if b.tx.db == nil { - return ErrTxClosed - } else if !b.Writable() { - return ErrTxNotWritable - } else if len(key) == 0 { - return ErrKeyRequired - } else if len(key) > MaxKeySize { - return ErrKeyTooLarge - } else if int64(len(value)) > MaxValueSize { - return ErrValueTooLarge - } - - // Move cursor to correct position. - c := b.Cursor() - k, _, flags := c.seek(key) - - // Return an error if there is an existing key with a bucket value. - if bytes.Equal(key, k) && (flags&bucketLeafFlag) != 0 { - return ErrIncompatibleValue - } - - // Insert into node. 
- key = cloneBytes(key) - c.node().put(key, key, value, 0, 0) - - return nil -} - -// Delete removes a key from the bucket. -// If the key does not exist then nothing is done and a nil error is returned. -// Returns an error if the bucket was created from a read-only transaction. -func (b *Bucket) Delete(key []byte) error { - if b.tx.db == nil { - return ErrTxClosed - } else if !b.Writable() { - return ErrTxNotWritable - } - - // Move cursor to correct position. - c := b.Cursor() - k, _, flags := c.seek(key) - - // Return nil if the key doesn't exist. - if !bytes.Equal(key, k) { - return nil - } - - // Return an error if there is already existing bucket value. - if (flags & bucketLeafFlag) != 0 { - return ErrIncompatibleValue - } - - // Delete the node if we have a matching key. - c.node().del(key) - - return nil -} - -// Sequence returns the current integer for the bucket without incrementing it. -func (b *Bucket) Sequence() uint64 { return b.bucket.sequence } - -// SetSequence updates the sequence number for the bucket. -func (b *Bucket) SetSequence(v uint64) error { - if b.tx.db == nil { - return ErrTxClosed - } else if !b.Writable() { - return ErrTxNotWritable - } - - // Materialize the root node if it hasn't been already so that the - // bucket will be saved during commit. - if b.rootNode == nil { - _ = b.node(b.root, nil) - } - - // Increment and return the sequence. - b.bucket.sequence = v - return nil -} - -// NextSequence returns an autoincrementing integer for the bucket. -func (b *Bucket) NextSequence() (uint64, error) { - if b.tx.db == nil { - return 0, ErrTxClosed - } else if !b.Writable() { - return 0, ErrTxNotWritable - } - - // Materialize the root node if it hasn't been already so that the - // bucket will be saved during commit. - if b.rootNode == nil { - _ = b.node(b.root, nil) - } - - // Increment and return the sequence. - b.bucket.sequence++ - return b.bucket.sequence, nil -} - -// ForEach executes a function for each key/value pair in a bucket. -// If the provided function returns an error then the iteration is stopped and -// the error is returned to the caller. The provided function must not modify -// the bucket; this will result in undefined behavior. -func (b *Bucket) ForEach(fn func(k, v []byte) error) error { - if b.tx.db == nil { - return ErrTxClosed - } - c := b.Cursor() - for k, v := c.First(); k != nil; k, v = c.Next() { - if err := fn(k, v); err != nil { - return err - } - } - return nil -} - -// Stat returns stats on a bucket. -func (b *Bucket) Stats() BucketStats { - var s, subStats BucketStats - pageSize := b.tx.db.pageSize - s.BucketN += 1 - if b.root == 0 { - s.InlineBucketN += 1 - } - b.forEachPage(func(p *page, depth int) { - if (p.flags & leafPageFlag) != 0 { - s.KeyN += int(p.count) - - // used totals the used bytes for the page - used := pageHeaderSize - - if p.count != 0 { - // If page has any elements, add all element headers. - used += leafPageElementSize * uintptr(p.count-1) - - // Add all element key, value sizes. - // The computation takes advantage of the fact that the position - // of the last element's key/value equals to the total of the sizes - // of all previous elements' keys and values. - // It also includes the last element's header. 
- lastElement := p.leafPageElement(p.count - 1) - used += uintptr(lastElement.pos + lastElement.ksize + lastElement.vsize) - } - - if b.root == 0 { - // For inlined bucket just update the inline stats - s.InlineBucketInuse += int(used) - } else { - // For non-inlined bucket update all the leaf stats - s.LeafPageN++ - s.LeafInuse += int(used) - s.LeafOverflowN += int(p.overflow) - - // Collect stats from sub-buckets. - // Do that by iterating over all element headers - // looking for the ones with the bucketLeafFlag. - for i := uint16(0); i < p.count; i++ { - e := p.leafPageElement(i) - if (e.flags & bucketLeafFlag) != 0 { - // For any bucket element, open the element value - // and recursively call Stats on the contained bucket. - subStats.Add(b.openBucket(e.value()).Stats()) - } - } - } - } else if (p.flags & branchPageFlag) != 0 { - s.BranchPageN++ - lastElement := p.branchPageElement(p.count - 1) - - // used totals the used bytes for the page - // Add header and all element headers. - used := pageHeaderSize + (branchPageElementSize * uintptr(p.count-1)) - - // Add size of all keys and values. - // Again, use the fact that last element's position equals to - // the total of key, value sizes of all previous elements. - used += uintptr(lastElement.pos + lastElement.ksize) - s.BranchInuse += int(used) - s.BranchOverflowN += int(p.overflow) - } - - // Keep track of maximum page depth. - if depth+1 > s.Depth { - s.Depth = (depth + 1) - } - }) - - // Alloc stats can be computed from page counts and pageSize. - s.BranchAlloc = (s.BranchPageN + s.BranchOverflowN) * pageSize - s.LeafAlloc = (s.LeafPageN + s.LeafOverflowN) * pageSize - - // Add the max depth of sub-buckets to get total nested depth. - s.Depth += subStats.Depth - // Add the stats for all sub-buckets - s.Add(subStats) - return s -} - -// forEachPage iterates over every page in a bucket, including inline pages. -func (b *Bucket) forEachPage(fn func(*page, int)) { - // If we have an inline page then just use that. - if b.page != nil { - fn(b.page, 0) - return - } - - // Otherwise traverse the page hierarchy. - b.tx.forEachPage(b.root, 0, fn) -} - -// forEachPageNode iterates over every page (or node) in a bucket. -// This also includes inline pages. -func (b *Bucket) forEachPageNode(fn func(*page, *node, int)) { - // If we have an inline page or root node then just use that. - if b.page != nil { - fn(b.page, nil, 0) - return - } - b._forEachPageNode(b.root, 0, fn) -} - -func (b *Bucket) _forEachPageNode(pgid pgid, depth int, fn func(*page, *node, int)) { - var p, n = b.pageNode(pgid) - - // Execute function. - fn(p, n, depth) - - // Recursively loop over children. - if p != nil { - if (p.flags & branchPageFlag) != 0 { - for i := 0; i < int(p.count); i++ { - elem := p.branchPageElement(uint16(i)) - b._forEachPageNode(elem.pgid, depth+1, fn) - } - } - } else { - if !n.isLeaf { - for _, inode := range n.inodes { - b._forEachPageNode(inode.pgid, depth+1, fn) - } - } - } -} - -// spill writes all the nodes for this bucket to dirty pages. -func (b *Bucket) spill() error { - // Spill all child buckets first. - for name, child := range b.buckets { - // If the child bucket is small enough and it has no child buckets then - // write it inline into the parent bucket's page. Otherwise spill it - // like a normal bucket and make the parent value a pointer to the page. 
- var value []byte - if child.inlineable() { - child.free() - value = child.write() - } else { - if err := child.spill(); err != nil { - return err - } - - // Update the child bucket header in this bucket. - value = make([]byte, unsafe.Sizeof(bucket{})) - var bucket = (*bucket)(unsafe.Pointer(&value[0])) - *bucket = *child.bucket - } - - // Skip writing the bucket if there are no materialized nodes. - if child.rootNode == nil { - continue - } - - // Update parent node. - var c = b.Cursor() - k, _, flags := c.seek([]byte(name)) - if !bytes.Equal([]byte(name), k) { - panic(fmt.Sprintf("misplaced bucket header: %x -> %x", []byte(name), k)) - } - if flags&bucketLeafFlag == 0 { - panic(fmt.Sprintf("unexpected bucket header flag: %x", flags)) - } - c.node().put([]byte(name), []byte(name), value, 0, bucketLeafFlag) - } - - // Ignore if there's not a materialized root node. - if b.rootNode == nil { - return nil - } - - // Spill nodes. - if err := b.rootNode.spill(); err != nil { - return err - } - b.rootNode = b.rootNode.root() - - // Update the root node for this bucket. - if b.rootNode.pgid >= b.tx.meta.pgid { - panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", b.rootNode.pgid, b.tx.meta.pgid)) - } - b.root = b.rootNode.pgid - - return nil -} - -// inlineable returns true if a bucket is small enough to be written inline -// and if it contains no subbuckets. Otherwise returns false. -func (b *Bucket) inlineable() bool { - var n = b.rootNode - - // Bucket must only contain a single leaf node. - if n == nil || !n.isLeaf { - return false - } - - // Bucket is not inlineable if it contains subbuckets or if it goes beyond - // our threshold for inline bucket size. - var size = pageHeaderSize - for _, inode := range n.inodes { - size += leafPageElementSize + uintptr(len(inode.key)) + uintptr(len(inode.value)) - - if inode.flags&bucketLeafFlag != 0 { - return false - } else if size > b.maxInlineBucketSize() { - return false - } - } - - return true -} - -// Returns the maximum total size of a bucket to make it a candidate for inlining. -func (b *Bucket) maxInlineBucketSize() uintptr { - return uintptr(b.tx.db.pageSize / 4) -} - -// write allocates and writes a bucket to a byte slice. -func (b *Bucket) write() []byte { - // Allocate the appropriate size. - var n = b.rootNode - var value = make([]byte, bucketHeaderSize+n.size()) - - // Write a bucket header. - var bucket = (*bucket)(unsafe.Pointer(&value[0])) - *bucket = *b.bucket - - // Convert byte slice to a fake page and write the root node. - var p = (*page)(unsafe.Pointer(&value[bucketHeaderSize])) - n.write(p) - - return value -} - -// rebalance attempts to balance all nodes. -func (b *Bucket) rebalance() { - for _, n := range b.nodes { - n.rebalance() - } - for _, child := range b.buckets { - child.rebalance() - } -} - -// node creates a node from a page and associates it with a given parent. -func (b *Bucket) node(pgid pgid, parent *node) *node { - _assert(b.nodes != nil, "nodes map expected") - - // Retrieve node if it's already been created. - if n := b.nodes[pgid]; n != nil { - return n - } - - // Otherwise create a node and cache it. - n := &node{bucket: b, parent: parent} - if parent == nil { - b.rootNode = n - } else { - parent.children = append(parent.children, n) - } - - // Use the inline page if this is an inline bucket. - var p = b.page - if p == nil { - p = b.tx.page(pgid) - } - - // Read the page into the node and cache it. - n.read(p) - b.nodes[pgid] = n - - // Update statistics. 
- b.tx.stats.NodeCount++ - - return n -} - -// free recursively frees all pages in the bucket. -func (b *Bucket) free() { - if b.root == 0 { - return - } - - var tx = b.tx - b.forEachPageNode(func(p *page, n *node, _ int) { - if p != nil { - tx.db.freelist.free(tx.meta.txid, p) - } else { - n.free() - } - }) - b.root = 0 -} - -// dereference removes all references to the old mmap. -func (b *Bucket) dereference() { - if b.rootNode != nil { - b.rootNode.root().dereference() - } - - for _, child := range b.buckets { - child.dereference() - } -} - -// pageNode returns the in-memory node, if it exists. -// Otherwise returns the underlying page. -func (b *Bucket) pageNode(id pgid) (*page, *node) { - // Inline buckets have a fake page embedded in their value so treat them - // differently. We'll return the rootNode (if available) or the fake page. - if b.root == 0 { - if id != 0 { - panic(fmt.Sprintf("inline bucket non-zero page access(2): %d != 0", id)) - } - if b.rootNode != nil { - return nil, b.rootNode - } - return b.page, nil - } - - // Check the node cache for non-inline buckets. - if b.nodes != nil { - if n := b.nodes[id]; n != nil { - return nil, n - } - } - - // Finally lookup the page from the transaction if no node is materialized. - return b.tx.page(id), nil -} - -// BucketStats records statistics about resources used by a bucket. -type BucketStats struct { - // Page count statistics. - BranchPageN int // number of logical branch pages - BranchOverflowN int // number of physical branch overflow pages - LeafPageN int // number of logical leaf pages - LeafOverflowN int // number of physical leaf overflow pages - - // Tree statistics. - KeyN int // number of keys/value pairs - Depth int // number of levels in B+tree - - // Page size utilization. - BranchAlloc int // bytes allocated for physical branch pages - BranchInuse int // bytes actually used for branch data - LeafAlloc int // bytes allocated for physical leaf pages - LeafInuse int // bytes actually used for leaf data - - // Bucket statistics - BucketN int // total number of buckets including the top bucket - InlineBucketN int // total number on inlined buckets - InlineBucketInuse int // bytes used for inlined buckets (also accounted for in LeafInuse) -} - -func (s *BucketStats) Add(other BucketStats) { - s.BranchPageN += other.BranchPageN - s.BranchOverflowN += other.BranchOverflowN - s.LeafPageN += other.LeafPageN - s.LeafOverflowN += other.LeafOverflowN - s.KeyN += other.KeyN - if s.Depth < other.Depth { - s.Depth = other.Depth - } - s.BranchAlloc += other.BranchAlloc - s.BranchInuse += other.BranchInuse - s.LeafAlloc += other.LeafAlloc - s.LeafInuse += other.LeafInuse - - s.BucketN += other.BucketN - s.InlineBucketN += other.InlineBucketN - s.InlineBucketInuse += other.InlineBucketInuse -} - -// cloneBytes returns a copy of a given slice. -func cloneBytes(v []byte) []byte { - var clone = make([]byte, len(v)) - copy(clone, v) - return clone -} diff --git a/vendor/go.etcd.io/bbolt/compact.go b/vendor/go.etcd.io/bbolt/compact.go deleted file mode 100644 index e4fe91b0..00000000 --- a/vendor/go.etcd.io/bbolt/compact.go +++ /dev/null @@ -1,114 +0,0 @@ -package bbolt - -// Compact will create a copy of the source DB and in the destination DB. This may -// reclaim space that the source database no longer has use for. txMaxSize can be -// used to limit the transactions size of this process and may trigger intermittent -// commits. A value of zero will ignore transaction sizes. 
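A minimal sketch of how this Compact helper is typically driven; the database paths and the 64 KiB transaction cap below are illustrative assumptions, not values used by this repository:

```go
// Sketch: copy one bbolt database into a fresh file, reclaiming free pages.
// "app.db" and "app.compacted.db" are hypothetical paths.
package main

import (
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	// Open the source read-only; this takes only a shared file lock.
	src, err := bolt.Open("app.db", 0o600, &bolt.Options{ReadOnly: true})
	if err != nil {
		log.Fatal(err)
	}
	defer src.Close()

	// The destination starts empty and receives the compacted copy.
	dst, err := bolt.Open("app.compacted.db", 0o600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer dst.Close()

	// Commit roughly every 64 KiB of copied keys/values; a cap of 0
	// would copy everything in a single transaction.
	if err := bolt.Compact(dst, src, 64<<10); err != nil {
		log.Fatal(err)
	}
}
```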
-// TODO: merge with: https://github.com/etcd-io/etcd/blob/b7f0f52a16dbf83f18ca1d803f7892d750366a94/mvcc/backend/backend.go#L349 -func Compact(dst, src *DB, txMaxSize int64) error { - // commit regularly, or we'll run out of memory for large datasets if using one transaction. - var size int64 - tx, err := dst.Begin(true) - if err != nil { - return err - } - defer tx.Rollback() - - if err := walk(src, func(keys [][]byte, k, v []byte, seq uint64) error { - // On each key/value, check if we have exceeded tx size. - sz := int64(len(k) + len(v)) - if size+sz > txMaxSize && txMaxSize != 0 { - // Commit previous transaction. - if err := tx.Commit(); err != nil { - return err - } - - // Start new transaction. - tx, err = dst.Begin(true) - if err != nil { - return err - } - size = 0 - } - size += sz - - // Create bucket on the root transaction if this is the first level. - nk := len(keys) - if nk == 0 { - bkt, err := tx.CreateBucket(k) - if err != nil { - return err - } - if err := bkt.SetSequence(seq); err != nil { - return err - } - return nil - } - - // Create buckets on subsequent levels, if necessary. - b := tx.Bucket(keys[0]) - if nk > 1 { - for _, k := range keys[1:] { - b = b.Bucket(k) - } - } - - // Fill the entire page for best compaction. - b.FillPercent = 1.0 - - // If there is no value then this is a bucket call. - if v == nil { - bkt, err := b.CreateBucket(k) - if err != nil { - return err - } - if err := bkt.SetSequence(seq); err != nil { - return err - } - return nil - } - - // Otherwise treat it as a key/value pair. - return b.Put(k, v) - }); err != nil { - return err - } - - return tx.Commit() -} - -// walkFunc is the type of the function called for keys (buckets and "normal" -// values) discovered by Walk. keys is the list of keys to descend to the bucket -// owning the discovered key/value pair k/v. -type walkFunc func(keys [][]byte, k, v []byte, seq uint64) error - -// walk walks recursively the bolt database db, calling walkFn for each key it finds. -func walk(db *DB, walkFn walkFunc) error { - return db.View(func(tx *Tx) error { - return tx.ForEach(func(name []byte, b *Bucket) error { - return walkBucket(b, nil, name, nil, b.Sequence(), walkFn) - }) - }) -} - -func walkBucket(b *Bucket, keypath [][]byte, k, v []byte, seq uint64, fn walkFunc) error { - // Execute callback. - if err := fn(keypath, k, v, seq); err != nil { - return err - } - - // If this is not a bucket then stop. - if v != nil { - return nil - } - - // Iterate over each child key/value. - keypath = append(keypath, k) - return b.ForEach(func(k, v []byte) error { - if v == nil { - bkt := b.Bucket(k) - return walkBucket(bkt, keypath, k, nil, bkt.Sequence(), fn) - } - return walkBucket(b, keypath, k, v, b.Sequence(), fn) - }) -} diff --git a/vendor/go.etcd.io/bbolt/cursor.go b/vendor/go.etcd.io/bbolt/cursor.go deleted file mode 100644 index 98aeb449..00000000 --- a/vendor/go.etcd.io/bbolt/cursor.go +++ /dev/null @@ -1,396 +0,0 @@ -package bbolt - -import ( - "bytes" - "fmt" - "sort" -) - -// Cursor represents an iterator that can traverse over all key/value pairs in a bucket in sorted order. -// Cursors see nested buckets with value == nil. -// Cursors can be obtained from a transaction and are valid as long as the transaction is open. -// -// Keys and values returned from the cursor are only valid for the life of the transaction. -// -// Changing data while traversing with a cursor may cause it to be invalidated -// and return unexpected keys and/or values. You must reposition your cursor -// after mutating data. 
-type Cursor struct { - bucket *Bucket - stack []elemRef -} - -// Bucket returns the bucket that this cursor was created from. -func (c *Cursor) Bucket() *Bucket { - return c.bucket -} - -// First moves the cursor to the first item in the bucket and returns its key and value. -// If the bucket is empty then a nil key and value are returned. -// The returned key and value are only valid for the life of the transaction. -func (c *Cursor) First() (key []byte, value []byte) { - _assert(c.bucket.tx.db != nil, "tx closed") - c.stack = c.stack[:0] - p, n := c.bucket.pageNode(c.bucket.root) - c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) - c.first() - - // If we land on an empty page then move to the next value. - // https://github.com/boltdb/bolt/issues/450 - if c.stack[len(c.stack)-1].count() == 0 { - c.next() - } - - k, v, flags := c.keyValue() - if (flags & uint32(bucketLeafFlag)) != 0 { - return k, nil - } - return k, v - -} - -// Last moves the cursor to the last item in the bucket and returns its key and value. -// If the bucket is empty then a nil key and value are returned. -// The returned key and value are only valid for the life of the transaction. -func (c *Cursor) Last() (key []byte, value []byte) { - _assert(c.bucket.tx.db != nil, "tx closed") - c.stack = c.stack[:0] - p, n := c.bucket.pageNode(c.bucket.root) - ref := elemRef{page: p, node: n} - ref.index = ref.count() - 1 - c.stack = append(c.stack, ref) - c.last() - k, v, flags := c.keyValue() - if (flags & uint32(bucketLeafFlag)) != 0 { - return k, nil - } - return k, v -} - -// Next moves the cursor to the next item in the bucket and returns its key and value. -// If the cursor is at the end of the bucket then a nil key and value are returned. -// The returned key and value are only valid for the life of the transaction. -func (c *Cursor) Next() (key []byte, value []byte) { - _assert(c.bucket.tx.db != nil, "tx closed") - k, v, flags := c.next() - if (flags & uint32(bucketLeafFlag)) != 0 { - return k, nil - } - return k, v -} - -// Prev moves the cursor to the previous item in the bucket and returns its key and value. -// If the cursor is at the beginning of the bucket then a nil key and value are returned. -// The returned key and value are only valid for the life of the transaction. -func (c *Cursor) Prev() (key []byte, value []byte) { - _assert(c.bucket.tx.db != nil, "tx closed") - - // Attempt to move back one element until we're successful. - // Move up the stack as we hit the beginning of each page in our stack. - for i := len(c.stack) - 1; i >= 0; i-- { - elem := &c.stack[i] - if elem.index > 0 { - elem.index-- - break - } - c.stack = c.stack[:i] - } - - // If we've hit the end then return nil. - if len(c.stack) == 0 { - return nil, nil - } - - // Move down the stack to find the last element of the last leaf under this branch. - c.last() - k, v, flags := c.keyValue() - if (flags & uint32(bucketLeafFlag)) != 0 { - return k, nil - } - return k, v -} - -// Seek moves the cursor to a given key and returns it. -// If the key does not exist then the next key is used. If no keys -// follow, a nil key is returned. -// The returned key and value are only valid for the life of the transaction. -func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) { - k, v, flags := c.seek(seek) - - // If we ended up after the last element of a page then move to the next one. 
- if ref := &c.stack[len(c.stack)-1]; ref.index >= ref.count() { - k, v, flags = c.next() - } - - if k == nil { - return nil, nil - } else if (flags & uint32(bucketLeafFlag)) != 0 { - return k, nil - } - return k, v -} - -// Delete removes the current key/value under the cursor from the bucket. -// Delete fails if current key/value is a bucket or if the transaction is not writable. -func (c *Cursor) Delete() error { - if c.bucket.tx.db == nil { - return ErrTxClosed - } else if !c.bucket.Writable() { - return ErrTxNotWritable - } - - key, _, flags := c.keyValue() - // Return an error if current value is a bucket. - if (flags & bucketLeafFlag) != 0 { - return ErrIncompatibleValue - } - c.node().del(key) - - return nil -} - -// seek moves the cursor to a given key and returns it. -// If the key does not exist then the next key is used. -func (c *Cursor) seek(seek []byte) (key []byte, value []byte, flags uint32) { - _assert(c.bucket.tx.db != nil, "tx closed") - - // Start from root page/node and traverse to correct page. - c.stack = c.stack[:0] - c.search(seek, c.bucket.root) - - // If this is a bucket then return a nil value. - return c.keyValue() -} - -// first moves the cursor to the first leaf element under the last page in the stack. -func (c *Cursor) first() { - for { - // Exit when we hit a leaf page. - var ref = &c.stack[len(c.stack)-1] - if ref.isLeaf() { - break - } - - // Keep adding pages pointing to the first element to the stack. - var pgid pgid - if ref.node != nil { - pgid = ref.node.inodes[ref.index].pgid - } else { - pgid = ref.page.branchPageElement(uint16(ref.index)).pgid - } - p, n := c.bucket.pageNode(pgid) - c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) - } -} - -// last moves the cursor to the last leaf element under the last page in the stack. -func (c *Cursor) last() { - for { - // Exit when we hit a leaf page. - ref := &c.stack[len(c.stack)-1] - if ref.isLeaf() { - break - } - - // Keep adding pages pointing to the last element in the stack. - var pgid pgid - if ref.node != nil { - pgid = ref.node.inodes[ref.index].pgid - } else { - pgid = ref.page.branchPageElement(uint16(ref.index)).pgid - } - p, n := c.bucket.pageNode(pgid) - - var nextRef = elemRef{page: p, node: n} - nextRef.index = nextRef.count() - 1 - c.stack = append(c.stack, nextRef) - } -} - -// next moves to the next leaf element and returns the key and value. -// If the cursor is at the last leaf element then it stays there and returns nil. -func (c *Cursor) next() (key []byte, value []byte, flags uint32) { - for { - // Attempt to move over one element until we're successful. - // Move up the stack as we hit the end of each page in our stack. - var i int - for i = len(c.stack) - 1; i >= 0; i-- { - elem := &c.stack[i] - if elem.index < elem.count()-1 { - elem.index++ - break - } - } - - // If we've hit the root page then stop and return. This will leave the - // cursor on the last element of the last page. - if i == -1 { - return nil, nil, 0 - } - - // Otherwise start from where we left off in the stack and find the - // first element of the first leaf page. - c.stack = c.stack[:i+1] - c.first() - - // If this is an empty page then restart and move back up the stack. - // https://github.com/boltdb/bolt/issues/450 - if c.stack[len(c.stack)-1].count() == 0 { - continue - } - - return c.keyValue() - } -} - -// search recursively performs a binary search against a given page/node until it finds a given key. 
-func (c *Cursor) search(key []byte, pgid pgid) { - p, n := c.bucket.pageNode(pgid) - if p != nil && (p.flags&(branchPageFlag|leafPageFlag)) == 0 { - panic(fmt.Sprintf("invalid page type: %d: %x", p.id, p.flags)) - } - e := elemRef{page: p, node: n} - c.stack = append(c.stack, e) - - // If we're on a leaf page/node then find the specific node. - if e.isLeaf() { - c.nsearch(key) - return - } - - if n != nil { - c.searchNode(key, n) - return - } - c.searchPage(key, p) -} - -func (c *Cursor) searchNode(key []byte, n *node) { - var exact bool - index := sort.Search(len(n.inodes), func(i int) bool { - // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now. - // sort.Search() finds the lowest index where f() != -1 but we need the highest index. - ret := bytes.Compare(n.inodes[i].key, key) - if ret == 0 { - exact = true - } - return ret != -1 - }) - if !exact && index > 0 { - index-- - } - c.stack[len(c.stack)-1].index = index - - // Recursively search to the next page. - c.search(key, n.inodes[index].pgid) -} - -func (c *Cursor) searchPage(key []byte, p *page) { - // Binary search for the correct range. - inodes := p.branchPageElements() - - var exact bool - index := sort.Search(int(p.count), func(i int) bool { - // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now. - // sort.Search() finds the lowest index where f() != -1 but we need the highest index. - ret := bytes.Compare(inodes[i].key(), key) - if ret == 0 { - exact = true - } - return ret != -1 - }) - if !exact && index > 0 { - index-- - } - c.stack[len(c.stack)-1].index = index - - // Recursively search to the next page. - c.search(key, inodes[index].pgid) -} - -// nsearch searches the leaf node on the top of the stack for a key. -func (c *Cursor) nsearch(key []byte) { - e := &c.stack[len(c.stack)-1] - p, n := e.page, e.node - - // If we have a node then search its inodes. - if n != nil { - index := sort.Search(len(n.inodes), func(i int) bool { - return bytes.Compare(n.inodes[i].key, key) != -1 - }) - e.index = index - return - } - - // If we have a page then search its leaf elements. - inodes := p.leafPageElements() - index := sort.Search(int(p.count), func(i int) bool { - return bytes.Compare(inodes[i].key(), key) != -1 - }) - e.index = index -} - -// keyValue returns the key and value of the current leaf element. -func (c *Cursor) keyValue() ([]byte, []byte, uint32) { - ref := &c.stack[len(c.stack)-1] - - // If the cursor is pointing to the end of page/node then return nil. - if ref.count() == 0 || ref.index >= ref.count() { - return nil, nil, 0 - } - - // Retrieve value from node. - if ref.node != nil { - inode := &ref.node.inodes[ref.index] - return inode.key, inode.value, inode.flags - } - - // Or retrieve value from page. - elem := ref.page.leafPageElement(uint16(ref.index)) - return elem.key(), elem.value(), elem.flags -} - -// node returns the node that the cursor is currently positioned on. -func (c *Cursor) node() *node { - _assert(len(c.stack) > 0, "accessing a node with a zero-length cursor stack") - - // If the top of the stack is a leaf node then just return it. - if ref := &c.stack[len(c.stack)-1]; ref.node != nil && ref.isLeaf() { - return ref.node - } - - // Start from root and traverse down the hierarchy. 
- var n = c.stack[0].node - if n == nil { - n = c.bucket.node(c.stack[0].page.id, nil) - } - for _, ref := range c.stack[:len(c.stack)-1] { - _assert(!n.isLeaf, "expected branch node") - n = n.childAt(ref.index) - } - _assert(n.isLeaf, "expected leaf node") - return n -} - -// elemRef represents a reference to an element on a given page/node. -type elemRef struct { - page *page - node *node - index int -} - -// isLeaf returns whether the ref is pointing at a leaf page/node. -func (r *elemRef) isLeaf() bool { - if r.node != nil { - return r.node.isLeaf - } - return (r.page.flags & leafPageFlag) != 0 -} - -// count returns the number of inodes or page elements. -func (r *elemRef) count() int { - if r.node != nil { - return len(r.node.inodes) - } - return int(r.page.count) -} diff --git a/vendor/go.etcd.io/bbolt/db.go b/vendor/go.etcd.io/bbolt/db.go deleted file mode 100644 index a798c390..00000000 --- a/vendor/go.etcd.io/bbolt/db.go +++ /dev/null @@ -1,1232 +0,0 @@ -package bbolt - -import ( - "errors" - "fmt" - "hash/fnv" - "log" - "os" - "runtime" - "sort" - "sync" - "time" - "unsafe" -) - -// The largest step that can be taken when remapping the mmap. -const maxMmapStep = 1 << 30 // 1GB - -// The data file format version. -const version = 2 - -// Represents a marker value to indicate that a file is a Bolt DB. -const magic uint32 = 0xED0CDAED - -const pgidNoFreelist pgid = 0xffffffffffffffff - -// IgnoreNoSync specifies whether the NoSync field of a DB is ignored when -// syncing changes to a file. This is required as some operating systems, -// such as OpenBSD, do not have a unified buffer cache (UBC) and writes -// must be synchronized using the msync(2) syscall. -const IgnoreNoSync = runtime.GOOS == "openbsd" - -// Default values if not set in a DB instance. -const ( - DefaultMaxBatchSize int = 1000 - DefaultMaxBatchDelay = 10 * time.Millisecond - DefaultAllocSize = 16 * 1024 * 1024 -) - -// default page size for db is set to the OS page size. -var defaultPageSize = os.Getpagesize() - -// The time elapsed between consecutive file locking attempts. -const flockRetryTimeout = 50 * time.Millisecond - -// FreelistType is the type of the freelist backend -type FreelistType string - -const ( - // FreelistArrayType indicates backend freelist type is array - FreelistArrayType = FreelistType("array") - // FreelistMapType indicates backend freelist type is hashmap - FreelistMapType = FreelistType("hashmap") -) - -// DB represents a collection of buckets persisted to a file on disk. -// All data access is performed through transactions which can be obtained through the DB. -// All the functions on DB will return a ErrDatabaseNotOpen if accessed before Open() is called. -type DB struct { - // When enabled, the database will perform a Check() after every commit. - // A panic is issued if the database is in an inconsistent state. This - // flag has a large performance impact so it should only be used for - // debugging purposes. - StrictMode bool - - // Setting the NoSync flag will cause the database to skip fsync() - // calls after each commit. This can be useful when bulk loading data - // into a database and you can restart the bulk load in the event of - // a system failure or database corruption. Do not set this flag for - // normal use. - // - // If the package global IgnoreNoSync constant is true, this value is - // ignored. See the comment on that constant for more details. - // - // THIS IS UNSAFE. PLEASE USE WITH CAUTION. - NoSync bool - - // When true, skips syncing freelist to disk. 
This improves the database - // write performance under normal operation, but requires a full database - // re-sync during recovery. - NoFreelistSync bool - - // FreelistType sets the backend freelist type. There are two options. Array which is simple but endures - // dramatic performance degradation if database is large and framentation in freelist is common. - // The alternative one is using hashmap, it is faster in almost all circumstances - // but it doesn't guarantee that it offers the smallest page id available. In normal case it is safe. - // The default type is array - FreelistType FreelistType - - // When true, skips the truncate call when growing the database. - // Setting this to true is only safe on non-ext3/ext4 systems. - // Skipping truncation avoids preallocation of hard drive space and - // bypasses a truncate() and fsync() syscall on remapping. - // - // https://github.com/boltdb/bolt/issues/284 - NoGrowSync bool - - // If you want to read the entire database fast, you can set MmapFlag to - // syscall.MAP_POPULATE on Linux 2.6.23+ for sequential read-ahead. - MmapFlags int - - // MaxBatchSize is the maximum size of a batch. Default value is - // copied from DefaultMaxBatchSize in Open. - // - // If <=0, disables batching. - // - // Do not change concurrently with calls to Batch. - MaxBatchSize int - - // MaxBatchDelay is the maximum delay before a batch starts. - // Default value is copied from DefaultMaxBatchDelay in Open. - // - // If <=0, effectively disables batching. - // - // Do not change concurrently with calls to Batch. - MaxBatchDelay time.Duration - - // AllocSize is the amount of space allocated when the database - // needs to create new pages. This is done to amortize the cost - // of truncate() and fsync() when growing the data file. - AllocSize int - - // Mlock locks database file in memory when set to true. - // It prevents major page faults, however used memory can't be reclaimed. - // - // Supported only on Unix via mlock/munlock syscalls. - Mlock bool - - path string - openFile func(string, int, os.FileMode) (*os.File, error) - file *os.File - dataref []byte // mmap'ed readonly, write throws SEGV - data *[maxMapSize]byte - datasz int - filesz int // current on disk file size - meta0 *meta - meta1 *meta - pageSize int - opened bool - rwtx *Tx - txs []*Tx - stats Stats - - freelist *freelist - freelistLoad sync.Once - - pagePool sync.Pool - - batchMu sync.Mutex - batch *batch - - rwlock sync.Mutex // Allows only one writer at a time. - metalock sync.Mutex // Protects meta page access. - mmaplock sync.RWMutex // Protects mmap access during remapping. - statlock sync.RWMutex // Protects stats access. - - ops struct { - writeAt func(b []byte, off int64) (n int, err error) - } - - // Read only mode. - // When true, Update() and Begin(true) return ErrDatabaseReadOnly immediately. - readOnly bool -} - -// Path returns the path to currently open database file. -func (db *DB) Path() string { - return db.path -} - -// GoString returns the Go string representation of the database. -func (db *DB) GoString() string { - return fmt.Sprintf("bolt.DB{path:%q}", db.path) -} - -// String returns the string representation of the database. -func (db *DB) String() string { - return fmt.Sprintf("DB<%q>", db.path) -} - -// Open creates and opens a database at the given path. -// If the file does not exist then it will be created automatically. -// Passing in nil options will cause Bolt to open the database with the default options. 
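A minimal sketch of the Open/Update/View lifecycle around this constructor, assuming a hypothetical "candy.db" path and "hosts" bucket:

```go
package main

import (
	"fmt"
	"log"
	"time"

	bolt "go.etcd.io/bbolt"
)

func main() {
	// Timeout bounds how long Open waits on the file lock if another
	// process holds the database; zero would wait indefinitely.
	db, err := bolt.Open("candy.db", 0o600, &bolt.Options{Timeout: time.Second})
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Writes go through a read-write transaction.
	err = db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("hosts"))
		if err != nil {
			return err
		}
		return b.Put([]byte("app.test"), []byte("127.0.0.1"))
	})
	if err != nil {
		log.Fatal(err)
	}

	// Reads use a read-only transaction; returned slices are only
	// valid for the life of that transaction.
	err = db.View(func(tx *bolt.Tx) error {
		v := tx.Bucket([]byte("hosts")).Get([]byte("app.test"))
		fmt.Printf("app.test -> %s\n", v)
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}
```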
-func Open(path string, mode os.FileMode, options *Options) (*DB, error) { - db := &DB{ - opened: true, - } - // Set default options if no options are provided. - if options == nil { - options = DefaultOptions - } - db.NoSync = options.NoSync - db.NoGrowSync = options.NoGrowSync - db.MmapFlags = options.MmapFlags - db.NoFreelistSync = options.NoFreelistSync - db.FreelistType = options.FreelistType - db.Mlock = options.Mlock - - // Set default values for later DB operations. - db.MaxBatchSize = DefaultMaxBatchSize - db.MaxBatchDelay = DefaultMaxBatchDelay - db.AllocSize = DefaultAllocSize - - flag := os.O_RDWR - if options.ReadOnly { - flag = os.O_RDONLY - db.readOnly = true - } - - db.openFile = options.OpenFile - if db.openFile == nil { - db.openFile = os.OpenFile - } - - // Open data file and separate sync handler for metadata writes. - var err error - if db.file, err = db.openFile(path, flag|os.O_CREATE, mode); err != nil { - _ = db.close() - return nil, err - } - db.path = db.file.Name() - - // Lock file so that other processes using Bolt in read-write mode cannot - // use the database at the same time. This would cause corruption since - // the two processes would write meta pages and free pages separately. - // The database file is locked exclusively (only one process can grab the lock) - // if !options.ReadOnly. - // The database file is locked using the shared lock (more than one process may - // hold a lock at the same time) otherwise (options.ReadOnly is set). - if err := flock(db, !db.readOnly, options.Timeout); err != nil { - _ = db.close() - return nil, err - } - - // Default values for test hooks - db.ops.writeAt = db.file.WriteAt - - if db.pageSize = options.PageSize; db.pageSize == 0 { - // Set the default page size to the OS page size. - db.pageSize = defaultPageSize - } - - // Initialize the database if it doesn't exist. - if info, err := db.file.Stat(); err != nil { - _ = db.close() - return nil, err - } else if info.Size() == 0 { - // Initialize new files with meta pages. - if err := db.init(); err != nil { - // clean up file descriptor on initialization fail - _ = db.close() - return nil, err - } - } else { - // Read the first meta page to determine the page size. - var buf [0x1000]byte - // If we can't read the page size, but can read a page, assume - // it's the same as the OS or one given -- since that's how the - // page size was chosen in the first place. - // - // If the first page is invalid and this OS uses a different - // page size than what the database was created with then we - // are out of luck and cannot access the database. - // - // TODO: scan for next page - if bw, err := db.file.ReadAt(buf[:], 0); err == nil && bw == len(buf) { - if m := db.pageInBuffer(buf[:], 0).meta(); m.validate() == nil { - db.pageSize = int(m.pageSize) - } - } else { - _ = db.close() - return nil, ErrInvalid - } - } - - // Initialize page pool. - db.pagePool = sync.Pool{ - New: func() interface{} { - return make([]byte, db.pageSize) - }, - } - - // Memory map the data file. - if err := db.mmap(options.InitialMmapSize); err != nil { - _ = db.close() - return nil, err - } - - if db.readOnly { - return db, nil - } - - db.loadFreelist() - - // Flush freelist when transitioning from no sync to sync so - // NoFreelistSync unaware boltdb can open the db later. 
- if !db.NoFreelistSync && !db.hasSyncedFreelist() {
- tx, err := db.Begin(true)
- if tx != nil {
- err = tx.Commit()
- }
- if err != nil {
- _ = db.close()
- return nil, err
- }
- }
-
- // Mark the database as opened and return.
- return db, nil
-}
-
-// loadFreelist reads the freelist if it is synced, or reconstructs it
-// by scanning the DB if it is not synced. It assumes there are no
-// concurrent accesses being made to the freelist.
-func (db *DB) loadFreelist() {
- db.freelistLoad.Do(func() {
- db.freelist = newFreelist(db.FreelistType)
- if !db.hasSyncedFreelist() {
- // Reconstruct free list by scanning the DB.
- db.freelist.readIDs(db.freepages())
- } else {
- // Read free list from freelist page.
- db.freelist.read(db.page(db.meta().freelist))
- }
- db.stats.FreePageN = db.freelist.free_count()
- })
-}
-
-func (db *DB) hasSyncedFreelist() bool {
- return db.meta().freelist != pgidNoFreelist
-}
-
-// mmap opens the underlying memory-mapped file and initializes the meta references.
-// minsz is the minimum size that the new mmap can be.
-func (db *DB) mmap(minsz int) error {
- db.mmaplock.Lock()
- defer db.mmaplock.Unlock()
-
- info, err := db.file.Stat()
- if err != nil {
- return fmt.Errorf("mmap stat error: %s", err)
- } else if int(info.Size()) < db.pageSize*2 {
- return fmt.Errorf("file size too small")
- }
-
- // Ensure the size is at least the minimum size.
- fileSize := int(info.Size())
- var size = fileSize
- if size < minsz {
- size = minsz
- }
- size, err = db.mmapSize(size)
- if err != nil {
- return err
- }
-
- if db.Mlock {
- // Unlock db memory
- if err := db.munlock(fileSize); err != nil {
- return err
- }
- }
-
- // Dereference all mmap references before unmapping.
- if db.rwtx != nil {
- db.rwtx.root.dereference()
- }
-
- // Unmap existing data before continuing.
- if err := db.munmap(); err != nil {
- return err
- }
-
- // Memory-map the data file as a byte slice.
- if err := mmap(db, size); err != nil {
- return err
- }
-
- if db.Mlock {
- // Don't allow swapping of data file
- if err := db.mlock(fileSize); err != nil {
- return err
- }
- }
-
- // Save references to the meta pages.
- db.meta0 = db.page(0).meta()
- db.meta1 = db.page(1).meta()
-
- // Validate the meta pages. We only return an error if both meta pages fail
- // validation, since meta0 failing validation means that it wasn't saved
- // properly -- but we can recover using meta1. And vice-versa.
- err0 := db.meta0.validate()
- err1 := db.meta1.validate()
- if err0 != nil && err1 != nil {
- return err0
- }
-
- return nil
-}
-
-// munmap unmaps the data file from memory.
-func (db *DB) munmap() error {
- if err := munmap(db); err != nil {
- return fmt.Errorf("unmap error: " + err.Error())
- }
- return nil
-}
-
-// mmapSize determines the appropriate size for the mmap given the current size
-// of the database. The minimum size is 32KB and doubles until it reaches 1GB.
-// Returns an error if the new mmap size is greater than the max allowed.
-func (db *DB) mmapSize(size int) (int, error) {
- // Double the size from 32KB until 1GB.
- for i := uint(15); i <= 30; i++ {
- if size <= 1<<i {
- size = 1 << i
- return size, nil
- }
- }
-
- // Verify the requested size is not above the maximum allowed.
- if size > maxMapSize {
- return 0, fmt.Errorf("mmap too large")
- }
-
- // If larger than 1GB then grow by 1GB at a time.
- sz := int64(size)
- if remainder := sz % int64(maxMmapStep); remainder > 0 {
- sz += int64(maxMmapStep) - remainder
- }
-
- // Ensure that the mmap size is a multiple of the page size.
- // This should always be true since we're incrementing in MBs.
- pageSize := int64(db.pageSize) - if (sz % pageSize) != 0 { - sz = ((sz / pageSize) + 1) * pageSize - } - - // If we've exceeded the max size then only grow up to the max size. - if sz > maxMapSize { - sz = maxMapSize - } - - return int(sz), nil -} - -func (db *DB) munlock(fileSize int) error { - if err := munlock(db, fileSize); err != nil { - return fmt.Errorf("munlock error: " + err.Error()) - } - return nil -} - -func (db *DB) mlock(fileSize int) error { - if err := mlock(db, fileSize); err != nil { - return fmt.Errorf("mlock error: " + err.Error()) - } - return nil -} - -func (db *DB) mrelock(fileSizeFrom, fileSizeTo int) error { - if err := db.munlock(fileSizeFrom); err != nil { - return err - } - if err := db.mlock(fileSizeTo); err != nil { - return err - } - return nil -} - -// init creates a new database file and initializes its meta pages. -func (db *DB) init() error { - // Create two meta pages on a buffer. - buf := make([]byte, db.pageSize*4) - for i := 0; i < 2; i++ { - p := db.pageInBuffer(buf, pgid(i)) - p.id = pgid(i) - p.flags = metaPageFlag - - // Initialize the meta page. - m := p.meta() - m.magic = magic - m.version = version - m.pageSize = uint32(db.pageSize) - m.freelist = 2 - m.root = bucket{root: 3} - m.pgid = 4 - m.txid = txid(i) - m.checksum = m.sum64() - } - - // Write an empty freelist at page 3. - p := db.pageInBuffer(buf, pgid(2)) - p.id = pgid(2) - p.flags = freelistPageFlag - p.count = 0 - - // Write an empty leaf page at page 4. - p = db.pageInBuffer(buf, pgid(3)) - p.id = pgid(3) - p.flags = leafPageFlag - p.count = 0 - - // Write the buffer to our data file. - if _, err := db.ops.writeAt(buf, 0); err != nil { - return err - } - if err := fdatasync(db); err != nil { - return err - } - db.filesz = len(buf) - - return nil -} - -// Close releases all database resources. -// It will block waiting for any open transactions to finish -// before closing the database and returning. -func (db *DB) Close() error { - db.rwlock.Lock() - defer db.rwlock.Unlock() - - db.metalock.Lock() - defer db.metalock.Unlock() - - db.mmaplock.Lock() - defer db.mmaplock.Unlock() - - return db.close() -} - -func (db *DB) close() error { - if !db.opened { - return nil - } - - db.opened = false - - db.freelist = nil - - // Clear ops. - db.ops.writeAt = nil - - // Close the mmap. - if err := db.munmap(); err != nil { - return err - } - - // Close file handles. - if db.file != nil { - // No need to unlock read-only file. - if !db.readOnly { - // Unlock the file. - if err := funlock(db); err != nil { - log.Printf("bolt.Close(): funlock error: %s", err) - } - } - - // Close the file descriptor. - if err := db.file.Close(); err != nil { - return fmt.Errorf("db file close: %s", err) - } - db.file = nil - } - - db.path = "" - return nil -} - -// Begin starts a new transaction. -// Multiple read-only transactions can be used concurrently but only one -// write transaction can be used at a time. Starting multiple write transactions -// will cause the calls to block and be serialized until the current write -// transaction finishes. -// -// Transactions should not be dependent on one another. Opening a read -// transaction and a write transaction in the same goroutine can cause the -// writer to deadlock because the database periodically needs to re-mmap itself -// as it grows and it cannot do that while a read transaction is open. 
-// -// If a long running read transaction (for example, a snapshot transaction) is -// needed, you might want to set DB.InitialMmapSize to a large enough value -// to avoid potential blocking of write transaction. -// -// IMPORTANT: You must close read-only transactions after you are finished or -// else the database will not reclaim old pages. -func (db *DB) Begin(writable bool) (*Tx, error) { - if writable { - return db.beginRWTx() - } - return db.beginTx() -} - -func (db *DB) beginTx() (*Tx, error) { - // Lock the meta pages while we initialize the transaction. We obtain - // the meta lock before the mmap lock because that's the order that the - // write transaction will obtain them. - db.metalock.Lock() - - // Obtain a read-only lock on the mmap. When the mmap is remapped it will - // obtain a write lock so all transactions must finish before it can be - // remapped. - db.mmaplock.RLock() - - // Exit if the database is not open yet. - if !db.opened { - db.mmaplock.RUnlock() - db.metalock.Unlock() - return nil, ErrDatabaseNotOpen - } - - // Create a transaction associated with the database. - t := &Tx{} - t.init(db) - - // Keep track of transaction until it closes. - db.txs = append(db.txs, t) - n := len(db.txs) - - // Unlock the meta pages. - db.metalock.Unlock() - - // Update the transaction stats. - db.statlock.Lock() - db.stats.TxN++ - db.stats.OpenTxN = n - db.statlock.Unlock() - - return t, nil -} - -func (db *DB) beginRWTx() (*Tx, error) { - // If the database was opened with Options.ReadOnly, return an error. - if db.readOnly { - return nil, ErrDatabaseReadOnly - } - - // Obtain writer lock. This is released by the transaction when it closes. - // This enforces only one writer transaction at a time. - db.rwlock.Lock() - - // Once we have the writer lock then we can lock the meta pages so that - // we can set up the transaction. - db.metalock.Lock() - defer db.metalock.Unlock() - - // Exit if the database is not open yet. - if !db.opened { - db.rwlock.Unlock() - return nil, ErrDatabaseNotOpen - } - - // Create a transaction associated with the database. - t := &Tx{writable: true} - t.init(db) - db.rwtx = t - db.freePages() - return t, nil -} - -// freePages releases any pages associated with closed read-only transactions. -func (db *DB) freePages() { - // Free all pending pages prior to earliest open transaction. - sort.Sort(txsById(db.txs)) - minid := txid(0xFFFFFFFFFFFFFFFF) - if len(db.txs) > 0 { - minid = db.txs[0].meta.txid - } - if minid > 0 { - db.freelist.release(minid - 1) - } - // Release unused txid extents. - for _, t := range db.txs { - db.freelist.releaseRange(minid, t.meta.txid-1) - minid = t.meta.txid + 1 - } - db.freelist.releaseRange(minid, txid(0xFFFFFFFFFFFFFFFF)) - // Any page both allocated and freed in an extent is safe to release. -} - -type txsById []*Tx - -func (t txsById) Len() int { return len(t) } -func (t txsById) Swap(i, j int) { t[i], t[j] = t[j], t[i] } -func (t txsById) Less(i, j int) bool { return t[i].meta.txid < t[j].meta.txid } - -// removeTx removes a transaction from the database. -func (db *DB) removeTx(tx *Tx) { - // Release the read lock on the mmap. - db.mmaplock.RUnlock() - - // Use the meta lock to restrict access to the DB object. - db.metalock.Lock() - - // Remove the transaction. - for i, t := range db.txs { - if t == tx { - last := len(db.txs) - 1 - db.txs[i] = db.txs[last] - db.txs[last] = nil - db.txs = db.txs[:last] - break - } - } - n := len(db.txs) - - // Unlock the meta pages. - db.metalock.Unlock() - - // Merge statistics. 
- db.statlock.Lock() - db.stats.OpenTxN = n - db.stats.TxStats.add(&tx.stats) - db.statlock.Unlock() -} - -// Update executes a function within the context of a read-write managed transaction. -// If no error is returned from the function then the transaction is committed. -// If an error is returned then the entire transaction is rolled back. -// Any error that is returned from the function or returned from the commit is -// returned from the Update() method. -// -// Attempting to manually commit or rollback within the function will cause a panic. -func (db *DB) Update(fn func(*Tx) error) error { - t, err := db.Begin(true) - if err != nil { - return err - } - - // Make sure the transaction rolls back in the event of a panic. - defer func() { - if t.db != nil { - t.rollback() - } - }() - - // Mark as a managed tx so that the inner function cannot manually commit. - t.managed = true - - // If an error is returned from the function then rollback and return error. - err = fn(t) - t.managed = false - if err != nil { - _ = t.Rollback() - return err - } - - return t.Commit() -} - -// View executes a function within the context of a managed read-only transaction. -// Any error that is returned from the function is returned from the View() method. -// -// Attempting to manually rollback within the function will cause a panic. -func (db *DB) View(fn func(*Tx) error) error { - t, err := db.Begin(false) - if err != nil { - return err - } - - // Make sure the transaction rolls back in the event of a panic. - defer func() { - if t.db != nil { - t.rollback() - } - }() - - // Mark as a managed tx so that the inner function cannot manually rollback. - t.managed = true - - // If an error is returned from the function then pass it through. - err = fn(t) - t.managed = false - if err != nil { - _ = t.Rollback() - return err - } - - return t.Rollback() -} - -// Batch calls fn as part of a batch. It behaves similar to Update, -// except: -// -// 1. concurrent Batch calls can be combined into a single Bolt -// transaction. -// -// 2. the function passed to Batch may be called multiple times, -// regardless of whether it returns error or not. -// -// This means that Batch function side effects must be idempotent and -// take permanent effect only after a successful return is seen in -// caller. -// -// The maximum batch size and delay can be adjusted with DB.MaxBatchSize -// and DB.MaxBatchDelay, respectively. -// -// Batch is only useful when there are multiple goroutines calling it. -func (db *DB) Batch(fn func(*Tx) error) error { - errCh := make(chan error, 1) - - db.batchMu.Lock() - if (db.batch == nil) || (db.batch != nil && len(db.batch.calls) >= db.MaxBatchSize) { - // There is no existing batch, or the existing batch is full; start a new one. - db.batch = &batch{ - db: db, - } - db.batch.timer = time.AfterFunc(db.MaxBatchDelay, db.batch.trigger) - } - db.batch.calls = append(db.batch.calls, call{fn: fn, err: errCh}) - if len(db.batch.calls) >= db.MaxBatchSize { - // wake up batch, it's ready to run - go db.batch.trigger() - } - db.batchMu.Unlock() - - err := <-errCh - if err == trySolo { - err = db.Update(fn) - } - return err -} - -type call struct { - fn func(*Tx) error - err chan<- error -} - -type batch struct { - db *DB - timer *time.Timer - start sync.Once - calls []call -} - -// trigger runs the batch if it hasn't already been run. -func (b *batch) trigger() { - b.start.Do(b.run) -} - -// run performs the transactions in the batch and communicates results -// back to DB.Batch. 
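Because Batch may invoke the supplied function more than once, the function must be idempotent and its side effects treated as durable only after Batch returns nil. A minimal sketch with hypothetical names ("hits.db", recordHit):

```go
package main

import (
	"log"
	"sync"

	bolt "go.etcd.io/bbolt"
)

// recordHit lets concurrent callers share a single write transaction.
// Overwriting the same key with the same value keeps a retried call
// harmless, which is what makes it safe for Batch to re-run it.
func recordHit(db *bolt.DB, host string) error {
	return db.Batch(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("hits")) // hypothetical bucket
		if err != nil {
			return err
		}
		return b.Put([]byte(host), []byte("seen"))
	})
}

func main() {
	db, err := bolt.Open("hits.db", 0o600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Batch only pays off when several goroutines call it concurrently;
	// their functions are coalesced into shared transactions.
	var wg sync.WaitGroup
	for _, h := range []string{"a.test", "b.test", "c.test"} {
		wg.Add(1)
		go func(h string) {
			defer wg.Done()
			if err := recordHit(db, h); err != nil {
				log.Println(err)
			}
		}(h)
	}
	wg.Wait()
}
```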
-func (b *batch) run() { - b.db.batchMu.Lock() - b.timer.Stop() - // Make sure no new work is added to this batch, but don't break - // other batches. - if b.db.batch == b { - b.db.batch = nil - } - b.db.batchMu.Unlock() - -retry: - for len(b.calls) > 0 { - var failIdx = -1 - err := b.db.Update(func(tx *Tx) error { - for i, c := range b.calls { - if err := safelyCall(c.fn, tx); err != nil { - failIdx = i - return err - } - } - return nil - }) - - if failIdx >= 0 { - // take the failing transaction out of the batch. it's - // safe to shorten b.calls here because db.batch no longer - // points to us, and we hold the mutex anyway. - c := b.calls[failIdx] - b.calls[failIdx], b.calls = b.calls[len(b.calls)-1], b.calls[:len(b.calls)-1] - // tell the submitter re-run it solo, continue with the rest of the batch - c.err <- trySolo - continue retry - } - - // pass success, or bolt internal errors, to all callers - for _, c := range b.calls { - c.err <- err - } - break retry - } -} - -// trySolo is a special sentinel error value used for signaling that a -// transaction function should be re-run. It should never be seen by -// callers. -var trySolo = errors.New("batch function returned an error and should be re-run solo") - -type panicked struct { - reason interface{} -} - -func (p panicked) Error() string { - if err, ok := p.reason.(error); ok { - return err.Error() - } - return fmt.Sprintf("panic: %v", p.reason) -} - -func safelyCall(fn func(*Tx) error, tx *Tx) (err error) { - defer func() { - if p := recover(); p != nil { - err = panicked{p} - } - }() - return fn(tx) -} - -// Sync executes fdatasync() against the database file handle. -// -// This is not necessary under normal operation, however, if you use NoSync -// then it allows you to force the database file to sync against the disk. -func (db *DB) Sync() error { return fdatasync(db) } - -// Stats retrieves ongoing performance stats for the database. -// This is only updated when a transaction closes. -func (db *DB) Stats() Stats { - db.statlock.RLock() - defer db.statlock.RUnlock() - return db.stats -} - -// This is for internal access to the raw data bytes from the C cursor, use -// carefully, or not at all. -func (db *DB) Info() *Info { - return &Info{uintptr(unsafe.Pointer(&db.data[0])), db.pageSize} -} - -// page retrieves a page reference from the mmap based on the current page size. -func (db *DB) page(id pgid) *page { - pos := id * pgid(db.pageSize) - return (*page)(unsafe.Pointer(&db.data[pos])) -} - -// pageInBuffer retrieves a page reference from a given byte array based on the current page size. -func (db *DB) pageInBuffer(b []byte, id pgid) *page { - return (*page)(unsafe.Pointer(&b[id*pgid(db.pageSize)])) -} - -// meta retrieves the current meta page reference. -func (db *DB) meta() *meta { - // We have to return the meta with the highest txid which doesn't fail - // validation. Otherwise, we can cause errors when in fact the database is - // in a consistent state. metaA is the one with the higher txid. - metaA := db.meta0 - metaB := db.meta1 - if db.meta1.txid > db.meta0.txid { - metaA = db.meta1 - metaB = db.meta0 - } - - // Use higher meta page if valid. Otherwise fallback to previous, if valid. - if err := metaA.validate(); err == nil { - return metaA - } else if err := metaB.validate(); err == nil { - return metaB - } - - // This should never be reached, because both meta1 and meta0 were validated - // on mmap() and we do fsync() on every write. 
- panic("bolt.DB.meta(): invalid meta pages") -} - -// allocate returns a contiguous block of memory starting at a given page. -func (db *DB) allocate(txid txid, count int) (*page, error) { - // Allocate a temporary buffer for the page. - var buf []byte - if count == 1 { - buf = db.pagePool.Get().([]byte) - } else { - buf = make([]byte, count*db.pageSize) - } - p := (*page)(unsafe.Pointer(&buf[0])) - p.overflow = uint32(count - 1) - - // Use pages from the freelist if they are available. - if p.id = db.freelist.allocate(txid, count); p.id != 0 { - return p, nil - } - - // Resize mmap() if we're at the end. - p.id = db.rwtx.meta.pgid - var minsz = int((p.id+pgid(count))+1) * db.pageSize - if minsz >= db.datasz { - if err := db.mmap(minsz); err != nil { - return nil, fmt.Errorf("mmap allocate error: %s", err) - } - } - - // Move the page id high water mark. - db.rwtx.meta.pgid += pgid(count) - - return p, nil -} - -// grow grows the size of the database to the given sz. -func (db *DB) grow(sz int) error { - // Ignore if the new size is less than available file size. - if sz <= db.filesz { - return nil - } - - // If the data is smaller than the alloc size then only allocate what's needed. - // Once it goes over the allocation size then allocate in chunks. - if db.datasz < db.AllocSize { - sz = db.datasz - } else { - sz += db.AllocSize - } - - // Truncate and fsync to ensure file size metadata is flushed. - // https://github.com/boltdb/bolt/issues/284 - if !db.NoGrowSync && !db.readOnly { - if runtime.GOOS != "windows" { - if err := db.file.Truncate(int64(sz)); err != nil { - return fmt.Errorf("file resize error: %s", err) - } - } - if err := db.file.Sync(); err != nil { - return fmt.Errorf("file sync error: %s", err) - } - if db.Mlock { - // unlock old file and lock new one - if err := db.mrelock(db.filesz, sz); err != nil { - return fmt.Errorf("mlock/munlock error: %s", err) - } - } - } - - db.filesz = sz - return nil -} - -func (db *DB) IsReadOnly() bool { - return db.readOnly -} - -func (db *DB) freepages() []pgid { - tx, err := db.beginTx() - defer func() { - err = tx.Rollback() - if err != nil { - panic("freepages: failed to rollback tx") - } - }() - if err != nil { - panic("freepages: failed to open read only tx") - } - - reachable := make(map[pgid]*page) - nofreed := make(map[pgid]bool) - ech := make(chan error) - go func() { - for e := range ech { - panic(fmt.Sprintf("freepages: failed to get all reachable pages (%v)", e)) - } - }() - tx.checkBucket(&tx.root, reachable, nofreed, ech) - close(ech) - - var fids []pgid - for i := pgid(2); i < db.meta().pgid; i++ { - if _, ok := reachable[i]; !ok { - fids = append(fids, i) - } - } - return fids -} - -// Options represents the options that can be set when opening a database. -type Options struct { - // Timeout is the amount of time to wait to obtain a file lock. - // When set to zero it will wait indefinitely. This option is only - // available on Darwin and Linux. - Timeout time.Duration - - // Sets the DB.NoGrowSync flag before memory mapping the file. - NoGrowSync bool - - // Do not sync freelist to disk. This improves the database write performance - // under normal operation, but requires a full database re-sync during recovery. - NoFreelistSync bool - - // FreelistType sets the backend freelist type. There are two options. Array which is simple but endures - // dramatic performance degradation if database is large and framentation in freelist is common. 
- // The alternative one is using hashmap, it is faster in almost all circumstances - // but it doesn't guarantee that it offers the smallest page id available. In normal case it is safe. - // The default type is array - FreelistType FreelistType - - // Open database in read-only mode. Uses flock(..., LOCK_SH |LOCK_NB) to - // grab a shared lock (UNIX). - ReadOnly bool - - // Sets the DB.MmapFlags flag before memory mapping the file. - MmapFlags int - - // InitialMmapSize is the initial mmap size of the database - // in bytes. Read transactions won't block write transaction - // if the InitialMmapSize is large enough to hold database mmap - // size. (See DB.Begin for more information) - // - // If <=0, the initial map size is 0. - // If initialMmapSize is smaller than the previous database size, - // it takes no effect. - InitialMmapSize int - - // PageSize overrides the default OS page size. - PageSize int - - // NoSync sets the initial value of DB.NoSync. Normally this can just be - // set directly on the DB itself when returned from Open(), but this option - // is useful in APIs which expose Options but not the underlying DB. - NoSync bool - - // OpenFile is used to open files. It defaults to os.OpenFile. This option - // is useful for writing hermetic tests. - OpenFile func(string, int, os.FileMode) (*os.File, error) - - // Mlock locks database file in memory when set to true. - // It prevents potential page faults, however - // used memory can't be reclaimed. (UNIX only) - Mlock bool -} - -// DefaultOptions represent the options used if nil options are passed into Open(). -// No timeout is used which will cause Bolt to wait indefinitely for a lock. -var DefaultOptions = &Options{ - Timeout: 0, - NoGrowSync: false, - FreelistType: FreelistArrayType, -} - -// Stats represents statistics about the database. -type Stats struct { - // Freelist stats - FreePageN int // total number of free pages on the freelist - PendingPageN int // total number of pending pages on the freelist - FreeAlloc int // total bytes allocated in free pages - FreelistInuse int // total bytes used by the freelist - - // Transaction stats - TxN int // total number of started read transactions - OpenTxN int // number of currently open read transactions - - TxStats TxStats // global, ongoing stats. -} - -// Sub calculates and returns the difference between two sets of database stats. -// This is useful when obtaining stats at two different points and time and -// you need the performance counters that occurred within that time span. -func (s *Stats) Sub(other *Stats) Stats { - if other == nil { - return *s - } - var diff Stats - diff.FreePageN = s.FreePageN - diff.PendingPageN = s.PendingPageN - diff.FreeAlloc = s.FreeAlloc - diff.FreelistInuse = s.FreelistInuse - diff.TxN = s.TxN - other.TxN - diff.TxStats = s.TxStats.Sub(&other.TxStats) - return diff -} - -type Info struct { - Data uintptr - PageSize int -} - -type meta struct { - magic uint32 - version uint32 - pageSize uint32 - flags uint32 - root bucket - freelist pgid - pgid pgid - txid txid - checksum uint64 -} - -// validate checks the marker bytes and version of the meta page to ensure it matches this binary. -func (m *meta) validate() error { - if m.magic != magic { - return ErrInvalid - } else if m.version != version { - return ErrVersionMismatch - } else if m.checksum != 0 && m.checksum != m.sum64() { - return ErrChecksum - } - return nil -} - -// copy copies one meta object to another. 
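A sketch of how the Options and Stats types above are typically used: explicit options at Open time, and two Stats snapshots diffed with Sub. The option values are illustrative:

package main

import (
    "fmt"
    "log"
    "time"

    bolt "go.etcd.io/bbolt"
)

func main() {
    db, err := bolt.Open("example.db", 0o600, &bolt.Options{
        Timeout:      time.Second,          // give up on the file lock after 1s
        FreelistType: bolt.FreelistMapType, // hashmap freelist backend
    })
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()

    before := db.Stats()
    // ... run some transactions ...
    after := db.Stats()
    diff := after.Sub(&before)
    fmt.Printf("read transactions started since snapshot: %d\n", diff.TxN)
}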
-func (m *meta) copy(dest *meta) { - *dest = *m -} - -// write writes the meta onto a page. -func (m *meta) write(p *page) { - if m.root.root >= m.pgid { - panic(fmt.Sprintf("root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid)) - } else if m.freelist >= m.pgid && m.freelist != pgidNoFreelist { - // TODO: reject pgidNoFreeList if !NoFreelistSync - panic(fmt.Sprintf("freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid)) - } - - // Page id is either going to be 0 or 1 which we can determine by the transaction ID. - p.id = pgid(m.txid % 2) - p.flags |= metaPageFlag - - // Calculate the checksum. - m.checksum = m.sum64() - - m.copy(p.meta()) -} - -// generates the checksum for the meta. -func (m *meta) sum64() uint64 { - var h = fnv.New64a() - _, _ = h.Write((*[unsafe.Offsetof(meta{}.checksum)]byte)(unsafe.Pointer(m))[:]) - return h.Sum64() -} - -// _assert will panic with a given formatted message if the given condition is false. -func _assert(condition bool, msg string, v ...interface{}) { - if !condition { - panic(fmt.Sprintf("assertion failed: "+msg, v...)) - } -} diff --git a/vendor/go.etcd.io/bbolt/doc.go b/vendor/go.etcd.io/bbolt/doc.go deleted file mode 100644 index 95f25f01..00000000 --- a/vendor/go.etcd.io/bbolt/doc.go +++ /dev/null @@ -1,44 +0,0 @@ -/* -package bbolt implements a low-level key/value store in pure Go. It supports -fully serializable transactions, ACID semantics, and lock-free MVCC with -multiple readers and a single writer. Bolt can be used for projects that -want a simple data store without the need to add large dependencies such as -Postgres or MySQL. - -Bolt is a single-level, zero-copy, B+tree data store. This means that Bolt is -optimized for fast read access and does not require recovery in the event of a -system crash. Transactions which have not finished committing will simply be -rolled back in the event of a crash. - -The design of Bolt is based on Howard Chu's LMDB database project. - -Bolt currently works on Windows, Mac OS X, and Linux. - - -Basics - -There are only a few types in Bolt: DB, Bucket, Tx, and Cursor. The DB is -a collection of buckets and is represented by a single file on disk. A bucket is -a collection of unique keys that are associated with values. - -Transactions provide either read-only or read-write access to the database. -Read-only transactions can retrieve key/value pairs and can use Cursors to -iterate over the dataset sequentially. Read-write transactions can create and -delete buckets and can insert and remove keys. Only one read-write transaction -is allowed at a time. - - -Caveats - -The database uses a read-only, memory-mapped data file to ensure that -applications cannot corrupt the database, however, this means that keys and -values returned from Bolt cannot be changed. Writing to a read-only byte slice -will cause Go to panic. - -Keys and values retrieved from the database are only valid for the life of -the transaction. When used outside the transaction, these byte slices can -point to different data or can point to invalid memory which will cause a panic. - - -*/ -package bbolt diff --git a/vendor/go.etcd.io/bbolt/errors.go b/vendor/go.etcd.io/bbolt/errors.go deleted file mode 100644 index 48758ca5..00000000 --- a/vendor/go.etcd.io/bbolt/errors.go +++ /dev/null @@ -1,71 +0,0 @@ -package bbolt - -import "errors" - -// These errors can be returned when opening or calling methods on a DB. 
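Callers typically match the sentinel errors declared just below either by direct comparison or with errors.Is; a brief sketch, with an illustrative path:

package main

import (
    "errors"
    "log"
    "time"

    bolt "go.etcd.io/bbolt"
)

func main() {
    db, err := bolt.Open("example.db", 0o600, &bolt.Options{Timeout: time.Second})
    if errors.Is(err, bolt.ErrTimeout) {
        log.Fatal("another process holds the database file lock")
    } else if err != nil {
        log.Fatal(err)
    }
    defer db.Close()

    err = db.Update(func(tx *bolt.Tx) error {
        _, err := tx.CreateBucket([]byte("widgets"))
        if errors.Is(err, bolt.ErrBucketExists) {
            return nil // already created on an earlier run; fine
        }
        return err
    })
    if err != nil {
        log.Fatal(err)
    }
}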
-var ( - // ErrDatabaseNotOpen is returned when a DB instance is accessed before it - // is opened or after it is closed. - ErrDatabaseNotOpen = errors.New("database not open") - - // ErrDatabaseOpen is returned when opening a database that is - // already open. - ErrDatabaseOpen = errors.New("database already open") - - // ErrInvalid is returned when both meta pages on a database are invalid. - // This typically occurs when a file is not a bolt database. - ErrInvalid = errors.New("invalid database") - - // ErrVersionMismatch is returned when the data file was created with a - // different version of Bolt. - ErrVersionMismatch = errors.New("version mismatch") - - // ErrChecksum is returned when either meta page checksum does not match. - ErrChecksum = errors.New("checksum error") - - // ErrTimeout is returned when a database cannot obtain an exclusive lock - // on the data file after the timeout passed to Open(). - ErrTimeout = errors.New("timeout") -) - -// These errors can occur when beginning or committing a Tx. -var ( - // ErrTxNotWritable is returned when performing a write operation on a - // read-only transaction. - ErrTxNotWritable = errors.New("tx not writable") - - // ErrTxClosed is returned when committing or rolling back a transaction - // that has already been committed or rolled back. - ErrTxClosed = errors.New("tx closed") - - // ErrDatabaseReadOnly is returned when a mutating transaction is started on a - // read-only database. - ErrDatabaseReadOnly = errors.New("database is in read-only mode") -) - -// These errors can occur when putting or deleting a value or a bucket. -var ( - // ErrBucketNotFound is returned when trying to access a bucket that has - // not been created yet. - ErrBucketNotFound = errors.New("bucket not found") - - // ErrBucketExists is returned when creating a bucket that already exists. - ErrBucketExists = errors.New("bucket already exists") - - // ErrBucketNameRequired is returned when creating a bucket with a blank name. - ErrBucketNameRequired = errors.New("bucket name required") - - // ErrKeyRequired is returned when inserting a zero-length key. - ErrKeyRequired = errors.New("key required") - - // ErrKeyTooLarge is returned when inserting a key that is larger than MaxKeySize. - ErrKeyTooLarge = errors.New("key too large") - - // ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize. - ErrValueTooLarge = errors.New("value too large") - - // ErrIncompatibleValue is returned when trying create or delete a bucket - // on an existing non-bucket key or when trying to create or delete a - // non-bucket key on an existing bucket key. - ErrIncompatibleValue = errors.New("incompatible value") -) diff --git a/vendor/go.etcd.io/bbolt/freelist.go b/vendor/go.etcd.io/bbolt/freelist.go deleted file mode 100644 index 697a4696..00000000 --- a/vendor/go.etcd.io/bbolt/freelist.go +++ /dev/null @@ -1,404 +0,0 @@ -package bbolt - -import ( - "fmt" - "sort" - "unsafe" -) - -// txPending holds a list of pgids and corresponding allocation txns -// that are pending to be freed. -type txPending struct { - ids []pgid - alloctx []txid // txids allocating the ids - lastReleaseBegin txid // beginning txid of last matching releaseRange -} - -// pidSet holds the set of starting pgids which have the same span size -type pidSet map[pgid]struct{} - -// freelist represents a list of all pages that are available for allocation. -// It also tracks pages that have been freed but are still in use by open transactions. 
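Pages freed by a transaction stay "pending" until no open reader can still observe them; the freelist below tracks this per transaction id. A toy sketch of the release-by-txid rule using plain maps (simplified stand-in types, not the real implementation):

package main

import "fmt"

// release moves every page freed at or before txid onto the free list,
// mirroring the shape of freelist.release further below.
func release(pending map[uint64][]uint64, free []uint64, txid uint64) ([]uint64, map[uint64][]uint64) {
    for tid, ids := range pending {
        if tid <= txid {
            free = append(free, ids...)
            delete(pending, tid)
        }
    }
    return free, pending
}

func main() {
    pending := map[uint64][]uint64{5: {10, 11}, 8: {12}}
    free, pending := release(pending, []uint64{3}, 7)
    fmt.Println(free, pending) // [3 10 11] map[8:[12]]
}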
-type freelist struct { - freelistType FreelistType // freelist type - ids []pgid // all free and available free page ids. - allocs map[pgid]txid // mapping of txid that allocated a pgid. - pending map[txid]*txPending // mapping of soon-to-be free page ids by tx. - cache map[pgid]bool // fast lookup of all free and pending page ids. - freemaps map[uint64]pidSet // key is the size of continuous pages(span), value is a set which contains the starting pgids of same size - forwardMap map[pgid]uint64 // key is start pgid, value is its span size - backwardMap map[pgid]uint64 // key is end pgid, value is its span size - allocate func(txid txid, n int) pgid // the freelist allocate func - free_count func() int // the function which gives you free page number - mergeSpans func(ids pgids) // the mergeSpan func - getFreePageIDs func() []pgid // get free pgids func - readIDs func(pgids []pgid) // readIDs func reads list of pages and init the freelist -} - -// newFreelist returns an empty, initialized freelist. -func newFreelist(freelistType FreelistType) *freelist { - f := &freelist{ - freelistType: freelistType, - allocs: make(map[pgid]txid), - pending: make(map[txid]*txPending), - cache: make(map[pgid]bool), - freemaps: make(map[uint64]pidSet), - forwardMap: make(map[pgid]uint64), - backwardMap: make(map[pgid]uint64), - } - - if freelistType == FreelistMapType { - f.allocate = f.hashmapAllocate - f.free_count = f.hashmapFreeCount - f.mergeSpans = f.hashmapMergeSpans - f.getFreePageIDs = f.hashmapGetFreePageIDs - f.readIDs = f.hashmapReadIDs - } else { - f.allocate = f.arrayAllocate - f.free_count = f.arrayFreeCount - f.mergeSpans = f.arrayMergeSpans - f.getFreePageIDs = f.arrayGetFreePageIDs - f.readIDs = f.arrayReadIDs - } - - return f -} - -// size returns the size of the page after serialization. -func (f *freelist) size() int { - n := f.count() - if n >= 0xFFFF { - // The first element will be used to store the count. See freelist.write. - n++ - } - return int(pageHeaderSize) + (int(unsafe.Sizeof(pgid(0))) * n) -} - -// count returns count of pages on the freelist -func (f *freelist) count() int { - return f.free_count() + f.pending_count() -} - -// arrayFreeCount returns count of free pages(array version) -func (f *freelist) arrayFreeCount() int { - return len(f.ids) -} - -// pending_count returns count of pending pages -func (f *freelist) pending_count() int { - var count int - for _, txp := range f.pending { - count += len(txp.ids) - } - return count -} - -// copyall copies a list of all free ids and all pending ids in one sorted list. -// f.count returns the minimum length required for dst. -func (f *freelist) copyall(dst []pgid) { - m := make(pgids, 0, f.pending_count()) - for _, txp := range f.pending { - m = append(m, txp.ids...) - } - sort.Sort(m) - mergepgids(dst, f.getFreePageIDs(), m) -} - -// arrayAllocate returns the starting page id of a contiguous list of pages of a given size. -// If a contiguous block cannot be found then 0 is returned. -func (f *freelist) arrayAllocate(txid txid, n int) pgid { - if len(f.ids) == 0 { - return 0 - } - - var initial, previd pgid - for i, id := range f.ids { - if id <= 1 { - panic(fmt.Sprintf("invalid page allocation: %d", id)) - } - - // Reset initial page if this is not contiguous. - if previd == 0 || id-previd != 1 { - initial = id - } - - // If we found a contiguous block then remove it and return it. - if (id-initial)+1 == pgid(n) { - // If we're allocating off the beginning then take the fast path - // and just adjust the existing slice. 
This will use extra memory - // temporarily but the append() in free() will realloc the slice - // as is necessary. - if (i + 1) == n { - f.ids = f.ids[i+1:] - } else { - copy(f.ids[i-n+1:], f.ids[i+1:]) - f.ids = f.ids[:len(f.ids)-n] - } - - // Remove from the free cache. - for i := pgid(0); i < pgid(n); i++ { - delete(f.cache, initial+i) - } - f.allocs[initial] = txid - return initial - } - - previd = id - } - return 0 -} - -// free releases a page and its overflow for a given transaction id. -// If the page is already free then a panic will occur. -func (f *freelist) free(txid txid, p *page) { - if p.id <= 1 { - panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.id)) - } - - // Free page and all its overflow pages. - txp := f.pending[txid] - if txp == nil { - txp = &txPending{} - f.pending[txid] = txp - } - allocTxid, ok := f.allocs[p.id] - if ok { - delete(f.allocs, p.id) - } else if (p.flags & freelistPageFlag) != 0 { - // Freelist is always allocated by prior tx. - allocTxid = txid - 1 - } - - for id := p.id; id <= p.id+pgid(p.overflow); id++ { - // Verify that page is not already free. - if f.cache[id] { - panic(fmt.Sprintf("page %d already freed", id)) - } - // Add to the freelist and cache. - txp.ids = append(txp.ids, id) - txp.alloctx = append(txp.alloctx, allocTxid) - f.cache[id] = true - } -} - -// release moves all page ids for a transaction id (or older) to the freelist. -func (f *freelist) release(txid txid) { - m := make(pgids, 0) - for tid, txp := range f.pending { - if tid <= txid { - // Move transaction's pending pages to the available freelist. - // Don't remove from the cache since the page is still free. - m = append(m, txp.ids...) - delete(f.pending, tid) - } - } - f.mergeSpans(m) -} - -// releaseRange moves pending pages allocated within an extent [begin,end] to the free list. -func (f *freelist) releaseRange(begin, end txid) { - if begin > end { - return - } - var m pgids - for tid, txp := range f.pending { - if tid < begin || tid > end { - continue - } - // Don't recompute freed pages if ranges haven't updated. - if txp.lastReleaseBegin == begin { - continue - } - for i := 0; i < len(txp.ids); i++ { - if atx := txp.alloctx[i]; atx < begin || atx > end { - continue - } - m = append(m, txp.ids[i]) - txp.ids[i] = txp.ids[len(txp.ids)-1] - txp.ids = txp.ids[:len(txp.ids)-1] - txp.alloctx[i] = txp.alloctx[len(txp.alloctx)-1] - txp.alloctx = txp.alloctx[:len(txp.alloctx)-1] - i-- - } - txp.lastReleaseBegin = begin - if len(txp.ids) == 0 { - delete(f.pending, tid) - } - } - f.mergeSpans(m) -} - -// rollback removes the pages from a given pending tx. -func (f *freelist) rollback(txid txid) { - // Remove page ids from cache. - txp := f.pending[txid] - if txp == nil { - return - } - var m pgids - for i, pgid := range txp.ids { - delete(f.cache, pgid) - tx := txp.alloctx[i] - if tx == 0 { - continue - } - if tx != txid { - // Pending free aborted; restore page back to alloc list. - f.allocs[pgid] = tx - } else { - // Freed page was allocated by this txn; OK to throw away. - m = append(m, pgid) - } - } - // Remove pages from pending list and mark as free if allocated by txid. - delete(f.pending, txid) - f.mergeSpans(m) -} - -// freed returns whether a given page is in the free list. -func (f *freelist) freed(pgid pgid) bool { - return f.cache[pgid] -} - -// read initializes the freelist from a freelist page. 
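The read/write pair below encodes the freelist length into the page's uint16 count field, spilling the true length into the first element once it reaches 0xFFFF. A standalone sketch of that encoding, with plain uint64 ids standing in for pgid and no page header:

package main

import "fmt"

// encode mirrors freelist.write: small lists fit in the uint16 count;
// larger lists set count to 0xFFFF and store the real length first.
func encode(ids []uint64) (count uint16, payload []uint64) {
    if len(ids) < 0xFFFF {
        return uint16(len(ids)), ids
    }
    return 0xFFFF, append([]uint64{uint64(len(ids))}, ids...)
}

// decode mirrors freelist.read.
func decode(count uint16, payload []uint64) []uint64 {
    if count < 0xFFFF {
        return payload[:count]
    }
    return payload[1 : 1+payload[0]]
}

func main() {
    c, p := encode([]uint64{3, 4, 7})
    fmt.Println(c, decode(c, p)) // 3 [3 4 7]
}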
-func (f *freelist) read(p *page) { - if (p.flags & freelistPageFlag) == 0 { - panic(fmt.Sprintf("invalid freelist page: %d, page type is %s", p.id, p.typ())) - } - // If the page.count is at the max uint16 value (64k) then it's considered - // an overflow and the size of the freelist is stored as the first element. - var idx, count = 0, int(p.count) - if count == 0xFFFF { - idx = 1 - c := *(*pgid)(unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))) - count = int(c) - if count < 0 { - panic(fmt.Sprintf("leading element count %d overflows int", c)) - } - } - - // Copy the list of page ids from the freelist. - if count == 0 { - f.ids = nil - } else { - var ids []pgid - data := unsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p), unsafe.Sizeof(ids[0]), idx) - unsafeSlice(unsafe.Pointer(&ids), data, count) - - // copy the ids, so we don't modify on the freelist page directly - idsCopy := make([]pgid, count) - copy(idsCopy, ids) - // Make sure they're sorted. - sort.Sort(pgids(idsCopy)) - - f.readIDs(idsCopy) - } -} - -// arrayReadIDs initializes the freelist from a given list of ids. -func (f *freelist) arrayReadIDs(ids []pgid) { - f.ids = ids - f.reindex() -} - -func (f *freelist) arrayGetFreePageIDs() []pgid { - return f.ids -} - -// write writes the page ids onto a freelist page. All free and pending ids are -// saved to disk since in the event of a program crash, all pending ids will -// become free. -func (f *freelist) write(p *page) error { - // Combine the old free pgids and pgids waiting on an open transaction. - - // Update the header flag. - p.flags |= freelistPageFlag - - // The page.count can only hold up to 64k elements so if we overflow that - // number then we handle it by putting the size in the first element. - l := f.count() - if l == 0 { - p.count = uint16(l) - } else if l < 0xFFFF { - p.count = uint16(l) - var ids []pgid - data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) - unsafeSlice(unsafe.Pointer(&ids), data, l) - f.copyall(ids) - } else { - p.count = 0xFFFF - var ids []pgid - data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) - unsafeSlice(unsafe.Pointer(&ids), data, l+1) - ids[0] = pgid(l) - f.copyall(ids[1:]) - } - - return nil -} - -// reload reads the freelist from a page and filters out pending items. -func (f *freelist) reload(p *page) { - f.read(p) - - // Build a cache of only pending pages. - pcache := make(map[pgid]bool) - for _, txp := range f.pending { - for _, pendingID := range txp.ids { - pcache[pendingID] = true - } - } - - // Check each page in the freelist and build a new available freelist - // with any pages not in the pending lists. - var a []pgid - for _, id := range f.getFreePageIDs() { - if !pcache[id] { - a = append(a, id) - } - } - - f.readIDs(a) -} - -// noSyncReload reads the freelist from pgids and filters out pending items. -func (f *freelist) noSyncReload(pgids []pgid) { - // Build a cache of only pending pages. - pcache := make(map[pgid]bool) - for _, txp := range f.pending { - for _, pendingID := range txp.ids { - pcache[pendingID] = true - } - } - - // Check each page in the freelist and build a new available freelist - // with any pages not in the pending lists. - var a []pgid - for _, id := range pgids { - if !pcache[id] { - a = append(a, id) - } - } - - f.readIDs(a) -} - -// reindex rebuilds the free cache based on available and pending free lists. 
-func (f *freelist) reindex() {
- ids := f.getFreePageIDs()
- f.cache = make(map[pgid]bool, len(ids))
- for _, id := range ids {
- f.cache[id] = true
- }
- for _, txp := range f.pending {
- for _, pendingID := range txp.ids {
- f.cache[pendingID] = true
- }
- }
-}
-
-// arrayMergeSpans tries to merge a list of pages (represented by pgids) with existing spans, using the array backend.
-func (f *freelist) arrayMergeSpans(ids pgids) {
- sort.Sort(ids)
- f.ids = pgids(f.ids).merge(ids)
-}
diff --git a/vendor/go.etcd.io/bbolt/freelist_hmap.go b/vendor/go.etcd.io/bbolt/freelist_hmap.go
deleted file mode 100644
index dbd67a1e..00000000
--- a/vendor/go.etcd.io/bbolt/freelist_hmap.go
+++ /dev/null
@@ -1,178 +0,0 @@
-package bbolt
-
-import "sort"
-
-// hashmapFreeCount returns the count of free pages (hashmap version)
-func (f *freelist) hashmapFreeCount() int {
- // use the forwardMap to get the total count
- count := 0
- for _, size := range f.forwardMap {
- count += int(size)
- }
- return count
-}
-
-// hashmapAllocate serves the same purpose as arrayAllocate, but uses a hashmap as the backend
-func (f *freelist) hashmapAllocate(txid txid, n int) pgid {
- if n == 0 {
- return 0
- }
-
- // if we have an exact size match, take the short path
- if bm, ok := f.freemaps[uint64(n)]; ok {
- for pid := range bm {
- // remove the span
- f.delSpan(pid, uint64(n))
-
- f.allocs[pid] = txid
-
- for i := pgid(0); i < pgid(n); i++ {
- delete(f.cache, pid+i)
- }
- return pid
- }
- }
-
- // look up the map to find a larger span
- for size, bm := range f.freemaps {
- if size < uint64(n) {
- continue
- }
-
- for pid := range bm {
- // remove the initial span
- f.delSpan(pid, size)
-
- f.allocs[pid] = txid
-
- remain := size - uint64(n)
-
- // add the remaining span
- f.addSpan(pid+pgid(n), remain)
-
- for i := pgid(0); i < pgid(n); i++ {
- delete(f.cache, pid+i)
- }
- return pid
- }
- }
-
- return 0
-}
-
-// hashmapReadIDs reads pgids as input and initializes the freelist (hashmap version)
-func (f *freelist) hashmapReadIDs(pgids []pgid) {
- f.init(pgids)
-
- // Rebuild the page cache.
- f.reindex()
-}
-
-// hashmapGetFreePageIDs returns the sorted free page ids
-func (f *freelist) hashmapGetFreePageIDs() []pgid {
- count := f.free_count()
- if count == 0 {
- return nil
- }
-
- m := make([]pgid, 0, count)
- for start, size := range f.forwardMap {
- for i := 0; i < int(size); i++ {
- m = append(m, start+pgid(i))
- }
- }
- sort.Sort(pgids(m))
-
- return m
-}
-
-// hashmapMergeSpans tries to merge a list of pages (represented by pgids) with existing spans
-func (f *freelist) hashmapMergeSpans(ids pgids) {
- for _, id := range ids {
- // try to see if we can merge and update
- f.mergeWithExistingSpan(id)
- }
-}
-
-// mergeWithExistingSpan merges pid into the existing free spans, trying to merge it backward and forward
-func (f *freelist) mergeWithExistingSpan(pid pgid) {
- prev := pid - 1
- next := pid + 1
-
- preSize, mergeWithPrev := f.backwardMap[prev]
- nextSize, mergeWithNext := f.forwardMap[next]
- newStart := pid
- newSize := uint64(1)
-
- if mergeWithPrev {
- // merge with previous span
- start := prev + 1 - pgid(preSize)
- f.delSpan(start, preSize)
-
- newStart -= pgid(preSize)
- newSize += preSize
- }
-
- if mergeWithNext {
- // merge with next span
- f.delSpan(next, nextSize)
- newSize += nextSize
- }
-
- f.addSpan(newStart, newSize)
-}
-
-func (f *freelist) addSpan(start pgid, size uint64) {
- f.backwardMap[start-1+pgid(size)] = size
- f.forwardMap[start] = size
- if _, ok := f.freemaps[size]; !ok {
- f.freemaps[size] = make(map[pgid]struct{})
- }
-
- f.freemaps[size][start] = struct{}{}
-}
-
-func (f *freelist) delSpan(start pgid, size uint64) {
- delete(f.forwardMap, start)
- delete(f.backwardMap, start+pgid(size-1))
- delete(f.freemaps[size], start)
- if len(f.freemaps[size]) == 0 {
- delete(f.freemaps, size)
- }
-}
-
-// init initializes the freelist from pgids (hashmap version).
-// pgids must be sorted.
-func (f *freelist) init(pgids []pgid) {
- if len(pgids) == 0 {
- return
- }
-
- size := uint64(1)
- start := pgids[0]
-
- if !sort.SliceIsSorted([]pgid(pgids), func(i, j int) bool { return pgids[i] < pgids[j] }) {
- panic("pgids not sorted")
- }
-
- f.freemaps = make(map[uint64]pidSet)
- f.forwardMap = make(map[pgid]uint64)
- f.backwardMap = make(map[pgid]uint64)
-
- for i := 1; i < len(pgids); i++ {
- // contiguous page
- if pgids[i] == pgids[i-1]+1 {
- size++
- } else {
- f.addSpan(start, size)
-
- size = 1
- start = pgids[i]
- }
- }
-
- // init the tail
- if size != 0 && start != 0 {
- f.addSpan(start, size)
- }
-}
diff --git a/vendor/go.etcd.io/bbolt/mlock_unix.go b/vendor/go.etcd.io/bbolt/mlock_unix.go
deleted file mode 100644
index 6a6c7b35..00000000
--- a/vendor/go.etcd.io/bbolt/mlock_unix.go
+++ /dev/null
@@ -1,36 +0,0 @@
-// +build !windows
-
-package bbolt
-
-import "golang.org/x/sys/unix"
-
-// mlock locks memory of db file
-func mlock(db *DB, fileSize int) error {
- sizeToLock := fileSize
- if sizeToLock > db.datasz {
- // Can't lock more than the mmapped slice
- sizeToLock = db.datasz
- }
- if err := unix.Mlock(db.dataref[:sizeToLock]); err != nil {
- return err
- }
- return nil
-}
-
-// munlock unlocks memory of db file
-func munlock(db *DB, fileSize int) error {
- if db.dataref == nil {
- return nil
- }
-
- sizeToUnlock := fileSize
- if sizeToUnlock > db.datasz {
- // Can't unlock more than the mmapped slice
- sizeToUnlock = db.datasz
- }
-
- if err := unix.Munlock(db.dataref[:sizeToUnlock]); err != nil {
- return err
- }
- return nil
-}
diff --git a/vendor/go.etcd.io/bbolt/mlock_windows.go b/vendor/go.etcd.io/bbolt/mlock_windows.go
deleted file mode 100644
index
b4a36a49..00000000 --- a/vendor/go.etcd.io/bbolt/mlock_windows.go +++ /dev/null @@ -1,11 +0,0 @@ -package bbolt - -// mlock locks memory of db file -func mlock(_ *DB, _ int) error { - panic("mlock is supported only on UNIX systems") -} - -//munlock unlocks memory of db file -func munlock(_ *DB, _ int) error { - panic("munlock is supported only on UNIX systems") -} diff --git a/vendor/go.etcd.io/bbolt/node.go b/vendor/go.etcd.io/bbolt/node.go deleted file mode 100644 index 73988b5c..00000000 --- a/vendor/go.etcd.io/bbolt/node.go +++ /dev/null @@ -1,602 +0,0 @@ -package bbolt - -import ( - "bytes" - "fmt" - "sort" - "unsafe" -) - -// node represents an in-memory, deserialized page. -type node struct { - bucket *Bucket - isLeaf bool - unbalanced bool - spilled bool - key []byte - pgid pgid - parent *node - children nodes - inodes inodes -} - -// root returns the top-level node this node is attached to. -func (n *node) root() *node { - if n.parent == nil { - return n - } - return n.parent.root() -} - -// minKeys returns the minimum number of inodes this node should have. -func (n *node) minKeys() int { - if n.isLeaf { - return 1 - } - return 2 -} - -// size returns the size of the node after serialization. -func (n *node) size() int { - sz, elsz := pageHeaderSize, n.pageElementSize() - for i := 0; i < len(n.inodes); i++ { - item := &n.inodes[i] - sz += elsz + uintptr(len(item.key)) + uintptr(len(item.value)) - } - return int(sz) -} - -// sizeLessThan returns true if the node is less than a given size. -// This is an optimization to avoid calculating a large node when we only need -// to know if it fits inside a certain page size. -func (n *node) sizeLessThan(v uintptr) bool { - sz, elsz := pageHeaderSize, n.pageElementSize() - for i := 0; i < len(n.inodes); i++ { - item := &n.inodes[i] - sz += elsz + uintptr(len(item.key)) + uintptr(len(item.value)) - if sz >= v { - return false - } - } - return true -} - -// pageElementSize returns the size of each page element based on the type of node. -func (n *node) pageElementSize() uintptr { - if n.isLeaf { - return leafPageElementSize - } - return branchPageElementSize -} - -// childAt returns the child node at a given index. -func (n *node) childAt(index int) *node { - if n.isLeaf { - panic(fmt.Sprintf("invalid childAt(%d) on a leaf node", index)) - } - return n.bucket.node(n.inodes[index].pgid, n) -} - -// childIndex returns the index of a given child node. -func (n *node) childIndex(child *node) int { - index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, child.key) != -1 }) - return index -} - -// numChildren returns the number of children. -func (n *node) numChildren() int { - return len(n.inodes) -} - -// nextSibling returns the next node with the same parent. -func (n *node) nextSibling() *node { - if n.parent == nil { - return nil - } - index := n.parent.childIndex(n) - if index >= n.parent.numChildren()-1 { - return nil - } - return n.parent.childAt(index + 1) -} - -// prevSibling returns the previous node with the same parent. -func (n *node) prevSibling() *node { - if n.parent == nil { - return nil - } - index := n.parent.childIndex(n) - if index == 0 { - return nil - } - return n.parent.childAt(index - 1) -} - -// put inserts a key/value. 
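put (below) and del both locate keys with sort.Search over the node's sorted inodes; a standalone sketch of that ordered-insert pattern on []string:

package main

import (
    "fmt"
    "sort"
)

// insert keeps keys sorted, replacing an exact match in place,
// mirroring the index/exact logic in node.put below.
func insert(keys []string, k string) []string {
    i := sort.Search(len(keys), func(j int) bool { return keys[j] >= k })
    exact := i < len(keys) && keys[i] == k
    if !exact {
        keys = append(keys, "")
        copy(keys[i+1:], keys[i:])
    }
    keys[i] = k
    return keys
}

func main() {
    fmt.Println(insert([]string{"a", "c", "d"}, "b")) // [a b c d]
    fmt.Println(insert([]string{"a", "c", "d"}, "c")) // [a c d]
}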
-func (n *node) put(oldKey, newKey, value []byte, pgid pgid, flags uint32) { - if pgid >= n.bucket.tx.meta.pgid { - panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", pgid, n.bucket.tx.meta.pgid)) - } else if len(oldKey) <= 0 { - panic("put: zero-length old key") - } else if len(newKey) <= 0 { - panic("put: zero-length new key") - } - - // Find insertion index. - index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, oldKey) != -1 }) - - // Add capacity and shift nodes if we don't have an exact match and need to insert. - exact := (len(n.inodes) > 0 && index < len(n.inodes) && bytes.Equal(n.inodes[index].key, oldKey)) - if !exact { - n.inodes = append(n.inodes, inode{}) - copy(n.inodes[index+1:], n.inodes[index:]) - } - - inode := &n.inodes[index] - inode.flags = flags - inode.key = newKey - inode.value = value - inode.pgid = pgid - _assert(len(inode.key) > 0, "put: zero-length inode key") -} - -// del removes a key from the node. -func (n *node) del(key []byte) { - // Find index of key. - index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, key) != -1 }) - - // Exit if the key isn't found. - if index >= len(n.inodes) || !bytes.Equal(n.inodes[index].key, key) { - return - } - - // Delete inode from the node. - n.inodes = append(n.inodes[:index], n.inodes[index+1:]...) - - // Mark the node as needing rebalancing. - n.unbalanced = true -} - -// read initializes the node from a page. -func (n *node) read(p *page) { - n.pgid = p.id - n.isLeaf = ((p.flags & leafPageFlag) != 0) - n.inodes = make(inodes, int(p.count)) - - for i := 0; i < int(p.count); i++ { - inode := &n.inodes[i] - if n.isLeaf { - elem := p.leafPageElement(uint16(i)) - inode.flags = elem.flags - inode.key = elem.key() - inode.value = elem.value() - } else { - elem := p.branchPageElement(uint16(i)) - inode.pgid = elem.pgid - inode.key = elem.key() - } - _assert(len(inode.key) > 0, "read: zero-length inode key") - } - - // Save first key so we can find the node in the parent when we spill. - if len(n.inodes) > 0 { - n.key = n.inodes[0].key - _assert(len(n.key) > 0, "read: zero-length node key") - } else { - n.key = nil - } -} - -// write writes the items onto one or more pages. -func (n *node) write(p *page) { - // Initialize page. - if n.isLeaf { - p.flags |= leafPageFlag - } else { - p.flags |= branchPageFlag - } - - if len(n.inodes) >= 0xFFFF { - panic(fmt.Sprintf("inode overflow: %d (pgid=%d)", len(n.inodes), p.id)) - } - p.count = uint16(len(n.inodes)) - - // Stop here if there are no items to write. - if p.count == 0 { - return - } - - // Loop over each item and write it to the page. - // off tracks the offset into p of the start of the next data. - off := unsafe.Sizeof(*p) + n.pageElementSize()*uintptr(len(n.inodes)) - for i, item := range n.inodes { - _assert(len(item.key) > 0, "write: zero-length inode key") - - // Create a slice to write into of needed size and advance - // byte pointer for next iteration. - sz := len(item.key) + len(item.value) - b := unsafeByteSlice(unsafe.Pointer(p), off, 0, sz) - off += uintptr(sz) - - // Write the page element. 
- if n.isLeaf { - elem := p.leafPageElement(uint16(i)) - elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))) - elem.flags = item.flags - elem.ksize = uint32(len(item.key)) - elem.vsize = uint32(len(item.value)) - } else { - elem := p.branchPageElement(uint16(i)) - elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))) - elem.ksize = uint32(len(item.key)) - elem.pgid = item.pgid - _assert(elem.pgid != p.id, "write: circular dependency occurred") - } - - // Write data for the element to the end of the page. - l := copy(b, item.key) - copy(b[l:], item.value) - } - - // DEBUG ONLY: n.dump() -} - -// split breaks up a node into multiple smaller nodes, if appropriate. -// This should only be called from the spill() function. -func (n *node) split(pageSize uintptr) []*node { - var nodes []*node - - node := n - for { - // Split node into two. - a, b := node.splitTwo(pageSize) - nodes = append(nodes, a) - - // If we can't split then exit the loop. - if b == nil { - break - } - - // Set node to b so it gets split on the next iteration. - node = b - } - - return nodes -} - -// splitTwo breaks up a node into two smaller nodes, if appropriate. -// This should only be called from the split() function. -func (n *node) splitTwo(pageSize uintptr) (*node, *node) { - // Ignore the split if the page doesn't have at least enough nodes for - // two pages or if the nodes can fit in a single page. - if len(n.inodes) <= (minKeysPerPage*2) || n.sizeLessThan(pageSize) { - return n, nil - } - - // Determine the threshold before starting a new node. - var fillPercent = n.bucket.FillPercent - if fillPercent < minFillPercent { - fillPercent = minFillPercent - } else if fillPercent > maxFillPercent { - fillPercent = maxFillPercent - } - threshold := int(float64(pageSize) * fillPercent) - - // Determine split position and sizes of the two pages. - splitIndex, _ := n.splitIndex(threshold) - - // Split node into two separate nodes. - // If there's no parent then we'll need to create one. - if n.parent == nil { - n.parent = &node{bucket: n.bucket, children: []*node{n}} - } - - // Create a new node and add it to the parent. - next := &node{bucket: n.bucket, isLeaf: n.isLeaf, parent: n.parent} - n.parent.children = append(n.parent.children, next) - - // Split inodes across two nodes. - next.inodes = n.inodes[splitIndex:] - n.inodes = n.inodes[:splitIndex] - - // Update the statistics. - n.bucket.tx.stats.Split++ - - return n, next -} - -// splitIndex finds the position where a page will fill a given threshold. -// It returns the index as well as the size of the first page. -// This is only be called from split(). -func (n *node) splitIndex(threshold int) (index, sz uintptr) { - sz = pageHeaderSize - - // Loop until we only have the minimum number of keys required for the second page. - for i := 0; i < len(n.inodes)-minKeysPerPage; i++ { - index = uintptr(i) - inode := n.inodes[i] - elsize := n.pageElementSize() + uintptr(len(inode.key)) + uintptr(len(inode.value)) - - // If we have at least the minimum number of keys and adding another - // node would put us over the threshold then exit and return. - if index >= minKeysPerPage && sz+elsize > uintptr(threshold) { - break - } - - // Add the element size to the total size. - sz += elsize - } - - return -} - -// spill writes the nodes to dirty pages and splits nodes as it goes. -// Returns an error if dirty pages cannot be allocated. 
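Before spill (below) writes nodes out, split and splitIndex above decide where to cut each node: serialized element sizes accumulate until the fill threshold would be crossed, always leaving minKeysPerPage keys for the second page. A standalone sketch with fabricated sizes and a stand-in header size:

package main

import "fmt"

const (
    minKeysPerPage = 2
    headerSize     = 16 // stand-in for pageHeaderSize
)

// splitIndex mirrors node.splitIndex above: stop once we hold at least
// minKeysPerPage keys and the next element would cross the threshold.
func splitIndex(sizes []int, threshold int) (index, sz int) {
    sz = headerSize
    for i := 0; i < len(sizes)-minKeysPerPage; i++ {
        index = i
        if i >= minKeysPerPage && sz+sizes[i] > threshold {
            break
        }
        sz += sizes[i]
    }
    return index, sz
}

func main() {
    idx, sz := splitIndex([]int{120, 140, 160, 180, 200}, 400)
    fmt.Println(idx, sz) // split point and bytes kept in the first page
}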
-func (n *node) spill() error { - var tx = n.bucket.tx - if n.spilled { - return nil - } - - // Spill child nodes first. Child nodes can materialize sibling nodes in - // the case of split-merge so we cannot use a range loop. We have to check - // the children size on every loop iteration. - sort.Sort(n.children) - for i := 0; i < len(n.children); i++ { - if err := n.children[i].spill(); err != nil { - return err - } - } - - // We no longer need the child list because it's only used for spill tracking. - n.children = nil - - // Split nodes into appropriate sizes. The first node will always be n. - var nodes = n.split(uintptr(tx.db.pageSize)) - for _, node := range nodes { - // Add node's page to the freelist if it's not new. - if node.pgid > 0 { - tx.db.freelist.free(tx.meta.txid, tx.page(node.pgid)) - node.pgid = 0 - } - - // Allocate contiguous space for the node. - p, err := tx.allocate((node.size() + tx.db.pageSize - 1) / tx.db.pageSize) - if err != nil { - return err - } - - // Write the node. - if p.id >= tx.meta.pgid { - panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", p.id, tx.meta.pgid)) - } - node.pgid = p.id - node.write(p) - node.spilled = true - - // Insert into parent inodes. - if node.parent != nil { - var key = node.key - if key == nil { - key = node.inodes[0].key - } - - node.parent.put(key, node.inodes[0].key, nil, node.pgid, 0) - node.key = node.inodes[0].key - _assert(len(node.key) > 0, "spill: zero-length node key") - } - - // Update the statistics. - tx.stats.Spill++ - } - - // If the root node split and created a new root then we need to spill that - // as well. We'll clear out the children to make sure it doesn't try to respill. - if n.parent != nil && n.parent.pgid == 0 { - n.children = nil - return n.parent.spill() - } - - return nil -} - -// rebalance attempts to combine the node with sibling nodes if the node fill -// size is below a threshold or if there are not enough keys. -func (n *node) rebalance() { - if !n.unbalanced { - return - } - n.unbalanced = false - - // Update statistics. - n.bucket.tx.stats.Rebalance++ - - // Ignore if node is above threshold (25%) and has enough keys. - var threshold = n.bucket.tx.db.pageSize / 4 - if n.size() > threshold && len(n.inodes) > n.minKeys() { - return - } - - // Root node has special handling. - if n.parent == nil { - // If root node is a branch and only has one node then collapse it. - if !n.isLeaf && len(n.inodes) == 1 { - // Move root's child up. - child := n.bucket.node(n.inodes[0].pgid, n) - n.isLeaf = child.isLeaf - n.inodes = child.inodes[:] - n.children = child.children - - // Reparent all child nodes being moved. - for _, inode := range n.inodes { - if child, ok := n.bucket.nodes[inode.pgid]; ok { - child.parent = n - } - } - - // Remove old child. - child.parent = nil - delete(n.bucket.nodes, child.pgid) - child.free() - } - - return - } - - // If node has no keys then just remove it. - if n.numChildren() == 0 { - n.parent.del(n.key) - n.parent.removeChild(n) - delete(n.bucket.nodes, n.pgid) - n.free() - n.parent.rebalance() - return - } - - _assert(n.parent.numChildren() > 1, "parent must have at least 2 children") - - // Destination node is right sibling if idx == 0, otherwise left sibling. - var target *node - var useNextSibling = (n.parent.childIndex(n) == 0) - if useNextSibling { - target = n.nextSibling() - } else { - target = n.prevSibling() - } - - // If both this node and the target node are too small then merge them. - if useNextSibling { - // Reparent all child nodes being moved. 
- for _, inode := range target.inodes { - if child, ok := n.bucket.nodes[inode.pgid]; ok { - child.parent.removeChild(child) - child.parent = n - child.parent.children = append(child.parent.children, child) - } - } - - // Copy over inodes from target and remove target. - n.inodes = append(n.inodes, target.inodes...) - n.parent.del(target.key) - n.parent.removeChild(target) - delete(n.bucket.nodes, target.pgid) - target.free() - } else { - // Reparent all child nodes being moved. - for _, inode := range n.inodes { - if child, ok := n.bucket.nodes[inode.pgid]; ok { - child.parent.removeChild(child) - child.parent = target - child.parent.children = append(child.parent.children, child) - } - } - - // Copy over inodes to target and remove node. - target.inodes = append(target.inodes, n.inodes...) - n.parent.del(n.key) - n.parent.removeChild(n) - delete(n.bucket.nodes, n.pgid) - n.free() - } - - // Either this node or the target node was deleted from the parent so rebalance it. - n.parent.rebalance() -} - -// removes a node from the list of in-memory children. -// This does not affect the inodes. -func (n *node) removeChild(target *node) { - for i, child := range n.children { - if child == target { - n.children = append(n.children[:i], n.children[i+1:]...) - return - } - } -} - -// dereference causes the node to copy all its inode key/value references to heap memory. -// This is required when the mmap is reallocated so inodes are not pointing to stale data. -func (n *node) dereference() { - if n.key != nil { - key := make([]byte, len(n.key)) - copy(key, n.key) - n.key = key - _assert(n.pgid == 0 || len(n.key) > 0, "dereference: zero-length node key on existing node") - } - - for i := range n.inodes { - inode := &n.inodes[i] - - key := make([]byte, len(inode.key)) - copy(key, inode.key) - inode.key = key - _assert(len(inode.key) > 0, "dereference: zero-length inode key") - - value := make([]byte, len(inode.value)) - copy(value, inode.value) - inode.value = value - } - - // Recursively dereference children. - for _, child := range n.children { - child.dereference() - } - - // Update statistics. - n.bucket.tx.stats.NodeDeref++ -} - -// free adds the node's underlying page to the freelist. -func (n *node) free() { - if n.pgid != 0 { - n.bucket.tx.db.freelist.free(n.bucket.tx.meta.txid, n.bucket.tx.page(n.pgid)) - n.pgid = 0 - } -} - -// dump writes the contents of the node to STDERR for debugging purposes. -/* -func (n *node) dump() { - // Write node header. - var typ = "branch" - if n.isLeaf { - typ = "leaf" - } - warnf("[NODE %d {type=%s count=%d}]", n.pgid, typ, len(n.inodes)) - - // Write out abbreviated version of each item. - for _, item := range n.inodes { - if n.isLeaf { - if item.flags&bucketLeafFlag != 0 { - bucket := (*bucket)(unsafe.Pointer(&item.value[0])) - warnf("+L %08x -> (bucket root=%d)", trunc(item.key, 4), bucket.root) - } else { - warnf("+L %08x -> %08x", trunc(item.key, 4), trunc(item.value, 4)) - } - } else { - warnf("+B %08x -> pgid=%d", trunc(item.key, 4), item.pgid) - } - } - warn("") -} -*/ - -type nodes []*node - -func (s nodes) Len() int { return len(s) } -func (s nodes) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s nodes) Less(i, j int) bool { - return bytes.Compare(s[i].inodes[0].key, s[j].inodes[0].key) == -1 -} - -// inode represents an internal node inside of a node. -// It can be used to point to elements in a page or point -// to an element which hasn't been added to a page yet. 
-type inode struct { - flags uint32 - pgid pgid - key []byte - value []byte -} - -type inodes []inode diff --git a/vendor/go.etcd.io/bbolt/page.go b/vendor/go.etcd.io/bbolt/page.go deleted file mode 100644 index c9a158fb..00000000 --- a/vendor/go.etcd.io/bbolt/page.go +++ /dev/null @@ -1,204 +0,0 @@ -package bbolt - -import ( - "fmt" - "os" - "sort" - "unsafe" -) - -const pageHeaderSize = unsafe.Sizeof(page{}) - -const minKeysPerPage = 2 - -const branchPageElementSize = unsafe.Sizeof(branchPageElement{}) -const leafPageElementSize = unsafe.Sizeof(leafPageElement{}) - -const ( - branchPageFlag = 0x01 - leafPageFlag = 0x02 - metaPageFlag = 0x04 - freelistPageFlag = 0x10 -) - -const ( - bucketLeafFlag = 0x01 -) - -type pgid uint64 - -type page struct { - id pgid - flags uint16 - count uint16 - overflow uint32 -} - -// typ returns a human readable page type string used for debugging. -func (p *page) typ() string { - if (p.flags & branchPageFlag) != 0 { - return "branch" - } else if (p.flags & leafPageFlag) != 0 { - return "leaf" - } else if (p.flags & metaPageFlag) != 0 { - return "meta" - } else if (p.flags & freelistPageFlag) != 0 { - return "freelist" - } - return fmt.Sprintf("unknown<%02x>", p.flags) -} - -// meta returns a pointer to the metadata section of the page. -func (p *page) meta() *meta { - return (*meta)(unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))) -} - -// leafPageElement retrieves the leaf node by index -func (p *page) leafPageElement(index uint16) *leafPageElement { - return (*leafPageElement)(unsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p), - leafPageElementSize, int(index))) -} - -// leafPageElements retrieves a list of leaf nodes. -func (p *page) leafPageElements() []leafPageElement { - if p.count == 0 { - return nil - } - var elems []leafPageElement - data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) - unsafeSlice(unsafe.Pointer(&elems), data, int(p.count)) - return elems -} - -// branchPageElement retrieves the branch node by index -func (p *page) branchPageElement(index uint16) *branchPageElement { - return (*branchPageElement)(unsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p), - unsafe.Sizeof(branchPageElement{}), int(index))) -} - -// branchPageElements retrieves a list of branch nodes. -func (p *page) branchPageElements() []branchPageElement { - if p.count == 0 { - return nil - } - var elems []branchPageElement - data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) - unsafeSlice(unsafe.Pointer(&elems), data, int(p.count)) - return elems -} - -// dump writes n bytes of the page to STDERR as hex output. -func (p *page) hexdump(n int) { - buf := unsafeByteSlice(unsafe.Pointer(p), 0, 0, n) - fmt.Fprintf(os.Stderr, "%x\n", buf) -} - -type pages []*page - -func (s pages) Len() int { return len(s) } -func (s pages) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s pages) Less(i, j int) bool { return s[i].id < s[j].id } - -// branchPageElement represents a node on a branch page. -type branchPageElement struct { - pos uint32 - ksize uint32 - pgid pgid -} - -// key returns a byte slice of the node key. -func (n *branchPageElement) key() []byte { - return unsafeByteSlice(unsafe.Pointer(n), 0, int(n.pos), int(n.pos)+int(n.ksize)) -} - -// leafPageElement represents a node on a leaf page. -type leafPageElement struct { - flags uint32 - pos uint32 - ksize uint32 - vsize uint32 -} - -// key returns a byte slice of the node key. 
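leafPageElement below stores pos/ksize/vsize as offsets relative to the element itself, with the key immediately followed by the value. A standalone sketch of that slicing with simplified types (int offsets, a flat byte buffer):

package main

import "fmt"

type elem struct {
    pos, ksize, vsize int // offsets relative to the element start
}

// key and value mirror leafPageElement.key/value below: the data sits
// at the element start + pos, key first, value immediately after.
func key(buf []byte, off int, e elem) []byte {
    return buf[off+e.pos : off+e.pos+e.ksize]
}

func value(buf []byte, off int, e elem) []byte {
    i := off + e.pos + e.ksize
    return buf[i : i+e.vsize]
}

func main() {
    buf := []byte("....foobar") // element at offset 0, data at pos 4
    e := elem{pos: 4, ksize: 3, vsize: 3}
    fmt.Printf("%s %s\n", key(buf, 0, e), value(buf, 0, e)) // foo bar
}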
-func (n *leafPageElement) key() []byte { - i := int(n.pos) - j := i + int(n.ksize) - return unsafeByteSlice(unsafe.Pointer(n), 0, i, j) -} - -// value returns a byte slice of the node value. -func (n *leafPageElement) value() []byte { - i := int(n.pos) + int(n.ksize) - j := i + int(n.vsize) - return unsafeByteSlice(unsafe.Pointer(n), 0, i, j) -} - -// PageInfo represents human readable information about a page. -type PageInfo struct { - ID int - Type string - Count int - OverflowCount int -} - -type pgids []pgid - -func (s pgids) Len() int { return len(s) } -func (s pgids) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s pgids) Less(i, j int) bool { return s[i] < s[j] } - -// merge returns the sorted union of a and b. -func (a pgids) merge(b pgids) pgids { - // Return the opposite slice if one is nil. - if len(a) == 0 { - return b - } - if len(b) == 0 { - return a - } - merged := make(pgids, len(a)+len(b)) - mergepgids(merged, a, b) - return merged -} - -// mergepgids copies the sorted union of a and b into dst. -// If dst is too small, it panics. -func mergepgids(dst, a, b pgids) { - if len(dst) < len(a)+len(b) { - panic(fmt.Errorf("mergepgids bad len %d < %d + %d", len(dst), len(a), len(b))) - } - // Copy in the opposite slice if one is nil. - if len(a) == 0 { - copy(dst, b) - return - } - if len(b) == 0 { - copy(dst, a) - return - } - - // Merged will hold all elements from both lists. - merged := dst[:0] - - // Assign lead to the slice with a lower starting value, follow to the higher value. - lead, follow := a, b - if b[0] < a[0] { - lead, follow = b, a - } - - // Continue while there are elements in the lead. - for len(lead) > 0 { - // Merge largest prefix of lead that is ahead of follow[0]. - n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] }) - merged = append(merged, lead[:n]...) - if n >= len(lead) { - break - } - - // Swap lead and follow. - lead, follow = follow, lead[n:] - } - - // Append what's left in follow. - _ = append(merged, follow...) -} diff --git a/vendor/go.etcd.io/bbolt/tx.go b/vendor/go.etcd.io/bbolt/tx.go deleted file mode 100644 index 869d4120..00000000 --- a/vendor/go.etcd.io/bbolt/tx.go +++ /dev/null @@ -1,723 +0,0 @@ -package bbolt - -import ( - "fmt" - "io" - "os" - "sort" - "strings" - "time" - "unsafe" -) - -// txid represents the internal transaction identifier. -type txid uint64 - -// Tx represents a read-only or read/write transaction on the database. -// Read-only transactions can be used for retrieving values for keys and creating cursors. -// Read/write transactions can create and remove buckets and create and remove keys. -// -// IMPORTANT: You must commit or rollback transactions when you are done with -// them. Pages can not be reclaimed by the writer until no more transactions -// are using them. A long running read transaction can cause the database to -// quickly grow. -type Tx struct { - writable bool - managed bool - db *DB - meta *meta - root Bucket - pages map[pgid]*page - stats TxStats - commitHandlers []func() - - // WriteFlag specifies the flag for write-related methods like WriteTo(). - // Tx opens the database file with the specified flag to copy the data. - // - // By default, the flag is unset, which works well for mostly in-memory - // workloads. For databases that are much larger than available RAM, - // set the flag to syscall.O_DIRECT to avoid trashing the page cache. - WriteFlag int -} - -// init initializes the transaction. 
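The unmanaged lifecycle described in the Tx documentation above pairs every Begin with a Commit or Rollback; a minimal sketch (file and bucket names illustrative):

package main

import (
    "log"

    bolt "go.etcd.io/bbolt"
)

func main() {
    db, err := bolt.Open("example.db", 0o600, nil)
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()

    tx, err := db.Begin(true) // writable
    if err != nil {
        log.Fatal(err)
    }
    // After a successful Commit, Rollback just returns ErrTxClosed,
    // so deferring it guarantees the tx is always closed.
    defer func() { _ = tx.Rollback() }()

    b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
    if err != nil {
        log.Fatal(err)
    }
    if err := b.Put([]byte("k"), []byte("v")); err != nil {
        log.Fatal(err)
    }
    if err := tx.Commit(); err != nil {
        log.Fatal(err)
    }
}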
-func (tx *Tx) init(db *DB) { - tx.db = db - tx.pages = nil - - // Copy the meta page since it can be changed by the writer. - tx.meta = &meta{} - db.meta().copy(tx.meta) - - // Copy over the root bucket. - tx.root = newBucket(tx) - tx.root.bucket = &bucket{} - *tx.root.bucket = tx.meta.root - - // Increment the transaction id and add a page cache for writable transactions. - if tx.writable { - tx.pages = make(map[pgid]*page) - tx.meta.txid += txid(1) - } -} - -// ID returns the transaction id. -func (tx *Tx) ID() int { - return int(tx.meta.txid) -} - -// DB returns a reference to the database that created the transaction. -func (tx *Tx) DB() *DB { - return tx.db -} - -// Size returns current database size in bytes as seen by this transaction. -func (tx *Tx) Size() int64 { - return int64(tx.meta.pgid) * int64(tx.db.pageSize) -} - -// Writable returns whether the transaction can perform write operations. -func (tx *Tx) Writable() bool { - return tx.writable -} - -// Cursor creates a cursor associated with the root bucket. -// All items in the cursor will return a nil value because all root bucket keys point to buckets. -// The cursor is only valid as long as the transaction is open. -// Do not use a cursor after the transaction is closed. -func (tx *Tx) Cursor() *Cursor { - return tx.root.Cursor() -} - -// Stats retrieves a copy of the current transaction statistics. -func (tx *Tx) Stats() TxStats { - return tx.stats -} - -// Bucket retrieves a bucket by name. -// Returns nil if the bucket does not exist. -// The bucket instance is only valid for the lifetime of the transaction. -func (tx *Tx) Bucket(name []byte) *Bucket { - return tx.root.Bucket(name) -} - -// CreateBucket creates a new bucket. -// Returns an error if the bucket already exists, if the bucket name is blank, or if the bucket name is too long. -// The bucket instance is only valid for the lifetime of the transaction. -func (tx *Tx) CreateBucket(name []byte) (*Bucket, error) { - return tx.root.CreateBucket(name) -} - -// CreateBucketIfNotExists creates a new bucket if it doesn't already exist. -// Returns an error if the bucket name is blank, or if the bucket name is too long. -// The bucket instance is only valid for the lifetime of the transaction. -func (tx *Tx) CreateBucketIfNotExists(name []byte) (*Bucket, error) { - return tx.root.CreateBucketIfNotExists(name) -} - -// DeleteBucket deletes a bucket. -// Returns an error if the bucket cannot be found or if the key represents a non-bucket value. -func (tx *Tx) DeleteBucket(name []byte) error { - return tx.root.DeleteBucket(name) -} - -// ForEach executes a function for each bucket in the root. -// If the provided function returns an error then the iteration is stopped and -// the error is returned to the caller. -func (tx *Tx) ForEach(fn func(name []byte, b *Bucket) error) error { - return tx.root.ForEach(func(k, v []byte) error { - return fn(k, tx.root.Bucket(k)) - }) -} - -// OnCommit adds a handler function to be executed after the transaction successfully commits. -func (tx *Tx) OnCommit(fn func()) { - tx.commitHandlers = append(tx.commitHandlers, fn) -} - -// Commit writes all changes to disk and updates the meta page. -// Returns an error if a disk write error occurs, or if Commit is -// called on a read-only transaction. 
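ForEach and OnCommit above compose naturally with the managed helpers: the registered hook fires only after Commit (below) succeeds. A small sketch (file name illustrative):

package main

import (
    "fmt"
    "log"

    bolt "go.etcd.io/bbolt"
)

// listBuckets walks every top-level bucket and registers a hook that
// runs only if the surrounding transaction commits.
func listBuckets(db *bolt.DB) error {
    return db.Update(func(tx *bolt.Tx) error {
        tx.OnCommit(func() { log.Println("committed") })
        return tx.ForEach(func(name []byte, b *bolt.Bucket) error {
            fmt.Printf("bucket %s\n", name)
            return nil
        })
    })
}

func main() {
    db, err := bolt.Open("example.db", 0o600, nil)
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()

    if err := listBuckets(db); err != nil {
        log.Fatal(err)
    }
}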
-func (tx *Tx) Commit() error {
-	_assert(!tx.managed, "managed tx commit not allowed")
-	if tx.db == nil {
-		return ErrTxClosed
-	} else if !tx.writable {
-		return ErrTxNotWritable
-	}
-
-	// TODO(benbjohnson): Use vectorized I/O to write out dirty pages.
-
-	// Rebalance nodes which have had deletions.
-	var startTime = time.Now()
-	tx.root.rebalance()
-	if tx.stats.Rebalance > 0 {
-		tx.stats.RebalanceTime += time.Since(startTime)
-	}
-
-	// Spill data onto dirty pages.
-	startTime = time.Now()
-	if err := tx.root.spill(); err != nil {
-		tx.rollback()
-		return err
-	}
-	tx.stats.SpillTime += time.Since(startTime)
-
-	// Free the old root bucket.
-	tx.meta.root.root = tx.root.root
-
-	// Free the old freelist because commit writes out a fresh freelist.
-	if tx.meta.freelist != pgidNoFreelist {
-		tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist))
-	}
-
-	if !tx.db.NoFreelistSync {
-		err := tx.commitFreelist()
-		if err != nil {
-			return err
-		}
-	} else {
-		tx.meta.freelist = pgidNoFreelist
-	}
-
-	// Write dirty pages to disk.
-	startTime = time.Now()
-	if err := tx.write(); err != nil {
-		tx.rollback()
-		return err
-	}
-
-	// If strict mode is enabled then perform a consistency check.
-	if tx.db.StrictMode {
-		ch := tx.Check()
-		var errs []string
-		for {
-			err, ok := <-ch
-			if !ok {
-				break
-			}
-			errs = append(errs, err.Error())
-		}
-		if len(errs) > 0 {
-			panic("check fail: " + strings.Join(errs, "\n"))
-		}
-	}
-
-	// Write meta to disk.
-	if err := tx.writeMeta(); err != nil {
-		tx.rollback()
-		return err
-	}
-	tx.stats.WriteTime += time.Since(startTime)
-
-	// Finalize the transaction.
-	tx.close()
-
-	// Execute commit handlers now that the locks have been removed.
-	for _, fn := range tx.commitHandlers {
-		fn()
-	}
-
-	return nil
-}
-
-func (tx *Tx) commitFreelist() error {
-	// Allocate new pages for the new free list. This will overestimate
-	// the size of the freelist but not underestimate the size (which would be bad).
-	opgid := tx.meta.pgid
-	p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1)
-	if err != nil {
-		tx.rollback()
-		return err
-	}
-	if err := tx.db.freelist.write(p); err != nil {
-		tx.rollback()
-		return err
-	}
-	tx.meta.freelist = p.id
-	// If the high water mark has moved up then attempt to grow the database.
-	if tx.meta.pgid > opgid {
-		if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil {
-			tx.rollback()
-			return err
-		}
-	}
-
-	return nil
-}
-
-// Rollback closes the transaction and ignores all previous updates. Read-only
-// transactions must be rolled back and not committed.
-func (tx *Tx) Rollback() error {
-	_assert(!tx.managed, "managed tx rollback not allowed")
-	if tx.db == nil {
-		return ErrTxClosed
-	}
-	tx.nonPhysicalRollback()
-	return nil
-}
-
-// nonPhysicalRollback is called when user calls Rollback directly, in this case we do not need to reload the free pages from disk.
-func (tx *Tx) nonPhysicalRollback() {
-	if tx.db == nil {
-		return
-	}
-	if tx.writable {
-		tx.db.freelist.rollback(tx.meta.txid)
-	}
-	tx.close()
-}
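Seen from a caller, the Commit and Rollback paths above form a simple contract: every transaction must end in exactly one of the two, or its pages can never be reclaimed. A minimal sketch against bbolt's public API; the database path and bucket/key names are illustrative only:

```go
package main

import (
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	db, err := bolt.Open("app.db", 0o600, nil) // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	tx, err := db.Begin(true) // writable transaction
	if err != nil {
		log.Fatal(err)
	}
	// Rollback after a successful Commit is a harmless no-op, so deferring
	// it guarantees the transaction is always closed on every code path.
	defer tx.Rollback()

	b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
	if err != nil {
		log.Fatal(err)
	}
	if err := b.Put([]byte("k"), []byte("v")); err != nil {
		log.Fatal(err)
	}
	if err := tx.Commit(); err != nil {
		log.Fatal(err)
	}
}
```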
-// rollback needs to reload the free pages from disk in case some system error happens like fsync error.
-func (tx *Tx) rollback() {
-	if tx.db == nil {
-		return
-	}
-	if tx.writable {
-		tx.db.freelist.rollback(tx.meta.txid)
-		if !tx.db.hasSyncedFreelist() {
-			// Reconstruct free page list by scanning the DB to get the whole free page list.
-			// Note: scanning the whole db is heavy if your db size is large in NoSyncFreeList mode.
-			tx.db.freelist.noSyncReload(tx.db.freepages())
-		} else {
-			// Read free page list from freelist page.
-			tx.db.freelist.reload(tx.db.page(tx.db.meta().freelist))
-		}
-	}
-	tx.close()
-}
-
-func (tx *Tx) close() {
-	if tx.db == nil {
-		return
-	}
-	if tx.writable {
-		// Grab freelist stats.
-		var freelistFreeN = tx.db.freelist.free_count()
-		var freelistPendingN = tx.db.freelist.pending_count()
-		var freelistAlloc = tx.db.freelist.size()
-
-		// Remove transaction ref & writer lock.
-		tx.db.rwtx = nil
-		tx.db.rwlock.Unlock()
-
-		// Merge statistics.
-		tx.db.statlock.Lock()
-		tx.db.stats.FreePageN = freelistFreeN
-		tx.db.stats.PendingPageN = freelistPendingN
-		tx.db.stats.FreeAlloc = (freelistFreeN + freelistPendingN) * tx.db.pageSize
-		tx.db.stats.FreelistInuse = freelistAlloc
-		tx.db.stats.TxStats.add(&tx.stats)
-		tx.db.statlock.Unlock()
-	} else {
-		tx.db.removeTx(tx)
-	}
-
-	// Clear all references.
-	tx.db = nil
-	tx.meta = nil
-	tx.root = Bucket{tx: tx}
-	tx.pages = nil
-}
-
-// Copy writes the entire database to a writer.
-// This function exists for backwards compatibility.
-//
-// Deprecated: Use WriteTo() instead.
-func (tx *Tx) Copy(w io.Writer) error {
-	_, err := tx.WriteTo(w)
-	return err
-}
-
-// WriteTo writes the entire database to a writer.
-// If err == nil then exactly tx.Size() bytes will be written into the writer.
-func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) {
-	// Attempt to open reader with WriteFlag
-	f, err := tx.db.openFile(tx.db.path, os.O_RDONLY|tx.WriteFlag, 0)
-	if err != nil {
-		return 0, err
-	}
-	defer func() {
-		if cerr := f.Close(); err == nil {
-			err = cerr
-		}
-	}()
-
-	// Generate a meta page. We use the same page data for both meta pages.
-	buf := make([]byte, tx.db.pageSize)
-	page := (*page)(unsafe.Pointer(&buf[0]))
-	page.flags = metaPageFlag
-	*page.meta() = *tx.meta
-
-	// Write meta 0.
-	page.id = 0
-	page.meta().checksum = page.meta().sum64()
-	nn, err := w.Write(buf)
-	n += int64(nn)
-	if err != nil {
-		return n, fmt.Errorf("meta 0 copy: %s", err)
-	}
-
-	// Write meta 1 with a lower transaction id.
-	page.id = 1
-	page.meta().txid -= 1
-	page.meta().checksum = page.meta().sum64()
-	nn, err = w.Write(buf)
-	n += int64(nn)
-	if err != nil {
-		return n, fmt.Errorf("meta 1 copy: %s", err)
-	}
-
-	// Move past the meta pages in the file.
-	if _, err := f.Seek(int64(tx.db.pageSize*2), io.SeekStart); err != nil {
-		return n, fmt.Errorf("seek: %s", err)
-	}
-
-	// Copy data pages.
-	wn, err := io.CopyN(w, f, tx.Size()-int64(tx.db.pageSize*2))
-	n += wn
-	if err != nil {
-		return n, err
-	}
-
-	return n, nil
-}
-
-// CopyFile copies the entire database to file at the given path.
-// A reader transaction is maintained during the copy so it is safe to continue
-// using the database while a copy is in progress.
-func (tx *Tx) CopyFile(path string, mode os.FileMode) error {
-	f, err := tx.db.openFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode)
-	if err != nil {
-		return err
-	}
-
-	_, err = tx.WriteTo(f)
-	if err != nil {
-		_ = f.Close()
-		return err
-	}
-	return f.Close()
-}
-
-// Check performs several consistency checks on the database for this transaction.
-// An error is returned if any inconsistency is found.
-//
-// It can be safely run concurrently on a writable transaction. However, this
-// incurs a high cost for large databases and databases with a lot of subbuckets
-// because of caching.
This overhead can be removed if running on a read-only -// transaction, however, it is not safe to execute other writer transactions at -// the same time. -func (tx *Tx) Check() <-chan error { - ch := make(chan error) - go tx.check(ch) - return ch -} - -func (tx *Tx) check(ch chan error) { - // Force loading free list if opened in ReadOnly mode. - tx.db.loadFreelist() - - // Check if any pages are double freed. - freed := make(map[pgid]bool) - all := make([]pgid, tx.db.freelist.count()) - tx.db.freelist.copyall(all) - for _, id := range all { - if freed[id] { - ch <- fmt.Errorf("page %d: already freed", id) - } - freed[id] = true - } - - // Track every reachable page. - reachable := make(map[pgid]*page) - reachable[0] = tx.page(0) // meta0 - reachable[1] = tx.page(1) // meta1 - if tx.meta.freelist != pgidNoFreelist { - for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ { - reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist) - } - } - - // Recursively check buckets. - tx.checkBucket(&tx.root, reachable, freed, ch) - - // Ensure all pages below high water mark are either reachable or freed. - for i := pgid(0); i < tx.meta.pgid; i++ { - _, isReachable := reachable[i] - if !isReachable && !freed[i] { - ch <- fmt.Errorf("page %d: unreachable unfreed", int(i)) - } - } - - // Close the channel to signal completion. - close(ch) -} - -func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bool, ch chan error) { - // Ignore inline buckets. - if b.root == 0 { - return - } - - // Check every page used by this bucket. - b.tx.forEachPage(b.root, 0, func(p *page, _ int) { - if p.id > tx.meta.pgid { - ch <- fmt.Errorf("page %d: out of bounds: %d", int(p.id), int(b.tx.meta.pgid)) - } - - // Ensure each page is only referenced once. - for i := pgid(0); i <= pgid(p.overflow); i++ { - var id = p.id + i - if _, ok := reachable[id]; ok { - ch <- fmt.Errorf("page %d: multiple references", int(id)) - } - reachable[id] = p - } - - // We should only encounter un-freed leaf and branch pages. - if freed[p.id] { - ch <- fmt.Errorf("page %d: reachable freed", int(p.id)) - } else if (p.flags&branchPageFlag) == 0 && (p.flags&leafPageFlag) == 0 { - ch <- fmt.Errorf("page %d: invalid type: %s", int(p.id), p.typ()) - } - }) - - // Check each bucket within this bucket. - _ = b.ForEach(func(k, v []byte) error { - if child := b.Bucket(k); child != nil { - tx.checkBucket(child, reachable, freed, ch) - } - return nil - }) -} - -// allocate returns a contiguous block of memory starting at a given page. -func (tx *Tx) allocate(count int) (*page, error) { - p, err := tx.db.allocate(tx.meta.txid, count) - if err != nil { - return nil, err - } - - // Save to our page cache. - tx.pages[p.id] = p - - // Update statistics. - tx.stats.PageCount += count - tx.stats.PageAlloc += count * tx.db.pageSize - - return p, nil -} - -// write writes any dirty pages to disk. -func (tx *Tx) write() error { - // Sort pages by id. - pages := make(pages, 0, len(tx.pages)) - for _, p := range tx.pages { - pages = append(pages, p) - } - // Clear out page cache early. - tx.pages = make(map[pgid]*page) - sort.Sort(pages) - - // Write pages to disk in order. - for _, p := range pages { - rem := (uint64(p.overflow) + 1) * uint64(tx.db.pageSize) - offset := int64(p.id) * int64(tx.db.pageSize) - var written uintptr - - // Write out page in "max allocation" sized chunks. 
- for { - sz := rem - if sz > maxAllocSize-1 { - sz = maxAllocSize - 1 - } - buf := unsafeByteSlice(unsafe.Pointer(p), written, 0, int(sz)) - - if _, err := tx.db.ops.writeAt(buf, offset); err != nil { - return err - } - - // Update statistics. - tx.stats.Write++ - - // Exit inner for loop if we've written all the chunks. - rem -= sz - if rem == 0 { - break - } - - // Otherwise move offset forward and move pointer to next chunk. - offset += int64(sz) - written += uintptr(sz) - } - } - - // Ignore file sync if flag is set on DB. - if !tx.db.NoSync || IgnoreNoSync { - if err := fdatasync(tx.db); err != nil { - return err - } - } - - // Put small pages back to page pool. - for _, p := range pages { - // Ignore page sizes over 1 page. - // These are allocated using make() instead of the page pool. - if int(p.overflow) != 0 { - continue - } - - buf := unsafeByteSlice(unsafe.Pointer(p), 0, 0, tx.db.pageSize) - - // See https://go.googlesource.com/go/+/f03c9202c43e0abb130669852082117ca50aa9b1 - for i := range buf { - buf[i] = 0 - } - tx.db.pagePool.Put(buf) - } - - return nil -} - -// writeMeta writes the meta to the disk. -func (tx *Tx) writeMeta() error { - // Create a temporary buffer for the meta page. - buf := make([]byte, tx.db.pageSize) - p := tx.db.pageInBuffer(buf, 0) - tx.meta.write(p) - - // Write the meta page to file. - if _, err := tx.db.ops.writeAt(buf, int64(p.id)*int64(tx.db.pageSize)); err != nil { - return err - } - if !tx.db.NoSync || IgnoreNoSync { - if err := fdatasync(tx.db); err != nil { - return err - } - } - - // Update statistics. - tx.stats.Write++ - - return nil -} - -// page returns a reference to the page with a given id. -// If page has been written to then a temporary buffered page is returned. -func (tx *Tx) page(id pgid) *page { - // Check the dirty pages first. - if tx.pages != nil { - if p, ok := tx.pages[id]; ok { - return p - } - } - - // Otherwise return directly from the mmap. - return tx.db.page(id) -} - -// forEachPage iterates over every page within a given page and executes a function. -func (tx *Tx) forEachPage(pgid pgid, depth int, fn func(*page, int)) { - p := tx.page(pgid) - - // Execute function. - fn(p, depth) - - // Recursively loop over children. - if (p.flags & branchPageFlag) != 0 { - for i := 0; i < int(p.count); i++ { - elem := p.branchPageElement(uint16(i)) - tx.forEachPage(elem.pgid, depth+1, fn) - } - } -} - -// Page returns page information for a given page number. -// This is only safe for concurrent use when used by a writable transaction. -func (tx *Tx) Page(id int) (*PageInfo, error) { - if tx.db == nil { - return nil, ErrTxClosed - } else if pgid(id) >= tx.meta.pgid { - return nil, nil - } - - // Build the page info. - p := tx.db.page(pgid(id)) - info := &PageInfo{ - ID: id, - Count: int(p.count), - OverflowCount: int(p.overflow), - } - - // Determine the type (or if it's free). - if tx.db.freelist.freed(pgid(id)) { - info.Type = "free" - } else { - info.Type = p.typ() - } - - return info, nil -} - -// TxStats represents statistics about the actions performed by the transaction. -type TxStats struct { - // Page statistics. - PageCount int // number of page allocations - PageAlloc int // total bytes allocated - - // Cursor statistics. - CursorCount int // number of cursors created - - // Node statistics - NodeCount int // number of node allocations - NodeDeref int // number of node dereferences - - // Rebalance statistics. 
-	Rebalance     int           // number of node rebalances
-	RebalanceTime time.Duration // total time spent rebalancing
-
-	// Split/Spill statistics.
-	Split     int           // number of nodes split
-	Spill     int           // number of nodes spilled
-	SpillTime time.Duration // total time spent spilling
-
-	// Write statistics.
-	Write     int           // number of writes performed
-	WriteTime time.Duration // total time spent writing to disk
-}
-
-func (s *TxStats) add(other *TxStats) {
-	s.PageCount += other.PageCount
-	s.PageAlloc += other.PageAlloc
-	s.CursorCount += other.CursorCount
-	s.NodeCount += other.NodeCount
-	s.NodeDeref += other.NodeDeref
-	s.Rebalance += other.Rebalance
-	s.RebalanceTime += other.RebalanceTime
-	s.Split += other.Split
-	s.Spill += other.Spill
-	s.SpillTime += other.SpillTime
-	s.Write += other.Write
-	s.WriteTime += other.WriteTime
-}
-
-// Sub calculates and returns the difference between two sets of transaction stats.
-// This is useful when obtaining stats at two different points in time and
-// you need the performance counters that occurred within that time span.
-func (s *TxStats) Sub(other *TxStats) TxStats {
-	var diff TxStats
-	diff.PageCount = s.PageCount - other.PageCount
-	diff.PageAlloc = s.PageAlloc - other.PageAlloc
-	diff.CursorCount = s.CursorCount - other.CursorCount
-	diff.NodeCount = s.NodeCount - other.NodeCount
-	diff.NodeDeref = s.NodeDeref - other.NodeDeref
-	diff.Rebalance = s.Rebalance - other.Rebalance
-	diff.RebalanceTime = s.RebalanceTime - other.RebalanceTime
-	diff.Split = s.Split - other.Split
-	diff.Spill = s.Spill - other.Spill
-	diff.SpillTime = s.SpillTime - other.SpillTime
-	diff.Write = s.Write - other.Write
-	diff.WriteTime = s.WriteTime - other.WriteTime
-	return diff
-}
diff --git a/vendor/go.etcd.io/bbolt/unsafe.go b/vendor/go.etcd.io/bbolt/unsafe.go
deleted file mode 100644
index c0e50375..00000000
--- a/vendor/go.etcd.io/bbolt/unsafe.go
+++ /dev/null
@@ -1,39 +0,0 @@
-package bbolt
-
-import (
-	"reflect"
-	"unsafe"
-)
-
-func unsafeAdd(base unsafe.Pointer, offset uintptr) unsafe.Pointer {
-	return unsafe.Pointer(uintptr(base) + offset)
-}
-
-func unsafeIndex(base unsafe.Pointer, offset uintptr, elemsz uintptr, n int) unsafe.Pointer {
-	return unsafe.Pointer(uintptr(base) + offset + uintptr(n)*elemsz)
-}
-
-func unsafeByteSlice(base unsafe.Pointer, offset uintptr, i, j int) []byte {
-	// See: https://github.com/golang/go/wiki/cgo#turning-c-arrays-into-go-slices
-	//
-	// This memory is not allocated from C, but it is unmanaged by Go's
-	// garbage collector and should behave similarly, and the compiler
-	// should produce similar code. Note that this conversion allows a
-	// subslice to begin after the base address, with an optional offset,
-	// while the URL above does not cover this case and only slices from
-	// index 0. However, the wiki never says that the address must be to
-	// the beginning of a C allocation (or even that malloc was used at
-	// all), so this is believed to be correct.
-	return (*[maxAllocSize]byte)(unsafeAdd(base, offset))[i:j:j]
-}
-
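The unsafeSlice helper that closes this file repoints an existing slice variable at unmanaged memory by rewriting its reflect.SliceHeader in place. A self-contained sketch of the same pattern; it is only safe here because the backing array outlives the view:

```go
package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

// repointSlice mirrors unsafeSlice below: it rewrites the slice header so
// that the slice variable views length bytes starting at data.
func repointSlice(slice, data unsafe.Pointer, length int) {
	s := (*reflect.SliceHeader)(slice)
	s.Data = uintptr(data)
	s.Cap = length
	s.Len = length
}

func main() {
	backing := [4]byte{1, 2, 3, 4}
	var view []byte
	repointSlice(unsafe.Pointer(&view), unsafe.Pointer(&backing[0]), len(backing))
	fmt.Println(view) // [1 2 3 4]
}
```

-// unsafeSlice modifies the data, len, and cap of a slice variable pointed to by
-// the slice parameter. This helper should be used over other direct
-// manipulation of reflect.SliceHeader to prevent misuse, namely, converting
-// from reflect.SliceHeader to a Go slice type.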
-func unsafeSlice(slice, data unsafe.Pointer, len int) { - s := (*reflect.SliceHeader)(slice) - s.Data = uintptr(data) - s.Cap = len - s.Len = len -} diff --git a/vendor/go.mozilla.org/pkcs7/.gitignore b/vendor/go.mozilla.org/pkcs7/.gitignore deleted file mode 100644 index daf913b1..00000000 --- a/vendor/go.mozilla.org/pkcs7/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof diff --git a/vendor/go.mozilla.org/pkcs7/.travis.yml b/vendor/go.mozilla.org/pkcs7/.travis.yml deleted file mode 100644 index eac4c176..00000000 --- a/vendor/go.mozilla.org/pkcs7/.travis.yml +++ /dev/null @@ -1,10 +0,0 @@ -language: go -go: - - "1.11" - - "1.12" - - "1.13" - - tip -before_install: - - make gettools -script: - - make diff --git a/vendor/go.mozilla.org/pkcs7/LICENSE b/vendor/go.mozilla.org/pkcs7/LICENSE deleted file mode 100644 index 75f32090..00000000 --- a/vendor/go.mozilla.org/pkcs7/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Andrew Smith - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - diff --git a/vendor/go.mozilla.org/pkcs7/Makefile b/vendor/go.mozilla.org/pkcs7/Makefile deleted file mode 100644 index 47c73b86..00000000 --- a/vendor/go.mozilla.org/pkcs7/Makefile +++ /dev/null @@ -1,20 +0,0 @@ -all: vet staticcheck test - -test: - go test -covermode=count -coverprofile=coverage.out . - -showcoverage: test - go tool cover -html=coverage.out - -vet: - go vet . - -lint: - golint . - -staticcheck: - staticcheck . - -gettools: - go get -u honnef.co/go/tools/... - go get -u golang.org/x/lint/golint diff --git a/vendor/go.mozilla.org/pkcs7/README.md b/vendor/go.mozilla.org/pkcs7/README.md deleted file mode 100644 index bf37059c..00000000 --- a/vendor/go.mozilla.org/pkcs7/README.md +++ /dev/null @@ -1,69 +0,0 @@ -# pkcs7 - -[![GoDoc](https://godoc.org/go.mozilla.org/pkcs7?status.svg)](https://godoc.org/go.mozilla.org/pkcs7) -[![Build Status](https://travis-ci.org/mozilla-services/pkcs7.svg?branch=master)](https://travis-ci.org/mozilla-services/pkcs7) - -pkcs7 implements parsing and creating signed and enveloped messages. 
-
-```go
-package main
-
-import (
-	"bytes"
-	"crypto/rsa"
-	"crypto/x509"
-	"encoding/pem"
-	"fmt"
-	"os"
-
-	"go.mozilla.org/pkcs7"
-)
-
-func SignAndDetach(content []byte, cert *x509.Certificate, privkey *rsa.PrivateKey) (signed []byte, err error) {
-	toBeSigned, err := pkcs7.NewSignedData(content)
-	if err != nil {
-		err = fmt.Errorf("Cannot initialize signed data: %s", err)
-		return
-	}
-	if err = toBeSigned.AddSigner(cert, privkey, pkcs7.SignerInfoConfig{}); err != nil {
-		err = fmt.Errorf("Cannot add signer: %s", err)
-		return
-	}
-
-	// Detach signature, omit if you want an embedded signature
-	toBeSigned.Detach()
-
-	signed, err = toBeSigned.Finish()
-	if err != nil {
-		err = fmt.Errorf("Cannot finish signing data: %s", err)
-		return
-	}
-
-	// Verify the signature
-	pem.Encode(os.Stdout, &pem.Block{Type: "PKCS7", Bytes: signed})
-	p7, err := pkcs7.Parse(signed)
-	if err != nil {
-		err = fmt.Errorf("Cannot parse our signed data: %s", err)
-		return
-	}
-
-	// since the signature was detached, reattach the content here
-	p7.Content = content
-
-	if bytes.Compare(content, p7.Content) != 0 {
-		err = fmt.Errorf("Our content was not in the parsed data:\n\tExpected: %s\n\tActual: %s", content, p7.Content)
-		return
-	}
-	if err = p7.Verify(); err != nil {
-		err = fmt.Errorf("Cannot verify our signed data: %s", err)
-		return
-	}
-
-	return signed, nil
-}
-```
-
-## Credits
-This is a fork of [fullsailor/pkcs7](https://github.com/fullsailor/pkcs7)
diff --git a/vendor/go.mozilla.org/pkcs7/ber.go b/vendor/go.mozilla.org/pkcs7/ber.go
deleted file mode 100644
index 58525673..00000000
--- a/vendor/go.mozilla.org/pkcs7/ber.go
+++ /dev/null
@@ -1,251 +0,0 @@
-package pkcs7
-
-import (
-	"bytes"
-	"errors"
-)
-
-var encodeIndent = 0
-
-type asn1Object interface {
-	EncodeTo(writer *bytes.Buffer) error
-}
-
-type asn1Structured struct {
-	tagBytes []byte
-	content  []asn1Object
-}
-
-func (s asn1Structured) EncodeTo(out *bytes.Buffer) error {
-	//fmt.Printf("%s--> tag: % X\n", strings.Repeat("| ", encodeIndent), s.tagBytes)
-	encodeIndent++
-	inner := new(bytes.Buffer)
-	for _, obj := range s.content {
-		err := obj.EncodeTo(inner)
-		if err != nil {
-			return err
-		}
-	}
-	encodeIndent--
-	out.Write(s.tagBytes)
-	encodeLength(out, inner.Len())
-	out.Write(inner.Bytes())
-	return nil
-}
-
-type asn1Primitive struct {
-	tagBytes []byte
-	length   int
-	content  []byte
-}
-
-func (p asn1Primitive) EncodeTo(out *bytes.Buffer) error {
-	_, err := out.Write(p.tagBytes)
-	if err != nil {
-		return err
-	}
-	if err = encodeLength(out, p.length); err != nil {
-		return err
-	}
-	//fmt.Printf("%s--> tag: % X length: %d\n", strings.Repeat("| ", encodeIndent), p.tagBytes, p.length)
-	//fmt.Printf("%s--> content length: %d\n", strings.Repeat("| ", encodeIndent), len(p.content))
-	out.Write(p.content)
-
-	return nil
-}
-
-func ber2der(ber []byte) ([]byte, error) {
-	if len(ber) == 0 {
-		return nil, errors.New("ber2der: input ber is empty")
-	}
-	//fmt.Printf("--> ber2der: Transcoding %d bytes\n", len(ber))
-	out := new(bytes.Buffer)
-
-	obj, _, err := readObject(ber, 0)
-	if err != nil {
-		return nil, err
-	}
-	obj.EncodeTo(out)
-
-	// if offset < len(ber) {
-	//	return nil, fmt.Errorf("ber2der: Content longer than expected. Got %d, expected %d", offset, len(ber))
-	//}
-
-	return out.Bytes(), nil
-}
-
-// encodes lengths that are longer than 127 into a string of bytes
-func marshalLongLength(out *bytes.Buffer, i int) (err error) {
-	n := lengthLength(i)
-
-	for ; n > 0; n-- {
-		err = out.WriteByte(byte(i >> uint((n-1)*8)))
-		if err != nil {
-			return
-		}
-	}
-
-	return nil
-}
-
-// computes the byte length of an encoded length value
-func lengthLength(i int) (numBytes int) {
-	numBytes = 1
-	for i > 255 {
-		numBytes++
-		i >>= 8
-	}
-	return
-}
-
-// encodes the length in DER format
-// If the length fits in 7 bits, the value is encoded directly.
-//
-// Otherwise, the number of bytes needed to encode the length is first
-// determined. This number is likely to be 4 or less for a 32-bit length. This
-// number is added to 0x80. The length itself is then encoded in big-endian
-// byte order, following after.
-//
-// Examples:
-//  length | byte 1 | bytes n
-//  0      | 0x00   | -
-//  120    | 0x78   | -
-//  200    | 0x81   | 0xC8
-//  500    | 0x82   | 0x01 0xF4
-//
-func encodeLength(out *bytes.Buffer, length int) (err error) {
-	if length >= 128 {
-		l := lengthLength(length)
-		err = out.WriteByte(0x80 | byte(l))
-		if err != nil {
-			return
-		}
-		err = marshalLongLength(out, length)
-		if err != nil {
-			return
-		}
-	} else {
-		err = out.WriteByte(byte(length))
-		if err != nil {
-			return
-		}
-	}
-	return
-}
-
-func readObject(ber []byte, offset int) (asn1Object, int, error) {
-	berLen := len(ber)
-	if offset >= berLen {
-		return nil, 0, errors.New("ber2der: offset is after end of ber data")
-	}
-	tagStart := offset
-	b := ber[offset]
-	offset++
-	if offset >= berLen {
-		return nil, 0, errors.New("ber2der: cannot move offset forward, end of ber data reached")
-	}
-	tag := b & 0x1F // last 5 bits
-	if tag == 0x1F {
-		tag = 0
-		for ber[offset] >= 0x80 {
-			tag = tag*128 + ber[offset] - 0x80
-			offset++
-			if offset > berLen {
-				return nil, 0, errors.New("ber2der: cannot move offset forward, end of ber data reached")
-			}
-		}
-		// jvehent 20170227: this doesn't appear to be used anywhere...
- //tag = tag*128 + ber[offset] - 0x80 - offset++ - if offset > berLen { - return nil, 0, errors.New("ber2der: cannot move offset forward, end of ber data reached") - } - } - tagEnd := offset - - kind := b & 0x20 - if kind == 0 { - debugprint("--> Primitive\n") - } else { - debugprint("--> Constructed\n") - } - // read length - var length int - l := ber[offset] - offset++ - if offset > berLen { - return nil, 0, errors.New("ber2der: cannot move offset forward, end of ber data reached") - } - hack := 0 - if l > 0x80 { - numberOfBytes := (int)(l & 0x7F) - if numberOfBytes > 4 { // int is only guaranteed to be 32bit - return nil, 0, errors.New("ber2der: BER tag length too long") - } - if numberOfBytes == 4 && (int)(ber[offset]) > 0x7F { - return nil, 0, errors.New("ber2der: BER tag length is negative") - } - if (int)(ber[offset]) == 0x0 { - return nil, 0, errors.New("ber2der: BER tag length has leading zero") - } - debugprint("--> (compute length) indicator byte: %x\n", l) - debugprint("--> (compute length) length bytes: % X\n", ber[offset:offset+numberOfBytes]) - for i := 0; i < numberOfBytes; i++ { - length = length*256 + (int)(ber[offset]) - offset++ - if offset > berLen { - return nil, 0, errors.New("ber2der: cannot move offset forward, end of ber data reached") - } - } - } else if l == 0x80 { - // find length by searching content - markerIndex := bytes.LastIndex(ber[offset:], []byte{0x0, 0x0}) - if markerIndex == -1 { - return nil, 0, errors.New("ber2der: Invalid BER format") - } - length = markerIndex - hack = 2 - debugprint("--> (compute length) marker found at offset: %d\n", markerIndex+offset) - } else { - length = (int)(l) - } - if length < 0 { - return nil, 0, errors.New("ber2der: invalid negative value found in BER tag length") - } - //fmt.Printf("--> length : %d\n", length) - contentEnd := offset + length - if contentEnd > len(ber) { - return nil, 0, errors.New("ber2der: BER tag length is more than available data") - } - debugprint("--> content start : %d\n", offset) - debugprint("--> content end : %d\n", contentEnd) - debugprint("--> content : % X\n", ber[offset:contentEnd]) - var obj asn1Object - if kind == 0 { - obj = asn1Primitive{ - tagBytes: ber[tagStart:tagEnd], - length: length, - content: ber[offset:contentEnd], - } - } else { - var subObjects []asn1Object - for offset < contentEnd { - var subObj asn1Object - var err error - subObj, offset, err = readObject(ber[:contentEnd], offset) - if err != nil { - return nil, 0, err - } - subObjects = append(subObjects, subObj) - } - obj = asn1Structured{ - tagBytes: ber[tagStart:tagEnd], - content: subObjects, - } - } - - return obj, contentEnd + hack, nil -} - -func debugprint(format string, a ...interface{}) { - //fmt.Printf(format, a) -} diff --git a/vendor/go.mozilla.org/pkcs7/decrypt.go b/vendor/go.mozilla.org/pkcs7/decrypt.go deleted file mode 100644 index 0d088d62..00000000 --- a/vendor/go.mozilla.org/pkcs7/decrypt.go +++ /dev/null @@ -1,177 +0,0 @@ -package pkcs7 - -import ( - "bytes" - "crypto" - "crypto/aes" - "crypto/cipher" - "crypto/des" - "crypto/rand" - "crypto/rsa" - "crypto/x509" - "encoding/asn1" - "errors" - "fmt" -) - -// ErrUnsupportedAlgorithm tells you when our quick dev assumptions have failed -var ErrUnsupportedAlgorithm = errors.New("pkcs7: cannot decrypt data: only RSA, DES, DES-EDE3, AES-256-CBC and AES-128-GCM supported") - -// ErrNotEncryptedContent is returned when attempting to Decrypt data that is not encrypted data -var ErrNotEncryptedContent = errors.New("pkcs7: content data is a decryptable data 
type") - -// Decrypt decrypts encrypted content info for recipient cert and private key -func (p7 *PKCS7) Decrypt(cert *x509.Certificate, pkey crypto.PrivateKey) ([]byte, error) { - data, ok := p7.raw.(envelopedData) - if !ok { - return nil, ErrNotEncryptedContent - } - recipient := selectRecipientForCertificate(data.RecipientInfos, cert) - if recipient.EncryptedKey == nil { - return nil, errors.New("pkcs7: no enveloped recipient for provided certificate") - } - switch pkey := pkey.(type) { - case *rsa.PrivateKey: - var contentKey []byte - contentKey, err := rsa.DecryptPKCS1v15(rand.Reader, pkey, recipient.EncryptedKey) - if err != nil { - return nil, err - } - return data.EncryptedContentInfo.decrypt(contentKey) - } - return nil, ErrUnsupportedAlgorithm -} - -// DecryptUsingPSK decrypts encrypted data using caller provided -// pre-shared secret -func (p7 *PKCS7) DecryptUsingPSK(key []byte) ([]byte, error) { - data, ok := p7.raw.(encryptedData) - if !ok { - return nil, ErrNotEncryptedContent - } - return data.EncryptedContentInfo.decrypt(key) -} - -func (eci encryptedContentInfo) decrypt(key []byte) ([]byte, error) { - alg := eci.ContentEncryptionAlgorithm.Algorithm - if !alg.Equal(OIDEncryptionAlgorithmDESCBC) && - !alg.Equal(OIDEncryptionAlgorithmDESEDE3CBC) && - !alg.Equal(OIDEncryptionAlgorithmAES256CBC) && - !alg.Equal(OIDEncryptionAlgorithmAES128CBC) && - !alg.Equal(OIDEncryptionAlgorithmAES128GCM) && - !alg.Equal(OIDEncryptionAlgorithmAES256GCM) { - fmt.Printf("Unsupported Content Encryption Algorithm: %s\n", alg) - return nil, ErrUnsupportedAlgorithm - } - - // EncryptedContent can either be constructed of multple OCTET STRINGs - // or _be_ a tagged OCTET STRING - var cyphertext []byte - if eci.EncryptedContent.IsCompound { - // Complex case to concat all of the children OCTET STRINGs - var buf bytes.Buffer - cypherbytes := eci.EncryptedContent.Bytes - for { - var part []byte - cypherbytes, _ = asn1.Unmarshal(cypherbytes, &part) - buf.Write(part) - if cypherbytes == nil { - break - } - } - cyphertext = buf.Bytes() - } else { - // Simple case, the bytes _are_ the cyphertext - cyphertext = eci.EncryptedContent.Bytes - } - - var block cipher.Block - var err error - - switch { - case alg.Equal(OIDEncryptionAlgorithmDESCBC): - block, err = des.NewCipher(key) - case alg.Equal(OIDEncryptionAlgorithmDESEDE3CBC): - block, err = des.NewTripleDESCipher(key) - case alg.Equal(OIDEncryptionAlgorithmAES256CBC), alg.Equal(OIDEncryptionAlgorithmAES256GCM): - fallthrough - case alg.Equal(OIDEncryptionAlgorithmAES128GCM), alg.Equal(OIDEncryptionAlgorithmAES128CBC): - block, err = aes.NewCipher(key) - } - - if err != nil { - return nil, err - } - - if alg.Equal(OIDEncryptionAlgorithmAES128GCM) || alg.Equal(OIDEncryptionAlgorithmAES256GCM) { - params := aesGCMParameters{} - paramBytes := eci.ContentEncryptionAlgorithm.Parameters.Bytes - - _, err := asn1.Unmarshal(paramBytes, ¶ms) - if err != nil { - return nil, err - } - - gcm, err := cipher.NewGCM(block) - if err != nil { - return nil, err - } - - if len(params.Nonce) != gcm.NonceSize() { - return nil, errors.New("pkcs7: encryption algorithm parameters are incorrect") - } - if params.ICVLen != gcm.Overhead() { - return nil, errors.New("pkcs7: encryption algorithm parameters are incorrect") - } - - plaintext, err := gcm.Open(nil, params.Nonce, cyphertext, nil) - if err != nil { - return nil, err - } - - return plaintext, nil - } - - iv := eci.ContentEncryptionAlgorithm.Parameters.Bytes - if len(iv) != block.BlockSize() { - return nil, 
errors.New("pkcs7: encryption algorithm parameters are malformed") - } - mode := cipher.NewCBCDecrypter(block, iv) - plaintext := make([]byte, len(cyphertext)) - mode.CryptBlocks(plaintext, cyphertext) - if plaintext, err = unpad(plaintext, mode.BlockSize()); err != nil { - return nil, err - } - return plaintext, nil -} - -func unpad(data []byte, blocklen int) ([]byte, error) { - if blocklen < 1 { - return nil, fmt.Errorf("invalid blocklen %d", blocklen) - } - if len(data)%blocklen != 0 || len(data) == 0 { - return nil, fmt.Errorf("invalid data len %d", len(data)) - } - - // the last byte is the length of padding - padlen := int(data[len(data)-1]) - - // check padding integrity, all bytes should be the same - pad := data[len(data)-padlen:] - for _, padbyte := range pad { - if padbyte != byte(padlen) { - return nil, errors.New("invalid padding") - } - } - - return data[:len(data)-padlen], nil -} - -func selectRecipientForCertificate(recipients []recipientInfo, cert *x509.Certificate) recipientInfo { - for _, recp := range recipients { - if isCertMatchForIssuerAndSerial(cert, recp.IssuerAndSerialNumber) { - return recp - } - } - return recipientInfo{} -} diff --git a/vendor/go.mozilla.org/pkcs7/encrypt.go b/vendor/go.mozilla.org/pkcs7/encrypt.go deleted file mode 100644 index da57ae64..00000000 --- a/vendor/go.mozilla.org/pkcs7/encrypt.go +++ /dev/null @@ -1,399 +0,0 @@ -package pkcs7 - -import ( - "bytes" - "crypto/aes" - "crypto/cipher" - "crypto/des" - "crypto/rand" - "crypto/rsa" - "crypto/x509" - "crypto/x509/pkix" - "encoding/asn1" - "errors" - "fmt" -) - -type envelopedData struct { - Version int - RecipientInfos []recipientInfo `asn1:"set"` - EncryptedContentInfo encryptedContentInfo -} - -type encryptedData struct { - Version int - EncryptedContentInfo encryptedContentInfo -} - -type recipientInfo struct { - Version int - IssuerAndSerialNumber issuerAndSerial - KeyEncryptionAlgorithm pkix.AlgorithmIdentifier - EncryptedKey []byte -} - -type encryptedContentInfo struct { - ContentType asn1.ObjectIdentifier - ContentEncryptionAlgorithm pkix.AlgorithmIdentifier - EncryptedContent asn1.RawValue `asn1:"tag:0,optional,explicit"` -} - -const ( - // EncryptionAlgorithmDESCBC is the DES CBC encryption algorithm - EncryptionAlgorithmDESCBC = iota - - // EncryptionAlgorithmAES128CBC is the AES 128 bits with CBC encryption algorithm - // Avoid this algorithm unless required for interoperability; use AES GCM instead. - EncryptionAlgorithmAES128CBC - - // EncryptionAlgorithmAES256CBC is the AES 256 bits with CBC encryption algorithm - // Avoid this algorithm unless required for interoperability; use AES GCM instead. - EncryptionAlgorithmAES256CBC - - // EncryptionAlgorithmAES128GCM is the AES 128 bits with GCM encryption algorithm - EncryptionAlgorithmAES128GCM - - // EncryptionAlgorithmAES256GCM is the AES 256 bits with GCM encryption algorithm - EncryptionAlgorithmAES256GCM -) - -// ContentEncryptionAlgorithm determines the algorithm used to encrypt the -// plaintext message. Change the value of this variable to change which -// algorithm is used in the Encrypt() function. -var ContentEncryptionAlgorithm = EncryptionAlgorithmDESCBC - -// ErrUnsupportedEncryptionAlgorithm is returned when attempting to encrypt -// content with an unsupported algorithm. -var ErrUnsupportedEncryptionAlgorithm = errors.New("pkcs7: cannot encrypt content: only DES-CBC, AES-CBC, and AES-GCM supported") - -// ErrPSKNotProvided is returned when attempting to encrypt -// using a PSK without actually providing the PSK. 
-var ErrPSKNotProvided = errors.New("pkcs7: cannot encrypt content: PSK not provided") - -const nonceSize = 12 - -type aesGCMParameters struct { - Nonce []byte `asn1:"tag:4"` - ICVLen int -} - -func encryptAESGCM(content []byte, key []byte) ([]byte, *encryptedContentInfo, error) { - var keyLen int - var algID asn1.ObjectIdentifier - switch ContentEncryptionAlgorithm { - case EncryptionAlgorithmAES128GCM: - keyLen = 16 - algID = OIDEncryptionAlgorithmAES128GCM - case EncryptionAlgorithmAES256GCM: - keyLen = 32 - algID = OIDEncryptionAlgorithmAES256GCM - default: - return nil, nil, fmt.Errorf("invalid ContentEncryptionAlgorithm in encryptAESGCM: %d", ContentEncryptionAlgorithm) - } - if key == nil { - // Create AES key - key = make([]byte, keyLen) - - _, err := rand.Read(key) - if err != nil { - return nil, nil, err - } - } - - // Create nonce - nonce := make([]byte, nonceSize) - - _, err := rand.Read(nonce) - if err != nil { - return nil, nil, err - } - - // Encrypt content - block, err := aes.NewCipher(key) - if err != nil { - return nil, nil, err - } - - gcm, err := cipher.NewGCM(block) - if err != nil { - return nil, nil, err - } - - ciphertext := gcm.Seal(nil, nonce, content, nil) - - // Prepare ASN.1 Encrypted Content Info - paramSeq := aesGCMParameters{ - Nonce: nonce, - ICVLen: gcm.Overhead(), - } - - paramBytes, err := asn1.Marshal(paramSeq) - if err != nil { - return nil, nil, err - } - - eci := encryptedContentInfo{ - ContentType: OIDData, - ContentEncryptionAlgorithm: pkix.AlgorithmIdentifier{ - Algorithm: algID, - Parameters: asn1.RawValue{ - Tag: asn1.TagSequence, - Bytes: paramBytes, - }, - }, - EncryptedContent: marshalEncryptedContent(ciphertext), - } - - return key, &eci, nil -} - -func encryptDESCBC(content []byte, key []byte) ([]byte, *encryptedContentInfo, error) { - if key == nil { - // Create DES key - key = make([]byte, 8) - - _, err := rand.Read(key) - if err != nil { - return nil, nil, err - } - } - - // Create CBC IV - iv := make([]byte, des.BlockSize) - _, err := rand.Read(iv) - if err != nil { - return nil, nil, err - } - - // Encrypt padded content - block, err := des.NewCipher(key) - if err != nil { - return nil, nil, err - } - mode := cipher.NewCBCEncrypter(block, iv) - plaintext, err := pad(content, mode.BlockSize()) - if err != nil { - return nil, nil, err - } - cyphertext := make([]byte, len(plaintext)) - mode.CryptBlocks(cyphertext, plaintext) - - // Prepare ASN.1 Encrypted Content Info - eci := encryptedContentInfo{ - ContentType: OIDData, - ContentEncryptionAlgorithm: pkix.AlgorithmIdentifier{ - Algorithm: OIDEncryptionAlgorithmDESCBC, - Parameters: asn1.RawValue{Tag: 4, Bytes: iv}, - }, - EncryptedContent: marshalEncryptedContent(cyphertext), - } - - return key, &eci, nil -} - -func encryptAESCBC(content []byte, key []byte) ([]byte, *encryptedContentInfo, error) { - var keyLen int - var algID asn1.ObjectIdentifier - switch ContentEncryptionAlgorithm { - case EncryptionAlgorithmAES128CBC: - keyLen = 16 - algID = OIDEncryptionAlgorithmAES128CBC - case EncryptionAlgorithmAES256CBC: - keyLen = 32 - algID = OIDEncryptionAlgorithmAES256CBC - default: - return nil, nil, fmt.Errorf("invalid ContentEncryptionAlgorithm in encryptAESCBC: %d", ContentEncryptionAlgorithm) - } - - if key == nil { - // Create AES key - key = make([]byte, keyLen) - - _, err := rand.Read(key) - if err != nil { - return nil, nil, err - } - } - - // Create CBC IV - iv := make([]byte, aes.BlockSize) - _, err := rand.Read(iv) - if err != nil { - return nil, nil, err - } - - // Encrypt 
padded content - block, err := aes.NewCipher(key) - if err != nil { - return nil, nil, err - } - mode := cipher.NewCBCEncrypter(block, iv) - plaintext, err := pad(content, mode.BlockSize()) - if err != nil { - return nil, nil, err - } - cyphertext := make([]byte, len(plaintext)) - mode.CryptBlocks(cyphertext, plaintext) - - // Prepare ASN.1 Encrypted Content Info - eci := encryptedContentInfo{ - ContentType: OIDData, - ContentEncryptionAlgorithm: pkix.AlgorithmIdentifier{ - Algorithm: algID, - Parameters: asn1.RawValue{Tag: 4, Bytes: iv}, - }, - EncryptedContent: marshalEncryptedContent(cyphertext), - } - - return key, &eci, nil -} - -// Encrypt creates and returns an envelope data PKCS7 structure with encrypted -// recipient keys for each recipient public key. -// -// The algorithm used to perform encryption is determined by the current value -// of the global ContentEncryptionAlgorithm package variable. By default, the -// value is EncryptionAlgorithmDESCBC. To use a different algorithm, change the -// value before calling Encrypt(). For example: -// -// ContentEncryptionAlgorithm = EncryptionAlgorithmAES128GCM -// -// TODO(fullsailor): Add support for encrypting content with other algorithms -func Encrypt(content []byte, recipients []*x509.Certificate) ([]byte, error) { - var eci *encryptedContentInfo - var key []byte - var err error - - // Apply chosen symmetric encryption method - switch ContentEncryptionAlgorithm { - case EncryptionAlgorithmDESCBC: - key, eci, err = encryptDESCBC(content, nil) - case EncryptionAlgorithmAES128CBC: - fallthrough - case EncryptionAlgorithmAES256CBC: - key, eci, err = encryptAESCBC(content, nil) - case EncryptionAlgorithmAES128GCM: - fallthrough - case EncryptionAlgorithmAES256GCM: - key, eci, err = encryptAESGCM(content, nil) - - default: - return nil, ErrUnsupportedEncryptionAlgorithm - } - - if err != nil { - return nil, err - } - - // Prepare each recipient's encrypted cipher key - recipientInfos := make([]recipientInfo, len(recipients)) - for i, recipient := range recipients { - encrypted, err := encryptKey(key, recipient) - if err != nil { - return nil, err - } - ias, err := cert2issuerAndSerial(recipient) - if err != nil { - return nil, err - } - info := recipientInfo{ - Version: 0, - IssuerAndSerialNumber: ias, - KeyEncryptionAlgorithm: pkix.AlgorithmIdentifier{ - Algorithm: OIDEncryptionAlgorithmRSA, - }, - EncryptedKey: encrypted, - } - recipientInfos[i] = info - } - - // Prepare envelope content - envelope := envelopedData{ - EncryptedContentInfo: *eci, - Version: 0, - RecipientInfos: recipientInfos, - } - innerContent, err := asn1.Marshal(envelope) - if err != nil { - return nil, err - } - - // Prepare outer payload structure - wrapper := contentInfo{ - ContentType: OIDEnvelopedData, - Content: asn1.RawValue{Class: 2, Tag: 0, IsCompound: true, Bytes: innerContent}, - } - - return asn1.Marshal(wrapper) -} - -// EncryptUsingPSK creates and returns an encrypted data PKCS7 structure, -// encrypted using caller provided pre-shared secret. 
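As the Encrypt doc comment above spells out, the cipher is selected through the package-level ContentEncryptionAlgorithm variable rather than a parameter. A short caller-side sketch; the recipient certificate is assumed to be parsed elsewhere:

```go
package example

import (
	"crypto/x509"

	"go.mozilla.org/pkcs7"
)

// encryptForRecipient envelopes plaintext for a single recipient using
// AES-128-GCM instead of the package's DES-CBC default. Because the
// algorithm is a package-level variable, set it once at startup rather
// than from concurrent goroutines.
func encryptForRecipient(plaintext []byte, recipientCert *x509.Certificate) ([]byte, error) {
	pkcs7.ContentEncryptionAlgorithm = pkcs7.EncryptionAlgorithmAES128GCM
	return pkcs7.Encrypt(plaintext, []*x509.Certificate{recipientCert})
}
```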
-func EncryptUsingPSK(content []byte, key []byte) ([]byte, error) { - var eci *encryptedContentInfo - var err error - - if key == nil { - return nil, ErrPSKNotProvided - } - - // Apply chosen symmetric encryption method - switch ContentEncryptionAlgorithm { - case EncryptionAlgorithmDESCBC: - _, eci, err = encryptDESCBC(content, key) - - case EncryptionAlgorithmAES128GCM: - fallthrough - case EncryptionAlgorithmAES256GCM: - _, eci, err = encryptAESGCM(content, key) - - default: - return nil, ErrUnsupportedEncryptionAlgorithm - } - - if err != nil { - return nil, err - } - - // Prepare encrypted-data content - ed := encryptedData{ - Version: 0, - EncryptedContentInfo: *eci, - } - innerContent, err := asn1.Marshal(ed) - if err != nil { - return nil, err - } - - // Prepare outer payload structure - wrapper := contentInfo{ - ContentType: OIDEncryptedData, - Content: asn1.RawValue{Class: 2, Tag: 0, IsCompound: true, Bytes: innerContent}, - } - - return asn1.Marshal(wrapper) -} - -func marshalEncryptedContent(content []byte) asn1.RawValue { - asn1Content, _ := asn1.Marshal(content) - return asn1.RawValue{Tag: 0, Class: 2, Bytes: asn1Content, IsCompound: true} -} - -func encryptKey(key []byte, recipient *x509.Certificate) ([]byte, error) { - if pub := recipient.PublicKey.(*rsa.PublicKey); pub != nil { - return rsa.EncryptPKCS1v15(rand.Reader, pub, key) - } - return nil, ErrUnsupportedAlgorithm -} - -func pad(data []byte, blocklen int) ([]byte, error) { - if blocklen < 1 { - return nil, fmt.Errorf("invalid blocklen %d", blocklen) - } - padlen := blocklen - (len(data) % blocklen) - if padlen == 0 { - padlen = blocklen - } - pad := bytes.Repeat([]byte{byte(padlen)}, padlen) - return append(data, pad...), nil -} diff --git a/vendor/go.mozilla.org/pkcs7/pkcs7.go b/vendor/go.mozilla.org/pkcs7/pkcs7.go deleted file mode 100644 index ccc6cc6d..00000000 --- a/vendor/go.mozilla.org/pkcs7/pkcs7.go +++ /dev/null @@ -1,291 +0,0 @@ -// Package pkcs7 implements parsing and generation of some PKCS#7 structures. -package pkcs7 - -import ( - "bytes" - "crypto" - "crypto/dsa" - "crypto/ecdsa" - "crypto/rsa" - "crypto/x509" - "crypto/x509/pkix" - "encoding/asn1" - "errors" - "fmt" - "sort" - - _ "crypto/sha1" // for crypto.SHA1 -) - -// PKCS7 Represents a PKCS7 structure -type PKCS7 struct { - Content []byte - Certificates []*x509.Certificate - CRLs []pkix.CertificateList - Signers []signerInfo - raw interface{} -} - -type contentInfo struct { - ContentType asn1.ObjectIdentifier - Content asn1.RawValue `asn1:"explicit,optional,tag:0"` -} - -// ErrUnsupportedContentType is returned when a PKCS7 content is not supported. 
-// Currently only Data (1.2.840.113549.1.7.1), Signed Data (1.2.840.113549.1.7.2), -// and Enveloped Data are supported (1.2.840.113549.1.7.3) -var ErrUnsupportedContentType = errors.New("pkcs7: cannot parse data: unimplemented content type") - -type unsignedData []byte - -var ( - // Signed Data OIDs - OIDData = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 7, 1} - OIDSignedData = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 7, 2} - OIDEnvelopedData = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 7, 3} - OIDEncryptedData = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 7, 6} - OIDAttributeContentType = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 9, 3} - OIDAttributeMessageDigest = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 9, 4} - OIDAttributeSigningTime = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 9, 5} - - // Digest Algorithms - OIDDigestAlgorithmSHA1 = asn1.ObjectIdentifier{1, 3, 14, 3, 2, 26} - OIDDigestAlgorithmSHA256 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 1} - OIDDigestAlgorithmSHA384 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 2} - OIDDigestAlgorithmSHA512 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 3} - - OIDDigestAlgorithmDSA = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 1} - OIDDigestAlgorithmDSASHA1 = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 3} - - OIDDigestAlgorithmECDSASHA1 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 1} - OIDDigestAlgorithmECDSASHA256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 2} - OIDDigestAlgorithmECDSASHA384 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 3} - OIDDigestAlgorithmECDSASHA512 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 4} - - // Signature Algorithms - OIDEncryptionAlgorithmRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 1} - OIDEncryptionAlgorithmRSASHA1 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 5} - OIDEncryptionAlgorithmRSASHA256 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 11} - OIDEncryptionAlgorithmRSASHA384 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 12} - OIDEncryptionAlgorithmRSASHA512 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 13} - - OIDEncryptionAlgorithmECDSAP256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 3, 1, 7} - OIDEncryptionAlgorithmECDSAP384 = asn1.ObjectIdentifier{1, 3, 132, 0, 34} - OIDEncryptionAlgorithmECDSAP521 = asn1.ObjectIdentifier{1, 3, 132, 0, 35} - - // Encryption Algorithms - OIDEncryptionAlgorithmDESCBC = asn1.ObjectIdentifier{1, 3, 14, 3, 2, 7} - OIDEncryptionAlgorithmDESEDE3CBC = asn1.ObjectIdentifier{1, 2, 840, 113549, 3, 7} - OIDEncryptionAlgorithmAES256CBC = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 42} - OIDEncryptionAlgorithmAES128GCM = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 6} - OIDEncryptionAlgorithmAES128CBC = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 2} - OIDEncryptionAlgorithmAES256GCM = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 46} -) - -func getHashForOID(oid asn1.ObjectIdentifier) (crypto.Hash, error) { - switch { - case oid.Equal(OIDDigestAlgorithmSHA1), oid.Equal(OIDDigestAlgorithmECDSASHA1), - oid.Equal(OIDDigestAlgorithmDSA), oid.Equal(OIDDigestAlgorithmDSASHA1), - oid.Equal(OIDEncryptionAlgorithmRSA): - return crypto.SHA1, nil - case oid.Equal(OIDDigestAlgorithmSHA256), oid.Equal(OIDDigestAlgorithmECDSASHA256): - return crypto.SHA256, nil - case oid.Equal(OIDDigestAlgorithmSHA384), oid.Equal(OIDDigestAlgorithmECDSASHA384): - return crypto.SHA384, nil - case oid.Equal(OIDDigestAlgorithmSHA512), oid.Equal(OIDDigestAlgorithmECDSASHA512): - return crypto.SHA512, nil - } - return crypto.Hash(0), 
ErrUnsupportedAlgorithm -} - -// getDigestOIDForSignatureAlgorithm takes an x509.SignatureAlgorithm -// and returns the corresponding OID digest algorithm -func getDigestOIDForSignatureAlgorithm(digestAlg x509.SignatureAlgorithm) (asn1.ObjectIdentifier, error) { - switch digestAlg { - case x509.SHA1WithRSA, x509.ECDSAWithSHA1: - return OIDDigestAlgorithmSHA1, nil - case x509.SHA256WithRSA, x509.ECDSAWithSHA256: - return OIDDigestAlgorithmSHA256, nil - case x509.SHA384WithRSA, x509.ECDSAWithSHA384: - return OIDDigestAlgorithmSHA384, nil - case x509.SHA512WithRSA, x509.ECDSAWithSHA512: - return OIDDigestAlgorithmSHA512, nil - } - return nil, fmt.Errorf("pkcs7: cannot convert hash to oid, unknown hash algorithm") -} - -// getOIDForEncryptionAlgorithm takes the private key type of the signer and -// the OID of a digest algorithm to return the appropriate signerInfo.DigestEncryptionAlgorithm -func getOIDForEncryptionAlgorithm(pkey crypto.PrivateKey, OIDDigestAlg asn1.ObjectIdentifier) (asn1.ObjectIdentifier, error) { - switch pkey.(type) { - case *rsa.PrivateKey: - switch { - default: - return OIDEncryptionAlgorithmRSA, nil - case OIDDigestAlg.Equal(OIDEncryptionAlgorithmRSA): - return OIDEncryptionAlgorithmRSA, nil - case OIDDigestAlg.Equal(OIDDigestAlgorithmSHA1): - return OIDEncryptionAlgorithmRSASHA1, nil - case OIDDigestAlg.Equal(OIDDigestAlgorithmSHA256): - return OIDEncryptionAlgorithmRSASHA256, nil - case OIDDigestAlg.Equal(OIDDigestAlgorithmSHA384): - return OIDEncryptionAlgorithmRSASHA384, nil - case OIDDigestAlg.Equal(OIDDigestAlgorithmSHA512): - return OIDEncryptionAlgorithmRSASHA512, nil - } - case *ecdsa.PrivateKey: - switch { - case OIDDigestAlg.Equal(OIDDigestAlgorithmSHA1): - return OIDDigestAlgorithmECDSASHA1, nil - case OIDDigestAlg.Equal(OIDDigestAlgorithmSHA256): - return OIDDigestAlgorithmECDSASHA256, nil - case OIDDigestAlg.Equal(OIDDigestAlgorithmSHA384): - return OIDDigestAlgorithmECDSASHA384, nil - case OIDDigestAlg.Equal(OIDDigestAlgorithmSHA512): - return OIDDigestAlgorithmECDSASHA512, nil - } - case *dsa.PrivateKey: - return OIDDigestAlgorithmDSA, nil - } - return nil, fmt.Errorf("pkcs7: cannot convert encryption algorithm to oid, unknown private key type %T", pkey) - -} - -// Parse decodes a DER encoded PKCS7 package -func Parse(data []byte) (p7 *PKCS7, err error) { - if len(data) == 0 { - return nil, errors.New("pkcs7: input data is empty") - } - var info contentInfo - der, err := ber2der(data) - if err != nil { - return nil, err - } - rest, err := asn1.Unmarshal(der, &info) - if len(rest) > 0 { - err = asn1.SyntaxError{Msg: "trailing data"} - return - } - if err != nil { - return - } - - // fmt.Printf("--> Content Type: %s", info.ContentType) - switch { - case info.ContentType.Equal(OIDSignedData): - return parseSignedData(info.Content.Bytes) - case info.ContentType.Equal(OIDEnvelopedData): - return parseEnvelopedData(info.Content.Bytes) - case info.ContentType.Equal(OIDEncryptedData): - return parseEncryptedData(info.Content.Bytes) - } - return nil, ErrUnsupportedContentType -} - -func parseEnvelopedData(data []byte) (*PKCS7, error) { - var ed envelopedData - if _, err := asn1.Unmarshal(data, &ed); err != nil { - return nil, err - } - return &PKCS7{ - raw: ed, - }, nil -} - -func parseEncryptedData(data []byte) (*PKCS7, error) { - var ed encryptedData - if _, err := asn1.Unmarshal(data, &ed); err != nil { - return nil, err - } - return &PKCS7{ - raw: ed, - }, nil -} - -func (raw rawCertificates) Parse() ([]*x509.Certificate, error) { - if len(raw.Raw) == 0 { - 
return nil, nil
-	}
-
-	var val asn1.RawValue
-	if _, err := asn1.Unmarshal(raw.Raw, &val); err != nil {
-		return nil, err
-	}
-
-	return x509.ParseCertificates(val.Bytes)
-}
-
-func isCertMatchForIssuerAndSerial(cert *x509.Certificate, ias issuerAndSerial) bool {
-	return cert.SerialNumber.Cmp(ias.SerialNumber) == 0 && bytes.Equal(cert.RawIssuer, ias.IssuerName.FullBytes)
-}
-
-// Attribute represents a key value pair attribute. Value must be marshalable by
-// `encoding/asn1`
-type Attribute struct {
-	Type  asn1.ObjectIdentifier
-	Value interface{}
-}
-
-type attributes struct {
-	types  []asn1.ObjectIdentifier
-	values []interface{}
-}
-
-// Add adds the attribute, maintaining insertion order
-func (attrs *attributes) Add(attrType asn1.ObjectIdentifier, value interface{}) {
-	attrs.types = append(attrs.types, attrType)
-	attrs.values = append(attrs.values, value)
-}
-
-type sortableAttribute struct {
-	SortKey   []byte
-	Attribute attribute
-}
-
-type attributeSet []sortableAttribute
-
-func (sa attributeSet) Len() int {
-	return len(sa)
-}
-
-func (sa attributeSet) Less(i, j int) bool {
-	return bytes.Compare(sa[i].SortKey, sa[j].SortKey) < 0
-}
-
-func (sa attributeSet) Swap(i, j int) {
-	sa[i], sa[j] = sa[j], sa[i]
-}
-
-func (sa attributeSet) Attributes() []attribute {
-	attrs := make([]attribute, len(sa))
-	for i, attr := range sa {
-		attrs[i] = attr.Attribute
-	}
-	return attrs
-}
-
-func (attrs *attributes) ForMarshalling() ([]attribute, error) {
-	sortables := make(attributeSet, len(attrs.types))
-	for i := range sortables {
-		attrType := attrs.types[i]
-		attrValue := attrs.values[i]
-		asn1Value, err := asn1.Marshal(attrValue)
-		if err != nil {
-			return nil, err
-		}
-		attr := attribute{
-			Type:  attrType,
-			Value: asn1.RawValue{Tag: 17, IsCompound: true, Bytes: asn1Value}, // 17 == SET tag
-		}
-		encoded, err := asn1.Marshal(attr)
-		if err != nil {
-			return nil, err
-		}
-		sortables[i] = sortableAttribute{
-			SortKey:   encoded,
-			Attribute: attr,
-		}
-	}
-	sort.Sort(sortables)
-	return sortables.Attributes(), nil
-}
diff --git a/vendor/go.mozilla.org/pkcs7/sign.go b/vendor/go.mozilla.org/pkcs7/sign.go
deleted file mode 100644
index addd7638..00000000
--- a/vendor/go.mozilla.org/pkcs7/sign.go
+++ /dev/null
@@ -1,429 +0,0 @@
-package pkcs7
-
-import (
-	"bytes"
-	"crypto"
-	"crypto/dsa"
-	"crypto/rand"
-	"crypto/x509"
-	"crypto/x509/pkix"
-	"encoding/asn1"
-	"errors"
-	"fmt"
-	"math/big"
-	"time"
-)
-
-// SignedData is an opaque data structure for creating signed data payloads
-type SignedData struct {
-	sd                  signedData
-	certs               []*x509.Certificate
-	data, messageDigest []byte
-	digestOid           asn1.ObjectIdentifier
-	encryptionOid       asn1.ObjectIdentifier
-}
-
-// NewSignedData takes data and initializes a PKCS7 SignedData struct that is
-// ready to be signed via AddSigner. The digest algorithm is set to SHA1 by default
-// and can be changed by calling SetDigestAlgorithm.
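The Attribute type above is the caller-facing half of this machinery: values passed through SignerInfoConfig.ExtraSignedAttributes (defined just below) are marshaled with encoding/asn1 and DER-sorted by ForMarshalling. A sketch of attaching one custom signed attribute; the OID is an illustrative, application-specific value, not a registered one:

```go
package example

import (
	"crypto"
	"crypto/x509"
	"encoding/asn1"

	"go.mozilla.org/pkcs7"
)

// signWithBuildTag signs payload and embeds one extra signed attribute.
// cert and key are assumed to be loaded elsewhere.
func signWithBuildTag(payload []byte, cert *x509.Certificate, key crypto.PrivateKey) ([]byte, error) {
	sd, err := pkcs7.NewSignedData(payload)
	if err != nil {
		return nil, err
	}
	cfg := pkcs7.SignerInfoConfig{
		ExtraSignedAttributes: []pkcs7.Attribute{
			// Illustrative OID; the value is marshaled via encoding/asn1.
			{Type: asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 99999, 1}, Value: "build-1234"},
		},
	}
	if err := sd.AddSigner(cert, key, cfg); err != nil {
		return nil, err
	}
	return sd.Finish()
}
```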
-func NewSignedData(data []byte) (*SignedData, error) { - content, err := asn1.Marshal(data) - if err != nil { - return nil, err - } - ci := contentInfo{ - ContentType: OIDData, - Content: asn1.RawValue{Class: 2, Tag: 0, Bytes: content, IsCompound: true}, - } - sd := signedData{ - ContentInfo: ci, - Version: 1, - } - return &SignedData{sd: sd, data: data, digestOid: OIDDigestAlgorithmSHA1}, nil -} - -// SignerInfoConfig are optional values to include when adding a signer -type SignerInfoConfig struct { - ExtraSignedAttributes []Attribute - ExtraUnsignedAttributes []Attribute -} - -type signedData struct { - Version int `asn1:"default:1"` - DigestAlgorithmIdentifiers []pkix.AlgorithmIdentifier `asn1:"set"` - ContentInfo contentInfo - Certificates rawCertificates `asn1:"optional,tag:0"` - CRLs []pkix.CertificateList `asn1:"optional,tag:1"` - SignerInfos []signerInfo `asn1:"set"` -} - -type signerInfo struct { - Version int `asn1:"default:1"` - IssuerAndSerialNumber issuerAndSerial - DigestAlgorithm pkix.AlgorithmIdentifier - AuthenticatedAttributes []attribute `asn1:"optional,omitempty,tag:0"` - DigestEncryptionAlgorithm pkix.AlgorithmIdentifier - EncryptedDigest []byte - UnauthenticatedAttributes []attribute `asn1:"optional,omitempty,tag:1"` -} - -type attribute struct { - Type asn1.ObjectIdentifier - Value asn1.RawValue `asn1:"set"` -} - -func marshalAttributes(attrs []attribute) ([]byte, error) { - encodedAttributes, err := asn1.Marshal(struct { - A []attribute `asn1:"set"` - }{A: attrs}) - if err != nil { - return nil, err - } - - // Remove the leading sequence octets - var raw asn1.RawValue - asn1.Unmarshal(encodedAttributes, &raw) - return raw.Bytes, nil -} - -type rawCertificates struct { - Raw asn1.RawContent -} - -type issuerAndSerial struct { - IssuerName asn1.RawValue - SerialNumber *big.Int -} - -// SetDigestAlgorithm sets the digest algorithm to be used in the signing process. -// -// This should be called before adding signers -func (sd *SignedData) SetDigestAlgorithm(d asn1.ObjectIdentifier) { - sd.digestOid = d -} - -// SetEncryptionAlgorithm sets the encryption algorithm to be used in the signing process. -// -// This should be called before adding signers -func (sd *SignedData) SetEncryptionAlgorithm(d asn1.ObjectIdentifier) { - sd.encryptionOid = d -} - -// AddSigner is a wrapper around AddSignerChain() that adds a signer without any parent. -func (sd *SignedData) AddSigner(ee *x509.Certificate, pkey crypto.PrivateKey, config SignerInfoConfig) error { - var parents []*x509.Certificate - return sd.AddSignerChain(ee, pkey, parents, config) -} - -// AddSignerChain signs attributes about the content and adds certificates -// and signers infos to the Signed Data. The certificate and private key -// of the end-entity signer are used to issue the signature, and any -// parent of that end-entity that need to be added to the list of -// certifications can be specified in the parents slice. -// -// The signature algorithm used to hash the data is the one of the end-entity -// certificate. -func (sd *SignedData) AddSignerChain(ee *x509.Certificate, pkey crypto.PrivateKey, parents []*x509.Certificate, config SignerInfoConfig) error { -// Following RFC 2315, 9.2 SignerInfo type, the distinguished name of -// the issuer of the end-entity signer is stored in the issuerAndSerialNumber -// section of the SignedData.SignerInfo, alongside the serial number of -// the end-entity. 
- var ias issuerAndSerial - ias.SerialNumber = ee.SerialNumber - if len(parents) == 0 { - // no parent, the issuer is the end-entity cert itself - ias.IssuerName = asn1.RawValue{FullBytes: ee.RawIssuer} - } else { - err := verifyPartialChain(ee, parents) - if err != nil { - return err - } - // the first parent is the issuer - ias.IssuerName = asn1.RawValue{FullBytes: parents[0].RawSubject} - } - sd.sd.DigestAlgorithmIdentifiers = append(sd.sd.DigestAlgorithmIdentifiers, - pkix.AlgorithmIdentifier{Algorithm: sd.digestOid}, - ) - hash, err := getHashForOID(sd.digestOid) - if err != nil { - return err - } - h := hash.New() - h.Write(sd.data) - sd.messageDigest = h.Sum(nil) - encryptionOid, err := getOIDForEncryptionAlgorithm(pkey, sd.digestOid) - if err != nil { - return err - } - attrs := &attributes{} - attrs.Add(OIDAttributeContentType, sd.sd.ContentInfo.ContentType) - attrs.Add(OIDAttributeMessageDigest, sd.messageDigest) - attrs.Add(OIDAttributeSigningTime, time.Now().UTC()) - for _, attr := range config.ExtraSignedAttributes { - attrs.Add(attr.Type, attr.Value) - } - finalAttrs, err := attrs.ForMarshalling() - if err != nil { - return err - } - unsignedAttrs := &attributes{} - for _, attr := range config.ExtraUnsignedAttributes { - unsignedAttrs.Add(attr.Type, attr.Value) - } - finalUnsignedAttrs, err := unsignedAttrs.ForMarshalling() - if err != nil { - return err - } - // create signature of signed attributes - signature, err := signAttributes(finalAttrs, pkey, hash) - if err != nil { - return err - } - signer := signerInfo{ - AuthenticatedAttributes: finalAttrs, - UnauthenticatedAttributes: finalUnsignedAttrs, - DigestAlgorithm: pkix.AlgorithmIdentifier{Algorithm: sd.digestOid}, - DigestEncryptionAlgorithm: pkix.AlgorithmIdentifier{Algorithm: encryptionOid}, - IssuerAndSerialNumber: ias, - EncryptedDigest: signature, - Version: 1, - } - sd.certs = append(sd.certs, ee) - if len(parents) > 0 { - sd.certs = append(sd.certs, parents...) - } - sd.sd.SignerInfos = append(sd.sd.SignerInfos, signer) - return nil -} - -// SignWithoutAttr issues a signature on the content of the pkcs7 SignedData. -// Unlike AddSigner/AddSignerChain, it calculates the digest on the data alone -// and does not include any signed attributes like timestamp and so on. -// -// This function is needed to sign old Android APKs, something you probably -// shouldn't do unless you're maintaining backward compatibility for old -// applications. 
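For reference, a minimal signing sketch against the vendored go.mozilla.org/pkcs7 API being deleted here — not code from this repo; certificate and key loading is assumed to happen elsewhere:

```go
package pkcs7sketch

import (
	"crypto"
	"crypto/x509"

	"go.mozilla.org/pkcs7"
)

// signDetached produces a DER-encoded, detached PKCS#7 signature over payload.
func signDetached(payload []byte, cert *x509.Certificate, key crypto.PrivateKey) ([]byte, error) {
	sd, err := pkcs7.NewSignedData(payload)
	if err != nil {
		return nil, err
	}
	// SHA-1 is the package default; opt into SHA-256 before adding signers.
	sd.SetDigestAlgorithm(pkcs7.OIDDigestAlgorithmSHA256)
	if err := sd.AddSigner(cert, key, pkcs7.SignerInfoConfig{}); err != nil {
		return nil, err
	}
	// Detach drops the embedded content and must be called right before Finish.
	sd.Detach()
	return sd.Finish()
}
```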
-func (sd *SignedData) SignWithoutAttr(ee *x509.Certificate, pkey crypto.PrivateKey, config SignerInfoConfig) error { - var signature []byte - sd.sd.DigestAlgorithmIdentifiers = append(sd.sd.DigestAlgorithmIdentifiers, pkix.AlgorithmIdentifier{Algorithm: sd.digestOid}) - hash, err := getHashForOID(sd.digestOid) - if err != nil { - return err - } - h := hash.New() - h.Write(sd.data) - sd.messageDigest = h.Sum(nil) - switch pkey := pkey.(type) { - case *dsa.PrivateKey: - // dsa doesn't implement crypto.Signer so we make a special case - // https://github.com/golang/go/issues/27889 - r, s, err := dsa.Sign(rand.Reader, pkey, sd.messageDigest) - if err != nil { - return err - } - signature, err = asn1.Marshal(dsaSignature{r, s}) - if err != nil { - return err - } - default: - key, ok := pkey.(crypto.Signer) - if !ok { - return errors.New("pkcs7: private key does not implement crypto.Signer") - } - signature, err = key.Sign(rand.Reader, sd.messageDigest, hash) - if err != nil { - return err - } - } - var ias issuerAndSerial - ias.SerialNumber = ee.SerialNumber - // no parent, the issue is the end-entity cert itself - ias.IssuerName = asn1.RawValue{FullBytes: ee.RawIssuer} - if sd.encryptionOid == nil { - // if the encryption algorithm wasn't set by SetEncryptionAlgorithm, - // infer it from the digest algorithm - sd.encryptionOid, err = getOIDForEncryptionAlgorithm(pkey, sd.digestOid) - } - if err != nil { - return err - } - signer := signerInfo{ - DigestAlgorithm: pkix.AlgorithmIdentifier{Algorithm: sd.digestOid}, - DigestEncryptionAlgorithm: pkix.AlgorithmIdentifier{Algorithm: sd.encryptionOid}, - IssuerAndSerialNumber: ias, - EncryptedDigest: signature, - Version: 1, - } - // create signature of signed attributes - sd.certs = append(sd.certs, ee) - sd.sd.SignerInfos = append(sd.sd.SignerInfos, signer) - return nil -} - -func (si *signerInfo) SetUnauthenticatedAttributes(extraUnsignedAttrs []Attribute) error { - unsignedAttrs := &attributes{} - for _, attr := range extraUnsignedAttrs { - unsignedAttrs.Add(attr.Type, attr.Value) - } - finalUnsignedAttrs, err := unsignedAttrs.ForMarshalling() - if err != nil { - return err - } - - si.UnauthenticatedAttributes = finalUnsignedAttrs - - return nil -} - -// AddCertificate adds the certificate to the payload. Useful for parent certificates -func (sd *SignedData) AddCertificate(cert *x509.Certificate) { - sd.certs = append(sd.certs, cert) -} - -// Detach removes content from the signed data struct to make it a detached signature. 
-// This must be called right before Finish() -func (sd *SignedData) Detach() { - sd.sd.ContentInfo = contentInfo{ContentType: OIDData} -} - -// GetSignedData returns the private Signed Data -func (sd *SignedData) GetSignedData() *signedData { - return &sd.sd -} - -// Finish marshals the content and its signers -func (sd *SignedData) Finish() ([]byte, error) { - sd.sd.Certificates = marshalCertificates(sd.certs) - inner, err := asn1.Marshal(sd.sd) - if err != nil { - return nil, err - } - outer := contentInfo{ - ContentType: OIDSignedData, - Content: asn1.RawValue{Class: 2, Tag: 0, Bytes: inner, IsCompound: true}, - } - return asn1.Marshal(outer) -} - -// RemoveAuthenticatedAttributes removes authenticated attributes from signedData -// similar to OpenSSL's PKCS7_NOATTR or -noattr flags -func (sd *SignedData) RemoveAuthenticatedAttributes() { - for i := range sd.sd.SignerInfos { - sd.sd.SignerInfos[i].AuthenticatedAttributes = nil - } -} - -// RemoveUnauthenticatedAttributes removes unauthenticated attributes from signedData -func (sd *SignedData) RemoveUnauthenticatedAttributes() { - for i := range sd.sd.SignerInfos { - sd.sd.SignerInfos[i].UnauthenticatedAttributes = nil - } -} - -// verifyPartialChain checks that a given cert is issued by the first parent in the list, -// then continue down the path. It doesn't require the last parent to be a root CA, -// or to be trusted in any truststore. It simply verifies that the chain provided, albeit -// partial, makes sense. -func verifyPartialChain(cert *x509.Certificate, parents []*x509.Certificate) error { - if len(parents) == 0 { - return fmt.Errorf("pkcs7: zero parents provided to verify the signature of certificate %q", cert.Subject.CommonName) - } - err := cert.CheckSignatureFrom(parents[0]) - if err != nil { - return fmt.Errorf("pkcs7: certificate signature from parent is invalid: %v", err) - } - if len(parents) == 1 { - // there is no more parent to check, return - return nil - } - return verifyPartialChain(parents[0], parents[1:]) -} - -func cert2issuerAndSerial(cert *x509.Certificate) (issuerAndSerial, error) { - var ias issuerAndSerial - // The issuer RDNSequence has to match exactly the sequence in the certificate - // We cannot use cert.Issuer.ToRDNSequence() here since it mangles the sequence - ias.IssuerName = asn1.RawValue{FullBytes: cert.RawIssuer} - ias.SerialNumber = cert.SerialNumber - - return ias, nil -} - -// signs the DER encoded form of the attributes with the private key -func signAttributes(attrs []attribute, pkey crypto.PrivateKey, digestAlg crypto.Hash) ([]byte, error) { - attrBytes, err := marshalAttributes(attrs) - if err != nil { - return nil, err - } - h := digestAlg.New() - h.Write(attrBytes) - hash := h.Sum(nil) - - // dsa doesn't implement crypto.Signer so we make a special case - // https://github.com/golang/go/issues/27889 - switch pkey := pkey.(type) { - case *dsa.PrivateKey: - r, s, err := dsa.Sign(rand.Reader, pkey, hash) - if err != nil { - return nil, err - } - return asn1.Marshal(dsaSignature{r, s}) - } - - key, ok := pkey.(crypto.Signer) - if !ok { - return nil, errors.New("pkcs7: private key does not implement crypto.Signer") - } - return key.Sign(rand.Reader, hash, digestAlg) -} - -type dsaSignature struct { - R, S *big.Int -} - -// concats and wraps the certificates in the RawValue structure -func marshalCertificates(certs []*x509.Certificate) rawCertificates { - var buf bytes.Buffer - for _, cert := range certs { - buf.Write(cert.Raw) - } - rawCerts, _ := marshalCertificateBytes(buf.Bytes()) - 
return rawCerts -} - -// Even though, the tag & length are stripped out during marshalling the -// RawContent, we have to encode it into the RawContent. If its missing, -// then `asn1.Marshal()` will strip out the certificate wrapper instead. -func marshalCertificateBytes(certs []byte) (rawCertificates, error) { - var val = asn1.RawValue{Bytes: certs, Class: 2, Tag: 0, IsCompound: true} - b, err := asn1.Marshal(val) - if err != nil { - return rawCertificates{}, err - } - return rawCertificates{Raw: b}, nil -} - -// DegenerateCertificate creates a signed data structure containing only the -// provided certificate or certificate chain. -func DegenerateCertificate(cert []byte) ([]byte, error) { - rawCert, err := marshalCertificateBytes(cert) - if err != nil { - return nil, err - } - emptyContent := contentInfo{ContentType: OIDData} - sd := signedData{ - Version: 1, - ContentInfo: emptyContent, - Certificates: rawCert, - CRLs: []pkix.CertificateList{}, - } - content, err := asn1.Marshal(sd) - if err != nil { - return nil, err - } - signedContent := contentInfo{ - ContentType: OIDSignedData, - Content: asn1.RawValue{Class: 2, Tag: 0, Bytes: content, IsCompound: true}, - } - return asn1.Marshal(signedContent) -} diff --git a/vendor/go.mozilla.org/pkcs7/verify.go b/vendor/go.mozilla.org/pkcs7/verify.go deleted file mode 100644 index c8ead236..00000000 --- a/vendor/go.mozilla.org/pkcs7/verify.go +++ /dev/null @@ -1,264 +0,0 @@ -package pkcs7 - -import ( - "crypto/subtle" - "crypto/x509" - "crypto/x509/pkix" - "encoding/asn1" - "errors" - "fmt" - "time" -) - -// Verify is a wrapper around VerifyWithChain() that initializes an empty -// trust store, effectively disabling certificate verification when validating -// a signature. -func (p7 *PKCS7) Verify() (err error) { - return p7.VerifyWithChain(nil) -} - -// VerifyWithChain checks the signatures of a PKCS7 object. -// If truststore is not nil, it also verifies the chain of trust of the end-entity -// signer cert to one of the root in the truststore. 
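And the matching verification path, again as a hedged sketch rather than code from this repo (pkcs7.Parse is the entry point of the same removed package):

```go
package pkcs7sketch

import (
	"crypto/x509"

	"go.mozilla.org/pkcs7"
)

// verifySignedData parses a DER-encoded PKCS#7 blob and checks every signer
// against the supplied root pool, returning the signed content on success.
func verifySignedData(der []byte, roots *x509.CertPool) ([]byte, error) {
	p7, err := pkcs7.Parse(der)
	if err != nil {
		return nil, err
	}
	// A nil pool (what p7.Verify() passes) skips chain-of-trust validation.
	if err := p7.VerifyWithChain(roots); err != nil {
		return nil, err
	}
	return p7.Content, nil
}
```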
-func (p7 *PKCS7) VerifyWithChain(truststore *x509.CertPool) (err error) { - if len(p7.Signers) == 0 { - return errors.New("pkcs7: Message has no signers") - } - for _, signer := range p7.Signers { - if err := verifySignature(p7, signer, truststore); err != nil { - return err - } - } - return nil -} - -func verifySignature(p7 *PKCS7, signer signerInfo, truststore *x509.CertPool) (err error) { - signedData := p7.Content - ee := getCertFromCertsByIssuerAndSerial(p7.Certificates, signer.IssuerAndSerialNumber) - if ee == nil { - return errors.New("pkcs7: No certificate for signer") - } - signingTime := time.Now().UTC() - if len(signer.AuthenticatedAttributes) > 0 { - // TODO(fullsailor): First check the content type match - var digest []byte - err := unmarshalAttribute(signer.AuthenticatedAttributes, OIDAttributeMessageDigest, &digest) - if err != nil { - return err - } - hash, err := getHashForOID(signer.DigestAlgorithm.Algorithm) - if err != nil { - return err - } - h := hash.New() - h.Write(p7.Content) - computed := h.Sum(nil) - if subtle.ConstantTimeCompare(digest, computed) != 1 { - return &MessageDigestMismatchError{ - ExpectedDigest: digest, - ActualDigest: computed, - } - } - signedData, err = marshalAttributes(signer.AuthenticatedAttributes) - if err != nil { - return err - } - err = unmarshalAttribute(signer.AuthenticatedAttributes, OIDAttributeSigningTime, &signingTime) - if err == nil { - // signing time found, performing validity check - if signingTime.After(ee.NotAfter) || signingTime.Before(ee.NotBefore) { - return fmt.Errorf("pkcs7: signing time %q is outside of certificate validity %q to %q", - signingTime.Format(time.RFC3339), - ee.NotBefore.Format(time.RFC3339), - ee.NotBefore.Format(time.RFC3339)) - } - } - } - if truststore != nil { - _, err = verifyCertChain(ee, p7.Certificates, truststore, signingTime) - if err != nil { - return err - } - } - sigalg, err := getSignatureAlgorithm(signer.DigestEncryptionAlgorithm, signer.DigestAlgorithm) - if err != nil { - return err - } - return ee.CheckSignature(sigalg, signedData, signer.EncryptedDigest) -} - -// GetOnlySigner returns an x509.Certificate for the first signer of the signed -// data payload. If there are more or less than one signer, nil is returned -func (p7 *PKCS7) GetOnlySigner() *x509.Certificate { - if len(p7.Signers) != 1 { - return nil - } - signer := p7.Signers[0] - return getCertFromCertsByIssuerAndSerial(p7.Certificates, signer.IssuerAndSerialNumber) -} - -// UnmarshalSignedAttribute decodes a single attribute from the signer info -func (p7 *PKCS7) UnmarshalSignedAttribute(attributeType asn1.ObjectIdentifier, out interface{}) error { - sd, ok := p7.raw.(signedData) - if !ok { - return errors.New("pkcs7: payload is not signedData content") - } - if len(sd.SignerInfos) < 1 { - return errors.New("pkcs7: payload has no signers") - } - attributes := sd.SignerInfos[0].AuthenticatedAttributes - return unmarshalAttribute(attributes, attributeType, out) -} - -func parseSignedData(data []byte) (*PKCS7, error) { - var sd signedData - asn1.Unmarshal(data, &sd) - certs, err := sd.Certificates.Parse() - if err != nil { - return nil, err - } - // fmt.Printf("--> Signed Data Version %d\n", sd.Version) - - var compound asn1.RawValue - var content unsignedData - - // The Content.Bytes maybe empty on PKI responses. 
- if len(sd.ContentInfo.Content.Bytes) > 0 { - if _, err := asn1.Unmarshal(sd.ContentInfo.Content.Bytes, &compound); err != nil { - return nil, err - } - } - // Compound octet string - if compound.IsCompound { - if compound.Tag == 4 { - if _, err = asn1.Unmarshal(compound.Bytes, &content); err != nil { - return nil, err - } - } else { - content = compound.Bytes - } - } else { - // assuming this is tag 04 - content = compound.Bytes - } - return &PKCS7{ - Content: content, - Certificates: certs, - CRLs: sd.CRLs, - Signers: sd.SignerInfos, - raw: sd}, nil -} - -// verifyCertChain takes an end-entity certs, a list of potential intermediates and a -// truststore, and built all potential chains between the EE and a trusted root. -// -// When verifying chains that may have expired, currentTime can be set to a past date -// to allow the verification to pass. If unset, currentTime is set to the current UTC time. -func verifyCertChain(ee *x509.Certificate, certs []*x509.Certificate, truststore *x509.CertPool, currentTime time.Time) (chains [][]*x509.Certificate, err error) { - intermediates := x509.NewCertPool() - for _, intermediate := range certs { - intermediates.AddCert(intermediate) - } - verifyOptions := x509.VerifyOptions{ - Roots: truststore, - Intermediates: intermediates, - KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageAny}, - CurrentTime: currentTime, - } - chains, err = ee.Verify(verifyOptions) - if err != nil { - return chains, fmt.Errorf("pkcs7: failed to verify certificate chain: %v", err) - } - return -} - -// MessageDigestMismatchError is returned when the signer data digest does not -// match the computed digest for the contained content -type MessageDigestMismatchError struct { - ExpectedDigest []byte - ActualDigest []byte -} - -func (err *MessageDigestMismatchError) Error() string { - return fmt.Sprintf("pkcs7: Message digest mismatch\n\tExpected: %X\n\tActual : %X", err.ExpectedDigest, err.ActualDigest) -} - -func getSignatureAlgorithm(digestEncryption, digest pkix.AlgorithmIdentifier) (x509.SignatureAlgorithm, error) { - switch { - case digestEncryption.Algorithm.Equal(OIDDigestAlgorithmECDSASHA1): - return x509.ECDSAWithSHA1, nil - case digestEncryption.Algorithm.Equal(OIDDigestAlgorithmECDSASHA256): - return x509.ECDSAWithSHA256, nil - case digestEncryption.Algorithm.Equal(OIDDigestAlgorithmECDSASHA384): - return x509.ECDSAWithSHA384, nil - case digestEncryption.Algorithm.Equal(OIDDigestAlgorithmECDSASHA512): - return x509.ECDSAWithSHA512, nil - case digestEncryption.Algorithm.Equal(OIDEncryptionAlgorithmRSA), - digestEncryption.Algorithm.Equal(OIDEncryptionAlgorithmRSASHA1), - digestEncryption.Algorithm.Equal(OIDEncryptionAlgorithmRSASHA256), - digestEncryption.Algorithm.Equal(OIDEncryptionAlgorithmRSASHA384), - digestEncryption.Algorithm.Equal(OIDEncryptionAlgorithmRSASHA512): - switch { - case digest.Algorithm.Equal(OIDDigestAlgorithmSHA1): - return x509.SHA1WithRSA, nil - case digest.Algorithm.Equal(OIDDigestAlgorithmSHA256): - return x509.SHA256WithRSA, nil - case digest.Algorithm.Equal(OIDDigestAlgorithmSHA384): - return x509.SHA384WithRSA, nil - case digest.Algorithm.Equal(OIDDigestAlgorithmSHA512): - return x509.SHA512WithRSA, nil - default: - return -1, fmt.Errorf("pkcs7: unsupported digest %q for encryption algorithm %q", - digest.Algorithm.String(), digestEncryption.Algorithm.String()) - } - case digestEncryption.Algorithm.Equal(OIDDigestAlgorithmDSA), - digestEncryption.Algorithm.Equal(OIDDigestAlgorithmDSASHA1): - switch { - case 
digest.Algorithm.Equal(OIDDigestAlgorithmSHA1): - return x509.DSAWithSHA1, nil - case digest.Algorithm.Equal(OIDDigestAlgorithmSHA256): - return x509.DSAWithSHA256, nil - default: - return -1, fmt.Errorf("pkcs7: unsupported digest %q for encryption algorithm %q", - digest.Algorithm.String(), digestEncryption.Algorithm.String()) - } - case digestEncryption.Algorithm.Equal(OIDEncryptionAlgorithmECDSAP256), - digestEncryption.Algorithm.Equal(OIDEncryptionAlgorithmECDSAP384), - digestEncryption.Algorithm.Equal(OIDEncryptionAlgorithmECDSAP521): - switch { - case digest.Algorithm.Equal(OIDDigestAlgorithmSHA1): - return x509.ECDSAWithSHA1, nil - case digest.Algorithm.Equal(OIDDigestAlgorithmSHA256): - return x509.ECDSAWithSHA256, nil - case digest.Algorithm.Equal(OIDDigestAlgorithmSHA384): - return x509.ECDSAWithSHA384, nil - case digest.Algorithm.Equal(OIDDigestAlgorithmSHA512): - return x509.ECDSAWithSHA512, nil - default: - return -1, fmt.Errorf("pkcs7: unsupported digest %q for encryption algorithm %q", - digest.Algorithm.String(), digestEncryption.Algorithm.String()) - } - default: - return -1, fmt.Errorf("pkcs7: unsupported algorithm %q", - digestEncryption.Algorithm.String()) - } -} - -func getCertFromCertsByIssuerAndSerial(certs []*x509.Certificate, ias issuerAndSerial) *x509.Certificate { - for _, cert := range certs { - if isCertMatchForIssuerAndSerial(cert, ias) { - return cert - } - } - return nil -} - -func unmarshalAttribute(attrs []attribute, attributeType asn1.ObjectIdentifier, out interface{}) error { - for _, attr := range attrs { - if attr.Type.Equal(attributeType) { - _, err := asn1.Unmarshal(attr.Value.Bytes, out) - return err - } - } - return errors.New("pkcs7: attribute type not in attributes") -} diff --git a/vendor/go.step.sm/cli-utils/LICENSE b/vendor/go.step.sm/cli-utils/LICENSE deleted file mode 100644 index 261eeb9e..00000000 --- a/vendor/go.step.sm/cli-utils/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/go.step.sm/cli-utils/command/command.go b/vendor/go.step.sm/cli-utils/command/command.go deleted file mode 100644 index 41d7b4c4..00000000 --- a/vendor/go.step.sm/cli-utils/command/command.go +++ /dev/null @@ -1,212 +0,0 @@ -package command - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "reflect" - "strings" - - "github.com/pkg/errors" - "github.com/urfave/cli" - "go.step.sm/cli-utils/config" - "go.step.sm/cli-utils/usage" -) - -// IgnoreEnvVar is a value added to a flag EnvVar to avoid the use of -// environment variables or configuration files. -const IgnoreEnvVar = "STEP_IGNORE_ENV_VAR" - -var cmds []cli.Command -var currentContext *cli.Context - -func init() { - os.Unsetenv(IgnoreEnvVar) - cmds = []cli.Command{ - usage.HelpCommand(), - } -} - -// Register adds the given command to the global list of commands. -// It sets recursively the command Flags environment variables. 
-func Register(c cli.Command) { - setEnvVar(&c) - cmds = append(cmds, c) -} - -// Retrieve returns all commands -func Retrieve() []cli.Command { - return cmds -} - -// ActionFunc returns a cli.ActionFunc that stores the context. -func ActionFunc(fn cli.ActionFunc) cli.ActionFunc { - return func(ctx *cli.Context) error { - currentContext = ctx - return fn(ctx) - } -} - -// IsForce returns if the force flag was passed -func IsForce() bool { - return currentContext != nil && currentContext.Bool("force") -} - -// getConfigVars load the defaults.json file and sets the flags if they are not -// already set or the EnvVar is set to IgnoreEnvVar. -// -// TODO(mariano): right now it only supports parameters at first level. -func getConfigVars(ctx *cli.Context) error { - configFile := ctx.GlobalString("config") - if configFile == "" { - configFile = filepath.Join(config.StepPath(), "config", "defaults.json") - } - - b, err := ioutil.ReadFile(configFile) - if err != nil { - return nil - } - - m := make(map[string]interface{}) - if err := json.Unmarshal(b, &m); err != nil { - return errors.Wrapf(err, "error parsing %s", configFile) - } - - flags := make(map[string]cli.Flag) - for _, f := range ctx.Command.Flags { - name := strings.Split(f.GetName(), ",")[0] - flags[name] = f - } - - for _, name := range ctx.FlagNames() { - if ctx.IsSet(name) { - continue - } - - // Skip if EnvVar == IgnoreEnvVar - if f, ok := flags[name]; ok { - if getFlagEnvVar(f) == IgnoreEnvVar { - continue - } - } - - if v, ok := m[name]; ok { - ctx.Set(name, fmt.Sprintf("%v", v)) - } - } - - return nil -} - -// getEnvVar generates the environment variable for the given flag name. -func getEnvVar(name string) string { - parts := strings.Split(name, ",") - name = strings.TrimSpace(parts[0]) - name = strings.Replace(name, "-", "_", -1) - return "STEP_" + strings.ToUpper(name) -} - -// getFlagEnvVar returns the value of the EnvVar field of a flag. -func getFlagEnvVar(f cli.Flag) string { - v := reflect.ValueOf(f) - if v.Kind() == reflect.Ptr { - v = v.Elem() - } - if v.Kind() == reflect.Struct { - envVar := v.FieldByName("EnvVar") - if envVar.IsValid() { - return envVar.String() - } - } - return "" -} - -// setEnvVar sets the the EnvVar element to each flag recursively. 
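To illustrate the registration flow documented above, a hypothetical command wired through this (now removed) package might look like the sketch below; the "status" command, its flag, and statusAction are invented for illustration. Flags without an explicit EnvVar get one derived from the flag name by getEnvVar, e.g. --ca-url becomes STEP_CA_URL.

```go
package sketch

import (
	"github.com/urfave/cli"
	"go.step.sm/cli-utils/command"
)

func statusAction(ctx *cli.Context) error { return nil }

// registerStatus wires a hypothetical "status" command into the global
// command list maintained by the command package.
func registerStatus() {
	command.Register(cli.Command{
		Name:  "status",
		Usage: "print CA status",
		Flags: []cli.Flag{
			cli.StringFlag{Name: "ca-url"}, // EnvVar becomes STEP_CA_URL
		},
		// ActionFunc stores the context so helpers like IsForce work later.
		Action: command.ActionFunc(statusAction),
	})
}
```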
-func setEnvVar(c *cli.Command) { - if c == nil { - return - } - - // Enable getting the flags from a json file - if c.Before == nil && c.Action != nil { - c.Before = getConfigVars - } - - // Enable getting the flags from environment variables - for i := range c.Flags { - envVar := getEnvVar(c.Flags[i].GetName()) - switch f := c.Flags[i].(type) { - case cli.BoolFlag: - if f.EnvVar == "" { - f.EnvVar = envVar - c.Flags[i] = f - } - case cli.BoolTFlag: - if f.EnvVar == "" { - f.EnvVar = envVar - c.Flags[i] = f - } - case cli.DurationFlag: - if f.EnvVar == "" { - f.EnvVar = envVar - c.Flags[i] = f - } - case cli.Float64Flag: - if f.EnvVar == "" { - f.EnvVar = envVar - c.Flags[i] = f - } - case cli.GenericFlag: - if f.EnvVar == "" { - f.EnvVar = envVar - c.Flags[i] = f - } - case cli.Int64Flag: - if f.EnvVar == "" { - f.EnvVar = envVar - c.Flags[i] = f - } - case cli.IntFlag: - if f.EnvVar == "" { - f.EnvVar = envVar - c.Flags[i] = f - } - case cli.IntSliceFlag: - if f.EnvVar == "" { - f.EnvVar = envVar - c.Flags[i] = f - } - case cli.Int64SliceFlag: - if f.EnvVar == "" { - f.EnvVar = envVar - c.Flags[i] = f - } - case cli.StringFlag: - if f.EnvVar == "" { - f.EnvVar = envVar - c.Flags[i] = f - } - case cli.StringSliceFlag: - if f.EnvVar == "" { - f.EnvVar = envVar - c.Flags[i] = f - } - case cli.Uint64Flag: - if f.EnvVar == "" { - f.EnvVar = envVar - c.Flags[i] = f - } - case cli.UintFlag: - if f.EnvVar == "" { - f.EnvVar = envVar - c.Flags[i] = f - } - } - } - - for i := range c.Subcommands { - setEnvVar(&c.Subcommands[i]) - } -} diff --git a/vendor/go.step.sm/cli-utils/config/config.go b/vendor/go.step.sm/cli-utils/config/config.go deleted file mode 100644 index 4ee3e544..00000000 --- a/vendor/go.step.sm/cli-utils/config/config.go +++ /dev/null @@ -1,136 +0,0 @@ -package config - -import ( - "fmt" - "log" - "os" - "os/user" - "path/filepath" - "runtime" - "strings" - "time" -) - -// version and buildTime are filled in during build by the Makefile -var ( - name = "Smallstep CLI" - buildTime = "N/A" - commit = "N/A" -) - -// StepPathEnv defines the name of the environment variable that can overwrite -// the default configuration path. -const StepPathEnv = "STEPPATH" - -// HomeEnv defines the name of the environment variable that can overwrite the -// default home directory. -const HomeEnv = "HOME" - -// stepPath will be populated in init() with the proper STEPPATH. -var stepPath string - -// homePath will be populated in init() with the proper HOME. -var homePath string - -// StepPath returns the path for the step configuration directory, this is -// defined by the environment variable STEPPATH or if this is not set it will -// default to '$HOME/.step'. -func StepPath() string { - return stepPath -} - -// Home returns the user home directory using the environment variable HOME or -// the os/user package. -func Home() string { - return homePath -} - -// StepAbs returns the given path relative to the StepPath if it's not an -// absolute path, relative to the home directory using the special string "~/", -// or relative to the working directory using "./" -// -// Relative paths like 'certs/root_ca.crt' will be converted to -// '$STEPPATH/certs/root_ca.crt', but paths like './certs/root_ca.crt' will be -// relative to the current directory. Home relative paths like -// ~/certs/root_ca.crt will be converted to '$HOME/certs/root_ca.crt'. And -// absolute paths like '/certs/root_ca.crt' will remain the same. 
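Concretely, the resolution rules above imply the following behavior, assuming HOME=/home/jane and STEPPATH unset, so the step path defaults to /home/jane/.step:

```go
package main

import (
	"fmt"

	"go.step.sm/cli-utils/config"
)

// Assuming HOME=/home/jane and STEPPATH unset, this prints:
//
//	/home/jane/.step/certs/root_ca.crt
//	/home/jane/certs/root_ca.crt
//	<working dir>/certs/root_ca.crt
//	/certs/root_ca.crt
func main() {
	for _, p := range []string{
		"certs/root_ca.crt",  // relative: joined to the step path
		"~/certs/root_ca.crt", // home-relative
		"./certs/root_ca.crt", // working-directory relative
		"/certs/root_ca.crt",  // absolute: returned unchanged
	} {
		fmt.Println(config.StepAbs(p))
	}
}
```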
-func StepAbs(path string) string { - if filepath.IsAbs(path) { - return path - } - // Windows accept both \ and / - slashed := filepath.ToSlash(path) - switch { - case strings.HasPrefix(slashed, "~/"): - return filepath.Join(homePath, path[2:]) - case strings.HasPrefix(slashed, "./"), strings.HasPrefix(slashed, "../"): - if abs, err := filepath.Abs(path); err == nil { - return abs - } - return path - default: - return filepath.Join(stepPath, path) - } -} - -func init() { - l := log.New(os.Stderr, "", 0) - - // Get home path from environment or from the user object. - homePath = os.Getenv(HomeEnv) - if homePath == "" { - usr, err := user.Current() - if err == nil && usr.HomeDir != "" { - homePath = usr.HomeDir - } else { - l.Fatalf("Error obtaining home directory, please define environment variable %s.", HomeEnv) - } - } - - // Get step path from environment or relative to home. - stepPath = os.Getenv(StepPathEnv) - if stepPath == "" { - stepPath = filepath.Join(homePath, ".step") - } - - // Check for presence or attempt to create it if necessary. - // - // Some environments (e.g. third party docker images) might fail creating - // the directory, so this should not panic if it can't. - if fi, err := os.Stat(stepPath); err != nil { - os.MkdirAll(stepPath, 0700) - } else if !fi.IsDir() { - l.Fatalf("File '%s' is not a directory.", stepPath) - } - // cleanup - homePath = filepath.Clean(homePath) - stepPath = filepath.Clean(stepPath) -} - -// Set updates the Version and ReleaseDate -func Set(n, v, t string) { - name = n - buildTime = t - commit = v -} - -// Version returns the current version of the binary -func Version() string { - out := commit - if commit == "N/A" { - out = "0000000-dev" - } - - return fmt.Sprintf("%s/%s (%s/%s)", - name, out, runtime.GOOS, runtime.GOARCH) -} - -// ReleaseDate returns the time of when the binary was built -func ReleaseDate() string { - out := buildTime - if buildTime == "N/A" { - out = time.Now().UTC().Format("2006-01-02 15:04 MST") - } - - return out -} diff --git a/vendor/go.step.sm/cli-utils/errs/errs.go b/vendor/go.step.sm/cli-utils/errs/errs.go deleted file mode 100644 index fa4be767..00000000 --- a/vendor/go.step.sm/cli-utils/errs/errs.go +++ /dev/null @@ -1,327 +0,0 @@ -package errs - -import ( - "fmt" - "os" - "strings" - - "github.com/pkg/errors" - "github.com/urfave/cli" -) - -// NewError returns a new Error for the given format and arguments -func NewError(format string, args ...interface{}) error { - return errors.Errorf(format, args...) -} - -// NewExitError returns an error that the urfave/cli package will handle and -// will show the given error and exit with the given code. -func NewExitError(err error, exitCode int) error { - return cli.NewExitError(err, exitCode) -} - -// Wrap returns a new error wrapped by the given error with the given message. -// If the given error implements the errors.Cause interface, the base error is -// used. If the given error is wrapped by a package name, the error wrapped -// will be the string after the last colon. -func Wrap(err error, format string, args ...interface{}) error { - if err == nil { - return nil - } - cause := errors.Cause(err) - if cause == err { - str := err.Error() - if i := strings.LastIndexByte(str, ':'); i >= 0 { - str = strings.TrimSpace(str[i:]) - return errors.Wrapf(fmt.Errorf(str), format, args...) - } - } - return errors.Wrapf(cause, format, args...) -} - -// InsecureCommand returns an error with a message saying that the current -// command requires the insecure flag. 
-func InsecureCommand(ctx *cli.Context) error { - return errors.Errorf("'%s %s' requires the '--insecure' flag", ctx.App.Name, ctx.Command.Name) -} - -// EqualArguments returns an error saying that the given positional arguments -// cannot be equal. -func EqualArguments(ctx *cli.Context, arg1, arg2 string) error { - return errors.Errorf("positional arguments <%s> and <%s> cannot be equal in '%s'", arg1, arg2, usage(ctx)) -} - -// MissingArguments returns an error with a missing arguments message for the -// given positional argument names. -func MissingArguments(ctx *cli.Context, argNames ...string) error { - switch len(argNames) { - case 0: - return errors.Errorf("missing positional arguments in '%s'", usage(ctx)) - case 1: - return errors.Errorf("missing positional argument <%s> in '%s'", argNames[0], usage(ctx)) - default: - args := make([]string, len(argNames)) - for i, name := range argNames { - args[i] = "<" + name + ">" - } - return errors.Errorf("missing positional argument %s in '%s'", strings.Join(args, " "), usage(ctx)) - } -} - -// NumberOfArguments returns nil if the number of positional arguments is -// equal to the required one. It will return an appropriate error if they are -// not. -func NumberOfArguments(ctx *cli.Context, required int) error { - n := ctx.NArg() - switch { - case n < required: - return TooFewArguments(ctx) - case n > required: - return TooManyArguments(ctx) - default: - return nil - } -} - -// MinMaxNumberOfArguments returns nil if the number of positional arguments -// between the min/max range. It will return an appropriate error if they are -// not. -func MinMaxNumberOfArguments(ctx *cli.Context, min, max int) error { - n := ctx.NArg() - switch { - case n < min: - return TooFewArguments(ctx) - case n > max: - return TooManyArguments(ctx) - default: - return nil - } -} - -// TooFewArguments returns an error with a few arguments were provided message. -func TooFewArguments(ctx *cli.Context) error { - return errors.Errorf("not enough positional arguments were provided in '%s'", usage(ctx)) -} - -// TooManyArguments returns an error with a too many arguments were provided -// message. -func TooManyArguments(ctx *cli.Context) error { - return errors.Errorf("too many positional arguments were provided in '%s'", usage(ctx)) -} - -// InsecureArgument returns an error with the given argument requiring the -// --insecure flag. -func InsecureArgument(ctx *cli.Context, name string) error { - return errors.Errorf("positional argument <%s> requires the '--insecure' flag", name) -} - -// FlagValueInsecure returns an error with the given flag and value requiring -// the --insecure flag. -func FlagValueInsecure(ctx *cli.Context, flag string, value string) error { - return errors.Errorf("flag '--%s %s' requires the '--insecure' flag", flag, value) -} - -// InvalidFlagValue returns an error with the given value being missing or -// invalid for the given flag. Optionally it lists the given formatted options -// at the end. -func InvalidFlagValue(ctx *cli.Context, flag string, value string, options string) error { - var format string - if len(value) == 0 { - format = fmt.Sprintf("missing value for flag '--%s'", flag) - } else { - format = fmt.Sprintf("invalid value '%s' for flag '--%s'", value, flag) - } - - if len(options) == 0 { - return errors.New(format) - } - - return errors.New(format + "; options are " + options) -} - -// InvalidFlagValueMsg returns an error with the given value being missing or -// invalid for the given flag. 
Optionally it returns an error message to aid -// in debugging. -func InvalidFlagValueMsg(ctx *cli.Context, flag string, value string, msg string) error { - var format string - if len(value) == 0 { - format = fmt.Sprintf("missing value for flag '--%s'", flag) - } else { - format = fmt.Sprintf("invalid value '%s' for flag '--%s'", value, flag) - } - - if len(msg) == 0 { - return errors.New(format) - } - - return errors.New(format + "; " + msg) -} - -// IncompatibleFlag returns an error with the flag being incompatible with the -// given value. -func IncompatibleFlag(ctx *cli.Context, flag string, value string) error { - return errors.Errorf("flag '--%s' is incompatible with '%s'", flag, value) -} - -// IncompatibleFlagWithFlag returns an error with the flag being incompatible with the -// given value. -func IncompatibleFlagWithFlag(ctx *cli.Context, flag string, withFlag string) error { - return errors.Errorf("flag '--%s' is incompatible with '--%s'", flag, withFlag) -} - -// IncompatibleFlagValue returns an error with the flag being incompatible with the -// given value. -func IncompatibleFlagValue(ctx *cli.Context, flag, incompatibleWith, - incompatibleWithValue string) error { - return errors.Errorf("flag '--%s' is incompatible with flag '--%s %s'", - flag, incompatibleWith, incompatibleWithValue) -} - -// IncompatibleFlagValues returns an error with the flag being incompatible with the -// given value. -func IncompatibleFlagValues(ctx *cli.Context, flag, value, incompatibleWith, - incompatibleWithValue string) error { - return errors.Errorf("flag '--%s %s' is incompatible with flag '--%s %s'", - flag, value, incompatibleWith, incompatibleWithValue) -} - -// IncompatibleFlagValueWithFlagValue returns an error with the given value -// being missing or invalid for the given flag. Optionally it lists the given -// formatted options at the end. -func IncompatibleFlagValueWithFlagValue(ctx *cli.Context, flag string, value string, - withFlag string, withValue, options string) error { - format := fmt.Sprintf("flag '--%s %s' is incompatible with flag '--%s %s'", - flag, value, withFlag, withValue) - - if len(options) == 0 { - return errors.New(format) - } - - return errors.Errorf("%s\n\n Option(s): --%s %s", format, withFlag, options) -} - -// RequiredFlag returns an error with the required flag message. -func RequiredFlag(ctx *cli.Context, flag string) error { - return errors.Errorf("'%s %s' requires the '--%s' flag", ctx.App.HelpName, - ctx.Command.Name, flag) -} - -// RequiredWithFlag returns an error with the required flag message with another flag. -func RequiredWithFlag(ctx *cli.Context, flag, required string) error { - return errors.Errorf("flag '--%s' requires the '--%s' flag", flag, required) -} - -// RequiredWithFlagValue returns an error with the required flag message. -func RequiredWithFlagValue(ctx *cli.Context, flag, value, required string) error { - return errors.Errorf("'--%s %s' requires the '--%s' flag", flag, value, required) -} - -// RequiredWithProvisionerTypeFlag returns an error with the required flag message. -func RequiredWithProvisionerTypeFlag(ctx *cli.Context, provisionerType, required string) error { - return errors.Errorf("provisioner type '%s' requires the '--%s' flag", provisionerType, required) -} - -// RequiredInsecureFlag returns an error with the given flag requiring the -// insecure flag message. 
-func RequiredInsecureFlag(ctx *cli.Context, flag string) error { - return errors.Errorf("flag '--%s' requires the '--insecure' flag", flag) -} - -// RequiredSubtleFlag returns an error with the given flag requiring the -// subtle flag message.. -func RequiredSubtleFlag(ctx *cli.Context, flag string) error { - return errors.Errorf("flag '--%s' requires the '--subtle' flag", flag) -} - -// RequiredUnlessInsecureFlag returns an error with the required flag message unless -// the insecure flag is used. -func RequiredUnlessInsecureFlag(ctx *cli.Context, flag string) error { - return errors.Errorf("flag '--%s' is required unless the '--insecure' flag is provided", flag) -} - -// RequiredUnlessFlag returns an error with the required flag message unless -// the specified flag is used. -func RequiredUnlessFlag(ctx *cli.Context, flag, unlessFlag string) error { - return errors.Errorf("flag '--%s' is required unless the '--%s' flag is provided", flag, unlessFlag) -} - -// RequiredUnlessSubtleFlag returns an error with the required flag message unless -// the subtle flag is used. -func RequiredUnlessSubtleFlag(ctx *cli.Context, flag string) error { - return errors.Errorf("flag '--%s' is required unless the '--subtle' flag is provided", flag) -} - -// RequiredOrFlag returns an error with a list of flags being required messages. -func RequiredOrFlag(ctx *cli.Context, flags ...string) error { - params := make([]string, len(flags)) - for i, flag := range flags { - params[i] = "--" + flag - } - return errors.Errorf("one of flag %s is required", strings.Join(params, " or ")) -} - -// RequiredWithOrFlag returns an error with a list of flags at least one of which -// is required in conjunction with the last flag in the list. -func RequiredWithOrFlag(ctx *cli.Context, withFlag string, flags ...string) error { - params := make([]string, len(flags)) - for i := 0; i < len(flags); i++ { - params[i] = "--" + flags[i] - } - return errors.Errorf("one of flag %s is required with flag --%s", strings.Join(params, " or "), withFlag) -} - -// MinSizeFlag returns an error with a greater or equal message message for -// the given flag and size. -func MinSizeFlag(ctx *cli.Context, flag string, size string) error { - return errors.Errorf("flag '--%s' must be greater or equal than %s", flag, size) -} - -// MinSizeInsecureFlag returns an error with a requiring --insecure flag -// message with the given flag an size. -func MinSizeInsecureFlag(ctx *cli.Context, flag, size string) error { - return errors.Errorf("flag '--%s' requires at least %s unless '--insecure' flag is provided", flag, size) -} - -// MutuallyExclusiveFlags returns an error with mutually exclusive message for -// the given flags. -func MutuallyExclusiveFlags(ctx *cli.Context, flag1, flag2 string) error { - return errors.Errorf("flag '--%s' and flag '--%s' are mutually exclusive", flag1, flag2) -} - -// UnsupportedFlag returns an error with a message saying that the given flag is -// not yet supported. -func UnsupportedFlag(ctx *cli.Context, flag string) error { - return errors.Errorf("flag '--%s' is not yet supported", flag) -} - -// usage returns the command usage text if set or a default usage string. 
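A hedged sketch of how these helpers compose in a command action; the "rename" command, its arguments, and its flags are invented for illustration:

```go
package sketch

import (
	"github.com/urfave/cli"
	"go.step.sm/cli-utils/errs"
)

// renameAction guards a hypothetical two-argument "rename" command.
func renameAction(ctx *cli.Context) error {
	// Exactly two positional arguments: <old> and <new>.
	if err := errs.NumberOfArguments(ctx, 2); err != nil {
		return err
	}
	if ctx.Args().Get(0) == ctx.Args().Get(1) {
		return errs.EqualArguments(ctx, "old", "new")
	}
	// A destructive flag that only works together with --insecure.
	if ctx.Bool("force") && !ctx.Bool("insecure") {
		return errs.RequiredInsecureFlag(ctx, "force")
	}
	return nil
}
```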
-func usage(ctx *cli.Context) string { - if len(ctx.Command.UsageText) == 0 { - return fmt.Sprintf("%s %s [command options]", ctx.App.HelpName, ctx.Command.Name) - } - // keep just the first line and remove markdown - lines := strings.Split(ctx.Command.UsageText, "\n") - return strings.Replace(lines[0], "**", "", -1) -} - -// FileError is a wrapper for errors of the os package. -func FileError(err error, filename string) error { - if err == nil { - return nil - } - switch e := err.(type) { - case *os.PathError: - return errors.Errorf("%s %s failed: %v", e.Op, e.Path, e.Err) - case *os.LinkError: - return errors.Errorf("%s %s %s failed: %v", e.Op, e.Old, e.New, e.Err) - case *os.SyscallError: - return errors.Errorf("%s failed: %v", e.Syscall, e.Err) - default: - return Wrap(err, "unexpected error on %s", filename) - } -} - -// FriendlyError is an interface for returning friendly error messages to the user. -type FriendlyError interface { - Message() string -} diff --git a/vendor/go.step.sm/cli-utils/fileutil/file.go b/vendor/go.step.sm/cli-utils/fileutil/file.go deleted file mode 100644 index 06fb8edb..00000000 --- a/vendor/go.step.sm/cli-utils/fileutil/file.go +++ /dev/null @@ -1,116 +0,0 @@ -package fileutil - -import ( - "os" - - "github.com/pkg/errors" -) - -// File represents a wrapper on os.File that supports read, write, seek and -// close methods, but they won't be called if an error occurred before. -type File struct { - File *os.File - err error -} - -// OpenFile calls os.OpenFile method and returns the os.File wrapped. -func OpenFile(name string, flag int, perm os.FileMode) (*File, error) { - f, err := os.OpenFile(name, flag, perm) - if err != nil { - return nil, FileError(err, name) - } - return &File{ - File: f, - }, nil -} - -// error writes f.err if it's not set and returns f.err. -func (f *File) error(err error) error { - if f.err == nil && err != nil { - f.err = FileError(err, f.File.Name()) - } - return f.err -} - -// Close wraps `func (*os.File) Close` it will always call Close but the error -// return will be the first error thrown if any. -func (f *File) Close() error { - return f.error(f.File.Close()) -} - -// Read wraps `func (*os.File) Read` but doesn't perform the operation if a -// previous error was thrown. -func (f *File) Read(b []byte) (n int, err error) { - if f.err != nil { - return 0, f.err - } - n, err = f.File.Read(b) - return n, f.error(err) -} - -// ReadAt wraps `func (*os.File) ReadAt` but doesn't perform the operation if a -// previous error was thrown. -func (f *File) ReadAt(b []byte, off int64) (n int, err error) { - if f.err != nil { - return 0, f.err - } - n, err = f.File.ReadAt(b, off) - return n, f.error(err) -} - -// Seek wraps `func (*os.File) Seek` but doesn't perform the operation if a -// previous error was thrown. -func (f *File) Seek(offset int64, whence int) (ret int64, err error) { - if f.err != nil { - return 0, f.err - } - ret, err = f.File.Seek(offset, whence) - return ret, f.error(err) -} - -// Write wraps `func (*os.File) Write` but doesn't perform the operation if a -// previous error was thrown. -func (f *File) Write(b []byte) (n int, err error) { - if f.err != nil { - return 0, f.err - } - n, err = f.File.Write(b) - return n, f.error(err) -} - -// WriteAt wraps `func (*os.File) WriteAt` but doesn't perform the operation if -// a previous error was thrown. 
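The wrapper's sticky-error design means callers can chain writes and check a single error at Close; a brief sketch, assuming the fileutil import path of this removed package:

```go
package sketch

import (
	"os"

	"go.step.sm/cli-utils/fileutil"
)

// writeNotes shows the sticky-error pattern: the first failure is recorded
// and every later call on f becomes a no-op returning that same error.
func writeNotes(path string) error {
	f, err := fileutil.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o600)
	if err != nil {
		return err
	}
	f.WriteString("first line\n")
	f.WriteString("second line\n")
	return f.Close() // surfaces the first recorded error, if any
}
```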
-func (f *File) WriteAt(b []byte, off int64) (n int, err error) { - if f.err != nil { - return 0, f.err - } - n, err = f.File.WriteAt(b, off) - return n, f.error(err) -} - -// WriteString wraps `func (*os.File) WriteString` but doesn't perform the -// operation if a previous error was thrown. -func (f *File) WriteString(s string) (n int, err error) { - if f.err != nil { - return 0, f.err - } - n, err = f.File.WriteString(s) - return n, f.error(err) -} - -// FileError is a wrapper for errors of the os package. -func FileError(err error, filename string) error { - if err == nil { - return nil - } - switch e := err.(type) { - case *os.PathError: - return errors.Errorf("%s %s failed: %v", e.Op, e.Path, e.Err) - case *os.LinkError: - return errors.Errorf("%s %s %s failed: %v", e.Op, e.Old, e.New, e.Err) - case *os.SyscallError: - return errors.Errorf("%s failed: %v", e.Syscall, e.Err) - default: - return errors.Wrapf(err, "unexpected error on %s", filename) - } -} diff --git a/vendor/go.step.sm/cli-utils/fileutil/write.go b/vendor/go.step.sm/cli-utils/fileutil/write.go deleted file mode 100644 index 18d107cf..00000000 --- a/vendor/go.step.sm/cli-utils/fileutil/write.go +++ /dev/null @@ -1,159 +0,0 @@ -package fileutil - -import ( - "bufio" - "bytes" - "fmt" - "io" - "io/ioutil" - "os" - "strings" - "time" - - "github.com/pkg/errors" - "go.step.sm/cli-utils/command" - "go.step.sm/cli-utils/ui" -) - -var ( - // ErrFileExists is the error returned if a file exists. - ErrFileExists = errors.New("file exists") - - // ErrIsDir is the error returned if the file is a directory. - ErrIsDir = errors.New("file is a directory") - - // SnippetHeader is the header of a step generated snippet in a - // configuration file. - SnippetHeader = "# autogenerated by step" - - // SnippetFooter is the header of a step generated snippet in a - // configuration file. - SnippetFooter = "# end" -) - -// WriteFile wraps ioutil.WriteFile with a prompt to overwrite a file if -// the file exists. It returns ErrFileExists if the user picks to not overwrite -// the file. If force is set to true, the prompt will not be presented and the -// file if exists will be overwritten. -func WriteFile(filename string, data []byte, perm os.FileMode) error { - if command.IsForce() { - return ioutil.WriteFile(filename, data, perm) - } - - st, err := os.Stat(filename) - if err != nil { - if os.IsNotExist(err) { - return ioutil.WriteFile(filename, data, perm) - } - return errors.Wrapf(err, "error reading information for %s", filename) - } - - if st.IsDir() { - return ErrIsDir - } - - str, err := ui.Prompt(fmt.Sprintf("Would you like to overwrite %s [y/n]", filename), ui.WithValidateYesNo()) - if err != nil { - return err - } - switch strings.ToLower(strings.TrimSpace(str)) { - case "y", "yes": - case "n", "no": - return ErrFileExists - } - - return ioutil.WriteFile(filename, data, perm) -} - -// AppendNewLine appends the given data at the end of the file. If the last -// character of the file does not contain an LF it prepends it to the data. -func AppendNewLine(filename string, data []byte, perm os.FileMode) error { - f, err := OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_APPEND, perm) - if err != nil { - return err - } - // Read last character - if st, err := f.File.Stat(); err == nil && st.Size() != 0 { - last := make([]byte, 1) - f.Seek(-1, 2) - f.Read(last) - if last[0] != '\n' { - f.WriteString("\n") - } - } - f.Write(data) - return f.Close() -} - -// WriteSnippet writes the given data on the given filename. 
It surrounds the -// data with a header and footer, and it will replace the previous one. -func WriteSnippet(filename string, data []byte, perm os.FileMode) error { - // Get file permissions - if st, err := os.Stat(filename); err == nil { - perm = st.Mode() - } else if !os.IsNotExist(err) { - return FileError(err, filename) - } - - // Read file contents - b, err := ioutil.ReadFile(filename) - if err != nil && !os.IsNotExist(err) { - return FileError(err, filename) - } - - // Detect previous configuration - _, start, end := findConfiguration(bytes.NewReader(b)) - - // Replace previous configuration - f, err := OpenFile(filename, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, perm) - if err != nil { - return FileError(err, filename) - } - if len(b) > 0 { - f.Write(b[:start]) - if start == end { - f.WriteString("\n") - } - } - f.WriteString(fmt.Sprintf("%s @ %s\n", SnippetHeader, time.Now().UTC().Format(time.RFC3339))) - f.Write(data) - if !bytes.HasSuffix(data, []byte("\n")) { - f.WriteString("\n") - } - f.WriteString(SnippetFooter + "\n") - if len(b) > 0 { - f.Write(b[end:]) - } - return f.Close() -} - -type offsetCounter struct { - offset int64 -} - -func (o *offsetCounter) ScanLines(data []byte, atEOF bool) (advance int, token []byte, err error) { - advance, token, err = bufio.ScanLines(data, atEOF) - o.offset += int64(advance) - return -} - -func findConfiguration(r io.Reader) (lines []string, start int64, end int64) { - var inConfig bool - counter := new(offsetCounter) - scanner := bufio.NewScanner(r) - scanner.Split(counter.ScanLines) - for scanner.Scan() { - line := scanner.Text() - switch { - case !inConfig && strings.HasPrefix(line, SnippetHeader): - inConfig = true - start = counter.offset - int64(len(line)+1) - case inConfig && strings.HasPrefix(line, SnippetFooter): - return lines, start, counter.offset - case inConfig: - lines = append(lines, line) - } - } - - return lines, counter.offset, counter.offset -} diff --git a/vendor/go.step.sm/cli-utils/pkg/blackfriday/LICENSE.txt b/vendor/go.step.sm/cli-utils/pkg/blackfriday/LICENSE.txt deleted file mode 100644 index 2885af36..00000000 --- a/vendor/go.step.sm/cli-utils/pkg/blackfriday/LICENSE.txt +++ /dev/null @@ -1,29 +0,0 @@ -Blackfriday is distributed under the Simplified BSD License: - -> Copyright © 2011 Russ Ross -> All rights reserved. -> -> Redistribution and use in source and binary forms, with or without -> modification, are permitted provided that the following conditions -> are met: -> -> 1. Redistributions of source code must retain the above copyright -> notice, this list of conditions and the following disclaimer. -> -> 2. Redistributions in binary form must reproduce the above -> copyright notice, this list of conditions and the following -> disclaimer in the documentation and/or other materials provided with -> the distribution. -> -> THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -> "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -> LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS -> FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE -> COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -> INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -> BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -> LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -> CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -> LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN -> ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -> POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/go.step.sm/cli-utils/pkg/blackfriday/README.md b/vendor/go.step.sm/cli-utils/pkg/blackfriday/README.md deleted file mode 100644 index 2e0db355..00000000 --- a/vendor/go.step.sm/cli-utils/pkg/blackfriday/README.md +++ /dev/null @@ -1,283 +0,0 @@ -Blackfriday [![Build Status](https://travis-ci.org/russross/blackfriday.svg?branch=master)](https://travis-ci.org/russross/blackfriday) -=========== - -Blackfriday is a [Markdown][1] processor implemented in [Go][2]. It -is paranoid about its input (so you can safely feed it user-supplied -data), it is fast, it supports common extensions (tables, smart -punctuation substitutions, etc.), and it is safe for all utf-8 -(unicode) input. - -HTML output is currently supported, along with Smartypants -extensions. - -It started as a translation from C of [Sundown][3]. - - -Installation ------------- - -Blackfriday is compatible with any modern Go release. With Go 1.7 and git -installed: - - go get gopkg.in/russross/blackfriday.v2 - -will download, compile, and install the package into your `$GOPATH` -directory hierarchy. Alternatively, you can achieve the same if you -import it into a project: - - import "gopkg.in/russross/blackfriday.v2" - -and `go get` without parameters. - - -Versions --------- - -Currently maintained and recommended version of Blackfriday is `v2`. It's being -developed on its own branch: https://github.com/russross/blackfriday/v2. You -should install and import it via [gopkg.in][6] at -`gopkg.in/russross/blackfriday.v2`. - -Version 2 offers a number of improvements over v1: - -* Cleaned up API -* A separate call to [`Parse`][4], which produces an abstract syntax tree for - the document -* Latest bug fixes -* Flexibility to easily add your own rendering extensions - -Potential drawbacks: - -* Our benchmarks show v2 to be slightly slower than v1. Currently in the - ballpark of around 15%. -* API breakage. If you can't afford modifying your code to adhere to the new API - and don't care too much about the new features, v2 is probably not for you. -* Several bug fixes are trailing behind and still need to be forward-ported to - v2. See issue [#348](https://github.com/russross/blackfriday/issues/348) for - tracking. - -Usage ------ - -For the most sensible markdown processing, it is as simple as getting your input -into a byte slice and calling: - -```go -output := blackfriday.Run(input) -``` - -Your input will be parsed and the output rendered with a set of most popular -extensions enabled. If you want the most basic feature set, corresponding with -the bare Markdown specification, use: - -```go -output := blackfriday.Run(input, blackfriday.WithNoExtensions()) -``` - -### Sanitize untrusted content - -Blackfriday itself does nothing to protect against malicious content. If you are -dealing with user-supplied markdown, we recommend running Blackfriday's output -through HTML sanitizer such as [Bluemonday][5]. 
- -Here's an example of simple usage of Blackfriday together with Bluemonday: - -```go -import ( - "github.com/microcosm-cc/bluemonday" - "github.com/russross/blackfriday" -) - -// ... -unsafe := blackfriday.Run(input) -html := bluemonday.UGCPolicy().SanitizeBytes(unsafe) -``` - -### Custom options - -If you want to customize the set of options, use `blackfriday.WithExtensions`, -`blackfriday.WithRenderer` and `blackfriday.WithRefOverride`. - -You can also check out `blackfriday-tool` for a more complete example -of how to use it. Download and install it using: - - go get github.com/russross/blackfriday-tool - -This is a simple command-line tool that allows you to process a -markdown file using a standalone program. You can also browse the -source directly on github if you are just looking for some example -code: - -* - -Note that if you have not already done so, installing -`blackfriday-tool` will be sufficient to download and install -blackfriday in addition to the tool itself. The tool binary will be -installed in `$GOPATH/bin`. This is a statically-linked binary that -can be copied to wherever you need it without worrying about -dependencies and library versions. - - -Features --------- - -All features of Sundown are supported, including: - -* **Compatibility**. The Markdown v1.0.3 test suite passes with - the `--tidy` option. Without `--tidy`, the differences are - mostly in whitespace and entity escaping, where blackfriday is - more consistent and cleaner. - -* **Common extensions**, including table support, fenced code - blocks, autolinks, strikethroughs, non-strict emphasis, etc. - -* **Safety**. Blackfriday is paranoid when parsing, making it safe - to feed untrusted user input without fear of bad things - happening. The test suite stress tests this and there are no - known inputs that make it crash. If you find one, please let me - know and send me the input that does it. - - NOTE: "safety" in this context means *runtime safety only*. In order to - protect yourself against JavaScript injection in untrusted content, see - [this example](https://github.com/russross/blackfriday#sanitize-untrusted-content). - -* **Fast processing**. It is fast enough to render on-demand in - most web applications without having to cache the output. - -* **Thread safety**. You can run multiple parsers in different - goroutines without ill effect. There is no dependence on global - shared state. - -* **Minimal dependencies**. Blackfriday only depends on standard - library packages in Go. The source code is pretty - self-contained, so it is easy to add to any project, including - Google App Engine projects. - -* **Standards compliant**. Output successfully validates using the - W3C validation tool for HTML 4.01 and XHTML 1.0 Transitional. - - -Extensions ----------- - -In addition to the standard markdown syntax, this package -implements the following extensions: - -* **Intra-word emphasis supression**. The `_` character is - commonly used inside words when discussing code, so having - markdown interpret it as an emphasis command is usually the - wrong thing. Blackfriday lets you treat all emphasis markers as - normal characters when they occur inside a word. - -* **Tables**. Tables can be created by drawing them in the input - using a simple syntax: - - ``` - Name | Age - --------|------ - Bob | 27 - Alice | 23 - ``` - -* **Fenced code blocks**. In addition to the normal 4-space - indentation to mark code blocks, you can explicitly mark them - and supply a language (to make syntax highlighting simple). 
 Just - mark it like this: - - ```go - func getTrue() bool { - return true - } - ``` - - You can use 3 or more backticks to mark the beginning of the - block, and the same number to mark the end of the block. - -* **Definition lists**. A simple definition list is made of a single-line - term followed by a colon and the definition for that term. - - Cat - : Fluffy animal everyone likes - - Internet - : Vector of transmission for pictures of cats - - Terms must be separated from the previous definition by a blank line. - -* **Footnotes**. A marker in the text that will become a superscript number; - a footnote definition that will be placed in a list of footnotes at the - end of the document. A footnote looks like this: - - This is a footnote.[^1] - - [^1]: the footnote text. - -* **Autolinking**. Blackfriday can find URLs that have not been - explicitly marked as links and turn them into links. - -* **Strikethrough**. Use two tildes (`~~`) to mark text that - should be crossed out. - -* **Hard line breaks**. With this extension enabled newlines in the input - translate into line breaks in the output. This extension is off by default. - -* **Smart quotes**. Smartypants-style punctuation substitution is - supported, turning normal double- and single-quote marks into - curly quotes, etc. - -* **LaTeX-style dash parsing** is an additional option, where `--` - is translated into `&ndash;`, and `---` is translated into - `&mdash;`. This differs from most smartypants processors, which - turn a single hyphen into an ndash and a double hyphen into an - mdash. - -* **Smart fractions**, where anything that looks like a fraction - is translated into suitable HTML (instead of just a few special - cases like most smartypant processors). For example, `4/5` - becomes `4&frasl;5`, which renders as - <sup>4</sup>&frasl;<sub>5</sub>. - - -Other renderers ---------------- - -Blackfriday is structured to allow alternative rendering engines. Here -are a few of note: - -* [github_flavored_markdown](https://godoc.org/github.com/shurcooL/github_flavored_markdown): - provides a GitHub Flavored Markdown renderer with fenced code block - highlighting, clickable heading anchor links. - - It's not customizable, and its goal is to produce HTML output - equivalent to the [GitHub Markdown API endpoint](https://developer.github.com/v3/markdown/#render-a-markdown-document-in-raw-mode), - except the rendering is performed locally. - -* [markdownfmt](https://github.com/shurcooL/markdownfmt): like gofmt, - but for markdown. - -* [LaTeX output](https://bitbucket.org/ambrevar/blackfriday-latex): - renders output as LaTeX. - - -Todo ----- - -* More unit testing -* Improve unicode support. It does not understand all unicode - rules (about what constitutes a letter, a punctuation symbol, - etc.), so it may fail to detect word boundaries correctly in - some instances. It is safe on all utf-8 input. 
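 Besides `Run`, the v2 API described under "Versions" exposes `Parse`, which returns the document AST. A short sketch of walking that tree to collect link destinations (import path as in the Installation section; the input string is illustrative): ```go package main import ( "fmt" blackfriday "gopkg.in/russross/blackfriday.v2" ) func main() { md := blackfriday.New(blackfriday.WithExtensions(blackfriday.CommonExtensions)) ast := md.Parse([]byte("see [the docs](https://example.com)")) ast.Walk(func(node *blackfriday.Node, entering bool) blackfriday.WalkStatus { if entering && node.Type == blackfriday.Link { fmt.Printf("%s\n", node.LinkData.Destination) // https://example.com } return blackfriday.GoToNext }) } ``` 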
- - -License -------- - -[Blackfriday is distributed under the Simplified BSD License](LICENSE.txt) - - - [1]: https://daringfireball.net/projects/markdown/ "Markdown" - [2]: https://golang.org/ "Go Language" - [3]: https://github.com/vmg/sundown "Sundown" - [4]: https://godoc.org/gopkg.in/russross/blackfriday.v2#Parse "Parse func" - [5]: https://github.com/microcosm-cc/bluemonday "Bluemonday" - [6]: https://labix.org/gopkg.in "gopkg.in" diff --git a/vendor/go.step.sm/cli-utils/pkg/blackfriday/block.go b/vendor/go.step.sm/cli-utils/pkg/blackfriday/block.go deleted file mode 100644 index 1f9c1a84..00000000 --- a/vendor/go.step.sm/cli-utils/pkg/blackfriday/block.go +++ /dev/null @@ -1,1558 +0,0 @@ -// -// Blackfriday Markdown Processor -// Available at http://github.com/russross/blackfriday -// -// Copyright © 2011 Russ Ross . -// Distributed under the Simplified BSD License. -// See README.md for details. -// - -// -// Functions to parse block-level elements. -// - -package blackfriday - -import ( - "bytes" - "html" - "regexp" - - "github.com/shurcooL/sanitized_anchor_name" -) - -const ( - charEntity = "&(?:#x[a-f0-9]{1,8}|#[0-9]{1,8}|[a-z][a-z0-9]{1,31});" - escapable = "[!\"#$%&'()*+,./:;<=>?@[\\\\\\]^_`{|}~-]" -) - -var ( - reBackslashOrAmp = regexp.MustCompile("[\\&]") - reEntityOrEscapedChar = regexp.MustCompile("(?i)\\\\" + escapable + "|" + charEntity) -) - -// Parse block-level data. -// Note: this function and many that it calls assume that -// the input buffer ends with a newline. -func (p *Markdown) block(data []byte) { - // this is called recursively: enforce a maximum depth - if p.nesting >= p.maxNesting { - return - } - p.nesting++ - - // parse out one block-level construct at a time - for len(data) > 0 { - // prefixed heading: - // - // # Heading 1 - // ## Heading 2 - // ... - // ###### Heading 6 - if p.isPrefixHeading(data) { - data = data[p.prefixHeading(data):] - continue - } - - // block of preformatted HTML: - // - //
 - //
                - // ... - //
                
    - if data[0] == '<' { - if i := p.html(data, true); i > 0 { - data = data[i:] - continue - } - } - - // title block - // - // % stuff - // % more stuff - // % even more stuff - if p.extensions&Titleblock != 0 { - if data[0] == '%' { - if i := p.titleBlock(data, true); i > 0 { - data = data[i:] - continue - } - } - } - - // blank lines. note: returns the # of bytes to skip - if i := p.isEmpty(data); i > 0 { - data = data[i:] - continue - } - - // indented code block: - // - // func max(a, b int) int { - // if a > b { - // return a - // } - // return b - // } - if p.codePrefix(data) > 0 { - data = data[p.code(data):] - continue - } - - // fenced code block: - // - // ``` go - // func fact(n int) int { - // if n <= 1 { - // return n - // } - // return n * fact(n-1) - // } - // ``` - if p.extensions&FencedCode != 0 { - if i := p.fencedCodeBlock(data, true); i > 0 { - data = data[i:] - continue - } - } - - // horizontal rule: - // - // ------ - // or - // ****** - // or - // ______ - if p.isHRule(data) { - p.addBlock(HorizontalRule, nil) - var i int - for i = 0; i < len(data) && data[i] != '\n'; i++ { - } - data = data[i:] - continue - } - - // block quote: - // - // > A big quote I found somewhere - // > on the web - if p.quotePrefix(data) > 0 { - data = data[p.quote(data):] - continue - } - - // table: - // - // Name | Age | Phone - // ------|-----|--------- - // Bob | 31 | 555-1234 - // Alice | 27 | 555-4321 - if p.extensions&Tables != 0 { - if i := p.table(data); i > 0 { - data = data[i:] - continue - } - } - - // an itemized/unordered list: - // - // * Item 1 - // * Item 2 - // - // also works with + or - - if p.uliPrefix(data) > 0 { - data = data[p.list(data, 0):] - continue - } - - // a numbered/ordered list: - // - // 1. Item 1 - // 2. Item 2 - if p.oliPrefix(data) > 0 { - data = data[p.list(data, ListTypeOrdered):] - continue - } - - // definition lists: - // - // Term 1 - // : Definition a - // : Definition b - // - // Term 2 - // : Definition c - if p.extensions&DefinitionLists != 0 { - if p.dliPrefix(data) > 0 { - data = data[p.list(data, ListTypeDefinition):] - continue - } - } - - // anything else must look like a normal paragraph - // note: this finds underlined headings, too - data = data[p.paragraph(data):] - } - - p.nesting-- -} - -func (p *Markdown) addBlock(typ NodeType, content []byte) *Node { - p.closeUnmatchedBlocks() - container := p.addChild(typ, 0) - container.content = content - return container -} - -func (p *Markdown) isPrefixHeading(data []byte) bool { - if data[0] != '#' { - return false - } - - if p.extensions&SpaceHeadings != 0 { - level := 0 - for level < 6 && level < len(data) && data[level] == '#' { - level++ - } - if level == len(data) || data[level] != ' ' { - return false - } - } - return true -} - -func (p *Markdown) prefixHeading(data []byte) int { - level := 0 - for level < 6 && level < len(data) && data[level] == '#' { - level++ - } - i := skipChar(data, level, ' ') - end := skipUntilChar(data, i, '\n') - skip := end - id := "" - if p.extensions&HeadingIDs != 0 { - j, k := 0, 0 - // find start/end of heading id - for j = i; j < end-1 && (data[j] != '{' || data[j+1] != '#'); j++ { - } - for k = j + 1; k < end && data[k] != '}'; k++ { - } - // extract heading id iff found - if j < end && k < end { - id = string(data[j+2 : k]) - end = j - skip = k + 1 - for end > 0 && data[end-1] == ' ' { - end-- - } - } - } - for end > 0 && data[end-1] == '#' { - if isBackslashEscaped(data, end-1) { - break - } - end-- - } - for end > 0 && data[end-1] == ' ' { - 
end-- - } - if end > i { - if id == "" && p.extensions&AutoHeadingIDs != 0 { - id = sanitized_anchor_name.Create(string(data[i:end])) - } - block := p.addBlock(Heading, data[i:end]) - block.HeadingID = id - block.Level = level - } - return skip -} - -func (p *Markdown) isUnderlinedHeading(data []byte) int { - // test of level 1 heading - if data[0] == '=' { - i := skipChar(data, 1, '=') - i = skipChar(data, i, ' ') - if i < len(data) && data[i] == '\n' { - return 1 - } - return 0 - } - - // test of level 2 heading - if data[0] == '-' { - i := skipChar(data, 1, '-') - i = skipChar(data, i, ' ') - if i < len(data) && data[i] == '\n' { - return 2 - } - return 0 - } - - return 0 -} - -func (p *Markdown) titleBlock(data []byte, doRender bool) int { - if data[0] != '%' { - return 0 - } - splitData := bytes.Split(data, []byte("\n")) - var i int - for idx, b := range splitData { - if !bytes.HasPrefix(b, []byte("%")) { - i = idx // - 1 - break - } - } - - data = bytes.Join(splitData[0:i], []byte("\n")) - consumed := len(data) - data = bytes.TrimPrefix(data, []byte("% ")) - data = bytes.Replace(data, []byte("\n% "), []byte("\n"), -1) - block := p.addBlock(Heading, data) - block.Level = 1 - block.IsTitleblock = true - - return consumed -} - -func (p *Markdown) html(data []byte, doRender bool) int { - var i, j int - - // identify the opening tag - if data[0] != '<' { - return 0 - } - curtag, tagfound := p.htmlFindTag(data[1:]) - - // handle special cases - if !tagfound { - // check for an HTML comment - if size := p.htmlComment(data, doRender); size > 0 { - return size - } - - // check for an
 <hr> tag - if size := p.htmlHr(data, doRender); size > 0 { - return size - } - - // no special case recognized - return 0 - } - - // look for an unindented matching closing tag - // followed by a blank line - found := false - /* - closetag := []byte("\n</" + curtag + ">") - j = len(curtag) + 1 - for !found { - // scan for a closing tag at the beginning of a line - if skip := bytes.Index(data[j:], closetag); skip >= 0 { - j += skip + len(closetag) - } else { - break - } - - // see if it is the only thing on the line - if skip := p.isEmpty(data[j:]); skip > 0 { - // see if it is followed by a blank line/eof - j += skip - if j >= len(data) { - found = true - i = j - } else { - if skip := p.isEmpty(data[j:]); skip > 0 { - j += skip - found = true - i = j - } - } - } - } - */ - - // if not found, try a second pass looking for indented match - // but not if tag is "ins" or "del" (following original Markdown.pl) - if !found && curtag != "ins" && curtag != "del" { - i = 1 - for i < len(data) { - i++ - for i < len(data) && !(data[i-1] == '<' && data[i] == '/') { - i++ - } - - if i+2+len(curtag) >= len(data) { - break - } - - j = p.htmlFindEnd(curtag, data[i-1:]) - - if j > 0 { - i += j - 1 - found = true - break - } - } - } - - if !found { - return 0 - } - - // the end of the block has been found - if doRender { - // trim newlines - end := i - for end > 0 && data[end-1] == '\n' { - end-- - } - finalizeHTMLBlock(p.addBlock(HTMLBlock, data[:end])) - } - - return i -} - -func finalizeHTMLBlock(block *Node) { - block.Literal = block.content - block.content = nil -} - -// HTML comment, lax form -func (p *Markdown) htmlComment(data []byte, doRender bool) int { - i := p.inlineHTMLComment(data) - // needs to end with a blank line - if j := p.isEmpty(data[i:]); j > 0 { - size := i + j - if doRender { - // trim trailing newlines - end := size - for end > 0 && data[end-1] == '\n' { - end-- - } - block := p.addBlock(HTMLBlock, data[:end]) - finalizeHTMLBlock(block) - } - return size - } - return 0 -} - -// HR, which is the only self-closing block tag considered -func (p *Markdown) htmlHr(data []byte, doRender bool) int { - if len(data) < 4 { - return 0 - } - if data[0] != '<' || (data[1] != 'h' && data[1] != 'H') || (data[2] != 'r' && data[2] != 'R') { - return 0 - } - if data[3] != ' ' && data[3] != '/' && data[3] != '>' { - // not an <hr> tag after all; at least not a valid one - return 0 - } - i := 3 - for i < len(data) && data[i] != '>' && data[i] != '\n' { - i++ - } - if i < len(data) && data[i] == '>' { - i++ - if j := p.isEmpty(data[i:]); j > 0 { - size := i + j - if doRender { - // trim newlines - end := size - for end > 0 && data[end-1] == '\n' { - end-- - } - finalizeHTMLBlock(p.addBlock(HTMLBlock, data[:end])) - } - return size - } - } - return 0 -} - -func (p *Markdown) htmlFindTag(data []byte) (string, bool) { - i := 0 - for i < len(data) && isalnum(data[i]) { - i++ - } - key := string(data[:i]) - if _, ok := blockTags[key]; ok { - return key, true - } - return "", false -} - -func (p *Markdown) htmlFindEnd(tag string, data []byte) int { - // assume data[0] == '<' && data[1] == '/' already tested - if tag == "hr" { - return 2 - } - // check if tag is a match - closetag := []byte("</" + tag + ">") - if !bytes.HasPrefix(data, closetag) { - return 0 - } - i := len(closetag) - - // check that the rest of the line is blank - skip := 0 - if skip = p.isEmpty(data[i:]); skip == 0 { - return 0 - } - i += skip - skip = 0 - - if i >= len(data) { - return i - } - - if p.extensions&LaxHTMLBlocks != 0 { - return i - } - if skip = p.isEmpty(data[i:]); skip == 0 { - // following line must be blank - return 0 - } - - return i + skip -} - -func (*Markdown) isEmpty(data []byte) int { - // it is okay to call isEmpty on an empty buffer - if len(data) == 0 { - return 0 - } - - var i int - for i = 0; i < len(data) && data[i] != '\n'; i++ { - if data[i] != ' ' && data[i] != '\t' { - return 0 - } - } - if i < len(data) && data[i] == '\n' { - i++ - } - return i -} - -func (*Markdown) isHRule(data []byte) bool { - i := 0 - - // skip up to three spaces - for i < 3 && data[i] == ' ' { - i++ - } - - // look at the hrule char - if data[i] != '*' && data[i] != '-' && data[i] != '_' { - return false - } - c := data[i] - - // the whole line must be the char or whitespace - n := 0 - for i < len(data) && data[i] != '\n' { - switch { - case data[i] == c: - n++ - case data[i] != ' ': - return false - } - i++ - } - - return n >= 3 -} - -// isFenceLine checks if there's a fence line (e.g., ``` or ``` go) at the beginning of data, -// and returns the end index if so, or 0 otherwise. It also returns the marker found. -// If syntax is not nil, it gets set to the syntax specified in the fence line. -func isFenceLine(data []byte, syntax *string, oldmarker string) (end int, marker string) { - i, size := 0, 0 - - // skip up to three spaces - for i < len(data) && i < 3 && data[i] == ' ' { - i++ - } - - // check for the marker characters: ~ or ` - if i >= len(data) { - return 0, "" - } - if data[i] != '~' && data[i] != '`' { - return 0, "" - } - - c := data[i] - - // the whole line must be the same char or whitespace - for i < len(data) && data[i] == c { - size++ - i++ - } - - // the marker char must occur at least 3 times - if size < 3 { - return 0, "" - } - marker = string(data[i-size : i]) - - // if this is the end marker, it must match the beginning marker - if oldmarker != "" && marker != oldmarker { - return 0, "" - } - - // TODO(shurcooL): It's probably a good idea to simplify the 2 code paths here - // into one, always get the syntax, and discard it if the caller doesn't care. 
- if syntax != nil { - syn := 0 - i = skipChar(data, i, ' ') - - if i >= len(data) { - if i == len(data) { - return i, marker - } - return 0, "" - } - - syntaxStart := i - - if data[i] == '{' { - i++ - syntaxStart++ - - for i < len(data) && data[i] != '}' && data[i] != '\n' { - syn++ - i++ - } - - if i >= len(data) || data[i] != '}' { - return 0, "" - } - - // strip all whitespace at the beginning and the end - // of the {} block - for syn > 0 && isspace(data[syntaxStart]) { - syntaxStart++ - syn-- - } - - for syn > 0 && isspace(data[syntaxStart+syn-1]) { - syn-- - } - - i++ - } else { - for i < len(data) && !isspace(data[i]) { - syn++ - i++ - } - } - - *syntax = string(data[syntaxStart : syntaxStart+syn]) - } - - i = skipChar(data, i, ' ') - if i >= len(data) || data[i] != '\n' { - if i == len(data) { - return i, marker - } - return 0, "" - } - return i + 1, marker // Take newline into account. -} - -// fencedCodeBlock returns the end index if data contains a fenced code block at the beginning, -// or 0 otherwise. It writes to out if doRender is true, otherwise it has no side effects. -// If doRender is true, a final newline is mandatory to recognize the fenced code block. -func (p *Markdown) fencedCodeBlock(data []byte, doRender bool) int { - var syntax string - beg, marker := isFenceLine(data, &syntax, "") - if beg == 0 || beg >= len(data) { - return 0 - } - - var work bytes.Buffer - work.Write([]byte(syntax)) - work.WriteByte('\n') - - for { - // safe to assume beg < len(data) - - // check for the end of the code block - fenceEnd, _ := isFenceLine(data[beg:], nil, marker) - if fenceEnd != 0 { - beg += fenceEnd - break - } - - // copy the current line - end := skipUntilChar(data, beg, '\n') + 1 - - // did we reach the end of the buffer without a closing marker? 
- if end >= len(data) { - return 0 - } - - // verbatim copy to the working buffer - if doRender { - work.Write(data[beg:end]) - } - beg = end - } - - if doRender { - block := p.addBlock(CodeBlock, work.Bytes()) // TODO: get rid of temp buffer - block.IsFenced = true - finalizeCodeBlock(block) - } - - return beg -} - -func unescapeChar(str []byte) []byte { - if str[0] == '\\' { - return []byte{str[1]} - } - return []byte(html.UnescapeString(string(str))) -} - -func unescapeString(str []byte) []byte { - if reBackslashOrAmp.Match(str) { - return reEntityOrEscapedChar.ReplaceAllFunc(str, unescapeChar) - } - return str -} - -func finalizeCodeBlock(block *Node) { - if block.IsFenced { - newlinePos := bytes.IndexByte(block.content, '\n') - firstLine := block.content[:newlinePos] - rest := block.content[newlinePos+1:] - block.Info = unescapeString(bytes.Trim(firstLine, "\n")) - block.Literal = rest - } else { - block.Literal = block.content - } - block.content = nil -} - -func (p *Markdown) table(data []byte) int { - table := p.addBlock(Table, nil) - i, columns := p.tableHeader(data) - if i == 0 { - p.tip = table.Parent - table.Unlink() - return 0 - } - - p.addBlock(TableBody, nil) - - for i < len(data) { - pipes, rowStart := 0, i - for ; i < len(data) && data[i] != '\n'; i++ { - if data[i] == '|' { - pipes++ - } - } - - if pipes == 0 { - i = rowStart - break - } - - // include the newline in data sent to tableRow - if i < len(data) && data[i] == '\n' { - i++ - } - p.tableRow(data[rowStart:i], columns, false) - } - - return i -} - -// check if the specified position is preceded by an odd number of backslashes -func isBackslashEscaped(data []byte, i int) bool { - backslashes := 0 - for i-backslashes-1 >= 0 && data[i-backslashes-1] == '\\' { - backslashes++ - } - return backslashes&1 == 1 -} - -func (p *Markdown) tableHeader(data []byte) (size int, columns []CellAlignFlags) { - i := 0 - colCount := 1 - for i = 0; i < len(data) && data[i] != '\n'; i++ { - if data[i] == '|' && !isBackslashEscaped(data, i) { - colCount++ - } - } - - // doesn't look like a table header - if colCount == 1 { - return - } - - // include the newline in the data sent to tableRow - j := i - if j < len(data) && data[j] == '\n' { - j++ - } - header := data[:j] - - // column count ignores pipes at beginning or end of line - if data[0] == '|' { - colCount-- - } - if i > 2 && data[i-1] == '|' && !isBackslashEscaped(data, i-1) { - colCount-- - } - - columns = make([]CellAlignFlags, colCount) - - // move on to the header underline - i++ - if i >= len(data) { - return - } - - if data[i] == '|' && !isBackslashEscaped(data, i) { - i++ - } - i = skipChar(data, i, ' ') - - // each column header is of form: / *:?-+:? 
*|/ with # dashes + # colons >= 3 - // and trailing | optional on last column - col := 0 - for i < len(data) && data[i] != '\n' { - dashes := 0 - - if data[i] == ':' { - i++ - columns[col] |= TableAlignmentLeft - dashes++ - } - for i < len(data) && data[i] == '-' { - i++ - dashes++ - } - if i < len(data) && data[i] == ':' { - i++ - columns[col] |= TableAlignmentRight - dashes++ - } - for i < len(data) && data[i] == ' ' { - i++ - } - if i == len(data) { - return - } - // end of column test is messy - switch { - case dashes < 3: - // not a valid column - return - - case data[i] == '|' && !isBackslashEscaped(data, i): - // marker found, now skip past trailing whitespace - col++ - i++ - for i < len(data) && data[i] == ' ' { - i++ - } - - // trailing junk found after last column - if col >= colCount && i < len(data) && data[i] != '\n' { - return - } - - case (data[i] != '|' || isBackslashEscaped(data, i)) && col+1 < colCount: - // something else found where marker was required - return - - case data[i] == '\n': - // marker is optional for the last column - col++ - - default: - // trailing junk found after last column - return - } - } - if col != colCount { - return - } - - p.addBlock(TableHead, nil) - p.tableRow(header, columns, true) - size = i - if size < len(data) && data[size] == '\n' { - size++ - } - return -} - -func (p *Markdown) tableRow(data []byte, columns []CellAlignFlags, header bool) { - p.addBlock(TableRow, nil) - i, col := 0, 0 - - if data[i] == '|' && !isBackslashEscaped(data, i) { - i++ - } - - for col = 0; col < len(columns) && i < len(data); col++ { - for i < len(data) && data[i] == ' ' { - i++ - } - - cellStart := i - - for i < len(data) && (data[i] != '|' || isBackslashEscaped(data, i)) && data[i] != '\n' { - i++ - } - - cellEnd := i - - // skip the end-of-cell marker, possibly taking us past end of buffer - i++ - - for cellEnd > cellStart && cellEnd-1 < len(data) && data[cellEnd-1] == ' ' { - cellEnd-- - } - - cell := p.addBlock(TableCell, data[cellStart:cellEnd]) - cell.IsHeader = header - cell.Align = columns[col] - } - - // pad it out with empty columns to get the right number - for ; col < len(columns); col++ { - cell := p.addBlock(TableCell, nil) - cell.IsHeader = header - cell.Align = columns[col] - } - - // silently ignore rows with too many cells -} - -// returns blockquote prefix length -func (p *Markdown) quotePrefix(data []byte) int { - i := 0 - for i < 3 && i < len(data) && data[i] == ' ' { - i++ - } - if i < len(data) && data[i] == '>' { - if i+1 < len(data) && data[i+1] == ' ' { - return i + 2 - } - return i + 1 - } - return 0 -} - -// blockquote ends with at least one blank line -// followed by something without a blockquote prefix -func (p *Markdown) terminateBlockquote(data []byte, beg, end int) bool { - if p.isEmpty(data[beg:]) <= 0 { - return false - } - if end >= len(data) { - return true - } - return p.quotePrefix(data[end:]) == 0 && p.isEmpty(data[end:]) == 0 -} - -// parse a blockquote fragment -func (p *Markdown) quote(data []byte) int { - block := p.addBlock(BlockQuote, nil) - var raw bytes.Buffer - beg, end := 0, 0 - for beg < len(data) { - end = beg - // Step over whole lines, collecting them. 
While doing that, check for - // fenced code and if one's found, incorporate it altogether, - // irregardless of any contents inside it - for end < len(data) && data[end] != '\n' { - if p.extensions&FencedCode != 0 { - if i := p.fencedCodeBlock(data[end:], false); i > 0 { - // -1 to compensate for the extra end++ after the loop: - end += i - 1 - break - } - } - end++ - } - if end < len(data) && data[end] == '\n' { - end++ - } - if pre := p.quotePrefix(data[beg:]); pre > 0 { - // skip the prefix - beg += pre - } else if p.terminateBlockquote(data, beg, end) { - break - } - // this line is part of the blockquote - raw.Write(data[beg:end]) - beg = end - } - p.block(raw.Bytes()) - p.finalize(block) - return end -} - -// returns prefix length for block code -func (p *Markdown) codePrefix(data []byte) int { - if len(data) >= 1 && data[0] == '\t' { - return 1 - } - if len(data) >= 4 && data[0] == ' ' && data[1] == ' ' && data[2] == ' ' && data[3] == ' ' { - return 4 - } - return 0 -} - -func (p *Markdown) code(data []byte) int { - var work bytes.Buffer - - i := 0 - for i < len(data) { - beg := i - for i < len(data) && data[i] != '\n' { - i++ - } - if i < len(data) && data[i] == '\n' { - i++ - } - - blankline := p.isEmpty(data[beg:i]) > 0 - if pre := p.codePrefix(data[beg:i]); pre > 0 { - beg += pre - } else if !blankline { - // non-empty, non-prefixed line breaks the pre - i = beg - break - } - - // verbatim copy to the working buffer - if blankline { - work.WriteByte('\n') - } else { - work.Write(data[beg:i]) - } - } - - // trim all the \n off the end of work - workbytes := work.Bytes() - eol := len(workbytes) - for eol > 0 && workbytes[eol-1] == '\n' { - eol-- - } - if eol != len(workbytes) { - work.Truncate(eol) - } - - work.WriteByte('\n') - - block := p.addBlock(CodeBlock, work.Bytes()) // TODO: get rid of temp buffer - block.IsFenced = false - finalizeCodeBlock(block) - - return i -} - -// returns unordered list item prefix -func (p *Markdown) uliPrefix(data []byte) int { - i := 0 - // start with up to 3 spaces - for i < len(data) && i < 3 && data[i] == ' ' { - i++ - } - if i >= len(data)-1 { - return 0 - } - // need one of {'*', '+', '-'} followed by a space or a tab - if (data[i] != '*' && data[i] != '+' && data[i] != '-') || - (data[i+1] != ' ' && data[i+1] != '\t') { - return 0 - } - return i + 2 -} - -// returns ordered list item prefix -func (p *Markdown) oliPrefix(data []byte) int { - i := 0 - - // start with up to 3 spaces - for i < 3 && i < len(data) && data[i] == ' ' { - i++ - } - - // count the digits - start := i - for i < len(data) && data[i] >= '0' && data[i] <= '9' { - i++ - } - if start == i || i >= len(data)-1 { - return 0 - } - - // we need >= 1 digits followed by a dot and a space or a tab - if data[i] != '.' 
|| !(data[i+1] == ' ' || data[i+1] == '\t') { - return 0 - } - return i + 2 -} - -// returns definition list item prefix -func (p *Markdown) dliPrefix(data []byte) int { - if len(data) < 2 { - return 0 - } - i := 0 - // need a ':' followed by a space or a tab - if data[i] != ':' || !(data[i+1] == ' ' || data[i+1] == '\t') { - return 0 - } - for i < len(data) && data[i] == ' ' { - i++ - } - return i + 2 -} - -// parse ordered or unordered list block -func (p *Markdown) list(data []byte, flags ListType) int { - i := 0 - flags |= ListItemBeginningOfList - block := p.addBlock(List, nil) - block.ListFlags = flags - block.Tight = true - - for i < len(data) { - skip := p.listItem(data[i:], &flags) - if flags&ListItemContainsBlock != 0 { - block.ListData.Tight = false - } - i += skip - if skip == 0 || flags&ListItemEndOfList != 0 { - break - } - flags &= ^ListItemBeginningOfList - } - - above := block.Parent - finalizeList(block) - p.tip = above - return i -} - -// Returns true if block ends with a blank line, descending if needed -// into lists and sublists. -func endsWithBlankLine(block *Node) bool { - // TODO: figure this out. Always false now. - for block != nil { - //if block.lastLineBlank { - //return true - //} - t := block.Type - if t == List || t == Item { - block = block.LastChild - } else { - break - } - } - return false -} - -func finalizeList(block *Node) { - block.open = false - item := block.FirstChild - for item != nil { - // check for non-final list item ending with blank line: - if endsWithBlankLine(item) && item.Next != nil { - block.ListData.Tight = false - break - } - // recurse into children of list item, to see if there are spaces - // between any of them: - subItem := item.FirstChild - for subItem != nil { - if endsWithBlankLine(subItem) && (item.Next != nil || subItem.Next != nil) { - block.ListData.Tight = false - break - } - subItem = subItem.Next - } - item = item.Next - } -} - -// Parse a single list item. -// Assumes initial prefix is already removed if this is a sublist. 
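 A note before `listItem`: the `Tight` flag maintained by `finalizeList` above is what later decides whether the HTML renderer wraps each item's contents in `<p>` tags. A small sketch of the observable difference through `Run` (assuming the gopkg.in v2 import; the expected HTML in the comments is abbreviated): ```go package main import ( "fmt" blackfriday "gopkg.in/russross/blackfriday.v2" ) func main() { tight := []byte("* a\n* b\n") // no blank line between items loose := []byte("* a\n\n* b\n") // blank line makes the list loose fmt.Printf("%s", blackfriday.Run(tight)) // <ul><li>a</li><li>b</li></ul> fmt.Printf("%s", blackfriday.Run(loose)) // <ul><li><p>a</p></li>... } ``` 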
-func (p *Markdown) listItem(data []byte, flags *ListType) int { - // keep track of the indentation of the first line - itemIndent := 0 - if data[0] == '\t' { - itemIndent += 4 - } else { - for itemIndent < 3 && data[itemIndent] == ' ' { - itemIndent++ - } - } - - var bulletChar byte = '*' - i := p.uliPrefix(data) - if i == 0 { - i = p.oliPrefix(data) - } else { - bulletChar = data[i-2] - } - if i == 0 { - i = p.dliPrefix(data) - // reset definition term flag - if i > 0 { - *flags &= ^ListTypeTerm - } - } - if i == 0 { - // if in definition list, set term flag and continue - if *flags&ListTypeDefinition != 0 { - *flags |= ListTypeTerm - } else { - return 0 - } - } - - // skip leading whitespace on first line - for i < len(data) && data[i] == ' ' { - i++ - } - - // find the end of the line - line := i - for i > 0 && i < len(data) && data[i-1] != '\n' { - i++ - } - - // get working buffer - var raw bytes.Buffer - - // put the first line into the working buffer - raw.Write(data[line:i]) - line = i - - // process the following lines - containsBlankLine := false - sublist := 0 - lastChunkSize := 0 - -gatherlines: - for line < len(data) { - i++ - - // find the end of this line - for i < len(data) && data[i-1] != '\n' { - i++ - } - - // if it is an empty line, guess that it is part of this item - // and move on to the next line - if p.isEmpty(data[line:i]) > 0 { - containsBlankLine = true - line = i - continue - } - - // calculate the indentation - indent := 0 - indentIndex := 0 - if data[line] == '\t' { - indentIndex++ - indent += 4 - } else { - for indent < 4 && line+indent < i && data[line+indent] == ' ' { - indent++ - indentIndex++ - } - } - - chunk := data[line+indentIndex : i] - - // evaluate how this line fits in - switch { - // is this a nested list item? - case (p.uliPrefix(chunk) > 0 && !p.isHRule(chunk)) || - p.oliPrefix(chunk) > 0 || - p.dliPrefix(chunk) > 0: - - if containsBlankLine { - *flags |= ListItemContainsBlock - } - - // to be a nested list, it must be indented more - // if not, it is the next item in the same list - if indent <= itemIndent { - break gatherlines - } - - // is this the first item in the nested list? - if sublist == 0 { - if p.dliPrefix(chunk) > 0 { - sublist = raw.Len() - lastChunkSize - } else { - sublist = raw.Len() - } - } - - // is this a nested prefix heading? - case p.isPrefixHeading(chunk): - // if the heading is not indented, it is not nested in the list - // and thus ends the list - if containsBlankLine && indent < 4 { - *flags |= ListItemEndOfList - break gatherlines - } - *flags |= ListItemContainsBlock - - // anything following an empty line is only part - // of this item if it is indented 4 spaces - // (regardless of the indentation of the beginning of the item) - case containsBlankLine && indent < 4: - if *flags&ListTypeDefinition != 0 && i < len(data)-1 { - // is the next item still a part of this list? 
- next := i - for next < len(data) && data[next] != '\n' { - next++ - } - for next < len(data)-1 && data[next] == '\n' { - next++ - } - if i < len(data)-1 && data[i] != ':' && data[next] != ':' { - *flags |= ListItemEndOfList - } - } else { - *flags |= ListItemEndOfList - } - break gatherlines - - // a blank line means this should be parsed as a block - case containsBlankLine: - raw.WriteByte('\n') - *flags |= ListItemContainsBlock - } - - // if this line was preceded by one or more blanks, - // re-introduce the blank into the buffer - if containsBlankLine { - containsBlankLine = false - raw.WriteByte('\n') - } - - // add the line into the working buffer without prefix - raw.Write(data[line+indentIndex : i]) - - // remember how much was written into raw, if this turns out to be a - // definition list we'll need this number to know where the sublist starts - lastChunkSize = i - (line + indentIndex) - - line = i - } - - rawBytes := raw.Bytes() - - block := p.addBlock(Item, nil) - block.ListFlags = *flags - block.Tight = false - block.BulletChar = bulletChar - block.Delimiter = '.' // Only '.' is possible in Markdown, but ')' will also be possible in CommonMark - - // render the contents of the list item - if *flags&ListItemContainsBlock != 0 && *flags&ListTypeTerm == 0 { - // intermediate render of block item, except for definition term - if sublist > 0 { - p.block(rawBytes[:sublist]) - p.block(rawBytes[sublist:]) - } else { - p.block(rawBytes) - } - } else { - // intermediate render of inline item - if sublist > 0 { - child := p.addChild(Paragraph, 0) - child.content = rawBytes[:sublist] - p.block(rawBytes[sublist:]) - } else { - child := p.addChild(Paragraph, 0) - child.content = rawBytes - } - } - return line -} - -// render a single paragraph that has already been parsed out -func (p *Markdown) renderParagraph(data []byte) { - if len(data) == 0 { - return - } - - // trim leading spaces - beg := 0 - for data[beg] == ' ' { - beg++ - } - - end := len(data) - // trim trailing newline - if data[len(data)-1] == '\n' { - end-- - } - - // trim trailing spaces - for end > beg && data[end-1] == ' ' { - end-- - } - - p.addBlock(Paragraph, data[beg:end]) -} - -func (p *Markdown) paragraph(data []byte) int { - // prev: index of 1st char of previous line - // line: index of 1st char of current line - // i: index of cursor/end of current line - var prev, line, i int - tabSize := TabSizeDefault - if p.extensions&TabSizeEight != 0 { - tabSize = TabSizeDouble - } - // keep going until we find something to mark the end of the paragraph - for i < len(data) { - // mark the beginning of the current line - prev = line - current := data[i:] - line = i - - // did we find a reference or a footnote? If so, end a paragraph - // preceding it and report that we have consumed up to the end of that - // reference: - if refEnd := isReference(p, current, tabSize); refEnd > 0 { - p.renderParagraph(data[:i]) - return i + refEnd - } - - // did we find a blank line marking the end of the paragraph? - if n := p.isEmpty(current); n > 0 { - // did this blank line followed by a definition list item? 
- if p.extensions&DefinitionLists != 0 { - if i < len(data)-1 && data[i+1] == ':' { - return p.list(data[prev:], ListTypeDefinition) - } - } - - p.renderParagraph(data[:i]) - return i + n - } - - // an underline under some text marks a heading, so our paragraph ended on prev line - if i > 0 { - if level := p.isUnderlinedHeading(current); level > 0 { - // render the paragraph - p.renderParagraph(data[:prev]) - - // ignore leading and trailing whitespace - eol := i - 1 - for prev < eol && data[prev] == ' ' { - prev++ - } - for eol > prev && data[eol-1] == ' ' { - eol-- - } - - id := "" - if p.extensions&AutoHeadingIDs != 0 { - id = sanitized_anchor_name.Create(string(data[prev:eol])) - } - - block := p.addBlock(Heading, data[prev:eol]) - block.Level = level - block.HeadingID = id - - // find the end of the underline - for i < len(data) && data[i] != '\n' { - i++ - } - return i - } - } - - // if the next line starts a block of HTML, then the paragraph ends here - if p.extensions&LaxHTMLBlocks != 0 { - if data[i] == '<' && p.html(current, false) > 0 { - // rewind to before the HTML block - p.renderParagraph(data[:i]) - return i - } - } - - // if there's a prefixed heading or a horizontal rule after this, paragraph is over - if p.isPrefixHeading(current) || p.isHRule(current) { - p.renderParagraph(data[:i]) - return i - } - - // if there's a fenced code block, paragraph is over - if p.extensions&FencedCode != 0 { - if p.fencedCodeBlock(current, false) > 0 { - p.renderParagraph(data[:i]) - return i - } - } - - // if there's a definition list item, prev line is a definition term - if p.extensions&DefinitionLists != 0 { - if p.dliPrefix(current) != 0 { - ret := p.list(data[prev:], ListTypeDefinition) - return ret - } - } - - // if there's a list after this, paragraph is over - if p.extensions&NoEmptyLineBeforeBlock != 0 { - if p.uliPrefix(current) != 0 || - p.oliPrefix(current) != 0 || - p.quotePrefix(current) != 0 || - p.codePrefix(current) != 0 { - p.renderParagraph(data[:i]) - return i - } - } - - // otherwise, scan to the beginning of the next line - nl := bytes.IndexByte(data[i:], '\n') - if nl >= 0 { - i += nl + 1 - } else { - i += len(data[i:]) - } - } - - p.renderParagraph(data[:i]) - return i -} - -func skipChar(data []byte, start int, char byte) int { - i := start - for i < len(data) && data[i] == char { - i++ - } - return i -} - -func skipUntilChar(text []byte, start int, char byte) int { - i := start - for i < len(text) && text[i] != char { - i++ - } - return i -} diff --git a/vendor/go.step.sm/cli-utils/pkg/blackfriday/doc.go b/vendor/go.step.sm/cli-utils/pkg/blackfriday/doc.go deleted file mode 100644 index 5b3fa987..00000000 --- a/vendor/go.step.sm/cli-utils/pkg/blackfriday/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -// Package blackfriday is a markdown processor. -// -// It translates plain text with simple formatting rules into an AST, which can -// then be further processed to HTML (provided by Blackfriday itself) or other -// formats (provided by the community). -// -// The simplest way to invoke Blackfriday is to call the Run function. It will -// take a text input and produce a text output in HTML (or other format). -// -// A slightly more sophisticated way to use Blackfriday is to create a Markdown -// processor and to call Parse, which returns a syntax tree for the input -// document. You can leverage Blackfriday's parsing for content extraction from -// markdown documents. You can assign a custom renderer and set various options -// to the Markdown processor. 
-// -// If you're interested in calling Blackfriday from command line, see -// https://github.com/russross/blackfriday-tool. -package blackfriday diff --git a/vendor/go.step.sm/cli-utils/pkg/blackfriday/esc.go b/vendor/go.step.sm/cli-utils/pkg/blackfriday/esc.go deleted file mode 100644 index 6385f27c..00000000 --- a/vendor/go.step.sm/cli-utils/pkg/blackfriday/esc.go +++ /dev/null @@ -1,34 +0,0 @@ -package blackfriday - -import ( - "html" - "io" -) - -var htmlEscaper = [256][]byte{ - '&': []byte("&"), - '<': []byte("<"), - '>': []byte(">"), - '"': []byte("""), -} - -func escapeHTML(w io.Writer, s []byte) { - var start, end int - for end < len(s) { - escSeq := htmlEscaper[s[end]] - if escSeq != nil { - w.Write(s[start:end]) - w.Write(escSeq) - start = end + 1 - } - end++ - } - if start < len(s) && end <= len(s) { - w.Write(s[start:end]) - } -} - -func escLink(w io.Writer, text []byte) { - unesc := html.UnescapeString(string(text)) - escapeHTML(w, []byte(unesc)) -} diff --git a/vendor/go.step.sm/cli-utils/pkg/blackfriday/html.go b/vendor/go.step.sm/cli-utils/pkg/blackfriday/html.go deleted file mode 100644 index 25fb185e..00000000 --- a/vendor/go.step.sm/cli-utils/pkg/blackfriday/html.go +++ /dev/null @@ -1,940 +0,0 @@ -// -// Blackfriday Markdown Processor -// Available at http://github.com/russross/blackfriday -// -// Copyright © 2011 Russ Ross . -// Distributed under the Simplified BSD License. -// See README.md for details. -// - -// -// -// HTML rendering backend -// -// - -package blackfriday - -import ( - "bytes" - "fmt" - "io" - "regexp" - "strings" -) - -// HTMLFlags control optional behavior of HTML renderer. -type HTMLFlags int - -// HTML renderer configuration options. -const ( - HTMLFlagsNone HTMLFlags = 0 - SkipHTML HTMLFlags = 1 << iota // Skip preformatted HTML blocks - SkipImages // Skip embedded images - SkipLinks // Skip all links - Safelink // Only link to trusted protocols - NofollowLinks // Only link with rel="nofollow" - NoreferrerLinks // Only link with rel="noreferrer" - HrefTargetBlank // Add a blank target - CompletePage // Generate a complete HTML page - UseXHTML // Generate XHTML output instead of HTML - FootnoteReturnLinks // Generate a link at the end of a footnote to return to the source - Smartypants // Enable smart punctuation substitutions - SmartypantsFractions // Enable smart fractions (with Smartypants) - SmartypantsDashes // Enable smart dashes (with Smartypants) - SmartypantsLatexDashes // Enable LaTeX-style dashes (with Smartypants) - SmartypantsAngledQuotes // Enable angled double quotes (with Smartypants) for double quotes rendering - SmartypantsQuotesNBSP // Enable « French guillemets » (with Smartypants) - TOC // Generate a table of contents -) - -var ( - htmlTagRe = regexp.MustCompile("(?i)^" + htmlTag) -) - -const ( - htmlTag = "(?:" + openTag + "|" + closeTag + "|" + htmlComment + "|" + - processingInstruction + "|" + declaration + "|" + cdata + ")" - closeTag = "]" - openTag = "<" + tagName + attribute + "*" + "\\s*/?>" - attribute = "(?:" + "\\s+" + attributeName + attributeValueSpec + "?)" - attributeValue = "(?:" + unquotedValue + "|" + singleQuotedValue + "|" + doubleQuotedValue + ")" - attributeValueSpec = "(?:" + "\\s*=" + "\\s*" + attributeValue + ")" - attributeName = "[a-zA-Z_:][a-zA-Z0-9:._-]*" - cdata = "" - declaration = "]*>" - doubleQuotedValue = "\"[^\"]*\"" - htmlComment = "|" - processingInstruction = "[<][?].*?[?][>]" - singleQuotedValue = "'[^']*'" - tagName = "[A-Za-z][A-Za-z0-9-]*" - unquotedValue = 
"[^\"'=<>`\\x00-\\x20]+" -) - -// HTMLRendererParameters is a collection of supplementary parameters tweaking -// the behavior of various parts of HTML renderer. -type HTMLRendererParameters struct { - // Prepend this text to each relative URL. - AbsolutePrefix string - // Add this text to each footnote anchor, to ensure uniqueness. - FootnoteAnchorPrefix string - // Show this text inside the tag for a footnote return link, if the - // HTML_FOOTNOTE_RETURN_LINKS flag is enabled. If blank, the string - // [return] is used. - FootnoteReturnLinkContents string - // If set, add this text to the front of each Heading ID, to ensure - // uniqueness. - HeadingIDPrefix string - // If set, add this text to the back of each Heading ID, to ensure uniqueness. - HeadingIDSuffix string - - Title string // Document title (used if CompletePage is set) - CSS string // Optional CSS file URL (used if CompletePage is set) - Icon string // Optional icon file URL (used if CompletePage is set) - - Flags HTMLFlags // Flags allow customizing this renderer's behavior -} - -// HTMLRenderer is a type that implements the Renderer interface for HTML output. -// -// Do not create this directly, instead use the NewHTMLRenderer function. -type HTMLRenderer struct { - HTMLRendererParameters - - closeTag string // how to end singleton tags: either " />" or ">" - - // Track heading IDs to prevent ID collision in a single generation. - headingIDs map[string]int - - lastOutputLen int - disableTags int - - sr *SPRenderer -} - -const ( - xhtmlClose = " />" - htmlClose = ">" -) - -// NewHTMLRenderer creates and configures an HTMLRenderer object, which -// satisfies the Renderer interface. -func NewHTMLRenderer(params HTMLRendererParameters) *HTMLRenderer { - // configure the rendering engine - closeTag := htmlClose - if params.Flags&UseXHTML != 0 { - closeTag = xhtmlClose - } - - if params.FootnoteReturnLinkContents == "" { - params.FootnoteReturnLinkContents = `[return]` - } - - return &HTMLRenderer{ - HTMLRendererParameters: params, - - closeTag: closeTag, - headingIDs: make(map[string]int), - - sr: NewSmartypantsRenderer(params.Flags), - } -} - -func isHTMLTag(tag []byte, tagname string) bool { - found, _ := findHTMLTagPos(tag, tagname) - return found -} - -// Look for a character, but ignore it when it's in any kind of quotes, it -// might be JavaScript -func skipUntilCharIgnoreQuotes(html []byte, start int, char byte) int { - inSingleQuote := false - inDoubleQuote := false - inGraveQuote := false - i := start - for i < len(html) { - switch { - case html[i] == char && !inSingleQuote && !inDoubleQuote && !inGraveQuote: - return i - case html[i] == '\'': - inSingleQuote = !inSingleQuote - case html[i] == '"': - inDoubleQuote = !inDoubleQuote - case html[i] == '`': - inGraveQuote = !inGraveQuote - } - i++ - } - return start -} - -func findHTMLTagPos(tag []byte, tagname string) (bool, int) { - i := 0 - if i < len(tag) && tag[0] != '<' { - return false, -1 - } - i++ - i = skipSpace(tag, i) - - if i < len(tag) && tag[i] == '/' { - i++ - } - - i = skipSpace(tag, i) - j := 0 - for ; i < len(tag); i, j = i+1, j+1 { - if j >= len(tagname) { - break - } - - if strings.ToLower(string(tag[i]))[0] != tagname[j] { - return false, -1 - } - } - - if i == len(tag) { - return false, -1 - } - - rightAngle := skipUntilCharIgnoreQuotes(tag, i, '>') - if rightAngle >= i { - return true, rightAngle - } - - return false, -1 -} - -func skipSpace(tag []byte, i int) int { - for i < len(tag) && isspace(tag[i]) { - i++ - } - return i -} - -func 
isRelativeLink(link []byte) (yes bool) {
-	// a tag begin with '#'
-	if link[0] == '#' {
-		return true
-	}
-
-	// link begin with '/' but not '//', the second maybe a protocol relative link
-	if len(link) >= 2 && link[0] == '/' && link[1] != '/' {
-		return true
-	}
-
-	// only the root '/'
-	if len(link) == 1 && link[0] == '/' {
-		return true
-	}
-
-	// current directory : begin with "./"
-	if bytes.HasPrefix(link, []byte("./")) {
-		return true
-	}
-
-	// parent directory : begin with "../"
-	if bytes.HasPrefix(link, []byte("../")) {
-		return true
-	}
-
-	return false
-}
-
-func (r *HTMLRenderer) ensureUniqueHeadingID(id string) string {
-	for count, found := r.headingIDs[id]; found; count, found = r.headingIDs[id] {
-		tmp := fmt.Sprintf("%s-%d", id, count+1)
-
-		if _, tmpFound := r.headingIDs[tmp]; !tmpFound {
-			r.headingIDs[id] = count + 1
-			id = tmp
-		} else {
-			id = id + "-1"
-		}
-	}
-
-	if _, found := r.headingIDs[id]; !found {
-		r.headingIDs[id] = 0
-	}
-
-	return id
-}
-
-func (r *HTMLRenderer) addAbsPrefix(link []byte) []byte {
-	if r.AbsolutePrefix != "" && isRelativeLink(link) && link[0] != '.' {
-		newDest := r.AbsolutePrefix
-		if link[0] != '/' {
-			newDest += "/"
-		}
-		newDest += string(link)
-		return []byte(newDest)
-	}
-	return link
-}
-
-func appendLinkAttrs(attrs []string, flags HTMLFlags, link []byte) []string {
-	if isRelativeLink(link) {
-		return attrs
-	}
-	val := []string{}
-	if flags&NofollowLinks != 0 {
-		val = append(val, "nofollow")
-	}
-	if flags&NoreferrerLinks != 0 {
-		val = append(val, "noreferrer")
-	}
-	if flags&HrefTargetBlank != 0 {
-		attrs = append(attrs, "target=\"_blank\"")
-	}
-	if len(val) == 0 {
-		return attrs
-	}
-	attr := fmt.Sprintf("rel=%q", strings.Join(val, " "))
-	return append(attrs, attr)
-}
-
-func isMailto(link []byte) bool {
-	return bytes.HasPrefix(link, []byte("mailto:"))
-}
-
-func needSkipLink(flags HTMLFlags, dest []byte) bool {
-	if flags&SkipLinks != 0 {
-		return true
-	}
-	return flags&Safelink != 0 && !isSafeLink(dest) && !isMailto(dest)
-}
-
-func isSmartypantable(node *Node) bool {
-	pt := node.Parent.Type
-	return pt != Link && pt != CodeBlock && pt != Code
-}
-
-func appendLanguageAttr(attrs []string, info []byte) []string {
-	if len(info) == 0 {
-		return attrs
-	}
-	endOfLang := bytes.IndexAny(info, "\t ")
-	if endOfLang < 0 {
-		endOfLang = len(info)
-	}
-	return append(attrs, fmt.Sprintf("class=\"language-%s\"", info[:endOfLang]))
-}
-
-func (r *HTMLRenderer) tag(w io.Writer, name []byte, attrs []string) {
-	w.Write(name)
-	if len(attrs) > 0 {
-		w.Write(spaceBytes)
-		w.Write([]byte(strings.Join(attrs, " ")))
-	}
-	w.Write(gtBytes)
-	r.lastOutputLen = 1
-}
-
-func footnoteRef(prefix string, node *Node) []byte {
-	urlFrag := prefix + string(slugify(node.Destination))
-	anchor := fmt.Sprintf(`<a href="#fn:%s">%d</a>`, urlFrag, node.NoteID)
-	return []byte(fmt.Sprintf(`<sup class="footnote-ref" id="fnref:%s">%s</sup>`, urlFrag, anchor))
-}
-
-func footnoteItem(prefix string, slug []byte) []byte {
-	return []byte(fmt.Sprintf(`<li id="fn:%s%s">`, prefix, slug))
-}
-
-func footnoteReturnLink(prefix, returnLink string, slug []byte) []byte {
-	const format = ` <a class="footnote-return" href="#fnref:%s%s">%s</a>`
-	return []byte(fmt.Sprintf(format, prefix, slug, returnLink))
-}
-
-func itemOpenCR(node *Node) bool {
-	if node.Prev == nil {
-		return false
-	}
-	ld := node.Parent.ListData
-	return !ld.Tight && ld.ListFlags&ListTypeDefinition == 0
-}
-
-func skipParagraphTags(node *Node) bool {
-	grandparent := node.Parent.Parent
-	if grandparent == nil || grandparent.Type != List {
-		return false
-	}
-	tightOrTerm := grandparent.Tight || node.Parent.ListFlags&ListTypeTerm != 0
-	return grandparent.Type == List && tightOrTerm
-}
-
-func cellAlignment(align CellAlignFlags) string {
-	switch align {
-	case TableAlignmentLeft:
-		return "left"
-	case TableAlignmentRight:
-		return "right"
-	case TableAlignmentCenter:
-		return "center"
-	default:
-		return ""
-	}
-}
-
-func (r *HTMLRenderer) out(w io.Writer, text []byte) {
-	if r.disableTags > 0 {
-		w.Write(htmlTagRe.ReplaceAll(text, []byte{}))
-	} else {
-		w.Write(text)
-	}
-	r.lastOutputLen = len(text)
-}
-
-func (r *HTMLRenderer) cr(w io.Writer) {
-	if r.lastOutputLen > 0 {
-		r.out(w, nlBytes)
-	}
-}
-
-var (
-	nlBytes    = []byte{'\n'}
-	gtBytes    = []byte{'>'}
-	spaceBytes = []byte{' '}
-)
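An aside on the table of tag constants that follows: the deleted renderer keeps every HTML tag it can emit as a preallocated []byte, so emitting a tag through r.out is a single Write with no per-node string formatting or allocation. A minimal sketch of the same pattern, with hypothetical names (illustrative code, not part of this diff):

package main

import (
	"bytes"
	"fmt"
)

// Preallocated tags: writing one is a single Write call,
// no fmt.Sprintf on the hot rendering path.
var (
	emOpen  = []byte("<em>")
	emClose = []byte("</em>")
)

func writeEmph(w *bytes.Buffer, literal []byte) {
	w.Write(emOpen)
	w.Write(literal) // the real renderer HTML-escapes this first
	w.Write(emClose)
}

func main() {
	var b bytes.Buffer
	writeEmph(&b, []byte("hi"))
	fmt.Println(b.String()) // <em>hi</em>
}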
-
-var (
-	brTag              = []byte("<br>")
-	brXHTMLTag         = []byte("<br />")
-	emTag              = []byte("<em>")
-	emCloseTag         = []byte("</em>")
-	strongTag          = []byte("<strong>")
-	strongCloseTag     = []byte("</strong>")
-	delTag             = []byte("<del>")
-	delCloseTag        = []byte("</del>")
-	ttTag              = []byte("<tt>")
-	ttCloseTag         = []byte("</tt>")
-	aTag               = []byte("<a")
-	aCloseTag          = []byte("</a>")
-	preTag             = []byte("<pre>")
-	preCloseTag        = []byte("</pre>")
-	codeTag            = []byte("<code>")
-	codeCloseTag       = []byte("</code>")
-	pTag               = []byte("<p>")
-	pCloseTag          = []byte("</p>")
-	blockquoteTag      = []byte("<blockquote>")
-	blockquoteCloseTag = []byte("</blockquote>")
-	hrTag              = []byte("<hr>")
-	hrXHTMLTag         = []byte("<hr />")
-	ulTag              = []byte("<ul>")
-	ulCloseTag         = []byte("</ul>")
-	olTag              = []byte("<ol>")
-	olCloseTag         = []byte("</ol>")
-	dlTag              = []byte("<dl>")
-	dlCloseTag         = []byte("</dl>")
-	liTag              = []byte("<li>")
-	liCloseTag         = []byte("</li>")
-	ddTag              = []byte("<dd>")
-	ddCloseTag         = []byte("</dd>")
-	dtTag              = []byte("<dt>")
-	dtCloseTag         = []byte("</dt>")
-	tableTag           = []byte("<table>")
-	tableCloseTag      = []byte("</table>")
-	tdTag              = []byte("<td")
-	tdCloseTag         = []byte("</td>")
-	thTag              = []byte("<th")
-	thCloseTag         = []byte("</th>")
-	theadTag           = []byte("<thead>")
-	theadCloseTag      = []byte("</thead>")
-	tbodyTag           = []byte("<tbody>")
-	tbodyCloseTag      = []byte("</tbody>")
-	trTag              = []byte("<tr>")
-	trCloseTag         = []byte("</tr>")
-	h1Tag              = []byte("<h1")
-	h1CloseTag         = []byte("</h1>")
-	h2Tag              = []byte("<h2")
-	h2CloseTag         = []byte("</h2>")
-	h3Tag              = []byte("<h3")
-	h3CloseTag         = []byte("</h3>")
-	h4Tag              = []byte("<h4")
-	h4CloseTag         = []byte("</h4>")
-	h5Tag              = []byte("<h5")
-	h5CloseTag         = []byte("</h5>")
-	h6Tag              = []byte("<h6")
-	h6CloseTag         = []byte("</h6>")
-
-	footnotesDivBytes      = []byte("\n<div class=\"footnotes\">\n\n")
-	footnotesCloseDivBytes = []byte("\n</div>\n")
-)
-
-func headingTagsFromLevel(level int) ([]byte, []byte) {
-	switch level {
-	case 1:
-		return h1Tag, h1CloseTag
-	case 2:
-		return h2Tag, h2CloseTag
-	case 3:
-		return h3Tag, h3CloseTag
-	case 4:
-		return h4Tag, h4CloseTag
-	case 5:
-		return h5Tag, h5CloseTag
-	default:
-		return h6Tag, h6CloseTag
-	}
-}
-
-func (r *HTMLRenderer) outHRTag(w io.Writer) {
-	if r.Flags&UseXHTML == 0 {
-		r.out(w, hrTag)
-	} else {
-		r.out(w, hrXHTMLTag)
-	}
-}
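To make the two-visit contract documented on RenderNode below concrete: a sketch against the upstream package this vendored copy derives from (github.com/russross/blackfriday/v2; the fork is assumed to mirror its v2 API). The walker visits container nodes once with entering=true and once with entering=false, and leaf nodes only once:

package main

import (
	"fmt"

	"github.com/russross/blackfriday/v2"
)

func main() {
	md := blackfriday.New(blackfriday.WithExtensions(blackfriday.CommonExtensions))
	ast := md.Parse([]byte("a *b*\n"))
	// Print each visit; Paragraph and Emph appear twice, Text once.
	ast.Walk(func(node *blackfriday.Node, entering bool) blackfriday.WalkStatus {
		fmt.Printf("%v entering=%v\n", node.Type, entering)
		return blackfriday.GoToNext
	})
}

Returning SkipChildren or Terminate from the visitor adjusts the walk exactly as the comment below describes.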
-
-// RenderNode is a default renderer of a single node of a syntax tree. For
-// block nodes it will be called twice: first time with entering=true, second
-// time with entering=false, so that it could know when it's working on an open
-// tag and when on close. It writes the result to w.
-//
-// The return value is a way to tell the calling walker to adjust its walk
-// pattern: e.g. it can terminate the traversal by returning Terminate. Or it
-// can ask the walker to skip a subtree of this node by returning SkipChildren.
-// The typical behavior is to return GoToNext, which asks for the usual
-// traversal to the next node.
-func (r *HTMLRenderer) RenderNode(w io.Writer, node *Node, entering bool) WalkStatus {
-	attrs := []string{}
-	switch node.Type {
-	case Text:
-		if r.Flags&Smartypants != 0 {
-			var tmp bytes.Buffer
-			escapeHTML(&tmp, node.Literal)
-			r.sr.Process(w, tmp.Bytes())
-		} else {
-			if node.Parent.Type == Link {
-				escLink(w, node.Literal)
-			} else {
-				escapeHTML(w, node.Literal)
-			}
-		}
-	case Softbreak:
-		r.cr(w)
-		// TODO: make it configurable via out(renderer.softbreak)
-	case Hardbreak:
-		if r.Flags&UseXHTML == 0 {
-			r.out(w, brTag)
-		} else {
-			r.out(w, brXHTMLTag)
-		}
-		r.cr(w)
-	case Emph:
-		if entering {
-			r.out(w, emTag)
-		} else {
-			r.out(w, emCloseTag)
-		}
-	case Strong:
-		if entering {
-			r.out(w, strongTag)
-		} else {
-			r.out(w, strongCloseTag)
-		}
-	case Del:
-		if entering {
-			r.out(w, delTag)
-		} else {
-			r.out(w, delCloseTag)
-		}
-	case HTMLSpan:
-		if r.Flags&SkipHTML != 0 {
-			break
-		}
-		r.out(w, node.Literal)
-	case Link:
-		// mark it but don't link it if it is not a safe link: no smartypants
-		dest := node.LinkData.Destination
-		if needSkipLink(r.Flags, dest) {
-			if entering {
-				r.out(w, ttTag)
-			} else {
-				r.out(w, ttCloseTag)
-			}
-		} else {
-			if entering {
-				dest = r.addAbsPrefix(dest)
-				var hrefBuf bytes.Buffer
-				hrefBuf.WriteString("href=\"")
-				escLink(&hrefBuf, dest)
-				hrefBuf.WriteByte('"')
-				attrs = append(attrs, hrefBuf.String())
-				if node.NoteID != 0 {
-					r.out(w, footnoteRef(r.FootnoteAnchorPrefix, node))
-					break
-				}
-				attrs = appendLinkAttrs(attrs, r.Flags, dest)
-				if len(node.LinkData.Title) > 0 {
-					var titleBuff bytes.Buffer
-					titleBuff.WriteString("title=\"")
-					escapeHTML(&titleBuff, node.LinkData.Title)
-					titleBuff.WriteByte('"')
-					attrs = append(attrs, titleBuff.String())
-				}
-				r.tag(w, aTag, attrs)
-			} else {
-				if node.NoteID != 0 {
-					break
-				}
-				r.out(w, aCloseTag)
-			}
-		}
-	case Image:
-		if r.Flags&SkipImages != 0 {
-			return SkipChildren
-		}
-		if entering {
-			dest := node.LinkData.Destination
-			dest = r.addAbsPrefix(dest)
-			if r.disableTags == 0 {
-				//if options.safe && potentiallyUnsafe(dest) {
-				//out(w, []byte(`<img src="" alt="`))
-				//} else {
-				r.out(w, []byte(`<img src="`))
-				escLink(w, dest)
-				r.out(w, []byte(`" alt="`))
-				//}
-			}
-			r.disableTags++
-		} else {
-			r.disableTags--
-			if r.disableTags == 0 {
-				if node.LinkData.Title != nil {
-					r.out(w, []byte(`" title="`))
-					escapeHTML(w, node.LinkData.Title)
-				}
-				r.out(w, []byte(`" />`))
-			}
-		}
-	case Code:
-		r.out(w, codeTag)
-		escapeHTML(w, node.Literal)
-		r.out(w, codeCloseTag)
-	case Document:
-		break
-	case Paragraph:
-		if skipParagraphTags(node) {
-			break
-		}
-		if entering {
-			// TODO: untangle this clusterfuck about when the newlines need
-			// to be added and when not.
-			if node.Prev != nil {
-				switch node.Prev.Type {
-				case HTMLBlock, List, Paragraph, Heading, CodeBlock, BlockQuote, HorizontalRule:
-					r.cr(w)
-				}
-			}
-			if node.Parent.Type == BlockQuote && node.Prev == nil {
-				r.cr(w)
-			}
-			r.out(w, pTag)
-		} else {
-			r.out(w, pCloseTag)
-			if !(node.Parent.Type == Item && node.Next == nil) {
-				r.cr(w)
-			}
-		}
-	case BlockQuote:
-		if entering {
-			r.cr(w)
-			r.out(w, blockquoteTag)
-		} else {
-			r.out(w, blockquoteCloseTag)
-			r.cr(w)
-		}
-	case HTMLBlock:
-		if r.Flags&SkipHTML != 0 {
-			break
-		}
-		r.cr(w)
-		r.out(w, node.Literal)
-		r.cr(w)
-	case Heading:
-		openTag, closeTag := headingTagsFromLevel(node.Level)
-		if entering {
-			if node.IsTitleblock {
-				attrs = append(attrs, `class="title"`)
-			}
-			if node.HeadingID != "" {
-				id := r.ensureUniqueHeadingID(node.HeadingID)
-				if r.HeadingIDPrefix != "" {
-					id = r.HeadingIDPrefix + id
-				}
-				if r.HeadingIDSuffix != "" {
-					id = id + r.HeadingIDSuffix
-				}
-				attrs = append(attrs, fmt.Sprintf(`id="%s"`, id))
-			}
-			r.cr(w)
-			r.tag(w, openTag, attrs)
-		} else {
-			r.out(w, closeTag)
-			if !(node.Parent.Type == Item && node.Next == nil) {
-				r.cr(w)
-			}
-		}
-	case HorizontalRule:
-		r.cr(w)
-		r.outHRTag(w)
-		r.cr(w)
-	case List:
-		openTag := ulTag
-		closeTag := ulCloseTag
-		if node.ListFlags&ListTypeOrdered != 0 {
-			openTag = olTag
-			closeTag = olCloseTag
-		}
-		if node.ListFlags&ListTypeDefinition != 0 {
-			openTag = dlTag
-			closeTag = dlCloseTag
-		}
-		if entering {
-			if node.IsFootnotesList {
-				r.out(w, footnotesDivBytes)
-				r.outHRTag(w)
-				r.cr(w)
-			}
-			r.cr(w)
-			if node.Parent.Type == Item && node.Parent.Parent.Tight {
-				r.cr(w)
-			}
-			r.tag(w, openTag[:len(openTag)-1], attrs)
-			r.cr(w)
-		} else {
-			r.out(w, closeTag)
-			//cr(w)
-			//if node.parent.Type != Item {
-			//	cr(w)
-			//}
-			if node.Parent.Type == Item && node.Next != nil {
-				r.cr(w)
-			}
-			if node.Parent.Type == Document || node.Parent.Type == BlockQuote {
-				r.cr(w)
-			}
-			if node.IsFootnotesList {
-				r.out(w, footnotesCloseDivBytes)
-			}
-		}
-	case Item:
-		openTag := liTag
-		closeTag := liCloseTag
-		if node.ListFlags&ListTypeDefinition != 0 {
-			openTag = ddTag
-			closeTag = ddCloseTag
-		}
-		if node.ListFlags&ListTypeTerm != 0 {
-			openTag = dtTag
-			closeTag = dtCloseTag
-		}
-		if entering {
-			if itemOpenCR(node) {
-				r.cr(w)
-			}
-			if node.ListData.RefLink != nil {
-				slug := slugify(node.ListData.RefLink)
-				r.out(w, footnoteItem(r.FootnoteAnchorPrefix, slug))
-				break
-			}
-			r.out(w, openTag)
-		} else {
-			if node.ListData.RefLink != nil {
-				slug := slugify(node.ListData.RefLink)
-				if r.Flags&FootnoteReturnLinks != 0 {
-					r.out(w, footnoteReturnLink(r.FootnoteAnchorPrefix, r.FootnoteReturnLinkContents, slug))
-				}
-			}
-			r.out(w, closeTag)
-			r.cr(w)
-		}
-	case CodeBlock:
-		attrs = appendLanguageAttr(attrs, node.Info)
-		r.cr(w)
-		r.out(w, preTag)
-		r.tag(w, codeTag[:len(codeTag)-1], attrs)
-		escapeHTML(w, node.Literal)
-		r.out(w, codeCloseTag)
-		r.out(w, preCloseTag)
-		if node.Parent.Type != Item {
-			r.cr(w)
-		}
-	case Table:
-		if entering {
-			r.cr(w)
-			r.out(w, tableTag)
-		} else {
-			r.out(w, tableCloseTag)
-			r.cr(w)
-		}
-	case TableCell:
-		openTag := tdTag
-		closeTag := tdCloseTag
-		if node.IsHeader {
-			openTag = thTag
-			closeTag = thCloseTag
-		}
-		if entering {
-			align := cellAlignment(node.Align)
-			if align != "" {
-				attrs = append(attrs, fmt.Sprintf(`align="%s"`, align))
-			}
-			if node.Prev == nil {
-				r.cr(w)
-			}
-			r.tag(w, openTag, attrs)
-		} else {
-			r.out(w, closeTag)
-			r.cr(w)
-		}
-	case TableHead:
-		if entering {
-			r.cr(w)
-			r.out(w, theadTag)
-		} else {
-			r.out(w, theadCloseTag)
-			r.cr(w)
-		}
-	case TableBody:
-		if entering {
-			r.cr(w)
-			r.out(w, tbodyTag)
-			// XXX: this is to adhere to a rather silly test. Should fix test.
-			if node.FirstChild == nil {
-				r.cr(w)
-			}
-		} else {
-			r.out(w, tbodyCloseTag)
-			r.cr(w)
-		}
-	case TableRow:
-		if entering {
-			r.cr(w)
-			r.out(w, trTag)
-		} else {
-			r.out(w, trCloseTag)
-			r.cr(w)
-		}
-	default:
-		panic("Unknown node type " + node.Type.String())
-	}
-	return GoToNext
-}
-
-// RenderHeader writes HTML document preamble and TOC if requested.
-func (r *HTMLRenderer) RenderHeader(w io.Writer, ast *Node) {
-	r.writeDocumentHeader(w)
-	if r.Flags&TOC != 0 {
-		r.writeTOC(w, ast)
-	}
-}
-
-// RenderFooter writes HTML document footer.
-func (r *HTMLRenderer) RenderFooter(w io.Writer, ast *Node) {
-	if r.Flags&CompletePage == 0 {
-		return
-	}
-	io.WriteString(w, "\n</body>\n</html>\n")
-}
-
-func (r *HTMLRenderer) writeDocumentHeader(w io.Writer) {
-	if r.Flags&CompletePage == 0 {
-		return
-	}
-	ending := ""
-	if r.Flags&UseXHTML != 0 {
-		io.WriteString(w, "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\" ")
-		io.WriteString(w, "\"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">\n")
-		io.WriteString(w, "<html xmlns=\"http://www.w3.org/1999/xhtml\">\n")
-		ending = " /"
-	} else {
-		io.WriteString(w, "<!DOCTYPE html>\n")
-		io.WriteString(w, "<html>\n")
-	}
-	io.WriteString(w, "<head>\n")
-	io.WriteString(w, "  <title>")
-	if r.Flags&Smartypants != 0 {
-		r.sr.Process(w, []byte(r.Title))
-	} else {
-		escapeHTML(w, []byte(r.Title))
-	}
-	io.WriteString(w, "</title>\n")
-	io.WriteString(w, "  <meta name=\"GENERATOR\" content=\"Blackfriday Markdown Processor v")
-	io.WriteString(w, Version)
-	io.WriteString(w, "\"")
-	io.WriteString(w, ending)
-	io.WriteString(w, ">\n")
-	io.WriteString(w, "  <meta charset=\"utf-8\"")
-	io.WriteString(w, ending)
-	io.WriteString(w, ">\n")
-	if r.CSS != "" {
-		io.WriteString(w, "  <link rel=\"stylesheet\" type=\"text/css\" href=\"")
-		escapeHTML(w, []byte(r.CSS))
-		io.WriteString(w, "\"")
-		io.WriteString(w, ending)
-		io.WriteString(w, ">\n")
-	}
-	if r.Icon != "" {
-		io.WriteString(w, "  <link rel=\"icon\" type=\"image/x-icon\" href=\"")
-		escapeHTML(w, []byte(r.Icon))
-		io.WriteString(w, "\"")
-		io.WriteString(w, ending)
-		io.WriteString(w, ">\n")
-	}
-	io.WriteString(w, "</head>\n")
-	io.WriteString(w, "<body>\n\n")
-}
-
-func (r *HTMLRenderer) writeTOC(w io.Writer, ast *Node) {
-	buf := bytes.Buffer{}
-
-	inHeading := false
-	tocLevel := 0
-	headingCount := 0
-
-	ast.Walk(func(node *Node, entering bool) WalkStatus {
-		if node.Type == Heading && !node.HeadingData.IsTitleblock {
-			inHeading = entering
-			if entering {
-				node.HeadingID = fmt.Sprintf("toc_%d", headingCount)
-				if node.Level == tocLevel {
-					buf.WriteString("</li>\n\n<li>")
-				} else if node.Level < tocLevel {
-					for node.Level < tocLevel {
-						tocLevel--
-						buf.WriteString("</li>\n</ul>")
-					}
-					buf.WriteString("</li>\n\n<li>")
-				} else {
-					for node.Level > tocLevel {
-						tocLevel++
-						buf.WriteString("\n<ul>\n<li>")
-					}
-				}
-
-				fmt.Fprintf(&buf, `<a href="#toc_%d">`, headingCount)
-				headingCount++
-			} else {
-				buf.WriteString("</a>")
-			}
-			return GoToNext
-		}
-
-		if inHeading {
-			return r.RenderNode(&buf, node, entering)
-		}
-
-		return GoToNext
-	})
-
-	if buf.Len() > 0 {
-		io.WriteString(w, "<nav>\n")
-		w.Write(buf.Bytes())
-		io.WriteString(w, "\n\n</nav>\n")
-	}
-	r.lastOutputLen = buf.Len()
-}
diff --git a/vendor/go.step.sm/cli-utils/pkg/blackfriday/inline.go b/vendor/go.step.sm/cli-utils/pkg/blackfriday/inline.go
deleted file mode 100644
index 3d633106..00000000
--- a/vendor/go.step.sm/cli-utils/pkg/blackfriday/inline.go
+++ /dev/null
@@ -1,1214 +0,0 @@
-//
-// Blackfriday Markdown Processor
-// Available at http://github.com/russross/blackfriday
-//
-// Copyright © 2011 Russ Ross <russ@russross.com>.
-// Distributed under the Simplified BSD License.
-// See README.md for details.
-//

-//
-// Functions to parse inline elements.
-//
-
-package blackfriday
-
-import (
-	"bytes"
-	"regexp"
-	"strconv"
-)
-
-var (
-	urlRe    = `((https?|ftp):\/\/|\/)[-A-Za-z0-9+&@#\/%?=~_|!:,.;\(\)]+`
-	anchorRe = regexp.MustCompile(`^(<a\shref="` + urlRe + `"(\stitle="[^"<>]+")?\s?>` + urlRe + `<\/a>)`)
-
-	// TODO: improve this regexp to catch all possible entities:
-	htmlEntityRe = regexp.MustCompile(`&[a-z]{2,5};`)
-)
-
-// Functions to parse text within a block
-// Each function returns the number of chars taken care of
-// data is the complete block being rendered
-// offset is the number of valid chars before the current cursor
-
-func (p *Markdown) inline(currBlock *Node, data []byte) {
-	// handlers might call us recursively: enforce a maximum depth
-	if p.nesting >= p.maxNesting || len(data) == 0 {
-		return
-	}
-	p.nesting++
-	beg, end := 0, 0
-	for end < len(data) {
-		handler := p.inlineCallback[data[end]]
-		if handler != nil {
-			if consumed, node := handler(p, data, end); consumed == 0 {
-				// No action from the callback.
-				end++
-			} else {
-				// Copy inactive chars into the output.
-				currBlock.AppendChild(text(data[beg:end]))
-				if node != nil {
-					currBlock.AppendChild(node)
-				}
-				// Skip past whatever the callback used.
-				beg = end + consumed
-				end = beg
-			}
-		} else {
-			end++
-		}
-	}
-	if beg < len(data) {
-		if data[end-1] == '\n' {
-			end--
-		}
-		currBlock.AppendChild(text(data[beg:end]))
-	}
-	p.nesting--
-}
-
-// single and double emphasis parsing
-func emphasis(p *Markdown, data []byte, offset int) (int, *Node) {
-	data = data[offset:]
-	c := data[0]
-
-	if len(data) > 2 && data[1] != c {
-		// whitespace cannot follow an opening emphasis;
-		// strikethrough only takes two characters '~~'
-		if c == '~' || isspace(data[1]) {
-			return 0, nil
-		}
-		ret, node := helperEmphasis(p, data[1:], c)
-		if ret == 0 {
-			return 0, nil
-		}
-
-		return ret + 1, node
-	}
-
-	if len(data) > 3 && data[1] == c && data[2] != c {
-		if isspace(data[2]) {
-			return 0, nil
-		}
-		ret, node := helperDoubleEmphasis(p, data[2:], c)
-		if ret == 0 {
-			return 0, nil
-		}
-
-		return ret + 2, node
-	}
-
-	if len(data) > 4 && data[1] == c && data[2] == c && data[3] != c {
-		if c == '~' || isspace(data[3]) {
-			return 0, nil
-		}
-		ret, node := helperTripleEmphasis(p, data, 3, c)
-		if ret == 0 {
-			return 0, nil
-		}
-
-		return ret + 3, node
-	}
-
-	return 0, nil
-}
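For reference, the delimiter-counting loop in codeSpan below is what lets a longer backtick run carry literal backticks inside a span, with outer spaces trimmed. A hedged end-to-end sketch against upstream blackfriday/v2 (the expected output shape assumes its default HTML renderer; a fork may differ slightly):

package main

import (
	"fmt"

	"github.com/russross/blackfriday/v2"
)

func main() {
	// Double-backtick delimiters allow a single backtick inside the span.
	out := blackfriday.Run([]byte("use `` `tick` `` here"))
	fmt.Printf("%s", out)
	// Expected shape: <p>use <code>`tick`</code> here</p>
}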
-
-func codeSpan(p *Markdown, data []byte, offset int) (int, *Node) {
-	data = data[offset:]
-
-	nb := 0
-
-	// count the number of backticks in the delimiter
-	for nb < len(data) && data[nb] == '`' {
-		nb++
-	}
-
-	// find the next delimiter
-	i, end := 0, 0
-	for end = nb; end < len(data) && i < nb; end++ {
-		if data[end] == '`' {
-			i++
-		} else {
-			i = 0
-		}
-	}
-
-	// no matching delimiter?
-	if i < nb && end >= len(data) {
-		return 0, nil
-	}
-
-	// trim outside whitespace
-	fBegin := nb
-	for fBegin < end && data[fBegin] == ' ' {
-		fBegin++
-	}
-
-	fEnd := end - nb
-	for fEnd > fBegin && data[fEnd-1] == ' ' {
-		fEnd--
-	}
-
-	// render the code span
-	if fBegin != fEnd {
-		code := NewNode(Code)
-		code.Literal = data[fBegin:fEnd]
-		return end, code
-	}
-
-	return end, nil
-}
-
-// newline preceded by two spaces becomes <br>
    -func maybeLineBreak(p *Markdown, data []byte, offset int) (int, *Node) { - origOffset := offset - for offset < len(data) && data[offset] == ' ' { - offset++ - } - - if offset < len(data) && data[offset] == '\n' { - if offset-origOffset >= 2 { - return offset - origOffset + 1, NewNode(Hardbreak) - } - return offset - origOffset, nil - } - return 0, nil -} - -// newline without two spaces works when HardLineBreak is enabled -func lineBreak(p *Markdown, data []byte, offset int) (int, *Node) { - if p.extensions&HardLineBreak != 0 { - return 1, NewNode(Hardbreak) - } - return 0, nil -} - -type linkType int - -const ( - linkNormal linkType = iota - linkImg - linkDeferredFootnote - linkInlineFootnote -) - -func isReferenceStyleLink(data []byte, pos int, t linkType) bool { - if t == linkDeferredFootnote { - return false - } - return pos < len(data)-1 && data[pos] == '[' && data[pos+1] != '^' -} - -func maybeImage(p *Markdown, data []byte, offset int) (int, *Node) { - if offset < len(data)-1 && data[offset+1] == '[' { - return link(p, data, offset) - } - return 0, nil -} - -func maybeInlineFootnote(p *Markdown, data []byte, offset int) (int, *Node) { - if offset < len(data)-1 && data[offset+1] == '[' { - return link(p, data, offset) - } - return 0, nil -} - -// '[': parse a link or an image or a footnote -func link(p *Markdown, data []byte, offset int) (int, *Node) { - // no links allowed inside regular links, footnote, and deferred footnotes - if p.insideLink && (offset > 0 && data[offset-1] == '[' || len(data)-1 > offset && data[offset+1] == '^') { - return 0, nil - } - - var t linkType - switch { - // special case: ![^text] == deferred footnote (that follows something with - // an exclamation point) - case p.extensions&Footnotes != 0 && len(data)-1 > offset && data[offset+1] == '^': - t = linkDeferredFootnote - // ![alt] == image - case offset >= 0 && data[offset] == '!': - t = linkImg - offset++ - // ^[text] == inline footnote - // [^refId] == deferred footnote - case p.extensions&Footnotes != 0: - if offset >= 0 && data[offset] == '^' { - t = linkInlineFootnote - offset++ - } else if len(data)-1 > offset && data[offset+1] == '^' { - t = linkDeferredFootnote - } - // [text] == regular link - default: - t = linkNormal - } - - data = data[offset:] - - var ( - i = 1 - noteID int - title, link, altContent []byte - textHasNl = false - ) - - if t == linkDeferredFootnote { - i++ - } - - // look for the matching closing bracket - for level := 1; level > 0 && i < len(data); i++ { - switch { - case data[i] == '\n': - textHasNl = true - - case data[i-1] == '\\': - continue - - case data[i] == '[': - level++ - - case data[i] == ']': - level-- - if level <= 0 { - i-- // compensate for extra i++ in for loop - } - } - } - - if i >= len(data) { - return 0, nil - } - - txtE := i - i++ - var footnoteNode *Node - - // skip any amount of whitespace or newline - // (this is much more lax than original markdown syntax) - for i < len(data) && isspace(data[i]) { - i++ - } - - // inline style link - switch { - case i < len(data) && data[i] == '(': - // skip initial whitespace - i++ - - for i < len(data) && isspace(data[i]) { - i++ - } - - linkB := i - - // look for link end: ' " ) - findlinkend: - for i < len(data) { - switch { - case data[i] == '\\': - i += 2 - - case data[i] == ')' || data[i] == '\'' || data[i] == '"': - break findlinkend - - default: - i++ - } - } - - if i >= len(data) { - return 0, nil - } - linkE := i - - // look for title end if present - titleB, titleE := 0, 0 - if data[i] == '\'' || 
data[i] == '"' { - i++ - titleB = i - - findtitleend: - for i < len(data) { - switch { - case data[i] == '\\': - i += 2 - - case data[i] == ')': - break findtitleend - - default: - i++ - } - } - - if i >= len(data) { - return 0, nil - } - - // skip whitespace after title - titleE = i - 1 - for titleE > titleB && isspace(data[titleE]) { - titleE-- - } - - // check for closing quote presence - if data[titleE] != '\'' && data[titleE] != '"' { - titleB, titleE = 0, 0 - linkE = i - } - } - - // remove whitespace at the end of the link - for linkE > linkB && isspace(data[linkE-1]) { - linkE-- - } - - // remove optional angle brackets around the link - if data[linkB] == '<' { - linkB++ - } - if data[linkE-1] == '>' { - linkE-- - } - - // build escaped link and title - if linkE > linkB { - link = data[linkB:linkE] - } - - if titleE > titleB { - title = data[titleB:titleE] - } - - i++ - - // reference style link - case isReferenceStyleLink(data, i, t): - var id []byte - altContentConsidered := false - - // look for the id - i++ - linkB := i - for i < len(data) && data[i] != ']' { - i++ - } - if i >= len(data) { - return 0, nil - } - linkE := i - - // find the reference - if linkB == linkE { - if textHasNl { - var b bytes.Buffer - - for j := 1; j < txtE; j++ { - switch { - case data[j] != '\n': - b.WriteByte(data[j]) - case data[j-1] != ' ': - b.WriteByte(' ') - } - } - - id = b.Bytes() - } else { - id = data[1:txtE] - altContentConsidered = true - } - } else { - id = data[linkB:linkE] - } - - // find the reference with matching id - lr, ok := p.getRef(string(id)) - if !ok { - return 0, nil - } - - // keep link and title from reference - link = lr.link - title = lr.title - if altContentConsidered { - altContent = lr.text - } - i++ - - // shortcut reference style link or reference or inline footnote - default: - var id []byte - - // craft the id - if textHasNl { - var b bytes.Buffer - - for j := 1; j < txtE; j++ { - switch { - case data[j] != '\n': - b.WriteByte(data[j]) - case data[j-1] != ' ': - b.WriteByte(' ') - } - } - - id = b.Bytes() - } else { - if t == linkDeferredFootnote { - id = data[2:txtE] // get rid of the ^ - } else { - id = data[1:txtE] - } - } - - footnoteNode = NewNode(Item) - if t == linkInlineFootnote { - // create a new reference - noteID = len(p.notes) + 1 - - var fragment []byte - if len(id) > 0 { - if len(id) < 16 { - fragment = make([]byte, len(id)) - } else { - fragment = make([]byte, 16) - } - copy(fragment, slugify(id)) - } else { - fragment = append([]byte("footnote-"), []byte(strconv.Itoa(noteID))...) 
- } - - ref := &reference{ - noteID: noteID, - hasBlock: false, - link: fragment, - title: id, - footnote: footnoteNode, - } - - p.notes = append(p.notes, ref) - - link = ref.link - title = ref.title - } else { - // find the reference with matching id - lr, ok := p.getRef(string(id)) - if !ok { - return 0, nil - } - - if t == linkDeferredFootnote { - lr.noteID = len(p.notes) + 1 - lr.footnote = footnoteNode - p.notes = append(p.notes, lr) - } - - // keep link and title from reference - link = lr.link - // if inline footnote, title == footnote contents - title = lr.title - noteID = lr.noteID - } - - // rewind the whitespace - i = txtE + 1 - } - - var uLink []byte - if t == linkNormal || t == linkImg { - if len(link) > 0 { - var uLinkBuf bytes.Buffer - unescapeText(&uLinkBuf, link) - uLink = uLinkBuf.Bytes() - } - - // links need something to click on and somewhere to go - if len(uLink) == 0 || (t == linkNormal && txtE <= 1) { - return 0, nil - } - } - - // call the relevant rendering function - var linkNode *Node - switch t { - case linkNormal: - linkNode = NewNode(Link) - linkNode.Destination = normalizeURI(uLink) - linkNode.Title = title - if len(altContent) > 0 { - linkNode.AppendChild(text(altContent)) - } else { - // links cannot contain other links, so turn off link parsing - // temporarily and recurse - insideLink := p.insideLink - p.insideLink = true - p.inline(linkNode, data[1:txtE]) - p.insideLink = insideLink - } - - case linkImg: - linkNode = NewNode(Image) - linkNode.Destination = uLink - linkNode.Title = title - linkNode.AppendChild(text(data[1:txtE])) - i++ - - case linkInlineFootnote, linkDeferredFootnote: - linkNode = NewNode(Link) - linkNode.Destination = link - linkNode.Title = title - linkNode.NoteID = noteID - linkNode.Footnote = footnoteNode - if t == linkInlineFootnote { - i++ - } - - default: - return 0, nil - } - - return i, linkNode -} - -func (p *Markdown) inlineHTMLComment(data []byte) int { - if len(data) < 5 { - return 0 - } - if data[0] != '<' || data[1] != '!' || data[2] != '-' || data[3] != '-' { - return 0 - } - i := 5 - // scan for an end-of-comment marker, across lines if necessary - for i < len(data) && !(data[i-2] == '-' && data[i-1] == '-' && data[i] == '>') { - i++ - } - // no end-of-comment marker - if i >= len(data) { - return 0 - } - return i + 1 -} - -func stripMailto(link []byte) []byte { - if bytes.HasPrefix(link, []byte("mailto://")) { - return link[9:] - } else if bytes.HasPrefix(link, []byte("mailto:")) { - return link[7:] - } else { - return link - } -} - -// autolinkType specifies a kind of autolink that gets detected. -type autolinkType int - -// These are the possible flag values for the autolink renderer. -const ( - notAutolink autolinkType = iota - normalAutolink - emailAutolink -) - -// '<' when tags or autolinks are allowed -func leftAngle(p *Markdown, data []byte, offset int) (int, *Node) { - data = data[offset:] - altype, end := tagLength(data) - if size := p.inlineHTMLComment(data); size > 0 { - end = size - } - if end > 2 { - if altype != notAutolink { - var uLink bytes.Buffer - unescapeText(&uLink, data[1:end+1-2]) - if uLink.Len() > 0 { - link := uLink.Bytes() - node := NewNode(Link) - node.Destination = link - if altype == emailAutolink { - node.Destination = append([]byte("mailto:"), link...) 
- } - node.AppendChild(text(stripMailto(link))) - return end, node - } - } else { - htmlTag := NewNode(HTMLSpan) - htmlTag.Literal = data[:end] - return end, htmlTag - } - } - - return end, nil -} - -// '\\' backslash escape -var escapeChars = []byte("\\`*_{}[]()#+-.!:|&<>~") - -func escape(p *Markdown, data []byte, offset int) (int, *Node) { - data = data[offset:] - - if len(data) > 1 { - if p.extensions&BackslashLineBreak != 0 && data[1] == '\n' { - return 2, NewNode(Hardbreak) - } - if bytes.IndexByte(escapeChars, data[1]) < 0 { - return 0, nil - } - - return 2, text(data[1:2]) - } - - return 2, nil -} - -func unescapeText(ob *bytes.Buffer, src []byte) { - i := 0 - for i < len(src) { - org := i - for i < len(src) && src[i] != '\\' { - i++ - } - - if i > org { - ob.Write(src[org:i]) - } - - if i+1 >= len(src) { - break - } - - ob.WriteByte(src[i+1]) - i += 2 - } -} - -// '&' escaped when it doesn't belong to an entity -// valid entities are assumed to be anything matching &#?[A-Za-z0-9]+; -func entity(p *Markdown, data []byte, offset int) (int, *Node) { - data = data[offset:] - - end := 1 - - if end < len(data) && data[end] == '#' { - end++ - } - - for end < len(data) && isalnum(data[end]) { - end++ - } - - if end < len(data) && data[end] == ';' { - end++ // real entity - } else { - return 0, nil // lone '&' - } - - ent := data[:end] - // undo & escaping or it will be converted to &amp; by another - // escaper in the renderer - if bytes.Equal(ent, []byte("&")) { - ent = []byte{'&'} - } - - return end, text(ent) -} - -func linkEndsWithEntity(data []byte, linkEnd int) bool { - entityRanges := htmlEntityRe.FindAllIndex(data[:linkEnd], -1) - return entityRanges != nil && entityRanges[len(entityRanges)-1][1] == linkEnd -} - -// hasPrefixCaseInsensitive is a custom implementation of -// strings.HasPrefix(strings.ToLower(s), prefix) -// we rolled our own because ToLower pulls in a huge machinery of lowercasing -// anything from Unicode and that's very slow. Since this func will only be -// used on ASCII protocol prefixes, we can take shortcuts. 
-func hasPrefixCaseInsensitive(s, prefix []byte) bool { - if len(s) < len(prefix) { - return false - } - delta := byte('a' - 'A') - for i, b := range prefix { - if b != s[i] && b != s[i]+delta { - return false - } - } - return true -} - -var protocolPrefixes = [][]byte{ - []byte("http://"), - []byte("https://"), - []byte("ftp://"), - []byte("file://"), - []byte("mailto:"), -} - -const shortestPrefix = 6 // len("ftp://"), the shortest of the above - -func maybeAutoLink(p *Markdown, data []byte, offset int) (int, *Node) { - // quick check to rule out most false hits - if p.insideLink || len(data) < offset+shortestPrefix { - return 0, nil - } - for _, prefix := range protocolPrefixes { - endOfHead := offset + 8 // 8 is the len() of the longest prefix - if endOfHead > len(data) { - endOfHead = len(data) - } - if hasPrefixCaseInsensitive(data[offset:endOfHead], prefix) { - return autoLink(p, data, offset) - } - } - return 0, nil -} - -func autoLink(p *Markdown, data []byte, offset int) (int, *Node) { - // Now a more expensive check to see if we're not inside an anchor element - anchorStart := offset - offsetFromAnchor := 0 - for anchorStart > 0 && data[anchorStart] != '<' { - anchorStart-- - offsetFromAnchor++ - } - - anchorStr := anchorRe.Find(data[anchorStart:]) - if anchorStr != nil { - anchorClose := NewNode(HTMLSpan) - anchorClose.Literal = anchorStr[offsetFromAnchor:] - return len(anchorStr) - offsetFromAnchor, anchorClose - } - - // scan backward for a word boundary - rewind := 0 - for offset-rewind > 0 && rewind <= 7 && isletter(data[offset-rewind-1]) { - rewind++ - } - if rewind > 6 { // longest supported protocol is "mailto" which has 6 letters - return 0, nil - } - - origData := data - data = data[offset-rewind:] - - if !isSafeLink(data) { - return 0, nil - } - - linkEnd := 0 - for linkEnd < len(data) && !isEndOfLink(data[linkEnd]) { - linkEnd++ - } - - // Skip punctuation at the end of the link - if (data[linkEnd-1] == '.' || data[linkEnd-1] == ',') && data[linkEnd-2] != '\\' { - linkEnd-- - } - - // But don't skip semicolon if it's a part of escaped entity: - if data[linkEnd-1] == ';' && data[linkEnd-2] != '\\' && !linkEndsWithEntity(data, linkEnd) { - linkEnd-- - } - - // See if the link finishes with a punctuation sign that can be closed. - var copen byte - switch data[linkEnd-1] { - case '"': - copen = '"' - case '\'': - copen = '\'' - case ')': - copen = '(' - case ']': - copen = '[' - case '}': - copen = '{' - default: - copen = 0 - } - - if copen != 0 { - bufEnd := offset - rewind + linkEnd - 2 - - openDelim := 1 - - /* Try to close the final punctuation sign in this same line; - * if we managed to close it outside of the URL, that means that it's - * not part of the URL. If it closes inside the URL, that means it - * is part of the URL. 
- * - * Examples: - * - * foo http://www.pokemon.com/Pikachu_(Electric) bar - * => http://www.pokemon.com/Pikachu_(Electric) - * - * foo (http://www.pokemon.com/Pikachu_(Electric)) bar - * => http://www.pokemon.com/Pikachu_(Electric) - * - * foo http://www.pokemon.com/Pikachu_(Electric)) bar - * => http://www.pokemon.com/Pikachu_(Electric)) - * - * (foo http://www.pokemon.com/Pikachu_(Electric)) bar - * => foo http://www.pokemon.com/Pikachu_(Electric) - */ - - for bufEnd >= 0 && origData[bufEnd] != '\n' && openDelim != 0 { - if origData[bufEnd] == data[linkEnd-1] { - openDelim++ - } - - if origData[bufEnd] == copen { - openDelim-- - } - - bufEnd-- - } - - if openDelim == 0 { - linkEnd-- - } - } - - var uLink bytes.Buffer - unescapeText(&uLink, data[:linkEnd]) - - if uLink.Len() > 0 { - node := NewNode(Link) - node.Destination = uLink.Bytes() - node.AppendChild(text(uLink.Bytes())) - return linkEnd, node - } - - return linkEnd, nil -} - -func isEndOfLink(char byte) bool { - return isspace(char) || char == '<' -} - -var validUris = [][]byte{[]byte("http://"), []byte("https://"), []byte("ftp://"), []byte("mailto://")} -var validPaths = [][]byte{[]byte("/"), []byte("./"), []byte("../")} - -func isSafeLink(link []byte) bool { - for _, path := range validPaths { - if len(link) >= len(path) && bytes.Equal(link[:len(path)], path) { - if len(link) == len(path) { - return true - } else if isalnum(link[len(path)]) { - return true - } - } - } - - for _, prefix := range validUris { - // TODO: handle unicode here - // case-insensitive prefix test - if len(link) > len(prefix) && bytes.Equal(bytes.ToLower(link[:len(prefix)]), prefix) && isalnum(link[len(prefix)]) { - return true - } - } - - return false -} - -// return the length of the given tag, or 0 is it's not valid -func tagLength(data []byte) (autolink autolinkType, end int) { - var i, j int - - // a valid tag can't be shorter than 3 chars - if len(data) < 3 { - return notAutolink, 0 - } - - // begins with a '<' optionally followed by '/', followed by letter or number - if data[0] != '<' { - return notAutolink, 0 - } - if data[1] == '/' { - i = 2 - } else { - i = 1 - } - - if !isalnum(data[i]) { - return notAutolink, 0 - } - - // scheme test - autolink = notAutolink - - // try to find the beginning of an URI - for i < len(data) && (isalnum(data[i]) || data[i] == '.' 
|| data[i] == '+' || data[i] == '-') { - i++ - } - - if i > 1 && i < len(data) && data[i] == '@' { - if j = isMailtoAutoLink(data[i:]); j != 0 { - return emailAutolink, i + j - } - } - - if i > 2 && i < len(data) && data[i] == ':' { - autolink = normalAutolink - i++ - } - - // complete autolink test: no whitespace or ' or " - switch { - case i >= len(data): - autolink = notAutolink - case autolink != notAutolink: - j = i - - for i < len(data) { - if data[i] == '\\' { - i += 2 - } else if data[i] == '>' || data[i] == '\'' || data[i] == '"' || isspace(data[i]) { - break - } else { - i++ - } - - } - - if i >= len(data) { - return autolink, 0 - } - if i > j && data[i] == '>' { - return autolink, i + 1 - } - - // one of the forbidden chars has been found - autolink = notAutolink - } - i += bytes.IndexByte(data[i:], '>') - if i < 0 { - return autolink, 0 - } - return autolink, i + 1 -} - -// look for the address part of a mail autolink and '>' -// this is less strict than the original markdown e-mail address matching -func isMailtoAutoLink(data []byte) int { - nb := 0 - - // address is assumed to be: [-@._a-zA-Z0-9]+ with exactly one '@' - for i := 0; i < len(data); i++ { - if isalnum(data[i]) { - continue - } - - switch data[i] { - case '@': - nb++ - - case '-', '.', '_': - break - - case '>': - if nb == 1 { - return i + 1 - } - return 0 - default: - return 0 - } - } - - return 0 -} - -// look for the next emph char, skipping other constructs -func helperFindEmphChar(data []byte, c byte) int { - i := 0 - - for i < len(data) { - for i < len(data) && data[i] != c && data[i] != '`' && data[i] != '[' { - i++ - } - if i >= len(data) { - return 0 - } - // do not count escaped chars - if i != 0 && data[i-1] == '\\' { - i++ - continue - } - if data[i] == c { - return i - } - - if data[i] == '`' { - // skip a code span - tmpI := 0 - i++ - for i < len(data) && data[i] != '`' { - if tmpI == 0 && data[i] == c { - tmpI = i - } - i++ - } - if i >= len(data) { - return tmpI - } - i++ - } else if data[i] == '[' { - // skip a link - tmpI := 0 - i++ - for i < len(data) && data[i] != ']' { - if tmpI == 0 && data[i] == c { - tmpI = i - } - i++ - } - i++ - for i < len(data) && (data[i] == ' ' || data[i] == '\n') { - i++ - } - if i >= len(data) { - return tmpI - } - if data[i] != '[' && data[i] != '(' { // not a link - if tmpI > 0 { - return tmpI - } - continue - } - cc := data[i] - i++ - for i < len(data) && data[i] != cc { - if tmpI == 0 && data[i] == c { - return i - } - i++ - } - if i >= len(data) { - return tmpI - } - i++ - } - } - return 0 -} - -func helperEmphasis(p *Markdown, data []byte, c byte) (int, *Node) { - i := 0 - - // skip one symbol if coming from emph3 - if len(data) > 1 && data[0] == c && data[1] == c { - i = 1 - } - - for i < len(data) { - length := helperFindEmphChar(data[i:], c) - if length == 0 { - return 0, nil - } - i += length - if i >= len(data) { - return 0, nil - } - - if i+1 < len(data) && data[i+1] == c { - i++ - continue - } - - if data[i] == c && !isspace(data[i-1]) { - - if p.extensions&NoIntraEmphasis != 0 { - if !(i+1 == len(data) || isspace(data[i+1]) || ispunct(data[i+1])) { - continue - } - } - - emph := NewNode(Emph) - p.inline(emph, data[:i]) - return i + 1, emph - } - } - - return 0, nil -} - -func helperDoubleEmphasis(p *Markdown, data []byte, c byte) (int, *Node) { - i := 0 - - for i < len(data) { - length := helperFindEmphChar(data[i:], c) - if length == 0 { - return 0, nil - } - i += length - - if i+1 < len(data) && data[i] == c && data[i+1] == c && i > 0 && 
!isspace(data[i-1]) { - nodeType := Strong - if c == '~' { - nodeType = Del - } - node := NewNode(nodeType) - p.inline(node, data[:i]) - return i + 2, node - } - i++ - } - return 0, nil -} - -func helperTripleEmphasis(p *Markdown, data []byte, offset int, c byte) (int, *Node) { - i := 0 - origData := data - data = data[offset:] - - for i < len(data) { - length := helperFindEmphChar(data[i:], c) - if length == 0 { - return 0, nil - } - i += length - - // skip whitespace preceded symbols - if data[i] != c || isspace(data[i-1]) { - continue - } - - switch { - case i+2 < len(data) && data[i+1] == c && data[i+2] == c: - // triple symbol found - strong := NewNode(Strong) - em := NewNode(Emph) - strong.AppendChild(em) - p.inline(em, data[:i]) - return i + 3, strong - case (i+1 < len(data) && data[i+1] == c): - // double symbol found, hand over to emph1 - length, node := helperEmphasis(p, origData[offset-2:], c) - if length == 0 { - return 0, nil - } - return length - 2, node - default: - // single symbol found, hand over to emph2 - length, node := helperDoubleEmphasis(p, origData[offset-1:], c) - if length == 0 { - return 0, nil - } - return length - 1, node - } - } - return 0, nil -} - -func text(s []byte) *Node { - node := NewNode(Text) - node.Literal = s - return node -} - -func normalizeURI(s []byte) []byte { - return s // TODO: implement -} diff --git a/vendor/go.step.sm/cli-utils/pkg/blackfriday/markdown.go b/vendor/go.step.sm/cli-utils/pkg/blackfriday/markdown.go deleted file mode 100644 index 1146a105..00000000 --- a/vendor/go.step.sm/cli-utils/pkg/blackfriday/markdown.go +++ /dev/null @@ -1,940 +0,0 @@ -// Blackfriday Markdown Processor -// Available at http://github.com/russross/blackfriday -// -// Copyright © 2011 Russ Ross . -// Distributed under the Simplified BSD License. -// See README.md for details. - -package blackfriday - -import ( - "bytes" - "fmt" - "io" - "strings" - "unicode/utf8" -) - -// -// Markdown parsing and processing -// - -// Version string of the package. Appears in the rendered document when -// CompletePage flag is on. -const Version = "2.0" - -// Extensions is a bitwise or'ed collection of enabled Blackfriday's -// extensions. -type Extensions int - -// These are the supported markdown parsing extensions. -// OR these values together to select multiple extensions. 
-const ( - NoExtensions Extensions = 0 - NoIntraEmphasis Extensions = 1 << iota // Ignore emphasis markers inside words - Tables // Render tables - FencedCode // Render fenced code blocks - Autolink // Detect embedded URLs that are not explicitly marked - Strikethrough // Strikethrough text using ~~test~~ - LaxHTMLBlocks // Loosen up HTML block parsing rules - SpaceHeadings // Be strict about prefix heading rules - HardLineBreak // Translate newlines into line breaks - TabSizeEight // Expand tabs to eight spaces instead of four - Footnotes // Pandoc-style footnotes - NoEmptyLineBeforeBlock // No need to insert an empty line to start a (code, quote, ordered list, unordered list) block - HeadingIDs // specify heading IDs with {#id} - Titleblock // Titleblock ala pandoc - AutoHeadingIDs // Create the heading ID from the text - BackslashLineBreak // Translate trailing backslashes into line breaks - DefinitionLists // Render definition lists - - CommonHTMLFlags HTMLFlags = UseXHTML | Smartypants | - SmartypantsFractions | SmartypantsDashes | SmartypantsLatexDashes - - CommonExtensions Extensions = NoIntraEmphasis | Tables | FencedCode | - Autolink | Strikethrough | SpaceHeadings | HeadingIDs | - BackslashLineBreak | DefinitionLists -) - -// ListType contains bitwise or'ed flags for list and list item objects. -type ListType int - -// These are the possible flag values for the ListItem renderer. -// Multiple flag values may be ORed together. -// These are mostly of interest if you are writing a new output format. -const ( - ListTypeOrdered ListType = 1 << iota - ListTypeDefinition - ListTypeTerm - - ListItemContainsBlock - ListItemBeginningOfList // TODO: figure out if this is of any use now - ListItemEndOfList -) - -// CellAlignFlags holds a type of alignment in a table cell. -type CellAlignFlags int - -// These are the possible flag values for the table cell renderer. -// Only a single one of these values will be used; they are not ORed together. -// These are mostly of interest if you are writing a new output format. -const ( - TableAlignmentLeft CellAlignFlags = 1 << iota - TableAlignmentRight - TableAlignmentCenter = (TableAlignmentLeft | TableAlignmentRight) -) - -// The size of a tab stop. -const ( - TabSizeDefault = 4 - TabSizeDouble = 8 -) - -// blockTags is a set of tags that are recognized as HTML block tags. -// Any of these can be included in markdown text without special escaping. -var blockTags = map[string]struct{}{ - "blockquote": {}, - "del": {}, - "div": {}, - "dl": {}, - "fieldset": {}, - "form": {}, - "h1": {}, - "h2": {}, - "h3": {}, - "h4": {}, - "h5": {}, - "h6": {}, - "iframe": {}, - "ins": {}, - "math": {}, - "noscript": {}, - "ol": {}, - "pre": {}, - "p": {}, - "script": {}, - "style": {}, - "table": {}, - "ul": {}, - - // HTML5 - "address": {}, - "article": {}, - "aside": {}, - "canvas": {}, - "figcaption": {}, - "figure": {}, - "footer": {}, - "header": {}, - "hgroup": {}, - "main": {}, - "nav": {}, - "output": {}, - "progress": {}, - "section": {}, - "video": {}, -} - -// Renderer is the rendering interface. This is mostly of interest if you are -// implementing a new rendering format. -// -// Only an HTML implementation is provided in this repository, see the README -// for external implementations. -type Renderer interface { - // RenderNode is the main rendering method. It will be called once for - // every leaf node and twice for every non-leaf node (first with - // entering=true, then with entering=false). 
The method should write its - // rendition of the node to the supplied writer w. - RenderNode(w io.Writer, node *Node, entering bool) WalkStatus - - // RenderHeader is a method that allows the renderer to produce some - // content preceding the main body of the output document. The header is - // understood in the broad sense here. For example, the default HTML - // renderer will write not only the HTML document preamble, but also the - // table of contents if it was requested. - // - // The method will be passed an entire document tree, in case a particular - // implementation needs to inspect it to produce output. - // - // The output should be written to the supplied writer w. If your - // implementation has no header to write, supply an empty implementation. - RenderHeader(w io.Writer, ast *Node) - - // RenderFooter is a symmetric counterpart of RenderHeader. - RenderFooter(w io.Writer, ast *Node) -} - -// Callback functions for inline parsing. One such function is defined -// for each character that triggers a response when parsing inline data. -type inlineParser func(p *Markdown, data []byte, offset int) (int, *Node) - -// Markdown is a type that holds extensions and the runtime state used by -// Parse, and the renderer. You can not use it directly, construct it with New. -type Markdown struct { - renderer Renderer - referenceOverride ReferenceOverrideFunc - refs map[string]*reference - inlineCallback [256]inlineParser - extensions Extensions - nesting int - maxNesting int - insideLink bool - - // Footnotes need to be ordered as well as available to quickly check for - // presence. If a ref is also a footnote, it's stored both in refs and here - // in notes. Slice is nil if footnotes not enabled. - notes []*reference - - doc *Node - tip *Node // = doc - oldTip *Node - lastMatchedContainer *Node // = doc - allClosed bool -} - -func (p *Markdown) getRef(refid string) (ref *reference, found bool) { - if p.referenceOverride != nil { - r, overridden := p.referenceOverride(refid) - if overridden { - if r == nil { - return nil, false - } - return &reference{ - link: []byte(r.Link), - title: []byte(r.Title), - noteID: 0, - hasBlock: false, - text: []byte(r.Text)}, true - } - } - // refs are case insensitive - ref, found = p.refs[strings.ToLower(refid)] - return ref, found -} - -func (p *Markdown) finalize(block *Node) { - above := block.Parent - block.open = false - p.tip = above -} - -func (p *Markdown) addChild(node NodeType, offset uint32) *Node { - return p.addExistingChild(NewNode(node), offset) -} - -func (p *Markdown) addExistingChild(node *Node, offset uint32) *Node { - for !p.tip.canContain(node.Type) { - p.finalize(p.tip) - } - p.tip.AppendChild(node) - p.tip = node - return node -} - -func (p *Markdown) closeUnmatchedBlocks() { - if !p.allClosed { - for p.oldTip != p.lastMatchedContainer { - parent := p.oldTip.Parent - p.finalize(p.oldTip) - p.oldTip = parent - } - p.allClosed = true - } -} - -// -// -// Public interface -// -// - -// Reference represents the details of a link. -// See the documentation in Options for more details on use-case. -type Reference struct { - // Link is usually the URL the reference points to. - Link string - // Title is the alternate text describing the link in more detail. 
- Title string - // Text is the optional text to override the ref with if the syntax used was - // [refid][] - Text string -} - -// ReferenceOverrideFunc is expected to be called with a reference string and -// return either a valid Reference type that the reference string maps to or -// nil. If overridden is false, the default reference logic will be executed. -// See the documentation in Options for more details on use-case. -type ReferenceOverrideFunc func(reference string) (ref *Reference, overridden bool) - -// New constructs a Markdown processor. You can use the same With* functions as -// for Run() to customize parser's behavior and the renderer. -func New(opts ...Option) *Markdown { - var p Markdown - for _, opt := range opts { - opt(&p) - } - p.refs = make(map[string]*reference) - p.maxNesting = 16 - p.insideLink = false - docNode := NewNode(Document) - p.doc = docNode - p.tip = docNode - p.oldTip = docNode - p.lastMatchedContainer = docNode - p.allClosed = true - // register inline parsers - p.inlineCallback[' '] = maybeLineBreak - p.inlineCallback['*'] = emphasis - p.inlineCallback['_'] = emphasis - if p.extensions&Strikethrough != 0 { - p.inlineCallback['~'] = emphasis - } - p.inlineCallback['`'] = codeSpan - p.inlineCallback['\n'] = lineBreak - p.inlineCallback['['] = link - p.inlineCallback['<'] = leftAngle - p.inlineCallback['\\'] = escape - p.inlineCallback['&'] = entity - p.inlineCallback['!'] = maybeImage - p.inlineCallback['^'] = maybeInlineFootnote - if p.extensions&Autolink != 0 { - p.inlineCallback['h'] = maybeAutoLink - p.inlineCallback['m'] = maybeAutoLink - p.inlineCallback['f'] = maybeAutoLink - p.inlineCallback['H'] = maybeAutoLink - p.inlineCallback['M'] = maybeAutoLink - p.inlineCallback['F'] = maybeAutoLink - } - if p.extensions&Footnotes != 0 { - p.notes = make([]*reference, 0) - } - return &p -} - -// Option customizes the Markdown processor's default behavior. -type Option func(*Markdown) - -// WithRenderer allows you to override the default renderer. -func WithRenderer(r Renderer) Option { - return func(p *Markdown) { - p.renderer = r - } -} - -// WithExtensions allows you to pick some of the many extensions provided by -// Blackfriday. You can bitwise OR them. -func WithExtensions(e Extensions) Option { - return func(p *Markdown) { - p.extensions = e - } -} - -// WithNoExtensions turns off all extensions and custom behavior. -func WithNoExtensions() Option { - return func(p *Markdown) { - p.extensions = NoExtensions - p.renderer = NewHTMLRenderer(HTMLRendererParameters{ - Flags: HTMLFlagsNone, - }) - } -} - -// WithRefOverride sets an optional function callback that is called every -// time a reference is resolved. -// -// In Markdown, the link reference syntax can be made to resolve a link to -// a reference instead of an inline URL, in one of the following ways: -// -// * [link text][refid] -// * [refid][] -// -// Usually, the refid is defined at the bottom of the Markdown document. If -// this override function is provided, the refid is passed to the override -// function first, before consulting the defined refids at the bottom. If -// the override function indicates an override did not occur, the refids at -// the bottom will be used to fill in the link details. -func WithRefOverride(o ReferenceOverrideFunc) Option { - return func(p *Markdown) { - p.referenceOverride = o - } -} - -// Run is the main entry point to Blackfriday. It parses and renders a -// block of markdown-encoded text. 
-// -// The simplest invocation of Run takes one argument, input: -// output := Run(input) -// This will parse the input with CommonExtensions enabled and render it with -// the default HTMLRenderer (with CommonHTMLFlags). -// -// Variadic arguments opts can customize the default behavior. Since Markdown -// type does not contain exported fields, you can not use it directly. Instead, -// use the With* functions. For example, this will call the most basic -// functionality, with no extensions: -// output := Run(input, WithNoExtensions()) -// -// You can use any number of With* arguments, even contradicting ones. They -// will be applied in order of appearance and the latter will override the -// former: -// output := Run(input, WithNoExtensions(), WithExtensions(exts), -// WithRenderer(yourRenderer)) -func Run(input []byte, opts ...Option) []byte { - r := NewHTMLRenderer(HTMLRendererParameters{ - Flags: CommonHTMLFlags, - }) - optList := []Option{WithRenderer(r), WithExtensions(CommonExtensions)} - optList = append(optList, opts...) - parser := New(optList...) - ast := parser.Parse(input) - var buf bytes.Buffer - parser.renderer.RenderHeader(&buf, ast) - ast.Walk(func(node *Node, entering bool) WalkStatus { - return parser.renderer.RenderNode(&buf, node, entering) - }) - parser.renderer.RenderFooter(&buf, ast) - return buf.Bytes() -} - -// Parse is an entry point to the parsing part of Blackfriday. It takes an -// input markdown document and produces a syntax tree for its contents. This -// tree can then be rendered with a default or custom renderer, or -// analyzed/transformed by the caller to whatever non-standard needs they have. -// The return value is the root node of the syntax tree. -func (p *Markdown) Parse(input []byte) *Node { - p.block(input) - // Walk the tree and finish up some of unfinished blocks - for p.tip != nil { - p.finalize(p.tip) - } - // Walk the tree again and process inline markdown in each block - p.doc.Walk(func(node *Node, entering bool) WalkStatus { - if node.Type == Paragraph || node.Type == Heading || node.Type == TableCell { - p.inline(node, node.content) - node.content = nil - } - return GoToNext - }) - p.parseRefsToAST() - return p.doc -} - -func (p *Markdown) parseRefsToAST() { - if p.extensions&Footnotes == 0 || len(p.notes) == 0 { - return - } - p.tip = p.doc - block := p.addBlock(List, nil) - block.IsFootnotesList = true - block.ListFlags = ListTypeOrdered - flags := ListItemBeginningOfList - // Note: this loop is intentionally explicit, not range-form. This is - // because the body of the loop will append nested footnotes to p.notes and - // we need to process those late additions. Range form would only walk over - // the fixed initial set. - for i := 0; i < len(p.notes); i++ { - ref := p.notes[i] - p.addExistingChild(ref.footnote, 0) - block := ref.footnote - block.ListFlags = flags | ListTypeOrdered - block.RefLink = ref.link - if ref.hasBlock { - flags |= ListItemContainsBlock - p.block(ref.title) - } else { - p.inline(block, ref.title) - } - flags &^= ListItemBeginningOfList | ListItemContainsBlock - } - above := block.Parent - finalizeList(block) - p.tip = above - block.Walk(func(node *Node, entering bool) WalkStatus { - if node.Type == Paragraph || node.Type == Heading { - p.inline(node, node.content) - node.content = nil - } - return GoToNext - }) -} - -// -// Link references -// -// This section implements support for references that (usually) appear -// as footnotes in a document, and can be referenced anywhere in the document. 
-// The basic format is: -// -// [1]: http://www.google.com/ "Google" -// [2]: http://www.github.com/ "Github" -// -// Anywhere in the document, the reference can be linked by referring to its -// label, i.e., 1 and 2 in this example, as in: -// -// This library is hosted on [Github][2], a git hosting site. -// -// Actual footnotes as specified in Pandoc and supported by some other Markdown -// libraries such as php-markdown are also taken care of. They look like this: -// -// This sentence needs a bit of further explanation.[^note] -// -// [^note]: This is the explanation. -// -// Footnotes should be placed at the end of the document in an ordered list. -// Inline footnotes such as: -// -// Inline footnotes^[Not supported.] also exist. -// -// are not yet supported. - -// reference holds all information necessary for a reference-style links or -// footnotes. -// -// Consider this markdown with reference-style links: -// -// [link][ref] -// -// [ref]: /url/ "tooltip title" -// -// It will be ultimately converted to this HTML: -// -//

-//	<p><a href="/url/" title="tooltip title">link</a></p>
    -// -// And a reference structure will be populated as follows: -// -// p.refs["ref"] = &reference{ -// link: "/url/", -// title: "tooltip title", -// } -// -// Alternatively, reference can contain information about a footnote. Consider -// this markdown: -// -// Text needing a footnote.[^a] -// -// [^a]: This is the note -// -// A reference structure will be populated as follows: -// -// p.refs["a"] = &reference{ -// link: "a", -// title: "This is the note", -// noteID: , -// } -// -// TODO: As you can see, it begs for splitting into two dedicated structures -// for refs and for footnotes. -type reference struct { - link []byte - title []byte - noteID int // 0 if not a footnote ref - hasBlock bool - footnote *Node // a link to the Item node within a list of footnotes - - text []byte // only gets populated by refOverride feature with Reference.Text -} - -func (r *reference) String() string { - return fmt.Sprintf("{link: %q, title: %q, text: %q, noteID: %d, hasBlock: %v}", - r.link, r.title, r.text, r.noteID, r.hasBlock) -} - -// Check whether or not data starts with a reference link. -// If so, it is parsed and stored in the list of references -// (in the render struct). -// Returns the number of bytes to skip to move past it, -// or zero if the first line is not a reference. -func isReference(p *Markdown, data []byte, tabSize int) int { - // up to 3 optional leading spaces - if len(data) < 4 { - return 0 - } - i := 0 - for i < 3 && data[i] == ' ' { - i++ - } - - noteID := 0 - - // id part: anything but a newline between brackets - if data[i] != '[' { - return 0 - } - i++ - if p.extensions&Footnotes != 0 { - if i < len(data) && data[i] == '^' { - // we can set it to anything here because the proper noteIds will - // be assigned later during the second pass. It just has to be != 0 - noteID = 1 - i++ - } - } - idOffset := i - for i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != ']' { - i++ - } - if i >= len(data) || data[i] != ']' { - return 0 - } - idEnd := i - // footnotes can have empty ID, like this: [^], but a reference can not be - // empty like this: []. Break early if it's not a footnote and there's no ID - if noteID == 0 && idOffset == idEnd { - return 0 - } - // spacer: colon (space | tab)* newline? 
(space | tab)* - i++ - if i >= len(data) || data[i] != ':' { - return 0 - } - i++ - for i < len(data) && (data[i] == ' ' || data[i] == '\t') { - i++ - } - if i < len(data) && (data[i] == '\n' || data[i] == '\r') { - i++ - if i < len(data) && data[i] == '\n' && data[i-1] == '\r' { - i++ - } - } - for i < len(data) && (data[i] == ' ' || data[i] == '\t') { - i++ - } - if i >= len(data) { - return 0 - } - - var ( - linkOffset, linkEnd int - titleOffset, titleEnd int - lineEnd int - raw []byte - hasBlock bool - ) - - if p.extensions&Footnotes != 0 && noteID != 0 { - linkOffset, linkEnd, raw, hasBlock = scanFootnote(p, data, i, tabSize) - lineEnd = linkEnd - } else { - linkOffset, linkEnd, titleOffset, titleEnd, lineEnd = scanLinkRef(p, data, i) - } - if lineEnd == 0 { - return 0 - } - - // a valid ref has been found - - ref := &reference{ - noteID: noteID, - hasBlock: hasBlock, - } - - if noteID > 0 { - // reusing the link field for the id since footnotes don't have links - ref.link = data[idOffset:idEnd] - // if footnote, it's not really a title, it's the contained text - ref.title = raw - } else { - ref.link = data[linkOffset:linkEnd] - ref.title = data[titleOffset:titleEnd] - } - - // id matches are case-insensitive - id := string(bytes.ToLower(data[idOffset:idEnd])) - - p.refs[id] = ref - - return lineEnd -} - -func scanLinkRef(p *Markdown, data []byte, i int) (linkOffset, linkEnd, titleOffset, titleEnd, lineEnd int) { - // link: whitespace-free sequence, optionally between angle brackets - if data[i] == '<' { - i++ - } - linkOffset = i - for i < len(data) && data[i] != ' ' && data[i] != '\t' && data[i] != '\n' && data[i] != '\r' { - i++ - } - linkEnd = i - if data[linkOffset] == '<' && data[linkEnd-1] == '>' { - linkOffset++ - linkEnd-- - } - - // optional spacer: (space | tab)* (newline | '\'' | '"' | '(' ) - for i < len(data) && (data[i] == ' ' || data[i] == '\t') { - i++ - } - if i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != '\'' && data[i] != '"' && data[i] != '(' { - return - } - - // compute end-of-line - if i >= len(data) || data[i] == '\r' || data[i] == '\n' { - lineEnd = i - } - if i+1 < len(data) && data[i] == '\r' && data[i+1] == '\n' { - lineEnd++ - } - - // optional (space|tab)* spacer after a newline - if lineEnd > 0 { - i = lineEnd + 1 - for i < len(data) && (data[i] == ' ' || data[i] == '\t') { - i++ - } - } - - // optional title: any non-newline sequence enclosed in '"() alone on its line - if i+1 < len(data) && (data[i] == '\'' || data[i] == '"' || data[i] == '(') { - i++ - titleOffset = i - - // look for EOL - for i < len(data) && data[i] != '\n' && data[i] != '\r' { - i++ - } - if i+1 < len(data) && data[i] == '\n' && data[i+1] == '\r' { - titleEnd = i + 1 - } else { - titleEnd = i - } - - // step back - i-- - for i > titleOffset && (data[i] == ' ' || data[i] == '\t') { - i-- - } - if i > titleOffset && (data[i] == '\'' || data[i] == '"' || data[i] == ')') { - lineEnd = titleEnd - titleEnd = i - } - } - - return -} - -// The first bit of this logic is the same as Parser.listItem, but the rest -// is much simpler. This function simply finds the entire block and shifts it -// over by one tab if it is indeed a block (just returns the line if it's not). -// blockEnd is the end of the section in the input buffer, and contents is the -// extracted text that was shifted over one tab. It will need to be rendered at -// the end of the document. 
-func scanFootnote(p *Markdown, data []byte, i, indentSize int) (blockStart, blockEnd int, contents []byte, hasBlock bool) { - if i == 0 || len(data) == 0 { - return - } - - // skip leading whitespace on first line - for i < len(data) && data[i] == ' ' { - i++ - } - - blockStart = i - - // find the end of the line - blockEnd = i - for i < len(data) && data[i-1] != '\n' { - i++ - } - - // get working buffer - var raw bytes.Buffer - - // put the first line into the working buffer - raw.Write(data[blockEnd:i]) - blockEnd = i - - // process the following lines - containsBlankLine := false - -gatherLines: - for blockEnd < len(data) { - i++ - - // find the end of this line - for i < len(data) && data[i-1] != '\n' { - i++ - } - - // if it is an empty line, guess that it is part of this item - // and move on to the next line - if p.isEmpty(data[blockEnd:i]) > 0 { - containsBlankLine = true - blockEnd = i - continue - } - - n := 0 - if n = isIndented(data[blockEnd:i], indentSize); n == 0 { - // this is the end of the block. - // we don't want to include this last line in the index. - break gatherLines - } - - // if there were blank lines before this one, insert a new one now - if containsBlankLine { - raw.WriteByte('\n') - containsBlankLine = false - } - - // get rid of that first tab, write to buffer - raw.Write(data[blockEnd+n : i]) - hasBlock = true - - blockEnd = i - } - - if data[blockEnd-1] != '\n' { - raw.WriteByte('\n') - } - - contents = raw.Bytes() - - return -} - -// -// -// Miscellaneous helper functions -// -// - -// Test if a character is a punctuation symbol. -// Taken from a private function in regexp in the stdlib. -func ispunct(c byte) bool { - for _, r := range []byte("!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~") { - if c == r { - return true - } - } - return false -} - -// Test if a character is a whitespace character. -func isspace(c byte) bool { - return c == ' ' || c == '\t' || c == '\n' || c == '\r' || c == '\f' || c == '\v' -} - -// Test if a character is letter. -func isletter(c byte) bool { - return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') -} - -// Test if a character is a letter or a digit. -// TODO: check when this is looking for ASCII alnum and when it should use unicode -func isalnum(c byte) bool { - return (c >= '0' && c <= '9') || isletter(c) -} - -// Replace tab characters with spaces, aligning to the next TAB_SIZE column. -// always ends output with a newline -func expandTabs(out *bytes.Buffer, line []byte, tabSize int) { - // first, check for common cases: no tabs, or only tabs at beginning of line - i, prefix := 0, 0 - slowcase := false - for i = 0; i < len(line); i++ { - if line[i] == '\t' { - if prefix == i { - prefix++ - } else { - slowcase = true - break - } - } - } - - // no need to decode runes if all tabs are at the beginning of the line - if !slowcase { - for i = 0; i < prefix*tabSize; i++ { - out.WriteByte(' ') - } - out.Write(line[prefix:]) - return - } - - // the slow case: we need to count runes to figure out how - // many spaces to insert for each tab - column := 0 - i = 0 - for i < len(line) { - start := i - for i < len(line) && line[i] != '\t' { - _, size := utf8.DecodeRune(line[i:]) - i += size - column++ - } - - if i > start { - out.Write(line[start:i]) - } - - if i >= len(line) { - break - } - - for { - out.WriteByte(' ') - column++ - if column%tabSize == 0 { - break - } - } - - i++ - } -} - -// Find if a line counts as indented or not. -// Returns number of characters the indent is (0 = not indented). 
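
The indentation check defined next is small enough to exercise on its own. Since the vendored function is unexported, this is a self-contained copy of the same logic with a tiny demonstration:

package main

import "fmt"

// isIndented reports how many leading characters count as one level of
// indentation: a single tab, or exactly indentSize spaces. A return of
// 0 means the line is not indented.
func isIndented(data []byte, indentSize int) int {
	if len(data) == 0 {
		return 0
	}
	if data[0] == '\t' {
		return 1
	}
	if len(data) < indentSize {
		return 0
	}
	for i := 0; i < indentSize; i++ {
		if data[i] != ' ' {
			return 0
		}
	}
	return indentSize
}

func main() {
	fmt.Println(isIndented([]byte("\tcode"), 4))   // 1: one tab
	fmt.Println(isIndented([]byte("    code"), 4)) // 4: four spaces
	fmt.Println(isIndented([]byte("  code"), 4))   // 0: not enough
}
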
-func isIndented(data []byte, indentSize int) int { - if len(data) == 0 { - return 0 - } - if data[0] == '\t' { - return 1 - } - if len(data) < indentSize { - return 0 - } - for i := 0; i < indentSize; i++ { - if data[i] != ' ' { - return 0 - } - } - return indentSize -} - -// Create a url-safe slug for fragments -func slugify(in []byte) []byte { - if len(in) == 0 { - return in - } - out := make([]byte, 0, len(in)) - sym := false - - for _, ch := range in { - if isalnum(ch) { - sym = false - out = append(out, ch) - } else if sym { - continue - } else { - out = append(out, '-') - sym = true - } - } - var a, b int - var ch byte - for a, ch = range out { - if ch != '-' { - break - } - } - for b = len(out) - 1; b > 0; b-- { - if out[b] != '-' { - break - } - } - return out[a : b+1] -} diff --git a/vendor/go.step.sm/cli-utils/pkg/blackfriday/node.go b/vendor/go.step.sm/cli-utils/pkg/blackfriday/node.go deleted file mode 100644 index 51b9e8c1..00000000 --- a/vendor/go.step.sm/cli-utils/pkg/blackfriday/node.go +++ /dev/null @@ -1,354 +0,0 @@ -package blackfriday - -import ( - "bytes" - "fmt" -) - -// NodeType specifies a type of a single node of a syntax tree. Usually one -// node (and its type) corresponds to a single markdown feature, e.g. emphasis -// or code block. -type NodeType int - -// Constants for identifying different types of nodes. See NodeType. -const ( - Document NodeType = iota - BlockQuote - List - Item - Paragraph - Heading - HorizontalRule - Emph - Strong - Del - Link - Image - Text - HTMLBlock - CodeBlock - Softbreak - Hardbreak - Code - HTMLSpan - Table - TableCell - TableHead - TableBody - TableRow -) - -var nodeTypeNames = []string{ - Document: "Document", - BlockQuote: "BlockQuote", - List: "List", - Item: "Item", - Paragraph: "Paragraph", - Heading: "Heading", - HorizontalRule: "HorizontalRule", - Emph: "Emph", - Strong: "Strong", - Del: "Del", - Link: "Link", - Image: "Image", - Text: "Text", - HTMLBlock: "HTMLBlock", - CodeBlock: "CodeBlock", - Softbreak: "Softbreak", - Hardbreak: "Hardbreak", - Code: "Code", - HTMLSpan: "HTMLSpan", - Table: "Table", - TableCell: "TableCell", - TableHead: "TableHead", - TableBody: "TableBody", - TableRow: "TableRow", -} - -func (t NodeType) String() string { - return nodeTypeNames[t] -} - -// ListData contains fields relevant to a List and Item node type. -type ListData struct { - ListFlags ListType - Tight bool // Skip
<p>
    s around list item data if true - BulletChar byte // '*', '+' or '-' in bullet lists - Delimiter byte // '.' or ')' after the number in ordered lists - RefLink []byte // If not nil, turns this list item into a footnote item and triggers different rendering - IsFootnotesList bool // This is a list of footnotes -} - -// LinkData contains fields relevant to a Link node type. -type LinkData struct { - Destination []byte // Destination is what goes into a href - Title []byte // Title is the tooltip thing that goes in a title attribute - NoteID int // NoteID contains a serial number of a footnote, zero if it's not a footnote - Footnote *Node // If it's a footnote, this is a direct link to the footnote Node. Otherwise nil. -} - -// CodeBlockData contains fields relevant to a CodeBlock node type. -type CodeBlockData struct { - IsFenced bool // Specifies whether it's a fenced code block or an indented one - Info []byte // This holds the info string - FenceChar byte - FenceLength int - FenceOffset int -} - -// TableCellData contains fields relevant to a TableCell node type. -type TableCellData struct { - IsHeader bool // This tells if it's under the header row - Align CellAlignFlags // This holds the value for align attribute -} - -// HeadingData contains fields relevant to a Heading node type. -type HeadingData struct { - Level int // This holds the heading level number - HeadingID string // This might hold heading ID, if present - IsTitleblock bool // Specifies whether it's a title block -} - -// Node is a single element in the abstract syntax tree of the parsed document. -// It holds connections to the structurally neighboring nodes and, for certain -// types of nodes, additional information that might be needed when rendering. -type Node struct { - Type NodeType // Determines the type of the node - Parent *Node // Points to the parent - FirstChild *Node // Points to the first child, if any - LastChild *Node // Points to the last child, if any - Prev *Node // Previous sibling; nil if it's the first child - Next *Node // Next sibling; nil if it's the last child - - Literal []byte // Text contents of the leaf nodes - - HeadingData // Populated if Type is Heading - ListData // Populated if Type is List - CodeBlockData // Populated if Type is CodeBlock - LinkData // Populated if Type is Link - TableCellData // Populated if Type is TableCell - - content []byte // Markdown content of the block nodes - open bool // Specifies an open block node that has not been finished to process yet -} - -// NewNode allocates a node of a specified type. -func NewNode(typ NodeType) *Node { - return &Node{ - Type: typ, - open: true, - } -} - -func (n *Node) String() string { - ellipsis := "" - snippet := n.Literal - if len(snippet) > 16 { - snippet = snippet[:16] - ellipsis = "..." - } - return fmt.Sprintf("%s: '%s%s'", n.Type, snippet, ellipsis) -} - -// Unlink removes node 'n' from the tree. -// It panics if the node is nil. -func (n *Node) Unlink() { - if n.Prev != nil { - n.Prev.Next = n.Next - } else if n.Parent != nil { - n.Parent.FirstChild = n.Next - } - if n.Next != nil { - n.Next.Prev = n.Prev - } else if n.Parent != nil { - n.Parent.LastChild = n.Prev - } - n.Parent = nil - n.Next = nil - n.Prev = nil -} - -// AppendChild adds a node 'child' as a child of 'n'. -// It panics if either node is nil. 
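
For a feel of how these pointer-wiring operations compose, here is a hedged sketch that builds a tiny tree by hand. It is written against github.com/russross/blackfriday/v2, the upstream package this vendored copy derives from, which exports the same Node API; it illustrates the linking, not how the parser itself constructs trees:

package main

import (
	"fmt"

	bf "github.com/russross/blackfriday/v2"
)

func main() {
	// Document -> Paragraph -> Text, wired up with AppendChild.
	doc := bf.NewNode(bf.Document)
	para := bf.NewNode(bf.Paragraph)
	text := bf.NewNode(bf.Text)
	text.Literal = []byte("hello, tree")

	doc.AppendChild(para)
	para.AppendChild(text)

	fmt.Println(doc.FirstChild == para) // true

	// Unlink detaches a node and clears its parent/sibling pointers.
	text.Unlink()
	fmt.Println(para.FirstChild == nil) // true
}
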
-func (n *Node) AppendChild(child *Node) { - child.Unlink() - child.Parent = n - if n.LastChild != nil { - n.LastChild.Next = child - child.Prev = n.LastChild - n.LastChild = child - } else { - n.FirstChild = child - n.LastChild = child - } -} - -// InsertBefore inserts 'sibling' immediately before 'n'. -// It panics if either node is nil. -func (n *Node) InsertBefore(sibling *Node) { - sibling.Unlink() - sibling.Prev = n.Prev - if sibling.Prev != nil { - sibling.Prev.Next = sibling - } - sibling.Next = n - n.Prev = sibling - sibling.Parent = n.Parent - if sibling.Prev == nil { - sibling.Parent.FirstChild = sibling - } -} - -func (n *Node) isContainer() bool { - switch n.Type { - case Document: - fallthrough - case BlockQuote: - fallthrough - case List: - fallthrough - case Item: - fallthrough - case Paragraph: - fallthrough - case Heading: - fallthrough - case Emph: - fallthrough - case Strong: - fallthrough - case Del: - fallthrough - case Link: - fallthrough - case Image: - fallthrough - case Table: - fallthrough - case TableHead: - fallthrough - case TableBody: - fallthrough - case TableRow: - fallthrough - case TableCell: - return true - default: - return false - } -} - -func (n *Node) canContain(t NodeType) bool { - if n.Type == List { - return t == Item - } - if n.Type == Document || n.Type == BlockQuote || n.Type == Item { - return t != Item - } - if n.Type == Table { - return t == TableHead || t == TableBody - } - if n.Type == TableHead || n.Type == TableBody { - return t == TableRow - } - if n.Type == TableRow { - return t == TableCell - } - return false -} - -// WalkStatus allows NodeVisitor to have some control over the tree traversal. -// It is returned from NodeVisitor and different values allow Node.Walk to -// decide which node to go to next. -type WalkStatus int - -const ( - // GoToNext is the default traversal of every node. - GoToNext WalkStatus = iota - // SkipChildren tells walker to skip all children of current node. - SkipChildren - // Terminate tells walker to terminate the traversal. - Terminate -) - -// NodeVisitor is a callback to be called when traversing the syntax tree. -// Called twice for every node: once with entering=true when the branch is -// first visited, then with entering=false after all the children are done. -type NodeVisitor func(node *Node, entering bool) WalkStatus - -// Walk is a convenience method that instantiates a walker and starts a -// traversal of subtree rooted at n. 
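
Before the implementation, a short usage sketch of the traversal, again against github.com/russross/blackfriday/v2 (whose exported Walk and NodeVisitor match the API deleted here). Each node is visited twice, once entering and once leaving, which is why the visitor checks the entering flag:

package main

import (
	"fmt"

	bf "github.com/russross/blackfriday/v2"
)

func main() {
	md := bf.New(bf.WithExtensions(bf.CommonExtensions))
	ast := md.Parse([]byte("# Title\n\nSome *emphasized* text.\n"))

	// Print every text leaf; returning GoToNext continues the walk.
	ast.Walk(func(node *bf.Node, entering bool) bf.WalkStatus {
		if entering && node.Type == bf.Text && len(node.Literal) > 0 {
			fmt.Printf("text: %q\n", node.Literal)
		}
		return bf.GoToNext
	})
}
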
-func (n *Node) Walk(visitor NodeVisitor) { - w := newNodeWalker(n) - for w.current != nil { - status := visitor(w.current, w.entering) - switch status { - case GoToNext: - w.next() - case SkipChildren: - w.entering = false - w.next() - case Terminate: - return - } - } -} - -type nodeWalker struct { - current *Node - root *Node - entering bool -} - -func newNodeWalker(root *Node) *nodeWalker { - return &nodeWalker{ - current: root, - root: root, - entering: true, - } -} - -func (nw *nodeWalker) next() { - if (!nw.current.isContainer() || !nw.entering) && nw.current == nw.root { - nw.current = nil - return - } - if nw.entering && nw.current.isContainer() { - if nw.current.FirstChild != nil { - nw.current = nw.current.FirstChild - nw.entering = true - } else { - nw.entering = false - } - } else if nw.current.Next == nil { - nw.current = nw.current.Parent - nw.entering = false - } else { - nw.current = nw.current.Next - nw.entering = true - } -} - -func dump(ast *Node) { - fmt.Println(dumpString(ast)) -} - -func dumpR(ast *Node, depth int) string { - if ast == nil { - return "" - } - indent := bytes.Repeat([]byte("\t"), depth) - content := ast.Literal - if content == nil { - content = ast.content - } - result := fmt.Sprintf("%s%s(%q)\n", indent, ast.Type, content) - for n := ast.FirstChild; n != nil; n = n.Next { - result += dumpR(n, depth+1) - } - return result -} - -func dumpString(ast *Node) string { - return dumpR(ast, 0) -} diff --git a/vendor/go.step.sm/cli-utils/pkg/blackfriday/smartypants.go b/vendor/go.step.sm/cli-utils/pkg/blackfriday/smartypants.go deleted file mode 100644 index 3a220e94..00000000 --- a/vendor/go.step.sm/cli-utils/pkg/blackfriday/smartypants.go +++ /dev/null @@ -1,457 +0,0 @@ -// -// Blackfriday Markdown Processor -// Available at http://github.com/russross/blackfriday -// -// Copyright © 2011 Russ Ross . -// Distributed under the Simplified BSD License. -// See README.md for details. -// - -// -// -// SmartyPants rendering -// -// - -package blackfriday - -import ( - "bytes" - "io" -) - -// SPRenderer is a struct containing state of a Smartypants renderer. -type SPRenderer struct { - inSingleQuote bool - inDoubleQuote bool - callbacks [256]smartCallback -} - -func wordBoundary(c byte) bool { - return c == 0 || isspace(c) || ispunct(c) -} - -func tolower(c byte) byte { - if c >= 'A' && c <= 'Z' { - return c - 'A' + 'a' - } - return c -} - -func isdigit(c byte) bool { - return c >= '0' && c <= '9' -} - -func smartQuoteHelper(out *bytes.Buffer, previousChar byte, nextChar byte, quote byte, isOpen *bool, addNBSP bool) bool { - // edge of the buffer is likely to be a tag that we don't get to see, - // so we treat it like text sometimes - - // enumerate all sixteen possibilities for (previousChar, nextChar) - // each can be one of {0, space, punct, other} - switch { - case previousChar == 0 && nextChar == 0: - // context is not any help here, so toggle - *isOpen = !*isOpen - case isspace(previousChar) && nextChar == 0: - // [ "] might be [ "foo...] - *isOpen = true - case ispunct(previousChar) && nextChar == 0: - // [!"] hmm... could be [Run!"] or [("...] - *isOpen = false - case /* isnormal(previousChar) && */ nextChar == 0: - // [a"] is probably a close - *isOpen = false - case previousChar == 0 && isspace(nextChar): - // [" ] might be [...foo" ] - *isOpen = false - case isspace(previousChar) && isspace(nextChar): - // [ " ] context is not any help here, so toggle - *isOpen = !*isOpen - case ispunct(previousChar) && isspace(nextChar): - // [!" 
] is probably a close - *isOpen = false - case /* isnormal(previousChar) && */ isspace(nextChar): - // [a" ] this is one of the easy cases - *isOpen = false - case previousChar == 0 && ispunct(nextChar): - // ["!] hmm... could be ["$1.95] or ["!...] - *isOpen = false - case isspace(previousChar) && ispunct(nextChar): - // [ "!] looks more like [ "$1.95] - *isOpen = true - case ispunct(previousChar) && ispunct(nextChar): - // [!"!] context is not any help here, so toggle - *isOpen = !*isOpen - case /* isnormal(previousChar) && */ ispunct(nextChar): - // [a"!] is probably a close - *isOpen = false - case previousChar == 0 /* && isnormal(nextChar) */ : - // ["a] is probably an open - *isOpen = true - case isspace(previousChar) /* && isnormal(nextChar) */ : - // [ "a] this is one of the easy cases - *isOpen = true - case ispunct(previousChar) /* && isnormal(nextChar) */ : - // [!"a] is probably an open - *isOpen = true - default: - // [a'b] maybe a contraction? - *isOpen = false - } - - // Note that with the limited lookahead, this non-breaking - // space will also be appended to single double quotes. - if addNBSP && !*isOpen { - out.WriteString(" ") - } - - out.WriteByte('&') - if *isOpen { - out.WriteByte('l') - } else { - out.WriteByte('r') - } - out.WriteByte(quote) - out.WriteString("quo;") - - if addNBSP && *isOpen { - out.WriteString(" ") - } - - return true -} - -func (r *SPRenderer) smartSingleQuote(out *bytes.Buffer, previousChar byte, text []byte) int { - if len(text) >= 2 { - t1 := tolower(text[1]) - - if t1 == '\'' { - nextChar := byte(0) - if len(text) >= 3 { - nextChar = text[2] - } - if smartQuoteHelper(out, previousChar, nextChar, 'd', &r.inDoubleQuote, false) { - return 1 - } - } - - if (t1 == 's' || t1 == 't' || t1 == 'm' || t1 == 'd') && (len(text) < 3 || wordBoundary(text[2])) { - out.WriteString("’") - return 0 - } - - if len(text) >= 3 { - t2 := tolower(text[2]) - - if ((t1 == 'r' && t2 == 'e') || (t1 == 'l' && t2 == 'l') || (t1 == 'v' && t2 == 'e')) && - (len(text) < 4 || wordBoundary(text[3])) { - out.WriteString("’") - return 0 - } - } - } - - nextChar := byte(0) - if len(text) > 1 { - nextChar = text[1] - } - if smartQuoteHelper(out, previousChar, nextChar, 's', &r.inSingleQuote, false) { - return 0 - } - - out.WriteByte(text[0]) - return 0 -} - -func (r *SPRenderer) smartParens(out *bytes.Buffer, previousChar byte, text []byte) int { - if len(text) >= 3 { - t1 := tolower(text[1]) - t2 := tolower(text[2]) - - if t1 == 'c' && t2 == ')' { - out.WriteString("©") - return 2 - } - - if t1 == 'r' && t2 == ')' { - out.WriteString("®") - return 2 - } - - if len(text) >= 4 && t1 == 't' && t2 == 'm' && text[3] == ')' { - out.WriteString("™") - return 3 - } - } - - out.WriteByte(text[0]) - return 0 -} - -func (r *SPRenderer) smartDash(out *bytes.Buffer, previousChar byte, text []byte) int { - if len(text) >= 2 { - if text[1] == '-' { - out.WriteString("—") - return 1 - } - - if wordBoundary(previousChar) && wordBoundary(text[1]) { - out.WriteString("–") - return 0 - } - } - - out.WriteByte(text[0]) - return 0 -} - -func (r *SPRenderer) smartDashLatex(out *bytes.Buffer, previousChar byte, text []byte) int { - if len(text) >= 3 && text[1] == '-' && text[2] == '-' { - out.WriteString("—") - return 2 - } - if len(text) >= 2 && text[1] == '-' { - out.WriteString("–") - return 1 - } - - out.WriteByte(text[0]) - return 0 -} - -func (r *SPRenderer) smartAmpVariant(out *bytes.Buffer, previousChar byte, text []byte, quote byte, addNBSP bool) int { - if bytes.HasPrefix(text, []byte(""")) 
{ - nextChar := byte(0) - if len(text) >= 7 { - nextChar = text[6] - } - if smartQuoteHelper(out, previousChar, nextChar, quote, &r.inDoubleQuote, addNBSP) { - return 5 - } - } - - if bytes.HasPrefix(text, []byte("�")) { - return 3 - } - - out.WriteByte('&') - return 0 -} - -func (r *SPRenderer) smartAmp(angledQuotes, addNBSP bool) func(*bytes.Buffer, byte, []byte) int { - var quote byte = 'd' - if angledQuotes { - quote = 'a' - } - - return func(out *bytes.Buffer, previousChar byte, text []byte) int { - return r.smartAmpVariant(out, previousChar, text, quote, addNBSP) - } -} - -func (r *SPRenderer) smartPeriod(out *bytes.Buffer, previousChar byte, text []byte) int { - if len(text) >= 3 && text[1] == '.' && text[2] == '.' { - out.WriteString("…") - return 2 - } - - if len(text) >= 5 && text[1] == ' ' && text[2] == '.' && text[3] == ' ' && text[4] == '.' { - out.WriteString("…") - return 4 - } - - out.WriteByte(text[0]) - return 0 -} - -func (r *SPRenderer) smartBacktick(out *bytes.Buffer, previousChar byte, text []byte) int { - if len(text) >= 2 && text[1] == '`' { - nextChar := byte(0) - if len(text) >= 3 { - nextChar = text[2] - } - if smartQuoteHelper(out, previousChar, nextChar, 'd', &r.inDoubleQuote, false) { - return 1 - } - } - - out.WriteByte(text[0]) - return 0 -} - -func (r *SPRenderer) smartNumberGeneric(out *bytes.Buffer, previousChar byte, text []byte) int { - if wordBoundary(previousChar) && previousChar != '/' && len(text) >= 3 { - // is it of the form digits/digits(word boundary)?, i.e., \d+/\d+\b - // note: check for regular slash (/) or fraction slash (â„, 0x2044, or 0xe2 81 84 in utf-8) - // and avoid changing dates like 1/23/2005 into fractions. - numEnd := 0 - for len(text) > numEnd && isdigit(text[numEnd]) { - numEnd++ - } - if numEnd == 0 { - out.WriteByte(text[0]) - return 0 - } - denStart := numEnd + 1 - if len(text) > numEnd+3 && text[numEnd] == 0xe2 && text[numEnd+1] == 0x81 && text[numEnd+2] == 0x84 { - denStart = numEnd + 3 - } else if len(text) < numEnd+2 || text[numEnd] != '/' { - out.WriteByte(text[0]) - return 0 - } - denEnd := denStart - for len(text) > denEnd && isdigit(text[denEnd]) { - denEnd++ - } - if denEnd == denStart { - out.WriteByte(text[0]) - return 0 - } - if len(text) == denEnd || wordBoundary(text[denEnd]) && text[denEnd] != '/' { - out.WriteString("") - out.Write(text[:numEnd]) - out.WriteString("") - out.Write(text[denStart:denEnd]) - out.WriteString("") - return denEnd - 1 - } - } - - out.WriteByte(text[0]) - return 0 -} - -func (r *SPRenderer) smartNumber(out *bytes.Buffer, previousChar byte, text []byte) int { - if wordBoundary(previousChar) && previousChar != '/' && len(text) >= 3 { - if text[0] == '1' && text[1] == '/' && text[2] == '2' { - if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' { - out.WriteString("½") - return 2 - } - } - - if text[0] == '1' && text[1] == '/' && text[2] == '4' { - if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' || (len(text) >= 5 && tolower(text[3]) == 't' && tolower(text[4]) == 'h') { - out.WriteString("¼") - return 2 - } - } - - if text[0] == '3' && text[1] == '/' && text[2] == '4' { - if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' || (len(text) >= 6 && tolower(text[3]) == 't' && tolower(text[4]) == 'h' && tolower(text[5]) == 's') { - out.WriteString("¾") - return 2 - } - } - } - - out.WriteByte(text[0]) - return 0 -} - -func (r *SPRenderer) smartDoubleQuoteVariant(out *bytes.Buffer, previousChar byte, text []byte, quote byte) int { - nextChar := byte(0) - if len(text) 
> 1 { - nextChar = text[1] - } - if !smartQuoteHelper(out, previousChar, nextChar, quote, &r.inDoubleQuote, false) { - out.WriteString(""") - } - - return 0 -} - -func (r *SPRenderer) smartDoubleQuote(out *bytes.Buffer, previousChar byte, text []byte) int { - return r.smartDoubleQuoteVariant(out, previousChar, text, 'd') -} - -func (r *SPRenderer) smartAngledDoubleQuote(out *bytes.Buffer, previousChar byte, text []byte) int { - return r.smartDoubleQuoteVariant(out, previousChar, text, 'a') -} - -func (r *SPRenderer) smartLeftAngle(out *bytes.Buffer, previousChar byte, text []byte) int { - i := 0 - - for i < len(text) && text[i] != '>' { - i++ - } - - out.Write(text[:i+1]) - return i -} - -type smartCallback func(out *bytes.Buffer, previousChar byte, text []byte) int - -// NewSmartypantsRenderer constructs a Smartypants renderer object. -func NewSmartypantsRenderer(flags HTMLFlags) *SPRenderer { - var ( - r SPRenderer - - smartAmpAngled = r.smartAmp(true, false) - smartAmpAngledNBSP = r.smartAmp(true, true) - smartAmpRegular = r.smartAmp(false, false) - smartAmpRegularNBSP = r.smartAmp(false, true) - - addNBSP = flags&SmartypantsQuotesNBSP != 0 - ) - - if flags&SmartypantsAngledQuotes == 0 { - r.callbacks['"'] = r.smartDoubleQuote - if !addNBSP { - r.callbacks['&'] = smartAmpRegular - } else { - r.callbacks['&'] = smartAmpRegularNBSP - } - } else { - r.callbacks['"'] = r.smartAngledDoubleQuote - if !addNBSP { - r.callbacks['&'] = smartAmpAngled - } else { - r.callbacks['&'] = smartAmpAngledNBSP - } - } - r.callbacks['\''] = r.smartSingleQuote - r.callbacks['('] = r.smartParens - if flags&SmartypantsDashes != 0 { - if flags&SmartypantsLatexDashes == 0 { - r.callbacks['-'] = r.smartDash - } else { - r.callbacks['-'] = r.smartDashLatex - } - } - r.callbacks['.'] = r.smartPeriod - if flags&SmartypantsFractions == 0 { - r.callbacks['1'] = r.smartNumber - r.callbacks['3'] = r.smartNumber - } else { - for ch := '1'; ch <= '9'; ch++ { - r.callbacks[ch] = r.smartNumberGeneric - } - } - r.callbacks['<'] = r.smartLeftAngle - r.callbacks['`'] = r.smartBacktick - return &r -} - -// Process is the entry point of the Smartypants renderer. -func (r *SPRenderer) Process(w io.Writer, text []byte) { - mark := 0 - for i := 0; i < len(text); i++ { - if action := r.callbacks[text[i]]; action != nil { - if i > mark { - w.Write(text[mark:i]) - } - previousChar := byte(0) - if i > 0 { - previousChar = text[i-1] - } - var tmp bytes.Buffer - i += action(&tmp, previousChar, text[i:]) - w.Write(tmp.Bytes()) - mark = i + 1 - } - } - if mark < len(text) { - w.Write(text[mark:]) - } -} diff --git a/vendor/go.step.sm/cli-utils/ui/options.go b/vendor/go.step.sm/cli-utils/ui/options.go deleted file mode 100644 index 75f730d2..00000000 --- a/vendor/go.step.sm/cli-utils/ui/options.go +++ /dev/null @@ -1,163 +0,0 @@ -package ui - -import ( - "fmt" - "regexp" - - "github.com/manifoldco/promptui" -) - -type options struct { - mask rune - defaultValue string - value string - allowEdit bool - printTemplate string - promptTemplates *promptui.PromptTemplates - selectTemplates *promptui.SelectTemplates - validateFunc promptui.ValidateFunc -} - -// apply applies the given options. -func (o *options) apply(opts []Option) *options { - for _, fn := range opts { - fn(o) - } - return o -} - -// valid returns true if the validate function passes on the value. -func (o *options) valid() bool { - if o.validateFunc == nil { - return true - } - return o.validateFunc(o.value) == nil -} - -// getValue validates the value and returns it. 
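
The options struct above follows the functional-options pattern: each Option is a closure that mutates a private struct, and apply folds a slice of them over the defaults. A minimal self-contained sketch of the same pattern (hypothetical names, not the vendored package):

package main

import "fmt"

type options struct {
	defaultValue string
	validateFunc func(string) error
}

// Option mutates the private options struct.
type Option func(*options)

func WithDefaultValue(s string) Option {
	return func(o *options) { o.defaultValue = s }
}

func WithValidateFunc(fn func(string) error) Option {
	return func(o *options) { o.validateFunc = fn }
}

// apply folds every Option over the receiver and returns it.
func (o *options) apply(opts []Option) *options {
	for _, fn := range opts {
		fn(o)
	}
	return o
}

func main() {
	o := new(options).apply([]Option{
		WithDefaultValue("yes"),
		WithValidateFunc(func(s string) error {
			if s == "" {
				return fmt.Errorf("value is empty")
			}
			return nil
		}),
	})
	fmt.Println(o.defaultValue, o.validateFunc("ok") == nil)
}
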
-func (o *options) getValue() (string, error) { - if o.validateFunc == nil { - return o.value, nil - } - if err := o.validateFunc(o.value); err != nil { - return "", err - } - return o.value, nil -} - -// getValueBytes validates the value and returns it as a byte slice. -func (o *options) getValueBytes() ([]byte, error) { - if o.validateFunc == nil { - return []byte(o.value), nil - } - if err := o.validateFunc(o.value); err != nil { - return nil, err - } - return []byte(o.value), nil -} - -// Option is the type of the functions that modify the prompt options. -type Option func(*options) - -func extractOptions(args []interface{}) (opts []Option, rest []interface{}) { - rest = args[:0] - for _, arg := range args { - if o, ok := arg.(Option); ok { - opts = append(opts, o) - } else { - rest = append(rest, arg) - } - } - return -} - -// WithMask adds a mask to a prompt. -func WithMask(r rune) Option { - return func(o *options) { - o.mask = r - } -} - -// WithDefaultValue adds a custom string as the default value. -func WithDefaultValue(s string) Option { - return func(o *options) { - o.defaultValue = s - } -} - -// WithValue sets a custom string as the result of a prompt. If value is set, -// the prompt won't be displayed. -func WithValue(value string) Option { - return func(o *options) { - o.value = value - } -} - -// WithAllowEdit if true, let's the user edit the default value set. -func WithAllowEdit(b bool) Option { - return func(o *options) { - o.allowEdit = b - } -} - -// WithPrintTemplate sets the template to use on the print methods. -func WithPrintTemplate(template string) Option { - return func(o *options) { - o.printTemplate = template - } -} - -// WithPromptTemplates adds a custom template to a prompt. -func WithPromptTemplates(t *promptui.PromptTemplates) Option { - return func(o *options) { - o.promptTemplates = t - } -} - -// WithSelectTemplates adds a custom template to a select. -func WithSelectTemplates(t *promptui.SelectTemplates) Option { - return func(o *options) { - o.selectTemplates = t - } -} - -// WithValidateFunc adds a custom validation function to a prompt. -func WithValidateFunc(fn func(string) error) Option { - return func(o *options) { - o.validateFunc = fn - } -} - -// WithValidateNotEmpty adds a custom validation function to a prompt that -// checks that the propted string is not empty. -func WithValidateNotEmpty() Option { - return WithValidateFunc(NotEmpty()) -} - -// WithValidateYesNo adds a custom validation function to a prompt for a Yes/No -// prompt. -func WithValidateYesNo() Option { - return WithValidateFunc(YesNo()) -} - -// WithRichPrompt add the template option with rich templates. -func WithRichPrompt() Option { - return WithPromptTemplates(PromptTemplates()) -} - -// WithSimplePrompt add the template option with simple templates. -func WithSimplePrompt() Option { - return WithPromptTemplates(SimplePromptTemplates()) -} - -// WithValidateRegexp checks a prompt answer with a regular expression. If the -// regular expression is not a valid one, the option will panic. 
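
Note that the regexp-backed validator defined next compiles its pattern eagerly with MustCompile, so an invalid expression panics when the option is constructed rather than when an answer is checked. A standalone sketch of the same closure:

package main

import (
	"fmt"
	"regexp"
)

// validateRegexp mirrors the validator below: the pattern is compiled
// once, up front, and the returned closure only matches.
func validateRegexp(re string) func(string) error {
	rx := regexp.MustCompile(re)
	return func(s string) error {
		if rx.MatchString(s) {
			return nil
		}
		return fmt.Errorf("%s does not match the regular expression %s", s, re)
	}
}

func main() {
	validate := validateRegexp(`^[a-z]+\.example\.com$`)
	fmt.Println(validate("ca.example.com")) // <nil>
	fmt.Println(validate("not a hostname")) // error
}
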
-func WithValidateRegexp(re string) Option { - rx := regexp.MustCompile(re) - return WithValidateFunc(func(s string) error { - if rx.MatchString(s) { - return nil - } - return fmt.Errorf("%s does not match the regular expresion %s", s, re) - }) -} diff --git a/vendor/go.step.sm/cli-utils/ui/templates.go b/vendor/go.step.sm/cli-utils/ui/templates.go deleted file mode 100644 index 64daecf8..00000000 --- a/vendor/go.step.sm/cli-utils/ui/templates.go +++ /dev/null @@ -1,93 +0,0 @@ -package ui - -import ( - "fmt" - "runtime" - - "github.com/chzyer/readline" - "github.com/manifoldco/promptui" -) - -var ( - // IconInitial is the icon used when starting in prompt mode and the icon next to the label when - // starting in select mode. - IconInitial = promptui.Styler(promptui.FGBlue)("?") - - // IconGood is the icon used when a good answer is entered in prompt mode. - IconGood = promptui.Styler(promptui.FGGreen)("✔") - - // IconWarn is the icon used when a good, but potentially invalid answer is entered in prompt mode. - IconWarn = promptui.Styler(promptui.FGYellow)("âš ") - - // IconBad is the icon used when a bad answer is entered in prompt mode. - IconBad = promptui.Styler(promptui.FGRed)("✗") - - // IconSelect is the icon used to identify the currently selected item in select mode. - IconSelect = promptui.Styler(promptui.FGBold)("â–¸") -) - -func init() { - // Set VT100 characters for windows too - if runtime.GOOS == "windows" { - promptui.KeyEnter = readline.CharEnter - promptui.KeyBackspace = readline.CharBackspace - promptui.KeyPrev = readline.CharPrev - promptui.KeyPrevDisplay = "↑" - promptui.KeyNext = readline.CharNext - promptui.KeyNextDisplay = "↓" - promptui.KeyBackward = readline.CharBackward - promptui.KeyBackwardDisplay = "â†" - promptui.KeyForward = readline.CharForward - promptui.KeyForwardDisplay = "→" - } -} - -// PrintSelectedTemplate returns the default template used in PrintSelected. -func PrintSelectedTemplate() string { - return fmt.Sprintf(`{{ "%s" | green }} {{ .Name | bold }}{{ ":" | bold }} {{ .Value }}`, IconGood) + "\n" -} - -// PromptTemplates is the default style for a prompt. -func PromptTemplates() *promptui.PromptTemplates { - bold := promptui.Styler(promptui.FGBold) - return &promptui.PromptTemplates{ - Prompt: fmt.Sprintf("%s {{ . | bold }}%s ", IconInitial, bold(":")), - Success: fmt.Sprintf("%s {{ . | bold }}%s ", bold(IconGood), bold(":")), - // Confirm: fmt.Sprintf(`{{ "%s" | bold }} {{ . | bold }}? {{ "[]" | faint }} `, IconInitial), - Valid: fmt.Sprintf("%s {{ . | bold }}%s ", bold(IconGood), bold(":")), - Invalid: fmt.Sprintf("%s {{ . | bold }}%s ", bold(IconBad), bold(":")), - } -} - -// SimplePromptTemplates is a prompt with a simple style, used by default on password prompts. -func SimplePromptTemplates() *promptui.PromptTemplates { - return &promptui.PromptTemplates{ - Prompt: "{{ . }}: ", - Success: "{{ . }}: ", - Valid: "{{ . }}: ", - Invalid: "{{ . }}: ", - } -} - -// SelectTemplates returns the default promptui.SelectTemplate for string -// slices. The given name is the prompt of the selected option. -func SelectTemplates(name string) *promptui.SelectTemplates { - return &promptui.SelectTemplates{ - Label: fmt.Sprintf("%s {{ . }}: ", IconInitial), - Active: fmt.Sprintf("%s {{ . | underline }}", IconSelect), - Inactive: " {{ . }}", - Selected: fmt.Sprintf(`{{ "%s" | green }} {{ "%s:" | bold }} {{ .Name }}`, IconGood, name), - } -} - -// NamedSelectTemplates returns the default promptui.SelectTemplate for struct -// slices with a name property. 
The given name is the prompt of the selected -// option. -func NamedSelectTemplates(name string) *promptui.SelectTemplates { - return &promptui.SelectTemplates{ - Label: fmt.Sprintf("%s {{.Name}}: ", IconInitial), - Active: fmt.Sprintf("%s {{ .Name | underline }}", IconSelect), - Inactive: " {{.Name}}", - Selected: fmt.Sprintf(`{{ "%s" | green }} {{ "%s:" | bold }} {{ .Name }}`, IconGood, name), - } -} diff --git a/vendor/go.step.sm/cli-utils/ui/ui.go b/vendor/go.step.sm/cli-utils/ui/ui.go deleted file mode 100644 index 2208511c..00000000 --- a/vendor/go.step.sm/cli-utils/ui/ui.go +++ /dev/null @@ -1,322 +0,0 @@ -package ui - -import ( - "fmt" - "os" - "strings" - "text/template" - - "github.com/chzyer/readline" - "github.com/manifoldco/promptui" - "github.com/pkg/errors" - "go.step.sm/crypto/randutil" -) - -// stderr implements an io.WriteCloser that skips the terminal bell character -// (ASCII code 7), and writes the rest to os.Stderr. It's used to replace -// readline.Stdout, that is the package used by promptui to display the prompts. -type stderr struct{} - -// Write implements an io.WriterCloser over os.Stderr, but it skips the terminal -// bell character. -func (s *stderr) Write(b []byte) (int, error) { - if len(b) == 1 && b[0] == readline.CharBell { - return 0, nil - } - return os.Stderr.Write(b) -} - -// Close implements an io.WriterCloser over os.Stderr. -func (s *stderr) Close() error { - return os.Stderr.Close() -} - -func init() { - readline.Stdout = &stderr{} -} - -// Print uses templates to print the arguments formated to os.Stderr. -func Print(args ...interface{}) error { - var o options - opts, args := extractOptions(args) - o.apply(opts) - - // Return with a default value. This is useful when we split the question - // and the response in two lines. - if o.value != "" && o.valid() { - return nil - } - - text := fmt.Sprint(args...) - t, err := template.New("Print").Funcs(promptui.FuncMap).Parse(text) - if err != nil { - return errors.Wrap(err, "error parsing template") - } - if err := t.Execute(os.Stderr, nil); err != nil { - return errors.Wrap(err, "error executing template") - } - return nil -} - -// Printf uses templates to print the string formated to os.Stderr. -func Printf(format string, args ...interface{}) error { - var o options - opts, args := extractOptions(args) - o.apply(opts) - - // Return with a default value. This is useful when we split the question - // and the response in two lines. - if o.value != "" && o.valid() { - return nil - } - - text := fmt.Sprintf(format, args...) - t, err := template.New("Printf").Funcs(promptui.FuncMap).Parse(text) - if err != nil { - return errors.Wrap(err, "error parsing template") - } - if err := t.Execute(os.Stderr, nil); err != nil { - return errors.Wrap(err, "error executing template") - } - return nil -} - -// Println uses templates to print the given arguments to os.Stderr -func Println(args ...interface{}) error { - var o options - opts, args := extractOptions(args) - o.apply(opts) - - // Return with a default value. This is useful when we split the question - // and the response in two lines. - if o.value != "" && o.valid() { - return nil - } - - text := fmt.Sprintln(args...) 
- t, err := template.New("Println").Funcs(promptui.FuncMap).Parse(text) - if err != nil { - return errors.Wrap(err, "error parsing template") - } - if err := t.Execute(os.Stderr, nil); err != nil { - return errors.Wrap(err, "error executing template") - } - return nil -} - -// PrintSelected prints the given name and value as if they were selected from a -// promptui.Select. -func PrintSelected(name, value string, opts ...Option) error { - o := &options{ - printTemplate: PrintSelectedTemplate(), - } - o.apply(opts) - - t, err := template.New(name).Funcs(promptui.FuncMap).Parse(o.printTemplate) - if err != nil { - return errors.Wrap(err, "error parsing template") - } - - data := struct { - Name string - Value string - }{name, value} - if err := t.Execute(os.Stderr, data); err != nil { - return errors.Wrap(err, "error executing template") - } - - return nil -} - -// Prompt creates and runs a promptui.Prompt with the given label. -func Prompt(label string, opts ...Option) (string, error) { - o := &options{ - promptTemplates: PromptTemplates(), - } - o.apply(opts) - - // Return value if set - if o.value != "" { - return o.getValue() - } - - // Prompt using the terminal - clean, err := preparePromptTerminal() - if err != nil { - return "", err - } - defer clean() - - prompt := &promptui.Prompt{ - Label: label, - Default: o.defaultValue, - AllowEdit: o.allowEdit, - Validate: o.validateFunc, - Templates: o.promptTemplates, - } - value, err := prompt.Run() - if err != nil { - return "", errors.Wrap(err, "error running prompt") - } - return value, nil -} - -// PromptPassword creates and runs a promptui.Prompt with the given label. This -// prompt will mask the key entries with \r. -func PromptPassword(label string, opts ...Option) ([]byte, error) { - // Using a not printable character as they work better than \r - o := &options{ - mask: 1, - promptTemplates: SimplePromptTemplates(), - } - o.apply(opts) - - // Return value if set - if o.value != "" { - return o.getValueBytes() - } - - // Prompt using the terminal - clean, err := preparePromptTerminal() - if err != nil { - return nil, err - } - defer clean() - - prompt := &promptui.Prompt{ - Label: label, - Mask: o.mask, - Default: o.defaultValue, - AllowEdit: o.allowEdit, - Validate: o.validateFunc, - Templates: o.promptTemplates, - } - pass, err := prompt.Run() - if err != nil { - return nil, errors.Wrap(err, "error reading password") - } - return []byte(pass), nil -} - -// PromptPasswordGenerate creates and runs a promptui.Prompt with the given label. -// This prompt will mask the key entries with \r. If the result password length -// is 0, it will generate a new prompt with a generated password that can be -// edited. -func PromptPasswordGenerate(label string, opts ...Option) ([]byte, error) { - pass, err := PromptPassword(label, opts...) - if err != nil || len(pass) > 0 { - return pass, err - } - passString, err := randutil.ASCII(32) - if err != nil { - return nil, err - } - passString, err = Prompt("Password", WithDefaultValue(passString), WithAllowEdit(true), WithValidateNotEmpty()) - if err != nil { - return nil, err - } - return []byte(passString), nil -} - -// PromptYesNo creates and runs a promptui.Prompt with the given label, and -// returns true if the answer is y/yes and false if the answer is n/no. -func PromptYesNo(label string, opts ...Option) (bool, error) { - opts = append([]Option{WithValidateYesNo()}, opts...) - s, err := Prompt(label, opts...) 
- if err != nil { - return false, err - } - switch strings.ToLower(strings.TrimSpace(s)) { - case "y", "yes": - return true, nil - case "n", "no": - return false, nil - default: - return false, fmt.Errorf("%s is not a valid answer", s) - } -} - -// Select creates and runs a promptui.Select with the given label and items. -func Select(label string, items interface{}, opts ...Option) (int, string, error) { - o := &options{ - selectTemplates: SelectTemplates(label), - } - o.apply(opts) - - clean, err := prepareSelectTerminal() - if err != nil { - return 0, "", err - } - defer clean() - - prompt := &promptui.Select{ - Label: label, - Items: items, - Templates: o.selectTemplates, - } - n, s, err := prompt.Run() - if err != nil { - return 0, "", errors.Wrap(err, "error running prompt") - } - return n, s, nil -} - -func preparePromptTerminal() (func(), error) { - nothing := func() {} - if !readline.DefaultIsTerminal() { - tty, err := os.Open("/dev/tty") - if err != nil { - return nothing, errors.Wrap(err, "error allocating terminal") - } - clean := func() { - tty.Close() - } - - fd := int(tty.Fd()) - state, err := readline.MakeRaw(fd) - if err != nil { - defer clean() - return nothing, errors.Wrap(err, "error making raw terminal") - } - stdin := readline.Stdin - readline.Stdin = tty - clean = func() { - readline.Stdin = stdin - readline.Restore(fd, state) - tty.Close() - } - return clean, nil - } - - return nothing, nil -} - -func prepareSelectTerminal() (func(), error) { - nothing := func() {} - if !readline.DefaultIsTerminal() { - tty, err := os.Open("/dev/tty") - if err != nil { - return nothing, errors.Wrap(err, "error allocating terminal") - } - clean := func() { - tty.Close() - } - - fd := int(tty.Fd()) - state, err := readline.MakeRaw(fd) - if err != nil { - defer clean() - return nothing, errors.Wrap(err, "error making raw terminal") - } - stdin := os.Stdin - os.Stdin = tty - clean = func() { - os.Stdin = stdin - readline.Restore(fd, state) - tty.Close() - } - return clean, nil - } - - return nothing, nil -} diff --git a/vendor/go.step.sm/cli-utils/ui/ui_windows.go b/vendor/go.step.sm/cli-utils/ui/ui_windows.go deleted file mode 100644 index e9c47710..00000000 --- a/vendor/go.step.sm/cli-utils/ui/ui_windows.go +++ /dev/null @@ -1,24 +0,0 @@ -package ui - -import ( - "fmt" - "os" - - "golang.org/x/sys/windows" -) - -func init() { - var inMode, outMode uint32 - if err := windows.GetConsoleMode(windows.Stdin, &inMode); err == nil { - inMode |= windows.ENABLE_VIRTUAL_TERMINAL_INPUT - if err := windows.SetConsoleMode(windows.Stdin, inMode); err != nil { - fmt.Fprintf(os.Stderr, "Failed to set console mode: %v", err) - } - } - if err := windows.GetConsoleMode(windows.Stdout, &outMode); err == nil { - outMode |= windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING - if err := windows.SetConsoleMode(windows.Stdout, outMode); err != nil { - fmt.Fprintf(os.Stderr, "Failed to set console mode: %v", err) - } - } -} diff --git a/vendor/go.step.sm/cli-utils/ui/validators.go b/vendor/go.step.sm/cli-utils/ui/validators.go deleted file mode 100644 index 5daa9cab..00000000 --- a/vendor/go.step.sm/cli-utils/ui/validators.go +++ /dev/null @@ -1,69 +0,0 @@ -package ui - -import ( - "fmt" - "net" - "strings" - - "github.com/manifoldco/promptui" -) - -// NotEmpty is a validation function that checks that the prompted string is not -// empty. 
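
The validators defined next all share promptui's ValidateFunc shape, func(string) error, which makes them easy to compose. The sketch below uses a hypothetical combinator (not in the vendored package) together with local stand-ins for NotEmpty and Address:

package main

import (
	"fmt"
	"net"
	"strings"
)

type validateFunc func(string) error

// all is a hypothetical combinator: it runs each validator in order
// and returns the first failure, or nil if every check passes.
func all(fns ...validateFunc) validateFunc {
	return func(s string) error {
		for _, fn := range fns {
			if err := fn(s); err != nil {
				return err
			}
		}
		return nil
	}
}

func main() {
	notEmpty := func(s string) error {
		if strings.TrimSpace(s) == "" {
			return fmt.Errorf("value is empty")
		}
		return nil
	}
	address := func(s string) error {
		if _, _, err := net.SplitHostPort(s); err != nil {
			return fmt.Errorf("%s is not a TCP address", s)
		}
		return nil
	}
	fmt.Println(all(notEmpty, address)("localhost:443")) // <nil>
	fmt.Println(all(notEmpty, address)("localhost"))     // error
}
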
-func NotEmpty() promptui.ValidateFunc { - return func(s string) error { - if len(strings.TrimSpace(s)) == 0 { - return fmt.Errorf("value is empty") - } - return nil - } -} - -// Address is a validation function that checks that the prompted string is a -// valid TCP address. -func Address() promptui.ValidateFunc { - return func(s string) error { - if _, _, err := net.SplitHostPort(s); err != nil { - return fmt.Errorf("%s is not an TCP address", s) - } - return nil - } -} - -// IPAddress is validation function that checks that the prompted string is a -// valid IP address. -func IPAddress() promptui.ValidateFunc { - return func(s string) error { - if net.ParseIP(s) == nil { - return fmt.Errorf("%s is not an ip address", s) - } - return nil - } -} - -// DNS is a validation function that changes that the prompted string is a valid -// DNS name. -func DNS() promptui.ValidateFunc { - return func(s string) error { - if len(strings.TrimSpace(s)) == 0 { - return fmt.Errorf("value is empty") - } - if _, _, err := net.SplitHostPort(s + ":443"); err != nil { - return fmt.Errorf("%s is not a valid DNS name", s) - } - return nil - } -} - -// YesNo is a validation function that checks for a Yes/No answer. -func YesNo() promptui.ValidateFunc { - return func(s string) error { - s = strings.ToLower(strings.TrimSpace(s)) - switch s { - case "y", "yes", "n", "no": - return nil - default: - return fmt.Errorf("%s is not a valid answer", s) - } - } -} diff --git a/vendor/go.step.sm/cli-utils/usage/css.go b/vendor/go.step.sm/cli-utils/usage/css.go deleted file mode 100644 index e552534c..00000000 --- a/vendor/go.step.sm/cli-utils/usage/css.go +++ /dev/null @@ -1,764 +0,0 @@ -package usage - -// CSS code replicating Github style. -// From https://github.com/sindresorhus/github-markdown-css -// MIT license -var css = `@font-face { - font-family: octicons-link; - src: 
url(data:font/woff;charset=utf-8;base64,d09GRgABAAAAAAZwABAAAAAACFQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABEU0lHAAAGaAAAAAgAAAAIAAAAAUdTVUIAAAZcAAAACgAAAAoAAQAAT1MvMgAAAyQAAABJAAAAYFYEU3RjbWFwAAADcAAAAEUAAACAAJThvmN2dCAAAATkAAAABAAAAAQAAAAAZnBnbQAAA7gAAACyAAABCUM+8IhnYXNwAAAGTAAAABAAAAAQABoAI2dseWYAAAFsAAABPAAAAZwcEq9taGVhZAAAAsgAAAA0AAAANgh4a91oaGVhAAADCAAAABoAAAAkCA8DRGhtdHgAAAL8AAAADAAAAAwGAACfbG9jYQAAAsAAAAAIAAAACABiATBtYXhwAAACqAAAABgAAAAgAA8ASm5hbWUAAAToAAABQgAAAlXu73sOcG9zdAAABiwAAAAeAAAAME3QpOBwcmVwAAAEbAAAAHYAAAB/aFGpk3jaTY6xa8JAGMW/O62BDi0tJLYQincXEypYIiGJjSgHniQ6umTsUEyLm5BV6NDBP8Tpts6F0v+k/0an2i+itHDw3v2+9+DBKTzsJNnWJNTgHEy4BgG3EMI9DCEDOGEXzDADU5hBKMIgNPZqoD3SilVaXZCER3/I7AtxEJLtzzuZfI+VVkprxTlXShWKb3TBecG11rwoNlmmn1P2WYcJczl32etSpKnziC7lQyWe1smVPy/Lt7Kc+0vWY/gAgIIEqAN9we0pwKXreiMasxvabDQMM4riO+qxM2ogwDGOZTXxwxDiycQIcoYFBLj5K3EIaSctAq2kTYiw+ymhce7vwM9jSqO8JyVd5RH9gyTt2+J/yUmYlIR0s04n6+7Vm1ozezUeLEaUjhaDSuXHwVRgvLJn1tQ7xiuVv/ocTRF42mNgZGBgYGbwZOBiAAFGJBIMAAizAFoAAABiAGIAznjaY2BkYGAA4in8zwXi+W2+MjCzMIDApSwvXzC97Z4Ig8N/BxYGZgcgl52BCSQKAA3jCV8CAABfAAAAAAQAAEB42mNgZGBg4f3vACQZQABIMjKgAmYAKEgBXgAAeNpjYGY6wTiBgZWBg2kmUxoDA4MPhGZMYzBi1AHygVLYQUCaawqDA4PChxhmh/8ODDEsvAwHgMKMIDnGL0x7gJQCAwMAJd4MFwAAAHjaY2BgYGaA4DAGRgYQkAHyGMF8NgYrIM3JIAGVYYDT+AEjAwuDFpBmA9KMDEwMCh9i/v8H8sH0/4dQc1iAmAkALaUKLgAAAHjaTY9LDsIgEIbtgqHUPpDi3gPoBVyRTmTddOmqTXThEXqrob2gQ1FjwpDvfwCBdmdXC5AVKFu3e5MfNFJ29KTQT48Ob9/lqYwOGZxeUelN2U2R6+cArgtCJpauW7UQBqnFkUsjAY/kOU1cP+DAgvxwn1chZDwUbd6CFimGXwzwF6tPbFIcjEl+vvmM/byA48e6tWrKArm4ZJlCbdsrxksL1AwWn/yBSJKpYbq8AXaaTb8AAHja28jAwOC00ZrBeQNDQOWO//sdBBgYGRiYWYAEELEwMTE4uzo5Zzo5b2BxdnFOcALxNjA6b2ByTswC8jYwg0VlNuoCTWAMqNzMzsoK1rEhNqByEyerg5PMJlYuVueETKcd/89uBpnpvIEVomeHLoMsAAe1Id4AAAAAAAB42oWQT07CQBTGv0JBhagk7HQzKxca2sJCE1hDt4QF+9JOS0nbaaYDCQfwCJ7Au3AHj+LO13FMmm6cl7785vven0kBjHCBhfpYuNa5Ph1c0e2Xu3jEvWG7UdPDLZ4N92nOm+EBXuAbHmIMSRMs+4aUEd4Nd3CHD8NdvOLTsA2GL8M9PODbcL+hD7C1xoaHeLJSEao0FEW14ckxC+TU8TxvsY6X0eLPmRhry2WVioLpkrbp84LLQPGI7c6sOiUzpWIWS5GzlSgUzzLBSikOPFTOXqly7rqx0Z1Q5BAIoZBSFihQYQOOBEdkCOgXTOHA07HAGjGWiIjaPZNW13/+lm6S9FT7rLHFJ6fQbkATOG1j2OFMucKJJsxIVfQORl+9Jyda6Sl1dUYhSCm1dyClfoeDve4qMYdLEbfqHf3O/AdDumsjAAB42mNgYoAAZQYjBmyAGYQZmdhL8zLdDEydARfoAqIAAAABAAMABwAKABMAB///AA8AAQAAAAAAAAAAAAAAAAABAAAAAA==) format('woff'); -} - -.wrapper { - margin: 0 auto; - max-width: 700px; - padding: 20px 10px; -} - -.markdown-body { - -ms-text-size-adjust: 100%; - -webkit-text-size-adjust: 100%; - line-height: 1.5; - color: #24292e; - font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Helvetica, Arial, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol"; - font-size: 16px; - line-height: 1.5; - word-wrap: break-word; -} - -.markdown-body .pl-c { - color: #6a737d; -} - -.markdown-body .pl-c1, -.markdown-body .pl-s .pl-v { - color: #005cc5; -} - -.markdown-body .pl-e, -.markdown-body .pl-en { - color: #6f42c1; -} - -.markdown-body .pl-smi, -.markdown-body .pl-s .pl-s1 { - color: #24292e; -} - -.markdown-body .pl-ent { - color: #22863a; -} - -.markdown-body .pl-k { - color: #d73a49; -} - -.markdown-body .pl-s, -.markdown-body .pl-pds, -.markdown-body .pl-s .pl-pse .pl-s1, -.markdown-body .pl-sr, -.markdown-body .pl-sr .pl-cce, -.markdown-body .pl-sr .pl-sre, -.markdown-body .pl-sr .pl-sra { - color: #032f62; -} - -.markdown-body .pl-v, -.markdown-body .pl-smw { - color: #e36209; -} - -.markdown-body .pl-bu { - color: #b31d28; -} - -.markdown-body .pl-ii { - color: #fafbfc; - background-color: #b31d28; -} - -.markdown-body .pl-c2 { - color: #fafbfc; - background-color: #d73a49; -} - 
-.markdown-body .pl-c2::before { - content: "^M"; -} - -.markdown-body .pl-sr .pl-cce { - font-weight: bold; - color: #22863a; -} - -.markdown-body .pl-ml { - color: #735c0f; -} - -.markdown-body .pl-mh, -.markdown-body .pl-mh .pl-en, -.markdown-body .pl-ms { - font-weight: bold; - color: #005cc5; -} - -.markdown-body .pl-mi { - font-style: italic; - color: #24292e; -} - -.markdown-body .pl-mb { - font-weight: bold; - color: #24292e; -} - -.markdown-body .pl-md { - color: #b31d28; - background-color: #ffeef0; -} - -.markdown-body .pl-mi1 { - color: #22863a; - background-color: #f0fff4; -} - -.markdown-body .pl-mc { - color: #e36209; - background-color: #ffebda; -} - -.markdown-body .pl-mi2 { - color: #f6f8fa; - background-color: #005cc5; -} - -.markdown-body .pl-mdr { - font-weight: bold; - color: #6f42c1; -} - -.markdown-body .pl-ba { - color: #586069; -} - -.markdown-body .pl-sg { - color: #959da5; -} - -.markdown-body .pl-corl { - text-decoration: underline; - color: #032f62; -} - -.markdown-body .octicon { - display: inline-block; - vertical-align: text-top; - fill: currentColor; -} - -.markdown-body a { - background-color: transparent; -} - -.markdown-body a:active, -.markdown-body a:hover { - outline-width: 0; -} - -.markdown-body strong { - font-weight: inherit; -} - -.markdown-body strong { - font-weight: bolder; -} - -.markdown-body h1 { - font-size: 2em; - margin: 0.67em 0; -} - -.markdown-body img { - border-style: none; -} - -.markdown-body code, -.markdown-body kbd, -.markdown-body pre { - font-family: monospace, monospace; - font-size: 1em; -} - -.markdown-body hr { - box-sizing: content-box; - height: 0; - overflow: visible; -} - -.markdown-body input { - font: inherit; - margin: 0; -} - -.markdown-body input { - overflow: visible; -} - -.markdown-body [type="checkbox"] { - box-sizing: border-box; - padding: 0; -} - -.markdown-body * { - box-sizing: border-box; -} - -.markdown-body input { - font-family: inherit; - font-size: inherit; - line-height: inherit; -} - -.markdown-body a { - color: #0366d6; - text-decoration: none; -} - -.markdown-body a:hover { - text-decoration: underline; -} - -.markdown-body strong { - font-weight: 600; -} - -.markdown-body hr { - height: 0; - margin: 15px 0; - overflow: hidden; - background: transparent; - border: 0; - border-bottom: 1px solid #dfe2e5; -} - -.markdown-body hr::before { - display: table; - content: ""; -} - -.markdown-body hr::after { - display: table; - clear: both; - content: ""; -} - -.markdown-body table { - border-spacing: 0; - border-collapse: collapse; -} - -.markdown-body td, -.markdown-body th { - padding: 0; -} - -.markdown-body h1, -.markdown-body h2, -.markdown-body h3, -.markdown-body h4, -.markdown-body h5, -.markdown-body h6 { - margin-top: 0; - margin-bottom: 0; -} - -.markdown-body h1 { - font-size: 32px; - font-weight: 600; -} - -.markdown-body h2 { - font-size: 24px; - font-weight: 600; -} - -.markdown-body h3 { - font-size: 20px; - font-weight: 600; -} - -.markdown-body h4 { - font-size: 16px; - font-weight: 600; -} - -.markdown-body h5 { - font-size: 14px; - font-weight: 600; -} - -.markdown-body h6 { - font-size: 12px; - font-weight: 600; -} - -.markdown-body p { - margin-top: 0; - margin-bottom: 10px; -} - -.markdown-body blockquote { - margin: 0; -} - -.markdown-body ul, -.markdown-body ol { - padding-left: 0; - margin-top: 0; - margin-bottom: 0; -} - -.markdown-body ol ol, -.markdown-body ul ol { - list-style-type: lower-roman; -} - -.markdown-body ul ul ol, -.markdown-body ul ol ol, -.markdown-body ol 
ul ol, -.markdown-body ol ol ol { - list-style-type: lower-alpha; -} - -.markdown-body dd { - margin-left: 0; -} - -.markdown-body code { - font-family: "SFMono-Regular", Consolas, "Liberation Mono", Menlo, Courier, monospace; - font-size: 12px; -} - -.markdown-body pre { - margin-top: 0; - margin-bottom: 0; - font-family: "SFMono-Regular", Consolas, "Liberation Mono", Menlo, Courier, monospace; - font-size: 12px; -} - -.markdown-body .octicon { - vertical-align: text-bottom; -} - -.markdown-body .pl-0 { - padding-left: 0 !important; -} - -.markdown-body .pl-1 { - padding-left: 4px !important; -} - -.markdown-body .pl-2 { - padding-left: 8px !important; -} - -.markdown-body .pl-3 { - padding-left: 16px !important; -} - -.markdown-body .pl-4 { - padding-left: 24px !important; -} - -.markdown-body .pl-5 { - padding-left: 32px !important; -} - -.markdown-body .pl-6 { - padding-left: 40px !important; -} - -.markdown-body::before { - display: table; - content: ""; -} - -.markdown-body::after { - display: table; - clear: both; - content: ""; -} - -.markdown-body>*:first-child { - margin-top: 0 !important; -} - -.markdown-body>*:last-child { - margin-bottom: 0 !important; -} - -.markdown-body a:not([href]) { - color: inherit; - text-decoration: none; -} - -.markdown-body .anchor { - float: left; - padding-right: 4px; - margin-left: -20px; - line-height: 1; -} - -.markdown-body .anchor:focus { - outline: none; -} - -.markdown-body p, -.markdown-body blockquote, -.markdown-body ul, -.markdown-body ol, -.markdown-body dl, -.markdown-body table, -.markdown-body pre { - margin-top: 0; - margin-bottom: 16px; -} - -.markdown-body hr { - height: 0.25em; - padding: 0; - margin: 24px 0; - background-color: #e1e4e8; - border: 0; -} - -.markdown-body blockquote { - padding: 0 1em; - color: #6a737d; - border-left: 0.25em solid #dfe2e5; -} - -.markdown-body blockquote>:first-child { - margin-top: 0; -} - -.markdown-body blockquote>:last-child { - margin-bottom: 0; -} - -.markdown-body kbd { - display: inline-block; - padding: 3px 5px; - font-size: 11px; - line-height: 10px; - color: #444d56; - vertical-align: middle; - background-color: #fafbfc; - border: solid 1px #c6cbd1; - border-bottom-color: #959da5; - border-radius: 3px; - box-shadow: inset 0 -1px 0 #959da5; -} - -.markdown-body h1, -.markdown-body h2, -.markdown-body h3, -.markdown-body h4, -.markdown-body h5, -.markdown-body h6 { - margin-top: 24px; - margin-bottom: 16px; - font-weight: 600; - line-height: 1.25; -} - -.markdown-body h1 .octicon-link, -.markdown-body h2 .octicon-link, -.markdown-body h3 .octicon-link, -.markdown-body h4 .octicon-link, -.markdown-body h5 .octicon-link, -.markdown-body h6 .octicon-link { - color: #1b1f23; - vertical-align: middle; - visibility: hidden; -} - -.markdown-body h1:hover .anchor, -.markdown-body h2:hover .anchor, -.markdown-body h3:hover .anchor, -.markdown-body h4:hover .anchor, -.markdown-body h5:hover .anchor, -.markdown-body h6:hover .anchor { - text-decoration: none; -} - -.markdown-body h1:hover .anchor .octicon-link, -.markdown-body h2:hover .anchor .octicon-link, -.markdown-body h3:hover .anchor .octicon-link, -.markdown-body h4:hover .anchor .octicon-link, -.markdown-body h5:hover .anchor .octicon-link, -.markdown-body h6:hover .anchor .octicon-link { - visibility: visible; -} - -.markdown-body h1 { - padding-bottom: 0.3em; - font-size: 2em; - border-bottom: 1px solid #eaecef; -} - -.markdown-body h2 { - padding-bottom: 0.3em; - font-size: 1.5em; - border-bottom: 1px solid #eaecef; -} - -.markdown-body 
h3 { - font-size: 1.25em; -} - -.markdown-body h4 { - font-size: 1em; -} - -.markdown-body h5 { - font-size: 0.875em; -} - -.markdown-body h6 { - font-size: 0.85em; - color: #6a737d; -} - -.markdown-body ul, -.markdown-body ol { - padding-left: 2em; -} - -.markdown-body ul ul, -.markdown-body ul ol, -.markdown-body ol ol, -.markdown-body ol ul { - margin-top: 0; - margin-bottom: 0; -} - -.markdown-body li { - word-wrap: break-all; -} - -.markdown-body li>p { - margin-top: 16px; -} - -.markdown-body li+li { - margin-top: 0.25em; -} - -.markdown-body dl { - padding: 0; -} - -.markdown-body dl dt { - padding: 0; - margin-top: 16px; - font-size: 1em; - font-style: italic; - font-weight: 600; -} - -.markdown-body dl dd { - padding: 0 16px; - margin-bottom: 16px; -} - -.markdown-body table { - display: block; - width: 100%; - overflow: auto; -} - -.markdown-body table th { - font-weight: 600; -} - -.markdown-body table th, -.markdown-body table td { - padding: 6px 13px; - border: 1px solid #dfe2e5; -} - -.markdown-body table tr { - background-color: #fff; - border-top: 1px solid #c6cbd1; -} - -.markdown-body table tr:nth-child(2n) { - background-color: #f6f8fa; -} - -.markdown-body img { - max-width: 100%; - box-sizing: content-box; - background-color: #fff; -} - -.markdown-body img[align=right] { - padding-left: 20px; -} - -.markdown-body img[align=left] { - padding-right: 20px; -} - -.markdown-body code { - padding: 0.2em 0.4em; - margin: 0; - font-size: 85%; - background-color: rgba(27,31,35,0.05); - border-radius: 3px; -} - -.markdown-body pre { - word-wrap: normal; -} - -.markdown-body pre>code { - padding: 0; - margin: 0; - font-size: 100%; - word-break: normal; - white-space: pre; - background: transparent; - border: 0; -} - -.markdown-body .highlight { - margin-bottom: 16px; -} - -.markdown-body .highlight pre { - margin-bottom: 0; - word-break: normal; -} - -.markdown-body .highlight pre, -.markdown-body pre { - padding: 16px; - overflow: auto; - font-size: 85%; - line-height: 1.45; - background-color: #f6f8fa; - border-radius: 3px; -} - -.markdown-body pre code { - display: inline; - max-width: auto; - padding: 0; - margin: 0; - overflow: visible; - line-height: inherit; - word-wrap: normal; - background-color: transparent; - border: 0; -} - -.markdown-body .full-commit .btn-outline:not(:disabled):hover { - color: #005cc5; - border-color: #005cc5; -} - -.markdown-body kbd { - display: inline-block; - padding: 3px 5px; - font: 11px "SFMono-Regular", Consolas, "Liberation Mono", Menlo, Courier, monospace; - line-height: 10px; - color: #444d56; - vertical-align: middle; - background-color: #fafbfc; - border: solid 1px #d1d5da; - border-bottom-color: #c6cbd1; - border-radius: 3px; - box-shadow: inset 0 -1px 0 #c6cbd1; -} - -.markdown-body :checked+.radio-label { - position: relative; - z-index: 1; - border-color: #0366d6; -} - -.markdown-body .task-list-item { - list-style-type: none; -} - -.markdown-body .task-list-item+.task-list-item { - margin-top: 3px; -} - -.markdown-body .task-list-item input { - margin: 0 0.2em 0.25em -1.6em; - vertical-align: middle; -} - -.markdown-body hr { - border-bottom-color: #eee; -} - -.command { - font-family: "SFMono-Regular", Consolas, "Liberation Mono", Menlo, Courier, - monospace; - font-size: 16px; - padding-left: 40px; - -} - -.command h1 { - border: none; - margin-left: -40px; -} - -.command h2 { - border: none; - margin-top: 2em; - margin-left: -40px; - font-size: 18px; -} - -.command>ul { - padding-left: 0; -} - -.command ul { - list-style-type: 
none; -} - -.command table { - margin: 2em 0 1em; - display: block; - width: 100%; - overflow: auto; - border-collapse: collapse; -} - -.command table th { - font-weight: 600; -} - -.command table th, -.command table td { - padding: 6px 13px; - border: 1px solid #dfe2e5; -} - -.command table tr { - background-color: #fff; - border-top: 1px solid #c6cbd1; -} - -.command table tr:nth-child(2n) { - background-color: #f6f8fa; -} - - -` diff --git a/vendor/go.step.sm/cli-utils/usage/help.go b/vendor/go.step.sm/cli-utils/usage/help.go deleted file mode 100644 index 86962105..00000000 --- a/vendor/go.step.sm/cli-utils/usage/help.go +++ /dev/null @@ -1,193 +0,0 @@ -package usage - -import ( - "fmt" - "strings" - - "github.com/urfave/cli" -) - -// HelpCommandAction is the action function of the overwritten help command. -var HelpCommandAction = cli.ActionFunc(helpAction) - -// HelpCommand overwrites default urfvafe/cli help command to support one or -// multiple subcommands like: -// step help -// step help crypto -// step help crypto jwt -// step help crypto jwt sign -// ... -func HelpCommand() cli.Command { - return cli.Command{ - Name: "help", - Aliases: []string{"h"}, - Usage: "display help for the specified command or command group", - UsageText: "**step help** ", - Description: `**step help** command displays help for a command or command group. - -## EXAMPLES - -Display help for **step ca certificate**: -''' -$ step help ca certificate -''' - -Display help for **step ssh**: -''' -$ step help ssh -'''`, - Action: HelpCommandAction, - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "http", - Usage: "HTTP service address (e.g., ':8080')", - }, - cli.StringFlag{ - Name: "html", - Usage: "The export for HTML docs.", - }, - cli.StringFlag{ - Name: "markdown", - Usage: "The export for Markdown docs.", - }, - cli.BoolFlag{ - Name: "report", - Usage: "Writes a JSON report to the HTML docs directory.", - }, - cli.BoolFlag{ - Name: "hugo", - Usage: "Writes hugo (vs jekyll) compatible markdown files", - }, - }, - } -} - -func helpAction(ctx *cli.Context) error { - // use html version - if ctx.IsSet("http") { - return httpHelpAction(ctx) - } - - if ctx.IsSet("html") { - return htmlHelpAction(ctx) - } - - if ctx.IsSet("markdown") { - return markdownHelpAction(ctx) - } - - args := ctx.Args() - if args.Present() { - last := len(args) - 1 - lastName := args[last] - subcmd := ctx.App.Commands - parent := createParentCommand(ctx) - - for _, name := range args[:last] { - for _, cmd := range subcmd { - if cmd.HasName(name) { - parent = cmd - subcmd = cmd.Subcommands - break - } - } - } - - for _, cmd := range subcmd { - if cmd.HasName(lastName) { - cmd.HelpName = fmt.Sprintf("%s %s", ctx.App.HelpName, strings.Join(args, " ")) - parent.HelpName = fmt.Sprintf("%s %s", ctx.App.HelpName, strings.Join(args[:last], " ")) - - ctx.Command = cmd - if len(cmd.Subcommands) == 0 { - ctx.App = createCliApp(ctx, parent) - return cli.ShowCommandHelp(ctx, lastName) - } - - ctx.App = createCliApp(ctx, cmd) - return cli.ShowCommandHelp(ctx, "") - } - } - - return cli.NewExitError(fmt.Sprintf("No help topic for '%s %s'", ctx.App.Name, strings.Join(args, " ")), 3) - } - - cli.ShowAppHelp(ctx) - return nil -} - -// createParentCommand returns a command representation of the app. 
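// Illustrative sketch (hypothetical wiring, not part of this diff): the
// removed HelpCommand is meant to be mounted as an ordinary urfave/cli v1
// command, so that "step help foo bar" resolves nested subcommands the way
// helpAction does above.
package main

import (
	"log"
	"os"

	"github.com/urfave/cli"
	"go.step.sm/cli-utils/usage"
)

func main() {
	app := cli.NewApp()
	app.Name = "step"
	app.Commands = []cli.Command{usage.HelpCommand()}
	// Route help rendering through the markdown-aware printer from this package.
	cli.HelpPrinter = usage.HelpPrinter
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}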
-func createParentCommand(ctx *cli.Context) cli.Command { - return cli.Command{ - Name: ctx.App.Name, - HelpName: ctx.App.HelpName, - Usage: ctx.App.Usage, - UsageText: ctx.App.UsageText, - ArgsUsage: ctx.App.ArgsUsage, - Description: ctx.App.Description, - Subcommands: ctx.App.Commands, - Flags: ctx.App.Flags, - } -} - -// createCliApp is re-implementation of urfave/cli method (in command.go): -// -// func (c Command) startApp(ctx *Context) error -// -// It lets us show the subcommands when help is executed like: -// -// step help foo -// step help foo bar -// ... -func createCliApp(ctx *cli.Context, cmd cli.Command) *cli.App { - app := cli.NewApp() - app.Metadata = ctx.App.Metadata - - // set the name and usage - app.Name = cmd.HelpName - app.HelpName = cmd.HelpName - - app.Usage = cmd.Usage - app.UsageText = cmd.UsageText - app.Description = cmd.Description - app.ArgsUsage = cmd.ArgsUsage - - // set CommandNotFound - app.CommandNotFound = ctx.App.CommandNotFound - app.CustomAppHelpTemplate = cmd.CustomHelpTemplate - - // set the flags and commands - app.Commands = cmd.Subcommands - app.Flags = cmd.Flags - - app.Version = ctx.App.Version - app.Compiled = ctx.App.Compiled - app.Author = ctx.App.Author - app.Email = ctx.App.Email - app.Writer = ctx.App.Writer - app.ErrWriter = ctx.App.ErrWriter - - // Do not show help or version on subcommands - app.HideHelp = true - app.HideVersion = true - - // bash completion - app.EnableBashCompletion = ctx.App.EnableBashCompletion - if cmd.BashComplete != nil { - app.BashComplete = cmd.BashComplete - } - - // set the actions - app.Before = cmd.Before - app.After = cmd.After - - if cmd.Action != nil { - app.Action = cmd.Action - } else { - app.Action = helpAction - } - app.OnUsageError = cmd.OnUsageError - - app.Setup() - return app -} diff --git a/vendor/go.step.sm/cli-utils/usage/html.go b/vendor/go.step.sm/cli-utils/usage/html.go deleted file mode 100644 index 21e532ea..00000000 --- a/vendor/go.step.sm/cli-utils/usage/html.go +++ /dev/null @@ -1,352 +0,0 @@ -package usage - -import ( - "fmt" - "io/ioutil" - "net/http" - "os" - "path" - "strings" - - "github.com/urfave/cli" - "go.step.sm/cli-utils/errs" -) - -func httpHelpAction(ctx *cli.Context) error { - addr := ctx.String("http") - if addr == "" { - return errs.RequiredFlag(ctx, "http") - } - - fmt.Printf("Serving HTTP on %s ...\n", addr) - return http.ListenAndServe(addr, &htmlHelpHandler{ - cliApp: ctx.App, - }) -} - -func markdownHelpAction(ctx *cli.Context) error { - dir := path.Clean(ctx.String("markdown")) - if err := os.MkdirAll(dir, 0755); err != nil { - return errs.FileError(err, dir) - } - - isHugo := ctx.Bool("hugo") - - // app index - index := path.Join(dir, "index.md") - w, err := os.Create(index) - if err != nil { - return errs.FileError(err, index) - } - markdownHelpPrinter(w, mdAppHelpTemplate, "", ctx.App) - if err := w.Close(); err != nil { - return errs.FileError(err, index) - } - - // Subcommands - for _, cmd := range ctx.App.Commands { - if err := markdownHelpCommand(ctx.App, cmd, cmd, path.Join(dir, cmd.Name), isHugo); err != nil { - return err - } - } - return nil -} - -func markdownHelpCommand(app *cli.App, cmd cli.Command, parent cli.Command, base string, isHugo bool) error { - if err := os.MkdirAll(base, 0755); err != nil { - return errs.FileError(err, base) - } - - fileName := "index.md" - // preserve jekyll compatibility for transition period - if isHugo && len(cmd.Subcommands) > 0 { - fileName = "_index.md" - } - - index := path.Join(base, fileName) - w, err := 
os.Create(index) - if err != nil { - return errs.FileError(err, index) - } - - parentName := parent.HelpName - if cmd.HelpName == parent.HelpName { - parentName = "step" - } - - if len(cmd.Subcommands) == 0 { - markdownHelpPrinter(w, mdCommandHelpTemplate, parentName, cmd) - return errs.FileError(w.Close(), index) - } - - ctx := cli.NewContext(app, nil, nil) - ctx.App = createCliApp(ctx, cmd) - markdownHelpPrinter(w, mdSubcommandHelpTemplate, parentName, ctx.App) - if err := w.Close(); err != nil { - return errs.FileError(err, index) - } - - for _, sub := range cmd.Subcommands { - sub.HelpName = fmt.Sprintf("%s %s", cmd.HelpName, sub.Name) - if err := markdownHelpCommand(app, sub, cmd, path.Join(base, sub.Name), isHugo); err != nil { - return err - } - } - - return nil -} - -func htmlHelpAction(ctx *cli.Context) error { - dir := path.Clean(ctx.String("html")) - - if err := os.MkdirAll(dir, 0755); err != nil { - return errs.FileError(err, dir) - } - - // app index - index := path.Join(dir, "index.html") - w, err := os.Create(index) - if err != nil { - return errs.FileError(err, index) - } - - tophelp := htmlHelpPrinter(w, mdAppHelpTemplate, ctx.App) - var report *Report - if ctx.IsSet("report") { - report = NewReport(ctx.App.Name, tophelp) - } - - if err := w.Close(); err != nil { - return errs.FileError(err, index) - } - - // css style - cssFile := path.Join(dir, "style.css") - if err := ioutil.WriteFile(cssFile, []byte(css), 0666); err != nil { - return errs.FileError(err, cssFile) - } - - // Subcommands - for _, cmd := range ctx.App.Commands { - if err := htmlHelpCommand(ctx.App, cmd, path.Join(dir, cmd.Name), report); err != nil { - return err - } - } - - // report - if report != nil { - repjson := path.Join(dir, "report.json") - rjw, err := os.Create(repjson) - if err != nil { - return errs.FileError(err, repjson) - } - - if err := report.Write(rjw); err != nil { - return err - } - - if err := rjw.Close(); err != nil { - return errs.FileError(err, repjson) - } - } - - return nil -} - -func htmlHelpCommand(app *cli.App, cmd cli.Command, base string, report *Report) error { - if err := os.MkdirAll(base, 0755); err != nil { - return errs.FileError(err, base) - } - - index := path.Join(base, "index.html") - w, err := os.Create(index) - if err != nil { - return errs.FileError(err, index) - } - - if len(cmd.Subcommands) == 0 { - cmdhelp := htmlHelpPrinter(w, mdCommandHelpTemplate, cmd) - - if report != nil { - report.Process(cmd.HelpName, cmdhelp) - } - - return errs.FileError(w.Close(), index) - } - - ctx := cli.NewContext(app, nil, nil) - ctx.App = createCliApp(ctx, cmd) - subhelp := htmlHelpPrinter(w, mdSubcommandHelpTemplate, ctx.App) - - if report != nil { - report.Process(cmd.HelpName, subhelp) - } - - if err := w.Close(); err != nil { - return errs.FileError(err, index) - } - - for _, sub := range cmd.Subcommands { - sub.HelpName = fmt.Sprintf("%s %s", cmd.HelpName, sub.Name) - if err := htmlHelpCommand(app, sub, path.Join(base, sub.Name), report); err != nil { - return err - } - } - - return nil -} - -type htmlHelpHandler struct { - cliApp *cli.App -} - -func (h *htmlHelpHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { - ctx := cli.NewContext(h.cliApp, nil, nil) - - // clean request URI - requestURI := path.Clean(req.RequestURI) - if requestURI == "/" { - htmlHelpPrinter(w, mdAppHelpTemplate, ctx.App) - return - } - - if requestURI == "/style.css" { - w.Header().Set("Content-Type", `text/css; charset="utf-8"`) - w.Write([]byte(css)) - return - } - - args := 
strings.Split(requestURI, "/") - last := len(args) - 1 - lastName := args[last] - subcmd := ctx.App.Commands - parent := createParentCommand(ctx) - for _, name := range args[:last] { - for _, cmd := range subcmd { - if cmd.HasName(name) { - parent = cmd - subcmd = cmd.Subcommands - break - } - } - } - - for _, cmd := range subcmd { - if cmd.HasName(lastName) { - cmd.HelpName = fmt.Sprintf("%s %s", ctx.App.HelpName, strings.Join(args, " ")) - parent.HelpName = fmt.Sprintf("%s %s", ctx.App.HelpName, strings.Join(args[:last], " ")) - - ctx.Command = cmd - if len(cmd.Subcommands) == 0 { - htmlHelpPrinter(w, mdCommandHelpTemplate, cmd) - return - } - - ctx.App = createCliApp(ctx, cmd) - htmlHelpPrinter(w, mdSubcommandHelpTemplate, ctx.App) - return - } - } - - http.NotFound(w, req) -} - -// AppHelpTemplate contains the modified template for the main app -var mdAppHelpTemplate = `## NAME -**{{.HelpName}}** -- {{.Usage}} - -## USAGE - -'''raw -{{if .UsageText}}{{.UsageText}}{{else}}**{{.HelpName}}**{{if .Commands}} {{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments]{{end}}{{end}} -''' -{{- if .Description}} - -## DESCRIPTION -{{.Description}}{{end}}{{if .VisibleCommands}} - -## COMMANDS - -{{range .VisibleCategories}}{{if .Name}}{{.Name}}:{{end}} -| Name | Usage | -|---|---|{{range .VisibleCommands}} -| **[{{join .Names ", "}}]({{.Name}}/)** | {{.Usage}} |{{end}} -{{end}}{{if .VisibleFlags}}{{end}} - -## OPTIONS - -{{range $index, $option := .VisibleFlags}}{{if $index}} -{{end}}{{$option}} -{{end}}{{end}}{{if .Copyright}}{{if len .Authors}} - -## AUTHOR{{with $length := len .Authors}}{{if ne 1 $length}}S{{end}}{{end}}: - -{{range $index, $author := .Authors}}{{if $index}} -{{end}}{{$author}}{{end}}{{end}}{{if .Version}}{{if not .HideVersion}} - -## VERSION - -{{.Version}}{{end}}{{end}} - -## COPYRIGHT - -{{.Copyright}} -{{end}} -` - -// SubcommandHelpTemplate contains the modified template for a sub command -// Note that the weird "|||\n|---|---|" syntax sets up a markdown table with empty headers. 
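// Illustrative sketch of the header-less table trick these templates rely on
// (command names made up): a "|||" row followed by "|---|---|" produces a
// markdown table with empty header cells, which the terminal renderer defined
// later in this diff drops entirely.
package main

import (
	"os"
	"text/template"
)

const tbl = `|||
|---|---|{{range .}}
| **{{.Name}}** | {{.Usage}} |{{end}}
`

func main() {
	cmds := []struct{ Name, Usage string }{
		{"certificate", "issue a certificate"},
		{"ssh", "SSH helpers"},
	}
	// Prints the ||| header, the |---|---| divider, then one row per command.
	template.Must(template.New("tbl").Parse(tbl)).Execute(os.Stdout, cmds)
}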
-var mdSubcommandHelpTemplate = `## NAME -**{{.HelpName}}** -- {{.Usage}} - -## USAGE - -'''raw -{{if .UsageText}}{{.UsageText}}{{else}}**{{.HelpName}}** {{if .VisibleFlags}} [options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments]{{end}}{{end}} -''' -{{- if .Description}} - -## DESCRIPTION - -{{.Description}}{{end}} - -## COMMANDS - -{{range .VisibleCategories}}{{if .Name}}{{.Name}}:{{end}} -| Name | Usage | -|---|---|{{range .VisibleCommands}} -| **[{{join .Names ", "}}]({{.Name}}/)** | {{.Usage}} |{{end}} -{{end}}{{if .VisibleFlags}} - -## OPTIONS - -{{range .VisibleFlags}} -{{.}} -{{end}}{{end}} -` - -// CommandHelpTemplate contains the modified template for a command -var mdCommandHelpTemplate = `## NAME -**{{.HelpName}}** -- {{.Usage}} - -## USAGE - -'''raw -{{if .UsageText}}{{.UsageText}}{{else}}**{{.HelpName}}**{{if .VisibleFlags}} [options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments]{{end}}{{end}} -''' -{{- if .Category}} - -## CATEGORY - -{{.Category}}{{end}}{{if .Description}} - -## DESCRIPTION - -{{.Description}}{{end}}{{if .VisibleFlags}} - -## OPTIONS - -{{range .VisibleFlags}} -{{.}} -{{end}}{{end}} -` diff --git a/vendor/go.step.sm/cli-utils/usage/printer.go b/vendor/go.step.sm/cli-utils/usage/printer.go deleted file mode 100644 index edd75238..00000000 --- a/vendor/go.step.sm/cli-utils/usage/printer.go +++ /dev/null @@ -1,233 +0,0 @@ -package usage - -import ( - "bytes" - "fmt" - "io" - "regexp" - "strings" - "text/template" - "unicode" - - "github.com/urfave/cli" - md "go.step.sm/cli-utils/pkg/blackfriday" -) - -var sectionRe = regexp.MustCompile(`(?m:^##)`) -var sectionNameRe = regexp.MustCompile(`(?m:^## [^\n]+)`) -var indentRe = regexp.MustCompile(`(?m:^:[^\n]+)`) -var definitionListRe = regexp.MustCompile(`(?m:^[\t ]+\*\*[^\*]+\*\*[^\n]*\s+:[^\n]+)`) - -//var sectionRe = regexp.MustCompile(`^## [^\n]*$`) - -type frontmatterData struct { - Data interface{} - Parent string - Children []string -} - -// HelpPrinter overwrites cli.HelpPrinter and prints the formatted help to the terminal. -func HelpPrinter(w io.Writer, templ string, data interface{}) { - b := helpPreprocessor(w, templ, data, false) - w.Write(Render(b)) -} - -func htmlHelpPrinter(w io.Writer, templ string, data interface{}) []byte { - b := helpPreprocessor(w, templ, data, true) - w.Write([]byte(`step command line documentation`)) - w.Write([]byte(``)) - w.Write([]byte(`

</head><body class="markdown-body">`)) - html := md.Run(b) - w.Write(html) - w.Write([]byte(`</body></html>
    `)) - - return html -} - -func markdownHelpPrinter(w io.Writer, templ string, parent string, data interface{}) { - b := helpPreprocessor(w, templ, data, true) - - frontmatter := frontmatterData{ - Data: data, - Parent: parent, - } - - if app, ok := data.(*cli.App); ok { - for _, cmd := range app.Commands { - frontmatter.Children = append(frontmatter.Children, cmd.Name) - } - } - - var frontMatterTemplate = `--- -layout: auto-doc -title: {{.Data.HelpName}} -menu: - docs: -{{- if .Parent}} - parent: {{.Parent}} -{{- end }} -{{- if .Children }} - children: -{{- range .Children }} - - {{.}} -{{- end }} -{{- end }} ---- - -` - t, err := template.New("frontmatter").Parse(frontMatterTemplate) - if err != nil { - panic(err) - } - err = t.Execute(w, frontmatter) - if err != nil { - panic(err) - } - w.Write(b) -} - -func helpPreprocessor(w io.Writer, templ string, data interface{}, applyRx bool) []byte { - buf := new(bytes.Buffer) - cli.HelpPrinterCustom(buf, templ, data, nil) - //w.Write(buf.Bytes()) - // s := string(markdownify(buf.Bytes())) - s := markdownify(buf) - // Move the OPTIONS section to the right place. urfave puts them at the end - // of the file, we want them to be after POSITIONAL ARGUMENTS, DESCRIPTION, - // USAGE, or NAME (in that order, depending on which sections exist). - optLoc := strings.Index(s, "## OPTIONS") - if optLoc != -1 { - optEnd := findSectionEnd("OPTIONS", s) - if optEnd != -1 { - options := s[optLoc:optEnd] - s = s[:optLoc] + s[optEnd:] - if newLoc := findSectionEnd("POSITIONAL ARGUMENTS", s); newLoc != -1 { - s = s[:newLoc] + options + s[newLoc:] - } else if newLoc := findSectionEnd("DESCRIPTION", s); newLoc != -1 { - s = s[:newLoc] + options + s[newLoc:] - } else if newLoc := findSectionEnd("USAGE", s); newLoc != -1 { - s = s[:newLoc] + options + s[newLoc:] - } else if newLoc := findSectionEnd("NAME", s); newLoc != -1 { - s = s[:newLoc] + options + s[newLoc:] - } else { - // Keep it at the end I guess :/. - s = s + options - } - } - } - - if applyRx { - // Keep capitalized only the first letter in arguments names. - s = sectionNameRe.ReplaceAllStringFunc(s, func(s string) string { - return s[0:4] + strings.ToLower(s[4:]) - }) - // Remove `:` at the start of a line. - s = indentRe.ReplaceAllStringFunc(s, func(s string) string { - return strings.TrimSpace(s[1:]) - }) - // Convert lines like: - // **Foo** - // : Bar zar ... - // To: - // - **Foo**: Bar zar ... 
- s = definitionListRe.ReplaceAllStringFunc(s, func(s string) string { - i := strings.Index(s, "\n") - j := strings.Index(s, ":") - return "- " + strings.TrimSpace(s[:i]) + ": " + strings.TrimSpace(s[j+1:]) - }) - } - - return []byte(s) -} - -func findSectionEnd(h, s string) int { - start := strings.Index(s, fmt.Sprintf("## %s", h)) - if start == -1 { - return start - } - nextSection := sectionRe.FindStringIndex(s[start+2:]) - if nextSection == nil { - return len(s) - } - return start + 2 + nextSection[0] -} - -// Convert some stuff that we can't easily write in help files because -// backticks and raw strings don't mix: -// - "" to "`foo`" -// - "'''" to "```" -func markdownify(r *bytes.Buffer) string { - const escapeByte = byte('\\') - var last byte - var inCode bool - - w := new(bytes.Buffer) - for { - b, err := r.ReadByte() - if err != nil { - return w.String() - } - loop: - switch b { - case '<': - if last != escapeByte && !inCode { - w.WriteByte('`') - } else { - w.WriteByte(b) - } - case '>': - if last != escapeByte && !inCode { - w.WriteByte('`') - } else { - w.WriteByte(b) - } - case '\'': - b1, _ := r.ReadByte() - b2, _ := r.ReadByte() - if b1 == b && b2 == b { - w.WriteString("```") - if !inCode { - if n, _, err := r.ReadRune(); err == nil { - if unicode.IsSpace(n) { - w.WriteString("shell") - } - r.UnreadRune() - } - } - inCode = !inCode - } else { - // We can only unread the last one (b2) - w.WriteByte(b) - r.UnreadByte() - b = b1 - last = b - goto loop - } - case '*': - if inCode { - if b1, _ := r.ReadByte(); b1 != '*' { - w.WriteByte(b) - w.UnreadByte() - } - } else { - w.WriteByte(b) - } - case escapeByte: - if last == escapeByte { - w.WriteByte(escapeByte) - b = 0 - } else { - if n, _, err := r.ReadRune(); err == nil { - if unicode.IsSpace(n) { - w.WriteByte(escapeByte) - } - r.UnreadRune() - } - } - case 0: // probably because io.EOF - default: - w.WriteByte(b) - } - last = b - } -} diff --git a/vendor/go.step.sm/cli-utils/usage/renderer.go b/vendor/go.step.sm/cli-utils/usage/renderer.go deleted file mode 100644 index dafff73c..00000000 --- a/vendor/go.step.sm/cli-utils/usage/renderer.go +++ /dev/null @@ -1,371 +0,0 @@ -package usage - -import ( - "bufio" - "bytes" - "fmt" - "io" - "regexp" - "strings" - "text/tabwriter" - "unicode" - - "github.com/samfoo/ansi" - md "go.step.sm/cli-utils/pkg/blackfriday" -) - -// Render renders the given data with a custom markdown renderer. -func Render(b []byte) []byte { - return md.Run(b, md.WithRenderer(&Renderer{6, 0, nil, nil, false})) -} - -var colorEscapeRe = regexp.MustCompile(`\033\[\d*(;\d*)?m?\]?`) -var maxLineLength = 80 - -func stripColors(b []byte) []byte { - return colorEscapeRe.ReplaceAll(b, []byte("")) -} - -type item struct { - flags md.ListType - term []byte - definitions [][]byte -} - -type list struct { - items []item - flags md.ListType - parent *list -} - -/* TODO: commented because unused -func (l *list) isUnordered() bool { - return !l.isOrdered() && !l.isDefinition() -} - -func (l *list) isOrdered() bool { - return l.flags&md.ListTypeOrdered != 0 -} - -func (l *list) containsBlock() bool { - // TODO: Not sure if we have to check every item or if it gets - // automatically set on the list? - return l.flags&md.ListItemContainsBlock != 0 -} -*/ - -func (l *list) isDefinition() bool { - return l.flags&md.ListTypeDefinition != 0 -} - -type bufqueue struct { - w io.Writer - buf *bytes.Buffer - next *bufqueue - mode RenderMode -} - -// RenderMode enumerates different line breaks modes. 
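// Illustrative sketch of driving the renderer defined below (the sample
// document is made up): Render produces ANSI-colored terminal output,
// wrapping prose at 80 columns in RenderModeBreakLines while code blocks and
// tables keep their original line breaks via RenderModeKeepBreaks.
package main

import (
	"os"

	"go.step.sm/cli-utils/usage"
)

func main() {
	doc := []byte("## NAME\n**step** -- a toy example\n\nA paragraph that will be wrapped to the terminal width.\n")
	os.Stdout.Write(usage.Render(doc)) // bold heading, wrapped paragraph
}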
-type RenderMode int - -const ( - // RenderModeKeepBreaks will keep the line breaks in the docs. - RenderModeKeepBreaks RenderMode = iota - // RenderModeBreakLines will automatically wrap the lines. - RenderModeBreakLines -) - -// Renderer implements a custom markdown renderer for blackfriday. -type Renderer struct { - depth int - listdepth int - list *list - out *bufqueue - inpara bool -} - -func (r *Renderer) write(b []byte) { - r.out.w.Write(b) -} - -func (r *Renderer) printf(s string, a ...interface{}) { - fmt.Fprintf(r.out.w, s, a...) -} - -func (r *Renderer) capture(mode RenderMode) { - buf := new(bytes.Buffer) - r.out = &bufqueue{buf, buf, r.out, mode} -} - -func (r *Renderer) finishCapture() *bytes.Buffer { - buf := r.out.buf - r.out = r.out.next - return buf -} - -func (r *Renderer) inParagraph() bool { - return r.inpara -} - -/* TODO: commented because unused -func (r *Renderer) inList() bool { - return r.list != nil -} -*/ - -func (r *Renderer) renderParagraphKeepBreaks(buf *bytes.Buffer) { - scanner := bufio.NewScanner(buf) - for scanner.Scan() { - r.printf(strings.Repeat(" ", r.depth)+"%s\n", scanner.Text()) - } -} - -func (r *Renderer) renderParagraphBreakLines(buf *bytes.Buffer, maxlen int) { - maxlen = maxlen - r.depth - scanner := bufio.NewScanner(buf) - scanner.Split(bufio.ScanWords) - line := []string{} - length := 0 - for scanner.Scan() { - word := scanner.Text() - wordLength := len(stripColors([]byte(word))) - // Print the line if we've got a collection of words over 80 characters, or if - // we have a single word that is over 80 characters on an otherwise empty line. - if length+wordLength > maxlen { - r.printf(strings.Repeat(" ", r.depth)+"%s\n", strings.Join(line, " ")) - line = []string{word} - length = wordLength - } else if length == 0 && wordLength > maxlen { - r.printf(strings.Repeat(" ", r.depth)+"%s\n", word) - } else { - line = append(line, word) - length += wordLength + 1 // Plus one for space - } - } - if len(line) > 0 { - r.printf(strings.Repeat(" ", r.depth)+"%s\n", strings.Join(line, " ")) - } -} - -func (r *Renderer) renderParagraph(buf *bytes.Buffer) { - switch r.out.mode { - case RenderModeKeepBreaks: - r.renderParagraphKeepBreaks(buf) - case RenderModeBreakLines: - r.renderParagraphBreakLines(buf, maxLineLength) - } -} - -// RenderNode implements blackfriday.Renderer interface. -func (r *Renderer) RenderNode(w io.Writer, node *md.Node, entering bool) md.WalkStatus { - if r.out == nil { - r.out = &bufqueue{w, nil, nil, RenderModeBreakLines} - } - - switch node.Type { - case md.Paragraph: - // Alternative idea here: call r.RenderNode() with our new buffer as - // `w`. In the `else` condition here render to the outter buffer and - // always return md.Terminate. So when we enter a paragraph we start - // parsing with a new output buffer and capture the output. - if entering { - if r.inParagraph() { - panic("already in paragraph") - } - r.inpara = true - //r.printf(out, "[paragraph:") - r.capture(r.out.mode) - } else { - r.renderParagraph(r.finishCapture()) - // Write a newline unless the parent node is a definition list term. - if node.Parent.Type != md.Item || node.Parent.ListFlags&md.ListTypeTerm == 0 { - r.printf("\n") - } - r.inpara = false - //r.printf(w, ":paragraph]") - } - case md.Text: - // TODO: is this necessary? I think all text is in a paragraph. 
- if r.inParagraph() { - r.write(node.Literal) - } else { - s := strings.Replace(string(node.Literal), "\n", "\n"+strings.Repeat(" ", r.depth), -1) - r.printf(s) - } - case md.Heading: - if entering { - r.printf(ansi.ColorCode("default+bh")) - } else { - r.printf(ansi.Reset) - r.printf("\n") - } - case md.Link: - if entering { - r.printf(ansi.ColorCode("default+b")) - //r.printf("\033[2m") // Dim - } else { - r.printf(ansi.Reset) - } - case md.Strong: - if entering { - r.printf(ansi.ColorCode("default+bh")) - } else { - r.printf(ansi.Reset) - } - case md.Emph: - if entering { - r.printf(ansi.ColorCode("default+u")) - } else { - r.printf(ansi.Reset) - } - case md.Code: - r.printf(ansi.ColorCode("default+u")) - r.write(node.Literal) - r.printf(ansi.Reset) - case md.List: - if entering { - r.listdepth++ - r.list = &list{[]item{}, node.ListFlags, r.list} - //r.printf("[list (type %s:", node.ListData.ListFlags) - } else { - if r.listdepth > 1 && r.list.isDefinition() { - w := new(tabwriter.Writer) - w.Init(r.out.w, 0, 8, 4, ' ', tabwriter.StripEscape) - for _, item := range r.list.items { - fmt.Fprint(w, strings.TrimRight(string(item.term), " \n")) - fmt.Fprint(w, "\n") - for _, def := range item.definitions { - fmt.Fprint(w, strings.TrimRight(string(def), " \n")) - } - fmt.Fprintf(w, "\n\n") - } - w.Flush() - } else { - ordered := (node.ListFlags&md.ListTypeOrdered != 0) - unordered := (node.ListFlags&md.ListTypeOrdered == 0 && node.ListFlags&md.ListTypeDefinition == 0) - for i, item := range r.list.items { - if ordered || unordered { - p := bytes.IndexFunc(item.term, func(r rune) bool { return !unicode.IsSpace(r) }) - switch { - case ordered: // add numbers on ordered lists - item.term = append(item.term[:p], append([]byte(fmt.Sprintf("%d. ", i+1)), item.term[p:]...)...) - case unordered: // add bullet points on unordered lists - item.term = append(item.term[:p], append([]byte("• "), item.term[p:]...)...) - } - } - - r.write(item.term) - for _, def := range item.definitions { - r.write(def) - } - } - } - r.listdepth-- - r.list = r.list.parent - //r.printf(":list]") - } - case md.Item: - incdepth := 4 - //ltype := "normal" - if node.ListFlags&md.ListTypeTerm != 0 { - // Nested definition list terms get indented two spaces. Non-nested - // definition list terms are not indented. - if r.listdepth > 1 { - incdepth = 2 - } else { - incdepth = 0 - } - //ltype = "dt" - } else if node.ListFlags&md.ListTypeDefinition != 0 { - incdepth = 4 - //ltype = "dd" - } - - if entering { - //fmt.Fprintf(out, "[list item %s:", ltype) - r.depth += incdepth - if r.listdepth > 1 && r.list.isDefinition() { - r.capture(RenderModeKeepBreaks) - } else { - r.capture(RenderModeBreakLines) - } - if !r.list.isDefinition() || node.ListFlags&md.ListTypeTerm != 0 { - r.list.items = append(r.list.items, item{node.ListFlags, nil, nil}) - } - } else { - //fmt.Fprintf(out, ":list item]") - r.depth -= incdepth - buf := r.finishCapture() - if r.list.isDefinition() && node.ListFlags&md.ListTypeTerm == 0 { - i := len(r.list.items) - 1 - r.list.items[i].definitions = append(r.list.items[i].definitions, buf.Bytes()) - } else { - r.list.items[len(r.list.items)-1].term = buf.Bytes() - } - } - case md.Table: - if entering { - r.capture(RenderModeKeepBreaks) - w := new(tabwriter.Writer) - w.Init(r.out.w, 1, 8, 2, ' ', tabwriter.StripEscape) - r.out.w = w - } else { - r.out.w.(*tabwriter.Writer).Flush() - buf := r.finishCapture() - r.renderParagraphKeepBreaks(buf) - r.printf("\n") - } - case md.TableBody: - // Do nothing. 
- case md.TableHead: - if entering { - r.capture(r.out.mode) - } else { - // Markdown doens't have a way to create a table without headers. - // We've opted to fix that here by not rendering headers at all if - // they're empty. - result := r.finishCapture().Bytes() - if strings.TrimSpace(string(stripColors(result))) != "" { - parts := strings.Split(strings.TrimRight(string(result), "\t\n"), "\t") - for i := 0; i < len(parts); i++ { - parts[i] = "\xff" + ansi.ColorCode("default+bh") + "\xff" + parts[i] + "\xff" + ansi.Reset + "\xff" - } - r.printf(strings.Join(parts, "\t") + "\t\n") - } - } - case md.TableRow: - if entering { - r.capture(r.out.mode) - } else { - // Escape any colors in the row before writing to the - // tabwriter, otherwise they screw up the width calculations. The - // escape character for tabwriter is \xff. - result := r.finishCapture().Bytes() - result = colorEscapeRe.ReplaceAll(result, []byte("\xff$0\xff")) - r.write(result) - r.printf("\n") - } - case md.TableCell: - if !entering { - r.printf("\t") - } - case md.CodeBlock: - r.depth += 4 - r.renderParagraphKeepBreaks(bytes.NewBuffer(node.Literal)) - r.printf("\n") - r.depth -= 4 - case md.Document: - default: - r.printf("unknown block %s:", node.Type) - r.write(node.Literal) - } - //w.Write([]byte(fmt.Sprintf("node<%s; %t>", node.Type, entering))) - //w.Write(node.Literal) - return md.GoToNext -} - -// RenderHeader implements blackfriday.Renderer interface. -func (r *Renderer) RenderHeader(w io.Writer, ast *md.Node) {} - -// RenderFooter implements blackfriday.Renderer interface. -func (r *Renderer) RenderFooter(w io.Writer, ast *md.Node) {} diff --git a/vendor/go.step.sm/cli-utils/usage/report.go b/vendor/go.step.sm/cli-utils/usage/report.go deleted file mode 100644 index cd87831b..00000000 --- a/vendor/go.step.sm/cli-utils/usage/report.go +++ /dev/null @@ -1,148 +0,0 @@ -package usage - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "regexp" - "strings" - - "golang.org/x/net/html" -) - -// Section keeps track of individual sections -type Section struct { - Command string `json:"command"` - Name string `json:"name"` - Text string `json:"text"` - Words int `json:"words"` - Lines int `json:"lines"` - Sections []*Section `json:"sections"` -} - -// Report holds together a report of sections -type Report struct { - Report []*Section `json:"report"` -} - -// NewReport returns report based on raw -func NewReport(command string, top []byte) *Report { - report := Report{} - report.Process(command, top) - - return &report -} - -// Write serializes the report to json -func (report *Report) Write(w io.Writer) error { - j, err := json.MarshalIndent(report, "", " ") - - if err != nil { - return err - } - - w.Write(j) - - return nil -} - -// Process adds a html based help page to the report -func (report *Report) Process(command string, raw []byte) error { - r := bytes.NewBuffer(raw) - doc, err := html.Parse(r) - - if err != nil { - return err - } - - if doc.FirstChild.Type != html.ElementNode || - doc.FirstChild.Data != "html" || - doc.FirstChild.FirstChild.NextSibling.Data != "body" { - return errors.New("error parsing raw html") - } - - body := doc.FirstChild.FirstChild.NextSibling - - report.addSection(command, body.FirstChild, nil) - - return nil -} - -func (report *Report) addSection(command string, node *html.Node, section *Section) (*html.Node, *Section) { - if node == nil || - node.Type != html.ElementNode || - node.Data != "h2" { - return nil, nil - } - - text, next := report.processNode(node) - words 
:= strings.Fields(text) - lines := strings.Split(text, "\n") - - s := Section{ - Command: command, - Name: node.FirstChild.Data, - Text: text, - Words: len(words), - Lines: len(lines), - } - - if section == nil { - report.Report = append(report.Report, &s) - return report.addSection(command, next, &s) - } - - section.Sections = append(section.Sections, &s) - return report.addSection(command, next, section) -} - -func (report *Report) processNode(node *html.Node) (string, *html.Node) { - text := "" - current := node.NextSibling - - r, _ := regexp.Compile("<[^>]*>") - - for current != nil { - var buf bytes.Buffer - w := io.Writer(&buf) - html.Render(w, current) - - notags := r.ReplaceAllString(buf.String(), "") - clean := strings.TrimSpace(notags) - - if len(text) > 0 && len(clean) > 0 { - text = fmt.Sprintf("%s %s", text, clean) - } else if len(clean) > 0 { - text = clean - } - - current = current.NextSibling - if current == nil { - return text, nil - } else if current.Type == html.ElementNode && - current.Data == "h2" { - node = current - current = nil - } - } - - return text, node -} - -// PerHeadline returns all sections across commands/pages with the same headline -func (report *Report) PerHeadline(headline string) []Section { - var results []Section - for _, top := range report.Report { - for _, section := range top.Sections { - if section.Name != headline { - continue - } - - results = append(results, *section) - } - } - - return results -} diff --git a/vendor/go.step.sm/cli-utils/usage/usage.go b/vendor/go.step.sm/cli-utils/usage/usage.go deleted file mode 100644 index 6a0bae8c..00000000 --- a/vendor/go.step.sm/cli-utils/usage/usage.go +++ /dev/null @@ -1,214 +0,0 @@ -package usage - -import ( - "bytes" - "fmt" - "html" - "strconv" - "strings" - "text/template" -) - -var usageTextTempl = " {{.Name}}\n {{.Usage}} {{if .Required}}(Required){{else}}(Optional){{end}}{{if .Multiple}} (Multiple can be specified){{end}}\n" -var templ *template.Template - -func init() { - templ = template.Must(template.New("usageText").Parse(usageTextTempl)) -} - -// Argument specifies the Name, Usage, and whether or not an Argument is -// required or not -type Argument struct { - Required bool - Multiple bool - Name string - Usage string -} - -// Decorate returns the name of an Argument and decorates it with notation to -// indicate whether its required or not -func (a Argument) Decorate() string { - name := a.Name - if a.Multiple { - name = name + "(s)..." 
- } - if a.Required { - return fmt.Sprintf("<%s>", name) - } - - return fmt.Sprintf("[%s]", name) -} - -// Arguments is an array of Argument structs that specify which arguments are -// accepted by a Command -type Arguments []Argument - -// UsageText returns the value of the UsageText property for a cli.Command for -// these arguments -func (args Arguments) UsageText() string { - var buf bytes.Buffer - for _, a := range args { - data := map[string]interface{}{ - "Name": a.Decorate(), - "Multiple": a.Multiple, - "Required": a.Required, - "Usage": a.Usage, - } - - err := templ.Execute(&buf, data) - if err != nil { - panic(fmt.Sprintf("Could not generate args template for %s: %s", a.Name, err)) - } - } - - return "\n\n" + buf.String() -} - -// ArgsUsage returns the value of the ArgsUsage property for a cli.Command for -// these arguments -func (args Arguments) ArgsUsage() string { - out := "" - for i, a := range args { - out += a.Decorate() - if i < len(args)-1 { - out += " " - } - } - - return out -} - -// AppHelpTemplate contains the modified template for the main app -var AppHelpTemplate = `## NAME -**{{.HelpName}}** -- {{.Usage}} - -## USAGE -{{if .UsageText}}{{.UsageText}}{{else}}**{{.HelpName}}**{{if .Commands}} {{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}_[arguments]_{{end}}{{end}}{{if .Description}} - -## DESCRIPTION -{{.Description}}{{end}}{{if .VisibleCommands}} - -## COMMANDS - -{{range .VisibleCategories}}{{if .Name}}{{.Name}}:{{end}} -||| -|---|---|{{range .VisibleCommands}} -| **{{join .Names ", "}}** | {{.Usage}} |{{end}} -{{end}}{{if .VisibleFlags}}{{end}} - -## OPTIONS - -{{range $index, $option := .VisibleFlags}}{{if $index}} -{{end}}{{$option}} -{{end}}{{end}}{{if .Copyright}}{{if len .Authors}} - -## AUTHOR{{with $length := len .Authors}}{{if ne 1 $length}}S{{end}}{{end}}: - -{{range $index, $author := .Authors}}{{if $index}} -{{end}}{{$author}}{{end}}{{end}}{{if .Version}}{{if not .HideVersion}} - -## ONLINE - -This documentation is available online at https://smallstep.com/docs/cli - -## VERSION - -{{.Version}}{{end}}{{end}} - -## COPYRIGHT - -{{.Copyright}} - -## FEEDBACK ` + - html.UnescapeString("&#"+strconv.Itoa(128525)+";") + " " + - html.UnescapeString("&#"+strconv.Itoa(127867)+";") + - ` - -The **step** utility is not instrumented for usage statistics. It does not phone home. -But your feedback is extremely valuable. Any information you can provide regarding how you’re using **step** helps. -Please send us a sentence or two, good or bad: **feedback@smallstep.com** or ask in [GitHub Discussions](https://github.com/smallstep/certificates/discussions). -{{end}} -` - -// SubcommandHelpTemplate contains the modified template for a sub command -// Note that the weird "|||\n|---|---|" syntax sets up a markdown table with empty headers. 
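// Illustrative sketch of the Arguments helpers above (argument names made
// up): Decorate wraps required names in <> and optional ones in [], adding
// "(s)..." when multiple values are accepted; ArgsUsage joins the results.
package main

import (
	"fmt"

	"go.step.sm/cli-utils/usage"
)

func main() {
	args := usage.Arguments{
		{Required: true, Name: "subject", Usage: "the certificate subject"},
		{Multiple: true, Name: "san", Usage: "additional names to include"},
	}
	fmt.Println(args.ArgsUsage()) // prints: <subject> [san(s)...]
}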
-var SubcommandHelpTemplate = `## NAME -**{{.HelpName}}** -- {{.Usage}} - -## USAGE - -{{if .UsageText}}{{.UsageText}}{{else}}**{{.HelpName}}** {{if .VisibleFlags}} _[options]_{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}_[arguments]_{{end}}{{end}}{{if .Description}} - -## DESCRIPTION - -{{.Description}}{{end}} - -## COMMANDS - -{{range .VisibleCategories}}{{if .Name}}{{.Name}}:{{end}} -||| -|---|---|{{range .VisibleCommands}} -| **{{join .Names ", "}}** | {{.Usage}} |{{end}} -{{end}}{{if .VisibleFlags}} - -## OPTIONS - -{{range .VisibleFlags}} -{{.}} -{{end}}{{end}} -` - -// CommandHelpTemplate contains the modified template for a command -var CommandHelpTemplate = `## NAME -**{{.HelpName}}** -- {{.Usage}} - -## USAGE - -{{if .UsageText}}{{.UsageText}}{{else}}**{{.HelpName}}**{{if .VisibleFlags}} _[options]_{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}_[arguments]_{{end}}{{end}}{{if .Category}} - -## CATEGORY - -{{.Category}}{{end}}{{if .Description}} - -## DESCRIPTION - -{{.Description}}{{end}}{{if .VisibleFlags}} - -## OPTIONS - -{{range .VisibleFlags}} -{{.}} -{{end}}{{end}} -` - -// FlagNamePrefixer converts a full flag name and its placeholder into the help -// message flag prefix. This is used by the default FlagStringer. -// -// This method clones urflave/cli functionality but adds a new line at the end. -func FlagNamePrefixer(fullName, placeholder string) string { - var prefixed string - parts := strings.Split(fullName, ",") - for i, name := range parts { - name = strings.Trim(name, " ") - prefixed += "**" + prefixFor(name) + name + "**" - - if placeholder != "" { - prefixed += "=" + placeholder - } - if i < len(parts)-1 { - prefixed += ", " - } - } - //return "* " + prefixed + "\n" - return prefixed + "\n: " -} - -func prefixFor(name string) (prefix string) { - if len(name) == 1 { - prefix = "-" - } else { - prefix = "--" - } - - return -} diff --git a/vendor/go.step.sm/crypto/LICENSE b/vendor/go.step.sm/crypto/LICENSE deleted file mode 100644 index 261eeb9e..00000000 --- a/vendor/go.step.sm/crypto/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/go.step.sm/crypto/internal/bcrypt_pbkdf/LICENSE b/vendor/go.step.sm/crypto/internal/bcrypt_pbkdf/LICENSE deleted file mode 100644 index b99c5e3b..00000000 --- a/vendor/go.step.sm/crypto/internal/bcrypt_pbkdf/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2014 Dmitry Chestnykh -Copyright (c) 2010 The Go Authors -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions -are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following - disclaimer in the documentation and/or other materials - provided with the distribution. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/go.step.sm/crypto/internal/bcrypt_pbkdf/README b/vendor/go.step.sm/crypto/internal/bcrypt_pbkdf/README deleted file mode 100644 index fb0fc8b7..00000000 --- a/vendor/go.step.sm/crypto/internal/bcrypt_pbkdf/README +++ /dev/null @@ -1,22 +0,0 @@ -Go implementation of bcrypt_pbkdf(3) from OpenBSD -(a variant of PBKDF2 with bcrypt-based PRF). - - -USAGE - - func Key(password, salt []byte, rounds, keyLen int) ([]byte, error) - - - Key derives a key from the password, salt and rounds count, returning a - []byte of length keyLen that can be used as cryptographic key. - - Remember to get a good random salt of at least 16 bytes. Using a higher - rounds count will increase the cost of an exhaustive search but will also - make derivation proportionally slower. - - -REFERENCES - -* https://github.com/dchest/bcrypt_pbkdf -* http://www.tedunangst.com/flak/post/bcrypt-pbkdf -* http://cvsweb.openbsd.org/cgi-bin/cvsweb/src/lib/libutil/bcrypt_pbkdf.c diff --git a/vendor/go.step.sm/crypto/internal/bcrypt_pbkdf/bcrypt_pbkdf.go b/vendor/go.step.sm/crypto/internal/bcrypt_pbkdf/bcrypt_pbkdf.go deleted file mode 100644 index 863ef1df..00000000 --- a/vendor/go.step.sm/crypto/internal/bcrypt_pbkdf/bcrypt_pbkdf.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2014 Dmitry Chestnykh. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package bcrypt_pbkdf implements password-based key derivation function based -// on bcrypt compatible with bcrypt_pbkdf(3) from OpenBSD. -package bcrypt_pbkdf - -import ( - "crypto/sha512" - "errors" - - // NOTE! Requires blowfish package version from Aug 1, 2014 or later. - // Will produce incorrect results if the package is older. - // See commit message for details: http://goo.gl/wx6g8O - //nolint:staticcheck - "golang.org/x/crypto/blowfish" -) - -// Key derives a key from the password, salt and rounds count, returning a -// []byte of length keyLen that can be used as cryptographic key. -// -// Remember to get a good random salt of at least 16 bytes. Using a higher -// rounds count will increase the cost of an exhaustive search but will also -// make derivation proportionally slower. 
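// Illustrative sketch of calling Key as the README above describes. Note the
// package sits under internal/, so this exact import only compiles from
// within go.step.sm/crypto; the password and parameters are example values.
package main

import (
	"crypto/rand"
	"fmt"

	"go.step.sm/crypto/internal/bcrypt_pbkdf"
)

func main() {
	salt := make([]byte, 16) // a good random salt of at least 16 bytes
	if _, err := rand.Read(salt); err != nil {
		panic(err)
	}
	// 16 rounds, 32-byte key: more rounds slow both attackers and derivation.
	key, err := bcrypt_pbkdf.Key([]byte("s3cret"), salt, 16, 32)
	if err != nil {
		panic(err)
	}
	fmt.Printf("derived %d-byte key: %x\n", len(key), key)
}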
-//nolint:errcheck -func Key(password, salt []byte, rounds, keyLen int) ([]byte, error) { - if rounds < 1 { - return nil, errors.New("bcrypt_pbkdf: number of rounds is too small") - } - if len(password) == 0 { - return nil, errors.New("bcrypt_pbkdf: empty password") - } - if len(salt) == 0 || len(salt) > 1<<20 { - return nil, errors.New("bcrypt_pbkdf: bad salt length") - } - if keyLen > 1024 { - return nil, errors.New("bcrypt_pbkdf: keyLen is too large") - } - var shapass, shasalt [sha512.Size]byte - var out, tmp [32]byte - var cnt [4]byte - - numBlocks := (keyLen + len(out) - 1) / len(out) - key := make([]byte, numBlocks*len(out)) - - h := sha512.New() - h.Write(password) - h.Sum(shapass[:0]) - - for block := 1; block <= numBlocks; block++ { - h.Reset() - h.Write(salt) - cnt[0] = byte(block >> 24) - cnt[1] = byte(block >> 16) - cnt[2] = byte(block >> 8) - cnt[3] = byte(block) - h.Write(cnt[:]) - bcryptHash(tmp[:], shapass[:], h.Sum(shasalt[:0])) - copy(out[:], tmp[:]) - - for i := 2; i <= rounds; i++ { - h.Reset() - h.Write(tmp[:]) - bcryptHash(tmp[:], shapass[:], h.Sum(shasalt[:0])) - for j := 0; j < len(out); j++ { - out[j] ^= tmp[j] - } - } - - for i, v := range out { - key[i*numBlocks+(block-1)] = v - } - } - return key[:keyLen], nil -} - -var magic = []byte("OxychromaticBlowfishSwatDynamite") - -func bcryptHash(out, shapass, shasalt []byte) { - c, err := blowfish.NewSaltedCipher(shapass, shasalt) - if err != nil { - panic(err) - } - for i := 0; i < 64; i++ { - blowfish.ExpandKey(shasalt, c) - blowfish.ExpandKey(shapass, c) - } - copy(out[:], magic) - for i := 0; i < 32; i += 8 { - for j := 0; j < 64; j++ { - c.Encrypt(out[i:i+8], out[i:i+8]) - } - } - // Swap bytes due to different endianness. - for i := 0; i < 32; i += 4 { - out[i+3], out[i+2], out[i+1], out[i] = out[i], out[i+1], out[i+2], out[i+3] - } -} diff --git a/vendor/go.step.sm/crypto/internal/step/config.go b/vendor/go.step.sm/crypto/internal/step/config.go deleted file mode 100644 index fe9af5ac..00000000 --- a/vendor/go.step.sm/crypto/internal/step/config.go +++ /dev/null @@ -1,94 +0,0 @@ -package step - -import ( - "log" - "os" - "os/user" - "path/filepath" - "strings" -) - -// PathEnv defines the name of the environment variable that can overwrite -// the default configuration path. -const PathEnv = "STEPPATH" - -// HomeEnv defines the name of the environment variable that can overwrite the -// default home directory. -const HomeEnv = "HOME" - -// stepPath will be populated in init() with the proper STEPPATH. -var stepPath string - -// homePath will be populated in init() with the proper HOME. -var homePath string - -// Path returns the path for the step configuration directory, this is -// defined by the environment variable STEPPATH or if this is not set it will -// default to '$HOME/.step'. -func Path() string { - return stepPath -} - -// Home returns the user home directory using the environment variable HOME or -// the os/user package. -func Home() string { - return homePath -} - -// Abs returns the given path relative to the StepPath if it's not an absolute -// path, relative to the home directory using the special string "~/", or -// relative to the working directory using "./" -// -// Relative paths like 'certs/root_ca.crt' will be converted to -// '$STEPPATH/certs/root_ca.crt', but paths like './certs/root_ca.crt' will be -// relative to the current directory. Home relative paths like -// ~/certs/root_ca.crt will be converted to '$HOME/certs/root_ca.crt'. 
And -// absolute paths like '/certs/root_ca.crt' will remain the same. -func Abs(path string) string { - if filepath.IsAbs(path) { - return path - } - // Windows accept both \ and / - slashed := filepath.ToSlash(path) - switch { - case strings.HasPrefix(slashed, "~/"): - return filepath.Join(homePath, path[2:]) - case strings.HasPrefix(slashed, "./"), strings.HasPrefix(slashed, "../"): - if abs, err := filepath.Abs(path); err == nil { - return abs - } - return path - default: - return filepath.Join(stepPath, path) - } -} - -func init() { - l := log.New(os.Stderr, "", 0) - - // Get home path from environment or from the user object. - homePath = os.Getenv(HomeEnv) - if homePath == "" { - if homePath = getUserHomeDir(); homePath == "" { - l.Fatalf("Error obtaining home directory, please define environment variable %s.", HomeEnv) - } - } - - // Get step path from environment or relative to home. - stepPath = os.Getenv(PathEnv) - if stepPath == "" { - stepPath = filepath.Join(homePath, ".step") - } - - // cleanup - homePath = filepath.Clean(homePath) - stepPath = filepath.Clean(stepPath) -} - -func getUserHomeDir() string { - usr, err := user.Current() - if err == nil && usr.HomeDir != "" { - return usr.HomeDir - } - return "" -} diff --git a/vendor/go.step.sm/crypto/internal/utils/io.go b/vendor/go.step.sm/crypto/internal/utils/io.go deleted file mode 100644 index 6d6b7301..00000000 --- a/vendor/go.step.sm/crypto/internal/utils/io.go +++ /dev/null @@ -1,51 +0,0 @@ -package utils - -import ( - "bytes" - "io/ioutil" - "os" - "unicode" - - "github.com/pkg/errors" -) - -func maybeUnwrap(err error) error { - if wrapped := errors.Unwrap(err); wrapped != nil { - return wrapped - } - return err -} - -// ReadFile reads the file named by filename and returns the contents. -// -// It wraps ioutil.ReadFile wrapping the errors. -func ReadFile(filename string) ([]byte, error) { - b, err := ioutil.ReadFile(filename) - if err != nil { - return nil, errors.Wrapf(maybeUnwrap(err), "error reading %s", filename) - } - return b, nil -} - -// ReadPasswordFromFile reads and returns the password from the given filename. -// The contents of the file will be trimmed at the right. -func ReadPasswordFromFile(filename string) ([]byte, error) { - password, err := ReadFile(filename) - if err != nil { - return nil, errors.Wrapf(err, "error reading %s", filename) - } - password = bytes.TrimRightFunc(password, unicode.IsSpace) - return password, nil -} - -// WriteFile writes data to a file named by filename. -// If the file does not exist, WriteFile creates it with permissions perm -// (before umask); otherwise WriteFile truncates it before writing. -// -// It wraps ioutil.WriteFile wrapping the errors. -func WriteFile(filename string, data []byte, perm os.FileMode) error { - if err := ioutil.WriteFile(filename, data, perm); err != nil { - return errors.Wrapf(maybeUnwrap(err), "error writing %s", filename) - } - return nil -} diff --git a/vendor/go.step.sm/crypto/jose/encrypt.go b/vendor/go.step.sm/crypto/jose/encrypt.go deleted file mode 100644 index 87d555b7..00000000 --- a/vendor/go.step.sm/crypto/jose/encrypt.go +++ /dev/null @@ -1,102 +0,0 @@ -package jose - -import ( - "encoding/json" - - "github.com/pkg/errors" - "go.step.sm/crypto/randutil" -) - -// MaxDecryptTries is the maximum number of attempts to decrypt a file. -const MaxDecryptTries = 3 - -// PasswordPrompter defines the function signature for the PromptPassword -// callback. 
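// Illustrative sketch of the resolution rules Abs implements above, assuming
// STEPPATH=/home/alice/.step and HOME=/home/alice (again an internal/ import,
// shown for illustration only):
package main

import (
	"fmt"

	"go.step.sm/crypto/internal/step"
)

func main() {
	fmt.Println(step.Abs("certs/root_ca.crt"))   // /home/alice/.step/certs/root_ca.crt
	fmt.Println(step.Abs("~/certs/root_ca.crt")) // /home/alice/certs/root_ca.crt
	fmt.Println(step.Abs("./certs/root_ca.crt")) // resolved against the working directory
	fmt.Println(step.Abs("/certs/root_ca.crt"))  // already absolute, unchanged
}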
-type PasswordPrompter func(s string) ([]byte, error) - -// PromptPassword is a method used to prompt for a password to decode encrypted -// keys. If this method is not defined and the key or password are not passed, -// the parse of the key will fail. -var PromptPassword PasswordPrompter - -// EncryptJWK returns the given JWK encrypted with the default encryption -// algorithm (PBES2-HS256+A128KW). -func EncryptJWK(jwk *JSONWebKey, passphrase []byte) (*JSONWebEncryption, error) { - b, err := json.Marshal(jwk) - if err != nil { - return nil, errors.Wrap(err, "error marshaling JWK") - } - - salt, err := randutil.Salt(PBKDF2SaltSize) - if err != nil { - return nil, err - } - - // Encrypt private key using PBES2 - recipient := Recipient{ - Algorithm: PBES2_HS256_A128KW, - Key: passphrase, - PBES2Count: PBKDF2Iterations, - PBES2Salt: salt, - } - - opts := new(EncrypterOptions) - opts.WithContentType(ContentType("jwk+json")) - - encrypter, err := NewEncrypter(DefaultEncAlgorithm, recipient, opts) - if err != nil { - return nil, errors.Wrap(err, "error creating cipher") - } - - jwe, err := encrypter.Encrypt(b) - if err != nil { - return nil, errors.Wrap(err, "error encrypting data") - } - - return jwe, nil -} - -// Decrypt returns the decrypted version of the given data if it's encrypted, -// it will return the raw data if it's not encrypted or the format is not -// valid. -func Decrypt(data []byte, opts ...Option) ([]byte, error) { - ctx, err := new(context).apply(opts...) - if err != nil { - return nil, err - } - - // Return the given data if we cannot parse it as encrypted. - enc, err := ParseEncrypted(string(data)) - if err != nil { - return data, nil - } - - // Try with the given password. - if len(ctx.password) > 0 { - if data, err = enc.Decrypt(ctx.password); err == nil { - return data, nil - } - return nil, errors.New("failed to decrypt JWE: invalid password") - } - - // Try with a given password prompter. - if ctx.passwordPrompter != nil || PromptPassword != nil { - var pass []byte - for i := 0; i < MaxDecryptTries; i++ { - if ctx.passwordPrompter != nil { - if pass, err = ctx.passwordPrompter(ctx.passwordPrompt); err != nil { - return nil, err - } - } else { - if pass, err = PromptPassword("Please enter the password to decrypt the JWE"); err != nil { - return nil, err - } - } - if data, err = enc.Decrypt(pass); err == nil { - return data, nil - } - } - } - - return nil, errors.New("failed to decrypt JWE: invalid password") -} diff --git a/vendor/go.step.sm/crypto/jose/generate.go b/vendor/go.step.sm/crypto/jose/generate.go deleted file mode 100644 index 42f94ca2..00000000 --- a/vendor/go.step.sm/crypto/jose/generate.go +++ /dev/null @@ -1,189 +0,0 @@ -package jose - -import ( - "crypto" - "crypto/ecdsa" - "crypto/ed25519" - "crypto/rsa" - "crypto/x509" - "encoding/base64" - - "github.com/pkg/errors" - "go.step.sm/crypto/keyutil" - "go.step.sm/crypto/pemutil" -) - -const ( - jwksUsageSig = "sig" - jwksUsageEnc = "enc" - // defaultKeyType is the default type of the one-time token key. - defaultKeyType = EC - // defaultKeyCurve is the default curve of the one-time token key. - defaultKeyCurve = P256 - // defaultKeyAlg is the default algorithm of the one-time token key. - defaultKeyAlg = ES256 - // defaultKeySize is the default size of the one-time token key. 
- defaultKeySize = 0 -) - -var ( - errAmbiguousCertKeyUsage = errors.New("jose/generate: certificate's key usage is ambiguous, it should be for signature or encipherment, but not both (use --subtle to ignore usage field)") - errNoCertKeyUsage = errors.New("jose/generate: certificate doesn't contain any key usage (use --subtle to ignore usage field)") -) - -// Thumbprint computes the JWK Thumbprint of a key using SHA256 as the hash -// algorithm. It returns the hash encoded in the Base64 raw url encoding. -func Thumbprint(jwk *JSONWebKey) (string, error) { - hash, err := jwk.Thumbprint(crypto.SHA256) - if err != nil { - return "", errors.Wrap(err, "error generating JWK thumbprint") - } - return base64.RawURLEncoding.EncodeToString(hash), nil -} - -// GenerateDefaultKeyPair generates an asymmetric public/private key pair. -// Returns the public key as a JWK and the private key as an encrypted JWE. -func GenerateDefaultKeyPair(passphrase []byte) (*JSONWebKey, *JSONWebEncryption, error) { - if len(passphrase) == 0 { - return nil, nil, errors.New("step-jose: password cannot be empty when encryptying a JWK") - } - - // Generate the OTT key - jwk, err := GenerateJWK(defaultKeyType, defaultKeyCurve, defaultKeyAlg, jwksUsageSig, "", defaultKeySize) - if err != nil { - return nil, nil, err - } - - jwk.KeyID, err = Thumbprint(jwk) - if err != nil { - return nil, nil, err - } - - jwe, err := EncryptJWK(jwk, passphrase) - if err != nil { - return nil, nil, err - } - - public := jwk.Public() - return &public, jwe, nil -} - -// GenerateJWK generates a JWK given the key type, curve, alg, use, kid and -// the size of the RSA or oct keys if necessary. -func GenerateJWK(kty, crv, alg, use, kid string, size int) (jwk *JSONWebKey, err error) { - if kty == "OKP" && use == "enc" && (crv == "" || crv == "Ed25519") { - return nil, errors.New("invalid algorithm: Ed25519 cannot be used for encryption") - } - - switch { - case kty == "EC" && crv == "": - crv = P256 - case kty == "OKP" && crv == "": - crv = Ed25519 - case kty == "RSA" && size == 0: - size = DefaultRSASize - case kty == "oct" && size == 0: - size = DefaultOctSize - } - - key, err := keyutil.GenerateKey(kty, crv, size) - if err != nil { - return nil, err - } - jwk = &JSONWebKey{ - Key: key, - KeyID: kid, - Use: use, - Algorithm: alg, - } - guessJWKAlgorithm(&context{alg: alg}, jwk) - if jwk.KeyID == "" && kty != "oct" { - jwk.KeyID, err = Thumbprint(jwk) - } - return jwk, err -} - -// GenerateJWKFromPEM returns an incomplete JSONWebKey using the key from a -// PEM file. 
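The deleted GenerateJWK/Thumbprint pair boils down to: generate a key, wrap it in a JSONWebKey, and use the RFC 7638 thumbprint as the key ID. A hedged sketch of the same flow directly against gopkg.in/square/go-jose.v2 (the P-256/ES256 choices mirror the package defaults above):

```go
package main

import (
	"crypto"
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"encoding/base64"
	"fmt"
	"log"

	jose "gopkg.in/square/go-jose.v2"
)

func main() {
	// P-256 is the same default curve the removed GenerateJWK falls back to.
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}

	jwk := jose.JSONWebKey{Key: key, Use: "sig", Algorithm: "ES256"}

	// RFC 7638 thumbprint, base64 raw-URL encoded, used as the key ID.
	hash, err := jwk.Thumbprint(crypto.SHA256)
	if err != nil {
		log.Fatal(err)
	}
	jwk.KeyID = base64.RawURLEncoding.EncodeToString(hash)
	fmt.Println("kid:", jwk.KeyID)
}
```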
-func GenerateJWKFromPEM(filename string, subtle bool) (*JSONWebKey, error) { - key, err := pemutil.Read(filename) - if err != nil { - return nil, err - } - - switch key := key.(type) { - case *rsa.PrivateKey, *rsa.PublicKey: - return &JSONWebKey{ - Key: key, - }, nil - case *ecdsa.PrivateKey, *ecdsa.PublicKey, ed25519.PrivateKey, ed25519.PublicKey: - return &JSONWebKey{ - Key: key, - Algorithm: algForKey(key), - }, nil - case *x509.Certificate: - var use string - if !subtle { - use, err = keyUsageForCert(key) - if err != nil { - return nil, err - } - } - return &JSONWebKey{ - Key: key.PublicKey, - Certificates: []*x509.Certificate{key}, - Algorithm: algForKey(key.PublicKey), - Use: use, - }, nil - default: - return nil, errors.Errorf("error parsing %s: unsupported key type '%T'", filename, key) - } -} - -func algForKey(key crypto.PublicKey) string { - switch key := key.(type) { - case *ecdsa.PrivateKey: - return getECAlgorithm(key.Curve) - case *ecdsa.PublicKey: - return getECAlgorithm(key.Curve) - case ed25519.PrivateKey, ed25519.PublicKey: - return EdDSA - default: - return "" - } -} - -func keyUsageForCert(cert *x509.Certificate) (string, error) { - isDigitalSignature := containsUsage(cert.KeyUsage, - x509.KeyUsageDigitalSignature, - x509.KeyUsageContentCommitment, - x509.KeyUsageCertSign, - x509.KeyUsageCRLSign, - ) - isEncipherment := containsUsage(cert.KeyUsage, - x509.KeyUsageKeyEncipherment, - x509.KeyUsageDataEncipherment, - x509.KeyUsageKeyAgreement, - x509.KeyUsageEncipherOnly, - x509.KeyUsageDecipherOnly, - ) - if isDigitalSignature && isEncipherment { - return "", errAmbiguousCertKeyUsage - } - if isDigitalSignature { - return jwksUsageSig, nil - } - if isEncipherment { - return jwksUsageEnc, nil - } - return "", errNoCertKeyUsage -} - -func containsUsage(usage x509.KeyUsage, queries ...x509.KeyUsage) bool { - for _, query := range queries { - if usage&query == query { - return true - } - } - return false -} diff --git a/vendor/go.step.sm/crypto/jose/options.go b/vendor/go.step.sm/crypto/jose/options.go deleted file mode 100644 index 98a54b95..00000000 --- a/vendor/go.step.sm/crypto/jose/options.go +++ /dev/null @@ -1,119 +0,0 @@ -package jose - -import ( - "go.step.sm/crypto/internal/utils" -) - -type context struct { - filename string - use, alg, kid string - subtle, insecure bool - noDefaults bool - password []byte - passwordPrompt string - passwordPrompter PasswordPrompter -} - -// apply the options to the context and returns an error if one of the options -// fails. -func (ctx *context) apply(opts ...Option) (*context, error) { - for _, opt := range opts { - if err := opt(ctx); err != nil { - return nil, err - } - } - if ctx.filename == "" { - ctx.filename = "key" - } - return ctx, nil -} - -// Option is the type used to add attributes to the context. -type Option func(ctx *context) error - -// WithFilename adds the given filename to the context. -func WithFilename(filename string) Option { - return func(ctx *context) error { - ctx.filename = filename - return nil - } -} - -// WithUse adds the use claim to the context. -func WithUse(use string) Option { - return func(ctx *context) error { - ctx.use = use - return nil - } -} - -// WithAlg adds the alg claim to the context. -func WithAlg(alg string) Option { - return func(ctx *context) error { - ctx.alg = alg - return nil - } -} - -// WithKid adds the kid property to the context. 
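options.go, next in this removal, is a textbook functional-options setup: each exported With* helper returns a closure that mutates an unexported context and may fail. Stripped to its skeleton (the config type and helper names below are illustrative, not the package's API):

```go
package main

import "fmt"

// config plays the role of the unexported context in the deleted options.go.
type config struct {
	filename string
	password []byte
}

// Option mutates the config and may fail, matching the deleted signature.
type Option func(c *config) error

func WithFilename(name string) Option {
	return func(c *config) error { c.filename = name; return nil }
}

func WithPassword(pass []byte) Option {
	return func(c *config) error { c.password = pass; return nil }
}

func apply(opts ...Option) (*config, error) {
	c := &config{filename: "key"} // default mirrors the deleted apply()
	for _, opt := range opts {
		if err := opt(c); err != nil {
			return nil, err
		}
	}
	return c, nil
}

func main() {
	c, _ := apply(WithFilename("key.pem"), WithPassword([]byte("s3cret")))
	fmt.Println(c.filename, len(c.password))
}
```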
-func WithKid(kid string) Option { - return func(ctx *context) error { - ctx.kid = kid - return nil - } -} - -// WithSubtle marks the context as subtle. -func WithSubtle(subtle bool) Option { - return func(ctx *context) error { - ctx.subtle = subtle - return nil - } -} - -// WithInsecure marks the context as insecure. -func WithInsecure(insecure bool) Option { - return func(ctx *context) error { - ctx.insecure = insecure - return nil - } -} - -// WithNoDefaults avoids that the parser loads defaults values, specially the -// default algorithms. -func WithNoDefaults(val bool) Option { - return func(ctx *context) error { - ctx.noDefaults = val - return nil - } -} - -// WithPassword is a method that adds the given password to the context. -func WithPassword(pass []byte) Option { - return func(ctx *context) error { - ctx.password = pass - return nil - } -} - -// WithPasswordFile is a method that adds the password in a file to the context. -func WithPasswordFile(filename string) Option { - return func(ctx *context) error { - b, err := utils.ReadPasswordFromFile(filename) - if err != nil { - return err - } - ctx.password = b - return nil - } -} - -// WithPasswordPrompter defines a method that can be used to prompt for the -// password to decrypt an encrypted JWE. -func WithPasswordPrompter(prompt string, fn PasswordPrompter) Option { - return func(ctx *context) error { - ctx.passwordPrompt = prompt - ctx.passwordPrompter = fn - return nil - } -} diff --git a/vendor/go.step.sm/crypto/jose/parse.go b/vendor/go.step.sm/crypto/jose/parse.go deleted file mode 100644 index 6545c732..00000000 --- a/vendor/go.step.sm/crypto/jose/parse.go +++ /dev/null @@ -1,380 +0,0 @@ -package jose - -import ( - "bytes" - "crypto/ecdsa" - "crypto/ed25519" - "crypto/elliptic" - "crypto/rsa" - "crypto/x509" - "encoding/base64" - "encoding/json" - "io/ioutil" - "net/http" - "strings" - "time" - - "github.com/pkg/errors" - "go.step.sm/crypto/pemutil" -) - -type keyType int - -const ( - jwkKeyType keyType = iota - pemKeyType - octKeyType -) - -// read returns the bytes from reading a file, or from a url if the filename has -// the prefix https:// -func read(filename string) ([]byte, error) { - if strings.HasPrefix(filename, "https://") { - resp, err := http.Get(filename) - if err != nil { - return nil, errors.Wrapf(err, "error retrieving %s", filename) - } - defer resp.Body.Close() - - if resp.StatusCode >= 400 { - return nil, errors.Errorf("error retrieving %s: status code %d", filename, resp.StatusCode) - } - b, err := ioutil.ReadAll(resp.Body) - return b, errors.Wrapf(err, "error retrieving %s", filename) - } - - b, err := ioutil.ReadFile(filename) - if err != nil { - return nil, errors.Wrapf(err, "error reading %s", filename) - } - return b, nil -} - -// ReadKey returns a JSONWebKey from the given JWK or PEM file. If the file is -// password protected, and no password or prompt password function is given it -// will fail. -func ReadKey(filename string, opts ...Option) (*JSONWebKey, error) { - b, err := read(filename) - if err != nil { - return nil, err - } - opts = append(opts, WithFilename(filename)) - return ParseKey(b, opts...) -} - -// ParseKey returns a JSONWebKey from the given JWK file or a PEM file. If the -// file is password protected, and no password or prompt password function is -// given it will fail. -func ParseKey(b []byte, opts ...Option) (*JSONWebKey, error) { - ctx, err := new(context).apply(opts...) 
- if err != nil { - return nil, err - } - - jwk := new(JSONWebKey) - switch guessKeyType(ctx, b) { - case jwkKeyType: - // Attempt to parse an encrypted file - if b, err = Decrypt(b, opts...); err != nil { - return nil, err - } - - // Unmarshal the plain (or decrypted JWK) - if err = json.Unmarshal(b, jwk); err != nil { - return nil, errors.Errorf("error reading %s: unsupported format", ctx.filename) - } - - // If KeyID not set by environment, then use the default. - // NOTE: we do not set this value by default in the case of jwkKeyType - // because it is assumed to have been left empty on purpose. - case pemKeyType: - pemOptions := []pemutil.Options{ - pemutil.WithFilename(ctx.filename), - } - if ctx.password != nil { - pemOptions = append(pemOptions, pemutil.WithPassword(ctx.password)) - } - if ctx.passwordPrompter != nil { - pemOptions = append(pemOptions, pemutil.WithPasswordPrompt(ctx.passwordPrompt, pemutil.PasswordPrompter(ctx.passwordPrompter))) - } - if pemutil.PromptPassword == nil && PromptPassword != nil { - pemutil.PromptPassword = pemutil.PasswordPrompter(PromptPassword) - } - - jwk.Key, err = pemutil.ParseKey(b, pemOptions...) - if err != nil { - return nil, err - } - if len(ctx.kid) == 0 { - if jwk.KeyID, err = Thumbprint(jwk); err != nil { - return nil, err - } - } - case octKeyType: - jwk.Key = b - } - - // Validate key id - if ctx.kid != "" && jwk.KeyID != "" && ctx.kid != jwk.KeyID { - return nil, errors.Errorf("kid %s does not match the kid on %s", ctx.kid, ctx.filename) - } - if jwk.KeyID == "" { - jwk.KeyID = ctx.kid - } - if jwk.Use == "" { - jwk.Use = ctx.use - } - - // Set the algorithm if empty - guessJWKAlgorithm(ctx, jwk) - - // Validate alg: if the flag '--subtle' is passed we will allow to overwrite it - if !ctx.subtle && ctx.alg != "" && jwk.Algorithm != "" && ctx.alg != jwk.Algorithm { - return nil, errors.Errorf("alg %s does not match the alg on %s", ctx.alg, ctx.filename) - } - if ctx.subtle && ctx.alg != "" { - jwk.Algorithm = ctx.alg - } - - return jwk, nil -} - -// ReadKeySet reads a JWK Set from a URL or filename. URLs must start with -// "https://". -func ReadKeySet(filename string, opts ...Option) (*JSONWebKey, error) { - b, err := read(filename) - if err != nil { - return nil, err - } - opts = append(opts, WithFilename(filename)) - return ParseKeySet(b, opts...) -} - -// ParseKeySet returns the JWK with the given key after parsing a JWKSet from -// a given file. -func ParseKeySet(b []byte, opts ...Option) (*JSONWebKey, error) { - ctx, err := new(context).apply(opts...) 
- if err != nil { - return nil, err - } - - // Attempt to parse an encrypted file - if b, err = Decrypt(b, opts...); err != nil { - return nil, err - } - - // Unmarshal the plain or decrypted JWKSet - jwkSet := new(JSONWebKeySet) - if err := json.Unmarshal(b, jwkSet); err != nil { - return nil, errors.Errorf("error reading %s: unsupported format", ctx.filename) - } - - jwks := jwkSet.Key(ctx.kid) - switch len(jwks) { - case 0: - return nil, errors.Errorf("cannot find key with kid %s on %s", ctx.kid, ctx.filename) - case 1: - jwk := &jwks[0] - - // Set the algorithm if empty - guessJWKAlgorithm(ctx, jwk) - - // Validate alg: if the flag '--subtle' is passed we will allow the - // overwrite of the alg - if !ctx.subtle && ctx.alg != "" && jwk.Algorithm != "" && ctx.alg != jwk.Algorithm { - return nil, errors.Errorf("alg %s does not match the alg on %s", ctx.alg, ctx.filename) - } - if ctx.subtle && ctx.alg != "" { - jwk.Algorithm = ctx.alg - } - return jwk, nil - default: - return nil, errors.Errorf("multiple keys with kid %s have been found on %s", ctx.kid, ctx.filename) - } -} - -func decodeCerts(l []interface{}) ([]*x509.Certificate, error) { - certs := make([]*x509.Certificate, len(l)) - for i, j := range l { - certStr, ok := j.(string) - if !ok { - return nil, errors.Errorf("wrong type in x5c header list; expected string but %T", i) - } - certB, err := base64.StdEncoding.DecodeString(certStr) - if err != nil { - return nil, errors.Wrap(err, "error decoding base64 encoded x5c cert") - } - cert, err := x509.ParseCertificate(certB) - if err != nil { - return nil, errors.Wrap(err, "error parsing x5c cert") - } - certs[i] = cert - } - return certs, nil -} - -// X5cInsecureKey is the key used to store the x5cInsecure cert chain in the JWT header. -var X5cInsecureKey = "x5cInsecure" - -// GetX5cInsecureHeader extracts the x5cInsecure certificate chain from the token. -func GetX5cInsecureHeader(jwt *JSONWebToken) ([]*x509.Certificate, error) { - x5cVal, ok := jwt.Headers[0].ExtraHeaders[HeaderKey(X5cInsecureKey)] - if !ok { - return nil, errors.New("ssh check-host token missing x5cInsecure header") - } - interfaces, ok := x5cVal.([]interface{}) - if !ok { - return nil, errors.Errorf("ssh check-host token x5cInsecure header has wrong type; expected []string, but got %T", x5cVal) - } - chain, err := decodeCerts(interfaces) - if err != nil { - return nil, errors.Wrap(err, "error decoding x5cInsecure header certs") - } - return chain, nil -} - -// ParseX5cInsecure parses an x5cInsecure token, validates the certificate chain -// in the token, and returns the JWT struct along with all the verified chains. -func ParseX5cInsecure(tok string, roots []*x509.Certificate) (*JSONWebToken, [][]*x509.Certificate, error) { - jwt, err := ParseSigned(tok) - if err != nil { - return nil, nil, errors.Wrapf(err, "error parsing x5cInsecure token") - } - - chain, err := GetX5cInsecureHeader(jwt) - if err != nil { - return nil, nil, errors.Wrap(err, "error extracting x5cInsecure cert chain") - } - leaf := chain[0] - - interPool := x509.NewCertPool() - for _, crt := range chain[1:] { - interPool.AddCert(crt) - } - rootPool := x509.NewCertPool() - for _, crt := range roots { - rootPool.AddCert(crt) - } - // Correctly parse and validate the x5c certificate chain. - verifiedChains, err := leaf.Verify(x509.VerifyOptions{ - Roots: rootPool, - Intermediates: interPool, - // A hack so we skip validity period validation. 
- CurrentTime: leaf.NotAfter.Add(-1 * time.Minute), - }) - if err != nil { - return nil, nil, errors.Wrap(err, "error verifying x5cInsecure certificate chain") - } - leaf = verifiedChains[0][0] - - if leaf.KeyUsage&x509.KeyUsageDigitalSignature == 0 { - return nil, nil, errors.New("certificate used to sign x5cInsecure token cannot be used for digital signature") - } - - return jwt, verifiedChains, nil -} - -// guessKeyType returns the key type of the given data. Key types are JWK, PEM -// or oct. -func guessKeyType(ctx *context, data []byte) keyType { - switch ctx.alg { - // jwk or file with oct data - case "HS256", "HS384", "HS512": - // Encrypted JWK ? - if _, err := ParseEncrypted(string(data)); err == nil { - return jwkKeyType - } - // JSON JWK ? - if err := json.Unmarshal(data, &JSONWebKey{}); err == nil { - return jwkKeyType - } - // Default to oct - return octKeyType - default: - // PEM or default to JWK - if bytes.HasPrefix(data, []byte("-----BEGIN ")) { - return pemKeyType - } - return jwkKeyType - } -} - -// guessJWKAlgorithm set the algorithm if it's not set and we can guess it -func guessJWKAlgorithm(ctx *context, jwk *JSONWebKey) { - if jwk.Algorithm == "" { - // Force default algorithm if passed. - if ctx.alg != "" { - jwk.Algorithm = ctx.alg - return - } - - // Guess only fixed algorithms if no defaults is enabled - if ctx.noDefaults { - guessKnownJWKAlgorithm(ctx, jwk) - return - } - - // Use defaults for each key type - switch k := jwk.Key.(type) { - case []byte: - if jwk.Use == "enc" { - jwk.Algorithm = string(DefaultOctKeyAlgorithm) - } else { - jwk.Algorithm = string(DefaultOctSigAlgorithm) - } - case *ecdsa.PrivateKey: - if jwk.Use == "enc" { - jwk.Algorithm = string(DefaultECKeyAlgorithm) - } else { - jwk.Algorithm = getECAlgorithm(k.Curve) - } - case *ecdsa.PublicKey: - if jwk.Use == "enc" { - jwk.Algorithm = string(DefaultECKeyAlgorithm) - } else { - jwk.Algorithm = getECAlgorithm(k.Curve) - } - case *rsa.PrivateKey, *rsa.PublicKey: - if jwk.Use == "enc" { - jwk.Algorithm = string(DefaultRSAKeyAlgorithm) - } else { - jwk.Algorithm = string(DefaultRSASigAlgorithm) - } - // Ed25519 can only be used for signing operations - case ed25519.PrivateKey, ed25519.PublicKey: - jwk.Algorithm = EdDSA - } - } -} - -// guessKnownJWKAlgorithm sets the algorithm for keys that only have one -// possible algorithm. -func guessKnownJWKAlgorithm(ctx *context, jwk *JSONWebKey) { - if jwk.Algorithm == "" && jwk.Use != "enc" { - switch k := jwk.Key.(type) { - case *ecdsa.PrivateKey: - jwk.Algorithm = getECAlgorithm(k.Curve) - case *ecdsa.PublicKey: - jwk.Algorithm = getECAlgorithm(k.Curve) - case ed25519.PrivateKey, ed25519.PublicKey: - jwk.Algorithm = EdDSA - } - } -} - -// getECAlgorithm returns the JWA algorithm name for the given elliptic curve. -// If the curve is not supported it will return an empty string. -// -// Supported curves are P-256, P-384, and P-521. -func getECAlgorithm(crv elliptic.Curve) string { - switch crv.Params().Name { - case P256: - return ES256 - case P384: - return ES384 - case P521: - return ES512 - default: - return "" - } -} diff --git a/vendor/go.step.sm/crypto/jose/types.go b/vendor/go.step.sm/crypto/jose/types.go deleted file mode 100644 index 01798efa..00000000 --- a/vendor/go.step.sm/crypto/jose/types.go +++ /dev/null @@ -1,273 +0,0 @@ -// Code generated (comment to force golint to ignore this file). DO NOT EDIT. - -// Package jose is a wrapper for gopkg.in/square/go-jose.v2 and implements -// utilities to parse and generate JWT, JWK and JWKSets. 
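The getECAlgorithm helper above encodes a fixed pairing between NIST curves and ECDSA JWA identifiers. A self-contained sketch of the same mapping using only the standard library:

```go
package main

import (
	"crypto/elliptic"
	"fmt"
)

// ecAlgorithm mirrors the deleted getECAlgorithm: each supported NIST curve
// pairs with exactly one ECDSA JWA algorithm; unknown curves map to "".
func ecAlgorithm(crv elliptic.Curve) string {
	switch crv.Params().Name {
	case "P-256":
		return "ES256"
	case "P-384":
		return "ES384"
	case "P-521":
		return "ES512"
	default:
		return ""
	}
}

func main() {
	fmt.Println(ecAlgorithm(elliptic.P256())) // ES256
	fmt.Println(ecAlgorithm(elliptic.P521())) // ES512
}
```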
-package jose
-
-import (
- "errors"
- "strings"
- "time"
-
- jose "gopkg.in/square/go-jose.v2"
- "gopkg.in/square/go-jose.v2/jwt"
-)
-
-// SupportsPBKDF2 indicates whether the underlying library supports
-// password-based cryptography algorithms.
-const SupportsPBKDF2 = true
-
-// PBKDF2SaltSize is the default size of the salt for PBKDF2: a 128-bit salt.
-const PBKDF2SaltSize = 16
-
-// PBKDF2Iterations is the default number of iterations for PBKDF2, 100k
-// iterations. NIST recommends at least 10k; 1Password uses 100k.
-const PBKDF2Iterations = 100000
-
-// JSONWebSignature represents a signed JWS object after parsing.
-type JSONWebSignature = jose.JSONWebSignature
-
-// JSONWebToken represents a JSON Web Token (as specified in RFC7519).
-type JSONWebToken = jwt.JSONWebToken
-
-// JSONWebKey represents a public or private key in JWK format.
-type JSONWebKey = jose.JSONWebKey
-
-// JSONWebKeySet represents a JWK Set object.
-type JSONWebKeySet = jose.JSONWebKeySet
-
-// JSONWebEncryption represents an encrypted JWE object after parsing.
-type JSONWebEncryption = jose.JSONWebEncryption
-
-// Recipient represents an algorithm/key to encrypt messages to.
-type Recipient = jose.Recipient
-
-// EncrypterOptions represents options that can be set on new encrypters.
-type EncrypterOptions = jose.EncrypterOptions
-
-// Encrypter represents an encrypter which produces an encrypted JWE object.
-type Encrypter = jose.Encrypter
-
-// ContentType represents the type of the contained data.
-type ContentType = jose.ContentType
-
-// KeyAlgorithm represents a key management algorithm.
-type KeyAlgorithm = jose.KeyAlgorithm
-
-// ContentEncryption represents a content encryption algorithm.
-type ContentEncryption = jose.ContentEncryption
-
-// SignatureAlgorithm represents a signature (or MAC) algorithm.
-type SignatureAlgorithm = jose.SignatureAlgorithm
-
-// Signature represents a signature.
-type Signature = jose.Signature
-
-// ErrCryptoFailure indicates an error in a cryptographic primitive.
-var ErrCryptoFailure = jose.ErrCryptoFailure
-
-// Claims represents public claim values (as specified in RFC 7519).
-type Claims = jwt.Claims
-
-// Builder is a utility for making JSON Web Tokens. Calls can be chained, and
-// errors are accumulated until the final call to CompactSerialize/FullSerialize.
-type Builder = jwt.Builder
-
-// NumericDate represents date and time as the number of seconds since the
-// epoch, including leap seconds. Non-integer values can be represented
-// in the serialized format, but we round to the nearest second.
-type NumericDate = jwt.NumericDate
-
-// Audience represents the recipients that the token is intended for.
-type Audience = jwt.Audience
-
-// Expected defines values used for protected claims validation.
-// If a field has a zero value, its validation is skipped.
-type Expected = jwt.Expected
-
-// Signer represents a signer which takes a payload and produces a signed JWS object.
-type Signer = jose.Signer
-
-// SigningKey represents an algorithm/key used to sign a message.
-type SigningKey = jose.SigningKey
-
-// SignerOptions represents options that can be set when creating signers.
-type SignerOptions = jose.SignerOptions
-
-// Header represents the read-only JOSE header for JWE/JWS objects.
-type Header = jose.Header
-
-// HeaderKey represents the type used as a key in the protected header of a JWS
-// object.
-type HeaderKey = jose.HeaderKey
-
-// ErrInvalidIssuer indicates an invalid iss claim.
-var ErrInvalidIssuer = jwt.ErrInvalidIssuer
-
-// ErrInvalidAudience indicates an invalid aud claim.
-var ErrInvalidAudience = jwt.ErrInvalidAudience
-
-// ErrNotValidYet indicates that the token is used before the time indicated in the nbf claim.
-var ErrNotValidYet = jwt.ErrNotValidYet
-
-// ErrExpired indicates that the token is used after the expiry time indicated in the exp claim.
-var ErrExpired = jwt.ErrExpired
-
-// ErrInvalidSubject indicates an invalid sub claim.
-var ErrInvalidSubject = jwt.ErrInvalidSubject
-
-// ErrInvalidID indicates an invalid jti claim.
-var ErrInvalidID = jwt.ErrInvalidID
-
-// Key management algorithms
-const (
- RSA1_5 = KeyAlgorithm("RSA1_5") // RSA-PKCS1v1.5
- RSA_OAEP = KeyAlgorithm("RSA-OAEP") // RSA-OAEP-SHA1
- RSA_OAEP_256 = KeyAlgorithm("RSA-OAEP-256") // RSA-OAEP-SHA256
- A128KW = KeyAlgorithm("A128KW") // AES key wrap (128)
- A192KW = KeyAlgorithm("A192KW") // AES key wrap (192)
- A256KW = KeyAlgorithm("A256KW") // AES key wrap (256)
- DIRECT = KeyAlgorithm("dir") // Direct encryption
- ECDH_ES = KeyAlgorithm("ECDH-ES") // ECDH-ES
- ECDH_ES_A128KW = KeyAlgorithm("ECDH-ES+A128KW") // ECDH-ES + AES key wrap (128)
- ECDH_ES_A192KW = KeyAlgorithm("ECDH-ES+A192KW") // ECDH-ES + AES key wrap (192)
- ECDH_ES_A256KW = KeyAlgorithm("ECDH-ES+A256KW") // ECDH-ES + AES key wrap (256)
- A128GCMKW = KeyAlgorithm("A128GCMKW") // AES-GCM key wrap (128)
- A192GCMKW = KeyAlgorithm("A192GCMKW") // AES-GCM key wrap (192)
- A256GCMKW = KeyAlgorithm("A256GCMKW") // AES-GCM key wrap (256)
- PBES2_HS256_A128KW = KeyAlgorithm("PBES2-HS256+A128KW") // PBES2 + HMAC-SHA256 + AES key wrap (128)
- PBES2_HS384_A192KW = KeyAlgorithm("PBES2-HS384+A192KW") // PBES2 + HMAC-SHA384 + AES key wrap (192)
- PBES2_HS512_A256KW = KeyAlgorithm("PBES2-HS512+A256KW") // PBES2 + HMAC-SHA512 + AES key wrap (256)
-)
-
-// Signature algorithms
-const (
- HS256 = "HS256" // HMAC using SHA-256
- HS384 = "HS384" // HMAC using SHA-384
- HS512 = "HS512" // HMAC using SHA-512
- RS256 = "RS256" // RSASSA-PKCS-v1.5 using SHA-256
- RS384 = "RS384" // RSASSA-PKCS-v1.5 using SHA-384
- RS512 = "RS512" // RSASSA-PKCS-v1.5 using SHA-512
- ES256 = "ES256" // ECDSA using P-256 and SHA-256
- ES384 = "ES384" // ECDSA using P-384 and SHA-384
- ES512 = "ES512" // ECDSA using P-521 and SHA-512
- PS256 = "PS256" // RSASSA-PSS using SHA256 and MGF1-SHA256
- PS384 = "PS384" // RSASSA-PSS using SHA384 and MGF1-SHA384
- PS512 = "PS512" // RSASSA-PSS using SHA512 and MGF1-SHA512
- EdDSA = "EdDSA" // Ed25519
-)
-
-// Content encryption algorithms
-const (
- A128CBC_HS256 = ContentEncryption("A128CBC-HS256") // AES-CBC + HMAC-SHA256 (128)
- A192CBC_HS384 = ContentEncryption("A192CBC-HS384") // AES-CBC + HMAC-SHA384 (192)
- A256CBC_HS512 = ContentEncryption("A256CBC-HS512") // AES-CBC + HMAC-SHA512 (256)
- A128GCM = ContentEncryption("A128GCM") // AES-GCM (128)
- A192GCM = ContentEncryption("A192GCM") // AES-GCM (192)
- A256GCM = ContentEncryption("A256GCM") // AES-GCM (256)
-)
-
-// Elliptic curves
-const (
- P256 = "P-256" // P-256 curve (FIPS 186-3)
- P384 = "P-384" // P-384 curve (FIPS 186-3)
- P521 = "P-521" // P-521 curve (FIPS 186-3)
-)
-
-// Key types
-const (
- EC = "EC" // Elliptic curves
- RSA = "RSA" // RSA
- OKP = "OKP" // Ed25519
- OCT = "oct" // Octet sequence
-)
-
-// Ed25519 is the EdDSA signature scheme using SHA-512 and Curve25519.
-const Ed25519 = "Ed25519"
-
-// Default key management, signature, and content encryption algorithms to use if none is specified.
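The PBES2_* constants above are what the earlier EncryptJWK feeds into a go-jose Recipient. A minimal password-based JWE round trip under that assumption (gopkg.in/square/go-jose.v2 on the module path; salt and iteration count are left to the library's defaults rather than the PBKDF2SaltSize/PBKDF2Iterations values used above):

```go
package main

import (
	"fmt"
	"log"

	jose "gopkg.in/square/go-jose.v2"
)

func main() {
	passphrase := []byte("correct horse battery staple")

	// PBES2-HS256+A128KW key management with A256GCM content encryption:
	// the same pairing the deleted EncryptJWK uses.
	enc, err := jose.NewEncrypter(
		jose.A256GCM,
		jose.Recipient{Algorithm: jose.PBES2_HS256_A128KW, Key: passphrase},
		nil,
	)
	if err != nil {
		log.Fatal(err)
	}

	jwe, err := enc.Encrypt([]byte(`{"hello":"jwe"}`))
	if err != nil {
		log.Fatal(err)
	}
	compact, err := jwe.CompactSerialize()
	if err != nil {
		log.Fatal(err)
	}

	// Parse and decrypt with the same passphrase.
	parsed, err := jose.ParseEncrypted(compact)
	if err != nil {
		log.Fatal(err)
	}
	plain, err := parsed.Decrypt(passphrase)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(plain))
}
```

Wiring in fixed PBKDF2 parameters, as EncryptJWK does, is a matter of also populating the Recipient's PBES2Salt and PBES2Count fields.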
-const ( - // Key management algorithms - DefaultECKeyAlgorithm = ECDH_ES - DefaultRSAKeyAlgorithm = RSA_OAEP_256 - DefaultOctKeyAlgorithm = A256GCMKW - // Signature algorithms - DefaultRSASigAlgorithm = RS256 - DefaultOctSigAlgorithm = HS256 - // Content encryption algorithm - DefaultEncAlgorithm = A256GCM -) - -// Default sizes -const ( - DefaultRSASize = 2048 - DefaultOctSize = 32 -) - -// ParseEncrypted parses an encrypted message in compact or full serialization format. -func ParseEncrypted(input string) (*JSONWebEncryption, error) { - return jose.ParseEncrypted(input) -} - -// NewEncrypter creates an appropriate encrypter based on the key type. -func NewEncrypter(enc ContentEncryption, rcpt Recipient, opts *EncrypterOptions) (Encrypter, error) { - return jose.NewEncrypter(enc, rcpt, opts) -} - -// NewNumericDate constructs NumericDate from time.Time value. -func NewNumericDate(t time.Time) *NumericDate { - return jwt.NewNumericDate(t) -} - -// UnixNumericDate returns a NumericDate from the given seconds since the UNIX -// Epoch time. For backward compatibility is s is 0, a nil value will be returned. -func UnixNumericDate(s int64) *NumericDate { - if s == 0 { - return nil - } - out := NumericDate(s) - return &out -} - -// NewSigner creates an appropriate signer based on the key type -func NewSigner(sig SigningKey, opts *SignerOptions) (Signer, error) { - return jose.NewSigner(sig, opts) -} - -// ParseSigned parses token from JWS form. -func ParseSigned(s string) (*JSONWebToken, error) { - return jwt.ParseSigned(s) -} - -// Signed creates builder for signed tokens. -func Signed(sig Signer) Builder { - return jwt.Signed(sig) -} - -// ParseJWS parses a signed message in compact or full serialization format. -func ParseJWS(s string) (*JSONWebSignature, error) { - return jose.ParseSigned(s) -} - -// Determine whether a JSONWebKey is symmetric -func IsSymmetric(k *JSONWebKey) bool { - switch k.Key.(type) { - case []byte: - return true - default: - return false - } -} - -// Determine whether a JSONWebKey is asymmetric -func IsAsymmetric(k *JSONWebKey) bool { - return !IsSymmetric(k) -} - -// TrimPrefix removes the string "square/go-jose" from all errors. -func TrimPrefix(err error) error { - if err == nil { - return nil - } - return errors.New(strings.TrimPrefix(err.Error(), "square/go-jose: ")) -} diff --git a/vendor/go.step.sm/crypto/jose/validate.go b/vendor/go.step.sm/crypto/jose/validate.go deleted file mode 100644 index 981aeb4f..00000000 --- a/vendor/go.step.sm/crypto/jose/validate.go +++ /dev/null @@ -1,199 +0,0 @@ -package jose - -import ( - "crypto/ecdsa" - "crypto/ed25519" - "crypto/rsa" - "crypto/sha1" - "crypto/x509" - "encoding/base64" - "fmt" - "io/ioutil" - - "github.com/pkg/errors" - "go.step.sm/crypto/keyutil" - "golang.org/x/crypto/ssh" -) - -// ValidateSSHPOP validates the given SSH certificate and key for use in an -// sshpop header. 
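Most of the helpers deleted in types.go (NewSigner, Signed, ParseSigned) are thin aliases over go-jose's JWT builder. A sketch of the sign/verify round trip they enable, assuming the same underlying gopkg.in/square/go-jose.v2 packages:

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"
	"log"
	"time"

	jose "gopkg.in/square/go-jose.v2"
	"gopkg.in/square/go-jose.v2/jwt"
)

func main() {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}

	signer, err := jose.NewSigner(jose.SigningKey{Algorithm: jose.ES256, Key: key}, nil)
	if err != nil {
		log.Fatal(err)
	}

	claims := jwt.Claims{
		Subject:  "demo", // illustrative values only
		Issuer:   "example",
		IssuedAt: jwt.NewNumericDate(time.Now()),
	}
	raw, err := jwt.Signed(signer).Claims(claims).CompactSerialize()
	if err != nil {
		log.Fatal(err)
	}

	// Parse the compact JWS and verify it with the public key.
	tok, err := jwt.ParseSigned(raw)
	if err != nil {
		log.Fatal(err)
	}
	var out jwt.Claims
	if err := tok.Claims(&key.PublicKey, &out); err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.Subject)
}
```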
-func ValidateSSHPOP(certFile string, key interface{}) (string, error) { - if certFile == "" { - return "", errors.New("ssh certfile cannot be empty") - } - certBytes, err := ioutil.ReadFile(certFile) - if err != nil { - return "", errors.Wrapf(err, "error reading ssh certificate from %s", certFile) - } - sshpub, _, _, _, err := ssh.ParseAuthorizedKey(certBytes) - if err != nil { - return "", errors.Wrapf(err, "error parsing ssh public key from %s", certFile) - } - cert, ok := sshpub.(*ssh.Certificate) - if !ok { - return "", errors.New("error casting ssh public key to ssh certificate") - } - pubkey, err := keyutil.ExtractKey(cert) - if err != nil { - return "", errors.Wrap(err, "error extracting public key from ssh public key interface") - } - if err = keyutil.VerifyPair(pubkey, key); err != nil { - return "", errors.Wrap(err, "error verifying ssh key pair") - } - - return base64.StdEncoding.EncodeToString(cert.Marshal()), nil -} - -func validateX5(certs []*x509.Certificate, key interface{}) error { - if len(certs) == 0 { - return errors.New("certs cannot be empty") - } - - if err := keyutil.VerifyPair(certs[0].PublicKey, key); err != nil { - return errors.Wrap(err, "error verifying certificate and key") - } - - if certs[0].KeyUsage&x509.KeyUsageDigitalSignature == 0 { - return errors.New("certificate/private-key pair used to sign " + - "token is not approved for digital signature") - } - return nil -} - -// ValidateX5C validates the given certificate chain and key for use as a token -// signer and x5t header. -func ValidateX5C(certs []*x509.Certificate, key interface{}) ([]string, error) { - if err := validateX5(certs, key); err != nil { - return nil, errors.Wrap(err, "ValidateX5C") - } - strs := make([]string, len(certs)) - for i, cert := range certs { - strs[i] = base64.StdEncoding.EncodeToString(cert.Raw) - } - return strs, nil -} - -// ValidateX5T validates the given certificate and key for use as a token signer -// and x5t header. -func ValidateX5T(certs []*x509.Certificate, key interface{}) (string, error) { - if err := validateX5(certs, key); err != nil { - return "", errors.Wrap(err, "ValidateX5T") - } - // x5t is the base64 URL encoded SHA1 thumbprint - // (see https://tools.ietf.org/html/rfc7515#section-4.1.7) - fingerprint := sha1.Sum(certs[0].Raw) - return base64.URLEncoding.EncodeToString(fingerprint[:]), nil -} - -// ValidateJWK validates the given JWK. -func ValidateJWK(jwk *JSONWebKey) error { - switch jwk.Use { - case "sig": - return validateSigJWK(jwk) - case "enc": - return validateEncJWK(jwk) - default: - return validateGeneric(jwk) - } -} - -// validateSigJWK validates the given JWK for signature operations. 
-func validateSigJWK(jwk *JSONWebKey) error { - if jwk.Algorithm == "" { - return errors.New("flag '--alg' is required with the given key") - } - errctx := "the given key" - - switch k := jwk.Key.(type) { - case []byte: - switch jwk.Algorithm { - case HS256, HS384, HS512: - return nil - } - errctx = "kty 'oct'" - case *rsa.PrivateKey, *rsa.PublicKey: - switch jwk.Algorithm { - case RS256, RS384, RS512: - return nil - case PS256, PS384, PS512: - return nil - } - errctx = "kty 'RSA'" - case *ecdsa.PrivateKey: - curve := k.Params().Name - switch { - case jwk.Algorithm == ES256 && curve == P256: - return nil - case jwk.Algorithm == ES384 && curve == P384: - return nil - case jwk.Algorithm == ES512 && curve == P521: - return nil - } - errctx = fmt.Sprintf("kty 'EC' and crv '%s'", curve) - case *ecdsa.PublicKey: - curve := k.Params().Name - switch { - case jwk.Algorithm == ES256 && curve == P256: - return nil - case jwk.Algorithm == ES384 && curve == P384: - return nil - case jwk.Algorithm == ES512 && curve == P521: - return nil - } - errctx = fmt.Sprintf("kty 'EC' and crv '%s'", curve) - case ed25519.PrivateKey, ed25519.PublicKey: - if jwk.Algorithm == EdDSA { - return nil - } - errctx = "kty 'OKP' and crv 'Ed25519'" - } - - return errors.Errorf("alg '%s' is not compatible with %s", jwk.Algorithm, errctx) -} - -// validatesEncJWK validates the given JWK for encryption operations. -func validateEncJWK(jwk *JSONWebKey) error { - alg := KeyAlgorithm(jwk.Algorithm) - var kty string - - switch jwk.Key.(type) { - case []byte: - switch alg { - case DIRECT, A128GCMKW, A192GCMKW, A256GCMKW, A128KW, A192KW, A256KW: - return nil - } - kty = "oct" - case *rsa.PrivateKey, *rsa.PublicKey: - switch alg { - case RSA1_5, RSA_OAEP, RSA_OAEP_256: - return nil - } - kty = "RSA" - case *ecdsa.PrivateKey, *ecdsa.PublicKey: - switch alg { - case ECDH_ES, ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW: - return nil - } - kty = "EC" - case ed25519.PrivateKey, ed25519.PublicKey: - return errors.New("key Ed25519 cannot be used for encryption") - } - - return errors.Errorf("alg '%s' is not compatible with kty '%s'", jwk.Algorithm, kty) -} - -// validateGeneric validates just the supported key types. -func validateGeneric(jwk *JSONWebKey) error { - switch jwk.Key.(type) { - case []byte: - return nil - case *rsa.PrivateKey, *rsa.PublicKey: - return nil - case *ecdsa.PrivateKey, *ecdsa.PublicKey: - return nil - case ed25519.PrivateKey, ed25519.PublicKey: - return nil - } - - return errors.Errorf("unsupported key type '%T'", jwk.Key) -} diff --git a/vendor/go.step.sm/crypto/keyutil/key.go b/vendor/go.step.sm/crypto/keyutil/key.go deleted file mode 100644 index b5d01ec0..00000000 --- a/vendor/go.step.sm/crypto/keyutil/key.go +++ /dev/null @@ -1,214 +0,0 @@ -// Package keyutil implements utilities to generate cryptographic keys. -package keyutil - -import ( - "bytes" - "crypto" - "crypto/ecdsa" - "crypto/ed25519" - "crypto/elliptic" - "crypto/rand" - "crypto/rsa" - "crypto/x509" - "math/big" - - "github.com/pkg/errors" - "golang.org/x/crypto/ssh" -) - -var ( - // DefaultKeyType is the default type of a private key. - DefaultKeyType = "EC" - // DefaultKeySize is the default size (in # of bits) of a private key. - DefaultKeySize = 2048 - // DefaultKeyCurve is the default curve of a private key. - DefaultKeyCurve = "P-256" - // DefaultSignatureAlgorithm is the default signature algorithm used on a - // certificate with the default key type. 
- DefaultSignatureAlgorithm = x509.ECDSAWithSHA256 - // MinRSAKeyBytes is the minimum acceptable size (in bytes) for RSA keys - // signed by the authority. - MinRSAKeyBytes = 256 -) - -// PublicKey extracts a public key from a private key. -func PublicKey(priv interface{}) (crypto.PublicKey, error) { - switch k := priv.(type) { - case *rsa.PrivateKey: - return &k.PublicKey, nil - case *ecdsa.PrivateKey: - return &k.PublicKey, nil - case ed25519.PrivateKey: - return k.Public(), nil - case *rsa.PublicKey, *ecdsa.PublicKey, ed25519.PublicKey: - return k, nil - default: - return nil, errors.Errorf("unrecognized key type: %T", priv) - } -} - -// GenerateDefaultKey generates a public/private key pair using sane defaults -// for key type, curve, and size. -func GenerateDefaultKey() (crypto.PrivateKey, error) { - return GenerateKey(DefaultKeyType, DefaultKeyCurve, DefaultKeySize) -} - -// GenerateDefaultKeyPair generates a public/private key pair using configured -// default values for key type, curve, and size. -func GenerateDefaultKeyPair() (crypto.PublicKey, crypto.PrivateKey, error) { - return GenerateKeyPair(DefaultKeyType, DefaultKeyCurve, DefaultKeySize) -} - -// GenerateKey generates a key of the given type (kty). -func GenerateKey(kty, crv string, size int) (crypto.PrivateKey, error) { - switch kty { - case "EC", "RSA", "OKP": - return GenerateSigner(kty, crv, size) - case "oct": - return generateOctKey(size) - default: - return nil, errors.Errorf("unrecognized key type: %s", kty) - } -} - -// GenerateKeyPair creates an asymmetric crypto keypair using input -// configuration. -func GenerateKeyPair(kty, crv string, size int) (crypto.PublicKey, crypto.PrivateKey, error) { - signer, err := GenerateSigner(kty, crv, size) - if err != nil { - return nil, nil, err - } - return signer.Public(), signer, nil -} - -// GenerateDefaultSigner returns an asymmetric crypto key that implements -// crypto.Signer using sane defaults. -func GenerateDefaultSigner() (crypto.Signer, error) { - return GenerateSigner(DefaultKeyType, DefaultKeyCurve, DefaultKeySize) -} - -// GenerateSigner creates an asymmetric crypto key that implements -// crypto.Signer. -func GenerateSigner(kty, crv string, size int) (crypto.Signer, error) { - switch kty { - case "EC": - return generateECKey(crv) - case "RSA": - return generateRSAKey(size) - case "OKP": - return generateOKPKey(crv) - default: - return nil, errors.Errorf("unrecognized key type: %s", kty) - } -} - -// ExtractKey returns the given public or private key or extracts the public key -// if a x509.Certificate or x509.CertificateRequest is given. -func ExtractKey(in interface{}) (interface{}, error) { - switch k := in.(type) { - case *rsa.PublicKey, *ecdsa.PublicKey, ed25519.PublicKey, *rsa.PrivateKey, *ecdsa.PrivateKey, ed25519.PrivateKey: - return in, nil - case []byte: - return in, nil - case *x509.Certificate: - return k.PublicKey, nil - case *x509.CertificateRequest: - return k.PublicKey, nil - case ssh.CryptoPublicKey: - return k.CryptoPublicKey(), nil - case *ssh.Certificate: - return ExtractKey(k.Key) - default: - return nil, errors.Errorf("cannot extract the key from type '%T'", k) - } -} - -// VerifyPair that the public key matches the given private key. 
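The VerifyPair function below has to compare key material case by case because it predates the Equal methods the standard library gained in Go 1.15. A sketch of the shorter modern equivalent (pairMatches is an illustrative name, not part of the package):

```go
package main

import (
	"crypto"
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"
	"log"
)

// pairMatches reports whether pub is the public half of priv. Since Go 1.15
// the stdlib public key types expose Equal, which replaces the per-type
// field comparisons the deleted VerifyPair spells out.
func pairMatches(pub crypto.PublicKey, priv crypto.Signer) bool {
	type equaler interface{ Equal(crypto.PublicKey) bool }
	p, ok := priv.Public().(equaler)
	return ok && p.Equal(pub)
}

func main() {
	a, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	b, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(pairMatches(&a.PublicKey, a)) // true
	fmt.Println(pairMatches(&b.PublicKey, a)) // false
}
```

RSA and Ed25519 keys satisfy the same interface, so the single assertion covers all three families the deleted code handles.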
-func VerifyPair(pubkey crypto.PublicKey, key crypto.PrivateKey) error { - switch pub := pubkey.(type) { - case *rsa.PublicKey: - priv, ok := key.(*rsa.PrivateKey) - if !ok { - return errors.New("private key type does not match public key type") - } - if pub.N.Cmp(priv.N) != 0 { - return errors.New("private key does not match public key") - } - case *ecdsa.PublicKey: - priv, ok := key.(*ecdsa.PrivateKey) - if !ok { - return errors.New("private key type does not match public key type") - } - if pub.X.Cmp(priv.X) != 0 || pub.Y.Cmp(priv.Y) != 0 { - return errors.New("private key does not match public key") - } - case ed25519.PublicKey: - priv, ok := key.(ed25519.PrivateKey) - if !ok { - return errors.New("private key type does not match public key type") - } - if !bytes.Equal(priv.Public().(ed25519.PublicKey), pub) { - return errors.New("private key does not match public key") - } - default: - return errors.Errorf("unsupported public key type %T", pub) - } - return nil -} - -func generateECKey(crv string) (crypto.Signer, error) { - var c elliptic.Curve - switch crv { - case "P-256": - c = elliptic.P256() - case "P-384": - c = elliptic.P384() - case "P-521": - c = elliptic.P521() - default: - return nil, errors.Errorf("invalid value for argument crv (crv: '%s')", crv) - } - - key, err := ecdsa.GenerateKey(c, rand.Reader) - if err != nil { - return nil, errors.Wrap(err, "error generating EC key") - } - - return key, nil -} - -func generateRSAKey(bits int) (crypto.Signer, error) { - key, err := rsa.GenerateKey(rand.Reader, bits) - if err != nil { - return nil, errors.Wrap(err, "error generating RSA key") - } - - return key, nil -} - -func generateOKPKey(crv string) (crypto.Signer, error) { - switch crv { - case "Ed25519": - _, key, err := ed25519.GenerateKey(rand.Reader) - if err != nil { - return nil, errors.Wrap(err, "error generating Ed25519 key") - } - return key, nil - default: - return nil, errors.Errorf("missing or invalid value for argument 'crv'. "+ - "expected 'Ed25519', but got '%s'", crv) - } -} - -func generateOctKey(size int) (interface{}, error) { - const chars = "abcdefghijklmnopqrstuvwxyz0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ" - result := make([]byte, size) - for i := range result { - num, err := rand.Int(rand.Reader, big.NewInt(int64(len(chars)))) - if err != nil { - return nil, err - } - result[i] = chars[num.Int64()] - } - return result, nil -} diff --git a/vendor/go.step.sm/crypto/pemutil/pem.go b/vendor/go.step.sm/crypto/pemutil/pem.go deleted file mode 100644 index 84d57c20..00000000 --- a/vendor/go.step.sm/crypto/pemutil/pem.go +++ /dev/null @@ -1,590 +0,0 @@ -// Package pemutil implements utilities to parse keys and certificates. It also -// includes a method to serialize keys, X.509 certificates and certificate -// requests to PEM. -package pemutil - -import ( - "bytes" - "crypto" - "crypto/ecdsa" - "crypto/ed25519" - "crypto/elliptic" - "crypto/rand" - "crypto/rsa" - "crypto/x509" - "encoding/pem" - "fmt" - "math/big" - "os" - - "github.com/pkg/errors" - "go.step.sm/crypto/internal/utils" - "go.step.sm/crypto/keyutil" - "golang.org/x/crypto/ssh" -) - -// DefaultEncCipher is the default algorithm used when encrypting sensitive -// data in the PEM format. -var DefaultEncCipher = x509.PEMCipherAES256 - -// PasswordPrompter defines the function signature for the PromptPassword -// callback. -type PasswordPrompter func(s string) ([]byte, error) - -// FileWriter defines the function signature for the WriteFile callback. 
-type FileWriter func(filename string, data []byte, perm os.FileMode) error - -// PromptPassword is a method used to prompt for a password to decode encrypted -// keys. If this method is not defined and the key or password are not passed, -// the parse of the key will fail. -var PromptPassword PasswordPrompter - -// WriteFile is a method used to write a file, by default it uses a wrapper over -// ioutil.WriteFile, but it can be set to a custom method, that for example can -// check if a file exists and prompts the user if it should be overwritten. -var WriteFile FileWriter = utils.WriteFile - -// context add options to the pem methods. -type context struct { - filename string - perm os.FileMode - password []byte - pkcs8 bool - openSSH bool - comment string - firstBlock bool - passwordPrompt string - passwordPrompter PasswordPrompter -} - -// newContext initializes the context with a filename. -func newContext(name string) *context { - return &context{ - filename: name, - perm: 0600, - } -} - -// apply the context options and return the first error if exists. -func (c *context) apply(opts []Options) error { - for _, fn := range opts { - if err := fn(c); err != nil { - return err - } - } - return nil -} - -// Options is the type to add attributes to the context. -type Options func(o *context) error - -// withContext replaces the context with the given one. -func withContext(c *context) Options { - return func(ctx *context) error { - *ctx = *c - return nil - } -} - -// WithFilename is a method that adds the given filename to the context. -func WithFilename(name string) Options { - return func(ctx *context) error { - ctx.filename = name - // Default perm mode if not set - if ctx.perm == 0 { - ctx.perm = 0600 - } - return nil - } -} - -// ToFile is a method that adds the given filename and permissions to the -// context. It is used in the Serialize to store PEM in disk. -func ToFile(name string, perm os.FileMode) Options { - return func(ctx *context) error { - ctx.filename = name - ctx.perm = perm - return nil - } -} - -// WithPassword is a method that adds the given password to the context. -func WithPassword(pass []byte) Options { - return func(ctx *context) error { - ctx.password = pass - return nil - } -} - -// WithPasswordFile is a method that adds the password in a file to the context. -func WithPasswordFile(filename string) Options { - return func(ctx *context) error { - b, err := utils.ReadPasswordFromFile(filename) - if err != nil { - return err - } - ctx.password = b - return nil - } -} - -// WithPasswordPrompt ask the user for a password and adds it to the context. -func WithPasswordPrompt(prompt string, fn PasswordPrompter) Options { - return func(ctx *context) error { - ctx.passwordPrompt = prompt - ctx.passwordPrompter = fn - return nil - } -} - -// WithPKCS8 with v set to true returns an option used in the Serialize method -// to use the PKCS#8 encoding form on the private keys. With v set to false -// default form will be used. -func WithPKCS8(v bool) Options { - return func(ctx *context) error { - ctx.pkcs8 = v - return nil - } -} - -// WithOpenSSH is an option used in the Serialize method to use OpenSSH encoding -// form on the private keys. With v set to false default form will be used. -func WithOpenSSH(v bool) Options { - return func(ctx *context) error { - ctx.openSSH = v - return nil - } -} - -// WithComment is an option used in the Serialize method to add a comment in the -// OpenSSH private keys. WithOpenSSH must be set to true too. 
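The context defaults above (permission mode 0600, PEM output) describe what Serialize ultimately writes to disk. A minimal standard-library sketch producing the same artifact for a PKCS#8 key (key.pem is an illustrative path):

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"encoding/pem"
	"log"
	"os"
)

func main() {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}

	// PKCS#8 DER wrapped in a PEM block: the same shape pemutil's
	// Serialize(..., WithPKCS8(true), ToFile(...)) produces.
	der, err := x509.MarshalPKCS8PrivateKey(key)
	if err != nil {
		log.Fatal(err)
	}
	block := &pem.Block{Type: "PRIVATE KEY", Bytes: der}

	// 0600 matches the context's default permission mode.
	if err := os.WriteFile("key.pem", pem.EncodeToMemory(block), 0o600); err != nil {
		log.Fatal(err)
	}
}
```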
-func WithComment(comment string) Options { - return func(ctx *context) error { - ctx.comment = comment - return nil - } -} - -// WithFirstBlock will avoid failing if a PEM contains more than one block or -// certificate and it will only look at the first. -func WithFirstBlock() Options { - return func(ctx *context) error { - ctx.firstBlock = true - return nil - } -} - -// ReadCertificate returns a *x509.Certificate from the given filename. It -// supports certificates formats PEM and DER. -func ReadCertificate(filename string, opts ...Options) (*x509.Certificate, error) { - b, err := utils.ReadFile(filename) - if err != nil { - return nil, err - } - - // PEM format - if bytes.HasPrefix(b, []byte("-----BEGIN ")) { - var crt interface{} - crt, err = Read(filename, opts...) - if err != nil { - return nil, err - } - switch crt := crt.(type) { - case *x509.Certificate: - return crt, nil - default: - return nil, errors.Errorf("error decoding PEM: file '%s' does not contain a certificate", filename) - } - } - - // DER format (binary) - crt, err := x509.ParseCertificate(b) - return crt, errors.Wrapf(err, "error parsing %s", filename) -} - -// ReadCertificateBundle returns a list of *x509.Certificate from the given -// filename. It supports certificates formats PEM and DER. If a DER-formatted -// file is given only one certificate will be returned. -func ReadCertificateBundle(filename string) ([]*x509.Certificate, error) { - b, err := utils.ReadFile(filename) - if err != nil { - return nil, err - } - - // PEM format - if bytes.HasPrefix(b, []byte("-----BEGIN ")) { - var block *pem.Block - var bundle []*x509.Certificate - for len(b) > 0 { - block, b = pem.Decode(b) - if block == nil { - break - } - if block.Type != "CERTIFICATE" { - return nil, errors.Errorf("error decoding PEM: file '%s' is not a certificate bundle", filename) - } - var crt *x509.Certificate - crt, err = x509.ParseCertificate(block.Bytes) - if err != nil { - return nil, errors.Wrapf(err, "error parsing %s", filename) - } - bundle = append(bundle, crt) - } - if len(b) > 0 { - return nil, errors.Errorf("error decoding PEM: file '%s' contains unexpected data", filename) - } - return bundle, nil - } - - // DER format (binary) - crt, err := x509.ParseCertificate(b) - if err != nil { - return nil, errors.Wrapf(err, "error parsing %s", filename) - } - return []*x509.Certificate{crt}, nil -} - -// ReadCertificateRequest returns a *x509.CertificateRequest from the given -// filename. It supports certificates formats PEM and DER. -func ReadCertificateRequest(filename string) (*x509.CertificateRequest, error) { - b, err := utils.ReadFile(filename) - if err != nil { - return nil, err - } - - // PEM format - if bytes.HasPrefix(b, []byte("-----BEGIN ")) { - csr, err := Parse(b, WithFilename(filename)) - if err != nil { - return nil, err - } - switch csr := csr.(type) { - case *x509.CertificateRequest: - return csr, nil - default: - return nil, errors.Errorf("error decoding PEM: file '%s' does not contain a certificate request", filename) - } - } - - // DER format (binary) - csr, err := x509.ParseCertificateRequest(b) - return csr, errors.Wrapf(err, "error parsing %s", filename) -} - -// Parse returns the key or certificate PEM-encoded in the given bytes. 
-func Parse(b []byte, opts ...Options) (interface{}, error) { - // Populate options - ctx := newContext("PEM") - if err := ctx.apply(opts); err != nil { - return nil, err - } - - block, rest := pem.Decode(b) - switch { - case block == nil: - return nil, errors.Errorf("error decoding %s: not a valid PEM encoded block", ctx.filename) - case len(rest) > 0 && !ctx.firstBlock: - return nil, errors.Errorf("error decoding %s: contains more than one PEM endoded block", ctx.filename) - } - - // PEM is encrypted: ask for password - if block.Headers["Proc-Type"] == "4,ENCRYPTED" || block.Type == "ENCRYPTED PRIVATE KEY" { - var err error - var pass []byte - - if len(ctx.password) > 0 { - pass = ctx.password - } else if ctx.passwordPrompter != nil { - if pass, err = ctx.passwordPrompter(ctx.passwordPrompt); err != nil { - return nil, err - } - } else if PromptPassword != nil { - if pass, err = PromptPassword(fmt.Sprintf("Please enter the password to decrypt %s", ctx.filename)); err != nil { - return nil, err - } - } else { - return nil, errors.Errorf("error decoding %s: key is password protected", ctx.filename) - } - - block.Bytes, err = DecryptPEMBlock(block, pass) - if err != nil { - return nil, errors.Wrapf(err, "error decrypting %s", ctx.filename) - } - } - - switch block.Type { - case "PUBLIC KEY": - pub, err := x509.ParsePKIXPublicKey(block.Bytes) - return pub, errors.Wrapf(err, "error parsing %s", ctx.filename) - case "RSA PRIVATE KEY": - priv, err := x509.ParsePKCS1PrivateKey(block.Bytes) - return priv, errors.Wrapf(err, "error parsing %s", ctx.filename) - case "EC PRIVATE KEY": - priv, err := x509.ParseECPrivateKey(block.Bytes) - return priv, errors.Wrapf(err, "error parsing %s", ctx.filename) - case "PRIVATE KEY", "ENCRYPTED PRIVATE KEY": - priv, err := x509.ParsePKCS8PrivateKey(block.Bytes) - return priv, errors.Wrapf(err, "error parsing %s", ctx.filename) - case "OPENSSH PRIVATE KEY": - priv, err := ParseOpenSSHPrivateKey(b, withContext(ctx)) - return priv, errors.Wrapf(err, "error parsing %s", ctx.filename) - case "CERTIFICATE": - crt, err := x509.ParseCertificate(block.Bytes) - return crt, errors.Wrapf(err, "error parsing %s", ctx.filename) - case "CERTIFICATE REQUEST", "NEW CERTIFICATE REQUEST": - csr, err := x509.ParseCertificateRequest(block.Bytes) - return csr, errors.Wrapf(err, "error parsing %s", ctx.filename) - default: - return nil, errors.Errorf("error decoding %s: contains an unexpected header '%s'", ctx.filename, block.Type) - } -} - -// ParseKey returns the key or the public key of a certificate or certificate -// signing request in the given PEM-encoded bytes. -func ParseKey(b []byte, opts ...Options) (interface{}, error) { - k, err := Parse(b, opts...) - if err != nil { - return nil, err - } - return keyutil.ExtractKey(k) -} - -// Read returns the key or certificate encoded in the given PEM file. -// If the file is encrypted it will ask for a password and it will try -// to decrypt it. -// -// Supported keys algorithms are RSA and EC. Supported standards for private -// keys are PKCS#1, PKCS#8, RFC5915 for EC, and base64-encoded DER for -// certificates and public keys. -func Read(filename string, opts ...Options) (interface{}, error) { - b, err := utils.ReadFile(filename) - if err != nil { - return nil, err - } - - // force given filename - opts = append(opts, WithFilename(filename)) - return Parse(b, opts...) -} - -// Serialize will serialize the input to a PEM formatted block and apply -// modifiers. 
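The deleted Parse above is, at its core, pem.Decode followed by a dispatch on the block type header. A trimmed standalone sketch of that shape (no decryption and only a few of the types Parse handles; key.pem is illustrative):

```go
package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"log"
	"os"
)

// parsePEM decodes a single PEM block and dispatches on its type header,
// a cut-down version of the deleted Parse.
func parsePEM(b []byte) (interface{}, error) {
	block, rest := pem.Decode(b)
	if block == nil {
		return nil, fmt.Errorf("not a valid PEM encoded block")
	}
	if len(rest) > 0 {
		return nil, fmt.Errorf("contains more than one PEM encoded block")
	}
	switch block.Type {
	case "PUBLIC KEY":
		return x509.ParsePKIXPublicKey(block.Bytes)
	case "EC PRIVATE KEY":
		return x509.ParseECPrivateKey(block.Bytes)
	case "PRIVATE KEY":
		return x509.ParsePKCS8PrivateKey(block.Bytes)
	case "CERTIFICATE":
		return x509.ParseCertificate(block.Bytes)
	default:
		return nil, fmt.Errorf("unexpected header %q", block.Type)
	}
}

func main() {
	b, err := os.ReadFile("key.pem") // hypothetical input
	if err != nil {
		log.Fatal(err)
	}
	k, err := parsePEM(b)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("parsed %T\n", k)
}
```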
-func Serialize(in interface{}, opts ...Options) (*pem.Block, error) { - ctx := new(context) - if err := ctx.apply(opts); err != nil { - return nil, err - } - - var p *pem.Block - switch k := in.(type) { - case *rsa.PublicKey, *ecdsa.PublicKey, ed25519.PublicKey: - b, err := x509.MarshalPKIXPublicKey(k) - if err != nil { - return nil, errors.WithStack(err) - } - p = &pem.Block{ - Type: "PUBLIC KEY", - Bytes: b, - } - case *rsa.PrivateKey: - switch { - case ctx.pkcs8: - b, err := x509.MarshalPKCS8PrivateKey(k) - if err != nil { - return nil, err - } - p = &pem.Block{ - Type: "PRIVATE KEY", - Bytes: b, - } - case ctx.openSSH: - return SerializeOpenSSHPrivateKey(k, withContext(ctx)) - default: - p = &pem.Block{ - Type: "RSA PRIVATE KEY", - Bytes: x509.MarshalPKCS1PrivateKey(k), - } - } - case *ecdsa.PrivateKey: - switch { - case ctx.pkcs8: - b, err := x509.MarshalPKCS8PrivateKey(k) - if err != nil { - return nil, err - } - p = &pem.Block{ - Type: "PRIVATE KEY", - Bytes: b, - } - case ctx.openSSH: - return SerializeOpenSSHPrivateKey(k, withContext(ctx)) - default: - b, err := x509.MarshalECPrivateKey(k) - if err != nil { - return nil, errors.Wrap(err, "failed to marshal private key") - } - p = &pem.Block{ - Type: "EC PRIVATE KEY", - Bytes: b, - } - } - case ed25519.PrivateKey: - switch { - case !ctx.pkcs8 && ctx.openSSH: - return SerializeOpenSSHPrivateKey(k, withContext(ctx)) - default: // Ed25519 keys will use pkcs8 by default - ctx.pkcs8 = true - b, err := x509.MarshalPKCS8PrivateKey(k) - if err != nil { - return nil, err - } - p = &pem.Block{ - Type: "PRIVATE KEY", - Bytes: b, - } - } - case *x509.Certificate: - p = &pem.Block{ - Type: "CERTIFICATE", - Bytes: k.Raw, - } - case *x509.CertificateRequest: - p = &pem.Block{ - Type: "CERTIFICATE REQUEST", - Bytes: k.Raw, - } - default: - return nil, errors.Errorf("cannot serialize type '%T', value '%v'", k, k) - } - - // Apply options on the PEM blocks. - if ctx.password != nil { - if _, ok := in.(crypto.PrivateKey); ok && ctx.pkcs8 { - var err error - p, err = EncryptPKCS8PrivateKey(rand.Reader, p.Bytes, ctx.password, DefaultEncCipher) - if err != nil { - return nil, err - } - } else { - var err error - // nolint:staticcheck - p, err = x509.EncryptPEMBlock(rand.Reader, p.Type, p.Bytes, ctx.password, DefaultEncCipher) - if err != nil { - return nil, errors.Wrap(err, "failed to serialize to PEM") - } - } - } - - if ctx.filename != "" { - if err := WriteFile(ctx.filename, pem.EncodeToMemory(p), ctx.perm); err != nil { - return nil, err - } - } - - return p, nil -} - -// ParseDER parses the given DER-encoded bytes and results the public or private -// key encoded. -func ParseDER(b []byte) (interface{}, error) { - // Try private keys - key, err := x509.ParsePKCS8PrivateKey(b) - if err != nil { - if key, err = x509.ParseECPrivateKey(b); err != nil { - key, err = x509.ParsePKCS1PrivateKey(b) - } - } - - // Try public key - if err != nil { - if key, err = x509.ParsePKIXPublicKey(b); err != nil { - if key, err = x509.ParsePKCS1PublicKey(b); err != nil { - return nil, errors.New("error decoding DER; bad format") - } - } - } - - return key, nil -} - -// ParseSSH parses parses a public key from an authorized_keys file used in -// OpenSSH according to the sshd(8) manual page. 
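ParseSSH, next in this file, unmarshals each wire format by hand, partly to run checks such as the RSA exponent bounds. When those checks are not needed, x/crypto/ssh can do the decoding itself via the CryptoPublicKey interface (the same one keyutil.ExtractKey relied on earlier). A self-contained sketch that builds a real authorized_keys line and parses it back:

```go
package main

import (
	"crypto"
	"crypto/ed25519"
	"crypto/rand"
	"fmt"
	"log"

	"golang.org/x/crypto/ssh"
)

func main() {
	// Build a real authorized_keys line so the example is self-contained.
	pub, _, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	sshPub, err := ssh.NewPublicKey(pub)
	if err != nil {
		log.Fatal(err)
	}
	line := ssh.MarshalAuthorizedKey(sshPub)

	// Parse it back, as the deleted ParseSSH does.
	parsed, _, _, _, err := ssh.ParseAuthorizedKey(line)
	if err != nil {
		log.Fatal(err)
	}

	// x/crypto/ssh key types implement ssh.CryptoPublicKey, which returns
	// the underlying crypto.PublicKey without manual ssh.Unmarshal work.
	if ck, ok := parsed.(ssh.CryptoPublicKey); ok {
		var key crypto.PublicKey = ck.CryptoPublicKey()
		fmt.Printf("recovered %T\n", key) // ed25519.PublicKey
	}
}
```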
-func ParseSSH(b []byte) (interface{}, error) { - key, _, _, _, err := ssh.ParseAuthorizedKey(b) - if err != nil { - return nil, errors.Wrap(err, "error parsing OpenSSH key") - } - - if cert, ok := key.(*ssh.Certificate); ok { - key = cert.Key - } - - switch key.Type() { - case ssh.KeyAlgoRSA: - var w struct { - Name string - E *big.Int - N *big.Int - } - if err := ssh.Unmarshal(key.Marshal(), &w); err != nil { - return nil, errors.Wrap(err, "error unmarshaling key") - } - - if w.E.BitLen() > 24 { - return nil, errors.New("error unmarshaling key: exponent too large") - } - e := w.E.Int64() - if e < 3 || e&1 == 0 { - return nil, errors.New("error unmarshaling key: incorrect exponent") - } - - key := new(rsa.PublicKey) - key.E = int(e) - key.N = w.N - return key, nil - - case ssh.KeyAlgoECDSA256, ssh.KeyAlgoECDSA384, ssh.KeyAlgoECDSA521: - var w struct { - Name string - ID string - KeyBytes []byte - } - if err := ssh.Unmarshal(key.Marshal(), &w); err != nil { - return nil, errors.Wrap(err, "error unmarshaling key") - } - - key := new(ecdsa.PublicKey) - switch w.Name { - case ssh.KeyAlgoECDSA256: - key.Curve = elliptic.P256() - case ssh.KeyAlgoECDSA384: - key.Curve = elliptic.P384() - case ssh.KeyAlgoECDSA521: - key.Curve = elliptic.P521() - default: - return nil, errors.Errorf("unsupported ecdsa curve %s", w.Name) - } - - key.X, key.Y = elliptic.Unmarshal(key.Curve, w.KeyBytes) - if key.X == nil || key.Y == nil { - return nil, errors.New("invalid ecdsa curve point") - } - return key, nil - - case ssh.KeyAlgoED25519: - var w struct { - Name string - KeyBytes []byte - } - if err := ssh.Unmarshal(key.Marshal(), &w); err != nil { - return nil, errors.Wrap(err, "error unmarshaling key") - } - return ed25519.PublicKey(w.KeyBytes), nil - - case ssh.KeyAlgoDSA: - return nil, errors.Errorf("step does not support DSA keys") - - default: - return nil, errors.Errorf("unsupported key type %T", key) - } -} diff --git a/vendor/go.step.sm/crypto/pemutil/pkcs8.go b/vendor/go.step.sm/crypto/pemutil/pkcs8.go deleted file mode 100644 index 9c78133f..00000000 --- a/vendor/go.step.sm/crypto/pemutil/pkcs8.go +++ /dev/null @@ -1,319 +0,0 @@ -package pemutil - -import ( - "crypto/aes" - "crypto/cipher" - "crypto/des" - "crypto/sha1" - "crypto/sha256" - "crypto/x509" - "crypto/x509/pkix" - "encoding/asn1" - "encoding/pem" - "hash" - "io" - - "github.com/pkg/errors" - "golang.org/x/crypto/pbkdf2" -) - -// PBKDF2SaltSize is the default size of the salt for PBKDF2, 128-bit salt. -const PBKDF2SaltSize = 16 - -// PBKDF2Iterations is the default number of iterations for PBKDF2, 100k -// iterations. Nist recommends at least 10k, 1Passsword uses 100k. -const PBKDF2Iterations = 100000 - -// pkcs8 reflects an ASN.1, PKCS#8 PrivateKey. See -// ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-8/pkcs-8v1_2.asn -// and RFC 5208. -type pkcs8 struct { - Version int - Algo pkix.AlgorithmIdentifier - PrivateKey []byte - // optional attributes omitted. 
-} - -type publicKeyInfo struct { - Raw asn1.RawContent - Algo pkix.AlgorithmIdentifier - PublicKey asn1.BitString -} - -// Encrypted pkcs8 -// Based on https://github.com/youmark/pkcs8 -// MIT license -type prfParam struct { - Algo asn1.ObjectIdentifier - NullParam asn1.RawValue -} - -type pbkdf2Params struct { - Salt []byte - IterationCount int - PrfParam prfParam `asn1:"optional"` -} - -type pbkdf2Algorithms struct { - Algo asn1.ObjectIdentifier - PBKDF2Params pbkdf2Params -} - -type pbkdf2Encs struct { - EncryAlgo asn1.ObjectIdentifier - IV []byte -} - -type pbes2Params struct { - KeyDerivationFunc pbkdf2Algorithms - EncryptionScheme pbkdf2Encs -} - -type encryptedlAlgorithmIdentifier struct { - Algorithm asn1.ObjectIdentifier - Parameters pbes2Params -} - -type encryptedPrivateKeyInfo struct { - Algo encryptedlAlgorithmIdentifier - PrivateKey []byte -} - -var ( - // key derivation functions - oidPKCS5PBKDF2 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 5, 12} - oidPBES2 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 5, 13} - oidHMACWithSHA256 = asn1.ObjectIdentifier{1, 2, 840, 113549, 2, 9} - - // encryption - oidAES128CBC = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 2} - oidAES196CBC = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 22} - oidAES256CBC = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 42} - oidDESCBC = asn1.ObjectIdentifier{1, 3, 14, 3, 2, 7} - oidD3DESCBC = asn1.ObjectIdentifier{1, 2, 840, 113549, 3, 7} -) - -// rfc1423Algo holds a method for enciphering a PEM block. -type rfc1423Algo struct { - cipher x509.PEMCipher - name string - cipherFunc func(key []byte) (cipher.Block, error) - keySize int - blockSize int - identifier asn1.ObjectIdentifier -} - -// rfc1423Algos holds a slice of the possible ways to encrypt a PEM -// block. The ivSize numbers were taken from the OpenSSL source. -var rfc1423Algos = []rfc1423Algo{{ - cipher: x509.PEMCipherDES, - name: "DES-CBC", - cipherFunc: des.NewCipher, - keySize: 8, - blockSize: des.BlockSize, - identifier: oidDESCBC, -}, { - cipher: x509.PEMCipher3DES, - name: "DES-EDE3-CBC", - cipherFunc: des.NewTripleDESCipher, - keySize: 24, - blockSize: des.BlockSize, - identifier: oidD3DESCBC, -}, { - cipher: x509.PEMCipherAES128, - name: "AES-128-CBC", - cipherFunc: aes.NewCipher, - keySize: 16, - blockSize: aes.BlockSize, - identifier: oidAES128CBC, -}, { - cipher: x509.PEMCipherAES192, - name: "AES-192-CBC", - cipherFunc: aes.NewCipher, - keySize: 24, - blockSize: aes.BlockSize, - identifier: oidAES196CBC, -}, { - cipher: x509.PEMCipherAES256, - name: "AES-256-CBC", - cipherFunc: aes.NewCipher, - keySize: 32, - blockSize: aes.BlockSize, - identifier: oidAES256CBC, -}, -} - -func cipherByKey(key x509.PEMCipher) *rfc1423Algo { - for i := range rfc1423Algos { - alg := &rfc1423Algos[i] - if alg.cipher == key { - return alg - } - } - return nil -} - -// deriveKey uses a key derivation function to stretch the password into a key -// with the number of bits our cipher requires. This algorithm was derived from -// the OpenSSL source. -func (c rfc1423Algo) deriveKey(password, salt []byte, h func() hash.Hash) []byte { - return pbkdf2.Key(password, salt, PBKDF2Iterations, c.keySize, h) -} - -// DecryptPEMBlock takes a password encrypted PEM block and the password used -// to encrypt it and returns a slice of decrypted DER encoded bytes. -// -// If the PEM blocks has the Proc-Type header set to "4,ENCRYPTED" it uses -// x509.DecryptPEMBlock to decrypt the block. 
If not it tries to decrypt the -// block using AES-128-CBC, AES-192-CBC, AES-256-CBC, DES, or 3DES using the -// key derived using PBKDF2 over the given password. -func DecryptPEMBlock(block *pem.Block, password []byte) ([]byte, error) { - if block.Headers["Proc-Type"] == "4,ENCRYPTED" { - // nolint:staticcheck - return x509.DecryptPEMBlock(block, password) - } - - // PKCS#8 header defined in RFC7468 section 11 - if block.Type == "ENCRYPTED PRIVATE KEY" { - return DecryptPKCS8PrivateKey(block.Bytes, password) - } - - return nil, errors.New("unsupported encrypted PEM") -} - -// DecryptPKCS8PrivateKey takes a password encrypted private key using the -// PKCS#8 encoding and returns the decrypted data in PKCS#8 form. -// -// It supports AES-128-CBC, AES-192-CBC, AES-256-CBC, DES, or 3DES encrypted -// data using the key derived with PBKDF2 over the given password. -func DecryptPKCS8PrivateKey(data, password []byte) ([]byte, error) { - var pki encryptedPrivateKeyInfo - if _, err := asn1.Unmarshal(data, &pki); err != nil { - return nil, errors.Wrap(err, "failed to unmarshal private key") - } - - if !pki.Algo.Algorithm.Equal(oidPBES2) { - return nil, errors.New("unsupported encrypted PEM: only PBES2 is supported") - } - - if !pki.Algo.Parameters.KeyDerivationFunc.Algo.Equal(oidPKCS5PBKDF2) { - return nil, errors.New("unsupported encrypted PEM: only PBKDF2 is supported") - } - - encParam := pki.Algo.Parameters.EncryptionScheme - kdfParam := pki.Algo.Parameters.KeyDerivationFunc.PBKDF2Params - - iv := encParam.IV - salt := kdfParam.Salt - iter := kdfParam.IterationCount - - // pbkdf2 hash function - keyHash := sha1.New - if kdfParam.PrfParam.Algo.Equal(oidHMACWithSHA256) { - keyHash = sha256.New - } - - encryptedKey := pki.PrivateKey - var symkey []byte - var block cipher.Block - var err error - switch { - // AES-128-CBC, AES-192-CBC, AES-256-CBC - case encParam.EncryAlgo.Equal(oidAES128CBC): - symkey = pbkdf2.Key(password, salt, iter, 16, keyHash) - block, err = aes.NewCipher(symkey) - case encParam.EncryAlgo.Equal(oidAES196CBC): - symkey = pbkdf2.Key(password, salt, iter, 24, keyHash) - block, err = aes.NewCipher(symkey) - case encParam.EncryAlgo.Equal(oidAES256CBC): - symkey = pbkdf2.Key(password, salt, iter, 32, keyHash) - block, err = aes.NewCipher(symkey) - // DES, TripleDES - case encParam.EncryAlgo.Equal(oidDESCBC): - symkey = pbkdf2.Key(password, salt, iter, 8, keyHash) - block, err = des.NewCipher(symkey) - case encParam.EncryAlgo.Equal(oidD3DESCBC): - symkey = pbkdf2.Key(password, salt, iter, 24, keyHash) - block, err = des.NewTripleDESCipher(symkey) - default: - return nil, errors.Errorf("unsupported encrypted PEM: unknown algorithm %v", encParam.EncryAlgo) - } - if err != nil { - return nil, err - } - - mode := cipher.NewCBCDecrypter(block, iv) - mode.CryptBlocks(encryptedKey, encryptedKey) - - return encryptedKey, nil -} - -// EncryptPKCS8PrivateKey returns a PEM block holding the given PKCS#8 encroded -// private key, encrypted with the specified algorithm and a PBKDF2 derived key -// from the given password. 
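Stripped of the ASN.1 plumbing, DecryptPKCS8PrivateKey reduces to a PBKDF2 key derivation followed by an in-place CBC decrypt. Below is a runnable sketch of that core, with fixed parameters standing in for the salt, iteration count, and IV that the real code recovers from the encryptedPrivateKeyInfo header; the iteration count matches PBKDF2Iterations above, and the plaintext is kept block-aligned to sidestep RFC 1423 padding.

package main

import (
	"bytes"
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"crypto/sha256"
	"fmt"
	"log"

	"golang.org/x/crypto/pbkdf2"
)

// decryptCBC derives an AES-256 key from the password with PBKDF2 and
// CBC-decrypts data in place, mirroring the tail of
// DecryptPKCS8PrivateKey once the parameters are known.
func decryptCBC(data, password, salt, iv []byte, iter int) ([]byte, error) {
	key := pbkdf2.Key(password, salt, iter, 32, sha256.New)
	block, err := aes.NewCipher(key)
	if err != nil {
		return nil, err
	}
	cipher.NewCBCDecrypter(block, iv).CryptBlocks(data, data)
	return data, nil // a real caller would still strip the padding
}

func main() {
	password := []byte("password")
	salt := make([]byte, 16) // PBKDF2SaltSize
	iv := make([]byte, aes.BlockSize)
	if _, err := rand.Read(salt); err != nil {
		log.Fatal(err)
	}
	if _, err := rand.Read(iv); err != nil {
		log.Fatal(err)
	}

	plaintext := []byte("0123456789abcdef0123456789abcdef") // 2 AES blocks
	data := append([]byte(nil), plaintext...)

	// Encrypt with the same derived key, then decrypt it back.
	key := pbkdf2.Key(password, salt, 100000, 32, sha256.New)
	block, err := aes.NewCipher(key)
	if err != nil {
		log.Fatal(err)
	}
	cipher.NewCBCEncrypter(block, iv).CryptBlocks(data, data)

	out, err := decryptCBC(data, password, salt, iv, 100000)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(bytes.Equal(out, plaintext)) // true
}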
-func EncryptPKCS8PrivateKey(rand io.Reader, data, password []byte, alg x509.PEMCipher) (*pem.Block, error) { - ciph := cipherByKey(alg) - if ciph == nil { - return nil, errors.Errorf("failed to encrypt PEM: unknown algorithm %v", alg) - } - - salt := make([]byte, PBKDF2SaltSize) - if _, err := io.ReadFull(rand, salt); err != nil { - return nil, errors.Wrap(err, "failed to generate salt") - } - iv := make([]byte, ciph.blockSize) - if _, err := io.ReadFull(rand, iv); err != nil { - return nil, errors.Wrap(err, "failed to generate IV") - } - - key := ciph.deriveKey(password, salt, sha256.New) - block, err := ciph.cipherFunc(key) - if err != nil { - return nil, errors.Wrap(err, "failed to create cipher") - } - enc := cipher.NewCBCEncrypter(block, iv) - pad := ciph.blockSize - len(data)%ciph.blockSize - encrypted := make([]byte, len(data), len(data)+pad) - // We could save this copy by encrypting all the whole blocks in - // the data separately, but it doesn't seem worth the additional - // code. - copy(encrypted, data) - // See RFC 1423, section 1.1 - for i := 0; i < pad; i++ { - encrypted = append(encrypted, byte(pad)) - } - enc.CryptBlocks(encrypted, encrypted) - - // Build encrypted ans1 data - pki := encryptedPrivateKeyInfo{ - Algo: encryptedlAlgorithmIdentifier{ - Algorithm: oidPBES2, - Parameters: pbes2Params{ - KeyDerivationFunc: pbkdf2Algorithms{ - Algo: oidPKCS5PBKDF2, - PBKDF2Params: pbkdf2Params{ - Salt: salt, - IterationCount: PBKDF2Iterations, - PrfParam: prfParam{ - Algo: oidHMACWithSHA256, - }, - }, - }, - EncryptionScheme: pbkdf2Encs{ - EncryAlgo: ciph.identifier, - IV: iv, - }, - }, - }, - PrivateKey: encrypted, - } - - b, err := asn1.Marshal(pki) - if err != nil { - return nil, errors.Wrap(err, "error marshaling encrypted key") - } - return &pem.Block{ - Type: "ENCRYPTED PRIVATE KEY", - Bytes: b, - }, nil -} diff --git a/vendor/go.step.sm/crypto/pemutil/ssh.go b/vendor/go.step.sm/crypto/pemutil/ssh.go deleted file mode 100644 index 757a9a0d..00000000 --- a/vendor/go.step.sm/crypto/pemutil/ssh.go +++ /dev/null @@ -1,300 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package pemutil - -import ( - "crypto" - "crypto/aes" - "crypto/cipher" - "crypto/ecdsa" - "crypto/ed25519" - "crypto/elliptic" - "crypto/rand" - "crypto/rsa" - "encoding/binary" - "encoding/pem" - "fmt" - "math/big" - - "github.com/pkg/errors" - "go.step.sm/crypto/internal/bcrypt_pbkdf" - "go.step.sm/crypto/randutil" - "golang.org/x/crypto/ssh" -) - -const ( - sshMagic = "openssh-key-v1\x00" - sshDefaultKdf = "bcrypt" - sshDefaultCiphername = "aes256-ctr" - sshDefaultKeyLength = 32 - sshDefaultSaltLength = 16 - sshDefaultRounds = 16 -) - -type openSSHPrivateKey struct { - CipherName string - KdfName string - KdfOpts string - NumKeys uint32 - PubKey []byte - PrivKeyBlock []byte -} - -type openSSHPrivateKeyBlock struct { - Check1 uint32 - Check2 uint32 - Keytype string - Rest []byte `ssh:"rest"` -} - -// ParseOpenSSHPrivateKey parses a private key in OpenSSH PEM format. 
-// -// Implemented based on the documentation at -// https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.key -// -// This method is based on the implementation at -// https://github.com/golang/crypto/blob/master/ssh/keys.go -func ParseOpenSSHPrivateKey(pemBytes []byte, opts ...Options) (crypto.PrivateKey, error) { - // Populate options - ctx := newContext("PEM") - if err := ctx.apply(opts); err != nil { - return nil, err - } - - block, _ := pem.Decode(pemBytes) - if block == nil { - return nil, errors.Errorf("error decoding %s: not a valid PEM encoded block", ctx.filename) - } - - if len(block.Bytes) < len(sshMagic) || string(block.Bytes[:len(sshMagic)]) != sshMagic { - return nil, errors.New("invalid openssh private key format") - } - remaining := block.Bytes[len(sshMagic):] - - var w openSSHPrivateKey - if err := ssh.Unmarshal(remaining, &w); err != nil { - return nil, errors.Wrap(err, "error unmarshaling private key") - } - - var err error - var key crypto.PrivateKey - if w.KdfName != "none" || w.CipherName != "none" { - var password []byte - if len(ctx.password) > 0 { - password = ctx.password - } else if PromptPassword != nil { - password, err = PromptPassword(fmt.Sprintf("Please enter the password to decrypt %s", ctx.filename)) - if err != nil { - return nil, err - } - } else { - return nil, errors.Errorf("error decoding %s: file is password protected", ctx.filename) - } - key, err = ssh.ParseRawPrivateKeyWithPassphrase(pemBytes, password) - if err != nil { - return nil, errors.Wrap(err, "error parsing private key") - } - } else { - key, err = ssh.ParseRawPrivateKey(pemBytes) - if err != nil { - return nil, errors.Wrap(err, "error parsing private key") - } - } - - // Convert *ed25519.PrivateKey to ed25519.PrivateKey: - switch k := key.(type) { - case *ed25519.PrivateKey: - return *k, nil - default: - return k, nil - } -} - -// SerializeOpenSSHPrivateKey serialize a private key in the OpenSSH PEM format. -func SerializeOpenSSHPrivateKey(key crypto.PrivateKey, opts ...Options) (*pem.Block, error) { - ctx := new(context) - if err := ctx.apply(opts); err != nil { - return nil, err - } - - // Random check bytes. - var check uint32 - if err := binary.Read(rand.Reader, binary.BigEndian, &check); err != nil { - return nil, errors.Wrap(err, "error generating random check ") - } - - w := openSSHPrivateKey{ - NumKeys: 1, - } - pk1 := openSSHPrivateKeyBlock{ - Check1: check, - Check2: check, - } - - var blockSize int - if ctx.password == nil { - w.CipherName = "none" - w.KdfName = "none" - blockSize = 8 - } else { - w.CipherName = sshDefaultCiphername - w.KdfName = sshDefaultKdf - blockSize = aes.BlockSize - } - - switch k := key.(type) { - case *rsa.PrivateKey: - E := new(big.Int).SetInt64(int64(k.PublicKey.E)) - // Marshal public key: - // E and N are in reversed order in the public and private key. - pubKey := struct { - KeyType string - E *big.Int - N *big.Int - }{ - ssh.KeyAlgoRSA, - E, k.PublicKey.N, - } - w.PubKey = ssh.Marshal(pubKey) - - // Marshal private key. 
- key := struct { - N *big.Int - E *big.Int - D *big.Int - Iqmp *big.Int - P *big.Int - Q *big.Int - Comment string - }{ - k.PublicKey.N, E, - k.D, k.Precomputed.Qinv, k.Primes[0], k.Primes[1], - ctx.comment, - } - pk1.Keytype = ssh.KeyAlgoRSA - pk1.Rest = ssh.Marshal(key) - case *ecdsa.PrivateKey: - var curve, keyType string - switch k.Curve.Params().Name { - case "P-256": - curve = "nistp256" - keyType = ssh.KeyAlgoECDSA256 - case "P-384": - curve = "nistp384" - keyType = ssh.KeyAlgoECDSA384 - case "P-521": - curve = "nistp521" - keyType = ssh.KeyAlgoECDSA521 - default: - return nil, errors.Errorf("error serializing key: unsupported curve %s", k.Curve.Params().Name) - } - - pub := elliptic.Marshal(k.Curve, k.PublicKey.X, k.PublicKey.Y) - - // Marshal public key. - pubKey := struct { - KeyType string - Curve string - Pub []byte - }{ - keyType, curve, pub, - } - w.PubKey = ssh.Marshal(pubKey) - - // Marshal private key. - key := struct { - Curve string - Pub []byte - D *big.Int - Comment string - }{ - curve, pub, k.D, - ctx.comment, - } - pk1.Keytype = keyType - pk1.Rest = ssh.Marshal(key) - case ed25519.PrivateKey: - pub := make([]byte, ed25519.PublicKeySize) - priv := make([]byte, ed25519.PrivateKeySize) - copy(pub, k[ed25519.PublicKeySize:]) - copy(priv, k) - - // Marshal public key. - pubKey := struct { - KeyType string - Pub []byte - }{ - ssh.KeyAlgoED25519, pub, - } - w.PubKey = ssh.Marshal(pubKey) - - // Marshal private key. - key := struct { - Pub []byte - Priv []byte - Comment string - }{ - pub, priv, - ctx.comment, - } - pk1.Keytype = ssh.KeyAlgoED25519 - pk1.Rest = ssh.Marshal(key) - default: - return nil, errors.Errorf("unsupported key type %T", k) - } - - w.PrivKeyBlock = ssh.Marshal(pk1) - - // Add padding until the private key block matches the block size, - // 16 with AES encryption, 8 without. - for i, l := 0, len(w.PrivKeyBlock); (l+i)%blockSize != 0; i++ { - w.PrivKeyBlock = append(w.PrivKeyBlock, byte(i+1)) - } - - if ctx.password != nil { - // Create encryption key derivation the password. - salt, err := randutil.Salt(sshDefaultSaltLength) - if err != nil { - return nil, err - } - kdfOpts := struct { - Salt []byte - Rounds uint32 - }{salt, sshDefaultRounds} - w.KdfOpts = string(ssh.Marshal(kdfOpts)) - - // Derive key to encrypt the private key block. - k, err := bcrypt_pbkdf.Key(ctx.password, salt, sshDefaultRounds, sshDefaultKeyLength+aes.BlockSize) - if err != nil { - return nil, errors.Wrap(err, "error deriving decryption key") - } - - // Encrypt the private key using the derived secret. - dst := make([]byte, len(w.PrivKeyBlock)) - iv := k[sshDefaultKeyLength : sshDefaultKeyLength+aes.BlockSize] - block, err := aes.NewCipher(k[:sshDefaultKeyLength]) - if err != nil { - return nil, errors.Wrap(err, "error creating cipher") - } - - stream := cipher.NewCTR(block, iv) - stream.XORKeyStream(dst, w.PrivKeyBlock) - w.PrivKeyBlock = dst - } - - b := ssh.Marshal(w) - block := &pem.Block{ - Type: "OPENSSH PRIVATE KEY", - Bytes: append([]byte(sshMagic), b...), - } - - if ctx.filename != "" { - if err := WriteFile(ctx.filename, pem.EncodeToMemory(block), ctx.perm); err != nil { - return nil, err - } - } - - return block, nil -} diff --git a/vendor/go.step.sm/crypto/randutil/random.go b/vendor/go.step.sm/crypto/randutil/random.go deleted file mode 100644 index 642e661a..00000000 --- a/vendor/go.step.sm/crypto/randutil/random.go +++ /dev/null @@ -1,103 +0,0 @@ -// Package randutil provides methods to generate random strings and salts. 
-package randutil
-
-import (
- "crypto/rand"
- "encoding/hex"
- "io"
- "math/big"
-
- "github.com/pkg/errors"
-)
-
-var ascii string
-
-func init() {
- // initialize the characters in ascii
- asciiBytes := make([]byte, 94)
- for i := range asciiBytes {
- asciiBytes[i] = byte(i + 33)
- }
- ascii = string(asciiBytes)
-}
-
-// Salt generates a new random salt of the given size.
-func Salt(size int) ([]byte, error) {
- salt := make([]byte, size)
- _, err := io.ReadFull(rand.Reader, salt)
- if err != nil {
- return nil, errors.Wrap(err, "error generating salt")
- }
- return salt, nil
-}
-
-// String returns a random string of a given length using the characters in
-// the given string. It splits the string on runes to support UTF-8
-// characters.
-func String(length int, chars string) (string, error) {
- result := make([]rune, length)
- runes := []rune(chars)
- x := int64(len(runes))
- for i := range result {
- num, err := rand.Int(rand.Reader, big.NewInt(x))
- if err != nil {
- return "", errors.Wrap(err, "error creating random number")
- }
- result[i] = runes[num.Int64()]
- }
- return string(result), nil
-}
-
-// Hex returns a random string of the given length using the hexadecimal
-// characters in lower case (0-9+a-f).
-func Hex(length int) (string, error) {
- return String(length, "0123456789abcdef")
-}
-
-// Alphanumeric returns a random string of the given length using the 62
-// alphanumeric characters in the POSIX/C locale (a-z+A-Z+0-9).
-func Alphanumeric(length int) (string, error) {
- return String(length, "abcdefghijklmnopqrstuvwxyz0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ")
-}
-
-// ASCII returns a securely generated random ASCII string. It reads random
-// numbers from crypto/rand and searches for printable characters. It will
-// return an error if the system's secure random number generator fails to
-// function correctly, in which case the caller must not continue.
-func ASCII(length int) (string, error) {
- return String(length, ascii)
-}
-
-// Alphabet returns a random string of the given length using the 52
-// alphabetic characters in the POSIX/C locale (a-z+A-Z).
-func Alphabet(length int) (string, error) {
- return String(length, "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
-}
-
-// UUIDv4 returns the string representation of a UUID version 4. Because 6 bits
-// are used to indicate the version 4 and the variant 10, the randomly generated
-// part has 122 bits.
-func UUIDv4() (string, error) {
- var uuid [16]byte
- _, err := io.ReadFull(rand.Reader, uuid[:])
- if err != nil {
- return "", errors.Wrap(err, "error generating uuid")
- }
- uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
- uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
- return encodeUUID(uuid), nil
-}
-
-func encodeUUID(uuid [16]byte) string {
- buf := make([]byte, 36)
- hex.Encode(buf, uuid[:4])
- buf[8] = '-'
- hex.Encode(buf[9:13], uuid[4:6])
- buf[13] = '-'
- hex.Encode(buf[14:18], uuid[6:8])
- buf[18] = '-'
- hex.Encode(buf[19:23], uuid[8:10])
- buf[23] = '-'
- hex.Encode(buf[24:], uuid[10:])
- return string(buf)
-}
diff --git a/vendor/go.step.sm/crypto/sshutil/certificate.go b/vendor/go.step.sm/crypto/sshutil/certificate.go
deleted file mode 100644
index f4d81a80..00000000
--- a/vendor/go.step.sm/crypto/sshutil/certificate.go
+++ /dev/null
@@ -1,107 +0,0 @@
-// Package sshutil implements utilities to build SSH certificates based on JSON
-// templates.
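Since this vendored copy of randutil is being dropped, here is a quick reminder of how the helpers above are consumed by the surrounding code, assuming the module remains importable from its upstream path:

package main

import (
	"fmt"
	"log"

	"go.step.sm/crypto/randutil"
)

func main() {
	// 16-byte salt, the size the OpenSSH bcrypt KDF above asks for.
	salt, err := randutil.Salt(16)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("salt:  %x\n", salt)

	// 32-character printable nonce, as sshutil.CreateCertificate uses.
	nonce, err := randutil.ASCII(32)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("nonce:", nonce)

	id, err := randutil.UUIDv4()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("uuid: ", id)
}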
-package sshutil - -import ( - "crypto/rand" - "encoding/binary" - "encoding/json" - - "github.com/pkg/errors" - "go.step.sm/crypto/randutil" - "golang.org/x/crypto/ssh" -) - -// Certificate is the json representation of ssh.Certificate. -type Certificate struct { - Nonce []byte `json:"nonce"` - Key ssh.PublicKey `json:"-"` - Serial uint64 `json:"serial"` - Type CertType `json:"type"` - KeyID string `json:"keyId"` - Principals []string `json:"principals"` - ValidAfter uint64 `json:"-"` - ValidBefore uint64 `json:"-"` - CriticalOptions map[string]string `json:"criticalOptions"` - Extensions map[string]string `json:"extensions"` - Reserved []byte `json:"reserved"` - SignatureKey ssh.PublicKey `json:"-"` - Signature *ssh.Signature `json:"-"` -} - -// NewCertificate creates a new certificate with the given key after parsing a -// template given in the options. -func NewCertificate(cr CertificateRequest, opts ...Option) (*Certificate, error) { - o, err := new(Options).apply(cr, opts) - if err != nil { - return nil, err - } - - if o.CertBuffer == nil { - return nil, errors.New("certificate template cannot be empty") - } - - // With templates - var cert Certificate - if err := json.NewDecoder(o.CertBuffer).Decode(&cert); err != nil { - return nil, errors.Wrap(err, "error unmarshaling certificate") - } - - // Complete with public key - cert.Key = cr.Key - - return &cert, nil -} - -// GetCertificate return the ssh.Certificate representation of the certificate. -func (c *Certificate) GetCertificate() *ssh.Certificate { - return &ssh.Certificate{ - Nonce: c.Nonce, - Key: c.Key, - Serial: c.Serial, - CertType: uint32(c.Type), - KeyId: c.KeyID, - ValidPrincipals: c.Principals, - ValidAfter: c.ValidAfter, - ValidBefore: c.ValidBefore, - Permissions: ssh.Permissions{ - CriticalOptions: c.CriticalOptions, - Extensions: c.Extensions, - }, - Reserved: c.Reserved, - } -} - -// CreateCertificate signs the given certificate with the given signer. If the -// certificate does not have a nonce or a serial, it will create random ones. -func CreateCertificate(cert *ssh.Certificate, signer ssh.Signer) (*ssh.Certificate, error) { - if len(cert.Nonce) == 0 { - nonce, err := randutil.ASCII(32) - if err != nil { - return nil, err - } - cert.Nonce = []byte(nonce) - } - - if cert.Serial == 0 { - if err := binary.Read(rand.Reader, binary.BigEndian, &cert.Serial); err != nil { - return nil, errors.Wrap(err, "error reading random number") - } - } - - // Set signer public key. - cert.SignatureKey = signer.PublicKey() - - // Get bytes for signing trailing the signature length. - data := cert.Marshal() - data = data[:len(data)-4] - - // Sign the certificate. - sig, err := signer.Sign(rand.Reader, data) - if err != nil { - return nil, errors.Wrap(err, "error signing certificate") - } - cert.Signature = sig - - return cert, nil -} diff --git a/vendor/go.step.sm/crypto/sshutil/certificate_request.go b/vendor/go.step.sm/crypto/sshutil/certificate_request.go deleted file mode 100644 index 9f4645fe..00000000 --- a/vendor/go.step.sm/crypto/sshutil/certificate_request.go +++ /dev/null @@ -1,17 +0,0 @@ -package sshutil - -import "golang.org/x/crypto/ssh" - -// CertificateRequest simulates a certificate request for SSH. SSH does not have -// a concept of certificate requests, but the CA accepts the key and some other -// parameters in the requests that are part of the certificate. This struct will -// hold these parameters. 
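sshutil.CreateCertificate above signs the marshaled certificate body by hand; golang.org/x/crypto/ssh can do the equivalent work with Certificate.SignCert, which fills in the Nonce, SignatureKey, and Signature fields. A sketch with a throwaway Ed25519 CA; the key ID and principals are made up:

package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"fmt"
	"log"

	"golang.org/x/crypto/ssh"
)

func main() {
	// CA key and the user key being certified.
	_, caPriv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	userPub, _, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		log.Fatal(err)
	}

	caSigner, err := ssh.NewSignerFromKey(caPriv)
	if err != nil {
		log.Fatal(err)
	}
	sshPub, err := ssh.NewPublicKey(userPub)
	if err != nil {
		log.Fatal(err)
	}

	cert := &ssh.Certificate{
		Key:             sshPub,
		Serial:          1,
		CertType:        ssh.UserCert,
		KeyId:           "jane@example.com",
		ValidPrincipals: []string{"jane"},
		ValidAfter:      0,
		ValidBefore:     ssh.CertTimeInfinity,
		Permissions: ssh.Permissions{
			Extensions: map[string]string{"permit-pty": ""},
		},
	}

	// SignCert fills in Nonce, SignatureKey, and Signature, the same
	// fields sshutil.CreateCertificate sets by hand above.
	if err := cert.SignCert(rand.Reader, caSigner); err != nil {
		log.Fatal(err)
	}
	fmt.Println(cert.Type(), "signed for", cert.KeyId)
}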
-//
-// CertificateRequest object will be used in the templates to set parameters
-// passed with the API instead of the validated ones.
-type CertificateRequest struct {
- Key ssh.PublicKey
- Type string
- KeyID string
- Principals []string
-}
diff --git a/vendor/go.step.sm/crypto/sshutil/options.go b/vendor/go.step.sm/crypto/sshutil/options.go
deleted file mode 100644
index 9bcdbc5b..00000000
--- a/vendor/go.step.sm/crypto/sshutil/options.go
+++ /dev/null
@@ -1,94 +0,0 @@
-package sshutil
-
-import (
- "bytes"
- "encoding/base64"
- "io/ioutil"
- "text/template"
-
- "github.com/Masterminds/sprig/v3"
- "github.com/pkg/errors"
- "go.step.sm/crypto/internal/step"
-)
-
-// getFuncMap returns the list of functions provided by sprig. It changes the
-// function "fail" to set the given string, this way we can report template
-// errors directly to the template without having the wrapper that text/template
-// adds.
-func getFuncMap(failMessage *string) template.FuncMap {
- m := sprig.TxtFuncMap()
- m["fail"] = func(msg string) (string, error) {
- *failMessage = msg
- return "", errors.New(msg)
- }
- return m
-}
-
-// Options are the options that can be passed to NewCertificate.
-type Options struct {
- CertBuffer *bytes.Buffer
-}
-
-func (o *Options) apply(cr CertificateRequest, opts []Option) (*Options, error) {
- for _, fn := range opts {
- if err := fn(cr, o); err != nil {
- return o, err
- }
- }
- return o, nil
-}
-
-// Option is the type used as a variadic argument in NewCertificate.
-type Option func(cr CertificateRequest, o *Options) error
-
-// WithTemplate is an option that executes the given template text with the
-// given data.
-func WithTemplate(text string, data TemplateData) Option {
- return func(cr CertificateRequest, o *Options) error {
- terr := new(TemplateError)
- funcMap := getFuncMap(&terr.Message)
-
- tmpl, err := template.New("template").Funcs(funcMap).Parse(text)
- if err != nil {
- return errors.Wrapf(err, "error parsing template")
- }
-
- buf := new(bytes.Buffer)
- data.SetCertificateRequest(cr)
- if err := tmpl.Execute(buf, data); err != nil {
- if terr.Message != "" {
- return terr
- }
- return errors.Wrapf(err, "error executing template")
- }
- o.CertBuffer = buf
- return nil
- }
-}
-
-// WithTemplateBase64 is an option that executes the given template base64
-// string with the given data.
-func WithTemplateBase64(s string, data TemplateData) Option {
- return func(cr CertificateRequest, o *Options) error {
- b, err := base64.StdEncoding.DecodeString(s)
- if err != nil {
- return errors.Wrap(err, "error decoding template")
- }
- fn := WithTemplate(string(b), data)
- return fn(cr, o)
- }
-}
-
-// WithTemplateFile is an option that reads the template file and executes it
-// with the given data.
-func WithTemplateFile(path string, data TemplateData) Option {
- return func(cr CertificateRequest, o *Options) error {
- filename := step.Abs(path)
- b, err := ioutil.ReadFile(filename)
- if err != nil {
- return errors.Wrapf(err, "error reading %s", path)
- }
- fn := WithTemplate(string(b), data)
- return fn(cr, o)
- }
-}
diff --git a/vendor/go.step.sm/crypto/sshutil/templates.go b/vendor/go.step.sm/crypto/sshutil/templates.go
deleted file mode 100644
index 84a05ec4..00000000
--- a/vendor/go.step.sm/crypto/sshutil/templates.go
+++ /dev/null
@@ -1,195 +0,0 @@
-package sshutil
-
-// Variables used to hold template data.
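The getFuncMap/WithTemplate pair above exists mostly for one trick: a "fail" template function that records its message so the caller can surface it without the "template: ..." wrapper that text/template adds. A stdlib-only sketch of that trick:

package main

import (
	"bytes"
	"errors"
	"fmt"
	"log"
	"text/template"
)

func main() {
	// Mirror of getFuncMap above: "fail" stores the message before
	// returning an error, so the raw message can be reported directly.
	var failMessage string
	funcs := template.FuncMap{
		"fail": func(msg string) (string, error) {
			failMessage = msg
			return "", errors.New(msg)
		},
	}

	tmpl, err := template.New("cert").Funcs(funcs).Parse(
		`{{ if eq .Type "" }}{{ fail "type is required" }}{{ end }}{"type":"{{ .Type }}"}`)
	if err != nil {
		log.Fatal(err)
	}

	buf := new(bytes.Buffer)
	data := map[string]string{"Type": ""} // empty Type triggers fail
	if err := tmpl.Execute(buf, data); err != nil {
		if failMessage != "" {
			log.Fatal(failMessage) // the template's own message
		}
		log.Fatal(err)
	}
	fmt.Println(buf.String())
}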
-const ( - TypeKey = "Type" - KeyIDKey = "KeyID" - PrincipalsKey = "Principals" - ExtensionsKey = "Extensions" - CriticalOptionsKey = "CriticalOptions" - TokenKey = "Token" - InsecureKey = "Insecure" - UserKey = "User" - CertificateRequestKey = "CR" -) - -// TemplateError represents an error in a template produced by the fail -// function. -type TemplateError struct { - Message string -} - -// Error implements the error interface and returns the error string when a -// template executes the `fail "message"` function. -func (e *TemplateError) Error() string { - return e.Message -} - -// TemplateData is an alias for map[string]interface{}. It represents the data -// passed to the templates. -type TemplateData map[string]interface{} - -// CreateTemplateData returns a TemplateData with the given certificate type, -// key id, principals, and the default extensions. -func CreateTemplateData(ct CertType, keyID string, principals []string) TemplateData { - return TemplateData{ - TypeKey: ct.String(), - KeyIDKey: keyID, - PrincipalsKey: principals, - ExtensionsKey: DefaultExtensions(ct), - } -} - -// DefaultExtensions returns the default extensions set in an SSH certificate. -func DefaultExtensions(ct CertType) map[string]interface{} { - switch ct { - case UserCert: - return map[string]interface{}{ - "permit-X11-forwarding": "", - "permit-agent-forwarding": "", - "permit-port-forwarding": "", - "permit-pty": "", - "permit-user-rc": "", - } - default: - return nil - } -} - -// NewTemplateData creates a new map for templates data. -func NewTemplateData() TemplateData { - return TemplateData{} -} - -// AddExtension adds one extension to the templates data. -func (t TemplateData) AddExtension(key, value string) { - if m, ok := t[ExtensionsKey].(map[string]interface{}); ok { - m[key] = value - } else { - t[ExtensionsKey] = map[string]interface{}{ - key: value, - } - } -} - -// AddCriticalOption adds one critical option to the templates data. -func (t TemplateData) AddCriticalOption(key, value string) { - if m, ok := t[CriticalOptionsKey].(map[string]interface{}); ok { - m[key] = value - } else { - t[CriticalOptionsKey] = map[string]interface{}{ - key: value, - } - } -} - -// Set sets a key-value pair in the template data. -func (t TemplateData) Set(key string, v interface{}) { - t[key] = v -} - -// SetInsecure sets a key-value pair in the insecure template data. -func (t TemplateData) SetInsecure(key string, v interface{}) { - if m, ok := t[InsecureKey].(TemplateData); ok { - m[key] = v - } else { - t[InsecureKey] = TemplateData{key: v} - } -} - -// SetType sets the certificate type in the template data. -func (t TemplateData) SetType(typ CertType) { - t.Set(TypeKey, typ.String()) -} - -// SetKeyID sets the certificate key id in the template data. -func (t TemplateData) SetKeyID(id string) { - t.Set(KeyIDKey, id) -} - -// SetPrincipals sets the certificate principals in the template data. -func (t TemplateData) SetPrincipals(p []string) { - t.Set(PrincipalsKey, p) -} - -// SetExtensions sets the certificate extensions in the template data. -func (t TemplateData) SetExtensions(e map[string]interface{}) { - t.Set(ExtensionsKey, e) -} - -// SetCriticalOptions sets the certificate critical options in the template -// data. -func (t TemplateData) SetCriticalOptions(o map[string]interface{}) { - t.Set(CriticalOptionsKey, o) -} - -// SetToken sets the given token in the template data. 
-func (t TemplateData) SetToken(v interface{}) { - t.Set(TokenKey, v) -} - -// SetUserData sets the given user provided object in the insecure template -// data. -func (t TemplateData) SetUserData(v interface{}) { - t.SetInsecure(UserKey, v) -} - -// SetCertificateRequest sets the simulated ssh certificate request the insecure -// template data. -func (t TemplateData) SetCertificateRequest(cr CertificateRequest) { - t.SetInsecure(CertificateRequestKey, cr) -} - -// DefaultTemplate is the default template for an SSH certificate. -const DefaultTemplate = `{ - "type": {{ toJson .Type }}, - "keyId": {{ toJson .KeyID }}, - "principals": {{ toJson .Principals }}, - "extensions": {{ toJson .Extensions }}, - "criticalOptions": {{ toJson .CriticalOptions }} -}` - -// DefaultAdminTemplate is the template used by an admin user in a OIDC -// provisioner. -const DefaultAdminTemplate = `{ - "type": {{ toJson .Insecure.CR.Type }}, - "keyId": {{ toJson .Insecure.CR.KeyID }}, - "principals": {{ toJson .Insecure.CR.Principals }} -{{- if eq .Insecure.CR.Type "user" }} - , "extensions": {{ toJson .Extensions }}, - "criticalOptions": {{ toJson .CriticalOptions }} -{{- end }} -}` - -// DefaultIIDTemplate is the default template for IID provisioners. By default -// certificate type will be set always to host, key id to the instance id. -// Principals will be only enforced by the provisioner if disableCustomSANs is -// set to true. -const DefaultIIDTemplate = `{ - "type": {{ toJson .Type }}, - "keyId": {{ toJson .KeyID }}, -{{- if .Insecure.CR.Principals }} - "principals": {{ toJson .Insecure.CR.Principals }}, -{{- else }} - "principals": {{ toJson .Principals }}, -{{- end }} - "extensions": {{ toJson .Extensions }} -}` - -// CertificateRequestTemplate is the template used for provisioners that accepts -// any certificate request. The provisioner must validate that type, keyId and -// principals are passed in the request. -const CertificateRequestTemplate = `{ - "type": {{ toJson .Insecure.CR.Type }}, - "keyId": {{ toJson .Insecure.CR.KeyID }}, - "principals": {{ toJson .Insecure.CR.Principals }} -{{- if eq .Insecure.CR.Type "user" }} - , "extensions": { - "permit-X11-forwarding": "", - "permit-agent-forwarding": "", - "permit-port-forwarding": "", - "permit-pty": "", - "permit-user-rc": "" - } -{{- end }} -}` diff --git a/vendor/go.step.sm/crypto/sshutil/types.go b/vendor/go.step.sm/crypto/sshutil/types.go deleted file mode 100644 index 271a070b..00000000 --- a/vendor/go.step.sm/crypto/sshutil/types.go +++ /dev/null @@ -1,74 +0,0 @@ -package sshutil - -import ( - "encoding/json" - "strings" - - "github.com/pkg/errors" - "golang.org/x/crypto/ssh" -) - -// CertType defines the certificate type, it can be a user or a host -// certificate. -type CertType uint32 - -const ( - // UserCert defines a user certificate. - UserCert CertType = ssh.UserCert - - // HostCert defines a host certificate. - HostCert CertType = ssh.HostCert -) - -const ( - userString = "user" - hostString = "host" -) - -// CertTypeFromString returns the CertType for the string "user" and "host". -func CertTypeFromString(s string) (CertType, error) { - switch strings.ToLower(s) { - case userString: - return UserCert, nil - case hostString: - return HostCert, nil - default: - return 0, errors.Errorf("unknown certificate type '%s'", s) - } -} - -// String returns "user" for user certificates and "host" for host certificates. -// It will return the empty string for any other value. 
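The default templates above render with sprig's toJson; a one-line stand-in is enough to try them outside step. A sketch that renders a cut-down user template, with made-up data values:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"log"
	"text/template"
)

const userTemplate = `{
  "type": {{ toJson .Type }},
  "keyId": {{ toJson .KeyID }},
  "principals": {{ toJson .Principals }}
}`

func main() {
	// "toJson" normally comes from sprig; this stand-in is enough to
	// render the default templates shown above.
	funcs := template.FuncMap{
		"toJson": func(v interface{}) (string, error) {
			b, err := json.Marshal(v)
			return string(b), err
		},
	}

	data := map[string]interface{}{
		"Type":       "user",
		"KeyID":      "jane@example.com",
		"Principals": []string{"jane"},
	}

	tmpl, err := template.New("ssh").Funcs(funcs).Parse(userTemplate)
	if err != nil {
		log.Fatal(err)
	}
	buf := new(bytes.Buffer)
	if err := tmpl.Execute(buf, data); err != nil {
		log.Fatal(err)
	}
	fmt.Println(buf.String()) // valid JSON, ready for json.Decoder
}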
-func (c CertType) String() string { - switch c { - case UserCert: - return userString - case HostCert: - return hostString - default: - return "" - } -} - -// MarshalJSON implements the json.Marshaler interface for CertType. UserCert -// will be marshaled as the string "user" and HostCert as "host". -func (c CertType) MarshalJSON() ([]byte, error) { - if s := c.String(); s != "" { - return []byte(`"` + s + `"`), nil - } - return nil, errors.Errorf("unknown certificate type %d", c) -} - -// UnmarshalJSON implements the json.Unmarshaler interface for CertType. -func (c *CertType) UnmarshalJSON(data []byte) error { - var s string - if err := json.Unmarshal(data, &s); err != nil { - return errors.Wrap(err, "error unmarshaling certificate type") - } - certType, err := CertTypeFromString(s) - if err != nil { - return errors.Errorf("error unmarshaling '%s' as a certificate type", s) - } - *c = certType - return nil -} diff --git a/vendor/go.step.sm/crypto/x509util/algorithms.go b/vendor/go.step.sm/crypto/x509util/algorithms.go deleted file mode 100644 index 59267ba8..00000000 --- a/vendor/go.step.sm/crypto/x509util/algorithms.go +++ /dev/null @@ -1,85 +0,0 @@ -package x509util - -import ( - "crypto/x509" - "strings" - - "github.com/pkg/errors" -) - -// List of signature algorithms. -const ( - MD2WithRSA = "MD2-RSA" - MD5WithRSA = "MD5-RSA" - SHA1WithRSA = "SHA1-RSA" - SHA256WithRSA = "SHA256-RSA" - SHA384WithRSA = "SHA384-RSA" - SHA512WithRSA = "SHA512-RSA" - DSAWithSHA1 = "DSA-SHA1" - DSAWithSHA256 = "DSA-SHA256" - ECDSAWithSHA1 = "ECDSA-SHA1" - ECDSAWithSHA256 = "ECDSA-SHA256" - ECDSAWithSHA384 = "ECDSA-SHA384" - ECDSAWithSHA512 = "ECDSA-SHA512" - SHA256WithRSAPSS = "SHA256-RSAPSS" - SHA384WithRSAPSS = "SHA384-RSAPSS" - SHA512WithRSAPSS = "SHA512-RSAPSS" - PureEd25519 = "Ed25519" -) - -var signatureAlgorithmMapping = []struct { - name string - value x509.SignatureAlgorithm -}{ - {"", x509.UnknownSignatureAlgorithm}, - {MD2WithRSA, x509.MD2WithRSA}, - {MD5WithRSA, x509.MD5WithRSA}, - {SHA1WithRSA, x509.SHA1WithRSA}, - {SHA256WithRSA, x509.SHA256WithRSA}, - {SHA384WithRSA, x509.SHA384WithRSA}, - {SHA512WithRSA, x509.SHA512WithRSA}, - {DSAWithSHA1, x509.DSAWithSHA1}, - {DSAWithSHA256, x509.DSAWithSHA256}, - {ECDSAWithSHA1, x509.ECDSAWithSHA1}, - {ECDSAWithSHA256, x509.ECDSAWithSHA256}, - {ECDSAWithSHA384, x509.ECDSAWithSHA384}, - {ECDSAWithSHA512, x509.ECDSAWithSHA512}, - {SHA256WithRSAPSS, x509.SHA256WithRSAPSS}, - {SHA384WithRSAPSS, x509.SHA384WithRSAPSS}, - {SHA512WithRSAPSS, x509.SHA512WithRSAPSS}, - {PureEd25519, x509.PureEd25519}, -} - -// SignatureAlgorithm is the JSON representation of the X509 signature algorithms -type SignatureAlgorithm x509.SignatureAlgorithm - -// Set sets the signature algorithm in the given certificate. -func (s SignatureAlgorithm) Set(c *x509.Certificate) { - c.SignatureAlgorithm = x509.SignatureAlgorithm(s) -} - -// MarshalJSON implements the json.Marshaller interface. -func (s SignatureAlgorithm) MarshalJSON() ([]byte, error) { - if s == SignatureAlgorithm(x509.UnknownSignatureAlgorithm) { - return []byte(`""`), nil - } - return []byte(`"` + x509.SignatureAlgorithm(s).String() + `"`), nil -} - -// UnmarshalJSON implements the json.Unmarshal interface and unmarshals and -// validates a string as a SignatureAlgorithm. 
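Both CertType here and SignatureAlgorithm below follow the same string-backed enum pattern: a numeric type whose JSON form is a name. A self-contained distillation of the pattern; the constant values match ssh.UserCert and ssh.HostCert:

package main

import (
	"encoding/json"
	"fmt"
	"log"
)

// CertType mirrors the sshutil type above: a numeric enum that
// marshals to and from the strings "user" and "host".
type CertType uint32

const (
	UserCert CertType = 1 // ssh.UserCert
	HostCert CertType = 2 // ssh.HostCert
)

func (c CertType) MarshalJSON() ([]byte, error) {
	switch c {
	case UserCert:
		return []byte(`"user"`), nil
	case HostCert:
		return []byte(`"host"`), nil
	default:
		return nil, fmt.Errorf("unknown certificate type %d", c)
	}
}

func (c *CertType) UnmarshalJSON(data []byte) error {
	var s string
	if err := json.Unmarshal(data, &s); err != nil {
		return err
	}
	switch s {
	case "user":
		*c = UserCert
	case "host":
		*c = HostCert
	default:
		return fmt.Errorf("unknown certificate type %q", s)
	}
	return nil
}

func main() {
	var ct CertType
	if err := json.Unmarshal([]byte(`"host"`), &ct); err != nil {
		log.Fatal(err)
	}
	b, _ := json.Marshal(ct)
	fmt.Println(ct, string(b)) // 2 "host"
}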
-func (s *SignatureAlgorithm) UnmarshalJSON(data []byte) error { - name, err := unmarshalString(data) - if err != nil { - return err - } - - for _, m := range signatureAlgorithmMapping { - if strings.EqualFold(name, m.name) { - *s = SignatureAlgorithm(m.value) - return nil - } - } - - return errors.Errorf("unsupported signatureAlgorithm %s", name) -} diff --git a/vendor/go.step.sm/crypto/x509util/certificate.go b/vendor/go.step.sm/crypto/x509util/certificate.go deleted file mode 100644 index 555d0b63..00000000 --- a/vendor/go.step.sm/crypto/x509util/certificate.go +++ /dev/null @@ -1,172 +0,0 @@ -// Package x509util implements utilities to build X.509 certificates based on -// JSON templates. -package x509util - -import ( - "crypto" - "crypto/rand" - "crypto/x509" - "encoding/json" - - "github.com/pkg/errors" -) - -// Certificate is the JSON representation of a X.509 certificate. It is used to -// build a certificate from a template. -type Certificate struct { - Version int `json:"version"` - Subject Subject `json:"subject"` - Issuer Issuer `json:"issuer"` - SerialNumber SerialNumber `json:"serialNumber"` - DNSNames MultiString `json:"dnsNames"` - EmailAddresses MultiString `json:"emailAddresses"` - IPAddresses MultiIP `json:"ipAddresses"` - URIs MultiURL `json:"uris"` - SANs []SubjectAlternativeName `json:"sans"` - Extensions []Extension `json:"extensions"` - KeyUsage KeyUsage `json:"keyUsage"` - ExtKeyUsage ExtKeyUsage `json:"extKeyUsage"` - UnknownExtKeyUsage UnknownExtKeyUsage `json:"unknownExtKeyUsage"` - SubjectKeyID SubjectKeyID `json:"subjectKeyId"` - AuthorityKeyID AuthorityKeyID `json:"authorityKeyId"` - OCSPServer OCSPServer `json:"ocspServer"` - IssuingCertificateURL IssuingCertificateURL `json:"issuingCertificateURL"` - CRLDistributionPoints CRLDistributionPoints `json:"crlDistributionPoints"` - PolicyIdentifiers PolicyIdentifiers `json:"policyIdentifiers"` - BasicConstraints *BasicConstraints `json:"basicConstraints"` - NameConstraints *NameConstraints `json:"nameConstraints"` - SignatureAlgorithm SignatureAlgorithm `json:"signatureAlgorithm"` - PublicKeyAlgorithm x509.PublicKeyAlgorithm `json:"-"` - PublicKey interface{} `json:"-"` -} - -// NewCertificate creates a new Certificate from an x509.Certificate request and -// some template options. -func NewCertificate(cr *x509.CertificateRequest, opts ...Option) (*Certificate, error) { - if err := cr.CheckSignature(); err != nil { - return nil, errors.Wrap(err, "error validating certificate request") - } - - o, err := new(Options).apply(cr, opts) - if err != nil { - return nil, err - } - - // If no template use only the certificate request with the default leaf key - // usages. - if o.CertBuffer == nil { - return newCertificateRequest(cr).GetLeafCertificate(), nil - } - - // With templates - var cert Certificate - if err := json.NewDecoder(o.CertBuffer).Decode(&cert); err != nil { - return nil, errors.Wrap(err, "error unmarshaling certificate") - } - - // Complete with certificate request - cert.PublicKey = cr.PublicKey - cert.PublicKeyAlgorithm = cr.PublicKeyAlgorithm - - return &cert, nil -} - -// GetCertificate returns the x509.Certificate representation of the -// certificate. -func (c *Certificate) GetCertificate() *x509.Certificate { - cert := new(x509.Certificate) - // Unparsed data - cert.PublicKey = c.PublicKey - cert.PublicKeyAlgorithm = c.PublicKeyAlgorithm - - // SANs are directly converted. 
- cert.DNSNames = c.DNSNames - cert.EmailAddresses = c.EmailAddresses - cert.IPAddresses = c.IPAddresses - cert.URIs = c.URIs - - // SANs slice. - for _, san := range c.SANs { - san.Set(cert) - } - - // Subject. - c.Subject.Set(cert) - - // Defined extensions. - c.KeyUsage.Set(cert) - c.ExtKeyUsage.Set(cert) - c.UnknownExtKeyUsage.Set(cert) - c.SubjectKeyID.Set(cert) - c.AuthorityKeyID.Set(cert) - c.OCSPServer.Set(cert) - c.IssuingCertificateURL.Set(cert) - c.CRLDistributionPoints.Set(cert) - c.PolicyIdentifiers.Set(cert) - if c.BasicConstraints != nil { - c.BasicConstraints.Set(cert) - } - if c.NameConstraints != nil { - c.NameConstraints.Set(cert) - } - - // Custom Extensions. - for _, e := range c.Extensions { - e.Set(cert) - } - - // Others. - c.SerialNumber.Set(cert) - c.SignatureAlgorithm.Set(cert) - - return cert -} - -// CreateCertificate signs the given template using the parent private key and -// returns it. -func CreateCertificate(template, parent *x509.Certificate, pub crypto.PublicKey, signer crypto.Signer) (*x509.Certificate, error) { - var err error - // Complete certificate. - if template.SerialNumber == nil { - if template.SerialNumber, err = generateSerialNumber(); err != nil { - return nil, err - } - } - if template.SubjectKeyId == nil { - if template.SubjectKeyId, err = generateSubjectKeyID(pub); err != nil { - return nil, err - } - } - - // Sign certificate - asn1Data, err := x509.CreateCertificate(rand.Reader, template, parent, pub, signer) - if err != nil { - return nil, errors.Wrap(err, "error creating certificate") - } - cert, err := x509.ParseCertificate(asn1Data) - if err != nil { - return nil, errors.Wrap(err, "error parsing certificate") - } - return cert, nil -} - -// CreateCertificateTemplate creates a X.509 certificate template from the given certificate request. -func CreateCertificateTemplate(cr *x509.CertificateRequest) (*x509.Certificate, error) { - if err := cr.CheckSignature(); err != nil { - return nil, errors.Wrap(err, "error validating certificate request") - } - // Set SubjectAltName extension as critical if Subject is empty. - fixSubjectAltName(cr) - - return &x509.Certificate{ - Subject: cr.Subject, - DNSNames: cr.DNSNames, - EmailAddresses: cr.EmailAddresses, - IPAddresses: cr.IPAddresses, - URIs: cr.URIs, - ExtraExtensions: cr.Extensions, - PublicKey: cr.PublicKey, - PublicKeyAlgorithm: cr.PublicKeyAlgorithm, - SignatureAlgorithm: 0, - }, nil -} diff --git a/vendor/go.step.sm/crypto/x509util/certificate_request.go b/vendor/go.step.sm/crypto/x509util/certificate_request.go deleted file mode 100644 index 4c58ebaa..00000000 --- a/vendor/go.step.sm/crypto/x509util/certificate_request.go +++ /dev/null @@ -1,187 +0,0 @@ -package x509util - -import ( - "bytes" - "crypto" - "crypto/rand" - "crypto/rsa" - "crypto/x509" - "crypto/x509/pkix" - "encoding/asn1" - "encoding/json" - - "github.com/pkg/errors" -) - -var emptyASN1Subject = []byte{0x30, 0} -var oidExtensionSubjectAltName = []int{2, 5, 29, 17} - -// CertificateRequest is the JSON representation of an X.509 certificate. It is -// used to build a certificate request from a template. 
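CreateCertificate above is a thin wrapper over the stdlib: fill in a random serial and subject key ID when missing, call x509.CreateCertificate, and re-parse the DER. A self-signed sketch of the same flow; the SHA-1-over-SPKI subject-key-ID derivation here is a common convention and not necessarily byte-for-byte what the generateSubjectKeyID helper produces:

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/sha1"
	"crypto/x509"
	"crypto/x509/pkix"
	"fmt"
	"log"
	"math/big"
	"time"
)

func main() {
	// Self-signed leaf: template and parent are the same certificate.
	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}

	// Random 128-bit serial, as the generateSerialNumber step above.
	serial, err := rand.Int(rand.Reader, new(big.Int).Lsh(big.NewInt(1), 128))
	if err != nil {
		log.Fatal(err)
	}

	// Subject key ID: a SHA-1 hash of the marshaled public key.
	spki, err := x509.MarshalPKIXPublicKey(&priv.PublicKey)
	if err != nil {
		log.Fatal(err)
	}
	skid := sha1.Sum(spki)

	template := &x509.Certificate{
		SerialNumber: serial,
		Subject:      pkix.Name{CommonName: "leaf.internal"},
		DNSNames:     []string{"leaf.internal"},
		SubjectKeyId: skid[:],
		NotBefore:    time.Now(),
		NotAfter:     time.Now().Add(24 * time.Hour),
		KeyUsage:     x509.KeyUsageDigitalSignature,
		ExtKeyUsage:  []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
	}

	// Sign, then re-parse the DER into a usable *x509.Certificate.
	der, err := x509.CreateCertificate(rand.Reader, template, template, &priv.PublicKey, priv)
	if err != nil {
		log.Fatal(err)
	}
	cert, err := x509.ParseCertificate(der)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(cert.Subject.CommonName, cert.SerialNumber)
}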
-type CertificateRequest struct { - Version int `json:"version"` - Subject Subject `json:"subject"` - DNSNames MultiString `json:"dnsNames"` - EmailAddresses MultiString `json:"emailAddresses"` - IPAddresses MultiIP `json:"ipAddresses"` - URIs MultiURL `json:"uris"` - SANs []SubjectAlternativeName `json:"sans"` - Extensions []Extension `json:"extensions"` - SignatureAlgorithm SignatureAlgorithm `json:"signatureAlgorithm"` - PublicKey interface{} `json:"-"` - PublicKeyAlgorithm x509.PublicKeyAlgorithm `json:"-"` - Signature []byte `json:"-"` - Signer crypto.Signer `json:"-"` -} - -// NewCertificateRequest creates a certificate request from a template. -func NewCertificateRequest(signer crypto.Signer, opts ...Option) (*CertificateRequest, error) { - pub := signer.Public() - o, err := new(Options).apply(&x509.CertificateRequest{ - PublicKey: pub, - }, opts) - if err != nil { - return nil, err - } - - // If no template use only the certificate request with the default leaf key - // usages. - if o.CertBuffer == nil { - return &CertificateRequest{ - PublicKey: pub, - Signer: signer, - }, nil - } - - // With templates - var cr CertificateRequest - if err := json.NewDecoder(o.CertBuffer).Decode(&cr); err != nil { - return nil, errors.Wrap(err, "error unmarshaling certificate") - } - cr.PublicKey = pub - cr.Signer = signer - return &cr, nil -} - -// newCertificateRequest is an internal method that creates a CertificateRequest -// from an x509.CertificateRequest. -// -// This method is used to create the template variable .Insecure.CR or to -// initialize the Certificate when no templates are used. newCertificateRequest -// will always ignore the SignatureAlgorithm because we cannot guarantee that -// the signer will be able to sign a certificate template if -// Certificate.SignatureAlgorithm is set. -func newCertificateRequest(cr *x509.CertificateRequest) *CertificateRequest { - // Set SubjectAltName extension as critical if Subject is empty. - fixSubjectAltName(cr) - return &CertificateRequest{ - Version: cr.Version, - Subject: newSubject(cr.Subject), - DNSNames: cr.DNSNames, - EmailAddresses: cr.EmailAddresses, - IPAddresses: cr.IPAddresses, - URIs: cr.URIs, - Extensions: newExtensions(cr.Extensions), - PublicKey: cr.PublicKey, - PublicKeyAlgorithm: cr.PublicKeyAlgorithm, - Signature: cr.Signature, - // Do not enforce signature algorithm from the CSR, it might not - // be compatible with the certificate signer. - SignatureAlgorithm: 0, - } -} - -// GetCertificateRequest returns the equivalent x509.CertificateRequest. -func (c *CertificateRequest) GetCertificateRequest() (*x509.CertificateRequest, error) { - cert := c.GetCertificate().GetCertificate() - asn1Data, err := x509.CreateCertificateRequest(rand.Reader, &x509.CertificateRequest{ - Subject: cert.Subject, - DNSNames: cert.DNSNames, - IPAddresses: cert.IPAddresses, - EmailAddresses: cert.EmailAddresses, - URIs: cert.URIs, - ExtraExtensions: cert.ExtraExtensions, - SignatureAlgorithm: x509.SignatureAlgorithm(c.SignatureAlgorithm), - }, c.Signer) - if err != nil { - return nil, errors.Wrap(err, "error creating certificate request") - } - // This should not fail - return x509.ParseCertificateRequest(asn1Data) -} - -// GetCertificate returns the Certificate representation of the -// CertificateRequest. -// -// GetCertificate will not specify a SignatureAlgorithm, it's not possible to -// guarantee that the certificate signer can sign with the CertificateRequest -// SignatureAlgorithm. 
-func (c *CertificateRequest) GetCertificate() *Certificate { - return &Certificate{ - Subject: c.Subject, - DNSNames: c.DNSNames, - EmailAddresses: c.EmailAddresses, - IPAddresses: c.IPAddresses, - URIs: c.URIs, - SANs: c.SANs, - Extensions: c.Extensions, - PublicKey: c.PublicKey, - PublicKeyAlgorithm: c.PublicKeyAlgorithm, - SignatureAlgorithm: 0, - } -} - -// GetLeafCertificate returns the Certificate representation of the -// CertificateRequest, including KeyUsage and ExtKeyUsage extensions. -// -// GetLeafCertificate will not specify a SignatureAlgorithm, it's not possible -// to guarantee that the certificate signer can sign with the CertificateRequest -// SignatureAlgorithm. -func (c *CertificateRequest) GetLeafCertificate() *Certificate { - keyUsage := x509.KeyUsageDigitalSignature - if _, ok := c.PublicKey.(*rsa.PublicKey); ok { - keyUsage |= x509.KeyUsageKeyEncipherment - } - - cert := c.GetCertificate() - cert.KeyUsage = KeyUsage(keyUsage) - cert.ExtKeyUsage = ExtKeyUsage([]x509.ExtKeyUsage{ - x509.ExtKeyUsageServerAuth, - x509.ExtKeyUsageClientAuth, - }) - return cert -} - -// CreateCertificateRequest creates a simple X.509 certificate request with the -// given common name and sans. -func CreateCertificateRequest(commonName string, sans []string, signer crypto.Signer) (*x509.CertificateRequest, error) { - dnsNames, ips, emails, uris := SplitSANs(sans) - asn1Data, err := x509.CreateCertificateRequest(rand.Reader, &x509.CertificateRequest{ - Subject: pkix.Name{ - CommonName: commonName, - }, - DNSNames: dnsNames, - IPAddresses: ips, - EmailAddresses: emails, - URIs: uris, - }, signer) - if err != nil { - return nil, errors.Wrap(err, "error creating certificate request") - } - // This should not fail - return x509.ParseCertificateRequest(asn1Data) -} - -// fixSubjectAltName makes sure to mark the SAN extension to critical if the -// subject is empty. -func fixSubjectAltName(cr *x509.CertificateRequest) { - if asn1Subject, err := asn1.Marshal(cr.Subject.ToRDNSequence()); err == nil { - if bytes.Equal(asn1Subject, emptyASN1Subject) { - for i, ext := range cr.Extensions { - if ext.Id.Equal(oidExtensionSubjectAltName) { - cr.Extensions[i].Critical = true - } - } - } - } -} diff --git a/vendor/go.step.sm/crypto/x509util/certpool.go b/vendor/go.step.sm/crypto/x509util/certpool.go deleted file mode 100644 index ac83e406..00000000 --- a/vendor/go.step.sm/crypto/x509util/certpool.go +++ /dev/null @@ -1,54 +0,0 @@ -package x509util - -import ( - "crypto/x509" - "io/ioutil" - "os" - "path/filepath" - "strings" - - "github.com/pkg/errors" -) - -// ReadCertPool loads a certificate pool from disk. The given path can be a -// file, a directory, or a comma-separated list of files. 
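CreateCertificateRequest above likewise reduces to x509.CreateCertificateRequest plus a re-parse of the resulting DER. A minimal DNS-only version; the host name is hypothetical:

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"fmt"
	"log"
	"os"
)

func main() {
	// Build the request, sign it with the key, and PEM-encode the DER.
	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}

	der, err := x509.CreateCertificateRequest(rand.Reader, &x509.CertificateRequest{
		Subject:  pkix.Name{CommonName: "internal.example.com"},
		DNSNames: []string{"internal.example.com"},
	}, priv)
	if err != nil {
		log.Fatal(err)
	}

	csr, err := x509.ParseCertificateRequest(der)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("CSR for", csr.Subject.CommonName)

	if err := pem.Encode(os.Stdout, &pem.Block{Type: "CERTIFICATE REQUEST", Bytes: der}); err != nil {
		log.Fatal(err)
	}
}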
-func ReadCertPool(path string) (*x509.CertPool, error) { - info, err := os.Stat(path) - if err != nil && !os.IsNotExist(err) { - return nil, errors.Wrap(err, "error reading cert pool") - } - - var ( - files []string - pool = x509.NewCertPool() - ) - if info != nil && info.IsDir() { - finfos, err := ioutil.ReadDir(path) - if err != nil { - return nil, errors.Wrap(err, "error reading cert pool") - } - for _, finfo := range finfos { - files = append(files, filepath.Join(path, finfo.Name())) - } - } else { - files = strings.Split(path, ",") - for i := range files { - files[i] = strings.TrimSpace(files[i]) - } - } - - var found bool - for _, f := range files { - bytes, err := ioutil.ReadFile(f) - if err != nil { - return nil, errors.Wrap(err, "error reading cert pool") - } - if ok := pool.AppendCertsFromPEM(bytes); ok { - found = true - } - } - if !found { - return nil, errors.New("error reading cert pool: not certificates found") - } - return pool, nil -} diff --git a/vendor/go.step.sm/crypto/x509util/extensions.go b/vendor/go.step.sm/crypto/x509util/extensions.go deleted file mode 100644 index b0d6f5b7..00000000 --- a/vendor/go.step.sm/crypto/x509util/extensions.go +++ /dev/null @@ -1,502 +0,0 @@ -package x509util - -import ( - "crypto/x509" - "crypto/x509/pkix" - "encoding/asn1" - "encoding/json" - "fmt" - "math/big" - "net" - "net/url" - "strings" - - "github.com/pkg/errors" -) - -func convertName(s string) string { - return strings.ReplaceAll(strings.ToLower(s), "_", "") -} - -// Names used for key usages. -var ( - KeyUsageDigitalSignature = convertName("DigitalSignature") - KeyUsageContentCommitment = convertName("ContentCommitment") - KeyUsageKeyEncipherment = convertName("KeyEncipherment") - KeyUsageDataEncipherment = convertName("DataEncipherment") - KeyUsageKeyAgreement = convertName("KeyAgreement") - KeyUsageCertSign = convertName("CertSign") - KeyUsageCRLSign = convertName("CRLSign") - KeyUsageEncipherOnly = convertName("EncipherOnly") - KeyUsageDecipherOnly = convertName("DecipherOnly") -) - -// Names used for extended key usages. -var ( - ExtKeyUsageAny = convertName("Any") - ExtKeyUsageServerAuth = convertName("ServerAuth") - ExtKeyUsageClientAuth = convertName("ClientAuth") - ExtKeyUsageCodeSigning = convertName("CodeSigning") - ExtKeyUsageEmailProtection = convertName("EmailProtection") - ExtKeyUsageIPSECEndSystem = convertName("IPSECEndSystem") - ExtKeyUsageIPSECTunnel = convertName("IPSECTunnel") - ExtKeyUsageIPSECUser = convertName("IPSECUser") - ExtKeyUsageTimeStamping = convertName("TimeStamping") - ExtKeyUsageOCSPSigning = convertName("OCSPSigning") - ExtKeyUsageMicrosoftServerGatedCrypto = convertName("MicrosoftServerGatedCrypto") - ExtKeyUsageNetscapeServerGatedCrypto = convertName("NetscapeServerGatedCrypto") - ExtKeyUsageMicrosoftCommercialCodeSigning = convertName("MicrosoftCommercialCodeSigning") - ExtKeyUsageMicrosoftKernelCodeSigning = convertName("MicrosoftKernelCodeSigning") -) - -// Names used and SubjectAlternativeNames types. -const ( - AutoType = "auto" - DNSType = "dns" - EmailType = "email" - IPType = "ip" - URIType = "uri" -) - -// Extension is the JSON representation of a raw X.509 extensions. -type Extension struct { - ID ObjectIdentifier `json:"id"` - Critical bool `json:"critical"` - Value []byte `json:"value"` -} - -// newExtension creates an Extension from a standard pkix.Extension. 
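For the common single-file case, ReadCertPool above boils down to reading PEM bytes and appending them to a fresh pool. The sketch below does exactly that, using os.ReadFile rather than the deprecated ioutil.ReadFile seen in the removed copy; the bundle path is hypothetical:

package main

import (
	"crypto/x509"
	"fmt"
	"log"
	"os"
)

func main() {
	pool := x509.NewCertPool()

	b, err := os.ReadFile("ca.pem") // hypothetical bundle path
	if err != nil {
		log.Fatal(err)
	}
	// AppendCertsFromPEM reports whether any certificate was added,
	// the same "found" check ReadCertPool performs.
	if !pool.AppendCertsFromPEM(b) {
		log.Fatal("error reading cert pool: no certificates found")
	}
	fmt.Println("loaded CA bundle")
}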
-func newExtension(e pkix.Extension) Extension { - return Extension{ - ID: ObjectIdentifier(e.Id), - Critical: e.Critical, - Value: e.Value, - } -} - -// newExtensions creates a slice of Extension from a slice of pkix.Exntesion. -func newExtensions(extensions []pkix.Extension) []Extension { - if extensions == nil { - return nil - } - ret := make([]Extension, len(extensions)) - for i, e := range extensions { - ret[i] = newExtension(e) - } - return ret - -} - -// Set adds the extension to the given X509 certificate. -func (e Extension) Set(c *x509.Certificate) { - c.ExtraExtensions = append(c.ExtraExtensions, pkix.Extension{ - Id: asn1.ObjectIdentifier(e.ID), - Critical: e.Critical, - Value: e.Value, - }) -} - -// ObjectIdentifier represents a JSON strings that unmarshals into an ASN1 -// object identifier or OID. -type ObjectIdentifier asn1.ObjectIdentifier - -// MarshalJSON implements the json.Marshaler interface and returns the string -// version of the asn1.ObjectIdentifier. -func (o ObjectIdentifier) MarshalJSON() ([]byte, error) { - return json.Marshal(asn1.ObjectIdentifier(o).String()) -} - -// UnmarshalJSON implements the json.Unmarshaler interface and coverts a strings -// like "2.5.29.17" into an ASN1 object identifier. -func (o *ObjectIdentifier) UnmarshalJSON(data []byte) error { - s, err := unmarshalString(data) - if err != nil { - return err - } - - oid, err := parseObjectIdentifier(s) - if err != nil { - return err - } - *o = ObjectIdentifier(oid) - return nil -} - -// SubjectAlternativeName represents a X.509 subject alternative name. Types -// supported are "dns", "email", "ip", "uri". A special type "auto" or "" can be -// used to try to guess the type of the value. -type SubjectAlternativeName struct { - Type string `json:"type"` - Value string `json:"value"` -} - -// Set sets the subject alternative name in the given x509.Certificate. -func (s SubjectAlternativeName) Set(c *x509.Certificate) { - switch strings.ToLower(s.Type) { - case DNSType: - c.DNSNames = append(c.DNSNames, s.Value) - case EmailType: - c.EmailAddresses = append(c.EmailAddresses, s.Value) - case IPType: - // The validation of the IP would happen in the unmarshaling, but just - // to be sure we are only adding valid IPs. - if ip := net.ParseIP(s.Value); ip != nil { - c.IPAddresses = append(c.IPAddresses, ip) - } - case URIType: - if u, err := url.Parse(s.Value); err == nil { - c.URIs = append(c.URIs, u) - } - case "", AutoType: - dnsNames, ips, emails, uris := SplitSANs([]string{s.Value}) - c.DNSNames = append(c.DNSNames, dnsNames...) - c.IPAddresses = append(c.IPAddresses, ips...) - c.EmailAddresses = append(c.EmailAddresses, emails...) - c.URIs = append(c.URIs, uris...) - default: - panic(fmt.Sprintf("unsupported subject alternative name type %s", s.Type)) - } -} - -// KeyUsage type represents the JSON array used to represent the key usages of a -// X509 certificate. -type KeyUsage x509.KeyUsage - -// Set sets the key usage to the given certificate. -func (k KeyUsage) Set(c *x509.Certificate) { - c.KeyUsage = x509.KeyUsage(k) -} - -// UnmarshalJSON implements the json.Unmarshaler interface and coverts a string -// or a list of strings into a key usage. 
-func (k *KeyUsage) UnmarshalJSON(data []byte) error { - ms, err := unmarshalMultiString(data) - if err != nil { - return err - } - - *k = 0 - - for _, s := range ms { - var ku x509.KeyUsage - switch convertName(s) { - case KeyUsageDigitalSignature: - ku = x509.KeyUsageDigitalSignature - case KeyUsageContentCommitment: - ku = x509.KeyUsageContentCommitment - case KeyUsageKeyEncipherment: - ku = x509.KeyUsageKeyEncipherment - case KeyUsageDataEncipherment: - ku = x509.KeyUsageDataEncipherment - case KeyUsageKeyAgreement: - ku = x509.KeyUsageKeyAgreement - case KeyUsageCertSign: - ku = x509.KeyUsageCertSign - case KeyUsageCRLSign: - ku = x509.KeyUsageCRLSign - case KeyUsageEncipherOnly: - ku = x509.KeyUsageEncipherOnly - case KeyUsageDecipherOnly: - ku = x509.KeyUsageDecipherOnly - default: - return errors.Errorf("unsupported keyUsage %s", s) - } - *k |= KeyUsage(ku) - } - - return nil -} - -// ExtKeyUsage represents a JSON array of extended key usages. -type ExtKeyUsage []x509.ExtKeyUsage - -// Set sets the extended key usages in the given certificate. -func (k ExtKeyUsage) Set(c *x509.Certificate) { - c.ExtKeyUsage = []x509.ExtKeyUsage(k) -} - -// UnmarshalJSON implements the json.Unmarshaler interface and coverts a string -// or a list of strings into a list of extended key usages. -func (k *ExtKeyUsage) UnmarshalJSON(data []byte) error { - ms, err := unmarshalMultiString(data) - if err != nil { - return err - } - - eku := make([]x509.ExtKeyUsage, len(ms)) - for i, s := range ms { - var ku x509.ExtKeyUsage - switch convertName(s) { - case ExtKeyUsageAny: - ku = x509.ExtKeyUsageAny - case ExtKeyUsageServerAuth: - ku = x509.ExtKeyUsageServerAuth - case ExtKeyUsageClientAuth: - ku = x509.ExtKeyUsageClientAuth - case ExtKeyUsageCodeSigning: - ku = x509.ExtKeyUsageCodeSigning - case ExtKeyUsageEmailProtection: - ku = x509.ExtKeyUsageEmailProtection - case ExtKeyUsageIPSECEndSystem: - ku = x509.ExtKeyUsageIPSECEndSystem - case ExtKeyUsageIPSECTunnel: - ku = x509.ExtKeyUsageIPSECTunnel - case ExtKeyUsageIPSECUser: - ku = x509.ExtKeyUsageIPSECUser - case ExtKeyUsageTimeStamping: - ku = x509.ExtKeyUsageTimeStamping - case ExtKeyUsageOCSPSigning: - ku = x509.ExtKeyUsageOCSPSigning - case ExtKeyUsageMicrosoftServerGatedCrypto: - ku = x509.ExtKeyUsageMicrosoftServerGatedCrypto - case ExtKeyUsageNetscapeServerGatedCrypto: - ku = x509.ExtKeyUsageNetscapeServerGatedCrypto - case ExtKeyUsageMicrosoftCommercialCodeSigning: - ku = x509.ExtKeyUsageMicrosoftCommercialCodeSigning - case ExtKeyUsageMicrosoftKernelCodeSigning: - ku = x509.ExtKeyUsageMicrosoftKernelCodeSigning - default: - return errors.Errorf("unsupported extKeyUsage %s", s) - } - eku[i] = ku - } - - *k = ExtKeyUsage(eku) - return nil -} - -// UnknownExtKeyUsage represents the list of OIDs of extended key usages unknown -// to crypto/x509. -type UnknownExtKeyUsage MultiObjectIdentifier - -// MarshalJSON implements the json.Marshaler interface in UnknownExtKeyUsage. -func (u UnknownExtKeyUsage) MarshalJSON() ([]byte, error) { - return MultiObjectIdentifier(u).MarshalJSON() -} - -// UnmarshalJSON implements the json.Unmarshaler interface in UnknownExtKeyUsage. -func (u *UnknownExtKeyUsage) UnmarshalJSON(data []byte) error { - var v MultiObjectIdentifier - if err := json.Unmarshal(data, &v); err != nil { - return errors.Wrap(err, "error unmarshaling json") - } - *u = UnknownExtKeyUsage(v) - return nil -} - -// Set sets the policy identifiers to the given certificate. 
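The KeyUsage and ExtKeyUsage names above map JSON strings onto the crypto/x509 bitmask and enum values; per convertName, matching is case-insensitive and ignores underscores. A short sketch of how the removed KeyUsage type decoded a JSON array (illustrative; assumes go.step.sm/crypto/x509util is still importable):

    package main

    import (
        "crypto/x509"
        "encoding/json"
        "fmt"
        "log"

        "go.step.sm/crypto/x509util"
    )

    func main() {
        // A single string or an array of usage names was accepted.
        var ku x509util.KeyUsage
        if err := json.Unmarshal([]byte(`["digitalSignature", "keyEncipherment"]`), &ku); err != nil {
            log.Fatal(err)
        }

        usage := x509.KeyUsage(ku)
        fmt.Println(usage&x509.KeyUsageDigitalSignature != 0) // true
        fmt.Println(usage&x509.KeyUsageCertSign != 0)         // false
    }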
-func (u UnknownExtKeyUsage) Set(c *x509.Certificate) { - c.UnknownExtKeyUsage = u -} - -// SubjectKeyID represents the binary value of the subject key identifier -// extension, this should be the SHA-1 hash of the public key. In JSON this -// value should be a base64-encoded string, and in most cases it should not be -// set because it will be automatically generated. -type SubjectKeyID []byte - -// Set sets the subject key identifier to the given certificate. -func (id SubjectKeyID) Set(c *x509.Certificate) { - c.SubjectKeyId = id -} - -// AuthorityKeyID represents the binary value of the authority key identifier -// extension. It should be the subject key identifier of the parent certificate. -// In JSON this value should be a base64-encoded string, and in most cases it -// should not be set, as it will be automatically provided. -type AuthorityKeyID []byte - -// Set sets the authority key identifier to the given certificate. -func (id AuthorityKeyID) Set(c *x509.Certificate) { - c.AuthorityKeyId = id -} - -// OCSPServer contains the list of OSCP servers that will be encoded in the -// authority information access extension. -type OCSPServer MultiString - -// UnmarshalJSON implements the json.Unmarshaler interface in OCSPServer. -func (o *OCSPServer) UnmarshalJSON(data []byte) error { - ms, err := unmarshalMultiString(data) - if err != nil { - return err - } - *o = ms - return nil -} - -// Set sets the list of OSCP servers to the given certificate. -func (o OCSPServer) Set(c *x509.Certificate) { - c.OCSPServer = o -} - -// IssuingCertificateURL contains the list of the issuing certificate url that -// will be encoded in the authority information access extension. -type IssuingCertificateURL MultiString - -// UnmarshalJSON implements the json.Unmarshaler interface in IssuingCertificateURL. -func (u *IssuingCertificateURL) UnmarshalJSON(data []byte) error { - ms, err := unmarshalMultiString(data) - if err != nil { - return err - } - *u = ms - return nil -} - -// Set sets the list of issuing certificate urls to the given certificate. -func (u IssuingCertificateURL) Set(c *x509.Certificate) { - c.IssuingCertificateURL = u -} - -// CRLDistributionPoints contains the list of CRL distribution points that will -// be encoded in the CRL distribution points extension. -type CRLDistributionPoints MultiString - -// UnmarshalJSON implements the json.Unmarshaler interface in CRLDistributionPoints. -func (u *CRLDistributionPoints) UnmarshalJSON(data []byte) error { - ms, err := unmarshalMultiString(data) - if err != nil { - return err - } - *u = ms - return nil -} - -// Set sets the CRL distribution points to the given certificate. -func (u CRLDistributionPoints) Set(c *x509.Certificate) { - c.CRLDistributionPoints = u -} - -// PolicyIdentifiers represents the list of OIDs to set in the certificate -// policies extension. -type PolicyIdentifiers MultiObjectIdentifier - -// MarshalJSON implements the json.Marshaler interface in PolicyIdentifiers. -func (p PolicyIdentifiers) MarshalJSON() ([]byte, error) { - return MultiObjectIdentifier(p).MarshalJSON() -} - -// UnmarshalJSON implements the json.Unmarshaler interface in PolicyIdentifiers. -func (p *PolicyIdentifiers) UnmarshalJSON(data []byte) error { - var v MultiObjectIdentifier - if err := json.Unmarshal(data, &v); err != nil { - return errors.Wrap(err, "error unmarshaling json") - } - *p = PolicyIdentifiers(v) - return nil -} - -// Set sets the policy identifiers to the given certificate. 
-func (p PolicyIdentifiers) Set(c *x509.Certificate) {
-	c.PolicyIdentifiers = p
-}
-
-// BasicConstraints represents the X509 basic constraints extension and defines
-// if a certificate is a CA and then maximum depth of valid certification paths
-// that include the certificate. A MaxPathLen of zero indicates that no non-
-// self-issued intermediate CA certificates may follow in a valid certification
-// path. To do not impose a limit the MaxPathLen should be set to -1.
-type BasicConstraints struct {
-	IsCA       bool `json:"isCA"`
-	MaxPathLen int  `json:"maxPathLen"`
-}
-
-// Set sets the basic constraints to the given certificate.
-func (b BasicConstraints) Set(c *x509.Certificate) {
-	c.BasicConstraintsValid = true
-	c.IsCA = b.IsCA
-	if c.IsCA {
-		switch {
-		case b.MaxPathLen == 0:
-			c.MaxPathLen = 0
-			c.MaxPathLenZero = true
-		case b.MaxPathLen < 0:
-			c.MaxPathLen = -1
-			c.MaxPathLenZero = false
-		default:
-			c.MaxPathLen = b.MaxPathLen
-			c.MaxPathLenZero = false
-		}
-	} else {
-		c.MaxPathLen = 0
-		c.MaxPathLenZero = false
-	}
-}
-
-// NameConstraints represents the X509 Name constraints extension and defines a
-// names space within which all subject names in subsequent certificates in a
-// certificate path must be located. The name constraints extension must be used
-// only in a CA.
-type NameConstraints struct {
-	Critical                bool        `json:"critical"`
-	PermittedDNSDomains     MultiString `json:"permittedDNSDomains"`
-	ExcludedDNSDomains      MultiString `json:"excludedDNSDomains"`
-	PermittedIPRanges       MultiIPNet  `json:"permittedIPRanges"`
-	ExcludedIPRanges        MultiIPNet  `json:"excludedIPRanges"`
-	PermittedEmailAddresses MultiString `json:"permittedEmailAddresses"`
-	ExcludedEmailAddresses  MultiString `json:"excludedEmailAddresses"`
-	PermittedURIDomains     MultiString `json:"permittedURIDomains"`
-	ExcludedURIDomains      MultiString `json:"excludedURIDomains"`
-}
-
-// Set sets the name constraints in the given certificate.
-func (n NameConstraints) Set(c *x509.Certificate) {
-	c.PermittedDNSDomainsCritical = n.Critical
-	c.PermittedDNSDomains = n.PermittedDNSDomains
-	c.ExcludedDNSDomains = n.ExcludedDNSDomains
-	c.PermittedIPRanges = n.PermittedIPRanges
-	c.ExcludedIPRanges = n.ExcludedIPRanges
-	c.PermittedEmailAddresses = n.PermittedEmailAddresses
-	c.ExcludedEmailAddresses = n.ExcludedEmailAddresses
-	c.PermittedURIDomains = n.PermittedURIDomains
-	c.ExcludedURIDomains = n.ExcludedURIDomains
-}
-
-// SerialNumber is the JSON representation of the X509 serial number.
-type SerialNumber struct {
-	*big.Int
-}
-
-// Set sets the serial number in the given certificate.
-func (s SerialNumber) Set(c *x509.Certificate) {
-	c.SerialNumber = s.Int
-}
-
-// MarshalJSON implements the json.Marshaler interface, and encodes a
-// SerialNumber using the big.Int marshaler.
-func (s *SerialNumber) MarshalJSON() ([]byte, error) {
-	if s == nil || s.Int == nil {
-		return []byte(`null`), nil
-	}
-	return s.Int.MarshalJSON()
-}
-
-// UnmarshalJSON implements the json.Unmarshal interface and unmarshals an
-// integer or a string into a serial number. If a string is used, a prefix of
-// “0b” or “0B” selects base 2, “0”, “0o” or “0O” selects base 8, and “0x” or
-// “0X” selects base 16. Otherwise, the selected base is 10 and no prefix is
-// accepted.
-func (s *SerialNumber) UnmarshalJSON(data []byte) error {
-	if sn, ok := maybeString(data); ok {
-		// Using base 0 to accept prefixes 0b, 0o, 0x but defaults as base 10.
- b, ok := new(big.Int).SetString(sn, 0) - if !ok { - return errors.Errorf("error unmarshaling json: serialNumber %s is not valid", sn) - } - *s = SerialNumber{ - Int: b, - } - return nil - } - - // Assume a number. - var i int64 - if err := json.Unmarshal(data, &i); err != nil { - return errors.Wrap(err, "error unmarshaling json") - } - *s = SerialNumber{ - Int: new(big.Int).SetInt64(i), - } - return nil -} diff --git a/vendor/go.step.sm/crypto/x509util/marshal_utils.go b/vendor/go.step.sm/crypto/x509util/marshal_utils.go deleted file mode 100644 index 4408e41b..00000000 --- a/vendor/go.step.sm/crypto/x509util/marshal_utils.go +++ /dev/null @@ -1,212 +0,0 @@ -package x509util - -import ( - "encoding/asn1" - "encoding/json" - "net" - "net/url" - "strconv" - "strings" - - "github.com/pkg/errors" -) - -// MultiString is a type used to unmarshal a JSON string or an array of strings -// into a []string. -type MultiString []string - -// UnmarshalJSON implements the json.Unmarshaler interface for MultiString. -func (m *MultiString) UnmarshalJSON(data []byte) error { - if s, ok := maybeString(data); ok { - *m = MultiString([]string{s}) - return nil - } - - var v []string - if err := json.Unmarshal(data, &v); err != nil { - return errors.Wrap(err, "error unmarshaling json") - } - *m = MultiString(v) - return nil -} - -// MultiIP is a type used to unmarshal a JSON string or an array of strings into -// a []net.IP. -type MultiIP []net.IP - -// UnmarshalJSON implements the json.Unmarshaler interface for MultiIP. -func (m *MultiIP) UnmarshalJSON(data []byte) error { - ms, err := unmarshalMultiString(data) - if err != nil { - return err - } - if ms != nil { - ips := make([]net.IP, len(ms)) - for i, s := range ms { - ip := net.ParseIP(s) - if ip == nil { - return errors.Errorf("error unmarshaling json: ip %s is not valid", s) - } - ips[i] = ip - } - - *m = MultiIP(ips) - } - return nil -} - -// MultiIPNet is a type used to unmarshal a JSON string or an array of strings -// into a []*net.IPNet. -type MultiIPNet []*net.IPNet - -// MarshalJSON implements the json.Marshaler interface for MultiIPNet. -func (m MultiIPNet) MarshalJSON() ([]byte, error) { - if m == nil { - return []byte("null"), nil - } - ipNets := make([]string, len(m)) - for i, v := range m { - ipNets[i] = v.String() - } - return json.Marshal(ipNets) -} - -// UnmarshalJSON implements the json.Unmarshaler interface for MultiIPNet. -func (m *MultiIPNet) UnmarshalJSON(data []byte) error { - ms, err := unmarshalMultiString(data) - if err != nil { - return err - } - if ms != nil { - ipNets := make([]*net.IPNet, len(ms)) - for i, s := range ms { - _, ipNet, err := net.ParseCIDR(s) - if err != nil { - return errors.Wrap(err, "error unmarshaling json") - } - ipNets[i] = ipNet - } - - *m = MultiIPNet(ipNets) - } - return nil -} - -// MultiURL is a type used to unmarshal a JSON string or an array of strings -// into a []*url.URL. -type MultiURL []*url.URL - -// MarshalJSON implements the json.Marshaler interface for MultiURL. -func (m MultiURL) MarshalJSON() ([]byte, error) { - if m == nil { - return []byte("null"), nil - } - urls := make([]string, len(m)) - for i, u := range m { - urls[i] = u.String() - } - return json.Marshal(urls) -} - -// UnmarshalJSON implements the json.Unmarshaler interface for MultiURL. 
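The base-prefix rules described in the SerialNumber comment above come directly from big.Int.SetString with base 0, which the removed UnmarshalJSON relies on. A self-contained illustration that runs without the vendored package; all four spellings parse to the same value:

    package main

    import (
        "fmt"
        "math/big"
    )

    func main() {
        // With base 0 the prefix selects the base: 0b/0B binary, 0/0o/0O octal,
        // 0x/0X hexadecimal; otherwise base 10 with no prefix accepted.
        for _, s := range []string{"123456", "0x1e240", "0o361100", "0b11110001001000000"} {
            n, ok := new(big.Int).SetString(s, 0)
            fmt.Printf("%-20s ok=%v value=%v\n", s, ok, n)
        }
    }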
-func (m *MultiURL) UnmarshalJSON(data []byte) error { - ms, err := unmarshalMultiString(data) - if err != nil { - return err - } - if ms != nil { - urls := make([]*url.URL, len(ms)) - for i, s := range ms { - u, err := url.Parse(s) - if err != nil { - return errors.Wrap(err, "error unmarshaling json") - } - urls[i] = u - } - - *m = MultiURL(urls) - } - return nil -} - -// MultiObjectIdentifier is a type used to unmarshal a JSON string or an array -// of strings into a []asn1.ObjectIdentifier. -type MultiObjectIdentifier []asn1.ObjectIdentifier - -// MarshalJSON implements the json.Marshaler interface for MultiObjectIdentifier. -func (m MultiObjectIdentifier) MarshalJSON() ([]byte, error) { - if m == nil { - return []byte("null"), nil - } - oids := make([]string, len(m)) - for i, u := range m { - oids[i] = u.String() - } - return json.Marshal(oids) -} - -// UnmarshalJSON implements the json.Unmarshaler interface for -// MultiObjectIdentifier. -func (m *MultiObjectIdentifier) UnmarshalJSON(data []byte) error { - ms, err := unmarshalMultiString(data) - if err != nil { - return err - } - if ms != nil { - oids := make([]asn1.ObjectIdentifier, len(ms)) - for i, s := range ms { - oid, err := parseObjectIdentifier(s) - if err != nil { - return err - } - oids[i] = oid - } - - *m = MultiObjectIdentifier(oids) - } - return nil -} - -func maybeString(data []byte) (string, bool) { - if len(data) > 0 && data[0] == '"' { - var v string - if err := json.Unmarshal(data, &v); err == nil { - return v, true - } - } - return "", false -} - -func unmarshalString(data []byte) (string, error) { - var v string - if err := json.Unmarshal(data, &v); err != nil { - return v, errors.Wrap(err, "error unmarshaling json") - } - return v, nil -} - -func unmarshalMultiString(data []byte) ([]string, error) { - var v MultiString - if err := json.Unmarshal(data, &v); err != nil { - return nil, errors.Wrap(err, "error unmarshaling json") - } - return []string(v), nil -} - -func parseObjectIdentifier(oid string) (asn1.ObjectIdentifier, error) { - if oid == "" { - return asn1.ObjectIdentifier{}, nil - } - - parts := strings.Split(oid, ".") - oids := make([]int, len(parts)) - - for i, s := range parts { - n, err := strconv.Atoi(s) - if err != nil { - return asn1.ObjectIdentifier{}, errors.Errorf("error unmarshaling json: %s is not an ASN1 object identifier", oid) - } - oids[i] = n - } - return asn1.ObjectIdentifier(oids), nil -} diff --git a/vendor/go.step.sm/crypto/x509util/name.go b/vendor/go.step.sm/crypto/x509util/name.go deleted file mode 100644 index f290de63..00000000 --- a/vendor/go.step.sm/crypto/x509util/name.go +++ /dev/null @@ -1,127 +0,0 @@ -package x509util - -import ( - "crypto/x509" - "crypto/x509/pkix" - "encoding/json" - - "github.com/pkg/errors" -) - -// Name is the JSON representation of X.501 type Name, used in the X.509 subject -// and issuer fields. 
-type Name struct { - Country MultiString `json:"country,omitempty"` - Organization MultiString `json:"organization,omitempty"` - OrganizationalUnit MultiString `json:"organizationalUnit,omitempty"` - Locality MultiString `json:"locality,omitempty"` - Province MultiString `json:"province,omitempty"` - StreetAddress MultiString `json:"streetAddress,omitempty"` - PostalCode MultiString `json:"postalCode,omitempty"` - SerialNumber string `json:"serialNumber,omitempty"` - CommonName string `json:"commonName,omitempty"` -} - -// UnmarshalJSON implements the json.Unmarshal interface and unmarshals a JSON -// object in the Name struct or a string as just the subject common name. -func (n *Name) UnmarshalJSON(data []byte) error { - if cn, ok := maybeString(data); ok { - n.CommonName = cn - return nil - } - - type nameAlias Name - var nn nameAlias - if err := json.Unmarshal(data, &nn); err != nil { - return errors.Wrap(err, "error unmarshaling json") - } - *n = Name(nn) - return nil -} - -// Subject is the JSON representation of the X.509 subject field. -type Subject Name - -func newSubject(n pkix.Name) Subject { - return Subject{ - Country: n.Country, - Organization: n.Organization, - OrganizationalUnit: n.OrganizationalUnit, - Locality: n.Locality, - Province: n.Province, - StreetAddress: n.StreetAddress, - PostalCode: n.PostalCode, - SerialNumber: n.SerialNumber, - CommonName: n.CommonName, - } -} - -// UnmarshalJSON implements the json.Unmarshal interface and unmarshals a JSON -// object in the Subject struct or a string as just the subject common name. -func (s *Subject) UnmarshalJSON(data []byte) error { - var name Name - if err := name.UnmarshalJSON(data); err != nil { - return err - } - *s = Subject(name) - return nil -} - -// Set sets the subject in the given certificate. -func (s Subject) Set(c *x509.Certificate) { - c.Subject = pkix.Name{ - Country: s.Country, - Organization: s.Organization, - OrganizationalUnit: s.OrganizationalUnit, - Locality: s.Locality, - Province: s.Province, - StreetAddress: s.StreetAddress, - PostalCode: s.PostalCode, - SerialNumber: s.SerialNumber, - CommonName: s.CommonName, - } -} - -// Issuer is the JSON representation of the X.509 issuer field. -type Issuer Name - -// nolint:unused -func newIssuer(n pkix.Name) Issuer { - return Issuer{ - Country: n.Country, - Organization: n.Organization, - OrganizationalUnit: n.OrganizationalUnit, - Locality: n.Locality, - Province: n.Province, - StreetAddress: n.StreetAddress, - PostalCode: n.PostalCode, - SerialNumber: n.SerialNumber, - CommonName: n.CommonName, - } -} - -// UnmarshalJSON implements the json.Unmarshal interface and unmarshals a JSON -// object in the Issuer struct or a string as just the subject common name. -func (i *Issuer) UnmarshalJSON(data []byte) error { - var name Name - if err := name.UnmarshalJSON(data); err != nil { - return err - } - *i = Issuer(name) - return nil -} - -// Set sets the issuer in the given certificate. 
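Name, Subject, and Issuer above all accept two JSON shapes: a bare string, treated as the common name, or a full object whose MultiString fields take either a single string or an array. A small sketch of both forms (values are hypothetical; assumes the x509util package is still importable):

    package main

    import (
        "encoding/json"
        "fmt"
        "log"

        "go.step.sm/crypto/x509util"
    )

    func main() {
        // A bare JSON string populates only the common name.
        var a x509util.Subject
        if err := json.Unmarshal([]byte(`"internal.example.com"`), &a); err != nil {
            log.Fatal(err)
        }
        fmt.Println(a.CommonName) // internal.example.com

        // A JSON object populates the full name; "organization" accepts a
        // single string or an array thanks to MultiString.
        var b x509util.Subject
        doc := `{"commonName": "internal.example.com", "organization": "Acme"}`
        if err := json.Unmarshal([]byte(doc), &b); err != nil {
            log.Fatal(err)
        }
        fmt.Println(b.Organization) // [Acme]
    }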
-func (i Issuer) Set(c *x509.Certificate) { - c.Issuer = pkix.Name{ - Country: i.Country, - Organization: i.Organization, - OrganizationalUnit: i.OrganizationalUnit, - Locality: i.Locality, - Province: i.Province, - StreetAddress: i.StreetAddress, - PostalCode: i.PostalCode, - SerialNumber: i.SerialNumber, - CommonName: i.CommonName, - } -} diff --git a/vendor/go.step.sm/crypto/x509util/options.go b/vendor/go.step.sm/crypto/x509util/options.go deleted file mode 100644 index e075312a..00000000 --- a/vendor/go.step.sm/crypto/x509util/options.go +++ /dev/null @@ -1,95 +0,0 @@ -package x509util - -import ( - "bytes" - "crypto/x509" - "encoding/base64" - "io/ioutil" - "text/template" - - "github.com/Masterminds/sprig/v3" - "github.com/pkg/errors" - "go.step.sm/crypto/internal/step" -) - -// getFuncMap returns the list of functions provided by sprig. It changes the -// function "fail" to set the given string, this way we can report template -// errors directly to the template without having the wrapper that text/template -// adds. -func getFuncMap(failMessage *string) template.FuncMap { - m := sprig.TxtFuncMap() - m["fail"] = func(msg string) (string, error) { - *failMessage = msg - return "", errors.New(msg) - } - return m -} - -// Options are the options that can be passed to NewCertificate. -type Options struct { - CertBuffer *bytes.Buffer -} - -func (o *Options) apply(cr *x509.CertificateRequest, opts []Option) (*Options, error) { - for _, fn := range opts { - if err := fn(cr, o); err != nil { - return o, err - } - } - return o, nil -} - -// Option is the type used as a variadic argument in NewCertificate. -type Option func(cr *x509.CertificateRequest, o *Options) error - -// WithTemplate is an options that executes the given template text with the -// given data. -func WithTemplate(text string, data TemplateData) Option { - return func(cr *x509.CertificateRequest, o *Options) error { - terr := new(TemplateError) - funcMap := getFuncMap(&terr.Message) - - tmpl, err := template.New("template").Funcs(funcMap).Parse(text) - if err != nil { - return errors.Wrapf(err, "error parsing template") - } - - buf := new(bytes.Buffer) - data.SetCertificateRequest(cr) - if err := tmpl.Execute(buf, data); err != nil { - if terr.Message != "" { - return terr - } - return errors.Wrapf(err, "error executing template") - } - o.CertBuffer = buf - return nil - } -} - -// WithTemplateBase64 is an options that executes the given template base64 -// string with the given data. -func WithTemplateBase64(s string, data TemplateData) Option { - return func(cr *x509.CertificateRequest, o *Options) error { - b, err := base64.StdEncoding.DecodeString(s) - if err != nil { - return errors.Wrap(err, "error decoding template") - } - fn := WithTemplate(string(b), data) - return fn(cr, o) - } -} - -// WithTemplateFile is an options that reads the template file and executes it -// with the given data. 
-func WithTemplateFile(path string, data TemplateData) Option { - return func(cr *x509.CertificateRequest, o *Options) error { - filename := step.Abs(path) - b, err := ioutil.ReadFile(filename) - if err != nil { - return errors.Wrapf(err, "error reading %s", path) - } - fn := WithTemplate(string(b), data) - return fn(cr, o) - } -} diff --git a/vendor/go.step.sm/crypto/x509util/templates.go b/vendor/go.step.sm/crypto/x509util/templates.go deleted file mode 100644 index bdfce603..00000000 --- a/vendor/go.step.sm/crypto/x509util/templates.go +++ /dev/null @@ -1,181 +0,0 @@ -package x509util - -import ( - "crypto/x509" -) - -// Variables used to hold template data. -const ( - SubjectKey = "Subject" - SANsKey = "SANs" - TokenKey = "Token" - InsecureKey = "Insecure" - UserKey = "User" - CertificateRequestKey = "CR" -) - -// TemplateError represents an error in a template produced by the fail -// function. -type TemplateError struct { - Message string -} - -// Error implements the error interface and returns the error string when a -// template executes the `fail "message"` function. -func (e *TemplateError) Error() string { - return e.Message -} - -// TemplateData is an alias for map[string]interface{}. It represents the data -// passed to the templates. -type TemplateData map[string]interface{} - -// NewTemplateData creates a new map for templates data. -func NewTemplateData() TemplateData { - return TemplateData{} -} - -// CreateTemplateData creates a new TemplateData with the given common name and SANs. -func CreateTemplateData(commonName string, sans []string) TemplateData { - return TemplateData{ - SubjectKey: Subject{ - CommonName: commonName, - }, - SANsKey: CreateSANs(sans), - } -} - -// Set sets a key-value pair in the template data. -func (t TemplateData) Set(key string, v interface{}) { - t[key] = v -} - -// SetInsecure sets a key-value pair in the insecure template data. -func (t TemplateData) SetInsecure(key string, v interface{}) { - if m, ok := t[InsecureKey].(TemplateData); ok { - m[key] = v - } else { - t[InsecureKey] = TemplateData{key: v} - } -} - -// SetSubject sets the given subject in the template data. -func (t TemplateData) SetSubject(v Subject) { - t.Set(SubjectKey, v) -} - -// SetCommonName sets the given common name in the subject in the template data. -func (t TemplateData) SetCommonName(cn string) { - s, _ := t[SubjectKey].(Subject) - s.CommonName = cn - t[SubjectKey] = s -} - -// SetSANs sets the given SANs in the template data. -func (t TemplateData) SetSANs(sans []string) { - t.Set(SANsKey, CreateSANs(sans)) -} - -// SetToken sets the given token in the template data. -func (t TemplateData) SetToken(v interface{}) { - t.Set(TokenKey, v) -} - -// SetUserData sets the given user provided object in the insecure template -// data. -func (t TemplateData) SetUserData(v interface{}) { - t.SetInsecure(UserKey, v) -} - -// SetCertificateRequest sets the given certificate request in the insecure -// template data. -func (t TemplateData) SetCertificateRequest(cr *x509.CertificateRequest) { - t.SetInsecure(CertificateRequestKey, newCertificateRequest(cr)) -} - -// DefaultLeafTemplate is the default template used to generate a leaf -// certificate. 
-const DefaultLeafTemplate = `{ - "subject": {{ toJson .Subject }}, - "sans": {{ toJson .SANs }}, -{{- if typeIs "*rsa.PublicKey" .Insecure.CR.PublicKey }} - "keyUsage": ["keyEncipherment", "digitalSignature"], -{{- else }} - "keyUsage": ["digitalSignature"], -{{- end }} - "extKeyUsage": ["serverAuth", "clientAuth"] -}` - -// DefaultIIDLeafTemplate is the template used by default on instance identity -// provisioners like AWS, GCP or Azure. By default, those provisioners allow the -// SANs provided in the certificate request, but the option `DisableCustomSANs` -// can be provided to force only the verified domains, if the option is true -// `.SANs` will be set with the verified domains. -const DefaultIIDLeafTemplate = `{ - "subject": {"commonName": {{ toJson .Insecure.CR.Subject.CommonName }}}, -{{- if .SANs }} - "sans": {{ toJson .SANs }}, -{{- else }} - "dnsNames": {{ toJson .Insecure.CR.DNSNames }}, - "emailAddresses": {{ toJson .Insecure.CR.EmailAddresses }}, - "ipAddresses": {{ toJson .Insecure.CR.IPAddresses }}, - "uris": {{ toJson .Insecure.CR.URIs }}, -{{- end }} -{{- if typeIs "*rsa.PublicKey" .Insecure.CR.PublicKey }} - "keyUsage": ["keyEncipherment", "digitalSignature"], -{{- else }} - "keyUsage": ["digitalSignature"], -{{- end }} - "extKeyUsage": ["serverAuth", "clientAuth"] -}` - -// DefaultAdminLeafTemplate is a template used by default by K8sSA and -// admin-OIDC provisioners. This template takes all the SANs and subject from -// the certificate request. -const DefaultAdminLeafTemplate = `{ - "subject": {{ toJson .Insecure.CR.Subject }}, - "dnsNames": {{ toJson .Insecure.CR.DNSNames }}, - "emailAddresses": {{ toJson .Insecure.CR.EmailAddresses }}, - "ipAddresses": {{ toJson .Insecure.CR.IPAddresses }}, - "uris": {{ toJson .Insecure.CR.URIs }}, -{{- if typeIs "*rsa.PublicKey" .Insecure.CR.PublicKey }} - "keyUsage": ["keyEncipherment", "digitalSignature"], -{{- else }} - "keyUsage": ["digitalSignature"], -{{- end }} - "extKeyUsage": ["serverAuth", "clientAuth"] -}` - -// DefaultIntermediateTemplate is a template that can be used to generate an -// intermediate certificate. -const DefaultIntermediateTemplate = `{ - "subject": {{ toJson .Subject }}, - "keyUsage": ["certSign", "crlSign"], - "basicConstraints": { - "isCA": true, - "maxPathLen": 0 - } -}` - -// DefaultRootTemplate is a template that can be used to generate a root -// certificate. -const DefaultRootTemplate = `{ - "subject": {{ toJson .Subject }}, - "issuer": {{ toJson .Subject }}, - "keyUsage": ["certSign", "crlSign"], - "basicConstraints": { - "isCA": true, - "maxPathLen": 1 - } -}` - -// CertificateRequestTemplate is a template that will sign the given certificate -// request. -const CertificateRequestTemplate = `{{ toJson .Insecure.CR }}` - -// DefaultCertificateRequestTemplate is the templated used by default when -// creating a new certificate request. 
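The templates above reference .Subject and .SANs, which come from the TemplateData built by CreateTemplateData; the .Insecure.CR fields are filled in later from the CSR via SetCertificateRequest. A sketch that prints the Subject/SANs portion of that data (hypothetical names; assumes the package is still importable):

    package main

    import (
        "encoding/json"
        "fmt"
        "log"

        "go.step.sm/crypto/x509util"
    )

    func main() {
        // Pairs a Subject with typed SANs derived from the raw names.
        data := x509util.CreateTemplateData("internal.example.com",
            []string{"internal.example.com", "127.0.0.1"})

        b, err := json.MarshalIndent(data, "", "  ")
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(string(b))
    }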
-const DefaultCertificateRequestTemplate = `{ - "subject": {{ toJson .Subject }}, - "sans": {{ toJson .SANs }} -}` diff --git a/vendor/go.step.sm/crypto/x509util/utils.go b/vendor/go.step.sm/crypto/x509util/utils.go deleted file mode 100644 index 26a5528c..00000000 --- a/vendor/go.step.sm/crypto/x509util/utils.go +++ /dev/null @@ -1,129 +0,0 @@ -package x509util - -import ( - "crypto" - "crypto/rand" - "crypto/sha1" - "crypto/sha256" - "crypto/x509" - "crypto/x509/pkix" - "encoding/asn1" - "encoding/base64" - "encoding/hex" - "math/big" - "net" - "net/url" - "strings" - - "github.com/pkg/errors" -) - -// FingerprintEncoding defines the supported encodigns in certificate -// fingerprints. -type FingerprintEncoding int - -// Supported fingerprint encodings. -const ( - HexFingerprint FingerprintEncoding = iota - Base64Fingerprint - Base64UrlFingerprint -) - -// SplitSANs splits a slice of Subject Alternative Names into slices of -// IP Addresses and DNS Names. If an element is not an IP address, then it -// is bucketed as a DNS Name. -func SplitSANs(sans []string) (dnsNames []string, ips []net.IP, emails []string, uris []*url.URL) { - dnsNames = []string{} - ips = []net.IP{} - emails = []string{} - uris = []*url.URL{} - for _, san := range sans { - if ip := net.ParseIP(san); ip != nil { - ips = append(ips, ip) - } else if u, err := url.Parse(san); err == nil && u.Scheme != "" { - uris = append(uris, u) - } else if strings.Contains(san, "@") { - emails = append(emails, san) - } else { - dnsNames = append(dnsNames, san) - } - } - return -} - -// CreateSANs splits the given sans and returns a list of SubjectAlternativeName -// structs. -func CreateSANs(sans []string) []SubjectAlternativeName { - dnsNames, ips, emails, uris := SplitSANs(sans) - sanTypes := make([]SubjectAlternativeName, 0, len(sans)) - for _, v := range dnsNames { - sanTypes = append(sanTypes, SubjectAlternativeName{Type: "dns", Value: v}) - } - for _, v := range ips { - sanTypes = append(sanTypes, SubjectAlternativeName{Type: "ip", Value: v.String()}) - } - for _, v := range emails { - sanTypes = append(sanTypes, SubjectAlternativeName{Type: "email", Value: v}) - } - for _, v := range uris { - sanTypes = append(sanTypes, SubjectAlternativeName{Type: "uri", Value: v.String()}) - } - return sanTypes -} - -// Fingerprint returns the SHA-256 fingerprint of the certificate. -func Fingerprint(cert *x509.Certificate) string { - return EncodedFingerprint(cert, HexFingerprint) -} - -// EncodedFingerprint returns an encoded the SHA-256 fingerprint of the -// certificate using the specified encoding. In an invalid encoding is passed, -// the return value will be an empty string. -func EncodedFingerprint(cert *x509.Certificate, encoding FingerprintEncoding) string { - sum := sha256.Sum256(cert.Raw) - switch encoding { - case HexFingerprint: - return strings.ToLower(hex.EncodeToString(sum[:])) - case Base64Fingerprint: - return base64.StdEncoding.EncodeToString(sum[:]) - case Base64UrlFingerprint: - return base64.URLEncoding.EncodeToString(sum[:]) - default: - return "" - } -} - -// generateSerialNumber returns a random serial number. -func generateSerialNumber() (*big.Int, error) { - limit := new(big.Int).Lsh(big.NewInt(1), 128) - sn, err := rand.Int(rand.Reader, limit) - if err != nil { - return nil, errors.Wrap(err, "error generating serial number") - } - return sn, nil -} - -// subjectPublicKeyInfo is a PKIX public key structure defined in RFC 5280. 
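SplitSANs above buckets each raw name in order: parseable IPs first, then URIs with a scheme, then strings containing "@" as emails, and everything else as a DNS name. For example (hypothetical values; assumes the package is still importable):

    package main

    import (
        "fmt"

        "go.step.sm/crypto/x509util"
    )

    func main() {
        dns, ips, emails, uris := x509util.SplitSANs([]string{
            "internal.example.com", "127.0.0.1", "ops@example.com", "spiffe://cluster/ns/default",
        })
        fmt.Println("dns:", dns)       // [internal.example.com]
        fmt.Println("ips:", ips)       // [127.0.0.1]
        fmt.Println("emails:", emails) // [ops@example.com]
        for _, u := range uris {
            fmt.Println("uri:", u) // spiffe://cluster/ns/default
        }
    }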
-type subjectPublicKeyInfo struct { - Algorithm pkix.AlgorithmIdentifier - SubjectPublicKey asn1.BitString -} - -// generateSubjectKeyID generates the key identifier according the the RFC 5280 -// section 4.2.1.2. -// -// The keyIdentifier is composed of the 160-bit SHA-1 hash of the value of the -// BIT STRING subjectPublicKey (excluding the tag, length, and number of unused -// bits). -func generateSubjectKeyID(pub crypto.PublicKey) ([]byte, error) { - b, err := x509.MarshalPKIXPublicKey(pub) - if err != nil { - return nil, errors.Wrap(err, "error marshaling public key") - } - var info subjectPublicKeyInfo - if _, err = asn1.Unmarshal(b, &info); err != nil { - return nil, errors.Wrap(err, "error unmarshaling public key") - } - hash := sha1.Sum(info.SubjectPublicKey.Bytes) - return hash[:], nil -} diff --git a/vendor/go.step.sm/linkedca/.gitignore b/vendor/go.step.sm/linkedca/.gitignore deleted file mode 100644 index 66fd13c9..00000000 --- a/vendor/go.step.sm/linkedca/.gitignore +++ /dev/null @@ -1,15 +0,0 @@ -# Binaries for programs and plugins -*.exe -*.exe~ -*.dll -*.so -*.dylib - -# Test binary, built with `go test -c` -*.test - -# Output of the go coverage tool, specifically when used with LiteIDE -*.out - -# Dependency directories (remove the comment below to include it) -# vendor/ diff --git a/vendor/go.step.sm/linkedca/LICENSE b/vendor/go.step.sm/linkedca/LICENSE deleted file mode 100644 index 261eeb9e..00000000 --- a/vendor/go.step.sm/linkedca/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. 
For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/go.step.sm/linkedca/Makefile b/vendor/go.step.sm/linkedca/Makefile deleted file mode 100644 index 1390f1b5..00000000 --- a/vendor/go.step.sm/linkedca/Makefile +++ /dev/null @@ -1,6 +0,0 @@ -all: generate - -generate: - protoc --proto_path=. --go_out=. --go-grpc_out=. --go_opt=paths=source_relative --go-grpc_opt=paths=source_relative provisioners.proto admin.proto majordomo.proto - -.PHONY: all generate diff --git a/vendor/go.step.sm/linkedca/README.md b/vendor/go.step.sm/linkedca/README.md deleted file mode 100644 index d86cfcb0..00000000 --- a/vendor/go.step.sm/linkedca/README.md +++ /dev/null @@ -1,2 +0,0 @@ -# linkedca -Support for Linked CAs using protocol buffers and gRPC diff --git a/vendor/go.step.sm/linkedca/admin.pb.go b/vendor/go.step.sm/linkedca/admin.pb.go deleted file mode 100644 index 3aa52cc4..00000000 --- a/vendor/go.step.sm/linkedca/admin.pb.go +++ /dev/null @@ -1,332 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. 
-// versions: -// protoc-gen-go v1.26.0 -// protoc v3.15.8 -// source: admin.proto - -package linkedca - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - timestamppb "google.golang.org/protobuf/types/known/timestamppb" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type Admin_Type int32 - -const ( - Admin_UNKNOWN Admin_Type = 0 - Admin_ADMIN Admin_Type = 1 - Admin_SUPER_ADMIN Admin_Type = 2 -) - -// Enum value maps for Admin_Type. -var ( - Admin_Type_name = map[int32]string{ - 0: "UNKNOWN", - 1: "ADMIN", - 2: "SUPER_ADMIN", - } - Admin_Type_value = map[string]int32{ - "UNKNOWN": 0, - "ADMIN": 1, - "SUPER_ADMIN": 2, - } -) - -func (x Admin_Type) Enum() *Admin_Type { - p := new(Admin_Type) - *p = x - return p -} - -func (x Admin_Type) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (Admin_Type) Descriptor() protoreflect.EnumDescriptor { - return file_admin_proto_enumTypes[0].Descriptor() -} - -func (Admin_Type) Type() protoreflect.EnumType { - return &file_admin_proto_enumTypes[0] -} - -func (x Admin_Type) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use Admin_Type.Descriptor instead. -func (Admin_Type) EnumDescriptor() ([]byte, []int) { - return file_admin_proto_rawDescGZIP(), []int{0, 0} -} - -type Admin struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - AuthorityId string `protobuf:"bytes,2,opt,name=authority_id,json=authorityId,proto3" json:"authority_id,omitempty"` - Subject string `protobuf:"bytes,3,opt,name=subject,proto3" json:"subject,omitempty"` - ProvisionerId string `protobuf:"bytes,4,opt,name=provisioner_id,json=provisionerId,proto3" json:"provisioner_id,omitempty"` - Type Admin_Type `protobuf:"varint,5,opt,name=type,proto3,enum=linkedca.Admin_Type" json:"type,omitempty"` - CreatedAt *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` - DeletedAt *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=deleted_at,json=deletedAt,proto3" json:"deleted_at,omitempty"` -} - -func (x *Admin) Reset() { - *x = Admin{} - if protoimpl.UnsafeEnabled { - mi := &file_admin_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Admin) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Admin) ProtoMessage() {} - -func (x *Admin) ProtoReflect() protoreflect.Message { - mi := &file_admin_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Admin.ProtoReflect.Descriptor instead. 
-func (*Admin) Descriptor() ([]byte, []int) { - return file_admin_proto_rawDescGZIP(), []int{0} -} - -func (x *Admin) GetId() string { - if x != nil { - return x.Id - } - return "" -} - -func (x *Admin) GetAuthorityId() string { - if x != nil { - return x.AuthorityId - } - return "" -} - -func (x *Admin) GetSubject() string { - if x != nil { - return x.Subject - } - return "" -} - -func (x *Admin) GetProvisionerId() string { - if x != nil { - return x.ProvisionerId - } - return "" -} - -func (x *Admin) GetType() Admin_Type { - if x != nil { - return x.Type - } - return Admin_UNKNOWN -} - -func (x *Admin) GetCreatedAt() *timestamppb.Timestamp { - if x != nil { - return x.CreatedAt - } - return nil -} - -func (x *Admin) GetDeletedAt() *timestamppb.Timestamp { - if x != nil { - return x.DeletedAt - } - return nil -} - -type AdminList struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Admins []*Admin `protobuf:"bytes,1,rep,name=admins,proto3" json:"admins,omitempty"` -} - -func (x *AdminList) Reset() { - *x = AdminList{} - if protoimpl.UnsafeEnabled { - mi := &file_admin_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AdminList) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AdminList) ProtoMessage() {} - -func (x *AdminList) ProtoReflect() protoreflect.Message { - mi := &file_admin_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AdminList.ProtoReflect.Descriptor instead. -func (*AdminList) Descriptor() ([]byte, []int) { - return file_admin_proto_rawDescGZIP(), []int{1} -} - -func (x *AdminList) GetAdmins() []*Admin { - if x != nil { - return x.Admins - } - return nil -} - -var File_admin_proto protoreflect.FileDescriptor - -var file_admin_proto_rawDesc = []byte{ - 0x0a, 0x0b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x6c, - 0x69, 0x6e, 0x6b, 0x65, 0x64, 0x63, 0x61, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xcc, 0x02, 0x0a, 0x05, 0x41, 0x64, 0x6d, - 0x69, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, - 0x69, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x5f, - 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, - 0x69, 0x74, 0x79, 0x49, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, - 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x5f, 0x69, - 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, - 0x6f, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x28, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x6c, 0x69, 0x6e, 0x6b, 0x65, 0x64, 0x63, 0x61, 0x2e, - 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, - 0x12, 0x39, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x0b, 0x32, 
0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, - 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x39, 0x0a, 0x0a, 0x64, - 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x64, 0x65, 0x6c, - 0x65, 0x74, 0x65, 0x64, 0x41, 0x74, 0x22, 0x2f, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, - 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x41, - 0x44, 0x4d, 0x49, 0x4e, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x53, 0x55, 0x50, 0x45, 0x52, 0x5f, - 0x41, 0x44, 0x4d, 0x49, 0x4e, 0x10, 0x02, 0x22, 0x34, 0x0a, 0x09, 0x41, 0x64, 0x6d, 0x69, 0x6e, - 0x4c, 0x69, 0x73, 0x74, 0x12, 0x27, 0x0a, 0x06, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6c, 0x69, 0x6e, 0x6b, 0x65, 0x64, 0x63, 0x61, 0x2e, - 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x52, 0x06, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x42, 0x15, 0x5a, - 0x13, 0x67, 0x6f, 0x2e, 0x73, 0x74, 0x65, 0x70, 0x2e, 0x73, 0x6d, 0x2f, 0x6c, 0x69, 0x6e, 0x6b, - 0x65, 0x64, 0x63, 0x61, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_admin_proto_rawDescOnce sync.Once - file_admin_proto_rawDescData = file_admin_proto_rawDesc -) - -func file_admin_proto_rawDescGZIP() []byte { - file_admin_proto_rawDescOnce.Do(func() { - file_admin_proto_rawDescData = protoimpl.X.CompressGZIP(file_admin_proto_rawDescData) - }) - return file_admin_proto_rawDescData -} - -var file_admin_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_admin_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_admin_proto_goTypes = []interface{}{ - (Admin_Type)(0), // 0: linkedca.Admin.Type - (*Admin)(nil), // 1: linkedca.Admin - (*AdminList)(nil), // 2: linkedca.AdminList - (*timestamppb.Timestamp)(nil), // 3: google.protobuf.Timestamp -} -var file_admin_proto_depIdxs = []int32{ - 0, // 0: linkedca.Admin.type:type_name -> linkedca.Admin.Type - 3, // 1: linkedca.Admin.created_at:type_name -> google.protobuf.Timestamp - 3, // 2: linkedca.Admin.deleted_at:type_name -> google.protobuf.Timestamp - 1, // 3: linkedca.AdminList.admins:type_name -> linkedca.Admin - 4, // [4:4] is the sub-list for method output_type - 4, // [4:4] is the sub-list for method input_type - 4, // [4:4] is the sub-list for extension type_name - 4, // [4:4] is the sub-list for extension extendee - 0, // [0:4] is the sub-list for field type_name -} - -func init() { file_admin_proto_init() } -func file_admin_proto_init() { - if File_admin_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_admin_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Admin); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_admin_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AdminList); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_admin_proto_rawDesc, - NumEnums: 1, - 
NumMessages: 2, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_admin_proto_goTypes, - DependencyIndexes: file_admin_proto_depIdxs, - EnumInfos: file_admin_proto_enumTypes, - MessageInfos: file_admin_proto_msgTypes, - }.Build() - File_admin_proto = out.File - file_admin_proto_rawDesc = nil - file_admin_proto_goTypes = nil - file_admin_proto_depIdxs = nil -} diff --git a/vendor/go.step.sm/linkedca/admin.proto b/vendor/go.step.sm/linkedca/admin.proto deleted file mode 100644 index 53121619..00000000 --- a/vendor/go.step.sm/linkedca/admin.proto +++ /dev/null @@ -1,26 +0,0 @@ -syntax = "proto3"; - -package linkedca; - -option go_package = "go.step.sm/linkedca"; - -import "google/protobuf/timestamp.proto"; - -message Admin { - enum Type { - UNKNOWN = 0; - ADMIN = 1; - SUPER_ADMIN = 2; - } - string id = 1; - string authority_id = 2; - string subject = 3; - string provisioner_id = 4; - Type type = 5; - google.protobuf.Timestamp created_at = 6; - google.protobuf.Timestamp deleted_at = 7; -} - -message AdminList { - repeated Admin admins = 1; -} diff --git a/vendor/go.step.sm/linkedca/majordomo.pb.go b/vendor/go.step.sm/linkedca/majordomo.pb.go deleted file mode 100644 index 5c3b8fb3..00000000 --- a/vendor/go.step.sm/linkedca/majordomo.pb.go +++ /dev/null @@ -1,1325 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.26.0 -// protoc v3.15.8 -// source: majordomo.proto - -package linkedca - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type TODO struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *TODO) Reset() { - *x = TODO{} - if protoimpl.UnsafeEnabled { - mi := &file_majordomo_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *TODO) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TODO) ProtoMessage() {} - -func (x *TODO) ProtoReflect() protoreflect.Message { - mi := &file_majordomo_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TODO.ProtoReflect.Descriptor instead. 
-func (*TODO) Descriptor() ([]byte, []int) { - return file_majordomo_proto_rawDescGZIP(), []int{0} -} - -type LoginRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - AuthorityId string `protobuf:"bytes,1,opt,name=authority_id,json=authorityId,proto3" json:"authority_id,omitempty"` - Username string `protobuf:"bytes,2,opt,name=username,proto3" json:"username,omitempty"` - Password string `protobuf:"bytes,3,opt,name=password,proto3" json:"password,omitempty"` - PemCertificateRequest string `protobuf:"bytes,4,opt,name=pem_certificate_request,json=pemCertificateRequest,proto3" json:"pem_certificate_request,omitempty"` -} - -func (x *LoginRequest) Reset() { - *x = LoginRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_majordomo_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *LoginRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*LoginRequest) ProtoMessage() {} - -func (x *LoginRequest) ProtoReflect() protoreflect.Message { - mi := &file_majordomo_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use LoginRequest.ProtoReflect.Descriptor instead. -func (*LoginRequest) Descriptor() ([]byte, []int) { - return file_majordomo_proto_rawDescGZIP(), []int{1} -} - -func (x *LoginRequest) GetAuthorityId() string { - if x != nil { - return x.AuthorityId - } - return "" -} - -func (x *LoginRequest) GetUsername() string { - if x != nil { - return x.Username - } - return "" -} - -func (x *LoginRequest) GetPassword() string { - if x != nil { - return x.Password - } - return "" -} - -func (x *LoginRequest) GetPemCertificateRequest() string { - if x != nil { - return x.PemCertificateRequest - } - return "" -} - -type LoginResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - PemCertificate string `protobuf:"bytes,1,opt,name=pem_certificate,json=pemCertificate,proto3" json:"pem_certificate,omitempty"` - PemCertificateChain string `protobuf:"bytes,2,opt,name=pem_certificate_chain,json=pemCertificateChain,proto3" json:"pem_certificate_chain,omitempty"` -} - -func (x *LoginResponse) Reset() { - *x = LoginResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_majordomo_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *LoginResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*LoginResponse) ProtoMessage() {} - -func (x *LoginResponse) ProtoReflect() protoreflect.Message { - mi := &file_majordomo_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use LoginResponse.ProtoReflect.Descriptor instead. 
-func (*LoginResponse) Descriptor() ([]byte, []int) { - return file_majordomo_proto_rawDescGZIP(), []int{2} -} - -func (x *LoginResponse) GetPemCertificate() string { - if x != nil { - return x.PemCertificate - } - return "" -} - -func (x *LoginResponse) GetPemCertificateChain() string { - if x != nil { - return x.PemCertificateChain - } - return "" -} - -type ConfigurationRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - AuthorityId string `protobuf:"bytes,1,opt,name=authority_id,json=authorityId,proto3" json:"authority_id,omitempty"` -} - -func (x *ConfigurationRequest) Reset() { - *x = ConfigurationRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_majordomo_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ConfigurationRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ConfigurationRequest) ProtoMessage() {} - -func (x *ConfigurationRequest) ProtoReflect() protoreflect.Message { - mi := &file_majordomo_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ConfigurationRequest.ProtoReflect.Descriptor instead. -func (*ConfigurationRequest) Descriptor() ([]byte, []int) { - return file_majordomo_proto_rawDescGZIP(), []int{3} -} - -func (x *ConfigurationRequest) GetAuthorityId() string { - if x != nil { - return x.AuthorityId - } - return "" -} - -type ConfigurationResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Provisioners []*Provisioner `protobuf:"bytes,1,rep,name=provisioners,proto3" json:"provisioners,omitempty"` - Admins []*Admin `protobuf:"bytes,2,rep,name=admins,proto3" json:"admins,omitempty"` -} - -func (x *ConfigurationResponse) Reset() { - *x = ConfigurationResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_majordomo_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ConfigurationResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ConfigurationResponse) ProtoMessage() {} - -func (x *ConfigurationResponse) ProtoReflect() protoreflect.Message { - mi := &file_majordomo_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ConfigurationResponse.ProtoReflect.Descriptor instead. 
-func (*ConfigurationResponse) Descriptor() ([]byte, []int) { - return file_majordomo_proto_rawDescGZIP(), []int{4} -} - -func (x *ConfigurationResponse) GetProvisioners() []*Provisioner { - if x != nil { - return x.Provisioners - } - return nil -} - -func (x *ConfigurationResponse) GetAdmins() []*Admin { - if x != nil { - return x.Admins - } - return nil -} - -type CreateProvisionerRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Type Provisioner_Type `protobuf:"varint,1,opt,name=type,proto3,enum=linkedca.Provisioner_Type" json:"type,omitempty"` - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` - Details *ProvisionerDetails `protobuf:"bytes,3,opt,name=details,proto3" json:"details,omitempty"` - Claims *Claims `protobuf:"bytes,4,opt,name=claims,proto3" json:"claims,omitempty"` - X509Template *Template `protobuf:"bytes,5,opt,name=x509_template,json=x509Template,proto3" json:"x509_template,omitempty"` - SshTemplate *Template `protobuf:"bytes,6,opt,name=ssh_template,json=sshTemplate,proto3" json:"ssh_template,omitempty"` -} - -func (x *CreateProvisionerRequest) Reset() { - *x = CreateProvisionerRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_majordomo_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CreateProvisionerRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CreateProvisionerRequest) ProtoMessage() {} - -func (x *CreateProvisionerRequest) ProtoReflect() protoreflect.Message { - mi := &file_majordomo_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CreateProvisionerRequest.ProtoReflect.Descriptor instead. 
-func (*CreateProvisionerRequest) Descriptor() ([]byte, []int) { - return file_majordomo_proto_rawDescGZIP(), []int{5} -} - -func (x *CreateProvisionerRequest) GetType() Provisioner_Type { - if x != nil { - return x.Type - } - return Provisioner_NOOP -} - -func (x *CreateProvisionerRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *CreateProvisionerRequest) GetDetails() *ProvisionerDetails { - if x != nil { - return x.Details - } - return nil -} - -func (x *CreateProvisionerRequest) GetClaims() *Claims { - if x != nil { - return x.Claims - } - return nil -} - -func (x *CreateProvisionerRequest) GetX509Template() *Template { - if x != nil { - return x.X509Template - } - return nil -} - -func (x *CreateProvisionerRequest) GetSshTemplate() *Template { - if x != nil { - return x.SshTemplate - } - return nil -} - -type UpdateProvisionerRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` - Details *ProvisionerDetails `protobuf:"bytes,3,opt,name=details,proto3" json:"details,omitempty"` - Claims *Claims `protobuf:"bytes,4,opt,name=claims,proto3" json:"claims,omitempty"` - X509Template *Template `protobuf:"bytes,5,opt,name=x509_template,json=x509Template,proto3" json:"x509_template,omitempty"` - SshTemplate *Template `protobuf:"bytes,6,opt,name=ssh_template,json=sshTemplate,proto3" json:"ssh_template,omitempty"` -} - -func (x *UpdateProvisionerRequest) Reset() { - *x = UpdateProvisionerRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_majordomo_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *UpdateProvisionerRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*UpdateProvisionerRequest) ProtoMessage() {} - -func (x *UpdateProvisionerRequest) ProtoReflect() protoreflect.Message { - mi := &file_majordomo_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use UpdateProvisionerRequest.ProtoReflect.Descriptor instead. 
-func (*UpdateProvisionerRequest) Descriptor() ([]byte, []int) { - return file_majordomo_proto_rawDescGZIP(), []int{6} -} - -func (x *UpdateProvisionerRequest) GetId() string { - if x != nil { - return x.Id - } - return "" -} - -func (x *UpdateProvisionerRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *UpdateProvisionerRequest) GetDetails() *ProvisionerDetails { - if x != nil { - return x.Details - } - return nil -} - -func (x *UpdateProvisionerRequest) GetClaims() *Claims { - if x != nil { - return x.Claims - } - return nil -} - -func (x *UpdateProvisionerRequest) GetX509Template() *Template { - if x != nil { - return x.X509Template - } - return nil -} - -func (x *UpdateProvisionerRequest) GetSshTemplate() *Template { - if x != nil { - return x.SshTemplate - } - return nil -} - -type DeleteProvisionerRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` -} - -func (x *DeleteProvisionerRequest) Reset() { - *x = DeleteProvisionerRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_majordomo_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DeleteProvisionerRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DeleteProvisionerRequest) ProtoMessage() {} - -func (x *DeleteProvisionerRequest) ProtoReflect() protoreflect.Message { - mi := &file_majordomo_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DeleteProvisionerRequest.ProtoReflect.Descriptor instead. -func (*DeleteProvisionerRequest) Descriptor() ([]byte, []int) { - return file_majordomo_proto_rawDescGZIP(), []int{7} -} - -func (x *DeleteProvisionerRequest) GetId() string { - if x != nil { - return x.Id - } - return "" -} - -type CreateAdminRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Subject string `protobuf:"bytes,1,opt,name=subject,proto3" json:"subject,omitempty"` - ProvisionerId string `protobuf:"bytes,2,opt,name=provisioner_id,json=provisionerId,proto3" json:"provisioner_id,omitempty"` - Type Admin_Type `protobuf:"varint,3,opt,name=type,proto3,enum=linkedca.Admin_Type" json:"type,omitempty"` -} - -func (x *CreateAdminRequest) Reset() { - *x = CreateAdminRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_majordomo_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CreateAdminRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CreateAdminRequest) ProtoMessage() {} - -func (x *CreateAdminRequest) ProtoReflect() protoreflect.Message { - mi := &file_majordomo_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CreateAdminRequest.ProtoReflect.Descriptor instead. 
-func (*CreateAdminRequest) Descriptor() ([]byte, []int) { - return file_majordomo_proto_rawDescGZIP(), []int{8} -} - -func (x *CreateAdminRequest) GetSubject() string { - if x != nil { - return x.Subject - } - return "" -} - -func (x *CreateAdminRequest) GetProvisionerId() string { - if x != nil { - return x.ProvisionerId - } - return "" -} - -func (x *CreateAdminRequest) GetType() Admin_Type { - if x != nil { - return x.Type - } - return Admin_UNKNOWN -} - -type UpdateAdminRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Type Admin_Type `protobuf:"varint,2,opt,name=type,proto3,enum=linkedca.Admin_Type" json:"type,omitempty"` -} - -func (x *UpdateAdminRequest) Reset() { - *x = UpdateAdminRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_majordomo_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *UpdateAdminRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*UpdateAdminRequest) ProtoMessage() {} - -func (x *UpdateAdminRequest) ProtoReflect() protoreflect.Message { - mi := &file_majordomo_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use UpdateAdminRequest.ProtoReflect.Descriptor instead. -func (*UpdateAdminRequest) Descriptor() ([]byte, []int) { - return file_majordomo_proto_rawDescGZIP(), []int{9} -} - -func (x *UpdateAdminRequest) GetId() string { - if x != nil { - return x.Id - } - return "" -} - -func (x *UpdateAdminRequest) GetType() Admin_Type { - if x != nil { - return x.Type - } - return Admin_UNKNOWN -} - -type DeleteAdminRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` -} - -func (x *DeleteAdminRequest) Reset() { - *x = DeleteAdminRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_majordomo_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DeleteAdminRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DeleteAdminRequest) ProtoMessage() {} - -func (x *DeleteAdminRequest) ProtoReflect() protoreflect.Message { - mi := &file_majordomo_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DeleteAdminRequest.ProtoReflect.Descriptor instead. 
-func (*DeleteAdminRequest) Descriptor() ([]byte, []int) { - return file_majordomo_proto_rawDescGZIP(), []int{10} -} - -func (x *DeleteAdminRequest) GetId() string { - if x != nil { - return x.Id - } - return "" -} - -type CertificateRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - PemCertificate string `protobuf:"bytes,1,opt,name=pem_certificate,json=pemCertificate,proto3" json:"pem_certificate,omitempty"` - PemCertificateChain string `protobuf:"bytes,2,opt,name=pem_certificate_chain,json=pemCertificateChain,proto3" json:"pem_certificate_chain,omitempty"` -} - -func (x *CertificateRequest) Reset() { - *x = CertificateRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_majordomo_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CertificateRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CertificateRequest) ProtoMessage() {} - -func (x *CertificateRequest) ProtoReflect() protoreflect.Message { - mi := &file_majordomo_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CertificateRequest.ProtoReflect.Descriptor instead. -func (*CertificateRequest) Descriptor() ([]byte, []int) { - return file_majordomo_proto_rawDescGZIP(), []int{11} -} - -func (x *CertificateRequest) GetPemCertificate() string { - if x != nil { - return x.PemCertificate - } - return "" -} - -func (x *CertificateRequest) GetPemCertificateChain() string { - if x != nil { - return x.PemCertificateChain - } - return "" -} - -type CertificateResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` -} - -func (x *CertificateResponse) Reset() { - *x = CertificateResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_majordomo_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CertificateResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CertificateResponse) ProtoMessage() {} - -func (x *CertificateResponse) ProtoReflect() protoreflect.Message { - mi := &file_majordomo_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CertificateResponse.ProtoReflect.Descriptor instead. 
-func (*CertificateResponse) Descriptor() ([]byte, []int) { - return file_majordomo_proto_rawDescGZIP(), []int{12} -} - -func (x *CertificateResponse) GetId() string { - if x != nil { - return x.Id - } - return "" -} - -type SSHCertificateRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Certificate string `protobuf:"bytes,1,opt,name=certificate,proto3" json:"certificate,omitempty"` -} - -func (x *SSHCertificateRequest) Reset() { - *x = SSHCertificateRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_majordomo_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SSHCertificateRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SSHCertificateRequest) ProtoMessage() {} - -func (x *SSHCertificateRequest) ProtoReflect() protoreflect.Message { - mi := &file_majordomo_proto_msgTypes[13] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SSHCertificateRequest.ProtoReflect.Descriptor instead. -func (*SSHCertificateRequest) Descriptor() ([]byte, []int) { - return file_majordomo_proto_rawDescGZIP(), []int{13} -} - -func (x *SSHCertificateRequest) GetCertificate() string { - if x != nil { - return x.Certificate - } - return "" -} - -type SSHCertificateResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` -} - -func (x *SSHCertificateResponse) Reset() { - *x = SSHCertificateResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_majordomo_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SSHCertificateResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SSHCertificateResponse) ProtoMessage() {} - -func (x *SSHCertificateResponse) ProtoReflect() protoreflect.Message { - mi := &file_majordomo_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SSHCertificateResponse.ProtoReflect.Descriptor instead. 
-func (*SSHCertificateResponse) Descriptor() ([]byte, []int) { - return file_majordomo_proto_rawDescGZIP(), []int{14} -} - -func (x *SSHCertificateResponse) GetId() string { - if x != nil { - return x.Id - } - return "" -} - -var File_majordomo_proto protoreflect.FileDescriptor - -var file_majordomo_proto_rawDesc = []byte{ - 0x0a, 0x0f, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x64, 0x6f, 0x6d, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x12, 0x08, 0x6c, 0x69, 0x6e, 0x6b, 0x65, 0x64, 0x63, 0x61, 0x1a, 0x0b, 0x61, 0x64, 0x6d, - 0x69, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x12, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, - 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x06, 0x0a, 0x04, - 0x54, 0x4f, 0x44, 0x4f, 0x22, 0xa1, 0x01, 0x0a, 0x0c, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, - 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x61, 0x75, 0x74, - 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, - 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, - 0x12, 0x36, 0x0a, 0x17, 0x70, 0x65, 0x6d, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x15, 0x70, 0x65, 0x6d, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x6c, 0x0a, 0x0d, 0x4c, 0x6f, 0x67, 0x69, - 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x65, 0x6d, - 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0e, 0x70, 0x65, 0x6d, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, - 0x74, 0x65, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x65, 0x6d, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x13, 0x70, 0x65, 0x6d, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x22, 0x39, 0x0a, 0x14, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, - 0x0a, 0x0c, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x49, - 0x64, 0x22, 0x7b, 0x0a, 0x15, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, 0x0a, 0x0c, 0x70, 0x72, - 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x15, 0x2e, 0x6c, 0x69, 0x6e, 0x6b, 0x65, 0x64, 0x63, 0x61, 0x2e, 0x50, 0x72, 0x6f, 0x76, - 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, - 0x6f, 0x6e, 0x65, 0x72, 0x73, 0x12, 0x27, 0x0a, 0x06, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6c, 0x69, 0x6e, 0x6b, 0x65, 0x64, 0x63, 0x61, - 0x2e, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x52, 0x06, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x22, 
0xb0, - 0x02, 0x0a, 0x18, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, - 0x6f, 0x6e, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2e, 0x0a, 0x04, 0x74, - 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x6c, 0x69, 0x6e, 0x6b, - 0x65, 0x64, 0x63, 0x61, 0x2e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, - 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x36, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1c, 0x2e, 0x6c, 0x69, 0x6e, 0x6b, 0x65, 0x64, 0x63, 0x61, 0x2e, 0x50, 0x72, 0x6f, 0x76, - 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x07, - 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x28, 0x0a, 0x06, 0x63, 0x6c, 0x61, 0x69, 0x6d, - 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x6c, 0x69, 0x6e, 0x6b, 0x65, 0x64, - 0x63, 0x61, 0x2e, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x73, 0x52, 0x06, 0x63, 0x6c, 0x61, 0x69, 0x6d, - 0x73, 0x12, 0x37, 0x0a, 0x0d, 0x78, 0x35, 0x30, 0x39, 0x5f, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, - 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6c, 0x69, 0x6e, 0x6b, 0x65, - 0x64, 0x63, 0x61, 0x2e, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x52, 0x0c, 0x78, 0x35, - 0x30, 0x39, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x12, 0x35, 0x0a, 0x0c, 0x73, 0x73, - 0x68, 0x5f, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x12, 0x2e, 0x6c, 0x69, 0x6e, 0x6b, 0x65, 0x64, 0x63, 0x61, 0x2e, 0x54, 0x65, 0x6d, 0x70, - 0x6c, 0x61, 0x74, 0x65, 0x52, 0x0b, 0x73, 0x73, 0x68, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, - 0x65, 0x22, 0x90, 0x02, 0x0a, 0x18, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, - 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, - 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, - 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x6c, 0x69, 0x6e, 0x6b, 0x65, 0x64, 0x63, 0x61, 0x2e, 0x50, - 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, - 0x73, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x28, 0x0a, 0x06, 0x63, 0x6c, - 0x61, 0x69, 0x6d, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x6c, 0x69, 0x6e, - 0x6b, 0x65, 0x64, 0x63, 0x61, 0x2e, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x73, 0x52, 0x06, 0x63, 0x6c, - 0x61, 0x69, 0x6d, 0x73, 0x12, 0x37, 0x0a, 0x0d, 0x78, 0x35, 0x30, 0x39, 0x5f, 0x74, 0x65, 0x6d, - 0x70, 0x6c, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6c, 0x69, - 0x6e, 0x6b, 0x65, 0x64, 0x63, 0x61, 0x2e, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x52, - 0x0c, 0x78, 0x35, 0x30, 0x39, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x12, 0x35, 0x0a, - 0x0c, 0x73, 0x73, 0x68, 0x5f, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6c, 0x69, 0x6e, 0x6b, 0x65, 0x64, 0x63, 0x61, 0x2e, 0x54, - 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x52, 0x0b, 0x73, 0x73, 0x68, 0x54, 0x65, 0x6d, 0x70, - 0x6c, 0x61, 0x74, 
0x65, 0x22, 0x2a, 0x0a, 0x18, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x72, - 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, - 0x22, 0x7f, 0x0a, 0x12, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x5f, - 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, - 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x28, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x6c, 0x69, 0x6e, 0x6b, 0x65, 0x64, 0x63, 0x61, - 0x2e, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, - 0x65, 0x22, 0x4e, 0x0a, 0x12, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x28, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x6c, 0x69, 0x6e, 0x6b, 0x65, 0x64, 0x63, 0x61, - 0x2e, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, - 0x65, 0x22, 0x24, 0x0a, 0x12, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x22, 0x71, 0x0a, 0x12, 0x43, 0x65, 0x72, 0x74, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x27, 0x0a, - 0x0f, 0x70, 0x65, 0x6d, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, 0x65, 0x6d, 0x43, 0x65, 0x72, 0x74, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x65, 0x6d, 0x5f, 0x63, 0x65, - 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x70, 0x65, 0x6d, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x22, 0x25, 0x0a, 0x13, 0x43, 0x65, - 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, - 0x64, 0x22, 0x39, 0x0a, 0x15, 0x53, 0x53, 0x48, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x65, - 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0b, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x22, 0x28, 0x0a, 0x16, - 0x53, 0x53, 0x48, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x32, 0xda, 0x06, 0x0a, 0x09, 0x4d, 0x61, 0x6a, 0x6f, 0x72, - 0x64, 0x6f, 0x6d, 0x6f, 0x12, 0x38, 0x0a, 0x05, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x12, 0x16, 0x2e, - 0x6c, 0x69, 0x6e, 0x6b, 0x65, 0x64, 0x63, 
0x61, 0x2e, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x6c, 0x69, 0x6e, 0x6b, 0x65, 0x64, 0x63, 0x61, - 0x2e, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x53, - 0x0a, 0x10, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x1e, 0x2e, 0x6c, 0x69, 0x6e, 0x6b, 0x65, 0x64, 0x63, 0x61, 0x2e, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x6c, 0x69, 0x6e, 0x6b, 0x65, 0x64, 0x63, 0x61, 0x2e, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x4e, 0x0a, 0x11, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, - 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x12, 0x22, 0x2e, 0x6c, 0x69, 0x6e, 0x6b, 0x65, - 0x64, 0x63, 0x61, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, - 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x6c, - 0x69, 0x6e, 0x6b, 0x65, 0x64, 0x63, 0x61, 0x2e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, - 0x6e, 0x65, 0x72, 0x12, 0x4e, 0x0a, 0x11, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, - 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x12, 0x22, 0x2e, 0x6c, 0x69, 0x6e, 0x6b, 0x65, - 0x64, 0x63, 0x61, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, - 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x6c, - 0x69, 0x6e, 0x6b, 0x65, 0x64, 0x63, 0x61, 0x2e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, - 0x6e, 0x65, 0x72, 0x12, 0x4e, 0x0a, 0x11, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x72, 0x6f, - 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x12, 0x22, 0x2e, 0x6c, 0x69, 0x6e, 0x6b, 0x65, - 0x64, 0x63, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, - 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x6c, - 0x69, 0x6e, 0x6b, 0x65, 0x64, 0x63, 0x61, 0x2e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, - 0x6e, 0x65, 0x72, 0x12, 0x3c, 0x0a, 0x0b, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, 0x64, 0x6d, - 0x69, 0x6e, 0x12, 0x1c, 0x2e, 0x6c, 0x69, 0x6e, 0x6b, 0x65, 0x64, 0x63, 0x61, 0x2e, 0x43, 0x72, - 0x65, 0x61, 0x74, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x0f, 0x2e, 0x6c, 0x69, 0x6e, 0x6b, 0x65, 0x64, 0x63, 0x61, 0x2e, 0x41, 0x64, 0x6d, 0x69, - 0x6e, 0x12, 0x3c, 0x0a, 0x0b, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, - 0x12, 0x1c, 0x2e, 0x6c, 0x69, 0x6e, 0x6b, 0x65, 0x64, 0x63, 0x61, 0x2e, 0x55, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0f, - 0x2e, 0x6c, 0x69, 0x6e, 0x6b, 0x65, 0x64, 0x63, 0x61, 0x2e, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x12, - 0x3c, 0x0a, 0x0b, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x12, 0x1c, - 0x2e, 0x6c, 0x69, 0x6e, 0x6b, 0x65, 0x64, 0x63, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0f, 0x2e, 0x6c, - 0x69, 0x6e, 0x6b, 0x65, 0x64, 0x63, 0x61, 0x2e, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x12, 0x4e, 0x0a, - 0x0f, 0x50, 0x6f, 0x73, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, - 0x12, 0x1c, 0x2e, 0x6c, 0x69, 0x6e, 0x6b, 0x65, 0x64, 0x63, 0x61, 
0x2e, 0x43, 0x65, 0x72, 0x74, - 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, - 0x2e, 0x6c, 0x69, 0x6e, 0x6b, 0x65, 0x64, 0x63, 0x61, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x57, 0x0a, - 0x12, 0x50, 0x6f, 0x73, 0x74, 0x53, 0x53, 0x48, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x65, 0x12, 0x1f, 0x2e, 0x6c, 0x69, 0x6e, 0x6b, 0x65, 0x64, 0x63, 0x61, 0x2e, 0x53, - 0x53, 0x48, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x6c, 0x69, 0x6e, 0x6b, 0x65, 0x64, 0x63, 0x61, 0x2e, - 0x53, 0x53, 0x48, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x11, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, - 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x0e, 0x2e, 0x6c, 0x69, - 0x6e, 0x6b, 0x65, 0x64, 0x63, 0x61, 0x2e, 0x54, 0x4f, 0x44, 0x4f, 0x1a, 0x0e, 0x2e, 0x6c, 0x69, - 0x6e, 0x6b, 0x65, 0x64, 0x63, 0x61, 0x2e, 0x54, 0x4f, 0x44, 0x4f, 0x12, 0x36, 0x0a, 0x14, 0x52, - 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x53, 0x53, 0x48, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x65, 0x12, 0x0e, 0x2e, 0x6c, 0x69, 0x6e, 0x6b, 0x65, 0x64, 0x63, 0x61, 0x2e, 0x54, - 0x4f, 0x44, 0x4f, 0x1a, 0x0e, 0x2e, 0x6c, 0x69, 0x6e, 0x6b, 0x65, 0x64, 0x63, 0x61, 0x2e, 0x54, - 0x4f, 0x44, 0x4f, 0x42, 0x15, 0x5a, 0x13, 0x67, 0x6f, 0x2e, 0x73, 0x74, 0x65, 0x70, 0x2e, 0x73, - 0x6d, 0x2f, 0x6c, 0x69, 0x6e, 0x6b, 0x65, 0x64, 0x63, 0x61, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, -} - -var ( - file_majordomo_proto_rawDescOnce sync.Once - file_majordomo_proto_rawDescData = file_majordomo_proto_rawDesc -) - -func file_majordomo_proto_rawDescGZIP() []byte { - file_majordomo_proto_rawDescOnce.Do(func() { - file_majordomo_proto_rawDescData = protoimpl.X.CompressGZIP(file_majordomo_proto_rawDescData) - }) - return file_majordomo_proto_rawDescData -} - -var file_majordomo_proto_msgTypes = make([]protoimpl.MessageInfo, 15) -var file_majordomo_proto_goTypes = []interface{}{ - (*TODO)(nil), // 0: linkedca.TODO - (*LoginRequest)(nil), // 1: linkedca.LoginRequest - (*LoginResponse)(nil), // 2: linkedca.LoginResponse - (*ConfigurationRequest)(nil), // 3: linkedca.ConfigurationRequest - (*ConfigurationResponse)(nil), // 4: linkedca.ConfigurationResponse - (*CreateProvisionerRequest)(nil), // 5: linkedca.CreateProvisionerRequest - (*UpdateProvisionerRequest)(nil), // 6: linkedca.UpdateProvisionerRequest - (*DeleteProvisionerRequest)(nil), // 7: linkedca.DeleteProvisionerRequest - (*CreateAdminRequest)(nil), // 8: linkedca.CreateAdminRequest - (*UpdateAdminRequest)(nil), // 9: linkedca.UpdateAdminRequest - (*DeleteAdminRequest)(nil), // 10: linkedca.DeleteAdminRequest - (*CertificateRequest)(nil), // 11: linkedca.CertificateRequest - (*CertificateResponse)(nil), // 12: linkedca.CertificateResponse - (*SSHCertificateRequest)(nil), // 13: linkedca.SSHCertificateRequest - (*SSHCertificateResponse)(nil), // 14: linkedca.SSHCertificateResponse - (*Provisioner)(nil), // 15: linkedca.Provisioner - (*Admin)(nil), // 16: linkedca.Admin - (Provisioner_Type)(0), // 17: linkedca.Provisioner.Type - (*ProvisionerDetails)(nil), // 18: linkedca.ProvisionerDetails - (*Claims)(nil), // 19: linkedca.Claims - (*Template)(nil), // 20: linkedca.Template - (Admin_Type)(0), // 21: linkedca.Admin.Type -} -var 
file_majordomo_proto_depIdxs = []int32{ - 15, // 0: linkedca.ConfigurationResponse.provisioners:type_name -> linkedca.Provisioner - 16, // 1: linkedca.ConfigurationResponse.admins:type_name -> linkedca.Admin - 17, // 2: linkedca.CreateProvisionerRequest.type:type_name -> linkedca.Provisioner.Type - 18, // 3: linkedca.CreateProvisionerRequest.details:type_name -> linkedca.ProvisionerDetails - 19, // 4: linkedca.CreateProvisionerRequest.claims:type_name -> linkedca.Claims - 20, // 5: linkedca.CreateProvisionerRequest.x509_template:type_name -> linkedca.Template - 20, // 6: linkedca.CreateProvisionerRequest.ssh_template:type_name -> linkedca.Template - 18, // 7: linkedca.UpdateProvisionerRequest.details:type_name -> linkedca.ProvisionerDetails - 19, // 8: linkedca.UpdateProvisionerRequest.claims:type_name -> linkedca.Claims - 20, // 9: linkedca.UpdateProvisionerRequest.x509_template:type_name -> linkedca.Template - 20, // 10: linkedca.UpdateProvisionerRequest.ssh_template:type_name -> linkedca.Template - 21, // 11: linkedca.CreateAdminRequest.type:type_name -> linkedca.Admin.Type - 21, // 12: linkedca.UpdateAdminRequest.type:type_name -> linkedca.Admin.Type - 1, // 13: linkedca.Majordomo.Login:input_type -> linkedca.LoginRequest - 3, // 14: linkedca.Majordomo.GetConfiguration:input_type -> linkedca.ConfigurationRequest - 5, // 15: linkedca.Majordomo.CreateProvisioner:input_type -> linkedca.CreateProvisionerRequest - 6, // 16: linkedca.Majordomo.UpdateProvisioner:input_type -> linkedca.UpdateProvisionerRequest - 7, // 17: linkedca.Majordomo.DeleteProvisioner:input_type -> linkedca.DeleteProvisionerRequest - 8, // 18: linkedca.Majordomo.CreateAdmin:input_type -> linkedca.CreateAdminRequest - 9, // 19: linkedca.Majordomo.UpdateAdmin:input_type -> linkedca.UpdateAdminRequest - 10, // 20: linkedca.Majordomo.DeleteAdmin:input_type -> linkedca.DeleteAdminRequest - 11, // 21: linkedca.Majordomo.PostCertificate:input_type -> linkedca.CertificateRequest - 13, // 22: linkedca.Majordomo.PostSSHCertificate:input_type -> linkedca.SSHCertificateRequest - 0, // 23: linkedca.Majordomo.RevokeCertificate:input_type -> linkedca.TODO - 0, // 24: linkedca.Majordomo.RevokeSSHCertificate:input_type -> linkedca.TODO - 2, // 25: linkedca.Majordomo.Login:output_type -> linkedca.LoginResponse - 4, // 26: linkedca.Majordomo.GetConfiguration:output_type -> linkedca.ConfigurationResponse - 15, // 27: linkedca.Majordomo.CreateProvisioner:output_type -> linkedca.Provisioner - 15, // 28: linkedca.Majordomo.UpdateProvisioner:output_type -> linkedca.Provisioner - 15, // 29: linkedca.Majordomo.DeleteProvisioner:output_type -> linkedca.Provisioner - 16, // 30: linkedca.Majordomo.CreateAdmin:output_type -> linkedca.Admin - 16, // 31: linkedca.Majordomo.UpdateAdmin:output_type -> linkedca.Admin - 16, // 32: linkedca.Majordomo.DeleteAdmin:output_type -> linkedca.Admin - 12, // 33: linkedca.Majordomo.PostCertificate:output_type -> linkedca.CertificateResponse - 14, // 34: linkedca.Majordomo.PostSSHCertificate:output_type -> linkedca.SSHCertificateResponse - 0, // 35: linkedca.Majordomo.RevokeCertificate:output_type -> linkedca.TODO - 0, // 36: linkedca.Majordomo.RevokeSSHCertificate:output_type -> linkedca.TODO - 25, // [25:37] is the sub-list for method output_type - 13, // [13:25] is the sub-list for method input_type - 13, // [13:13] is the sub-list for extension type_name - 13, // [13:13] is the sub-list for extension extendee - 0, // [0:13] is the sub-list for field type_name -} - -func init() { file_majordomo_proto_init() } 
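For orientation while reviewing this hunk: the goTypes and depIdxs tables above map descriptor indices to concrete Go types, and file_majordomo_proto_init below feeds the raw descriptor into the protobuf runtime's global registry. A minimal sketch of what that registration provides — hypothetical usage, assuming go.step.sm/linkedca is still imported for its init side effects:

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoregistry"

	_ "go.step.sm/linkedca" // the generated init() above registers majordomo.proto
)

func main() {
	// Once the generated init() has run, the file descriptor is resolvable
	// from the global registry by its proto path.
	fd, err := protoregistry.GlobalFiles.FindFileByPath("majordomo.proto")
	if err != nil {
		panic(err)
	}
	// The single service declared in the file is linkedca.Majordomo.
	fmt.Println(fd.Services().Get(0).FullName())
}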
-func file_majordomo_proto_init() { - if File_majordomo_proto != nil { - return - } - file_admin_proto_init() - file_provisioners_proto_init() - if !protoimpl.UnsafeEnabled { - file_majordomo_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TODO); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_majordomo_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*LoginRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_majordomo_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*LoginResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_majordomo_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ConfigurationRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_majordomo_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ConfigurationResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_majordomo_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateProvisionerRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_majordomo_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpdateProvisionerRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_majordomo_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteProvisionerRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_majordomo_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateAdminRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_majordomo_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpdateAdminRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_majordomo_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteAdminRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_majordomo_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CertificateRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_majordomo_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CertificateResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - 
file_majordomo_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SSHCertificateRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_majordomo_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SSHCertificateResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_majordomo_proto_rawDesc, - NumEnums: 0, - NumMessages: 15, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_majordomo_proto_goTypes, - DependencyIndexes: file_majordomo_proto_depIdxs, - MessageInfos: file_majordomo_proto_msgTypes, - }.Build() - File_majordomo_proto = out.File - file_majordomo_proto_rawDesc = nil - file_majordomo_proto_goTypes = nil - file_majordomo_proto_depIdxs = nil -} diff --git a/vendor/go.step.sm/linkedca/majordomo.proto b/vendor/go.step.sm/linkedca/majordomo.proto deleted file mode 100644 index 1cbc1d7c..00000000 --- a/vendor/go.step.sm/linkedca/majordomo.proto +++ /dev/null @@ -1,120 +0,0 @@ -syntax = "proto3"; - -package linkedca; - -option go_package = "go.step.sm/linkedca"; - -import "admin.proto"; -import "provisioners.proto"; - -// Majordomo is the public service used to sync configurations to CA's and post -// certificates. -service Majordomo { - // Login creates signs a given CSR and returns the certificate that will be - // used for authentication. - rpc Login(LoginRequest) returns (LoginResponse); - - // GetConfiguration returns the full configuration of an authority. - rpc GetConfiguration(ConfigurationRequest) returns (ConfigurationResponse); - - // CreateProvisioner adds a new provisioner to the majordomo authority and - // returns the proto representation. - rpc CreateProvisioner(CreateProvisionerRequest) returns (linkedca.Provisioner); - // UpdateProvisioners updates a previously created provisioner. - rpc UpdateProvisioner(UpdateProvisionerRequest) returns (linkedca.Provisioner); - // DeleteProvisioner deletes a previously created provisioner. - rpc DeleteProvisioner(DeleteProvisionerRequest) returns (linkedca.Provisioner); - - // CreateAdmin adds a new admin user to the majordomo authority. Admin users - // can add or delete provisioners. - rpc CreateAdmin(CreateAdminRequest) returns (linkedca.Admin); - // UpdateAdmin updates a previously created admin. - rpc UpdateAdmin(UpdateAdminRequest) returns (linkedca.Admin); - // DeleteAdmin deletes a previously created admin user - rpc DeleteAdmin(DeleteAdminRequest) returns (linkedca.Admin); - - // PostCertificate sends a signed X.509 certificate to majordomo. - rpc PostCertificate(CertificateRequest) returns (CertificateResponse); - // PostSSHCertificate sends a signed SSH certificate to majordomo. - rpc PostSSHCertificate(SSHCertificateRequest) returns (SSHCertificateResponse); - // RevokeCertificate marks an X.509 certificate as revoked. - rpc RevokeCertificate(TODO) returns (TODO); - // RevokeSSHCertificate marks an SSH certificate as revoked. 
- rpc RevokeSSHCertificate(TODO) returns (TODO); -} - -message TODO {} - -message LoginRequest { - string authority_id = 1; - string username = 2; - string password = 3; - string pem_certificate_request = 4; -} - -message LoginResponse { - string pem_certificate = 1; - string pem_certificate_chain = 2; -} - -message ConfigurationRequest { - string authority_id = 1; -} - -message ConfigurationResponse { - repeated linkedca.Provisioner provisioners = 1; - repeated linkedca.Admin admins = 2; -} - -message CreateProvisionerRequest { - linkedca.Provisioner.Type type = 1; - string name = 2; - linkedca.ProvisionerDetails details = 3; - linkedca.Claims claims = 4; - linkedca.Template x509_template = 5; - linkedca.Template ssh_template = 6; -} - -message UpdateProvisionerRequest { - string id = 1; - string name = 2; - linkedca.ProvisionerDetails details = 3; - linkedca.Claims claims = 4; - linkedca.Template x509_template = 5; - linkedca.Template ssh_template = 6; -} - -message DeleteProvisionerRequest { - string id = 1; -} - -message CreateAdminRequest { - string subject = 1; - string provisioner_id = 2; - linkedca.Admin.Type type = 3; -} - -message UpdateAdminRequest { - string id = 1; - linkedca.Admin.Type type = 2; -} - -message DeleteAdminRequest { - string id = 1; -} -message CertificateRequest { - string pem_certificate = 1; - string pem_certificate_chain = 2; -} - -message CertificateResponse { - string id = 1; -} - -message SSHCertificateRequest { - string certificate = 1; -} - -message SSHCertificateResponse { - string id = 1; -} diff --git a/vendor/go.step.sm/linkedca/majordomo_grpc.pb.go b/vendor/go.step.sm/linkedca/majordomo_grpc.pb.go deleted file mode 100644 index 990fb731..00000000 --- a/vendor/go.step.sm/linkedca/majordomo_grpc.pb.go +++ /dev/null @@ -1,527 +0,0 @@ -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. - -package linkedca - -import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 - -// MajordomoClient is the client API for Majordomo service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -type MajordomoClient interface { - // Login creates signs a given CSR and returns the certificate that will be - // used for authentication. - Login(ctx context.Context, in *LoginRequest, opts ...grpc.CallOption) (*LoginResponse, error) - // GetConfiguration returns the full configuration of an authority. - GetConfiguration(ctx context.Context, in *ConfigurationRequest, opts ...grpc.CallOption) (*ConfigurationResponse, error) - // CreateProvisioner adds a new provisioner to the majordomo authority and - // returns the proto representation. - CreateProvisioner(ctx context.Context, in *CreateProvisionerRequest, opts ...grpc.CallOption) (*Provisioner, error) - // UpdateProvisioners updates a previously created provisioner. - UpdateProvisioner(ctx context.Context, in *UpdateProvisionerRequest, opts ...grpc.CallOption) (*Provisioner, error) - // DeleteProvisioner deletes a previously created provisioner. 
- DeleteProvisioner(ctx context.Context, in *DeleteProvisionerRequest, opts ...grpc.CallOption) (*Provisioner, error) - // CreateAdmin adds a new admin user to the majordomo authority. Admin users - // can add or delete provisioners. - CreateAdmin(ctx context.Context, in *CreateAdminRequest, opts ...grpc.CallOption) (*Admin, error) - // UpdateAdmin updates a previously created admin. - UpdateAdmin(ctx context.Context, in *UpdateAdminRequest, opts ...grpc.CallOption) (*Admin, error) - // DeleteAdmin deletes a previously created admin user - DeleteAdmin(ctx context.Context, in *DeleteAdminRequest, opts ...grpc.CallOption) (*Admin, error) - // PostCertificate sends a signed X.509 certificate to majordomo. - PostCertificate(ctx context.Context, in *CertificateRequest, opts ...grpc.CallOption) (*CertificateResponse, error) - // PostSSHCertificate sends a signed SSH certificate to majordomo. - PostSSHCertificate(ctx context.Context, in *SSHCertificateRequest, opts ...grpc.CallOption) (*SSHCertificateResponse, error) - // RevokeCertificate marks an X.509 certificate as revoked. - RevokeCertificate(ctx context.Context, in *TODO, opts ...grpc.CallOption) (*TODO, error) - // RevokeSSHCertificate marks an SSH certificate as revoked. - RevokeSSHCertificate(ctx context.Context, in *TODO, opts ...grpc.CallOption) (*TODO, error) -} - -type majordomoClient struct { - cc grpc.ClientConnInterface -} - -func NewMajordomoClient(cc grpc.ClientConnInterface) MajordomoClient { - return &majordomoClient{cc} -} - -func (c *majordomoClient) Login(ctx context.Context, in *LoginRequest, opts ...grpc.CallOption) (*LoginResponse, error) { - out := new(LoginResponse) - err := c.cc.Invoke(ctx, "/linkedca.Majordomo/Login", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *majordomoClient) GetConfiguration(ctx context.Context, in *ConfigurationRequest, opts ...grpc.CallOption) (*ConfigurationResponse, error) { - out := new(ConfigurationResponse) - err := c.cc.Invoke(ctx, "/linkedca.Majordomo/GetConfiguration", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *majordomoClient) CreateProvisioner(ctx context.Context, in *CreateProvisionerRequest, opts ...grpc.CallOption) (*Provisioner, error) { - out := new(Provisioner) - err := c.cc.Invoke(ctx, "/linkedca.Majordomo/CreateProvisioner", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *majordomoClient) UpdateProvisioner(ctx context.Context, in *UpdateProvisionerRequest, opts ...grpc.CallOption) (*Provisioner, error) { - out := new(Provisioner) - err := c.cc.Invoke(ctx, "/linkedca.Majordomo/UpdateProvisioner", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *majordomoClient) DeleteProvisioner(ctx context.Context, in *DeleteProvisionerRequest, opts ...grpc.CallOption) (*Provisioner, error) { - out := new(Provisioner) - err := c.cc.Invoke(ctx, "/linkedca.Majordomo/DeleteProvisioner", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *majordomoClient) CreateAdmin(ctx context.Context, in *CreateAdminRequest, opts ...grpc.CallOption) (*Admin, error) { - out := new(Admin) - err := c.cc.Invoke(ctx, "/linkedca.Majordomo/CreateAdmin", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *majordomoClient) UpdateAdmin(ctx context.Context, in *UpdateAdminRequest, opts ...grpc.CallOption) (*Admin, error) { - out := new(Admin) - err := c.cc.Invoke(ctx, "/linkedca.Majordomo/UpdateAdmin", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *majordomoClient) DeleteAdmin(ctx context.Context, in *DeleteAdminRequest, opts ...grpc.CallOption) (*Admin, error) { - out := new(Admin) - err := c.cc.Invoke(ctx, "/linkedca.Majordomo/DeleteAdmin", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *majordomoClient) PostCertificate(ctx context.Context, in *CertificateRequest, opts ...grpc.CallOption) (*CertificateResponse, error) { - out := new(CertificateResponse) - err := c.cc.Invoke(ctx, "/linkedca.Majordomo/PostCertificate", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *majordomoClient) PostSSHCertificate(ctx context.Context, in *SSHCertificateRequest, opts ...grpc.CallOption) (*SSHCertificateResponse, error) { - out := new(SSHCertificateResponse) - err := c.cc.Invoke(ctx, "/linkedca.Majordomo/PostSSHCertificate", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *majordomoClient) RevokeCertificate(ctx context.Context, in *TODO, opts ...grpc.CallOption) (*TODO, error) { - out := new(TODO) - err := c.cc.Invoke(ctx, "/linkedca.Majordomo/RevokeCertificate", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *majordomoClient) RevokeSSHCertificate(ctx context.Context, in *TODO, opts ...grpc.CallOption) (*TODO, error) { - out := new(TODO) - err := c.cc.Invoke(ctx, "/linkedca.Majordomo/RevokeSSHCertificate", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// MajordomoServer is the server API for Majordomo service. -// All implementations must embed UnimplementedMajordomoServer -// for forward compatibility -type MajordomoServer interface { - // Login creates signs a given CSR and returns the certificate that will be - // used for authentication. - Login(context.Context, *LoginRequest) (*LoginResponse, error) - // GetConfiguration returns the full configuration of an authority. - GetConfiguration(context.Context, *ConfigurationRequest) (*ConfigurationResponse, error) - // CreateProvisioner adds a new provisioner to the majordomo authority and - // returns the proto representation. - CreateProvisioner(context.Context, *CreateProvisionerRequest) (*Provisioner, error) - // UpdateProvisioners updates a previously created provisioner. - UpdateProvisioner(context.Context, *UpdateProvisionerRequest) (*Provisioner, error) - // DeleteProvisioner deletes a previously created provisioner. - DeleteProvisioner(context.Context, *DeleteProvisionerRequest) (*Provisioner, error) - // CreateAdmin adds a new admin user to the majordomo authority. Admin users - // can add or delete provisioners. - CreateAdmin(context.Context, *CreateAdminRequest) (*Admin, error) - // UpdateAdmin updates a previously created admin. - UpdateAdmin(context.Context, *UpdateAdminRequest) (*Admin, error) - // DeleteAdmin deletes a previously created admin user - DeleteAdmin(context.Context, *DeleteAdminRequest) (*Admin, error) - // PostCertificate sends a signed X.509 certificate to majordomo. 
-	PostCertificate(context.Context, *CertificateRequest) (*CertificateResponse, error)
-	// PostSSHCertificate sends a signed SSH certificate to majordomo.
-	PostSSHCertificate(context.Context, *SSHCertificateRequest) (*SSHCertificateResponse, error)
-	// RevokeCertificate marks an X.509 certificate as revoked.
-	RevokeCertificate(context.Context, *TODO) (*TODO, error)
-	// RevokeSSHCertificate marks an SSH certificate as revoked.
-	RevokeSSHCertificate(context.Context, *TODO) (*TODO, error)
-	mustEmbedUnimplementedMajordomoServer()
-}
-
-// UnimplementedMajordomoServer must be embedded to have forward compatible implementations.
-type UnimplementedMajordomoServer struct {
-}
-
-func (UnimplementedMajordomoServer) Login(context.Context, *LoginRequest) (*LoginResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method Login not implemented")
-}
-func (UnimplementedMajordomoServer) GetConfiguration(context.Context, *ConfigurationRequest) (*ConfigurationResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method GetConfiguration not implemented")
-}
-func (UnimplementedMajordomoServer) CreateProvisioner(context.Context, *CreateProvisionerRequest) (*Provisioner, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method CreateProvisioner not implemented")
-}
-func (UnimplementedMajordomoServer) UpdateProvisioner(context.Context, *UpdateProvisionerRequest) (*Provisioner, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method UpdateProvisioner not implemented")
-}
-func (UnimplementedMajordomoServer) DeleteProvisioner(context.Context, *DeleteProvisionerRequest) (*Provisioner, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method DeleteProvisioner not implemented")
-}
-func (UnimplementedMajordomoServer) CreateAdmin(context.Context, *CreateAdminRequest) (*Admin, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method CreateAdmin not implemented")
-}
-func (UnimplementedMajordomoServer) UpdateAdmin(context.Context, *UpdateAdminRequest) (*Admin, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method UpdateAdmin not implemented")
-}
-func (UnimplementedMajordomoServer) DeleteAdmin(context.Context, *DeleteAdminRequest) (*Admin, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method DeleteAdmin not implemented")
-}
-func (UnimplementedMajordomoServer) PostCertificate(context.Context, *CertificateRequest) (*CertificateResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method PostCertificate not implemented")
-}
-func (UnimplementedMajordomoServer) PostSSHCertificate(context.Context, *SSHCertificateRequest) (*SSHCertificateResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method PostSSHCertificate not implemented")
-}
-func (UnimplementedMajordomoServer) RevokeCertificate(context.Context, *TODO) (*TODO, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method RevokeCertificate not implemented")
-}
-func (UnimplementedMajordomoServer) RevokeSSHCertificate(context.Context, *TODO) (*TODO, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method RevokeSSHCertificate not implemented")
-}
-func (UnimplementedMajordomoServer) mustEmbedUnimplementedMajordomoServer() {}
-
-// UnsafeMajordomoServer may be embedded to opt out of forward compatibility for this service.
-// Use of this interface is not recommended, as added methods to MajordomoServer will
-// result in compilation errors.
-type UnsafeMajordomoServer interface {
-	mustEmbedUnimplementedMajordomoServer()
-}
-
-func RegisterMajordomoServer(s grpc.ServiceRegistrar, srv MajordomoServer) {
-	s.RegisterService(&Majordomo_ServiceDesc, srv)
-}
-
-func _Majordomo_Login_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(LoginRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(MajordomoServer).Login(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/linkedca.Majordomo/Login",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(MajordomoServer).Login(ctx, req.(*LoginRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Majordomo_GetConfiguration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(ConfigurationRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(MajordomoServer).GetConfiguration(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/linkedca.Majordomo/GetConfiguration",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(MajordomoServer).GetConfiguration(ctx, req.(*ConfigurationRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Majordomo_CreateProvisioner_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(CreateProvisionerRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(MajordomoServer).CreateProvisioner(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/linkedca.Majordomo/CreateProvisioner",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(MajordomoServer).CreateProvisioner(ctx, req.(*CreateProvisionerRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Majordomo_UpdateProvisioner_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(UpdateProvisionerRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(MajordomoServer).UpdateProvisioner(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/linkedca.Majordomo/UpdateProvisioner",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(MajordomoServer).UpdateProvisioner(ctx, req.(*UpdateProvisionerRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Majordomo_DeleteProvisioner_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(DeleteProvisionerRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(MajordomoServer).DeleteProvisioner(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/linkedca.Majordomo/DeleteProvisioner",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(MajordomoServer).DeleteProvisioner(ctx, req.(*DeleteProvisionerRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Majordomo_CreateAdmin_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(CreateAdminRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(MajordomoServer).CreateAdmin(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/linkedca.Majordomo/CreateAdmin",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(MajordomoServer).CreateAdmin(ctx, req.(*CreateAdminRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Majordomo_UpdateAdmin_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(UpdateAdminRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(MajordomoServer).UpdateAdmin(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/linkedca.Majordomo/UpdateAdmin",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(MajordomoServer).UpdateAdmin(ctx, req.(*UpdateAdminRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Majordomo_DeleteAdmin_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(DeleteAdminRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(MajordomoServer).DeleteAdmin(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/linkedca.Majordomo/DeleteAdmin",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(MajordomoServer).DeleteAdmin(ctx, req.(*DeleteAdminRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Majordomo_PostCertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(CertificateRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(MajordomoServer).PostCertificate(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/linkedca.Majordomo/PostCertificate",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(MajordomoServer).PostCertificate(ctx, req.(*CertificateRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Majordomo_PostSSHCertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(SSHCertificateRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(MajordomoServer).PostSSHCertificate(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/linkedca.Majordomo/PostSSHCertificate",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(MajordomoServer).PostSSHCertificate(ctx, req.(*SSHCertificateRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Majordomo_RevokeCertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(TODO)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(MajordomoServer).RevokeCertificate(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/linkedca.Majordomo/RevokeCertificate",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(MajordomoServer).RevokeCertificate(ctx, req.(*TODO))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Majordomo_RevokeSSHCertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(TODO)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(MajordomoServer).RevokeSSHCertificate(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/linkedca.Majordomo/RevokeSSHCertificate",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(MajordomoServer).RevokeSSHCertificate(ctx, req.(*TODO))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-// Majordomo_ServiceDesc is the grpc.ServiceDesc for Majordomo service.
-// It's only intended for direct use with grpc.RegisterService,
-// and not to be introspected or modified (even as a copy)
-var Majordomo_ServiceDesc = grpc.ServiceDesc{
-	ServiceName: "linkedca.Majordomo",
-	HandlerType: (*MajordomoServer)(nil),
-	Methods: []grpc.MethodDesc{
-		{
-			MethodName: "Login",
-			Handler:    _Majordomo_Login_Handler,
-		},
-		{
-			MethodName: "GetConfiguration",
-			Handler:    _Majordomo_GetConfiguration_Handler,
-		},
-		{
-			MethodName: "CreateProvisioner",
-			Handler:    _Majordomo_CreateProvisioner_Handler,
-		},
-		{
-			MethodName: "UpdateProvisioner",
-			Handler:    _Majordomo_UpdateProvisioner_Handler,
-		},
-		{
-			MethodName: "DeleteProvisioner",
-			Handler:    _Majordomo_DeleteProvisioner_Handler,
-		},
-		{
-			MethodName: "CreateAdmin",
-			Handler:    _Majordomo_CreateAdmin_Handler,
-		},
-		{
-			MethodName: "UpdateAdmin",
-			Handler:    _Majordomo_UpdateAdmin_Handler,
-		},
-		{
-			MethodName: "DeleteAdmin",
-			Handler:    _Majordomo_DeleteAdmin_Handler,
-		},
-		{
-			MethodName: "PostCertificate",
-			Handler:    _Majordomo_PostCertificate_Handler,
-		},
-		{
-			MethodName: "PostSSHCertificate",
-			Handler:    _Majordomo_PostSSHCertificate_Handler,
-		},
-		{
-			MethodName: "RevokeCertificate",
-			Handler:    _Majordomo_RevokeCertificate_Handler,
-		},
-		{
-			MethodName: "RevokeSSHCertificate",
-			Handler:    _Majordomo_RevokeSSHCertificate_Handler,
-		},
-	},
-	Streams:  []grpc.StreamDesc{},
-	Metadata: "majordomo.proto",
-}
diff --git a/vendor/go.step.sm/linkedca/provisioners.pb.go b/vendor/go.step.sm/linkedca/provisioners.pb.go
deleted file mode 100644
index b9b2e5d2..00000000
--- a/vendor/go.step.sm/linkedca/provisioners.pb.go
+++ /dev/null
@@ -1,1915 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// versions:
-// 	protoc-gen-go v1.26.0
-// 	protoc        v3.15.8
-// source: provisioners.proto
-
-package linkedca
-
-import (
-	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
-	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
-	timestamppb "google.golang.org/protobuf/types/known/timestamppb"
-	reflect "reflect"
-	sync "sync"
-)
-
-const (
-	// Verify that this generated code is sufficiently up-to-date.
-	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
-	// Verify that runtime/protoimpl is sufficiently up-to-date.
-	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
-)
-
-type Provisioner_Type int32
-
-const (
-	Provisioner_NOOP   Provisioner_Type = 0
-	Provisioner_JWK    Provisioner_Type = 1
-	Provisioner_OIDC   Provisioner_Type = 2
-	Provisioner_GCP    Provisioner_Type = 3
-	Provisioner_AWS    Provisioner_Type = 4
-	Provisioner_AZURE  Provisioner_Type = 5
-	Provisioner_ACME   Provisioner_Type = 6
-	Provisioner_X5C    Provisioner_Type = 7
-	Provisioner_K8SSA  Provisioner_Type = 8
-	Provisioner_SSHPOP Provisioner_Type = 9
-	Provisioner_SCEP   Provisioner_Type = 10
-)
-
-// Enum value maps for Provisioner_Type.
-var (
-	Provisioner_Type_name = map[int32]string{
-		0:  "NOOP",
-		1:  "JWK",
-		2:  "OIDC",
-		3:  "GCP",
-		4:  "AWS",
-		5:  "AZURE",
-		6:  "ACME",
-		7:  "X5C",
-		8:  "K8SSA",
-		9:  "SSHPOP",
-		10: "SCEP",
-	}
-	Provisioner_Type_value = map[string]int32{
-		"NOOP":   0,
-		"JWK":    1,
-		"OIDC":   2,
-		"GCP":    3,
-		"AWS":    4,
-		"AZURE":  5,
-		"ACME":   6,
-		"X5C":    7,
-		"K8SSA":  8,
-		"SSHPOP": 9,
-		"SCEP":   10,
-	}
-)
-
-func (x Provisioner_Type) Enum() *Provisioner_Type {
-	p := new(Provisioner_Type)
-	*p = x
-	return p
-}
-
-func (x Provisioner_Type) String() string {
-	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (Provisioner_Type) Descriptor() protoreflect.EnumDescriptor {
-	return file_provisioners_proto_enumTypes[0].Descriptor()
-}
-
-func (Provisioner_Type) Type() protoreflect.EnumType {
-	return &file_provisioners_proto_enumTypes[0]
-}
-
-func (x Provisioner_Type) Number() protoreflect.EnumNumber {
-	return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use Provisioner_Type.Descriptor instead.
-func (Provisioner_Type) EnumDescriptor() ([]byte, []int) {
-	return file_provisioners_proto_rawDescGZIP(), []int{0, 0}
-}
-
-type Provisioner struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
-	Id           string                 `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
-	AuthorityId  string                 `protobuf:"bytes,2,opt,name=authority_id,json=authorityId,proto3" json:"authority_id,omitempty"`
-	Type         Provisioner_Type       `protobuf:"varint,3,opt,name=type,proto3,enum=linkedca.Provisioner_Type" json:"type,omitempty"`
-	Name         string                 `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"`
-	Details      *ProvisionerDetails    `protobuf:"bytes,5,opt,name=details,proto3" json:"details,omitempty"`
-	Claims       *Claims                `protobuf:"bytes,6,opt,name=claims,proto3" json:"claims,omitempty"`
-	X509Template *Template              `protobuf:"bytes,7,opt,name=x509_template,json=x509Template,proto3" json:"x509_template,omitempty"`
-	SshTemplate  *Template              `protobuf:"bytes,8,opt,name=ssh_template,json=sshTemplate,proto3" json:"ssh_template,omitempty"`
-	CreatedAt    *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"`
-	DeletedAt    *timestamppb.Timestamp `protobuf:"bytes,10,opt,name=deleted_at,json=deletedAt,proto3" json:"deleted_at,omitempty"`
-}
-
-func (x *Provisioner) Reset() {
-	*x = Provisioner{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_provisioners_proto_msgTypes[0]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *Provisioner) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Provisioner) ProtoMessage() {}
-
-func (x *Provisioner) ProtoReflect() protoreflect.Message {
-	mi := &file_provisioners_proto_msgTypes[0]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use Provisioner.ProtoReflect.Descriptor instead.
-func (*Provisioner) Descriptor() ([]byte, []int) {
-	return file_provisioners_proto_rawDescGZIP(), []int{0}
-}
-
-func (x *Provisioner) GetId() string {
-	if x != nil {
-		return x.Id
-	}
-	return ""
-}
-
-func (x *Provisioner) GetAuthorityId() string {
-	if x != nil {
-		return x.AuthorityId
-	}
-	return ""
-}
-
-func (x *Provisioner) GetType() Provisioner_Type {
-	if x != nil {
-		return x.Type
-	}
-	return Provisioner_NOOP
-}
-
-func (x *Provisioner) GetName() string {
-	if x != nil {
-		return x.Name
-	}
-	return ""
-}
-
-func (x *Provisioner) GetDetails() *ProvisionerDetails {
-	if x != nil {
-		return x.Details
-	}
-	return nil
-}
-
-func (x *Provisioner) GetClaims() *Claims {
-	if x != nil {
-		return x.Claims
-	}
-	return nil
-}
-
-func (x *Provisioner) GetX509Template() *Template {
-	if x != nil {
-		return x.X509Template
-	}
-	return nil
-}
-
-func (x *Provisioner) GetSshTemplate() *Template {
-	if x != nil {
-		return x.SshTemplate
-	}
-	return nil
-}
-
-func (x *Provisioner) GetCreatedAt() *timestamppb.Timestamp {
-	if x != nil {
-		return x.CreatedAt
-	}
-	return nil
-}
-
-func (x *Provisioner) GetDeletedAt() *timestamppb.Timestamp {
-	if x != nil {
-		return x.DeletedAt
-	}
-	return nil
-}
-
-type ProvisionerDetails struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
-	// Types that are assignable to Data:
-	//	*ProvisionerDetails_JWK
-	//	*ProvisionerDetails_OIDC
-	//	*ProvisionerDetails_GCP
-	//	*ProvisionerDetails_AWS
-	//	*ProvisionerDetails_Azure
-	//	*ProvisionerDetails_ACME
-	//	*ProvisionerDetails_X5C
-	//	*ProvisionerDetails_K8SSA
-	//	*ProvisionerDetails_SSHPOP
-	//	*ProvisionerDetails_SCEP
-	Data isProvisionerDetails_Data `protobuf_oneof:"data"`
-}
-
-func (x *ProvisionerDetails) Reset() {
-	*x = ProvisionerDetails{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_provisioners_proto_msgTypes[1]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *ProvisionerDetails) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ProvisionerDetails) ProtoMessage() {}
-
-func (x *ProvisionerDetails) ProtoReflect() protoreflect.Message {
-	mi := &file_provisioners_proto_msgTypes[1]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use ProvisionerDetails.ProtoReflect.Descriptor instead.
-func (*ProvisionerDetails) Descriptor() ([]byte, []int) {
-	return file_provisioners_proto_rawDescGZIP(), []int{1}
-}
-
-func (m *ProvisionerDetails) GetData() isProvisionerDetails_Data {
-	if m != nil {
-		return m.Data
-	}
-	return nil
-}
-
-func (x *ProvisionerDetails) GetJWK() *JWKProvisioner {
-	if x, ok := x.GetData().(*ProvisionerDetails_JWK); ok {
-		return x.JWK
-	}
-	return nil
-}
-
-func (x *ProvisionerDetails) GetOIDC() *OIDCProvisioner {
-	if x, ok := x.GetData().(*ProvisionerDetails_OIDC); ok {
-		return x.OIDC
-	}
-	return nil
-}
-
-func (x *ProvisionerDetails) GetGCP() *GCPProvisioner {
-	if x, ok := x.GetData().(*ProvisionerDetails_GCP); ok {
-		return x.GCP
-	}
-	return nil
-}
-
-func (x *ProvisionerDetails) GetAWS() *AWSProvisioner {
-	if x, ok := x.GetData().(*ProvisionerDetails_AWS); ok {
-		return x.AWS
-	}
-	return nil
-}
-
-func (x *ProvisionerDetails) GetAzure() *AzureProvisioner {
-	if x, ok := x.GetData().(*ProvisionerDetails_Azure); ok {
-		return x.Azure
-	}
-	return nil
-}
-
-func (x *ProvisionerDetails) GetACME() *ACMEProvisioner {
-	if x, ok := x.GetData().(*ProvisionerDetails_ACME); ok {
-		return x.ACME
-	}
-	return nil
-}
-
-func (x *ProvisionerDetails) GetX5C() *X5CProvisioner {
-	if x, ok := x.GetData().(*ProvisionerDetails_X5C); ok {
-		return x.X5C
-	}
-	return nil
-}
-
-func (x *ProvisionerDetails) GetK8SSA() *K8SSAProvisioner {
-	if x, ok := x.GetData().(*ProvisionerDetails_K8SSA); ok {
-		return x.K8SSA
-	}
-	return nil
-}
-
-func (x *ProvisionerDetails) GetSSHPOP() *SSHPOPProvisioner {
-	if x, ok := x.GetData().(*ProvisionerDetails_SSHPOP); ok {
-		return x.SSHPOP
-	}
-	return nil
-}
-
-func (x *ProvisionerDetails) GetSCEP() *SCEPProvisioner {
-	if x, ok := x.GetData().(*ProvisionerDetails_SCEP); ok {
-		return x.SCEP
-	}
-	return nil
-}
-
-type isProvisionerDetails_Data interface {
-	isProvisionerDetails_Data()
-}
-
-type ProvisionerDetails_JWK struct {
-	JWK *JWKProvisioner `protobuf:"bytes,20,opt,name=JWK,proto3,oneof"`
-}
-
-type ProvisionerDetails_OIDC struct {
-	OIDC *OIDCProvisioner `protobuf:"bytes,21,opt,name=OIDC,proto3,oneof"`
-}
-
-type ProvisionerDetails_GCP struct {
-	GCP *GCPProvisioner `protobuf:"bytes,22,opt,name=GCP,proto3,oneof"`
-}
-
-type ProvisionerDetails_AWS struct {
-	AWS *AWSProvisioner `protobuf:"bytes,23,opt,name=AWS,proto3,oneof"`
-}
-
-type ProvisionerDetails_Azure struct {
-	Azure *AzureProvisioner `protobuf:"bytes,24,opt,name=Azure,proto3,oneof"`
-}
-
-type ProvisionerDetails_ACME struct {
-	ACME *ACMEProvisioner `protobuf:"bytes,25,opt,name=ACME,proto3,oneof"`
-}
-
-type ProvisionerDetails_X5C struct {
-	X5C *X5CProvisioner `protobuf:"bytes,26,opt,name=X5C,proto3,oneof"`
-}
-
-type ProvisionerDetails_K8SSA struct {
-	K8SSA *K8SSAProvisioner `protobuf:"bytes,27,opt,name=K8sSA,proto3,oneof"`
-}
-
-type ProvisionerDetails_SSHPOP struct {
-	SSHPOP *SSHPOPProvisioner `protobuf:"bytes,28,opt,name=SSHPOP,proto3,oneof"`
-}
-
-type ProvisionerDetails_SCEP struct {
-	SCEP *SCEPProvisioner `protobuf:"bytes,29,opt,name=SCEP,proto3,oneof"`
-}
-
-func (*ProvisionerDetails_JWK) isProvisionerDetails_Data() {}
-
-func (*ProvisionerDetails_OIDC) isProvisionerDetails_Data() {}
-
-func (*ProvisionerDetails_GCP) isProvisionerDetails_Data() {}
-
-func (*ProvisionerDetails_AWS) isProvisionerDetails_Data() {}
-
-func (*ProvisionerDetails_Azure) isProvisionerDetails_Data() {}
-
-func (*ProvisionerDetails_ACME) isProvisionerDetails_Data() {}
-
-func (*ProvisionerDetails_X5C) isProvisionerDetails_Data() {}
-
-func (*ProvisionerDetails_K8SSA) isProvisionerDetails_Data() {}
-
-func (*ProvisionerDetails_SSHPOP) isProvisionerDetails_Data() {}
-
-func (*ProvisionerDetails_SCEP) isProvisionerDetails_Data() {}
-
-type ProvisionerList struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
-	Provisioners []*Provisioner `protobuf:"bytes,1,rep,name=provisioners,proto3" json:"provisioners,omitempty"`
-}
-
-func (x *ProvisionerList) Reset() {
-	*x = ProvisionerList{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_provisioners_proto_msgTypes[2]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *ProvisionerList) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ProvisionerList) ProtoMessage() {}
-
-func (x *ProvisionerList) ProtoReflect() protoreflect.Message {
-	mi := &file_provisioners_proto_msgTypes[2]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use ProvisionerList.ProtoReflect.Descriptor instead.
-func (*ProvisionerList) Descriptor() ([]byte, []int) {
-	return file_provisioners_proto_rawDescGZIP(), []int{2}
-}
-
-func (x *ProvisionerList) GetProvisioners() []*Provisioner {
-	if x != nil {
-		return x.Provisioners
-	}
-	return nil
-}
-
-type Claims struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
-	X509           *X509Claims `protobuf:"bytes,1,opt,name=x509,proto3" json:"x509,omitempty"`
-	Ssh            *SSHClaims  `protobuf:"bytes,2,opt,name=ssh,proto3" json:"ssh,omitempty"`
-	DisableRenewal bool        `protobuf:"varint,3,opt,name=disable_renewal,json=disableRenewal,proto3" json:"disable_renewal,omitempty"`
-}
-
-func (x *Claims) Reset() {
-	*x = Claims{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_provisioners_proto_msgTypes[3]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *Claims) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Claims) ProtoMessage() {}
-
-func (x *Claims) ProtoReflect() protoreflect.Message {
-	mi := &file_provisioners_proto_msgTypes[3]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use Claims.ProtoReflect.Descriptor instead.
-func (*Claims) Descriptor() ([]byte, []int) {
-	return file_provisioners_proto_rawDescGZIP(), []int{3}
-}
-
-func (x *Claims) GetX509() *X509Claims {
-	if x != nil {
-		return x.X509
-	}
-	return nil
-}
-
-func (x *Claims) GetSsh() *SSHClaims {
-	if x != nil {
-		return x.Ssh
-	}
-	return nil
-}
-
-func (x *Claims) GetDisableRenewal() bool {
-	if x != nil {
-		return x.DisableRenewal
-	}
-	return false
-}
-
-type X509Claims struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
-	Enabled   bool       `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"`
-	Durations *Durations `protobuf:"bytes,2,opt,name=durations,proto3" json:"durations,omitempty"`
-}
-
-func (x *X509Claims) Reset() {
-	*x = X509Claims{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_provisioners_proto_msgTypes[4]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *X509Claims) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*X509Claims) ProtoMessage() {}
-
-func (x *X509Claims) ProtoReflect() protoreflect.Message {
-	mi := &file_provisioners_proto_msgTypes[4]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use X509Claims.ProtoReflect.Descriptor instead.
-func (*X509Claims) Descriptor() ([]byte, []int) {
-	return file_provisioners_proto_rawDescGZIP(), []int{4}
-}
-
-func (x *X509Claims) GetEnabled() bool {
-	if x != nil {
-		return x.Enabled
-	}
-	return false
-}
-
-func (x *X509Claims) GetDurations() *Durations {
-	if x != nil {
-		return x.Durations
-	}
-	return nil
-}
-
-type SSHClaims struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
-	Enabled       bool       `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"`
-	UserDurations *Durations `protobuf:"bytes,2,opt,name=user_durations,json=userDurations,proto3" json:"user_durations,omitempty"`
-	HostDurations *Durations `protobuf:"bytes,3,opt,name=host_durations,json=hostDurations,proto3" json:"host_durations,omitempty"`
-}
-
-func (x *SSHClaims) Reset() {
-	*x = SSHClaims{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_provisioners_proto_msgTypes[5]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *SSHClaims) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*SSHClaims) ProtoMessage() {}
-
-func (x *SSHClaims) ProtoReflect() protoreflect.Message {
-	mi := &file_provisioners_proto_msgTypes[5]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use SSHClaims.ProtoReflect.Descriptor instead.
-func (*SSHClaims) Descriptor() ([]byte, []int) {
-	return file_provisioners_proto_rawDescGZIP(), []int{5}
-}
-
-func (x *SSHClaims) GetEnabled() bool {
-	if x != nil {
-		return x.Enabled
-	}
-	return false
-}
-
-func (x *SSHClaims) GetUserDurations() *Durations {
-	if x != nil {
-		return x.UserDurations
-	}
-	return nil
-}
-
-func (x *SSHClaims) GetHostDurations() *Durations {
-	if x != nil {
-		return x.HostDurations
-	}
-	return nil
-}
-
-type Durations struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
-	Default string `protobuf:"bytes,1,opt,name=default,proto3" json:"default,omitempty"`
-	Min     string `protobuf:"bytes,2,opt,name=min,proto3" json:"min,omitempty"`
-	Max     string `protobuf:"bytes,3,opt,name=max,proto3" json:"max,omitempty"`
-}
-
-func (x *Durations) Reset() {
-	*x = Durations{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_provisioners_proto_msgTypes[6]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *Durations) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Durations) ProtoMessage() {}
-
-func (x *Durations) ProtoReflect() protoreflect.Message {
-	mi := &file_provisioners_proto_msgTypes[6]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use Durations.ProtoReflect.Descriptor instead.
-func (*Durations) Descriptor() ([]byte, []int) {
-	return file_provisioners_proto_rawDescGZIP(), []int{6}
-}
-
-func (x *Durations) GetDefault() string {
-	if x != nil {
-		return x.Default
-	}
-	return ""
-}
-
-func (x *Durations) GetMin() string {
-	if x != nil {
-		return x.Min
-	}
-	return ""
-}
-
-func (x *Durations) GetMax() string {
-	if x != nil {
-		return x.Max
-	}
-	return ""
-}
-
-type Template struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
-	Template []byte `protobuf:"bytes,1,opt,name=template,proto3" json:"template,omitempty"`
-	Data     []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
-}
-
-func (x *Template) Reset() {
-	*x = Template{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_provisioners_proto_msgTypes[7]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *Template) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Template) ProtoMessage() {}
-
-func (x *Template) ProtoReflect() protoreflect.Message {
-	mi := &file_provisioners_proto_msgTypes[7]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use Template.ProtoReflect.Descriptor instead.
-func (*Template) Descriptor() ([]byte, []int) {
-	return file_provisioners_proto_rawDescGZIP(), []int{7}
-}
-
-func (x *Template) GetTemplate() []byte {
-	if x != nil {
-		return x.Template
-	}
-	return nil
-}
-
-func (x *Template) GetData() []byte {
-	if x != nil {
-		return x.Data
-	}
-	return nil
-}
-
-type JWKProvisioner struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
-	PublicKey           []byte `protobuf:"bytes,1,opt,name=public_key,json=publicKey,proto3" json:"public_key,omitempty"`
-	EncryptedPrivateKey []byte `protobuf:"bytes,2,opt,name=encrypted_private_key,json=encryptedPrivateKey,proto3" json:"encrypted_private_key,omitempty"`
-}
-
-func (x *JWKProvisioner) Reset() {
-	*x = JWKProvisioner{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_provisioners_proto_msgTypes[8]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *JWKProvisioner) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*JWKProvisioner) ProtoMessage() {}
-
-func (x *JWKProvisioner) ProtoReflect() protoreflect.Message {
-	mi := &file_provisioners_proto_msgTypes[8]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use JWKProvisioner.ProtoReflect.Descriptor instead.
-func (*JWKProvisioner) Descriptor() ([]byte, []int) {
-	return file_provisioners_proto_rawDescGZIP(), []int{8}
-}
-
-func (x *JWKProvisioner) GetPublicKey() []byte {
-	if x != nil {
-		return x.PublicKey
-	}
-	return nil
-}
-
-func (x *JWKProvisioner) GetEncryptedPrivateKey() []byte {
-	if x != nil {
-		return x.EncryptedPrivateKey
-	}
-	return nil
-}
-
-type OIDCProvisioner struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
-	ClientId              string   `protobuf:"bytes,1,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty"`
-	ClientSecret          string   `protobuf:"bytes,2,opt,name=client_secret,json=clientSecret,proto3" json:"client_secret,omitempty"`
-	ConfigurationEndpoint string   `protobuf:"bytes,3,opt,name=configuration_endpoint,json=configurationEndpoint,proto3" json:"configuration_endpoint,omitempty"`
-	Admins                []string `protobuf:"bytes,4,rep,name=admins,proto3" json:"admins,omitempty"`
-	Domains               []string `protobuf:"bytes,5,rep,name=domains,proto3" json:"domains,omitempty"`
-	Groups                []string `protobuf:"bytes,6,rep,name=groups,proto3" json:"groups,omitempty"`
-	ListenAddress         string   `protobuf:"bytes,7,opt,name=listen_address,json=listenAddress,proto3" json:"listen_address,omitempty"`
-	TenantId              string   `protobuf:"bytes,8,opt,name=tenant_id,json=tenantId,proto3" json:"tenant_id,omitempty"`
-}
-
-func (x *OIDCProvisioner) Reset() {
-	*x = OIDCProvisioner{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_provisioners_proto_msgTypes[9]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *OIDCProvisioner) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*OIDCProvisioner) ProtoMessage() {}
-
-func (x *OIDCProvisioner) ProtoReflect() protoreflect.Message {
-	mi := &file_provisioners_proto_msgTypes[9]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use OIDCProvisioner.ProtoReflect.Descriptor instead.
-func (*OIDCProvisioner) Descriptor() ([]byte, []int) {
-	return file_provisioners_proto_rawDescGZIP(), []int{9}
-}
-
-func (x *OIDCProvisioner) GetClientId() string {
-	if x != nil {
-		return x.ClientId
-	}
-	return ""
-}
-
-func (x *OIDCProvisioner) GetClientSecret() string {
-	if x != nil {
-		return x.ClientSecret
-	}
-	return ""
-}
-
-func (x *OIDCProvisioner) GetConfigurationEndpoint() string {
-	if x != nil {
-		return x.ConfigurationEndpoint
-	}
-	return ""
-}
-
-func (x *OIDCProvisioner) GetAdmins() []string {
-	if x != nil {
-		return x.Admins
-	}
-	return nil
-}
-
-func (x *OIDCProvisioner) GetDomains() []string {
-	if x != nil {
-		return x.Domains
-	}
-	return nil
-}
-
-func (x *OIDCProvisioner) GetGroups() []string {
-	if x != nil {
-		return x.Groups
-	}
-	return nil
-}
-
-func (x *OIDCProvisioner) GetListenAddress() string {
-	if x != nil {
-		return x.ListenAddress
-	}
-	return ""
-}
-
-func (x *OIDCProvisioner) GetTenantId() string {
-	if x != nil {
-		return x.TenantId
-	}
-	return ""
-}
-
-type GCPProvisioner struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
-	ServiceAccounts        []string `protobuf:"bytes,1,rep,name=service_accounts,json=serviceAccounts,proto3" json:"service_accounts,omitempty"`
-	ProjectIds             []string `protobuf:"bytes,2,rep,name=project_ids,json=projectIds,proto3" json:"project_ids,omitempty"`
-	DisableCustomSans      bool     `protobuf:"varint,3,opt,name=disable_custom_sans,json=disableCustomSans,proto3" json:"disable_custom_sans,omitempty"`
-	DisableTrustOnFirstUse bool     `protobuf:"varint,4,opt,name=disable_trust_on_first_use,json=disableTrustOnFirstUse,proto3" json:"disable_trust_on_first_use,omitempty"`
-	InstanceAge            string   `protobuf:"bytes,5,opt,name=instance_age,json=instanceAge,proto3" json:"instance_age,omitempty"`
-}
-
-func (x *GCPProvisioner) Reset() {
-	*x = GCPProvisioner{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_provisioners_proto_msgTypes[10]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *GCPProvisioner) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GCPProvisioner) ProtoMessage() {}
-
-func (x *GCPProvisioner) ProtoReflect() protoreflect.Message {
-	mi := &file_provisioners_proto_msgTypes[10]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use GCPProvisioner.ProtoReflect.Descriptor instead.
-func (*GCPProvisioner) Descriptor() ([]byte, []int) {
-	return file_provisioners_proto_rawDescGZIP(), []int{10}
-}
-
-func (x *GCPProvisioner) GetServiceAccounts() []string {
-	if x != nil {
-		return x.ServiceAccounts
-	}
-	return nil
-}
-
-func (x *GCPProvisioner) GetProjectIds() []string {
-	if x != nil {
-		return x.ProjectIds
-	}
-	return nil
-}
-
-func (x *GCPProvisioner) GetDisableCustomSans() bool {
-	if x != nil {
-		return x.DisableCustomSans
-	}
-	return false
-}
-
-func (x *GCPProvisioner) GetDisableTrustOnFirstUse() bool {
-	if x != nil {
-		return x.DisableTrustOnFirstUse
-	}
-	return false
-}
-
-func (x *GCPProvisioner) GetInstanceAge() string {
-	if x != nil {
-		return x.InstanceAge
-	}
-	return ""
-}
-
-type AWSProvisioner struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
-	Accounts               []string `protobuf:"bytes,1,rep,name=accounts,proto3" json:"accounts,omitempty"`
-	DisableCustomSans      bool     `protobuf:"varint,2,opt,name=disable_custom_sans,json=disableCustomSans,proto3" json:"disable_custom_sans,omitempty"`
-	DisableTrustOnFirstUse bool     `protobuf:"varint,3,opt,name=disable_trust_on_first_use,json=disableTrustOnFirstUse,proto3" json:"disable_trust_on_first_use,omitempty"`
-	InstanceAge            string   `protobuf:"bytes,4,opt,name=instance_age,json=instanceAge,proto3" json:"instance_age,omitempty"`
-}
-
-func (x *AWSProvisioner) Reset() {
-	*x = AWSProvisioner{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_provisioners_proto_msgTypes[11]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *AWSProvisioner) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*AWSProvisioner) ProtoMessage() {}
-
-func (x *AWSProvisioner) ProtoReflect() protoreflect.Message {
-	mi := &file_provisioners_proto_msgTypes[11]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use AWSProvisioner.ProtoReflect.Descriptor instead.
-func (*AWSProvisioner) Descriptor() ([]byte, []int) {
-	return file_provisioners_proto_rawDescGZIP(), []int{11}
-}
-
-func (x *AWSProvisioner) GetAccounts() []string {
-	if x != nil {
-		return x.Accounts
-	}
-	return nil
-}
-
-func (x *AWSProvisioner) GetDisableCustomSans() bool {
-	if x != nil {
-		return x.DisableCustomSans
-	}
-	return false
-}
-
-func (x *AWSProvisioner) GetDisableTrustOnFirstUse() bool {
-	if x != nil {
-		return x.DisableTrustOnFirstUse
-	}
-	return false
-}
-
-func (x *AWSProvisioner) GetInstanceAge() string {
-	if x != nil {
-		return x.InstanceAge
-	}
-	return ""
-}
-
-type AzureProvisioner struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
-	TenantId               string   `protobuf:"bytes,1,opt,name=tenant_id,json=tenantId,proto3" json:"tenant_id,omitempty"`
-	ResourceGroups         []string `protobuf:"bytes,2,rep,name=resource_groups,json=resourceGroups,proto3" json:"resource_groups,omitempty"`
-	Audience               string   `protobuf:"bytes,3,opt,name=audience,proto3" json:"audience,omitempty"`
-	DisableCustomSans      bool     `protobuf:"varint,4,opt,name=disable_custom_sans,json=disableCustomSans,proto3" json:"disable_custom_sans,omitempty"`
-	DisableTrustOnFirstUse bool     `protobuf:"varint,5,opt,name=disable_trust_on_first_use,json=disableTrustOnFirstUse,proto3" json:"disable_trust_on_first_use,omitempty"`
-}
-
-func (x *AzureProvisioner) Reset() {
-	*x = AzureProvisioner{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_provisioners_proto_msgTypes[12]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *AzureProvisioner) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*AzureProvisioner) ProtoMessage() {}
-
-func (x *AzureProvisioner) ProtoReflect() protoreflect.Message {
-	mi := &file_provisioners_proto_msgTypes[12]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use AzureProvisioner.ProtoReflect.Descriptor instead.
-func (*AzureProvisioner) Descriptor() ([]byte, []int) {
-	return file_provisioners_proto_rawDescGZIP(), []int{12}
-}
-
-func (x *AzureProvisioner) GetTenantId() string {
-	if x != nil {
-		return x.TenantId
-	}
-	return ""
-}
-
-func (x *AzureProvisioner) GetResourceGroups() []string {
-	if x != nil {
-		return x.ResourceGroups
-	}
-	return nil
-}
-
-func (x *AzureProvisioner) GetAudience() string {
-	if x != nil {
-		return x.Audience
-	}
-	return ""
-}
-
-func (x *AzureProvisioner) GetDisableCustomSans() bool {
-	if x != nil {
-		return x.DisableCustomSans
-	}
-	return false
-}
-
-func (x *AzureProvisioner) GetDisableTrustOnFirstUse() bool {
-	if x != nil {
-		return x.DisableTrustOnFirstUse
-	}
-	return false
-}
-
-type ACMEProvisioner struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
-	ForceCn bool `protobuf:"varint,1,opt,name=force_cn,json=forceCn,proto3" json:"force_cn,omitempty"`
-}
-
-func (x *ACMEProvisioner) Reset() {
-	*x = ACMEProvisioner{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_provisioners_proto_msgTypes[13]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *ACMEProvisioner) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ACMEProvisioner) ProtoMessage() {}
-
-func (x *ACMEProvisioner) ProtoReflect() protoreflect.Message {
-	mi := &file_provisioners_proto_msgTypes[13]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use ACMEProvisioner.ProtoReflect.Descriptor instead.
-func (*ACMEProvisioner) Descriptor() ([]byte, []int) {
-	return file_provisioners_proto_rawDescGZIP(), []int{13}
-}
-
-func (x *ACMEProvisioner) GetForceCn() bool {
-	if x != nil {
-		return x.ForceCn
-	}
-	return false
-}
-
-type X5CProvisioner struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
-	Roots [][]byte `protobuf:"bytes,1,rep,name=roots,proto3" json:"roots,omitempty"`
-}
-
-func (x *X5CProvisioner) Reset() {
-	*x = X5CProvisioner{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_provisioners_proto_msgTypes[14]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *X5CProvisioner) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*X5CProvisioner) ProtoMessage() {}
-
-func (x *X5CProvisioner) ProtoReflect() protoreflect.Message {
-	mi := &file_provisioners_proto_msgTypes[14]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use X5CProvisioner.ProtoReflect.Descriptor instead.
-func (*X5CProvisioner) Descriptor() ([]byte, []int) {
-	return file_provisioners_proto_rawDescGZIP(), []int{14}
-}
-
-func (x *X5CProvisioner) GetRoots() [][]byte {
-	if x != nil {
-		return x.Roots
-	}
-	return nil
-}
-
-type K8SSAProvisioner struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
-	PublicKeys [][]byte `protobuf:"bytes,1,rep,name=public_keys,json=publicKeys,proto3" json:"public_keys,omitempty"`
-}
-
-func (x *K8SSAProvisioner) Reset() {
-	*x = K8SSAProvisioner{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_provisioners_proto_msgTypes[15]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *K8SSAProvisioner) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*K8SSAProvisioner) ProtoMessage() {}
-
-func (x *K8SSAProvisioner) ProtoReflect() protoreflect.Message {
-	mi := &file_provisioners_proto_msgTypes[15]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use K8SSAProvisioner.ProtoReflect.Descriptor instead.
-func (*K8SSAProvisioner) Descriptor() ([]byte, []int) {
-	return file_provisioners_proto_rawDescGZIP(), []int{15}
-}
-
-func (x *K8SSAProvisioner) GetPublicKeys() [][]byte {
-	if x != nil {
-		return x.PublicKeys
-	}
-	return nil
-}
-
-type SSHPOPProvisioner struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-}
-
-func (x *SSHPOPProvisioner) Reset() {
-	*x = SSHPOPProvisioner{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_provisioners_proto_msgTypes[16]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *SSHPOPProvisioner) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*SSHPOPProvisioner) ProtoMessage() {}
-
-func (x *SSHPOPProvisioner) ProtoReflect() protoreflect.Message {
-	mi := &file_provisioners_proto_msgTypes[16]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use SSHPOPProvisioner.ProtoReflect.Descriptor instead.
-func (*SSHPOPProvisioner) Descriptor() ([]byte, []int) {
-	return file_provisioners_proto_rawDescGZIP(), []int{16}
-}
-
-type SCEPProvisioner struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
-	ForceCn                bool     `protobuf:"varint,1,opt,name=force_cn,json=forceCn,proto3" json:"force_cn,omitempty"`
-	Challenge              string   `protobuf:"bytes,2,opt,name=challenge,proto3" json:"challenge,omitempty"`
-	Capabilities           []string `protobuf:"bytes,3,rep,name=capabilities,proto3" json:"capabilities,omitempty"`
-	MinimumPublicKeyLength int32    `protobuf:"varint,4,opt,name=minimum_public_key_length,json=minimumPublicKeyLength,proto3" json:"minimum_public_key_length,omitempty"`
-}
-
-func (x *SCEPProvisioner) Reset() {
-	*x = SCEPProvisioner{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_provisioners_proto_msgTypes[17]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
-}
-
-func (x *SCEPProvisioner) String() string {
-	return protoimpl.X.MessageStringOf(x)
-}
-
-func (*SCEPProvisioner) ProtoMessage() {}
-
-func (x *SCEPProvisioner) ProtoReflect() protoreflect.Message {
-	mi := &file_provisioners_proto_msgTypes[17]
-	if protoimpl.UnsafeEnabled && x != nil {
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		if ms.LoadMessageInfo() == nil {
-			ms.StoreMessageInfo(mi)
-		}
-		return ms
-	}
-	return mi.MessageOf(x)
-}
-
-// Deprecated: Use SCEPProvisioner.ProtoReflect.Descriptor instead.
-func (*SCEPProvisioner) Descriptor() ([]byte, []int) {
-	return file_provisioners_proto_rawDescGZIP(), []int{17}
-}
-
-func (x *SCEPProvisioner) GetForceCn() bool {
-	if x != nil {
-		return x.ForceCn
-	}
-	return false
-}
-
-func (x *SCEPProvisioner) GetChallenge() string {
-	if x != nil {
-		return x.Challenge
-	}
-	return ""
-}
-
-func (x *SCEPProvisioner) GetCapabilities() []string {
-	if x != nil {
-		return x.Capabilities
-	}
-	return nil
-}
-
-func (x *SCEPProvisioner) GetMinimumPublicKeyLength() int32 {
-	if x != nil {
-		return x.MinimumPublicKeyLength
-	}
-	return 0
-}
-
-var File_provisioners_proto protoreflect.FileDescriptor
-
-var file_provisioners_proto_rawDesc = []byte{
-	0x0a, 0x12, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x73, 0x2e, 0x70,
-	0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x6c, 0x69, 0x6e, 0x6b, 0x65, 0x64, 0x63, 0x61, 0x1a, 0x1f,
-	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f,
-	0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22,
-	0xc2, 0x04, 0x0a, 0x0b, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x12,
-	0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12,
-	0x21, 0x0a, 0x0c, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18,
-	0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79,
-	0x49, 0x64, 0x12, 0x2e, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e,
-	0x32, 0x1a, 0x2e, 0x6c, 0x69, 0x6e, 0x6b, 0x65, 0x64, 0x63, 0x61, 0x2e, 0x50, 0x72, 0x6f, 0x76,
-	0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79,
-	0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09,
-	0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c,
-	0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x6c, 0x69, 0x6e, 0x6b, 0x65, 0x64,
-	0x63, 0x61, 0x2e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x44, 0x65,
-	0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x28,
-	0x0a, 0x06, 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10,
-	0x2e, 0x6c, 0x69, 0x6e, 0x6b, 0x65, 0x64, 0x63, 0x61, 0x2e, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x73,
-	0x52, 0x06, 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x73, 0x12, 0x37, 0x0a, 0x0d, 0x78, 0x35, 0x30, 0x39,
-	0x5f, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32,
-	0x12, 0x2e, 0x6c, 0x69, 0x6e, 0x6b, 0x65, 0x64, 0x63, 0x61, 0x2e, 0x54, 0x65, 0x6d, 0x70, 0x6c,
-	0x61, 0x74, 0x65, 0x52, 0x0c, 0x78, 0x35, 0x30, 0x39, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74,
-	0x65, 0x12, 0x35, 0x0a, 0x0c, 0x73, 0x73, 0x68, 0x5f, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74,
-	0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6c, 0x69, 0x6e, 0x6b, 0x65, 0x64,
-	0x63, 0x61, 0x2e, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x52, 0x0b, 0x73, 0x73, 0x68,
-	0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x12, 0x39, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61,
-	0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67,
-	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54,
-	0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65,
-	0x64, 0x41, 0x74, 0x12, 0x39, 0x0a, 0x0a, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x61,
-	0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
-	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74,
-	0x61, 0x6d, 0x70, 0x52, 0x09, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x41, 0x74, 0x22, 0x74,
-	0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4f, 0x50, 0x10, 0x00,
-	0x12, 0x07, 0x0a, 0x03, 0x4a, 0x57, 0x4b, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x4f, 0x49, 0x44,
-	0x43, 0x10, 0x02, 0x12, 0x07, 0x0a, 0x03, 0x47, 0x43, 0x50, 0x10, 0x03, 0x12, 0x07, 0x0a, 0x03,
-	0x41, 0x57, 0x53, 0x10, 0x04, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x5a, 0x55, 0x52, 0x45, 0x10, 0x05,
-	0x12, 0x08, 0x0a, 0x04, 0x41, 0x43, 0x4d, 0x45, 0x10, 0x06, 0x12, 0x07, 0x0a, 0x03, 0x58, 0x35,
-	0x43, 0x10, 0x07, 0x12, 0x09, 0x0a, 0x05, 0x4b, 0x38, 0x53, 0x53, 0x41, 0x10, 0x08, 0x12, 0x0a,
-	0x0a, 0x06, 0x53, 0x53, 0x48, 0x50, 0x4f, 0x50, 0x10, 0x09, 0x12, 0x08, 0x0a, 0x04, 0x53, 0x43,
-	0x45, 0x50, 0x10, 0x0a, 0x22, 0x86, 0x04, 0x0a, 0x12, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69,
-	0x6f, 0x6e, 0x65, 0x72, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x2c, 0x0a, 0x03, 0x4a,
-	0x57, 0x4b, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6c, 0x69, 0x6e, 0x6b, 0x65,
-	0x64, 0x63, 0x61, 0x2e, 0x4a, 0x57, 0x4b, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e,
-	0x65, 0x72, 0x48, 0x00, 0x52, 0x03, 0x4a, 0x57, 0x4b, 0x12, 0x2f, 0x0a, 0x04, 0x4f, 0x49, 0x44,
-	0x43, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x6c, 0x69, 0x6e, 0x6b, 0x65, 0x64,
-	0x63, 0x61, 0x2e, 0x4f, 0x49, 0x44, 0x43, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e,
-	0x65, 0x72, 0x48, 0x00, 0x52, 0x04, 0x4f, 0x49, 0x44, 0x43, 0x12, 0x2c, 0x0a, 0x03, 0x47, 0x43,
-	0x50, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6c, 0x69, 0x6e, 0x6b, 0x65, 0x64,
-	0x63, 0x61, 0x2e, 0x47, 0x43, 0x50, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65,
-	0x72, 0x48, 0x00, 0x52, 0x03, 0x47, 0x43, 0x50, 0x12, 0x2c, 0x0a, 0x03, 0x41, 0x57, 0x53, 0x18,
-	0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6c, 0x69, 0x6e, 0x6b, 0x65, 0x64, 0x63, 0x61,
-	0x2e, 0x41, 0x57, 0x53, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x48,
-	0x00, 0x52, 0x03, 0x41, 0x57, 0x53, 0x12, 0x32, 0x0a, 0x05, 0x41, 0x7a, 0x75, 0x72, 0x65, 0x18,
-	0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6c, 0x69, 0x6e, 0x6b, 0x65, 0x64, 0x63, 0x61,
-	0x2e, 0x41, 0x7a, 0x75, 0x72, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65,
-	0x72, 0x48, 0x00, 0x52, 0x05, 0x41, 0x7a, 0x75, 0x72, 0x65, 0x12, 0x2f, 0x0a, 0x04, 0x41, 0x43,
-	0x4d, 0x45, 0x18, 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x6c, 0x69, 0x6e, 0x6b, 0x65,
-	0x64, 0x63, 0x61, 0x2e, 0x41, 0x43, 0x4d, 0x45, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f,
-	0x6e, 0x65, 0x72, 0x48, 0x00, 0x52, 0x04, 0x41, 0x43, 0x4d, 0x45, 0x12, 0x2c, 0x0a, 0x03, 0x58,
-	0x35, 0x43, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6c, 0x69, 0x6e, 0x6b, 0x65,
-	0x64, 0x63, 0x61, 0x2e, 0x58, 0x35, 0x43, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e,
-	0x65, 0x72, 0x48, 0x00, 0x52, 0x03, 0x58, 0x35, 0x43, 0x12, 0x32, 0x0a, 0x05, 0x4b, 0x38, 0x73,
-	0x53, 0x41, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6c, 0x69, 0x6e, 0x6b, 0x65,
-	0x64, 0x63, 0x61, 0x2e, 0x4b, 0x38, 0x73, 0x53, 0x41, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69,
-	0x6f, 0x6e, 0x65, 0x72, 0x48, 0x00, 0x52, 0x05, 0x4b, 0x38, 0x73, 0x53, 0x41, 0x12, 0x35, 0x0a,
-	0x06, 0x53, 0x53, 0x48, 0x50, 0x4f, 0x50, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e,
-	0x6c, 0x69, 0x6e, 0x6b, 0x65, 0x64, 0x63, 0x61, 0x2e, 0x53, 0x53, 0x48, 0x50, 0x4f, 0x50, 0x50,
-	0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x48, 0x00, 0x52, 0x06, 0x53, 0x53,
-	0x48, 0x50, 0x4f, 0x50, 0x12, 0x2f, 0x0a, 0x04, 0x53, 0x43, 0x45, 0x50, 0x18, 0x1d, 0x20, 0x01,
-	0x28, 0x0b, 0x32, 0x19, 0x2e, 0x6c, 0x69, 0x6e, 0x6b, 0x65, 0x64, 0x63, 0x61, 0x2e, 0x53, 0x43,
-	0x45, 0x50, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x48, 0x00, 0x52,
-	0x04, 0x53, 0x43, 0x45, 0x50, 0x42, 0x06, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x4c, 0x0a,
-	0x0f, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74,
-	0x12, 0x39, 0x0a, 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x73,
-	0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6c, 0x69, 0x6e, 0x6b, 0x65, 0x64, 0x63,
-	0x61, 0x2e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x52, 0x0c, 0x70,
-	0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x73, 0x22, 0x82, 0x01, 0x0a, 0x06,
-	0x43, 0x6c, 0x61, 0x69, 0x6d, 0x73, 0x12, 0x28, 0x0a, 0x04, 0x78, 0x35, 0x30, 0x39, 0x18, 0x01,
-	0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6c, 0x69, 0x6e, 0x6b, 0x65, 0x64, 0x63, 0x61, 0x2e,
-	0x58, 0x35, 0x30, 0x39, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x73, 0x52, 0x04, 0x78, 0x35, 0x30, 0x39,
-	0x12, 0x25, 0x0a, 0x03, 0x73, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e,
-	0x6c, 0x69, 0x6e, 0x6b, 0x65, 0x64, 0x63, 0x61, 0x2e, 0x53, 0x53, 0x48, 0x43, 0x6c, 0x61, 0x69,
-	0x6d, 0x73, 0x52, 0x03, 0x73, 0x73, 0x68, 0x12, 0x27, 0x0a, 0x0f, 0x64, 0x69, 0x73, 0x61, 0x62,
-	0x6c, 0x65, 0x5f, 0x72, 0x65, 0x6e, 0x65, 0x77, 0x61, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08,
-	0x52, 0x0e, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x6e, 0x65, 0x77, 0x61, 0x6c,
-	0x22, 0x59, 0x0a, 0x0a, 0x58, 0x35, 0x30, 0x39, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x73, 0x12, 0x18,
-	0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52,
-	0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x31, 0x0a, 0x09, 0x64, 0x75, 0x72, 0x61,
-	0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x6c, 0x69,
-	0x6e, 0x6b, 0x65, 0x64, 0x63, 0x61, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73,
-	0x52, 0x09, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x9d, 0x01, 0x0a, 0x09,
-	0x53, 0x53, 0x48, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61,
-	0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62,
-	0x6c, 0x65, 0x64, 0x12, 0x3a, 0x0a, 0x0e, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x64, 0x75, 0x72, 0x61,
-	0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x6c, 0x69,
-	0x6e, 0x6b, 0x65, 0x64, 0x63, 0x61, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73,
-	0x52, 0x0d, 0x75, 0x73, 0x65, 0x72, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12,
-	0x3a, 0x0a, 0x0e, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
-	0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x6c, 0x69, 0x6e, 0x6b, 0x65, 0x64,
-	0x63, 0x61, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0d, 0x68, 0x6f,
-	0x73, 0x74, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x49, 0x0a, 0x09, 0x44,
-	0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x66, 0x61,
-	0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75,
-	0x6c, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x69, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
-	0x03, 0x6d, 0x69, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x61, 0x78, 0x18, 0x03, 0x20, 0x01, 0x28,
-	0x09, 0x52, 0x03, 0x6d, 0x61, 0x78, 0x22, 0x3a, 0x0a, 0x08, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61,
-	0x74, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x18, 0x01,
-	0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x12, 0x12,
-	0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61,
-	0x74, 0x61, 0x22, 0x63, 0x0a, 0x0e, 0x4a, 0x57, 0x4b, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69,
-	0x6f, 0x6e, 0x65, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b,
-	0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63,
-	0x4b, 0x65, 0x79, 0x12, 0x32, 0x0a, 0x15, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64,
-	0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01,
-	0x28, 0x0c, 0x52, 0x13, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x50, 0x72, 0x69,
-	0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x22, 0x98, 0x02, 0x0a, 0x0f, 0x4f, 0x49, 0x44, 0x43,
-	0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x63,
-	0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08,
-	0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x6c, 0x69, 0x65,
-	0x6e, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
-	0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x12, 0x35, 0x0a,
-	0x16, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65,
-	0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x15, 0x63,
-	0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x64, 0x70,
-	0x6f, 0x69, 0x6e, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x18, 0x04,
-	0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x12, 0x18, 0x0a, 0x07,
-	0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x64,
-	0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73,
-	0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x12, 0x25,
-	0x0a, 0x0e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73,
-	0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x41, 0x64,
-	0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x5f,
-	0x69, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74,
-	0x49, 0x64, 0x22, 0xeb, 0x01, 0x0a, 0x0e, 0x47, 0x43, 0x50, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73,
-	0x69, 0x6f, 0x6e, 0x65, 0x72, 0x12, 0x29, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
-	0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52,
-	0x0f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73,
-	0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x73, 0x18,
-	0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x64,
-	0x73, 0x12, 0x2e, 0x0a, 0x13, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x63, 0x75, 0x73,
-	0x74, 0x6f, 0x6d, 0x5f, 0x73, 0x61, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11,
-	0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x53, 0x61, 0x6e,
-	0x73, 0x12, 0x3a, 0x0a, 0x1a, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x74, 0x72, 0x75,
-	0x73, 0x74, 0x5f, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x75, 0x73, 0x65, 0x18,
-	0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x16, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x72,
-	0x75, 0x73, 0x74, 0x4f, 0x6e, 0x46, 0x69, 0x72, 0x73, 0x74, 0x55, 0x73, 0x65, 0x12, 0x21, 0x0a,
-	0x0c, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x61, 0x67, 0x65, 0x18, 0x05, 0x20,
-	0x01, 0x28, 0x09, 0x52, 0x0b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x41, 0x67, 0x65,
-	0x22, 0xbb, 0x01, 0x0a, 0x0e, 0x41, 0x57, 0x53, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f,
-	0x6e, 0x65, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x18,
-	0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x12,
-	0x2e, 0x0a, 0x13, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x63, 0x75, 0x73, 0x74, 0x6f,
-	0x6d, 0x5f, 0x73, 0x61, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x64, 0x69,
-	0x73, 0x61, 0x62, 0x6c, 0x65, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x53, 0x61, 0x6e, 0x73, 0x12,
-	0x3a, 0x0a, 0x1a, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x74, 0x72, 0x75, 0x73, 0x74,
-	0x5f, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x75, 0x73, 0x65, 0x18, 0x03, 0x20,
-	0x01, 0x28, 0x08, 0x52, 0x16, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x72, 0x75, 0x73,
-	0x74, 0x4f, 0x6e, 0x46, 0x69, 0x72, 0x73, 0x74, 0x55, 0x73, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x69,
-	0x6e,
0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x61, 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x41, 0x67, 0x65, 0x22, 0xe0, - 0x01, 0x0a, 0x10, 0x41, 0x7a, 0x75, 0x72, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, - 0x6e, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x5f, 0x69, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x49, 0x64, - 0x12, 0x27, 0x0a, 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x72, 0x6f, - 0x75, 0x70, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x61, 0x75, 0x64, - 0x69, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x61, 0x75, 0x64, - 0x69, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x2e, 0x0a, 0x13, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, - 0x5f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x73, 0x61, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x11, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x43, 0x75, 0x73, 0x74, 0x6f, - 0x6d, 0x53, 0x61, 0x6e, 0x73, 0x12, 0x3a, 0x0a, 0x1a, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, - 0x5f, 0x74, 0x72, 0x75, 0x73, 0x74, 0x5f, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, - 0x75, 0x73, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x16, 0x64, 0x69, 0x73, 0x61, 0x62, - 0x6c, 0x65, 0x54, 0x72, 0x75, 0x73, 0x74, 0x4f, 0x6e, 0x46, 0x69, 0x72, 0x73, 0x74, 0x55, 0x73, - 0x65, 0x22, 0x2c, 0x0a, 0x0f, 0x41, 0x43, 0x4d, 0x45, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, - 0x6f, 0x6e, 0x65, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x5f, 0x63, 0x6e, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x43, 0x6e, 0x22, - 0x26, 0x0a, 0x0e, 0x58, 0x35, 0x43, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, - 0x72, 0x12, 0x14, 0x0a, 0x05, 0x72, 0x6f, 0x6f, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, - 0x52, 0x05, 0x72, 0x6f, 0x6f, 0x74, 0x73, 0x22, 0x33, 0x0a, 0x10, 0x4b, 0x38, 0x73, 0x53, 0x41, - 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x12, 0x1f, 0x0a, 0x0b, 0x70, - 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, - 0x52, 0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x22, 0x13, 0x0a, 0x11, - 0x53, 0x53, 0x48, 0x50, 0x4f, 0x50, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, - 0x72, 0x22, 0xa9, 0x01, 0x0a, 0x0f, 0x53, 0x43, 0x45, 0x50, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, - 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x5f, 0x63, - 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x43, 0x6e, - 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x68, 0x61, 0x6c, 0x6c, 0x65, 0x6e, 0x67, 0x65, 0x12, 0x22, - 0x0a, 0x0c, 0x63, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x03, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, - 0x65, 0x73, 0x12, 0x39, 0x0a, 0x19, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x70, 0x75, - 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x16, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x50, 0x75, - 0x62, 0x6c, 0x69, 0x63, 0x4b, 
0x65, 0x79, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x42, 0x15, 0x5a, - 0x13, 0x67, 0x6f, 0x2e, 0x73, 0x74, 0x65, 0x70, 0x2e, 0x73, 0x6d, 0x2f, 0x6c, 0x69, 0x6e, 0x6b, - 0x65, 0x64, 0x63, 0x61, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_provisioners_proto_rawDescOnce sync.Once - file_provisioners_proto_rawDescData = file_provisioners_proto_rawDesc -) - -func file_provisioners_proto_rawDescGZIP() []byte { - file_provisioners_proto_rawDescOnce.Do(func() { - file_provisioners_proto_rawDescData = protoimpl.X.CompressGZIP(file_provisioners_proto_rawDescData) - }) - return file_provisioners_proto_rawDescData -} - -var file_provisioners_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_provisioners_proto_msgTypes = make([]protoimpl.MessageInfo, 18) -var file_provisioners_proto_goTypes = []interface{}{ - (Provisioner_Type)(0), // 0: linkedca.Provisioner.Type - (*Provisioner)(nil), // 1: linkedca.Provisioner - (*ProvisionerDetails)(nil), // 2: linkedca.ProvisionerDetails - (*ProvisionerList)(nil), // 3: linkedca.ProvisionerList - (*Claims)(nil), // 4: linkedca.Claims - (*X509Claims)(nil), // 5: linkedca.X509Claims - (*SSHClaims)(nil), // 6: linkedca.SSHClaims - (*Durations)(nil), // 7: linkedca.Durations - (*Template)(nil), // 8: linkedca.Template - (*JWKProvisioner)(nil), // 9: linkedca.JWKProvisioner - (*OIDCProvisioner)(nil), // 10: linkedca.OIDCProvisioner - (*GCPProvisioner)(nil), // 11: linkedca.GCPProvisioner - (*AWSProvisioner)(nil), // 12: linkedca.AWSProvisioner - (*AzureProvisioner)(nil), // 13: linkedca.AzureProvisioner - (*ACMEProvisioner)(nil), // 14: linkedca.ACMEProvisioner - (*X5CProvisioner)(nil), // 15: linkedca.X5CProvisioner - (*K8SSAProvisioner)(nil), // 16: linkedca.K8sSAProvisioner - (*SSHPOPProvisioner)(nil), // 17: linkedca.SSHPOPProvisioner - (*SCEPProvisioner)(nil), // 18: linkedca.SCEPProvisioner - (*timestamppb.Timestamp)(nil), // 19: google.protobuf.Timestamp -} -var file_provisioners_proto_depIdxs = []int32{ - 0, // 0: linkedca.Provisioner.type:type_name -> linkedca.Provisioner.Type - 2, // 1: linkedca.Provisioner.details:type_name -> linkedca.ProvisionerDetails - 4, // 2: linkedca.Provisioner.claims:type_name -> linkedca.Claims - 8, // 3: linkedca.Provisioner.x509_template:type_name -> linkedca.Template - 8, // 4: linkedca.Provisioner.ssh_template:type_name -> linkedca.Template - 19, // 5: linkedca.Provisioner.created_at:type_name -> google.protobuf.Timestamp - 19, // 6: linkedca.Provisioner.deleted_at:type_name -> google.protobuf.Timestamp - 9, // 7: linkedca.ProvisionerDetails.JWK:type_name -> linkedca.JWKProvisioner - 10, // 8: linkedca.ProvisionerDetails.OIDC:type_name -> linkedca.OIDCProvisioner - 11, // 9: linkedca.ProvisionerDetails.GCP:type_name -> linkedca.GCPProvisioner - 12, // 10: linkedca.ProvisionerDetails.AWS:type_name -> linkedca.AWSProvisioner - 13, // 11: linkedca.ProvisionerDetails.Azure:type_name -> linkedca.AzureProvisioner - 14, // 12: linkedca.ProvisionerDetails.ACME:type_name -> linkedca.ACMEProvisioner - 15, // 13: linkedca.ProvisionerDetails.X5C:type_name -> linkedca.X5CProvisioner - 16, // 14: linkedca.ProvisionerDetails.K8sSA:type_name -> linkedca.K8sSAProvisioner - 17, // 15: linkedca.ProvisionerDetails.SSHPOP:type_name -> linkedca.SSHPOPProvisioner - 18, // 16: linkedca.ProvisionerDetails.SCEP:type_name -> linkedca.SCEPProvisioner - 1, // 17: linkedca.ProvisionerList.provisioners:type_name -> linkedca.Provisioner - 5, // 18: linkedca.Claims.x509:type_name -> linkedca.X509Claims - 6, // 19: 
linkedca.Claims.ssh:type_name -> linkedca.SSHClaims - 7, // 20: linkedca.X509Claims.durations:type_name -> linkedca.Durations - 7, // 21: linkedca.SSHClaims.user_durations:type_name -> linkedca.Durations - 7, // 22: linkedca.SSHClaims.host_durations:type_name -> linkedca.Durations - 23, // [23:23] is the sub-list for method output_type - 23, // [23:23] is the sub-list for method input_type - 23, // [23:23] is the sub-list for extension type_name - 23, // [23:23] is the sub-list for extension extendee - 0, // [0:23] is the sub-list for field type_name -} - -func init() { file_provisioners_proto_init() } -func file_provisioners_proto_init() { - if File_provisioners_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_provisioners_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Provisioner); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_provisioners_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ProvisionerDetails); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_provisioners_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ProvisionerList); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_provisioners_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Claims); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_provisioners_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*X509Claims); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_provisioners_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SSHClaims); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_provisioners_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Durations); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_provisioners_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Template); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_provisioners_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*JWKProvisioner); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_provisioners_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*OIDCProvisioner); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_provisioners_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GCPProvisioner); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - 
file_provisioners_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AWSProvisioner); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_provisioners_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AzureProvisioner); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_provisioners_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ACMEProvisioner); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_provisioners_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*X5CProvisioner); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_provisioners_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*K8SSAProvisioner); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_provisioners_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SSHPOPProvisioner); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_provisioners_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SCEPProvisioner); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_provisioners_proto_msgTypes[1].OneofWrappers = []interface{}{ - (*ProvisionerDetails_JWK)(nil), - (*ProvisionerDetails_OIDC)(nil), - (*ProvisionerDetails_GCP)(nil), - (*ProvisionerDetails_AWS)(nil), - (*ProvisionerDetails_Azure)(nil), - (*ProvisionerDetails_ACME)(nil), - (*ProvisionerDetails_X5C)(nil), - (*ProvisionerDetails_K8SSA)(nil), - (*ProvisionerDetails_SSHPOP)(nil), - (*ProvisionerDetails_SCEP)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_provisioners_proto_rawDesc, - NumEnums: 1, - NumMessages: 18, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_provisioners_proto_goTypes, - DependencyIndexes: file_provisioners_proto_depIdxs, - EnumInfos: file_provisioners_proto_enumTypes, - MessageInfos: file_provisioners_proto_msgTypes, - }.Build() - File_provisioners_proto = out.File - file_provisioners_proto_rawDesc = nil - file_provisioners_proto_goTypes = nil - file_provisioners_proto_depIdxs = nil -} diff --git a/vendor/go.step.sm/linkedca/provisioners.proto b/vendor/go.step.sm/linkedca/provisioners.proto deleted file mode 100644 index 9385c2ca..00000000 --- a/vendor/go.step.sm/linkedca/provisioners.proto +++ /dev/null @@ -1,140 +0,0 @@ -syntax = "proto3"; - -package linkedca; - -option go_package = "go.step.sm/linkedca"; - -import "google/protobuf/timestamp.proto"; - -message Provisioner { - enum Type { - NOOP = 0; - JWK = 1; - OIDC = 2; - GCP = 3; - AWS = 4; - AZURE = 5; - ACME = 6; - X5C = 7; - K8SSA = 8; - SSHPOP = 9; - SCEP = 10; - } - string id = 1; - string authority_id = 2; - Type type = 3; - string name = 4; - ProvisionerDetails details = 5; - Claims claims = 6; - 
Template x509_template = 7; - Template ssh_template = 8; - google.protobuf.Timestamp created_at = 9; - google.protobuf.Timestamp deleted_at = 10; -} - -message ProvisionerDetails { - oneof data { - JWKProvisioner JWK = 20; - OIDCProvisioner OIDC = 21; - GCPProvisioner GCP = 22; - AWSProvisioner AWS = 23; - AzureProvisioner Azure = 24; - ACMEProvisioner ACME = 25; - X5CProvisioner X5C = 26; - K8sSAProvisioner K8sSA = 27; - SSHPOPProvisioner SSHPOP = 28; - SCEPProvisioner SCEP = 29; - } -} - -message ProvisionerList { - repeated Provisioner provisioners = 1; -} - -message Claims { - X509Claims x509 = 1; - SSHClaims ssh = 2; - bool disable_renewal = 3; -} - -message X509Claims { - bool enabled = 1; - Durations durations = 2; -} - -message SSHClaims { - bool enabled = 1; - Durations user_durations = 2; - Durations host_durations = 3; -} - -message Durations { - string default = 1; - string min = 2; - string max = 3; -} - -message Template { - bytes template = 1; - bytes data = 2; -} - -message JWKProvisioner { - bytes public_key = 1; - bytes encrypted_private_key = 2; -} - -message OIDCProvisioner { - string client_id = 1; - string client_secret = 2; - string configuration_endpoint = 3; - repeated string admins = 4; - repeated string domains = 5; - repeated string groups = 6; - string listen_address = 7; - string tenant_id = 8; -} - -message GCPProvisioner { - repeated string service_accounts = 1; - repeated string project_ids = 2; - bool disable_custom_sans = 3; - bool disable_trust_on_first_use = 4; - string instance_age = 5; -} - -message AWSProvisioner { - repeated string accounts = 1; - bool disable_custom_sans = 2; - bool disable_trust_on_first_use = 3; - string instance_age = 4; -} - -message AzureProvisioner { - string tenant_id = 1; - repeated string resource_groups = 2; - string audience = 3; - bool disable_custom_sans = 4; - bool disable_trust_on_first_use = 5; -} - -message ACMEProvisioner { - bool force_cn = 1; -} - -message X5CProvisioner { - repeated bytes roots = 1; -} - -message K8sSAProvisioner { - repeated bytes public_keys = 1; -} - -message SSHPOPProvisioner {} - -message SCEPProvisioner { - bool force_cn = 1; - string challenge = 2; - repeated string capabilities = 3; - int32 minimum_public_key_length = 4; -} \ No newline at end of file diff --git a/vendor/go.uber.org/atomic/.codecov.yml b/vendor/go.uber.org/atomic/.codecov.yml deleted file mode 100644 index 571116cc..00000000 --- a/vendor/go.uber.org/atomic/.codecov.yml +++ /dev/null @@ -1,19 +0,0 @@ -coverage: - range: 80..100 - round: down - precision: 2 - - status: - project: # measuring the overall project coverage - default: # context, you can create multiple ones with custom titles - enabled: yes # must be yes|true to enable this status - target: 100 # specify the target coverage for each commit status - # option: "auto" (must increase from parent commit or pull request base) - # option: "X%" a static target percentage to hit - if_not_found: success # if parent is not found report status as success, error, or failure - if_ci_failed: error # if ci fails report status as success, error, or failure - -# Also update COVER_IGNORE_PKGS in the Makefile. 
-ignore: - - /internal/gen-atomicint/ - - /internal/gen-valuewrapper/ diff --git a/vendor/go.uber.org/atomic/.gitignore b/vendor/go.uber.org/atomic/.gitignore deleted file mode 100644 index c3fa2538..00000000 --- a/vendor/go.uber.org/atomic/.gitignore +++ /dev/null @@ -1,12 +0,0 @@ -/bin -.DS_Store -/vendor -cover.html -cover.out -lint.log - -# Binaries -*.test - -# Profiling output -*.prof diff --git a/vendor/go.uber.org/atomic/.travis.yml b/vendor/go.uber.org/atomic/.travis.yml deleted file mode 100644 index 13d0a4f2..00000000 --- a/vendor/go.uber.org/atomic/.travis.yml +++ /dev/null @@ -1,27 +0,0 @@ -sudo: false -language: go -go_import_path: go.uber.org/atomic - -env: - global: - - GO111MODULE=on - -matrix: - include: - - go: oldstable - - go: stable - env: LINT=1 - -cache: - directories: - - vendor - -before_install: - - go version - -script: - - test -z "$LINT" || make lint - - make cover - -after_success: - - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/go.uber.org/atomic/CHANGELOG.md b/vendor/go.uber.org/atomic/CHANGELOG.md deleted file mode 100644 index 24c0274d..00000000 --- a/vendor/go.uber.org/atomic/CHANGELOG.md +++ /dev/null @@ -1,76 +0,0 @@ -# Changelog -All notable changes to this project will be documented in this file. - -The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), -and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). - -## [1.7.0] - 2020-09-14 -### Added -- Support JSON serialization and deserialization of primitive atomic types. -- Support Text marshalling and unmarshalling for string atomics. - -### Changed -- Disallow incorrect comparison of atomic values in a non-atomic way. - -### Removed -- Remove dependency on `golang.org/x/{lint, tools}`. - -## [1.6.0] - 2020-02-24 -### Changed -- Drop library dependency on `golang.org/x/{lint, tools}`. - -## [1.5.1] - 2019-11-19 -- Fix bug where `Bool.CAS` and `Bool.Toggle` do work correctly together - causing `CAS` to fail even though the old value matches. - -## [1.5.0] - 2019-10-29 -### Changed -- With Go modules, only the `go.uber.org/atomic` import path is supported now. - If you need to use the old import path, please add a `replace` directive to - your `go.mod`. - -## [1.4.0] - 2019-05-01 -### Added - - Add `atomic.Error` type for atomic operations on `error` values. - -## [1.3.2] - 2018-05-02 -### Added -- Add `atomic.Duration` type for atomic operations on `time.Duration` values. - -## [1.3.1] - 2017-11-14 -### Fixed -- Revert optimization for `atomic.String.Store("")` which caused data races. - -## [1.3.0] - 2017-11-13 -### Added -- Add `atomic.Bool.CAS` for compare-and-swap semantics on bools. - -### Changed -- Optimize `atomic.String.Store("")` by avoiding an allocation. - -## [1.2.0] - 2017-04-12 -### Added -- Shadow `atomic.Value` from `sync/atomic`. - -## [1.1.0] - 2017-03-10 -### Added -- Add atomic `Float64` type. - -### Changed -- Support new `go.uber.org/atomic` import path. - -## [1.0.0] - 2016-07-18 - -- Initial release. 
- -[1.7.0]: https://github.com/uber-go/atomic/compare/v1.6.0...v1.7.0 -[1.6.0]: https://github.com/uber-go/atomic/compare/v1.5.1...v1.6.0 -[1.5.1]: https://github.com/uber-go/atomic/compare/v1.5.0...v1.5.1 -[1.5.0]: https://github.com/uber-go/atomic/compare/v1.4.0...v1.5.0 -[1.4.0]: https://github.com/uber-go/atomic/compare/v1.3.2...v1.4.0 -[1.3.2]: https://github.com/uber-go/atomic/compare/v1.3.1...v1.3.2 -[1.3.1]: https://github.com/uber-go/atomic/compare/v1.3.0...v1.3.1 -[1.3.0]: https://github.com/uber-go/atomic/compare/v1.2.0...v1.3.0 -[1.2.0]: https://github.com/uber-go/atomic/compare/v1.1.0...v1.2.0 -[1.1.0]: https://github.com/uber-go/atomic/compare/v1.0.0...v1.1.0 -[1.0.0]: https://github.com/uber-go/atomic/releases/tag/v1.0.0 diff --git a/vendor/go.uber.org/atomic/LICENSE.txt b/vendor/go.uber.org/atomic/LICENSE.txt deleted file mode 100644 index 8765c9fb..00000000 --- a/vendor/go.uber.org/atomic/LICENSE.txt +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2016 Uber Technologies, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/go.uber.org/atomic/Makefile b/vendor/go.uber.org/atomic/Makefile deleted file mode 100644 index 1b1376d4..00000000 --- a/vendor/go.uber.org/atomic/Makefile +++ /dev/null @@ -1,78 +0,0 @@ -# Directory to place `go install`ed binaries into. -export GOBIN ?= $(shell pwd)/bin - -GOLINT = $(GOBIN)/golint -GEN_ATOMICINT = $(GOBIN)/gen-atomicint -GEN_ATOMICWRAPPER = $(GOBIN)/gen-atomicwrapper -STATICCHECK = $(GOBIN)/staticcheck - -GO_FILES ?= $(shell find . '(' -path .git -o -path vendor ')' -prune -o -name '*.go' -print) - -# Also update ignore section in .codecov.yml. -COVER_IGNORE_PKGS = \ - go.uber.org/atomic/internal/gen-atomicint \ - go.uber.org/atomic/internal/gen-atomicwrapper - -.PHONY: build -build: - go build ./... - -.PHONY: test -test: - go test -race ./... - -.PHONY: gofmt -gofmt: - $(eval FMT_LOG := $(shell mktemp -t gofmt.XXXXX)) - gofmt -e -s -l $(GO_FILES) > $(FMT_LOG) || true - @[ ! -s "$(FMT_LOG)" ] || (echo "gofmt failed:" && cat $(FMT_LOG) && false) - -$(GOLINT): - cd tools && go install golang.org/x/lint/golint - -$(STATICCHECK): - cd tools && go install honnef.co/go/tools/cmd/staticcheck - -$(GEN_ATOMICWRAPPER): $(wildcard ./internal/gen-atomicwrapper/*) - go build -o $@ ./internal/gen-atomicwrapper - -$(GEN_ATOMICINT): $(wildcard ./internal/gen-atomicint/*) - go build -o $@ ./internal/gen-atomicint - -.PHONY: golint -golint: $(GOLINT) - $(GOLINT) ./... - -.PHONY: staticcheck -staticcheck: $(STATICCHECK) - $(STATICCHECK) ./... 
- -.PHONY: lint -lint: gofmt golint staticcheck generatenodirty - -# comma separated list of packages to consider for code coverage. -COVER_PKG = $(shell \ - go list -find ./... | \ - grep -v $(foreach pkg,$(COVER_IGNORE_PKGS),-e "^$(pkg)$$") | \ - paste -sd, -) - -.PHONY: cover -cover: - go test -coverprofile=cover.out -coverpkg $(COVER_PKG) -v ./... - go tool cover -html=cover.out -o cover.html - -.PHONY: generate -generate: $(GEN_ATOMICINT) $(GEN_ATOMICWRAPPER) - go generate ./... - -.PHONY: generatenodirty -generatenodirty: - @[ -z "$$(git status --porcelain)" ] || ( \ - echo "Working tree is dirty. Commit your changes first."; \ - exit 1 ) - @make generate - @status=$$(git status --porcelain); \ - [ -z "$$status" ] || ( \ - echo "Working tree is dirty after `make generate`:"; \ - echo "$$status"; \ - echo "Please ensure that the generated code is up-to-date." ) diff --git a/vendor/go.uber.org/atomic/README.md b/vendor/go.uber.org/atomic/README.md deleted file mode 100644 index ade0c20f..00000000 --- a/vendor/go.uber.org/atomic/README.md +++ /dev/null @@ -1,63 +0,0 @@ -# atomic [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] [![Go Report Card][reportcard-img]][reportcard] - -Simple wrappers for primitive types to enforce atomic access. - -## Installation - -```shell -$ go get -u go.uber.org/atomic@v1 -``` - -### Legacy Import Path - -As of v1.5.0, the import path `go.uber.org/atomic` is the only supported way -of using this package. If you are using Go modules, this package will fail to -compile with the legacy import path path `github.com/uber-go/atomic`. - -We recommend migrating your code to the new import path but if you're unable -to do so, or if your dependencies are still using the old import path, you -will have to add a `replace` directive to your `go.mod` file downgrading the -legacy import path to an older version. - -``` -replace github.com/uber-go/atomic => github.com/uber-go/atomic v1.4.0 -``` - -You can do so automatically by running the following command. - -```shell -$ go mod edit -replace github.com/uber-go/atomic=github.com/uber-go/atomic@v1.4.0 -``` - -## Usage - -The standard library's `sync/atomic` is powerful, but it's easy to forget which -variables must be accessed atomically. `go.uber.org/atomic` preserves all the -functionality of the standard library, but wraps the primitive types to -provide a safer, more convenient API. - -```go -var atom atomic.Uint32 -atom.Store(42) -atom.Sub(2) -atom.CAS(40, 11) -``` - -See the [documentation][doc] for a complete API specification. - -## Development Status - -Stable. - ---- - -Released under the [MIT License](LICENSE.txt). - -[doc-img]: https://godoc.org/github.com/uber-go/atomic?status.svg -[doc]: https://godoc.org/go.uber.org/atomic -[ci-img]: https://travis-ci.com/uber-go/atomic.svg?branch=master -[ci]: https://travis-ci.com/uber-go/atomic -[cov-img]: https://codecov.io/gh/uber-go/atomic/branch/master/graph/badge.svg -[cov]: https://codecov.io/gh/uber-go/atomic -[reportcard-img]: https://goreportcard.com/badge/go.uber.org/atomic -[reportcard]: https://goreportcard.com/report/go.uber.org/atomic diff --git a/vendor/go.uber.org/atomic/bool.go b/vendor/go.uber.org/atomic/bool.go deleted file mode 100644 index 9cf1914b..00000000 --- a/vendor/go.uber.org/atomic/bool.go +++ /dev/null @@ -1,81 +0,0 @@ -// @generated Code generated by gen-atomicwrapper. - -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import ( - "encoding/json" -) - -// Bool is an atomic type-safe wrapper for bool values. -type Bool struct { - _ nocmp // disallow non-atomic comparison - - v Uint32 -} - -var _zeroBool bool - -// NewBool creates a new Bool. -func NewBool(v bool) *Bool { - x := &Bool{} - if v != _zeroBool { - x.Store(v) - } - return x -} - -// Load atomically loads the wrapped bool. -func (x *Bool) Load() bool { - return truthy(x.v.Load()) -} - -// Store atomically stores the passed bool. -func (x *Bool) Store(v bool) { - x.v.Store(boolToInt(v)) -} - -// CAS is an atomic compare-and-swap for bool values. -func (x *Bool) CAS(o, n bool) bool { - return x.v.CAS(boolToInt(o), boolToInt(n)) -} - -// Swap atomically stores the given bool and returns the old -// value. -func (x *Bool) Swap(o bool) bool { - return truthy(x.v.Swap(boolToInt(o))) -} - -// MarshalJSON encodes the wrapped bool into JSON. -func (x *Bool) MarshalJSON() ([]byte, error) { - return json.Marshal(x.Load()) -} - -// UnmarshalJSON decodes a bool from JSON. -func (x *Bool) UnmarshalJSON(b []byte) error { - var v bool - if err := json.Unmarshal(b, &v); err != nil { - return err - } - x.Store(v) - return nil -} diff --git a/vendor/go.uber.org/atomic/bool_ext.go b/vendor/go.uber.org/atomic/bool_ext.go deleted file mode 100644 index c7bf7a82..00000000 --- a/vendor/go.uber.org/atomic/bool_ext.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import ( - "strconv" -) - -//go:generate bin/gen-atomicwrapper -name=Bool -type=bool -wrapped=Uint32 -pack=boolToInt -unpack=truthy -cas -swap -json -file=bool.go - -func truthy(n uint32) bool { - return n == 1 -} - -func boolToInt(b bool) uint32 { - if b { - return 1 - } - return 0 -} - -// Toggle atomically negates the Boolean and returns the previous value. -func (b *Bool) Toggle() bool { - for { - old := b.Load() - if b.CAS(old, !old) { - return old - } - } -} - -// String encodes the wrapped value as a string. -func (b *Bool) String() string { - return strconv.FormatBool(b.Load()) -} diff --git a/vendor/go.uber.org/atomic/doc.go b/vendor/go.uber.org/atomic/doc.go deleted file mode 100644 index ae7390ee..00000000 --- a/vendor/go.uber.org/atomic/doc.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Package atomic provides simple wrappers around numerics to enforce atomic -// access. -package atomic diff --git a/vendor/go.uber.org/atomic/duration.go b/vendor/go.uber.org/atomic/duration.go deleted file mode 100644 index 027cfcb2..00000000 --- a/vendor/go.uber.org/atomic/duration.go +++ /dev/null @@ -1,82 +0,0 @@ -// @generated Code generated by gen-atomicwrapper. - -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import ( - "encoding/json" - "time" -) - -// Duration is an atomic type-safe wrapper for time.Duration values. -type Duration struct { - _ nocmp // disallow non-atomic comparison - - v Int64 -} - -var _zeroDuration time.Duration - -// NewDuration creates a new Duration. -func NewDuration(v time.Duration) *Duration { - x := &Duration{} - if v != _zeroDuration { - x.Store(v) - } - return x -} - -// Load atomically loads the wrapped time.Duration. -func (x *Duration) Load() time.Duration { - return time.Duration(x.v.Load()) -} - -// Store atomically stores the passed time.Duration. -func (x *Duration) Store(v time.Duration) { - x.v.Store(int64(v)) -} - -// CAS is an atomic compare-and-swap for time.Duration values. -func (x *Duration) CAS(o, n time.Duration) bool { - return x.v.CAS(int64(o), int64(n)) -} - -// Swap atomically stores the given time.Duration and returns the old -// value. -func (x *Duration) Swap(o time.Duration) time.Duration { - return time.Duration(x.v.Swap(int64(o))) -} - -// MarshalJSON encodes the wrapped time.Duration into JSON. -func (x *Duration) MarshalJSON() ([]byte, error) { - return json.Marshal(x.Load()) -} - -// UnmarshalJSON decodes a time.Duration from JSON. -func (x *Duration) UnmarshalJSON(b []byte) error { - var v time.Duration - if err := json.Unmarshal(b, &v); err != nil { - return err - } - x.Store(v) - return nil -} diff --git a/vendor/go.uber.org/atomic/duration_ext.go b/vendor/go.uber.org/atomic/duration_ext.go deleted file mode 100644 index 6273b66b..00000000 --- a/vendor/go.uber.org/atomic/duration_ext.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import "time" - -//go:generate bin/gen-atomicwrapper -name=Duration -type=time.Duration -wrapped=Int64 -pack=int64 -unpack=time.Duration -cas -swap -json -imports time -file=duration.go - -// Add atomically adds to the wrapped time.Duration and returns the new value. -func (d *Duration) Add(n time.Duration) time.Duration { - return time.Duration(d.v.Add(int64(n))) -} - -// Sub atomically subtracts from the wrapped time.Duration and returns the new value. 
-func (d *Duration) Sub(n time.Duration) time.Duration { - return time.Duration(d.v.Sub(int64(n))) -} - -// String encodes the wrapped value as a string. -func (d *Duration) String() string { - return d.Load().String() -} diff --git a/vendor/go.uber.org/atomic/error.go b/vendor/go.uber.org/atomic/error.go deleted file mode 100644 index a6166fbe..00000000 --- a/vendor/go.uber.org/atomic/error.go +++ /dev/null @@ -1,51 +0,0 @@ -// @generated Code generated by gen-atomicwrapper. - -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -// Error is an atomic type-safe wrapper for error values. -type Error struct { - _ nocmp // disallow non-atomic comparison - - v Value -} - -var _zeroError error - -// NewError creates a new Error. -func NewError(v error) *Error { - x := &Error{} - if v != _zeroError { - x.Store(v) - } - return x -} - -// Load atomically loads the wrapped error. -func (x *Error) Load() error { - return unpackError(x.v.Load()) -} - -// Store atomically stores the passed error. -func (x *Error) Store(v error) { - x.v.Store(packError(v)) -} diff --git a/vendor/go.uber.org/atomic/error_ext.go b/vendor/go.uber.org/atomic/error_ext.go deleted file mode 100644 index ffe0be21..00000000 --- a/vendor/go.uber.org/atomic/error_ext.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- -package atomic - -// atomic.Value panics on nil inputs, or if the underlying type changes. -// Stabilize by always storing a custom struct that we control. - -//go:generate bin/gen-atomicwrapper -name=Error -type=error -wrapped=Value -pack=packError -unpack=unpackError -file=error.go - -type packedError struct{ Value error } - -func packError(v error) interface{} { - return packedError{v} -} - -func unpackError(v interface{}) error { - if err, ok := v.(packedError); ok { - return err.Value - } - return nil -} diff --git a/vendor/go.uber.org/atomic/float64.go b/vendor/go.uber.org/atomic/float64.go deleted file mode 100644 index 07190602..00000000 --- a/vendor/go.uber.org/atomic/float64.go +++ /dev/null @@ -1,76 +0,0 @@ -// @generated Code generated by gen-atomicwrapper. - -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import ( - "encoding/json" - "math" -) - -// Float64 is an atomic type-safe wrapper for float64 values. -type Float64 struct { - _ nocmp // disallow non-atomic comparison - - v Uint64 -} - -var _zeroFloat64 float64 - -// NewFloat64 creates a new Float64. -func NewFloat64(v float64) *Float64 { - x := &Float64{} - if v != _zeroFloat64 { - x.Store(v) - } - return x -} - -// Load atomically loads the wrapped float64. -func (x *Float64) Load() float64 { - return math.Float64frombits(x.v.Load()) -} - -// Store atomically stores the passed float64. -func (x *Float64) Store(v float64) { - x.v.Store(math.Float64bits(v)) -} - -// CAS is an atomic compare-and-swap for float64 values. -func (x *Float64) CAS(o, n float64) bool { - return x.v.CAS(math.Float64bits(o), math.Float64bits(n)) -} - -// MarshalJSON encodes the wrapped float64 into JSON. -func (x *Float64) MarshalJSON() ([]byte, error) { - return json.Marshal(x.Load()) -} - -// UnmarshalJSON decodes a float64 from JSON. -func (x *Float64) UnmarshalJSON(b []byte) error { - var v float64 - if err := json.Unmarshal(b, &v); err != nil { - return err - } - x.Store(v) - return nil -} diff --git a/vendor/go.uber.org/atomic/float64_ext.go b/vendor/go.uber.org/atomic/float64_ext.go deleted file mode 100644 index 927b1add..00000000 --- a/vendor/go.uber.org/atomic/float64_ext.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import "strconv" - -//go:generate bin/gen-atomicwrapper -name=Float64 -type=float64 -wrapped=Uint64 -pack=math.Float64bits -unpack=math.Float64frombits -cas -json -imports math -file=float64.go - -// Add atomically adds to the wrapped float64 and returns the new value. -func (f *Float64) Add(s float64) float64 { - for { - old := f.Load() - new := old + s - if f.CAS(old, new) { - return new - } - } -} - -// Sub atomically subtracts from the wrapped float64 and returns the new value. -func (f *Float64) Sub(s float64) float64 { - return f.Add(-s) -} - -// String encodes the wrapped value as a string. -func (f *Float64) String() string { - // 'g' is the behavior for floats with %v. - return strconv.FormatFloat(f.Load(), 'g', -1, 64) -} diff --git a/vendor/go.uber.org/atomic/gen.go b/vendor/go.uber.org/atomic/gen.go deleted file mode 100644 index 50d6b248..00000000 --- a/vendor/go.uber.org/atomic/gen.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- -package atomic - -//go:generate bin/gen-atomicint -name=Int32 -wrapped=int32 -file=int32.go -//go:generate bin/gen-atomicint -name=Int64 -wrapped=int64 -file=int64.go -//go:generate bin/gen-atomicint -name=Uint32 -wrapped=uint32 -unsigned -file=uint32.go -//go:generate bin/gen-atomicint -name=Uint64 -wrapped=uint64 -unsigned -file=uint64.go diff --git a/vendor/go.uber.org/atomic/int32.go b/vendor/go.uber.org/atomic/int32.go deleted file mode 100644 index 18ae5649..00000000 --- a/vendor/go.uber.org/atomic/int32.go +++ /dev/null @@ -1,102 +0,0 @@ -// @generated Code generated by gen-atomicint. - -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import ( - "encoding/json" - "strconv" - "sync/atomic" -) - -// Int32 is an atomic wrapper around int32. -type Int32 struct { - _ nocmp // disallow non-atomic comparison - - v int32 -} - -// NewInt32 creates a new Int32. -func NewInt32(i int32) *Int32 { - return &Int32{v: i} -} - -// Load atomically loads the wrapped value. -func (i *Int32) Load() int32 { - return atomic.LoadInt32(&i.v) -} - -// Add atomically adds to the wrapped int32 and returns the new value. -func (i *Int32) Add(n int32) int32 { - return atomic.AddInt32(&i.v, n) -} - -// Sub atomically subtracts from the wrapped int32 and returns the new value. -func (i *Int32) Sub(n int32) int32 { - return atomic.AddInt32(&i.v, -n) -} - -// Inc atomically increments the wrapped int32 and returns the new value. -func (i *Int32) Inc() int32 { - return i.Add(1) -} - -// Dec atomically decrements the wrapped int32 and returns the new value. -func (i *Int32) Dec() int32 { - return i.Sub(1) -} - -// CAS is an atomic compare-and-swap. -func (i *Int32) CAS(old, new int32) bool { - return atomic.CompareAndSwapInt32(&i.v, old, new) -} - -// Store atomically stores the passed value. -func (i *Int32) Store(n int32) { - atomic.StoreInt32(&i.v, n) -} - -// Swap atomically swaps the wrapped int32 and returns the old value. -func (i *Int32) Swap(n int32) int32 { - return atomic.SwapInt32(&i.v, n) -} - -// MarshalJSON encodes the wrapped int32 into JSON. -func (i *Int32) MarshalJSON() ([]byte, error) { - return json.Marshal(i.Load()) -} - -// UnmarshalJSON decodes JSON into the wrapped int32. -func (i *Int32) UnmarshalJSON(b []byte) error { - var v int32 - if err := json.Unmarshal(b, &v); err != nil { - return err - } - i.Store(v) - return nil -} - -// String encodes the wrapped value as a string. 
-func (i *Int32) String() string { - v := i.Load() - return strconv.FormatInt(int64(v), 10) -} diff --git a/vendor/go.uber.org/atomic/int64.go b/vendor/go.uber.org/atomic/int64.go deleted file mode 100644 index 2bcbbfaa..00000000 --- a/vendor/go.uber.org/atomic/int64.go +++ /dev/null @@ -1,102 +0,0 @@ -// @generated Code generated by gen-atomicint. - -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import ( - "encoding/json" - "strconv" - "sync/atomic" -) - -// Int64 is an atomic wrapper around int64. -type Int64 struct { - _ nocmp // disallow non-atomic comparison - - v int64 -} - -// NewInt64 creates a new Int64. -func NewInt64(i int64) *Int64 { - return &Int64{v: i} -} - -// Load atomically loads the wrapped value. -func (i *Int64) Load() int64 { - return atomic.LoadInt64(&i.v) -} - -// Add atomically adds to the wrapped int64 and returns the new value. -func (i *Int64) Add(n int64) int64 { - return atomic.AddInt64(&i.v, n) -} - -// Sub atomically subtracts from the wrapped int64 and returns the new value. -func (i *Int64) Sub(n int64) int64 { - return atomic.AddInt64(&i.v, -n) -} - -// Inc atomically increments the wrapped int64 and returns the new value. -func (i *Int64) Inc() int64 { - return i.Add(1) -} - -// Dec atomically decrements the wrapped int64 and returns the new value. -func (i *Int64) Dec() int64 { - return i.Sub(1) -} - -// CAS is an atomic compare-and-swap. -func (i *Int64) CAS(old, new int64) bool { - return atomic.CompareAndSwapInt64(&i.v, old, new) -} - -// Store atomically stores the passed value. -func (i *Int64) Store(n int64) { - atomic.StoreInt64(&i.v, n) -} - -// Swap atomically swaps the wrapped int64 and returns the old value. -func (i *Int64) Swap(n int64) int64 { - return atomic.SwapInt64(&i.v, n) -} - -// MarshalJSON encodes the wrapped int64 into JSON. -func (i *Int64) MarshalJSON() ([]byte, error) { - return json.Marshal(i.Load()) -} - -// UnmarshalJSON decodes JSON into the wrapped int64. -func (i *Int64) UnmarshalJSON(b []byte) error { - var v int64 - if err := json.Unmarshal(b, &v); err != nil { - return err - } - i.Store(v) - return nil -} - -// String encodes the wrapped value as a string. 
-func (i *Int64) String() string { - v := i.Load() - return strconv.FormatInt(int64(v), 10) -} diff --git a/vendor/go.uber.org/atomic/nocmp.go b/vendor/go.uber.org/atomic/nocmp.go deleted file mode 100644 index a8201cb4..00000000 --- a/vendor/go.uber.org/atomic/nocmp.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -// nocmp is an uncomparable struct. Embed this inside another struct to make -// it uncomparable. -// -// type Foo struct { -// nocmp -// // ... -// } -// -// This DOES NOT: -// -// - Disallow shallow copies of structs -// - Disallow comparison of pointers to uncomparable structs -type nocmp [0]func() diff --git a/vendor/go.uber.org/atomic/string.go b/vendor/go.uber.org/atomic/string.go deleted file mode 100644 index 225b7a2b..00000000 --- a/vendor/go.uber.org/atomic/string.go +++ /dev/null @@ -1,54 +0,0 @@ -// @generated Code generated by gen-atomicwrapper. - -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -// String is an atomic type-safe wrapper for string values. -type String struct { - _ nocmp // disallow non-atomic comparison - - v Value -} - -var _zeroString string - -// NewString creates a new String. -func NewString(v string) *String { - x := &String{} - if v != _zeroString { - x.Store(v) - } - return x -} - -// Load atomically loads the wrapped string. 
-func (x *String) Load() string { - if v := x.v.Load(); v != nil { - return v.(string) - } - return _zeroString -} - -// Store atomically stores the passed string. -func (x *String) Store(v string) { - x.v.Store(v) -} diff --git a/vendor/go.uber.org/atomic/string_ext.go b/vendor/go.uber.org/atomic/string_ext.go deleted file mode 100644 index 3a955821..00000000 --- a/vendor/go.uber.org/atomic/string_ext.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -//go:generate bin/gen-atomicwrapper -name=String -type=string -wrapped=Value -file=string.go - -// String returns the wrapped value. -func (s *String) String() string { - return s.Load() -} - -// MarshalText encodes the wrapped string into a textual form. -// -// This makes it encodable as JSON, YAML, XML, and more. -func (s *String) MarshalText() ([]byte, error) { - return []byte(s.Load()), nil -} - -// UnmarshalText decodes text and replaces the wrapped string with it. -// -// This makes it decodable from JSON, YAML, XML, and more. -func (s *String) UnmarshalText(b []byte) error { - s.Store(string(b)) - return nil -} diff --git a/vendor/go.uber.org/atomic/uint32.go b/vendor/go.uber.org/atomic/uint32.go deleted file mode 100644 index a973aba1..00000000 --- a/vendor/go.uber.org/atomic/uint32.go +++ /dev/null @@ -1,102 +0,0 @@ -// @generated Code generated by gen-atomicint. - -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import ( - "encoding/json" - "strconv" - "sync/atomic" -) - -// Uint32 is an atomic wrapper around uint32. -type Uint32 struct { - _ nocmp // disallow non-atomic comparison - - v uint32 -} - -// NewUint32 creates a new Uint32. -func NewUint32(i uint32) *Uint32 { - return &Uint32{v: i} -} - -// Load atomically loads the wrapped value. -func (i *Uint32) Load() uint32 { - return atomic.LoadUint32(&i.v) -} - -// Add atomically adds to the wrapped uint32 and returns the new value. -func (i *Uint32) Add(n uint32) uint32 { - return atomic.AddUint32(&i.v, n) -} - -// Sub atomically subtracts from the wrapped uint32 and returns the new value. -func (i *Uint32) Sub(n uint32) uint32 { - return atomic.AddUint32(&i.v, ^(n - 1)) -} - -// Inc atomically increments the wrapped uint32 and returns the new value. -func (i *Uint32) Inc() uint32 { - return i.Add(1) -} - -// Dec atomically decrements the wrapped uint32 and returns the new value. -func (i *Uint32) Dec() uint32 { - return i.Sub(1) -} - -// CAS is an atomic compare-and-swap. -func (i *Uint32) CAS(old, new uint32) bool { - return atomic.CompareAndSwapUint32(&i.v, old, new) -} - -// Store atomically stores the passed value. -func (i *Uint32) Store(n uint32) { - atomic.StoreUint32(&i.v, n) -} - -// Swap atomically swaps the wrapped uint32 and returns the old value. -func (i *Uint32) Swap(n uint32) uint32 { - return atomic.SwapUint32(&i.v, n) -} - -// MarshalJSON encodes the wrapped uint32 into JSON. -func (i *Uint32) MarshalJSON() ([]byte, error) { - return json.Marshal(i.Load()) -} - -// UnmarshalJSON decodes JSON into the wrapped uint32. -func (i *Uint32) UnmarshalJSON(b []byte) error { - var v uint32 - if err := json.Unmarshal(b, &v); err != nil { - return err - } - i.Store(v) - return nil -} - -// String encodes the wrapped value as a string. -func (i *Uint32) String() string { - v := i.Load() - return strconv.FormatUint(uint64(v), 10) -} diff --git a/vendor/go.uber.org/atomic/uint64.go b/vendor/go.uber.org/atomic/uint64.go deleted file mode 100644 index 3b6c71fd..00000000 --- a/vendor/go.uber.org/atomic/uint64.go +++ /dev/null @@ -1,102 +0,0 @@ -// @generated Code generated by gen-atomicint. - -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import ( - "encoding/json" - "strconv" - "sync/atomic" -) - -// Uint64 is an atomic wrapper around uint64. -type Uint64 struct { - _ nocmp // disallow non-atomic comparison - - v uint64 -} - -// NewUint64 creates a new Uint64. -func NewUint64(i uint64) *Uint64 { - return &Uint64{v: i} -} - -// Load atomically loads the wrapped value. -func (i *Uint64) Load() uint64 { - return atomic.LoadUint64(&i.v) -} - -// Add atomically adds to the wrapped uint64 and returns the new value. -func (i *Uint64) Add(n uint64) uint64 { - return atomic.AddUint64(&i.v, n) -} - -// Sub atomically subtracts from the wrapped uint64 and returns the new value. -func (i *Uint64) Sub(n uint64) uint64 { - return atomic.AddUint64(&i.v, ^(n - 1)) -} - -// Inc atomically increments the wrapped uint64 and returns the new value. -func (i *Uint64) Inc() uint64 { - return i.Add(1) -} - -// Dec atomically decrements the wrapped uint64 and returns the new value. -func (i *Uint64) Dec() uint64 { - return i.Sub(1) -} - -// CAS is an atomic compare-and-swap. -func (i *Uint64) CAS(old, new uint64) bool { - return atomic.CompareAndSwapUint64(&i.v, old, new) -} - -// Store atomically stores the passed value. -func (i *Uint64) Store(n uint64) { - atomic.StoreUint64(&i.v, n) -} - -// Swap atomically swaps the wrapped uint64 and returns the old value. -func (i *Uint64) Swap(n uint64) uint64 { - return atomic.SwapUint64(&i.v, n) -} - -// MarshalJSON encodes the wrapped uint64 into JSON. -func (i *Uint64) MarshalJSON() ([]byte, error) { - return json.Marshal(i.Load()) -} - -// UnmarshalJSON decodes JSON into the wrapped uint64. -func (i *Uint64) UnmarshalJSON(b []byte) error { - var v uint64 - if err := json.Unmarshal(b, &v); err != nil { - return err - } - i.Store(v) - return nil -} - -// String encodes the wrapped value as a string. -func (i *Uint64) String() string { - v := i.Load() - return strconv.FormatUint(uint64(v), 10) -} diff --git a/vendor/go.uber.org/atomic/value.go b/vendor/go.uber.org/atomic/value.go deleted file mode 100644 index 671f3a38..00000000 --- a/vendor/go.uber.org/atomic/value.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import "sync/atomic" - -// Value shadows the type of the same name from sync/atomic -// https://godoc.org/sync/atomic#Value -type Value struct { - atomic.Value - - _ nocmp // disallow non-atomic comparison -} diff --git a/vendor/go.uber.org/multierr/.codecov.yml b/vendor/go.uber.org/multierr/.codecov.yml deleted file mode 100644 index 6d4d1be7..00000000 --- a/vendor/go.uber.org/multierr/.codecov.yml +++ /dev/null @@ -1,15 +0,0 @@ -coverage: - range: 80..100 - round: down - precision: 2 - - status: - project: # measuring the overall project coverage - default: # context, you can create multiple ones with custom titles - enabled: yes # must be yes|true to enable this status - target: 100 # specify the target coverage for each commit status - # option: "auto" (must increase from parent commit or pull request base) - # option: "X%" a static target percentage to hit - if_not_found: success # if parent is not found report status as success, error, or failure - if_ci_failed: error # if ci fails report status as success, error, or failure - diff --git a/vendor/go.uber.org/multierr/.gitignore b/vendor/go.uber.org/multierr/.gitignore deleted file mode 100644 index b9a05e3d..00000000 --- a/vendor/go.uber.org/multierr/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -/vendor -cover.html -cover.out -/bin diff --git a/vendor/go.uber.org/multierr/.travis.yml b/vendor/go.uber.org/multierr/.travis.yml deleted file mode 100644 index 8636ab42..00000000 --- a/vendor/go.uber.org/multierr/.travis.yml +++ /dev/null @@ -1,23 +0,0 @@ -sudo: false -language: go -go_import_path: go.uber.org/multierr - -env: - global: - - GO111MODULE=on - -go: - - oldstable - - stable - -before_install: -- go version - -script: -- | - set -e - make lint - make cover - -after_success: -- bash <(curl -s https://codecov.io/bash) diff --git a/vendor/go.uber.org/multierr/CHANGELOG.md b/vendor/go.uber.org/multierr/CHANGELOG.md deleted file mode 100644 index 6f1db9ef..00000000 --- a/vendor/go.uber.org/multierr/CHANGELOG.md +++ /dev/null @@ -1,60 +0,0 @@ -Releases -======== - -v1.6.0 (2020-09-14) -=================== - -- Actually drop library dependency on development-time tooling. - - -v1.5.0 (2020-02-24) -=================== - -- Drop library dependency on development-time tooling. - - -v1.4.0 (2019-11-04) -=================== - -- Add `AppendInto` function to more ergonomically build errors inside a - loop. - - -v1.3.0 (2019-10-29) -=================== - -- Switch to Go modules. - - -v1.2.0 (2019-09-26) -=================== - -- Support extracting and matching against wrapped errors with `errors.As` - and `errors.Is`. - - -v1.1.0 (2017-06-30) -=================== - -- Added an `Errors(error) []error` function to extract the underlying list of - errors for a multierr error. - - -v1.0.0 (2017-05-31) -=================== - -No changes since v0.2.0. This release is committing to making no breaking -changes to the current API in the 1.X series. - - -v0.2.0 (2017-04-11) -=================== - -- Repeatedly appending to the same error is now faster due to fewer - allocations. 
- - -v0.1.0 (2017-03-31) -=================== - -- Initial release diff --git a/vendor/go.uber.org/multierr/LICENSE.txt b/vendor/go.uber.org/multierr/LICENSE.txt deleted file mode 100644 index 858e0247..00000000 --- a/vendor/go.uber.org/multierr/LICENSE.txt +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2017 Uber Technologies, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/go.uber.org/multierr/Makefile b/vendor/go.uber.org/multierr/Makefile deleted file mode 100644 index 31600440..00000000 --- a/vendor/go.uber.org/multierr/Makefile +++ /dev/null @@ -1,42 +0,0 @@ -# Directory to put `go install`ed binaries in. -export GOBIN ?= $(shell pwd)/bin - -GO_FILES := $(shell \ - find . '(' -path '*/.*' -o -path './vendor' ')' -prune \ - -o -name '*.go' -print | cut -b3-) - -.PHONY: build -build: - go build ./... - -.PHONY: test -test: - go test -race ./... - -.PHONY: gofmt -gofmt: - $(eval FMT_LOG := $(shell mktemp -t gofmt.XXXXX)) - @gofmt -e -s -l $(GO_FILES) > $(FMT_LOG) || true - @[ ! -s "$(FMT_LOG)" ] || (echo "gofmt failed:" | cat - $(FMT_LOG) && false) - -.PHONY: golint -golint: - @cd tools && go install golang.org/x/lint/golint - @$(GOBIN)/golint ./... - -.PHONY: staticcheck -staticcheck: - @cd tools && go install honnef.co/go/tools/cmd/staticcheck - @$(GOBIN)/staticcheck ./... - -.PHONY: lint -lint: gofmt golint staticcheck - -.PHONY: cover -cover: - go test -coverprofile=cover.out -coverpkg=./... -v ./... - go tool cover -html=cover.out -o cover.html - -update-license: - @cd tools && go install go.uber.org/tools/update-license - @$(GOBIN)/update-license $(GO_FILES) diff --git a/vendor/go.uber.org/multierr/README.md b/vendor/go.uber.org/multierr/README.md deleted file mode 100644 index 751bd65e..00000000 --- a/vendor/go.uber.org/multierr/README.md +++ /dev/null @@ -1,23 +0,0 @@ -# multierr [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] - -`multierr` allows combining one or more Go `error`s together. - -## Installation - - go get -u go.uber.org/multierr - -## Status - -Stable: No breaking changes will be made before 2.0. - -------------------------------------------------------------------------------- - -Released under the [MIT License].
- -[MIT License]: LICENSE.txt -[doc-img]: https://godoc.org/go.uber.org/multierr?status.svg -[doc]: https://godoc.org/go.uber.org/multierr -[ci-img]: https://travis-ci.com/uber-go/multierr.svg?branch=master -[cov-img]: https://codecov.io/gh/uber-go/multierr/branch/master/graph/badge.svg -[ci]: https://travis-ci.com/uber-go/multierr -[cov]: https://codecov.io/gh/uber-go/multierr diff --git a/vendor/go.uber.org/multierr/error.go b/vendor/go.uber.org/multierr/error.go deleted file mode 100644 index 5c9b67d5..00000000 --- a/vendor/go.uber.org/multierr/error.go +++ /dev/null @@ -1,449 +0,0 @@ -// Copyright (c) 2019 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Package multierr allows combining one or more errors together. -// -// Overview -// -// Errors can be combined with the use of the Combine function. -// -// multierr.Combine( -// reader.Close(), -// writer.Close(), -// conn.Close(), -// ) -// -// If only two errors are being combined, the Append function may be used -// instead. -// -// err = multierr.Append(reader.Close(), writer.Close()) -// -// This makes it possible to record resource cleanup failures from deferred -// blocks with the help of named return values. -// -// func sendRequest(req Request) (err error) { -// conn, err := openConnection() -// if err != nil { -// return err -// } -// defer func() { -// err = multierr.Append(err, conn.Close()) -// }() -// // ... -// } -// -// The underlying list of errors for a returned error object may be retrieved -// with the Errors function. -// -// errors := multierr.Errors(err) -// if len(errors) > 0 { -// fmt.Println("The following errors occurred:", errors) -// } -// -// Advanced Usage -// -// Errors returned by Combine and Append MAY implement the following -// interface. -// -// type errorGroup interface { -// // Returns a slice containing the underlying list of errors. -// // -// // This slice MUST NOT be modified by the caller. -// Errors() []error -// } -// -// Note that if you need access to the list of errors behind a multierr error, you -// should prefer using the Errors function. That said, if you need cheap -// read-only access to the underlying errors slice, you can attempt to cast -// the error to this interface. You MUST handle the failure case gracefully -// because errors returned by Combine and Append are not guaranteed to -// implement this interface.
-// -// var errors []error -// group, ok := err.(errorGroup) -// if ok { -// errors = group.Errors() -// } else { -// errors = []error{err} -// } -package multierr // import "go.uber.org/multierr" - -import ( - "bytes" - "fmt" - "io" - "strings" - "sync" - - "go.uber.org/atomic" -) - -var ( - // Separator for single-line error messages. - _singlelineSeparator = []byte("; ") - - // Prefix for multi-line messages - _multilinePrefix = []byte("the following errors occurred:") - - // Prefix for the first and following lines of an item in a list of - // multi-line error messages. - // - // For example, if a single item is: - // - // foo - // bar - // - // It will become, - // - // - foo - // bar - _multilineSeparator = []byte("\n - ") - _multilineIndent = []byte(" ") -) - -// _bufferPool is a pool of bytes.Buffers. -var _bufferPool = sync.Pool{ - New: func() interface{} { - return &bytes.Buffer{} - }, -} - -type errorGroup interface { - Errors() []error -} - -// Errors returns a slice containing zero or more errors that the supplied -// error is composed of. If the error is nil, a nil slice is returned. -// -// err := multierr.Append(r.Close(), w.Close()) -// errors := multierr.Errors(err) -// -// If the error is not composed of other errors, the returned slice contains -// just the error that was passed in. -// -// Callers of this function are free to modify the returned slice. -func Errors(err error) []error { - if err == nil { - return nil - } - - // Note that we're casting to multiError, not errorGroup. Our contract is - // that returned errors MAY implement errorGroup. Errors, however, only - // has special behavior for multierr-specific error objects. - // - // This behavior can be expanded in the future but I think it's prudent to - // start with as little as possible in terms of contract and possibility - // of misuse. - eg, ok := err.(*multiError) - if !ok { - return []error{err} - } - - errors := eg.Errors() - result := make([]error, len(errors)) - copy(result, errors) - return result -} - -// multiError is an error that holds one or more errors. -// -// An instance of this is guaranteed to be non-empty and flattened. That is, -// none of the errors inside multiError are other multiErrors. -// -// multiError formats to a semi-colon delimited list of error messages with -// %v and with a more readable multi-line format with %+v. -type multiError struct { - copyNeeded atomic.Bool - errors []error -} - -var _ errorGroup = (*multiError)(nil) - -// Errors returns the list of underlying errors. -// -// This slice MUST NOT be modified. 
-func (merr *multiError) Errors() []error { - if merr == nil { - return nil - } - return merr.errors -} - -func (merr *multiError) Error() string { - if merr == nil { - return "" - } - - buff := _bufferPool.Get().(*bytes.Buffer) - buff.Reset() - - merr.writeSingleline(buff) - - result := buff.String() - _bufferPool.Put(buff) - return result -} - -func (merr *multiError) Format(f fmt.State, c rune) { - if c == 'v' && f.Flag('+') { - merr.writeMultiline(f) - } else { - merr.writeSingleline(f) - } -} - -func (merr *multiError) writeSingleline(w io.Writer) { - first := true - for _, item := range merr.errors { - if first { - first = false - } else { - w.Write(_singlelineSeparator) - } - io.WriteString(w, item.Error()) - } -} - -func (merr *multiError) writeMultiline(w io.Writer) { - w.Write(_multilinePrefix) - for _, item := range merr.errors { - w.Write(_multilineSeparator) - writePrefixLine(w, _multilineIndent, fmt.Sprintf("%+v", item)) - } -} - -// Writes s to the writer with the given prefix added before each line after -// the first. -func writePrefixLine(w io.Writer, prefix []byte, s string) { - first := true - for len(s) > 0 { - if first { - first = false - } else { - w.Write(prefix) - } - - idx := strings.IndexByte(s, '\n') - if idx < 0 { - idx = len(s) - 1 - } - - io.WriteString(w, s[:idx+1]) - s = s[idx+1:] - } -} - -type inspectResult struct { - // Number of top-level non-nil errors - Count int - - // Total number of errors including multiErrors - Capacity int - - // Index of the first non-nil error in the list. Value is meaningless if - // Count is zero. - FirstErrorIdx int - - // Whether the list contains at least one multiError - ContainsMultiError bool -} - -// Inspects the given slice of errors so that we can efficiently allocate -// space for it. -func inspect(errors []error) (res inspectResult) { - first := true - for i, err := range errors { - if err == nil { - continue - } - - res.Count++ - if first { - first = false - res.FirstErrorIdx = i - } - - if merr, ok := err.(*multiError); ok { - res.Capacity += len(merr.errors) - res.ContainsMultiError = true - } else { - res.Capacity++ - } - } - return -} - -// fromSlice converts the given list of errors into a single error. -func fromSlice(errors []error) error { - res := inspect(errors) - switch res.Count { - case 0: - return nil - case 1: - // only one non-nil entry - return errors[res.FirstErrorIdx] - case len(errors): - if !res.ContainsMultiError { - // already flat - return &multiError{errors: errors} - } - } - - nonNilErrs := make([]error, 0, res.Capacity) - for _, err := range errors[res.FirstErrorIdx:] { - if err == nil { - continue - } - - if nested, ok := err.(*multiError); ok { - nonNilErrs = append(nonNilErrs, nested.errors...) - } else { - nonNilErrs = append(nonNilErrs, err) - } - } - - return &multiError{errors: nonNilErrs} -} - -// Combine combines the passed errors into a single error. -// -// If zero arguments were passed or if all items are nil, a nil error is -// returned. -// -// Combine(nil, nil) // == nil -// -// If only a single error was passed, it is returned as-is. -// -// Combine(err) // == err -// -// Combine skips over nil arguments so this function may be used to combine -// together errors from operations that fail independently of each other. -// -// multierr.Combine( -// reader.Close(), -// writer.Close(), -// pipe.Close(), -// ) -// -// If any of the passed errors is a multierr error, it will be flattened along -// with the other errors. 
-// -// multierr.Combine(multierr.Combine(err1, err2), err3) -// // is the same as -// multierr.Combine(err1, err2, err3) -// -// The returned error formats into a readable multi-line error message if -// formatted with %+v. -// -// fmt.Sprintf("%+v", multierr.Combine(err1, err2)) -func Combine(errors ...error) error { - return fromSlice(errors) -} - -// Append appends the given errors together. Either value may be nil. -// -// This function is a specialization of Combine for the common case where -// there are only two errors. -// -// err = multierr.Append(reader.Close(), writer.Close()) -// -// The following pattern may also be used to record failure of deferred -// operations without losing information about the original error. -// -// func doSomething(..) (err error) { -// f := acquireResource() -// defer func() { -// err = multierr.Append(err, f.Close()) -// }() -// } -func Append(left error, right error) error { - switch { - case left == nil: - return right - case right == nil: - return left - } - - if _, ok := right.(*multiError); !ok { - if l, ok := left.(*multiError); ok && !l.copyNeeded.Swap(true) { - // Common case where the error on the left is constantly being - // appended to. - errs := append(l.errors, right) - return &multiError{errors: errs} - } else if !ok { - // Both errors are single errors. - return &multiError{errors: []error{left, right}} - } - } - - // Either right or both, left and right, are multiErrors. Rely on usual - // expensive logic. - errors := [2]error{left, right} - return fromSlice(errors[0:]) - } - -// AppendInto appends an error into the destination of an error pointer and -// returns whether the error being appended was non-nil. -// -// var err error -// multierr.AppendInto(&err, r.Close()) -// multierr.AppendInto(&err, w.Close()) -// -// The above is equivalent to, -// -// err := multierr.Append(r.Close(), w.Close()) -// -// As AppendInto reports whether the provided error was non-nil, it may be -// used to build a multierr error in a loop more ergonomically. For example: -// -// var err error -// for line := range lines { -// var item Item -// if multierr.AppendInto(&err, parse(line, &item)) { -// continue -// } -// items = append(items, item) -// } -// -// Compare this with a version that relies solely on Append: -// -// var err error -// for line := range lines { -// var item Item -// if parseErr := parse(line, &item); parseErr != nil { -// err = multierr.Append(err, parseErr) -// continue -// } -// items = append(items, item) -// } -func AppendInto(into *error, err error) (errored bool) { - if into == nil { - // We panic if 'into' is nil. This is not documented above - // because suggesting that the pointer must be non-nil may - // confuse users into thinking that the error that it points - // to must be non-nil.
- panic("misuse of multierr.AppendInto: into pointer must not be nil") - } - - if err == nil { - return false - } - *into = Append(*into, err) - return true -} diff --git a/vendor/go.uber.org/multierr/glide.yaml b/vendor/go.uber.org/multierr/glide.yaml deleted file mode 100644 index 6ef084ec..00000000 --- a/vendor/go.uber.org/multierr/glide.yaml +++ /dev/null @@ -1,8 +0,0 @@ -package: go.uber.org/multierr -import: -- package: go.uber.org/atomic - version: ^1 -testImport: -- package: github.com/stretchr/testify - subpackages: - - assert diff --git a/vendor/go.uber.org/multierr/go113.go b/vendor/go.uber.org/multierr/go113.go deleted file mode 100644 index 264b0eac..00000000 --- a/vendor/go.uber.org/multierr/go113.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright (c) 2019 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// +build go1.13 - -package multierr - -import "errors" - -// As attempts to find the first error in the error list that matches the type -// of the value that target points to. -// -// This function allows errors.As to traverse the values stored on the -// multierr error. -func (merr *multiError) As(target interface{}) bool { - for _, err := range merr.Errors() { - if errors.As(err, target) { - return true - } - } - return false -} - -// Is attempts to match the provided error against errors in the error list. -// -// This function allows errors.Is to traverse the values stored on the -// multierr error. 
-func (merr *multiError) Is(target error) bool { - for _, err := range merr.Errors() { - if errors.Is(err, target) { - return true - } - } - return false -} diff --git a/vendor/go.uber.org/zap/.codecov.yml b/vendor/go.uber.org/zap/.codecov.yml deleted file mode 100644 index 8e5ca7d3..00000000 --- a/vendor/go.uber.org/zap/.codecov.yml +++ /dev/null @@ -1,17 +0,0 @@ -coverage: - range: 80..100 - round: down - precision: 2 - - status: - project: # measuring the overall project coverage - default: # context, you can create multiple ones with custom titles - enabled: yes # must be yes|true to enable this status - target: 95% # specify the target coverage for each commit status - # option: "auto" (must increase from parent commit or pull request base) - # option: "X%" a static target percentage to hit - if_not_found: success # if parent is not found report status as success, error, or failure - if_ci_failed: error # if ci fails report status as success, error, or failure -ignore: - - internal/readme/readme.go - diff --git a/vendor/go.uber.org/zap/.gitignore b/vendor/go.uber.org/zap/.gitignore deleted file mode 100644 index da9d9d00..00000000 --- a/vendor/go.uber.org/zap/.gitignore +++ /dev/null @@ -1,32 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test -vendor - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof -*.pprof -*.out -*.log - -/bin -cover.out -cover.html diff --git a/vendor/go.uber.org/zap/.readme.tmpl b/vendor/go.uber.org/zap/.readme.tmpl deleted file mode 100644 index 3154a1e6..00000000 --- a/vendor/go.uber.org/zap/.readme.tmpl +++ /dev/null @@ -1,109 +0,0 @@ -# :zap: zap [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] - -Blazing fast, structured, leveled logging in Go. - -## Installation - -`go get -u go.uber.org/zap` - -Note that zap only supports the two most recent minor versions of Go. - -## Quick Start - -In contexts where performance is nice, but not critical, use the -`SugaredLogger`. It's 4-10x faster than other structured logging -packages and includes both structured and `printf`-style APIs. - -```go -logger, _ := zap.NewProduction() -defer logger.Sync() // flushes buffer, if any -sugar := logger.Sugar() -sugar.Infow("failed to fetch URL", - // Structured context as loosely typed key-value pairs. - "url", url, - "attempt", 3, - "backoff", time.Second, -) -sugar.Infof("Failed to fetch URL: %s", url) -``` - -When performance and type safety are critical, use the `Logger`. It's even -faster than the `SugaredLogger` and allocates far less, but it only supports -structured logging. - -```go -logger, _ := zap.NewProduction() -defer logger.Sync() -logger.Info("failed to fetch URL", - // Structured context as strongly typed Field values. - zap.String("url", url), - zap.Int("attempt", 3), - zap.Duration("backoff", time.Second), -) -``` - -See the [documentation][doc] and [FAQ](FAQ.md) for more details. - -## Performance - -For applications that log in the hot path, reflection-based serialization and -string formatting are prohibitively expensive — they're CPU-intensive -and make many small allocations. Put differently, using `encoding/json` and -`fmt.Fprintf` to log tons of `interface{}`s makes your application slow. - -Zap takes a different approach. 
It includes a reflection-free, zero-allocation -JSON encoder, and the base `Logger` strives to avoid serialization overhead -and allocations wherever possible. By building the high-level `SugaredLogger` -on that foundation, zap lets users *choose* when they need to count every -allocation and when they'd prefer a more familiar, loosely typed API. - -As measured by its own [benchmarking suite][], not only is zap more performant -than comparable structured logging packages — it's also faster than the -standard library. Like all benchmarks, take these with a grain of salt.[1](#footnote-versions) - -Log a message and 10 fields: - -{{.BenchmarkAddingFields}} - -Log a message with a logger that already has 10 fields of context: - -{{.BenchmarkAccumulatedContext}} - -Log a static string, without any context or `printf`-style templating: - -{{.BenchmarkWithoutFields}} - -## Development Status: Stable - -All APIs are finalized, and no breaking changes will be made in the 1.x series -of releases. Users of semver-aware dependency management systems should pin -zap to `^1`. - -## Contributing - -We encourage and support an active, healthy community of contributors — -including you! Details are in the [contribution guide](CONTRIBUTING.md) and -the [code of conduct](CODE_OF_CONDUCT.md). The zap maintainers keep an eye on -issues and pull requests, but you can also report any negative conduct to -oss-conduct@uber.com. That email list is a private, safe space; even the zap -maintainers don't have access, so don't hesitate to hold us to a high -standard. - -
-<hr> - -Released under the [MIT License](LICENSE.txt). - -<sup id="footnote-versions">1</sup> In particular, keep in mind that we may be -benchmarking against slightly older versions of other packages. Versions are -pinned in zap's [glide.lock][] file. [↩](#anchor-versions) - -[doc-img]: https://godoc.org/go.uber.org/zap?status.svg -[doc]: https://godoc.org/go.uber.org/zap -[ci-img]: https://travis-ci.com/uber-go/zap.svg?branch=master -[ci]: https://travis-ci.com/uber-go/zap -[cov-img]: https://codecov.io/gh/uber-go/zap/branch/master/graph/badge.svg -[cov]: https://codecov.io/gh/uber-go/zap -[benchmarking suite]: https://github.com/uber-go/zap/tree/master/benchmarks -[glide.lock]: https://github.com/uber-go/zap/blob/master/glide.lock - diff --git a/vendor/go.uber.org/zap/CHANGELOG.md b/vendor/go.uber.org/zap/CHANGELOG.md deleted file mode 100644 index 794ee303..00000000 --- a/vendor/go.uber.org/zap/CHANGELOG.md +++ /dev/null @@ -1,516 +0,0 @@ -# Changelog -All notable changes to this project will be documented in this file. - -This project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html). - -## 1.19.1 (8 Sep 2021) - -### Fixed -* [#1001][]: JSON: Fix complex number encoding with negative imaginary part. Thanks to @hemantjadon. -* [#1003][]: JSON: Fix inaccurate precision when encoding float32. - -[#1001]: https://github.com/uber-go/zap/pull/1001 -[#1003]: https://github.com/uber-go/zap/pull/1003 - -## 1.19.0 (9 Aug 2021) - -Enhancements: -* [#975][]: Avoid panicking in Sampler core if the level is out of bounds. -* [#984][]: Reduce the size of BufferedWriteSyncer by aligning the fields - better. - -[#975]: https://github.com/uber-go/zap/pull/975 -[#984]: https://github.com/uber-go/zap/pull/984 - -Thanks to @lancoLiu and @thockin for their contributions to this release. - -## 1.18.1 (28 Jun 2021) - -Bugfixes: -* [#974][]: Fix nil dereference in logger constructed by `zap.NewNop`. - -[#974]: https://github.com/uber-go/zap/pull/974 - -## 1.18.0 (28 Jun 2021) - -Enhancements: -* [#961][]: Add `zapcore.BufferedWriteSyncer`, a new `WriteSyncer` that buffers - messages in-memory and flushes them periodically. -* [#971][]: Add `zapio.Writer` to use a Zap logger as an `io.Writer`. -* [#897][]: Add `zap.WithClock` option to control the source of time via the - new `zapcore.Clock` interface. -* [#949][]: Avoid panicking in `zap.SugaredLogger` when arguments of `*w` - methods don't match expectations. -* [#943][]: Add support for filtering by level or arbitrary matcher function to - `zaptest/observer`. -* [#691][]: Comply with `io.StringWriter` and `io.ByteWriter` in Zap's - `buffer.Buffer`. - -Thanks to @atrn0, @ernado, @heyanfu, @hnlq715, @zchee -for their contributions to this release. - -[#691]: https://github.com/uber-go/zap/pull/691 -[#897]: https://github.com/uber-go/zap/pull/897 -[#943]: https://github.com/uber-go/zap/pull/943 -[#949]: https://github.com/uber-go/zap/pull/949 -[#961]: https://github.com/uber-go/zap/pull/961 -[#971]: https://github.com/uber-go/zap/pull/971 - -## 1.17.0 (25 May 2021) - -Bugfixes: -* [#867][]: Encode `<nil>` for nil `error` instead of a panic. -* [#931][], [#936][]: Update minimum version constraints to address - vulnerabilities in dependencies. - -Enhancements: -* [#865][]: Improve alignment of fields of the Logger struct, reducing its - size from 96 to 80 bytes. -* [#881][]: Support `grpclog.LoggerV2` in zapgrpc. -* [#903][]: Support URL-encoded POST requests to the AtomicLevel HTTP handler - with the `application/x-www-form-urlencoded` content type.
-* [#912][]: Support multi-field encoding with `zap.Inline`. -* [#913][]: Speed up SugaredLogger for calls with a single string. -* [#928][]: Add support for filtering by field name to `zaptest/observer`. - -Thanks to @ash2k, @FMLS, @jimmystewpot, @Oncilla, @tsoslow, @tylitianrui, @withshubh, and @wziww for their contributions to this release. - -## 1.16.0 (1 Sep 2020) - -Bugfixes: -* [#828][]: Fix missing newline in IncreaseLevel error messages. -* [#835][]: Fix panic in JSON encoder when encoding times or durations - without specifying a time or duration encoder. -* [#843][]: Honor CallerSkip when taking stack traces. -* [#862][]: Fix the default file permissions to use `0666` and rely on the umask instead. -* [#854][]: Encode `<nil>` for nil `Stringer` instead of a panic error log. - -Enhancements: -* [#629][]: Added `zapcore.TimeEncoderOfLayout` to easily create time encoders - for custom layouts. -* [#697][]: Added support for a configurable delimiter in the console encoder. -* [#852][]: Optimize console encoder by pooling the underlying JSON encoder. -* [#844][]: Add ability to include the calling function as part of logs. -* [#843][]: Add `StackSkip` for including truncated stacks as a field. -* [#861][]: Add options to customize Fatal behaviour for better testability. - -Thanks to @SteelPhase, @tmshn, @lixingwang, @wyxloading, @moul, @segevfiner, @andy-retailnext and @jcorbin for their contributions to this release. - -## 1.15.0 (23 Apr 2020) - -Bugfixes: -* [#804][]: Fix handling of `Time` values out of `UnixNano` range. -* [#812][]: Fix `IncreaseLevel` being reset after a call to `With`. - -Enhancements: -* [#806][]: Add `WithCaller` option to supersede the `AddCaller` option. This - allows disabling annotation of log entries with caller information if - previously enabled with `AddCaller`. -* [#813][]: Deprecate `NewSampler` constructor in favor of - `NewSamplerWithOptions` which supports a `SamplerHook` option. This option - adds support for monitoring sampling decisions through a hook. - -Thanks to @danielbprice for their contributions to this release. - -## 1.14.1 (14 Mar 2020) - -Bugfixes: -* [#791][]: Fix panic on attempting to build a logger with an invalid Config. -* [#795][]: Vendoring Zap with `go mod vendor` no longer includes Zap's - development-time dependencies. -* [#799][]: Fix issue introduced in 1.14.0 that caused invalid JSON output to - be generated for arrays of `time.Time` objects when using string-based time - formats. - -Thanks to @YashishDua for their contributions to this release. - -## 1.14.0 (20 Feb 2020) - -Enhancements: -* [#771][]: Optimize calls for disabled log levels. -* [#773][]: Add millisecond duration encoder. -* [#775][]: Add option to increase the level of a logger. -* [#786][]: Optimize time formatters using `Time.AppendFormat` where possible. - -Thanks to @caibirdme for their contributions to this release. - -## 1.13.0 (13 Nov 2019) - -Enhancements: -* [#758][]: Add `Intp`, `Stringp`, and other similar `*p` field constructors - to log pointers to primitives with support for `nil` values. - -Thanks to @jbizzle for their contributions to this release. - -## 1.12.0 (29 Oct 2019) - -Enhancements: -* [#751][]: Migrate to Go modules. - -## 1.11.0 (21 Oct 2019) - -Enhancements: -* [#725][]: Add `zapcore.OmitKey` to omit keys in an `EncoderConfig`. -* [#736][]: Add `RFC3339` and `RFC3339Nano` time encoders. - -Thanks to @juicemia, @uhthomas for their contributions to this release.
- -## 1.10.0 (29 Apr 2019) - -Bugfixes: -* [#657][]: Fix `MapObjectEncoder.AppendByteString` not adding value as a - string. -* [#706][]: Fix incorrect call depth to determine caller in Go 1.12. - -Enhancements: -* [#610][]: Add `zaptest.WrapOptions` to wrap `zap.Option` for creating test - loggers. -* [#675][]: Don't panic when encoding a String field. -* [#704][]: Disable HTML escaping for JSON objects encoded using the - reflect-based encoder. - -Thanks to @iaroslav-ciupin, @lelenanam, @joa, @NWilson for their contributions -to this release. - -## v1.9.1 (06 Aug 2018) - -Bugfixes: - -* [#614][]: MapObjectEncoder should not ignore empty slices. - -## v1.9.0 (19 Jul 2018) - -Enhancements: -* [#602][]: Reduce number of allocations when logging with reflection. -* [#572][], [#606][]: Expose a registry for third-party logging sinks. - -Thanks to @nfarah86, @AlekSi, @JeanMertz, @philippgille, @etsangsplk, and -@dimroc for their contributions to this release. - -## v1.8.0 (13 Apr 2018) - -Enhancements: -* [#508][]: Make log level configurable when redirecting the standard - library's logger. -* [#518][]: Add a logger that writes to a `*testing.TB`. -* [#577][]: Add a top-level alias for `zapcore.Field` to clean up GoDoc. - -Bugfixes: -* [#574][]: Add a missing import comment to `go.uber.org/zap/buffer`. - -Thanks to @DiSiqueira and @djui for their contributions to this release. - -## v1.7.1 (25 Sep 2017) - -Bugfixes: -* [#504][]: Store strings when using AddByteString with the map encoder. - -## v1.7.0 (21 Sep 2017) - -Enhancements: - -* [#487][]: Add `NewStdLogAt`, which extends `NewStdLog` by allowing the user - to specify the level of the logged messages. - -## v1.6.0 (30 Aug 2017) - -Enhancements: - -* [#491][]: Omit zap stack frames from stacktraces. -* [#490][]: Add a `ContextMap` method to observer logs for simpler - field validation in tests. - -## v1.5.0 (22 Jul 2017) - -Enhancements: - -* [#460][] and [#470][]: Support errors produced by `go.uber.org/multierr`. -* [#465][]: Support user-supplied encoders for logger names. - -Bugfixes: - -* [#477][]: Fix a bug that incorrectly truncated deep stacktraces. - -Thanks to @richard-tunein and @pavius for their contributions to this release. - -## v1.4.1 (08 Jun 2017) - -This release fixes two bugs. - -Bugfixes: - -* [#435][]: Support a variety of case conventions when unmarshaling levels. -* [#444][]: Fix a panic in the observer. - -## v1.4.0 (12 May 2017) - -This release adds a few small features and is fully backward-compatible. - -Enhancements: - -* [#424][]: Add a `LineEnding` field to `EncoderConfig`, allowing users to - override the Unix-style default. -* [#425][]: Preserve time zones when logging times. -* [#431][]: Make `zap.AtomicLevel` implement `fmt.Stringer`, which makes a - variety of operations a bit simpler. - -## v1.3.0 (25 Apr 2017) - -This release adds an enhancement to zap's testing helpers as well as the -ability to marshal an AtomicLevel. It is fully backward-compatible. - -Enhancements: - -* [#415][]: Add a substring-filtering helper to zap's observer. This is - particularly useful when testing the `SugaredLogger`. -* [#416][]: Make `AtomicLevel` implement `encoding.TextMarshaler`. - -## v1.2.0 (13 Apr 2017) - -This release adds a gRPC compatibility wrapper. It is fully backward-compatible. - -Enhancements: - -* [#402][]: Add a `zapgrpc` package that wraps zap's Logger and implements - `grpclog.Logger`. - -## v1.1.0 (31 Mar 2017) - -This release fixes two bugs and adds some enhancements to zap's testing helpers. 
-It is fully backward-compatible. - -Bugfixes: - -* [#385][]: Fix caller path trimming on Windows. -* [#396][]: Fix a panic when attempting to use non-existent directories with - zap's configuration struct. - -Enhancements: - -* [#386][]: Add filtering helpers to zaptest's observing logger. - -Thanks to @moitias for contributing to this release. - -## v1.0.0 (14 Mar 2017) - -This is zap's first stable release. All exported APIs are now final, and no -further breaking changes will be made in the 1.x release series. Anyone using a -semver-aware dependency manager should now pin to `^1`. - -Breaking changes: - -* [#366][]: Add byte-oriented APIs to encoders to log UTF-8 encoded text without - casting from `[]byte` to `string`. -* [#364][]: To support buffering outputs, add `Sync` methods to `zapcore.Core`, - `zap.Logger`, and `zap.SugaredLogger`. -* [#371][]: Rename the `testutils` package to `zaptest`, which is less likely to - clash with other testing helpers. - -Bugfixes: - -* [#362][]: Make the ISO8601 time formatters fixed-width, which is friendlier - for tab-separated console output. -* [#369][]: Remove the automatic locks in `zapcore.NewCore`, which allows zap to - work with concurrency-safe `WriteSyncer` implementations. -* [#347][]: Stop reporting errors when trying to `fsync` standard out on Linux - systems. -* [#373][]: Report the correct caller from zap's standard library - interoperability wrappers. - -Enhancements: - -* [#348][]: Add a registry allowing third-party encodings to work with zap's - built-in `Config`. -* [#327][]: Make the representation of logger callers configurable (like times, - levels, and durations). -* [#376][]: Allow third-party encoders to use their own buffer pools, which - removes the last performance advantage that zap's encoders have over plugins. -* [#346][]: Add `CombineWriteSyncers`, a convenience function to tee multiple - `WriteSyncer`s and lock the result. -* [#365][]: Make zap's stacktraces compatible with mid-stack inlining (coming in - Go 1.9). -* [#372][]: Export zap's observing logger as `zaptest/observer`. This makes it - easier for particularly punctilious users to unit test their application's - logging. - -Thanks to @suyash, @htrendev, @flisky, @Ulexus, and @skipor for their -contributions to this release. - -## v1.0.0-rc.3 (7 Mar 2017) - -This is the third release candidate for zap's stable release. There are no -breaking changes. - -Bugfixes: - -* [#339][]: Byte slices passed to `zap.Any` are now correctly treated as binary blobs - rather than `[]uint8`. - -Enhancements: - -* [#307][]: Users can opt into colored output for log levels. -* [#353][]: In addition to hijacking the output of the standard library's - package-global logging functions, users can now construct a zap-backed - `log.Logger` instance. -* [#311][]: Frames from common runtime functions and some of zap's internal - machinery are now omitted from stacktraces. - -Thanks to @ansel1 and @suyash for their contributions to this release. - -## v1.0.0-rc.2 (21 Feb 2017) - -This is the second release candidate for zap's stable release. It includes two -breaking changes. - -Breaking changes: - -* [#316][]: Zap's global loggers are now fully concurrency-safe - (previously, users had to ensure that `ReplaceGlobals` was called before the - loggers were in use). However, they must now be accessed via the `L()` and - `S()` functions. Users can update their projects with - - ``` - gofmt -r "zap.L -> zap.L()" -w . - gofmt -r "zap.S -> zap.S()" -w . 
- ``` -* [#309][] and [#317][]: RC1 was mistakenly shipped with invalid - JSON and YAML struct tags on all config structs. This release fixes the tags - and adds static analysis to prevent similar bugs in the future. - -Bugfixes: - -* [#321][]: Redirecting the standard library's `log` output now - correctly reports the logger's caller. - -Enhancements: - -* [#325][] and [#333][]: Zap now transparently supports non-standard, rich - errors like those produced by `github.com/pkg/errors`. -* [#326][]: Though `New(nil)` continues to return a no-op logger, `NewNop()` is - now preferred. Users can update their projects with `gofmt -r 'zap.New(nil) -> - zap.NewNop()' -w .`. -* [#300][]: Incorrectly importing zap as `github.com/uber-go/zap` now returns a - more informative error. - -Thanks to @skipor and @chapsuk for their contributions to this release. - -## v1.0.0-rc.1 (14 Feb 2017) - -This is the first release candidate for zap's stable release. There are multiple -breaking changes and improvements from the pre-release version. Most notably: - -* **Zap's import path is now "go.uber.org/zap"** — all users will - need to update their code. -* User-facing types and functions remain in the `zap` package. Code relevant - largely to extension authors is now in the `zapcore` package. -* The `zapcore.Core` type makes it easy for third-party packages to use zap's - internals but provide a different user-facing API. -* `Logger` is now a concrete type instead of an interface. -* A less verbose (though slower) logging API is included by default. -* Package-global loggers `L` and `S` are included. -* A human-friendly console encoder is included. -* A declarative config struct allows common logger configurations to be managed - as configuration instead of code. -* Sampling is more accurate, and doesn't depend on the standard library's shared - timer heap. - -## v0.1.0-beta.1 (6 Feb 2017) - -This is a minor version, tagged to allow users to pin to the pre-1.0 APIs and -upgrade at their leisure. Since this is the first tagged release, there are no -backward compatibility concerns and all functionality is new. - -Early zap adopters should pin to the 0.1.x minor version until they're ready to -upgrade to the upcoming stable release. 
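A minimal sketch of the global-logger idiom that the v1.0.0-rc.2 `gofmt` rewrites above migrate code onto. This example is an illustration, not part of the vendored changelog; `zap.NewExample`, `zap.ReplaceGlobals`, `zap.L`, and `zap.S` are the relevant zap APIs:

```go
package main

import "go.uber.org/zap"

func main() {
	logger := zap.NewExample()
	defer logger.Sync()

	// ReplaceGlobals swaps the package-global loggers atomically and
	// returns a function that restores the previous globals.
	undo := zap.ReplaceGlobals(logger)
	defer undo()

	// After rc.2, the globals are reached through the L() and S()
	// accessor functions instead of the old zap.L / zap.S variables.
	zap.L().Info("structured logging via the global Logger")
	zap.S().Infow("sugared logging via the global SugaredLogger", "attempt", 3)
}
```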
- -[#316]: https://github.com/uber-go/zap/pull/316 -[#309]: https://github.com/uber-go/zap/pull/309 -[#317]: https://github.com/uber-go/zap/pull/317 -[#321]: https://github.com/uber-go/zap/pull/321 -[#325]: https://github.com/uber-go/zap/pull/325 -[#333]: https://github.com/uber-go/zap/pull/333 -[#326]: https://github.com/uber-go/zap/pull/326 -[#300]: https://github.com/uber-go/zap/pull/300 -[#339]: https://github.com/uber-go/zap/pull/339 -[#307]: https://github.com/uber-go/zap/pull/307 -[#353]: https://github.com/uber-go/zap/pull/353 -[#311]: https://github.com/uber-go/zap/pull/311 -[#366]: https://github.com/uber-go/zap/pull/366 -[#364]: https://github.com/uber-go/zap/pull/364 -[#371]: https://github.com/uber-go/zap/pull/371 -[#362]: https://github.com/uber-go/zap/pull/362 -[#369]: https://github.com/uber-go/zap/pull/369 -[#347]: https://github.com/uber-go/zap/pull/347 -[#373]: https://github.com/uber-go/zap/pull/373 -[#348]: https://github.com/uber-go/zap/pull/348 -[#327]: https://github.com/uber-go/zap/pull/327 -[#376]: https://github.com/uber-go/zap/pull/376 -[#346]: https://github.com/uber-go/zap/pull/346 -[#365]: https://github.com/uber-go/zap/pull/365 -[#372]: https://github.com/uber-go/zap/pull/372 -[#385]: https://github.com/uber-go/zap/pull/385 -[#396]: https://github.com/uber-go/zap/pull/396 -[#386]: https://github.com/uber-go/zap/pull/386 -[#402]: https://github.com/uber-go/zap/pull/402 -[#415]: https://github.com/uber-go/zap/pull/415 -[#416]: https://github.com/uber-go/zap/pull/416 -[#424]: https://github.com/uber-go/zap/pull/424 -[#425]: https://github.com/uber-go/zap/pull/425 -[#431]: https://github.com/uber-go/zap/pull/431 -[#435]: https://github.com/uber-go/zap/pull/435 -[#444]: https://github.com/uber-go/zap/pull/444 -[#477]: https://github.com/uber-go/zap/pull/477 -[#465]: https://github.com/uber-go/zap/pull/465 -[#460]: https://github.com/uber-go/zap/pull/460 -[#470]: https://github.com/uber-go/zap/pull/470 -[#487]: https://github.com/uber-go/zap/pull/487 -[#490]: https://github.com/uber-go/zap/pull/490 -[#491]: https://github.com/uber-go/zap/pull/491 -[#504]: https://github.com/uber-go/zap/pull/504 -[#508]: https://github.com/uber-go/zap/pull/508 -[#518]: https://github.com/uber-go/zap/pull/518 -[#577]: https://github.com/uber-go/zap/pull/577 -[#574]: https://github.com/uber-go/zap/pull/574 -[#602]: https://github.com/uber-go/zap/pull/602 -[#572]: https://github.com/uber-go/zap/pull/572 -[#606]: https://github.com/uber-go/zap/pull/606 -[#614]: https://github.com/uber-go/zap/pull/614 -[#657]: https://github.com/uber-go/zap/pull/657 -[#706]: https://github.com/uber-go/zap/pull/706 -[#610]: https://github.com/uber-go/zap/pull/610 -[#675]: https://github.com/uber-go/zap/pull/675 -[#704]: https://github.com/uber-go/zap/pull/704 -[#725]: https://github.com/uber-go/zap/pull/725 -[#736]: https://github.com/uber-go/zap/pull/736 -[#751]: https://github.com/uber-go/zap/pull/751 -[#758]: https://github.com/uber-go/zap/pull/758 -[#771]: https://github.com/uber-go/zap/pull/771 -[#773]: https://github.com/uber-go/zap/pull/773 -[#775]: https://github.com/uber-go/zap/pull/775 -[#786]: https://github.com/uber-go/zap/pull/786 -[#791]: https://github.com/uber-go/zap/pull/791 -[#795]: https://github.com/uber-go/zap/pull/795 -[#799]: https://github.com/uber-go/zap/pull/799 -[#804]: https://github.com/uber-go/zap/pull/804 -[#812]: https://github.com/uber-go/zap/pull/812 -[#806]: https://github.com/uber-go/zap/pull/806 -[#813]: https://github.com/uber-go/zap/pull/813 -[#629]: 
https://github.com/uber-go/zap/pull/629 -[#697]: https://github.com/uber-go/zap/pull/697 -[#828]: https://github.com/uber-go/zap/pull/828 -[#835]: https://github.com/uber-go/zap/pull/835 -[#843]: https://github.com/uber-go/zap/pull/843 -[#844]: https://github.com/uber-go/zap/pull/844 -[#852]: https://github.com/uber-go/zap/pull/852 -[#854]: https://github.com/uber-go/zap/pull/854 -[#861]: https://github.com/uber-go/zap/pull/861 -[#862]: https://github.com/uber-go/zap/pull/862 -[#865]: https://github.com/uber-go/zap/pull/865 -[#867]: https://github.com/uber-go/zap/pull/867 -[#881]: https://github.com/uber-go/zap/pull/881 -[#903]: https://github.com/uber-go/zap/pull/903 -[#912]: https://github.com/uber-go/zap/pull/912 -[#913]: https://github.com/uber-go/zap/pull/913 -[#928]: https://github.com/uber-go/zap/pull/928 -[#931]: https://github.com/uber-go/zap/pull/931 -[#936]: https://github.com/uber-go/zap/pull/936 diff --git a/vendor/go.uber.org/zap/CODE_OF_CONDUCT.md b/vendor/go.uber.org/zap/CODE_OF_CONDUCT.md deleted file mode 100644 index e327d9aa..00000000 --- a/vendor/go.uber.org/zap/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,75 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, -body size, disability, ethnicity, gender identity and expression, level of -experience, nationality, personal appearance, race, religion, or sexual -identity and orientation. - -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or - advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an -appointed representative at an online or offline event. Representation of a -project may be further defined and clarified by project maintainers. 
- -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at oss-conduct@uber.com. The project -team will review and investigate all complaints, and will respond in a way -that it deems appropriate to the circumstances. The project team is obligated -to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], -version 1.4, available at -[http://contributor-covenant.org/version/1/4][version]. - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/go.uber.org/zap/CONTRIBUTING.md b/vendor/go.uber.org/zap/CONTRIBUTING.md deleted file mode 100644 index 5cd96568..00000000 --- a/vendor/go.uber.org/zap/CONTRIBUTING.md +++ /dev/null @@ -1,75 +0,0 @@ -# Contributing - -We'd love your help making zap the very best structured logging library in Go! - -If you'd like to add new exported APIs, please [open an issue][open-issue] -describing your proposal — discussing API changes ahead of time makes -pull request review much smoother. In your issue, pull request, and any other -communications, please remember to treat your fellow contributors with -respect! We take our [code of conduct](CODE_OF_CONDUCT.md) seriously. - -Note that you'll need to sign [Uber's Contributor License Agreement][cla] -before we can accept any of your contributions. If necessary, a bot will remind -you to accept the CLA when you open your pull request. - -## Setup - -[Fork][fork], then clone the repository: - -``` -mkdir -p $GOPATH/src/go.uber.org -cd $GOPATH/src/go.uber.org -git clone git@github.com:your_github_username/zap.git -cd zap -git remote add upstream https://github.com/uber-go/zap.git -git fetch upstream -``` - -Make sure that the tests and the linters pass: - -``` -make test -make lint -``` - -If you're not using the minor version of Go specified in the Makefile's -`LINTABLE_MINOR_VERSIONS` variable, `make lint` doesn't do anything. This is -fine, but it means that you'll only discover lint failures after you open your -pull request. - -## Making Changes - -Start by creating a new branch for your changes: - -``` -cd $GOPATH/src/go.uber.org/zap -git checkout master -git fetch upstream -git rebase upstream/master -git checkout -b cool_new_feature -``` - -Make your changes, then ensure that `make lint` and `make test` still pass. If -you're satisfied with your changes, push them to your fork. - -``` -git push origin cool_new_feature -``` - -Then use the GitHub UI to open a pull request. - -At this point, you're waiting on us to review your changes. We *try* to respond -to issues and pull requests within a few business days, and we may suggest some -improvements or alternatives. Once your changes are approved, one of the -project maintainers will merge them. - -We're much more likely to approve your changes if you: - -* Add tests for new functionality. -* Write a [good commit message][commit-message]. -* Maintain backward compatibility. 
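As a concrete companion to the "add tests for new functionality" item above, here is a minimal sketch (an illustration, not part of the vendored guide) of a unit test built on zap's own `zaptest/observer` package, which records entries in memory for assertions:

```go
package zap_test

import (
	"testing"

	"go.uber.org/zap"
	"go.uber.org/zap/zaptest/observer"
)

func TestLogsHello(t *testing.T) {
	// observer.New returns an in-memory Core plus a handle for
	// inspecting everything logged through it.
	core, logs := observer.New(zap.InfoLevel)
	logger := zap.New(core)

	logger.Info("hello", zap.String("name", "zap"))

	if logs.Len() != 1 {
		t.Fatalf("expected 1 entry, got %d", logs.Len())
	}
	if got := logs.All()[0].Message; got != "hello" {
		t.Errorf("unexpected message: %q", got)
	}
}
```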
- -[fork]: https://github.com/uber-go/zap/fork -[open-issue]: https://github.com/uber-go/zap/issues/new -[cla]: https://cla-assistant.io/uber-go/zap -[commit-message]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html diff --git a/vendor/go.uber.org/zap/FAQ.md b/vendor/go.uber.org/zap/FAQ.md deleted file mode 100644 index b183b20b..00000000 --- a/vendor/go.uber.org/zap/FAQ.md +++ /dev/null @@ -1,164 +0,0 @@ -# Frequently Asked Questions - -## Design - -### Why spend so much effort on logger performance? - -Of course, most applications won't notice the impact of a slow logger: they -already take tens or hundreds of milliseconds for each operation, so an extra -millisecond doesn't matter. - -On the other hand, why *not* make structured logging fast? The `SugaredLogger` -isn't any harder to use than other logging packages, and the `Logger` makes -structured logging possible in performance-sensitive contexts. Across a fleet -of Go microservices, making each application even slightly more efficient adds -up quickly. - -### Why aren't `Logger` and `SugaredLogger` interfaces? - -Unlike the familiar `io.Writer` and `http.Handler`, `Logger` and -`SugaredLogger` interfaces would include *many* methods. As [Rob Pike points -out][go-proverbs], "The bigger the interface, the weaker the abstraction." -Interfaces are also rigid — *any* change requires releasing a new major -version, since it breaks all third-party implementations. - -Making the `Logger` and `SugaredLogger` concrete types doesn't sacrifice much -abstraction, and it lets us add methods without introducing breaking changes. -Your applications should define and depend upon an interface that includes -just the methods you use. - -### Why are some of my logs missing? - -Logs are dropped intentionally by zap when sampling is enabled. The production -configuration (as returned by `NewProductionConfig()`) enables sampling, which will -cause repeated logs within a second to be sampled. For more detail on why sampling -is enabled, see [Why sample application logs](https://github.com/uber-go/zap/blob/master/FAQ.md#why-sample-application-logs). - -### Why sample application logs? - -Applications often experience runs of errors, either because of a bug or -because of a misbehaving user. Logging errors is usually a good idea, but it -can easily make this bad situation worse: not only is your application coping -with a flood of errors, it's also spending extra CPU cycles and I/O logging -those errors. Since writes are typically serialized, logging limits throughput -when you need it most. - -Sampling fixes this problem by dropping repetitive log entries. Under normal -conditions, your application writes out every entry. When similar entries are -logged hundreds or thousands of times each second, though, zap begins dropping -duplicates to preserve throughput. - -### Why do the structured logging APIs take a message in addition to fields? - -Subjectively, we find it helpful to accompany structured context with a brief -description. This isn't critical during development, but it makes debugging -and operating unfamiliar systems much easier. - -More concretely, zap's sampling algorithm uses the message to identify -duplicate entries. In our experience, this is a practical middle ground -between random sampling (which often drops the exact entry that you need while -debugging) and hashing the complete entry (which is prohibitively expensive). - -### Why include package-global loggers?
- -Since so many other logging packages include a global logger, many -applications aren't designed to accept loggers as explicit parameters. -Changing function signatures is often a breaking change, so zap includes -global loggers to simplify migration. - -Avoid them where possible. - -### Why include dedicated Panic and Fatal log levels? - -In general, application code should handle errors gracefully instead of using -`panic` or `os.Exit`. However, every rule has exceptions, and it's common to -crash when an error is truly unrecoverable. To avoid losing any information -— especially the reason for the crash — the logger must flush any -buffered entries before the process exits. - -Zap makes this easy by offering `Panic` and `Fatal` logging methods that -automatically flush before exiting. Of course, this doesn't guarantee that -logs will never be lost, but it eliminates a common error. - -See the discussion in uber-go/zap#207 for more details. - -### What's `DPanic`? - -`DPanic` stands for "panic in development." In development, it logs at -`PanicLevel`; otherwise, it logs at `ErrorLevel`. `DPanic` makes it easier to -catch errors that are theoretically possible, but shouldn't actually happen, -*without* crashing in production. - -If you've ever written code like this, you need `DPanic`: - -```go -if err != nil { - panic(fmt.Sprintf("shouldn't ever get here: %v", err)) -} -``` - -## Installation - -### What does the error `expects import "go.uber.org/zap"` mean? - -Either zap was installed incorrectly or you're referencing the wrong package -name in your code. - -Zap's source code happens to be hosted on GitHub, but the [import -path][import-path] is `go.uber.org/zap`. This gives us, the project -maintainers, the freedom to move the source code if necessary. However, it -means that you need to take a little care when installing and using the -package. - -If you follow two simple rules, everything should work: install zap with `go -get -u go.uber.org/zap`, and always import it in your code with `import -"go.uber.org/zap"`. Your code shouldn't contain *any* references to -`github.com/uber-go/zap`. - -## Usage - -### Does zap support log rotation? - -Zap doesn't natively support rotating log files, since we prefer to leave this -to an external program like `logrotate`. - -However, it's easy to integrate a log rotation package like -[`gopkg.in/natefinch/lumberjack.v2`][lumberjack] as a `zapcore.WriteSyncer`. - -```go -// lumberjack.Logger is already safe for concurrent use, so we don't need to -// lock it. -w := zapcore.AddSync(&lumberjack.Logger{ - Filename: "/var/log/myapp/foo.log", - MaxSize: 500, // megabytes - MaxBackups: 3, - MaxAge: 28, // days -}) -core := zapcore.NewCore( - zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()), - w, - zap.InfoLevel, -) -logger := zap.New(core) -``` - -## Extensions - -We'd love to support every logging need within zap itself, but we're only -familiar with a handful of log ingestion systems, flag-parsing packages, and -the like. Rather than merging code that we can't effectively debug and -support, we'd rather grow an ecosystem of zap extensions. 
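In practice, a zap extension usually boils down to a small implementation of one of the `zapcore` interfaces. As a rough sketch (an illustration only; the `report` callback stands in for whatever ingestion API an extension wraps and is not a real function), a `zapcore.Core` that forwards entries to an external system looks like this:

```go
package myextension

import "go.uber.org/zap/zapcore"

// reportingCore forwards every enabled entry to a user-supplied hook.
type reportingCore struct {
	zapcore.LevelEnabler
	fields []zapcore.Field
	report func(zapcore.Entry, []zapcore.Field)
}

// NewReportingCore wires a hook to a minimum level.
func NewReportingCore(min zapcore.LevelEnabler, report func(zapcore.Entry, []zapcore.Field)) zapcore.Core {
	return &reportingCore{LevelEnabler: min, report: report}
}

// With returns a child core carrying the accumulated structured context.
func (c *reportingCore) With(fields []zapcore.Field) zapcore.Core {
	clone := *c
	clone.fields = make([]zapcore.Field, 0, len(c.fields)+len(fields))
	clone.fields = append(clone.fields, c.fields...)
	clone.fields = append(clone.fields, fields...)
	return &clone
}

// Check registers this core for entries at or above its minimum level.
func (c *reportingCore) Check(ent zapcore.Entry, ce *zapcore.CheckedEntry) *zapcore.CheckedEntry {
	if c.Enabled(ent.Level) {
		return ce.AddCore(ent, c)
	}
	return ce
}

// Write hands the entry and its full field set to the hook.
func (c *reportingCore) Write(ent zapcore.Entry, fields []zapcore.Field) error {
	all := make([]zapcore.Field, 0, len(c.fields)+len(fields))
	all = append(all, c.fields...)
	all = append(all, fields...)
	c.report(ent, all)
	return nil
}

func (c *reportingCore) Sync() error { return nil }
```

Composing such a core with `zapcore.NewTee` lets it run alongside a standard JSON or console core.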
- -We're aware of the following extensions, but haven't used them ourselves: - -| Package | Integration | -| --- | --- | -| `github.com/tchap/zapext` | Sentry, syslog | -| `github.com/fgrosse/zaptest` | Ginkgo | -| `github.com/blendle/zapdriver` | Stackdriver | -| `github.com/moul/zapgorm` | Gorm | -| `github.com/moul/zapfilter` | Advanced filtering rules | - -[go-proverbs]: https://go-proverbs.github.io/ -[import-path]: https://golang.org/cmd/go/#hdr-Remote_import_paths -[lumberjack]: https://godoc.org/gopkg.in/natefinch/lumberjack.v2 diff --git a/vendor/go.uber.org/zap/LICENSE.txt b/vendor/go.uber.org/zap/LICENSE.txt deleted file mode 100644 index 6652bed4..00000000 --- a/vendor/go.uber.org/zap/LICENSE.txt +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2016-2017 Uber Technologies, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/go.uber.org/zap/Makefile b/vendor/go.uber.org/zap/Makefile deleted file mode 100644 index 9b1bc3b0..00000000 --- a/vendor/go.uber.org/zap/Makefile +++ /dev/null @@ -1,73 +0,0 @@ -export GOBIN ?= $(shell pwd)/bin - -GOLINT = $(GOBIN)/golint -STATICCHECK = $(GOBIN)/staticcheck -BENCH_FLAGS ?= -cpuprofile=cpu.pprof -memprofile=mem.pprof -benchmem - -# Directories containing independent Go modules. -# -# We track coverage only for the main module. -MODULE_DIRS = . ./benchmarks ./zapgrpc/internal/test - -# Many Go tools take file globs or directories as arguments instead of packages. -GO_FILES := $(shell \ - find . '(' -path '*/.*' -o -path './vendor' ')' -prune \ - -o -name '*.go' -print | cut -b3-) - -.PHONY: all -all: lint test - -.PHONY: lint -lint: $(GOLINT) $(STATICCHECK) - @rm -rf lint.log - @echo "Checking formatting..." - @gofmt -d -s $(GO_FILES) 2>&1 | tee lint.log - @echo "Checking vet..." - @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && go vet ./... 2>&1) &&) true | tee -a lint.log - @echo "Checking lint..." - @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && $(GOLINT) ./... 2>&1) &&) true | tee -a lint.log - @echo "Checking staticcheck..." - @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && $(STATICCHECK) ./... 2>&1) &&) true | tee -a lint.log - @echo "Checking for unresolved FIXMEs..." - @git grep -i fixme | grep -v -e Makefile | tee -a lint.log - @echo "Checking for license headers..." - @./checklicense.sh | tee -a lint.log - @[ ! -s lint.log ] - @echo "Checking 'go mod tidy'..." - @make tidy - @if ! 
git diff --quiet; then \ - echo "'go mod tidy' resulted in changes or working tree is dirty:"; \ - git --no-pager diff; \ - fi - -$(GOLINT): - cd tools && go install golang.org/x/lint/golint - -$(STATICCHECK): - cd tools && go install honnef.co/go/tools/cmd/staticcheck - -.PHONY: test -test: - @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && go test -race ./...) &&) true - -.PHONY: cover -cover: - go test -race -coverprofile=cover.out -coverpkg=./... ./... - go tool cover -html=cover.out -o cover.html - -.PHONY: bench -BENCH ?= . -bench: - @$(foreach dir,$(MODULE_DIRS), ( \ - cd $(dir) && \ - go list ./... | xargs -n1 go test -bench=$(BENCH) -run="^$$" $(BENCH_FLAGS) \ - ) &&) true - -.PHONY: updatereadme -updatereadme: - rm -f README.md - cat .readme.tmpl | go run internal/readme/readme.go > README.md - -.PHONY: tidy -tidy: - @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && go mod tidy) &&) true diff --git a/vendor/go.uber.org/zap/README.md b/vendor/go.uber.org/zap/README.md deleted file mode 100644 index 1e64d6cf..00000000 --- a/vendor/go.uber.org/zap/README.md +++ /dev/null @@ -1,134 +0,0 @@ -# :zap: zap [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] - -Blazing fast, structured, leveled logging in Go. - -## Installation - -`go get -u go.uber.org/zap` - -Note that zap only supports the two most recent minor versions of Go. - -## Quick Start - -In contexts where performance is nice, but not critical, use the -`SugaredLogger`. It's 4-10x faster than other structured logging -packages and includes both structured and `printf`-style APIs. - -```go -logger, _ := zap.NewProduction() -defer logger.Sync() // flushes buffer, if any -sugar := logger.Sugar() -sugar.Infow("failed to fetch URL", - // Structured context as loosely typed key-value pairs. - "url", url, - "attempt", 3, - "backoff", time.Second, -) -sugar.Infof("Failed to fetch URL: %s", url) -``` - -When performance and type safety are critical, use the `Logger`. It's even -faster than the `SugaredLogger` and allocates far less, but it only supports -structured logging. - -```go -logger, _ := zap.NewProduction() -defer logger.Sync() -logger.Info("failed to fetch URL", - // Structured context as strongly typed Field values. - zap.String("url", url), - zap.Int("attempt", 3), - zap.Duration("backoff", time.Second), -) -``` - -See the [documentation][doc] and [FAQ](FAQ.md) for more details. - -## Performance - -For applications that log in the hot path, reflection-based serialization and -string formatting are prohibitively expensive — they're CPU-intensive -and make many small allocations. Put differently, using `encoding/json` and -`fmt.Fprintf` to log tons of `interface{}`s makes your application slow. - -Zap takes a different approach. It includes a reflection-free, zero-allocation -JSON encoder, and the base `Logger` strives to avoid serialization overhead -and allocations wherever possible. By building the high-level `SugaredLogger` -on that foundation, zap lets users *choose* when they need to count every -allocation and when they'd prefer a more familiar, loosely typed API. - -As measured by its own [benchmarking suite][], not only is zap more performant -than comparable structured logging packages — it's also faster than the -standard library. 
Like all benchmarks, take these with a grain of salt.[1](#footnote-versions) - -Log a message and 10 fields: - -| Package | Time | Time % to zap | Objects Allocated | -| :------ | :--: | :-----------: | :---------------: | -| :zap: zap | 862 ns/op | +0% | 5 allocs/op -| :zap: zap (sugared) | 1250 ns/op | +45% | 11 allocs/op -| zerolog | 4021 ns/op | +366% | 76 allocs/op -| go-kit | 4542 ns/op | +427% | 105 allocs/op -| apex/log | 26785 ns/op | +3007% | 115 allocs/op -| logrus | 29501 ns/op | +3322% | 125 allocs/op -| log15 | 29906 ns/op | +3369% | 122 allocs/op - -Log a message with a logger that already has 10 fields of context: - -| Package | Time | Time % to zap | Objects Allocated | -| :------ | :--: | :-----------: | :---------------: | -| :zap: zap | 126 ns/op | +0% | 0 allocs/op -| :zap: zap (sugared) | 187 ns/op | +48% | 2 allocs/op -| zerolog | 88 ns/op | -30% | 0 allocs/op -| go-kit | 5087 ns/op | +3937% | 103 allocs/op -| log15 | 18548 ns/op | +14621% | 73 allocs/op -| apex/log | 26012 ns/op | +20544% | 104 allocs/op -| logrus | 27236 ns/op | +21516% | 113 allocs/op - -Log a static string, without any context or `printf`-style templating: - -| Package | Time | Time % to zap | Objects Allocated | -| :------ | :--: | :-----------: | :---------------: | -| :zap: zap | 118 ns/op | +0% | 0 allocs/op -| :zap: zap (sugared) | 191 ns/op | +62% | 2 allocs/op -| zerolog | 93 ns/op | -21% | 0 allocs/op -| go-kit | 280 ns/op | +137% | 11 allocs/op -| standard library | 499 ns/op | +323% | 2 allocs/op -| apex/log | 1990 ns/op | +1586% | 10 allocs/op -| logrus | 3129 ns/op | +2552% | 24 allocs/op -| log15 | 3887 ns/op | +3194% | 23 allocs/op - -## Development Status: Stable - -All APIs are finalized, and no breaking changes will be made in the 1.x series -of releases. Users of semver-aware dependency management systems should pin -zap to `^1`. - -## Contributing - -We encourage and support an active, healthy community of contributors — -including you! Details are in the [contribution guide](CONTRIBUTING.md) and -the [code of conduct](CODE_OF_CONDUCT.md). The zap maintainers keep an eye on -issues and pull requests, but you can also report any negative conduct to -oss-conduct@uber.com. That email list is a private, safe space; even the zap -maintainers don't have access, so don't hesitate to hold us to a high -standard. - -
- -Released under the [MIT License](LICENSE.txt). - -[1] In particular, keep in mind that we may be -benchmarking against slightly older versions of other packages. Versions are -pinned in the [benchmarks/go.mod][] file. [↩](#anchor-versions) - -[doc-img]: https://pkg.go.dev/badge/go.uber.org/zap -[doc]: https://pkg.go.dev/go.uber.org/zap -[ci-img]: https://github.com/uber-go/zap/actions/workflows/go.yml/badge.svg -[ci]: https://github.com/uber-go/zap/actions/workflows/go.yml -[cov-img]: https://codecov.io/gh/uber-go/zap/branch/master/graph/badge.svg -[cov]: https://codecov.io/gh/uber-go/zap -[benchmarking suite]: https://github.com/uber-go/zap/tree/master/benchmarks -[benchmarks/go.mod]: https://github.com/uber-go/zap/blob/master/benchmarks/go.mod - diff --git a/vendor/go.uber.org/zap/array.go b/vendor/go.uber.org/zap/array.go deleted file mode 100644 index 5be3704a..00000000 --- a/vendor/go.uber.org/zap/array.go +++ /dev/null @@ -1,320 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zap - -import ( - "time" - - "go.uber.org/zap/zapcore" -) - -// Array constructs a field with the given key and ArrayMarshaler. It provides -// a flexible, but still type-safe and efficient, way to add array-like types -// to the logging context. The struct's MarshalLogArray method is called lazily. -func Array(key string, val zapcore.ArrayMarshaler) Field { - return Field{Key: key, Type: zapcore.ArrayMarshalerType, Interface: val} -} - -// Bools constructs a field that carries a slice of bools. -func Bools(key string, bs []bool) Field { - return Array(key, bools(bs)) -} - -// ByteStrings constructs a field that carries a slice of []byte, each of which -// must be UTF-8 encoded text. -func ByteStrings(key string, bss [][]byte) Field { - return Array(key, byteStringsArray(bss)) -} - -// Complex128s constructs a field that carries a slice of complex numbers. -func Complex128s(key string, nums []complex128) Field { - return Array(key, complex128s(nums)) -} - -// Complex64s constructs a field that carries a slice of complex numbers. -func Complex64s(key string, nums []complex64) Field { - return Array(key, complex64s(nums)) -} - -// Durations constructs a field that carries a slice of time.Durations. -func Durations(key string, ds []time.Duration) Field { - return Array(key, durations(ds)) -} - -// Float64s constructs a field that carries a slice of floats.
-func Float64s(key string, nums []float64) Field { - return Array(key, float64s(nums)) -} - -// Float32s constructs a field that carries a slice of floats. -func Float32s(key string, nums []float32) Field { - return Array(key, float32s(nums)) -} - -// Ints constructs a field that carries a slice of integers. -func Ints(key string, nums []int) Field { - return Array(key, ints(nums)) -} - -// Int64s constructs a field that carries a slice of integers. -func Int64s(key string, nums []int64) Field { - return Array(key, int64s(nums)) -} - -// Int32s constructs a field that carries a slice of integers. -func Int32s(key string, nums []int32) Field { - return Array(key, int32s(nums)) -} - -// Int16s constructs a field that carries a slice of integers. -func Int16s(key string, nums []int16) Field { - return Array(key, int16s(nums)) -} - -// Int8s constructs a field that carries a slice of integers. -func Int8s(key string, nums []int8) Field { - return Array(key, int8s(nums)) -} - -// Strings constructs a field that carries a slice of strings. -func Strings(key string, ss []string) Field { - return Array(key, stringArray(ss)) -} - -// Times constructs a field that carries a slice of time.Times. -func Times(key string, ts []time.Time) Field { - return Array(key, times(ts)) -} - -// Uints constructs a field that carries a slice of unsigned integers. -func Uints(key string, nums []uint) Field { - return Array(key, uints(nums)) -} - -// Uint64s constructs a field that carries a slice of unsigned integers. -func Uint64s(key string, nums []uint64) Field { - return Array(key, uint64s(nums)) -} - -// Uint32s constructs a field that carries a slice of unsigned integers. -func Uint32s(key string, nums []uint32) Field { - return Array(key, uint32s(nums)) -} - -// Uint16s constructs a field that carries a slice of unsigned integers. -func Uint16s(key string, nums []uint16) Field { - return Array(key, uint16s(nums)) -} - -// Uint8s constructs a field that carries a slice of unsigned integers. -func Uint8s(key string, nums []uint8) Field { - return Array(key, uint8s(nums)) -} - -// Uintptrs constructs a field that carries a slice of pointer addresses. -func Uintptrs(key string, us []uintptr) Field { - return Array(key, uintptrs(us)) -} - -// Errors constructs a field that carries a slice of errors. 
-func Errors(key string, errs []error) Field { - return Array(key, errArray(errs)) -} - -type bools []bool - -func (bs bools) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range bs { - arr.AppendBool(bs[i]) - } - return nil -} - -type byteStringsArray [][]byte - -func (bss byteStringsArray) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range bss { - arr.AppendByteString(bss[i]) - } - return nil -} - -type complex128s []complex128 - -func (nums complex128s) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range nums { - arr.AppendComplex128(nums[i]) - } - return nil -} - -type complex64s []complex64 - -func (nums complex64s) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range nums { - arr.AppendComplex64(nums[i]) - } - return nil -} - -type durations []time.Duration - -func (ds durations) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range ds { - arr.AppendDuration(ds[i]) - } - return nil -} - -type float64s []float64 - -func (nums float64s) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range nums { - arr.AppendFloat64(nums[i]) - } - return nil -} - -type float32s []float32 - -func (nums float32s) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range nums { - arr.AppendFloat32(nums[i]) - } - return nil -} - -type ints []int - -func (nums ints) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range nums { - arr.AppendInt(nums[i]) - } - return nil -} - -type int64s []int64 - -func (nums int64s) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range nums { - arr.AppendInt64(nums[i]) - } - return nil -} - -type int32s []int32 - -func (nums int32s) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range nums { - arr.AppendInt32(nums[i]) - } - return nil -} - -type int16s []int16 - -func (nums int16s) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range nums { - arr.AppendInt16(nums[i]) - } - return nil -} - -type int8s []int8 - -func (nums int8s) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range nums { - arr.AppendInt8(nums[i]) - } - return nil -} - -type stringArray []string - -func (ss stringArray) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range ss { - arr.AppendString(ss[i]) - } - return nil -} - -type times []time.Time - -func (ts times) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range ts { - arr.AppendTime(ts[i]) - } - return nil -} - -type uints []uint - -func (nums uints) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range nums { - arr.AppendUint(nums[i]) - } - return nil -} - -type uint64s []uint64 - -func (nums uint64s) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range nums { - arr.AppendUint64(nums[i]) - } - return nil -} - -type uint32s []uint32 - -func (nums uint32s) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range nums { - arr.AppendUint32(nums[i]) - } - return nil -} - -type uint16s []uint16 - -func (nums uint16s) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range nums { - arr.AppendUint16(nums[i]) - } - return nil -} - -type uint8s []uint8 - -func (nums uint8s) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range nums { - arr.AppendUint8(nums[i]) - } - return nil -} - -type uintptrs []uintptr - -func (nums uintptrs) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range nums { - arr.AppendUintptr(nums[i]) - } - return nil -} diff --git a/vendor/go.uber.org/zap/buffer/buffer.go 
b/vendor/go.uber.org/zap/buffer/buffer.go deleted file mode 100644 index 9e929cd9..00000000 --- a/vendor/go.uber.org/zap/buffer/buffer.go +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Package buffer provides a thin wrapper around a byte slice. Unlike the -// standard library's bytes.Buffer, it supports a portion of the strconv -// package's zero-allocation formatters. -package buffer // import "go.uber.org/zap/buffer" - -import ( - "strconv" - "time" -) - -const _size = 1024 // by default, create 1 KiB buffers - -// Buffer is a thin wrapper around a byte slice. It's intended to be pooled, so -// the only way to construct one is via a Pool. -type Buffer struct { - bs []byte - pool Pool -} - -// AppendByte writes a single byte to the Buffer. -func (b *Buffer) AppendByte(v byte) { - b.bs = append(b.bs, v) -} - -// AppendString writes a string to the Buffer. -func (b *Buffer) AppendString(s string) { - b.bs = append(b.bs, s...) -} - -// AppendInt appends an integer to the underlying buffer (assuming base 10). -func (b *Buffer) AppendInt(i int64) { - b.bs = strconv.AppendInt(b.bs, i, 10) -} - -// AppendTime appends the time formatted using the specified layout. -func (b *Buffer) AppendTime(t time.Time, layout string) { - b.bs = t.AppendFormat(b.bs, layout) -} - -// AppendUint appends an unsigned integer to the underlying buffer (assuming -// base 10). -func (b *Buffer) AppendUint(i uint64) { - b.bs = strconv.AppendUint(b.bs, i, 10) -} - -// AppendBool appends a bool to the underlying buffer. -func (b *Buffer) AppendBool(v bool) { - b.bs = strconv.AppendBool(b.bs, v) -} - -// AppendFloat appends a float to the underlying buffer. It doesn't quote NaN -// or +/- Inf. -func (b *Buffer) AppendFloat(f float64, bitSize int) { - b.bs = strconv.AppendFloat(b.bs, f, 'f', -1, bitSize) -} - -// Len returns the length of the underlying byte slice. -func (b *Buffer) Len() int { - return len(b.bs) -} - -// Cap returns the capacity of the underlying byte slice. -func (b *Buffer) Cap() int { - return cap(b.bs) -} - -// Bytes returns a mutable reference to the underlying byte slice. -func (b *Buffer) Bytes() []byte { - return b.bs -} - -// String returns a string copy of the underlying byte slice. -func (b *Buffer) String() string { - return string(b.bs) -} - -// Reset resets the underlying byte slice. Subsequent writes re-use the slice's -// backing array. 
-func (b *Buffer) Reset() { - b.bs = b.bs[:0] -} - -// Write implements io.Writer. -func (b *Buffer) Write(bs []byte) (int, error) { - b.bs = append(b.bs, bs...) - return len(bs), nil -} - -// WriteByte writes a single byte to the Buffer. -// -// Error returned is always nil, function signature is compatible -// with bytes.Buffer and bufio.Writer -func (b *Buffer) WriteByte(v byte) error { - b.AppendByte(v) - return nil -} - -// WriteString writes a string to the Buffer. -// -// Error returned is always nil, function signature is compatible -// with bytes.Buffer and bufio.Writer -func (b *Buffer) WriteString(s string) (int, error) { - b.AppendString(s) - return len(s), nil -} - -// TrimNewline trims any final "\n" byte from the end of the buffer. -func (b *Buffer) TrimNewline() { - if i := len(b.bs) - 1; i >= 0 { - if b.bs[i] == '\n' { - b.bs = b.bs[:i] - } - } -} - -// Free returns the Buffer to its Pool. -// -// Callers must not retain references to the Buffer after calling Free. -func (b *Buffer) Free() { - b.pool.put(b) -} diff --git a/vendor/go.uber.org/zap/buffer/pool.go b/vendor/go.uber.org/zap/buffer/pool.go deleted file mode 100644 index 8fb3e202..00000000 --- a/vendor/go.uber.org/zap/buffer/pool.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package buffer - -import "sync" - -// A Pool is a type-safe wrapper around a sync.Pool. -type Pool struct { - p *sync.Pool -} - -// NewPool constructs a new Pool. -func NewPool() Pool { - return Pool{p: &sync.Pool{ - New: func() interface{} { - return &Buffer{bs: make([]byte, 0, _size)} - }, - }} -} - -// Get retrieves a Buffer from the pool, creating one if necessary. -func (p Pool) Get() *Buffer { - buf := p.p.Get().(*Buffer) - buf.Reset() - buf.pool = p - return buf -} - -func (p Pool) put(buf *Buffer) { - p.p.Put(buf) -} diff --git a/vendor/go.uber.org/zap/checklicense.sh b/vendor/go.uber.org/zap/checklicense.sh deleted file mode 100644 index 345ac8b8..00000000 --- a/vendor/go.uber.org/zap/checklicense.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash -e - -ERROR_COUNT=0 -while read -r file -do - case "$(head -1 "${file}")" in - *"Copyright (c) "*" Uber Technologies, Inc.") - # everything's cool - ;; - *) - echo "$file is missing license header." 
- (( ERROR_COUNT++ )) - ;; - esac -done < <(git ls-files "*\.go") - -exit $ERROR_COUNT diff --git a/vendor/go.uber.org/zap/config.go b/vendor/go.uber.org/zap/config.go deleted file mode 100644 index 55637fb0..00000000 --- a/vendor/go.uber.org/zap/config.go +++ /dev/null @@ -1,264 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zap - -import ( - "fmt" - "sort" - "time" - - "go.uber.org/zap/zapcore" -) - -// SamplingConfig sets a sampling strategy for the logger. Sampling caps the -// global CPU and I/O load that logging puts on your process while attempting -// to preserve a representative subset of your logs. -// -// If specified, the Sampler will invoke the Hook after each decision. -// -// Values configured here are per-second. See zapcore.NewSamplerWithOptions for -// details. -type SamplingConfig struct { - Initial int `json:"initial" yaml:"initial"` - Thereafter int `json:"thereafter" yaml:"thereafter"` - Hook func(zapcore.Entry, zapcore.SamplingDecision) `json:"-" yaml:"-"` -} - -// Config offers a declarative way to construct a logger. It doesn't do -// anything that can't be done with New, Options, and the various -// zapcore.WriteSyncer and zapcore.Core wrappers, but it's a simpler way to -// toggle common options. -// -// Note that Config intentionally supports only the most common options. More -// unusual logging setups (logging to network connections or message queues, -// splitting output between multiple files, etc.) are possible, but require -// direct use of the zapcore package. For sample code, see the package-level -// BasicConfiguration and AdvancedConfiguration examples. -// -// For an example showing runtime log level changes, see the documentation for -// AtomicLevel. -type Config struct { - // Level is the minimum enabled logging level. Note that this is a dynamic - // level, so calling Config.Level.SetLevel will atomically change the log - // level of all loggers descended from this config. - Level AtomicLevel `json:"level" yaml:"level"` - // Development puts the logger in development mode, which changes the - // behavior of DPanicLevel and takes stacktraces more liberally. - Development bool `json:"development" yaml:"development"` - // DisableCaller stops annotating logs with the calling function's file - // name and line number. By default, all logs are annotated. 
- DisableCaller bool `json:"disableCaller" yaml:"disableCaller"` - // DisableStacktrace completely disables automatic stacktrace capturing. By - // default, stacktraces are captured for WarnLevel and above logs in - // development and ErrorLevel and above in production. - DisableStacktrace bool `json:"disableStacktrace" yaml:"disableStacktrace"` - // Sampling sets a sampling policy. A nil SamplingConfig disables sampling. - Sampling *SamplingConfig `json:"sampling" yaml:"sampling"` - // Encoding sets the logger's encoding. Valid values are "json" and - // "console", as well as any third-party encodings registered via - // RegisterEncoder. - Encoding string `json:"encoding" yaml:"encoding"` - // EncoderConfig sets options for the chosen encoder. See - // zapcore.EncoderConfig for details. - EncoderConfig zapcore.EncoderConfig `json:"encoderConfig" yaml:"encoderConfig"` - // OutputPaths is a list of URLs or file paths to write logging output to. - // See Open for details. - OutputPaths []string `json:"outputPaths" yaml:"outputPaths"` - // ErrorOutputPaths is a list of URLs to write internal logger errors to. - // The default is standard error. - // - // Note that this setting only affects internal errors; for sample code that - // sends error-level logs to a different location from info- and debug-level - // logs, see the package-level AdvancedConfiguration example. - ErrorOutputPaths []string `json:"errorOutputPaths" yaml:"errorOutputPaths"` - // InitialFields is a collection of fields to add to the root logger. - InitialFields map[string]interface{} `json:"initialFields" yaml:"initialFields"` -} - -// NewProductionEncoderConfig returns an opinionated EncoderConfig for -// production environments. -func NewProductionEncoderConfig() zapcore.EncoderConfig { - return zapcore.EncoderConfig{ - TimeKey: "ts", - LevelKey: "level", - NameKey: "logger", - CallerKey: "caller", - FunctionKey: zapcore.OmitKey, - MessageKey: "msg", - StacktraceKey: "stacktrace", - LineEnding: zapcore.DefaultLineEnding, - EncodeLevel: zapcore.LowercaseLevelEncoder, - EncodeTime: zapcore.EpochTimeEncoder, - EncodeDuration: zapcore.SecondsDurationEncoder, - EncodeCaller: zapcore.ShortCallerEncoder, - } -} - -// NewProductionConfig is a reasonable production logging configuration. -// Logging is enabled at InfoLevel and above. -// -// It uses a JSON encoder, writes to standard error, and enables sampling. -// Stacktraces are automatically included on logs of ErrorLevel and above. -func NewProductionConfig() Config { - return Config{ - Level: NewAtomicLevelAt(InfoLevel), - Development: false, - Sampling: &SamplingConfig{ - Initial: 100, - Thereafter: 100, - }, - Encoding: "json", - EncoderConfig: NewProductionEncoderConfig(), - OutputPaths: []string{"stderr"}, - ErrorOutputPaths: []string{"stderr"}, - } -} - -// NewDevelopmentEncoderConfig returns an opinionated EncoderConfig for -// development environments. -func NewDevelopmentEncoderConfig() zapcore.EncoderConfig { - return zapcore.EncoderConfig{ - // Keys can be anything except the empty string. - TimeKey: "T", - LevelKey: "L", - NameKey: "N", - CallerKey: "C", - FunctionKey: zapcore.OmitKey, - MessageKey: "M", - StacktraceKey: "S", - LineEnding: zapcore.DefaultLineEnding, - EncodeLevel: zapcore.CapitalLevelEncoder, - EncodeTime: zapcore.ISO8601TimeEncoder, - EncodeDuration: zapcore.StringDurationEncoder, - EncodeCaller: zapcore.ShortCallerEncoder, - } -} - -// NewDevelopmentConfig is a reasonable development logging configuration. 
-// Logging is enabled at DebugLevel and above. -// -// It enables development mode (which makes DPanicLevel logs panic), uses a -// console encoder, writes to standard error, and disables sampling. -// Stacktraces are automatically included on logs of WarnLevel and above. -func NewDevelopmentConfig() Config { - return Config{ - Level: NewAtomicLevelAt(DebugLevel), - Development: true, - Encoding: "console", - EncoderConfig: NewDevelopmentEncoderConfig(), - OutputPaths: []string{"stderr"}, - ErrorOutputPaths: []string{"stderr"}, - } -} - -// Build constructs a logger from the Config and Options. -func (cfg Config) Build(opts ...Option) (*Logger, error) { - enc, err := cfg.buildEncoder() - if err != nil { - return nil, err - } - - sink, errSink, err := cfg.openSinks() - if err != nil { - return nil, err - } - - if cfg.Level == (AtomicLevel{}) { - return nil, fmt.Errorf("missing Level") - } - - log := New( - zapcore.NewCore(enc, sink, cfg.Level), - cfg.buildOptions(errSink)..., - ) - if len(opts) > 0 { - log = log.WithOptions(opts...) - } - return log, nil -} - -func (cfg Config) buildOptions(errSink zapcore.WriteSyncer) []Option { - opts := []Option{ErrorOutput(errSink)} - - if cfg.Development { - opts = append(opts, Development()) - } - - if !cfg.DisableCaller { - opts = append(opts, AddCaller()) - } - - stackLevel := ErrorLevel - if cfg.Development { - stackLevel = WarnLevel - } - if !cfg.DisableStacktrace { - opts = append(opts, AddStacktrace(stackLevel)) - } - - if scfg := cfg.Sampling; scfg != nil { - opts = append(opts, WrapCore(func(core zapcore.Core) zapcore.Core { - var samplerOpts []zapcore.SamplerOption - if scfg.Hook != nil { - samplerOpts = append(samplerOpts, zapcore.SamplerHook(scfg.Hook)) - } - return zapcore.NewSamplerWithOptions( - core, - time.Second, - cfg.Sampling.Initial, - cfg.Sampling.Thereafter, - samplerOpts..., - ) - })) - } - - if len(cfg.InitialFields) > 0 { - fs := make([]Field, 0, len(cfg.InitialFields)) - keys := make([]string, 0, len(cfg.InitialFields)) - for k := range cfg.InitialFields { - keys = append(keys, k) - } - sort.Strings(keys) - for _, k := range keys { - fs = append(fs, Any(k, cfg.InitialFields[k])) - } - opts = append(opts, Fields(fs...)) - } - - return opts -} - -func (cfg Config) openSinks() (zapcore.WriteSyncer, zapcore.WriteSyncer, error) { - sink, closeOut, err := Open(cfg.OutputPaths...) - if err != nil { - return nil, nil, err - } - errSink, _, err := Open(cfg.ErrorOutputPaths...) - if err != nil { - closeOut() - return nil, nil, err - } - return sink, errSink, nil -} - -func (cfg Config) buildEncoder() (zapcore.Encoder, error) { - return newEncoder(cfg.Encoding, cfg.EncoderConfig) -} diff --git a/vendor/go.uber.org/zap/doc.go b/vendor/go.uber.org/zap/doc.go deleted file mode 100644 index 8638dd1b..00000000 --- a/vendor/go.uber.org/zap/doc.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Package zap provides fast, structured, leveled logging. -// -// For applications that log in the hot path, reflection-based serialization -// and string formatting are prohibitively expensive - they're CPU-intensive -// and make many small allocations. Put differently, using json.Marshal and -// fmt.Fprintf to log tons of interface{} makes your application slow. -// -// Zap takes a different approach. It includes a reflection-free, -// zero-allocation JSON encoder, and the base Logger strives to avoid -// serialization overhead and allocations wherever possible. By building the -// high-level SugaredLogger on that foundation, zap lets users choose when -// they need to count every allocation and when they'd prefer a more familiar, -// loosely typed API. -// -// Choosing a Logger -// -// In contexts where performance is nice, but not critical, use the -// SugaredLogger. It's 4-10x faster than other structured logging packages and -// supports both structured and printf-style logging. Like log15 and go-kit, -// the SugaredLogger's structured logging APIs are loosely typed and accept a -// variadic number of key-value pairs. (For more advanced use cases, they also -// accept strongly typed fields - see the SugaredLogger.With documentation for -// details.) -// sugar := zap.NewExample().Sugar() -// defer sugar.Sync() -// sugar.Infow("failed to fetch URL", -// "url", "http://example.com", -// "attempt", 3, -// "backoff", time.Second, -// ) -// sugar.Infof("failed to fetch URL: %s", "http://example.com") -// -// By default, loggers are unbuffered. However, since zap's low-level APIs -// allow buffering, calling Sync before letting your process exit is a good -// habit. -// -// In the rare contexts where every microsecond and every allocation matter, -// use the Logger. It's even faster than the SugaredLogger and allocates far -// less, but it only supports strongly-typed, structured logging. -// logger := zap.NewExample() -// defer logger.Sync() -// logger.Info("failed to fetch URL", -// zap.String("url", "http://example.com"), -// zap.Int("attempt", 3), -// zap.Duration("backoff", time.Second), -// ) -// -// Choosing between the Logger and SugaredLogger doesn't need to be an -// application-wide decision: converting between the two is simple and -// inexpensive. -// logger := zap.NewExample() -// defer logger.Sync() -// sugar := logger.Sugar() -// plain := sugar.Desugar() -// -// Configuring Zap -// -// The simplest way to build a Logger is to use zap's opinionated presets: -// NewExample, NewProduction, and NewDevelopment. These presets build a logger -// with a single function call: -// logger, err := zap.NewProduction() -// if err != nil { -// log.Fatalf("can't initialize zap logger: %v", err) -// } -// defer logger.Sync() -// -// Presets are fine for small projects, but larger projects and organizations -// naturally require a bit more customization. For most users, zap's Config -// struct strikes the right balance between flexibility and convenience. 
See -// the package-level BasicConfiguration example for sample code. -// -// More unusual configurations (splitting output between files, sending logs -// to a message queue, etc.) are possible, but require direct use of -// go.uber.org/zap/zapcore. See the package-level AdvancedConfiguration -// example for sample code. -// -// Extending Zap -// -// The zap package itself is a relatively thin wrapper around the interfaces -// in go.uber.org/zap/zapcore. Extending zap to support a new encoding (e.g., -// BSON), a new log sink (e.g., Kafka), or something more exotic (perhaps an -// exception aggregation service, like Sentry or Rollbar) typically requires -// implementing the zapcore.Encoder, zapcore.WriteSyncer, or zapcore.Core -// interfaces. See the zapcore documentation for details. -// -// Similarly, package authors can use the high-performance Encoder and Core -// implementations in the zapcore package to build their own loggers. -// -// Frequently Asked Questions -// -// An FAQ covering everything from installation errors to design decisions is -// available at https://github.com/uber-go/zap/blob/master/FAQ.md. -package zap // import "go.uber.org/zap" diff --git a/vendor/go.uber.org/zap/encoder.go b/vendor/go.uber.org/zap/encoder.go deleted file mode 100644 index 08ed8335..00000000 --- a/vendor/go.uber.org/zap/encoder.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zap - -import ( - "errors" - "fmt" - "sync" - - "go.uber.org/zap/zapcore" -) - -var ( - errNoEncoderNameSpecified = errors.New("no encoder name specified") - - _encoderNameToConstructor = map[string]func(zapcore.EncoderConfig) (zapcore.Encoder, error){ - "console": func(encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) { - return zapcore.NewConsoleEncoder(encoderConfig), nil - }, - "json": func(encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) { - return zapcore.NewJSONEncoder(encoderConfig), nil - }, - } - _encoderMutex sync.RWMutex -) - -// RegisterEncoder registers an encoder constructor, which the Config struct -// can then reference. By default, the "json" and "console" encoders are -// registered. -// -// Attempting to register an encoder whose name is already taken returns an -// error. 
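A short sketch of the RegisterEncoder hook documented above; the encoder name "console-json" and the constructor body are illustrative assumptions:

    package main

    import (
        "go.uber.org/zap"
        "go.uber.org/zap/zapcore"
    )

    func main() {
        // Register a custom encoder under a new name; Config.Encoding can
        // then reference it. Reusing "json" or "console" returns an error.
        err := zap.RegisterEncoder("console-json", func(c zapcore.EncoderConfig) (zapcore.Encoder, error) {
            return zapcore.NewJSONEncoder(c), nil
        })
        if err != nil {
            panic(err)
        }

        cfg := zap.NewProductionConfig()
        cfg.Encoding = "console-json"
        logger, err := cfg.Build()
        if err != nil {
            panic(err)
        }
        defer logger.Sync()
        logger.Info("using the custom encoder")
    }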
-func RegisterEncoder(name string, constructor func(zapcore.EncoderConfig) (zapcore.Encoder, error)) error { - _encoderMutex.Lock() - defer _encoderMutex.Unlock() - if name == "" { - return errNoEncoderNameSpecified - } - if _, ok := _encoderNameToConstructor[name]; ok { - return fmt.Errorf("encoder already registered for name %q", name) - } - _encoderNameToConstructor[name] = constructor - return nil -} - -func newEncoder(name string, encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) { - if encoderConfig.TimeKey != "" && encoderConfig.EncodeTime == nil { - return nil, fmt.Errorf("missing EncodeTime in EncoderConfig") - } - - _encoderMutex.RLock() - defer _encoderMutex.RUnlock() - if name == "" { - return nil, errNoEncoderNameSpecified - } - constructor, ok := _encoderNameToConstructor[name] - if !ok { - return nil, fmt.Errorf("no encoder registered for name %q", name) - } - return constructor(encoderConfig) -} diff --git a/vendor/go.uber.org/zap/error.go b/vendor/go.uber.org/zap/error.go deleted file mode 100644 index 65982a51..00000000 --- a/vendor/go.uber.org/zap/error.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zap - -import ( - "sync" - - "go.uber.org/zap/zapcore" -) - -var _errArrayElemPool = sync.Pool{New: func() interface{} { - return &errArrayElem{} -}} - -// Error is shorthand for the common idiom NamedError("error", err). -func Error(err error) Field { - return NamedError("error", err) -} - -// NamedError constructs a field that lazily stores err.Error() under the -// provided key. Errors which also implement fmt.Formatter (like those produced -// by github.com/pkg/errors) will also have their verbose representation stored -// under key+"Verbose". If passed a nil error, the field is a no-op. -// -// For the common case in which the key is simply "error", the Error function -// is shorter and less repetitive. -func NamedError(key string, err error) Field { - if err == nil { - return Skip() - } - return Field{Key: key, Type: zapcore.ErrorType, Interface: err} -} - -type errArray []error - -func (errs errArray) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range errs { - if errs[i] == nil { - continue - } - // To represent each error as an object with an "error" attribute and - // potentially an "errorVerbose" attribute, we need to wrap it in a - // type that implements LogObjectMarshaler. 
To prevent this from - // allocating, pool the wrapper type. - elem := _errArrayElemPool.Get().(*errArrayElem) - elem.error = errs[i] - arr.AppendObject(elem) - elem.error = nil - _errArrayElemPool.Put(elem) - } - return nil -} - -type errArrayElem struct { - error -} - -func (e *errArrayElem) MarshalLogObject(enc zapcore.ObjectEncoder) error { - // Re-use the error field's logic, which supports non-standard error types. - Error(e.error).AddTo(enc) - return nil -} diff --git a/vendor/go.uber.org/zap/field.go b/vendor/go.uber.org/zap/field.go deleted file mode 100644 index bbb745db..00000000 --- a/vendor/go.uber.org/zap/field.go +++ /dev/null @@ -1,549 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zap - -import ( - "fmt" - "math" - "time" - - "go.uber.org/zap/zapcore" -) - -// Field is an alias for Field. Aliasing this type dramatically -// improves the navigability of this package's API documentation. -type Field = zapcore.Field - -var ( - _minTimeInt64 = time.Unix(0, math.MinInt64) - _maxTimeInt64 = time.Unix(0, math.MaxInt64) -) - -// Skip constructs a no-op field, which is often useful when handling invalid -// inputs in other Field constructors. -func Skip() Field { - return Field{Type: zapcore.SkipType} -} - -// nilField returns a field which will marshal explicitly as nil. See motivation -// in https://github.com/uber-go/zap/issues/753 . If we ever make breaking -// changes and add zapcore.NilType and zapcore.ObjectEncoder.AddNil, the -// implementation here should be changed to reflect that. -func nilField(key string) Field { return Reflect(key, nil) } - -// Binary constructs a field that carries an opaque binary blob. -// -// Binary data is serialized in an encoding-appropriate format. For example, -// zap's JSON encoder base64-encodes binary blobs. To log UTF-8 encoded text, -// use ByteString. -func Binary(key string, val []byte) Field { - return Field{Key: key, Type: zapcore.BinaryType, Interface: val} -} - -// Bool constructs a field that carries a bool. -func Bool(key string, val bool) Field { - var ival int64 - if val { - ival = 1 - } - return Field{Key: key, Type: zapcore.BoolType, Integer: ival} -} - -// Boolp constructs a field that carries a *bool. The returned Field will safely -// and explicitly represent `nil` when appropriate. 
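A minimal sketch of the nil handling shared by the pointer-valued constructors such as Boolp (keys are illustrative): a nil pointer is logged as an explicit null rather than being dropped:

    package main

    import "go.uber.org/zap"

    func main() {
        logger := zap.NewExample()
        defer logger.Sync()

        enabled := true
        var missing *bool // nil pointer

        logger.Info("feature flags",
            zap.Boolp("enabled", &enabled), // {"enabled": true}
            zap.Boolp("missing", missing),  // {"missing": null}
        )
    }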
-func Boolp(key string, val *bool) Field { - if val == nil { - return nilField(key) - } - return Bool(key, *val) -} - -// ByteString constructs a field that carries UTF-8 encoded text as a []byte. -// To log opaque binary blobs (which aren't necessarily valid UTF-8), use -// Binary. -func ByteString(key string, val []byte) Field { - return Field{Key: key, Type: zapcore.ByteStringType, Interface: val} -} - -// Complex128 constructs a field that carries a complex number. Unlike most -// numeric fields, this costs an allocation (to convert the complex128 to -// interface{}). -func Complex128(key string, val complex128) Field { - return Field{Key: key, Type: zapcore.Complex128Type, Interface: val} -} - -// Complex128p constructs a field that carries a *complex128. The returned Field will safely -// and explicitly represent `nil` when appropriate. -func Complex128p(key string, val *complex128) Field { - if val == nil { - return nilField(key) - } - return Complex128(key, *val) -} - -// Complex64 constructs a field that carries a complex number. Unlike most -// numeric fields, this costs an allocation (to convert the complex64 to -// interface{}). -func Complex64(key string, val complex64) Field { - return Field{Key: key, Type: zapcore.Complex64Type, Interface: val} -} - -// Complex64p constructs a field that carries a *complex64. The returned Field will safely -// and explicitly represent `nil` when appropriate. -func Complex64p(key string, val *complex64) Field { - if val == nil { - return nilField(key) - } - return Complex64(key, *val) -} - -// Float64 constructs a field that carries a float64. The way the -// floating-point value is represented is encoder-dependent, so marshaling is -// necessarily lazy. -func Float64(key string, val float64) Field { - return Field{Key: key, Type: zapcore.Float64Type, Integer: int64(math.Float64bits(val))} -} - -// Float64p constructs a field that carries a *float64. The returned Field will safely -// and explicitly represent `nil` when appropriate. -func Float64p(key string, val *float64) Field { - if val == nil { - return nilField(key) - } - return Float64(key, *val) -} - -// Float32 constructs a field that carries a float32. The way the -// floating-point value is represented is encoder-dependent, so marshaling is -// necessarily lazy. -func Float32(key string, val float32) Field { - return Field{Key: key, Type: zapcore.Float32Type, Integer: int64(math.Float32bits(val))} -} - -// Float32p constructs a field that carries a *float32. The returned Field will safely -// and explicitly represent `nil` when appropriate. -func Float32p(key string, val *float32) Field { - if val == nil { - return nilField(key) - } - return Float32(key, *val) -} - -// Int constructs a field with the given key and value. -func Int(key string, val int) Field { - return Int64(key, int64(val)) -} - -// Intp constructs a field that carries a *int. The returned Field will safely -// and explicitly represent `nil` when appropriate. -func Intp(key string, val *int) Field { - if val == nil { - return nilField(key) - } - return Int(key, *val) -} - -// Int64 constructs a field with the given key and value. -func Int64(key string, val int64) Field { - return Field{Key: key, Type: zapcore.Int64Type, Integer: val} -} - -// Int64p constructs a field that carries a *int64. The returned Field will safely -// and explicitly represent `nil` when appropriate. 
-func Int64p(key string, val *int64) Field { - if val == nil { - return nilField(key) - } - return Int64(key, *val) -} - -// Int32 constructs a field with the given key and value. -func Int32(key string, val int32) Field { - return Field{Key: key, Type: zapcore.Int32Type, Integer: int64(val)} -} - -// Int32p constructs a field that carries a *int32. The returned Field will safely -// and explicitly represent `nil` when appropriate. -func Int32p(key string, val *int32) Field { - if val == nil { - return nilField(key) - } - return Int32(key, *val) -} - -// Int16 constructs a field with the given key and value. -func Int16(key string, val int16) Field { - return Field{Key: key, Type: zapcore.Int16Type, Integer: int64(val)} -} - -// Int16p constructs a field that carries a *int16. The returned Field will safely -// and explicitly represent `nil` when appropriate. -func Int16p(key string, val *int16) Field { - if val == nil { - return nilField(key) - } - return Int16(key, *val) -} - -// Int8 constructs a field with the given key and value. -func Int8(key string, val int8) Field { - return Field{Key: key, Type: zapcore.Int8Type, Integer: int64(val)} -} - -// Int8p constructs a field that carries a *int8. The returned Field will safely -// and explicitly represent `nil` when appropriate. -func Int8p(key string, val *int8) Field { - if val == nil { - return nilField(key) - } - return Int8(key, *val) -} - -// String constructs a field with the given key and value. -func String(key string, val string) Field { - return Field{Key: key, Type: zapcore.StringType, String: val} -} - -// Stringp constructs a field that carries a *string. The returned Field will safely -// and explicitly represent `nil` when appropriate. -func Stringp(key string, val *string) Field { - if val == nil { - return nilField(key) - } - return String(key, *val) -} - -// Uint constructs a field with the given key and value. -func Uint(key string, val uint) Field { - return Uint64(key, uint64(val)) -} - -// Uintp constructs a field that carries a *uint. The returned Field will safely -// and explicitly represent `nil` when appropriate. -func Uintp(key string, val *uint) Field { - if val == nil { - return nilField(key) - } - return Uint(key, *val) -} - -// Uint64 constructs a field with the given key and value. -func Uint64(key string, val uint64) Field { - return Field{Key: key, Type: zapcore.Uint64Type, Integer: int64(val)} -} - -// Uint64p constructs a field that carries a *uint64. The returned Field will safely -// and explicitly represent `nil` when appropriate. -func Uint64p(key string, val *uint64) Field { - if val == nil { - return nilField(key) - } - return Uint64(key, *val) -} - -// Uint32 constructs a field with the given key and value. -func Uint32(key string, val uint32) Field { - return Field{Key: key, Type: zapcore.Uint32Type, Integer: int64(val)} -} - -// Uint32p constructs a field that carries a *uint32. The returned Field will safely -// and explicitly represent `nil` when appropriate. -func Uint32p(key string, val *uint32) Field { - if val == nil { - return nilField(key) - } - return Uint32(key, *val) -} - -// Uint16 constructs a field with the given key and value. -func Uint16(key string, val uint16) Field { - return Field{Key: key, Type: zapcore.Uint16Type, Integer: int64(val)} -} - -// Uint16p constructs a field that carries a *uint16. The returned Field will safely -// and explicitly represent `nil` when appropriate. 
-func Uint16p(key string, val *uint16) Field { - if val == nil { - return nilField(key) - } - return Uint16(key, *val) -} - -// Uint8 constructs a field with the given key and value. -func Uint8(key string, val uint8) Field { - return Field{Key: key, Type: zapcore.Uint8Type, Integer: int64(val)} -} - -// Uint8p constructs a field that carries a *uint8. The returned Field will safely -// and explicitly represent `nil` when appropriate. -func Uint8p(key string, val *uint8) Field { - if val == nil { - return nilField(key) - } - return Uint8(key, *val) -} - -// Uintptr constructs a field with the given key and value. -func Uintptr(key string, val uintptr) Field { - return Field{Key: key, Type: zapcore.UintptrType, Integer: int64(val)} -} - -// Uintptrp constructs a field that carries a *uintptr. The returned Field will safely -// and explicitly represent `nil` when appropriate. -func Uintptrp(key string, val *uintptr) Field { - if val == nil { - return nilField(key) - } - return Uintptr(key, *val) -} - -// Reflect constructs a field with the given key and an arbitrary object. It uses -// an encoding-appropriate, reflection-based function to lazily serialize nearly -// any object into the logging context, but it's relatively slow and -// allocation-heavy. Outside tests, Any is always a better choice. -// -// If encoding fails (e.g., trying to serialize a map[int]string to JSON), Reflect -// includes the error message in the final log output. -func Reflect(key string, val interface{}) Field { - return Field{Key: key, Type: zapcore.ReflectType, Interface: val} -} - -// Namespace creates a named, isolated scope within the logger's context. All -// subsequent fields will be added to the new namespace. -// -// This helps prevent key collisions when injecting loggers into sub-components -// or third-party libraries. -func Namespace(key string) Field { - return Field{Key: key, Type: zapcore.NamespaceType} -} - -// Stringer constructs a field with the given key and the output of the value's -// String method. The Stringer's String method is called lazily. -func Stringer(key string, val fmt.Stringer) Field { - return Field{Key: key, Type: zapcore.StringerType, Interface: val} -} - -// Time constructs a Field with the given key and value. The encoder -// controls how the time is serialized. -func Time(key string, val time.Time) Field { - if val.Before(_minTimeInt64) || val.After(_maxTimeInt64) { - return Field{Key: key, Type: zapcore.TimeFullType, Interface: val} - } - return Field{Key: key, Type: zapcore.TimeType, Integer: val.UnixNano(), Interface: val.Location()} -} - -// Timep constructs a field that carries a *time.Time. The returned Field will safely -// and explicitly represent `nil` when appropriate. -func Timep(key string, val *time.Time) Field { - if val == nil { - return nilField(key) - } - return Time(key, *val) -} - -// Stack constructs a field that stores a stacktrace of the current goroutine -// under provided key. Keep in mind that taking a stacktrace is eager and -// expensive (relatively speaking); this function both makes an allocation and -// takes about two microseconds. -func Stack(key string) Field { - return StackSkip(key, 1) // skip Stack -} - -// StackSkip constructs a field similarly to Stack, but also skips the given -// number of frames from the top of the stacktrace. -func StackSkip(key string, skip int) Field { - // Returning the stacktrace as a string costs an allocation, but saves us - // from expanding the zapcore.Field union struct to include a byte slice. 
Since - // taking a stacktrace is already so expensive (~10us), the extra allocation - // is okay. - return String(key, takeStacktrace(skip+1)) // skip StackSkip -} - -// Duration constructs a field with the given key and value. The encoder -// controls how the duration is serialized. -func Duration(key string, val time.Duration) Field { - return Field{Key: key, Type: zapcore.DurationType, Integer: int64(val)} -} - -// Durationp constructs a field that carries a *time.Duration. The returned Field will safely -// and explicitly represent `nil` when appropriate. -func Durationp(key string, val *time.Duration) Field { - if val == nil { - return nilField(key) - } - return Duration(key, *val) -} - -// Object constructs a field with the given key and ObjectMarshaler. It -// provides a flexible, but still type-safe and efficient, way to add map- or -// struct-like user-defined types to the logging context. The struct's -// MarshalLogObject method is called lazily. -func Object(key string, val zapcore.ObjectMarshaler) Field { - return Field{Key: key, Type: zapcore.ObjectMarshalerType, Interface: val} -} - -// Inline constructs a Field that is similar to Object, but it -// will add the elements of the provided ObjectMarshaler to the -// current namespace. -func Inline(val zapcore.ObjectMarshaler) Field { - return zapcore.Field{ - Type: zapcore.InlineMarshalerType, - Interface: val, - } -} - -// Any takes a key and an arbitrary value and chooses the best way to represent -// them as a field, falling back to a reflection-based approach only if -// necessary. -// -// Since byte/uint8 and rune/int32 are aliases, Any can't differentiate between -// them. To minimize surprises, []byte values are treated as binary blobs, byte -// values are treated as uint8, and runes are always treated as integers. 
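The byte/rune caveats above are easiest to see in a small sketch (illustrative, not vendored code): a []byte dispatches to Binary and is base64-encoded by the JSON encoder, while single bytes and runes log as integers:

    package main

    import "go.uber.org/zap"

    func main() {
        logger := zap.NewExample()
        defer logger.Sync()

        logger.Info("Any dispatch",
            zap.Any("blob", []byte("hi")), // treated as binary, base64-encoded
            zap.Any("b", byte('A')),       // byte is uint8, logged as 65
            zap.Any("r", 'A'),             // rune is int32, logged as 65
        )
    }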
-func Any(key string, value interface{}) Field { - switch val := value.(type) { - case zapcore.ObjectMarshaler: - return Object(key, val) - case zapcore.ArrayMarshaler: - return Array(key, val) - case bool: - return Bool(key, val) - case *bool: - return Boolp(key, val) - case []bool: - return Bools(key, val) - case complex128: - return Complex128(key, val) - case *complex128: - return Complex128p(key, val) - case []complex128: - return Complex128s(key, val) - case complex64: - return Complex64(key, val) - case *complex64: - return Complex64p(key, val) - case []complex64: - return Complex64s(key, val) - case float64: - return Float64(key, val) - case *float64: - return Float64p(key, val) - case []float64: - return Float64s(key, val) - case float32: - return Float32(key, val) - case *float32: - return Float32p(key, val) - case []float32: - return Float32s(key, val) - case int: - return Int(key, val) - case *int: - return Intp(key, val) - case []int: - return Ints(key, val) - case int64: - return Int64(key, val) - case *int64: - return Int64p(key, val) - case []int64: - return Int64s(key, val) - case int32: - return Int32(key, val) - case *int32: - return Int32p(key, val) - case []int32: - return Int32s(key, val) - case int16: - return Int16(key, val) - case *int16: - return Int16p(key, val) - case []int16: - return Int16s(key, val) - case int8: - return Int8(key, val) - case *int8: - return Int8p(key, val) - case []int8: - return Int8s(key, val) - case string: - return String(key, val) - case *string: - return Stringp(key, val) - case []string: - return Strings(key, val) - case uint: - return Uint(key, val) - case *uint: - return Uintp(key, val) - case []uint: - return Uints(key, val) - case uint64: - return Uint64(key, val) - case *uint64: - return Uint64p(key, val) - case []uint64: - return Uint64s(key, val) - case uint32: - return Uint32(key, val) - case *uint32: - return Uint32p(key, val) - case []uint32: - return Uint32s(key, val) - case uint16: - return Uint16(key, val) - case *uint16: - return Uint16p(key, val) - case []uint16: - return Uint16s(key, val) - case uint8: - return Uint8(key, val) - case *uint8: - return Uint8p(key, val) - case []byte: - return Binary(key, val) - case uintptr: - return Uintptr(key, val) - case *uintptr: - return Uintptrp(key, val) - case []uintptr: - return Uintptrs(key, val) - case time.Time: - return Time(key, val) - case *time.Time: - return Timep(key, val) - case []time.Time: - return Times(key, val) - case time.Duration: - return Duration(key, val) - case *time.Duration: - return Durationp(key, val) - case []time.Duration: - return Durations(key, val) - case error: - return NamedError(key, val) - case []error: - return Errors(key, val) - case fmt.Stringer: - return Stringer(key, val) - default: - return Reflect(key, val) - } -} diff --git a/vendor/go.uber.org/zap/flag.go b/vendor/go.uber.org/zap/flag.go deleted file mode 100644 index 13128750..00000000 --- a/vendor/go.uber.org/zap/flag.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zap - -import ( - "flag" - - "go.uber.org/zap/zapcore" -) - -// LevelFlag uses the standard library's flag.Var to declare a global flag -// with the specified name, default, and usage guidance. The returned value is -// a pointer to the value of the flag. -// -// If you don't want to use the flag package's global state, you can use any -// non-nil *Level as a flag.Value with your own *flag.FlagSet. -func LevelFlag(name string, defaultLevel zapcore.Level, usage string) *zapcore.Level { - lvl := defaultLevel - flag.Var(&lvl, name, usage) - return &lvl -} diff --git a/vendor/go.uber.org/zap/glide.yaml b/vendor/go.uber.org/zap/glide.yaml deleted file mode 100644 index 8e1d05e9..00000000 --- a/vendor/go.uber.org/zap/glide.yaml +++ /dev/null @@ -1,34 +0,0 @@ -package: go.uber.org/zap -license: MIT -import: -- package: go.uber.org/atomic - version: ^1 -- package: go.uber.org/multierr - version: ^1 -testImport: -- package: github.com/satori/go.uuid -- package: github.com/sirupsen/logrus -- package: github.com/apex/log - subpackages: - - handlers/json -- package: github.com/go-kit/kit - subpackages: - - log -- package: github.com/stretchr/testify - subpackages: - - assert - - require -- package: gopkg.in/inconshreveable/log15.v2 -- package: github.com/mattn/goveralls -- package: github.com/pborman/uuid -- package: github.com/pkg/errors -- package: github.com/rs/zerolog -- package: golang.org/x/tools - subpackages: - - cover -- package: golang.org/x/lint - subpackages: - - golint -- package: github.com/axw/gocov - subpackages: - - gocov diff --git a/vendor/go.uber.org/zap/global.go b/vendor/go.uber.org/zap/global.go deleted file mode 100644 index c1ac0507..00000000 --- a/vendor/go.uber.org/zap/global.go +++ /dev/null @@ -1,168 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. 
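A minimal sketch of wiring the LevelFlag helper (deleted above) into ordinary flag parsing; the flag name is an assumption:

    package main

    import (
        "flag"

        "go.uber.org/zap"
    )

    func main() {
        // Registers -log-level on the global flag set; defaults to info.
        level := zap.LevelFlag("log-level", zap.InfoLevel, "minimum enabled logging level")
        flag.Parse()

        cfg := zap.NewProductionConfig()
        cfg.Level = zap.NewAtomicLevelAt(*level)

        logger, err := cfg.Build()
        if err != nil {
            panic(err)
        }
        defer logger.Sync()
        logger.Debug("only shown with -log-level=debug")
    }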
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zap - -import ( - "bytes" - "fmt" - "log" - "os" - "sync" - - "go.uber.org/zap/zapcore" -) - -const ( - _loggerWriterDepth = 2 - _programmerErrorTemplate = "You've found a bug in zap! Please file a bug at " + - "https://github.com/uber-go/zap/issues/new and reference this error: %v" -) - -var ( - _globalMu sync.RWMutex - _globalL = NewNop() - _globalS = _globalL.Sugar() -) - -// L returns the global Logger, which can be reconfigured with ReplaceGlobals. -// It's safe for concurrent use. -func L() *Logger { - _globalMu.RLock() - l := _globalL - _globalMu.RUnlock() - return l -} - -// S returns the global SugaredLogger, which can be reconfigured with -// ReplaceGlobals. It's safe for concurrent use. -func S() *SugaredLogger { - _globalMu.RLock() - s := _globalS - _globalMu.RUnlock() - return s -} - -// ReplaceGlobals replaces the global Logger and SugaredLogger, and returns a -// function to restore the original values. It's safe for concurrent use. -func ReplaceGlobals(logger *Logger) func() { - _globalMu.Lock() - prev := _globalL - _globalL = logger - _globalS = logger.Sugar() - _globalMu.Unlock() - return func() { ReplaceGlobals(prev) } -} - -// NewStdLog returns a *log.Logger which writes to the supplied zap Logger at -// InfoLevel. To redirect the standard library's package-global logging -// functions, use RedirectStdLog instead. -func NewStdLog(l *Logger) *log.Logger { - logger := l.WithOptions(AddCallerSkip(_stdLogDefaultDepth + _loggerWriterDepth)) - f := logger.Info - return log.New(&loggerWriter{f}, "" /* prefix */, 0 /* flags */) -} - -// NewStdLogAt returns *log.Logger which writes to supplied zap logger at -// required level. -func NewStdLogAt(l *Logger, level zapcore.Level) (*log.Logger, error) { - logger := l.WithOptions(AddCallerSkip(_stdLogDefaultDepth + _loggerWriterDepth)) - logFunc, err := levelToFunc(logger, level) - if err != nil { - return nil, err - } - return log.New(&loggerWriter{logFunc}, "" /* prefix */, 0 /* flags */), nil -} - -// RedirectStdLog redirects output from the standard library's package-global -// logger to the supplied logger at InfoLevel. Since zap already handles caller -// annotations, timestamps, etc., it automatically disables the standard -// library's annotations and prefixing. -// -// It returns a function to restore the original prefix and flags and reset the -// standard library's output to os.Stderr. -func RedirectStdLog(l *Logger) func() { - f, err := redirectStdLogAt(l, InfoLevel) - if err != nil { - // Can't get here, since passing InfoLevel to redirectStdLogAt always - // works. - panic(fmt.Sprintf(_programmerErrorTemplate, err)) - } - return f -} - -// RedirectStdLogAt redirects output from the standard library's package-global -// logger to the supplied logger at the specified level. Since zap already -// handles caller annotations, timestamps, etc., it automatically disables the -// standard library's annotations and prefixing. 
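A rough sketch of the redirection helpers documented above, assuming a production logger:

    package main

    import (
        "log"

        "go.uber.org/zap"
    )

    func main() {
        logger, err := zap.NewProduction()
        if err != nil {
            panic(err)
        }
        defer logger.Sync()

        // Route the standard library's global logger through zap at InfoLevel.
        undo := zap.RedirectStdLog(logger)
        defer undo()

        log.Print("this line is emitted by zap, not the stdlib logger")
    }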
-// -// It returns a function to restore the original prefix and flags and reset the -// standard library's output to os.Stderr. -func RedirectStdLogAt(l *Logger, level zapcore.Level) (func(), error) { - return redirectStdLogAt(l, level) -} - -func redirectStdLogAt(l *Logger, level zapcore.Level) (func(), error) { - flags := log.Flags() - prefix := log.Prefix() - log.SetFlags(0) - log.SetPrefix("") - logger := l.WithOptions(AddCallerSkip(_stdLogDefaultDepth + _loggerWriterDepth)) - logFunc, err := levelToFunc(logger, level) - if err != nil { - return nil, err - } - log.SetOutput(&loggerWriter{logFunc}) - return func() { - log.SetFlags(flags) - log.SetPrefix(prefix) - log.SetOutput(os.Stderr) - }, nil -} - -func levelToFunc(logger *Logger, lvl zapcore.Level) (func(string, ...Field), error) { - switch lvl { - case DebugLevel: - return logger.Debug, nil - case InfoLevel: - return logger.Info, nil - case WarnLevel: - return logger.Warn, nil - case ErrorLevel: - return logger.Error, nil - case DPanicLevel: - return logger.DPanic, nil - case PanicLevel: - return logger.Panic, nil - case FatalLevel: - return logger.Fatal, nil - } - return nil, fmt.Errorf("unrecognized level: %q", lvl) -} - -type loggerWriter struct { - logFunc func(msg string, fields ...Field) -} - -func (l *loggerWriter) Write(p []byte) (int, error) { - p = bytes.TrimSpace(p) - l.logFunc(string(p)) - return len(p), nil -} diff --git a/vendor/go.uber.org/zap/global_go112.go b/vendor/go.uber.org/zap/global_go112.go deleted file mode 100644 index 6b5dbda8..00000000 --- a/vendor/go.uber.org/zap/global_go112.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (c) 2019 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// See #682 for more information. -// +build go1.12 - -package zap - -const _stdLogDefaultDepth = 1 diff --git a/vendor/go.uber.org/zap/global_prego112.go b/vendor/go.uber.org/zap/global_prego112.go deleted file mode 100644 index d3ab9af9..00000000 --- a/vendor/go.uber.org/zap/global_prego112.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (c) 2019 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// See #682 for more information. -// +build !go1.12 - -package zap - -const _stdLogDefaultDepth = 2 diff --git a/vendor/go.uber.org/zap/http_handler.go b/vendor/go.uber.org/zap/http_handler.go deleted file mode 100644 index 1297c33b..00000000 --- a/vendor/go.uber.org/zap/http_handler.go +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zap - -import ( - "encoding/json" - "fmt" - "io" - "net/http" - - "go.uber.org/zap/zapcore" -) - -// ServeHTTP is a simple JSON endpoint that can report on or change the current -// logging level. -// -// GET -// -// The GET request returns a JSON description of the current logging level like: -// {"level":"info"} -// -// PUT -// -// The PUT request changes the logging level. It is perfectly safe to change the -// logging level while a program is running. Two content types are supported: -// -// Content-Type: application/x-www-form-urlencoded -// -// With this content type, the level can be provided through the request body or -// a query parameter. The log level is URL encoded like: -// -// level=debug -// -// The request body takes precedence over the query parameter, if both are -// specified. -// -// This content type is the default for a curl PUT request. Following are two -// example curl requests that both set the logging level to debug. 
-// -// curl -X PUT localhost:8080/log/level?level=debug -// curl -X PUT localhost:8080/log/level -d level=debug -// -// For any other content type, the payload is expected to be JSON encoded and -// look like: -// -// {"level":"info"} -// -// An example curl request could look like this: -// -// curl -X PUT localhost:8080/log/level -H "Content-Type: application/json" -d '{"level":"debug"}' -// -func (lvl AtomicLevel) ServeHTTP(w http.ResponseWriter, r *http.Request) { - type errorResponse struct { - Error string `json:"error"` - } - type payload struct { - Level zapcore.Level `json:"level"` - } - - enc := json.NewEncoder(w) - - switch r.Method { - case http.MethodGet: - enc.Encode(payload{Level: lvl.Level()}) - case http.MethodPut: - requestedLvl, err := decodePutRequest(r.Header.Get("Content-Type"), r) - if err != nil { - w.WriteHeader(http.StatusBadRequest) - enc.Encode(errorResponse{Error: err.Error()}) - return - } - lvl.SetLevel(requestedLvl) - enc.Encode(payload{Level: lvl.Level()}) - default: - w.WriteHeader(http.StatusMethodNotAllowed) - enc.Encode(errorResponse{ - Error: "Only GET and PUT are supported.", - }) - } -} - -// Decodes incoming PUT requests and returns the requested logging level. -func decodePutRequest(contentType string, r *http.Request) (zapcore.Level, error) { - if contentType == "application/x-www-form-urlencoded" { - return decodePutURL(r) - } - return decodePutJSON(r.Body) -} - -func decodePutURL(r *http.Request) (zapcore.Level, error) { - lvl := r.FormValue("level") - if lvl == "" { - return 0, fmt.Errorf("must specify logging level") - } - var l zapcore.Level - if err := l.UnmarshalText([]byte(lvl)); err != nil { - return 0, err - } - return l, nil -} - -func decodePutJSON(body io.Reader) (zapcore.Level, error) { - var pld struct { - Level *zapcore.Level `json:"level"` - } - if err := json.NewDecoder(body).Decode(&pld); err != nil { - return 0, fmt.Errorf("malformed request body: %v", err) - } - if pld.Level == nil { - return 0, fmt.Errorf("must specify logging level") - } - return *pld.Level, nil - -} diff --git a/vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go b/vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go deleted file mode 100644 index dad583aa..00000000 --- a/vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Package bufferpool houses zap's shared internal buffer pool. 
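A condensed sketch of exposing the AtomicLevel HTTP handler deleted above; the route and port are illustrative:

    package main

    import (
        "net/http"
        "os"

        "go.uber.org/zap"
        "go.uber.org/zap/zapcore"
    )

    func main() {
        lvl := zap.NewAtomicLevel()
        core := zapcore.NewCore(
            zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
            zapcore.Lock(os.Stdout),
            lvl, // AtomicLevel doubles as the core's LevelEnabler
        )
        logger := zap.New(core)
        defer logger.Sync()

        // AtomicLevel implements http.Handler, so the level can be read and
        // changed at runtime, e.g.:
        //   curl -X PUT localhost:8080/log/level -d level=debug
        http.Handle("/log/level", lvl)
        logger.Info("serving level endpoint on :8080")
        _ = http.ListenAndServe(":8080", nil)
    }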
Third-party -// packages can recreate the same functionality with buffers.NewPool. -package bufferpool - -import "go.uber.org/zap/buffer" - -var ( - _pool = buffer.NewPool() - // Get retrieves a buffer from the pool, creating one if necessary. - Get = _pool.Get -) diff --git a/vendor/go.uber.org/zap/internal/color/color.go b/vendor/go.uber.org/zap/internal/color/color.go deleted file mode 100644 index c4d5d02a..00000000 --- a/vendor/go.uber.org/zap/internal/color/color.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Package color adds coloring functionality for TTY output. -package color - -import "fmt" - -// Foreground colors. -const ( - Black Color = iota + 30 - Red - Green - Yellow - Blue - Magenta - Cyan - White -) - -// Color represents a text color. -type Color uint8 - -// Add adds the coloring to the given string. -func (c Color) Add(s string) string { - return fmt.Sprintf("\x1b[%dm%s\x1b[0m", uint8(c), s) -} diff --git a/vendor/go.uber.org/zap/internal/exit/exit.go b/vendor/go.uber.org/zap/internal/exit/exit.go deleted file mode 100644 index dfc5b05f..00000000 --- a/vendor/go.uber.org/zap/internal/exit/exit.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Package exit provides stubs so that unit tests can exercise code that calls -// os.Exit(1). 
-package exit - -import "os" - -var real = func() { os.Exit(1) } - -// Exit normally terminates the process by calling os.Exit(1). If the package -// is stubbed, it instead records a call in the testing spy. -func Exit() { - real() -} - -// A StubbedExit is a testing fake for os.Exit. -type StubbedExit struct { - Exited bool - prev func() -} - -// Stub substitutes a fake for the call to os.Exit(1). -func Stub() *StubbedExit { - s := &StubbedExit{prev: real} - real = s.exit - return s -} - -// WithStub runs the supplied function with Exit stubbed. It returns the stub -// used, so that users can test whether the process would have crashed. -func WithStub(f func()) *StubbedExit { - s := Stub() - defer s.Unstub() - f() - return s -} - -// Unstub restores the previous exit function. -func (se *StubbedExit) Unstub() { - real = se.prev -} - -func (se *StubbedExit) exit() { - se.Exited = true -} diff --git a/vendor/go.uber.org/zap/level.go b/vendor/go.uber.org/zap/level.go deleted file mode 100644 index 3567a9a1..00000000 --- a/vendor/go.uber.org/zap/level.go +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zap - -import ( - "go.uber.org/atomic" - "go.uber.org/zap/zapcore" -) - -const ( - // DebugLevel logs are typically voluminous, and are usually disabled in - // production. - DebugLevel = zapcore.DebugLevel - // InfoLevel is the default logging priority. - InfoLevel = zapcore.InfoLevel - // WarnLevel logs are more important than Info, but don't need individual - // human review. - WarnLevel = zapcore.WarnLevel - // ErrorLevel logs are high-priority. If an application is running smoothly, - // it shouldn't generate any error-level logs. - ErrorLevel = zapcore.ErrorLevel - // DPanicLevel logs are particularly important errors. In development the - // logger panics after writing the message. - DPanicLevel = zapcore.DPanicLevel - // PanicLevel logs a message, then panics. - PanicLevel = zapcore.PanicLevel - // FatalLevel logs a message, then calls os.Exit(1). - FatalLevel = zapcore.FatalLevel -) - -// LevelEnablerFunc is a convenient way to implement zapcore.LevelEnabler with -// an anonymous function. -// -// It's particularly useful when splitting log output between different -// outputs (e.g., standard error and standard out). For sample code, see the -// package-level AdvancedConfiguration example. 
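In the spirit of the AdvancedConfiguration example referenced above, a condensed sketch of LevelEnablerFunc splitting output by level:

    package main

    import (
        "os"

        "go.uber.org/zap"
        "go.uber.org/zap/zapcore"
    )

    func main() {
        // Send warnings and above to stderr, everything else to stdout.
        highPriority := zap.LevelEnablerFunc(func(l zapcore.Level) bool {
            return l >= zapcore.WarnLevel
        })
        lowPriority := zap.LevelEnablerFunc(func(l zapcore.Level) bool {
            return l < zapcore.WarnLevel
        })

        enc := zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig())
        core := zapcore.NewTee(
            zapcore.NewCore(enc, zapcore.Lock(os.Stderr), highPriority),
            zapcore.NewCore(enc, zapcore.Lock(os.Stdout), lowPriority),
        )

        logger := zap.New(core)
        defer logger.Sync()
        logger.Info("to stdout")
        logger.Warn("to stderr")
    }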
-type LevelEnablerFunc func(zapcore.Level) bool - -// Enabled calls the wrapped function. -func (f LevelEnablerFunc) Enabled(lvl zapcore.Level) bool { return f(lvl) } - -// An AtomicLevel is an atomically changeable, dynamic logging level. It lets -// you safely change the log level of a tree of loggers (the root logger and -// any children created by adding context) at runtime. -// -// The AtomicLevel itself is an http.Handler that serves a JSON endpoint to -// alter its level. -// -// AtomicLevels must be created with the NewAtomicLevel constructor to allocate -// their internal atomic pointer. -type AtomicLevel struct { - l *atomic.Int32 -} - -// NewAtomicLevel creates an AtomicLevel with InfoLevel and above logging -// enabled. -func NewAtomicLevel() AtomicLevel { - return AtomicLevel{ - l: atomic.NewInt32(int32(InfoLevel)), - } -} - -// NewAtomicLevelAt is a convenience function that creates an AtomicLevel -// and then calls SetLevel with the given level. -func NewAtomicLevelAt(l zapcore.Level) AtomicLevel { - a := NewAtomicLevel() - a.SetLevel(l) - return a -} - -// Enabled implements the zapcore.LevelEnabler interface, which allows the -// AtomicLevel to be used in place of traditional static levels. -func (lvl AtomicLevel) Enabled(l zapcore.Level) bool { - return lvl.Level().Enabled(l) -} - -// Level returns the minimum enabled log level. -func (lvl AtomicLevel) Level() zapcore.Level { - return zapcore.Level(int8(lvl.l.Load())) -} - -// SetLevel alters the logging level. -func (lvl AtomicLevel) SetLevel(l zapcore.Level) { - lvl.l.Store(int32(l)) -} - -// String returns the string representation of the underlying Level. -func (lvl AtomicLevel) String() string { - return lvl.Level().String() -} - -// UnmarshalText unmarshals the text to an AtomicLevel. It uses the same text -// representations as the static zapcore.Levels ("debug", "info", "warn", -// "error", "dpanic", "panic", and "fatal"). -func (lvl *AtomicLevel) UnmarshalText(text []byte) error { - if lvl.l == nil { - lvl.l = &atomic.Int32{} - } - - var l zapcore.Level - if err := l.UnmarshalText(text); err != nil { - return err - } - - lvl.SetLevel(l) - return nil -} - -// MarshalText marshals the AtomicLevel to a byte slice. It uses the same -// text representation as the static zapcore.Levels ("debug", "info", "warn", -// "error", "dpanic", "panic", and "fatal"). -func (lvl AtomicLevel) MarshalText() (text []byte, err error) { - return lvl.Level().MarshalText() -} diff --git a/vendor/go.uber.org/zap/logger.go b/vendor/go.uber.org/zap/logger.go deleted file mode 100644 index f116bd93..00000000 --- a/vendor/go.uber.org/zap/logger.go +++ /dev/null @@ -1,348 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zap - -import ( - "fmt" - "io/ioutil" - "os" - "runtime" - "strings" - - "go.uber.org/zap/zapcore" -) - -// A Logger provides fast, leveled, structured logging. All methods are safe -// for concurrent use. -// -// The Logger is designed for contexts in which every microsecond and every -// allocation matters, so its API intentionally favors performance and type -// safety over brevity. For most applications, the SugaredLogger strikes a -// better balance between performance and ergonomics. -type Logger struct { - core zapcore.Core - - development bool - addCaller bool - onFatal zapcore.CheckWriteAction // default is WriteThenFatal - - name string - errorOutput zapcore.WriteSyncer - - addStack zapcore.LevelEnabler - - callerSkip int - - clock zapcore.Clock -} - -// New constructs a new Logger from the provided zapcore.Core and Options. If -// the passed zapcore.Core is nil, it falls back to using a no-op -// implementation. -// -// This is the most flexible way to construct a Logger, but also the most -// verbose. For typical use cases, the highly-opinionated presets -// (NewProduction, NewDevelopment, and NewExample) or the Config struct are -// more convenient. -// -// For sample code, see the package-level AdvancedConfiguration example. -func New(core zapcore.Core, options ...Option) *Logger { - if core == nil { - return NewNop() - } - log := &Logger{ - core: core, - errorOutput: zapcore.Lock(os.Stderr), - addStack: zapcore.FatalLevel + 1, - clock: zapcore.DefaultClock, - } - return log.WithOptions(options...) -} - -// NewNop returns a no-op Logger. It never writes out logs or internal errors, -// and it never runs user-defined hooks. -// -// Using WithOptions to replace the Core or error output of a no-op Logger can -// re-enable logging. -func NewNop() *Logger { - return &Logger{ - core: zapcore.NewNopCore(), - errorOutput: zapcore.AddSync(ioutil.Discard), - addStack: zapcore.FatalLevel + 1, - clock: zapcore.DefaultClock, - } -} - -// NewProduction builds a sensible production Logger that writes InfoLevel and -// above logs to standard error as JSON. -// -// It's a shortcut for NewProductionConfig().Build(...Option). -func NewProduction(options ...Option) (*Logger, error) { - return NewProductionConfig().Build(options...) -} - -// NewDevelopment builds a development Logger that writes DebugLevel and above -// logs to standard error in a human-friendly format. -// -// It's a shortcut for NewDevelopmentConfig().Build(...Option). -func NewDevelopment(options ...Option) (*Logger, error) { - return NewDevelopmentConfig().Build(options...) -} - -// NewExample builds a Logger that's designed for use in zap's testable -// examples. It writes DebugLevel and above logs to standard out as JSON, but -// omits the timestamp and calling function to keep example output -// short and deterministic. -func NewExample(options ...Option) *Logger { - encoderCfg := zapcore.EncoderConfig{ - MessageKey: "msg", - LevelKey: "level", - NameKey: "logger", - EncodeLevel: zapcore.LowercaseLevelEncoder, - EncodeTime: zapcore.ISO8601TimeEncoder, - EncodeDuration: zapcore.StringDurationEncoder, - } - core := zapcore.NewCore(zapcore.NewJSONEncoder(encoderCfg), os.Stdout, DebugLevel) - return New(core).WithOptions(options...) 
-} - -// Sugar wraps the Logger to provide a more ergonomic, but slightly slower, -// API. Sugaring a Logger is quite inexpensive, so it's reasonable for a -// single application to use both Loggers and SugaredLoggers, converting -// between them on the boundaries of performance-sensitive code. -func (log *Logger) Sugar() *SugaredLogger { - core := log.clone() - core.callerSkip += 2 - return &SugaredLogger{core} -} - -// Named adds a new path segment to the logger's name. Segments are joined by -// periods. By default, Loggers are unnamed. -func (log *Logger) Named(s string) *Logger { - if s == "" { - return log - } - l := log.clone() - if log.name == "" { - l.name = s - } else { - l.name = strings.Join([]string{l.name, s}, ".") - } - return l -} - -// WithOptions clones the current Logger, applies the supplied Options, and -// returns the resulting Logger. It's safe to use concurrently. -func (log *Logger) WithOptions(opts ...Option) *Logger { - c := log.clone() - for _, opt := range opts { - opt.apply(c) - } - return c -} - -// With creates a child logger and adds structured context to it. Fields added -// to the child don't affect the parent, and vice versa. -func (log *Logger) With(fields ...Field) *Logger { - if len(fields) == 0 { - return log - } - l := log.clone() - l.core = l.core.With(fields) - return l -} - -// Check returns a CheckedEntry if logging a message at the specified level -// is enabled. It's a completely optional optimization; in high-performance -// applications, Check can help avoid allocating a slice to hold fields. -func (log *Logger) Check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { - return log.check(lvl, msg) -} - -// Debug logs a message at DebugLevel. The message includes any fields passed -// at the log site, as well as any fields accumulated on the logger. -func (log *Logger) Debug(msg string, fields ...Field) { - if ce := log.check(DebugLevel, msg); ce != nil { - ce.Write(fields...) - } -} - -// Info logs a message at InfoLevel. The message includes any fields passed -// at the log site, as well as any fields accumulated on the logger. -func (log *Logger) Info(msg string, fields ...Field) { - if ce := log.check(InfoLevel, msg); ce != nil { - ce.Write(fields...) - } -} - -// Warn logs a message at WarnLevel. The message includes any fields passed -// at the log site, as well as any fields accumulated on the logger. -func (log *Logger) Warn(msg string, fields ...Field) { - if ce := log.check(WarnLevel, msg); ce != nil { - ce.Write(fields...) - } -} - -// Error logs a message at ErrorLevel. The message includes any fields passed -// at the log site, as well as any fields accumulated on the logger. -func (log *Logger) Error(msg string, fields ...Field) { - if ce := log.check(ErrorLevel, msg); ce != nil { - ce.Write(fields...) - } -} - -// DPanic logs a message at DPanicLevel. The message includes any fields -// passed at the log site, as well as any fields accumulated on the logger. -// -// If the logger is in development mode, it then panics (DPanic means -// "development panic"). This is useful for catching errors that are -// recoverable, but shouldn't ever happen. -func (log *Logger) DPanic(msg string, fields ...Field) { - if ce := log.check(DPanicLevel, msg); ce != nil { - ce.Write(fields...) - } -} - -// Panic logs a message at PanicLevel. The message includes any fields passed -// at the log site, as well as any fields accumulated on the logger. -// -// The logger then panics, even if logging at PanicLevel is disabled. 
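A short sketch of the child-logger helpers above (Named, With); the names and fields are illustrative:

    package main

    import "go.uber.org/zap"

    func main() {
        logger := zap.NewExample()
        defer logger.Sync()

        // Named joins segments with periods; With attaches fields that
        // affect only the child logger.
        child := logger.Named("billing").With(zap.String("region", "us-east-1"))
        child.Info("charge captured", zap.Int("amount_cents", 1250))

        // The parent is unaffected by the child's name and fields.
        logger.Info("parent unchanged")
    }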
-func (log *Logger) Panic(msg string, fields ...Field) {
-	if ce := log.check(PanicLevel, msg); ce != nil {
-		ce.Write(fields...)
-	}
-}
-
-// Fatal logs a message at FatalLevel. The message includes any fields passed
-// at the log site, as well as any fields accumulated on the logger.
-//
-// The logger then calls os.Exit(1), even if logging at FatalLevel is
-// disabled.
-func (log *Logger) Fatal(msg string, fields ...Field) {
-	if ce := log.check(FatalLevel, msg); ce != nil {
-		ce.Write(fields...)
-	}
-}
-
-// Sync calls the underlying Core's Sync method, flushing any buffered log
-// entries. Applications should take care to call Sync before exiting.
-func (log *Logger) Sync() error {
-	return log.core.Sync()
-}
-
-// Core returns the Logger's underlying zapcore.Core.
-func (log *Logger) Core() zapcore.Core {
-	return log.core
-}
-
-func (log *Logger) clone() *Logger {
-	copy := *log
-	return &copy
-}
-
-func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
-	// check must always be called directly by a method in the Logger interface
-	// (e.g., Check, Info, Fatal).
-	const callerSkipOffset = 2
-
-	// Check the level first to reduce the cost of disabled log calls.
-	// Since Panic and higher may exit, we skip the optimization for those levels.
-	if lvl < zapcore.DPanicLevel && !log.core.Enabled(lvl) {
-		return nil
-	}
-
-	// Create basic checked entry thru the core; this will be non-nil if the
-	// log message will actually be written somewhere.
-	ent := zapcore.Entry{
-		LoggerName: log.name,
-		Time:       log.clock.Now(),
-		Level:      lvl,
-		Message:    msg,
-	}
-	ce := log.core.Check(ent, nil)
-	willWrite := ce != nil
-
-	// Set up any required terminal behavior.
-	switch ent.Level {
-	case zapcore.PanicLevel:
-		ce = ce.Should(ent, zapcore.WriteThenPanic)
-	case zapcore.FatalLevel:
-		onFatal := log.onFatal
-		// Noop is the default value for CheckWriteAction, and it leads to
-		// continued execution after a Fatal which is unexpected.
-		if onFatal == zapcore.WriteThenNoop {
-			onFatal = zapcore.WriteThenFatal
-		}
-		ce = ce.Should(ent, onFatal)
-	case zapcore.DPanicLevel:
-		if log.development {
-			ce = ce.Should(ent, zapcore.WriteThenPanic)
-		}
-	}
-
-	// Only do further annotation if we're going to write this message; checked
-	// entries that exist only for terminal behavior don't benefit from
-	// annotation.
-	if !willWrite {
-		return ce
-	}
-
-	// Thread the error output through to the CheckedEntry.
-	ce.ErrorOutput = log.errorOutput
-	if log.addCaller {
-		frame, defined := getCallerFrame(log.callerSkip + callerSkipOffset)
-		if !defined {
-			fmt.Fprintf(log.errorOutput, "%v Logger.check error: failed to get caller\n", ent.Time.UTC())
-			log.errorOutput.Sync()
-		}
-
-		ce.Entry.Caller = zapcore.EntryCaller{
-			Defined:  defined,
-			PC:       frame.PC,
-			File:     frame.File,
-			Line:     frame.Line,
-			Function: frame.Function,
-		}
-	}
-	if log.addStack.Enabled(ce.Entry.Level) {
-		ce.Entry.Stack = StackSkip("", log.callerSkip+callerSkipOffset).String
-	}
-
-	return ce
-}
-
-// getCallerFrame gets caller frame. The argument skip is the number of stack
-// frames to ascend, with 0 identifying the caller of getCallerFrame. The
-// boolean ok is false if it was not possible to recover the information.
-//
-// Note: This implementation is similar to runtime.Caller, but it returns the whole frame.
-func getCallerFrame(skip int) (frame runtime.Frame, ok bool) { - const skipOffset = 2 // skip getCallerFrame and Callers - - pc := make([]uintptr, 1) - numFrames := runtime.Callers(skip+skipOffset, pc) - if numFrames < 1 { - return - } - - frame, _ = runtime.CallersFrames(pc).Next() - return frame, frame.PC != 0 -} diff --git a/vendor/go.uber.org/zap/options.go b/vendor/go.uber.org/zap/options.go deleted file mode 100644 index e9e66161..00000000 --- a/vendor/go.uber.org/zap/options.go +++ /dev/null @@ -1,148 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zap - -import ( - "fmt" - - "go.uber.org/zap/zapcore" -) - -// An Option configures a Logger. -type Option interface { - apply(*Logger) -} - -// optionFunc wraps a func so it satisfies the Option interface. -type optionFunc func(*Logger) - -func (f optionFunc) apply(log *Logger) { - f(log) -} - -// WrapCore wraps or replaces the Logger's underlying zapcore.Core. -func WrapCore(f func(zapcore.Core) zapcore.Core) Option { - return optionFunc(func(log *Logger) { - log.core = f(log.core) - }) -} - -// Hooks registers functions which will be called each time the Logger writes -// out an Entry. Repeated use of Hooks is additive. -// -// Hooks are useful for simple side effects, like capturing metrics for the -// number of emitted logs. More complex side effects, including anything that -// requires access to the Entry's structured fields, should be implemented as -// a zapcore.Core instead. See zapcore.RegisterHooks for details. -func Hooks(hooks ...func(zapcore.Entry) error) Option { - return optionFunc(func(log *Logger) { - log.core = zapcore.RegisterHooks(log.core, hooks...) - }) -} - -// Fields adds fields to the Logger. -func Fields(fs ...Field) Option { - return optionFunc(func(log *Logger) { - log.core = log.core.With(fs) - }) -} - -// ErrorOutput sets the destination for errors generated by the Logger. Note -// that this option only affects internal errors; for sample code that sends -// error-level logs to a different location from info- and debug-level logs, -// see the package-level AdvancedConfiguration example. -// -// The supplied WriteSyncer must be safe for concurrent use. The Open and -// zapcore.Lock functions are the simplest ways to protect files with a mutex. 
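A short, hedged sketch of how the Options above compose at construction time; the service field is illustrative, not from this repository:

package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	base, _ := zap.NewProduction()
	defer base.Sync()

	// WithOptions clones, so base is untouched.
	logger := base.WithOptions(
		zap.AddCaller(),                             // annotate entries with file:line
		zap.AddStacktrace(zapcore.ErrorLevel),       // stack traces for Error and above
		zap.Fields(zap.String("service", "candy")), // context on every entry
	)
	logger.Error("something broke")
}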
-func ErrorOutput(w zapcore.WriteSyncer) Option {
-	return optionFunc(func(log *Logger) {
-		log.errorOutput = w
-	})
-}
-
-// Development puts the logger in development mode, which makes DPanic-level
-// logs panic instead of simply logging an error.
-func Development() Option {
-	return optionFunc(func(log *Logger) {
-		log.development = true
-	})
-}
-
-// AddCaller configures the Logger to annotate each message with the filename,
-// line number, and function name of zap's caller. See also WithCaller.
-func AddCaller() Option {
-	return WithCaller(true)
-}
-
-// WithCaller configures the Logger to annotate each message with the filename,
-// line number, and function name of zap's caller, or not, depending on the
-// value of enabled. This is a generalized form of AddCaller.
-func WithCaller(enabled bool) Option {
-	return optionFunc(func(log *Logger) {
-		log.addCaller = enabled
-	})
-}
-
-// AddCallerSkip increases the number of callers skipped by caller annotation
-// (as enabled by the AddCaller option). When building wrappers around the
-// Logger and SugaredLogger, supplying this Option prevents zap from always
-// reporting the wrapper code as the caller.
-func AddCallerSkip(skip int) Option {
-	return optionFunc(func(log *Logger) {
-		log.callerSkip += skip
-	})
-}
-
-// AddStacktrace configures the Logger to record a stack trace for all messages at
-// or above a given level.
-func AddStacktrace(lvl zapcore.LevelEnabler) Option {
-	return optionFunc(func(log *Logger) {
-		log.addStack = lvl
-	})
-}
-
-// IncreaseLevel increases the level of the logger. It has no effect if
-// the passed-in level tries to decrease the level of the logger.
-func IncreaseLevel(lvl zapcore.LevelEnabler) Option {
-	return optionFunc(func(log *Logger) {
-		core, err := zapcore.NewIncreaseLevelCore(log.core, lvl)
-		if err != nil {
-			fmt.Fprintf(log.errorOutput, "failed to IncreaseLevel: %v\n", err)
-		} else {
-			log.core = core
-		}
-	})
-}
-
-// OnFatal sets the action to take on fatal logs.
-func OnFatal(action zapcore.CheckWriteAction) Option {
-	return optionFunc(func(log *Logger) {
-		log.onFatal = action
-	})
-}
-
-// WithClock specifies the clock used by the logger to determine the current
-// time for logged entries. Defaults to the system clock with time.Now.
-func WithClock(clock zapcore.Clock) Option {
-	return optionFunc(func(log *Logger) {
-		log.clock = clock
-	})
-}
diff --git a/vendor/go.uber.org/zap/sink.go b/vendor/go.uber.org/zap/sink.go
deleted file mode 100644
index df46fa87..00000000
--- a/vendor/go.uber.org/zap/sink.go
+++ /dev/null
@@ -1,161 +0,0 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zap - -import ( - "errors" - "fmt" - "io" - "net/url" - "os" - "strings" - "sync" - - "go.uber.org/zap/zapcore" -) - -const schemeFile = "file" - -var ( - _sinkMutex sync.RWMutex - _sinkFactories map[string]func(*url.URL) (Sink, error) // keyed by scheme -) - -func init() { - resetSinkRegistry() -} - -func resetSinkRegistry() { - _sinkMutex.Lock() - defer _sinkMutex.Unlock() - - _sinkFactories = map[string]func(*url.URL) (Sink, error){ - schemeFile: newFileSink, - } -} - -// Sink defines the interface to write to and close logger destinations. -type Sink interface { - zapcore.WriteSyncer - io.Closer -} - -type nopCloserSink struct{ zapcore.WriteSyncer } - -func (nopCloserSink) Close() error { return nil } - -type errSinkNotFound struct { - scheme string -} - -func (e *errSinkNotFound) Error() string { - return fmt.Sprintf("no sink found for scheme %q", e.scheme) -} - -// RegisterSink registers a user-supplied factory for all sinks with a -// particular scheme. -// -// All schemes must be ASCII, valid under section 3.1 of RFC 3986 -// (https://tools.ietf.org/html/rfc3986#section-3.1), and must not already -// have a factory registered. Zap automatically registers a factory for the -// "file" scheme. -func RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error { - _sinkMutex.Lock() - defer _sinkMutex.Unlock() - - if scheme == "" { - return errors.New("can't register a sink factory for empty string") - } - normalized, err := normalizeScheme(scheme) - if err != nil { - return fmt.Errorf("%q is not a valid scheme: %v", scheme, err) - } - if _, ok := _sinkFactories[normalized]; ok { - return fmt.Errorf("sink factory already registered for scheme %q", normalized) - } - _sinkFactories[normalized] = factory - return nil -} - -func newSink(rawURL string) (Sink, error) { - u, err := url.Parse(rawURL) - if err != nil { - return nil, fmt.Errorf("can't parse %q as a URL: %v", rawURL, err) - } - if u.Scheme == "" { - u.Scheme = schemeFile - } - - _sinkMutex.RLock() - factory, ok := _sinkFactories[u.Scheme] - _sinkMutex.RUnlock() - if !ok { - return nil, &errSinkNotFound{u.Scheme} - } - return factory(u) -} - -func newFileSink(u *url.URL) (Sink, error) { - if u.User != nil { - return nil, fmt.Errorf("user and password not allowed with file URLs: got %v", u) - } - if u.Fragment != "" { - return nil, fmt.Errorf("fragments not allowed with file URLs: got %v", u) - } - if u.RawQuery != "" { - return nil, fmt.Errorf("query parameters not allowed with file URLs: got %v", u) - } - // Error messages are better if we check hostname and port separately. 
-	if u.Port() != "" {
-		return nil, fmt.Errorf("ports not allowed with file URLs: got %v", u)
-	}
-	if hn := u.Hostname(); hn != "" && hn != "localhost" {
-		return nil, fmt.Errorf("file URLs must leave host empty or use localhost: got %v", u)
-	}
-	switch u.Path {
-	case "stdout":
-		return nopCloserSink{os.Stdout}, nil
-	case "stderr":
-		return nopCloserSink{os.Stderr}, nil
-	}
-	return os.OpenFile(u.Path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666)
-}
-
-func normalizeScheme(s string) (string, error) {
-	// https://tools.ietf.org/html/rfc3986#section-3.1
-	s = strings.ToLower(s)
-	if first := s[0]; 'a' > first || 'z' < first {
-		return "", errors.New("must start with a letter")
-	}
-	for i := 1; i < len(s); i++ { // iterate over bytes, not runes
-		c := s[i]
-		switch {
-		case 'a' <= c && c <= 'z':
-			continue
-		case '0' <= c && c <= '9':
-			continue
-		case c == '.' || c == '+' || c == '-':
-			continue
-		}
-		return "", fmt.Errorf("may not contain %q", c)
-	}
-	return s, nil
-}
diff --git a/vendor/go.uber.org/zap/stacktrace.go b/vendor/go.uber.org/zap/stacktrace.go
deleted file mode 100644
index 0cf8c1dd..00000000
--- a/vendor/go.uber.org/zap/stacktrace.go
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package zap
-
-import (
-	"runtime"
-	"sync"
-
-	"go.uber.org/zap/internal/bufferpool"
-)
-
-var (
-	_stacktracePool = sync.Pool{
-		New: func() interface{} {
-			return newProgramCounters(64)
-		},
-	}
-)
-
-func takeStacktrace(skip int) string {
-	buffer := bufferpool.Get()
-	defer buffer.Free()
-	programCounters := _stacktracePool.Get().(*programCounters)
-	defer _stacktracePool.Put(programCounters)
-
-	var numFrames int
-	for {
-		// Skip the call to runtime.Callers and takeStacktrace so that the
-		// program counters start at the caller of takeStacktrace.
-		numFrames = runtime.Callers(skip+2, programCounters.pcs)
-		if numFrames < len(programCounters.pcs) {
-			break
-		}
-		// Don't put the too-short counter slice back into the pool; this lets
-		// the pool adjust if we consistently take deep stacktraces.
-		programCounters = newProgramCounters(len(programCounters.pcs) * 2)
-	}
-
-	i := 0
-	frames := runtime.CallersFrames(programCounters.pcs[:numFrames])
-
-	// Note: On the last iteration, frames.Next() returns false, with a valid
-	// frame, but we ignore this frame. The last frame is a runtime frame which
-	// adds noise, since it's only either runtime.main or runtime.goexit.
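Since the registry above keys sink factories by URL scheme, here is a small, hedged sketch (not from this repository) of plugging in a custom sink; the memorySink type and the memdemo scheme are invented for illustration:

package main

import (
	"bytes"
	"fmt"
	"net/url"

	"go.uber.org/zap"
)

// memorySink is an illustrative in-memory Sink, not something zap provides.
type memorySink struct{ bytes.Buffer }

func (*memorySink) Close() error { return nil }
func (*memorySink) Sync() error  { return nil }

func main() {
	sink := &memorySink{}
	// Schemes must be lowercase ASCII letters, digits, '.', '+', or '-'.
	if err := zap.RegisterSink("memdemo", func(*url.URL) (zap.Sink, error) {
		return sink, nil
	}); err != nil {
		panic(err)
	}

	cfg := zap.NewProductionConfig()
	cfg.OutputPaths = []string{"memdemo://"}
	logger, _ := cfg.Build()
	logger.Info("captured in memory")
	_ = logger.Sync()
	fmt.Print(sink.String())
}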
- for frame, more := frames.Next(); more; frame, more = frames.Next() { - if i != 0 { - buffer.AppendByte('\n') - } - i++ - buffer.AppendString(frame.Function) - buffer.AppendByte('\n') - buffer.AppendByte('\t') - buffer.AppendString(frame.File) - buffer.AppendByte(':') - buffer.AppendInt(int64(frame.Line)) - } - - return buffer.String() -} - -type programCounters struct { - pcs []uintptr -} - -func newProgramCounters(size int) *programCounters { - return &programCounters{make([]uintptr, size)} -} diff --git a/vendor/go.uber.org/zap/sugar.go b/vendor/go.uber.org/zap/sugar.go deleted file mode 100644 index 0b965198..00000000 --- a/vendor/go.uber.org/zap/sugar.go +++ /dev/null @@ -1,315 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zap - -import ( - "fmt" - - "go.uber.org/zap/zapcore" - - "go.uber.org/multierr" -) - -const ( - _oddNumberErrMsg = "Ignored key without a value." - _nonStringKeyErrMsg = "Ignored key-value pairs with non-string keys." -) - -// A SugaredLogger wraps the base Logger functionality in a slower, but less -// verbose, API. Any Logger can be converted to a SugaredLogger with its Sugar -// method. -// -// Unlike the Logger, the SugaredLogger doesn't insist on structured logging. -// For each log level, it exposes three methods: one for loosely-typed -// structured logging, one for println-style formatting, and one for -// printf-style formatting. For example, SugaredLoggers can produce InfoLevel -// output with Infow ("info with" structured context), Info, or Infof. -type SugaredLogger struct { - base *Logger -} - -// Desugar unwraps a SugaredLogger, exposing the original Logger. Desugaring -// is quite inexpensive, so it's reasonable for a single application to use -// both Loggers and SugaredLoggers, converting between them on the boundaries -// of performance-sensitive code. -func (s *SugaredLogger) Desugar() *Logger { - base := s.base.clone() - base.callerSkip -= 2 - return base -} - -// Named adds a sub-scope to the logger's name. See Logger.Named for details. -func (s *SugaredLogger) Named(name string) *SugaredLogger { - return &SugaredLogger{base: s.base.Named(name)} -} - -// With adds a variadic number of fields to the logging context. It accepts a -// mix of strongly-typed Field objects and loosely-typed key-value pairs. When -// processing pairs, the first element of the pair is used as the field key -// and the second as the field value. 
-// -// For example, -// sugaredLogger.With( -// "hello", "world", -// "failure", errors.New("oh no"), -// Stack(), -// "count", 42, -// "user", User{Name: "alice"}, -// ) -// is the equivalent of -// unsugared.With( -// String("hello", "world"), -// String("failure", "oh no"), -// Stack(), -// Int("count", 42), -// Object("user", User{Name: "alice"}), -// ) -// -// Note that the keys in key-value pairs should be strings. In development, -// passing a non-string key panics. In production, the logger is more -// forgiving: a separate error is logged, but the key-value pair is skipped -// and execution continues. Passing an orphaned key triggers similar behavior: -// panics in development and errors in production. -func (s *SugaredLogger) With(args ...interface{}) *SugaredLogger { - return &SugaredLogger{base: s.base.With(s.sweetenFields(args)...)} -} - -// Debug uses fmt.Sprint to construct and log a message. -func (s *SugaredLogger) Debug(args ...interface{}) { - s.log(DebugLevel, "", args, nil) -} - -// Info uses fmt.Sprint to construct and log a message. -func (s *SugaredLogger) Info(args ...interface{}) { - s.log(InfoLevel, "", args, nil) -} - -// Warn uses fmt.Sprint to construct and log a message. -func (s *SugaredLogger) Warn(args ...interface{}) { - s.log(WarnLevel, "", args, nil) -} - -// Error uses fmt.Sprint to construct and log a message. -func (s *SugaredLogger) Error(args ...interface{}) { - s.log(ErrorLevel, "", args, nil) -} - -// DPanic uses fmt.Sprint to construct and log a message. In development, the -// logger then panics. (See DPanicLevel for details.) -func (s *SugaredLogger) DPanic(args ...interface{}) { - s.log(DPanicLevel, "", args, nil) -} - -// Panic uses fmt.Sprint to construct and log a message, then panics. -func (s *SugaredLogger) Panic(args ...interface{}) { - s.log(PanicLevel, "", args, nil) -} - -// Fatal uses fmt.Sprint to construct and log a message, then calls os.Exit. -func (s *SugaredLogger) Fatal(args ...interface{}) { - s.log(FatalLevel, "", args, nil) -} - -// Debugf uses fmt.Sprintf to log a templated message. -func (s *SugaredLogger) Debugf(template string, args ...interface{}) { - s.log(DebugLevel, template, args, nil) -} - -// Infof uses fmt.Sprintf to log a templated message. -func (s *SugaredLogger) Infof(template string, args ...interface{}) { - s.log(InfoLevel, template, args, nil) -} - -// Warnf uses fmt.Sprintf to log a templated message. -func (s *SugaredLogger) Warnf(template string, args ...interface{}) { - s.log(WarnLevel, template, args, nil) -} - -// Errorf uses fmt.Sprintf to log a templated message. -func (s *SugaredLogger) Errorf(template string, args ...interface{}) { - s.log(ErrorLevel, template, args, nil) -} - -// DPanicf uses fmt.Sprintf to log a templated message. In development, the -// logger then panics. (See DPanicLevel for details.) -func (s *SugaredLogger) DPanicf(template string, args ...interface{}) { - s.log(DPanicLevel, template, args, nil) -} - -// Panicf uses fmt.Sprintf to log a templated message, then panics. -func (s *SugaredLogger) Panicf(template string, args ...interface{}) { - s.log(PanicLevel, template, args, nil) -} - -// Fatalf uses fmt.Sprintf to log a templated message, then calls os.Exit. -func (s *SugaredLogger) Fatalf(template string, args ...interface{}) { - s.log(FatalLevel, template, args, nil) -} - -// Debugw logs a message with some additional context. The variadic key-value -// pairs are treated as they are in With. 
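A hedged sketch of the sugared key-value calls described above, including the cheap hop back to the strongly-typed Logger:

package main

import "go.uber.org/zap"

func main() {
	logger, _ := zap.NewDevelopment()
	defer logger.Sync()
	sugar := logger.Sugar()

	// Loosely-typed pairs: keys must be strings, values can be anything.
	sugar.Infow("user logged in",
		"user", "alice",
		"attempt", 3,
	)
	sugar.Infof("served %d requests", 42) // printf-style

	// Desugar is inexpensive; use it on performance-sensitive boundaries.
	sugar.Desugar().Info("hot path", zap.Int("n", 1))
}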
-//
-// When debug-level logging is disabled, this is much faster than
-// s.With(keysAndValues).Debug(msg)
-func (s *SugaredLogger) Debugw(msg string, keysAndValues ...interface{}) {
-	s.log(DebugLevel, msg, nil, keysAndValues)
-}
-
-// Infow logs a message with some additional context. The variadic key-value
-// pairs are treated as they are in With.
-func (s *SugaredLogger) Infow(msg string, keysAndValues ...interface{}) {
-	s.log(InfoLevel, msg, nil, keysAndValues)
-}
-
-// Warnw logs a message with some additional context. The variadic key-value
-// pairs are treated as they are in With.
-func (s *SugaredLogger) Warnw(msg string, keysAndValues ...interface{}) {
-	s.log(WarnLevel, msg, nil, keysAndValues)
-}
-
-// Errorw logs a message with some additional context. The variadic key-value
-// pairs are treated as they are in With.
-func (s *SugaredLogger) Errorw(msg string, keysAndValues ...interface{}) {
-	s.log(ErrorLevel, msg, nil, keysAndValues)
-}
-
-// DPanicw logs a message with some additional context. In development, the
-// logger then panics. (See DPanicLevel for details.) The variadic key-value
-// pairs are treated as they are in With.
-func (s *SugaredLogger) DPanicw(msg string, keysAndValues ...interface{}) {
-	s.log(DPanicLevel, msg, nil, keysAndValues)
-}
-
-// Panicw logs a message with some additional context, then panics. The
-// variadic key-value pairs are treated as they are in With.
-func (s *SugaredLogger) Panicw(msg string, keysAndValues ...interface{}) {
-	s.log(PanicLevel, msg, nil, keysAndValues)
-}
-
-// Fatalw logs a message with some additional context, then calls os.Exit. The
-// variadic key-value pairs are treated as they are in With.
-func (s *SugaredLogger) Fatalw(msg string, keysAndValues ...interface{}) {
-	s.log(FatalLevel, msg, nil, keysAndValues)
-}
-
-// Sync flushes any buffered log entries.
-func (s *SugaredLogger) Sync() error {
-	return s.base.Sync()
-}
-
-func (s *SugaredLogger) log(lvl zapcore.Level, template string, fmtArgs []interface{}, context []interface{}) {
-	// If logging at this level is completely disabled, skip the overhead of
-	// string formatting.
-	if lvl < DPanicLevel && !s.base.Core().Enabled(lvl) {
-		return
-	}
-
-	msg := getMessage(template, fmtArgs)
-	if ce := s.base.Check(lvl, msg); ce != nil {
-		ce.Write(s.sweetenFields(context)...)
-	}
-}
-
-// getMessage formats with Sprint, Sprintf, or neither.
-func getMessage(template string, fmtArgs []interface{}) string {
-	if len(fmtArgs) == 0 {
-		return template
-	}
-
-	if template != "" {
-		return fmt.Sprintf(template, fmtArgs...)
-	}
-
-	if len(fmtArgs) == 1 {
-		if str, ok := fmtArgs[0].(string); ok {
-			return str
-		}
-	}
-	return fmt.Sprint(fmtArgs...)
-}
-
-func (s *SugaredLogger) sweetenFields(args []interface{}) []Field {
-	if len(args) == 0 {
-		return nil
-	}
-
-	// Allocate enough space for the worst case; if users pass only structured
-	// fields, we shouldn't penalize them with extra allocations.
-	fields := make([]Field, 0, len(args))
-	var invalid invalidPairs
-
-	for i := 0; i < len(args); {
-		// This is a strongly-typed field. Consume it and move on.
-		if f, ok := args[i].(Field); ok {
-			fields = append(fields, f)
-			i++
-			continue
-		}
-
-		// Make sure this element isn't a dangling key.
-		if i == len(args)-1 {
-			s.base.Error(_oddNumberErrMsg, Any("ignored", args[i]))
-			break
-		}
-
-		// Consume this value and the next, treating them as a key-value pair. If the
-		// key isn't a string, add this pair to the slice of invalid pairs.
- key, val := args[i], args[i+1] - if keyStr, ok := key.(string); !ok { - // Subsequent errors are likely, so allocate once up front. - if cap(invalid) == 0 { - invalid = make(invalidPairs, 0, len(args)/2) - } - invalid = append(invalid, invalidPair{i, key, val}) - } else { - fields = append(fields, Any(keyStr, val)) - } - i += 2 - } - - // If we encountered any invalid key-value pairs, log an error. - if len(invalid) > 0 { - s.base.Error(_nonStringKeyErrMsg, Array("invalid", invalid)) - } - return fields -} - -type invalidPair struct { - position int - key, value interface{} -} - -func (p invalidPair) MarshalLogObject(enc zapcore.ObjectEncoder) error { - enc.AddInt64("position", int64(p.position)) - Any("key", p.key).AddTo(enc) - Any("value", p.value).AddTo(enc) - return nil -} - -type invalidPairs []invalidPair - -func (ps invalidPairs) MarshalLogArray(enc zapcore.ArrayEncoder) error { - var err error - for i := range ps { - err = multierr.Append(err, enc.AppendObject(ps[i])) - } - return err -} diff --git a/vendor/go.uber.org/zap/time.go b/vendor/go.uber.org/zap/time.go deleted file mode 100644 index c5a1f162..00000000 --- a/vendor/go.uber.org/zap/time.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zap - -import "time" - -func timeToMillis(t time.Time) int64 { - return t.UnixNano() / int64(time.Millisecond) -} diff --git a/vendor/go.uber.org/zap/writer.go b/vendor/go.uber.org/zap/writer.go deleted file mode 100644 index 86a709ab..00000000 --- a/vendor/go.uber.org/zap/writer.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zap - -import ( - "fmt" - "io" - "io/ioutil" - - "go.uber.org/zap/zapcore" - - "go.uber.org/multierr" -) - -// Open is a high-level wrapper that takes a variadic number of URLs, opens or -// creates each of the specified resources, and combines them into a locked -// WriteSyncer. It also returns any error encountered and a function to close -// any opened files. -// -// Passing no URLs returns a no-op WriteSyncer. Zap handles URLs without a -// scheme and URLs with the "file" scheme. Third-party code may register -// factories for other schemes using RegisterSink. -// -// URLs with the "file" scheme must use absolute paths on the local -// filesystem. No user, password, port, fragments, or query parameters are -// allowed, and the hostname must be empty or "localhost". -// -// Since it's common to write logs to the local filesystem, URLs without a -// scheme (e.g., "/var/log/foo.log") are treated as local file paths. Without -// a scheme, the special paths "stdout" and "stderr" are interpreted as -// os.Stdout and os.Stderr. When specified without a scheme, relative file -// paths also work. -func Open(paths ...string) (zapcore.WriteSyncer, func(), error) { - writers, close, err := open(paths) - if err != nil { - return nil, nil, err - } - - writer := CombineWriteSyncers(writers...) - return writer, close, nil -} - -func open(paths []string) ([]zapcore.WriteSyncer, func(), error) { - writers := make([]zapcore.WriteSyncer, 0, len(paths)) - closers := make([]io.Closer, 0, len(paths)) - close := func() { - for _, c := range closers { - c.Close() - } - } - - var openErr error - for _, path := range paths { - sink, err := newSink(path) - if err != nil { - openErr = multierr.Append(openErr, fmt.Errorf("couldn't open sink %q: %v", path, err)) - continue - } - writers = append(writers, sink) - closers = append(closers, sink) - } - if openErr != nil { - close() - return writers, nil, openErr - } - - return writers, close, nil -} - -// CombineWriteSyncers is a utility that combines multiple WriteSyncers into a -// single, locked WriteSyncer. If no inputs are supplied, it returns a no-op -// WriteSyncer. -// -// It's provided purely as a convenience; the result is no different from -// using zapcore.NewMultiWriteSyncer and zapcore.Lock individually. -func CombineWriteSyncers(writers ...zapcore.WriteSyncer) zapcore.WriteSyncer { - if len(writers) == 0 { - return zapcore.AddSync(ioutil.Discard) - } - return zapcore.Lock(zapcore.NewMultiWriteSyncer(writers...)) -} diff --git a/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go b/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go deleted file mode 100644 index ef2f7d96..00000000 --- a/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go +++ /dev/null @@ -1,188 +0,0 @@ -// Copyright (c) 2021 Uber Technologies, Inc. 
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package zapcore
-
-import (
-	"bufio"
-	"sync"
-	"time"
-
-	"go.uber.org/multierr"
-)
-
-const (
-	// _defaultBufferSize specifies the default size used by Buffer.
-	_defaultBufferSize = 256 * 1024 // 256 kB
-
-	// _defaultFlushInterval specifies the default flush interval for
-	// Buffer.
-	_defaultFlushInterval = 30 * time.Second
-)
-
-// A BufferedWriteSyncer is a WriteSyncer that buffers writes in-memory before
-// flushing them to a wrapped WriteSyncer after reaching some limit, or at some
-// fixed interval--whichever comes first.
-//
-// BufferedWriteSyncer is safe for concurrent use. You don't need to use
-// zapcore.Lock for WriteSyncers with BufferedWriteSyncer.
-type BufferedWriteSyncer struct {
-	// WS is the WriteSyncer around which BufferedWriteSyncer will buffer
-	// writes.
-	//
-	// This field is required.
-	WS WriteSyncer
-
-	// Size specifies the maximum amount of data the writer will buffer
-	// before flushing.
-	//
-	// Defaults to 256 kB if unspecified.
-	Size int
-
-	// FlushInterval specifies how often the writer should flush data if
-	// there have been no writes.
-	//
-	// Defaults to 30 seconds if unspecified.
-	FlushInterval time.Duration
-
-	// Clock, if specified, provides control of the source of time for the
-	// writer.
-	//
-	// Defaults to the system clock.
-	Clock Clock
-
-	// unexported fields for state
-	mu          sync.Mutex
-	initialized bool // whether initialize() has run
-	stopped     bool // whether Stop() has run
-	writer      *bufio.Writer
-	ticker      *time.Ticker
-	stop        chan struct{} // closed when flushLoop should stop
-	done        chan struct{} // closed when flushLoop has stopped
-}
-
-func (s *BufferedWriteSyncer) initialize() {
-	size := s.Size
-	if size == 0 {
-		size = _defaultBufferSize
-	}
-
-	flushInterval := s.FlushInterval
-	if flushInterval == 0 {
-		flushInterval = _defaultFlushInterval
-	}
-
-	if s.Clock == nil {
-		s.Clock = DefaultClock
-	}
-
-	s.ticker = s.Clock.NewTicker(flushInterval)
-	s.writer = bufio.NewWriterSize(s.WS, size)
-	s.stop = make(chan struct{})
-	s.done = make(chan struct{})
-	s.initialized = true
-	go s.flushLoop()
-}
-
-// Write writes log data into the buffer syncer directly. Multiple Write calls
-// are batched, and log data is flushed to disk when the buffer is full or
-// periodically.
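A hedged sketch (not from this diff) that feeds zap.Open's locked WriteSyncer through the buffered syncer defined above; the 512 kB size and 10 s interval are arbitrary illustration values:

package main

import (
	"time"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	// "stdout" is one of the schemeless special paths handled by Open.
	ws, closeOut, err := zap.Open("stdout")
	if err != nil {
		panic(err)
	}
	defer closeOut()

	// Flush at 512 kB or every 10s, whichever comes first.
	buffered := &zapcore.BufferedWriteSyncer{
		WS:            ws,
		Size:          512 * 1024,
		FlushInterval: 10 * time.Second,
	}
	defer buffered.Stop() // flushes any remaining buffered data

	core := zapcore.NewCore(
		zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
		buffered,
		zapcore.InfoLevel,
	)
	zap.New(core).Info("buffered write")
}

Note that Stop, not Sync, is what tears down the background flush goroutine, which is why it is deferred here.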
-func (s *BufferedWriteSyncer) Write(bs []byte) (int, error) { - s.mu.Lock() - defer s.mu.Unlock() - - if !s.initialized { - s.initialize() - } - - // To avoid partial writes from being flushed, we manually flush the existing buffer if: - // * The current write doesn't fit into the buffer fully, and - // * The buffer is not empty (since bufio will not split large writes when the buffer is empty) - if len(bs) > s.writer.Available() && s.writer.Buffered() > 0 { - if err := s.writer.Flush(); err != nil { - return 0, err - } - } - - return s.writer.Write(bs) -} - -// Sync flushes buffered log data into disk directly. -func (s *BufferedWriteSyncer) Sync() error { - s.mu.Lock() - defer s.mu.Unlock() - - var err error - if s.initialized { - err = s.writer.Flush() - } - - return multierr.Append(err, s.WS.Sync()) -} - -// flushLoop flushes the buffer at the configured interval until Stop is -// called. -func (s *BufferedWriteSyncer) flushLoop() { - defer close(s.done) - - for { - select { - case <-s.ticker.C: - // we just simply ignore error here - // because the underlying bufio writer stores any errors - // and we return any error from Sync() as part of the close - _ = s.Sync() - case <-s.stop: - return - } - } -} - -// Stop closes the buffer, cleans up background goroutines, and flushes -// remaining unwritten data. -func (s *BufferedWriteSyncer) Stop() (err error) { - var stopped bool - - // Critical section. - func() { - s.mu.Lock() - defer s.mu.Unlock() - - if !s.initialized { - return - } - - stopped = s.stopped - if stopped { - return - } - s.stopped = true - - s.ticker.Stop() - close(s.stop) // tell flushLoop to stop - <-s.done // and wait until it has - }() - - // Don't call Sync on consecutive Stops. - if !stopped { - err = s.Sync() - } - - return err -} diff --git a/vendor/go.uber.org/zap/zapcore/clock.go b/vendor/go.uber.org/zap/zapcore/clock.go deleted file mode 100644 index d2ea95b3..00000000 --- a/vendor/go.uber.org/zap/zapcore/clock.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright (c) 2021 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zapcore - -import ( - "time" -) - -// DefaultClock is the default clock used by Zap in operations that require -// time. This clock uses the system clock for all operations. -var DefaultClock = systemClock{} - -// Clock is a source of time for logged entries. -type Clock interface { - // Now returns the current local time. 
- Now() time.Time - - // NewTicker returns *time.Ticker that holds a channel - // that delivers "ticks" of a clock. - NewTicker(time.Duration) *time.Ticker -} - -// systemClock implements default Clock that uses system time. -type systemClock struct{} - -func (systemClock) Now() time.Time { - return time.Now() -} - -func (systemClock) NewTicker(duration time.Duration) *time.Ticker { - return time.NewTicker(duration) -} diff --git a/vendor/go.uber.org/zap/zapcore/console_encoder.go b/vendor/go.uber.org/zap/zapcore/console_encoder.go deleted file mode 100644 index 2307af40..00000000 --- a/vendor/go.uber.org/zap/zapcore/console_encoder.go +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zapcore - -import ( - "fmt" - "sync" - - "go.uber.org/zap/buffer" - "go.uber.org/zap/internal/bufferpool" -) - -var _sliceEncoderPool = sync.Pool{ - New: func() interface{} { - return &sliceArrayEncoder{elems: make([]interface{}, 0, 2)} - }, -} - -func getSliceEncoder() *sliceArrayEncoder { - return _sliceEncoderPool.Get().(*sliceArrayEncoder) -} - -func putSliceEncoder(e *sliceArrayEncoder) { - e.elems = e.elems[:0] - _sliceEncoderPool.Put(e) -} - -type consoleEncoder struct { - *jsonEncoder -} - -// NewConsoleEncoder creates an encoder whose output is designed for human - -// rather than machine - consumption. It serializes the core log entry data -// (message, level, timestamp, etc.) in a plain-text format and leaves the -// structured context as JSON. -// -// Note that although the console encoder doesn't use the keys specified in the -// encoder configuration, it will omit any element whose key is set to the empty -// string. -func NewConsoleEncoder(cfg EncoderConfig) Encoder { - if cfg.ConsoleSeparator == "" { - // Use a default delimiter of '\t' for backwards compatibility - cfg.ConsoleSeparator = "\t" - } - return consoleEncoder{newJSONEncoder(cfg, true)} -} - -func (c consoleEncoder) Clone() Encoder { - return consoleEncoder{c.jsonEncoder.Clone().(*jsonEncoder)} -} - -func (c consoleEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, error) { - line := bufferpool.Get() - - // We don't want the entry's metadata to be quoted and escaped (if it's - // encoded as strings), which means that we can't use the JSON encoder. The - // simplest option is to use the memory encoder and fmt.Fprint. 
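The Clock interface above pairs with the WithClock option from options.go; a minimal sketch, assuming a pinned clock is wanted only for deterministic test output (the fixedClock type is invented for illustration):

package main

import (
	"time"

	"go.uber.org/zap"
)

// fixedClock pins Now for deterministic output; NewTicker simply delegates
// to the real clock. Illustrative only.
type fixedClock struct{ t time.Time }

func (c fixedClock) Now() time.Time                       { return c.t }
func (fixedClock) NewTicker(d time.Duration) *time.Ticker { return time.NewTicker(d) }

func main() {
	clk := fixedClock{t: time.Date(2021, 1, 1, 0, 0, 0, 0, time.UTC)}
	logger, _ := zap.NewDevelopment(zap.WithClock(clk))
	logger.Info("entry stamped with the pinned time")
}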
- // - // If this ever becomes a performance bottleneck, we can implement - // ArrayEncoder for our plain-text format. - arr := getSliceEncoder() - if c.TimeKey != "" && c.EncodeTime != nil { - c.EncodeTime(ent.Time, arr) - } - if c.LevelKey != "" && c.EncodeLevel != nil { - c.EncodeLevel(ent.Level, arr) - } - if ent.LoggerName != "" && c.NameKey != "" { - nameEncoder := c.EncodeName - - if nameEncoder == nil { - // Fall back to FullNameEncoder for backward compatibility. - nameEncoder = FullNameEncoder - } - - nameEncoder(ent.LoggerName, arr) - } - if ent.Caller.Defined { - if c.CallerKey != "" && c.EncodeCaller != nil { - c.EncodeCaller(ent.Caller, arr) - } - if c.FunctionKey != "" { - arr.AppendString(ent.Caller.Function) - } - } - for i := range arr.elems { - if i > 0 { - line.AppendString(c.ConsoleSeparator) - } - fmt.Fprint(line, arr.elems[i]) - } - putSliceEncoder(arr) - - // Add the message itself. - if c.MessageKey != "" { - c.addSeparatorIfNecessary(line) - line.AppendString(ent.Message) - } - - // Add any structured context. - c.writeContext(line, fields) - - // If there's no stacktrace key, honor that; this allows users to force - // single-line output. - if ent.Stack != "" && c.StacktraceKey != "" { - line.AppendByte('\n') - line.AppendString(ent.Stack) - } - - if c.LineEnding != "" { - line.AppendString(c.LineEnding) - } else { - line.AppendString(DefaultLineEnding) - } - return line, nil -} - -func (c consoleEncoder) writeContext(line *buffer.Buffer, extra []Field) { - context := c.jsonEncoder.Clone().(*jsonEncoder) - defer func() { - // putJSONEncoder assumes the buffer is still used, but we write out the buffer so - // we can free it. - context.buf.Free() - putJSONEncoder(context) - }() - - addFields(context, extra) - context.closeOpenNamespaces() - if context.buf.Len() == 0 { - return - } - - c.addSeparatorIfNecessary(line) - line.AppendByte('{') - line.Write(context.buf.Bytes()) - line.AppendByte('}') -} - -func (c consoleEncoder) addSeparatorIfNecessary(line *buffer.Buffer) { - if line.Len() > 0 { - line.AppendString(c.ConsoleSeparator) - } -} diff --git a/vendor/go.uber.org/zap/zapcore/core.go b/vendor/go.uber.org/zap/zapcore/core.go deleted file mode 100644 index a1ef8b03..00000000 --- a/vendor/go.uber.org/zap/zapcore/core.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zapcore - -// Core is a minimal, fast logger interface. 
It's designed for library authors -// to wrap in a more user-friendly API. -type Core interface { - LevelEnabler - - // With adds structured context to the Core. - With([]Field) Core - // Check determines whether the supplied Entry should be logged (using the - // embedded LevelEnabler and possibly some extra logic). If the entry - // should be logged, the Core adds itself to the CheckedEntry and returns - // the result. - // - // Callers must use Check before calling Write. - Check(Entry, *CheckedEntry) *CheckedEntry - // Write serializes the Entry and any Fields supplied at the log site and - // writes them to their destination. - // - // If called, Write should always log the Entry and Fields; it should not - // replicate the logic of Check. - Write(Entry, []Field) error - // Sync flushes buffered logs (if any). - Sync() error -} - -type nopCore struct{} - -// NewNopCore returns a no-op Core. -func NewNopCore() Core { return nopCore{} } -func (nopCore) Enabled(Level) bool { return false } -func (n nopCore) With([]Field) Core { return n } -func (nopCore) Check(_ Entry, ce *CheckedEntry) *CheckedEntry { return ce } -func (nopCore) Write(Entry, []Field) error { return nil } -func (nopCore) Sync() error { return nil } - -// NewCore creates a Core that writes logs to a WriteSyncer. -func NewCore(enc Encoder, ws WriteSyncer, enab LevelEnabler) Core { - return &ioCore{ - LevelEnabler: enab, - enc: enc, - out: ws, - } -} - -type ioCore struct { - LevelEnabler - enc Encoder - out WriteSyncer -} - -func (c *ioCore) With(fields []Field) Core { - clone := c.clone() - addFields(clone.enc, fields) - return clone -} - -func (c *ioCore) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { - if c.Enabled(ent.Level) { - return ce.AddCore(ent, c) - } - return ce -} - -func (c *ioCore) Write(ent Entry, fields []Field) error { - buf, err := c.enc.EncodeEntry(ent, fields) - if err != nil { - return err - } - _, err = c.out.Write(buf.Bytes()) - buf.Free() - if err != nil { - return err - } - if ent.Level > ErrorLevel { - // Since we may be crashing the program, sync the output. Ignore Sync - // errors, pending a clean solution to issue #370. - c.Sync() - } - return nil -} - -func (c *ioCore) Sync() error { - return c.out.Sync() -} - -func (c *ioCore) clone() *ioCore { - return &ioCore{ - LevelEnabler: c.LevelEnabler, - enc: c.enc.Clone(), - out: c.out, - } -} diff --git a/vendor/go.uber.org/zap/zapcore/doc.go b/vendor/go.uber.org/zap/zapcore/doc.go deleted file mode 100644 index 31000e91..00000000 --- a/vendor/go.uber.org/zap/zapcore/doc.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Package zapcore defines and implements the low-level interfaces upon which -// zap is built. By providing alternate implementations of these interfaces, -// external packages can extend zap's capabilities. -package zapcore // import "go.uber.org/zap/zapcore" diff --git a/vendor/go.uber.org/zap/zapcore/encoder.go b/vendor/go.uber.org/zap/zapcore/encoder.go deleted file mode 100644 index 6601ca16..00000000 --- a/vendor/go.uber.org/zap/zapcore/encoder.go +++ /dev/null @@ -1,443 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zapcore - -import ( - "encoding/json" - "time" - - "go.uber.org/zap/buffer" -) - -// DefaultLineEnding defines the default line ending when writing logs. -// Alternate line endings specified in EncoderConfig can override this -// behavior. -const DefaultLineEnding = "\n" - -// OmitKey defines the key to use when callers want to remove a key from log output. -const OmitKey = "" - -// A LevelEncoder serializes a Level to a primitive type. -type LevelEncoder func(Level, PrimitiveArrayEncoder) - -// LowercaseLevelEncoder serializes a Level to a lowercase string. For example, -// InfoLevel is serialized to "info". -func LowercaseLevelEncoder(l Level, enc PrimitiveArrayEncoder) { - enc.AppendString(l.String()) -} - -// LowercaseColorLevelEncoder serializes a Level to a lowercase string and adds coloring. -// For example, InfoLevel is serialized to "info" and colored blue. -func LowercaseColorLevelEncoder(l Level, enc PrimitiveArrayEncoder) { - s, ok := _levelToLowercaseColorString[l] - if !ok { - s = _unknownLevelColor.Add(l.String()) - } - enc.AppendString(s) -} - -// CapitalLevelEncoder serializes a Level to an all-caps string. For example, -// InfoLevel is serialized to "INFO". -func CapitalLevelEncoder(l Level, enc PrimitiveArrayEncoder) { - enc.AppendString(l.CapitalString()) -} - -// CapitalColorLevelEncoder serializes a Level to an all-caps string and adds color. -// For example, InfoLevel is serialized to "INFO" and colored blue. 
-func CapitalColorLevelEncoder(l Level, enc PrimitiveArrayEncoder) {
-	s, ok := _levelToCapitalColorString[l]
-	if !ok {
-		s = _unknownLevelColor.Add(l.CapitalString())
-	}
-	enc.AppendString(s)
-}
-
-// UnmarshalText unmarshals text to a LevelEncoder. "capital" is unmarshaled to
-// CapitalLevelEncoder, "capitalColor" is unmarshaled to CapitalColorLevelEncoder,
-// "color" is unmarshaled to LowercaseColorLevelEncoder, and anything else
-// is unmarshaled to LowercaseLevelEncoder.
-func (e *LevelEncoder) UnmarshalText(text []byte) error {
-	switch string(text) {
-	case "capital":
-		*e = CapitalLevelEncoder
-	case "capitalColor":
-		*e = CapitalColorLevelEncoder
-	case "color":
-		*e = LowercaseColorLevelEncoder
-	default:
-		*e = LowercaseLevelEncoder
-	}
-	return nil
-}
-
-// A TimeEncoder serializes a time.Time to a primitive type.
-type TimeEncoder func(time.Time, PrimitiveArrayEncoder)
-
-// EpochTimeEncoder serializes a time.Time to a floating-point number of seconds
-// since the Unix epoch.
-func EpochTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) {
-	nanos := t.UnixNano()
-	sec := float64(nanos) / float64(time.Second)
-	enc.AppendFloat64(sec)
-}
-
-// EpochMillisTimeEncoder serializes a time.Time to a floating-point number of
-// milliseconds since the Unix epoch.
-func EpochMillisTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) {
-	nanos := t.UnixNano()
-	millis := float64(nanos) / float64(time.Millisecond)
-	enc.AppendFloat64(millis)
-}
-
-// EpochNanosTimeEncoder serializes a time.Time to an integer number of
-// nanoseconds since the Unix epoch.
-func EpochNanosTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) {
-	enc.AppendInt64(t.UnixNano())
-}
-
-func encodeTimeLayout(t time.Time, layout string, enc PrimitiveArrayEncoder) {
-	type appendTimeEncoder interface {
-		AppendTimeLayout(time.Time, string)
-	}
-
-	if enc, ok := enc.(appendTimeEncoder); ok {
-		enc.AppendTimeLayout(t, layout)
-		return
-	}
-
-	enc.AppendString(t.Format(layout))
-}
-
-// ISO8601TimeEncoder serializes a time.Time to an ISO8601-formatted string
-// with millisecond precision.
-//
-// If enc supports AppendTimeLayout(t time.Time,layout string), it's used
-// instead of appending a pre-formatted string value.
-func ISO8601TimeEncoder(t time.Time, enc PrimitiveArrayEncoder) {
-	encodeTimeLayout(t, "2006-01-02T15:04:05.000Z0700", enc)
-}
-
-// RFC3339TimeEncoder serializes a time.Time to an RFC3339-formatted string.
-//
-// If enc supports AppendTimeLayout(t time.Time,layout string), it's used
-// instead of appending a pre-formatted string value.
-func RFC3339TimeEncoder(t time.Time, enc PrimitiveArrayEncoder) {
-	encodeTimeLayout(t, time.RFC3339, enc)
-}
-
-// RFC3339NanoTimeEncoder serializes a time.Time to an RFC3339-formatted string
-// with nanosecond precision.
-//
-// If enc supports AppendTimeLayout(t time.Time,layout string), it's used
-// instead of appending a pre-formatted string value.
-func RFC3339NanoTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) {
-	encodeTimeLayout(t, time.RFC3339Nano, enc)
-}
-
-// TimeEncoderOfLayout returns TimeEncoder which serializes a time.Time using
-// given layout.
-func TimeEncoderOfLayout(layout string) TimeEncoder {
-	return func(t time.Time, enc PrimitiveArrayEncoder) {
-		encodeTimeLayout(t, layout, enc)
-	}
-}
-
-// UnmarshalText unmarshals text to a TimeEncoder.
-// "rfc3339nano" and "RFC3339Nano" are unmarshaled to RFC3339NanoTimeEncoder.
-// "rfc3339" and "RFC3339" are unmarshaled to RFC3339TimeEncoder.
-// "iso8601" and "ISO8601" are unmarshaled to ISO8601TimeEncoder. -// "millis" is unmarshaled to EpochMillisTimeEncoder. -// "nanos" is unmarshaled to EpochNanosTimeEncoder. -// Anything else is unmarshaled to EpochTimeEncoder. -func (e *TimeEncoder) UnmarshalText(text []byte) error { - switch string(text) { - case "rfc3339nano", "RFC3339Nano": - *e = RFC3339NanoTimeEncoder - case "rfc3339", "RFC3339": - *e = RFC3339TimeEncoder - case "iso8601", "ISO8601": - *e = ISO8601TimeEncoder - case "millis": - *e = EpochMillisTimeEncoder - case "nanos": - *e = EpochNanosTimeEncoder - default: - *e = EpochTimeEncoder - } - return nil -} - -// UnmarshalYAML unmarshals YAML to a TimeEncoder. -// If value is an object with a "layout" field, it will be unmarshaled to a TimeEncoder with the given layout. -// timeEncoder: -// layout: 06/01/02 03:04pm -// If value is string, it uses UnmarshalText. -// timeEncoder: iso8601 -func (e *TimeEncoder) UnmarshalYAML(unmarshal func(interface{}) error) error { - var o struct { - Layout string `json:"layout" yaml:"layout"` - } - if err := unmarshal(&o); err == nil { - *e = TimeEncoderOfLayout(o.Layout) - return nil - } - - var s string - if err := unmarshal(&s); err != nil { - return err - } - return e.UnmarshalText([]byte(s)) -} - -// UnmarshalJSON unmarshals JSON to a TimeEncoder the same way UnmarshalYAML does. -func (e *TimeEncoder) UnmarshalJSON(data []byte) error { - return e.UnmarshalYAML(func(v interface{}) error { - return json.Unmarshal(data, v) - }) -} - -// A DurationEncoder serializes a time.Duration to a primitive type. -type DurationEncoder func(time.Duration, PrimitiveArrayEncoder) - -// SecondsDurationEncoder serializes a time.Duration to a floating-point number of seconds elapsed. -func SecondsDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) { - enc.AppendFloat64(float64(d) / float64(time.Second)) -} - -// NanosDurationEncoder serializes a time.Duration to an integer number of -// nanoseconds elapsed. -func NanosDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) { - enc.AppendInt64(int64(d)) -} - -// MillisDurationEncoder serializes a time.Duration to an integer number of -// milliseconds elapsed. -func MillisDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) { - enc.AppendInt64(d.Nanoseconds() / 1e6) -} - -// StringDurationEncoder serializes a time.Duration using its built-in String -// method. -func StringDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) { - enc.AppendString(d.String()) -} - -// UnmarshalText unmarshals text to a DurationEncoder. "string" is unmarshaled -// to StringDurationEncoder, "nanos" to NanosDurationEncoder, "ms" to -// MillisDurationEncoder, and anything else to SecondsDurationEncoder. -func (e *DurationEncoder) UnmarshalText(text []byte) error { - switch string(text) { - case "string": - *e = StringDurationEncoder - case "nanos": - *e = NanosDurationEncoder - case "ms": - *e = MillisDurationEncoder - default: - *e = SecondsDurationEncoder - } - return nil -} - -// A CallerEncoder serializes an EntryCaller to a primitive type. -type CallerEncoder func(EntryCaller, PrimitiveArrayEncoder) - -// FullCallerEncoder serializes a caller in /full/path/to/package/file:line -// format. -func FullCallerEncoder(caller EntryCaller, enc PrimitiveArrayEncoder) { - // TODO: consider using a byte-oriented API to save an allocation. - enc.AppendString(caller.String()) -} - -// ShortCallerEncoder serializes a caller in package/file:line format, trimming -// all but the final directory from the full path.
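The text forms accepted above are how encoder choices typically arrive from config files; a small sketch of resolving them (the layout choice is illustrative):

package main

import (
	"time"

	"go.uber.org/zap/zapcore"
)

func main() {
	// Resolve encoders from config-style strings, per UnmarshalText above.
	var te zapcore.TimeEncoder
	if err := te.UnmarshalText([]byte("iso8601")); err != nil { // -> ISO8601TimeEncoder
		panic(err)
	}
	var de zapcore.DurationEncoder
	if err := de.UnmarshalText([]byte("ms")); err != nil { // -> MillisDurationEncoder
		panic(err)
	}

	// Or build a TimeEncoder from an explicit layout.
	te = zapcore.TimeEncoderOfLayout(time.RFC822)
	_, _ = te, de
}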
-func ShortCallerEncoder(caller EntryCaller, enc PrimitiveArrayEncoder) { - // TODO: consider using a byte-oriented API to save an allocation. - enc.AppendString(caller.TrimmedPath()) -} - -// UnmarshalText unmarshals text to a CallerEncoder. "full" is unmarshaled to -// FullCallerEncoder and anything else is unmarshaled to ShortCallerEncoder. -func (e *CallerEncoder) UnmarshalText(text []byte) error { - switch string(text) { - case "full": - *e = FullCallerEncoder - default: - *e = ShortCallerEncoder - } - return nil -} - -// A NameEncoder serializes a period-separated logger name to a primitive -// type. -type NameEncoder func(string, PrimitiveArrayEncoder) - -// FullNameEncoder serializes the logger name as-is. -func FullNameEncoder(loggerName string, enc PrimitiveArrayEncoder) { - enc.AppendString(loggerName) -} - -// UnmarshalText unmarshals text to a NameEncoder. Currently, everything is -// unmarshaled to FullNameEncoder. -func (e *NameEncoder) UnmarshalText(text []byte) error { - switch string(text) { - case "full": - *e = FullNameEncoder - default: - *e = FullNameEncoder - } - return nil -} - -// An EncoderConfig allows users to configure the concrete encoders supplied by -// zapcore. -type EncoderConfig struct { - // Set the keys used for each log entry. If any key is empty, that portion - // of the entry is omitted. - MessageKey string `json:"messageKey" yaml:"messageKey"` - LevelKey string `json:"levelKey" yaml:"levelKey"` - TimeKey string `json:"timeKey" yaml:"timeKey"` - NameKey string `json:"nameKey" yaml:"nameKey"` - CallerKey string `json:"callerKey" yaml:"callerKey"` - FunctionKey string `json:"functionKey" yaml:"functionKey"` - StacktraceKey string `json:"stacktraceKey" yaml:"stacktraceKey"` - LineEnding string `json:"lineEnding" yaml:"lineEnding"` - // Configure the primitive representations of common complex types. For - // example, some users may want all time.Times serialized as floating-point - // seconds since epoch, while others may prefer ISO8601 strings. - EncodeLevel LevelEncoder `json:"levelEncoder" yaml:"levelEncoder"` - EncodeTime TimeEncoder `json:"timeEncoder" yaml:"timeEncoder"` - EncodeDuration DurationEncoder `json:"durationEncoder" yaml:"durationEncoder"` - EncodeCaller CallerEncoder `json:"callerEncoder" yaml:"callerEncoder"` - // Unlike the other primitive type encoders, EncodeName is optional. The - // zero value falls back to FullNameEncoder. - EncodeName NameEncoder `json:"nameEncoder" yaml:"nameEncoder"` - // Configures the field separator used by the console encoder. Defaults - // to tab. - ConsoleSeparator string `json:"consoleSeparator" yaml:"consoleSeparator"` -} - -// ObjectEncoder is a strongly-typed, encoding-agnostic interface for adding a -// map- or struct-like object to the logging context. Like maps, ObjectEncoders -// aren't safe for concurrent use (though typical use shouldn't require locks). -type ObjectEncoder interface { - // Logging-specific marshalers. - AddArray(key string, marshaler ArrayMarshaler) error - AddObject(key string, marshaler ObjectMarshaler) error - - // Built-in types. 
- AddBinary(key string, value []byte) // for arbitrary bytes - AddByteString(key string, value []byte) // for UTF-8 encoded bytes - AddBool(key string, value bool) - AddComplex128(key string, value complex128) - AddComplex64(key string, value complex64) - AddDuration(key string, value time.Duration) - AddFloat64(key string, value float64) - AddFloat32(key string, value float32) - AddInt(key string, value int) - AddInt64(key string, value int64) - AddInt32(key string, value int32) - AddInt16(key string, value int16) - AddInt8(key string, value int8) - AddString(key, value string) - AddTime(key string, value time.Time) - AddUint(key string, value uint) - AddUint64(key string, value uint64) - AddUint32(key string, value uint32) - AddUint16(key string, value uint16) - AddUint8(key string, value uint8) - AddUintptr(key string, value uintptr) - - // AddReflected uses reflection to serialize arbitrary objects, so it can be - // slow and allocation-heavy. - AddReflected(key string, value interface{}) error - // OpenNamespace opens an isolated namespace where all subsequent fields will - // be added. Applications can use namespaces to prevent key collisions when - // injecting loggers into sub-components or third-party libraries. - OpenNamespace(key string) -} - -// ArrayEncoder is a strongly-typed, encoding-agnostic interface for adding -// array-like objects to the logging context. Of note, it supports mixed-type -// arrays even though they aren't typical in Go. Like slices, ArrayEncoders -// aren't safe for concurrent use (though typical use shouldn't require locks). -type ArrayEncoder interface { - // Built-in types. - PrimitiveArrayEncoder - - // Time-related types. - AppendDuration(time.Duration) - AppendTime(time.Time) - - // Logging-specific marshalers. - AppendArray(ArrayMarshaler) error - AppendObject(ObjectMarshaler) error - - // AppendReflected uses reflection to serialize arbitrary objects, so it's - // slow and allocation-heavy. - AppendReflected(value interface{}) error -} - -// PrimitiveArrayEncoder is the subset of the ArrayEncoder interface that deals -// only in Go's built-in types. It's included only so that Duration- and -// TimeEncoders cannot trigger infinite recursion. -type PrimitiveArrayEncoder interface { - // Built-in types. - AppendBool(bool) - AppendByteString([]byte) // for UTF-8 encoded bytes - AppendComplex128(complex128) - AppendComplex64(complex64) - AppendFloat64(float64) - AppendFloat32(float32) - AppendInt(int) - AppendInt64(int64) - AppendInt32(int32) - AppendInt16(int16) - AppendInt8(int8) - AppendString(string) - AppendUint(uint) - AppendUint64(uint64) - AppendUint32(uint32) - AppendUint16(uint16) - AppendUint8(uint8) - AppendUintptr(uintptr) -} - -// Encoder is a format-agnostic interface for all log entry marshalers. Since -// log encoders don't need to support the same wide range of use cases as -// general-purpose marshalers, it's possible to make them faster and -// lower-allocation. -// -// Implementations of the ObjectEncoder interface's methods can, of course, -// freely modify the receiver. However, the Clone and EncodeEntry methods will -// be called concurrently and shouldn't modify the receiver. -type Encoder interface { - ObjectEncoder - - // Clone copies the encoder, ensuring that adding fields to the copy doesn't - // affect the original. - Clone() Encoder - - // EncodeEntry encodes an entry and fields, along with any accumulated - // context, into a byte buffer and returns it. 
Any fields that are empty, - // including fields on the `Entry` type, should be omitted. - EncodeEntry(Entry, []Field) (*buffer.Buffer, error) -} diff --git a/vendor/go.uber.org/zap/zapcore/entry.go b/vendor/go.uber.org/zap/zapcore/entry.go deleted file mode 100644 index 0885505b..00000000 --- a/vendor/go.uber.org/zap/zapcore/entry.go +++ /dev/null @@ -1,262 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zapcore - -import ( - "fmt" - "runtime" - "strings" - "sync" - "time" - - "go.uber.org/zap/internal/bufferpool" - "go.uber.org/zap/internal/exit" - - "go.uber.org/multierr" -) - -var ( - _cePool = sync.Pool{New: func() interface{} { - // Pre-allocate some space for cores. - return &CheckedEntry{ - cores: make([]Core, 4), - } - }} -) - -func getCheckedEntry() *CheckedEntry { - ce := _cePool.Get().(*CheckedEntry) - ce.reset() - return ce -} - -func putCheckedEntry(ce *CheckedEntry) { - if ce == nil { - return - } - _cePool.Put(ce) -} - -// NewEntryCaller makes an EntryCaller from the return signature of -// runtime.Caller. -func NewEntryCaller(pc uintptr, file string, line int, ok bool) EntryCaller { - if !ok { - return EntryCaller{} - } - return EntryCaller{ - PC: pc, - File: file, - Line: line, - Defined: true, - } -} - -// EntryCaller represents the caller of a logging function. -type EntryCaller struct { - Defined bool - PC uintptr - File string - Line int - Function string -} - -// String returns the full path and line number of the caller. -func (ec EntryCaller) String() string { - return ec.FullPath() -} - -// FullPath returns a /full/path/to/package/file:line description of the -// caller. -func (ec EntryCaller) FullPath() string { - if !ec.Defined { - return "undefined" - } - buf := bufferpool.Get() - buf.AppendString(ec.File) - buf.AppendByte(':') - buf.AppendInt(int64(ec.Line)) - caller := buf.String() - buf.Free() - return caller -} - -// TrimmedPath returns a package/file:line description of the caller, -// preserving only the leaf directory name and file name. -func (ec EntryCaller) TrimmedPath() string { - if !ec.Defined { - return "undefined" - } - // nb. To make sure we trim the path correctly on Windows too, we - // counter-intuitively need to use '/' and *not* os.PathSeparator here, - // because the path given originates from Go stdlib, specifically - // runtime.Caller() which (as of Mar/17) returns forward slashes even on - // Windows. 
- // - // See https://github.com/golang/go/issues/3335 - // and https://github.com/golang/go/issues/18151 - // - // for discussion on the issue on Go side. - // - // Find the last separator. - // - idx := strings.LastIndexByte(ec.File, '/') - if idx == -1 { - return ec.FullPath() - } - // Find the penultimate separator. - idx = strings.LastIndexByte(ec.File[:idx], '/') - if idx == -1 { - return ec.FullPath() - } - buf := bufferpool.Get() - // Keep everything after the penultimate separator. - buf.AppendString(ec.File[idx+1:]) - buf.AppendByte(':') - buf.AppendInt(int64(ec.Line)) - caller := buf.String() - buf.Free() - return caller -} - -// An Entry represents a complete log message. The entry's structured context -// is already serialized, but the log level, time, message, and call site -// information are available for inspection and modification. Any fields left -// empty will be omitted when encoding. -// -// Entries are pooled, so any functions that accept them MUST be careful not to -// retain references to them. -type Entry struct { - Level Level - Time time.Time - LoggerName string - Message string - Caller EntryCaller - Stack string -} - -// CheckWriteAction indicates what action to take after a log entry is -// processed. Actions are ordered in increasing severity. -type CheckWriteAction uint8 - -const ( - // WriteThenNoop indicates that nothing special needs to be done. It's the - // default behavior. - WriteThenNoop CheckWriteAction = iota - // WriteThenGoexit runs runtime.Goexit after Write. - WriteThenGoexit - // WriteThenPanic causes a panic after Write. - WriteThenPanic - // WriteThenFatal causes a fatal os.Exit after Write. - WriteThenFatal -) - -// CheckedEntry is an Entry together with a collection of Cores that have -// already agreed to log it. -// -// CheckedEntry references should be created by calling AddCore or Should on a -// nil *CheckedEntry. References are returned to a pool after Write, and MUST -// NOT be retained after calling their Write method. -type CheckedEntry struct { - Entry - ErrorOutput WriteSyncer - dirty bool // best-effort detection of pool misuse - should CheckWriteAction - cores []Core -} - -func (ce *CheckedEntry) reset() { - ce.Entry = Entry{} - ce.ErrorOutput = nil - ce.dirty = false - ce.should = WriteThenNoop - for i := range ce.cores { - // don't keep references to cores - ce.cores[i] = nil - } - ce.cores = ce.cores[:0] -} - -// Write writes the entry to the stored Cores, returns any errors, and returns -// the CheckedEntry reference to a pool for immediate re-use. Finally, it -// executes any required CheckWriteAction. -func (ce *CheckedEntry) Write(fields ...Field) { - if ce == nil { - return - } - - if ce.dirty { - if ce.ErrorOutput != nil { - // Make a best effort to detect unsafe re-use of this CheckedEntry. - // If the entry is dirty, log an internal error; because the - // CheckedEntry is being used after it was returned to the pool, - // the message may be an amalgamation from multiple call sites. 
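The CheckedEntry contract above is easiest to see from the consumer side; a sketch of the canonical Core.Check pattern built on the AddCore helper defined just below (the wrapper type is illustrative):

package example

import "go.uber.org/zap/zapcore"

// filteringCore sketches the usual Check implementation: register on the
// CheckedEntry via AddCore only when the entry's level is enabled.
type filteringCore struct{ zapcore.Core }

func (c filteringCore) Check(ent zapcore.Entry, ce *zapcore.CheckedEntry) *zapcore.CheckedEntry {
	if c.Enabled(ent.Level) {
		return ce.AddCore(ent, c)
	}
	return ce
}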
- fmt.Fprintf(ce.ErrorOutput, "%v Unsafe CheckedEntry re-use near Entry %+v.\n", ce.Time, ce.Entry) - ce.ErrorOutput.Sync() - } - return - } - ce.dirty = true - - var err error - for i := range ce.cores { - err = multierr.Append(err, ce.cores[i].Write(ce.Entry, fields)) - } - if err != nil && ce.ErrorOutput != nil { - fmt.Fprintf(ce.ErrorOutput, "%v write error: %v\n", ce.Time, err) - ce.ErrorOutput.Sync() - } - - should, msg := ce.should, ce.Message - putCheckedEntry(ce) - - switch should { - case WriteThenPanic: - panic(msg) - case WriteThenFatal: - exit.Exit() - case WriteThenGoexit: - runtime.Goexit() - } -} - -// AddCore adds a Core that has agreed to log this CheckedEntry. It's intended to be -// used by Core.Check implementations, and is safe to call on nil CheckedEntry -// references. -func (ce *CheckedEntry) AddCore(ent Entry, core Core) *CheckedEntry { - if ce == nil { - ce = getCheckedEntry() - ce.Entry = ent - } - ce.cores = append(ce.cores, core) - return ce -} - -// Should sets this CheckedEntry's CheckWriteAction, which controls whether a -// Core will panic or fatal after writing this log entry. Like AddCore, it's -// safe to call on nil CheckedEntry references. -func (ce *CheckedEntry) Should(ent Entry, should CheckWriteAction) *CheckedEntry { - if ce == nil { - ce = getCheckedEntry() - ce.Entry = ent - } - ce.should = should - return ce -} diff --git a/vendor/go.uber.org/zap/zapcore/error.go b/vendor/go.uber.org/zap/zapcore/error.go deleted file mode 100644 index 74919b0c..00000000 --- a/vendor/go.uber.org/zap/zapcore/error.go +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zapcore - -import ( - "fmt" - "reflect" - "sync" -) - -// Encodes the given error into fields of an object. A field with the given -// name is added for the error message. -// -// If the error implements fmt.Formatter, a field with the name ${key}Verbose -// is also added with the full verbose error message. -// -// Finally, if the error implements errorGroup (from go.uber.org/multierr), a -// ${key}Causes field is added with an array of objects containing the errors -// this error was comprised of. -// -// { -// "error": err.Error(), -// "errorVerbose": fmt.Sprintf("%+v", err), -// "errorCauses": [ -// ...
-// ], -// } -func encodeError(key string, err error, enc ObjectEncoder) (retErr error) { - // Try to capture panics (from nil references or otherwise) when calling - // the Error() method - defer func() { - if rerr := recover(); rerr != nil { - // If it's a nil pointer, just say "<nil>". The likeliest causes are a - // error that fails to guard against nil or a nil pointer for a - // value receiver, and in either case, "<nil>" is a nice result. - if v := reflect.ValueOf(err); v.Kind() == reflect.Ptr && v.IsNil() { - enc.AddString(key, "<nil>") - return - } - - retErr = fmt.Errorf("PANIC=%v", rerr) - } - }() - - basic := err.Error() - enc.AddString(key, basic) - - switch e := err.(type) { - case errorGroup: - return enc.AddArray(key+"Causes", errArray(e.Errors())) - case fmt.Formatter: - verbose := fmt.Sprintf("%+v", e) - if verbose != basic { - // This is a rich error type, like those produced by - // github.com/pkg/errors. - enc.AddString(key+"Verbose", verbose) - } - } - return nil -} - -type errorGroup interface { - // Provides read-only access to the underlying list of errors, preferably - // without causing any allocs. - Errors() []error -} - -// Note that errArray and errArrayElem are very similar to the version -// implemented in the top-level error.go file. We can't re-use this because -// that would require exporting errArray as part of the zapcore API. - -// Encodes a list of errors using the standard error encoding logic. -type errArray []error - -func (errs errArray) MarshalLogArray(arr ArrayEncoder) error { - for i := range errs { - if errs[i] == nil { - continue - } - - el := newErrArrayElem(errs[i]) - arr.AppendObject(el) - el.Free() - } - return nil -} - -var _errArrayElemPool = sync.Pool{New: func() interface{} { - return &errArrayElem{} -}} - -// Encodes any error into a {"error": ...} re-using the same errors logic. -// -// May be passed in place of an array to build a single-element array. -type errArrayElem struct{ err error } - -func newErrArrayElem(err error) *errArrayElem { - e := _errArrayElemPool.Get().(*errArrayElem) - e.err = err - return e -} - -func (e *errArrayElem) MarshalLogArray(arr ArrayEncoder) error { - return arr.AppendObject(e) -} - -func (e *errArrayElem) MarshalLogObject(enc ObjectEncoder) error { - return encodeError("error", e.err, enc) -} - -func (e *errArrayElem) Free() { - e.err = nil - _errArrayElemPool.Put(e) -} diff --git a/vendor/go.uber.org/zap/zapcore/field.go b/vendor/go.uber.org/zap/zapcore/field.go deleted file mode 100644 index 95bdb0a1..00000000 --- a/vendor/go.uber.org/zap/zapcore/field.go +++ /dev/null @@ -1,233 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zapcore - -import ( - "bytes" - "fmt" - "math" - "reflect" - "time" -) - -// A FieldType indicates which member of the Field union struct should be used -// and how it should be serialized. -type FieldType uint8 - -const ( - // UnknownType is the default field type. Attempting to add it to an encoder will panic. - UnknownType FieldType = iota - // ArrayMarshalerType indicates that the field carries an ArrayMarshaler. - ArrayMarshalerType - // ObjectMarshalerType indicates that the field carries an ObjectMarshaler. - ObjectMarshalerType - // BinaryType indicates that the field carries an opaque binary blob. - BinaryType - // BoolType indicates that the field carries a bool. - BoolType - // ByteStringType indicates that the field carries UTF-8 encoded bytes. - ByteStringType - // Complex128Type indicates that the field carries a complex128. - Complex128Type - // Complex64Type indicates that the field carries a complex64. - Complex64Type - // DurationType indicates that the field carries a time.Duration. - DurationType - // Float64Type indicates that the field carries a float64. - Float64Type - // Float32Type indicates that the field carries a float32. - Float32Type - // Int64Type indicates that the field carries an int64. - Int64Type - // Int32Type indicates that the field carries an int32. - Int32Type - // Int16Type indicates that the field carries an int16. - Int16Type - // Int8Type indicates that the field carries an int8. - Int8Type - // StringType indicates that the field carries a string. - StringType - // TimeType indicates that the field carries a time.Time that is - // representable by a UnixNano() stored as an int64. - TimeType - // TimeFullType indicates that the field carries a time.Time stored as-is. - TimeFullType - // Uint64Type indicates that the field carries a uint64. - Uint64Type - // Uint32Type indicates that the field carries a uint32. - Uint32Type - // Uint16Type indicates that the field carries a uint16. - Uint16Type - // Uint8Type indicates that the field carries a uint8. - Uint8Type - // UintptrType indicates that the field carries a uintptr. - UintptrType - // ReflectType indicates that the field carries an interface{}, which should - // be serialized using reflection. - ReflectType - // NamespaceType signals the beginning of an isolated namespace. All - // subsequent fields should be added to the new namespace. - NamespaceType - // StringerType indicates that the field carries a fmt.Stringer. - StringerType - // ErrorType indicates that the field carries an error. - ErrorType - // SkipType indicates that the field is a no-op. - SkipType - - // InlineMarshalerType indicates that the field carries an ObjectMarshaler - // that should be inlined. - InlineMarshalerType -) - -// A Field is a marshaling operation used to add a key-value pair to a logger's -// context. Most fields are lazily marshaled, so it's inexpensive to add fields -// to disabled debug-level log statements. -type Field struct { - Key string - Type FieldType - Integer int64 - String string - Interface interface{} -} - -// AddTo exports a field through the ObjectEncoder interface. It's primarily -// useful to library authors, and shouldn't be necessary in most applications.
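In practice the union above is populated by constructors in the parent zap package and replayed through AddTo; a sketch using the MapObjectEncoder test helper that appears later in this diff (the key and value are illustrative):

package main

import (
	"fmt"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	f := zap.String("user", "alice") // Field{Key: "user", Type: StringType, String: "alice"}
	enc := zapcore.NewMapObjectEncoder()
	f.AddTo(enc)
	fmt.Println(enc.Fields["user"]) // alice
}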
-func (f Field) AddTo(enc ObjectEncoder) { - var err error - - switch f.Type { - case ArrayMarshalerType: - err = enc.AddArray(f.Key, f.Interface.(ArrayMarshaler)) - case ObjectMarshalerType: - err = enc.AddObject(f.Key, f.Interface.(ObjectMarshaler)) - case InlineMarshalerType: - err = f.Interface.(ObjectMarshaler).MarshalLogObject(enc) - case BinaryType: - enc.AddBinary(f.Key, f.Interface.([]byte)) - case BoolType: - enc.AddBool(f.Key, f.Integer == 1) - case ByteStringType: - enc.AddByteString(f.Key, f.Interface.([]byte)) - case Complex128Type: - enc.AddComplex128(f.Key, f.Interface.(complex128)) - case Complex64Type: - enc.AddComplex64(f.Key, f.Interface.(complex64)) - case DurationType: - enc.AddDuration(f.Key, time.Duration(f.Integer)) - case Float64Type: - enc.AddFloat64(f.Key, math.Float64frombits(uint64(f.Integer))) - case Float32Type: - enc.AddFloat32(f.Key, math.Float32frombits(uint32(f.Integer))) - case Int64Type: - enc.AddInt64(f.Key, f.Integer) - case Int32Type: - enc.AddInt32(f.Key, int32(f.Integer)) - case Int16Type: - enc.AddInt16(f.Key, int16(f.Integer)) - case Int8Type: - enc.AddInt8(f.Key, int8(f.Integer)) - case StringType: - enc.AddString(f.Key, f.String) - case TimeType: - if f.Interface != nil { - enc.AddTime(f.Key, time.Unix(0, f.Integer).In(f.Interface.(*time.Location))) - } else { - // Fall back to UTC if location is nil. - enc.AddTime(f.Key, time.Unix(0, f.Integer)) - } - case TimeFullType: - enc.AddTime(f.Key, f.Interface.(time.Time)) - case Uint64Type: - enc.AddUint64(f.Key, uint64(f.Integer)) - case Uint32Type: - enc.AddUint32(f.Key, uint32(f.Integer)) - case Uint16Type: - enc.AddUint16(f.Key, uint16(f.Integer)) - case Uint8Type: - enc.AddUint8(f.Key, uint8(f.Integer)) - case UintptrType: - enc.AddUintptr(f.Key, uintptr(f.Integer)) - case ReflectType: - err = enc.AddReflected(f.Key, f.Interface) - case NamespaceType: - enc.OpenNamespace(f.Key) - case StringerType: - err = encodeStringer(f.Key, f.Interface, enc) - case ErrorType: - err = encodeError(f.Key, f.Interface.(error), enc) - case SkipType: - break - default: - panic(fmt.Sprintf("unknown field type: %v", f)) - } - - if err != nil { - enc.AddString(fmt.Sprintf("%sError", f.Key), err.Error()) - } -} - -// Equals returns whether two fields are equal. For non-primitive types such as -// errors, marshalers, or reflect types, it uses reflect.DeepEqual. -func (f Field) Equals(other Field) bool { - if f.Type != other.Type { - return false - } - if f.Key != other.Key { - return false - } - - switch f.Type { - case BinaryType, ByteStringType: - return bytes.Equal(f.Interface.([]byte), other.Interface.([]byte)) - case ArrayMarshalerType, ObjectMarshalerType, ErrorType, ReflectType: - return reflect.DeepEqual(f.Interface, other.Interface) - default: - return f == other - } -} - -func addFields(enc ObjectEncoder, fields []Field) { - for i := range fields { - fields[i].AddTo(enc) - } -} - -func encodeStringer(key string, stringer interface{}, enc ObjectEncoder) (retErr error) { - // Try to capture panics (from nil references or otherwise) when calling - // the String() method, similar to https://golang.org/src/fmt/print.go#L540 - defer func() { - if err := recover(); err != nil { - // If it's a nil pointer, just say "<nil>". The likeliest causes are a - // Stringer that fails to guard against nil or a nil pointer for a - // value receiver, and in either case, "<nil>" is a nice result.
- if v := reflect.ValueOf(stringer); v.Kind() == reflect.Ptr && v.IsNil() { - enc.AddString(key, "<nil>") - return - } - - retErr = fmt.Errorf("PANIC=%v", err) - } - }() - - enc.AddString(key, stringer.(fmt.Stringer).String()) - return nil -} diff --git a/vendor/go.uber.org/zap/zapcore/hook.go b/vendor/go.uber.org/zap/zapcore/hook.go deleted file mode 100644 index 5db4afb3..00000000 --- a/vendor/go.uber.org/zap/zapcore/hook.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zapcore - -import "go.uber.org/multierr" - -type hooked struct { - Core - funcs []func(Entry) error -} - -// RegisterHooks wraps a Core and runs a collection of user-defined callback -// hooks each time a message is logged. Execution of the callbacks is blocking. -// -// This offers users an easy way to register simple callbacks (e.g., metrics -// collection) without implementing the full Core interface. -func RegisterHooks(core Core, hooks ...func(Entry) error) Core { - funcs := append([]func(Entry) error{}, hooks...) - return &hooked{ - Core: core, - funcs: funcs, - } -} - -func (h *hooked) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { - // Let the wrapped Core decide whether to log this message or not. This - // also gives the downstream a chance to register itself directly with the - // CheckedEntry. - if downstream := h.Core.Check(ent, ce); downstream != nil { - return downstream.AddCore(ent, h) - } - return ce -} - -func (h *hooked) With(fields []Field) Core { - return &hooked{ - Core: h.Core.With(fields), - funcs: h.funcs, - } -} - -func (h *hooked) Write(ent Entry, _ []Field) error { - // Since our downstream had a chance to register itself directly with the - // CheckedMessage, we don't need to call it here. - var err error - for i := range h.funcs { - err = multierr.Append(err, h.funcs[i](ent)) - } - return err -} diff --git a/vendor/go.uber.org/zap/zapcore/increase_level.go b/vendor/go.uber.org/zap/zapcore/increase_level.go deleted file mode 100644 index 5a174926..00000000 --- a/vendor/go.uber.org/zap/zapcore/increase_level.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright (c) 2020 Uber Technologies, Inc.
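A sketch of the metrics-style callback that the RegisterHooks doc above suggests (the counter variable is illustrative):

package main

import (
	"fmt"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	var entries int
	core := zapcore.RegisterHooks(zap.NewExample().Core(), func(zapcore.Entry) error {
		entries++ // runs once per logged entry, blocking
		return nil
	})
	logger := zap.New(core)
	logger.Info("one")
	logger.Info("two")
	fmt.Println(entries) // 2
}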
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zapcore - -import "fmt" - -type levelFilterCore struct { - core Core - level LevelEnabler -} - -// NewIncreaseLevelCore creates a core that can be used to increase the level of -// an existing Core. It cannot be used to decrease the logging level, as it acts -// as a filter before calling the underlying core. If level decreases the log level, -// an error is returned. -func NewIncreaseLevelCore(core Core, level LevelEnabler) (Core, error) { - for l := _maxLevel; l >= _minLevel; l-- { - if !core.Enabled(l) && level.Enabled(l) { - return nil, fmt.Errorf("invalid increase level, as level %q is allowed by increased level, but not by existing core", l) - } - } - - return &levelFilterCore{core, level}, nil -} - -func (c *levelFilterCore) Enabled(lvl Level) bool { - return c.level.Enabled(lvl) -} - -func (c *levelFilterCore) With(fields []Field) Core { - return &levelFilterCore{c.core.With(fields), c.level} -} - -func (c *levelFilterCore) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { - if !c.Enabled(ent.Level) { - return ce - } - - return c.core.Check(ent, ce) -} - -func (c *levelFilterCore) Write(ent Entry, fields []Field) error { - return c.core.Write(ent, fields) -} - -func (c *levelFilterCore) Sync() error { - return c.core.Sync() -} diff --git a/vendor/go.uber.org/zap/zapcore/json_encoder.go b/vendor/go.uber.org/zap/zapcore/json_encoder.go deleted file mode 100644 index af220d9b..00000000 --- a/vendor/go.uber.org/zap/zapcore/json_encoder.go +++ /dev/null @@ -1,542 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
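Since NewIncreaseLevelCore above can only tighten filtering, a typical use wraps an existing permissive core; a sketch (zap.NewExample's core enables debug and above, so raising it to warn is valid):

package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	base := zap.NewExample().Core() // enables debug and above
	warnCore, err := zapcore.NewIncreaseLevelCore(base, zapcore.WarnLevel)
	if err != nil {
		panic(err) // would fire only if the "increase" actually lowered the level
	}
	logger := zap.New(warnCore)
	logger.Info("filtered out")
	logger.Warn("still logged")
}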
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zapcore - -import ( - "encoding/base64" - "encoding/json" - "math" - "sync" - "time" - "unicode/utf8" - - "go.uber.org/zap/buffer" - "go.uber.org/zap/internal/bufferpool" -) - -// For JSON-escaping; see jsonEncoder.safeAddString below. -const _hex = "0123456789abcdef" - -var _jsonPool = sync.Pool{New: func() interface{} { - return &jsonEncoder{} -}} - -func getJSONEncoder() *jsonEncoder { - return _jsonPool.Get().(*jsonEncoder) -} - -func putJSONEncoder(enc *jsonEncoder) { - if enc.reflectBuf != nil { - enc.reflectBuf.Free() - } - enc.EncoderConfig = nil - enc.buf = nil - enc.spaced = false - enc.openNamespaces = 0 - enc.reflectBuf = nil - enc.reflectEnc = nil - _jsonPool.Put(enc) -} - -type jsonEncoder struct { - *EncoderConfig - buf *buffer.Buffer - spaced bool // include spaces after colons and commas - openNamespaces int - - // for encoding generic values by reflection - reflectBuf *buffer.Buffer - reflectEnc *json.Encoder -} - -// NewJSONEncoder creates a fast, low-allocation JSON encoder. The encoder -// appropriately escapes all field keys and values. -// -// Note that the encoder doesn't deduplicate keys, so it's possible to produce -// a message like -// {"foo":"bar","foo":"baz"} -// This is permitted by the JSON specification, but not encouraged. Many -// libraries will ignore duplicate key-value pairs (typically keeping the last -// pair) when unmarshaling, but users should attempt to avoid adding duplicate -// keys. -func NewJSONEncoder(cfg EncoderConfig) Encoder { - return newJSONEncoder(cfg, false) -} - -func newJSONEncoder(cfg EncoderConfig, spaced bool) *jsonEncoder { - return &jsonEncoder{ - EncoderConfig: &cfg, - buf: bufferpool.Get(), - spaced: spaced, - } -} - -func (enc *jsonEncoder) AddArray(key string, arr ArrayMarshaler) error { - enc.addKey(key) - return enc.AppendArray(arr) -} - -func (enc *jsonEncoder) AddObject(key string, obj ObjectMarshaler) error { - enc.addKey(key) - return enc.AppendObject(obj) -} - -func (enc *jsonEncoder) AddBinary(key string, val []byte) { - enc.AddString(key, base64.StdEncoding.EncodeToString(val)) -} - -func (enc *jsonEncoder) AddByteString(key string, val []byte) { - enc.addKey(key) - enc.AppendByteString(val) -} - -func (enc *jsonEncoder) AddBool(key string, val bool) { - enc.addKey(key) - enc.AppendBool(val) -} - -func (enc *jsonEncoder) AddComplex128(key string, val complex128) { - enc.addKey(key) - enc.AppendComplex128(val) -} - -func (enc *jsonEncoder) AddDuration(key string, val time.Duration) { - enc.addKey(key) - enc.AppendDuration(val) -} - -func (enc *jsonEncoder) AddFloat64(key string, val float64) { - enc.addKey(key) - enc.AppendFloat64(val) -} - -func (enc *jsonEncoder) AddFloat32(key string, val float32) { - enc.addKey(key) - enc.AppendFloat32(val) -} - -func (enc *jsonEncoder) AddInt64(key string, val int64) { - enc.addKey(key) - enc.AppendInt64(val) -} - -func (enc *jsonEncoder) resetReflectBuf() { - if enc.reflectBuf == nil { - enc.reflectBuf = bufferpool.Get() - enc.reflectEnc = json.NewEncoder(enc.reflectBuf) - - // For consistency with our custom JSON encoder. 
- enc.reflectEnc.SetEscapeHTML(false) - } else { - enc.reflectBuf.Reset() - } -} - -var nullLiteralBytes = []byte("null") - -// Only invoke the standard JSON encoder if there is actually something to -// encode; otherwise write JSON null literal directly. -func (enc *jsonEncoder) encodeReflected(obj interface{}) ([]byte, error) { - if obj == nil { - return nullLiteralBytes, nil - } - enc.resetReflectBuf() - if err := enc.reflectEnc.Encode(obj); err != nil { - return nil, err - } - enc.reflectBuf.TrimNewline() - return enc.reflectBuf.Bytes(), nil -} - -func (enc *jsonEncoder) AddReflected(key string, obj interface{}) error { - valueBytes, err := enc.encodeReflected(obj) - if err != nil { - return err - } - enc.addKey(key) - _, err = enc.buf.Write(valueBytes) - return err -} - -func (enc *jsonEncoder) OpenNamespace(key string) { - enc.addKey(key) - enc.buf.AppendByte('{') - enc.openNamespaces++ -} - -func (enc *jsonEncoder) AddString(key, val string) { - enc.addKey(key) - enc.AppendString(val) -} - -func (enc *jsonEncoder) AddTime(key string, val time.Time) { - enc.addKey(key) - enc.AppendTime(val) -} - -func (enc *jsonEncoder) AddUint64(key string, val uint64) { - enc.addKey(key) - enc.AppendUint64(val) -} - -func (enc *jsonEncoder) AppendArray(arr ArrayMarshaler) error { - enc.addElementSeparator() - enc.buf.AppendByte('[') - err := arr.MarshalLogArray(enc) - enc.buf.AppendByte(']') - return err -} - -func (enc *jsonEncoder) AppendObject(obj ObjectMarshaler) error { - enc.addElementSeparator() - enc.buf.AppendByte('{') - err := obj.MarshalLogObject(enc) - enc.buf.AppendByte('}') - return err -} - -func (enc *jsonEncoder) AppendBool(val bool) { - enc.addElementSeparator() - enc.buf.AppendBool(val) -} - -func (enc *jsonEncoder) AppendByteString(val []byte) { - enc.addElementSeparator() - enc.buf.AppendByte('"') - enc.safeAddByteString(val) - enc.buf.AppendByte('"') -} - -func (enc *jsonEncoder) AppendComplex128(val complex128) { - enc.addElementSeparator() - // Cast to a platform-independent, fixed-size type. - r, i := float64(real(val)), float64(imag(val)) - enc.buf.AppendByte('"') - // Because we're always in a quoted string, we can use strconv without - // special-casing NaN and +/-Inf. - enc.buf.AppendFloat(r, 64) - // If imaginary part is less than 0, minus (-) sign is added by default - // by AppendFloat. - if i >= 0 { - enc.buf.AppendByte('+') - } - enc.buf.AppendFloat(i, 64) - enc.buf.AppendByte('i') - enc.buf.AppendByte('"') -} - -func (enc *jsonEncoder) AppendDuration(val time.Duration) { - cur := enc.buf.Len() - if e := enc.EncodeDuration; e != nil { - e(val, enc) - } - if cur == enc.buf.Len() { - // User-supplied EncodeDuration is a no-op. Fall back to nanoseconds to keep - // JSON valid. 
- enc.AppendInt64(int64(val)) - } -} - -func (enc *jsonEncoder) AppendInt64(val int64) { - enc.addElementSeparator() - enc.buf.AppendInt(val) -} - -func (enc *jsonEncoder) AppendReflected(val interface{}) error { - valueBytes, err := enc.encodeReflected(val) - if err != nil { - return err - } - enc.addElementSeparator() - _, err = enc.buf.Write(valueBytes) - return err -} - -func (enc *jsonEncoder) AppendString(val string) { - enc.addElementSeparator() - enc.buf.AppendByte('"') - enc.safeAddString(val) - enc.buf.AppendByte('"') -} - -func (enc *jsonEncoder) AppendTimeLayout(time time.Time, layout string) { - enc.addElementSeparator() - enc.buf.AppendByte('"') - enc.buf.AppendTime(time, layout) - enc.buf.AppendByte('"') -} - -func (enc *jsonEncoder) AppendTime(val time.Time) { - cur := enc.buf.Len() - if e := enc.EncodeTime; e != nil { - e(val, enc) - } - if cur == enc.buf.Len() { - // User-supplied EncodeTime is a no-op. Fall back to nanos since epoch to keep - // output JSON valid. - enc.AppendInt64(val.UnixNano()) - } -} - -func (enc *jsonEncoder) AppendUint64(val uint64) { - enc.addElementSeparator() - enc.buf.AppendUint(val) -} - -func (enc *jsonEncoder) AddComplex64(k string, v complex64) { enc.AddComplex128(k, complex128(v)) } -func (enc *jsonEncoder) AddInt(k string, v int) { enc.AddInt64(k, int64(v)) } -func (enc *jsonEncoder) AddInt32(k string, v int32) { enc.AddInt64(k, int64(v)) } -func (enc *jsonEncoder) AddInt16(k string, v int16) { enc.AddInt64(k, int64(v)) } -func (enc *jsonEncoder) AddInt8(k string, v int8) { enc.AddInt64(k, int64(v)) } -func (enc *jsonEncoder) AddUint(k string, v uint) { enc.AddUint64(k, uint64(v)) } -func (enc *jsonEncoder) AddUint32(k string, v uint32) { enc.AddUint64(k, uint64(v)) } -func (enc *jsonEncoder) AddUint16(k string, v uint16) { enc.AddUint64(k, uint64(v)) } -func (enc *jsonEncoder) AddUint8(k string, v uint8) { enc.AddUint64(k, uint64(v)) } -func (enc *jsonEncoder) AddUintptr(k string, v uintptr) { enc.AddUint64(k, uint64(v)) } -func (enc *jsonEncoder) AppendComplex64(v complex64) { enc.AppendComplex128(complex128(v)) } -func (enc *jsonEncoder) AppendFloat64(v float64) { enc.appendFloat(v, 64) } -func (enc *jsonEncoder) AppendFloat32(v float32) { enc.appendFloat(float64(v), 32) } -func (enc *jsonEncoder) AppendInt(v int) { enc.AppendInt64(int64(v)) } -func (enc *jsonEncoder) AppendInt32(v int32) { enc.AppendInt64(int64(v)) } -func (enc *jsonEncoder) AppendInt16(v int16) { enc.AppendInt64(int64(v)) } -func (enc *jsonEncoder) AppendInt8(v int8) { enc.AppendInt64(int64(v)) } -func (enc *jsonEncoder) AppendUint(v uint) { enc.AppendUint64(uint64(v)) } -func (enc *jsonEncoder) AppendUint32(v uint32) { enc.AppendUint64(uint64(v)) } -func (enc *jsonEncoder) AppendUint16(v uint16) { enc.AppendUint64(uint64(v)) } -func (enc *jsonEncoder) AppendUint8(v uint8) { enc.AppendUint64(uint64(v)) } -func (enc *jsonEncoder) AppendUintptr(v uintptr) { enc.AppendUint64(uint64(v)) } - -func (enc *jsonEncoder) Clone() Encoder { - clone := enc.clone() - clone.buf.Write(enc.buf.Bytes()) - return clone -} - -func (enc *jsonEncoder) clone() *jsonEncoder { - clone := getJSONEncoder() - clone.EncoderConfig = enc.EncoderConfig - clone.spaced = enc.spaced - clone.openNamespaces = enc.openNamespaces - clone.buf = bufferpool.Get() - return clone -} - -func (enc *jsonEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, error) { - final := enc.clone() - final.buf.AppendByte('{') - - if final.LevelKey != "" { - final.addKey(final.LevelKey) - cur := final.buf.Len() - 
final.EncodeLevel(ent.Level, final) - if cur == final.buf.Len() { - // User-supplied EncodeLevel was a no-op. Fall back to strings to keep - // output JSON valid. - final.AppendString(ent.Level.String()) - } - } - if final.TimeKey != "" { - final.AddTime(final.TimeKey, ent.Time) - } - if ent.LoggerName != "" && final.NameKey != "" { - final.addKey(final.NameKey) - cur := final.buf.Len() - nameEncoder := final.EncodeName - - // if no name encoder provided, fall back to FullNameEncoder for backwards - // compatibility - if nameEncoder == nil { - nameEncoder = FullNameEncoder - } - - nameEncoder(ent.LoggerName, final) - if cur == final.buf.Len() { - // User-supplied EncodeName was a no-op. Fall back to strings to - // keep output JSON valid. - final.AppendString(ent.LoggerName) - } - } - if ent.Caller.Defined { - if final.CallerKey != "" { - final.addKey(final.CallerKey) - cur := final.buf.Len() - final.EncodeCaller(ent.Caller, final) - if cur == final.buf.Len() { - // User-supplied EncodeCaller was a no-op. Fall back to strings to - // keep output JSON valid. - final.AppendString(ent.Caller.String()) - } - } - if final.FunctionKey != "" { - final.addKey(final.FunctionKey) - final.AppendString(ent.Caller.Function) - } - } - if final.MessageKey != "" { - final.addKey(enc.MessageKey) - final.AppendString(ent.Message) - } - if enc.buf.Len() > 0 { - final.addElementSeparator() - final.buf.Write(enc.buf.Bytes()) - } - addFields(final, fields) - final.closeOpenNamespaces() - if ent.Stack != "" && final.StacktraceKey != "" { - final.AddString(final.StacktraceKey, ent.Stack) - } - final.buf.AppendByte('}') - if final.LineEnding != "" { - final.buf.AppendString(final.LineEnding) - } else { - final.buf.AppendString(DefaultLineEnding) - } - - ret := final.buf - putJSONEncoder(final) - return ret, nil -} - -func (enc *jsonEncoder) truncate() { - enc.buf.Reset() -} - -func (enc *jsonEncoder) closeOpenNamespaces() { - for i := 0; i < enc.openNamespaces; i++ { - enc.buf.AppendByte('}') - } -} - -func (enc *jsonEncoder) addKey(key string) { - enc.addElementSeparator() - enc.buf.AppendByte('"') - enc.safeAddString(key) - enc.buf.AppendByte('"') - enc.buf.AppendByte(':') - if enc.spaced { - enc.buf.AppendByte(' ') - } -} - -func (enc *jsonEncoder) addElementSeparator() { - last := enc.buf.Len() - 1 - if last < 0 { - return - } - switch enc.buf.Bytes()[last] { - case '{', '[', ':', ',', ' ': - return - default: - enc.buf.AppendByte(',') - if enc.spaced { - enc.buf.AppendByte(' ') - } - } -} - -func (enc *jsonEncoder) appendFloat(val float64, bitSize int) { - enc.addElementSeparator() - switch { - case math.IsNaN(val): - enc.buf.AppendString(`"NaN"`) - case math.IsInf(val, 1): - enc.buf.AppendString(`"+Inf"`) - case math.IsInf(val, -1): - enc.buf.AppendString(`"-Inf"`) - default: - enc.buf.AppendFloat(val, bitSize) - } -} - -// safeAddString JSON-escapes a string and appends it to the internal buffer. -// Unlike the standard library's encoder, it doesn't attempt to protect the -// user from browser vulnerabilities or JSONP-related problems. -func (enc *jsonEncoder) safeAddString(s string) { - for i := 0; i < len(s); { - if enc.tryAddRuneSelf(s[i]) { - i++ - continue - } - r, size := utf8.DecodeRuneInString(s[i:]) - if enc.tryAddRuneError(r, size) { - i++ - continue - } - enc.buf.AppendString(s[i : i+size]) - i += size - } -} - -// safeAddByteString is no-alloc equivalent of safeAddString(string(s)) for s []byte. 
-func (enc *jsonEncoder) safeAddByteString(s []byte) { - for i := 0; i < len(s); { - if enc.tryAddRuneSelf(s[i]) { - i++ - continue - } - r, size := utf8.DecodeRune(s[i:]) - if enc.tryAddRuneError(r, size) { - i++ - continue - } - enc.buf.Write(s[i : i+size]) - i += size - } -} - -// tryAddRuneSelf appends b if it is valid UTF-8 character represented in a single byte. -func (enc *jsonEncoder) tryAddRuneSelf(b byte) bool { - if b >= utf8.RuneSelf { - return false - } - if 0x20 <= b && b != '\\' && b != '"' { - enc.buf.AppendByte(b) - return true - } - switch b { - case '\\', '"': - enc.buf.AppendByte('\\') - enc.buf.AppendByte(b) - case '\n': - enc.buf.AppendByte('\\') - enc.buf.AppendByte('n') - case '\r': - enc.buf.AppendByte('\\') - enc.buf.AppendByte('r') - case '\t': - enc.buf.AppendByte('\\') - enc.buf.AppendByte('t') - default: - // Encode bytes < 0x20, except for the escape sequences above. - enc.buf.AppendString(`\u00`) - enc.buf.AppendByte(_hex[b>>4]) - enc.buf.AppendByte(_hex[b&0xF]) - } - return true -} - -func (enc *jsonEncoder) tryAddRuneError(r rune, size int) bool { - if r == utf8.RuneError && size == 1 { - enc.buf.AppendString(`\ufffd`) - return true - } - return false -} diff --git a/vendor/go.uber.org/zap/zapcore/level.go b/vendor/go.uber.org/zap/zapcore/level.go deleted file mode 100644 index e575c9f4..00000000 --- a/vendor/go.uber.org/zap/zapcore/level.go +++ /dev/null @@ -1,175 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zapcore - -import ( - "bytes" - "errors" - "fmt" -) - -var errUnmarshalNilLevel = errors.New("can't unmarshal a nil *Level") - -// A Level is a logging priority. Higher levels are more important. -type Level int8 - -const ( - // DebugLevel logs are typically voluminous, and are usually disabled in - // production. - DebugLevel Level = iota - 1 - // InfoLevel is the default logging priority. - InfoLevel - // WarnLevel logs are more important than Info, but don't need individual - // human review. - WarnLevel - // ErrorLevel logs are high-priority. If an application is running smoothly, - // it shouldn't generate any error-level logs. - ErrorLevel - // DPanicLevel logs are particularly important errors. In development the - // logger panics after writing the message. - DPanicLevel - // PanicLevel logs a message, then panics. - PanicLevel - // FatalLevel logs a message, then calls os.Exit(1). 
- FatalLevel - - _minLevel = DebugLevel - _maxLevel = FatalLevel -) - -// String returns a lower-case ASCII representation of the log level. -func (l Level) String() string { - switch l { - case DebugLevel: - return "debug" - case InfoLevel: - return "info" - case WarnLevel: - return "warn" - case ErrorLevel: - return "error" - case DPanicLevel: - return "dpanic" - case PanicLevel: - return "panic" - case FatalLevel: - return "fatal" - default: - return fmt.Sprintf("Level(%d)", l) - } -} - -// CapitalString returns an all-caps ASCII representation of the log level. -func (l Level) CapitalString() string { - // Printing levels in all-caps is common enough that we should export this - // functionality. - switch l { - case DebugLevel: - return "DEBUG" - case InfoLevel: - return "INFO" - case WarnLevel: - return "WARN" - case ErrorLevel: - return "ERROR" - case DPanicLevel: - return "DPANIC" - case PanicLevel: - return "PANIC" - case FatalLevel: - return "FATAL" - default: - return fmt.Sprintf("LEVEL(%d)", l) - } -} - -// MarshalText marshals the Level to text. Note that the text representation -// drops the -Level suffix (see example). -func (l Level) MarshalText() ([]byte, error) { - return []byte(l.String()), nil -} - -// UnmarshalText unmarshals text to a level. Like MarshalText, UnmarshalText -// expects the text representation of a Level to drop the -Level suffix (see -// example). -// -// In particular, this makes it easy to configure logging levels using YAML, -// TOML, or JSON files. -func (l *Level) UnmarshalText(text []byte) error { - if l == nil { - return errUnmarshalNilLevel - } - if !l.unmarshalText(text) && !l.unmarshalText(bytes.ToLower(text)) { - return fmt.Errorf("unrecognized level: %q", text) - } - return nil -} - -func (l *Level) unmarshalText(text []byte) bool { - switch string(text) { - case "debug", "DEBUG": - *l = DebugLevel - case "info", "INFO", "": // make the zero value useful - *l = InfoLevel - case "warn", "WARN": - *l = WarnLevel - case "error", "ERROR": - *l = ErrorLevel - case "dpanic", "DPANIC": - *l = DPanicLevel - case "panic", "PANIC": - *l = PanicLevel - case "fatal", "FATAL": - *l = FatalLevel - default: - return false - } - return true -} - -// Set sets the level for the flag.Value interface. -func (l *Level) Set(s string) error { - return l.UnmarshalText([]byte(s)) -} - -// Get gets the level for the flag.Getter interface. -func (l *Level) Get() interface{} { - return *l -} - -// Enabled returns true if the given level is at or above this level. -func (l Level) Enabled(lvl Level) bool { - return lvl >= l -} - -// LevelEnabler decides whether a given logging level is enabled when logging a -// message. -// -// Enablers are intended to be used to implement deterministic filters; -// concerns like sampling are better implemented as a Core. -// -// Each concrete Level value implements a static LevelEnabler which returns -// true for itself and all higher logging levels. For example WarnLevel.Enabled() -// will return true for WarnLevel, ErrorLevel, DPanicLevel, PanicLevel, and -// FatalLevel, but return false for InfoLevel and DebugLevel. -type LevelEnabler interface { - Enabled(Level) bool -} diff --git a/vendor/go.uber.org/zap/zapcore/level_strings.go b/vendor/go.uber.org/zap/zapcore/level_strings.go deleted file mode 100644 index 7af8dadc..00000000 --- a/vendor/go.uber.org/zap/zapcore/level_strings.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. 
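Level's UnmarshalText and flag.Value support above are what let levels be set from config strings; a small sketch:

package main

import (
	"fmt"

	"go.uber.org/zap/zapcore"
)

func main() {
	var lvl zapcore.Level
	if err := lvl.Set("warn"); err != nil { // Set satisfies flag.Value
		panic(err)
	}
	fmt.Println(lvl)                             // warn
	fmt.Println(lvl.Enabled(zapcore.ErrorLevel)) // true: error is at or above warn
	fmt.Println(lvl.Enabled(zapcore.InfoLevel))  // false
}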
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zapcore - -import "go.uber.org/zap/internal/color" - -var ( - _levelToColor = map[Level]color.Color{ - DebugLevel: color.Magenta, - InfoLevel: color.Blue, - WarnLevel: color.Yellow, - ErrorLevel: color.Red, - DPanicLevel: color.Red, - PanicLevel: color.Red, - FatalLevel: color.Red, - } - _unknownLevelColor = color.Red - - _levelToLowercaseColorString = make(map[Level]string, len(_levelToColor)) - _levelToCapitalColorString = make(map[Level]string, len(_levelToColor)) -) - -func init() { - for level, color := range _levelToColor { - _levelToLowercaseColorString[level] = color.Add(level.String()) - _levelToCapitalColorString[level] = color.Add(level.CapitalString()) - } -} diff --git a/vendor/go.uber.org/zap/zapcore/marshaler.go b/vendor/go.uber.org/zap/zapcore/marshaler.go deleted file mode 100644 index c3c55ba0..00000000 --- a/vendor/go.uber.org/zap/zapcore/marshaler.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zapcore - -// ObjectMarshaler allows user-defined types to efficiently add themselves to the -// logging context, and to selectively omit information which shouldn't be -// included in logs (e.g., passwords). -// -// Note: ObjectMarshaler is only used when zap.Object is used or when -// passed directly to zap.Any. It is not used when reflection-based -// encoding is used. 
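The ObjectMarshaler contract deleted above is unchanged for callers; only the vendored source goes away. A minimal sketch of a type opting into field-by-field encoding while omitting a secret — the `user` type and its fields are hypothetical:

```go
package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

// user is a hypothetical type that controls its own log representation.
type user struct {
	Name     string
	Password string
}

// MarshalLogObject implements zapcore.ObjectMarshaler; the password is
// deliberately never added to the encoder.
func (u user) MarshalLogObject(enc zapcore.ObjectEncoder) error {
	enc.AddString("name", u.Name)
	return nil
}

func main() {
	logger, _ := zap.NewProduction()
	defer logger.Sync()
	// zap.Object routes through MarshalLogObject instead of reflection.
	logger.Info("login", zap.Object("user", user{Name: "alice", Password: "s3cret"}))
}
```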
-type ObjectMarshaler interface { - MarshalLogObject(ObjectEncoder) error -} - -// ObjectMarshalerFunc is a type adapter that turns a function into an -// ObjectMarshaler. -type ObjectMarshalerFunc func(ObjectEncoder) error - -// MarshalLogObject calls the underlying function. -func (f ObjectMarshalerFunc) MarshalLogObject(enc ObjectEncoder) error { - return f(enc) -} - -// ArrayMarshaler allows user-defined types to efficiently add themselves to the -// logging context, and to selectively omit information which shouldn't be -// included in logs (e.g., passwords). -// -// Note: ArrayMarshaler is only used when zap.Array is used or when -// passed directly to zap.Any. It is not used when reflection-based -// encoding is used. -type ArrayMarshaler interface { - MarshalLogArray(ArrayEncoder) error -} - -// ArrayMarshalerFunc is a type adapter that turns a function into an -// ArrayMarshaler. -type ArrayMarshalerFunc func(ArrayEncoder) error - -// MarshalLogArray calls the underlying function. -func (f ArrayMarshalerFunc) MarshalLogArray(enc ArrayEncoder) error { - return f(enc) -} diff --git a/vendor/go.uber.org/zap/zapcore/memory_encoder.go b/vendor/go.uber.org/zap/zapcore/memory_encoder.go deleted file mode 100644 index dfead082..00000000 --- a/vendor/go.uber.org/zap/zapcore/memory_encoder.go +++ /dev/null @@ -1,179 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zapcore - -import "time" - -// MapObjectEncoder is an ObjectEncoder backed by a simple -// map[string]interface{}. It's not fast enough for production use, but it's -// helpful in tests. -type MapObjectEncoder struct { - // Fields contains the entire encoded log context. - Fields map[string]interface{} - // cur is a pointer to the namespace we're currently writing to. - cur map[string]interface{} -} - -// NewMapObjectEncoder creates a new map-backed ObjectEncoder. -func NewMapObjectEncoder() *MapObjectEncoder { - m := make(map[string]interface{}) - return &MapObjectEncoder{ - Fields: m, - cur: m, - } -} - -// AddArray implements ObjectEncoder. -func (m *MapObjectEncoder) AddArray(key string, v ArrayMarshaler) error { - arr := &sliceArrayEncoder{elems: make([]interface{}, 0)} - err := v.MarshalLogArray(arr) - m.cur[key] = arr.elems - return err -} - -// AddObject implements ObjectEncoder. 
-func (m *MapObjectEncoder) AddObject(k string, v ObjectMarshaler) error { - newMap := NewMapObjectEncoder() - m.cur[k] = newMap.Fields - return v.MarshalLogObject(newMap) -} - -// AddBinary implements ObjectEncoder. -func (m *MapObjectEncoder) AddBinary(k string, v []byte) { m.cur[k] = v } - -// AddByteString implements ObjectEncoder. -func (m *MapObjectEncoder) AddByteString(k string, v []byte) { m.cur[k] = string(v) } - -// AddBool implements ObjectEncoder. -func (m *MapObjectEncoder) AddBool(k string, v bool) { m.cur[k] = v } - -// AddDuration implements ObjectEncoder. -func (m MapObjectEncoder) AddDuration(k string, v time.Duration) { m.cur[k] = v } - -// AddComplex128 implements ObjectEncoder. -func (m *MapObjectEncoder) AddComplex128(k string, v complex128) { m.cur[k] = v } - -// AddComplex64 implements ObjectEncoder. -func (m *MapObjectEncoder) AddComplex64(k string, v complex64) { m.cur[k] = v } - -// AddFloat64 implements ObjectEncoder. -func (m *MapObjectEncoder) AddFloat64(k string, v float64) { m.cur[k] = v } - -// AddFloat32 implements ObjectEncoder. -func (m *MapObjectEncoder) AddFloat32(k string, v float32) { m.cur[k] = v } - -// AddInt implements ObjectEncoder. -func (m *MapObjectEncoder) AddInt(k string, v int) { m.cur[k] = v } - -// AddInt64 implements ObjectEncoder. -func (m *MapObjectEncoder) AddInt64(k string, v int64) { m.cur[k] = v } - -// AddInt32 implements ObjectEncoder. -func (m *MapObjectEncoder) AddInt32(k string, v int32) { m.cur[k] = v } - -// AddInt16 implements ObjectEncoder. -func (m *MapObjectEncoder) AddInt16(k string, v int16) { m.cur[k] = v } - -// AddInt8 implements ObjectEncoder. -func (m *MapObjectEncoder) AddInt8(k string, v int8) { m.cur[k] = v } - -// AddString implements ObjectEncoder. -func (m *MapObjectEncoder) AddString(k string, v string) { m.cur[k] = v } - -// AddTime implements ObjectEncoder. -func (m MapObjectEncoder) AddTime(k string, v time.Time) { m.cur[k] = v } - -// AddUint implements ObjectEncoder. -func (m *MapObjectEncoder) AddUint(k string, v uint) { m.cur[k] = v } - -// AddUint64 implements ObjectEncoder. -func (m *MapObjectEncoder) AddUint64(k string, v uint64) { m.cur[k] = v } - -// AddUint32 implements ObjectEncoder. -func (m *MapObjectEncoder) AddUint32(k string, v uint32) { m.cur[k] = v } - -// AddUint16 implements ObjectEncoder. -func (m *MapObjectEncoder) AddUint16(k string, v uint16) { m.cur[k] = v } - -// AddUint8 implements ObjectEncoder. -func (m *MapObjectEncoder) AddUint8(k string, v uint8) { m.cur[k] = v } - -// AddUintptr implements ObjectEncoder. -func (m *MapObjectEncoder) AddUintptr(k string, v uintptr) { m.cur[k] = v } - -// AddReflected implements ObjectEncoder. -func (m *MapObjectEncoder) AddReflected(k string, v interface{}) error { - m.cur[k] = v - return nil -} - -// OpenNamespace implements ObjectEncoder. -func (m *MapObjectEncoder) OpenNamespace(k string) { - ns := make(map[string]interface{}) - m.cur[k] = ns - m.cur = ns -} - -// sliceArrayEncoder is an ArrayEncoder backed by a simple []interface{}. Like -// the MapObjectEncoder, it's not designed for production use. 
-type sliceArrayEncoder struct { - elems []interface{} -} - -func (s *sliceArrayEncoder) AppendArray(v ArrayMarshaler) error { - enc := &sliceArrayEncoder{} - err := v.MarshalLogArray(enc) - s.elems = append(s.elems, enc.elems) - return err -} - -func (s *sliceArrayEncoder) AppendObject(v ObjectMarshaler) error { - m := NewMapObjectEncoder() - err := v.MarshalLogObject(m) - s.elems = append(s.elems, m.Fields) - return err -} - -func (s *sliceArrayEncoder) AppendReflected(v interface{}) error { - s.elems = append(s.elems, v) - return nil -} - -func (s *sliceArrayEncoder) AppendBool(v bool) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendByteString(v []byte) { s.elems = append(s.elems, string(v)) } -func (s *sliceArrayEncoder) AppendComplex128(v complex128) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendComplex64(v complex64) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendDuration(v time.Duration) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendFloat64(v float64) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendFloat32(v float32) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendInt(v int) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendInt64(v int64) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendInt32(v int32) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendInt16(v int16) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendInt8(v int8) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendString(v string) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendTime(v time.Time) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendUint(v uint) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendUint64(v uint64) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendUint32(v uint32) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendUint16(v uint16) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendUint8(v uint8) { s.elems = append(s.elems, v) } -func (s *sliceArrayEncoder) AppendUintptr(v uintptr) { s.elems = append(s.elems, v) } diff --git a/vendor/go.uber.org/zap/zapcore/sampler.go b/vendor/go.uber.org/zap/zapcore/sampler.go deleted file mode 100644 index 31ed96e1..00000000 --- a/vendor/go.uber.org/zap/zapcore/sampler.go +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
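The MapObjectEncoder deleted above is the test-friendly encoder: it captures fields into a plain map for assertions. A brief sketch of its namespace behavior (the field names and values are arbitrary):

```go
package main

import (
	"fmt"

	"go.uber.org/zap/zapcore"
)

func main() {
	enc := zapcore.NewMapObjectEncoder()
	enc.AddString("name", "alice")

	// OpenNamespace nests all subsequent fields under a child map.
	enc.OpenNamespace("req")
	enc.AddInt("status", 200)

	// map[name:alice req:map[status:200]]
	fmt.Println(enc.Fields)
}
```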
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zapcore - -import ( - "time" - - "go.uber.org/atomic" -) - -const ( - _numLevels = _maxLevel - _minLevel + 1 - _countersPerLevel = 4096 -) - -type counter struct { - resetAt atomic.Int64 - counter atomic.Uint64 -} - -type counters [_numLevels][_countersPerLevel]counter - -func newCounters() *counters { - return &counters{} -} - -func (cs *counters) get(lvl Level, key string) *counter { - i := lvl - _minLevel - j := fnv32a(key) % _countersPerLevel - return &cs[i][j] -} - -// fnv32a, adapted from "hash/fnv", but without a []byte(string) alloc -func fnv32a(s string) uint32 { - const ( - offset32 = 2166136261 - prime32 = 16777619 - ) - hash := uint32(offset32) - for i := 0; i < len(s); i++ { - hash ^= uint32(s[i]) - hash *= prime32 - } - return hash -} - -func (c *counter) IncCheckReset(t time.Time, tick time.Duration) uint64 { - tn := t.UnixNano() - resetAfter := c.resetAt.Load() - if resetAfter > tn { - return c.counter.Inc() - } - - c.counter.Store(1) - - newResetAfter := tn + tick.Nanoseconds() - if !c.resetAt.CAS(resetAfter, newResetAfter) { - // We raced with another goroutine trying to reset, and it also reset - // the counter to 1, so we need to reincrement the counter. - return c.counter.Inc() - } - - return 1 -} - -// SamplingDecision is a decision represented as a bit field made by sampler. -// More decisions may be added in the future. -type SamplingDecision uint32 - -const ( - // LogDropped indicates that the Sampler dropped a log entry. - LogDropped SamplingDecision = 1 << iota - // LogSampled indicates that the Sampler sampled a log entry. - LogSampled -) - -// optionFunc wraps a func so it satisfies the SamplerOption interface. -type optionFunc func(*sampler) - -func (f optionFunc) apply(s *sampler) { - f(s) -} - -// SamplerOption configures a Sampler. -type SamplerOption interface { - apply(*sampler) -} - -// nopSamplingHook is the default hook used by sampler. -func nopSamplingHook(Entry, SamplingDecision) {} - -// SamplerHook registers a function which will be called when Sampler makes a -// decision. -// -// This hook may be used to get visibility into the performance of the sampler. -// For example, use it to track metrics of dropped versus sampled logs. -// -// var dropped atomic.Int64 -// zapcore.SamplerHook(func(ent zapcore.Entry, dec zapcore.SamplingDecision) { -// if dec&zapcore.LogDropped > 0 { -// dropped.Inc() -// } -// }) -func SamplerHook(hook func(entry Entry, dec SamplingDecision)) SamplerOption { - return optionFunc(func(s *sampler) { - s.hook = hook - }) -} - -// NewSamplerWithOptions creates a Core that samples incoming entries, which -// caps the CPU and I/O load of logging while attempting to preserve a -// representative subset of your logs. -// -// Zap samples by logging the first N entries with a given level and message -// each tick. If more Entries with the same level and message are seen during -// the same interval, every Mth message is logged and the rest are dropped. -// -// Sampler can be configured to report sampling decisions with the SamplerHook -// option. -// -// Keep in mind that zap's sampling implementation is optimized for speed over -// absolute precision; under load, each tick may be slightly over- or -// under-sampled. 
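As a sketch of the sampling setup this file provided — the tick, first, and thereafter values below are arbitrary, and zap is assumed to come from the module cache now that the vendored copy is gone:

```go
package main

import (
	"os"
	"time"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	base := zapcore.NewCore(
		zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
		zapcore.Lock(os.Stdout),
		zapcore.InfoLevel,
	)

	// Per one-second tick, log the first 3 entries with a given level and
	// message, then every 100th after that; drop the rest.
	logger := zap.New(zapcore.NewSamplerWithOptions(base, time.Second, 3, 100))

	for i := 0; i < 1000; i++ {
		logger.Info("hot path") // most of these are sampled away
	}
}
```

A SamplerHook option can be layered on top to count dropped entries, as the doc comment above shows.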
-func NewSamplerWithOptions(core Core, tick time.Duration, first, thereafter int, opts ...SamplerOption) Core { - s := &sampler{ - Core: core, - tick: tick, - counts: newCounters(), - first: uint64(first), - thereafter: uint64(thereafter), - hook: nopSamplingHook, - } - for _, opt := range opts { - opt.apply(s) - } - - return s -} - -type sampler struct { - Core - - counts *counters - tick time.Duration - first, thereafter uint64 - hook func(Entry, SamplingDecision) -} - -// NewSampler creates a Core that samples incoming entries, which -// caps the CPU and I/O load of logging while attempting to preserve a -// representative subset of your logs. -// -// Zap samples by logging the first N entries with a given level and message -// each tick. If more Entries with the same level and message are seen during -// the same interval, every Mth message is logged and the rest are dropped. -// -// Keep in mind that zap's sampling implementation is optimized for speed over -// absolute precision; under load, each tick may be slightly over- or -// under-sampled. -// -// Deprecated: use NewSamplerWithOptions. -func NewSampler(core Core, tick time.Duration, first, thereafter int) Core { - return NewSamplerWithOptions(core, tick, first, thereafter) -} - -func (s *sampler) With(fields []Field) Core { - return &sampler{ - Core: s.Core.With(fields), - tick: s.tick, - counts: s.counts, - first: s.first, - thereafter: s.thereafter, - hook: s.hook, - } -} - -func (s *sampler) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { - if !s.Enabled(ent.Level) { - return ce - } - - if ent.Level >= _minLevel && ent.Level <= _maxLevel { - counter := s.counts.get(ent.Level, ent.Message) - n := counter.IncCheckReset(ent.Time, s.tick) - if n > s.first && (n-s.first)%s.thereafter != 0 { - s.hook(ent, LogDropped) - return ce - } - s.hook(ent, LogSampled) - } - return s.Core.Check(ent, ce) -} diff --git a/vendor/go.uber.org/zap/zapcore/tee.go b/vendor/go.uber.org/zap/zapcore/tee.go deleted file mode 100644 index 07a32eef..00000000 --- a/vendor/go.uber.org/zap/zapcore/tee.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zapcore - -import "go.uber.org/multierr" - -type multiCore []Core - -// NewTee creates a Core that duplicates log entries into two or more -// underlying Cores. -// -// Calling it with a single Core returns the input unchanged, and calling -// it with no input returns a no-op Core. 
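A minimal sketch of the tee described above: human-readable console output and machine-readable JSON from one logger, each core with its own level (the encoder and level choices are illustrative):

```go
package main

import (
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	cfg := zap.NewProductionEncoderConfig()

	consoleCore := zapcore.NewCore(zapcore.NewConsoleEncoder(cfg), zapcore.Lock(os.Stderr), zapcore.DebugLevel)
	jsonCore := zapcore.NewCore(zapcore.NewJSONEncoder(cfg), zapcore.Lock(os.Stdout), zapcore.InfoLevel)

	// Every entry is offered to both cores; each applies its own level check.
	logger := zap.New(zapcore.NewTee(consoleCore, jsonCore))
	logger.Info("written to both outputs")
	logger.Debug("console only: the JSON core accepts Info and above")
}
```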
-func NewTee(cores ...Core) Core { - switch len(cores) { - case 0: - return NewNopCore() - case 1: - return cores[0] - default: - return multiCore(cores) - } -} - -func (mc multiCore) With(fields []Field) Core { - clone := make(multiCore, len(mc)) - for i := range mc { - clone[i] = mc[i].With(fields) - } - return clone -} - -func (mc multiCore) Enabled(lvl Level) bool { - for i := range mc { - if mc[i].Enabled(lvl) { - return true - } - } - return false -} - -func (mc multiCore) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { - for i := range mc { - ce = mc[i].Check(ent, ce) - } - return ce -} - -func (mc multiCore) Write(ent Entry, fields []Field) error { - var err error - for i := range mc { - err = multierr.Append(err, mc[i].Write(ent, fields)) - } - return err -} - -func (mc multiCore) Sync() error { - var err error - for i := range mc { - err = multierr.Append(err, mc[i].Sync()) - } - return err -} diff --git a/vendor/go.uber.org/zap/zapcore/write_syncer.go b/vendor/go.uber.org/zap/zapcore/write_syncer.go deleted file mode 100644 index d4a1af3d..00000000 --- a/vendor/go.uber.org/zap/zapcore/write_syncer.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright (c) 2016 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package zapcore - -import ( - "io" - "sync" - - "go.uber.org/multierr" -) - -// A WriteSyncer is an io.Writer that can also flush any buffered data. Note -// that *os.File (and thus, os.Stderr and os.Stdout) implement WriteSyncer. -type WriteSyncer interface { - io.Writer - Sync() error -} - -// AddSync converts an io.Writer to a WriteSyncer. It attempts to be -// intelligent: if the concrete type of the io.Writer implements WriteSyncer, -// we'll use the existing Sync method. If it doesn't, we'll add a no-op Sync. -func AddSync(w io.Writer) WriteSyncer { - switch w := w.(type) { - case WriteSyncer: - return w - default: - return writerWrapper{w} - } -} - -type lockedWriteSyncer struct { - sync.Mutex - ws WriteSyncer -} - -// Lock wraps a WriteSyncer in a mutex to make it safe for concurrent use. In -// particular, *os.Files must be locked before use. 
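A short sketch of AddSync and Lock from the write_syncer.go deletion above, capturing log output in an in-memory buffer — a common test pattern; the buffer is illustrative:

```go
package main

import (
	"bytes"
	"fmt"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	var buf bytes.Buffer

	// AddSync gives the buffer a no-op Sync; Lock wraps it in a mutex so
	// concurrent goroutines can write safely.
	ws := zapcore.Lock(zapcore.AddSync(&buf))

	core := zapcore.NewCore(zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()), ws, zapcore.DebugLevel)
	zap.New(core).Info("captured")

	fmt.Print(buf.String()) // one JSON log line
}
```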
-func Lock(ws WriteSyncer) WriteSyncer { - if _, ok := ws.(*lockedWriteSyncer); ok { - // no need to layer on another lock - return ws - } - return &lockedWriteSyncer{ws: ws} -} - -func (s *lockedWriteSyncer) Write(bs []byte) (int, error) { - s.Lock() - n, err := s.ws.Write(bs) - s.Unlock() - return n, err -} - -func (s *lockedWriteSyncer) Sync() error { - s.Lock() - err := s.ws.Sync() - s.Unlock() - return err -} - -type writerWrapper struct { - io.Writer -} - -func (w writerWrapper) Sync() error { - return nil -} - -type multiWriteSyncer []WriteSyncer - -// NewMultiWriteSyncer creates a WriteSyncer that duplicates its writes -// and sync calls, much like io.MultiWriter. -func NewMultiWriteSyncer(ws ...WriteSyncer) WriteSyncer { - if len(ws) == 1 { - return ws[0] - } - return multiWriteSyncer(ws) -} - -// See https://golang.org/src/io/multi.go -// When not all underlying syncers write the same number of bytes, -// the smallest number is returned even though Write() is called on -// all of them. -func (ws multiWriteSyncer) Write(p []byte) (int, error) { - var writeErr error - nWritten := 0 - for _, w := range ws { - n, err := w.Write(p) - writeErr = multierr.Append(writeErr, err) - if nWritten == 0 && n != 0 { - nWritten = n - } else if n < nWritten { - nWritten = n - } - } - return nWritten, writeErr -} - -func (ws multiWriteSyncer) Sync() error { - var err error - for _, w := range ws { - err = multierr.Append(err, w.Sync()) - } - return err -} diff --git a/vendor/golang.org/x/crypto/AUTHORS b/vendor/golang.org/x/crypto/AUTHORS deleted file mode 100644 index 2b00ddba..00000000 --- a/vendor/golang.org/x/crypto/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at https://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/crypto/CONTRIBUTORS b/vendor/golang.org/x/crypto/CONTRIBUTORS deleted file mode 100644 index 1fbd3e97..00000000 --- a/vendor/golang.org/x/crypto/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at https://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/crypto/LICENSE b/vendor/golang.org/x/crypto/LICENSE deleted file mode 100644 index 6a66aea5..00000000 --- a/vendor/golang.org/x/crypto/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/crypto/PATENTS b/vendor/golang.org/x/crypto/PATENTS deleted file mode 100644 index 73309904..00000000 --- a/vendor/golang.org/x/crypto/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. - -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/crypto/bcrypt/base64.go b/vendor/golang.org/x/crypto/bcrypt/base64.go deleted file mode 100644 index fc311609..00000000 --- a/vendor/golang.org/x/crypto/bcrypt/base64.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package bcrypt - -import "encoding/base64" - -const alphabet = "./ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789" - -var bcEncoding = base64.NewEncoding(alphabet) - -func base64Encode(src []byte) []byte { - n := bcEncoding.EncodedLen(len(src)) - dst := make([]byte, n) - bcEncoding.Encode(dst, src) - for dst[n-1] == '=' { - n-- - } - return dst[:n] -} - -func base64Decode(src []byte) ([]byte, error) { - numOfEquals := 4 - (len(src) % 4) - for i := 0; i < numOfEquals; i++ { - src = append(src, '=') - } - - dst := make([]byte, bcEncoding.DecodedLen(len(src))) - n, err := bcEncoding.Decode(dst, src) - if err != nil { - return nil, err - } - return dst[:n], nil -} diff --git a/vendor/golang.org/x/crypto/bcrypt/bcrypt.go b/vendor/golang.org/x/crypto/bcrypt/bcrypt.go deleted file mode 100644 index aeb73f81..00000000 --- a/vendor/golang.org/x/crypto/bcrypt/bcrypt.go +++ /dev/null @@ -1,295 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// Package bcrypt implements Provos and Mazières's bcrypt adaptive hashing -// algorithm. See http://www.usenix.org/event/usenix99/provos/provos.pdf -package bcrypt // import "golang.org/x/crypto/bcrypt" - -// The code is a port of Provos and Mazières's C implementation. -import ( - "crypto/rand" - "crypto/subtle" - "errors" - "fmt" - "io" - "strconv" - - "golang.org/x/crypto/blowfish" -) - -const ( - MinCost int = 4 // the minimum allowable cost as passed in to GenerateFromPassword - MaxCost int = 31 // the maximum allowable cost as passed in to GenerateFromPassword - DefaultCost int = 10 // the cost that will actually be set if a cost below MinCost is passed into GenerateFromPassword -) - -// The error returned from CompareHashAndPassword when a password and hash do -// not match. -var ErrMismatchedHashAndPassword = errors.New("crypto/bcrypt: hashedPassword is not the hash of the given password") - -// The error returned from CompareHashAndPassword when a hash is too short to -// be a bcrypt hash. -var ErrHashTooShort = errors.New("crypto/bcrypt: hashedSecret too short to be a bcrypted password") - -// The error returned from CompareHashAndPassword when a hash was created with -// a bcrypt algorithm newer than this implementation. -type HashVersionTooNewError byte - -func (hv HashVersionTooNewError) Error() string { - return fmt.Sprintf("crypto/bcrypt: bcrypt algorithm version '%c' requested is newer than current version '%c'", byte(hv), majorVersion) -} - -// The error returned from CompareHashAndPassword when a hash starts with something other than '$' -type InvalidHashPrefixError byte - -func (ih InvalidHashPrefixError) Error() string { - return fmt.Sprintf("crypto/bcrypt: bcrypt hashes must start with '$', but hashedSecret started with '%c'", byte(ih)) -} - -type InvalidCostError int - -func (ic InvalidCostError) Error() string { - return fmt.Sprintf("crypto/bcrypt: cost %d is outside allowed range (%d,%d)", int(ic), int(MinCost), int(MaxCost)) -} - -const ( - majorVersion = '2' - minorVersion = 'a' - maxSaltSize = 16 - maxCryptedHashSize = 23 - encodedSaltSize = 22 - encodedHashSize = 31 - minHashSize = 59 -) - -// magicCipherData is an IV for the 64 Blowfish encryption calls in -// bcrypt(). It's the string "OrpheanBeholderScryDoubt" in big-endian bytes. -var magicCipherData = []byte{ - 0x4f, 0x72, 0x70, 0x68, - 0x65, 0x61, 0x6e, 0x42, - 0x65, 0x68, 0x6f, 0x6c, - 0x64, 0x65, 0x72, 0x53, - 0x63, 0x72, 0x79, 0x44, - 0x6f, 0x75, 0x62, 0x74, -} - -type hashed struct { - hash []byte - salt []byte - cost int // allowed range is MinCost to MaxCost - major byte - minor byte -} - -// GenerateFromPassword returns the bcrypt hash of the password at the given -// cost. If the cost given is less than MinCost, the cost will be set to -// DefaultCost, instead. Use CompareHashAndPassword, as defined in this package, -// to compare the returned hashed password with its cleartext version. -func GenerateFromPassword(password []byte, cost int) ([]byte, error) { - p, err := newFromPassword(password, cost) - if err != nil { - return nil, err - } - return p.Hash(), nil -} - -// CompareHashAndPassword compares a bcrypt hashed password with its possible -// plaintext equivalent. Returns nil on success, or an error on failure. 
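The bcrypt package being un-vendored here remains available from golang.org/x/crypto. A minimal, hedged sketch of the hash/compare round trip the doc comments describe (the password literal is arbitrary):

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/bcrypt"
)

func main() {
	// The salt is generated internally; a cost below MinCost falls back to DefaultCost.
	hash, err := bcrypt.GenerateFromPassword([]byte("s3cret"), bcrypt.DefaultCost)
	if err != nil {
		panic(err)
	}

	// A nil error means the plaintext matches the stored hash.
	if err := bcrypt.CompareHashAndPassword(hash, []byte("s3cret")); err != nil {
		fmt.Println("mismatch:", err)
		return
	}
	fmt.Println("password ok")
}
```

Cost(hash) recovers the work factor from a stored hash, which is how a system detects passwords that should be re-hashed at a higher cost.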
-func CompareHashAndPassword(hashedPassword, password []byte) error { - p, err := newFromHash(hashedPassword) - if err != nil { - return err - } - - otherHash, err := bcrypt(password, p.cost, p.salt) - if err != nil { - return err - } - - otherP := &hashed{otherHash, p.salt, p.cost, p.major, p.minor} - if subtle.ConstantTimeCompare(p.Hash(), otherP.Hash()) == 1 { - return nil - } - - return ErrMismatchedHashAndPassword -} - -// Cost returns the hashing cost used to create the given hashed -// password. When, in the future, the hashing cost of a password system needs -// to be increased in order to adjust for greater computational power, this -// function allows one to establish which passwords need to be updated. -func Cost(hashedPassword []byte) (int, error) { - p, err := newFromHash(hashedPassword) - if err != nil { - return 0, err - } - return p.cost, nil -} - -func newFromPassword(password []byte, cost int) (*hashed, error) { - if cost < MinCost { - cost = DefaultCost - } - p := new(hashed) - p.major = majorVersion - p.minor = minorVersion - - err := checkCost(cost) - if err != nil { - return nil, err - } - p.cost = cost - - unencodedSalt := make([]byte, maxSaltSize) - _, err = io.ReadFull(rand.Reader, unencodedSalt) - if err != nil { - return nil, err - } - - p.salt = base64Encode(unencodedSalt) - hash, err := bcrypt(password, p.cost, p.salt) - if err != nil { - return nil, err - } - p.hash = hash - return p, err -} - -func newFromHash(hashedSecret []byte) (*hashed, error) { - if len(hashedSecret) < minHashSize { - return nil, ErrHashTooShort - } - p := new(hashed) - n, err := p.decodeVersion(hashedSecret) - if err != nil { - return nil, err - } - hashedSecret = hashedSecret[n:] - n, err = p.decodeCost(hashedSecret) - if err != nil { - return nil, err - } - hashedSecret = hashedSecret[n:] - - // The "+2" is here because we'll have to append at most 2 '=' to the salt - // when base64 decoding it in expensiveBlowfishSetup(). - p.salt = make([]byte, encodedSaltSize, encodedSaltSize+2) - copy(p.salt, hashedSecret[:encodedSaltSize]) - - hashedSecret = hashedSecret[encodedSaltSize:] - p.hash = make([]byte, len(hashedSecret)) - copy(p.hash, hashedSecret) - - return p, nil -} - -func bcrypt(password []byte, cost int, salt []byte) ([]byte, error) { - cipherData := make([]byte, len(magicCipherData)) - copy(cipherData, magicCipherData) - - c, err := expensiveBlowfishSetup(password, uint32(cost), salt) - if err != nil { - return nil, err - } - - for i := 0; i < 24; i += 8 { - for j := 0; j < 64; j++ { - c.Encrypt(cipherData[i:i+8], cipherData[i:i+8]) - } - } - - // Bug compatibility with C bcrypt implementations. We only encode 23 of - // the 24 bytes encrypted. - hsh := base64Encode(cipherData[:maxCryptedHashSize]) - return hsh, nil -} - -func expensiveBlowfishSetup(key []byte, cost uint32, salt []byte) (*blowfish.Cipher, error) { - csalt, err := base64Decode(salt) - if err != nil { - return nil, err - } - - // Bug compatibility with C bcrypt implementations. They use the trailing - // NULL in the key string during expansion. - // We copy the key to prevent changing the underlying array. 
- ckey := append(key[:len(key):len(key)], 0) - - c, err := blowfish.NewSaltedCipher(ckey, csalt) - if err != nil { - return nil, err - } - - var i, rounds uint64 - rounds = 1 << cost - for i = 0; i < rounds; i++ { - blowfish.ExpandKey(ckey, c) - blowfish.ExpandKey(csalt, c) - } - - return c, nil -} - -func (p *hashed) Hash() []byte { - arr := make([]byte, 60) - arr[0] = '$' - arr[1] = p.major - n := 2 - if p.minor != 0 { - arr[2] = p.minor - n = 3 - } - arr[n] = '$' - n++ - copy(arr[n:], []byte(fmt.Sprintf("%02d", p.cost))) - n += 2 - arr[n] = '$' - n++ - copy(arr[n:], p.salt) - n += encodedSaltSize - copy(arr[n:], p.hash) - n += encodedHashSize - return arr[:n] -} - -func (p *hashed) decodeVersion(sbytes []byte) (int, error) { - if sbytes[0] != '$' { - return -1, InvalidHashPrefixError(sbytes[0]) - } - if sbytes[1] > majorVersion { - return -1, HashVersionTooNewError(sbytes[1]) - } - p.major = sbytes[1] - n := 3 - if sbytes[2] != '$' { - p.minor = sbytes[2] - n++ - } - return n, nil -} - -// sbytes should begin where decodeVersion left off. -func (p *hashed) decodeCost(sbytes []byte) (int, error) { - cost, err := strconv.Atoi(string(sbytes[0:2])) - if err != nil { - return -1, err - } - err = checkCost(cost) - if err != nil { - return -1, err - } - p.cost = cost - return 3, nil -} - -func (p *hashed) String() string { - return fmt.Sprintf("&{hash: %#v, salt: %#v, cost: %d, major: %c, minor: %c}", string(p.hash), p.salt, p.cost, p.major, p.minor) -} - -func checkCost(cost int) error { - if cost < MinCost || cost > MaxCost { - return InvalidCostError(cost) - } - return nil -} diff --git a/vendor/golang.org/x/crypto/blowfish/block.go b/vendor/golang.org/x/crypto/blowfish/block.go deleted file mode 100644 index 9d80f195..00000000 --- a/vendor/golang.org/x/crypto/blowfish/block.go +++ /dev/null @@ -1,159 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package blowfish - -// getNextWord returns the next big-endian uint32 value from the byte slice -// at the given position in a circular manner, updating the position. -func getNextWord(b []byte, pos *int) uint32 { - var w uint32 - j := *pos - for i := 0; i < 4; i++ { - w = w<<8 | uint32(b[j]) - j++ - if j >= len(b) { - j = 0 - } - } - *pos = j - return w -} - -// ExpandKey performs a key expansion on the given *Cipher. Specifically, it -// performs the Blowfish algorithm's key schedule which sets up the *Cipher's -// pi and substitution tables for calls to Encrypt. This is used, primarily, -// by the bcrypt package to reuse the Blowfish key schedule during its -// set up. It's unlikely that you need to use this directly. -func ExpandKey(key []byte, c *Cipher) { - j := 0 - for i := 0; i < 18; i++ { - // Using inlined getNextWord for performance. 
- var d uint32 - for k := 0; k < 4; k++ { - d = d<<8 | uint32(key[j]) - j++ - if j >= len(key) { - j = 0 - } - } - c.p[i] ^= d - } - - var l, r uint32 - for i := 0; i < 18; i += 2 { - l, r = encryptBlock(l, r, c) - c.p[i], c.p[i+1] = l, r - } - - for i := 0; i < 256; i += 2 { - l, r = encryptBlock(l, r, c) - c.s0[i], c.s0[i+1] = l, r - } - for i := 0; i < 256; i += 2 { - l, r = encryptBlock(l, r, c) - c.s1[i], c.s1[i+1] = l, r - } - for i := 0; i < 256; i += 2 { - l, r = encryptBlock(l, r, c) - c.s2[i], c.s2[i+1] = l, r - } - for i := 0; i < 256; i += 2 { - l, r = encryptBlock(l, r, c) - c.s3[i], c.s3[i+1] = l, r - } -} - -// This is similar to ExpandKey, but folds the salt during the key -// schedule. While ExpandKey is essentially expandKeyWithSalt with an all-zero -// salt passed in, reusing ExpandKey turns out to be a place of inefficiency -// and specializing it here is useful. -func expandKeyWithSalt(key []byte, salt []byte, c *Cipher) { - j := 0 - for i := 0; i < 18; i++ { - c.p[i] ^= getNextWord(key, &j) - } - - j = 0 - var l, r uint32 - for i := 0; i < 18; i += 2 { - l ^= getNextWord(salt, &j) - r ^= getNextWord(salt, &j) - l, r = encryptBlock(l, r, c) - c.p[i], c.p[i+1] = l, r - } - - for i := 0; i < 256; i += 2 { - l ^= getNextWord(salt, &j) - r ^= getNextWord(salt, &j) - l, r = encryptBlock(l, r, c) - c.s0[i], c.s0[i+1] = l, r - } - - for i := 0; i < 256; i += 2 { - l ^= getNextWord(salt, &j) - r ^= getNextWord(salt, &j) - l, r = encryptBlock(l, r, c) - c.s1[i], c.s1[i+1] = l, r - } - - for i := 0; i < 256; i += 2 { - l ^= getNextWord(salt, &j) - r ^= getNextWord(salt, &j) - l, r = encryptBlock(l, r, c) - c.s2[i], c.s2[i+1] = l, r - } - - for i := 0; i < 256; i += 2 { - l ^= getNextWord(salt, &j) - r ^= getNextWord(salt, &j) - l, r = encryptBlock(l, r, c) - c.s3[i], c.s3[i+1] = l, r - } -} - -func encryptBlock(l, r uint32, c *Cipher) (uint32, uint32) { - xl, xr := l, r - xl ^= c.p[0] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[1] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[2] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[3] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[4] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[5] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[6] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[7] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[8] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[9] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[10] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[11] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[12] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[13] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[14] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[15] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[16] - xr ^= c.p[17] - return xr, xl -} - -func 
decryptBlock(l, r uint32, c *Cipher) (uint32, uint32) { - xl, xr := l, r - xl ^= c.p[17] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[16] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[15] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[14] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[13] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[12] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[11] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[10] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[9] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[8] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[7] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[6] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[5] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[4] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[3] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[2] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[1] - xr ^= c.p[0] - return xr, xl -} diff --git a/vendor/golang.org/x/crypto/blowfish/cipher.go b/vendor/golang.org/x/crypto/blowfish/cipher.go deleted file mode 100644 index 213bf204..00000000 --- a/vendor/golang.org/x/crypto/blowfish/cipher.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package blowfish implements Bruce Schneier's Blowfish encryption algorithm. -// -// Blowfish is a legacy cipher and its short block size makes it vulnerable to -// birthday bound attacks (see https://sweet32.info). It should only be used -// where compatibility with legacy systems, not security, is the goal. -// -// Deprecated: any new system should use AES (from crypto/aes, if necessary in -// an AEAD mode like crypto/cipher.NewGCM) or XChaCha20-Poly1305 (from -// golang.org/x/crypto/chacha20poly1305). -package blowfish // import "golang.org/x/crypto/blowfish" - -// The code is a port of Bruce Schneier's C implementation. -// See https://www.schneier.com/blowfish.html. - -import "strconv" - -// The Blowfish block size in bytes. -const BlockSize = 8 - -// A Cipher is an instance of Blowfish encryption using a particular key. -type Cipher struct { - p [18]uint32 - s0, s1, s2, s3 [256]uint32 -} - -type KeySizeError int - -func (k KeySizeError) Error() string { - return "crypto/blowfish: invalid key size " + strconv.Itoa(int(k)) -} - -// NewCipher creates and returns a Cipher. -// The key argument should be the Blowfish key, from 1 to 56 bytes. 
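Per the deprecation notice above, Blowfish is legacy-only. Still, as a sketch of the single-block API — raw block-at-a-time encryption like this is not a safe mode for real data and is shown only to illustrate Encrypt/Decrypt:

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/blowfish"
)

func main() {
	c, err := blowfish.NewCipher([]byte("an example key")) // key must be 1 to 56 bytes
	if err != nil {
		panic(err)
	}

	src := []byte("8bytes!!") // exactly one 8-byte block
	dst := make([]byte, blowfish.BlockSize)
	c.Encrypt(dst, src)

	out := make([]byte, blowfish.BlockSize)
	c.Decrypt(out, dst)
	fmt.Printf("%s\n", out) // 8bytes!!
}
```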
-func NewCipher(key []byte) (*Cipher, error) { - var result Cipher - if k := len(key); k < 1 || k > 56 { - return nil, KeySizeError(k) - } - initCipher(&result) - ExpandKey(key, &result) - return &result, nil -} - -// NewSaltedCipher creates a returns a Cipher that folds a salt into its key -// schedule. For most purposes, NewCipher, instead of NewSaltedCipher, is -// sufficient and desirable. For bcrypt compatibility, the key can be over 56 -// bytes. -func NewSaltedCipher(key, salt []byte) (*Cipher, error) { - if len(salt) == 0 { - return NewCipher(key) - } - var result Cipher - if k := len(key); k < 1 { - return nil, KeySizeError(k) - } - initCipher(&result) - expandKeyWithSalt(key, salt, &result) - return &result, nil -} - -// BlockSize returns the Blowfish block size, 8 bytes. -// It is necessary to satisfy the Block interface in the -// package "crypto/cipher". -func (c *Cipher) BlockSize() int { return BlockSize } - -// Encrypt encrypts the 8-byte buffer src using the key k -// and stores the result in dst. -// Note that for amounts of data larger than a block, -// it is not safe to just call Encrypt on successive blocks; -// instead, use an encryption mode like CBC (see crypto/cipher/cbc.go). -func (c *Cipher) Encrypt(dst, src []byte) { - l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) - r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7]) - l, r = encryptBlock(l, r, c) - dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l) - dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r) -} - -// Decrypt decrypts the 8-byte buffer src using the key k -// and stores the result in dst. -func (c *Cipher) Decrypt(dst, src []byte) { - l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) - r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7]) - l, r = decryptBlock(l, r, c) - dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l) - dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r) -} - -func initCipher(c *Cipher) { - copy(c.p[0:], p[0:]) - copy(c.s0[0:], s0[0:]) - copy(c.s1[0:], s1[0:]) - copy(c.s2[0:], s2[0:]) - copy(c.s3[0:], s3[0:]) -} diff --git a/vendor/golang.org/x/crypto/blowfish/const.go b/vendor/golang.org/x/crypto/blowfish/const.go deleted file mode 100644 index d0407759..00000000 --- a/vendor/golang.org/x/crypto/blowfish/const.go +++ /dev/null @@ -1,199 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// The startup permutation array and substitution boxes. -// They are the hexadecimal digits of PI; see: -// https://www.schneier.com/code/constants.txt. 
- -package blowfish - -var s0 = [256]uint32{ - 0xd1310ba6, 0x98dfb5ac, 0x2ffd72db, 0xd01adfb7, 0xb8e1afed, 0x6a267e96, - 0xba7c9045, 0xf12c7f99, 0x24a19947, 0xb3916cf7, 0x0801f2e2, 0x858efc16, - 0x636920d8, 0x71574e69, 0xa458fea3, 0xf4933d7e, 0x0d95748f, 0x728eb658, - 0x718bcd58, 0x82154aee, 0x7b54a41d, 0xc25a59b5, 0x9c30d539, 0x2af26013, - 0xc5d1b023, 0x286085f0, 0xca417918, 0xb8db38ef, 0x8e79dcb0, 0x603a180e, - 0x6c9e0e8b, 0xb01e8a3e, 0xd71577c1, 0xbd314b27, 0x78af2fda, 0x55605c60, - 0xe65525f3, 0xaa55ab94, 0x57489862, 0x63e81440, 0x55ca396a, 0x2aab10b6, - 0xb4cc5c34, 0x1141e8ce, 0xa15486af, 0x7c72e993, 0xb3ee1411, 0x636fbc2a, - 0x2ba9c55d, 0x741831f6, 0xce5c3e16, 0x9b87931e, 0xafd6ba33, 0x6c24cf5c, - 0x7a325381, 0x28958677, 0x3b8f4898, 0x6b4bb9af, 0xc4bfe81b, 0x66282193, - 0x61d809cc, 0xfb21a991, 0x487cac60, 0x5dec8032, 0xef845d5d, 0xe98575b1, - 0xdc262302, 0xeb651b88, 0x23893e81, 0xd396acc5, 0x0f6d6ff3, 0x83f44239, - 0x2e0b4482, 0xa4842004, 0x69c8f04a, 0x9e1f9b5e, 0x21c66842, 0xf6e96c9a, - 0x670c9c61, 0xabd388f0, 0x6a51a0d2, 0xd8542f68, 0x960fa728, 0xab5133a3, - 0x6eef0b6c, 0x137a3be4, 0xba3bf050, 0x7efb2a98, 0xa1f1651d, 0x39af0176, - 0x66ca593e, 0x82430e88, 0x8cee8619, 0x456f9fb4, 0x7d84a5c3, 0x3b8b5ebe, - 0xe06f75d8, 0x85c12073, 0x401a449f, 0x56c16aa6, 0x4ed3aa62, 0x363f7706, - 0x1bfedf72, 0x429b023d, 0x37d0d724, 0xd00a1248, 0xdb0fead3, 0x49f1c09b, - 0x075372c9, 0x80991b7b, 0x25d479d8, 0xf6e8def7, 0xe3fe501a, 0xb6794c3b, - 0x976ce0bd, 0x04c006ba, 0xc1a94fb6, 0x409f60c4, 0x5e5c9ec2, 0x196a2463, - 0x68fb6faf, 0x3e6c53b5, 0x1339b2eb, 0x3b52ec6f, 0x6dfc511f, 0x9b30952c, - 0xcc814544, 0xaf5ebd09, 0xbee3d004, 0xde334afd, 0x660f2807, 0x192e4bb3, - 0xc0cba857, 0x45c8740f, 0xd20b5f39, 0xb9d3fbdb, 0x5579c0bd, 0x1a60320a, - 0xd6a100c6, 0x402c7279, 0x679f25fe, 0xfb1fa3cc, 0x8ea5e9f8, 0xdb3222f8, - 0x3c7516df, 0xfd616b15, 0x2f501ec8, 0xad0552ab, 0x323db5fa, 0xfd238760, - 0x53317b48, 0x3e00df82, 0x9e5c57bb, 0xca6f8ca0, 0x1a87562e, 0xdf1769db, - 0xd542a8f6, 0x287effc3, 0xac6732c6, 0x8c4f5573, 0x695b27b0, 0xbbca58c8, - 0xe1ffa35d, 0xb8f011a0, 0x10fa3d98, 0xfd2183b8, 0x4afcb56c, 0x2dd1d35b, - 0x9a53e479, 0xb6f84565, 0xd28e49bc, 0x4bfb9790, 0xe1ddf2da, 0xa4cb7e33, - 0x62fb1341, 0xcee4c6e8, 0xef20cada, 0x36774c01, 0xd07e9efe, 0x2bf11fb4, - 0x95dbda4d, 0xae909198, 0xeaad8e71, 0x6b93d5a0, 0xd08ed1d0, 0xafc725e0, - 0x8e3c5b2f, 0x8e7594b7, 0x8ff6e2fb, 0xf2122b64, 0x8888b812, 0x900df01c, - 0x4fad5ea0, 0x688fc31c, 0xd1cff191, 0xb3a8c1ad, 0x2f2f2218, 0xbe0e1777, - 0xea752dfe, 0x8b021fa1, 0xe5a0cc0f, 0xb56f74e8, 0x18acf3d6, 0xce89e299, - 0xb4a84fe0, 0xfd13e0b7, 0x7cc43b81, 0xd2ada8d9, 0x165fa266, 0x80957705, - 0x93cc7314, 0x211a1477, 0xe6ad2065, 0x77b5fa86, 0xc75442f5, 0xfb9d35cf, - 0xebcdaf0c, 0x7b3e89a0, 0xd6411bd3, 0xae1e7e49, 0x00250e2d, 0x2071b35e, - 0x226800bb, 0x57b8e0af, 0x2464369b, 0xf009b91e, 0x5563911d, 0x59dfa6aa, - 0x78c14389, 0xd95a537f, 0x207d5ba2, 0x02e5b9c5, 0x83260376, 0x6295cfa9, - 0x11c81968, 0x4e734a41, 0xb3472dca, 0x7b14a94a, 0x1b510052, 0x9a532915, - 0xd60f573f, 0xbc9bc6e4, 0x2b60a476, 0x81e67400, 0x08ba6fb5, 0x571be91f, - 0xf296ec6b, 0x2a0dd915, 0xb6636521, 0xe7b9f9b6, 0xff34052e, 0xc5855664, - 0x53b02d5d, 0xa99f8fa1, 0x08ba4799, 0x6e85076a, -} - -var s1 = [256]uint32{ - 0x4b7a70e9, 0xb5b32944, 0xdb75092e, 0xc4192623, 0xad6ea6b0, 0x49a7df7d, - 0x9cee60b8, 0x8fedb266, 0xecaa8c71, 0x699a17ff, 0x5664526c, 0xc2b19ee1, - 0x193602a5, 0x75094c29, 0xa0591340, 0xe4183a3e, 0x3f54989a, 0x5b429d65, - 0x6b8fe4d6, 0x99f73fd6, 0xa1d29c07, 0xefe830f5, 0x4d2d38e6, 0xf0255dc1, - 0x4cdd2086, 0x8470eb26, 
0x6382e9c6, 0x021ecc5e, 0x09686b3f, 0x3ebaefc9, - 0x3c971814, 0x6b6a70a1, 0x687f3584, 0x52a0e286, 0xb79c5305, 0xaa500737, - 0x3e07841c, 0x7fdeae5c, 0x8e7d44ec, 0x5716f2b8, 0xb03ada37, 0xf0500c0d, - 0xf01c1f04, 0x0200b3ff, 0xae0cf51a, 0x3cb574b2, 0x25837a58, 0xdc0921bd, - 0xd19113f9, 0x7ca92ff6, 0x94324773, 0x22f54701, 0x3ae5e581, 0x37c2dadc, - 0xc8b57634, 0x9af3dda7, 0xa9446146, 0x0fd0030e, 0xecc8c73e, 0xa4751e41, - 0xe238cd99, 0x3bea0e2f, 0x3280bba1, 0x183eb331, 0x4e548b38, 0x4f6db908, - 0x6f420d03, 0xf60a04bf, 0x2cb81290, 0x24977c79, 0x5679b072, 0xbcaf89af, - 0xde9a771f, 0xd9930810, 0xb38bae12, 0xdccf3f2e, 0x5512721f, 0x2e6b7124, - 0x501adde6, 0x9f84cd87, 0x7a584718, 0x7408da17, 0xbc9f9abc, 0xe94b7d8c, - 0xec7aec3a, 0xdb851dfa, 0x63094366, 0xc464c3d2, 0xef1c1847, 0x3215d908, - 0xdd433b37, 0x24c2ba16, 0x12a14d43, 0x2a65c451, 0x50940002, 0x133ae4dd, - 0x71dff89e, 0x10314e55, 0x81ac77d6, 0x5f11199b, 0x043556f1, 0xd7a3c76b, - 0x3c11183b, 0x5924a509, 0xf28fe6ed, 0x97f1fbfa, 0x9ebabf2c, 0x1e153c6e, - 0x86e34570, 0xeae96fb1, 0x860e5e0a, 0x5a3e2ab3, 0x771fe71c, 0x4e3d06fa, - 0x2965dcb9, 0x99e71d0f, 0x803e89d6, 0x5266c825, 0x2e4cc978, 0x9c10b36a, - 0xc6150eba, 0x94e2ea78, 0xa5fc3c53, 0x1e0a2df4, 0xf2f74ea7, 0x361d2b3d, - 0x1939260f, 0x19c27960, 0x5223a708, 0xf71312b6, 0xebadfe6e, 0xeac31f66, - 0xe3bc4595, 0xa67bc883, 0xb17f37d1, 0x018cff28, 0xc332ddef, 0xbe6c5aa5, - 0x65582185, 0x68ab9802, 0xeecea50f, 0xdb2f953b, 0x2aef7dad, 0x5b6e2f84, - 0x1521b628, 0x29076170, 0xecdd4775, 0x619f1510, 0x13cca830, 0xeb61bd96, - 0x0334fe1e, 0xaa0363cf, 0xb5735c90, 0x4c70a239, 0xd59e9e0b, 0xcbaade14, - 0xeecc86bc, 0x60622ca7, 0x9cab5cab, 0xb2f3846e, 0x648b1eaf, 0x19bdf0ca, - 0xa02369b9, 0x655abb50, 0x40685a32, 0x3c2ab4b3, 0x319ee9d5, 0xc021b8f7, - 0x9b540b19, 0x875fa099, 0x95f7997e, 0x623d7da8, 0xf837889a, 0x97e32d77, - 0x11ed935f, 0x16681281, 0x0e358829, 0xc7e61fd6, 0x96dedfa1, 0x7858ba99, - 0x57f584a5, 0x1b227263, 0x9b83c3ff, 0x1ac24696, 0xcdb30aeb, 0x532e3054, - 0x8fd948e4, 0x6dbc3128, 0x58ebf2ef, 0x34c6ffea, 0xfe28ed61, 0xee7c3c73, - 0x5d4a14d9, 0xe864b7e3, 0x42105d14, 0x203e13e0, 0x45eee2b6, 0xa3aaabea, - 0xdb6c4f15, 0xfacb4fd0, 0xc742f442, 0xef6abbb5, 0x654f3b1d, 0x41cd2105, - 0xd81e799e, 0x86854dc7, 0xe44b476a, 0x3d816250, 0xcf62a1f2, 0x5b8d2646, - 0xfc8883a0, 0xc1c7b6a3, 0x7f1524c3, 0x69cb7492, 0x47848a0b, 0x5692b285, - 0x095bbf00, 0xad19489d, 0x1462b174, 0x23820e00, 0x58428d2a, 0x0c55f5ea, - 0x1dadf43e, 0x233f7061, 0x3372f092, 0x8d937e41, 0xd65fecf1, 0x6c223bdb, - 0x7cde3759, 0xcbee7460, 0x4085f2a7, 0xce77326e, 0xa6078084, 0x19f8509e, - 0xe8efd855, 0x61d99735, 0xa969a7aa, 0xc50c06c2, 0x5a04abfc, 0x800bcadc, - 0x9e447a2e, 0xc3453484, 0xfdd56705, 0x0e1e9ec9, 0xdb73dbd3, 0x105588cd, - 0x675fda79, 0xe3674340, 0xc5c43465, 0x713e38d8, 0x3d28f89e, 0xf16dff20, - 0x153e21e7, 0x8fb03d4a, 0xe6e39f2b, 0xdb83adf7, -} - -var s2 = [256]uint32{ - 0xe93d5a68, 0x948140f7, 0xf64c261c, 0x94692934, 0x411520f7, 0x7602d4f7, - 0xbcf46b2e, 0xd4a20068, 0xd4082471, 0x3320f46a, 0x43b7d4b7, 0x500061af, - 0x1e39f62e, 0x97244546, 0x14214f74, 0xbf8b8840, 0x4d95fc1d, 0x96b591af, - 0x70f4ddd3, 0x66a02f45, 0xbfbc09ec, 0x03bd9785, 0x7fac6dd0, 0x31cb8504, - 0x96eb27b3, 0x55fd3941, 0xda2547e6, 0xabca0a9a, 0x28507825, 0x530429f4, - 0x0a2c86da, 0xe9b66dfb, 0x68dc1462, 0xd7486900, 0x680ec0a4, 0x27a18dee, - 0x4f3ffea2, 0xe887ad8c, 0xb58ce006, 0x7af4d6b6, 0xaace1e7c, 0xd3375fec, - 0xce78a399, 0x406b2a42, 0x20fe9e35, 0xd9f385b9, 0xee39d7ab, 0x3b124e8b, - 0x1dc9faf7, 0x4b6d1856, 0x26a36631, 0xeae397b2, 0x3a6efa74, 0xdd5b4332, - 0x6841e7f7, 
0xca7820fb, 0xfb0af54e, 0xd8feb397, 0x454056ac, 0xba489527, - 0x55533a3a, 0x20838d87, 0xfe6ba9b7, 0xd096954b, 0x55a867bc, 0xa1159a58, - 0xcca92963, 0x99e1db33, 0xa62a4a56, 0x3f3125f9, 0x5ef47e1c, 0x9029317c, - 0xfdf8e802, 0x04272f70, 0x80bb155c, 0x05282ce3, 0x95c11548, 0xe4c66d22, - 0x48c1133f, 0xc70f86dc, 0x07f9c9ee, 0x41041f0f, 0x404779a4, 0x5d886e17, - 0x325f51eb, 0xd59bc0d1, 0xf2bcc18f, 0x41113564, 0x257b7834, 0x602a9c60, - 0xdff8e8a3, 0x1f636c1b, 0x0e12b4c2, 0x02e1329e, 0xaf664fd1, 0xcad18115, - 0x6b2395e0, 0x333e92e1, 0x3b240b62, 0xeebeb922, 0x85b2a20e, 0xe6ba0d99, - 0xde720c8c, 0x2da2f728, 0xd0127845, 0x95b794fd, 0x647d0862, 0xe7ccf5f0, - 0x5449a36f, 0x877d48fa, 0xc39dfd27, 0xf33e8d1e, 0x0a476341, 0x992eff74, - 0x3a6f6eab, 0xf4f8fd37, 0xa812dc60, 0xa1ebddf8, 0x991be14c, 0xdb6e6b0d, - 0xc67b5510, 0x6d672c37, 0x2765d43b, 0xdcd0e804, 0xf1290dc7, 0xcc00ffa3, - 0xb5390f92, 0x690fed0b, 0x667b9ffb, 0xcedb7d9c, 0xa091cf0b, 0xd9155ea3, - 0xbb132f88, 0x515bad24, 0x7b9479bf, 0x763bd6eb, 0x37392eb3, 0xcc115979, - 0x8026e297, 0xf42e312d, 0x6842ada7, 0xc66a2b3b, 0x12754ccc, 0x782ef11c, - 0x6a124237, 0xb79251e7, 0x06a1bbe6, 0x4bfb6350, 0x1a6b1018, 0x11caedfa, - 0x3d25bdd8, 0xe2e1c3c9, 0x44421659, 0x0a121386, 0xd90cec6e, 0xd5abea2a, - 0x64af674e, 0xda86a85f, 0xbebfe988, 0x64e4c3fe, 0x9dbc8057, 0xf0f7c086, - 0x60787bf8, 0x6003604d, 0xd1fd8346, 0xf6381fb0, 0x7745ae04, 0xd736fccc, - 0x83426b33, 0xf01eab71, 0xb0804187, 0x3c005e5f, 0x77a057be, 0xbde8ae24, - 0x55464299, 0xbf582e61, 0x4e58f48f, 0xf2ddfda2, 0xf474ef38, 0x8789bdc2, - 0x5366f9c3, 0xc8b38e74, 0xb475f255, 0x46fcd9b9, 0x7aeb2661, 0x8b1ddf84, - 0x846a0e79, 0x915f95e2, 0x466e598e, 0x20b45770, 0x8cd55591, 0xc902de4c, - 0xb90bace1, 0xbb8205d0, 0x11a86248, 0x7574a99e, 0xb77f19b6, 0xe0a9dc09, - 0x662d09a1, 0xc4324633, 0xe85a1f02, 0x09f0be8c, 0x4a99a025, 0x1d6efe10, - 0x1ab93d1d, 0x0ba5a4df, 0xa186f20f, 0x2868f169, 0xdcb7da83, 0x573906fe, - 0xa1e2ce9b, 0x4fcd7f52, 0x50115e01, 0xa70683fa, 0xa002b5c4, 0x0de6d027, - 0x9af88c27, 0x773f8641, 0xc3604c06, 0x61a806b5, 0xf0177a28, 0xc0f586e0, - 0x006058aa, 0x30dc7d62, 0x11e69ed7, 0x2338ea63, 0x53c2dd94, 0xc2c21634, - 0xbbcbee56, 0x90bcb6de, 0xebfc7da1, 0xce591d76, 0x6f05e409, 0x4b7c0188, - 0x39720a3d, 0x7c927c24, 0x86e3725f, 0x724d9db9, 0x1ac15bb4, 0xd39eb8fc, - 0xed545578, 0x08fca5b5, 0xd83d7cd3, 0x4dad0fc4, 0x1e50ef5e, 0xb161e6f8, - 0xa28514d9, 0x6c51133c, 0x6fd5c7e7, 0x56e14ec4, 0x362abfce, 0xddc6c837, - 0xd79a3234, 0x92638212, 0x670efa8e, 0x406000e0, -} - -var s3 = [256]uint32{ - 0x3a39ce37, 0xd3faf5cf, 0xabc27737, 0x5ac52d1b, 0x5cb0679e, 0x4fa33742, - 0xd3822740, 0x99bc9bbe, 0xd5118e9d, 0xbf0f7315, 0xd62d1c7e, 0xc700c47b, - 0xb78c1b6b, 0x21a19045, 0xb26eb1be, 0x6a366eb4, 0x5748ab2f, 0xbc946e79, - 0xc6a376d2, 0x6549c2c8, 0x530ff8ee, 0x468dde7d, 0xd5730a1d, 0x4cd04dc6, - 0x2939bbdb, 0xa9ba4650, 0xac9526e8, 0xbe5ee304, 0xa1fad5f0, 0x6a2d519a, - 0x63ef8ce2, 0x9a86ee22, 0xc089c2b8, 0x43242ef6, 0xa51e03aa, 0x9cf2d0a4, - 0x83c061ba, 0x9be96a4d, 0x8fe51550, 0xba645bd6, 0x2826a2f9, 0xa73a3ae1, - 0x4ba99586, 0xef5562e9, 0xc72fefd3, 0xf752f7da, 0x3f046f69, 0x77fa0a59, - 0x80e4a915, 0x87b08601, 0x9b09e6ad, 0x3b3ee593, 0xe990fd5a, 0x9e34d797, - 0x2cf0b7d9, 0x022b8b51, 0x96d5ac3a, 0x017da67d, 0xd1cf3ed6, 0x7c7d2d28, - 0x1f9f25cf, 0xadf2b89b, 0x5ad6b472, 0x5a88f54c, 0xe029ac71, 0xe019a5e6, - 0x47b0acfd, 0xed93fa9b, 0xe8d3c48d, 0x283b57cc, 0xf8d56629, 0x79132e28, - 0x785f0191, 0xed756055, 0xf7960e44, 0xe3d35e8c, 0x15056dd4, 0x88f46dba, - 0x03a16125, 0x0564f0bd, 0xc3eb9e15, 0x3c9057a2, 0x97271aec, 0xa93a072a, - 
0x1b3f6d9b, 0x1e6321f5, 0xf59c66fb, 0x26dcf319, 0x7533d928, 0xb155fdf5, - 0x03563482, 0x8aba3cbb, 0x28517711, 0xc20ad9f8, 0xabcc5167, 0xccad925f, - 0x4de81751, 0x3830dc8e, 0x379d5862, 0x9320f991, 0xea7a90c2, 0xfb3e7bce, - 0x5121ce64, 0x774fbe32, 0xa8b6e37e, 0xc3293d46, 0x48de5369, 0x6413e680, - 0xa2ae0810, 0xdd6db224, 0x69852dfd, 0x09072166, 0xb39a460a, 0x6445c0dd, - 0x586cdecf, 0x1c20c8ae, 0x5bbef7dd, 0x1b588d40, 0xccd2017f, 0x6bb4e3bb, - 0xdda26a7e, 0x3a59ff45, 0x3e350a44, 0xbcb4cdd5, 0x72eacea8, 0xfa6484bb, - 0x8d6612ae, 0xbf3c6f47, 0xd29be463, 0x542f5d9e, 0xaec2771b, 0xf64e6370, - 0x740e0d8d, 0xe75b1357, 0xf8721671, 0xaf537d5d, 0x4040cb08, 0x4eb4e2cc, - 0x34d2466a, 0x0115af84, 0xe1b00428, 0x95983a1d, 0x06b89fb4, 0xce6ea048, - 0x6f3f3b82, 0x3520ab82, 0x011a1d4b, 0x277227f8, 0x611560b1, 0xe7933fdc, - 0xbb3a792b, 0x344525bd, 0xa08839e1, 0x51ce794b, 0x2f32c9b7, 0xa01fbac9, - 0xe01cc87e, 0xbcc7d1f6, 0xcf0111c3, 0xa1e8aac7, 0x1a908749, 0xd44fbd9a, - 0xd0dadecb, 0xd50ada38, 0x0339c32a, 0xc6913667, 0x8df9317c, 0xe0b12b4f, - 0xf79e59b7, 0x43f5bb3a, 0xf2d519ff, 0x27d9459c, 0xbf97222c, 0x15e6fc2a, - 0x0f91fc71, 0x9b941525, 0xfae59361, 0xceb69ceb, 0xc2a86459, 0x12baa8d1, - 0xb6c1075e, 0xe3056a0c, 0x10d25065, 0xcb03a442, 0xe0ec6e0e, 0x1698db3b, - 0x4c98a0be, 0x3278e964, 0x9f1f9532, 0xe0d392df, 0xd3a0342b, 0x8971f21e, - 0x1b0a7441, 0x4ba3348c, 0xc5be7120, 0xc37632d8, 0xdf359f8d, 0x9b992f2e, - 0xe60b6f47, 0x0fe3f11d, 0xe54cda54, 0x1edad891, 0xce6279cf, 0xcd3e7e6f, - 0x1618b166, 0xfd2c1d05, 0x848fd2c5, 0xf6fb2299, 0xf523f357, 0xa6327623, - 0x93a83531, 0x56cccd02, 0xacf08162, 0x5a75ebb5, 0x6e163697, 0x88d273cc, - 0xde966292, 0x81b949d0, 0x4c50901b, 0x71c65614, 0xe6c6c7bd, 0x327a140a, - 0x45e1d006, 0xc3f27b9a, 0xc9aa53fd, 0x62a80f00, 0xbb25bfe2, 0x35bdd2f6, - 0x71126905, 0xb2040222, 0xb6cbcf7c, 0xcd769c2b, 0x53113ec0, 0x1640e3d3, - 0x38abbd60, 0x2547adf0, 0xba38209c, 0xf746ce76, 0x77afa1c5, 0x20756060, - 0x85cbfe4e, 0x8ae88dd8, 0x7aaaf9b0, 0x4cf9aa7e, 0x1948c25c, 0x02fb8a8c, - 0x01c36ae4, 0xd6ebe1f9, 0x90d4f869, 0xa65cdea0, 0x3f09252d, 0xc208e69f, - 0xb74e6132, 0xce77e25b, 0x578fdfe3, 0x3ac372e6, -} - -var p = [18]uint32{ - 0x243f6a88, 0x85a308d3, 0x13198a2e, 0x03707344, 0xa4093822, 0x299f31d0, - 0x082efa98, 0xec4e6c89, 0x452821e6, 0x38d01377, 0xbe5466cf, 0x34e90c6c, - 0xc0ac29b7, 0xc97c50dd, 0x3f84d5b5, 0xb5470917, 0x9216d5d9, 0x8979fb1b, -} diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_arm64.go b/vendor/golang.org/x/crypto/chacha20/chacha_arm64.go deleted file mode 100644 index 94c71ac1..00000000 --- a/vendor/golang.org/x/crypto/chacha20/chacha_arm64.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.11 && gc && !purego -// +build go1.11,gc,!purego - -package chacha20 - -const bufSize = 256 - -//go:noescape -func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32) - -func (c *Cipher) xorKeyStreamBlocks(dst, src []byte) { - xorKeyStreamVX(dst, src, &c.key, &c.nonce, &c.counter) -} diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s b/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s deleted file mode 100644 index 63cae9e6..00000000 --- a/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s +++ /dev/null @@ -1,308 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
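// ---- reviewer aside, not part of the diff ----
// The NEON code being removed below computes four ChaCha20 blocks in
// parallel, but each vector lane performs the same quarter-round as the
// generic Go implementation deleted later in this diff. A minimal scalar
// sketch of that primitive (package and function names are illustrative):
package sketch

import "math/bits"

// quarterRound mixes four state words; the 16/12/8/7 rotation counts
// correspond to the VREV32 and VSHL/VSRI pairs in the assembly below.
func quarterRound(a, b, c, d uint32) (uint32, uint32, uint32, uint32) {
	a += b
	d = bits.RotateLeft32(d^a, 16)
	c += d
	b = bits.RotateLeft32(b^c, 12)
	a += b
	d = bits.RotateLeft32(d^a, 8)
	c += d
	b = bits.RotateLeft32(b^c, 7)
	return a, b, c, d
}
// ---- end aside ----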
- -//go:build go1.11 && gc && !purego -// +build go1.11,gc,!purego - -#include "textflag.h" - -#define NUM_ROUNDS 10 - -// func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32) -TEXT ·xorKeyStreamVX(SB), NOSPLIT, $0 - MOVD dst+0(FP), R1 - MOVD src+24(FP), R2 - MOVD src_len+32(FP), R3 - MOVD key+48(FP), R4 - MOVD nonce+56(FP), R6 - MOVD counter+64(FP), R7 - - MOVD $·constants(SB), R10 - MOVD $·incRotMatrix(SB), R11 - - MOVW (R7), R20 - - AND $~255, R3, R13 - ADD R2, R13, R12 // R12 for block end - AND $255, R3, R13 -loop: - MOVD $NUM_ROUNDS, R21 - VLD1 (R11), [V30.S4, V31.S4] - - // load contants - // VLD4R (R10), [V0.S4, V1.S4, V2.S4, V3.S4] - WORD $0x4D60E940 - - // load keys - // VLD4R 16(R4), [V4.S4, V5.S4, V6.S4, V7.S4] - WORD $0x4DFFE884 - // VLD4R 16(R4), [V8.S4, V9.S4, V10.S4, V11.S4] - WORD $0x4DFFE888 - SUB $32, R4 - - // load counter + nonce - // VLD1R (R7), [V12.S4] - WORD $0x4D40C8EC - - // VLD3R (R6), [V13.S4, V14.S4, V15.S4] - WORD $0x4D40E8CD - - // update counter - VADD V30.S4, V12.S4, V12.S4 - -chacha: - // V0..V3 += V4..V7 - // V12..V15 <<<= ((V12..V15 XOR V0..V3), 16) - VADD V0.S4, V4.S4, V0.S4 - VADD V1.S4, V5.S4, V1.S4 - VADD V2.S4, V6.S4, V2.S4 - VADD V3.S4, V7.S4, V3.S4 - VEOR V12.B16, V0.B16, V12.B16 - VEOR V13.B16, V1.B16, V13.B16 - VEOR V14.B16, V2.B16, V14.B16 - VEOR V15.B16, V3.B16, V15.B16 - VREV32 V12.H8, V12.H8 - VREV32 V13.H8, V13.H8 - VREV32 V14.H8, V14.H8 - VREV32 V15.H8, V15.H8 - // V8..V11 += V12..V15 - // V4..V7 <<<= ((V4..V7 XOR V8..V11), 12) - VADD V8.S4, V12.S4, V8.S4 - VADD V9.S4, V13.S4, V9.S4 - VADD V10.S4, V14.S4, V10.S4 - VADD V11.S4, V15.S4, V11.S4 - VEOR V8.B16, V4.B16, V16.B16 - VEOR V9.B16, V5.B16, V17.B16 - VEOR V10.B16, V6.B16, V18.B16 - VEOR V11.B16, V7.B16, V19.B16 - VSHL $12, V16.S4, V4.S4 - VSHL $12, V17.S4, V5.S4 - VSHL $12, V18.S4, V6.S4 - VSHL $12, V19.S4, V7.S4 - VSRI $20, V16.S4, V4.S4 - VSRI $20, V17.S4, V5.S4 - VSRI $20, V18.S4, V6.S4 - VSRI $20, V19.S4, V7.S4 - - // V0..V3 += V4..V7 - // V12..V15 <<<= ((V12..V15 XOR V0..V3), 8) - VADD V0.S4, V4.S4, V0.S4 - VADD V1.S4, V5.S4, V1.S4 - VADD V2.S4, V6.S4, V2.S4 - VADD V3.S4, V7.S4, V3.S4 - VEOR V12.B16, V0.B16, V12.B16 - VEOR V13.B16, V1.B16, V13.B16 - VEOR V14.B16, V2.B16, V14.B16 - VEOR V15.B16, V3.B16, V15.B16 - VTBL V31.B16, [V12.B16], V12.B16 - VTBL V31.B16, [V13.B16], V13.B16 - VTBL V31.B16, [V14.B16], V14.B16 - VTBL V31.B16, [V15.B16], V15.B16 - - // V8..V11 += V12..V15 - // V4..V7 <<<= ((V4..V7 XOR V8..V11), 7) - VADD V12.S4, V8.S4, V8.S4 - VADD V13.S4, V9.S4, V9.S4 - VADD V14.S4, V10.S4, V10.S4 - VADD V15.S4, V11.S4, V11.S4 - VEOR V8.B16, V4.B16, V16.B16 - VEOR V9.B16, V5.B16, V17.B16 - VEOR V10.B16, V6.B16, V18.B16 - VEOR V11.B16, V7.B16, V19.B16 - VSHL $7, V16.S4, V4.S4 - VSHL $7, V17.S4, V5.S4 - VSHL $7, V18.S4, V6.S4 - VSHL $7, V19.S4, V7.S4 - VSRI $25, V16.S4, V4.S4 - VSRI $25, V17.S4, V5.S4 - VSRI $25, V18.S4, V6.S4 - VSRI $25, V19.S4, V7.S4 - - // V0..V3 += V5..V7, V4 - // V15,V12-V14 <<<= ((V15,V12-V14 XOR V0..V3), 16) - VADD V0.S4, V5.S4, V0.S4 - VADD V1.S4, V6.S4, V1.S4 - VADD V2.S4, V7.S4, V2.S4 - VADD V3.S4, V4.S4, V3.S4 - VEOR V15.B16, V0.B16, V15.B16 - VEOR V12.B16, V1.B16, V12.B16 - VEOR V13.B16, V2.B16, V13.B16 - VEOR V14.B16, V3.B16, V14.B16 - VREV32 V12.H8, V12.H8 - VREV32 V13.H8, V13.H8 - VREV32 V14.H8, V14.H8 - VREV32 V15.H8, V15.H8 - - // V10 += V15; V5 <<<= ((V10 XOR V5), 12) - // ... 
- VADD V15.S4, V10.S4, V10.S4 - VADD V12.S4, V11.S4, V11.S4 - VADD V13.S4, V8.S4, V8.S4 - VADD V14.S4, V9.S4, V9.S4 - VEOR V10.B16, V5.B16, V16.B16 - VEOR V11.B16, V6.B16, V17.B16 - VEOR V8.B16, V7.B16, V18.B16 - VEOR V9.B16, V4.B16, V19.B16 - VSHL $12, V16.S4, V5.S4 - VSHL $12, V17.S4, V6.S4 - VSHL $12, V18.S4, V7.S4 - VSHL $12, V19.S4, V4.S4 - VSRI $20, V16.S4, V5.S4 - VSRI $20, V17.S4, V6.S4 - VSRI $20, V18.S4, V7.S4 - VSRI $20, V19.S4, V4.S4 - - // V0 += V5; V15 <<<= ((V0 XOR V15), 8) - // ... - VADD V5.S4, V0.S4, V0.S4 - VADD V6.S4, V1.S4, V1.S4 - VADD V7.S4, V2.S4, V2.S4 - VADD V4.S4, V3.S4, V3.S4 - VEOR V0.B16, V15.B16, V15.B16 - VEOR V1.B16, V12.B16, V12.B16 - VEOR V2.B16, V13.B16, V13.B16 - VEOR V3.B16, V14.B16, V14.B16 - VTBL V31.B16, [V12.B16], V12.B16 - VTBL V31.B16, [V13.B16], V13.B16 - VTBL V31.B16, [V14.B16], V14.B16 - VTBL V31.B16, [V15.B16], V15.B16 - - // V10 += V15; V5 <<<= ((V10 XOR V5), 7) - // ... - VADD V15.S4, V10.S4, V10.S4 - VADD V12.S4, V11.S4, V11.S4 - VADD V13.S4, V8.S4, V8.S4 - VADD V14.S4, V9.S4, V9.S4 - VEOR V10.B16, V5.B16, V16.B16 - VEOR V11.B16, V6.B16, V17.B16 - VEOR V8.B16, V7.B16, V18.B16 - VEOR V9.B16, V4.B16, V19.B16 - VSHL $7, V16.S4, V5.S4 - VSHL $7, V17.S4, V6.S4 - VSHL $7, V18.S4, V7.S4 - VSHL $7, V19.S4, V4.S4 - VSRI $25, V16.S4, V5.S4 - VSRI $25, V17.S4, V6.S4 - VSRI $25, V18.S4, V7.S4 - VSRI $25, V19.S4, V4.S4 - - SUB $1, R21 - CBNZ R21, chacha - - // VLD4R (R10), [V16.S4, V17.S4, V18.S4, V19.S4] - WORD $0x4D60E950 - - // VLD4R 16(R4), [V20.S4, V21.S4, V22.S4, V23.S4] - WORD $0x4DFFE894 - VADD V30.S4, V12.S4, V12.S4 - VADD V16.S4, V0.S4, V0.S4 - VADD V17.S4, V1.S4, V1.S4 - VADD V18.S4, V2.S4, V2.S4 - VADD V19.S4, V3.S4, V3.S4 - // VLD4R 16(R4), [V24.S4, V25.S4, V26.S4, V27.S4] - WORD $0x4DFFE898 - // restore R4 - SUB $32, R4 - - // load counter + nonce - // VLD1R (R7), [V28.S4] - WORD $0x4D40C8FC - // VLD3R (R6), [V29.S4, V30.S4, V31.S4] - WORD $0x4D40E8DD - - VADD V20.S4, V4.S4, V4.S4 - VADD V21.S4, V5.S4, V5.S4 - VADD V22.S4, V6.S4, V6.S4 - VADD V23.S4, V7.S4, V7.S4 - VADD V24.S4, V8.S4, V8.S4 - VADD V25.S4, V9.S4, V9.S4 - VADD V26.S4, V10.S4, V10.S4 - VADD V27.S4, V11.S4, V11.S4 - VADD V28.S4, V12.S4, V12.S4 - VADD V29.S4, V13.S4, V13.S4 - VADD V30.S4, V14.S4, V14.S4 - VADD V31.S4, V15.S4, V15.S4 - - VZIP1 V1.S4, V0.S4, V16.S4 - VZIP2 V1.S4, V0.S4, V17.S4 - VZIP1 V3.S4, V2.S4, V18.S4 - VZIP2 V3.S4, V2.S4, V19.S4 - VZIP1 V5.S4, V4.S4, V20.S4 - VZIP2 V5.S4, V4.S4, V21.S4 - VZIP1 V7.S4, V6.S4, V22.S4 - VZIP2 V7.S4, V6.S4, V23.S4 - VZIP1 V9.S4, V8.S4, V24.S4 - VZIP2 V9.S4, V8.S4, V25.S4 - VZIP1 V11.S4, V10.S4, V26.S4 - VZIP2 V11.S4, V10.S4, V27.S4 - VZIP1 V13.S4, V12.S4, V28.S4 - VZIP2 V13.S4, V12.S4, V29.S4 - VZIP1 V15.S4, V14.S4, V30.S4 - VZIP2 V15.S4, V14.S4, V31.S4 - VZIP1 V18.D2, V16.D2, V0.D2 - VZIP2 V18.D2, V16.D2, V4.D2 - VZIP1 V19.D2, V17.D2, V8.D2 - VZIP2 V19.D2, V17.D2, V12.D2 - VLD1.P 64(R2), [V16.B16, V17.B16, V18.B16, V19.B16] - - VZIP1 V22.D2, V20.D2, V1.D2 - VZIP2 V22.D2, V20.D2, V5.D2 - VZIP1 V23.D2, V21.D2, V9.D2 - VZIP2 V23.D2, V21.D2, V13.D2 - VLD1.P 64(R2), [V20.B16, V21.B16, V22.B16, V23.B16] - VZIP1 V26.D2, V24.D2, V2.D2 - VZIP2 V26.D2, V24.D2, V6.D2 - VZIP1 V27.D2, V25.D2, V10.D2 - VZIP2 V27.D2, V25.D2, V14.D2 - VLD1.P 64(R2), [V24.B16, V25.B16, V26.B16, V27.B16] - VZIP1 V30.D2, V28.D2, V3.D2 - VZIP2 V30.D2, V28.D2, V7.D2 - VZIP1 V31.D2, V29.D2, V11.D2 - VZIP2 V31.D2, V29.D2, V15.D2 - VLD1.P 64(R2), [V28.B16, V29.B16, V30.B16, V31.B16] - VEOR V0.B16, V16.B16, V16.B16 - VEOR V1.B16, V17.B16, V17.B16 - VEOR V2.B16, 
V18.B16, V18.B16 - VEOR V3.B16, V19.B16, V19.B16 - VST1.P [V16.B16, V17.B16, V18.B16, V19.B16], 64(R1) - VEOR V4.B16, V20.B16, V20.B16 - VEOR V5.B16, V21.B16, V21.B16 - VEOR V6.B16, V22.B16, V22.B16 - VEOR V7.B16, V23.B16, V23.B16 - VST1.P [V20.B16, V21.B16, V22.B16, V23.B16], 64(R1) - VEOR V8.B16, V24.B16, V24.B16 - VEOR V9.B16, V25.B16, V25.B16 - VEOR V10.B16, V26.B16, V26.B16 - VEOR V11.B16, V27.B16, V27.B16 - VST1.P [V24.B16, V25.B16, V26.B16, V27.B16], 64(R1) - VEOR V12.B16, V28.B16, V28.B16 - VEOR V13.B16, V29.B16, V29.B16 - VEOR V14.B16, V30.B16, V30.B16 - VEOR V15.B16, V31.B16, V31.B16 - VST1.P [V28.B16, V29.B16, V30.B16, V31.B16], 64(R1) - - ADD $4, R20 - MOVW R20, (R7) // update counter - - CMP R2, R12 - BGT loop - - RET - - -DATA ·constants+0x00(SB)/4, $0x61707865 -DATA ·constants+0x04(SB)/4, $0x3320646e -DATA ·constants+0x08(SB)/4, $0x79622d32 -DATA ·constants+0x0c(SB)/4, $0x6b206574 -GLOBL ·constants(SB), NOPTR|RODATA, $32 - -DATA ·incRotMatrix+0x00(SB)/4, $0x00000000 -DATA ·incRotMatrix+0x04(SB)/4, $0x00000001 -DATA ·incRotMatrix+0x08(SB)/4, $0x00000002 -DATA ·incRotMatrix+0x0c(SB)/4, $0x00000003 -DATA ·incRotMatrix+0x10(SB)/4, $0x02010003 -DATA ·incRotMatrix+0x14(SB)/4, $0x06050407 -DATA ·incRotMatrix+0x18(SB)/4, $0x0A09080B -DATA ·incRotMatrix+0x1c(SB)/4, $0x0E0D0C0F -GLOBL ·incRotMatrix(SB), NOPTR|RODATA, $32 diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_generic.go b/vendor/golang.org/x/crypto/chacha20/chacha_generic.go deleted file mode 100644 index a2ecf5c3..00000000 --- a/vendor/golang.org/x/crypto/chacha20/chacha_generic.go +++ /dev/null @@ -1,398 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package chacha20 implements the ChaCha20 and XChaCha20 encryption algorithms -// as specified in RFC 8439 and draft-irtf-cfrg-xchacha-01. -package chacha20 - -import ( - "crypto/cipher" - "encoding/binary" - "errors" - "math/bits" - - "golang.org/x/crypto/internal/subtle" -) - -const ( - // KeySize is the size of the key used by this cipher, in bytes. - KeySize = 32 - - // NonceSize is the size of the nonce used with the standard variant of this - // cipher, in bytes. - // - // Note that this is too short to be safely generated at random if the same - // key is reused more than 2³² times. - NonceSize = 12 - - // NonceSizeX is the size of the nonce used with the XChaCha20 variant of - // this cipher, in bytes. - NonceSizeX = 24 -) - -// Cipher is a stateful instance of ChaCha20 or XChaCha20 using a particular key -// and nonce. A *Cipher implements the cipher.Stream interface. -type Cipher struct { - // The ChaCha20 state is 16 words: 4 constant, 8 of key, 1 of counter - // (incremented after each block), and 3 of nonce. - key [8]uint32 - counter uint32 - nonce [3]uint32 - - // The last len bytes of buf are leftover key stream bytes from the previous - // XORKeyStream invocation. The size of buf depends on how many blocks are - // computed at a time by xorKeyStreamBlocks. - buf [bufSize]byte - len int - - // overflow is set when the counter overflowed, no more blocks can be - // generated, and the next XORKeyStream call should panic. - overflow bool - - // The counter-independent results of the first round are cached after they - // are computed the first time. 
- precompDone bool - p1, p5, p9, p13 uint32 - p2, p6, p10, p14 uint32 - p3, p7, p11, p15 uint32 -} - -var _ cipher.Stream = (*Cipher)(nil) - -// NewUnauthenticatedCipher creates a new ChaCha20 stream cipher with the given -// 32 bytes key and a 12 or 24 bytes nonce. If a nonce of 24 bytes is provided, -// the XChaCha20 construction will be used. It returns an error if key or nonce -// have any other length. -// -// Note that ChaCha20, like all stream ciphers, is not authenticated and allows -// attackers to silently tamper with the plaintext. For this reason, it is more -// appropriate as a building block than as a standalone encryption mechanism. -// Instead, consider using package golang.org/x/crypto/chacha20poly1305. -func NewUnauthenticatedCipher(key, nonce []byte) (*Cipher, error) { - // This function is split into a wrapper so that the Cipher allocation will - // be inlined, and depending on how the caller uses the return value, won't - // escape to the heap. - c := &Cipher{} - return newUnauthenticatedCipher(c, key, nonce) -} - -func newUnauthenticatedCipher(c *Cipher, key, nonce []byte) (*Cipher, error) { - if len(key) != KeySize { - return nil, errors.New("chacha20: wrong key size") - } - if len(nonce) == NonceSizeX { - // XChaCha20 uses the ChaCha20 core to mix 16 bytes of the nonce into a - // derived key, allowing it to operate on a nonce of 24 bytes. See - // draft-irtf-cfrg-xchacha-01, Section 2.3. - key, _ = HChaCha20(key, nonce[0:16]) - cNonce := make([]byte, NonceSize) - copy(cNonce[4:12], nonce[16:24]) - nonce = cNonce - } else if len(nonce) != NonceSize { - return nil, errors.New("chacha20: wrong nonce size") - } - - key, nonce = key[:KeySize], nonce[:NonceSize] // bounds check elimination hint - c.key = [8]uint32{ - binary.LittleEndian.Uint32(key[0:4]), - binary.LittleEndian.Uint32(key[4:8]), - binary.LittleEndian.Uint32(key[8:12]), - binary.LittleEndian.Uint32(key[12:16]), - binary.LittleEndian.Uint32(key[16:20]), - binary.LittleEndian.Uint32(key[20:24]), - binary.LittleEndian.Uint32(key[24:28]), - binary.LittleEndian.Uint32(key[28:32]), - } - c.nonce = [3]uint32{ - binary.LittleEndian.Uint32(nonce[0:4]), - binary.LittleEndian.Uint32(nonce[4:8]), - binary.LittleEndian.Uint32(nonce[8:12]), - } - return c, nil -} - -// The constant first 4 words of the ChaCha20 state. -const ( - j0 uint32 = 0x61707865 // expa - j1 uint32 = 0x3320646e // nd 3 - j2 uint32 = 0x79622d32 // 2-by - j3 uint32 = 0x6b206574 // te k -) - -const blockSize = 64 - -// quarterRound is the core of ChaCha20. It shuffles the bits of 4 state words. -// It's executed 4 times for each of the 20 ChaCha20 rounds, operating on all 16 -// words each round, in columnar or diagonal groups of 4 at a time. -func quarterRound(a, b, c, d uint32) (uint32, uint32, uint32, uint32) { - a += b - d ^= a - d = bits.RotateLeft32(d, 16) - c += d - b ^= c - b = bits.RotateLeft32(b, 12) - a += b - d ^= a - d = bits.RotateLeft32(d, 8) - c += d - b ^= c - b = bits.RotateLeft32(b, 7) - return a, b, c, d -} - -// SetCounter sets the Cipher counter. The next invocation of XORKeyStream will -// behave as if (64 * counter) bytes had been encrypted so far. -// -// To prevent accidental counter reuse, SetCounter panics if counter is less -// than the current value. -// -// Note that the execution time of XORKeyStream is not independent of the -// counter value. -func (s *Cipher) SetCounter(counter uint32) { - // Internally, s may buffer multiple blocks, which complicates this - // implementation slightly. 
When checking whether the counter has rolled - // back, we must use both s.counter and s.len to determine how many blocks - // we have already output. - outputCounter := s.counter - uint32(s.len)/blockSize - if s.overflow || counter < outputCounter { - panic("chacha20: SetCounter attempted to rollback counter") - } - - // In the general case, we set the new counter value and reset s.len to 0, - // causing the next call to XORKeyStream to refill the buffer. However, if - // we're advancing within the existing buffer, we can save work by simply - // setting s.len. - if counter < s.counter { - s.len = int(s.counter-counter) * blockSize - } else { - s.counter = counter - s.len = 0 - } -} - -// XORKeyStream XORs each byte in the given slice with a byte from the -// cipher's key stream. Dst and src must overlap entirely or not at all. -// -// If len(dst) < len(src), XORKeyStream will panic. It is acceptable -// to pass a dst bigger than src, and in that case, XORKeyStream will -// only update dst[:len(src)] and will not touch the rest of dst. -// -// Multiple calls to XORKeyStream behave as if the concatenation of -// the src buffers was passed in a single run. That is, Cipher -// maintains state and does not reset at each XORKeyStream call. -func (s *Cipher) XORKeyStream(dst, src []byte) { - if len(src) == 0 { - return - } - if len(dst) < len(src) { - panic("chacha20: output smaller than input") - } - dst = dst[:len(src)] - if subtle.InexactOverlap(dst, src) { - panic("chacha20: invalid buffer overlap") - } - - // First, drain any remaining key stream from a previous XORKeyStream. - if s.len != 0 { - keyStream := s.buf[bufSize-s.len:] - if len(src) < len(keyStream) { - keyStream = keyStream[:len(src)] - } - _ = src[len(keyStream)-1] // bounds check elimination hint - for i, b := range keyStream { - dst[i] = src[i] ^ b - } - s.len -= len(keyStream) - dst, src = dst[len(keyStream):], src[len(keyStream):] - } - if len(src) == 0 { - return - } - - // If we'd need to let the counter overflow and keep generating output, - // panic immediately. If instead we'd only reach the last block, remember - // not to generate any more output after the buffer is drained. - numBlocks := (uint64(len(src)) + blockSize - 1) / blockSize - if s.overflow || uint64(s.counter)+numBlocks > 1<<32 { - panic("chacha20: counter overflow") - } else if uint64(s.counter)+numBlocks == 1<<32 { - s.overflow = true - } - - // xorKeyStreamBlocks implementations expect input lengths that are a - // multiple of bufSize. Platform-specific ones process multiple blocks at a - // time, so have bufSizes that are a multiple of blockSize. - - full := len(src) - len(src)%bufSize - if full > 0 { - s.xorKeyStreamBlocks(dst[:full], src[:full]) - } - dst, src = dst[full:], src[full:] - - // If using a multi-block xorKeyStreamBlocks would overflow, use the generic - // one that does one block at a time. - const blocksPerBuf = bufSize / blockSize - if uint64(s.counter)+blocksPerBuf > 1<<32 { - s.buf = [bufSize]byte{} - numBlocks := (len(src) + blockSize - 1) / blockSize - buf := s.buf[bufSize-numBlocks*blockSize:] - copy(buf, src) - s.xorKeyStreamBlocksGeneric(buf, buf) - s.len = len(buf) - copy(dst, buf) - return - } - - // If we have a partial (multi-)block, pad it for xorKeyStreamBlocks, and - // keep the leftover keystream for the next XORKeyStream invocation. 
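// ---- reviewer aside, not part of the diff ----
// A minimal sketch of the public API this vendored implementation backs,
// assuming the golang.org/x/crypto dependency itself is still resolvable
// via the module cache and only the vendored copy is dropped. Key and
// nonce lengths follow KeySize (32) and NonceSize (12) above;
// encryptStream is an illustrative helper, not part of the package.
package sketch

import "golang.org/x/crypto/chacha20"

func encryptStream(key, nonce, plaintext []byte) ([]byte, error) {
	c, err := chacha20.NewUnauthenticatedCipher(key, nonce)
	if err != nil {
		return nil, err
	}
	dst := make([]byte, len(plaintext))
	// State persists across calls; per the SetCounter docs above, calling
	// c.SetCounter(1) beforehand would skip the first 64-byte block.
	c.XORKeyStream(dst, plaintext)
	return dst, nil
}
// ---- end aside ----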
- if len(src) > 0 { - s.buf = [bufSize]byte{} - copy(s.buf[:], src) - s.xorKeyStreamBlocks(s.buf[:], s.buf[:]) - s.len = bufSize - copy(dst, s.buf[:]) - } -} - -func (s *Cipher) xorKeyStreamBlocksGeneric(dst, src []byte) { - if len(dst) != len(src) || len(dst)%blockSize != 0 { - panic("chacha20: internal error: wrong dst and/or src length") - } - - // To generate each block of key stream, the initial cipher state - // (represented below) is passed through 20 rounds of shuffling, - // alternatively applying quarterRounds by columns (like 1, 5, 9, 13) - // or by diagonals (like 1, 6, 11, 12). - // - // 0:cccccccc 1:cccccccc 2:cccccccc 3:cccccccc - // 4:kkkkkkkk 5:kkkkkkkk 6:kkkkkkkk 7:kkkkkkkk - // 8:kkkkkkkk 9:kkkkkkkk 10:kkkkkkkk 11:kkkkkkkk - // 12:bbbbbbbb 13:nnnnnnnn 14:nnnnnnnn 15:nnnnnnnn - // - // c=constant k=key b=blockcount n=nonce - var ( - c0, c1, c2, c3 = j0, j1, j2, j3 - c4, c5, c6, c7 = s.key[0], s.key[1], s.key[2], s.key[3] - c8, c9, c10, c11 = s.key[4], s.key[5], s.key[6], s.key[7] - _, c13, c14, c15 = s.counter, s.nonce[0], s.nonce[1], s.nonce[2] - ) - - // Three quarters of the first round don't depend on the counter, so we can - // calculate them here, and reuse them for multiple blocks in the loop, and - // for future XORKeyStream invocations. - if !s.precompDone { - s.p1, s.p5, s.p9, s.p13 = quarterRound(c1, c5, c9, c13) - s.p2, s.p6, s.p10, s.p14 = quarterRound(c2, c6, c10, c14) - s.p3, s.p7, s.p11, s.p15 = quarterRound(c3, c7, c11, c15) - s.precompDone = true - } - - // A condition of len(src) > 0 would be sufficient, but this also - // acts as a bounds check elimination hint. - for len(src) >= 64 && len(dst) >= 64 { - // The remainder of the first column round. - fcr0, fcr4, fcr8, fcr12 := quarterRound(c0, c4, c8, s.counter) - - // The second diagonal round. - x0, x5, x10, x15 := quarterRound(fcr0, s.p5, s.p10, s.p15) - x1, x6, x11, x12 := quarterRound(s.p1, s.p6, s.p11, fcr12) - x2, x7, x8, x13 := quarterRound(s.p2, s.p7, fcr8, s.p13) - x3, x4, x9, x14 := quarterRound(s.p3, fcr4, s.p9, s.p14) - - // The remaining 18 rounds. - for i := 0; i < 9; i++ { - // Column round. - x0, x4, x8, x12 = quarterRound(x0, x4, x8, x12) - x1, x5, x9, x13 = quarterRound(x1, x5, x9, x13) - x2, x6, x10, x14 = quarterRound(x2, x6, x10, x14) - x3, x7, x11, x15 = quarterRound(x3, x7, x11, x15) - - // Diagonal round. - x0, x5, x10, x15 = quarterRound(x0, x5, x10, x15) - x1, x6, x11, x12 = quarterRound(x1, x6, x11, x12) - x2, x7, x8, x13 = quarterRound(x2, x7, x8, x13) - x3, x4, x9, x14 = quarterRound(x3, x4, x9, x14) - } - - // Add back the initial state to generate the key stream, then - // XOR the key stream with the source and write out the result. - addXor(dst[0:4], src[0:4], x0, c0) - addXor(dst[4:8], src[4:8], x1, c1) - addXor(dst[8:12], src[8:12], x2, c2) - addXor(dst[12:16], src[12:16], x3, c3) - addXor(dst[16:20], src[16:20], x4, c4) - addXor(dst[20:24], src[20:24], x5, c5) - addXor(dst[24:28], src[24:28], x6, c6) - addXor(dst[28:32], src[28:32], x7, c7) - addXor(dst[32:36], src[32:36], x8, c8) - addXor(dst[36:40], src[36:40], x9, c9) - addXor(dst[40:44], src[40:44], x10, c10) - addXor(dst[44:48], src[44:48], x11, c11) - addXor(dst[48:52], src[48:52], x12, s.counter) - addXor(dst[52:56], src[52:56], x13, c13) - addXor(dst[56:60], src[56:60], x14, c14) - addXor(dst[60:64], src[60:64], x15, c15) - - s.counter += 1 - - src, dst = src[blockSize:], dst[blockSize:] - } -} - -// HChaCha20 uses the ChaCha20 core to generate a derived key from a 32 bytes -// key and a 16 bytes nonce. 
It returns an error if key or nonce have any other -// length. It is used as part of the XChaCha20 construction. -func HChaCha20(key, nonce []byte) ([]byte, error) { - // This function is split into a wrapper so that the slice allocation will - // be inlined, and depending on how the caller uses the return value, won't - // escape to the heap. - out := make([]byte, 32) - return hChaCha20(out, key, nonce) -} - -func hChaCha20(out, key, nonce []byte) ([]byte, error) { - if len(key) != KeySize { - return nil, errors.New("chacha20: wrong HChaCha20 key size") - } - if len(nonce) != 16 { - return nil, errors.New("chacha20: wrong HChaCha20 nonce size") - } - - x0, x1, x2, x3 := j0, j1, j2, j3 - x4 := binary.LittleEndian.Uint32(key[0:4]) - x5 := binary.LittleEndian.Uint32(key[4:8]) - x6 := binary.LittleEndian.Uint32(key[8:12]) - x7 := binary.LittleEndian.Uint32(key[12:16]) - x8 := binary.LittleEndian.Uint32(key[16:20]) - x9 := binary.LittleEndian.Uint32(key[20:24]) - x10 := binary.LittleEndian.Uint32(key[24:28]) - x11 := binary.LittleEndian.Uint32(key[28:32]) - x12 := binary.LittleEndian.Uint32(nonce[0:4]) - x13 := binary.LittleEndian.Uint32(nonce[4:8]) - x14 := binary.LittleEndian.Uint32(nonce[8:12]) - x15 := binary.LittleEndian.Uint32(nonce[12:16]) - - for i := 0; i < 10; i++ { - // Diagonal round. - x0, x4, x8, x12 = quarterRound(x0, x4, x8, x12) - x1, x5, x9, x13 = quarterRound(x1, x5, x9, x13) - x2, x6, x10, x14 = quarterRound(x2, x6, x10, x14) - x3, x7, x11, x15 = quarterRound(x3, x7, x11, x15) - - // Column round. - x0, x5, x10, x15 = quarterRound(x0, x5, x10, x15) - x1, x6, x11, x12 = quarterRound(x1, x6, x11, x12) - x2, x7, x8, x13 = quarterRound(x2, x7, x8, x13) - x3, x4, x9, x14 = quarterRound(x3, x4, x9, x14) - } - - _ = out[31] // bounds check elimination hint - binary.LittleEndian.PutUint32(out[0:4], x0) - binary.LittleEndian.PutUint32(out[4:8], x1) - binary.LittleEndian.PutUint32(out[8:12], x2) - binary.LittleEndian.PutUint32(out[12:16], x3) - binary.LittleEndian.PutUint32(out[16:20], x12) - binary.LittleEndian.PutUint32(out[20:24], x13) - binary.LittleEndian.PutUint32(out[24:28], x14) - binary.LittleEndian.PutUint32(out[28:32], x15) - return out, nil -} diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go b/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go deleted file mode 100644 index 025b4989..00000000 --- a/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build (!arm64 && !s390x && !ppc64le) || (arm64 && !go1.11) || !gc || purego -// +build !arm64,!s390x,!ppc64le arm64,!go1.11 !gc purego - -package chacha20 - -const bufSize = blockSize - -func (s *Cipher) xorKeyStreamBlocks(dst, src []byte) { - s.xorKeyStreamBlocksGeneric(dst, src) -} diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go b/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go deleted file mode 100644 index da420b2e..00000000 --- a/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
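// ---- reviewer aside, not part of the diff ----
// The XChaCha20 derivation step performed by newUnauthenticatedCipher
// above, written out as a standalone sketch: HChaCha20 folds the first
// 16 nonce bytes into a subkey, and the last 8 bytes become the tail of
// a standard 12-byte nonce (deriveXChaCha20 is an illustrative name).
package sketch

import "golang.org/x/crypto/chacha20"

func deriveXChaCha20(key, nonce24 []byte) (subKey, nonce12 []byte, err error) {
	subKey, err = chacha20.HChaCha20(key, nonce24[:16])
	if err != nil {
		return nil, nil, err
	}
	nonce12 = make([]byte, chacha20.NonceSize)
	copy(nonce12[4:], nonce24[16:]) // first 4 bytes stay zero, as above
	return subKey, nonce12, nil
}
// ---- end aside ----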
- -//go:build gc && !purego -// +build gc,!purego - -package chacha20 - -const bufSize = 256 - -//go:noescape -func chaCha20_ctr32_vsx(out, inp *byte, len int, key *[8]uint32, counter *uint32) - -func (c *Cipher) xorKeyStreamBlocks(dst, src []byte) { - chaCha20_ctr32_vsx(&dst[0], &src[0], len(src), &c.key, &c.counter) -} diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s b/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s deleted file mode 100644 index 5c0fed26..00000000 --- a/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s +++ /dev/null @@ -1,450 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Based on CRYPTOGAMS code with the following comment: -// # ==================================================================== -// # Written by Andy Polyakov for the OpenSSL -// # project. The module is, however, dual licensed under OpenSSL and -// # CRYPTOGAMS licenses depending on where you obtain it. For further -// # details see http://www.openssl.org/~appro/cryptogams/. -// # ==================================================================== - -// Code for the perl script that generates the ppc64 assembler -// can be found in the cryptogams repository at the link below. It is based on -// the original from openssl. - -// https://github.com/dot-asm/cryptogams/commit/a60f5b50ed908e91 - -// The differences in this and the original implementation are -// due to the calling conventions and initialization of constants. - -//go:build gc && !purego -// +build gc,!purego - -#include "textflag.h" - -#define OUT R3 -#define INP R4 -#define LEN R5 -#define KEY R6 -#define CNT R7 -#define TMP R15 - -#define CONSTBASE R16 -#define BLOCKS R17 - -DATA consts<>+0x00(SB)/8, $0x3320646e61707865 -DATA consts<>+0x08(SB)/8, $0x6b20657479622d32 -DATA consts<>+0x10(SB)/8, $0x0000000000000001 -DATA consts<>+0x18(SB)/8, $0x0000000000000000 -DATA consts<>+0x20(SB)/8, $0x0000000000000004 -DATA consts<>+0x28(SB)/8, $0x0000000000000000 -DATA consts<>+0x30(SB)/8, $0x0a0b08090e0f0c0d -DATA consts<>+0x38(SB)/8, $0x0203000106070405 -DATA consts<>+0x40(SB)/8, $0x090a0b080d0e0f0c -DATA consts<>+0x48(SB)/8, $0x0102030005060704 -DATA consts<>+0x50(SB)/8, $0x6170786561707865 -DATA consts<>+0x58(SB)/8, $0x6170786561707865 -DATA consts<>+0x60(SB)/8, $0x3320646e3320646e -DATA consts<>+0x68(SB)/8, $0x3320646e3320646e -DATA consts<>+0x70(SB)/8, $0x79622d3279622d32 -DATA consts<>+0x78(SB)/8, $0x79622d3279622d32 -DATA consts<>+0x80(SB)/8, $0x6b2065746b206574 -DATA consts<>+0x88(SB)/8, $0x6b2065746b206574 -DATA consts<>+0x90(SB)/8, $0x0000000100000000 -DATA consts<>+0x98(SB)/8, $0x0000000300000002 -GLOBL consts<>(SB), RODATA, $0xa0 - -//func chaCha20_ctr32_vsx(out, inp *byte, len int, key *[8]uint32, counter *uint32) -TEXT ·chaCha20_ctr32_vsx(SB),NOSPLIT,$64-40 - MOVD out+0(FP), OUT - MOVD inp+8(FP), INP - MOVD len+16(FP), LEN - MOVD key+24(FP), KEY - MOVD counter+32(FP), CNT - - // Addressing for constants - MOVD $consts<>+0x00(SB), CONSTBASE - MOVD $16, R8 - MOVD $32, R9 - MOVD $48, R10 - MOVD $64, R11 - SRD $6, LEN, BLOCKS - // V16 - LXVW4X (CONSTBASE)(R0), VS48 - ADD $80,CONSTBASE - - // Load key into V17,V18 - LXVW4X (KEY)(R0), VS49 - LXVW4X (KEY)(R8), VS50 - - // Load CNT, NONCE into V19 - LXVW4X (CNT)(R0), VS51 - - // Clear V27 - VXOR V27, V27, V27 - - // V28 - LXVW4X (CONSTBASE)(R11), VS60 - - // splat slot from V19 -> V26 - VSPLTW $0, V19, V26 - - VSLDOI $4, V19, V27, V19 - VSLDOI 
$12, V27, V19, V19 - - VADDUWM V26, V28, V26 - - MOVD $10, R14 - MOVD R14, CTR - -loop_outer_vsx: - // V0, V1, V2, V3 - LXVW4X (R0)(CONSTBASE), VS32 - LXVW4X (R8)(CONSTBASE), VS33 - LXVW4X (R9)(CONSTBASE), VS34 - LXVW4X (R10)(CONSTBASE), VS35 - - // splat values from V17, V18 into V4-V11 - VSPLTW $0, V17, V4 - VSPLTW $1, V17, V5 - VSPLTW $2, V17, V6 - VSPLTW $3, V17, V7 - VSPLTW $0, V18, V8 - VSPLTW $1, V18, V9 - VSPLTW $2, V18, V10 - VSPLTW $3, V18, V11 - - // VOR - VOR V26, V26, V12 - - // splat values from V19 -> V13, V14, V15 - VSPLTW $1, V19, V13 - VSPLTW $2, V19, V14 - VSPLTW $3, V19, V15 - - // splat const values - VSPLTISW $-16, V27 - VSPLTISW $12, V28 - VSPLTISW $8, V29 - VSPLTISW $7, V30 - -loop_vsx: - VADDUWM V0, V4, V0 - VADDUWM V1, V5, V1 - VADDUWM V2, V6, V2 - VADDUWM V3, V7, V3 - - VXOR V12, V0, V12 - VXOR V13, V1, V13 - VXOR V14, V2, V14 - VXOR V15, V3, V15 - - VRLW V12, V27, V12 - VRLW V13, V27, V13 - VRLW V14, V27, V14 - VRLW V15, V27, V15 - - VADDUWM V8, V12, V8 - VADDUWM V9, V13, V9 - VADDUWM V10, V14, V10 - VADDUWM V11, V15, V11 - - VXOR V4, V8, V4 - VXOR V5, V9, V5 - VXOR V6, V10, V6 - VXOR V7, V11, V7 - - VRLW V4, V28, V4 - VRLW V5, V28, V5 - VRLW V6, V28, V6 - VRLW V7, V28, V7 - - VADDUWM V0, V4, V0 - VADDUWM V1, V5, V1 - VADDUWM V2, V6, V2 - VADDUWM V3, V7, V3 - - VXOR V12, V0, V12 - VXOR V13, V1, V13 - VXOR V14, V2, V14 - VXOR V15, V3, V15 - - VRLW V12, V29, V12 - VRLW V13, V29, V13 - VRLW V14, V29, V14 - VRLW V15, V29, V15 - - VADDUWM V8, V12, V8 - VADDUWM V9, V13, V9 - VADDUWM V10, V14, V10 - VADDUWM V11, V15, V11 - - VXOR V4, V8, V4 - VXOR V5, V9, V5 - VXOR V6, V10, V6 - VXOR V7, V11, V7 - - VRLW V4, V30, V4 - VRLW V5, V30, V5 - VRLW V6, V30, V6 - VRLW V7, V30, V7 - - VADDUWM V0, V5, V0 - VADDUWM V1, V6, V1 - VADDUWM V2, V7, V2 - VADDUWM V3, V4, V3 - - VXOR V15, V0, V15 - VXOR V12, V1, V12 - VXOR V13, V2, V13 - VXOR V14, V3, V14 - - VRLW V15, V27, V15 - VRLW V12, V27, V12 - VRLW V13, V27, V13 - VRLW V14, V27, V14 - - VADDUWM V10, V15, V10 - VADDUWM V11, V12, V11 - VADDUWM V8, V13, V8 - VADDUWM V9, V14, V9 - - VXOR V5, V10, V5 - VXOR V6, V11, V6 - VXOR V7, V8, V7 - VXOR V4, V9, V4 - - VRLW V5, V28, V5 - VRLW V6, V28, V6 - VRLW V7, V28, V7 - VRLW V4, V28, V4 - - VADDUWM V0, V5, V0 - VADDUWM V1, V6, V1 - VADDUWM V2, V7, V2 - VADDUWM V3, V4, V3 - - VXOR V15, V0, V15 - VXOR V12, V1, V12 - VXOR V13, V2, V13 - VXOR V14, V3, V14 - - VRLW V15, V29, V15 - VRLW V12, V29, V12 - VRLW V13, V29, V13 - VRLW V14, V29, V14 - - VADDUWM V10, V15, V10 - VADDUWM V11, V12, V11 - VADDUWM V8, V13, V8 - VADDUWM V9, V14, V9 - - VXOR V5, V10, V5 - VXOR V6, V11, V6 - VXOR V7, V8, V7 - VXOR V4, V9, V4 - - VRLW V5, V30, V5 - VRLW V6, V30, V6 - VRLW V7, V30, V7 - VRLW V4, V30, V4 - BC 16, LT, loop_vsx - - VADDUWM V12, V26, V12 - - WORD $0x13600F8C // VMRGEW V0, V1, V27 - WORD $0x13821F8C // VMRGEW V2, V3, V28 - - WORD $0x10000E8C // VMRGOW V0, V1, V0 - WORD $0x10421E8C // VMRGOW V2, V3, V2 - - WORD $0x13A42F8C // VMRGEW V4, V5, V29 - WORD $0x13C63F8C // VMRGEW V6, V7, V30 - - XXPERMDI VS32, VS34, $0, VS33 - XXPERMDI VS32, VS34, $3, VS35 - XXPERMDI VS59, VS60, $0, VS32 - XXPERMDI VS59, VS60, $3, VS34 - - WORD $0x10842E8C // VMRGOW V4, V5, V4 - WORD $0x10C63E8C // VMRGOW V6, V7, V6 - - WORD $0x13684F8C // VMRGEW V8, V9, V27 - WORD $0x138A5F8C // VMRGEW V10, V11, V28 - - XXPERMDI VS36, VS38, $0, VS37 - XXPERMDI VS36, VS38, $3, VS39 - XXPERMDI VS61, VS62, $0, VS36 - XXPERMDI VS61, VS62, $3, VS38 - - WORD $0x11084E8C // VMRGOW V8, V9, V8 - WORD $0x114A5E8C // VMRGOW V10, V11, V10 - - WORD 
$0x13AC6F8C // VMRGEW V12, V13, V29 - WORD $0x13CE7F8C // VMRGEW V14, V15, V30 - - XXPERMDI VS40, VS42, $0, VS41 - XXPERMDI VS40, VS42, $3, VS43 - XXPERMDI VS59, VS60, $0, VS40 - XXPERMDI VS59, VS60, $3, VS42 - - WORD $0x118C6E8C // VMRGOW V12, V13, V12 - WORD $0x11CE7E8C // VMRGOW V14, V15, V14 - - VSPLTISW $4, V27 - VADDUWM V26, V27, V26 - - XXPERMDI VS44, VS46, $0, VS45 - XXPERMDI VS44, VS46, $3, VS47 - XXPERMDI VS61, VS62, $0, VS44 - XXPERMDI VS61, VS62, $3, VS46 - - VADDUWM V0, V16, V0 - VADDUWM V4, V17, V4 - VADDUWM V8, V18, V8 - VADDUWM V12, V19, V12 - - CMPU LEN, $64 - BLT tail_vsx - - // Bottom of loop - LXVW4X (INP)(R0), VS59 - LXVW4X (INP)(R8), VS60 - LXVW4X (INP)(R9), VS61 - LXVW4X (INP)(R10), VS62 - - VXOR V27, V0, V27 - VXOR V28, V4, V28 - VXOR V29, V8, V29 - VXOR V30, V12, V30 - - STXVW4X VS59, (OUT)(R0) - STXVW4X VS60, (OUT)(R8) - ADD $64, INP - STXVW4X VS61, (OUT)(R9) - ADD $-64, LEN - STXVW4X VS62, (OUT)(R10) - ADD $64, OUT - BEQ done_vsx - - VADDUWM V1, V16, V0 - VADDUWM V5, V17, V4 - VADDUWM V9, V18, V8 - VADDUWM V13, V19, V12 - - CMPU LEN, $64 - BLT tail_vsx - - LXVW4X (INP)(R0), VS59 - LXVW4X (INP)(R8), VS60 - LXVW4X (INP)(R9), VS61 - LXVW4X (INP)(R10), VS62 - VXOR V27, V0, V27 - - VXOR V28, V4, V28 - VXOR V29, V8, V29 - VXOR V30, V12, V30 - - STXVW4X VS59, (OUT)(R0) - STXVW4X VS60, (OUT)(R8) - ADD $64, INP - STXVW4X VS61, (OUT)(R9) - ADD $-64, LEN - STXVW4X VS62, (OUT)(V10) - ADD $64, OUT - BEQ done_vsx - - VADDUWM V2, V16, V0 - VADDUWM V6, V17, V4 - VADDUWM V10, V18, V8 - VADDUWM V14, V19, V12 - - CMPU LEN, $64 - BLT tail_vsx - - LXVW4X (INP)(R0), VS59 - LXVW4X (INP)(R8), VS60 - LXVW4X (INP)(R9), VS61 - LXVW4X (INP)(R10), VS62 - - VXOR V27, V0, V27 - VXOR V28, V4, V28 - VXOR V29, V8, V29 - VXOR V30, V12, V30 - - STXVW4X VS59, (OUT)(R0) - STXVW4X VS60, (OUT)(R8) - ADD $64, INP - STXVW4X VS61, (OUT)(R9) - ADD $-64, LEN - STXVW4X VS62, (OUT)(R10) - ADD $64, OUT - BEQ done_vsx - - VADDUWM V3, V16, V0 - VADDUWM V7, V17, V4 - VADDUWM V11, V18, V8 - VADDUWM V15, V19, V12 - - CMPU LEN, $64 - BLT tail_vsx - - LXVW4X (INP)(R0), VS59 - LXVW4X (INP)(R8), VS60 - LXVW4X (INP)(R9), VS61 - LXVW4X (INP)(R10), VS62 - - VXOR V27, V0, V27 - VXOR V28, V4, V28 - VXOR V29, V8, V29 - VXOR V30, V12, V30 - - STXVW4X VS59, (OUT)(R0) - STXVW4X VS60, (OUT)(R8) - ADD $64, INP - STXVW4X VS61, (OUT)(R9) - ADD $-64, LEN - STXVW4X VS62, (OUT)(R10) - ADD $64, OUT - - MOVD $10, R14 - MOVD R14, CTR - BNE loop_outer_vsx - -done_vsx: - // Increment counter by number of 64 byte blocks - MOVD (CNT), R14 - ADD BLOCKS, R14 - MOVD R14, (CNT) - RET - -tail_vsx: - ADD $32, R1, R11 - MOVD LEN, CTR - - // Save values on stack to copy from - STXVW4X VS32, (R11)(R0) - STXVW4X VS36, (R11)(R8) - STXVW4X VS40, (R11)(R9) - STXVW4X VS44, (R11)(R10) - ADD $-1, R11, R12 - ADD $-1, INP - ADD $-1, OUT - -looptail_vsx: - // Copying the result to OUT - // in bytes. - MOVBZU 1(R12), KEY - MOVBZU 1(INP), TMP - XOR KEY, TMP, KEY - MOVBU KEY, 1(OUT) - BC 16, LT, looptail_vsx - - // Clear the stack values - STXVW4X VS48, (R11)(R0) - STXVW4X VS48, (R11)(R8) - STXVW4X VS48, (R11)(R9) - STXVW4X VS48, (R11)(R10) - BR done_vsx diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go b/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go deleted file mode 100644 index c5898db4..00000000 --- a/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build gc && !purego -// +build gc,!purego - -package chacha20 - -import "golang.org/x/sys/cpu" - -var haveAsm = cpu.S390X.HasVX - -const bufSize = 256 - -// xorKeyStreamVX is an assembly implementation of XORKeyStream. It must only -// be called when the vector facility is available. Implementation in asm_s390x.s. -//go:noescape -func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32) - -func (c *Cipher) xorKeyStreamBlocks(dst, src []byte) { - if cpu.S390X.HasVX { - xorKeyStreamVX(dst, src, &c.key, &c.nonce, &c.counter) - } else { - c.xorKeyStreamBlocksGeneric(dst, src) - } -} diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_s390x.s b/vendor/golang.org/x/crypto/chacha20/chacha_s390x.s deleted file mode 100644 index f3ef5a01..00000000 --- a/vendor/golang.org/x/crypto/chacha20/chacha_s390x.s +++ /dev/null @@ -1,225 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build gc && !purego -// +build gc,!purego - -#include "go_asm.h" -#include "textflag.h" - -// This is an implementation of the ChaCha20 encryption algorithm as -// specified in RFC 7539. It uses vector instructions to compute -// 4 keystream blocks in parallel (256 bytes) which are then XORed -// with the bytes in the input slice. - -GLOBL ·constants<>(SB), RODATA|NOPTR, $32 -// BSWAP: swap bytes in each 4-byte element -DATA ·constants<>+0x00(SB)/4, $0x03020100 -DATA ·constants<>+0x04(SB)/4, $0x07060504 -DATA ·constants<>+0x08(SB)/4, $0x0b0a0908 -DATA ·constants<>+0x0c(SB)/4, $0x0f0e0d0c -// J0: [j0, j1, j2, j3] -DATA ·constants<>+0x10(SB)/4, $0x61707865 -DATA ·constants<>+0x14(SB)/4, $0x3320646e -DATA ·constants<>+0x18(SB)/4, $0x79622d32 -DATA ·constants<>+0x1c(SB)/4, $0x6b206574 - -#define BSWAP V5 -#define J0 V6 -#define KEY0 V7 -#define KEY1 V8 -#define NONCE V9 -#define CTR V10 -#define M0 V11 -#define M1 V12 -#define M2 V13 -#define M3 V14 -#define INC V15 -#define X0 V16 -#define X1 V17 -#define X2 V18 -#define X3 V19 -#define X4 V20 -#define X5 V21 -#define X6 V22 -#define X7 V23 -#define X8 V24 -#define X9 V25 -#define X10 V26 -#define X11 V27 -#define X12 V28 -#define X13 V29 -#define X14 V30 -#define X15 V31 - -#define NUM_ROUNDS 20 - -#define ROUND4(a0, a1, a2, a3, b0, b1, b2, b3, c0, c1, c2, c3, d0, d1, d2, d3) \ - VAF a1, a0, a0 \ - VAF b1, b0, b0 \ - VAF c1, c0, c0 \ - VAF d1, d0, d0 \ - VX a0, a2, a2 \ - VX b0, b2, b2 \ - VX c0, c2, c2 \ - VX d0, d2, d2 \ - VERLLF $16, a2, a2 \ - VERLLF $16, b2, b2 \ - VERLLF $16, c2, c2 \ - VERLLF $16, d2, d2 \ - VAF a2, a3, a3 \ - VAF b2, b3, b3 \ - VAF c2, c3, c3 \ - VAF d2, d3, d3 \ - VX a3, a1, a1 \ - VX b3, b1, b1 \ - VX c3, c1, c1 \ - VX d3, d1, d1 \ - VERLLF $12, a1, a1 \ - VERLLF $12, b1, b1 \ - VERLLF $12, c1, c1 \ - VERLLF $12, d1, d1 \ - VAF a1, a0, a0 \ - VAF b1, b0, b0 \ - VAF c1, c0, c0 \ - VAF d1, d0, d0 \ - VX a0, a2, a2 \ - VX b0, b2, b2 \ - VX c0, c2, c2 \ - VX d0, d2, d2 \ - VERLLF $8, a2, a2 \ - VERLLF $8, b2, b2 \ - VERLLF $8, c2, c2 \ - VERLLF $8, d2, d2 \ - VAF a2, a3, a3 \ - VAF b2, b3, b3 \ - VAF c2, c3, c3 \ - VAF d2, d3, d3 \ - VX a3, a1, a1 \ - VX b3, b1, b1 \ - VX c3, c1, c1 \ - VX d3, d1, d1 \ - VERLLF $7, a1, a1 \ - VERLLF $7, b1, b1 \ - VERLLF $7, c1, c1 \ - VERLLF $7, d1, d1 - -#define PERMUTE(mask, v0, v1, v2, v3) \ - VPERM v0, v0, mask, v0 \ - VPERM v1, v1, mask, v1 \ - 
VPERM v2, v2, mask, v2 \ - VPERM v3, v3, mask, v3 - -#define ADDV(x, v0, v1, v2, v3) \ - VAF x, v0, v0 \ - VAF x, v1, v1 \ - VAF x, v2, v2 \ - VAF x, v3, v3 - -#define XORV(off, dst, src, v0, v1, v2, v3) \ - VLM off(src), M0, M3 \ - PERMUTE(BSWAP, v0, v1, v2, v3) \ - VX v0, M0, M0 \ - VX v1, M1, M1 \ - VX v2, M2, M2 \ - VX v3, M3, M3 \ - VSTM M0, M3, off(dst) - -#define SHUFFLE(a, b, c, d, t, u, v, w) \ - VMRHF a, c, t \ // t = {a[0], c[0], a[1], c[1]} - VMRHF b, d, u \ // u = {b[0], d[0], b[1], d[1]} - VMRLF a, c, v \ // v = {a[2], c[2], a[3], c[3]} - VMRLF b, d, w \ // w = {b[2], d[2], b[3], d[3]} - VMRHF t, u, a \ // a = {a[0], b[0], c[0], d[0]} - VMRLF t, u, b \ // b = {a[1], b[1], c[1], d[1]} - VMRHF v, w, c \ // c = {a[2], b[2], c[2], d[2]} - VMRLF v, w, d // d = {a[3], b[3], c[3], d[3]} - -// func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32) -TEXT ·xorKeyStreamVX(SB), NOSPLIT, $0 - MOVD $·constants<>(SB), R1 - MOVD dst+0(FP), R2 // R2=&dst[0] - LMG src+24(FP), R3, R4 // R3=&src[0] R4=len(src) - MOVD key+48(FP), R5 // R5=key - MOVD nonce+56(FP), R6 // R6=nonce - MOVD counter+64(FP), R7 // R7=counter - - // load BSWAP and J0 - VLM (R1), BSWAP, J0 - - // setup - MOVD $95, R0 - VLM (R5), KEY0, KEY1 - VLL R0, (R6), NONCE - VZERO M0 - VLEIB $7, $32, M0 - VSRLB M0, NONCE, NONCE - - // initialize counter values - VLREPF (R7), CTR - VZERO INC - VLEIF $1, $1, INC - VLEIF $2, $2, INC - VLEIF $3, $3, INC - VAF INC, CTR, CTR - VREPIF $4, INC - -chacha: - VREPF $0, J0, X0 - VREPF $1, J0, X1 - VREPF $2, J0, X2 - VREPF $3, J0, X3 - VREPF $0, KEY0, X4 - VREPF $1, KEY0, X5 - VREPF $2, KEY0, X6 - VREPF $3, KEY0, X7 - VREPF $0, KEY1, X8 - VREPF $1, KEY1, X9 - VREPF $2, KEY1, X10 - VREPF $3, KEY1, X11 - VLR CTR, X12 - VREPF $1, NONCE, X13 - VREPF $2, NONCE, X14 - VREPF $3, NONCE, X15 - - MOVD $(NUM_ROUNDS/2), R1 - -loop: - ROUND4(X0, X4, X12, X8, X1, X5, X13, X9, X2, X6, X14, X10, X3, X7, X15, X11) - ROUND4(X0, X5, X15, X10, X1, X6, X12, X11, X2, X7, X13, X8, X3, X4, X14, X9) - - ADD $-1, R1 - BNE loop - - // decrement length - ADD $-256, R4 - - // rearrange vectors - SHUFFLE(X0, X1, X2, X3, M0, M1, M2, M3) - ADDV(J0, X0, X1, X2, X3) - SHUFFLE(X4, X5, X6, X7, M0, M1, M2, M3) - ADDV(KEY0, X4, X5, X6, X7) - SHUFFLE(X8, X9, X10, X11, M0, M1, M2, M3) - ADDV(KEY1, X8, X9, X10, X11) - VAF CTR, X12, X12 - SHUFFLE(X12, X13, X14, X15, M0, M1, M2, M3) - ADDV(NONCE, X12, X13, X14, X15) - - // increment counters - VAF INC, CTR, CTR - - // xor keystream with plaintext - XORV(0*64, R2, R3, X0, X4, X8, X12) - XORV(1*64, R2, R3, X1, X5, X9, X13) - XORV(2*64, R2, R3, X2, X6, X10, X14) - XORV(3*64, R2, R3, X3, X7, X11, X15) - - // increment pointers - MOVD $256(R2), R2 - MOVD $256(R3), R3 - - CMPBNE R4, $0, chacha - - VSTEF $0, CTR, (R7) - RET diff --git a/vendor/golang.org/x/crypto/chacha20/xor.go b/vendor/golang.org/x/crypto/chacha20/xor.go deleted file mode 100644 index c2d04851..00000000 --- a/vendor/golang.org/x/crypto/chacha20/xor.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found src the LICENSE file. - -package chacha20 - -import "runtime" - -// Platforms that have fast unaligned 32-bit little endian accesses. 
-const unaligned = runtime.GOARCH == "386" || - runtime.GOARCH == "amd64" || - runtime.GOARCH == "arm64" || - runtime.GOARCH == "ppc64le" || - runtime.GOARCH == "s390x" - -// addXor reads a little endian uint32 from src, XORs it with (a + b) and -// places the result in little endian byte order in dst. -func addXor(dst, src []byte, a, b uint32) { - _, _ = src[3], dst[3] // bounds check elimination hint - if unaligned { - // The compiler should optimize this code into - // 32-bit unaligned little endian loads and stores. - // TODO: delete once the compiler does a reliably - // good job with the generic code below. - // See issue #25111 for more details. - v := uint32(src[0]) - v |= uint32(src[1]) << 8 - v |= uint32(src[2]) << 16 - v |= uint32(src[3]) << 24 - v ^= a + b - dst[0] = byte(v) - dst[1] = byte(v >> 8) - dst[2] = byte(v >> 16) - dst[3] = byte(v >> 24) - } else { - a += b - dst[0] = src[0] ^ byte(a) - dst[1] = src[1] ^ byte(a>>8) - dst[2] = src[2] ^ byte(a>>16) - dst[3] = src[3] ^ byte(a>>24) - } -} diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go deleted file mode 100644 index 0d7bac3f..00000000 --- a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package chacha20poly1305 implements the ChaCha20-Poly1305 AEAD and its -// extended nonce variant XChaCha20-Poly1305, as specified in RFC 8439 and -// draft-irtf-cfrg-xchacha-01. -package chacha20poly1305 // import "golang.org/x/crypto/chacha20poly1305" - -import ( - "crypto/cipher" - "errors" -) - -const ( - // KeySize is the size of the key used by this AEAD, in bytes. - KeySize = 32 - - // NonceSize is the size of the nonce used with the standard variant of this - // AEAD, in bytes. - // - // Note that this is too short to be safely generated at random if the same - // key is reused more than 2³² times. - NonceSize = 12 - - // NonceSizeX is the size of the nonce used with the XChaCha20-Poly1305 - // variant of this AEAD, in bytes. - NonceSizeX = 24 -) - -type chacha20poly1305 struct { - key [KeySize]byte -} - -// New returns a ChaCha20-Poly1305 AEAD that uses the given 256-bit key. 
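// ---- reviewer aside, not part of the diff ----
// Usage sketch of the AEAD contract defined by New/Seal/Open below: a
// fresh random 12-byte nonce per message and a 16-byte tag appended to
// the ciphertext, matching NonceSize and Overhead. sealMessage is an
// illustrative helper, not part of the deleted package.
package sketch

import (
	"crypto/cipher"
	"crypto/rand"

	"golang.org/x/crypto/chacha20poly1305"
)

func sealMessage(key, plaintext, ad []byte) (nonce, ct []byte, err error) {
	var aead cipher.AEAD
	if aead, err = chacha20poly1305.New(key); err != nil {
		return nil, nil, err
	}
	nonce = make([]byte, aead.NonceSize())
	if _, err = rand.Read(nonce); err != nil {
		return nil, nil, err
	}
	ct = aead.Seal(nil, nonce, plaintext, ad) // plaintext plus 16-byte tag
	return nonce, ct, nil
}
// ---- end aside ----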
-func New(key []byte) (cipher.AEAD, error) { - if len(key) != KeySize { - return nil, errors.New("chacha20poly1305: bad key length") - } - ret := new(chacha20poly1305) - copy(ret.key[:], key) - return ret, nil -} - -func (c *chacha20poly1305) NonceSize() int { - return NonceSize -} - -func (c *chacha20poly1305) Overhead() int { - return 16 -} - -func (c *chacha20poly1305) Seal(dst, nonce, plaintext, additionalData []byte) []byte { - if len(nonce) != NonceSize { - panic("chacha20poly1305: bad nonce length passed to Seal") - } - - if uint64(len(plaintext)) > (1<<38)-64 { - panic("chacha20poly1305: plaintext too large") - } - - return c.seal(dst, nonce, plaintext, additionalData) -} - -var errOpen = errors.New("chacha20poly1305: message authentication failed") - -func (c *chacha20poly1305) Open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) { - if len(nonce) != NonceSize { - panic("chacha20poly1305: bad nonce length passed to Open") - } - if len(ciphertext) < 16 { - return nil, errOpen - } - if uint64(len(ciphertext)) > (1<<38)-48 { - panic("chacha20poly1305: ciphertext too large") - } - - return c.open(dst, nonce, ciphertext, additionalData) -} - -// sliceForAppend takes a slice and a requested number of bytes. It returns a -// slice with the contents of the given slice followed by that many bytes and a -// second slice that aliases into it and contains only the extra bytes. If the -// original slice has sufficient capacity then no allocation is performed. -func sliceForAppend(in []byte, n int) (head, tail []byte) { - if total := len(in) + n; cap(in) >= total { - head = in[:total] - } else { - head = make([]byte, total) - copy(head, in) - } - tail = head[len(in):] - return -} diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go deleted file mode 100644 index 25959b9a..00000000 --- a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build gc && !purego -// +build gc,!purego - -package chacha20poly1305 - -import ( - "encoding/binary" - - "golang.org/x/crypto/internal/subtle" - "golang.org/x/sys/cpu" -) - -//go:noescape -func chacha20Poly1305Open(dst []byte, key []uint32, src, ad []byte) bool - -//go:noescape -func chacha20Poly1305Seal(dst []byte, key []uint32, src, ad []byte) - -var ( - useAVX2 = cpu.X86.HasAVX2 && cpu.X86.HasBMI2 -) - -// setupState writes a ChaCha20 input matrix to state. See -// https://tools.ietf.org/html/rfc7539#section-2.3. 
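// ---- reviewer aside, not part of the diff ----
// sliceForAppend, defined above, is what gives Seal its append-style
// contract: it grows dst without reallocating when capacity allows, and
// hands back a tail that aliases the returned slice. A self-contained
// sketch of the calling pattern (appendSealed is illustrative; the body
// of sliceForAppend is reproduced verbatim from the deleted file):
package sketch

func appendSealed(dst, plaintext []byte, overhead int) []byte {
	ret, out := sliceForAppend(dst, len(plaintext)+overhead)
	_ = out // the cipher writes ciphertext||tag into out, which aliases ret
	return ret
}

func sliceForAppend(in []byte, n int) (head, tail []byte) {
	if total := len(in) + n; cap(in) >= total {
		head = in[:total]
	} else {
		head = make([]byte, total)
		copy(head, in)
	}
	tail = head[len(in):]
	return
}
// ---- end aside ----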
-func setupState(state *[16]uint32, key *[32]byte, nonce []byte) { - state[0] = 0x61707865 - state[1] = 0x3320646e - state[2] = 0x79622d32 - state[3] = 0x6b206574 - - state[4] = binary.LittleEndian.Uint32(key[0:4]) - state[5] = binary.LittleEndian.Uint32(key[4:8]) - state[6] = binary.LittleEndian.Uint32(key[8:12]) - state[7] = binary.LittleEndian.Uint32(key[12:16]) - state[8] = binary.LittleEndian.Uint32(key[16:20]) - state[9] = binary.LittleEndian.Uint32(key[20:24]) - state[10] = binary.LittleEndian.Uint32(key[24:28]) - state[11] = binary.LittleEndian.Uint32(key[28:32]) - - state[12] = 0 - state[13] = binary.LittleEndian.Uint32(nonce[0:4]) - state[14] = binary.LittleEndian.Uint32(nonce[4:8]) - state[15] = binary.LittleEndian.Uint32(nonce[8:12]) -} - -func (c *chacha20poly1305) seal(dst, nonce, plaintext, additionalData []byte) []byte { - if !cpu.X86.HasSSSE3 { - return c.sealGeneric(dst, nonce, plaintext, additionalData) - } - - var state [16]uint32 - setupState(&state, &c.key, nonce) - - ret, out := sliceForAppend(dst, len(plaintext)+16) - if subtle.InexactOverlap(out, plaintext) { - panic("chacha20poly1305: invalid buffer overlap") - } - chacha20Poly1305Seal(out[:], state[:], plaintext, additionalData) - return ret -} - -func (c *chacha20poly1305) open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) { - if !cpu.X86.HasSSSE3 { - return c.openGeneric(dst, nonce, ciphertext, additionalData) - } - - var state [16]uint32 - setupState(&state, &c.key, nonce) - - ciphertext = ciphertext[:len(ciphertext)-16] - ret, out := sliceForAppend(dst, len(ciphertext)) - if subtle.InexactOverlap(out, ciphertext) { - panic("chacha20poly1305: invalid buffer overlap") - } - if !chacha20Poly1305Open(out, state[:], ciphertext, additionalData) { - for i := range out { - out[i] = 0 - } - return nil, errOpen - } - - return ret, nil -} diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s deleted file mode 100644 index 867c181a..00000000 --- a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s +++ /dev/null @@ -1,2696 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This file was originally from https://golang.org/cl/24717 by Vlad Krasnov of CloudFlare. 
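// ---- reviewer aside, not part of the diff ----
// The amd64 files removed here follow a two-level dispatch: the Go side
// falls back to the generic path when SSSE3 is absent, and the assembly
// itself branches on useAVX2 (AVX2 plus BMI2), as seen in the
// CMPB ·useAVX2(SB) check further down. A condensed sketch of that
// pattern using x/sys/cpu (sealAsm and sealGeneric are stand-ins):
package sketch

import "golang.org/x/sys/cpu"

var useAVX2 = cpu.X86.HasAVX2 && cpu.X86.HasBMI2

func sealDispatch(sealAsm, sealGeneric func()) {
	if !cpu.X86.HasSSSE3 {
		sealGeneric() // pure-Go fallback, as in c.sealGeneric above
		return
	}
	sealAsm() // SSSE3 baseline; AVX2 variant selected inside the assembly
}
// ---- end aside ----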
- -//go:build gc && !purego -// +build gc,!purego - -#include "textflag.h" -// General register allocation -#define oup DI -#define inp SI -#define inl BX -#define adp CX // free to reuse, after we hash the additional data -#define keyp R8 // free to reuse, when we copy the key to stack -#define itr2 R9 // general iterator -#define itr1 CX // general iterator -#define acc0 R10 -#define acc1 R11 -#define acc2 R12 -#define t0 R13 -#define t1 R14 -#define t2 R15 -#define t3 R8 -// Register and stack allocation for the SSE code -#define rStore (0*16)(BP) -#define sStore (1*16)(BP) -#define state1Store (2*16)(BP) -#define state2Store (3*16)(BP) -#define tmpStore (4*16)(BP) -#define ctr0Store (5*16)(BP) -#define ctr1Store (6*16)(BP) -#define ctr2Store (7*16)(BP) -#define ctr3Store (8*16)(BP) -#define A0 X0 -#define A1 X1 -#define A2 X2 -#define B0 X3 -#define B1 X4 -#define B2 X5 -#define C0 X6 -#define C1 X7 -#define C2 X8 -#define D0 X9 -#define D1 X10 -#define D2 X11 -#define T0 X12 -#define T1 X13 -#define T2 X14 -#define T3 X15 -#define A3 T0 -#define B3 T1 -#define C3 T2 -#define D3 T3 -// Register and stack allocation for the AVX2 code -#define rsStoreAVX2 (0*32)(BP) -#define state1StoreAVX2 (1*32)(BP) -#define state2StoreAVX2 (2*32)(BP) -#define ctr0StoreAVX2 (3*32)(BP) -#define ctr1StoreAVX2 (4*32)(BP) -#define ctr2StoreAVX2 (5*32)(BP) -#define ctr3StoreAVX2 (6*32)(BP) -#define tmpStoreAVX2 (7*32)(BP) // 256 bytes on stack -#define AA0 Y0 -#define AA1 Y5 -#define AA2 Y6 -#define AA3 Y7 -#define BB0 Y14 -#define BB1 Y9 -#define BB2 Y10 -#define BB3 Y11 -#define CC0 Y12 -#define CC1 Y13 -#define CC2 Y8 -#define CC3 Y15 -#define DD0 Y4 -#define DD1 Y1 -#define DD2 Y2 -#define DD3 Y3 -#define TT0 DD3 -#define TT1 AA3 -#define TT2 BB3 -#define TT3 CC3 -// ChaCha20 constants -DATA ·chacha20Constants<>+0x00(SB)/4, $0x61707865 -DATA ·chacha20Constants<>+0x04(SB)/4, $0x3320646e -DATA ·chacha20Constants<>+0x08(SB)/4, $0x79622d32 -DATA ·chacha20Constants<>+0x0c(SB)/4, $0x6b206574 -DATA ·chacha20Constants<>+0x10(SB)/4, $0x61707865 -DATA ·chacha20Constants<>+0x14(SB)/4, $0x3320646e -DATA ·chacha20Constants<>+0x18(SB)/4, $0x79622d32 -DATA ·chacha20Constants<>+0x1c(SB)/4, $0x6b206574 -// <<< 16 with PSHUFB -DATA ·rol16<>+0x00(SB)/8, $0x0504070601000302 -DATA ·rol16<>+0x08(SB)/8, $0x0D0C0F0E09080B0A -DATA ·rol16<>+0x10(SB)/8, $0x0504070601000302 -DATA ·rol16<>+0x18(SB)/8, $0x0D0C0F0E09080B0A -// <<< 8 with PSHUFB -DATA ·rol8<>+0x00(SB)/8, $0x0605040702010003 -DATA ·rol8<>+0x08(SB)/8, $0x0E0D0C0F0A09080B -DATA ·rol8<>+0x10(SB)/8, $0x0605040702010003 -DATA ·rol8<>+0x18(SB)/8, $0x0E0D0C0F0A09080B - -DATA ·avx2InitMask<>+0x00(SB)/8, $0x0 -DATA ·avx2InitMask<>+0x08(SB)/8, $0x0 -DATA ·avx2InitMask<>+0x10(SB)/8, $0x1 -DATA ·avx2InitMask<>+0x18(SB)/8, $0x0 - -DATA ·avx2IncMask<>+0x00(SB)/8, $0x2 -DATA ·avx2IncMask<>+0x08(SB)/8, $0x0 -DATA ·avx2IncMask<>+0x10(SB)/8, $0x2 -DATA ·avx2IncMask<>+0x18(SB)/8, $0x0 -// Poly1305 key clamp -DATA ·polyClampMask<>+0x00(SB)/8, $0x0FFFFFFC0FFFFFFF -DATA ·polyClampMask<>+0x08(SB)/8, $0x0FFFFFFC0FFFFFFC -DATA ·polyClampMask<>+0x10(SB)/8, $0xFFFFFFFFFFFFFFFF -DATA ·polyClampMask<>+0x18(SB)/8, $0xFFFFFFFFFFFFFFFF - -DATA ·sseIncMask<>+0x00(SB)/8, $0x1 -DATA ·sseIncMask<>+0x08(SB)/8, $0x0 -// To load/store the last < 16 bytes in a buffer -DATA ·andMask<>+0x00(SB)/8, $0x00000000000000ff -DATA ·andMask<>+0x08(SB)/8, $0x0000000000000000 -DATA ·andMask<>+0x10(SB)/8, $0x000000000000ffff -DATA ·andMask<>+0x18(SB)/8, $0x0000000000000000 -DATA ·andMask<>+0x20(SB)/8, $0x0000000000ffffff 
-DATA ·andMask<>+0x28(SB)/8, $0x0000000000000000 -DATA ·andMask<>+0x30(SB)/8, $0x00000000ffffffff -DATA ·andMask<>+0x38(SB)/8, $0x0000000000000000 -DATA ·andMask<>+0x40(SB)/8, $0x000000ffffffffff -DATA ·andMask<>+0x48(SB)/8, $0x0000000000000000 -DATA ·andMask<>+0x50(SB)/8, $0x0000ffffffffffff -DATA ·andMask<>+0x58(SB)/8, $0x0000000000000000 -DATA ·andMask<>+0x60(SB)/8, $0x00ffffffffffffff -DATA ·andMask<>+0x68(SB)/8, $0x0000000000000000 -DATA ·andMask<>+0x70(SB)/8, $0xffffffffffffffff -DATA ·andMask<>+0x78(SB)/8, $0x0000000000000000 -DATA ·andMask<>+0x80(SB)/8, $0xffffffffffffffff -DATA ·andMask<>+0x88(SB)/8, $0x00000000000000ff -DATA ·andMask<>+0x90(SB)/8, $0xffffffffffffffff -DATA ·andMask<>+0x98(SB)/8, $0x000000000000ffff -DATA ·andMask<>+0xa0(SB)/8, $0xffffffffffffffff -DATA ·andMask<>+0xa8(SB)/8, $0x0000000000ffffff -DATA ·andMask<>+0xb0(SB)/8, $0xffffffffffffffff -DATA ·andMask<>+0xb8(SB)/8, $0x00000000ffffffff -DATA ·andMask<>+0xc0(SB)/8, $0xffffffffffffffff -DATA ·andMask<>+0xc8(SB)/8, $0x000000ffffffffff -DATA ·andMask<>+0xd0(SB)/8, $0xffffffffffffffff -DATA ·andMask<>+0xd8(SB)/8, $0x0000ffffffffffff -DATA ·andMask<>+0xe0(SB)/8, $0xffffffffffffffff -DATA ·andMask<>+0xe8(SB)/8, $0x00ffffffffffffff - -GLOBL ·chacha20Constants<>(SB), (NOPTR+RODATA), $32 -GLOBL ·rol16<>(SB), (NOPTR+RODATA), $32 -GLOBL ·rol8<>(SB), (NOPTR+RODATA), $32 -GLOBL ·sseIncMask<>(SB), (NOPTR+RODATA), $16 -GLOBL ·avx2IncMask<>(SB), (NOPTR+RODATA), $32 -GLOBL ·avx2InitMask<>(SB), (NOPTR+RODATA), $32 -GLOBL ·polyClampMask<>(SB), (NOPTR+RODATA), $32 -GLOBL ·andMask<>(SB), (NOPTR+RODATA), $240 -// No PALIGNR in Go ASM yet (but VPALIGNR is present). -#define shiftB0Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xdb; BYTE $0x04 // PALIGNR $4, X3, X3 -#define shiftB1Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xe4; BYTE $0x04 // PALIGNR $4, X4, X4 -#define shiftB2Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xed; BYTE $0x04 // PALIGNR $4, X5, X5 -#define shiftB3Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xed; BYTE $0x04 // PALIGNR $4, X13, X13 -#define shiftC0Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xf6; BYTE $0x08 // PALIGNR $8, X6, X6 -#define shiftC1Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xff; BYTE $0x08 // PALIGNR $8, X7, X7 -#define shiftC2Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xc0; BYTE $0x08 // PALIGNR $8, X8, X8 -#define shiftC3Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xf6; BYTE $0x08 // PALIGNR $8, X14, X14 -#define shiftD0Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xc9; BYTE $0x0c // PALIGNR $12, X9, X9 -#define shiftD1Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xd2; BYTE $0x0c // PALIGNR $12, X10, X10 -#define shiftD2Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xdb; BYTE $0x0c // PALIGNR $12, X11, X11 -#define shiftD3Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xff; BYTE $0x0c // PALIGNR $12, X15, X15 -#define shiftB0Right BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xdb; BYTE $0x0c // PALIGNR $12, X3, X3 -#define shiftB1Right BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xe4; BYTE $0x0c // PALIGNR $12, X4, X4 -#define shiftB2Right BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xed; BYTE $0x0c // PALIGNR $12, X5, X5 -#define shiftB3Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE 
$0xed; BYTE $0x0c // PALIGNR $12, X13, X13 -#define shiftC0Right shiftC0Left -#define shiftC1Right shiftC1Left -#define shiftC2Right shiftC2Left -#define shiftC3Right shiftC3Left -#define shiftD0Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xc9; BYTE $0x04 // PALIGNR $4, X9, X9 -#define shiftD1Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xd2; BYTE $0x04 // PALIGNR $4, X10, X10 -#define shiftD2Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xdb; BYTE $0x04 // PALIGNR $4, X11, X11 -#define shiftD3Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xff; BYTE $0x04 // PALIGNR $4, X15, X15 -// Some macros -#define chachaQR(A, B, C, D, T) \ - PADDD B, A; PXOR A, D; PSHUFB ·rol16<>(SB), D \ - PADDD D, C; PXOR C, B; MOVO B, T; PSLLL $12, T; PSRLL $20, B; PXOR T, B \ - PADDD B, A; PXOR A, D; PSHUFB ·rol8<>(SB), D \ - PADDD D, C; PXOR C, B; MOVO B, T; PSLLL $7, T; PSRLL $25, B; PXOR T, B - -#define chachaQR_AVX2(A, B, C, D, T) \ - VPADDD B, A, A; VPXOR A, D, D; VPSHUFB ·rol16<>(SB), D, D \ - VPADDD D, C, C; VPXOR C, B, B; VPSLLD $12, B, T; VPSRLD $20, B, B; VPXOR T, B, B \ - VPADDD B, A, A; VPXOR A, D, D; VPSHUFB ·rol8<>(SB), D, D \ - VPADDD D, C, C; VPXOR C, B, B; VPSLLD $7, B, T; VPSRLD $25, B, B; VPXOR T, B, B - -#define polyAdd(S) ADDQ S, acc0; ADCQ 8+S, acc1; ADCQ $1, acc2 -#define polyMulStage1 MOVQ (0*8)(BP), AX; MOVQ AX, t2; MULQ acc0; MOVQ AX, t0; MOVQ DX, t1; MOVQ (0*8)(BP), AX; MULQ acc1; IMULQ acc2, t2; ADDQ AX, t1; ADCQ DX, t2 -#define polyMulStage2 MOVQ (1*8)(BP), AX; MOVQ AX, t3; MULQ acc0; ADDQ AX, t1; ADCQ $0, DX; MOVQ DX, acc0; MOVQ (1*8)(BP), AX; MULQ acc1; ADDQ AX, t2; ADCQ $0, DX -#define polyMulStage3 IMULQ acc2, t3; ADDQ acc0, t2; ADCQ DX, t3 -#define polyMulReduceStage MOVQ t0, acc0; MOVQ t1, acc1; MOVQ t2, acc2; ANDQ $3, acc2; MOVQ t2, t0; ANDQ $-4, t0; MOVQ t3, t1; SHRQ $2, t3, t2; SHRQ $2, t3; ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $0, acc2; ADDQ t2, acc0; ADCQ t3, acc1; ADCQ $0, acc2 - -#define polyMulStage1_AVX2 MOVQ (0*8)(BP), DX; MOVQ DX, t2; MULXQ acc0, t0, t1; IMULQ acc2, t2; MULXQ acc1, AX, DX; ADDQ AX, t1; ADCQ DX, t2 -#define polyMulStage2_AVX2 MOVQ (1*8)(BP), DX; MULXQ acc0, acc0, AX; ADDQ acc0, t1; MULXQ acc1, acc1, t3; ADCQ acc1, t2; ADCQ $0, t3 -#define polyMulStage3_AVX2 IMULQ acc2, DX; ADDQ AX, t2; ADCQ DX, t3 - -#define polyMul polyMulStage1; polyMulStage2; polyMulStage3; polyMulReduceStage -#define polyMulAVX2 polyMulStage1_AVX2; polyMulStage2_AVX2; polyMulStage3_AVX2; polyMulReduceStage -// ---------------------------------------------------------------------------- -TEXT polyHashADInternal<>(SB), NOSPLIT, $0 - // adp points to beginning of additional data - // itr2 holds ad length - XORQ acc0, acc0 - XORQ acc1, acc1 - XORQ acc2, acc2 - CMPQ itr2, $13 - JNE hashADLoop - -openFastTLSAD: - // Special treatment for the TLS case of 13 bytes - MOVQ (adp), acc0 - MOVQ 5(adp), acc1 - SHRQ $24, acc1 - MOVQ $1, acc2 - polyMul - RET - -hashADLoop: - // Hash in 16 byte chunks - CMPQ itr2, $16 - JB hashADTail - polyAdd(0(adp)) - LEAQ (1*16)(adp), adp - SUBQ $16, itr2 - polyMul - JMP hashADLoop - -hashADTail: - CMPQ itr2, $0 - JE hashADDone - - // Hash last < 16 byte tail - XORQ t0, t0 - XORQ t1, t1 - XORQ t2, t2 - ADDQ itr2, adp - -hashADTailLoop: - SHLQ $8, t0, t1 - SHLQ $8, t0 - MOVB -1(adp), t2 - XORQ t2, t0 - DECQ adp - DECQ itr2 - JNE hashADTailLoop - -hashADTailFinish: - ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $1, acc2 - polyMul - - // Finished AD -hashADDone: - RET - 
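As a readable reference for what the polyAdd/polyMul macros above compute (they are the Poly1305 core that the rest of this file interleaves with the ChaCha20 rounds): each 16-byte chunk is read as a little-endian integer, 2^128 is added — that is the trailing ADCQ $1 into acc2 — and the accumulator is multiplied by the clamped key half r modulo 2^130-5. Below is a minimal Go sketch of that arithmetic using math/big and a made-up toy r value; it is illustration only, not the constant-time three-limb code the assembly implements.

package main

import (
	"fmt"
	"math/big"
)

// p = 2^130 - 5, the Poly1305 prime.
var p = new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 130), big.NewInt(5))

// accumulate folds one 16-byte chunk into the accumulator h:
// h = (h + chunk + 2^128) * r mod p. This mirrors polyAdd (two
// ADDQ/ADCQ for the 128-bit chunk plus ADCQ $1 supplying the 2^128
// bit) followed by polyMul (schoolbook multiply, then reduction).
func accumulate(h, r *big.Int, chunk [16]byte) *big.Int {
	// Interpret the chunk as a little-endian integer
	// (big.Int.SetBytes is big-endian, so reverse first).
	le := make([]byte, 16)
	for i, b := range chunk {
		le[15-i] = b
	}
	n := new(big.Int).SetBytes(le)
	n.Add(n, new(big.Int).Lsh(big.NewInt(1), 128))
	h = new(big.Int).Add(h, n)
	h.Mul(h, r)
	return h.Mod(h, p)
}

func main() {
	// Toy r for illustration only; the real r is taken from the first
	// 16 bytes of ChaCha20 key stream and masked by polyClampMask.
	r, _ := new(big.Int).SetString("0806040200000000000000000000000", 16)
	h := big.NewInt(0)
	h = accumulate(h, r, [16]byte{'m', 's', 'g'})
	fmt.Printf("h = %x\n", h)
}

The assembly keeps h in the three 64-bit limbs acc0/acc1/acc2 and reduces with the identity 2^130 ≡ 5 (mod p): polyMulReduceStage adds 4c + c = 5c back into the low limbs, where c is the part of the product above 2^130.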
-// ---------------------------------------------------------------------------- -// func chacha20Poly1305Open(dst, key, src, ad []byte) bool -TEXT ·chacha20Poly1305Open(SB), 0, $288-97 - // For aligned stack access - MOVQ SP, BP - ADDQ $32, BP - ANDQ $-32, BP - MOVQ dst+0(FP), oup - MOVQ key+24(FP), keyp - MOVQ src+48(FP), inp - MOVQ src_len+56(FP), inl - MOVQ ad+72(FP), adp - - // Check for AVX2 support - CMPB ·useAVX2(SB), $1 - JE chacha20Poly1305Open_AVX2 - - // Special optimization, for very short buffers - CMPQ inl, $128 - JBE openSSE128 // About 16% faster - - // For long buffers, prepare the poly key first - MOVOU ·chacha20Constants<>(SB), A0 - MOVOU (1*16)(keyp), B0 - MOVOU (2*16)(keyp), C0 - MOVOU (3*16)(keyp), D0 - MOVO D0, T1 - - // Store state on stack for future use - MOVO B0, state1Store - MOVO C0, state2Store - MOVO D0, ctr3Store - MOVQ $10, itr2 - -openSSEPreparePolyKey: - chachaQR(A0, B0, C0, D0, T0) - shiftB0Left; shiftC0Left; shiftD0Left - chachaQR(A0, B0, C0, D0, T0) - shiftB0Right; shiftC0Right; shiftD0Right - DECQ itr2 - JNE openSSEPreparePolyKey - - // A0|B0 hold the Poly1305 32-byte key, C0,D0 can be discarded - PADDL ·chacha20Constants<>(SB), A0; PADDL state1Store, B0 - - // Clamp and store the key - PAND ·polyClampMask<>(SB), A0 - MOVO A0, rStore; MOVO B0, sStore - - // Hash AAD - MOVQ ad_len+80(FP), itr2 - CALL polyHashADInternal<>(SB) - -openSSEMainLoop: - CMPQ inl, $256 - JB openSSEMainLoopDone - - // Load state, increment counter blocks - MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0 - MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 - MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 - MOVO A2, A3; MOVO B2, B3; MOVO C2, C3; MOVO D2, D3; PADDL ·sseIncMask<>(SB), D3 - - // Store counters - MOVO D0, ctr0Store; MOVO D1, ctr1Store; MOVO D2, ctr2Store; MOVO D3, ctr3Store - - // There are 10 ChaCha20 iterations of 2QR each, so for 6 iterations we hash 2 blocks, and for the remaining 4 only 1 block - for a total of 16 - MOVQ $4, itr1 - MOVQ inp, itr2 - -openSSEInternalLoop: - MOVO C3, tmpStore - chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) - MOVO tmpStore, C3 - MOVO C1, tmpStore - chachaQR(A3, B3, C3, D3, C1) - MOVO tmpStore, C1 - polyAdd(0(itr2)) - shiftB0Left; shiftB1Left; shiftB2Left; shiftB3Left - shiftC0Left; shiftC1Left; shiftC2Left; shiftC3Left - shiftD0Left; shiftD1Left; shiftD2Left; shiftD3Left - polyMulStage1 - polyMulStage2 - LEAQ (2*8)(itr2), itr2 - MOVO C3, tmpStore - chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) - MOVO tmpStore, C3 - MOVO C1, tmpStore - polyMulStage3 - chachaQR(A3, B3, C3, D3, C1) - MOVO tmpStore, C1 - polyMulReduceStage - shiftB0Right; shiftB1Right; shiftB2Right; shiftB3Right - shiftC0Right; shiftC1Right; shiftC2Right; shiftC3Right - shiftD0Right; shiftD1Right; shiftD2Right; shiftD3Right - DECQ itr1 - JGE openSSEInternalLoop - - polyAdd(0(itr2)) - polyMul - LEAQ (2*8)(itr2), itr2 - - CMPQ itr1, $-6 - JG openSSEInternalLoop - - // Add in the state - PADDD ·chacha20Constants<>(SB), A0; PADDD ·chacha20Constants<>(SB), A1; PADDD ·chacha20Constants<>(SB), A2; PADDD ·chacha20Constants<>(SB), A3 - PADDD state1Store, B0; PADDD state1Store, B1; PADDD state1Store, B2; PADDD state1Store, B3 - PADDD state2Store, C0; PADDD state2Store, C1; PADDD state2Store, C2; PADDD state2Store, C3 - PADDD ctr0Store, D0; PADDD ctr1Store, 
D1; PADDD ctr2Store, D2; PADDD ctr3Store, D3 - - // Load - xor - store - MOVO D3, tmpStore - MOVOU (0*16)(inp), D3; PXOR D3, A0; MOVOU A0, (0*16)(oup) - MOVOU (1*16)(inp), D3; PXOR D3, B0; MOVOU B0, (1*16)(oup) - MOVOU (2*16)(inp), D3; PXOR D3, C0; MOVOU C0, (2*16)(oup) - MOVOU (3*16)(inp), D3; PXOR D3, D0; MOVOU D0, (3*16)(oup) - MOVOU (4*16)(inp), D0; PXOR D0, A1; MOVOU A1, (4*16)(oup) - MOVOU (5*16)(inp), D0; PXOR D0, B1; MOVOU B1, (5*16)(oup) - MOVOU (6*16)(inp), D0; PXOR D0, C1; MOVOU C1, (6*16)(oup) - MOVOU (7*16)(inp), D0; PXOR D0, D1; MOVOU D1, (7*16)(oup) - MOVOU (8*16)(inp), D0; PXOR D0, A2; MOVOU A2, (8*16)(oup) - MOVOU (9*16)(inp), D0; PXOR D0, B2; MOVOU B2, (9*16)(oup) - MOVOU (10*16)(inp), D0; PXOR D0, C2; MOVOU C2, (10*16)(oup) - MOVOU (11*16)(inp), D0; PXOR D0, D2; MOVOU D2, (11*16)(oup) - MOVOU (12*16)(inp), D0; PXOR D0, A3; MOVOU A3, (12*16)(oup) - MOVOU (13*16)(inp), D0; PXOR D0, B3; MOVOU B3, (13*16)(oup) - MOVOU (14*16)(inp), D0; PXOR D0, C3; MOVOU C3, (14*16)(oup) - MOVOU (15*16)(inp), D0; PXOR tmpStore, D0; MOVOU D0, (15*16)(oup) - LEAQ 256(inp), inp - LEAQ 256(oup), oup - SUBQ $256, inl - JMP openSSEMainLoop - -openSSEMainLoopDone: - // Handle the various tail sizes efficiently - TESTQ inl, inl - JE openSSEFinalize - CMPQ inl, $64 - JBE openSSETail64 - CMPQ inl, $128 - JBE openSSETail128 - CMPQ inl, $192 - JBE openSSETail192 - JMP openSSETail256 - -openSSEFinalize: - // Hash in the PT, AAD lengths - ADDQ ad_len+80(FP), acc0; ADCQ src_len+56(FP), acc1; ADCQ $1, acc2 - polyMul - - // Final reduce - MOVQ acc0, t0 - MOVQ acc1, t1 - MOVQ acc2, t2 - SUBQ $-5, acc0 - SBBQ $-1, acc1 - SBBQ $3, acc2 - CMOVQCS t0, acc0 - CMOVQCS t1, acc1 - CMOVQCS t2, acc2 - - // Add in the "s" part of the key - ADDQ 0+sStore, acc0 - ADCQ 8+sStore, acc1 - - // Finally, constant time compare to the tag at the end of the message - XORQ AX, AX - MOVQ $1, DX - XORQ (0*8)(inp), acc0 - XORQ (1*8)(inp), acc1 - ORQ acc1, acc0 - CMOVQEQ DX, AX - - // Return true iff tags are equal - MOVB AX, ret+96(FP) - RET - -// ---------------------------------------------------------------------------- -// Special optimization for buffers smaller than 129 bytes -openSSE128: - // For up to 128 bytes of ciphertext and 64 bytes for the poly key, we require to process three blocks - MOVOU ·chacha20Constants<>(SB), A0; MOVOU (1*16)(keyp), B0; MOVOU (2*16)(keyp), C0; MOVOU (3*16)(keyp), D0 - MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 - MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 - MOVO B0, T1; MOVO C0, T2; MOVO D1, T3 - MOVQ $10, itr2 - -openSSE128InnerCipherLoop: - chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) - shiftB0Left; shiftB1Left; shiftB2Left - shiftC0Left; shiftC1Left; shiftC2Left - shiftD0Left; shiftD1Left; shiftD2Left - chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) - shiftB0Right; shiftB1Right; shiftB2Right - shiftC0Right; shiftC1Right; shiftC2Right - shiftD0Right; shiftD1Right; shiftD2Right - DECQ itr2 - JNE openSSE128InnerCipherLoop - - // A0|B0 hold the Poly1305 32-byte key, C0,D0 can be discarded - PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1; PADDL ·chacha20Constants<>(SB), A2 - PADDL T1, B0; PADDL T1, B1; PADDL T1, B2 - PADDL T2, C1; PADDL T2, C2 - PADDL T3, D1; PADDL ·sseIncMask<>(SB), T3; PADDL T3, D2 - - // Clamp and store the key - PAND ·polyClampMask<>(SB), A0 - MOVOU A0, rStore; MOVOU B0, sStore - - // Hash - MOVQ 
ad_len+80(FP), itr2 - CALL polyHashADInternal<>(SB) - -openSSE128Open: - CMPQ inl, $16 - JB openSSETail16 - SUBQ $16, inl - - // Load for hashing - polyAdd(0(inp)) - - // Load for decryption - MOVOU (inp), T0; PXOR T0, A1; MOVOU A1, (oup) - LEAQ (1*16)(inp), inp - LEAQ (1*16)(oup), oup - polyMul - - // Shift the stream "left" - MOVO B1, A1 - MOVO C1, B1 - MOVO D1, C1 - MOVO A2, D1 - MOVO B2, A2 - MOVO C2, B2 - MOVO D2, C2 - JMP openSSE128Open - -openSSETail16: - TESTQ inl, inl - JE openSSEFinalize - - // We can safely load the CT from the end, because it is padded with the MAC - MOVQ inl, itr2 - SHLQ $4, itr2 - LEAQ ·andMask<>(SB), t0 - MOVOU (inp), T0 - ADDQ inl, inp - PAND -16(t0)(itr2*1), T0 - MOVO T0, 0+tmpStore - MOVQ T0, t0 - MOVQ 8+tmpStore, t1 - PXOR A1, T0 - - // We can only store one byte at a time, since plaintext can be shorter than 16 bytes -openSSETail16Store: - MOVQ T0, t3 - MOVB t3, (oup) - PSRLDQ $1, T0 - INCQ oup - DECQ inl - JNE openSSETail16Store - ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $1, acc2 - polyMul - JMP openSSEFinalize - -// ---------------------------------------------------------------------------- -// Special optimization for the last 64 bytes of ciphertext -openSSETail64: - // Need to decrypt up to 64 bytes - prepare single block - MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr0Store - XORQ itr2, itr2 - MOVQ inl, itr1 - CMPQ itr1, $16 - JB openSSETail64LoopB - -openSSETail64LoopA: - // Perform ChaCha rounds, while hashing the remaining input - polyAdd(0(inp)(itr2*1)) - polyMul - SUBQ $16, itr1 - -openSSETail64LoopB: - ADDQ $16, itr2 - chachaQR(A0, B0, C0, D0, T0) - shiftB0Left; shiftC0Left; shiftD0Left - chachaQR(A0, B0, C0, D0, T0) - shiftB0Right; shiftC0Right; shiftD0Right - - CMPQ itr1, $16 - JAE openSSETail64LoopA - - CMPQ itr2, $160 - JNE openSSETail64LoopB - - PADDL ·chacha20Constants<>(SB), A0; PADDL state1Store, B0; PADDL state2Store, C0; PADDL ctr0Store, D0 - -openSSETail64DecLoop: - CMPQ inl, $16 - JB openSSETail64DecLoopDone - SUBQ $16, inl - MOVOU (inp), T0 - PXOR T0, A0 - MOVOU A0, (oup) - LEAQ 16(inp), inp - LEAQ 16(oup), oup - MOVO B0, A0 - MOVO C0, B0 - MOVO D0, C0 - JMP openSSETail64DecLoop - -openSSETail64DecLoopDone: - MOVO A0, A1 - JMP openSSETail16 - -// ---------------------------------------------------------------------------- -// Special optimization for the last 128 bytes of ciphertext -openSSETail128: - // Need to decrypt up to 128 bytes - prepare two blocks - MOVO ·chacha20Constants<>(SB), A1; MOVO state1Store, B1; MOVO state2Store, C1; MOVO ctr3Store, D1; PADDL ·sseIncMask<>(SB), D1; MOVO D1, ctr0Store - MOVO A1, A0; MOVO B1, B0; MOVO C1, C0; MOVO D1, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr1Store - XORQ itr2, itr2 - MOVQ inl, itr1 - ANDQ $-16, itr1 - -openSSETail128LoopA: - // Perform ChaCha rounds, while hashing the remaining input - polyAdd(0(inp)(itr2*1)) - polyMul - -openSSETail128LoopB: - ADDQ $16, itr2 - chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0) - shiftB0Left; shiftC0Left; shiftD0Left - shiftB1Left; shiftC1Left; shiftD1Left - chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0) - shiftB0Right; shiftC0Right; shiftD0Right - shiftB1Right; shiftC1Right; shiftD1Right - - CMPQ itr2, itr1 - JB openSSETail128LoopA - - CMPQ itr2, $160 - JNE openSSETail128LoopB - - PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1 - PADDL state1Store, B0; PADDL state1Store, B1 - PADDL state2Store, C0; PADDL 
state2Store, C1 - PADDL ctr1Store, D0; PADDL ctr0Store, D1 - - MOVOU (0*16)(inp), T0; MOVOU (1*16)(inp), T1; MOVOU (2*16)(inp), T2; MOVOU (3*16)(inp), T3 - PXOR T0, A1; PXOR T1, B1; PXOR T2, C1; PXOR T3, D1 - MOVOU A1, (0*16)(oup); MOVOU B1, (1*16)(oup); MOVOU C1, (2*16)(oup); MOVOU D1, (3*16)(oup) - - SUBQ $64, inl - LEAQ 64(inp), inp - LEAQ 64(oup), oup - JMP openSSETail64DecLoop - -// ---------------------------------------------------------------------------- -// Special optimization for the last 192 bytes of ciphertext -openSSETail192: - // Need to decrypt up to 192 bytes - prepare three blocks - MOVO ·chacha20Constants<>(SB), A2; MOVO state1Store, B2; MOVO state2Store, C2; MOVO ctr3Store, D2; PADDL ·sseIncMask<>(SB), D2; MOVO D2, ctr0Store - MOVO A2, A1; MOVO B2, B1; MOVO C2, C1; MOVO D2, D1; PADDL ·sseIncMask<>(SB), D1; MOVO D1, ctr1Store - MOVO A1, A0; MOVO B1, B0; MOVO C1, C0; MOVO D1, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr2Store - - MOVQ inl, itr1 - MOVQ $160, itr2 - CMPQ itr1, $160 - CMOVQGT itr2, itr1 - ANDQ $-16, itr1 - XORQ itr2, itr2 - -openSSLTail192LoopA: - // Perform ChaCha rounds, while hashing the remaining input - polyAdd(0(inp)(itr2*1)) - polyMul - -openSSLTail192LoopB: - ADDQ $16, itr2 - chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) - shiftB0Left; shiftC0Left; shiftD0Left - shiftB1Left; shiftC1Left; shiftD1Left - shiftB2Left; shiftC2Left; shiftD2Left - - chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) - shiftB0Right; shiftC0Right; shiftD0Right - shiftB1Right; shiftC1Right; shiftD1Right - shiftB2Right; shiftC2Right; shiftD2Right - - CMPQ itr2, itr1 - JB openSSLTail192LoopA - - CMPQ itr2, $160 - JNE openSSLTail192LoopB - - CMPQ inl, $176 - JB openSSLTail192Store - - polyAdd(160(inp)) - polyMul - - CMPQ inl, $192 - JB openSSLTail192Store - - polyAdd(176(inp)) - polyMul - -openSSLTail192Store: - PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1; PADDL ·chacha20Constants<>(SB), A2 - PADDL state1Store, B0; PADDL state1Store, B1; PADDL state1Store, B2 - PADDL state2Store, C0; PADDL state2Store, C1; PADDL state2Store, C2 - PADDL ctr2Store, D0; PADDL ctr1Store, D1; PADDL ctr0Store, D2 - - MOVOU (0*16)(inp), T0; MOVOU (1*16)(inp), T1; MOVOU (2*16)(inp), T2; MOVOU (3*16)(inp), T3 - PXOR T0, A2; PXOR T1, B2; PXOR T2, C2; PXOR T3, D2 - MOVOU A2, (0*16)(oup); MOVOU B2, (1*16)(oup); MOVOU C2, (2*16)(oup); MOVOU D2, (3*16)(oup) - - MOVOU (4*16)(inp), T0; MOVOU (5*16)(inp), T1; MOVOU (6*16)(inp), T2; MOVOU (7*16)(inp), T3 - PXOR T0, A1; PXOR T1, B1; PXOR T2, C1; PXOR T3, D1 - MOVOU A1, (4*16)(oup); MOVOU B1, (5*16)(oup); MOVOU C1, (6*16)(oup); MOVOU D1, (7*16)(oup) - - SUBQ $128, inl - LEAQ 128(inp), inp - LEAQ 128(oup), oup - JMP openSSETail64DecLoop - -// ---------------------------------------------------------------------------- -// Special optimization for the last 256 bytes of ciphertext -openSSETail256: - // Need to decrypt up to 256 bytes - prepare four blocks - MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0 - MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 - MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 - MOVO A2, A3; MOVO B2, B3; MOVO C2, C3; MOVO D2, D3; PADDL ·sseIncMask<>(SB), D3 - - // Store counters - MOVO D0, ctr0Store; MOVO D1, ctr1Store; MOVO D2, ctr2Store; MOVO D3, ctr3Store - XORQ itr2, itr2 - 
-openSSETail256Loop: - // This loop interleaves 8 ChaCha quarter rounds with 1 poly multiplication - polyAdd(0(inp)(itr2*1)) - MOVO C3, tmpStore - chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) - MOVO tmpStore, C3 - MOVO C1, tmpStore - chachaQR(A3, B3, C3, D3, C1) - MOVO tmpStore, C1 - shiftB0Left; shiftB1Left; shiftB2Left; shiftB3Left - shiftC0Left; shiftC1Left; shiftC2Left; shiftC3Left - shiftD0Left; shiftD1Left; shiftD2Left; shiftD3Left - polyMulStage1 - polyMulStage2 - MOVO C3, tmpStore - chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) - MOVO tmpStore, C3 - MOVO C1, tmpStore - chachaQR(A3, B3, C3, D3, C1) - MOVO tmpStore, C1 - polyMulStage3 - polyMulReduceStage - shiftB0Right; shiftB1Right; shiftB2Right; shiftB3Right - shiftC0Right; shiftC1Right; shiftC2Right; shiftC3Right - shiftD0Right; shiftD1Right; shiftD2Right; shiftD3Right - ADDQ $2*8, itr2 - CMPQ itr2, $160 - JB openSSETail256Loop - MOVQ inl, itr1 - ANDQ $-16, itr1 - -openSSETail256HashLoop: - polyAdd(0(inp)(itr2*1)) - polyMul - ADDQ $2*8, itr2 - CMPQ itr2, itr1 - JB openSSETail256HashLoop - - // Add in the state - PADDD ·chacha20Constants<>(SB), A0; PADDD ·chacha20Constants<>(SB), A1; PADDD ·chacha20Constants<>(SB), A2; PADDD ·chacha20Constants<>(SB), A3 - PADDD state1Store, B0; PADDD state1Store, B1; PADDD state1Store, B2; PADDD state1Store, B3 - PADDD state2Store, C0; PADDD state2Store, C1; PADDD state2Store, C2; PADDD state2Store, C3 - PADDD ctr0Store, D0; PADDD ctr1Store, D1; PADDD ctr2Store, D2; PADDD ctr3Store, D3 - MOVO D3, tmpStore - - // Load - xor - store - MOVOU (0*16)(inp), D3; PXOR D3, A0 - MOVOU (1*16)(inp), D3; PXOR D3, B0 - MOVOU (2*16)(inp), D3; PXOR D3, C0 - MOVOU (3*16)(inp), D3; PXOR D3, D0 - MOVOU A0, (0*16)(oup) - MOVOU B0, (1*16)(oup) - MOVOU C0, (2*16)(oup) - MOVOU D0, (3*16)(oup) - MOVOU (4*16)(inp), A0; MOVOU (5*16)(inp), B0; MOVOU (6*16)(inp), C0; MOVOU (7*16)(inp), D0 - PXOR A0, A1; PXOR B0, B1; PXOR C0, C1; PXOR D0, D1 - MOVOU A1, (4*16)(oup); MOVOU B1, (5*16)(oup); MOVOU C1, (6*16)(oup); MOVOU D1, (7*16)(oup) - MOVOU (8*16)(inp), A0; MOVOU (9*16)(inp), B0; MOVOU (10*16)(inp), C0; MOVOU (11*16)(inp), D0 - PXOR A0, A2; PXOR B0, B2; PXOR C0, C2; PXOR D0, D2 - MOVOU A2, (8*16)(oup); MOVOU B2, (9*16)(oup); MOVOU C2, (10*16)(oup); MOVOU D2, (11*16)(oup) - LEAQ 192(inp), inp - LEAQ 192(oup), oup - SUBQ $192, inl - MOVO A3, A0 - MOVO B3, B0 - MOVO C3, C0 - MOVO tmpStore, D0 - - JMP openSSETail64DecLoop - -// ---------------------------------------------------------------------------- -// ------------------------- AVX2 Code ---------------------------------------- -chacha20Poly1305Open_AVX2: - VZEROUPPER - VMOVDQU ·chacha20Constants<>(SB), AA0 - BYTE $0xc4; BYTE $0x42; BYTE $0x7d; BYTE $0x5a; BYTE $0x70; BYTE $0x10 // broadcasti128 16(r8), ymm14 - BYTE $0xc4; BYTE $0x42; BYTE $0x7d; BYTE $0x5a; BYTE $0x60; BYTE $0x20 // broadcasti128 32(r8), ymm12 - BYTE $0xc4; BYTE $0xc2; BYTE $0x7d; BYTE $0x5a; BYTE $0x60; BYTE $0x30 // broadcasti128 48(r8), ymm4 - VPADDD ·avx2InitMask<>(SB), DD0, DD0 - - // Special optimization, for very short buffers - CMPQ inl, $192 - JBE openAVX2192 - CMPQ inl, $320 - JBE openAVX2320 - - // For the general key prepare the key first - as a byproduct we have 64 bytes of cipher stream - VMOVDQA BB0, state1StoreAVX2 - VMOVDQA CC0, state2StoreAVX2 - VMOVDQA DD0, ctr3StoreAVX2 - MOVQ $10, itr2 - -openAVX2PreparePolyKey: - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0) - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $8, CC0,
CC0, CC0; VPALIGNR $12, DD0, DD0, DD0 - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0) - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $4, DD0, DD0, DD0 - DECQ itr2 - JNE openAVX2PreparePolyKey - - VPADDD ·chacha20Constants<>(SB), AA0, AA0 - VPADDD state1StoreAVX2, BB0, BB0 - VPADDD state2StoreAVX2, CC0, CC0 - VPADDD ctr3StoreAVX2, DD0, DD0 - - VPERM2I128 $0x02, AA0, BB0, TT0 - - // Clamp and store poly key - VPAND ·polyClampMask<>(SB), TT0, TT0 - VMOVDQA TT0, rsStoreAVX2 - - // Stream for the first 64 bytes - VPERM2I128 $0x13, AA0, BB0, AA0 - VPERM2I128 $0x13, CC0, DD0, BB0 - - // Hash AD + first 64 bytes - MOVQ ad_len+80(FP), itr2 - CALL polyHashADInternal<>(SB) - XORQ itr1, itr1 - -openAVX2InitialHash64: - polyAdd(0(inp)(itr1*1)) - polyMulAVX2 - ADDQ $16, itr1 - CMPQ itr1, $64 - JNE openAVX2InitialHash64 - - // Decrypt the first 64 bytes - VPXOR (0*32)(inp), AA0, AA0 - VPXOR (1*32)(inp), BB0, BB0 - VMOVDQU AA0, (0*32)(oup) - VMOVDQU BB0, (1*32)(oup) - LEAQ (2*32)(inp), inp - LEAQ (2*32)(oup), oup - SUBQ $64, inl - -openAVX2MainLoop: - CMPQ inl, $512 - JB openAVX2MainLoopDone - - // Load state, increment counter blocks, store the incremented counters - VMOVDQU ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 - VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3 - VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3 - VMOVDQA ctr3StoreAVX2, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3 - VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2 - XORQ itr1, itr1 - -openAVX2InternalLoop: - // Lets just say this spaghetti loop interleaves 2 quarter rounds with 3 poly multiplications - // Effectively per 512 bytes of stream we hash 480 bytes of ciphertext - polyAdd(0*8(inp)(itr1*1)) - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - polyMulStage1_AVX2 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 - polyMulStage2_AVX2 - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - polyMulStage3_AVX2 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - polyMulReduceStage - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 - polyAdd(2*8(inp)(itr1*1)) - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - polyMulStage1_AVX2 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $7, 
BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - polyMulStage2_AVX2 - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $4, BB3, BB3, BB3 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 - VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2; VPALIGNR $12, DD3, DD3, DD3 - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - polyMulStage3_AVX2 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 - polyMulReduceStage - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - polyAdd(4*8(inp)(itr1*1)) - LEAQ (6*8)(itr1), itr1 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - polyMulStage1_AVX2 - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - polyMulStage2_AVX2 - VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - polyMulStage3_AVX2 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - polyMulReduceStage - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $12, BB3, BB3, BB3 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 - VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2; VPALIGNR $4, DD3, DD3, DD3 - CMPQ itr1, $480 - JNE openAVX2InternalLoop - - VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3 - VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3 - VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3 - VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3 - VMOVDQA CC3, tmpStoreAVX2 - - // We only hashed 480 of the 512 bytes available - hash the remaining 32 here - polyAdd(480(inp)) - polyMulAVX2 - VPERM2I128 $0x02, AA0, BB0, CC3; VPERM2I128 $0x13, AA0, BB0, BB0; VPERM2I128 $0x02, CC0, DD0, AA0; VPERM2I128 $0x13, CC0, DD0, CC0 - VPXOR (0*32)(inp), CC3, CC3; VPXOR 
(1*32)(inp), AA0, AA0; VPXOR (2*32)(inp), BB0, BB0; VPXOR (3*32)(inp), CC0, CC0 - VMOVDQU CC3, (0*32)(oup); VMOVDQU AA0, (1*32)(oup); VMOVDQU BB0, (2*32)(oup); VMOVDQU CC0, (3*32)(oup) - VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 - VPXOR (4*32)(inp), AA0, AA0; VPXOR (5*32)(inp), BB0, BB0; VPXOR (6*32)(inp), CC0, CC0; VPXOR (7*32)(inp), DD0, DD0 - VMOVDQU AA0, (4*32)(oup); VMOVDQU BB0, (5*32)(oup); VMOVDQU CC0, (6*32)(oup); VMOVDQU DD0, (7*32)(oup) - - // and here - polyAdd(496(inp)) - polyMulAVX2 - VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0 - VPXOR (8*32)(inp), AA0, AA0; VPXOR (9*32)(inp), BB0, BB0; VPXOR (10*32)(inp), CC0, CC0; VPXOR (11*32)(inp), DD0, DD0 - VMOVDQU AA0, (8*32)(oup); VMOVDQU BB0, (9*32)(oup); VMOVDQU CC0, (10*32)(oup); VMOVDQU DD0, (11*32)(oup) - VPERM2I128 $0x02, AA3, BB3, AA0; VPERM2I128 $0x02, tmpStoreAVX2, DD3, BB0; VPERM2I128 $0x13, AA3, BB3, CC0; VPERM2I128 $0x13, tmpStoreAVX2, DD3, DD0 - VPXOR (12*32)(inp), AA0, AA0; VPXOR (13*32)(inp), BB0, BB0; VPXOR (14*32)(inp), CC0, CC0; VPXOR (15*32)(inp), DD0, DD0 - VMOVDQU AA0, (12*32)(oup); VMOVDQU BB0, (13*32)(oup); VMOVDQU CC0, (14*32)(oup); VMOVDQU DD0, (15*32)(oup) - LEAQ (32*16)(inp), inp - LEAQ (32*16)(oup), oup - SUBQ $(32*16), inl - JMP openAVX2MainLoop - -openAVX2MainLoopDone: - // Handle the various tail sizes efficiently - TESTQ inl, inl - JE openSSEFinalize - CMPQ inl, $128 - JBE openAVX2Tail128 - CMPQ inl, $256 - JBE openAVX2Tail256 - CMPQ inl, $384 - JBE openAVX2Tail384 - JMP openAVX2Tail512 - -// ---------------------------------------------------------------------------- -// Special optimization for buffers smaller than 193 bytes -openAVX2192: - // For up to 192 bytes of ciphertext and 64 bytes for the poly key, we process four blocks - VMOVDQA AA0, AA1 - VMOVDQA BB0, BB1 - VMOVDQA CC0, CC1 - VPADDD ·avx2IncMask<>(SB), DD0, DD1 - VMOVDQA AA0, AA2 - VMOVDQA BB0, BB2 - VMOVDQA CC0, CC2 - VMOVDQA DD0, DD2 - VMOVDQA DD1, TT3 - MOVQ $10, itr2 - -openAVX2192InnerCipherLoop: - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 - VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1 - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 - VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1 - DECQ itr2 - JNE openAVX2192InnerCipherLoop - VPADDD AA2, AA0, AA0; VPADDD AA2, AA1, AA1 - VPADDD BB2, BB0, BB0; VPADDD BB2, BB1, BB1 - VPADDD CC2, CC0, CC0; VPADDD CC2, CC1, CC1 - VPADDD DD2, DD0, DD0; VPADDD TT3, DD1, DD1 - VPERM2I128 $0x02, AA0, BB0, TT0 - - // Clamp and store poly key - VPAND ·polyClampMask<>(SB), TT0, TT0 - VMOVDQA TT0, rsStoreAVX2 - - // Stream for up to 192 bytes - VPERM2I128 $0x13, AA0, BB0, AA0 - VPERM2I128 $0x13, CC0, DD0, BB0 - VPERM2I128 $0x02, AA1, BB1, CC0 - VPERM2I128 $0x02, CC1, DD1, DD0 - VPERM2I128 $0x13, AA1, BB1, AA1 - VPERM2I128 $0x13, CC1, DD1, BB1 - -openAVX2ShortOpen: - // Hash - MOVQ ad_len+80(FP), itr2 - CALL polyHashADInternal<>(SB) - -openAVX2ShortOpenLoop: - CMPQ inl, $32 - JB openAVX2ShortTail32 - SUBQ $32, inl - - // Load for hashing - polyAdd(0*8(inp)) - polyMulAVX2 - polyAdd(2*8(inp)) - polyMulAVX2 - - // Load for 
decryption - VPXOR (inp), AA0, AA0 - VMOVDQU AA0, (oup) - LEAQ (1*32)(inp), inp - LEAQ (1*32)(oup), oup - - // Shift stream left - VMOVDQA BB0, AA0 - VMOVDQA CC0, BB0 - VMOVDQA DD0, CC0 - VMOVDQA AA1, DD0 - VMOVDQA BB1, AA1 - VMOVDQA CC1, BB1 - VMOVDQA DD1, CC1 - VMOVDQA AA2, DD1 - VMOVDQA BB2, AA2 - JMP openAVX2ShortOpenLoop - -openAVX2ShortTail32: - CMPQ inl, $16 - VMOVDQA A0, A1 - JB openAVX2ShortDone - - SUBQ $16, inl - - // Load for hashing - polyAdd(0*8(inp)) - polyMulAVX2 - - // Load for decryption - VPXOR (inp), A0, T0 - VMOVDQU T0, (oup) - LEAQ (1*16)(inp), inp - LEAQ (1*16)(oup), oup - VPERM2I128 $0x11, AA0, AA0, AA0 - VMOVDQA A0, A1 - -openAVX2ShortDone: - VZEROUPPER - JMP openSSETail16 - -// ---------------------------------------------------------------------------- -// Special optimization for buffers smaller than 321 bytes -openAVX2320: - // For up to 320 bytes of ciphertext and 64 bytes for the poly key, we process six blocks - VMOVDQA AA0, AA1; VMOVDQA BB0, BB1; VMOVDQA CC0, CC1; VPADDD ·avx2IncMask<>(SB), DD0, DD1 - VMOVDQA AA0, AA2; VMOVDQA BB0, BB2; VMOVDQA CC0, CC2; VPADDD ·avx2IncMask<>(SB), DD1, DD2 - VMOVDQA BB0, TT1; VMOVDQA CC0, TT2; VMOVDQA DD0, TT3 - MOVQ $10, itr2 - -openAVX2320InnerCipherLoop: - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 - VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2 - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 - VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2 - DECQ itr2 - JNE openAVX2320InnerCipherLoop - - VMOVDQA ·chacha20Constants<>(SB), TT0 - VPADDD TT0, AA0, AA0; VPADDD TT0, AA1, AA1; VPADDD TT0, AA2, AA2 - VPADDD TT1, BB0, BB0; VPADDD TT1, BB1, BB1; VPADDD TT1, BB2, BB2 - VPADDD TT2, CC0, CC0; VPADDD TT2, CC1, CC1; VPADDD TT2, CC2, CC2 - VMOVDQA ·avx2IncMask<>(SB), TT0 - VPADDD TT3, DD0, DD0; VPADDD TT0, TT3, TT3 - VPADDD TT3, DD1, DD1; VPADDD TT0, TT3, TT3 - VPADDD TT3, DD2, DD2 - - // Clamp and store poly key - VPERM2I128 $0x02, AA0, BB0, TT0 - VPAND ·polyClampMask<>(SB), TT0, TT0 - VMOVDQA TT0, rsStoreAVX2 - - // Stream for up to 320 bytes - VPERM2I128 $0x13, AA0, BB0, AA0 - VPERM2I128 $0x13, CC0, DD0, BB0 - VPERM2I128 $0x02, AA1, BB1, CC0 - VPERM2I128 $0x02, CC1, DD1, DD0 - VPERM2I128 $0x13, AA1, BB1, AA1 - VPERM2I128 $0x13, CC1, DD1, BB1 - VPERM2I128 $0x02, AA2, BB2, CC1 - VPERM2I128 $0x02, CC2, DD2, DD1 - VPERM2I128 $0x13, AA2, BB2, AA2 - VPERM2I128 $0x13, CC2, DD2, BB2 - JMP openAVX2ShortOpen - -// ---------------------------------------------------------------------------- -// Special optimization for the last 128 bytes of ciphertext -openAVX2Tail128: - // Need to decrypt up to 128 bytes - prepare two blocks - VMOVDQA ·chacha20Constants<>(SB), AA1 - VMOVDQA state1StoreAVX2, BB1 - VMOVDQA state2StoreAVX2, CC1 - VMOVDQA ctr3StoreAVX2, DD1 - VPADDD ·avx2IncMask<>(SB), DD1, DD1 - VMOVDQA DD1, DD0 - - XORQ itr2, itr2 - MOVQ inl, itr1 - ANDQ $-16, itr1 - TESTQ itr1, itr1 - JE openAVX2Tail128LoopB - -openAVX2Tail128LoopA: - // Perform ChaCha rounds, while hashing the 
remaining input - polyAdd(0(inp)(itr2*1)) - polyMulAVX2 - -openAVX2Tail128LoopB: - ADDQ $16, itr2 - chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) - VPALIGNR $4, BB1, BB1, BB1 - VPALIGNR $8, CC1, CC1, CC1 - VPALIGNR $12, DD1, DD1, DD1 - chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) - VPALIGNR $12, BB1, BB1, BB1 - VPALIGNR $8, CC1, CC1, CC1 - VPALIGNR $4, DD1, DD1, DD1 - CMPQ itr2, itr1 - JB openAVX2Tail128LoopA - CMPQ itr2, $160 - JNE openAVX2Tail128LoopB - - VPADDD ·chacha20Constants<>(SB), AA1, AA1 - VPADDD state1StoreAVX2, BB1, BB1 - VPADDD state2StoreAVX2, CC1, CC1 - VPADDD DD0, DD1, DD1 - VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 - -openAVX2TailLoop: - CMPQ inl, $32 - JB openAVX2Tail - SUBQ $32, inl - - // Load for decryption - VPXOR (inp), AA0, AA0 - VMOVDQU AA0, (oup) - LEAQ (1*32)(inp), inp - LEAQ (1*32)(oup), oup - VMOVDQA BB0, AA0 - VMOVDQA CC0, BB0 - VMOVDQA DD0, CC0 - JMP openAVX2TailLoop - -openAVX2Tail: - CMPQ inl, $16 - VMOVDQA A0, A1 - JB openAVX2TailDone - SUBQ $16, inl - - // Load for decryption - VPXOR (inp), A0, T0 - VMOVDQU T0, (oup) - LEAQ (1*16)(inp), inp - LEAQ (1*16)(oup), oup - VPERM2I128 $0x11, AA0, AA0, AA0 - VMOVDQA A0, A1 - -openAVX2TailDone: - VZEROUPPER - JMP openSSETail16 - -// ---------------------------------------------------------------------------- -// Special optimization for the last 256 bytes of ciphertext -openAVX2Tail256: - // Need to decrypt up to 256 bytes - prepare four blocks - VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1 - VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1 - VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1 - VMOVDQA ctr3StoreAVX2, DD0 - VPADDD ·avx2IncMask<>(SB), DD0, DD0 - VPADDD ·avx2IncMask<>(SB), DD0, DD1 - VMOVDQA DD0, TT1 - VMOVDQA DD1, TT2 - - // Compute the number of iterations that will hash data - MOVQ inl, tmpStoreAVX2 - MOVQ inl, itr1 - SUBQ $128, itr1 - SHRQ $4, itr1 - MOVQ $10, itr2 - CMPQ itr1, $10 - CMOVQGT itr2, itr1 - MOVQ inp, inl - XORQ itr2, itr2 - -openAVX2Tail256LoopA: - polyAdd(0(inl)) - polyMulAVX2 - LEAQ 16(inl), inl - - // Perform ChaCha rounds, while hashing the remaining input -openAVX2Tail256LoopB: - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 - VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1 - INCQ itr2 - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 - VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1 - CMPQ itr2, itr1 - JB openAVX2Tail256LoopA - - CMPQ itr2, $10 - JNE openAVX2Tail256LoopB - - MOVQ inl, itr2 - SUBQ inp, inl - MOVQ inl, itr1 - MOVQ tmpStoreAVX2, inl - - // Hash the remainder of data (if any) -openAVX2Tail256Hash: - ADDQ $16, itr1 - CMPQ itr1, inl - JGT openAVX2Tail256HashEnd - polyAdd (0(itr2)) - polyMulAVX2 - LEAQ 16(itr2), itr2 - JMP openAVX2Tail256Hash - -// Store 128 bytes safely, then go to store loop -openAVX2Tail256HashEnd: - VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1 - VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1 - VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1 - VPADDD TT1, DD0, DD0; VPADDD TT2, DD1, DD1 - VPERM2I128 $0x02, AA0, BB0, AA2; VPERM2I128 $0x02, CC0, DD0, BB2; VPERM2I128 $0x13, AA0, BB0, CC2; 
VPERM2I128 $0x13, CC0, DD0, DD2 - VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 - - VPXOR (0*32)(inp), AA2, AA2; VPXOR (1*32)(inp), BB2, BB2; VPXOR (2*32)(inp), CC2, CC2; VPXOR (3*32)(inp), DD2, DD2 - VMOVDQU AA2, (0*32)(oup); VMOVDQU BB2, (1*32)(oup); VMOVDQU CC2, (2*32)(oup); VMOVDQU DD2, (3*32)(oup) - LEAQ (4*32)(inp), inp - LEAQ (4*32)(oup), oup - SUBQ $4*32, inl - - JMP openAVX2TailLoop - -// ---------------------------------------------------------------------------- -// Special optimization for the last 384 bytes of ciphertext -openAVX2Tail384: - // Need to decrypt up to 384 bytes - prepare six blocks - VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2 - VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2 - VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2 - VMOVDQA ctr3StoreAVX2, DD0 - VPADDD ·avx2IncMask<>(SB), DD0, DD0 - VPADDD ·avx2IncMask<>(SB), DD0, DD1 - VPADDD ·avx2IncMask<>(SB), DD1, DD2 - VMOVDQA DD0, ctr0StoreAVX2 - VMOVDQA DD1, ctr1StoreAVX2 - VMOVDQA DD2, ctr2StoreAVX2 - - // Compute the number of iterations that will hash two blocks of data - MOVQ inl, tmpStoreAVX2 - MOVQ inl, itr1 - SUBQ $256, itr1 - SHRQ $4, itr1 - ADDQ $6, itr1 - MOVQ $10, itr2 - CMPQ itr1, $10 - CMOVQGT itr2, itr1 - MOVQ inp, inl - XORQ itr2, itr2 - - // Perform ChaCha rounds, while hashing the remaining input -openAVX2Tail384LoopB: - polyAdd(0(inl)) - polyMulAVX2 - LEAQ 16(inl), inl - -openAVX2Tail384LoopA: - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 - VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2 - polyAdd(0(inl)) - polyMulAVX2 - LEAQ 16(inl), inl - INCQ itr2 - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 - VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2 - - CMPQ itr2, itr1 - JB openAVX2Tail384LoopB - - CMPQ itr2, $10 - JNE openAVX2Tail384LoopA - - MOVQ inl, itr2 - SUBQ inp, inl - MOVQ inl, itr1 - MOVQ tmpStoreAVX2, inl - -openAVX2Tail384Hash: - ADDQ $16, itr1 - CMPQ itr1, inl - JGT openAVX2Tail384HashEnd - polyAdd(0(itr2)) - polyMulAVX2 - LEAQ 16(itr2), itr2 - JMP openAVX2Tail384Hash - -// Store 256 bytes safely, then go to store loop -openAVX2Tail384HashEnd: - VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2 - VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2 - VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2 - VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2 - VPERM2I128 $0x02, AA0, BB0, TT0; VPERM2I128 $0x02, CC0, DD0, TT1; VPERM2I128 $0x13, AA0, BB0, TT2; VPERM2I128 $0x13, CC0, DD0, TT3 - VPXOR (0*32)(inp), TT0, TT0; VPXOR (1*32)(inp), TT1, TT1; VPXOR (2*32)(inp), TT2, TT2; VPXOR (3*32)(inp), TT3, TT3 - VMOVDQU TT0, (0*32)(oup); VMOVDQU TT1, (1*32)(oup); VMOVDQU TT2, 
(2*32)(oup); VMOVDQU TT3, (3*32)(oup) - VPERM2I128 $0x02, AA1, BB1, TT0; VPERM2I128 $0x02, CC1, DD1, TT1; VPERM2I128 $0x13, AA1, BB1, TT2; VPERM2I128 $0x13, CC1, DD1, TT3 - VPXOR (4*32)(inp), TT0, TT0; VPXOR (5*32)(inp), TT1, TT1; VPXOR (6*32)(inp), TT2, TT2; VPXOR (7*32)(inp), TT3, TT3 - VMOVDQU TT0, (4*32)(oup); VMOVDQU TT1, (5*32)(oup); VMOVDQU TT2, (6*32)(oup); VMOVDQU TT3, (7*32)(oup) - VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0 - LEAQ (8*32)(inp), inp - LEAQ (8*32)(oup), oup - SUBQ $8*32, inl - JMP openAVX2TailLoop - -// ---------------------------------------------------------------------------- -// Special optimization for the last 512 bytes of ciphertext -openAVX2Tail512: - VMOVDQU ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 - VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3 - VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3 - VMOVDQA ctr3StoreAVX2, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3 - VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2 - XORQ itr1, itr1 - MOVQ inp, itr2 - -openAVX2Tail512LoopB: - polyAdd(0(itr2)) - polyMulAVX2 - LEAQ (2*8)(itr2), itr2 - -openAVX2Tail512LoopA: - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - polyAdd(0*8(itr2)) - polyMulAVX2 - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $4, BB3, BB3, BB3 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 - VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2; VPALIGNR $12, DD3, DD3, DD3 - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - VPXOR AA0, DD0, DD0; 
VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - polyAdd(2*8(itr2)) - polyMulAVX2 - LEAQ (4*8)(itr2), itr2 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $12, BB3, BB3, BB3 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 - VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2; VPALIGNR $4, DD3, DD3, DD3 - INCQ itr1 - CMPQ itr1, $4 - JLT openAVX2Tail512LoopB - - CMPQ itr1, $10 - JNE openAVX2Tail512LoopA - - MOVQ inl, itr1 - SUBQ $384, itr1 - ANDQ $-16, itr1 - -openAVX2Tail512HashLoop: - TESTQ itr1, itr1 - JE openAVX2Tail512HashEnd - polyAdd(0(itr2)) - polyMulAVX2 - LEAQ 16(itr2), itr2 - SUBQ $16, itr1 - JMP openAVX2Tail512HashLoop - -openAVX2Tail512HashEnd: - VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3 - VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3 - VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3 - VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3 - VMOVDQA CC3, tmpStoreAVX2 - VPERM2I128 $0x02, AA0, BB0, CC3; VPERM2I128 $0x13, AA0, BB0, BB0; VPERM2I128 $0x02, CC0, DD0, AA0; VPERM2I128 $0x13, CC0, DD0, CC0 - VPXOR (0*32)(inp), CC3, CC3; VPXOR (1*32)(inp), AA0, AA0; VPXOR (2*32)(inp), BB0, BB0; VPXOR (3*32)(inp), CC0, CC0 - VMOVDQU CC3, (0*32)(oup); VMOVDQU AA0, (1*32)(oup); VMOVDQU BB0, (2*32)(oup); VMOVDQU CC0, (3*32)(oup) - VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 - VPXOR (4*32)(inp), AA0, AA0; VPXOR (5*32)(inp), BB0, BB0; VPXOR (6*32)(inp), CC0, CC0; VPXOR (7*32)(inp), DD0, DD0 - VMOVDQU AA0, (4*32)(oup); VMOVDQU BB0, (5*32)(oup); VMOVDQU CC0, (6*32)(oup); VMOVDQU DD0, (7*32)(oup) - 
VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0 - VPXOR (8*32)(inp), AA0, AA0; VPXOR (9*32)(inp), BB0, BB0; VPXOR (10*32)(inp), CC0, CC0; VPXOR (11*32)(inp), DD0, DD0 - VMOVDQU AA0, (8*32)(oup); VMOVDQU BB0, (9*32)(oup); VMOVDQU CC0, (10*32)(oup); VMOVDQU DD0, (11*32)(oup) - VPERM2I128 $0x02, AA3, BB3, AA0; VPERM2I128 $0x02, tmpStoreAVX2, DD3, BB0; VPERM2I128 $0x13, AA3, BB3, CC0; VPERM2I128 $0x13, tmpStoreAVX2, DD3, DD0 - - LEAQ (12*32)(inp), inp - LEAQ (12*32)(oup), oup - SUBQ $12*32, inl - - JMP openAVX2TailLoop - -// ---------------------------------------------------------------------------- -// ---------------------------------------------------------------------------- -// func chacha20Poly1305Seal(dst, key, src, ad []byte) -TEXT ·chacha20Poly1305Seal(SB), 0, $288-96 - // For aligned stack access - MOVQ SP, BP - ADDQ $32, BP - ANDQ $-32, BP - MOVQ dst+0(FP), oup - MOVQ key+24(FP), keyp - MOVQ src+48(FP), inp - MOVQ src_len+56(FP), inl - MOVQ ad+72(FP), adp - - CMPB ·useAVX2(SB), $1 - JE chacha20Poly1305Seal_AVX2 - - // Special optimization, for very short buffers - CMPQ inl, $128 - JBE sealSSE128 // About 15% faster - - // In the seal case - prepare the poly key + 3 blocks of stream in the first iteration - MOVOU ·chacha20Constants<>(SB), A0 - MOVOU (1*16)(keyp), B0 - MOVOU (2*16)(keyp), C0 - MOVOU (3*16)(keyp), D0 - - // Store state on stack for future use - MOVO B0, state1Store - MOVO C0, state2Store - - // Load state, increment counter blocks - MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 - MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 - MOVO A2, A3; MOVO B2, B3; MOVO C2, C3; MOVO D2, D3; PADDL ·sseIncMask<>(SB), D3 - - // Store counters - MOVO D0, ctr0Store; MOVO D1, ctr1Store; MOVO D2, ctr2Store; MOVO D3, ctr3Store - MOVQ $10, itr2 - -sealSSEIntroLoop: - MOVO C3, tmpStore - chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) - MOVO tmpStore, C3 - MOVO C1, tmpStore - chachaQR(A3, B3, C3, D3, C1) - MOVO tmpStore, C1 - shiftB0Left; shiftB1Left; shiftB2Left; shiftB3Left - shiftC0Left; shiftC1Left; shiftC2Left; shiftC3Left - shiftD0Left; shiftD1Left; shiftD2Left; shiftD3Left - - MOVO C3, tmpStore - chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) - MOVO tmpStore, C3 - MOVO C1, tmpStore - chachaQR(A3, B3, C3, D3, C1) - MOVO tmpStore, C1 - shiftB0Right; shiftB1Right; shiftB2Right; shiftB3Right - shiftC0Right; shiftC1Right; shiftC2Right; shiftC3Right - shiftD0Right; shiftD1Right; shiftD2Right; shiftD3Right - DECQ itr2 - JNE sealSSEIntroLoop - - // Add in the state - PADDD ·chacha20Constants<>(SB), A0; PADDD ·chacha20Constants<>(SB), A1; PADDD ·chacha20Constants<>(SB), A2; PADDD ·chacha20Constants<>(SB), A3 - PADDD state1Store, B0; PADDD state1Store, B1; PADDD state1Store, B2; PADDD state1Store, B3 - PADDD state2Store, C1; PADDD state2Store, C2; PADDD state2Store, C3 - PADDD ctr1Store, D1; PADDD ctr2Store, D2; PADDD ctr3Store, D3 - - // Clamp and store the key - PAND ·polyClampMask<>(SB), A0 - MOVO A0, rStore - MOVO B0, sStore - - // Hash AAD - MOVQ ad_len+80(FP), itr2 - CALL polyHashADInternal<>(SB) - - MOVOU (0*16)(inp), A0; MOVOU (1*16)(inp), B0; MOVOU (2*16)(inp), C0; MOVOU (3*16)(inp), D0 - PXOR A0, A1; PXOR B0, B1; PXOR C0, C1; PXOR D0, D1 - MOVOU A1, (0*16)(oup); MOVOU B1, (1*16)(oup); MOVOU C1, (2*16)(oup); MOVOU D1, (3*16)(oup) - MOVOU (4*16)(inp), A0; 
MOVOU (5*16)(inp), B0; MOVOU (6*16)(inp), C0; MOVOU (7*16)(inp), D0 - PXOR A0, A2; PXOR B0, B2; PXOR C0, C2; PXOR D0, D2 - MOVOU A2, (4*16)(oup); MOVOU B2, (5*16)(oup); MOVOU C2, (6*16)(oup); MOVOU D2, (7*16)(oup) - - MOVQ $128, itr1 - SUBQ $128, inl - LEAQ 128(inp), inp - - MOVO A3, A1; MOVO B3, B1; MOVO C3, C1; MOVO D3, D1 - - CMPQ inl, $64 - JBE sealSSE128SealHash - - MOVOU (0*16)(inp), A0; MOVOU (1*16)(inp), B0; MOVOU (2*16)(inp), C0; MOVOU (3*16)(inp), D0 - PXOR A0, A3; PXOR B0, B3; PXOR C0, C3; PXOR D0, D3 - MOVOU A3, (8*16)(oup); MOVOU B3, (9*16)(oup); MOVOU C3, (10*16)(oup); MOVOU D3, (11*16)(oup) - - ADDQ $64, itr1 - SUBQ $64, inl - LEAQ 64(inp), inp - - MOVQ $2, itr1 - MOVQ $8, itr2 - - CMPQ inl, $64 - JBE sealSSETail64 - CMPQ inl, $128 - JBE sealSSETail128 - CMPQ inl, $192 - JBE sealSSETail192 - -sealSSEMainLoop: - // Load state, increment counter blocks - MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0 - MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 - MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 - MOVO A2, A3; MOVO B2, B3; MOVO C2, C3; MOVO D2, D3; PADDL ·sseIncMask<>(SB), D3 - - // Store counters - MOVO D0, ctr0Store; MOVO D1, ctr1Store; MOVO D2, ctr2Store; MOVO D3, ctr3Store - -sealSSEInnerLoop: - MOVO C3, tmpStore - chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) - MOVO tmpStore, C3 - MOVO C1, tmpStore - chachaQR(A3, B3, C3, D3, C1) - MOVO tmpStore, C1 - polyAdd(0(oup)) - shiftB0Left; shiftB1Left; shiftB2Left; shiftB3Left - shiftC0Left; shiftC1Left; shiftC2Left; shiftC3Left - shiftD0Left; shiftD1Left; shiftD2Left; shiftD3Left - polyMulStage1 - polyMulStage2 - LEAQ (2*8)(oup), oup - MOVO C3, tmpStore - chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) - MOVO tmpStore, C3 - MOVO C1, tmpStore - polyMulStage3 - chachaQR(A3, B3, C3, D3, C1) - MOVO tmpStore, C1 - polyMulReduceStage - shiftB0Right; shiftB1Right; shiftB2Right; shiftB3Right - shiftC0Right; shiftC1Right; shiftC2Right; shiftC3Right - shiftD0Right; shiftD1Right; shiftD2Right; shiftD3Right - DECQ itr2 - JGE sealSSEInnerLoop - polyAdd(0(oup)) - polyMul - LEAQ (2*8)(oup), oup - DECQ itr1 - JG sealSSEInnerLoop - - // Add in the state - PADDD ·chacha20Constants<>(SB), A0; PADDD ·chacha20Constants<>(SB), A1; PADDD ·chacha20Constants<>(SB), A2; PADDD ·chacha20Constants<>(SB), A3 - PADDD state1Store, B0; PADDD state1Store, B1; PADDD state1Store, B2; PADDD state1Store, B3 - PADDD state2Store, C0; PADDD state2Store, C1; PADDD state2Store, C2; PADDD state2Store, C3 - PADDD ctr0Store, D0; PADDD ctr1Store, D1; PADDD ctr2Store, D2; PADDD ctr3Store, D3 - MOVO D3, tmpStore - - // Load - xor - store - MOVOU (0*16)(inp), D3; PXOR D3, A0 - MOVOU (1*16)(inp), D3; PXOR D3, B0 - MOVOU (2*16)(inp), D3; PXOR D3, C0 - MOVOU (3*16)(inp), D3; PXOR D3, D0 - MOVOU A0, (0*16)(oup) - MOVOU B0, (1*16)(oup) - MOVOU C0, (2*16)(oup) - MOVOU D0, (3*16)(oup) - MOVO tmpStore, D3 - - MOVOU (4*16)(inp), A0; MOVOU (5*16)(inp), B0; MOVOU (6*16)(inp), C0; MOVOU (7*16)(inp), D0 - PXOR A0, A1; PXOR B0, B1; PXOR C0, C1; PXOR D0, D1 - MOVOU A1, (4*16)(oup); MOVOU B1, (5*16)(oup); MOVOU C1, (6*16)(oup); MOVOU D1, (7*16)(oup) - MOVOU (8*16)(inp), A0; MOVOU (9*16)(inp), B0; MOVOU (10*16)(inp), C0; MOVOU (11*16)(inp), D0 - PXOR A0, A2; PXOR B0, B2; PXOR C0, C2; PXOR D0, D2 - MOVOU A2, (8*16)(oup); MOVOU B2, (9*16)(oup); MOVOU C2, (10*16)(oup); 
MOVOU D2, (11*16)(oup) - ADDQ $192, inp - MOVQ $192, itr1 - SUBQ $192, inl - MOVO A3, A1 - MOVO B3, B1 - MOVO C3, C1 - MOVO D3, D1 - CMPQ inl, $64 - JBE sealSSE128SealHash - MOVOU (0*16)(inp), A0; MOVOU (1*16)(inp), B0; MOVOU (2*16)(inp), C0; MOVOU (3*16)(inp), D0 - PXOR A0, A3; PXOR B0, B3; PXOR C0, C3; PXOR D0, D3 - MOVOU A3, (12*16)(oup); MOVOU B3, (13*16)(oup); MOVOU C3, (14*16)(oup); MOVOU D3, (15*16)(oup) - LEAQ 64(inp), inp - SUBQ $64, inl - MOVQ $6, itr1 - MOVQ $4, itr2 - CMPQ inl, $192 - JG sealSSEMainLoop - - MOVQ inl, itr1 - TESTQ inl, inl - JE sealSSE128SealHash - MOVQ $6, itr1 - CMPQ inl, $64 - JBE sealSSETail64 - CMPQ inl, $128 - JBE sealSSETail128 - JMP sealSSETail192 - -// ---------------------------------------------------------------------------- -// Special optimization for the last 64 bytes of plaintext -sealSSETail64: - // Need to encrypt up to 64 bytes - prepare single block, hash 192 or 256 bytes - MOVO ·chacha20Constants<>(SB), A1 - MOVO state1Store, B1 - MOVO state2Store, C1 - MOVO ctr3Store, D1 - PADDL ·sseIncMask<>(SB), D1 - MOVO D1, ctr0Store - -sealSSETail64LoopA: - // Perform ChaCha rounds, while hashing the previously encrypted ciphertext - polyAdd(0(oup)) - polyMul - LEAQ 16(oup), oup - -sealSSETail64LoopB: - chachaQR(A1, B1, C1, D1, T1) - shiftB1Left; shiftC1Left; shiftD1Left - chachaQR(A1, B1, C1, D1, T1) - shiftB1Right; shiftC1Right; shiftD1Right - polyAdd(0(oup)) - polyMul - LEAQ 16(oup), oup - - DECQ itr1 - JG sealSSETail64LoopA - - DECQ itr2 - JGE sealSSETail64LoopB - PADDL ·chacha20Constants<>(SB), A1 - PADDL state1Store, B1 - PADDL state2Store, C1 - PADDL ctr0Store, D1 - - JMP sealSSE128Seal - -// ---------------------------------------------------------------------------- -// Special optimization for the last 128 bytes of plaintext -sealSSETail128: - // Need to encrypt up to 128 bytes - prepare two blocks, hash 192 or 256 bytes - MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr0Store - MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1; MOVO D1, ctr1Store - -sealSSETail128LoopA: - // Perform ChaCha rounds, while hashing the previously encrypted ciphertext - polyAdd(0(oup)) - polyMul - LEAQ 16(oup), oup - -sealSSETail128LoopB: - chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0) - shiftB0Left; shiftC0Left; shiftD0Left - shiftB1Left; shiftC1Left; shiftD1Left - polyAdd(0(oup)) - polyMul - LEAQ 16(oup), oup - chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0) - shiftB0Right; shiftC0Right; shiftD0Right - shiftB1Right; shiftC1Right; shiftD1Right - - DECQ itr1 - JG sealSSETail128LoopA - - DECQ itr2 - JGE sealSSETail128LoopB - - PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1 - PADDL state1Store, B0; PADDL state1Store, B1 - PADDL state2Store, C0; PADDL state2Store, C1 - PADDL ctr0Store, D0; PADDL ctr1Store, D1 - - MOVOU (0*16)(inp), T0; MOVOU (1*16)(inp), T1; MOVOU (2*16)(inp), T2; MOVOU (3*16)(inp), T3 - PXOR T0, A0; PXOR T1, B0; PXOR T2, C0; PXOR T3, D0 - MOVOU A0, (0*16)(oup); MOVOU B0, (1*16)(oup); MOVOU C0, (2*16)(oup); MOVOU D0, (3*16)(oup) - - MOVQ $64, itr1 - LEAQ 64(inp), inp - SUBQ $64, inl - - JMP sealSSE128SealHash - -// ---------------------------------------------------------------------------- -// Special optimization for the last 192 bytes of plaintext -sealSSETail192: - // Need to encrypt up to 192 bytes - prepare three blocks, hash 192 or 256 bytes - MOVO ·chacha20Constants<>(SB), 
A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr0Store - MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1; MOVO D1, ctr1Store - MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2; MOVO D2, ctr2Store - -sealSSETail192LoopA: - // Perform ChaCha rounds, while hashing the previously encrypted ciphertext - polyAdd(0(oup)) - polyMul - LEAQ 16(oup), oup - -sealSSETail192LoopB: - chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) - shiftB0Left; shiftC0Left; shiftD0Left - shiftB1Left; shiftC1Left; shiftD1Left - shiftB2Left; shiftC2Left; shiftD2Left - - polyAdd(0(oup)) - polyMul - LEAQ 16(oup), oup - - chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) - shiftB0Right; shiftC0Right; shiftD0Right - shiftB1Right; shiftC1Right; shiftD1Right - shiftB2Right; shiftC2Right; shiftD2Right - - DECQ itr1 - JG sealSSETail192LoopA - - DECQ itr2 - JGE sealSSETail192LoopB - - PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1; PADDL ·chacha20Constants<>(SB), A2 - PADDL state1Store, B0; PADDL state1Store, B1; PADDL state1Store, B2 - PADDL state2Store, C0; PADDL state2Store, C1; PADDL state2Store, C2 - PADDL ctr0Store, D0; PADDL ctr1Store, D1; PADDL ctr2Store, D2 - - MOVOU (0*16)(inp), T0; MOVOU (1*16)(inp), T1; MOVOU (2*16)(inp), T2; MOVOU (3*16)(inp), T3 - PXOR T0, A0; PXOR T1, B0; PXOR T2, C0; PXOR T3, D0 - MOVOU A0, (0*16)(oup); MOVOU B0, (1*16)(oup); MOVOU C0, (2*16)(oup); MOVOU D0, (3*16)(oup) - MOVOU (4*16)(inp), T0; MOVOU (5*16)(inp), T1; MOVOU (6*16)(inp), T2; MOVOU (7*16)(inp), T3 - PXOR T0, A1; PXOR T1, B1; PXOR T2, C1; PXOR T3, D1 - MOVOU A1, (4*16)(oup); MOVOU B1, (5*16)(oup); MOVOU C1, (6*16)(oup); MOVOU D1, (7*16)(oup) - - MOVO A2, A1 - MOVO B2, B1 - MOVO C2, C1 - MOVO D2, D1 - MOVQ $128, itr1 - LEAQ 128(inp), inp - SUBQ $128, inl - - JMP sealSSE128SealHash - -// ---------------------------------------------------------------------------- -// Special seal optimization for buffers smaller than 129 bytes -sealSSE128: - // For up to 128 bytes of ciphertext and 64 bytes for the poly key, we require to process three blocks - MOVOU ·chacha20Constants<>(SB), A0; MOVOU (1*16)(keyp), B0; MOVOU (2*16)(keyp), C0; MOVOU (3*16)(keyp), D0 - MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 - MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 - MOVO B0, T1; MOVO C0, T2; MOVO D1, T3 - MOVQ $10, itr2 - -sealSSE128InnerCipherLoop: - chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) - shiftB0Left; shiftB1Left; shiftB2Left - shiftC0Left; shiftC1Left; shiftC2Left - shiftD0Left; shiftD1Left; shiftD2Left - chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) - shiftB0Right; shiftB1Right; shiftB2Right - shiftC0Right; shiftC1Right; shiftC2Right - shiftD0Right; shiftD1Right; shiftD2Right - DECQ itr2 - JNE sealSSE128InnerCipherLoop - - // A0|B0 hold the Poly1305 32-byte key, C0,D0 can be discarded - PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1; PADDL ·chacha20Constants<>(SB), A2 - PADDL T1, B0; PADDL T1, B1; PADDL T1, B2 - PADDL T2, C1; PADDL T2, C2 - PADDL T3, D1; PADDL ·sseIncMask<>(SB), T3; PADDL T3, D2 - PAND ·polyClampMask<>(SB), A0 - MOVOU A0, rStore - MOVOU B0, sStore - - // Hash - MOVQ ad_len+80(FP), itr2 - CALL polyHashADInternal<>(SB) - XORQ itr1, 
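The PAND ·polyClampMask step just above is the standard Poly1305 key clamp: the seal paths take the first 32 bytes of ChaCha20 keystream block 0 as the one-time key (r || s) and mask r before storing it in rStore/sStore. A plain-Go sketch of the same derivation, assuming only the public golang.org/x/crypto/chacha20 API (the function name is illustrative):

import "golang.org/x/crypto/chacha20"

// oneTimePolyKey derives the clamped Poly1305 key the way the seal code
// does: keystream block 0 supplies 32 bytes, and r is masked exactly as
// the PAND ·polyClampMask step masks it. nonce must be 12 (or 24) bytes.
func oneTimePolyKey(key, nonce []byte) (r, s [16]byte, err error) {
	c, err := chacha20.NewUnauthenticatedCipher(key, nonce)
	if err != nil {
		return r, s, err
	}
	var block [64]byte // block 0; encryption itself starts at counter 1
	c.XORKeyStream(block[:], block[:])
	copy(r[:], block[:16])
	copy(s[:], block[16:32])
	for _, i := range []int{3, 7, 11, 15} {
		r[i] &= 0x0f // clear the top four bits of each 32-bit limb of r
	}
	for _, i := range []int{4, 8, 12} {
		r[i] &= 0xfc // clear the low two bits of limbs 1-3
	}
	return r, s, nil
}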
itr1 - -sealSSE128SealHash: - // itr1 holds the number of bytes encrypted but not yet hashed - CMPQ itr1, $16 - JB sealSSE128Seal - polyAdd(0(oup)) - polyMul - - SUBQ $16, itr1 - ADDQ $16, oup - - JMP sealSSE128SealHash - -sealSSE128Seal: - CMPQ inl, $16 - JB sealSSETail - SUBQ $16, inl - - // Load for decryption - MOVOU (inp), T0 - PXOR T0, A1 - MOVOU A1, (oup) - LEAQ (1*16)(inp), inp - LEAQ (1*16)(oup), oup - - // Extract for hashing - MOVQ A1, t0 - PSRLDQ $8, A1 - MOVQ A1, t1 - ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $1, acc2 - polyMul - - // Shift the stream "left" - MOVO B1, A1 - MOVO C1, B1 - MOVO D1, C1 - MOVO A2, D1 - MOVO B2, A2 - MOVO C2, B2 - MOVO D2, C2 - JMP sealSSE128Seal - -sealSSETail: - TESTQ inl, inl - JE sealSSEFinalize - - // We can only load the PT one byte at a time to avoid read after end of buffer - MOVQ inl, itr2 - SHLQ $4, itr2 - LEAQ ·andMask<>(SB), t0 - MOVQ inl, itr1 - LEAQ -1(inp)(inl*1), inp - XORQ t2, t2 - XORQ t3, t3 - XORQ AX, AX - -sealSSETailLoadLoop: - SHLQ $8, t2, t3 - SHLQ $8, t2 - MOVB (inp), AX - XORQ AX, t2 - LEAQ -1(inp), inp - DECQ itr1 - JNE sealSSETailLoadLoop - MOVQ t2, 0+tmpStore - MOVQ t3, 8+tmpStore - PXOR 0+tmpStore, A1 - MOVOU A1, (oup) - MOVOU -16(t0)(itr2*1), T0 - PAND T0, A1 - MOVQ A1, t0 - PSRLDQ $8, A1 - MOVQ A1, t1 - ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $1, acc2 - polyMul - - ADDQ inl, oup - -sealSSEFinalize: - // Hash in the buffer lengths - ADDQ ad_len+80(FP), acc0 - ADCQ src_len+56(FP), acc1 - ADCQ $1, acc2 - polyMul - - // Final reduce - MOVQ acc0, t0 - MOVQ acc1, t1 - MOVQ acc2, t2 - SUBQ $-5, acc0 - SBBQ $-1, acc1 - SBBQ $3, acc2 - CMOVQCS t0, acc0 - CMOVQCS t1, acc1 - CMOVQCS t2, acc2 - - // Add in the "s" part of the key - ADDQ 0+sStore, acc0 - ADCQ 8+sStore, acc1 - - // Finally store the tag at the end of the message - MOVQ acc0, (0*8)(oup) - MOVQ acc1, (1*8)(oup) - RET - -// ---------------------------------------------------------------------------- -// ------------------------- AVX2 Code ---------------------------------------- -chacha20Poly1305Seal_AVX2: - VZEROUPPER - VMOVDQU ·chacha20Constants<>(SB), AA0 - BYTE $0xc4; BYTE $0x42; BYTE $0x7d; BYTE $0x5a; BYTE $0x70; BYTE $0x10 // broadcasti128 16(r8), ymm14 - BYTE $0xc4; BYTE $0x42; BYTE $0x7d; BYTE $0x5a; BYTE $0x60; BYTE $0x20 // broadcasti128 32(r8), ymm12 - BYTE $0xc4; BYTE $0xc2; BYTE $0x7d; BYTE $0x5a; BYTE $0x60; BYTE $0x30 // broadcasti128 48(r8), ymm4 - VPADDD ·avx2InitMask<>(SB), DD0, DD0 - - // Special optimizations, for very short buffers - CMPQ inl, $192 - JBE seal192AVX2 // 33% faster - CMPQ inl, $320 - JBE seal320AVX2 // 17% faster - - // For the general key prepare the key first - as a byproduct we have 64 bytes of cipher stream - VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 - VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3; VMOVDQA BB0, state1StoreAVX2 - VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3; VMOVDQA CC0, state2StoreAVX2 - VPADDD ·avx2IncMask<>(SB), DD0, DD1; VMOVDQA DD0, ctr0StoreAVX2 - VPADDD ·avx2IncMask<>(SB), DD1, DD2; VMOVDQA DD1, ctr1StoreAVX2 - VPADDD ·avx2IncMask<>(SB), DD2, DD3; VMOVDQA DD2, ctr2StoreAVX2 - VMOVDQA DD3, ctr3StoreAVX2 - MOVQ $10, itr2 - -sealAVX2IntroLoop: - VMOVDQA CC3, tmpStoreAVX2 - chachaQR_AVX2(AA0, BB0, CC0, DD0, CC3); chachaQR_AVX2(AA1, BB1, CC1, DD1, CC3); chachaQR_AVX2(AA2, BB2, CC2, DD2, CC3) - VMOVDQA tmpStoreAVX2, CC3 - VMOVDQA CC1, tmpStoreAVX2 - chachaQR_AVX2(AA3, BB3, CC3, DD3, CC1) - VMOVDQA tmpStoreAVX2, CC1 - - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $12, DD0, 
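The sealSSEFinalize block above performs the Poly1305 tag computation: one conditional subtraction of 2^130-5 (the SUBQ $-5 / SBBQ $-1 / SBBQ $3 / CMOVQCS sequence), then the low 128 bits of acc + s become the tag. A plain-Go sketch of that arithmetic, assuming the three accumulator limbs have already been carry-propagated (finalizeTag is an illustrative name; a real implementation selects without branching, as CMOVQCS does):

import "math/bits"

// finalizeTag reduces a 3-limb Poly1305 accumulator modulo 2^130-5 and
// adds the 16-byte "s" half of the one-time key modulo 2^128.
func finalizeTag(acc0, acc1, acc2, s0, s1 uint64) (t0, t1 uint64) {
	// acc - (2^130 - 5); the constant's limbs are 2^64-5, 2^64-1 and 3.
	r0, b := bits.Sub64(acc0, 0xFFFFFFFFFFFFFFFB, 0)
	r1, b := bits.Sub64(acc1, 0xFFFFFFFFFFFFFFFF, b)
	_, b = bits.Sub64(acc2, 3, b)
	if b == 1 { // borrow: acc was already below 2^130-5 (the CMOVQCS case)
		r0, r1 = acc0, acc1
	}
	// Add the "s" part of the key; the carry out of bit 127 is discarded.
	t0, c := bits.Add64(r0, s0, 0)
	t1, _ = bits.Add64(r1, s1, c)
	return t0, t1
}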
DD0, DD0 - VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $12, DD1, DD1, DD1 - VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $12, DD2, DD2, DD2 - VPALIGNR $4, BB3, BB3, BB3; VPALIGNR $8, CC3, CC3, CC3; VPALIGNR $12, DD3, DD3, DD3 - - VMOVDQA CC3, tmpStoreAVX2 - chachaQR_AVX2(AA0, BB0, CC0, DD0, CC3); chachaQR_AVX2(AA1, BB1, CC1, DD1, CC3); chachaQR_AVX2(AA2, BB2, CC2, DD2, CC3) - VMOVDQA tmpStoreAVX2, CC3 - VMOVDQA CC1, tmpStoreAVX2 - chachaQR_AVX2(AA3, BB3, CC3, DD3, CC1) - VMOVDQA tmpStoreAVX2, CC1 - - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $4, DD0, DD0, DD0 - VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $4, DD1, DD1, DD1 - VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $4, DD2, DD2, DD2 - VPALIGNR $12, BB3, BB3, BB3; VPALIGNR $8, CC3, CC3, CC3; VPALIGNR $4, DD3, DD3, DD3 - DECQ itr2 - JNE sealAVX2IntroLoop - - VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3 - VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3 - VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3 - VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3 - - VPERM2I128 $0x13, CC0, DD0, CC0 // Stream bytes 96 - 127 - VPERM2I128 $0x02, AA0, BB0, DD0 // The Poly1305 key - VPERM2I128 $0x13, AA0, BB0, AA0 // Stream bytes 64 - 95 - - // Clamp and store poly key - VPAND ·polyClampMask<>(SB), DD0, DD0 - VMOVDQA DD0, rsStoreAVX2 - - // Hash AD - MOVQ ad_len+80(FP), itr2 - CALL polyHashADInternal<>(SB) - - // Can store at least 320 bytes - VPXOR (0*32)(inp), AA0, AA0 - VPXOR (1*32)(inp), CC0, CC0 - VMOVDQU AA0, (0*32)(oup) - VMOVDQU CC0, (1*32)(oup) - - VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 - VPXOR (2*32)(inp), AA0, AA0; VPXOR (3*32)(inp), BB0, BB0; VPXOR (4*32)(inp), CC0, CC0; VPXOR (5*32)(inp), DD0, DD0 - VMOVDQU AA0, (2*32)(oup); VMOVDQU BB0, (3*32)(oup); VMOVDQU CC0, (4*32)(oup); VMOVDQU DD0, (5*32)(oup) - VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0 - VPXOR (6*32)(inp), AA0, AA0; VPXOR (7*32)(inp), BB0, BB0; VPXOR (8*32)(inp), CC0, CC0; VPXOR (9*32)(inp), DD0, DD0 - VMOVDQU AA0, (6*32)(oup); VMOVDQU BB0, (7*32)(oup); VMOVDQU CC0, (8*32)(oup); VMOVDQU DD0, (9*32)(oup) - - MOVQ $320, itr1 - SUBQ $320, inl - LEAQ 320(inp), inp - - VPERM2I128 $0x02, AA3, BB3, AA0; VPERM2I128 $0x02, CC3, DD3, BB0; VPERM2I128 $0x13, AA3, BB3, CC0; VPERM2I128 $0x13, CC3, DD3, DD0 - CMPQ inl, $128 - JBE sealAVX2SealHash - - VPXOR (0*32)(inp), AA0, AA0; VPXOR (1*32)(inp), BB0, BB0; VPXOR (2*32)(inp), CC0, CC0; VPXOR (3*32)(inp), DD0, DD0 - VMOVDQU AA0, (10*32)(oup); VMOVDQU BB0, (11*32)(oup); VMOVDQU CC0, (12*32)(oup); VMOVDQU DD0, (13*32)(oup) - SUBQ $128, inl - LEAQ 128(inp), inp - - MOVQ $8, itr1 - MOVQ $2, itr2 - - CMPQ inl, $128 - JBE sealAVX2Tail128 - CMPQ inl, $256 - JBE sealAVX2Tail256 - CMPQ inl, $384 - JBE sealAVX2Tail384 - CMPQ inl, $512 - JBE sealAVX2Tail512 - - // We have 448 bytes to hash, but main loop hashes 512 bytes at a time - perform some rounds, before the main loop - VMOVDQA 
·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 - VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3 - VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3 - VMOVDQA ctr3StoreAVX2, DD0 - VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3 - VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2 - - VMOVDQA CC3, tmpStoreAVX2 - chachaQR_AVX2(AA0, BB0, CC0, DD0, CC3); chachaQR_AVX2(AA1, BB1, CC1, DD1, CC3); chachaQR_AVX2(AA2, BB2, CC2, DD2, CC3) - VMOVDQA tmpStoreAVX2, CC3 - VMOVDQA CC1, tmpStoreAVX2 - chachaQR_AVX2(AA3, BB3, CC3, DD3, CC1) - VMOVDQA tmpStoreAVX2, CC1 - - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $12, DD0, DD0, DD0 - VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $12, DD1, DD1, DD1 - VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $12, DD2, DD2, DD2 - VPALIGNR $4, BB3, BB3, BB3; VPALIGNR $8, CC3, CC3, CC3; VPALIGNR $12, DD3, DD3, DD3 - - VMOVDQA CC3, tmpStoreAVX2 - chachaQR_AVX2(AA0, BB0, CC0, DD0, CC3); chachaQR_AVX2(AA1, BB1, CC1, DD1, CC3); chachaQR_AVX2(AA2, BB2, CC2, DD2, CC3) - VMOVDQA tmpStoreAVX2, CC3 - VMOVDQA CC1, tmpStoreAVX2 - chachaQR_AVX2(AA3, BB3, CC3, DD3, CC1) - VMOVDQA tmpStoreAVX2, CC1 - - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $4, DD0, DD0, DD0 - VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $4, DD1, DD1, DD1 - VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $4, DD2, DD2, DD2 - VPALIGNR $12, BB3, BB3, BB3; VPALIGNR $8, CC3, CC3, CC3; VPALIGNR $4, DD3, DD3, DD3 - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - - SUBQ $16, oup // Adjust the pointer - MOVQ $9, itr1 - JMP sealAVX2InternalLoopStart - -sealAVX2MainLoop: - // Load state, increment counter blocks, store the incremented counters - VMOVDQU ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 - VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3 - VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3 - VMOVDQA ctr3StoreAVX2, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3 - VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2 - MOVQ $10, itr1 - -sealAVX2InternalLoop: - polyAdd(0*8(oup)) - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - polyMulStage1_AVX2 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, 
DD2; VPXOR AA3, DD3, DD3 - VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 - polyMulStage2_AVX2 - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - polyMulStage3_AVX2 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - polyMulReduceStage - -sealAVX2InternalLoopStart: - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 - polyAdd(2*8(oup)) - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - polyMulStage1_AVX2 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - polyMulStage2_AVX2 - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $4, BB3, BB3, BB3 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 - VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2; VPALIGNR $12, DD3, DD3, DD3 - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - polyMulStage3_AVX2 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 - polyMulReduceStage - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - polyAdd(4*8(oup)) - LEAQ (6*8)(oup), oup - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - polyMulStage1_AVX2 - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - polyMulStage2_AVX2 - VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - polyMulStage3_AVX2 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, 
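In the AVX2 internal loop above, a single Poly1305 step is deliberately spread across polyMulStage1/2/3 and polyMulReduceStage so the scalar multiply pipelines alongside the vector instructions. The arithmetic each polyAdd/polyMul pairing performs is equivalent to the following slow big.Int sketch (polyUpdate and prime are illustrative names, not the constant-time limb code the macros implement):

import "math/big"

// prime is 2^130 - 5, the Poly1305 modulus.
var prime = new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 130), big.NewInt(5))

// polyUpdate is one polyAdd/polyMul pair:
// acc = (acc + block + 2^128) * r mod 2^130-5. The 2^128 term is the
// "ADCQ $1, acc2" hidden inside polyAdd.
func polyUpdate(acc, r *big.Int, block [16]byte) {
	m := new(big.Int)
	for i := 15; i >= 0; i-- { // 16 message bytes, loaded little-endian
		m.Lsh(m, 8).Or(m, big.NewInt(int64(block[i])))
	}
	m.SetBit(m, 128, 1)
	acc.Add(acc, m).Mul(acc, r).Mod(acc, prime)
}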
BB2; VPXOR CC3, BB2, BB2 - VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - polyMulReduceStage - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $12, BB3, BB3, BB3 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 - VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2; VPALIGNR $4, DD3, DD3, DD3 - DECQ itr1 - JNE sealAVX2InternalLoop - - VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3 - VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3 - VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3 - VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3 - VMOVDQA CC3, tmpStoreAVX2 - - // We only hashed 480 of the 512 bytes available - hash the remaining 32 here - polyAdd(0*8(oup)) - polyMulAVX2 - LEAQ (4*8)(oup), oup - VPERM2I128 $0x02, AA0, BB0, CC3; VPERM2I128 $0x13, AA0, BB0, BB0; VPERM2I128 $0x02, CC0, DD0, AA0; VPERM2I128 $0x13, CC0, DD0, CC0 - VPXOR (0*32)(inp), CC3, CC3; VPXOR (1*32)(inp), AA0, AA0; VPXOR (2*32)(inp), BB0, BB0; VPXOR (3*32)(inp), CC0, CC0 - VMOVDQU CC3, (0*32)(oup); VMOVDQU AA0, (1*32)(oup); VMOVDQU BB0, (2*32)(oup); VMOVDQU CC0, (3*32)(oup) - VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 - VPXOR (4*32)(inp), AA0, AA0; VPXOR (5*32)(inp), BB0, BB0; VPXOR (6*32)(inp), CC0, CC0; VPXOR (7*32)(inp), DD0, DD0 - VMOVDQU AA0, (4*32)(oup); VMOVDQU BB0, (5*32)(oup); VMOVDQU CC0, (6*32)(oup); VMOVDQU DD0, (7*32)(oup) - - // and here - polyAdd(-2*8(oup)) - polyMulAVX2 - VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0 - VPXOR (8*32)(inp), AA0, AA0; VPXOR (9*32)(inp), BB0, BB0; VPXOR (10*32)(inp), CC0, CC0; VPXOR (11*32)(inp), DD0, DD0 - VMOVDQU AA0, (8*32)(oup); VMOVDQU BB0, (9*32)(oup); VMOVDQU CC0, (10*32)(oup); VMOVDQU DD0, (11*32)(oup) - VPERM2I128 $0x02, AA3, BB3, AA0; VPERM2I128 $0x02, tmpStoreAVX2, DD3, BB0; VPERM2I128 $0x13, AA3, BB3, CC0; VPERM2I128 $0x13, tmpStoreAVX2, DD3, DD0 - VPXOR (12*32)(inp), AA0, AA0; VPXOR (13*32)(inp), BB0, BB0; VPXOR (14*32)(inp), CC0, CC0; VPXOR (15*32)(inp), DD0, DD0 - VMOVDQU AA0, (12*32)(oup); VMOVDQU BB0, (13*32)(oup); VMOVDQU CC0, (14*32)(oup); VMOVDQU DD0, (15*32)(oup) - LEAQ (32*16)(inp), inp - SUBQ $(32*16), inl - CMPQ inl, $512 - JG sealAVX2MainLoop - - // Tail can only hash 480 bytes - polyAdd(0*8(oup)) - polyMulAVX2 - polyAdd(2*8(oup)) - polyMulAVX2 - LEAQ 32(oup), oup - - MOVQ $10, itr1 - MOVQ $0, itr2 - CMPQ inl, $128 - JBE sealAVX2Tail128 - CMPQ inl, $256 - JBE sealAVX2Tail256 - CMPQ inl, $384 - JBE sealAVX2Tail384 - JMP sealAVX2Tail512 - -// ---------------------------------------------------------------------------- -// Special optimization for buffers smaller than 193 bytes -seal192AVX2: - // For up to 192 bytes of ciphertext and 64 bytes for the poly key, we process four blocks - VMOVDQA AA0, AA1 - VMOVDQA BB0, BB1 - VMOVDQA CC0, CC1 - VPADDD ·avx2IncMask<>(SB), DD0, DD1 - VMOVDQA AA0, AA2 - VMOVDQA BB0, BB2 - VMOVDQA CC0, CC2 - 
VMOVDQA DD0, DD2 - VMOVDQA DD1, TT3 - MOVQ $10, itr2 - -sealAVX2192InnerCipherLoop: - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 - VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1 - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 - VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1 - DECQ itr2 - JNE sealAVX2192InnerCipherLoop - VPADDD AA2, AA0, AA0; VPADDD AA2, AA1, AA1 - VPADDD BB2, BB0, BB0; VPADDD BB2, BB1, BB1 - VPADDD CC2, CC0, CC0; VPADDD CC2, CC1, CC1 - VPADDD DD2, DD0, DD0; VPADDD TT3, DD1, DD1 - VPERM2I128 $0x02, AA0, BB0, TT0 - - // Clamp and store poly key - VPAND ·polyClampMask<>(SB), TT0, TT0 - VMOVDQA TT0, rsStoreAVX2 - - // Stream for up to 192 bytes - VPERM2I128 $0x13, AA0, BB0, AA0 - VPERM2I128 $0x13, CC0, DD0, BB0 - VPERM2I128 $0x02, AA1, BB1, CC0 - VPERM2I128 $0x02, CC1, DD1, DD0 - VPERM2I128 $0x13, AA1, BB1, AA1 - VPERM2I128 $0x13, CC1, DD1, BB1 - -sealAVX2ShortSeal: - // Hash aad - MOVQ ad_len+80(FP), itr2 - CALL polyHashADInternal<>(SB) - XORQ itr1, itr1 - -sealAVX2SealHash: - // itr1 holds the number of bytes encrypted but not yet hashed - CMPQ itr1, $16 - JB sealAVX2ShortSealLoop - polyAdd(0(oup)) - polyMul - SUBQ $16, itr1 - ADDQ $16, oup - JMP sealAVX2SealHash - -sealAVX2ShortSealLoop: - CMPQ inl, $32 - JB sealAVX2ShortTail32 - SUBQ $32, inl - - // Load for encryption - VPXOR (inp), AA0, AA0 - VMOVDQU AA0, (oup) - LEAQ (1*32)(inp), inp - - // Now can hash - polyAdd(0*8(oup)) - polyMulAVX2 - polyAdd(2*8(oup)) - polyMulAVX2 - LEAQ (1*32)(oup), oup - - // Shift stream left - VMOVDQA BB0, AA0 - VMOVDQA CC0, BB0 - VMOVDQA DD0, CC0 - VMOVDQA AA1, DD0 - VMOVDQA BB1, AA1 - VMOVDQA CC1, BB1 - VMOVDQA DD1, CC1 - VMOVDQA AA2, DD1 - VMOVDQA BB2, AA2 - JMP sealAVX2ShortSealLoop - -sealAVX2ShortTail32: - CMPQ inl, $16 - VMOVDQA A0, A1 - JB sealAVX2ShortDone - - SUBQ $16, inl - - // Load for encryption - VPXOR (inp), A0, T0 - VMOVDQU T0, (oup) - LEAQ (1*16)(inp), inp - - // Hash - polyAdd(0*8(oup)) - polyMulAVX2 - LEAQ (1*16)(oup), oup - VPERM2I128 $0x11, AA0, AA0, AA0 - VMOVDQA A0, A1 - -sealAVX2ShortDone: - VZEROUPPER - JMP sealSSETail - -// ---------------------------------------------------------------------------- -// Special optimization for buffers smaller than 321 bytes -seal320AVX2: - // For up to 320 bytes of ciphertext and 64 bytes for the poly key, we process six blocks - VMOVDQA AA0, AA1; VMOVDQA BB0, BB1; VMOVDQA CC0, CC1; VPADDD ·avx2IncMask<>(SB), DD0, DD1 - VMOVDQA AA0, AA2; VMOVDQA BB0, BB2; VMOVDQA CC0, CC2; VPADDD ·avx2IncMask<>(SB), DD1, DD2 - VMOVDQA BB0, TT1; VMOVDQA CC0, TT2; VMOVDQA DD0, TT3 - MOVQ $10, itr2 - -sealAVX2320InnerCipherLoop: - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 - VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2 - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2 - VPALIGNR $8, CC0, CC0, CC0; 
VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 - VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2 - DECQ itr2 - JNE sealAVX2320InnerCipherLoop - - VMOVDQA ·chacha20Constants<>(SB), TT0 - VPADDD TT0, AA0, AA0; VPADDD TT0, AA1, AA1; VPADDD TT0, AA2, AA2 - VPADDD TT1, BB0, BB0; VPADDD TT1, BB1, BB1; VPADDD TT1, BB2, BB2 - VPADDD TT2, CC0, CC0; VPADDD TT2, CC1, CC1; VPADDD TT2, CC2, CC2 - VMOVDQA ·avx2IncMask<>(SB), TT0 - VPADDD TT3, DD0, DD0; VPADDD TT0, TT3, TT3 - VPADDD TT3, DD1, DD1; VPADDD TT0, TT3, TT3 - VPADDD TT3, DD2, DD2 - - // Clamp and store poly key - VPERM2I128 $0x02, AA0, BB0, TT0 - VPAND ·polyClampMask<>(SB), TT0, TT0 - VMOVDQA TT0, rsStoreAVX2 - - // Stream for up to 320 bytes - VPERM2I128 $0x13, AA0, BB0, AA0 - VPERM2I128 $0x13, CC0, DD0, BB0 - VPERM2I128 $0x02, AA1, BB1, CC0 - VPERM2I128 $0x02, CC1, DD1, DD0 - VPERM2I128 $0x13, AA1, BB1, AA1 - VPERM2I128 $0x13, CC1, DD1, BB1 - VPERM2I128 $0x02, AA2, BB2, CC1 - VPERM2I128 $0x02, CC2, DD2, DD1 - VPERM2I128 $0x13, AA2, BB2, AA2 - VPERM2I128 $0x13, CC2, DD2, BB2 - JMP sealAVX2ShortSeal - -// ---------------------------------------------------------------------------- -// Special optimization for the last 128 bytes of ciphertext -sealAVX2Tail128: - // Need to encrypt up to 128 bytes - prepare two blocks - // If we got here after the main loop - there are 512 encrypted bytes waiting to be hashed - // If we got here before the main loop - there are 448 encrypted bytes waiting to be hashed - VMOVDQA ·chacha20Constants<>(SB), AA0 - VMOVDQA state1StoreAVX2, BB0 - VMOVDQA state2StoreAVX2, CC0 - VMOVDQA ctr3StoreAVX2, DD0 - VPADDD ·avx2IncMask<>(SB), DD0, DD0 - VMOVDQA DD0, DD1 - -sealAVX2Tail128LoopA: - polyAdd(0(oup)) - polyMul - LEAQ 16(oup), oup - -sealAVX2Tail128LoopB: - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0) - polyAdd(0(oup)) - polyMul - VPALIGNR $4, BB0, BB0, BB0 - VPALIGNR $8, CC0, CC0, CC0 - VPALIGNR $12, DD0, DD0, DD0 - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0) - polyAdd(16(oup)) - polyMul - LEAQ 32(oup), oup - VPALIGNR $12, BB0, BB0, BB0 - VPALIGNR $8, CC0, CC0, CC0 - VPALIGNR $4, DD0, DD0, DD0 - DECQ itr1 - JG sealAVX2Tail128LoopA - DECQ itr2 - JGE sealAVX2Tail128LoopB - - VPADDD ·chacha20Constants<>(SB), AA0, AA1 - VPADDD state1StoreAVX2, BB0, BB1 - VPADDD state2StoreAVX2, CC0, CC1 - VPADDD DD1, DD0, DD1 - - VPERM2I128 $0x02, AA1, BB1, AA0 - VPERM2I128 $0x02, CC1, DD1, BB0 - VPERM2I128 $0x13, AA1, BB1, CC0 - VPERM2I128 $0x13, CC1, DD1, DD0 - JMP sealAVX2ShortSealLoop - -// ---------------------------------------------------------------------------- -// Special optimization for the last 256 bytes of ciphertext -sealAVX2Tail256: - // Need to encrypt up to 256 bytes - prepare two blocks - // If we got here after the main loop - there are 512 encrypted bytes waiting to be hashed - // If we got here before the main loop - there are 448 encrypted bytes waiting to be hashed - VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA ·chacha20Constants<>(SB), AA1 - VMOVDQA state1StoreAVX2, BB0; VMOVDQA state1StoreAVX2, BB1 - VMOVDQA state2StoreAVX2, CC0; VMOVDQA state2StoreAVX2, CC1 - VMOVDQA ctr3StoreAVX2, DD0 - VPADDD ·avx2IncMask<>(SB), DD0, DD0 - VPADDD ·avx2IncMask<>(SB), DD0, DD1 - VMOVDQA DD0, TT1 - VMOVDQA DD1, TT2 - -sealAVX2Tail256LoopA: - polyAdd(0(oup)) - polyMul - LEAQ 16(oup), oup - -sealAVX2Tail256LoopB: - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) - polyAdd(0(oup)) - polyMul - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1 - VPALIGNR $8, CC0,
CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 - VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1 - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) - polyAdd(16(oup)) - polyMul - LEAQ 32(oup), oup - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 - VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1 - DECQ itr1 - JG sealAVX2Tail256LoopA - DECQ itr2 - JGE sealAVX2Tail256LoopB - - VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1 - VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1 - VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1 - VPADDD TT1, DD0, DD0; VPADDD TT2, DD1, DD1 - VPERM2I128 $0x02, AA0, BB0, TT0 - VPERM2I128 $0x02, CC0, DD0, TT1 - VPERM2I128 $0x13, AA0, BB0, TT2 - VPERM2I128 $0x13, CC0, DD0, TT3 - VPXOR (0*32)(inp), TT0, TT0; VPXOR (1*32)(inp), TT1, TT1; VPXOR (2*32)(inp), TT2, TT2; VPXOR (3*32)(inp), TT3, TT3 - VMOVDQU TT0, (0*32)(oup); VMOVDQU TT1, (1*32)(oup); VMOVDQU TT2, (2*32)(oup); VMOVDQU TT3, (3*32)(oup) - MOVQ $128, itr1 - LEAQ 128(inp), inp - SUBQ $128, inl - VPERM2I128 $0x02, AA1, BB1, AA0 - VPERM2I128 $0x02, CC1, DD1, BB0 - VPERM2I128 $0x13, AA1, BB1, CC0 - VPERM2I128 $0x13, CC1, DD1, DD0 - - JMP sealAVX2SealHash - -// ---------------------------------------------------------------------------- -// Special optimization for the last 384 bytes of ciphertext -sealAVX2Tail384: - // Need to encrypt up to 384 bytes - prepare two blocks - // If we got here after the main loop - there are 512 encrypted bytes waiting to be hashed - // If we got here before the main loop - there are 448 encrypted bytes waiting to be hashed - VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2 - VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2 - VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2 - VMOVDQA ctr3StoreAVX2, DD0 - VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2 - VMOVDQA DD0, TT1; VMOVDQA DD1, TT2; VMOVDQA DD2, TT3 - -sealAVX2Tail384LoopA: - polyAdd(0(oup)) - polyMul - LEAQ 16(oup), oup - -sealAVX2Tail384LoopB: - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) - polyAdd(0(oup)) - polyMul - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 - VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2 - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) - polyAdd(16(oup)) - polyMul - LEAQ 32(oup), oup - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 - VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2 - DECQ itr1 - JG sealAVX2Tail384LoopA - DECQ itr2 - JGE sealAVX2Tail384LoopB - - VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2 - VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2 - VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2 - VPADDD TT1, DD0, DD0; VPADDD TT2, DD1, DD1; VPADDD TT3, DD2, DD2 -
VPERM2I128 $0x02, AA0, BB0, TT0 - VPERM2I128 $0x02, CC0, DD0, TT1 - VPERM2I128 $0x13, AA0, BB0, TT2 - VPERM2I128 $0x13, CC0, DD0, TT3 - VPXOR (0*32)(inp), TT0, TT0; VPXOR (1*32)(inp), TT1, TT1; VPXOR (2*32)(inp), TT2, TT2; VPXOR (3*32)(inp), TT3, TT3 - VMOVDQU TT0, (0*32)(oup); VMOVDQU TT1, (1*32)(oup); VMOVDQU TT2, (2*32)(oup); VMOVDQU TT3, (3*32)(oup) - VPERM2I128 $0x02, AA1, BB1, TT0 - VPERM2I128 $0x02, CC1, DD1, TT1 - VPERM2I128 $0x13, AA1, BB1, TT2 - VPERM2I128 $0x13, CC1, DD1, TT3 - VPXOR (4*32)(inp), TT0, TT0; VPXOR (5*32)(inp), TT1, TT1; VPXOR (6*32)(inp), TT2, TT2; VPXOR (7*32)(inp), TT3, TT3 - VMOVDQU TT0, (4*32)(oup); VMOVDQU TT1, (5*32)(oup); VMOVDQU TT2, (6*32)(oup); VMOVDQU TT3, (7*32)(oup) - MOVQ $256, itr1 - LEAQ 256(inp), inp - SUBQ $256, inl - VPERM2I128 $0x02, AA2, BB2, AA0 - VPERM2I128 $0x02, CC2, DD2, BB0 - VPERM2I128 $0x13, AA2, BB2, CC0 - VPERM2I128 $0x13, CC2, DD2, DD0 - - JMP sealAVX2SealHash - -// ---------------------------------------------------------------------------- -// Special optimization for the last 512 bytes of ciphertext -sealAVX2Tail512: - // Need to encrypt up to 512 bytes - prepare two blocks - // If we got here after the main loop - there are 512 encrypted bytes waiting to be hashed - // If we got here before the main loop - there are 448 encrypted bytes waiting to be hashed - VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 - VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3 - VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3 - VMOVDQA ctr3StoreAVX2, DD0 - VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3 - VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2 - -sealAVX2Tail512LoopA: - polyAdd(0(oup)) - polyMul - LEAQ 16(oup), oup - -sealAVX2Tail512LoopB: - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - polyAdd(0*8(oup)) - polyMulAVX2 - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $7, BB3, CC3; VPSRLD $25,
BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $4, BB3, BB3, BB3 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 - VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2; VPALIGNR $12, DD3, DD3, DD3 - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - polyAdd(2*8(oup)) - polyMulAVX2 - LEAQ (4*8)(oup), oup - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $12, BB3, BB3, BB3 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 - VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2; VPALIGNR $4, DD3, DD3, DD3 - - DECQ itr1 - JG sealAVX2Tail512LoopA - DECQ itr2 - JGE sealAVX2Tail512LoopB - - VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3 - VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3 - VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3 - VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3 - VMOVDQA CC3, tmpStoreAVX2 - VPERM2I128 $0x02, AA0, BB0, CC3 - VPXOR (0*32)(inp), CC3, CC3 - VMOVDQU CC3, (0*32)(oup) - VPERM2I128 $0x02, CC0, DD0, CC3 - VPXOR (1*32)(inp), CC3, CC3 - VMOVDQU CC3, (1*32)(oup) - VPERM2I128 $0x13, AA0, BB0, CC3 - VPXOR (2*32)(inp), CC3, CC3 - VMOVDQU CC3, (2*32)(oup) - VPERM2I128 $0x13, CC0, DD0, CC3 - VPXOR (3*32)(inp), CC3, CC3 - VMOVDQU CC3, (3*32)(oup) - - VPERM2I128 $0x02, AA1, BB1, AA0 - VPERM2I128 $0x02, CC1, DD1, BB0 - VPERM2I128 $0x13, AA1, BB1, CC0 - VPERM2I128 
$0x13, CC1, DD1, DD0 - VPXOR (4*32)(inp), AA0, AA0; VPXOR (5*32)(inp), BB0, BB0; VPXOR (6*32)(inp), CC0, CC0; VPXOR (7*32)(inp), DD0, DD0 - VMOVDQU AA0, (4*32)(oup); VMOVDQU BB0, (5*32)(oup); VMOVDQU CC0, (6*32)(oup); VMOVDQU DD0, (7*32)(oup) - - VPERM2I128 $0x02, AA2, BB2, AA0 - VPERM2I128 $0x02, CC2, DD2, BB0 - VPERM2I128 $0x13, AA2, BB2, CC0 - VPERM2I128 $0x13, CC2, DD2, DD0 - VPXOR (8*32)(inp), AA0, AA0; VPXOR (9*32)(inp), BB0, BB0; VPXOR (10*32)(inp), CC0, CC0; VPXOR (11*32)(inp), DD0, DD0 - VMOVDQU AA0, (8*32)(oup); VMOVDQU BB0, (9*32)(oup); VMOVDQU CC0, (10*32)(oup); VMOVDQU DD0, (11*32)(oup) - - MOVQ $384, itr1 - LEAQ 384(inp), inp - SUBQ $384, inl - VPERM2I128 $0x02, AA3, BB3, AA0 - VPERM2I128 $0x02, tmpStoreAVX2, DD3, BB0 - VPERM2I128 $0x13, AA3, BB3, CC0 - VPERM2I128 $0x13, tmpStoreAVX2, DD3, DD0 - - JMP sealAVX2SealHash diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_generic.go b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_generic.go deleted file mode 100644 index fe191d39..00000000 --- a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_generic.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package chacha20poly1305 - -import ( - "encoding/binary" - - "golang.org/x/crypto/chacha20" - "golang.org/x/crypto/internal/subtle" - "golang.org/x/crypto/poly1305" -) - -func writeWithPadding(p *poly1305.MAC, b []byte) { - p.Write(b) - if rem := len(b) % 16; rem != 0 { - var buf [16]byte - padLen := 16 - rem - p.Write(buf[:padLen]) - } -} - -func writeUint64(p *poly1305.MAC, n int) { - var buf [8]byte - binary.LittleEndian.PutUint64(buf[:], uint64(n)) - p.Write(buf[:]) -} - -func (c *chacha20poly1305) sealGeneric(dst, nonce, plaintext, additionalData []byte) []byte { - ret, out := sliceForAppend(dst, len(plaintext)+poly1305.TagSize) - ciphertext, tag := out[:len(plaintext)], out[len(plaintext):] - if subtle.InexactOverlap(out, plaintext) { - panic("chacha20poly1305: invalid buffer overlap") - } - - var polyKey [32]byte - s, _ := chacha20.NewUnauthenticatedCipher(c.key[:], nonce) - s.XORKeyStream(polyKey[:], polyKey[:]) - s.SetCounter(1) // set the counter to 1, skipping 32 bytes - s.XORKeyStream(ciphertext, plaintext) - - p := poly1305.New(&polyKey) - writeWithPadding(p, additionalData) - writeWithPadding(p, ciphertext) - writeUint64(p, len(additionalData)) - writeUint64(p, len(plaintext)) - p.Sum(tag[:0]) - - return ret -} - -func (c *chacha20poly1305) openGeneric(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) { - tag := ciphertext[len(ciphertext)-16:] - ciphertext = ciphertext[:len(ciphertext)-16] - - var polyKey [32]byte - s, _ := chacha20.NewUnauthenticatedCipher(c.key[:], nonce) - s.XORKeyStream(polyKey[:], polyKey[:]) - s.SetCounter(1) // set the counter to 1, skipping 32 bytes - - p := poly1305.New(&polyKey) - writeWithPadding(p, additionalData) - writeWithPadding(p, ciphertext) - writeUint64(p, len(additionalData)) - writeUint64(p, len(ciphertext)) - - ret, out := sliceForAppend(dst, len(ciphertext)) - if subtle.InexactOverlap(out, ciphertext) { - panic("chacha20poly1305: invalid buffer overlap") - } - if !p.Verify(tag) { - for i := range out { - out[i] = 0 - } - return nil, errOpen - } - - s.XORKeyStream(out, ciphertext) - return ret, nil -} diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_noasm.go 
b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_noasm.go deleted file mode 100644 index f832b33d..00000000 --- a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_noasm.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !amd64 || !gc || purego -// +build !amd64 !gc purego - -package chacha20poly1305 - -func (c *chacha20poly1305) seal(dst, nonce, plaintext, additionalData []byte) []byte { - return c.sealGeneric(dst, nonce, plaintext, additionalData) -} - -func (c *chacha20poly1305) open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) { - return c.openGeneric(dst, nonce, ciphertext, additionalData) -} diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/xchacha20poly1305.go b/vendor/golang.org/x/crypto/chacha20poly1305/xchacha20poly1305.go deleted file mode 100644 index d9d46b96..00000000 --- a/vendor/golang.org/x/crypto/chacha20poly1305/xchacha20poly1305.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package chacha20poly1305 - -import ( - "crypto/cipher" - "errors" - - "golang.org/x/crypto/chacha20" -) - -type xchacha20poly1305 struct { - key [KeySize]byte -} - -// NewX returns a XChaCha20-Poly1305 AEAD that uses the given 256-bit key. -// -// XChaCha20-Poly1305 is a ChaCha20-Poly1305 variant that takes a longer nonce, -// suitable to be generated randomly without risk of collisions. It should be -// preferred when nonce uniqueness cannot be trivially ensured, or whenever -// nonces are randomly generated. -func NewX(key []byte) (cipher.AEAD, error) { - if len(key) != KeySize { - return nil, errors.New("chacha20poly1305: bad key length") - } - ret := new(xchacha20poly1305) - copy(ret.key[:], key) - return ret, nil -} - -func (*xchacha20poly1305) NonceSize() int { - return NonceSizeX -} - -func (*xchacha20poly1305) Overhead() int { - return 16 -} - -func (x *xchacha20poly1305) Seal(dst, nonce, plaintext, additionalData []byte) []byte { - if len(nonce) != NonceSizeX { - panic("chacha20poly1305: bad nonce length passed to Seal") - } - - // XChaCha20-Poly1305 technically supports a 64-bit counter, so there is no - // size limit. However, since we reuse the ChaCha20-Poly1305 implementation, - // the second half of the counter is not available. This is unlikely to be - // an issue because the cipher.AEAD API requires the entire message to be in - // memory, and the counter overflows at 256 GB. - if uint64(len(plaintext)) > (1<<38)-64 { - panic("chacha20poly1305: plaintext too large") - } - - c := new(chacha20poly1305) - hKey, _ := chacha20.HChaCha20(x.key[:], nonce[0:16]) - copy(c.key[:], hKey) - - // The first 4 bytes of the final nonce are unused counter space. 
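The generic and noasm files deleted above are the portable fallback for the assembly seal/open paths, and xchacha20poly1305.go layers the extended-nonce variant on top of them via HChaCha20. All of it sits behind the package's public constructors; a usage sketch of that public API (New takes a 12-byte nonce that must be unique per key, NewX a 24-byte nonce that is safe to generate at random):

package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/chacha20poly1305"
)

func main() {
	key := make([]byte, chacha20poly1305.KeySize)      // 32 bytes
	nonce := make([]byte, chacha20poly1305.NonceSizeX) // 24 bytes, random is fine
	if _, err := rand.Read(key); err != nil {
		panic(err)
	}
	if _, err := rand.Read(nonce); err != nil {
		panic(err)
	}
	aead, err := chacha20poly1305.NewX(key) // chacha20poly1305.New for the 12-byte-nonce AEAD
	if err != nil {
		panic(err)
	}
	ct := aead.Seal(nil, nonce, []byte("hello"), []byte("additional data"))
	pt, err := aead.Open(nil, nonce, ct, []byte("additional data"))
	fmt.Println(string(pt), err) // hello <nil>
}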
- cNonce := make([]byte, NonceSize) - copy(cNonce[4:12], nonce[16:24]) - - return c.seal(dst, cNonce[:], plaintext, additionalData) -} - -func (x *xchacha20poly1305) Open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) { - if len(nonce) != NonceSizeX { - panic("chacha20poly1305: bad nonce length passed to Open") - } - if len(ciphertext) < 16 { - return nil, errOpen - } - if uint64(len(ciphertext)) > (1<<38)-48 { - panic("chacha20poly1305: ciphertext too large") - } - - c := new(chacha20poly1305) - hKey, _ := chacha20.HChaCha20(x.key[:], nonce[0:16]) - copy(c.key[:], hKey) - - // The first 4 bytes of the final nonce are unused counter space. - cNonce := make([]byte, NonceSize) - copy(cNonce[4:12], nonce[16:24]) - - return c.open(dst, cNonce[:], ciphertext, additionalData) -} diff --git a/vendor/golang.org/x/crypto/cryptobyte/asn1.go b/vendor/golang.org/x/crypto/cryptobyte/asn1.go deleted file mode 100644 index 83c776de..00000000 --- a/vendor/golang.org/x/crypto/cryptobyte/asn1.go +++ /dev/null @@ -1,804 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cryptobyte - -import ( - encoding_asn1 "encoding/asn1" - "fmt" - "math/big" - "reflect" - "time" - - "golang.org/x/crypto/cryptobyte/asn1" -) - -// This file contains ASN.1-related methods for String and Builder. - -// Builder - -// AddASN1Int64 appends a DER-encoded ASN.1 INTEGER. -func (b *Builder) AddASN1Int64(v int64) { - b.addASN1Signed(asn1.INTEGER, v) -} - -// AddASN1Int64WithTag appends a DER-encoded ASN.1 INTEGER with the -// given tag. -func (b *Builder) AddASN1Int64WithTag(v int64, tag asn1.Tag) { - b.addASN1Signed(tag, v) -} - -// AddASN1Enum appends a DER-encoded ASN.1 ENUMERATION. -func (b *Builder) AddASN1Enum(v int64) { - b.addASN1Signed(asn1.ENUM, v) -} - -func (b *Builder) addASN1Signed(tag asn1.Tag, v int64) { - b.AddASN1(tag, func(c *Builder) { - length := 1 - for i := v; i >= 0x80 || i < -0x80; i >>= 8 { - length++ - } - - for ; length > 0; length-- { - i := v >> uint((length-1)*8) & 0xff - c.AddUint8(uint8(i)) - } - }) -} - -// AddASN1Uint64 appends a DER-encoded ASN.1 INTEGER. -func (b *Builder) AddASN1Uint64(v uint64) { - b.AddASN1(asn1.INTEGER, func(c *Builder) { - length := 1 - for i := v; i >= 0x80; i >>= 8 { - length++ - } - - for ; length > 0; length-- { - i := v >> uint((length-1)*8) & 0xff - c.AddUint8(uint8(i)) - } - }) -} - -// AddASN1BigInt appends a DER-encoded ASN.1 INTEGER. -func (b *Builder) AddASN1BigInt(n *big.Int) { - if b.err != nil { - return - } - - b.AddASN1(asn1.INTEGER, func(c *Builder) { - if n.Sign() < 0 { - // A negative number has to be converted to two's-complement form. So we - // invert and subtract 1. If the most-significant-bit isn't set then - // we'll need to pad the beginning with 0xff in order to keep the number - // negative. - nMinus1 := new(big.Int).Neg(n) - nMinus1.Sub(nMinus1, bigOne) - bytes := nMinus1.Bytes() - for i := range bytes { - bytes[i] ^= 0xff - } - if len(bytes) == 0 || bytes[0]&0x80 == 0 { - c.add(0xff) - } - c.add(bytes...) - } else if n.Sign() == 0 { - c.add(0) - } else { - bytes := n.Bytes() - if bytes[0]&0x80 != 0 { - c.add(0) - } - c.add(bytes...) - } - }) -} - -// AddASN1OctetString appends a DER-encoded ASN.1 OCTET STRING. 
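A worked example of AddASN1BigInt's two's-complement branch shown above (encodeNeg129 is an illustrative name):

import (
	"math/big"

	"golang.org/x/crypto/cryptobyte"
)

// Encoding -129: nMinus1 = 128 = 0x80, inverted to 0x7f; its top bit is
// clear, so a leading 0xff pad keeps the encoded value negative.
func encodeNeg129() ([]byte, error) {
	var b cryptobyte.Builder
	b.AddASN1BigInt(big.NewInt(-129))
	return b.Bytes() // 02 02 ff 7f (INTEGER, length 2, value -129)
}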
-func (b *Builder) AddASN1OctetString(bytes []byte) { - b.AddASN1(asn1.OCTET_STRING, func(c *Builder) { - c.AddBytes(bytes) - }) -} - -const generalizedTimeFormatStr = "20060102150405Z0700" - -// AddASN1GeneralizedTime appends a DER-encoded ASN.1 GENERALIZEDTIME. -func (b *Builder) AddASN1GeneralizedTime(t time.Time) { - if t.Year() < 0 || t.Year() > 9999 { - b.err = fmt.Errorf("cryptobyte: cannot represent %v as a GeneralizedTime", t) - return - } - b.AddASN1(asn1.GeneralizedTime, func(c *Builder) { - c.AddBytes([]byte(t.Format(generalizedTimeFormatStr))) - }) -} - -// AddASN1UTCTime appends a DER-encoded ASN.1 UTCTime. -func (b *Builder) AddASN1UTCTime(t time.Time) { - b.AddASN1(asn1.UTCTime, func(c *Builder) { - // As utilized by the X.509 profile, UTCTime can only - // represent the years 1950 through 2049. - if t.Year() < 1950 || t.Year() >= 2050 { - b.err = fmt.Errorf("cryptobyte: cannot represent %v as a UTCTime", t) - return - } - c.AddBytes([]byte(t.Format(defaultUTCTimeFormatStr))) - }) -} - -// AddASN1BitString appends a DER-encoded ASN.1 BIT STRING. This does not -// support BIT STRINGs that are not a whole number of bytes. -func (b *Builder) AddASN1BitString(data []byte) { - b.AddASN1(asn1.BIT_STRING, func(b *Builder) { - b.AddUint8(0) - b.AddBytes(data) - }) -} - -func (b *Builder) addBase128Int(n int64) { - var length int - if n == 0 { - length = 1 - } else { - for i := n; i > 0; i >>= 7 { - length++ - } - } - - for i := length - 1; i >= 0; i-- { - o := byte(n >> uint(i*7)) - o &= 0x7f - if i != 0 { - o |= 0x80 - } - - b.add(o) - } -} - -func isValidOID(oid encoding_asn1.ObjectIdentifier) bool { - if len(oid) < 2 { - return false - } - - if oid[0] > 2 || (oid[0] <= 1 && oid[1] >= 40) { - return false - } - - for _, v := range oid { - if v < 0 { - return false - } - } - - return true -} - -func (b *Builder) AddASN1ObjectIdentifier(oid encoding_asn1.ObjectIdentifier) { - b.AddASN1(asn1.OBJECT_IDENTIFIER, func(b *Builder) { - if !isValidOID(oid) { - b.err = fmt.Errorf("cryptobyte: invalid OID: %v", oid) - return - } - - b.addBase128Int(int64(oid[0])*40 + int64(oid[1])) - for _, v := range oid[2:] { - b.addBase128Int(int64(v)) - } - }) -} - -func (b *Builder) AddASN1Boolean(v bool) { - b.AddASN1(asn1.BOOLEAN, func(b *Builder) { - if v { - b.AddUint8(0xff) - } else { - b.AddUint8(0) - } - }) -} - -func (b *Builder) AddASN1NULL() { - b.add(uint8(asn1.NULL), 0) -} - -// MarshalASN1 calls encoding_asn1.Marshal on its input and appends the result if -// successful or records an error if one occurred. -func (b *Builder) MarshalASN1(v interface{}) { - // NOTE(martinkr): This is somewhat of a hack to allow propagation of - // encoding_asn1.Marshal errors into Builder.err. N.B. if you call MarshalASN1 with a - // value embedded into a struct, its tag information is lost. - if b.err != nil { - return - } - bytes, err := encoding_asn1.Marshal(v) - if err != nil { - b.err = err - return - } - b.AddBytes(bytes) -} - -// AddASN1 appends an ASN.1 object. The object is prefixed with the given tag. -// Tags greater than 30 are not supported and result in an error (i.e. -// low-tag-number form only). The child builder passed to the -// BuilderContinuation can be used to build the content of the ASN.1 object. -func (b *Builder) AddASN1(tag asn1.Tag, f BuilderContinuation) { - if b.err != nil { - return - } - // Identifiers with the low five bits set indicate high-tag-number format - // (two or more octets), which we don't support. 
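To make the base-128 encoding in addBase128Int and AddASN1ObjectIdentifier above concrete, here is a sketch encoding the familiar RSA arc 1.2.840.113549 (encodeRSAOID is an illustrative name):

import (
	"encoding/asn1"

	"golang.org/x/crypto/cryptobyte"
)

// The first content byte is 40*1+2 = 0x2a; 840 becomes 86 48 and 113549
// becomes 86 f7 0d, with the 0x80 continuation bit set on every byte but
// the last of each component.
func encodeRSAOID() ([]byte, error) {
	var b cryptobyte.Builder
	b.AddASN1ObjectIdentifier(asn1.ObjectIdentifier{1, 2, 840, 113549})
	return b.Bytes() // 06 06 2a 86 48 86 f7 0d
}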
- if tag&0x1f == 0x1f { - b.err = fmt.Errorf("cryptobyte: high-tag number identifier octects not supported: 0x%x", tag) - return - } - b.AddUint8(uint8(tag)) - b.addLengthPrefixed(1, true, f) -} - -// String - -// ReadASN1Boolean decodes an ASN.1 BOOLEAN and converts it to a boolean -// representation into out and advances. It reports whether the read -// was successful. -func (s *String) ReadASN1Boolean(out *bool) bool { - var bytes String - if !s.ReadASN1(&bytes, asn1.BOOLEAN) || len(bytes) != 1 { - return false - } - - switch bytes[0] { - case 0: - *out = false - case 0xff: - *out = true - default: - return false - } - - return true -} - -var bigIntType = reflect.TypeOf((*big.Int)(nil)).Elem() - -// ReadASN1Integer decodes an ASN.1 INTEGER into out and advances. If out does -// not point to an integer or to a big.Int, it panics. It reports whether the -// read was successful. -func (s *String) ReadASN1Integer(out interface{}) bool { - if reflect.TypeOf(out).Kind() != reflect.Ptr { - panic("out is not a pointer") - } - switch reflect.ValueOf(out).Elem().Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - var i int64 - if !s.readASN1Int64(&i) || reflect.ValueOf(out).Elem().OverflowInt(i) { - return false - } - reflect.ValueOf(out).Elem().SetInt(i) - return true - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - var u uint64 - if !s.readASN1Uint64(&u) || reflect.ValueOf(out).Elem().OverflowUint(u) { - return false - } - reflect.ValueOf(out).Elem().SetUint(u) - return true - case reflect.Struct: - if reflect.TypeOf(out).Elem() == bigIntType { - return s.readASN1BigInt(out.(*big.Int)) - } - } - panic("out does not point to an integer type") -} - -func checkASN1Integer(bytes []byte) bool { - if len(bytes) == 0 { - // An INTEGER is encoded with at least one octet. - return false - } - if len(bytes) == 1 { - return true - } - if bytes[0] == 0 && bytes[1]&0x80 == 0 || bytes[0] == 0xff && bytes[1]&0x80 == 0x80 { - // Value is not minimally encoded. - return false - } - return true -} - -var bigOne = big.NewInt(1) - -func (s *String) readASN1BigInt(out *big.Int) bool { - var bytes String - if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) { - return false - } - if bytes[0]&0x80 == 0x80 { - // Negative number. - neg := make([]byte, len(bytes)) - for i, b := range bytes { - neg[i] = ^b - } - out.SetBytes(neg) - out.Add(out, bigOne) - out.Neg(out) - } else { - out.SetBytes(bytes) - } - return true -} - -func (s *String) readASN1Int64(out *int64) bool { - var bytes String - if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) || !asn1Signed(out, bytes) { - return false - } - return true -} - -func asn1Signed(out *int64, n []byte) bool { - length := len(n) - if length > 8 { - return false - } - for i := 0; i < length; i++ { - *out <<= 8 - *out |= int64(n[i]) - } - // Shift up and down in order to sign extend the result. - *out <<= 64 - uint8(length)*8 - *out >>= 64 - uint8(length)*8 - return true -} - -func (s *String) readASN1Uint64(out *uint64) bool { - var bytes String - if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) || !asn1Unsigned(out, bytes) { - return false - } - return true -} - -func asn1Unsigned(out *uint64, n []byte) bool { - length := len(n) - if length > 9 || length == 9 && n[0] != 0 { - // Too large for uint64. - return false - } - if n[0]&0x80 != 0 { - // Negative number. 
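On the reading side, ReadASN1Integer above accepts any integer or big.Int destination, and checkASN1Integer rejects encodings that are not minimal DER. A small sketch (readInt is an illustrative name):

import "golang.org/x/crypto/cryptobyte"

func readInt() (int64, bool) {
	s := cryptobyte.String([]byte{0x02, 0x01, 0x2a}) // INTEGER, length 1, value 42
	var n int64
	ok := s.ReadASN1Integer(&n)
	// A padded form such as 02 02 00 2a would fail here: checkASN1Integer
	// insists on the minimal encoding.
	return n, ok // 42, true
}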
- return false - } - for i := 0; i < length; i++ { - *out <<= 8 - *out |= uint64(n[i]) - } - return true -} - -// ReadASN1Int64WithTag decodes an ASN.1 INTEGER with the given tag into out -// and advances. It reports whether the read was successful and resulted in a -// value that can be represented in an int64. -func (s *String) ReadASN1Int64WithTag(out *int64, tag asn1.Tag) bool { - var bytes String - return s.ReadASN1(&bytes, tag) && checkASN1Integer(bytes) && asn1Signed(out, bytes) -} - -// ReadASN1Enum decodes an ASN.1 ENUMERATION into out and advances. It reports -// whether the read was successful. -func (s *String) ReadASN1Enum(out *int) bool { - var bytes String - var i int64 - if !s.ReadASN1(&bytes, asn1.ENUM) || !checkASN1Integer(bytes) || !asn1Signed(&i, bytes) { - return false - } - if int64(int(i)) != i { - return false - } - *out = int(i) - return true -} - -func (s *String) readBase128Int(out *int) bool { - ret := 0 - for i := 0; len(*s) > 0; i++ { - if i == 4 { - return false - } - ret <<= 7 - b := s.read(1)[0] - ret |= int(b & 0x7f) - if b&0x80 == 0 { - *out = ret - return true - } - } - return false // truncated -} - -// ReadASN1ObjectIdentifier decodes an ASN.1 OBJECT IDENTIFIER into out and -// advances. It reports whether the read was successful. -func (s *String) ReadASN1ObjectIdentifier(out *encoding_asn1.ObjectIdentifier) bool { - var bytes String - if !s.ReadASN1(&bytes, asn1.OBJECT_IDENTIFIER) || len(bytes) == 0 { - return false - } - - // In the worst case, we get two elements from the first byte (which is - // encoded differently) and then every varint is a single byte long. - components := make([]int, len(bytes)+1) - - // The first varint is 40*value1 + value2: - // According to this packing, value1 can take the values 0, 1 and 2 only. - // When value1 = 0 or value1 = 1, then value2 is <= 39. When value1 = 2, - // then there are no restrictions on value2. - var v int - if !bytes.readBase128Int(&v) { - return false - } - if v < 80 { - components[0] = v / 40 - components[1] = v % 40 - } else { - components[0] = 2 - components[1] = v - 80 - } - - i := 2 - for ; len(bytes) > 0; i++ { - if !bytes.readBase128Int(&v) { - return false - } - components[i] = v - } - *out = components[:i] - return true -} - -// ReadASN1GeneralizedTime decodes an ASN.1 GENERALIZEDTIME into out and -// advances. It reports whether the read was successful. -func (s *String) ReadASN1GeneralizedTime(out *time.Time) bool { - var bytes String - if !s.ReadASN1(&bytes, asn1.GeneralizedTime) { - return false - } - t := string(bytes) - res, err := time.Parse(generalizedTimeFormatStr, t) - if err != nil { - return false - } - if serialized := res.Format(generalizedTimeFormatStr); serialized != t { - return false - } - *out = res - return true -} - -const defaultUTCTimeFormatStr = "060102150405Z0700" - -// ReadASN1UTCTime decodes an ASN.1 UTCTime into out and advances. -// It reports whether the read was successful. -func (s *String) ReadASN1UTCTime(out *time.Time) bool { - var bytes String - if !s.ReadASN1(&bytes, asn1.UTCTime) { - return false - } - t := string(bytes) - - formatStr := defaultUTCTimeFormatStr - var err error - res, err := time.Parse(formatStr, t) - if err != nil { - // Fallback to minute precision if we can't parse second - // precision. If we are following X.509 or X.690 we shouldn't - // support this, but we do. 
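ReadASN1UTCTime (continued below) maps the two-digit year into the 1950-2049 window that the X.509 profile prescribes. A sketch of a round trip, assuming Go's documented two-digit-year parsing (values of 69-99 map to 19xx):

import (
	"time"

	"golang.org/x/crypto/cryptobyte"
)

func readUTCTime() (time.Time, bool) {
	// Tag 0x17 (UTCTime), length 13, content "991231235959Z".
	der := append([]byte{0x17, 0x0d}, "991231235959Z"...)
	s := cryptobyte.String(der)
	var t time.Time
	ok := s.ReadASN1UTCTime(&t)
	return t, ok // 1999-12-31 23:59:59 +0000 UTC, true
}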
-		formatStr = "0601021504Z0700"
-		res, err = time.Parse(formatStr, t)
-	}
-	if err != nil {
-		return false
-	}
-
-	if serialized := res.Format(formatStr); serialized != t {
-		return false
-	}
-
-	if res.Year() >= 2050 {
-		// UTCTime interprets the low order digits 50-99 as 1950-99.
-		// This only applies to its use in the X.509 profile.
-		// See https://tools.ietf.org/html/rfc5280#section-4.1.2.5.1
-		res = res.AddDate(-100, 0, 0)
-	}
-	*out = res
-	return true
-}
-
-// ReadASN1BitString decodes an ASN.1 BIT STRING into out and advances.
-// It reports whether the read was successful.
-func (s *String) ReadASN1BitString(out *encoding_asn1.BitString) bool {
-	var bytes String
-	if !s.ReadASN1(&bytes, asn1.BIT_STRING) || len(bytes) == 0 ||
-		len(bytes)*8/8 != len(bytes) {
-		return false
-	}
-
-	paddingBits := uint8(bytes[0])
-	bytes = bytes[1:]
-	if paddingBits > 7 ||
-		len(bytes) == 0 && paddingBits != 0 ||
-		len(bytes) > 0 && bytes[len(bytes)-1]&(1<<paddingBits-1) != 0 {
-		return false
-	}
-	out.BitLength = len(bytes)*8 - int(paddingBits)
-	out.Bytes = bytes
-	return true
-}
-
-		if lenLen == 0 || lenLen > 4 || len(*s) < int(2+lenLen) {
-			return false
-		}
-
-		lenBytes := String((*s)[2 : 2+lenLen])
-		if !lenBytes.readUnsigned(&len32, int(lenLen)) {
-			return false
-		}
-
-		// ITU-T X.690 section 10.1 (DER length forms) requires encoding the length
-		// with the minimum number of octets.
-		if len32 < 128 {
-			// Length should have used short-form encoding.
-			return false
-		}
-		if len32>>((lenLen-1)*8) == 0 {
-			// Leading octet is 0. Length should have been at least one byte shorter.
-			return false
-		}
-
-		headerLen = 2 + uint32(lenLen)
-		if headerLen+len32 < len32 {
-			// Overflow.
-			return false
-		}
-		length = headerLen + len32
-	}
-
-	if int(length) < 0 || !s.ReadBytes((*[]byte)(out), int(length)) {
-		return false
-	}
-	if skipHeader && !out.Skip(int(headerLen)) {
-		panic("cryptobyte: internal error")
-	}
-
-	return true
-}
diff --git a/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go b/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go
deleted file mode 100644
index cda8e3ed..00000000
--- a/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package asn1 contains supporting types for parsing and building ASN.1
-// messages with the cryptobyte package.
-package asn1 // import "golang.org/x/crypto/cryptobyte/asn1"
-
-// Tag represents an ASN.1 identifier octet, consisting of a tag number
-// (indicating a type) and class (such as context-specific or constructed).
-//
-// Methods in the cryptobyte package only support the low-tag-number form, i.e.
-// a single identifier octet with bits 7-8 encoding the class and bits 1-6
-// encoding the tag number.
-type Tag uint8
-
-const (
-	classConstructed     = 0x20
-	classContextSpecific = 0x80
-)
-
-// Constructed returns t with the constructed class bit set.
-func (t Tag) Constructed() Tag { return t | classConstructed }
-
-// ContextSpecific returns t with the context-specific class bit set.
-func (t Tag) ContextSpecific() Tag { return t | classContextSpecific }
-
-// The following is a list of standard tag and class combinations.
-const ( - BOOLEAN = Tag(1) - INTEGER = Tag(2) - BIT_STRING = Tag(3) - OCTET_STRING = Tag(4) - NULL = Tag(5) - OBJECT_IDENTIFIER = Tag(6) - ENUM = Tag(10) - UTF8String = Tag(12) - SEQUENCE = Tag(16 | classConstructed) - SET = Tag(17 | classConstructed) - PrintableString = Tag(19) - T61String = Tag(20) - IA5String = Tag(22) - UTCTime = Tag(23) - GeneralizedTime = Tag(24) - GeneralString = Tag(27) -) diff --git a/vendor/golang.org/x/crypto/cryptobyte/builder.go b/vendor/golang.org/x/crypto/cryptobyte/builder.go deleted file mode 100644 index ca7b1db5..00000000 --- a/vendor/golang.org/x/crypto/cryptobyte/builder.go +++ /dev/null @@ -1,337 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cryptobyte - -import ( - "errors" - "fmt" -) - -// A Builder builds byte strings from fixed-length and length-prefixed values. -// Builders either allocate space as needed, or are ‘fixed’, which means that -// they write into a given buffer and produce an error if it's exhausted. -// -// The zero value is a usable Builder that allocates space as needed. -// -// Simple values are marshaled and appended to a Builder using methods on the -// Builder. Length-prefixed values are marshaled by providing a -// BuilderContinuation, which is a function that writes the inner contents of -// the value to a given Builder. See the documentation for BuilderContinuation -// for details. -type Builder struct { - err error - result []byte - fixedSize bool - child *Builder - offset int - pendingLenLen int - pendingIsASN1 bool - inContinuation *bool -} - -// NewBuilder creates a Builder that appends its output to the given buffer. -// Like append(), the slice will be reallocated if its capacity is exceeded. -// Use Bytes to get the final buffer. -func NewBuilder(buffer []byte) *Builder { - return &Builder{ - result: buffer, - } -} - -// NewFixedBuilder creates a Builder that appends its output into the given -// buffer. This builder does not reallocate the output buffer. Writes that -// would exceed the buffer's capacity are treated as an error. -func NewFixedBuilder(buffer []byte) *Builder { - return &Builder{ - result: buffer, - fixedSize: true, - } -} - -// SetError sets the value to be returned as the error from Bytes. Writes -// performed after calling SetError are ignored. -func (b *Builder) SetError(err error) { - b.err = err -} - -// Bytes returns the bytes written by the builder or an error if one has -// occurred during building. -func (b *Builder) Bytes() ([]byte, error) { - if b.err != nil { - return nil, b.err - } - return b.result[b.offset:], nil -} - -// BytesOrPanic returns the bytes written by the builder or panics if an error -// has occurred during building. -func (b *Builder) BytesOrPanic() []byte { - if b.err != nil { - panic(b.err) - } - return b.result[b.offset:] -} - -// AddUint8 appends an 8-bit value to the byte string. -func (b *Builder) AddUint8(v uint8) { - b.add(byte(v)) -} - -// AddUint16 appends a big-endian, 16-bit value to the byte string. -func (b *Builder) AddUint16(v uint16) { - b.add(byte(v>>8), byte(v)) -} - -// AddUint24 appends a big-endian, 24-bit value to the byte string. The highest -// byte of the 32-bit input value is silently truncated. -func (b *Builder) AddUint24(v uint32) { - b.add(byte(v>>16), byte(v>>8), byte(v)) -} - -// AddUint32 appends a big-endian, 32-bit value to the byte string. 
-func (b *Builder) AddUint32(v uint32) { - b.add(byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) -} - -// AddBytes appends a sequence of bytes to the byte string. -func (b *Builder) AddBytes(v []byte) { - b.add(v...) -} - -// BuilderContinuation is a continuation-passing interface for building -// length-prefixed byte sequences. Builder methods for length-prefixed -// sequences (AddUint8LengthPrefixed etc) will invoke the BuilderContinuation -// supplied to them. The child builder passed to the continuation can be used -// to build the content of the length-prefixed sequence. For example: -// -// parent := cryptobyte.NewBuilder() -// parent.AddUint8LengthPrefixed(func (child *Builder) { -// child.AddUint8(42) -// child.AddUint8LengthPrefixed(func (grandchild *Builder) { -// grandchild.AddUint8(5) -// }) -// }) -// -// It is an error to write more bytes to the child than allowed by the reserved -// length prefix. After the continuation returns, the child must be considered -// invalid, i.e. users must not store any copies or references of the child -// that outlive the continuation. -// -// If the continuation panics with a value of type BuildError then the inner -// error will be returned as the error from Bytes. If the child panics -// otherwise then Bytes will repanic with the same value. -type BuilderContinuation func(child *Builder) - -// BuildError wraps an error. If a BuilderContinuation panics with this value, -// the panic will be recovered and the inner error will be returned from -// Builder.Bytes. -type BuildError struct { - Err error -} - -// AddUint8LengthPrefixed adds a 8-bit length-prefixed byte sequence. -func (b *Builder) AddUint8LengthPrefixed(f BuilderContinuation) { - b.addLengthPrefixed(1, false, f) -} - -// AddUint16LengthPrefixed adds a big-endian, 16-bit length-prefixed byte sequence. -func (b *Builder) AddUint16LengthPrefixed(f BuilderContinuation) { - b.addLengthPrefixed(2, false, f) -} - -// AddUint24LengthPrefixed adds a big-endian, 24-bit length-prefixed byte sequence. -func (b *Builder) AddUint24LengthPrefixed(f BuilderContinuation) { - b.addLengthPrefixed(3, false, f) -} - -// AddUint32LengthPrefixed adds a big-endian, 32-bit length-prefixed byte sequence. -func (b *Builder) AddUint32LengthPrefixed(f BuilderContinuation) { - b.addLengthPrefixed(4, false, f) -} - -func (b *Builder) callContinuation(f BuilderContinuation, arg *Builder) { - if !*b.inContinuation { - *b.inContinuation = true - - defer func() { - *b.inContinuation = false - - r := recover() - if r == nil { - return - } - - if buildError, ok := r.(BuildError); ok { - b.err = buildError.Err - } else { - panic(r) - } - }() - } - - f(arg) -} - -func (b *Builder) addLengthPrefixed(lenLen int, isASN1 bool, f BuilderContinuation) { - // Subsequent writes can be ignored if the builder has encountered an error. - if b.err != nil { - return - } - - offset := len(b.result) - b.add(make([]byte, lenLen)...) 
- - if b.inContinuation == nil { - b.inContinuation = new(bool) - } - - b.child = &Builder{ - result: b.result, - fixedSize: b.fixedSize, - offset: offset, - pendingLenLen: lenLen, - pendingIsASN1: isASN1, - inContinuation: b.inContinuation, - } - - b.callContinuation(f, b.child) - b.flushChild() - if b.child != nil { - panic("cryptobyte: internal error") - } -} - -func (b *Builder) flushChild() { - if b.child == nil { - return - } - b.child.flushChild() - child := b.child - b.child = nil - - if child.err != nil { - b.err = child.err - return - } - - length := len(child.result) - child.pendingLenLen - child.offset - - if length < 0 { - panic("cryptobyte: internal error") // result unexpectedly shrunk - } - - if child.pendingIsASN1 { - // For ASN.1, we reserved a single byte for the length. If that turned out - // to be incorrect, we have to move the contents along in order to make - // space. - if child.pendingLenLen != 1 { - panic("cryptobyte: internal error") - } - var lenLen, lenByte uint8 - if int64(length) > 0xfffffffe { - b.err = errors.New("pending ASN.1 child too long") - return - } else if length > 0xffffff { - lenLen = 5 - lenByte = 0x80 | 4 - } else if length > 0xffff { - lenLen = 4 - lenByte = 0x80 | 3 - } else if length > 0xff { - lenLen = 3 - lenByte = 0x80 | 2 - } else if length > 0x7f { - lenLen = 2 - lenByte = 0x80 | 1 - } else { - lenLen = 1 - lenByte = uint8(length) - length = 0 - } - - // Insert the initial length byte, make space for successive length bytes, - // and adjust the offset. - child.result[child.offset] = lenByte - extraBytes := int(lenLen - 1) - if extraBytes != 0 { - child.add(make([]byte, extraBytes)...) - childStart := child.offset + child.pendingLenLen - copy(child.result[childStart+extraBytes:], child.result[childStart:]) - } - child.offset++ - child.pendingLenLen = extraBytes - } - - l := length - for i := child.pendingLenLen - 1; i >= 0; i-- { - child.result[child.offset+i] = uint8(l) - l >>= 8 - } - if l != 0 { - b.err = fmt.Errorf("cryptobyte: pending child length %d exceeds %d-byte length prefix", length, child.pendingLenLen) - return - } - - if b.fixedSize && &b.result[0] != &child.result[0] { - panic("cryptobyte: BuilderContinuation reallocated a fixed-size buffer") - } - - b.result = child.result -} - -func (b *Builder) add(bytes ...byte) { - if b.err != nil { - return - } - if b.child != nil { - panic("cryptobyte: attempted write while child is pending") - } - if len(b.result)+len(bytes) < len(bytes) { - b.err = errors.New("cryptobyte: length overflow") - } - if b.fixedSize && len(b.result)+len(bytes) > cap(b.result) { - b.err = errors.New("cryptobyte: Builder is exceeding its fixed-size buffer") - return - } - b.result = append(b.result, bytes...) -} - -// Unwrite rolls back n bytes written directly to the Builder. An attempt by a -// child builder passed to a continuation to unwrite bytes from its parent will -// panic. -func (b *Builder) Unwrite(n int) { - if b.err != nil { - return - } - if b.child != nil { - panic("cryptobyte: attempted unwrite while child is pending") - } - length := len(b.result) - b.pendingLenLen - b.offset - if length < 0 { - panic("cryptobyte: internal error") - } - if n > length { - panic("cryptobyte: attempted to unwrite more than was written") - } - b.result = b.result[:len(b.result)-n] -} - -// A MarshalingValue marshals itself into a Builder. -type MarshalingValue interface { - // Marshal is called by Builder.AddValue. It receives a pointer to a builder - // to marshal itself into. 
It may return an error that occurred during - // marshaling, such as unset or invalid values. - Marshal(b *Builder) error -} - -// AddValue calls Marshal on v, passing a pointer to the builder to append to. -// If Marshal returns an error, it is set on the Builder so that subsequent -// appends don't have an effect. -func (b *Builder) AddValue(v MarshalingValue) { - err := v.Marshal(b) - if err != nil { - b.err = err - } -} diff --git a/vendor/golang.org/x/crypto/cryptobyte/string.go b/vendor/golang.org/x/crypto/cryptobyte/string.go deleted file mode 100644 index 589d297e..00000000 --- a/vendor/golang.org/x/crypto/cryptobyte/string.go +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package cryptobyte contains types that help with parsing and constructing -// length-prefixed, binary messages, including ASN.1 DER. (The asn1 subpackage -// contains useful ASN.1 constants.) -// -// The String type is for parsing. It wraps a []byte slice and provides helper -// functions for consuming structures, value by value. -// -// The Builder type is for constructing messages. It providers helper functions -// for appending values and also for appending length-prefixed submessages – -// without having to worry about calculating the length prefix ahead of time. -// -// See the documentation and examples for the Builder and String types to get -// started. -package cryptobyte // import "golang.org/x/crypto/cryptobyte" - -// String represents a string of bytes. It provides methods for parsing -// fixed-length and length-prefixed values from it. -type String []byte - -// read advances a String by n bytes and returns them. If less than n bytes -// remain, it returns nil. -func (s *String) read(n int) []byte { - if len(*s) < n || n < 0 { - return nil - } - v := (*s)[:n] - *s = (*s)[n:] - return v -} - -// Skip advances the String by n byte and reports whether it was successful. -func (s *String) Skip(n int) bool { - return s.read(n) != nil -} - -// ReadUint8 decodes an 8-bit value into out and advances over it. -// It reports whether the read was successful. -func (s *String) ReadUint8(out *uint8) bool { - v := s.read(1) - if v == nil { - return false - } - *out = uint8(v[0]) - return true -} - -// ReadUint16 decodes a big-endian, 16-bit value into out and advances over it. -// It reports whether the read was successful. -func (s *String) ReadUint16(out *uint16) bool { - v := s.read(2) - if v == nil { - return false - } - *out = uint16(v[0])<<8 | uint16(v[1]) - return true -} - -// ReadUint24 decodes a big-endian, 24-bit value into out and advances over it. -// It reports whether the read was successful. -func (s *String) ReadUint24(out *uint32) bool { - v := s.read(3) - if v == nil { - return false - } - *out = uint32(v[0])<<16 | uint32(v[1])<<8 | uint32(v[2]) - return true -} - -// ReadUint32 decodes a big-endian, 32-bit value into out and advances over it. -// It reports whether the read was successful. 
-func (s *String) ReadUint32(out *uint32) bool { - v := s.read(4) - if v == nil { - return false - } - *out = uint32(v[0])<<24 | uint32(v[1])<<16 | uint32(v[2])<<8 | uint32(v[3]) - return true -} - -func (s *String) readUnsigned(out *uint32, length int) bool { - v := s.read(length) - if v == nil { - return false - } - var result uint32 - for i := 0; i < length; i++ { - result <<= 8 - result |= uint32(v[i]) - } - *out = result - return true -} - -func (s *String) readLengthPrefixed(lenLen int, outChild *String) bool { - lenBytes := s.read(lenLen) - if lenBytes == nil { - return false - } - var length uint32 - for _, b := range lenBytes { - length = length << 8 - length = length | uint32(b) - } - v := s.read(int(length)) - if v == nil { - return false - } - *outChild = v - return true -} - -// ReadUint8LengthPrefixed reads the content of an 8-bit length-prefixed value -// into out and advances over it. It reports whether the read was successful. -func (s *String) ReadUint8LengthPrefixed(out *String) bool { - return s.readLengthPrefixed(1, out) -} - -// ReadUint16LengthPrefixed reads the content of a big-endian, 16-bit -// length-prefixed value into out and advances over it. It reports whether the -// read was successful. -func (s *String) ReadUint16LengthPrefixed(out *String) bool { - return s.readLengthPrefixed(2, out) -} - -// ReadUint24LengthPrefixed reads the content of a big-endian, 24-bit -// length-prefixed value into out and advances over it. It reports whether -// the read was successful. -func (s *String) ReadUint24LengthPrefixed(out *String) bool { - return s.readLengthPrefixed(3, out) -} - -// ReadBytes reads n bytes into out and advances over them. It reports -// whether the read was successful. -func (s *String) ReadBytes(out *[]byte, n int) bool { - v := s.read(n) - if v == nil { - return false - } - *out = v - return true -} - -// CopyBytes copies len(out) bytes into out and advances over them. It reports -// whether the copy operation was successful -func (s *String) CopyBytes(out []byte) bool { - n := len(out) - v := s.read(n) - if v == nil { - return false - } - return copy(out, v) == n -} - -// Empty reports whether the string does not contain any bytes. -func (s String) Empty() bool { - return len(s) == 0 -} diff --git a/vendor/golang.org/x/crypto/curve25519/curve25519.go b/vendor/golang.org/x/crypto/curve25519/curve25519.go deleted file mode 100644 index cda3fdd3..00000000 --- a/vendor/golang.org/x/crypto/curve25519/curve25519.go +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package curve25519 provides an implementation of the X25519 function, which -// performs scalar multiplication on the elliptic curve known as Curve25519. -// See RFC 7748. -package curve25519 // import "golang.org/x/crypto/curve25519" - -import ( - "crypto/subtle" - "fmt" - - "golang.org/x/crypto/curve25519/internal/field" -) - -// ScalarMult sets dst to the product scalar * point. -// -// Deprecated: when provided a low-order point, ScalarMult will set dst to all -// zeroes, irrespective of the scalar. Instead, use the X25519 function, which -// will return an error. 
-func ScalarMult(dst, scalar, point *[32]byte) { - var e [32]byte - - copy(e[:], scalar[:]) - e[0] &= 248 - e[31] &= 127 - e[31] |= 64 - - var x1, x2, z2, x3, z3, tmp0, tmp1 field.Element - x1.SetBytes(point[:]) - x2.One() - x3.Set(&x1) - z3.One() - - swap := 0 - for pos := 254; pos >= 0; pos-- { - b := e[pos/8] >> uint(pos&7) - b &= 1 - swap ^= int(b) - x2.Swap(&x3, swap) - z2.Swap(&z3, swap) - swap = int(b) - - tmp0.Subtract(&x3, &z3) - tmp1.Subtract(&x2, &z2) - x2.Add(&x2, &z2) - z2.Add(&x3, &z3) - z3.Multiply(&tmp0, &x2) - z2.Multiply(&z2, &tmp1) - tmp0.Square(&tmp1) - tmp1.Square(&x2) - x3.Add(&z3, &z2) - z2.Subtract(&z3, &z2) - x2.Multiply(&tmp1, &tmp0) - tmp1.Subtract(&tmp1, &tmp0) - z2.Square(&z2) - - z3.Mult32(&tmp1, 121666) - x3.Square(&x3) - tmp0.Add(&tmp0, &z3) - z3.Multiply(&x1, &z2) - z2.Multiply(&tmp1, &tmp0) - } - - x2.Swap(&x3, swap) - z2.Swap(&z3, swap) - - z2.Invert(&z2) - x2.Multiply(&x2, &z2) - copy(dst[:], x2.Bytes()) -} - -// ScalarBaseMult sets dst to the product scalar * base where base is the -// standard generator. -// -// It is recommended to use the X25519 function with Basepoint instead, as -// copying into fixed size arrays can lead to unexpected bugs. -func ScalarBaseMult(dst, scalar *[32]byte) { - ScalarMult(dst, scalar, &basePoint) -} - -const ( - // ScalarSize is the size of the scalar input to X25519. - ScalarSize = 32 - // PointSize is the size of the point input to X25519. - PointSize = 32 -) - -// Basepoint is the canonical Curve25519 generator. -var Basepoint []byte - -var basePoint = [32]byte{9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} - -func init() { Basepoint = basePoint[:] } - -func checkBasepoint() { - if subtle.ConstantTimeCompare(Basepoint, []byte{ - 0x09, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - }) != 1 { - panic("curve25519: global Basepoint value was modified") - } -} - -// X25519 returns the result of the scalar multiplication (scalar * point), -// according to RFC 7748, Section 5. scalar, point and the return value are -// slices of 32 bytes. -// -// scalar can be generated at random, for example with crypto/rand. point should -// be either Basepoint or the output of another X25519 call. -// -// If point is Basepoint (but not if it's a different slice with the same -// contents) a precomputed implementation might be used for performance. -func X25519(scalar, point []byte) ([]byte, error) { - // Outline the body of function, to let the allocation be inlined in the - // caller, and possibly avoid escaping to the heap. 
- var dst [32]byte - return x25519(&dst, scalar, point) -} - -func x25519(dst *[32]byte, scalar, point []byte) ([]byte, error) { - var in [32]byte - if l := len(scalar); l != 32 { - return nil, fmt.Errorf("bad scalar length: %d, expected %d", l, 32) - } - if l := len(point); l != 32 { - return nil, fmt.Errorf("bad point length: %d, expected %d", l, 32) - } - copy(in[:], scalar) - if &point[0] == &Basepoint[0] { - checkBasepoint() - ScalarBaseMult(dst, &in) - } else { - var base, zero [32]byte - copy(base[:], point) - ScalarMult(dst, &in, &base) - if subtle.ConstantTimeCompare(dst[:], zero[:]) == 1 { - return nil, fmt.Errorf("bad input point: low order point") - } - } - return dst[:], nil -} diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/README b/vendor/golang.org/x/crypto/curve25519/internal/field/README deleted file mode 100644 index e25bca7d..00000000 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/README +++ /dev/null @@ -1,7 +0,0 @@ -This package is kept in sync with crypto/ed25519/internal/edwards25519/field in -the standard library. - -If there are any changes in the standard library that need to be synced to this -package, run sync.sh. It will not overwrite any local changes made since the -previous sync, so it's ok to land changes in this package first, and then sync -to the standard library later. diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe.go b/vendor/golang.org/x/crypto/curve25519/internal/field/fe.go deleted file mode 100644 index ca841ad9..00000000 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/fe.go +++ /dev/null @@ -1,416 +0,0 @@ -// Copyright (c) 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package field implements fast arithmetic modulo 2^255-19. -package field - -import ( - "crypto/subtle" - "encoding/binary" - "math/bits" -) - -// Element represents an element of the field GF(2^255-19). Note that this -// is not a cryptographically secure group, and should only be used to interact -// with edwards25519.Point coordinates. -// -// This type works similarly to math/big.Int, and all arguments and receivers -// are allowed to alias. -// -// The zero value is a valid zero element. -type Element struct { - // An element t represents the integer - // t.l0 + t.l1*2^51 + t.l2*2^102 + t.l3*2^153 + t.l4*2^204 - // - // Between operations, all limbs are expected to be lower than 2^52. - l0 uint64 - l1 uint64 - l2 uint64 - l3 uint64 - l4 uint64 -} - -const maskLow51Bits uint64 = (1 << 51) - 1 - -var feZero = &Element{0, 0, 0, 0, 0} - -// Zero sets v = 0, and returns v. -func (v *Element) Zero() *Element { - *v = *feZero - return v -} - -var feOne = &Element{1, 0, 0, 0, 0} - -// One sets v = 1, and returns v. -func (v *Element) One() *Element { - *v = *feOne - return v -} - -// reduce reduces v modulo 2^255 - 19 and returns it. -func (v *Element) reduce() *Element { - v.carryPropagate() - - // After the light reduction we now have a field element representation - // v < 2^255 + 2^13 * 19, but need v < 2^255 - 19. - - // If v >= 2^255 - 19, then v + 19 >= 2^255, which would overflow 2^255 - 1, - // generating a carry. That is, c will be 0 if v < 2^255 - 19, and 1 otherwise. - c := (v.l0 + 19) >> 51 - c = (v.l1 + c) >> 51 - c = (v.l2 + c) >> 51 - c = (v.l3 + c) >> 51 - c = (v.l4 + c) >> 51 - - // If v < 2^255 - 19 and c = 0, this will be a no-op. 
Otherwise, it's - // effectively applying the reduction identity to the carry. - v.l0 += 19 * c - - v.l1 += v.l0 >> 51 - v.l0 = v.l0 & maskLow51Bits - v.l2 += v.l1 >> 51 - v.l1 = v.l1 & maskLow51Bits - v.l3 += v.l2 >> 51 - v.l2 = v.l2 & maskLow51Bits - v.l4 += v.l3 >> 51 - v.l3 = v.l3 & maskLow51Bits - // no additional carry - v.l4 = v.l4 & maskLow51Bits - - return v -} - -// Add sets v = a + b, and returns v. -func (v *Element) Add(a, b *Element) *Element { - v.l0 = a.l0 + b.l0 - v.l1 = a.l1 + b.l1 - v.l2 = a.l2 + b.l2 - v.l3 = a.l3 + b.l3 - v.l4 = a.l4 + b.l4 - // Using the generic implementation here is actually faster than the - // assembly. Probably because the body of this function is so simple that - // the compiler can figure out better optimizations by inlining the carry - // propagation. TODO - return v.carryPropagateGeneric() -} - -// Subtract sets v = a - b, and returns v. -func (v *Element) Subtract(a, b *Element) *Element { - // We first add 2 * p, to guarantee the subtraction won't underflow, and - // then subtract b (which can be up to 2^255 + 2^13 * 19). - v.l0 = (a.l0 + 0xFFFFFFFFFFFDA) - b.l0 - v.l1 = (a.l1 + 0xFFFFFFFFFFFFE) - b.l1 - v.l2 = (a.l2 + 0xFFFFFFFFFFFFE) - b.l2 - v.l3 = (a.l3 + 0xFFFFFFFFFFFFE) - b.l3 - v.l4 = (a.l4 + 0xFFFFFFFFFFFFE) - b.l4 - return v.carryPropagate() -} - -// Negate sets v = -a, and returns v. -func (v *Element) Negate(a *Element) *Element { - return v.Subtract(feZero, a) -} - -// Invert sets v = 1/z mod p, and returns v. -// -// If z == 0, Invert returns v = 0. -func (v *Element) Invert(z *Element) *Element { - // Inversion is implemented as exponentiation with exponent p − 2. It uses the - // same sequence of 255 squarings and 11 multiplications as [Curve25519]. - var z2, z9, z11, z2_5_0, z2_10_0, z2_20_0, z2_50_0, z2_100_0, t Element - - z2.Square(z) // 2 - t.Square(&z2) // 4 - t.Square(&t) // 8 - z9.Multiply(&t, z) // 9 - z11.Multiply(&z9, &z2) // 11 - t.Square(&z11) // 22 - z2_5_0.Multiply(&t, &z9) // 31 = 2^5 - 2^0 - - t.Square(&z2_5_0) // 2^6 - 2^1 - for i := 0; i < 4; i++ { - t.Square(&t) // 2^10 - 2^5 - } - z2_10_0.Multiply(&t, &z2_5_0) // 2^10 - 2^0 - - t.Square(&z2_10_0) // 2^11 - 2^1 - for i := 0; i < 9; i++ { - t.Square(&t) // 2^20 - 2^10 - } - z2_20_0.Multiply(&t, &z2_10_0) // 2^20 - 2^0 - - t.Square(&z2_20_0) // 2^21 - 2^1 - for i := 0; i < 19; i++ { - t.Square(&t) // 2^40 - 2^20 - } - t.Multiply(&t, &z2_20_0) // 2^40 - 2^0 - - t.Square(&t) // 2^41 - 2^1 - for i := 0; i < 9; i++ { - t.Square(&t) // 2^50 - 2^10 - } - z2_50_0.Multiply(&t, &z2_10_0) // 2^50 - 2^0 - - t.Square(&z2_50_0) // 2^51 - 2^1 - for i := 0; i < 49; i++ { - t.Square(&t) // 2^100 - 2^50 - } - z2_100_0.Multiply(&t, &z2_50_0) // 2^100 - 2^0 - - t.Square(&z2_100_0) // 2^101 - 2^1 - for i := 0; i < 99; i++ { - t.Square(&t) // 2^200 - 2^100 - } - t.Multiply(&t, &z2_100_0) // 2^200 - 2^0 - - t.Square(&t) // 2^201 - 2^1 - for i := 0; i < 49; i++ { - t.Square(&t) // 2^250 - 2^50 - } - t.Multiply(&t, &z2_50_0) // 2^250 - 2^0 - - t.Square(&t) // 2^251 - 2^1 - t.Square(&t) // 2^252 - 2^2 - t.Square(&t) // 2^253 - 2^3 - t.Square(&t) // 2^254 - 2^4 - t.Square(&t) // 2^255 - 2^5 - - return v.Multiply(&t, &z11) // 2^255 - 21 -} - -// Set sets v = a, and returns v. -func (v *Element) Set(a *Element) *Element { - *v = *a - return v -} - -// SetBytes sets v to x, which must be a 32-byte little-endian encoding. 
-//
-// Consistent with RFC 7748, the most significant bit (the high bit of the
-// last byte) is ignored, and non-canonical values (2^255-19 through 2^255-1)
-// are accepted. Note that this is laxer than specified by RFC 8032.
-func (v *Element) SetBytes(x []byte) *Element {
-	if len(x) != 32 {
-		panic("edwards25519: invalid field element input size")
-	}
-
-	// Bits 0:51 (bytes 0:8, bits 0:64, shift 0, mask 51).
-	v.l0 = binary.LittleEndian.Uint64(x[0:8])
-	v.l0 &= maskLow51Bits
-	// Bits 51:102 (bytes 6:14, bits 48:112, shift 3, mask 51).
-	v.l1 = binary.LittleEndian.Uint64(x[6:14]) >> 3
-	v.l1 &= maskLow51Bits
-	// Bits 102:153 (bytes 12:20, bits 96:160, shift 6, mask 51).
-	v.l2 = binary.LittleEndian.Uint64(x[12:20]) >> 6
-	v.l2 &= maskLow51Bits
-	// Bits 153:204 (bytes 19:27, bits 152:216, shift 1, mask 51).
-	v.l3 = binary.LittleEndian.Uint64(x[19:27]) >> 1
-	v.l3 &= maskLow51Bits
-	// Bits 204:251 (bytes 24:32, bits 192:256, shift 12, mask 51).
-	// Note: not bytes 25:33, shift 4, to avoid overread.
-	v.l4 = binary.LittleEndian.Uint64(x[24:32]) >> 12
-	v.l4 &= maskLow51Bits
-
-	return v
-}
-
-// Bytes returns the canonical 32-byte little-endian encoding of v.
-func (v *Element) Bytes() []byte {
-	// This function is outlined to make the allocations inline in the caller
-	// rather than happen on the heap.
-	var out [32]byte
-	return v.bytes(&out)
-}
-
-func (v *Element) bytes(out *[32]byte) []byte {
-	t := *v
-	t.reduce()
-
-	var buf [8]byte
-	for i, l := range [5]uint64{t.l0, t.l1, t.l2, t.l3, t.l4} {
-		bitsOffset := i * 51
-		binary.LittleEndian.PutUint64(buf[:], l<<uint(bitsOffset%8))
-		for i, bb := range buf {
-			off := bitsOffset/8 + i
-			if off >= len(out) {
-				break
-			}
-			out[off] |= bb
-		}
-	}
-
-	return out[:]
-}
-
-// Equal returns 1 if v and u are equal, and 0 otherwise.
-func (v *Element) Equal(u *Element) int {
-	sa, sv := u.Bytes(), v.Bytes()
-	return subtle.ConstantTimeCompare(sa, sv)
-}
-
-// mask64Bits returns 0xffffffffffffffff if cond is 1, and 0 otherwise.
-func mask64Bits(cond int) uint64 { return ^(uint64(cond) - 1) }
-
-// Select sets v to a if cond == 1, and to b if cond == 0.
-func (v *Element) Select(a, b *Element, cond int) *Element {
-	m := mask64Bits(cond)
-	v.l0 = (m & a.l0) | (^m & b.l0)
-	v.l1 = (m & a.l1) | (^m & b.l1)
-	v.l2 = (m & a.l2) | (^m & b.l2)
-	v.l3 = (m & a.l3) | (^m & b.l3)
-	v.l4 = (m & a.l4) | (^m & b.l4)
-	return v
-}
-
-// Swap swaps v and u if cond == 1 or leaves them unchanged if cond == 0, and returns v.
-func (v *Element) Swap(u *Element, cond int) {
-	m := mask64Bits(cond)
-	t := m & (v.l0 ^ u.l0)
-	v.l0 ^= t
-	u.l0 ^= t
-	t = m & (v.l1 ^ u.l1)
-	v.l1 ^= t
-	u.l1 ^= t
-	t = m & (v.l2 ^ u.l2)
-	v.l2 ^= t
-	u.l2 ^= t
-	t = m & (v.l3 ^ u.l3)
-	v.l3 ^= t
-	u.l3 ^= t
-	t = m & (v.l4 ^ u.l4)
-	v.l4 ^= t
-	u.l4 ^= t
-}
-
-// IsNegative returns 1 if v is negative, and 0 otherwise.
-func (v *Element) IsNegative() int {
-	return int(v.Bytes()[0] & 1)
-}
-
-// Absolute sets v to |u|, and returns v.
-func (v *Element) Absolute(u *Element) *Element {
-	return v.Select(new(Element).Negate(u), u, u.IsNegative())
-}
-
-// Multiply sets v = x * y, and returns v.
-func (v *Element) Multiply(x, y *Element) *Element {
-	feMul(v, x, y)
-	return v
-}
-
-// Square sets v = x * x, and returns v.
-func (v *Element) Square(x *Element) *Element {
-	feSquare(v, x)
-	return v
-}
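The Element API deleted above is receiver-based: each method writes its result into the receiver and returns it, so calls chain naturally and arguments may alias. A minimal sketch of that calling style (note `field` is an internal package, so this would only compile from inside the x/crypto module; it is shown purely for illustration):

```go
package main

import (
	"fmt"

	// Internal package: importable only within x/crypto itself.
	"golang.org/x/crypto/curve25519/internal/field"
)

func main() {
	var two, four field.Element
	two.One()           // two = 1
	two.Add(&two, &two) // aliasing is allowed: two = 1 + 1
	four.Square(&two)   // four = two * two, via feSquare
	fmt.Printf("%x\n", four.Bytes()) // canonical 32-byte little-endian encoding
}
```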
-// Mult32 sets v = x * y, and returns v.
-func (v *Element) Mult32(x *Element, y uint32) *Element {
-	x0lo, x0hi := mul51(x.l0, y)
-	x1lo, x1hi := mul51(x.l1, y)
-	x2lo, x2hi := mul51(x.l2, y)
-	x3lo, x3hi := mul51(x.l3, y)
-	x4lo, x4hi := mul51(x.l4, y)
-	v.l0 = x0lo + 19*x4hi // carried over per the reduction identity
-	v.l1 = x1lo + x0hi
-	v.l2 = x2lo + x1hi
-	v.l3 = x3lo + x2hi
-	v.l4 = x4lo + x3hi
-	// The hi portions are going to be only 32 bits, plus any previous excess,
-	// so we can skip the carry propagation.
-	return v
-}
-
-// mul51 returns lo + hi * 2⁵¹ = a * b.
-func mul51(a uint64, b uint32) (lo uint64, hi uint64) {
-	mh, ml := bits.Mul64(a, uint64(b))
-	lo = ml & maskLow51Bits
-	hi = (mh << 13) | (ml >> 51)
-	return
-}
-
-// Pow22523 set v = x^((p-5)/8), and returns v. (p-5)/8 is 2^252-3.
-func (v *Element) Pow22523(x *Element) *Element {
-	var t0, t1, t2 Element
-
-	t0.Square(x)             // x^2
-	t1.Square(&t0)           // x^4
-	t1.Square(&t1)           // x^8
-	t1.Multiply(x, &t1)      // x^9
-	t0.Multiply(&t0, &t1)    // x^11
-	t0.Square(&t0)           // x^22
-	t0.Multiply(&t1, &t0)    // x^31
-	t1.Square(&t0)           // x^62
-	for i := 1; i < 5; i++ { // x^992
-		t1.Square(&t1)
-	}
-	t0.Multiply(&t1, &t0)     // x^1023 -> 1023 = 2^10 - 1
-	t1.Square(&t0)            // 2^11 - 2
-	for i := 1; i < 10; i++ { // 2^20 - 2^10
-		t1.Square(&t1)
-	}
-	t1.Multiply(&t1, &t0)     // 2^20 - 1
-	t2.Square(&t1)            // 2^21 - 2
-	for i := 1; i < 20; i++ { // 2^40 - 2^20
-		t2.Square(&t2)
-	}
-	t1.Multiply(&t2, &t1)     // 2^40 - 1
-	t1.Square(&t1)            // 2^41 - 2
-	for i := 1; i < 10; i++ { // 2^50 - 2^10
-		t1.Square(&t1)
-	}
-	t0.Multiply(&t1, &t0)     // 2^50 - 1
-	t1.Square(&t0)            // 2^51 - 2
-	for i := 1; i < 50; i++ { // 2^100 - 2^50
-		t1.Square(&t1)
-	}
-	t1.Multiply(&t1, &t0)      // 2^100 - 1
-	t2.Square(&t1)             // 2^101 - 2
-	for i := 1; i < 100; i++ { // 2^200 - 2^100
-		t2.Square(&t2)
-	}
-	t1.Multiply(&t2, &t1)     // 2^200 - 1
-	t1.Square(&t1)            // 2^201 - 2
-	for i := 1; i < 50; i++ { // 2^250 - 2^50
-		t1.Square(&t1)
-	}
-	t0.Multiply(&t1, &t0)     // 2^250 - 1
-	t0.Square(&t0)            // 2^251 - 2
-	t0.Square(&t0)            // 2^252 - 4
-	return v.Multiply(&t0, x) // 2^252 - 3 -> x^(2^252-3)
-}
-
-// sqrtM1 is 2^((p-1)/4), which squared is equal to -1 by Euler's Criterion.
-var sqrtM1 = &Element{1718705420411056, 234908883556509,
-	2233514472574048, 2117202627021982, 765476049583133}
-
-// SqrtRatio sets r to the non-negative square root of the ratio of u and v.
-//
-// If u/v is square, SqrtRatio returns r and 1. If u/v is not square, SqrtRatio
-// sets r according to Section 4.3 of draft-irtf-cfrg-ristretto255-decaf448-00,
-// and returns r and 0.
-func (r *Element) SqrtRatio(u, v *Element) (rr *Element, wasSquare int) {
-	var a, b Element
-
-	// r = (u * v3) * (u * v7)^((p-5)/8)
-	v2 := a.Square(v)
-	uv3 := b.Multiply(u, b.Multiply(v2, v))
-	uv7 := a.Multiply(uv3, a.Square(v2))
-	r.Multiply(uv3, r.Pow22523(uv7))
-
-	check := a.Multiply(v, a.Square(r)) // check = v * r^2
-
-	uNeg := b.Negate(u)
-	correctSignSqrt := check.Equal(u)
-	flippedSignSqrt := check.Equal(uNeg)
-	flippedSignSqrtI := check.Equal(uNeg.Multiply(uNeg, sqrtM1))
-
-	rPrime := b.Multiply(r, sqrtM1) // r_prime = SQRT_M1 * r
-	// r = CT_SELECT(r_prime IF flipped_sign_sqrt | flipped_sign_sqrt_i ELSE r)
-	r.Select(rPrime, r, flippedSignSqrt|flippedSignSqrtI)
-
-	r.Absolute(r) // Choose the nonnegative square root.
-	return r, correctSignSqrt | flippedSignSqrt
-}
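For context before the assembly backends below: the public entry point of the deleted curve25519 package is the error-returning X25519 function rather than the deprecated ScalarMult. A minimal sketch of typical usage, based on the API shown in the deleted curve25519.go:

```go
package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/curve25519"
)

func main() {
	// A private scalar is just 32 random bytes; X25519 clamps it internally.
	priv := make([]byte, curve25519.ScalarSize)
	if _, err := rand.Read(priv); err != nil {
		panic(err)
	}

	// Passing Basepoint itself (not a copy) lets the precomputed
	// base-point path be used.
	pub, err := curve25519.X25519(priv, curve25519.Basepoint)
	if err != nil {
		panic(err) // X25519 rejects low-order points; ScalarMult zeroed silently
	}
	fmt.Printf("public key: %x\n", pub)
}
```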
diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go
deleted file mode 100644
index 44dc8e8c..00000000
--- a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Code generated by command: go run fe_amd64_asm.go -out ../fe_amd64.s -stubs ../fe_amd64.go -pkg field. DO NOT EDIT.
-
-// +build amd64,gc,!purego
-
-package field
-
-// feMul sets out = a * b. It works like feMulGeneric.
-//go:noescape
-func feMul(out *Element, a *Element, b *Element)
-
-// feSquare sets out = a * a. It works like feSquareGeneric.
-//go:noescape
-func feSquare(out *Element, a *Element)
diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.s b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.s
deleted file mode 100644
index 293f013c..00000000
--- a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.s
+++ /dev/null
@@ -1,379 +0,0 @@
-// Code generated by command: go run fe_amd64_asm.go -out ../fe_amd64.s -stubs ../fe_amd64.go -pkg field. DO NOT EDIT.
-
-//go:build amd64 && gc && !purego
-// +build amd64,gc,!purego
-
-#include "textflag.h"
-
-// func feMul(out *Element, a *Element, b *Element)
-TEXT ·feMul(SB), NOSPLIT, $0-24
-	MOVQ a+8(FP), CX
-	MOVQ b+16(FP), BX
-
-	// r0 = a0×b0
-	MOVQ (CX), AX
-	MULQ (BX)
-	MOVQ AX, DI
-	MOVQ DX, SI
-
-	// r0 += 19×a1×b4
-	MOVQ   8(CX), AX
-	IMUL3Q $0x13, AX, AX
-	MULQ   32(BX)
-	ADDQ   AX, DI
-	ADCQ   DX, SI
-
-	// r0 += 19×a2×b3
-	MOVQ   16(CX), AX
-	IMUL3Q $0x13, AX, AX
-	MULQ   24(BX)
-	ADDQ   AX, DI
-	ADCQ   DX, SI
-
-	// r0 += 19×a3×b2
-	MOVQ   24(CX), AX
-	IMUL3Q $0x13, AX, AX
-	MULQ   16(BX)
-	ADDQ   AX, DI
-	ADCQ   DX, SI
-
-	// r0 += 19×a4×b1
-	MOVQ   32(CX), AX
-	IMUL3Q $0x13, AX, AX
-	MULQ   8(BX)
-	ADDQ   AX, DI
-	ADCQ   DX, SI
-
-	// r1 = a0×b1
-	MOVQ (CX), AX
-	MULQ 8(BX)
-	MOVQ AX, R9
-	MOVQ DX, R8
-
-	// r1 += a1×b0
-	MOVQ 8(CX), AX
-	MULQ (BX)
-	ADDQ AX, R9
-	ADCQ DX, R8
-
-	// r1 += 19×a2×b4
-	MOVQ   16(CX), AX
-	IMUL3Q $0x13, AX, AX
-	MULQ   32(BX)
-	ADDQ   AX, R9
-	ADCQ   DX, R8
-
-	// r1 += 19×a3×b3
-	MOVQ   24(CX), AX
-	IMUL3Q $0x13, AX, AX
-	MULQ   24(BX)
-	ADDQ   AX, R9
-	ADCQ   DX, R8
-
-	// r1 += 19×a4×b2
-	MOVQ   32(CX), AX
-	IMUL3Q $0x13, AX, AX
-	MULQ   16(BX)
-	ADDQ   AX, R9
-	ADCQ   DX, R8
-
-	// r2 = a0×b2
-	MOVQ (CX), AX
-	MULQ 16(BX)
-	MOVQ AX, R11
-	MOVQ DX, R10
-
-	// r2 += a1×b1
-	MOVQ 8(CX), AX
-	MULQ 8(BX)
-	ADDQ AX, R11
-	ADCQ DX, R10
-
-	// r2 += a2×b0
-	MOVQ 16(CX), AX
-	MULQ (BX)
-	ADDQ AX, R11
-	ADCQ DX, R10
-
-	// r2 += 19×a3×b4
-	MOVQ   24(CX), AX
-	IMUL3Q $0x13, AX, AX
-	MULQ   32(BX)
-	ADDQ   AX, R11
-	ADCQ   DX, R10
-
-	// r2 += 19×a4×b3
-	MOVQ   32(CX), AX
-	IMUL3Q $0x13, AX, AX
-	MULQ   24(BX)
-	ADDQ   AX, R11
-	ADCQ   DX, R10
-
-	// r3 = a0×b3
-	MOVQ (CX), AX
-	MULQ 24(BX)
-	MOVQ AX, R13
-	MOVQ DX, R12
-
-	// r3 += a1×b2
-	MOVQ 8(CX), AX
-	MULQ 16(BX)
-	ADDQ AX, R13
-	ADCQ DX, R12
-
-	// r3 += a2×b1
-	MOVQ 16(CX), AX
-	MULQ 8(BX)
-	ADDQ AX, R13
-	ADCQ DX, R12
-
-	// r3 += a3×b0
-	MOVQ 24(CX), AX
-	MULQ (BX)
-	ADDQ AX, R13
-	ADCQ DX, R12
-
-	// r3 += 19×a4×b4
-	MOVQ   32(CX), AX
-	IMUL3Q $0x13, AX, AX
-	MULQ   32(BX)
-	ADDQ   AX, R13
-	ADCQ   DX, R12
-
-	// r4 = a0×b4
-	MOVQ (CX), AX
-	MULQ 32(BX)
-	MOVQ AX, R15
-	MOVQ DX, R14
-
-	// r4 += a1×b3
-	MOVQ 8(CX), AX
-	MULQ 24(BX)
-	ADDQ AX, R15
-	ADCQ DX, R14
-
-	// r4 += a2×b2
-	MOVQ 16(CX), AX
-	MULQ 16(BX)
-	ADDQ AX, R15
-	ADCQ DX, R14
-
-	// r4 += a3×b1
-	MOVQ 24(CX), AX
-	MULQ 8(BX)
-	ADDQ AX, R15
-	ADCQ DX, R14
-
-	// r4 += a4×b0
-	MOVQ 32(CX), AX
-	MULQ (BX)
-	ADDQ AX, R15
-	ADCQ DX, R14
-
-	// First reduction chain
-	MOVQ   $0x0007ffffffffffff, AX
-	SHLQ   $0x0d, DI, SI
-	SHLQ   $0x0d, R9, R8
-	SHLQ   $0x0d, R11, R10
-	SHLQ   $0x0d, R13, R12
-	SHLQ   $0x0d, R15, R14
-	ANDQ   AX, DI
-	IMUL3Q $0x13, R14, R14
-	ADDQ   R14, DI
-	ANDQ   AX, R9
-	ADDQ   SI, R9
-	ANDQ   AX, R11
-	ADDQ   R8, R11
-	ANDQ   AX, R13
-	ADDQ   R10, R13
-	ANDQ   AX, R15
-	ADDQ   R12, R15
-
-	// Second reduction chain (carryPropagate)
-	MOVQ   DI, SI
-	SHRQ   $0x33, SI
-	MOVQ   R9, R8
-	SHRQ   $0x33, R8
-	MOVQ   R11, R10
-	SHRQ   $0x33, R10
-	MOVQ   R13, R12
-	SHRQ   $0x33, R12
-	MOVQ   R15, R14
-	SHRQ   $0x33, R14
-	ANDQ   AX, DI
-	IMUL3Q $0x13, R14, R14
-	ADDQ   R14, DI
-	ANDQ   AX, R9
-	ADDQ   SI, R9
-	ANDQ   AX, R11
-	ADDQ   R8, R11
-	ANDQ   AX, R13
-	ADDQ   R10, R13
-	ANDQ   AX, R15
-	ADDQ   R12, R15
-
-	// Store output
-	MOVQ out+0(FP), AX
-	MOVQ DI, (AX)
-	MOVQ R9, 8(AX)
-	MOVQ R11, 16(AX)
-	MOVQ R13, 24(AX)
-	MOVQ R15, 32(AX)
-	RET
-
-// func feSquare(out *Element, a *Element)
-TEXT ·feSquare(SB), NOSPLIT, $0-16
-	MOVQ a+8(FP), CX
-
-	// r0 = l0×l0
-	MOVQ (CX), AX
-	MULQ (CX)
-	MOVQ AX, SI
-	MOVQ DX, BX
-
-	// r0 += 38×l1×l4
-	MOVQ   8(CX), AX
-	IMUL3Q $0x26, AX, AX
-	MULQ   32(CX)
-	ADDQ   AX, SI
-	ADCQ   DX, BX
-
-	// r0 += 38×l2×l3
-	MOVQ   16(CX), AX
-	IMUL3Q $0x26, AX, AX
-	MULQ   24(CX)
-	ADDQ   AX, SI
-	ADCQ   DX, BX
-
-	// r1 = 2×l0×l1
-	MOVQ (CX), AX
-	SHLQ $0x01, AX
-	MULQ 8(CX)
-	MOVQ AX, R8
-	MOVQ DX, DI
-
-	// r1 += 38×l2×l4
-	MOVQ   16(CX), AX
-	IMUL3Q $0x26, AX, AX
-	MULQ   32(CX)
-	ADDQ   AX, R8
-	ADCQ   DX, DI
-
-	// r1 += 19×l3×l3
-	MOVQ   24(CX), AX
-	IMUL3Q $0x13, AX, AX
-	MULQ   24(CX)
-	ADDQ   AX, R8
-	ADCQ   DX, DI
-
-	// r2 = 2×l0×l2
-	MOVQ (CX), AX
-	SHLQ $0x01, AX
-	MULQ 16(CX)
-	MOVQ AX, R10
-	MOVQ DX, R9
-
-	// r2 += l1×l1
-	MOVQ 8(CX), AX
-	MULQ 8(CX)
-	ADDQ AX, R10
-	ADCQ DX, R9
-
-	// r2 += 38×l3×l4
-	MOVQ   24(CX), AX
-	IMUL3Q $0x26, AX, AX
-	MULQ   32(CX)
-	ADDQ   AX, R10
-	ADCQ   DX, R9
-
-	// r3 = 2×l0×l3
-	MOVQ (CX), AX
-	SHLQ $0x01, AX
-	MULQ 24(CX)
-	MOVQ AX, R12
-	MOVQ DX, R11
-
-	// r3 += 2×l1×l2
-	MOVQ   8(CX), AX
-	IMUL3Q $0x02, AX, AX
-	MULQ   16(CX)
-	ADDQ   AX, R12
-	ADCQ   DX, R11
-
-	// r3 += 19×l4×l4
-	MOVQ   32(CX), AX
-	IMUL3Q $0x13, AX, AX
-	MULQ   32(CX)
-	ADDQ   AX, R12
-	ADCQ   DX, R11
-
-	// r4 = 2×l0×l4
-	MOVQ (CX), AX
-	SHLQ $0x01, AX
-	MULQ 32(CX)
-	MOVQ AX, R14
-	MOVQ DX, R13
-
-	// r4 += 2×l1×l3
-	MOVQ   8(CX), AX
-	IMUL3Q $0x02, AX, AX
-	MULQ   24(CX)
-	ADDQ   AX, R14
-	ADCQ   DX, R13
-
-	// r4 += l2×l2
-	MOVQ 16(CX), AX
-	MULQ 16(CX)
-	ADDQ AX, R14
-	ADCQ DX, R13
-
-	// First reduction chain
-	MOVQ   $0x0007ffffffffffff, AX
-	SHLQ   $0x0d, SI, BX
-	SHLQ   $0x0d, R8, DI
-	SHLQ   $0x0d, R10, R9
-	SHLQ   $0x0d, R12, R11
-	SHLQ   $0x0d, R14, R13
-	ANDQ   AX, SI
-	IMUL3Q $0x13, R13, R13
-	ADDQ   R13, SI
-	ANDQ   AX, R8
-	ADDQ   BX, R8
-	ANDQ   AX, R10
-	ADDQ   DI, R10
-	ANDQ   AX, R12
-	ADDQ   R9, R12
-	ANDQ   AX, R14
-	ADDQ   R11, R14
-
-	// Second reduction chain (carryPropagate)
-	MOVQ   SI, BX
-	SHRQ   $0x33, BX
-	MOVQ   R8, DI
-	SHRQ   $0x33, DI
-	MOVQ   R10, R9
-	SHRQ   $0x33, R9
-	MOVQ   R12, R11
-	SHRQ   $0x33, R11
-	MOVQ   R14, R13
-	SHRQ   $0x33, R13
-	ANDQ   AX, SI
-	IMUL3Q $0x13, R13, R13
-	ADDQ   R13, SI
-	ANDQ   AX, R8
-	ADDQ   BX, R8
-	ANDQ   AX, R10
-	ADDQ   DI, R10
-	ANDQ   AX, R12
-	ADDQ   R9, R12
-	ANDQ   AX, R14
-	ADDQ   R11, R14
-
-	// Store output
-	MOVQ out+0(FP), AX
-	MOVQ SI, (AX)
-	MOVQ R8, 8(AX)
-	MOVQ R10, 16(AX)
-	MOVQ R12, 24(AX)
-	MOVQ R14, 32(AX)
-	RET
diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64_noasm.go b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64_noasm.go deleted file mode 100644 index
ddb6c9b8..00000000 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64_noasm.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright (c) 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !amd64 || !gc || purego -// +build !amd64 !gc purego - -package field - -func feMul(v, x, y *Element) { feMulGeneric(v, x, y) } - -func feSquare(v, x *Element) { feSquareGeneric(v, x) } diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.go b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.go deleted file mode 100644 index af459ef5..00000000 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright (c) 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build arm64 && gc && !purego -// +build arm64,gc,!purego - -package field - -//go:noescape -func carryPropagate(v *Element) - -func (v *Element) carryPropagate() *Element { - carryPropagate(v) - return v -} diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.s b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.s deleted file mode 100644 index 5c91e458..00000000 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.s +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright (c) 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build arm64 && gc && !purego -// +build arm64,gc,!purego - -#include "textflag.h" - -// carryPropagate works exactly like carryPropagateGeneric and uses the -// same AND, ADD, and LSR+MADD instructions emitted by the compiler, but -// avoids loading R0-R4 twice and uses LDP and STP. -// -// See https://golang.org/issues/43145 for the main compiler issue. -// -// func carryPropagate(v *Element) -TEXT ·carryPropagate(SB),NOFRAME|NOSPLIT,$0-8 - MOVD v+0(FP), R20 - - LDP 0(R20), (R0, R1) - LDP 16(R20), (R2, R3) - MOVD 32(R20), R4 - - AND $0x7ffffffffffff, R0, R10 - AND $0x7ffffffffffff, R1, R11 - AND $0x7ffffffffffff, R2, R12 - AND $0x7ffffffffffff, R3, R13 - AND $0x7ffffffffffff, R4, R14 - - ADD R0>>51, R11, R11 - ADD R1>>51, R12, R12 - ADD R2>>51, R13, R13 - ADD R3>>51, R14, R14 - // R4>>51 * 19 + R10 -> R10 - LSR $51, R4, R21 - MOVD $19, R22 - MADD R22, R10, R21, R10 - - STP (R10, R11), 0(R20) - STP (R12, R13), 16(R20) - MOVD R14, 32(R20) - - RET diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64_noasm.go b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64_noasm.go deleted file mode 100644 index 234a5b2e..00000000 --- a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64_noasm.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright (c) 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
-
-//go:build !arm64 || !gc || purego
-// +build !arm64 !gc purego
-
-package field
-
-func (v *Element) carryPropagate() *Element {
-	return v.carryPropagateGeneric()
-}
diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_generic.go b/vendor/golang.org/x/crypto/curve25519/internal/field/fe_generic.go
deleted file mode 100644
index 7b5b78cb..00000000
--- a/vendor/golang.org/x/crypto/curve25519/internal/field/fe_generic.go
+++ /dev/null
@@ -1,264 +0,0 @@
-// Copyright (c) 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package field
-
-import "math/bits"
-
-// uint128 holds a 128-bit number as two 64-bit limbs, for use with the
-// bits.Mul64 and bits.Add64 intrinsics.
-type uint128 struct {
-	lo, hi uint64
-}
-
-// mul64 returns a * b.
-func mul64(a, b uint64) uint128 {
-	hi, lo := bits.Mul64(a, b)
-	return uint128{lo, hi}
-}
-
-// addMul64 returns v + a * b.
-func addMul64(v uint128, a, b uint64) uint128 {
-	hi, lo := bits.Mul64(a, b)
-	lo, c := bits.Add64(lo, v.lo, 0)
-	hi, _ = bits.Add64(hi, v.hi, c)
-	return uint128{lo, hi}
-}
-
-// shiftRightBy51 returns a >> 51. a is assumed to be at most 115 bits.
-func shiftRightBy51(a uint128) uint64 {
-	return (a.hi << (64 - 51)) | (a.lo >> 51)
-}
-
-func feMulGeneric(v, a, b *Element) {
-	a0 := a.l0
-	a1 := a.l1
-	a2 := a.l2
-	a3 := a.l3
-	a4 := a.l4
-
-	b0 := b.l0
-	b1 := b.l1
-	b2 := b.l2
-	b3 := b.l3
-	b4 := b.l4
-
-	// Limb multiplication works like pen-and-paper columnar multiplication, but
-	// with 51-bit limbs instead of digits.
-	//
-	//                          a4   a3   a2   a1   a0  x
-	//                          b4   b3   b2   b1   b0  =
-	//                         ------------------------
-	//                        a4b0 a3b0 a2b0 a1b0 a0b0  +
-	//                   a4b1 a3b1 a2b1 a1b1 a0b1       +
-	//              a4b2 a3b2 a2b2 a1b2 a0b2            +
-	//         a4b3 a3b3 a2b3 a1b3 a0b3                 +
-	//    a4b4 a3b4 a2b4 a1b4 a0b4                      =
-	//   ----------------------------------------------
-	//      r8   r7   r6   r5   r4   r3   r2   r1   r0
-	//
-	// We can then use the reduction identity (a * 2²⁵⁵ + b = a * 19 + b) to
-	// reduce the limbs that would overflow 255 bits. r5 * 2²⁵⁵ becomes 19 * r5,
-	// r6 * 2³⁰⁶ becomes 19 * r6 * 2⁵¹, etc.
-	//
-	// Reduction can be carried out simultaneously to multiplication. For
-	// example, we do not compute r5: whenever the result of a multiplication
-	// belongs to r5, like a1b4, we multiply it by 19 and add the result to r0.
-	//
-	//            a4b0    a3b0    a2b0    a1b0    a0b0  +
-	//            a3b1    a2b1    a1b1    a0b1 19×a4b1  +
-	//            a2b2    a1b2    a0b2 19×a4b2 19×a3b2  +
-	//            a1b3    a0b3 19×a4b3 19×a3b3 19×a2b3  +
-	//            a0b4 19×a4b4 19×a3b4 19×a2b4 19×a1b4  =
-	//           --------------------------------------
-	//              r4      r3      r2      r1      r0
-	//
-	// Finally we add up the columns into wide, overlapping limbs.
-
-	a1_19 := a1 * 19
-	a2_19 := a2 * 19
-	a3_19 := a3 * 19
-	a4_19 := a4 * 19
-
-	// r0 = a0×b0 + 19×(a1×b4 + a2×b3 + a3×b2 + a4×b1)
-	r0 := mul64(a0, b0)
-	r0 = addMul64(r0, a1_19, b4)
-	r0 = addMul64(r0, a2_19, b3)
-	r0 = addMul64(r0, a3_19, b2)
-	r0 = addMul64(r0, a4_19, b1)
-
-	// r1 = a0×b1 + a1×b0 + 19×(a2×b4 + a3×b3 + a4×b2)
-	r1 := mul64(a0, b1)
-	r1 = addMul64(r1, a1, b0)
-	r1 = addMul64(r1, a2_19, b4)
-	r1 = addMul64(r1, a3_19, b3)
-	r1 = addMul64(r1, a4_19, b2)
-
-	// r2 = a0×b2 + a1×b1 + a2×b0 + 19×(a3×b4 + a4×b3)
-	r2 := mul64(a0, b2)
-	r2 = addMul64(r2, a1, b1)
-	r2 = addMul64(r2, a2, b0)
-	r2 = addMul64(r2, a3_19, b4)
-	r2 = addMul64(r2, a4_19, b3)
-
-	// r3 = a0×b3 + a1×b2 + a2×b1 + a3×b0 + 19×a4×b4
-	r3 := mul64(a0, b3)
-	r3 = addMul64(r3, a1, b2)
-	r3 = addMul64(r3, a2, b1)
-	r3 = addMul64(r3, a3, b0)
-	r3 = addMul64(r3, a4_19, b4)
-
-	// r4 = a0×b4 + a1×b3 + a2×b2 + a3×b1 + a4×b0
-	r4 := mul64(a0, b4)
-	r4 = addMul64(r4, a1, b3)
-	r4 = addMul64(r4, a2, b2)
-	r4 = addMul64(r4, a3, b1)
-	r4 = addMul64(r4, a4, b0)
-
-	// After the multiplication, we need to reduce (carry) the five coefficients
-	// to obtain a result with limbs that are at most slightly larger than 2⁵¹,
-	// to respect the Element invariant.
-	//
-	// Overall, the reduction works the same as carryPropagate, except with
-	// wider inputs: we take the carry for each coefficient by shifting it right
-	// by 51, and add it to the limb above it. The top carry is multiplied by 19
-	// according to the reduction identity and added to the lowest limb.
-	//
-	// The largest coefficient (r0) will be at most 111 bits, which guarantees
-	// that all carries are at most 111 - 51 = 60 bits, which fits in a uint64.
-	//
-	// r0 = a0×b0 + 19×(a1×b4 + a2×b3 + a3×b2 + a4×b1)
-	// r0 < 2⁵²×2⁵² + 19×(2⁵²×2⁵² + 2⁵²×2⁵² + 2⁵²×2⁵² + 2⁵²×2⁵²)
-	// r0 < (1 + 19 × 4) × 2⁵² × 2⁵²
-	// r0 < 2⁷ × 2⁵² × 2⁵²
-	// r0 < 2¹¹¹
-	//
-	// Moreover, the top coefficient (r4) is at most 107 bits, so c4 is at most
-	// 56 bits, and c4 * 19 is at most 61 bits, which again fits in a uint64 and
-	// allows us to easily apply the reduction identity.
-	//
-	// r4 = a0×b4 + a1×b3 + a2×b2 + a3×b1 + a4×b0
-	// r4 < 5 × 2⁵² × 2⁵²
-	// r4 < 2¹⁰⁷
-	//
-
-	c0 := shiftRightBy51(r0)
-	c1 := shiftRightBy51(r1)
-	c2 := shiftRightBy51(r2)
-	c3 := shiftRightBy51(r3)
-	c4 := shiftRightBy51(r4)
-
-	rr0 := r0.lo&maskLow51Bits + c4*19
-	rr1 := r1.lo&maskLow51Bits + c0
-	rr2 := r2.lo&maskLow51Bits + c1
-	rr3 := r3.lo&maskLow51Bits + c2
-	rr4 := r4.lo&maskLow51Bits + c3
-
-	// Now all coefficients fit into 64-bit registers but are still too large to
-	// be passed around as a Element. We therefore do one last carry chain,
-	// where the carries will be small enough to fit in the wiggle room above 2⁵¹.
-	*v = Element{rr0, rr1, rr2, rr3, rr4}
-	v.carryPropagate()
-}
-
-func feSquareGeneric(v, a *Element) {
-	l0 := a.l0
-	l1 := a.l1
-	l2 := a.l2
-	l3 := a.l3
-	l4 := a.l4
-
-	// Squaring works precisely like multiplication above, but thanks to its
-	// symmetry we get to group a few terms together.
-	//
-	//                          l4   l3   l2   l1   l0  x
-	//                          l4   l3   l2   l1   l0  =
-	//                         ------------------------
-	//                        l4l0 l3l0 l2l0 l1l0 l0l0  +
-	//                   l4l1 l3l1 l2l1 l1l1 l0l1       +
-	//              l4l2 l3l2 l2l2 l1l2 l0l2            +
-	//         l4l3 l3l3 l2l3 l1l3 l0l3                 +
-	//    l4l4 l3l4 l2l4 l1l4 l0l4                      =
-	//   ----------------------------------------------
-	//      r8   r7   r6   r5   r4   r3   r2   r1   r0
-	//
-	//            l4l0    l3l0    l2l0    l1l0    l0l0  +
-	//            l3l1    l2l1    l1l1    l0l1 19×l4l1  +
-	//            l2l2    l1l2    l0l2 19×l4l2 19×l3l2  +
-	//            l1l3    l0l3 19×l4l3 19×l3l3 19×l2l3  +
-	//            l0l4 19×l4l4 19×l3l4 19×l2l4 19×l1l4  =
-	//           --------------------------------------
-	//              r4      r3      r2      r1      r0
-	//
-	// With precomputed 2×, 19×, and 2×19× terms, we can compute each limb with
-	// only three Mul64 and four Add64, instead of five and eight.
-
-	l0_2 := l0 * 2
-	l1_2 := l1 * 2
-
-	l1_38 := l1 * 38
-	l2_38 := l2 * 38
-	l3_38 := l3 * 38
-
-	l3_19 := l3 * 19
-	l4_19 := l4 * 19
-
-	// r0 = l0×l0 + 19×(l1×l4 + l2×l3 + l3×l2 + l4×l1) = l0×l0 + 19×2×(l1×l4 + l2×l3)
-	r0 := mul64(l0, l0)
-	r0 = addMul64(r0, l1_38, l4)
-	r0 = addMul64(r0, l2_38, l3)
-
-	// r1 = l0×l1 + l1×l0 + 19×(l2×l4 + l3×l3 + l4×l2) = 2×l0×l1 + 19×2×l2×l4 + 19×l3×l3
-	r1 := mul64(l0_2, l1)
-	r1 = addMul64(r1, l2_38, l4)
-	r1 = addMul64(r1, l3_19, l3)
-
-	// r2 = l0×l2 + l1×l1 + l2×l0 + 19×(l3×l4 + l4×l3) = 2×l0×l2 + l1×l1 + 19×2×l3×l4
-	r2 := mul64(l0_2, l2)
-	r2 = addMul64(r2, l1, l1)
-	r2 = addMul64(r2, l3_38, l4)
-
-	// r3 = l0×l3 + l1×l2 + l2×l1 + l3×l0 + 19×l4×l4 = 2×l0×l3 + 2×l1×l2 + 19×l4×l4
-	r3 := mul64(l0_2, l3)
-	r3 = addMul64(r3, l1_2, l2)
-	r3 = addMul64(r3, l4_19, l4)
-
-	// r4 = l0×l4 + l1×l3 + l2×l2 + l3×l1 + l4×l0 = 2×l0×l4 + 2×l1×l3 + l2×l2
-	r4 := mul64(l0_2, l4)
-	r4 = addMul64(r4, l1_2, l3)
-	r4 = addMul64(r4, l2, l2)
-
-	c0 := shiftRightBy51(r0)
-	c1 := shiftRightBy51(r1)
-	c2 := shiftRightBy51(r2)
-	c3 := shiftRightBy51(r3)
-	c4 := shiftRightBy51(r4)
-
-	rr0 := r0.lo&maskLow51Bits + c4*19
-	rr1 := r1.lo&maskLow51Bits + c0
-	rr2 := r2.lo&maskLow51Bits + c1
-	rr3 := r3.lo&maskLow51Bits + c2
-	rr4 := r4.lo&maskLow51Bits + c3
-
-	*v = Element{rr0, rr1, rr2, rr3, rr4}
-	v.carryPropagate()
-}
-
-// carryPropagate brings the limbs below 52 bits by applying the reduction
-// identity (a * 2²⁵⁵ + b = a * 19 + b) to the l4 carry. TODO inline
-func (v *Element) carryPropagateGeneric() *Element {
-	c0 := v.l0 >> 51
-	c1 := v.l1 >> 51
-	c2 := v.l2 >> 51
-	c3 := v.l3 >> 51
-	c4 := v.l4 >> 51
-
-	v.l0 = v.l0&maskLow51Bits + c4*19
-	v.l1 = v.l1&maskLow51Bits + c0
-	v.l2 = v.l2&maskLow51Bits + c1
-	v.l3 = v.l3&maskLow51Bits + c2
-	v.l4 = v.l4&maskLow51Bits + c3
-
-	return v
-}
diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/sync.checkpoint b/vendor/golang.org/x/crypto/curve25519/internal/field/sync.checkpoint
deleted file mode 100644
index e3685f95..00000000
--- a/vendor/golang.org/x/crypto/curve25519/internal/field/sync.checkpoint
+++ /dev/null
@@ -1 +0,0 @@
-b0c49ae9f59d233526f8934262c5bbbe14d4358d
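The reduction identity that fe_generic.go leans on throughout, a * 2²⁵⁵ + b = a * 19 + b (mod 2²⁵⁵ - 19), can be sanity-checked independently with math/big; a small standalone check with arbitrary sample values:

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	// p = 2^255 - 19, so 2^255 ≡ 19 (mod p): a carry of a out of the top
	// limb re-enters the bottom limb multiplied by 19.
	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 255), big.NewInt(19))

	a, b := big.NewInt(12345), big.NewInt(67890) // arbitrary sample values

	lhs := new(big.Int).Lsh(a, 255) // a * 2^255
	lhs.Add(lhs, b)
	lhs.Mod(lhs, p)

	rhs := new(big.Int).Mul(a, big.NewInt(19)) // 19 * a
	rhs.Add(rhs, b)
	rhs.Mod(rhs, p)

	fmt.Println(lhs.Cmp(rhs) == 0) // true
}
```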
diff --git a/vendor/golang.org/x/crypto/curve25519/internal/field/sync.sh b/vendor/golang.org/x/crypto/curve25519/internal/field/sync.sh
deleted file mode 100644
index 1ba22a8b..00000000
--- a/vendor/golang.org/x/crypto/curve25519/internal/field/sync.sh
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/bin/bash
-set -euo pipefail
-
-cd "$(git rev-parse --show-toplevel)"
-
-STD_PATH=src/crypto/ed25519/internal/edwards25519/field
-LOCAL_PATH=curve25519/internal/field
-LAST_SYNC_REF=$(cat $LOCAL_PATH/sync.checkpoint)
-
-git fetch https://go.googlesource.com/go master
-
-if git diff --quiet $LAST_SYNC_REF:$STD_PATH FETCH_HEAD:$STD_PATH; then
-	echo "No changes."
-else
-	NEW_REF=$(git rev-parse FETCH_HEAD | tee $LOCAL_PATH/sync.checkpoint)
-	echo "Applying changes from $LAST_SYNC_REF to $NEW_REF..."
-	git diff $LAST_SYNC_REF:$STD_PATH FETCH_HEAD:$STD_PATH | \
-		git apply -3 --directory=$LOCAL_PATH
-fi
diff --git a/vendor/golang.org/x/crypto/ed25519/ed25519.go b/vendor/golang.org/x/crypto/ed25519/ed25519.go
deleted file mode 100644
index 71ad917d..00000000
--- a/vendor/golang.org/x/crypto/ed25519/ed25519.go
+++ /dev/null
@@ -1,223 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// In Go 1.13, the ed25519 package was promoted to the standard library as
-// crypto/ed25519, and this package became a wrapper for the standard library one.
-//
-//go:build !go1.13
-// +build !go1.13
-
-// Package ed25519 implements the Ed25519 signature algorithm. See
-// https://ed25519.cr.yp.to/.
-//
-// These functions are also compatible with the “Ed25519” function defined in
-// RFC 8032. However, unlike RFC 8032's formulation, this package's private key
-// representation includes a public key suffix to make multiple signing
-// operations with the same key more efficient. This package refers to the RFC
-// 8032 private key as the “seed”.
-package ed25519
-
-// This code is a port of the public domain, “ref10” implementation of ed25519
-// from SUPERCOP.
-
-import (
-	"bytes"
-	"crypto"
-	cryptorand "crypto/rand"
-	"crypto/sha512"
-	"errors"
-	"io"
-	"strconv"
-
-	"golang.org/x/crypto/ed25519/internal/edwards25519"
-)
-
-const (
-	// PublicKeySize is the size, in bytes, of public keys as used in this package.
-	PublicKeySize = 32
-	// PrivateKeySize is the size, in bytes, of private keys as used in this package.
-	PrivateKeySize = 64
-	// SignatureSize is the size, in bytes, of signatures generated and verified by this package.
-	SignatureSize = 64
-	// SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032.
-	SeedSize = 32
-)
-
-// PublicKey is the type of Ed25519 public keys.
-type PublicKey []byte
-
-// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer.
-type PrivateKey []byte
-
-// Public returns the PublicKey corresponding to priv.
-func (priv PrivateKey) Public() crypto.PublicKey {
-	publicKey := make([]byte, PublicKeySize)
-	copy(publicKey, priv[32:])
-	return PublicKey(publicKey)
-}
-
-// Seed returns the private key seed corresponding to priv. It is provided for
-// interoperability with RFC 8032. RFC 8032's private keys correspond to seeds
-// in this package.
-func (priv PrivateKey) Seed() []byte {
-	seed := make([]byte, SeedSize)
-	copy(seed, priv[:32])
-	return seed
-}
-
-// Sign signs the given message with priv.
-// Ed25519 performs two passes over messages to be signed and therefore cannot
-// handle pre-hashed messages. Thus opts.HashFunc() must return zero to
-// indicate the message hasn't been hashed. This can be achieved by passing
-// crypto.Hash(0) as the value for opts.
-func (priv PrivateKey) Sign(rand io.Reader, message []byte, opts crypto.SignerOpts) (signature []byte, err error) { - if opts.HashFunc() != crypto.Hash(0) { - return nil, errors.New("ed25519: cannot sign hashed message") - } - - return Sign(priv, message), nil -} - -// GenerateKey generates a public/private key pair using entropy from rand. -// If rand is nil, crypto/rand.Reader will be used. -func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) { - if rand == nil { - rand = cryptorand.Reader - } - - seed := make([]byte, SeedSize) - if _, err := io.ReadFull(rand, seed); err != nil { - return nil, nil, err - } - - privateKey := NewKeyFromSeed(seed) - publicKey := make([]byte, PublicKeySize) - copy(publicKey, privateKey[32:]) - - return publicKey, privateKey, nil -} - -// NewKeyFromSeed calculates a private key from a seed. It will panic if -// len(seed) is not SeedSize. This function is provided for interoperability -// with RFC 8032. RFC 8032's private keys correspond to seeds in this -// package. -func NewKeyFromSeed(seed []byte) PrivateKey { - if l := len(seed); l != SeedSize { - panic("ed25519: bad seed length: " + strconv.Itoa(l)) - } - - digest := sha512.Sum512(seed) - digest[0] &= 248 - digest[31] &= 127 - digest[31] |= 64 - - var A edwards25519.ExtendedGroupElement - var hBytes [32]byte - copy(hBytes[:], digest[:]) - edwards25519.GeScalarMultBase(&A, &hBytes) - var publicKeyBytes [32]byte - A.ToBytes(&publicKeyBytes) - - privateKey := make([]byte, PrivateKeySize) - copy(privateKey, seed) - copy(privateKey[32:], publicKeyBytes[:]) - - return privateKey -} - -// Sign signs the message with privateKey and returns a signature. It will -// panic if len(privateKey) is not PrivateKeySize. -func Sign(privateKey PrivateKey, message []byte) []byte { - if l := len(privateKey); l != PrivateKeySize { - panic("ed25519: bad private key length: " + strconv.Itoa(l)) - } - - h := sha512.New() - h.Write(privateKey[:32]) - - var digest1, messageDigest, hramDigest [64]byte - var expandedSecretKey [32]byte - h.Sum(digest1[:0]) - copy(expandedSecretKey[:], digest1[:]) - expandedSecretKey[0] &= 248 - expandedSecretKey[31] &= 63 - expandedSecretKey[31] |= 64 - - h.Reset() - h.Write(digest1[32:]) - h.Write(message) - h.Sum(messageDigest[:0]) - - var messageDigestReduced [32]byte - edwards25519.ScReduce(&messageDigestReduced, &messageDigest) - var R edwards25519.ExtendedGroupElement - edwards25519.GeScalarMultBase(&R, &messageDigestReduced) - - var encodedR [32]byte - R.ToBytes(&encodedR) - - h.Reset() - h.Write(encodedR[:]) - h.Write(privateKey[32:]) - h.Write(message) - h.Sum(hramDigest[:0]) - var hramDigestReduced [32]byte - edwards25519.ScReduce(&hramDigestReduced, &hramDigest) - - var s [32]byte - edwards25519.ScMulAdd(&s, &hramDigestReduced, &expandedSecretKey, &messageDigestReduced) - - signature := make([]byte, SignatureSize) - copy(signature[:], encodedR[:]) - copy(signature[32:], s[:]) - - return signature -} - -// Verify reports whether sig is a valid signature of message by publicKey. It -// will panic if len(publicKey) is not PublicKeySize. 
-func Verify(publicKey PublicKey, message, sig []byte) bool {
-	if l := len(publicKey); l != PublicKeySize {
-		panic("ed25519: bad public key length: " + strconv.Itoa(l))
-	}
-
-	if len(sig) != SignatureSize || sig[63]&224 != 0 {
-		return false
-	}
-
-	var A edwards25519.ExtendedGroupElement
-	var publicKeyBytes [32]byte
-	copy(publicKeyBytes[:], publicKey)
-	if !A.FromBytes(&publicKeyBytes) {
-		return false
-	}
-	edwards25519.FeNeg(&A.X, &A.X)
-	edwards25519.FeNeg(&A.T, &A.T)
-
-	h := sha512.New()
-	h.Write(sig[:32])
-	h.Write(publicKey[:])
-	h.Write(message)
-	var digest [64]byte
-	h.Sum(digest[:0])
-
-	var hReduced [32]byte
-	edwards25519.ScReduce(&hReduced, &digest)
-
-	var R edwards25519.ProjectiveGroupElement
-	var s [32]byte
-	copy(s[:], sig[32:])
-
-	// https://tools.ietf.org/html/rfc8032#section-5.1.7 requires that s be in
-	// the range [0, order) in order to prevent signature malleability.
-	if !edwards25519.ScMinimal(&s) {
-		return false
-	}
-
-	edwards25519.GeDoubleScalarMultVartime(&R, &hReduced, &A, &s)
-
-	var checkR [32]byte
-	R.ToBytes(&checkR)
-	return bytes.Equal(sig[:32], checkR[:])
-}
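The two deleted files expose one API surface: the pre-Go 1.13 ref10 port above, and the thin crypto/ed25519 wrapper below. For reference, a sign/verify roundtrip against that shared surface (standard library only, nothing patch-specific):

```go
package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"fmt"
)

func main() {
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}

	msg := []byte("candy")
	sig := ed25519.Sign(priv, msg) // 64 bytes: R || S

	fmt.Println(ed25519.Verify(pub, msg, sig))                 // true
	fmt.Println(ed25519.Verify(pub, []byte("tampered"), sig))  // false
}
```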
diff --git a/vendor/golang.org/x/crypto/ed25519/ed25519_go113.go b/vendor/golang.org/x/crypto/ed25519/ed25519_go113.go
deleted file mode 100644
index b5974dc8..00000000
--- a/vendor/golang.org/x/crypto/ed25519/ed25519_go113.go
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.13
-// +build go1.13
-
-// Package ed25519 implements the Ed25519 signature algorithm. See
-// https://ed25519.cr.yp.to/.
-//
-// These functions are also compatible with the “Ed25519” function defined in
-// RFC 8032. However, unlike RFC 8032's formulation, this package's private key
-// representation includes a public key suffix to make multiple signing
-// operations with the same key more efficient. This package refers to the RFC
-// 8032 private key as the “seed”.
-//
-// Beginning with Go 1.13, the functionality of this package was moved to the
-// standard library as crypto/ed25519. This package only acts as a compatibility
-// wrapper.
-package ed25519
-
-import (
-	"crypto/ed25519"
-	"io"
-)
-
-const (
-	// PublicKeySize is the size, in bytes, of public keys as used in this package.
-	PublicKeySize = 32
-	// PrivateKeySize is the size, in bytes, of private keys as used in this package.
-	PrivateKeySize = 64
-	// SignatureSize is the size, in bytes, of signatures generated and verified by this package.
-	SignatureSize = 64
-	// SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032.
-	SeedSize = 32
-)
-
-// PublicKey is the type of Ed25519 public keys.
-//
-// This type is an alias for crypto/ed25519's PublicKey type.
-// See the crypto/ed25519 package for the methods on this type.
-type PublicKey = ed25519.PublicKey
-
-// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer.
-//
-// This type is an alias for crypto/ed25519's PrivateKey type.
-// See the crypto/ed25519 package for the methods on this type.
-type PrivateKey = ed25519.PrivateKey
-
-// GenerateKey generates a public/private key pair using entropy from rand.
-// If rand is nil, crypto/rand.Reader will be used.
-func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) {
-	return ed25519.GenerateKey(rand)
-}
-
-// NewKeyFromSeed calculates a private key from a seed. It will panic if
-// len(seed) is not SeedSize. This function is provided for interoperability
-// with RFC 8032. RFC 8032's private keys correspond to seeds in this
-// package.
-func NewKeyFromSeed(seed []byte) PrivateKey {
-	return ed25519.NewKeyFromSeed(seed)
-}
-
-// Sign signs the message with privateKey and returns a signature. It will
-// panic if len(privateKey) is not PrivateKeySize.
-func Sign(privateKey PrivateKey, message []byte) []byte {
-	return ed25519.Sign(privateKey, message)
-}
-
-// Verify reports whether sig is a valid signature of message by publicKey. It
-// will panic if len(publicKey) is not PublicKeySize.
-func Verify(publicKey PublicKey, message, sig []byte) bool {
-	return ed25519.Verify(publicKey, message, sig)
-}
diff --git a/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go b/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go
deleted file mode 100644
index e39f086c..00000000
--- a/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go
+++ /dev/null
@@ -1,1422 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package edwards25519
-
-// These values are from the public domain, “ref10” implementation of ed25519
-// from SUPERCOP.
-
-// d is a constant in the Edwards curve equation.
-var d = FieldElement{
-	-10913610, 13857413, -15372611, 6949391, 114729, -8787816, -6275908, -3247719, -18696448, -12055116,
-}
-
-// d2 is 2*d.
-var d2 = FieldElement{
-	-21827239, -5839606, -30745221, 13898782, 229458, 15978800, -12551817, -6495438, 29715968, 9444199,
-}
-
-// SqrtM1 is the square-root of -1 in the field.
-var SqrtM1 = FieldElement{
-	-32595792, -7943725, 9377950, 3500415, 12389472, -272473, -25146209, -2005654, 326686, 11406482,
-}
-
-// A is a constant in the Montgomery-form of curve25519.
-var A = FieldElement{
-	486662, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-}
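Context for the large tables that follow (my gloss, not in the patch): in the ref10 design, bi holds small odd multiples of the base point for variable-base double-scalar verification, while base feeds fixed-base scalar multiplication, which rewrites the scalar as 64 signed radix-16 digits and performs one table lookup per digit. A sketch of that recoding step, assuming the ref10 digit convention:

```go
package main

import "fmt"

// signedRadix16 rewrites a 32-byte little-endian scalar as 64 digits
// e[i] in [-8, 8) with sum(e[i] * 16^i) equal to the scalar; this is
// the form ref10-style fixed-base multiplication feeds to its tables.
func signedRadix16(a [32]byte) [64]int8 {
	var e [64]int8
	for i, b := range a {
		e[2*i] = int8(b & 15)        // low nibble
		e[2*i+1] = int8((b >> 4) & 15) // high nibble
	}
	// Carry so every digit lands in [-8, 8).
	carry := int8(0)
	for i := 0; i < 63; i++ {
		e[i] += carry
		carry = (e[i] + 8) >> 4
		e[i] -= carry << 4
	}
	e[63] += carry
	return e
}

func main() {
	var s [32]byte
	s[0] = 200 // 0xC8: nibbles 8, 12 -> recoded digits -8, -3, 1
	e := signedRadix16(s)
	fmt.Println(e[:4]) // [-8 -3 1 0]; -8 + (-3)*16 + 1*256 = 200
}
```

-
-// bi contains precomputed multiples of the base-point. See the Ed25519 paper
-// for a discussion about how these values are used.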
-var bi = [8]PreComputedGroupElement{ - { - FieldElement{25967493, -14356035, 29566456, 3660896, -12694345, 4014787, 27544626, -11754271, -6079156, 2047605}, - FieldElement{-12545711, 934262, -2722910, 3049990, -727428, 9406986, 12720692, 5043384, 19500929, -15469378}, - FieldElement{-8738181, 4489570, 9688441, -14785194, 10184609, -12363380, 29287919, 11864899, -24514362, -4438546}, - }, - { - FieldElement{15636291, -9688557, 24204773, -7912398, 616977, -16685262, 27787600, -14772189, 28944400, -1550024}, - FieldElement{16568933, 4717097, -11556148, -1102322, 15682896, -11807043, 16354577, -11775962, 7689662, 11199574}, - FieldElement{30464156, -5976125, -11779434, -15670865, 23220365, 15915852, 7512774, 10017326, -17749093, -9920357}, - }, - { - FieldElement{10861363, 11473154, 27284546, 1981175, -30064349, 12577861, 32867885, 14515107, -15438304, 10819380}, - FieldElement{4708026, 6336745, 20377586, 9066809, -11272109, 6594696, -25653668, 12483688, -12668491, 5581306}, - FieldElement{19563160, 16186464, -29386857, 4097519, 10237984, -4348115, 28542350, 13850243, -23678021, -15815942}, - }, - { - FieldElement{5153746, 9909285, 1723747, -2777874, 30523605, 5516873, 19480852, 5230134, -23952439, -15175766}, - FieldElement{-30269007, -3463509, 7665486, 10083793, 28475525, 1649722, 20654025, 16520125, 30598449, 7715701}, - FieldElement{28881845, 14381568, 9657904, 3680757, -20181635, 7843316, -31400660, 1370708, 29794553, -1409300}, - }, - { - FieldElement{-22518993, -6692182, 14201702, -8745502, -23510406, 8844726, 18474211, -1361450, -13062696, 13821877}, - FieldElement{-6455177, -7839871, 3374702, -4740862, -27098617, -10571707, 31655028, -7212327, 18853322, -14220951}, - FieldElement{4566830, -12963868, -28974889, -12240689, -7602672, -2830569, -8514358, -10431137, 2207753, -3209784}, - }, - { - FieldElement{-25154831, -4185821, 29681144, 7868801, -6854661, -9423865, -12437364, -663000, -31111463, -16132436}, - FieldElement{25576264, -2703214, 7349804, -11814844, 16472782, 9300885, 3844789, 15725684, 171356, 6466918}, - FieldElement{23103977, 13316479, 9739013, -16149481, 817875, -15038942, 8965339, -14088058, -30714912, 16193877}, - }, - { - FieldElement{-33521811, 3180713, -2394130, 14003687, -16903474, -16270840, 17238398, 4729455, -18074513, 9256800}, - FieldElement{-25182317, -4174131, 32336398, 5036987, -21236817, 11360617, 22616405, 9761698, -19827198, 630305}, - FieldElement{-13720693, 2639453, -24237460, -7406481, 9494427, -5774029, -6554551, -15960994, -2449256, -14291300}, - }, - { - FieldElement{-3151181, -5046075, 9282714, 6866145, -31907062, -863023, -18940575, 15033784, 25105118, -7894876}, - FieldElement{-24326370, 15950226, -31801215, -14592823, -11662737, -5090925, 1573892, -2625887, 2198790, -15804619}, - FieldElement{-3099351, 10324967, -2241613, 7453183, -5446979, -2735503, -13812022, -16236442, -32461234, -12290683}, - }, -} - -// base contains precomputed multiples of the base-point. See the Ed25519 paper -// for a discussion about how these values are used. 
-var base = [32][8]PreComputedGroupElement{ - { - { - FieldElement{25967493, -14356035, 29566456, 3660896, -12694345, 4014787, 27544626, -11754271, -6079156, 2047605}, - FieldElement{-12545711, 934262, -2722910, 3049990, -727428, 9406986, 12720692, 5043384, 19500929, -15469378}, - FieldElement{-8738181, 4489570, 9688441, -14785194, 10184609, -12363380, 29287919, 11864899, -24514362, -4438546}, - }, - { - FieldElement{-12815894, -12976347, -21581243, 11784320, -25355658, -2750717, -11717903, -3814571, -358445, -10211303}, - FieldElement{-21703237, 6903825, 27185491, 6451973, -29577724, -9554005, -15616551, 11189268, -26829678, -5319081}, - FieldElement{26966642, 11152617, 32442495, 15396054, 14353839, -12752335, -3128826, -9541118, -15472047, -4166697}, - }, - { - FieldElement{15636291, -9688557, 24204773, -7912398, 616977, -16685262, 27787600, -14772189, 28944400, -1550024}, - FieldElement{16568933, 4717097, -11556148, -1102322, 15682896, -11807043, 16354577, -11775962, 7689662, 11199574}, - FieldElement{30464156, -5976125, -11779434, -15670865, 23220365, 15915852, 7512774, 10017326, -17749093, -9920357}, - }, - { - FieldElement{-17036878, 13921892, 10945806, -6033431, 27105052, -16084379, -28926210, 15006023, 3284568, -6276540}, - FieldElement{23599295, -8306047, -11193664, -7687416, 13236774, 10506355, 7464579, 9656445, 13059162, 10374397}, - FieldElement{7798556, 16710257, 3033922, 2874086, 28997861, 2835604, 32406664, -3839045, -641708, -101325}, - }, - { - FieldElement{10861363, 11473154, 27284546, 1981175, -30064349, 12577861, 32867885, 14515107, -15438304, 10819380}, - FieldElement{4708026, 6336745, 20377586, 9066809, -11272109, 6594696, -25653668, 12483688, -12668491, 5581306}, - FieldElement{19563160, 16186464, -29386857, 4097519, 10237984, -4348115, 28542350, 13850243, -23678021, -15815942}, - }, - { - FieldElement{-15371964, -12862754, 32573250, 4720197, -26436522, 5875511, -19188627, -15224819, -9818940, -12085777}, - FieldElement{-8549212, 109983, 15149363, 2178705, 22900618, 4543417, 3044240, -15689887, 1762328, 14866737}, - FieldElement{-18199695, -15951423, -10473290, 1707278, -17185920, 3916101, -28236412, 3959421, 27914454, 4383652}, - }, - { - FieldElement{5153746, 9909285, 1723747, -2777874, 30523605, 5516873, 19480852, 5230134, -23952439, -15175766}, - FieldElement{-30269007, -3463509, 7665486, 10083793, 28475525, 1649722, 20654025, 16520125, 30598449, 7715701}, - FieldElement{28881845, 14381568, 9657904, 3680757, -20181635, 7843316, -31400660, 1370708, 29794553, -1409300}, - }, - { - FieldElement{14499471, -2729599, -33191113, -4254652, 28494862, 14271267, 30290735, 10876454, -33154098, 2381726}, - FieldElement{-7195431, -2655363, -14730155, 462251, -27724326, 3941372, -6236617, 3696005, -32300832, 15351955}, - FieldElement{27431194, 8222322, 16448760, -3907995, -18707002, 11938355, -32961401, -2970515, 29551813, 10109425}, - }, - }, - { - { - FieldElement{-13657040, -13155431, -31283750, 11777098, 21447386, 6519384, -2378284, -1627556, 10092783, -4764171}, - FieldElement{27939166, 14210322, 4677035, 16277044, -22964462, -12398139, -32508754, 12005538, -17810127, 12803510}, - FieldElement{17228999, -15661624, -1233527, 300140, -1224870, -11714777, 30364213, -9038194, 18016357, 4397660}, - }, - { - FieldElement{-10958843, -7690207, 4776341, -14954238, 27850028, -15602212, -26619106, 14544525, -17477504, 982639}, - FieldElement{29253598, 15796703, -2863982, -9908884, 10057023, 3163536, 7332899, -4120128, -21047696, 9934963}, - FieldElement{5793303, 16271923, -24131614, 
-10116404, 29188560, 1206517, -14747930, 4559895, -30123922, -10897950}, - }, - { - FieldElement{-27643952, -11493006, 16282657, -11036493, 28414021, -15012264, 24191034, 4541697, -13338309, 5500568}, - FieldElement{12650548, -1497113, 9052871, 11355358, -17680037, -8400164, -17430592, 12264343, 10874051, 13524335}, - FieldElement{25556948, -3045990, 714651, 2510400, 23394682, -10415330, 33119038, 5080568, -22528059, 5376628}, - }, - { - FieldElement{-26088264, -4011052, -17013699, -3537628, -6726793, 1920897, -22321305, -9447443, 4535768, 1569007}, - FieldElement{-2255422, 14606630, -21692440, -8039818, 28430649, 8775819, -30494562, 3044290, 31848280, 12543772}, - FieldElement{-22028579, 2943893, -31857513, 6777306, 13784462, -4292203, -27377195, -2062731, 7718482, 14474653}, - }, - { - FieldElement{2385315, 2454213, -22631320, 46603, -4437935, -15680415, 656965, -7236665, 24316168, -5253567}, - FieldElement{13741529, 10911568, -33233417, -8603737, -20177830, -1033297, 33040651, -13424532, -20729456, 8321686}, - FieldElement{21060490, -2212744, 15712757, -4336099, 1639040, 10656336, 23845965, -11874838, -9984458, 608372}, - }, - { - FieldElement{-13672732, -15087586, -10889693, -7557059, -6036909, 11305547, 1123968, -6780577, 27229399, 23887}, - FieldElement{-23244140, -294205, -11744728, 14712571, -29465699, -2029617, 12797024, -6440308, -1633405, 16678954}, - FieldElement{-29500620, 4770662, -16054387, 14001338, 7830047, 9564805, -1508144, -4795045, -17169265, 4904953}, - }, - { - FieldElement{24059557, 14617003, 19037157, -15039908, 19766093, -14906429, 5169211, 16191880, 2128236, -4326833}, - FieldElement{-16981152, 4124966, -8540610, -10653797, 30336522, -14105247, -29806336, 916033, -6882542, -2986532}, - FieldElement{-22630907, 12419372, -7134229, -7473371, -16478904, 16739175, 285431, 2763829, 15736322, 4143876}, - }, - { - FieldElement{2379352, 11839345, -4110402, -5988665, 11274298, 794957, 212801, -14594663, 23527084, -16458268}, - FieldElement{33431127, -11130478, -17838966, -15626900, 8909499, 8376530, -32625340, 4087881, -15188911, -14416214}, - FieldElement{1767683, 7197987, -13205226, -2022635, -13091350, 448826, 5799055, 4357868, -4774191, -16323038}, - }, - }, - { - { - FieldElement{6721966, 13833823, -23523388, -1551314, 26354293, -11863321, 23365147, -3949732, 7390890, 2759800}, - FieldElement{4409041, 2052381, 23373853, 10530217, 7676779, -12885954, 21302353, -4264057, 1244380, -12919645}, - FieldElement{-4421239, 7169619, 4982368, -2957590, 30256825, -2777540, 14086413, 9208236, 15886429, 16489664}, - }, - { - FieldElement{1996075, 10375649, 14346367, 13311202, -6874135, -16438411, -13693198, 398369, -30606455, -712933}, - FieldElement{-25307465, 9795880, -2777414, 14878809, -33531835, 14780363, 13348553, 12076947, -30836462, 5113182}, - FieldElement{-17770784, 11797796, 31950843, 13929123, -25888302, 12288344, -30341101, -7336386, 13847711, 5387222}, - }, - { - FieldElement{-18582163, -3416217, 17824843, -2340966, 22744343, -10442611, 8763061, 3617786, -19600662, 10370991}, - FieldElement{20246567, -14369378, 22358229, -543712, 18507283, -10413996, 14554437, -8746092, 32232924, 16763880}, - FieldElement{9648505, 10094563, 26416693, 14745928, -30374318, -6472621, 11094161, 15689506, 3140038, -16510092}, - }, - { - FieldElement{-16160072, 5472695, 31895588, 4744994, 8823515, 10365685, -27224800, 9448613, -28774454, 366295}, - FieldElement{19153450, 11523972, -11096490, -6503142, -24647631, 5420647, 28344573, 8041113, 719605, 11671788}, - FieldElement{8678025, 2694440, 
-6808014, 2517372, 4964326, 11152271, -15432916, -15266516, 27000813, -10195553}, - }, - { - FieldElement{-15157904, 7134312, 8639287, -2814877, -7235688, 10421742, 564065, 5336097, 6750977, -14521026}, - FieldElement{11836410, -3979488, 26297894, 16080799, 23455045, 15735944, 1695823, -8819122, 8169720, 16220347}, - FieldElement{-18115838, 8653647, 17578566, -6092619, -8025777, -16012763, -11144307, -2627664, -5990708, -14166033}, - }, - { - FieldElement{-23308498, -10968312, 15213228, -10081214, -30853605, -11050004, 27884329, 2847284, 2655861, 1738395}, - FieldElement{-27537433, -14253021, -25336301, -8002780, -9370762, 8129821, 21651608, -3239336, -19087449, -11005278}, - FieldElement{1533110, 3437855, 23735889, 459276, 29970501, 11335377, 26030092, 5821408, 10478196, 8544890}, - }, - { - FieldElement{32173121, -16129311, 24896207, 3921497, 22579056, -3410854, 19270449, 12217473, 17789017, -3395995}, - FieldElement{-30552961, -2228401, -15578829, -10147201, 13243889, 517024, 15479401, -3853233, 30460520, 1052596}, - FieldElement{-11614875, 13323618, 32618793, 8175907, -15230173, 12596687, 27491595, -4612359, 3179268, -9478891}, - }, - { - FieldElement{31947069, -14366651, -4640583, -15339921, -15125977, -6039709, -14756777, -16411740, 19072640, -9511060}, - FieldElement{11685058, 11822410, 3158003, -13952594, 33402194, -4165066, 5977896, -5215017, 473099, 5040608}, - FieldElement{-20290863, 8198642, -27410132, 11602123, 1290375, -2799760, 28326862, 1721092, -19558642, -3131606}, - }, - }, - { - { - FieldElement{7881532, 10687937, 7578723, 7738378, -18951012, -2553952, 21820786, 8076149, -27868496, 11538389}, - FieldElement{-19935666, 3899861, 18283497, -6801568, -15728660, -11249211, 8754525, 7446702, -5676054, 5797016}, - FieldElement{-11295600, -3793569, -15782110, -7964573, 12708869, -8456199, 2014099, -9050574, -2369172, -5877341}, - }, - { - FieldElement{-22472376, -11568741, -27682020, 1146375, 18956691, 16640559, 1192730, -3714199, 15123619, 10811505}, - FieldElement{14352098, -3419715, -18942044, 10822655, 32750596, 4699007, -70363, 15776356, -28886779, -11974553}, - FieldElement{-28241164, -8072475, -4978962, -5315317, 29416931, 1847569, -20654173, -16484855, 4714547, -9600655}, - }, - { - FieldElement{15200332, 8368572, 19679101, 15970074, -31872674, 1959451, 24611599, -4543832, -11745876, 12340220}, - FieldElement{12876937, -10480056, 33134381, 6590940, -6307776, 14872440, 9613953, 8241152, 15370987, 9608631}, - FieldElement{-4143277, -12014408, 8446281, -391603, 4407738, 13629032, -7724868, 15866074, -28210621, -8814099}, - }, - { - FieldElement{26660628, -15677655, 8393734, 358047, -7401291, 992988, -23904233, 858697, 20571223, 8420556}, - FieldElement{14620715, 13067227, -15447274, 8264467, 14106269, 15080814, 33531827, 12516406, -21574435, -12476749}, - FieldElement{236881, 10476226, 57258, -14677024, 6472998, 2466984, 17258519, 7256740, 8791136, 15069930}, - }, - { - FieldElement{1276410, -9371918, 22949635, -16322807, -23493039, -5702186, 14711875, 4874229, -30663140, -2331391}, - FieldElement{5855666, 4990204, -13711848, 7294284, -7804282, 1924647, -1423175, -7912378, -33069337, 9234253}, - FieldElement{20590503, -9018988, 31529744, -7352666, -2706834, 10650548, 31559055, -11609587, 18979186, 13396066}, - }, - { - FieldElement{24474287, 4968103, 22267082, 4407354, 24063882, -8325180, -18816887, 13594782, 33514650, 7021958}, - FieldElement{-11566906, -6565505, -21365085, 15928892, -26158305, 4315421, -25948728, -3916677, -21480480, 12868082}, - FieldElement{-28635013, 
13504661, 19988037, -2132761, 21078225, 6443208, -21446107, 2244500, -12455797, -8089383}, - }, - { - FieldElement{-30595528, 13793479, -5852820, 319136, -25723172, -6263899, 33086546, 8957937, -15233648, 5540521}, - FieldElement{-11630176, -11503902, -8119500, -7643073, 2620056, 1022908, -23710744, -1568984, -16128528, -14962807}, - FieldElement{23152971, 775386, 27395463, 14006635, -9701118, 4649512, 1689819, 892185, -11513277, -15205948}, - }, - { - FieldElement{9770129, 9586738, 26496094, 4324120, 1556511, -3550024, 27453819, 4763127, -19179614, 5867134}, - FieldElement{-32765025, 1927590, 31726409, -4753295, 23962434, -16019500, 27846559, 5931263, -29749703, -16108455}, - FieldElement{27461885, -2977536, 22380810, 1815854, -23033753, -3031938, 7283490, -15148073, -19526700, 7734629}, - }, - }, - { - { - FieldElement{-8010264, -9590817, -11120403, 6196038, 29344158, -13430885, 7585295, -3176626, 18549497, 15302069}, - FieldElement{-32658337, -6171222, -7672793, -11051681, 6258878, 13504381, 10458790, -6418461, -8872242, 8424746}, - FieldElement{24687205, 8613276, -30667046, -3233545, 1863892, -1830544, 19206234, 7134917, -11284482, -828919}, - }, - { - FieldElement{11334899, -9218022, 8025293, 12707519, 17523892, -10476071, 10243738, -14685461, -5066034, 16498837}, - FieldElement{8911542, 6887158, -9584260, -6958590, 11145641, -9543680, 17303925, -14124238, 6536641, 10543906}, - FieldElement{-28946384, 15479763, -17466835, 568876, -1497683, 11223454, -2669190, -16625574, -27235709, 8876771}, - }, - { - FieldElement{-25742899, -12566864, -15649966, -846607, -33026686, -796288, -33481822, 15824474, -604426, -9039817}, - FieldElement{10330056, 70051, 7957388, -9002667, 9764902, 15609756, 27698697, -4890037, 1657394, 3084098}, - FieldElement{10477963, -7470260, 12119566, -13250805, 29016247, -5365589, 31280319, 14396151, -30233575, 15272409}, - }, - { - FieldElement{-12288309, 3169463, 28813183, 16658753, 25116432, -5630466, -25173957, -12636138, -25014757, 1950504}, - FieldElement{-26180358, 9489187, 11053416, -14746161, -31053720, 5825630, -8384306, -8767532, 15341279, 8373727}, - FieldElement{28685821, 7759505, -14378516, -12002860, -31971820, 4079242, 298136, -10232602, -2878207, 15190420}, - }, - { - FieldElement{-32932876, 13806336, -14337485, -15794431, -24004620, 10940928, 8669718, 2742393, -26033313, -6875003}, - FieldElement{-1580388, -11729417, -25979658, -11445023, -17411874, -10912854, 9291594, -16247779, -12154742, 6048605}, - FieldElement{-30305315, 14843444, 1539301, 11864366, 20201677, 1900163, 13934231, 5128323, 11213262, 9168384}, - }, - { - FieldElement{-26280513, 11007847, 19408960, -940758, -18592965, -4328580, -5088060, -11105150, 20470157, -16398701}, - FieldElement{-23136053, 9282192, 14855179, -15390078, -7362815, -14408560, -22783952, 14461608, 14042978, 5230683}, - FieldElement{29969567, -2741594, -16711867, -8552442, 9175486, -2468974, 21556951, 3506042, -5933891, -12449708}, - }, - { - FieldElement{-3144746, 8744661, 19704003, 4581278, -20430686, 6830683, -21284170, 8971513, -28539189, 15326563}, - FieldElement{-19464629, 10110288, -17262528, -3503892, -23500387, 1355669, -15523050, 15300988, -20514118, 9168260}, - FieldElement{-5353335, 4488613, -23803248, 16314347, 7780487, -15638939, -28948358, 9601605, 33087103, -9011387}, - }, - { - FieldElement{-19443170, -15512900, -20797467, -12445323, -29824447, 10229461, -27444329, -15000531, -5996870, 15664672}, - FieldElement{23294591, -16632613, -22650781, -8470978, 27844204, 11461195, 13099750, -2460356, 
18151676, 13417686}, - FieldElement{-24722913, -4176517, -31150679, 5988919, -26858785, 6685065, 1661597, -12551441, 15271676, -15452665}, - }, - }, - { - { - FieldElement{11433042, -13228665, 8239631, -5279517, -1985436, -725718, -18698764, 2167544, -6921301, -13440182}, - FieldElement{-31436171, 15575146, 30436815, 12192228, -22463353, 9395379, -9917708, -8638997, 12215110, 12028277}, - FieldElement{14098400, 6555944, 23007258, 5757252, -15427832, -12950502, 30123440, 4617780, -16900089, -655628}, - }, - { - FieldElement{-4026201, -15240835, 11893168, 13718664, -14809462, 1847385, -15819999, 10154009, 23973261, -12684474}, - FieldElement{-26531820, -3695990, -1908898, 2534301, -31870557, -16550355, 18341390, -11419951, 32013174, -10103539}, - FieldElement{-25479301, 10876443, -11771086, -14625140, -12369567, 1838104, 21911214, 6354752, 4425632, -837822}, - }, - { - FieldElement{-10433389, -14612966, 22229858, -3091047, -13191166, 776729, -17415375, -12020462, 4725005, 14044970}, - FieldElement{19268650, -7304421, 1555349, 8692754, -21474059, -9910664, 6347390, -1411784, -19522291, -16109756}, - FieldElement{-24864089, 12986008, -10898878, -5558584, -11312371, -148526, 19541418, 8180106, 9282262, 10282508}, - }, - { - FieldElement{-26205082, 4428547, -8661196, -13194263, 4098402, -14165257, 15522535, 8372215, 5542595, -10702683}, - FieldElement{-10562541, 14895633, 26814552, -16673850, -17480754, -2489360, -2781891, 6993761, -18093885, 10114655}, - FieldElement{-20107055, -929418, 31422704, 10427861, -7110749, 6150669, -29091755, -11529146, 25953725, -106158}, - }, - { - FieldElement{-4234397, -8039292, -9119125, 3046000, 2101609, -12607294, 19390020, 6094296, -3315279, 12831125}, - FieldElement{-15998678, 7578152, 5310217, 14408357, -33548620, -224739, 31575954, 6326196, 7381791, -2421839}, - FieldElement{-20902779, 3296811, 24736065, -16328389, 18374254, 7318640, 6295303, 8082724, -15362489, 12339664}, - }, - { - FieldElement{27724736, 2291157, 6088201, -14184798, 1792727, 5857634, 13848414, 15768922, 25091167, 14856294}, - FieldElement{-18866652, 8331043, 24373479, 8541013, -701998, -9269457, 12927300, -12695493, -22182473, -9012899}, - FieldElement{-11423429, -5421590, 11632845, 3405020, 30536730, -11674039, -27260765, 13866390, 30146206, 9142070}, - }, - { - FieldElement{3924129, -15307516, -13817122, -10054960, 12291820, -668366, -27702774, 9326384, -8237858, 4171294}, - FieldElement{-15921940, 16037937, 6713787, 16606682, -21612135, 2790944, 26396185, 3731949, 345228, -5462949}, - FieldElement{-21327538, 13448259, 25284571, 1143661, 20614966, -8849387, 2031539, -12391231, -16253183, -13582083}, - }, - { - FieldElement{31016211, -16722429, 26371392, -14451233, -5027349, 14854137, 17477601, 3842657, 28012650, -16405420}, - FieldElement{-5075835, 9368966, -8562079, -4600902, -15249953, 6970560, -9189873, 16292057, -8867157, 3507940}, - FieldElement{29439664, 3537914, 23333589, 6997794, -17555561, -11018068, -15209202, -15051267, -9164929, 6580396}, - }, - }, - { - { - FieldElement{-12185861, -7679788, 16438269, 10826160, -8696817, -6235611, 17860444, -9273846, -2095802, 9304567}, - FieldElement{20714564, -4336911, 29088195, 7406487, 11426967, -5095705, 14792667, -14608617, 5289421, -477127}, - FieldElement{-16665533, -10650790, -6160345, -13305760, 9192020, -1802462, 17271490, 12349094, 26939669, -3752294}, - }, - { - FieldElement{-12889898, 9373458, 31595848, 16374215, 21471720, 13221525, -27283495, -12348559, -3698806, 117887}, - FieldElement{22263325, -6560050, 3984570, -11174646, 
-15114008, -566785, 28311253, 5358056, -23319780, 541964}, - FieldElement{16259219, 3261970, 2309254, -15534474, -16885711, -4581916, 24134070, -16705829, -13337066, -13552195}, - }, - { - FieldElement{9378160, -13140186, -22845982, -12745264, 28198281, -7244098, -2399684, -717351, 690426, 14876244}, - FieldElement{24977353, -314384, -8223969, -13465086, 28432343, -1176353, -13068804, -12297348, -22380984, 6618999}, - FieldElement{-1538174, 11685646, 12944378, 13682314, -24389511, -14413193, 8044829, -13817328, 32239829, -5652762}, - }, - { - FieldElement{-18603066, 4762990, -926250, 8885304, -28412480, -3187315, 9781647, -10350059, 32779359, 5095274}, - FieldElement{-33008130, -5214506, -32264887, -3685216, 9460461, -9327423, -24601656, 14506724, 21639561, -2630236}, - FieldElement{-16400943, -13112215, 25239338, 15531969, 3987758, -4499318, -1289502, -6863535, 17874574, 558605}, - }, - { - FieldElement{-13600129, 10240081, 9171883, 16131053, -20869254, 9599700, 33499487, 5080151, 2085892, 5119761}, - FieldElement{-22205145, -2519528, -16381601, 414691, -25019550, 2170430, 30634760, -8363614, -31999993, -5759884}, - FieldElement{-6845704, 15791202, 8550074, -1312654, 29928809, -12092256, 27534430, -7192145, -22351378, 12961482}, - }, - { - FieldElement{-24492060, -9570771, 10368194, 11582341, -23397293, -2245287, 16533930, 8206996, -30194652, -5159638}, - FieldElement{-11121496, -3382234, 2307366, 6362031, -135455, 8868177, -16835630, 7031275, 7589640, 8945490}, - FieldElement{-32152748, 8917967, 6661220, -11677616, -1192060, -15793393, 7251489, -11182180, 24099109, -14456170}, - }, - { - FieldElement{5019558, -7907470, 4244127, -14714356, -26933272, 6453165, -19118182, -13289025, -6231896, -10280736}, - FieldElement{10853594, 10721687, 26480089, 5861829, -22995819, 1972175, -1866647, -10557898, -3363451, -6441124}, - FieldElement{-17002408, 5906790, 221599, -6563147, 7828208, -13248918, 24362661, -2008168, -13866408, 7421392}, - }, - { - FieldElement{8139927, -6546497, 32257646, -5890546, 30375719, 1886181, -21175108, 15441252, 28826358, -4123029}, - FieldElement{6267086, 9695052, 7709135, -16603597, -32869068, -1886135, 14795160, -7840124, 13746021, -1742048}, - FieldElement{28584902, 7787108, -6732942, -15050729, 22846041, -7571236, -3181936, -363524, 4771362, -8419958}, - }, - }, - { - { - FieldElement{24949256, 6376279, -27466481, -8174608, -18646154, -9930606, 33543569, -12141695, 3569627, 11342593}, - FieldElement{26514989, 4740088, 27912651, 3697550, 19331575, -11472339, 6809886, 4608608, 7325975, -14801071}, - FieldElement{-11618399, -14554430, -24321212, 7655128, -1369274, 5214312, -27400540, 10258390, -17646694, -8186692}, - }, - { - FieldElement{11431204, 15823007, 26570245, 14329124, 18029990, 4796082, -31446179, 15580664, 9280358, -3973687}, - FieldElement{-160783, -10326257, -22855316, -4304997, -20861367, -13621002, -32810901, -11181622, -15545091, 4387441}, - FieldElement{-20799378, 12194512, 3937617, -5805892, -27154820, 9340370, -24513992, 8548137, 20617071, -7482001}, - }, - { - FieldElement{-938825, -3930586, -8714311, 16124718, 24603125, -6225393, -13775352, -11875822, 24345683, 10325460}, - FieldElement{-19855277, -1568885, -22202708, 8714034, 14007766, 6928528, 16318175, -1010689, 4766743, 3552007}, - FieldElement{-21751364, -16730916, 1351763, -803421, -4009670, 3950935, 3217514, 14481909, 10988822, -3994762}, - }, - { - FieldElement{15564307, -14311570, 3101243, 5684148, 30446780, -8051356, 12677127, -6505343, -8295852, 13296005}, - FieldElement{-9442290, 
6624296, -30298964, -11913677, -4670981, -2057379, 31521204, 9614054, -30000824, 12074674}, - FieldElement{4771191, -135239, 14290749, -13089852, 27992298, 14998318, -1413936, -1556716, 29832613, -16391035}, - }, - { - FieldElement{7064884, -7541174, -19161962, -5067537, -18891269, -2912736, 25825242, 5293297, -27122660, 13101590}, - FieldElement{-2298563, 2439670, -7466610, 1719965, -27267541, -16328445, 32512469, -5317593, -30356070, -4190957}, - FieldElement{-30006540, 10162316, -33180176, 3981723, -16482138, -13070044, 14413974, 9515896, 19568978, 9628812}, - }, - { - FieldElement{33053803, 199357, 15894591, 1583059, 27380243, -4580435, -17838894, -6106839, -6291786, 3437740}, - FieldElement{-18978877, 3884493, 19469877, 12726490, 15913552, 13614290, -22961733, 70104, 7463304, 4176122}, - FieldElement{-27124001, 10659917, 11482427, -16070381, 12771467, -6635117, -32719404, -5322751, 24216882, 5944158}, - }, - { - FieldElement{8894125, 7450974, -2664149, -9765752, -28080517, -12389115, 19345746, 14680796, 11632993, 5847885}, - FieldElement{26942781, -2315317, 9129564, -4906607, 26024105, 11769399, -11518837, 6367194, -9727230, 4782140}, - FieldElement{19916461, -4828410, -22910704, -11414391, 25606324, -5972441, 33253853, 8220911, 6358847, -1873857}, - }, - { - FieldElement{801428, -2081702, 16569428, 11065167, 29875704, 96627, 7908388, -4480480, -13538503, 1387155}, - FieldElement{19646058, 5720633, -11416706, 12814209, 11607948, 12749789, 14147075, 15156355, -21866831, 11835260}, - FieldElement{19299512, 1155910, 28703737, 14890794, 2925026, 7269399, 26121523, 15467869, -26560550, 5052483}, - }, - }, - { - { - FieldElement{-3017432, 10058206, 1980837, 3964243, 22160966, 12322533, -6431123, -12618185, 12228557, -7003677}, - FieldElement{32944382, 14922211, -22844894, 5188528, 21913450, -8719943, 4001465, 13238564, -6114803, 8653815}, - FieldElement{22865569, -4652735, 27603668, -12545395, 14348958, 8234005, 24808405, 5719875, 28483275, 2841751}, - }, - { - FieldElement{-16420968, -1113305, -327719, -12107856, 21886282, -15552774, -1887966, -315658, 19932058, -12739203}, - FieldElement{-11656086, 10087521, -8864888, -5536143, -19278573, -3055912, 3999228, 13239134, -4777469, -13910208}, - FieldElement{1382174, -11694719, 17266790, 9194690, -13324356, 9720081, 20403944, 11284705, -14013818, 3093230}, - }, - { - FieldElement{16650921, -11037932, -1064178, 1570629, -8329746, 7352753, -302424, 16271225, -24049421, -6691850}, - FieldElement{-21911077, -5927941, -4611316, -5560156, -31744103, -10785293, 24123614, 15193618, -21652117, -16739389}, - FieldElement{-9935934, -4289447, -25279823, 4372842, 2087473, 10399484, 31870908, 14690798, 17361620, 11864968}, - }, - { - FieldElement{-11307610, 6210372, 13206574, 5806320, -29017692, -13967200, -12331205, -7486601, -25578460, -16240689}, - FieldElement{14668462, -12270235, 26039039, 15305210, 25515617, 4542480, 10453892, 6577524, 9145645, -6443880}, - FieldElement{5974874, 3053895, -9433049, -10385191, -31865124, 3225009, -7972642, 3936128, -5652273, -3050304}, - }, - { - FieldElement{30625386, -4729400, -25555961, -12792866, -20484575, 7695099, 17097188, -16303496, -27999779, 1803632}, - FieldElement{-3553091, 9865099, -5228566, 4272701, -5673832, -16689700, 14911344, 12196514, -21405489, 7047412}, - FieldElement{20093277, 9920966, -11138194, -5343857, 13161587, 12044805, -32856851, 4124601, -32343828, -10257566}, - }, - { - FieldElement{-20788824, 14084654, -13531713, 7842147, 19119038, -13822605, 4752377, -8714640, -21679658, 2288038}, - 
FieldElement{-26819236, -3283715, 29965059, 3039786, -14473765, 2540457, 29457502, 14625692, -24819617, 12570232}, - FieldElement{-1063558, -11551823, 16920318, 12494842, 1278292, -5869109, -21159943, -3498680, -11974704, 4724943}, - }, - { - FieldElement{17960970, -11775534, -4140968, -9702530, -8876562, -1410617, -12907383, -8659932, -29576300, 1903856}, - FieldElement{23134274, -14279132, -10681997, -1611936, 20684485, 15770816, -12989750, 3190296, 26955097, 14109738}, - FieldElement{15308788, 5320727, -30113809, -14318877, 22902008, 7767164, 29425325, -11277562, 31960942, 11934971}, - }, - { - FieldElement{-27395711, 8435796, 4109644, 12222639, -24627868, 14818669, 20638173, 4875028, 10491392, 1379718}, - FieldElement{-13159415, 9197841, 3875503, -8936108, -1383712, -5879801, 33518459, 16176658, 21432314, 12180697}, - FieldElement{-11787308, 11500838, 13787581, -13832590, -22430679, 10140205, 1465425, 12689540, -10301319, -13872883}, - }, - }, - { - { - FieldElement{5414091, -15386041, -21007664, 9643570, 12834970, 1186149, -2622916, -1342231, 26128231, 6032912}, - FieldElement{-26337395, -13766162, 32496025, -13653919, 17847801, -12669156, 3604025, 8316894, -25875034, -10437358}, - FieldElement{3296484, 6223048, 24680646, -12246460, -23052020, 5903205, -8862297, -4639164, 12376617, 3188849}, - }, - { - FieldElement{29190488, -14659046, 27549113, -1183516, 3520066, -10697301, 32049515, -7309113, -16109234, -9852307}, - FieldElement{-14744486, -9309156, 735818, -598978, -20407687, -5057904, 25246078, -15795669, 18640741, -960977}, - FieldElement{-6928835, -16430795, 10361374, 5642961, 4910474, 12345252, -31638386, -494430, 10530747, 1053335}, - }, - { - FieldElement{-29265967, -14186805, -13538216, -12117373, -19457059, -10655384, -31462369, -2948985, 24018831, 15026644}, - FieldElement{-22592535, -3145277, -2289276, 5953843, -13440189, 9425631, 25310643, 13003497, -2314791, -15145616}, - FieldElement{-27419985, -603321, -8043984, -1669117, -26092265, 13987819, -27297622, 187899, -23166419, -2531735}, - }, - { - FieldElement{-21744398, -13810475, 1844840, 5021428, -10434399, -15911473, 9716667, 16266922, -5070217, 726099}, - FieldElement{29370922, -6053998, 7334071, -15342259, 9385287, 2247707, -13661962, -4839461, 30007388, -15823341}, - FieldElement{-936379, 16086691, 23751945, -543318, -1167538, -5189036, 9137109, 730663, 9835848, 4555336}, - }, - { - FieldElement{-23376435, 1410446, -22253753, -12899614, 30867635, 15826977, 17693930, 544696, -11985298, 12422646}, - FieldElement{31117226, -12215734, -13502838, 6561947, -9876867, -12757670, -5118685, -4096706, 29120153, 13924425}, - FieldElement{-17400879, -14233209, 19675799, -2734756, -11006962, -5858820, -9383939, -11317700, 7240931, -237388}, - }, - { - FieldElement{-31361739, -11346780, -15007447, -5856218, -22453340, -12152771, 1222336, 4389483, 3293637, -15551743}, - FieldElement{-16684801, -14444245, 11038544, 11054958, -13801175, -3338533, -24319580, 7733547, 12796905, -6335822}, - FieldElement{-8759414, -10817836, -25418864, 10783769, -30615557, -9746811, -28253339, 3647836, 3222231, -11160462}, - }, - { - FieldElement{18606113, 1693100, -25448386, -15170272, 4112353, 10045021, 23603893, -2048234, -7550776, 2484985}, - FieldElement{9255317, -3131197, -12156162, -1004256, 13098013, -9214866, 16377220, -2102812, -19802075, -3034702}, - FieldElement{-22729289, 7496160, -5742199, 11329249, 19991973, -3347502, -31718148, 9936966, -30097688, -10618797}, - }, - { - FieldElement{21878590, -5001297, 4338336, 13643897, -3036865, 
13160960, 19708896, 5415497, -7360503, -4109293}, - FieldElement{27736861, 10103576, 12500508, 8502413, -3413016, -9633558, 10436918, -1550276, -23659143, -8132100}, - FieldElement{19492550, -12104365, -29681976, -852630, -3208171, 12403437, 30066266, 8367329, 13243957, 8709688}, - }, - }, - { - { - FieldElement{12015105, 2801261, 28198131, 10151021, 24818120, -4743133, -11194191, -5645734, 5150968, 7274186}, - FieldElement{2831366, -12492146, 1478975, 6122054, 23825128, -12733586, 31097299, 6083058, 31021603, -9793610}, - FieldElement{-2529932, -2229646, 445613, 10720828, -13849527, -11505937, -23507731, 16354465, 15067285, -14147707}, - }, - { - FieldElement{7840942, 14037873, -33364863, 15934016, -728213, -3642706, 21403988, 1057586, -19379462, -12403220}, - FieldElement{915865, -16469274, 15608285, -8789130, -24357026, 6060030, -17371319, 8410997, -7220461, 16527025}, - FieldElement{32922597, -556987, 20336074, -16184568, 10903705, -5384487, 16957574, 52992, 23834301, 6588044}, - }, - { - FieldElement{32752030, 11232950, 3381995, -8714866, 22652988, -10744103, 17159699, 16689107, -20314580, -1305992}, - FieldElement{-4689649, 9166776, -25710296, -10847306, 11576752, 12733943, 7924251, -2752281, 1976123, -7249027}, - FieldElement{21251222, 16309901, -2983015, -6783122, 30810597, 12967303, 156041, -3371252, 12331345, -8237197}, - }, - { - FieldElement{8651614, -4477032, -16085636, -4996994, 13002507, 2950805, 29054427, -5106970, 10008136, -4667901}, - FieldElement{31486080, 15114593, -14261250, 12951354, 14369431, -7387845, 16347321, -13662089, 8684155, -10532952}, - FieldElement{19443825, 11385320, 24468943, -9659068, -23919258, 2187569, -26263207, -6086921, 31316348, 14219878}, - }, - { - FieldElement{-28594490, 1193785, 32245219, 11392485, 31092169, 15722801, 27146014, 6992409, 29126555, 9207390}, - FieldElement{32382935, 1110093, 18477781, 11028262, -27411763, -7548111, -4980517, 10843782, -7957600, -14435730}, - FieldElement{2814918, 7836403, 27519878, -7868156, -20894015, -11553689, -21494559, 8550130, 28346258, 1994730}, - }, - { - FieldElement{-19578299, 8085545, -14000519, -3948622, 2785838, -16231307, -19516951, 7174894, 22628102, 8115180}, - FieldElement{-30405132, 955511, -11133838, -15078069, -32447087, -13278079, -25651578, 3317160, -9943017, 930272}, - FieldElement{-15303681, -6833769, 28856490, 1357446, 23421993, 1057177, 24091212, -1388970, -22765376, -10650715}, - }, - { - FieldElement{-22751231, -5303997, -12907607, -12768866, -15811511, -7797053, -14839018, -16554220, -1867018, 8398970}, - FieldElement{-31969310, 2106403, -4736360, 1362501, 12813763, 16200670, 22981545, -6291273, 18009408, -15772772}, - FieldElement{-17220923, -9545221, -27784654, 14166835, 29815394, 7444469, 29551787, -3727419, 19288549, 1325865}, - }, - { - FieldElement{15100157, -15835752, -23923978, -1005098, -26450192, 15509408, 12376730, -3479146, 33166107, -8042750}, - FieldElement{20909231, 13023121, -9209752, 16251778, -5778415, -8094914, 12412151, 10018715, 2213263, -13878373}, - FieldElement{32529814, -11074689, 30361439, -16689753, -9135940, 1513226, 22922121, 6382134, -5766928, 8371348}, - }, - }, - { - { - FieldElement{9923462, 11271500, 12616794, 3544722, -29998368, -1721626, 12891687, -8193132, -26442943, 10486144}, - FieldElement{-22597207, -7012665, 8587003, -8257861, 4084309, -12970062, 361726, 2610596, -23921530, -11455195}, - FieldElement{5408411, -1136691, -4969122, 10561668, 24145918, 14240566, 31319731, -4235541, 19985175, -3436086}, - }, - { - FieldElement{-13994457, 16616821, 
14549246, 3341099, 32155958, 13648976, -17577068, 8849297, 65030, 8370684}, - FieldElement{-8320926, -12049626, 31204563, 5839400, -20627288, -1057277, -19442942, 6922164, 12743482, -9800518}, - FieldElement{-2361371, 12678785, 28815050, 4759974, -23893047, 4884717, 23783145, 11038569, 18800704, 255233}, - }, - { - FieldElement{-5269658, -1773886, 13957886, 7990715, 23132995, 728773, 13393847, 9066957, 19258688, -14753793}, - FieldElement{-2936654, -10827535, -10432089, 14516793, -3640786, 4372541, -31934921, 2209390, -1524053, 2055794}, - FieldElement{580882, 16705327, 5468415, -2683018, -30926419, -14696000, -7203346, -8994389, -30021019, 7394435}, - }, - { - FieldElement{23838809, 1822728, -15738443, 15242727, 8318092, -3733104, -21672180, -3492205, -4821741, 14799921}, - FieldElement{13345610, 9759151, 3371034, -16137791, 16353039, 8577942, 31129804, 13496856, -9056018, 7402518}, - FieldElement{2286874, -4435931, -20042458, -2008336, -13696227, 5038122, 11006906, -15760352, 8205061, 1607563}, - }, - { - FieldElement{14414086, -8002132, 3331830, -3208217, 22249151, -5594188, 18364661, -2906958, 30019587, -9029278}, - FieldElement{-27688051, 1585953, -10775053, 931069, -29120221, -11002319, -14410829, 12029093, 9944378, 8024}, - FieldElement{4368715, -3709630, 29874200, -15022983, -20230386, -11410704, -16114594, -999085, -8142388, 5640030}, - }, - { - FieldElement{10299610, 13746483, 11661824, 16234854, 7630238, 5998374, 9809887, -16694564, 15219798, -14327783}, - FieldElement{27425505, -5719081, 3055006, 10660664, 23458024, 595578, -15398605, -1173195, -18342183, 9742717}, - FieldElement{6744077, 2427284, 26042789, 2720740, -847906, 1118974, 32324614, 7406442, 12420155, 1994844}, - }, - { - FieldElement{14012521, -5024720, -18384453, -9578469, -26485342, -3936439, -13033478, -10909803, 24319929, -6446333}, - FieldElement{16412690, -4507367, 10772641, 15929391, -17068788, -4658621, 10555945, -10484049, -30102368, -4739048}, - FieldElement{22397382, -7767684, -9293161, -12792868, 17166287, -9755136, -27333065, 6199366, 21880021, -12250760}, - }, - { - FieldElement{-4283307, 5368523, -31117018, 8163389, -30323063, 3209128, 16557151, 8890729, 8840445, 4957760}, - FieldElement{-15447727, 709327, -6919446, -10870178, -29777922, 6522332, -21720181, 12130072, -14796503, 5005757}, - FieldElement{-2114751, -14308128, 23019042, 15765735, -25269683, 6002752, 10183197, -13239326, -16395286, -2176112}, - }, - }, - { - { - FieldElement{-19025756, 1632005, 13466291, -7995100, -23640451, 16573537, -32013908, -3057104, 22208662, 2000468}, - FieldElement{3065073, -1412761, -25598674, -361432, -17683065, -5703415, -8164212, 11248527, -3691214, -7414184}, - FieldElement{10379208, -6045554, 8877319, 1473647, -29291284, -12507580, 16690915, 2553332, -3132688, 16400289}, - }, - { - FieldElement{15716668, 1254266, -18472690, 7446274, -8448918, 6344164, -22097271, -7285580, 26894937, 9132066}, - FieldElement{24158887, 12938817, 11085297, -8177598, -28063478, -4457083, -30576463, 64452, -6817084, -2692882}, - FieldElement{13488534, 7794716, 22236231, 5989356, 25426474, -12578208, 2350710, -3418511, -4688006, 2364226}, - }, - { - FieldElement{16335052, 9132434, 25640582, 6678888, 1725628, 8517937, -11807024, -11697457, 15445875, -7798101}, - FieldElement{29004207, -7867081, 28661402, -640412, -12794003, -7943086, 31863255, -4135540, -278050, -15759279}, - FieldElement{-6122061, -14866665, -28614905, 14569919, -10857999, -3591829, 10343412, -6976290, -29828287, -10815811}, - }, - { - FieldElement{27081650, 
3463984, 14099042, -4517604, 1616303, -6205604, 29542636, 15372179, 17293797, 960709}, - FieldElement{20263915, 11434237, -5765435, 11236810, 13505955, -10857102, -16111345, 6493122, -19384511, 7639714}, - FieldElement{-2830798, -14839232, 25403038, -8215196, -8317012, -16173699, 18006287, -16043750, 29994677, -15808121}, - }, - { - FieldElement{9769828, 5202651, -24157398, -13631392, -28051003, -11561624, -24613141, -13860782, -31184575, 709464}, - FieldElement{12286395, 13076066, -21775189, -1176622, -25003198, 4057652, -32018128, -8890874, 16102007, 13205847}, - FieldElement{13733362, 5599946, 10557076, 3195751, -5557991, 8536970, -25540170, 8525972, 10151379, 10394400}, - }, - { - FieldElement{4024660, -16137551, 22436262, 12276534, -9099015, -2686099, 19698229, 11743039, -33302334, 8934414}, - FieldElement{-15879800, -4525240, -8580747, -2934061, 14634845, -698278, -9449077, 3137094, -11536886, 11721158}, - FieldElement{17555939, -5013938, 8268606, 2331751, -22738815, 9761013, 9319229, 8835153, -9205489, -1280045}, - }, - { - FieldElement{-461409, -7830014, 20614118, 16688288, -7514766, -4807119, 22300304, 505429, 6108462, -6183415}, - FieldElement{-5070281, 12367917, -30663534, 3234473, 32617080, -8422642, 29880583, -13483331, -26898490, -7867459}, - FieldElement{-31975283, 5726539, 26934134, 10237677, -3173717, -605053, 24199304, 3795095, 7592688, -14992079}, - }, - { - FieldElement{21594432, -14964228, 17466408, -4077222, 32537084, 2739898, 6407723, 12018833, -28256052, 4298412}, - FieldElement{-20650503, -11961496, -27236275, 570498, 3767144, -1717540, 13891942, -1569194, 13717174, 10805743}, - FieldElement{-14676630, -15644296, 15287174, 11927123, 24177847, -8175568, -796431, 14860609, -26938930, -5863836}, - }, - }, - { - { - FieldElement{12962541, 5311799, -10060768, 11658280, 18855286, -7954201, 13286263, -12808704, -4381056, 9882022}, - FieldElement{18512079, 11319350, -20123124, 15090309, 18818594, 5271736, -22727904, 3666879, -23967430, -3299429}, - FieldElement{-6789020, -3146043, 16192429, 13241070, 15898607, -14206114, -10084880, -6661110, -2403099, 5276065}, - }, - { - FieldElement{30169808, -5317648, 26306206, -11750859, 27814964, 7069267, 7152851, 3684982, 1449224, 13082861}, - FieldElement{10342826, 3098505, 2119311, 193222, 25702612, 12233820, 23697382, 15056736, -21016438, -8202000}, - FieldElement{-33150110, 3261608, 22745853, 7948688, 19370557, -15177665, -26171976, 6482814, -10300080, -11060101}, - }, - { - FieldElement{32869458, -5408545, 25609743, 15678670, -10687769, -15471071, 26112421, 2521008, -22664288, 6904815}, - FieldElement{29506923, 4457497, 3377935, -9796444, -30510046, 12935080, 1561737, 3841096, -29003639, -6657642}, - FieldElement{10340844, -6630377, -18656632, -2278430, 12621151, -13339055, 30878497, -11824370, -25584551, 5181966}, - }, - { - FieldElement{25940115, -12658025, 17324188, -10307374, -8671468, 15029094, 24396252, -16450922, -2322852, -12388574}, - FieldElement{-21765684, 9916823, -1300409, 4079498, -1028346, 11909559, 1782390, 12641087, 20603771, -6561742}, - FieldElement{-18882287, -11673380, 24849422, 11501709, 13161720, -4768874, 1925523, 11914390, 4662781, 7820689}, - }, - { - FieldElement{12241050, -425982, 8132691, 9393934, 32846760, -1599620, 29749456, 12172924, 16136752, 15264020}, - FieldElement{-10349955, -14680563, -8211979, 2330220, -17662549, -14545780, 10658213, 6671822, 19012087, 3772772}, - FieldElement{3753511, -3421066, 10617074, 2028709, 14841030, -6721664, 28718732, -15762884, 20527771, 12988982}, - }, - { - 
-	// [remainder of the deleted precomputed base-point tables elided here:
-	// several hundred further lines of FieldElement constants, all removed
-	// together with the rest of this vendored file]
-}
diff --git a/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go b/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go
deleted file mode 100644
index fd03c252..00000000
--- a/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go
+++ /dev/null
@@ -1,1793 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package edwards25519
-
-import "encoding/binary"
-
-// This code is a port of the public domain, “ref10” implementation of ed25519
-// from SUPERCOP.
-
-// FieldElement represents an element of the field GF(2^255 - 19). An element
-// t, entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77
-// t[3]+2^102 t[4]+...+2^230 t[9]. Bounds on each t[i] vary depending on
-// context.
-type FieldElement [10]int32
-
-var zero FieldElement
-
-func FeZero(fe *FieldElement) {
-	copy(fe[:], zero[:])
-}
-
-func FeOne(fe *FieldElement) {
-	FeZero(fe)
-	fe[0] = 1
-}
-
-func FeAdd(dst, a, b *FieldElement) {
-	dst[0] = a[0] + b[0]
-	dst[1] = a[1] + b[1]
-	dst[2] = a[2] + b[2]
-	dst[3] = a[3] + b[3]
-	dst[4] = a[4] + b[4]
-	dst[5] = a[5] + b[5]
-	dst[6] = a[6] + b[6]
-	dst[7] = a[7] + b[7]
-	dst[8] = a[8] + b[8]
-	dst[9] = a[9] + b[9]
-}
-
-func FeSub(dst, a, b *FieldElement) {
-	dst[0] = a[0] - b[0]
-	dst[1] = a[1] - b[1]
-	dst[2] = a[2] - b[2]
-	dst[3] = a[3] - b[3]
-	dst[4] = a[4] - b[4]
-	dst[5] = a[5] - b[5]
-	dst[6] = a[6] - b[6]
-	dst[7] = a[7] - b[7]
-	dst[8] = a[8] - b[8]
-	dst[9] = a[9] - b[9]
-}
-
-func FeCopy(dst, src *FieldElement) {
-	copy(dst[:], src[:])
-}
-
-// Replace (f,g) with (g,g) if b == 1;
-// replace (f,g) with (f,g) if b == 0.
-//
-// Preconditions: b in {0,1}.
-func FeCMove(f, g *FieldElement, b int32) {
-	b = -b
-	f[0] ^= b & (f[0] ^ g[0])
-	f[1] ^= b & (f[1] ^ g[1])
-	f[2] ^= b & (f[2] ^ g[2])
-	f[3] ^= b & (f[3] ^ g[3])
-	f[4] ^= b & (f[4] ^ g[4])
-	f[5] ^= b & (f[5] ^ g[5])
-	f[6] ^= b & (f[6] ^ g[6])
-	f[7] ^= b & (f[7] ^ g[7])
-	f[8] ^= b & (f[8] ^ g[8])
-	f[9] ^= b & (f[9] ^ g[9])
-}
-
-func load3(in []byte) int64 {
-	var r int64
-	r = int64(in[0])
-	r |= int64(in[1]) << 8
-	r |= int64(in[2]) << 16
-	return r
-}
-
-func load4(in []byte) int64 {
-	var r int64
-	r = int64(in[0])
-	r |= int64(in[1]) << 8
-	r |= int64(in[2]) << 16
-	r |= int64(in[3]) << 24
-	return r
-}
-
-func FeFromBytes(dst *FieldElement, src *[32]byte) {
-	h0 := load4(src[:])
-	h1 := load3(src[4:]) << 6
-	h2 := load3(src[7:]) << 5
-	h3 := load3(src[10:]) << 3
-	h4 := load3(src[13:]) << 2
-	h5 := load4(src[16:])
-	h6 := load3(src[20:]) << 7
-	h7 := load3(src[23:]) << 5
-	h8 := load3(src[26:]) << 4
-	h9 := (load3(src[29:]) & 8388607) << 2
-
-	FeCombine(dst, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9)
-}
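As a reading aid for the FieldElement comment above: the ten limbs use an alternating 26/25-bit (radix 2^25.5) layout, so limb i carries weight 2^ceil(25.5*i). The following standalone sketch, not part of the vendored file and with illustrative names of our own (feToBig, limbShift), evaluates the limbs back into the integer they represent using math/big:

```go
package main

import (
	"fmt"
	"math/big"
)

// limbShift[i] is the bit weight of limb i: 26-bit and 25-bit limbs
// alternate, so limb i sits at bit ceil(25.5 * i).
var limbShift = [10]uint{0, 26, 51, 77, 102, 128, 153, 179, 204, 230}

// feToBig evaluates t[0] + 2^26*t[1] + 2^51*t[2] + ... + 2^230*t[9].
// Negative limbs are fine: Lsh on a negative big.Int multiplies by 2^n.
func feToBig(t [10]int32) *big.Int {
	sum := new(big.Int)
	for i, shift := range limbShift {
		sum.Add(sum, new(big.Int).Lsh(big.NewInt(int64(t[i])), shift))
	}
	return sum
}

func main() {
	two := [10]int32{2}       // the field element 2 in limb form
	fmt.Println(feToBig(two)) // prints 2
}
```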
-
-// FeToBytes marshals h to s.
-// Preconditions:
-//   |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
-//
-// Write p=2^255-19; q=floor(h/p).
-// Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))).
-//
-// Proof:
-//   Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4.
-//   Also have |h-2^230 h9|<2^230 so |19 2^(-255)(h-2^230 h9)|<1/4.
-//
-//   Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9).
-//   Then 0<y<1.
-//
-//   Write r=h-pq.
-//   Have 0<=r<=p-1=2^255-20.
-//   Thus 0<=r+19(2^-255)r<r+19(2^-255)2^255<=2^255-1.
-//
-//   Write x=r+19(2^-255)r+y.
-//   Then 0<x<2^255 so floor(2^(-255)x) = 0 so floor(q+2^(-255)x) = q.
-//
-//   Have q+2^(-255)x = 2^(-255)(h + 19 2^(-25) h9 + 2^(-1))
-//   so floor(2^(-255)(h + 19 2^(-25) h9 + 2^(-1))) = q.
-func FeToBytes(s *[32]byte, h *FieldElement) {
-	var carry [10]int32
-
-	q := (19*h[9] + (1 << 24)) >> 25
-	q = (h[0] + q) >> 26
-	q = (h[1] + q) >> 25
-	q = (h[2] + q) >> 26
-	q = (h[3] + q) >> 25
-	q = (h[4] + q) >> 26
-	q = (h[5] + q) >> 25
-	q = (h[6] + q) >> 26
-	q = (h[7] + q) >> 25
-	q = (h[8] + q) >> 26
-	q = (h[9] + q) >> 25
-
-	// Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20.
-	h[0] += 19 * q
-	// Goal: Output h-2^255 q, which is between 0 and 2^255-20.
-
-	carry[0] = h[0] >> 26
-	h[1] += carry[0]
-	h[0] -= carry[0] << 26
-	carry[1] = h[1] >> 25
-	h[2] += carry[1]
-	h[1] -= carry[1] << 25
-	carry[2] = h[2] >> 26
-	h[3] += carry[2]
-	h[2] -= carry[2] << 26
-	carry[3] = h[3] >> 25
-	h[4] += carry[3]
-	h[3] -= carry[3] << 25
-	carry[4] = h[4] >> 26
-	h[5] += carry[4]
-	h[4] -= carry[4] << 26
-	carry[5] = h[5] >> 25
-	h[6] += carry[5]
-	h[5] -= carry[5] << 25
-	carry[6] = h[6] >> 26
-	h[7] += carry[6]
-	h[6] -= carry[6] << 26
-	carry[7] = h[7] >> 25
-	h[8] += carry[7]
-	h[7] -= carry[7] << 25
-	carry[8] = h[8] >> 26
-	h[9] += carry[8]
-	h[8] -= carry[8] << 26
-	carry[9] = h[9] >> 25
-	h[9] -= carry[9] << 25
-	// h10 = carry9
-
-	// Goal: Output h[0]+...+2^255 h10-2^255 q, which is between 0 and 2^255-20.
-	// Have h[0]+...+2^230 h[9] between 0 and 2^255-1;
-	// evidently 2^255 h10-2^255 q = 0.
-	// Goal: Output h[0]+...+2^230 h[9].
-
-	s[0] = byte(h[0] >> 0)
-	s[1] = byte(h[0] >> 8)
-	s[2] = byte(h[0] >> 16)
-	s[3] = byte((h[0] >> 24) | (h[1] << 2))
-	s[4] = byte(h[1] >> 6)
-	s[5] = byte(h[1] >> 14)
-	s[6] = byte((h[1] >> 22) | (h[2] << 3))
-	s[7] = byte(h[2] >> 5)
-	s[8] = byte(h[2] >> 13)
-	s[9] = byte((h[2] >> 21) | (h[3] << 5))
-	s[10] = byte(h[3] >> 3)
-	s[11] = byte(h[3] >> 11)
-	s[12] = byte((h[3] >> 19) | (h[4] << 6))
-	s[13] = byte(h[4] >> 2)
-	s[14] = byte(h[4] >> 10)
-	s[15] = byte(h[4] >> 18)
-	s[16] = byte(h[5] >> 0)
-	s[17] = byte(h[5] >> 8)
-	s[18] = byte(h[5] >> 16)
-	s[19] = byte((h[5] >> 24) | (h[6] << 1))
-	s[20] = byte(h[6] >> 7)
-	s[21] = byte(h[6] >> 15)
-	s[22] = byte((h[6] >> 23) | (h[7] << 3))
-	s[23] = byte(h[7] >> 5)
-	s[24] = byte(h[7] >> 13)
-	s[25] = byte((h[7] >> 21) | (h[8] << 4))
-	s[26] = byte(h[8] >> 4)
-	s[27] = byte(h[8] >> 12)
-	s[28] = byte((h[8] >> 20) | (h[9] << 6))
-	s[29] = byte(h[9] >> 2)
-	s[30] = byte(h[9] >> 10)
-	s[31] = byte(h[9] >> 18)
-}
-
-func FeIsNegative(f *FieldElement) byte {
-	var s [32]byte
-	FeToBytes(&s, f)
-	return s[0] & 1
-}
-
-func FeIsNonZero(f *FieldElement) int32 {
-	var s [32]byte
-	FeToBytes(&s, f)
-	var x uint8
-	for _, b := range s {
-		x |= b
-	}
-	x |= x >> 4
-	x |= x >> 2
-	x |= x >> 1
-	return int32(x & 1)
-}
-
-// FeNeg sets h = -f
-//
-// Preconditions:
-//   |f| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
-//
-// Postconditions:
-//   |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
-func FeNeg(h, f *FieldElement) {
-	h[0] = -f[0]
-	h[1] = -f[1]
-	h[2] = -f[2]
-	h[3] = -f[3]
-	h[4] = -f[4]
-	h[5] = -f[5]
-	h[6] = -f[6]
-	h[7] = -f[7]
-	h[8] = -f[8]
-	h[9] = -f[9]
-}
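FeIsNonZero above deliberately avoids an early-exit comparison, so its timing does not depend on the secret bytes: it ORs everything together and folds the result down to one bit. A minimal standalone illustration of the same idiom on a plain byte slice (our throwaway code, not from the vendored file):

```go
package main

import "fmt"

// isNonZero reports 1 if any byte of s is non-zero, using only ORs and
// shifts so there is no data-dependent branch.
func isNonZero(s []byte) int32 {
	var x uint8
	for _, b := range s {
		x |= b // any set bit anywhere survives the OR
	}
	x |= x >> 4 // fold the high nibble into the low nibble
	x |= x >> 2
	x |= x >> 1 // ...and finally everything into bit 0
	return int32(x & 1)
}

func main() {
	fmt.Println(isNonZero(make([]byte, 32))) // 0
	fmt.Println(isNonZero([]byte{0, 8, 0}))  // 1
}
```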
-
-func FeCombine(h *FieldElement, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 int64) {
-	var c0, c1, c2, c3, c4, c5, c6, c7, c8, c9 int64
-
-	/*
-	  |h0| <= (1.1*1.1*2^52*(1+19+19+19+19)+1.1*1.1*2^50*(38+38+38+38+38))
-	    i.e. |h0| <= 1.2*2^59; narrower ranges for h2, h4, h6, h8
-	  |h1| <= (1.1*1.1*2^51*(1+1+19+19+19+19+19+19+19+19))
-	    i.e. |h1| <= 1.5*2^58; narrower ranges for h3, h5, h7, h9
-	*/
-
-	c0 = (h0 + (1 << 25)) >> 26
-	h1 += c0
-	h0 -= c0 << 26
-	c4 = (h4 + (1 << 25)) >> 26
-	h5 += c4
-	h4 -= c4 << 26
-	/* |h0| <= 2^25 */
-	/* |h4| <= 2^25 */
-	/* |h1| <= 1.51*2^58 */
-	/* |h5| <= 1.51*2^58 */
-
-	c1 = (h1 + (1 << 24)) >> 25
-	h2 += c1
-	h1 -= c1 << 25
-	c5 = (h5 + (1 << 24)) >> 25
-	h6 += c5
-	h5 -= c5 << 25
-	/* |h1| <= 2^24; from now on fits into int32 */
-	/* |h5| <= 2^24; from now on fits into int32 */
-	/* |h2| <= 1.21*2^59 */
-	/* |h6| <= 1.21*2^59 */
-
-	c2 = (h2 + (1 << 25)) >> 26
-	h3 += c2
-	h2 -= c2 << 26
-	c6 = (h6 + (1 << 25)) >> 26
-	h7 += c6
-	h6 -= c6 << 26
-	/* |h2| <= 2^25; from now on fits into int32 unchanged */
-	/* |h6| <= 2^25; from now on fits into int32 unchanged */
-	/* |h3| <= 1.51*2^58 */
-	/* |h7| <= 1.51*2^58 */
-
-	c3 = (h3 + (1 << 24)) >> 25
-	h4 += c3
-	h3 -= c3 << 25
-	c7 = (h7 + (1 << 24)) >> 25
-	h8 += c7
-	h7 -= c7 << 25
-	/* |h3| <= 2^24; from now on fits into int32 unchanged */
-	/* |h7| <= 2^24; from now on fits into int32 unchanged */
-	/* |h4| <= 1.52*2^33 */
-	/* |h8| <= 1.52*2^33 */
-
-	c4 = (h4 + (1 << 25)) >> 26
-	h5 += c4
-	h4 -= c4 << 26
-	c8 = (h8 + (1 << 25)) >> 26
-	h9 += c8
-	h8 -= c8 << 26
-	/* |h4| <= 2^25; from now on fits into int32 unchanged */
-	/* |h8| <= 2^25; from now on fits into int32 unchanged */
-	/* |h5| <= 1.01*2^24 */
-	/* |h9| <= 1.51*2^58 */
-
-	c9 = (h9 + (1 << 24)) >> 25
-	h0 += c9 * 19
-	h9 -= c9 << 25
-	/* |h9| <= 2^24; from now on fits into int32 unchanged */
-	/* |h0| <= 1.8*2^37 */
-
-	c0 = (h0 + (1 << 25)) >> 26
-	h1 += c0
-	h0 -= c0 << 26
-	/* |h0| <= 2^25; from now on fits into int32 unchanged */
-	/* |h1| <= 1.01*2^24 */
-
-	h[0] = int32(h0)
-	h[1] = int32(h1)
-	h[2] = int32(h2)
-	h[3] = int32(h3)
-	h[4] = int32(h4)
-	h[5] = int32(h5)
-	h[6] = int32(h6)
-	h[7] = int32(h7)
-	h[8] = int32(h8)
-	h[9] = int32(h9)
-}
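Each step of FeCombine above is a signed, round-to-nearest carry: adding 2^25 before the arithmetic shift keeps the reduced limb in [-2^25, 2^25] rather than [0, 2^26). A standalone sketch of one such step (illustrative names of our own, not vendored code):

```go
package main

import "fmt"

// carry26 reduces a 26-bit limb: the rounded carry c moves the excess into
// the next limb, leaving lo in the signed range [-2^25, 2^25].
func carry26(lo, hi int64) (int64, int64) {
	c := (lo + (1 << 25)) >> 26
	hi += c
	lo -= c << 26
	return lo, hi
}

func main() {
	lo, hi := carry26((1<<40)+12345, 0)
	fmt.Println(lo, hi) // prints "12345 16384": the 2^40 excess moved into hi
}
```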
-
-// FeMul calculates h = f * g
-// Can overlap h with f or g.
-//
-// Preconditions:
-//   |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
-//   |g| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
-//
-// Postconditions:
-//   |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
-//
-// Notes on implementation strategy:
-//
-// Using schoolbook multiplication.
-// Karatsuba would save a little in some cost models.
-//
-// Most multiplications by 2 and 19 are 32-bit precomputations;
-// cheaper than 64-bit postcomputations.
-//
-// There is one remaining multiplication by 19 in the carry chain;
-// one *19 precomputation can be merged into this,
-// but the resulting data flow is considerably less clean.
-//
-// There are 12 carries below.
-// 10 of them are 2-way parallelizable and vectorizable.
-// Can get away with 11 carries, but then data flow is much deeper.
-//
-// With tighter constraints on inputs, can squeeze carries into int32.
-func FeMul(h, f, g *FieldElement) {
-	f0 := int64(f[0])
-	f1 := int64(f[1])
-	f2 := int64(f[2])
-	f3 := int64(f[3])
-	f4 := int64(f[4])
-	f5 := int64(f[5])
-	f6 := int64(f[6])
-	f7 := int64(f[7])
-	f8 := int64(f[8])
-	f9 := int64(f[9])
-
-	f1_2 := int64(2 * f[1])
-	f3_2 := int64(2 * f[3])
-	f5_2 := int64(2 * f[5])
-	f7_2 := int64(2 * f[7])
-	f9_2 := int64(2 * f[9])
-
-	g0 := int64(g[0])
-	g1 := int64(g[1])
-	g2 := int64(g[2])
-	g3 := int64(g[3])
-	g4 := int64(g[4])
-	g5 := int64(g[5])
-	g6 := int64(g[6])
-	g7 := int64(g[7])
-	g8 := int64(g[8])
-	g9 := int64(g[9])
-
-	g1_19 := int64(19 * g[1]) /* 1.4*2^29 */
-	g2_19 := int64(19 * g[2]) /* 1.4*2^30; still ok */
-	g3_19 := int64(19 * g[3])
-	g4_19 := int64(19 * g[4])
-	g5_19 := int64(19 * g[5])
-	g6_19 := int64(19 * g[6])
-	g7_19 := int64(19 * g[7])
-	g8_19 := int64(19 * g[8])
-	g9_19 := int64(19 * g[9])
-
-	h0 := f0*g0 + f1_2*g9_19 + f2*g8_19 + f3_2*g7_19 + f4*g6_19 + f5_2*g5_19 + f6*g4_19 + f7_2*g3_19 + f8*g2_19 + f9_2*g1_19
-	h1 := f0*g1 + f1*g0 + f2*g9_19 + f3*g8_19 + f4*g7_19 + f5*g6_19 + f6*g5_19 + f7*g4_19 + f8*g3_19 + f9*g2_19
-	h2 := f0*g2 + f1_2*g1 + f2*g0 + f3_2*g9_19 + f4*g8_19 + f5_2*g7_19 + f6*g6_19 + f7_2*g5_19 + f8*g4_19 + f9_2*g3_19
-	h3 := f0*g3 + f1*g2 + f2*g1 + f3*g0 + f4*g9_19 + f5*g8_19 + f6*g7_19 + f7*g6_19 + f8*g5_19 + f9*g4_19
-	h4 := f0*g4 + f1_2*g3 + f2*g2 + f3_2*g1 + f4*g0 + f5_2*g9_19 + f6*g8_19 + f7_2*g7_19 + f8*g6_19 + f9_2*g5_19
-	h5 := f0*g5 + f1*g4 + f2*g3 + f3*g2 + f4*g1 + f5*g0 + f6*g9_19 + f7*g8_19 + f8*g7_19 + f9*g6_19
-	h6 := f0*g6 + f1_2*g5 + f2*g4 + f3_2*g3 + f4*g2 + f5_2*g1 + f6*g0 + f7_2*g9_19 + f8*g8_19 + f9_2*g7_19
-	h7 := f0*g7 + f1*g6 + f2*g5 + f3*g4 + f4*g3 + f5*g2 + f6*g1 + f7*g0 + f8*g9_19 + f9*g8_19
-	h8 := f0*g8 + f1_2*g7 + f2*g6 + f3_2*g5 + f4*g4 + f5_2*g3 + f6*g2 + f7_2*g1 + f8*g0 + f9_2*g9_19
-	h9 := f0*g9 + f1*g8 + f2*g7 + f3*g6 + f4*g5 + f5*g4 + f6*g3 + f7*g2 + f8*g1 + f9*g0
-
-	FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9)
-}
-
-func feSquare(f *FieldElement) (h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 int64) {
-	f0 := int64(f[0])
-	f1 := int64(f[1])
-	f2 := int64(f[2])
-	f3 := int64(f[3])
-	f4 := int64(f[4])
-	f5 := int64(f[5])
-	f6 := int64(f[6])
-	f7 := int64(f[7])
-	f8 := int64(f[8])
-	f9 := int64(f[9])
-	f0_2 := int64(2 * f[0])
-	f1_2 := int64(2 * f[1])
-	f2_2 := int64(2 * f[2])
-	f3_2 := int64(2 * f[3])
-	f4_2 := int64(2 * f[4])
-	f5_2 := int64(2 * f[5])
-	f6_2 := int64(2 * f[6])
-	f7_2 := int64(2 * f[7])
-	f5_38 := 38 * f5 // 1.31*2^30
-	f6_19 := 19 * f6 // 1.31*2^30
-	f7_38 := 38 * f7 // 1.31*2^30
-	f8_19 := 19 * f8 // 1.31*2^30
-	f9_38 := 38 * f9 // 1.31*2^30
-
-	h0 = f0*f0 + f1_2*f9_38 + f2_2*f8_19 + f3_2*f7_38 + f4_2*f6_19 + f5*f5_38
-	h1 = f0_2*f1 + f2*f9_38 + f3_2*f8_19 + f4*f7_38 + f5_2*f6_19
-	h2 = f0_2*f2 + f1_2*f1 + f3_2*f9_38 + f4_2*f8_19 + f5_2*f7_38 + f6*f6_19
-	h3 = f0_2*f3 + f1_2*f2 + f4*f9_38 + f5_2*f8_19 + f6*f7_38
-	h4 = f0_2*f4 + f1_2*f3_2 + f2*f2 + f5_2*f9_38 + f6_2*f8_19 + f7*f7_38
-	h5 = f0_2*f5 + f1_2*f4 + f2_2*f3 + f6*f9_38 + f7_2*f8_19
-	h6 = f0_2*f6 + f1_2*f5_2 + f2_2*f4 + f3_2*f3 + f7_2*f9_38 + f8*f8_19
-	h7 = f0_2*f7 + f1_2*f6 + f2_2*f5 + f3_2*f4 + f8*f9_38
-	h8 = f0_2*f8 + f1_2*f7_2 + f2_2*f6 + f3_2*f5_2 + f4*f4 + f9*f9_38
-	h9 = f0_2*f9 + f1_2*f8 + f2_2*f7 + f3_2*f6 + f4_2*f5
-
-	return
-}
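The g*19 precomputations in FeMul, and the *38/*19 factors in feSquare, come from reducing modulo p = 2^255 - 19: any partial product carrying weight 2^255 or more wraps around multiplied by 19 (the additional factor of 2 on odd-limb cross terms is an artifact of the mixed 26/25-bit radix). A quick standalone math/big check of the underlying identity, our sketch rather than vendored code:

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	// p = 2^255 - 19, the field order used throughout this file.
	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 255), big.NewInt(19))
	// 2^255 mod p == 19, which is why overflow past the top limb is
	// folded back in multiplied by 19.
	fmt.Println(new(big.Int).Mod(new(big.Int).Lsh(big.NewInt(1), 255), p))
}
```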
-func FeSquare(h, f *FieldElement) { - h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 := feSquare(f) - FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9) -} - -// FeSquare2 sets h = 2 * f * f -// -// Can overlap h with f. -// -// Preconditions: -// |f| bounded by 1.65*2^26,1.65*2^25,1.65*2^26,1.65*2^25,etc. -// -// Postconditions: -// |h| bounded by 1.01*2^25,1.01*2^24,1.01*2^25,1.01*2^24,etc. -// See fe_mul.c for discussion of implementation strategy. -func FeSquare2(h, f *FieldElement) { - h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 := feSquare(f) - - h0 += h0 - h1 += h1 - h2 += h2 - h3 += h3 - h4 += h4 - h5 += h5 - h6 += h6 - h7 += h7 - h8 += h8 - h9 += h9 - - FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9) -} - -func FeInvert(out, z *FieldElement) { - var t0, t1, t2, t3 FieldElement - var i int - - FeSquare(&t0, z) // 2^1 - FeSquare(&t1, &t0) // 2^2 - for i = 1; i < 2; i++ { // 2^3 - FeSquare(&t1, &t1) - } - FeMul(&t1, z, &t1) // 2^3 + 2^0 - FeMul(&t0, &t0, &t1) // 2^3 + 2^1 + 2^0 - FeSquare(&t2, &t0) // 2^4 + 2^2 + 2^1 - FeMul(&t1, &t1, &t2) // 2^4 + 2^3 + 2^2 + 2^1 + 2^0 - FeSquare(&t2, &t1) // 5,4,3,2,1 - for i = 1; i < 5; i++ { // 9,8,7,6,5 - FeSquare(&t2, &t2) - } - FeMul(&t1, &t2, &t1) // 9,8,7,6,5,4,3,2,1,0 - FeSquare(&t2, &t1) // 10..1 - for i = 1; i < 10; i++ { // 19..10 - FeSquare(&t2, &t2) - } - FeMul(&t2, &t2, &t1) // 19..0 - FeSquare(&t3, &t2) // 20..1 - for i = 1; i < 20; i++ { // 39..20 - FeSquare(&t3, &t3) - } - FeMul(&t2, &t3, &t2) // 39..0 - FeSquare(&t2, &t2) // 40..1 - for i = 1; i < 10; i++ { // 49..10 - FeSquare(&t2, &t2) - } - FeMul(&t1, &t2, &t1) // 49..0 - FeSquare(&t2, &t1) // 50..1 - for i = 1; i < 50; i++ { // 99..50 - FeSquare(&t2, &t2) - } - FeMul(&t2, &t2, &t1) // 99..0 - FeSquare(&t3, &t2) // 100..1 - for i = 1; i < 100; i++ { // 199..100 - FeSquare(&t3, &t3) - } - FeMul(&t2, &t3, &t2) // 199..0 - FeSquare(&t2, &t2) // 200..1 - for i = 1; i < 50; i++ { // 249..50 - FeSquare(&t2, &t2) - } - FeMul(&t1, &t2, &t1) // 249..0 - FeSquare(&t1, &t1) // 250..1 - for i = 1; i < 5; i++ { // 254..5 - FeSquare(&t1, &t1) - } - FeMul(out, &t1, &t0) // 254..5,3,1,0 -} - -func fePow22523(out, z *FieldElement) { - var t0, t1, t2 FieldElement - var i int - - FeSquare(&t0, z) - for i = 1; i < 1; i++ { - FeSquare(&t0, &t0) - } - FeSquare(&t1, &t0) - for i = 1; i < 2; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t1, z, &t1) - FeMul(&t0, &t0, &t1) - FeSquare(&t0, &t0) - for i = 1; i < 1; i++ { - FeSquare(&t0, &t0) - } - FeMul(&t0, &t1, &t0) - FeSquare(&t1, &t0) - for i = 1; i < 5; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t0, &t1, &t0) - FeSquare(&t1, &t0) - for i = 1; i < 10; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t1, &t1, &t0) - FeSquare(&t2, &t1) - for i = 1; i < 20; i++ { - FeSquare(&t2, &t2) - } - FeMul(&t1, &t2, &t1) - FeSquare(&t1, &t1) - for i = 1; i < 10; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t0, &t1, &t0) - FeSquare(&t1, &t0) - for i = 1; i < 50; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t1, &t1, &t0) - FeSquare(&t2, &t1) - for i = 1; i < 100; i++ { - FeSquare(&t2, &t2) - } - FeMul(&t1, &t2, &t1) - FeSquare(&t1, &t1) - for i = 1; i < 50; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t0, &t1, &t0) - FeSquare(&t0, &t0) - for i = 1; i < 2; i++ { - FeSquare(&t0, &t0) - } - FeMul(out, &t0, z) -} - -// Group elements are members of the elliptic curve -x^2 + y^2 = 1 + d * x^2 * -// y^2 where d = -121665/121666. 
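FeInvert and fePow22523 below are straight-line square-and-multiply chains for fixed exponents: inversion computes z^(p-2) by Fermat's little theorem (the "254..5,3,1,0" comments track which exponent bits are set in 2^255-21), and fePow22523 computes z^((p-5)/8) = z^(2^252-3), the exponent used for square roots during point decompression further down. A quick math/big sanity check of those exponents, with an illustrative base:

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	one := big.NewInt(1)
	p := new(big.Int).Sub(new(big.Int).Lsh(one, 255), big.NewInt(19))
	z := big.NewInt(9) // any nonzero field element works

	// FeInvert's exponent: z^(p-2) is the multiplicative inverse.
	inv := new(big.Int).Exp(z, new(big.Int).Sub(p, big.NewInt(2)), p)
	check := new(big.Int).Mul(z, inv)
	fmt.Println(check.Mod(check, p).Cmp(one) == 0) // true

	// fePow22523's exponent: (p-5)/8 equals 2^252 - 3.
	e := new(big.Int).Div(new(big.Int).Sub(p, big.NewInt(5)), big.NewInt(8))
	want := new(big.Int).Sub(new(big.Int).Lsh(one, 252), big.NewInt(3))
	fmt.Println(e.Cmp(want) == 0) // true
}
```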
-// -// Several representations are used: -// ProjectiveGroupElement: (X:Y:Z) satisfying x=X/Z, y=Y/Z -// ExtendedGroupElement: (X:Y:Z:T) satisfying x=X/Z, y=Y/Z, XY=ZT -// CompletedGroupElement: ((X:Z),(Y:T)) satisfying x=X/Z, y=Y/T -// PreComputedGroupElement: (y+x,y-x,2dxy) - -type ProjectiveGroupElement struct { - X, Y, Z FieldElement -} - -type ExtendedGroupElement struct { - X, Y, Z, T FieldElement -} - -type CompletedGroupElement struct { - X, Y, Z, T FieldElement -} - -type PreComputedGroupElement struct { - yPlusX, yMinusX, xy2d FieldElement -} - -type CachedGroupElement struct { - yPlusX, yMinusX, Z, T2d FieldElement -} - -func (p *ProjectiveGroupElement) Zero() { - FeZero(&p.X) - FeOne(&p.Y) - FeOne(&p.Z) -} - -func (p *ProjectiveGroupElement) Double(r *CompletedGroupElement) { - var t0 FieldElement - - FeSquare(&r.X, &p.X) - FeSquare(&r.Z, &p.Y) - FeSquare2(&r.T, &p.Z) - FeAdd(&r.Y, &p.X, &p.Y) - FeSquare(&t0, &r.Y) - FeAdd(&r.Y, &r.Z, &r.X) - FeSub(&r.Z, &r.Z, &r.X) - FeSub(&r.X, &t0, &r.Y) - FeSub(&r.T, &r.T, &r.Z) -} - -func (p *ProjectiveGroupElement) ToBytes(s *[32]byte) { - var recip, x, y FieldElement - - FeInvert(&recip, &p.Z) - FeMul(&x, &p.X, &recip) - FeMul(&y, &p.Y, &recip) - FeToBytes(s, &y) - s[31] ^= FeIsNegative(&x) << 7 -} - -func (p *ExtendedGroupElement) Zero() { - FeZero(&p.X) - FeOne(&p.Y) - FeOne(&p.Z) - FeZero(&p.T) -} - -func (p *ExtendedGroupElement) Double(r *CompletedGroupElement) { - var q ProjectiveGroupElement - p.ToProjective(&q) - q.Double(r) -} - -func (p *ExtendedGroupElement) ToCached(r *CachedGroupElement) { - FeAdd(&r.yPlusX, &p.Y, &p.X) - FeSub(&r.yMinusX, &p.Y, &p.X) - FeCopy(&r.Z, &p.Z) - FeMul(&r.T2d, &p.T, &d2) -} - -func (p *ExtendedGroupElement) ToProjective(r *ProjectiveGroupElement) { - FeCopy(&r.X, &p.X) - FeCopy(&r.Y, &p.Y) - FeCopy(&r.Z, &p.Z) -} - -func (p *ExtendedGroupElement) ToBytes(s *[32]byte) { - var recip, x, y FieldElement - - FeInvert(&recip, &p.Z) - FeMul(&x, &p.X, &recip) - FeMul(&y, &p.Y, &recip) - FeToBytes(s, &y) - s[31] ^= FeIsNegative(&x) << 7 -} - -func (p *ExtendedGroupElement) FromBytes(s *[32]byte) bool { - var u, v, v3, vxx, check FieldElement - - FeFromBytes(&p.Y, s) - FeOne(&p.Z) - FeSquare(&u, &p.Y) - FeMul(&v, &u, &d) - FeSub(&u, &u, &p.Z) // y = y^2-1 - FeAdd(&v, &v, &p.Z) // v = dy^2+1 - - FeSquare(&v3, &v) - FeMul(&v3, &v3, &v) // v3 = v^3 - FeSquare(&p.X, &v3) - FeMul(&p.X, &p.X, &v) - FeMul(&p.X, &p.X, &u) // x = uv^7 - - fePow22523(&p.X, &p.X) // x = (uv^7)^((q-5)/8) - FeMul(&p.X, &p.X, &v3) - FeMul(&p.X, &p.X, &u) // x = uv^3(uv^7)^((q-5)/8) - - var tmpX, tmp2 [32]byte - - FeSquare(&vxx, &p.X) - FeMul(&vxx, &vxx, &v) - FeSub(&check, &vxx, &u) // vx^2-u - if FeIsNonZero(&check) == 1 { - FeAdd(&check, &vxx, &u) // vx^2+u - if FeIsNonZero(&check) == 1 { - return false - } - FeMul(&p.X, &p.X, &SqrtM1) - - FeToBytes(&tmpX, &p.X) - for i, v := range tmpX { - tmp2[31-i] = v - } - } - - if FeIsNegative(&p.X) != (s[31] >> 7) { - FeNeg(&p.X, &p.X) - } - - FeMul(&p.T, &p.X, &p.Y) - return true -} - -func (p *CompletedGroupElement) ToProjective(r *ProjectiveGroupElement) { - FeMul(&r.X, &p.X, &p.T) - FeMul(&r.Y, &p.Y, &p.Z) - FeMul(&r.Z, &p.Z, &p.T) -} - -func (p *CompletedGroupElement) ToExtended(r *ExtendedGroupElement) { - FeMul(&r.X, &p.X, &p.T) - FeMul(&r.Y, &p.Y, &p.Z) - FeMul(&r.Z, &p.Z, &p.T) - FeMul(&r.T, &p.X, &p.Y) -} - -func (p *PreComputedGroupElement) Zero() { - FeOne(&p.yPlusX) - FeOne(&p.yMinusX) - FeZero(&p.xy2d) -} - -func geAdd(r *CompletedGroupElement, p *ExtendedGroupElement, q 
*CachedGroupElement) { - var t0 FieldElement - - FeAdd(&r.X, &p.Y, &p.X) - FeSub(&r.Y, &p.Y, &p.X) - FeMul(&r.Z, &r.X, &q.yPlusX) - FeMul(&r.Y, &r.Y, &q.yMinusX) - FeMul(&r.T, &q.T2d, &p.T) - FeMul(&r.X, &p.Z, &q.Z) - FeAdd(&t0, &r.X, &r.X) - FeSub(&r.X, &r.Z, &r.Y) - FeAdd(&r.Y, &r.Z, &r.Y) - FeAdd(&r.Z, &t0, &r.T) - FeSub(&r.T, &t0, &r.T) -} - -func geSub(r *CompletedGroupElement, p *ExtendedGroupElement, q *CachedGroupElement) { - var t0 FieldElement - - FeAdd(&r.X, &p.Y, &p.X) - FeSub(&r.Y, &p.Y, &p.X) - FeMul(&r.Z, &r.X, &q.yMinusX) - FeMul(&r.Y, &r.Y, &q.yPlusX) - FeMul(&r.T, &q.T2d, &p.T) - FeMul(&r.X, &p.Z, &q.Z) - FeAdd(&t0, &r.X, &r.X) - FeSub(&r.X, &r.Z, &r.Y) - FeAdd(&r.Y, &r.Z, &r.Y) - FeSub(&r.Z, &t0, &r.T) - FeAdd(&r.T, &t0, &r.T) -} - -func geMixedAdd(r *CompletedGroupElement, p *ExtendedGroupElement, q *PreComputedGroupElement) { - var t0 FieldElement - - FeAdd(&r.X, &p.Y, &p.X) - FeSub(&r.Y, &p.Y, &p.X) - FeMul(&r.Z, &r.X, &q.yPlusX) - FeMul(&r.Y, &r.Y, &q.yMinusX) - FeMul(&r.T, &q.xy2d, &p.T) - FeAdd(&t0, &p.Z, &p.Z) - FeSub(&r.X, &r.Z, &r.Y) - FeAdd(&r.Y, &r.Z, &r.Y) - FeAdd(&r.Z, &t0, &r.T) - FeSub(&r.T, &t0, &r.T) -} - -func geMixedSub(r *CompletedGroupElement, p *ExtendedGroupElement, q *PreComputedGroupElement) { - var t0 FieldElement - - FeAdd(&r.X, &p.Y, &p.X) - FeSub(&r.Y, &p.Y, &p.X) - FeMul(&r.Z, &r.X, &q.yMinusX) - FeMul(&r.Y, &r.Y, &q.yPlusX) - FeMul(&r.T, &q.xy2d, &p.T) - FeAdd(&t0, &p.Z, &p.Z) - FeSub(&r.X, &r.Z, &r.Y) - FeAdd(&r.Y, &r.Z, &r.Y) - FeSub(&r.Z, &t0, &r.T) - FeAdd(&r.T, &t0, &r.T) -} - -func slide(r *[256]int8, a *[32]byte) { - for i := range r { - r[i] = int8(1 & (a[i>>3] >> uint(i&7))) - } - - for i := range r { - if r[i] != 0 { - for b := 1; b <= 6 && i+b < 256; b++ { - if r[i+b] != 0 { - if r[i]+(r[i+b]<<uint(b)) <= 15 { - r[i] += r[i+b] << uint(b) - r[i+b] = 0 - } else if r[i]-(r[i+b]<<uint(b)) >= -15 { - r[i] -= r[i+b] << uint(b) - for k := i + b; k < 256; k++ { - if r[k] == 0 { - r[k] = 1 - break - } - r[k] = 0 - } - } else { - break - } - } - } - } - } -} - -// GeDoubleScalarMultVartime sets r = a*A + b*B -// where a = a[0]+256*a[1]+...+256^31 a[31]. -// and b = b[0]+256*b[1]+...+256^31 b[31]. -// B is the Ed25519 base point (x,4/5) with x positive. -func GeDoubleScalarMultVartime(r *ProjectiveGroupElement, a *[32]byte, A *ExtendedGroupElement, b *[32]byte) { - var aSlide, bSlide [256]int8 - var Ai [8]CachedGroupElement // A,3A,5A,7A,9A,11A,13A,15A - var t CompletedGroupElement - var u, A2 ExtendedGroupElement - var i int - - slide(&aSlide, a) - slide(&bSlide, b) - - A.ToCached(&Ai[0]) - A.Double(&t) - t.ToExtended(&A2) - - for i := 0; i < 7; i++ { - geAdd(&t, &A2, &Ai[i]) - t.ToExtended(&u) - u.ToCached(&Ai[i+1]) - } - - r.Zero() - - for i = 255; i >= 0; i-- { - if aSlide[i] != 0 || bSlide[i] != 0 { - break - } - } - - for ; i >= 0; i-- { - r.Double(&t) - - if aSlide[i] > 0 { - t.ToExtended(&u) - geAdd(&t, &u, &Ai[aSlide[i]/2]) - } else if aSlide[i] < 0 { - t.ToExtended(&u) - geSub(&t, &u, &Ai[(-aSlide[i])/2]) - } - - if bSlide[i] > 0 { - t.ToExtended(&u) - geMixedAdd(&t, &u, &bi[bSlide[i]/2]) - } else if bSlide[i] < 0 { - t.ToExtended(&u) - geMixedSub(&t, &u, &bi[(-bSlide[i])/2]) - } - - t.ToProjective(r) - } -} - -// equal returns 1 if b == c and 0 otherwise, assuming that b and c are -// non-negative. -func equal(b, c int32) int32 { - x := uint32(b ^ c) - x-- - return int32(x >> 31) -} - -// negative returns 1 if b < 0 and 0 otherwise.
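slide above recodes a scalar into signed digits whose nonzero entries are odd and at most 15 in magnitude, which is what lets GeDoubleScalarMultVartime index its odd-multiple tables Ai and bi with digit/2. A self-contained copy of that recoding plus a check that the digits still sum to the scalar (the test value is arbitrary):

```go
package main

import (
	"fmt"
	"math/big"
)

// recode reproduces the slide recoding above, standalone.
func recode(r *[256]int8, a *[32]byte) {
	for i := range r {
		r[i] = int8(1 & (a[i>>3] >> uint(i&7)))
	}
	for i := range r {
		if r[i] == 0 {
			continue
		}
		for b := 1; b <= 6 && i+b < 256; b++ {
			if r[i+b] == 0 {
				continue
			}
			if r[i]+(r[i+b]<<uint(b)) <= 15 {
				// Absorb the higher bit into the current digit.
				r[i] += r[i+b] << uint(b)
				r[i+b] = 0
			} else if r[i]-(r[i+b]<<uint(b)) >= -15 {
				// Go negative here and carry +1 into the next zero bit.
				r[i] -= r[i+b] << uint(b)
				for k := i + b; k < 256; k++ {
					if r[k] == 0 {
						r[k] = 1
						break
					}
					r[k] = 0
				}
			} else {
				break
			}
		}
	}
}

func main() {
	var a [32]byte
	a[0], a[1], a[31] = 0xed, 0xd3, 0x10 // arbitrary little-endian test scalar

	var r [256]int8
	recode(&r, &a)

	// Horner evaluation of the signed digits must give the scalar back.
	sum := new(big.Int)
	for i := 255; i >= 0; i-- {
		sum.Lsh(sum, 1)
		sum.Add(sum, big.NewInt(int64(r[i])))
	}

	le := make([]byte, 32)
	copy(le, a[:])
	for i, j := 0, 31; i < j; i, j = i+1, j-1 {
		le[i], le[j] = le[j], le[i] // big.Int wants big-endian bytes
	}
	fmt.Println(sum.Cmp(new(big.Int).SetBytes(le)) == 0) // true
}
```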
-func negative(b int32) int32 { - return (b >> 31) & 1 -} - -func PreComputedGroupElementCMove(t, u *PreComputedGroupElement, b int32) { - FeCMove(&t.yPlusX, &u.yPlusX, b) - FeCMove(&t.yMinusX, &u.yMinusX, b) - FeCMove(&t.xy2d, &u.xy2d, b) -} - -func selectPoint(t *PreComputedGroupElement, pos int32, b int32) { - var minusT PreComputedGroupElement - bNegative := negative(b) - bAbs := b - (((-bNegative) & b) << 1) - - t.Zero() - for i := int32(0); i < 8; i++ { - PreComputedGroupElementCMove(t, &base[pos][i], equal(bAbs, i+1)) - } - FeCopy(&minusT.yPlusX, &t.yMinusX) - FeCopy(&minusT.yMinusX, &t.yPlusX) - FeNeg(&minusT.xy2d, &t.xy2d) - PreComputedGroupElementCMove(t, &minusT, bNegative) -} - -// GeScalarMultBase computes h = a*B, where -// a = a[0]+256*a[1]+...+256^31 a[31] -// B is the Ed25519 base point (x,4/5) with x positive. -// -// Preconditions: -// a[31] <= 127 -func GeScalarMultBase(h *ExtendedGroupElement, a *[32]byte) { - var e [64]int8 - - for i, v := range a { - e[2*i] = int8(v & 15) - e[2*i+1] = int8((v >> 4) & 15) - } - - // each e[i] is between 0 and 15 and e[63] is between 0 and 7. - - carry := int8(0) - for i := 0; i < 63; i++ { - e[i] += carry - carry = (e[i] + 8) >> 4 - e[i] -= carry << 4 - } - e[63] += carry - // each e[i] is between -8 and 8. - - h.Zero() - var t PreComputedGroupElement - var r CompletedGroupElement - for i := int32(1); i < 64; i += 2 { - selectPoint(&t, i/2, int32(e[i])) - geMixedAdd(&r, h, &t) - r.ToExtended(h) - } - - var s ProjectiveGroupElement - - h.Double(&r) - r.ToProjective(&s) - s.Double(&r) - r.ToProjective(&s) - s.Double(&r) - r.ToProjective(&s) - s.Double(&r) - r.ToExtended(h) - - for i := int32(0); i < 64; i += 2 { - selectPoint(&t, i/2, int32(e[i])) - geMixedAdd(&r, h, &t) - r.ToExtended(h) - } -} - -// The scalars are GF(2^252 + 27742317777372353535851937790883648493). - -// Input: -// a[0]+256*a[1]+...+256^31*a[31] = a -// b[0]+256*b[1]+...+256^31*b[31] = b -// c[0]+256*c[1]+...+256^31*c[31] = c -// -// Output: -// s[0]+256*s[1]+...+256^31*s[31] = (ab+c) mod l -// where l = 2^252 + 27742317777372353535851937790883648493. 
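ScMulAdd (and ScReduce further down) fold high 21-bit limbs back into the low ones using 2^252 ≡ −c (mod l), where l = 2^252 + c; the six magic constants are the signed 21-bit limbs of −c. A standalone math/big check of both facts:

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	// l = 2^252 + c, the order of the prime-order subgroup.
	c, _ := new(big.Int).SetString("27742317777372353535851937790883648493", 10)
	l := new(big.Int).Add(new(big.Int).Lsh(big.NewInt(1), 252), c)

	// The fold "s0 += s12*666643; ...; s5 -= s12*683901" replaces
	// s12*2^252 by s12*(-c): these signed limbs sum to -c modulo l.
	limbs := []int64{666643, 470296, 654183, -997805, 136657, -683901}
	acc := new(big.Int)
	for i, v := range limbs {
		acc.Add(acc, new(big.Int).Lsh(big.NewInt(v), uint(21*i)))
	}
	acc.Add(acc, c)
	fmt.Println(new(big.Int).Mod(acc, l).Sign() == 0) // true
}
```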
-func ScMulAdd(s, a, b, c *[32]byte) { - a0 := 2097151 & load3(a[:]) - a1 := 2097151 & (load4(a[2:]) >> 5) - a2 := 2097151 & (load3(a[5:]) >> 2) - a3 := 2097151 & (load4(a[7:]) >> 7) - a4 := 2097151 & (load4(a[10:]) >> 4) - a5 := 2097151 & (load3(a[13:]) >> 1) - a6 := 2097151 & (load4(a[15:]) >> 6) - a7 := 2097151 & (load3(a[18:]) >> 3) - a8 := 2097151 & load3(a[21:]) - a9 := 2097151 & (load4(a[23:]) >> 5) - a10 := 2097151 & (load3(a[26:]) >> 2) - a11 := (load4(a[28:]) >> 7) - b0 := 2097151 & load3(b[:]) - b1 := 2097151 & (load4(b[2:]) >> 5) - b2 := 2097151 & (load3(b[5:]) >> 2) - b3 := 2097151 & (load4(b[7:]) >> 7) - b4 := 2097151 & (load4(b[10:]) >> 4) - b5 := 2097151 & (load3(b[13:]) >> 1) - b6 := 2097151 & (load4(b[15:]) >> 6) - b7 := 2097151 & (load3(b[18:]) >> 3) - b8 := 2097151 & load3(b[21:]) - b9 := 2097151 & (load4(b[23:]) >> 5) - b10 := 2097151 & (load3(b[26:]) >> 2) - b11 := (load4(b[28:]) >> 7) - c0 := 2097151 & load3(c[:]) - c1 := 2097151 & (load4(c[2:]) >> 5) - c2 := 2097151 & (load3(c[5:]) >> 2) - c3 := 2097151 & (load4(c[7:]) >> 7) - c4 := 2097151 & (load4(c[10:]) >> 4) - c5 := 2097151 & (load3(c[13:]) >> 1) - c6 := 2097151 & (load4(c[15:]) >> 6) - c7 := 2097151 & (load3(c[18:]) >> 3) - c8 := 2097151 & load3(c[21:]) - c9 := 2097151 & (load4(c[23:]) >> 5) - c10 := 2097151 & (load3(c[26:]) >> 2) - c11 := (load4(c[28:]) >> 7) - var carry [23]int64 - - s0 := c0 + a0*b0 - s1 := c1 + a0*b1 + a1*b0 - s2 := c2 + a0*b2 + a1*b1 + a2*b0 - s3 := c3 + a0*b3 + a1*b2 + a2*b1 + a3*b0 - s4 := c4 + a0*b4 + a1*b3 + a2*b2 + a3*b1 + a4*b0 - s5 := c5 + a0*b5 + a1*b4 + a2*b3 + a3*b2 + a4*b1 + a5*b0 - s6 := c6 + a0*b6 + a1*b5 + a2*b4 + a3*b3 + a4*b2 + a5*b1 + a6*b0 - s7 := c7 + a0*b7 + a1*b6 + a2*b5 + a3*b4 + a4*b3 + a5*b2 + a6*b1 + a7*b0 - s8 := c8 + a0*b8 + a1*b7 + a2*b6 + a3*b5 + a4*b4 + a5*b3 + a6*b2 + a7*b1 + a8*b0 - s9 := c9 + a0*b9 + a1*b8 + a2*b7 + a3*b6 + a4*b5 + a5*b4 + a6*b3 + a7*b2 + a8*b1 + a9*b0 - s10 := c10 + a0*b10 + a1*b9 + a2*b8 + a3*b7 + a4*b6 + a5*b5 + a6*b4 + a7*b3 + a8*b2 + a9*b1 + a10*b0 - s11 := c11 + a0*b11 + a1*b10 + a2*b9 + a3*b8 + a4*b7 + a5*b6 + a6*b5 + a7*b4 + a8*b3 + a9*b2 + a10*b1 + a11*b0 - s12 := a1*b11 + a2*b10 + a3*b9 + a4*b8 + a5*b7 + a6*b6 + a7*b5 + a8*b4 + a9*b3 + a10*b2 + a11*b1 - s13 := a2*b11 + a3*b10 + a4*b9 + a5*b8 + a6*b7 + a7*b6 + a8*b5 + a9*b4 + a10*b3 + a11*b2 - s14 := a3*b11 + a4*b10 + a5*b9 + a6*b8 + a7*b7 + a8*b6 + a9*b5 + a10*b4 + a11*b3 - s15 := a4*b11 + a5*b10 + a6*b9 + a7*b8 + a8*b7 + a9*b6 + a10*b5 + a11*b4 - s16 := a5*b11 + a6*b10 + a7*b9 + a8*b8 + a9*b7 + a10*b6 + a11*b5 - s17 := a6*b11 + a7*b10 + a8*b9 + a9*b8 + a10*b7 + a11*b6 - s18 := a7*b11 + a8*b10 + a9*b9 + a10*b8 + a11*b7 - s19 := a8*b11 + a9*b10 + a10*b9 + a11*b8 - s20 := a9*b11 + a10*b10 + a11*b9 - s21 := a10*b11 + a11*b10 - s22 := a11 * b11 - s23 := int64(0) - - carry[0] = (s0 + (1 << 20)) >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[2] = (s2 + (1 << 20)) >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[4] = (s4 + (1 << 20)) >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[6] = (s6 + (1 << 20)) >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[8] = (s8 + (1 << 20)) >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[10] = (s10 + (1 << 20)) >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - carry[12] = (s12 + (1 << 20)) >> 21 - s13 += carry[12] - s12 -= carry[12] << 21 - carry[14] = (s14 + (1 << 20)) >> 21 - s15 += carry[14] - s14 -= carry[14] << 21 - carry[16] = (s16 + (1 << 20)) >> 21 - s17 += carry[16] - s16 -= carry[16] << 21 - carry[18] = (s18 + (1 
<< 20)) >> 21 - s19 += carry[18] - s18 -= carry[18] << 21 - carry[20] = (s20 + (1 << 20)) >> 21 - s21 += carry[20] - s20 -= carry[20] << 21 - carry[22] = (s22 + (1 << 20)) >> 21 - s23 += carry[22] - s22 -= carry[22] << 21 - - carry[1] = (s1 + (1 << 20)) >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[3] = (s3 + (1 << 20)) >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[5] = (s5 + (1 << 20)) >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[7] = (s7 + (1 << 20)) >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[9] = (s9 + (1 << 20)) >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[11] = (s11 + (1 << 20)) >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - carry[13] = (s13 + (1 << 20)) >> 21 - s14 += carry[13] - s13 -= carry[13] << 21 - carry[15] = (s15 + (1 << 20)) >> 21 - s16 += carry[15] - s15 -= carry[15] << 21 - carry[17] = (s17 + (1 << 20)) >> 21 - s18 += carry[17] - s17 -= carry[17] << 21 - carry[19] = (s19 + (1 << 20)) >> 21 - s20 += carry[19] - s19 -= carry[19] << 21 - carry[21] = (s21 + (1 << 20)) >> 21 - s22 += carry[21] - s21 -= carry[21] << 21 - - s11 += s23 * 666643 - s12 += s23 * 470296 - s13 += s23 * 654183 - s14 -= s23 * 997805 - s15 += s23 * 136657 - s16 -= s23 * 683901 - s23 = 0 - - s10 += s22 * 666643 - s11 += s22 * 470296 - s12 += s22 * 654183 - s13 -= s22 * 997805 - s14 += s22 * 136657 - s15 -= s22 * 683901 - s22 = 0 - - s9 += s21 * 666643 - s10 += s21 * 470296 - s11 += s21 * 654183 - s12 -= s21 * 997805 - s13 += s21 * 136657 - s14 -= s21 * 683901 - s21 = 0 - - s8 += s20 * 666643 - s9 += s20 * 470296 - s10 += s20 * 654183 - s11 -= s20 * 997805 - s12 += s20 * 136657 - s13 -= s20 * 683901 - s20 = 0 - - s7 += s19 * 666643 - s8 += s19 * 470296 - s9 += s19 * 654183 - s10 -= s19 * 997805 - s11 += s19 * 136657 - s12 -= s19 * 683901 - s19 = 0 - - s6 += s18 * 666643 - s7 += s18 * 470296 - s8 += s18 * 654183 - s9 -= s18 * 997805 - s10 += s18 * 136657 - s11 -= s18 * 683901 - s18 = 0 - - carry[6] = (s6 + (1 << 20)) >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[8] = (s8 + (1 << 20)) >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[10] = (s10 + (1 << 20)) >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - carry[12] = (s12 + (1 << 20)) >> 21 - s13 += carry[12] - s12 -= carry[12] << 21 - carry[14] = (s14 + (1 << 20)) >> 21 - s15 += carry[14] - s14 -= carry[14] << 21 - carry[16] = (s16 + (1 << 20)) >> 21 - s17 += carry[16] - s16 -= carry[16] << 21 - - carry[7] = (s7 + (1 << 20)) >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[9] = (s9 + (1 << 20)) >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[11] = (s11 + (1 << 20)) >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - carry[13] = (s13 + (1 << 20)) >> 21 - s14 += carry[13] - s13 -= carry[13] << 21 - carry[15] = (s15 + (1 << 20)) >> 21 - s16 += carry[15] - s15 -= carry[15] << 21 - - s5 += s17 * 666643 - s6 += s17 * 470296 - s7 += s17 * 654183 - s8 -= s17 * 997805 - s9 += s17 * 136657 - s10 -= s17 * 683901 - s17 = 0 - - s4 += s16 * 666643 - s5 += s16 * 470296 - s6 += s16 * 654183 - s7 -= s16 * 997805 - s8 += s16 * 136657 - s9 -= s16 * 683901 - s16 = 0 - - s3 += s15 * 666643 - s4 += s15 * 470296 - s5 += s15 * 654183 - s6 -= s15 * 997805 - s7 += s15 * 136657 - s8 -= s15 * 683901 - s15 = 0 - - s2 += s14 * 666643 - s3 += s14 * 470296 - s4 += s14 * 654183 - s5 -= s14 * 997805 - s6 += s14 * 136657 - s7 -= s14 * 683901 - s14 = 0 - - s1 += s13 * 666643 - s2 += s13 * 470296 - s3 += s13 * 654183 - s4 -= s13 * 997805 - s5 += s13 * 136657 - s6 -= s13 * 683901 - s13 = 0 - - s0 += s12 * 666643 - s1 
+= s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = (s0 + (1 << 20)) >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[2] = (s2 + (1 << 20)) >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[4] = (s4 + (1 << 20)) >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[6] = (s6 + (1 << 20)) >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[8] = (s8 + (1 << 20)) >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[10] = (s10 + (1 << 20)) >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - - carry[1] = (s1 + (1 << 20)) >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[3] = (s3 + (1 << 20)) >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[5] = (s5 + (1 << 20)) >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[7] = (s7 + (1 << 20)) >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[9] = (s9 + (1 << 20)) >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[11] = (s11 + (1 << 20)) >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - - s0 += s12 * 666643 - s1 += s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = s0 >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[1] = s1 >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[2] = s2 >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[3] = s3 >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[4] = s4 >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[5] = s5 >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[6] = s6 >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[7] = s7 >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[8] = s8 >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[9] = s9 >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[10] = s10 >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - carry[11] = s11 >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - - s0 += s12 * 666643 - s1 += s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = s0 >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[1] = s1 >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[2] = s2 >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[3] = s3 >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[4] = s4 >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[5] = s5 >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[6] = s6 >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[7] = s7 >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[8] = s8 >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[9] = s9 >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[10] = s10 >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - - s[0] = byte(s0 >> 0) - s[1] = byte(s0 >> 8) - s[2] = byte((s0 >> 16) | (s1 << 5)) - s[3] = byte(s1 >> 3) - s[4] = byte(s1 >> 11) - s[5] = byte((s1 >> 19) | (s2 << 2)) - s[6] = byte(s2 >> 6) - s[7] = byte((s2 >> 14) | (s3 << 7)) - s[8] = byte(s3 >> 1) - s[9] = byte(s3 >> 9) - s[10] = byte((s3 >> 17) | (s4 << 4)) - s[11] = byte(s4 >> 4) - s[12] = byte(s4 >> 12) - s[13] = byte((s4 >> 20) | (s5 << 1)) - s[14] = byte(s5 >> 7) - s[15] = byte((s5 >> 15) | (s6 << 6)) - s[16] = byte(s6 >> 2) - s[17] = byte(s6 >> 10) - s[18] = byte((s6 >> 18) | (s7 << 3)) - s[19] = byte(s7 >> 5) - s[20] = byte(s7 >> 13) - s[21] = byte(s8 >> 0) - s[22] = byte(s8 >> 8) - s[23] = byte((s8 >> 16) | (s9 << 5)) - s[24] = byte(s9 >> 3) - s[25] = byte(s9 >> 11) - s[26] = byte((s9 >> 19) | (s10 << 2)) - s[27] 
= byte(s10 >> 6) - s[28] = byte((s10 >> 14) | (s11 << 7)) - s[29] = byte(s11 >> 1) - s[30] = byte(s11 >> 9) - s[31] = byte(s11 >> 17) -} - -// Input: -// s[0]+256*s[1]+...+256^63*s[63] = s -// -// Output: -// s[0]+256*s[1]+...+256^31*s[31] = s mod l -// where l = 2^252 + 27742317777372353535851937790883648493. -func ScReduce(out *[32]byte, s *[64]byte) { - s0 := 2097151 & load3(s[:]) - s1 := 2097151 & (load4(s[2:]) >> 5) - s2 := 2097151 & (load3(s[5:]) >> 2) - s3 := 2097151 & (load4(s[7:]) >> 7) - s4 := 2097151 & (load4(s[10:]) >> 4) - s5 := 2097151 & (load3(s[13:]) >> 1) - s6 := 2097151 & (load4(s[15:]) >> 6) - s7 := 2097151 & (load3(s[18:]) >> 3) - s8 := 2097151 & load3(s[21:]) - s9 := 2097151 & (load4(s[23:]) >> 5) - s10 := 2097151 & (load3(s[26:]) >> 2) - s11 := 2097151 & (load4(s[28:]) >> 7) - s12 := 2097151 & (load4(s[31:]) >> 4) - s13 := 2097151 & (load3(s[34:]) >> 1) - s14 := 2097151 & (load4(s[36:]) >> 6) - s15 := 2097151 & (load3(s[39:]) >> 3) - s16 := 2097151 & load3(s[42:]) - s17 := 2097151 & (load4(s[44:]) >> 5) - s18 := 2097151 & (load3(s[47:]) >> 2) - s19 := 2097151 & (load4(s[49:]) >> 7) - s20 := 2097151 & (load4(s[52:]) >> 4) - s21 := 2097151 & (load3(s[55:]) >> 1) - s22 := 2097151 & (load4(s[57:]) >> 6) - s23 := (load4(s[60:]) >> 3) - - s11 += s23 * 666643 - s12 += s23 * 470296 - s13 += s23 * 654183 - s14 -= s23 * 997805 - s15 += s23 * 136657 - s16 -= s23 * 683901 - s23 = 0 - - s10 += s22 * 666643 - s11 += s22 * 470296 - s12 += s22 * 654183 - s13 -= s22 * 997805 - s14 += s22 * 136657 - s15 -= s22 * 683901 - s22 = 0 - - s9 += s21 * 666643 - s10 += s21 * 470296 - s11 += s21 * 654183 - s12 -= s21 * 997805 - s13 += s21 * 136657 - s14 -= s21 * 683901 - s21 = 0 - - s8 += s20 * 666643 - s9 += s20 * 470296 - s10 += s20 * 654183 - s11 -= s20 * 997805 - s12 += s20 * 136657 - s13 -= s20 * 683901 - s20 = 0 - - s7 += s19 * 666643 - s8 += s19 * 470296 - s9 += s19 * 654183 - s10 -= s19 * 997805 - s11 += s19 * 136657 - s12 -= s19 * 683901 - s19 = 0 - - s6 += s18 * 666643 - s7 += s18 * 470296 - s8 += s18 * 654183 - s9 -= s18 * 997805 - s10 += s18 * 136657 - s11 -= s18 * 683901 - s18 = 0 - - var carry [17]int64 - - carry[6] = (s6 + (1 << 20)) >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[8] = (s8 + (1 << 20)) >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[10] = (s10 + (1 << 20)) >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - carry[12] = (s12 + (1 << 20)) >> 21 - s13 += carry[12] - s12 -= carry[12] << 21 - carry[14] = (s14 + (1 << 20)) >> 21 - s15 += carry[14] - s14 -= carry[14] << 21 - carry[16] = (s16 + (1 << 20)) >> 21 - s17 += carry[16] - s16 -= carry[16] << 21 - - carry[7] = (s7 + (1 << 20)) >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[9] = (s9 + (1 << 20)) >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[11] = (s11 + (1 << 20)) >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - carry[13] = (s13 + (1 << 20)) >> 21 - s14 += carry[13] - s13 -= carry[13] << 21 - carry[15] = (s15 + (1 << 20)) >> 21 - s16 += carry[15] - s15 -= carry[15] << 21 - - s5 += s17 * 666643 - s6 += s17 * 470296 - s7 += s17 * 654183 - s8 -= s17 * 997805 - s9 += s17 * 136657 - s10 -= s17 * 683901 - s17 = 0 - - s4 += s16 * 666643 - s5 += s16 * 470296 - s6 += s16 * 654183 - s7 -= s16 * 997805 - s8 += s16 * 136657 - s9 -= s16 * 683901 - s16 = 0 - - s3 += s15 * 666643 - s4 += s15 * 470296 - s5 += s15 * 654183 - s6 -= s15 * 997805 - s7 += s15 * 136657 - s8 -= s15 * 683901 - s15 = 0 - - s2 += s14 * 666643 - s3 += s14 * 470296 - s4 += s14 * 654183 - s5 -= s14 * 997805 - s6 += s14 * 136657 
- s7 -= s14 * 683901 - s14 = 0 - - s1 += s13 * 666643 - s2 += s13 * 470296 - s3 += s13 * 654183 - s4 -= s13 * 997805 - s5 += s13 * 136657 - s6 -= s13 * 683901 - s13 = 0 - - s0 += s12 * 666643 - s1 += s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = (s0 + (1 << 20)) >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[2] = (s2 + (1 << 20)) >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[4] = (s4 + (1 << 20)) >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[6] = (s6 + (1 << 20)) >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[8] = (s8 + (1 << 20)) >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[10] = (s10 + (1 << 20)) >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - - carry[1] = (s1 + (1 << 20)) >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[3] = (s3 + (1 << 20)) >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[5] = (s5 + (1 << 20)) >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[7] = (s7 + (1 << 20)) >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[9] = (s9 + (1 << 20)) >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[11] = (s11 + (1 << 20)) >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - - s0 += s12 * 666643 - s1 += s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = s0 >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[1] = s1 >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[2] = s2 >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[3] = s3 >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[4] = s4 >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[5] = s5 >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[6] = s6 >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[7] = s7 >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[8] = s8 >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[9] = s9 >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[10] = s10 >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - carry[11] = s11 >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - - s0 += s12 * 666643 - s1 += s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = s0 >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[1] = s1 >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[2] = s2 >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[3] = s3 >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[4] = s4 >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[5] = s5 >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[6] = s6 >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[7] = s7 >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[8] = s8 >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[9] = s9 >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[10] = s10 >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - - out[0] = byte(s0 >> 0) - out[1] = byte(s0 >> 8) - out[2] = byte((s0 >> 16) | (s1 << 5)) - out[3] = byte(s1 >> 3) - out[4] = byte(s1 >> 11) - out[5] = byte((s1 >> 19) | (s2 << 2)) - out[6] = byte(s2 >> 6) - out[7] = byte((s2 >> 14) | (s3 << 7)) - out[8] = byte(s3 >> 1) - out[9] = byte(s3 >> 9) - out[10] = byte((s3 >> 17) | (s4 << 4)) - out[11] = byte(s4 >> 4) - out[12] = byte(s4 >> 12) - out[13] = byte((s4 >> 20) | (s5 << 1)) - out[14] = byte(s5 >> 7) - out[15] = byte((s5 >> 15) | (s6 << 6)) - out[16] = byte(s6 >> 2) - out[17] = byte(s6 >> 10) - out[18] = byte((s6 >> 18) | (s7 << 
3)) - out[19] = byte(s7 >> 5) - out[20] = byte(s7 >> 13) - out[21] = byte(s8 >> 0) - out[22] = byte(s8 >> 8) - out[23] = byte((s8 >> 16) | (s9 << 5)) - out[24] = byte(s9 >> 3) - out[25] = byte(s9 >> 11) - out[26] = byte((s9 >> 19) | (s10 << 2)) - out[27] = byte(s10 >> 6) - out[28] = byte((s10 >> 14) | (s11 << 7)) - out[29] = byte(s11 >> 1) - out[30] = byte(s11 >> 9) - out[31] = byte(s11 >> 17) -} - -// order is the order of Curve25519 in little-endian form. -var order = [4]uint64{0x5812631a5cf5d3ed, 0x14def9dea2f79cd6, 0, 0x1000000000000000} - -// ScMinimal returns true if the given scalar is less than the order of the -// curve. -func ScMinimal(scalar *[32]byte) bool { - for i := 3; ; i-- { - v := binary.LittleEndian.Uint64(scalar[i*8:]) - if v > order[i] { - return false - } else if v < order[i] { - break - } else if i == 0 { - return false - } - } - - return true -} diff --git a/vendor/golang.org/x/crypto/hkdf/hkdf.go b/vendor/golang.org/x/crypto/hkdf/hkdf.go deleted file mode 100644 index dda3f143..00000000 --- a/vendor/golang.org/x/crypto/hkdf/hkdf.go +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package hkdf implements the HMAC-based Extract-and-Expand Key Derivation -// Function (HKDF) as defined in RFC 5869. -// -// HKDF is a cryptographic key derivation function (KDF) with the goal of -// expanding limited input keying material into one or more cryptographically -// strong secret keys. -package hkdf // import "golang.org/x/crypto/hkdf" - -import ( - "crypto/hmac" - "errors" - "hash" - "io" -) - -// Extract generates a pseudorandom key for use with Expand from an input secret -// and an optional independent salt. -// -// Only use this function if you need to reuse the extracted key with multiple -// Expand invocations and different context values. Most common scenarios, -// including the generation of multiple keys, should use New instead. -func Extract(hash func() hash.Hash, secret, salt []byte) []byte { - if salt == nil { - salt = make([]byte, hash().Size()) - } - extractor := hmac.New(hash, salt) - extractor.Write(secret) - return extractor.Sum(nil) -} - -type hkdf struct { - expander hash.Hash - size int - - info []byte - counter byte - - prev []byte - buf []byte -} - -func (f *hkdf) Read(p []byte) (int, error) { - // Check whether enough data can be generated - need := len(p) - remains := len(f.buf) + int(255-f.counter+1)*f.size - if remains < need { - return 0, errors.New("hkdf: entropy limit reached") - } - // Read any leftover from the buffer - n := copy(p, f.buf) - p = p[n:] - - // Fill the rest of the buffer - for len(p) > 0 { - f.expander.Reset() - f.expander.Write(f.prev) - f.expander.Write(f.info) - f.expander.Write([]byte{f.counter}) - f.prev = f.expander.Sum(f.prev[:0]) - f.counter++ - - // Copy the new batch into p - f.buf = f.prev - n = copy(p, f.buf) - p = p[n:] - } - // Save leftovers for next run - f.buf = f.buf[n:] - - return need, nil -} - -// Expand returns a Reader, from which keys can be read, using the given -// pseudorandom key and optional context info, skipping the extraction step. -// -// The pseudorandomKey should have been generated by Extract, or be a uniformly -// random or pseudorandom cryptographically strong key. See RFC 5869, Section -// 3.3. Most common scenarios will want to use New instead. 
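Typical use of the HKDF package being dropped here (it remains available upstream as golang.org/x/crypto/hkdf) goes through New, defined just below, which chains Extract and Expand. A minimal sketch with illustrative secret, salt, and info values:

```go
package main

import (
	"crypto/rand"
	"crypto/sha256"
	"fmt"
	"io"

	"golang.org/x/crypto/hkdf"
)

func main() {
	secret := []byte("input keying material") // illustrative only
	salt := make([]byte, sha256.Size)
	if _, err := rand.Read(salt); err != nil {
		panic(err)
	}

	// New = Extract then Expand; the reader yields at most 255*HashLen bytes.
	kdf := hkdf.New(sha256.New, secret, salt, []byte("context info"))
	key := make([]byte, 32)
	if _, err := io.ReadFull(kdf, key); err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", key)
}
```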
-func Expand(hash func() hash.Hash, pseudorandomKey, info []byte) io.Reader { - expander := hmac.New(hash, pseudorandomKey) - return &hkdf{expander, expander.Size(), info, 1, nil, nil} -} - -// New returns a Reader, from which keys can be read, using the given hash, -// secret, salt and context info. Salt and info can be nil. -func New(hash func() hash.Hash, secret, salt, info []byte) io.Reader { - prk := Extract(hash, secret, salt) - return Expand(hash, prk, info) -} diff --git a/vendor/golang.org/x/crypto/internal/subtle/aliasing.go b/vendor/golang.org/x/crypto/internal/subtle/aliasing.go deleted file mode 100644 index 4fad24f8..00000000 --- a/vendor/golang.org/x/crypto/internal/subtle/aliasing.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !purego -// +build !purego - -// Package subtle implements functions that are often useful in cryptographic -// code but require careful thought to use correctly. -package subtle // import "golang.org/x/crypto/internal/subtle" - -import "unsafe" - -// AnyOverlap reports whether x and y share memory at any (not necessarily -// corresponding) index. The memory beyond the slice length is ignored. -func AnyOverlap(x, y []byte) bool { - return len(x) > 0 && len(y) > 0 && - uintptr(unsafe.Pointer(&x[0])) <= uintptr(unsafe.Pointer(&y[len(y)-1])) && - uintptr(unsafe.Pointer(&y[0])) <= uintptr(unsafe.Pointer(&x[len(x)-1])) -} - -// InexactOverlap reports whether x and y share memory at any non-corresponding -// index. The memory beyond the slice length is ignored. Note that x and y can -// have different lengths and still not have any inexact overlap. -// -// InexactOverlap can be used to implement the requirements of the crypto/cipher -// AEAD, Block, BlockMode and Stream interfaces. -func InexactOverlap(x, y []byte) bool { - if len(x) == 0 || len(y) == 0 || &x[0] == &y[0] { - return false - } - return AnyOverlap(x, y) -} diff --git a/vendor/golang.org/x/crypto/internal/subtle/aliasing_purego.go b/vendor/golang.org/x/crypto/internal/subtle/aliasing_purego.go deleted file mode 100644 index 80ccbed2..00000000 --- a/vendor/golang.org/x/crypto/internal/subtle/aliasing_purego.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build purego -// +build purego - -// Package subtle implements functions that are often useful in cryptographic -// code but require careful thought to use correctly. -package subtle // import "golang.org/x/crypto/internal/subtle" - -// This is the Google App Engine standard variant based on reflect -// because the unsafe package and cgo are disallowed. - -import "reflect" - -// AnyOverlap reports whether x and y share memory at any (not necessarily -// corresponding) index. The memory beyond the slice length is ignored. -func AnyOverlap(x, y []byte) bool { - return len(x) > 0 && len(y) > 0 && - reflect.ValueOf(&x[0]).Pointer() <= reflect.ValueOf(&y[len(y)-1]).Pointer() && - reflect.ValueOf(&y[0]).Pointer() <= reflect.ValueOf(&x[len(x)-1]).Pointer() -} - -// InexactOverlap reports whether x and y share memory at any non-corresponding -// index. The memory beyond the slice length is ignored. Note that x and y can -// have different lengths and still not have any inexact overlap. 
-// -// InexactOverlap can be used to implement the requirements of the crypto/cipher -// AEAD, Block, BlockMode and Stream interfaces. -func InexactOverlap(x, y []byte) bool { - if len(x) == 0 || len(y) == 0 || &x[0] == &y[0] { - return false - } - return AnyOverlap(x, y) -} diff --git a/vendor/golang.org/x/crypto/ocsp/ocsp.go b/vendor/golang.org/x/crypto/ocsp/ocsp.go deleted file mode 100644 index 9d3fffa8..00000000 --- a/vendor/golang.org/x/crypto/ocsp/ocsp.go +++ /dev/null @@ -1,789 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package ocsp parses OCSP responses as specified in RFC 2560. OCSP responses -// are signed messages attesting to the validity of a certificate for a small -// period of time. This is used to manage revocation for X.509 certificates. -package ocsp // import "golang.org/x/crypto/ocsp" - -import ( - "crypto" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/rsa" - _ "crypto/sha1" - _ "crypto/sha256" - _ "crypto/sha512" - "crypto/x509" - "crypto/x509/pkix" - "encoding/asn1" - "errors" - "fmt" - "math/big" - "strconv" - "time" -) - -var idPKIXOCSPBasic = asn1.ObjectIdentifier([]int{1, 3, 6, 1, 5, 5, 7, 48, 1, 1}) - -// ResponseStatus contains the result of an OCSP request. See -// https://tools.ietf.org/html/rfc6960#section-2.3 -type ResponseStatus int - -const ( - Success ResponseStatus = 0 - Malformed ResponseStatus = 1 - InternalError ResponseStatus = 2 - TryLater ResponseStatus = 3 - // Status code four is unused in OCSP. See - // https://tools.ietf.org/html/rfc6960#section-4.2.1 - SignatureRequired ResponseStatus = 5 - Unauthorized ResponseStatus = 6 -) - -func (r ResponseStatus) String() string { - switch r { - case Success: - return "success" - case Malformed: - return "malformed" - case InternalError: - return "internal error" - case TryLater: - return "try later" - case SignatureRequired: - return "signature required" - case Unauthorized: - return "unauthorized" - default: - return "unknown OCSP status: " + strconv.Itoa(int(r)) - } -} - -// ResponseError is an error that may be returned by ParseResponse to indicate -// that the response itself is an error, not just that it's indicating that a -// certificate is revoked, unknown, etc. -type ResponseError struct { - Status ResponseStatus -} - -func (r ResponseError) Error() string { - return "ocsp: error from server: " + r.Status.String() -} - -// These are internal structures that reflect the ASN.1 structure of an OCSP -// response. See RFC 2560, section 4.2. 
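The overlap rules in the subtle helpers above are easiest to see on slices of a single backing array. This standalone sketch uses local copies of the unsafe-based variants (the package itself is internal and cannot be imported):

```go
package main

import (
	"fmt"
	"unsafe"
)

// Local copies of AnyOverlap/InexactOverlap above, for demonstration only.
func anyOverlap(x, y []byte) bool {
	return len(x) > 0 && len(y) > 0 &&
		uintptr(unsafe.Pointer(&x[0])) <= uintptr(unsafe.Pointer(&y[len(y)-1])) &&
		uintptr(unsafe.Pointer(&y[0])) <= uintptr(unsafe.Pointer(&x[len(x)-1]))
}

func inexactOverlap(x, y []byte) bool {
	if len(x) == 0 || len(y) == 0 || &x[0] == &y[0] {
		return false
	}
	return anyOverlap(x, y)
}

func main() {
	buf := make([]byte, 8)
	x, y := buf[0:4], buf[2:6] // share buf[2:4] at non-corresponding indices

	fmt.Println(anyOverlap(x, y))     // true
	fmt.Println(inexactOverlap(x, y)) // true: unsafe for in-place cipher use

	z := buf[0:4] // same start as x: exact aliasing is permitted
	fmt.Println(inexactOverlap(x, z)) // false
}
```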
- -type certID struct { - HashAlgorithm pkix.AlgorithmIdentifier - NameHash []byte - IssuerKeyHash []byte - SerialNumber *big.Int -} - -// https://tools.ietf.org/html/rfc2560#section-4.1.1 -type ocspRequest struct { - TBSRequest tbsRequest -} - -type tbsRequest struct { - Version int `asn1:"explicit,tag:0,default:0,optional"` - RequestorName pkix.RDNSequence `asn1:"explicit,tag:1,optional"` - RequestList []request -} - -type request struct { - Cert certID -} - -type responseASN1 struct { - Status asn1.Enumerated - Response responseBytes `asn1:"explicit,tag:0,optional"` -} - -type responseBytes struct { - ResponseType asn1.ObjectIdentifier - Response []byte -} - -type basicResponse struct { - TBSResponseData responseData - SignatureAlgorithm pkix.AlgorithmIdentifier - Signature asn1.BitString - Certificates []asn1.RawValue `asn1:"explicit,tag:0,optional"` -} - -type responseData struct { - Raw asn1.RawContent - Version int `asn1:"optional,default:0,explicit,tag:0"` - RawResponderID asn1.RawValue - ProducedAt time.Time `asn1:"generalized"` - Responses []singleResponse -} - -type singleResponse struct { - CertID certID - Good asn1.Flag `asn1:"tag:0,optional"` - Revoked revokedInfo `asn1:"tag:1,optional"` - Unknown asn1.Flag `asn1:"tag:2,optional"` - ThisUpdate time.Time `asn1:"generalized"` - NextUpdate time.Time `asn1:"generalized,explicit,tag:0,optional"` - SingleExtensions []pkix.Extension `asn1:"explicit,tag:1,optional"` -} - -type revokedInfo struct { - RevocationTime time.Time `asn1:"generalized"` - Reason asn1.Enumerated `asn1:"explicit,tag:0,optional"` -} - -var ( - oidSignatureMD2WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 2} - oidSignatureMD5WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 4} - oidSignatureSHA1WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 5} - oidSignatureSHA256WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 11} - oidSignatureSHA384WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 12} - oidSignatureSHA512WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 13} - oidSignatureDSAWithSHA1 = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 3} - oidSignatureDSAWithSHA256 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 3, 2} - oidSignatureECDSAWithSHA1 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 1} - oidSignatureECDSAWithSHA256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 2} - oidSignatureECDSAWithSHA384 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 3} - oidSignatureECDSAWithSHA512 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 4} -) - -var hashOIDs = map[crypto.Hash]asn1.ObjectIdentifier{ - crypto.SHA1: asn1.ObjectIdentifier([]int{1, 3, 14, 3, 2, 26}), - crypto.SHA256: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 1}), - crypto.SHA384: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 2}), - crypto.SHA512: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 3}), -} - -// TODO(rlb): This is also from crypto/x509, so same comment as AGL's below -var signatureAlgorithmDetails = []struct { - algo x509.SignatureAlgorithm - oid asn1.ObjectIdentifier - pubKeyAlgo x509.PublicKeyAlgorithm - hash crypto.Hash -}{ - {x509.MD2WithRSA, oidSignatureMD2WithRSA, x509.RSA, crypto.Hash(0) /* no value for MD2 */}, - {x509.MD5WithRSA, oidSignatureMD5WithRSA, x509.RSA, crypto.MD5}, - {x509.SHA1WithRSA, oidSignatureSHA1WithRSA, x509.RSA, crypto.SHA1}, - {x509.SHA256WithRSA, oidSignatureSHA256WithRSA, x509.RSA, crypto.SHA256}, - {x509.SHA384WithRSA, oidSignatureSHA384WithRSA, x509.RSA, crypto.SHA384}, - 
{x509.SHA512WithRSA, oidSignatureSHA512WithRSA, x509.RSA, crypto.SHA512}, - {x509.DSAWithSHA1, oidSignatureDSAWithSHA1, x509.DSA, crypto.SHA1}, - {x509.DSAWithSHA256, oidSignatureDSAWithSHA256, x509.DSA, crypto.SHA256}, - {x509.ECDSAWithSHA1, oidSignatureECDSAWithSHA1, x509.ECDSA, crypto.SHA1}, - {x509.ECDSAWithSHA256, oidSignatureECDSAWithSHA256, x509.ECDSA, crypto.SHA256}, - {x509.ECDSAWithSHA384, oidSignatureECDSAWithSHA384, x509.ECDSA, crypto.SHA384}, - {x509.ECDSAWithSHA512, oidSignatureECDSAWithSHA512, x509.ECDSA, crypto.SHA512}, -} - -// TODO(rlb): This is also from crypto/x509, so same comment as AGL's below -func signingParamsForPublicKey(pub interface{}, requestedSigAlgo x509.SignatureAlgorithm) (hashFunc crypto.Hash, sigAlgo pkix.AlgorithmIdentifier, err error) { - var pubType x509.PublicKeyAlgorithm - - switch pub := pub.(type) { - case *rsa.PublicKey: - pubType = x509.RSA - hashFunc = crypto.SHA256 - sigAlgo.Algorithm = oidSignatureSHA256WithRSA - sigAlgo.Parameters = asn1.RawValue{ - Tag: 5, - } - - case *ecdsa.PublicKey: - pubType = x509.ECDSA - - switch pub.Curve { - case elliptic.P224(), elliptic.P256(): - hashFunc = crypto.SHA256 - sigAlgo.Algorithm = oidSignatureECDSAWithSHA256 - case elliptic.P384(): - hashFunc = crypto.SHA384 - sigAlgo.Algorithm = oidSignatureECDSAWithSHA384 - case elliptic.P521(): - hashFunc = crypto.SHA512 - sigAlgo.Algorithm = oidSignatureECDSAWithSHA512 - default: - err = errors.New("x509: unknown elliptic curve") - } - - default: - err = errors.New("x509: only RSA and ECDSA keys supported") - } - - if err != nil { - return - } - - if requestedSigAlgo == 0 { - return - } - - found := false - for _, details := range signatureAlgorithmDetails { - if details.algo == requestedSigAlgo { - if details.pubKeyAlgo != pubType { - err = errors.New("x509: requested SignatureAlgorithm does not match private key type") - return - } - sigAlgo.Algorithm, hashFunc = details.oid, details.hash - if hashFunc == 0 { - err = errors.New("x509: cannot sign with hash function requested") - return - } - found = true - break - } - } - - if !found { - err = errors.New("x509: unknown SignatureAlgorithm") - } - - return -} - -// TODO(agl): this is taken from crypto/x509 and so should probably be exported -// from crypto/x509 or crypto/x509/pkix. -func getSignatureAlgorithmFromOID(oid asn1.ObjectIdentifier) x509.SignatureAlgorithm { - for _, details := range signatureAlgorithmDetails { - if oid.Equal(details.oid) { - return details.algo - } - } - return x509.UnknownSignatureAlgorithm -} - -// TODO(rlb): This is not taken from crypto/x509, but it's of the same general form. -func getHashAlgorithmFromOID(target asn1.ObjectIdentifier) crypto.Hash { - for hash, oid := range hashOIDs { - if oid.Equal(target) { - return hash - } - } - return crypto.Hash(0) -} - -func getOIDFromHashAlgorithm(target crypto.Hash) asn1.ObjectIdentifier { - for hash, oid := range hashOIDs { - if hash == target { - return oid - } - } - return nil -} - -// This is the exposed reflection of the internal OCSP structures. - -// The status values that can be expressed in OCSP. See RFC 6960. -const ( - // Good means that the certificate is valid. - Good = iota - // Revoked means that the certificate has been deliberately revoked. - Revoked - // Unknown means that the OCSP responder doesn't know about the certificate. - Unknown - // ServerFailed is unused and was never used (see - // https://go-review.googlesource.com/#/c/18944). ParseResponse will - // return a ResponseError when an error response is parsed. 
- ServerFailed -) - -// The enumerated reasons for revoking a certificate. See RFC 5280. -const ( - Unspecified = 0 - KeyCompromise = 1 - CACompromise = 2 - AffiliationChanged = 3 - Superseded = 4 - CessationOfOperation = 5 - CertificateHold = 6 - - RemoveFromCRL = 8 - PrivilegeWithdrawn = 9 - AACompromise = 10 -) - -// Request represents an OCSP request. See RFC 6960. -type Request struct { - HashAlgorithm crypto.Hash - IssuerNameHash []byte - IssuerKeyHash []byte - SerialNumber *big.Int -} - -// Marshal marshals the OCSP request to ASN.1 DER encoded form. -func (req *Request) Marshal() ([]byte, error) { - hashAlg := getOIDFromHashAlgorithm(req.HashAlgorithm) - if hashAlg == nil { - return nil, errors.New("Unknown hash algorithm") - } - return asn1.Marshal(ocspRequest{ - tbsRequest{ - Version: 0, - RequestList: []request{ - { - Cert: certID{ - pkix.AlgorithmIdentifier{ - Algorithm: hashAlg, - Parameters: asn1.RawValue{Tag: 5 /* ASN.1 NULL */}, - }, - req.IssuerNameHash, - req.IssuerKeyHash, - req.SerialNumber, - }, - }, - }, - }, - }) -} - -// Response represents an OCSP response containing a single SingleResponse. See -// RFC 6960. -type Response struct { - // Status is one of {Good, Revoked, Unknown} - Status int - SerialNumber *big.Int - ProducedAt, ThisUpdate, NextUpdate, RevokedAt time.Time - RevocationReason int - Certificate *x509.Certificate - // TBSResponseData contains the raw bytes of the signed response. If - // Certificate is nil then this can be used to verify Signature. - TBSResponseData []byte - Signature []byte - SignatureAlgorithm x509.SignatureAlgorithm - - // IssuerHash is the hash used to compute the IssuerNameHash and IssuerKeyHash. - // Valid values are crypto.SHA1, crypto.SHA256, crypto.SHA384, and crypto.SHA512. - // If zero, the default is crypto.SHA1. - IssuerHash crypto.Hash - - // RawResponderName optionally contains the DER-encoded subject of the - // responder certificate. Exactly one of RawResponderName and - // ResponderKeyHash is set. - RawResponderName []byte - // ResponderKeyHash optionally contains the SHA-1 hash of the - // responder's public key. Exactly one of RawResponderName and - // ResponderKeyHash is set. - ResponderKeyHash []byte - - // Extensions contains raw X.509 extensions from the singleExtensions field - // of the OCSP response. When parsing certificates, this can be used to - // extract non-critical extensions that are not parsed by this package. When - // marshaling OCSP responses, the Extensions field is ignored, see - // ExtraExtensions. - Extensions []pkix.Extension - - // ExtraExtensions contains extensions to be copied, raw, into any marshaled - // OCSP response (in the singleExtensions field). Values override any - // extensions that would otherwise be produced based on the other fields. The - // ExtraExtensions field is not populated when parsing certificates, see - // Extensions. - ExtraExtensions []pkix.Extension -} - -// These are pre-serialized error responses for the various non-success codes -// defined by OCSP. The Unauthorized code in particular can be used by an OCSP -// responder that supports only pre-signed responses as a response to requests -// for certificates with unknown status. See RFC 5019. 
-var ( - MalformedRequestErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x01} - InternalErrorErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x02} - TryLaterErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x03} - SigRequredErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x05} - UnauthorizedErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x06} -) - -// CheckSignatureFrom checks that the signature in resp is a valid signature -// from issuer. This should only be used if resp.Certificate is nil. Otherwise, -// the OCSP response contained an intermediate certificate that created the -// signature. That signature is checked by ParseResponse and only -// resp.Certificate remains to be validated. -func (resp *Response) CheckSignatureFrom(issuer *x509.Certificate) error { - return issuer.CheckSignature(resp.SignatureAlgorithm, resp.TBSResponseData, resp.Signature) -} - -// ParseError results from an invalid OCSP response. -type ParseError string - -func (p ParseError) Error() string { - return string(p) -} - -// ParseRequest parses an OCSP request in DER form. It only supports -// requests for a single certificate. Signed requests are not supported. -// If a request includes a signature, it will result in a ParseError. -func ParseRequest(bytes []byte) (*Request, error) { - var req ocspRequest - rest, err := asn1.Unmarshal(bytes, &req) - if err != nil { - return nil, err - } - if len(rest) > 0 { - return nil, ParseError("trailing data in OCSP request") - } - - if len(req.TBSRequest.RequestList) == 0 { - return nil, ParseError("OCSP request contains no request body") - } - innerRequest := req.TBSRequest.RequestList[0] - - hashFunc := getHashAlgorithmFromOID(innerRequest.Cert.HashAlgorithm.Algorithm) - if hashFunc == crypto.Hash(0) { - return nil, ParseError("OCSP request uses unknown hash function") - } - - return &Request{ - HashAlgorithm: hashFunc, - IssuerNameHash: innerRequest.Cert.NameHash, - IssuerKeyHash: innerRequest.Cert.IssuerKeyHash, - SerialNumber: innerRequest.Cert.SerialNumber, - }, nil -} - -// ParseResponse parses an OCSP response in DER form. The response must contain -// only one certificate status. To parse the status of a specific certificate -// from a response which may contain multiple statuses, use ParseResponseForCert -// instead. -// -// If the response contains an embedded certificate, then that certificate will -// be used to verify the response signature. If the response contains an -// embedded certificate and issuer is not nil, then issuer will be used to verify -// the signature on the embedded certificate. -// -// If the response does not contain an embedded certificate and issuer is not -// nil, then issuer will be used to verify the response signature. -// -// Invalid responses and parse failures will result in a ParseError. -// Error responses will result in a ResponseError. -func ParseResponse(bytes []byte, issuer *x509.Certificate) (*Response, error) { - return ParseResponseForCert(bytes, nil, issuer) -} - -// ParseResponseForCert acts identically to ParseResponse, except it supports -// parsing responses that contain multiple statuses. If the response contains -// multiple statuses and cert is not nil, then ParseResponseForCert will return -// the first status which contains a matching serial, otherwise it will return an -// error. If cert is nil, then the first status in the response will be returned. 
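The exported API of this OCSP package composes into a full round trip: CreateResponse signs a status, and ParseResponse verifies and decodes it. A hedged sketch using a throwaway self-signed CA generated on the spot (all names and lifetimes are illustrative):

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"crypto/x509/pkix"
	"fmt"
	"math/big"
	"time"

	"golang.org/x/crypto/ocsp"
)

func main() {
	// A self-signed CA standing in for both issuer and responder.
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	tmpl := &x509.Certificate{
		SerialNumber:          big.NewInt(1),
		Subject:               pkix.Name{CommonName: "demo CA"},
		NotBefore:             time.Now(),
		NotAfter:              time.Now().Add(time.Hour),
		IsCA:                  true,
		BasicConstraintsValid: true,
		KeyUsage:              x509.KeyUsageCertSign | x509.KeyUsageDigitalSignature,
	}
	der, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &key.PublicKey, key)
	if err != nil {
		panic(err)
	}
	ca, err := x509.ParseCertificate(der)
	if err != nil {
		panic(err)
	}

	// Sign a "good" status for serial 42, then parse and verify it
	// against the issuer (no embedded responder certificate here).
	respDER, err := ocsp.CreateResponse(ca, ca, ocsp.Response{
		Status:       ocsp.Good,
		SerialNumber: big.NewInt(42),
		ThisUpdate:   time.Now(),
		NextUpdate:   time.Now().Add(time.Hour),
	}, key)
	if err != nil {
		panic(err)
	}
	resp, err := ocsp.ParseResponse(respDER, ca)
	if err != nil {
		panic(err)
	}
	fmt.Println(resp.Status == ocsp.Good, resp.SerialNumber) // true 42
}
```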
-func ParseResponseForCert(bytes []byte, cert, issuer *x509.Certificate) (*Response, error) { - var resp responseASN1 - rest, err := asn1.Unmarshal(bytes, &resp) - if err != nil { - return nil, err - } - if len(rest) > 0 { - return nil, ParseError("trailing data in OCSP response") - } - - if status := ResponseStatus(resp.Status); status != Success { - return nil, ResponseError{status} - } - - if !resp.Response.ResponseType.Equal(idPKIXOCSPBasic) { - return nil, ParseError("bad OCSP response type") - } - - var basicResp basicResponse - rest, err = asn1.Unmarshal(resp.Response.Response, &basicResp) - if err != nil { - return nil, err - } - if len(rest) > 0 { - return nil, ParseError("trailing data in OCSP response") - } - - if n := len(basicResp.TBSResponseData.Responses); n == 0 || cert == nil && n > 1 { - return nil, ParseError("OCSP response contains bad number of responses") - } - - var singleResp singleResponse - if cert == nil { - singleResp = basicResp.TBSResponseData.Responses[0] - } else { - match := false - for _, resp := range basicResp.TBSResponseData.Responses { - if cert.SerialNumber.Cmp(resp.CertID.SerialNumber) == 0 { - singleResp = resp - match = true - break - } - } - if !match { - return nil, ParseError("no response matching the supplied certificate") - } - } - - ret := &Response{ - TBSResponseData: basicResp.TBSResponseData.Raw, - Signature: basicResp.Signature.RightAlign(), - SignatureAlgorithm: getSignatureAlgorithmFromOID(basicResp.SignatureAlgorithm.Algorithm), - Extensions: singleResp.SingleExtensions, - SerialNumber: singleResp.CertID.SerialNumber, - ProducedAt: basicResp.TBSResponseData.ProducedAt, - ThisUpdate: singleResp.ThisUpdate, - NextUpdate: singleResp.NextUpdate, - } - - // Handle the ResponderID CHOICE tag. ResponderID can be flattened into - // TBSResponseData once https://go-review.googlesource.com/34503 has been - // released. - rawResponderID := basicResp.TBSResponseData.RawResponderID - switch rawResponderID.Tag { - case 1: // Name - var rdn pkix.RDNSequence - if rest, err := asn1.Unmarshal(rawResponderID.Bytes, &rdn); err != nil || len(rest) != 0 { - return nil, ParseError("invalid responder name") - } - ret.RawResponderName = rawResponderID.Bytes - case 2: // KeyHash - if rest, err := asn1.Unmarshal(rawResponderID.Bytes, &ret.ResponderKeyHash); err != nil || len(rest) != 0 { - return nil, ParseError("invalid responder key hash") - } - default: - return nil, ParseError("invalid responder id tag") - } - - if len(basicResp.Certificates) > 0 { - // Responders should only send a single certificate (if they - // send any) that connects the responder's certificate to the - // original issuer. We accept responses with multiple - // certificates due to a number responders sending them[1], but - // ignore all but the first. 
- // - // [1] https://github.com/golang/go/issues/21527 - ret.Certificate, err = x509.ParseCertificate(basicResp.Certificates[0].FullBytes) - if err != nil { - return nil, err - } - - if err := ret.CheckSignatureFrom(ret.Certificate); err != nil { - return nil, ParseError("bad signature on embedded certificate: " + err.Error()) - } - - if issuer != nil { - if err := issuer.CheckSignature(ret.Certificate.SignatureAlgorithm, ret.Certificate.RawTBSCertificate, ret.Certificate.Signature); err != nil { - return nil, ParseError("bad OCSP signature: " + err.Error()) - } - } - } else if issuer != nil { - if err := ret.CheckSignatureFrom(issuer); err != nil { - return nil, ParseError("bad OCSP signature: " + err.Error()) - } - } - - for _, ext := range singleResp.SingleExtensions { - if ext.Critical { - return nil, ParseError("unsupported critical extension") - } - } - - for h, oid := range hashOIDs { - if singleResp.CertID.HashAlgorithm.Algorithm.Equal(oid) { - ret.IssuerHash = h - break - } - } - if ret.IssuerHash == 0 { - return nil, ParseError("unsupported issuer hash algorithm") - } - - switch { - case bool(singleResp.Good): - ret.Status = Good - case bool(singleResp.Unknown): - ret.Status = Unknown - default: - ret.Status = Revoked - ret.RevokedAt = singleResp.Revoked.RevocationTime - ret.RevocationReason = int(singleResp.Revoked.Reason) - } - - return ret, nil -} - -// RequestOptions contains options for constructing OCSP requests. -type RequestOptions struct { - // Hash contains the hash function that should be used when - // constructing the OCSP request. If zero, SHA-1 will be used. - Hash crypto.Hash -} - -func (opts *RequestOptions) hash() crypto.Hash { - if opts == nil || opts.Hash == 0 { - // SHA-1 is nearly universally used in OCSP. - return crypto.SHA1 - } - return opts.Hash -} - -// CreateRequest returns a DER-encoded, OCSP request for the status of cert. If -// opts is nil then sensible defaults are used. -func CreateRequest(cert, issuer *x509.Certificate, opts *RequestOptions) ([]byte, error) { - hashFunc := opts.hash() - - // OCSP seems to be the only place where these raw hash identifiers are - // used. I took the following from - // http://msdn.microsoft.com/en-us/library/ff635603.aspx - _, ok := hashOIDs[hashFunc] - if !ok { - return nil, x509.ErrUnsupportedAlgorithm - } - - if !hashFunc.Available() { - return nil, x509.ErrUnsupportedAlgorithm - } - h := opts.hash().New() - - var publicKeyInfo struct { - Algorithm pkix.AlgorithmIdentifier - PublicKey asn1.BitString - } - if _, err := asn1.Unmarshal(issuer.RawSubjectPublicKeyInfo, &publicKeyInfo); err != nil { - return nil, err - } - - h.Write(publicKeyInfo.PublicKey.RightAlign()) - issuerKeyHash := h.Sum(nil) - - h.Reset() - h.Write(issuer.RawSubject) - issuerNameHash := h.Sum(nil) - - req := &Request{ - HashAlgorithm: hashFunc, - IssuerNameHash: issuerNameHash, - IssuerKeyHash: issuerKeyHash, - SerialNumber: cert.SerialNumber, - } - return req.Marshal() -} - -// CreateResponse returns a DER-encoded OCSP response with the specified contents. -// The fields in the response are populated as follows: -// -// The responder cert is used to populate the responder's name field, and the -// certificate itself is provided alongside the OCSP response signature. -// -// The issuer cert is used to puplate the IssuerNameHash and IssuerKeyHash fields. -// -// The template is used to populate the SerialNumber, Status, RevokedAt, -// RevocationReason, ThisUpdate, and NextUpdate fields. 
-// -// If template.IssuerHash is not set, SHA1 will be used. -// -// The ProducedAt date is automatically set to the current date, to the nearest minute. -func CreateResponse(issuer, responderCert *x509.Certificate, template Response, priv crypto.Signer) ([]byte, error) { - var publicKeyInfo struct { - Algorithm pkix.AlgorithmIdentifier - PublicKey asn1.BitString - } - if _, err := asn1.Unmarshal(issuer.RawSubjectPublicKeyInfo, &publicKeyInfo); err != nil { - return nil, err - } - - if template.IssuerHash == 0 { - template.IssuerHash = crypto.SHA1 - } - hashOID := getOIDFromHashAlgorithm(template.IssuerHash) - if hashOID == nil { - return nil, errors.New("unsupported issuer hash algorithm") - } - - if !template.IssuerHash.Available() { - return nil, fmt.Errorf("issuer hash algorithm %v not linked into binary", template.IssuerHash) - } - h := template.IssuerHash.New() - h.Write(publicKeyInfo.PublicKey.RightAlign()) - issuerKeyHash := h.Sum(nil) - - h.Reset() - h.Write(issuer.RawSubject) - issuerNameHash := h.Sum(nil) - - innerResponse := singleResponse{ - CertID: certID{ - HashAlgorithm: pkix.AlgorithmIdentifier{ - Algorithm: hashOID, - Parameters: asn1.RawValue{Tag: 5 /* ASN.1 NULL */}, - }, - NameHash: issuerNameHash, - IssuerKeyHash: issuerKeyHash, - SerialNumber: template.SerialNumber, - }, - ThisUpdate: template.ThisUpdate.UTC(), - NextUpdate: template.NextUpdate.UTC(), - SingleExtensions: template.ExtraExtensions, - } - - switch template.Status { - case Good: - innerResponse.Good = true - case Unknown: - innerResponse.Unknown = true - case Revoked: - innerResponse.Revoked = revokedInfo{ - RevocationTime: template.RevokedAt.UTC(), - Reason: asn1.Enumerated(template.RevocationReason), - } - } - - rawResponderID := asn1.RawValue{ - Class: 2, // context-specific - Tag: 1, // Name (explicit tag) - IsCompound: true, - Bytes: responderCert.RawSubject, - } - tbsResponseData := responseData{ - Version: 0, - RawResponderID: rawResponderID, - ProducedAt: time.Now().Truncate(time.Minute).UTC(), - Responses: []singleResponse{innerResponse}, - } - - tbsResponseDataDER, err := asn1.Marshal(tbsResponseData) - if err != nil { - return nil, err - } - - hashFunc, signatureAlgorithm, err := signingParamsForPublicKey(priv.Public(), template.SignatureAlgorithm) - if err != nil { - return nil, err - } - - responseHash := hashFunc.New() - responseHash.Write(tbsResponseDataDER) - signature, err := priv.Sign(rand.Reader, responseHash.Sum(nil), hashFunc) - if err != nil { - return nil, err - } - - response := basicResponse{ - TBSResponseData: tbsResponseData, - SignatureAlgorithm: signatureAlgorithm, - Signature: asn1.BitString{ - Bytes: signature, - BitLength: 8 * len(signature), - }, - } - if template.Certificate != nil { - response.Certificates = []asn1.RawValue{ - {FullBytes: template.Certificate.Raw}, - } - } - responseDER, err := asn1.Marshal(response) - if err != nil { - return nil, err - } - - return asn1.Marshal(responseASN1{ - Status: asn1.Enumerated(Success), - Response: responseBytes{ - ResponseType: idPKIXOCSPBasic, - Response: responseDER, - }, - }) -} diff --git a/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go deleted file mode 100644 index 593f6530..00000000 --- a/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
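A client-side usage sketch for the ocsp API whose removal ends above (a hypothetical helper with simplified error handling, not part of the change; it assumes the leaf certificate names a responder in its OCSPServer field):

    package ocspcheck

    import (
        "bytes"
        "crypto/x509"
        "errors"
        "fmt"
        "io"
        "net/http"

        "golang.org/x/crypto/ocsp"
    )

    // checkStatus asks cert's OCSP responder whether cert is still good.
    func checkStatus(cert, issuer *x509.Certificate) error {
        if len(cert.OCSPServer) == 0 {
            return errors.New("certificate names no OCSP responder")
        }
        // nil options means the SHA-1 default documented above.
        der, err := ocsp.CreateRequest(cert, issuer, nil)
        if err != nil {
            return err
        }
        resp, err := http.Post(cert.OCSPServer[0], "application/ocsp-request", bytes.NewReader(der))
        if err != nil {
            return err
        }
        defer resp.Body.Close()
        body, err := io.ReadAll(resp.Body)
        if err != nil {
            return err
        }
        // ParseResponseForCert matches the response to cert and, because
        // issuer is non-nil, verifies the responder's signature.
        res, err := ocsp.ParseResponseForCert(body, cert, issuer)
        if err != nil {
            return err
        }
        if res.Status == ocsp.Revoked {
            return fmt.Errorf("revoked at %v (reason %d)", res.RevokedAt, res.RevocationReason)
        }
        return nil
    }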
- -/* -Package pbkdf2 implements the key derivation function PBKDF2 as defined in RFC -2898 / PKCS #5 v2.0. - -A key derivation function is useful when encrypting data based on a password -or any other not-fully-random data. It uses a pseudorandom function to derive -a secure encryption key based on the password. - -While v2.0 of the standard defines only one pseudorandom function to use, -HMAC-SHA1, the drafted v2.1 specification allows use of all five FIPS Approved -Hash Functions SHA-1, SHA-224, SHA-256, SHA-384 and SHA-512 for HMAC. To -choose, you can pass the `New` functions from the different SHA packages to -pbkdf2.Key. -*/ -package pbkdf2 // import "golang.org/x/crypto/pbkdf2" - -import ( - "crypto/hmac" - "hash" -) - -// Key derives a key from the password, salt and iteration count, returning a -// []byte of length keylen that can be used as cryptographic key. The key is -// derived based on the method described as PBKDF2 with the HMAC variant using -// the supplied hash function. -// -// For example, to use a HMAC-SHA-1 based PBKDF2 key derivation function, you -// can get a derived key for e.g. AES-256 (which needs a 32-byte key) by -// doing: -// -// dk := pbkdf2.Key([]byte("some password"), salt, 4096, 32, sha1.New) -// -// Remember to get a good random salt. At least 8 bytes is recommended by the -// RFC. -// -// Using a higher iteration count will increase the cost of an exhaustive -// search but will also make derivation proportionally slower. -func Key(password, salt []byte, iter, keyLen int, h func() hash.Hash) []byte { - prf := hmac.New(h, password) - hashLen := prf.Size() - numBlocks := (keyLen + hashLen - 1) / hashLen - - var buf [4]byte - dk := make([]byte, 0, numBlocks*hashLen) - U := make([]byte, hashLen) - for block := 1; block <= numBlocks; block++ { - // N.B.: || means concatenation, ^ means XOR - // for each block T_i = U_1 ^ U_2 ^ ... ^ U_iter - // U_1 = PRF(password, salt || uint(i)) - prf.Reset() - prf.Write(salt) - buf[0] = byte(block >> 24) - buf[1] = byte(block >> 16) - buf[2] = byte(block >> 8) - buf[3] = byte(block) - prf.Write(buf[:4]) - dk = prf.Sum(dk) - T := dk[len(dk)-hashLen:] - copy(U, T) - - // U_n = PRF(password, U_(n-1)) - for n := 2; n <= iter; n++ { - prf.Reset() - prf.Write(U) - U = U[:0] - U = prf.Sum(U) - for x := range U { - T[x] ^= U[x] - } - } - } - return dk[:keyLen] -} diff --git a/vendor/golang.org/x/crypto/poly1305/bits_compat.go b/vendor/golang.org/x/crypto/poly1305/bits_compat.go deleted file mode 100644 index 45b5c966..00000000 --- a/vendor/golang.org/x/crypto/poly1305/bits_compat.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.13 -// +build !go1.13 - -package poly1305 - -// Generic fallbacks for the math/bits intrinsics, copied from -// src/math/bits/bits.go. They were added in Go 1.12, but Add64 and Sum64 had -// variable time fallbacks until Go 1.13. 
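The generic fallbacks that follow mirror the math/bits intrinsics; the only subtle part is recovering the carry or borrow from the sign bits of the operands and the result. A quick property check against the intrinsics (illustrative only; it assumes Go 1.13+ so both paths exist):

    package main

    import (
        "fmt"
        "math/bits"
    )

    // add64 is the pre-Go 1.13 fallback shown below: the carry-out is
    // reconstructed from operand and result sign bits.
    func add64(x, y, carry uint64) (sum, carryOut uint64) {
        sum = x + y + carry
        carryOut = ((x & y) | ((x | y) &^ sum)) >> 63
        return
    }

    func main() {
        cases := [][3]uint64{{^uint64(0), 1, 0}, {1 << 63, 1 << 63, 1}, {42, 58, 1}}
        for _, c := range cases {
            s1, c1 := add64(c[0], c[1], c[2])
            s2, c2 := bits.Add64(c[0], c[1], c[2])
            fmt.Println(s1 == s2 && c1 == c2) // true: the fallback matches the intrinsic
        }
    }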
- -func bitsAdd64(x, y, carry uint64) (sum, carryOut uint64) { - sum = x + y + carry - carryOut = ((x & y) | ((x | y) &^ sum)) >> 63 - return -} - -func bitsSub64(x, y, borrow uint64) (diff, borrowOut uint64) { - diff = x - y - borrow - borrowOut = ((^x & y) | (^(x ^ y) & diff)) >> 63 - return -} - -func bitsMul64(x, y uint64) (hi, lo uint64) { - const mask32 = 1<<32 - 1 - x0 := x & mask32 - x1 := x >> 32 - y0 := y & mask32 - y1 := y >> 32 - w0 := x0 * y0 - t := x1*y0 + w0>>32 - w1 := t & mask32 - w2 := t >> 32 - w1 += x0 * y1 - hi = x1*y1 + w2 + w1>>32 - lo = x * y - return -} diff --git a/vendor/golang.org/x/crypto/poly1305/bits_go1.13.go b/vendor/golang.org/x/crypto/poly1305/bits_go1.13.go deleted file mode 100644 index ed52b341..00000000 --- a/vendor/golang.org/x/crypto/poly1305/bits_go1.13.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.13 -// +build go1.13 - -package poly1305 - -import "math/bits" - -func bitsAdd64(x, y, carry uint64) (sum, carryOut uint64) { - return bits.Add64(x, y, carry) -} - -func bitsSub64(x, y, borrow uint64) (diff, borrowOut uint64) { - return bits.Sub64(x, y, borrow) -} - -func bitsMul64(x, y uint64) (hi, lo uint64) { - return bits.Mul64(x, y) -} diff --git a/vendor/golang.org/x/crypto/poly1305/mac_noasm.go b/vendor/golang.org/x/crypto/poly1305/mac_noasm.go deleted file mode 100644 index f184b67d..00000000 --- a/vendor/golang.org/x/crypto/poly1305/mac_noasm.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build (!amd64 && !ppc64le && !s390x) || !gc || purego -// +build !amd64,!ppc64le,!s390x !gc purego - -package poly1305 - -type mac struct{ macGeneric } diff --git a/vendor/golang.org/x/crypto/poly1305/poly1305.go b/vendor/golang.org/x/crypto/poly1305/poly1305.go deleted file mode 100644 index 9d7a6af0..00000000 --- a/vendor/golang.org/x/crypto/poly1305/poly1305.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package poly1305 implements Poly1305 one-time message authentication code as -// specified in https://cr.yp.to/mac/poly1305-20050329.pdf. -// -// Poly1305 is a fast, one-time authentication function. It is infeasible for an -// attacker to generate an authenticator for a message without the key. However, a -// key must only be used for a single message. Authenticating two different -// messages with the same key allows an attacker to forge authenticators for other -// messages with the same key. -// -// Poly1305 was originally coupled with AES in order to make Poly1305-AES. AES was -// used with a fixed key in order to generate one-time keys from an nonce. -// However, in this package AES isn't used and the one-time key is specified -// directly. -package poly1305 // import "golang.org/x/crypto/poly1305" - -import "crypto/subtle" - -// TagSize is the size, in bytes, of a poly1305 authenticator. -const TagSize = 16 - -// Sum generates an authenticator for msg using a one-time key and puts the -// 16-byte result into out. Authenticating two different messages with the same -// key allows an attacker to forge messages at will. 
-func Sum(out *[16]byte, m []byte, key *[32]byte) { - h := New(key) - h.Write(m) - h.Sum(out[:0]) -} - -// Verify returns true if mac is a valid authenticator for m with the given key. -func Verify(mac *[16]byte, m []byte, key *[32]byte) bool { - var tmp [16]byte - Sum(&tmp, m, key) - return subtle.ConstantTimeCompare(tmp[:], mac[:]) == 1 -} - -// New returns a new MAC computing an authentication -// tag of all data written to it with the given key. -// This allows writing the message progressively instead -// of passing it as a single slice. Common users should use -// the Sum function instead. -// -// The key must be unique for each message, as authenticating -// two different messages with the same key allows an attacker -// to forge messages at will. -func New(key *[32]byte) *MAC { - m := &MAC{} - initialize(key, &m.macState) - return m -} - -// MAC is an io.Writer computing an authentication tag -// of the data written to it. -// -// MAC cannot be used like common hash.Hash implementations, -// because using a poly1305 key twice breaks its security. -// Therefore writing data to a running MAC after calling -// Sum or Verify causes it to panic. -type MAC struct { - mac // platform-dependent implementation - - finalized bool -} - -// Size returns the number of bytes Sum will return. -func (h *MAC) Size() int { return TagSize } - -// Write adds more data to the running message authentication code. -// It never returns an error. -// -// It must not be called after the first call of Sum or Verify. -func (h *MAC) Write(p []byte) (n int, err error) { - if h.finalized { - panic("poly1305: write to MAC after Sum or Verify") - } - return h.mac.Write(p) -} - -// Sum computes the authenticator of all data written to the -// message authentication code. -func (h *MAC) Sum(b []byte) []byte { - var mac [TagSize]byte - h.mac.Sum(&mac) - h.finalized = true - return append(b, mac[:]...) -} - -// Verify returns whether the authenticator of all data written to -// the message authentication code matches the expected value. -func (h *MAC) Verify(expected []byte) bool { - var mac [TagSize]byte - h.mac.Sum(&mac) - h.finalized = true - return subtle.ConstantTimeCompare(expected, mac[:]) == 1 -} diff --git a/vendor/golang.org/x/crypto/poly1305/sum_amd64.go b/vendor/golang.org/x/crypto/poly1305/sum_amd64.go deleted file mode 100644 index 6d522333..00000000 --- a/vendor/golang.org/x/crypto/poly1305/sum_amd64.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build gc && !purego -// +build gc,!purego - -package poly1305 - -//go:noescape -func update(state *macState, msg []byte) - -// mac is a wrapper for macGeneric that redirects calls that would have gone to -// updateGeneric to update. -// -// Its Write and Sum methods are otherwise identical to the macGeneric ones, but -// using function pointers would carry a major performance cost. 
-type mac struct{ macGeneric } - -func (h *mac) Write(p []byte) (int, error) { - nn := len(p) - if h.offset > 0 { - n := copy(h.buffer[h.offset:], p) - if h.offset+n < TagSize { - h.offset += n - return nn, nil - } - p = p[n:] - h.offset = 0 - update(&h.macState, h.buffer[:]) - } - if n := len(p) - (len(p) % TagSize); n > 0 { - update(&h.macState, p[:n]) - p = p[n:] - } - if len(p) > 0 { - h.offset += copy(h.buffer[h.offset:], p) - } - return nn, nil -} - -func (h *mac) Sum(out *[16]byte) { - state := h.macState - if h.offset > 0 { - update(&state, h.buffer[:h.offset]) - } - finalize(out, &state.h, &state.s) -} diff --git a/vendor/golang.org/x/crypto/poly1305/sum_amd64.s b/vendor/golang.org/x/crypto/poly1305/sum_amd64.s deleted file mode 100644 index 1d74f0f8..00000000 --- a/vendor/golang.org/x/crypto/poly1305/sum_amd64.s +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build gc && !purego -// +build gc,!purego - -#include "textflag.h" - -#define POLY1305_ADD(msg, h0, h1, h2) \ - ADDQ 0(msg), h0; \ - ADCQ 8(msg), h1; \ - ADCQ $1, h2; \ - LEAQ 16(msg), msg - -#define POLY1305_MUL(h0, h1, h2, r0, r1, t0, t1, t2, t3) \ - MOVQ r0, AX; \ - MULQ h0; \ - MOVQ AX, t0; \ - MOVQ DX, t1; \ - MOVQ r0, AX; \ - MULQ h1; \ - ADDQ AX, t1; \ - ADCQ $0, DX; \ - MOVQ r0, t2; \ - IMULQ h2, t2; \ - ADDQ DX, t2; \ - \ - MOVQ r1, AX; \ - MULQ h0; \ - ADDQ AX, t1; \ - ADCQ $0, DX; \ - MOVQ DX, h0; \ - MOVQ r1, t3; \ - IMULQ h2, t3; \ - MOVQ r1, AX; \ - MULQ h1; \ - ADDQ AX, t2; \ - ADCQ DX, t3; \ - ADDQ h0, t2; \ - ADCQ $0, t3; \ - \ - MOVQ t0, h0; \ - MOVQ t1, h1; \ - MOVQ t2, h2; \ - ANDQ $3, h2; \ - MOVQ t2, t0; \ - ANDQ $0xFFFFFFFFFFFFFFFC, t0; \ - ADDQ t0, h0; \ - ADCQ t3, h1; \ - ADCQ $0, h2; \ - SHRQ $2, t3, t2; \ - SHRQ $2, t3; \ - ADDQ t2, h0; \ - ADCQ t3, h1; \ - ADCQ $0, h2 - -// func update(state *[7]uint64, msg []byte) -TEXT ·update(SB), $0-32 - MOVQ state+0(FP), DI - MOVQ msg_base+8(FP), SI - MOVQ msg_len+16(FP), R15 - - MOVQ 0(DI), R8 // h0 - MOVQ 8(DI), R9 // h1 - MOVQ 16(DI), R10 // h2 - MOVQ 24(DI), R11 // r0 - MOVQ 32(DI), R12 // r1 - - CMPQ R15, $16 - JB bytes_between_0_and_15 - -loop: - POLY1305_ADD(SI, R8, R9, R10) - -multiply: - POLY1305_MUL(R8, R9, R10, R11, R12, BX, CX, R13, R14) - SUBQ $16, R15 - CMPQ R15, $16 - JAE loop - -bytes_between_0_and_15: - TESTQ R15, R15 - JZ done - MOVQ $1, BX - XORQ CX, CX - XORQ R13, R13 - ADDQ R15, SI - -flush_buffer: - SHLQ $8, BX, CX - SHLQ $8, BX - MOVB -1(SI), R13 - XORQ R13, BX - DECQ SI - DECQ R15 - JNZ flush_buffer - - ADDQ BX, R8 - ADCQ CX, R9 - ADCQ $0, R10 - MOVQ $16, R15 - JMP multiply - -done: - MOVQ R8, 0(DI) - MOVQ R9, 8(DI) - MOVQ R10, 16(DI) - RET diff --git a/vendor/golang.org/x/crypto/poly1305/sum_generic.go b/vendor/golang.org/x/crypto/poly1305/sum_generic.go deleted file mode 100644 index c942a659..00000000 --- a/vendor/golang.org/x/crypto/poly1305/sum_generic.go +++ /dev/null @@ -1,310 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This file provides the generic implementation of Sum and MAC. Other files -// might provide optimized assembly implementations of some of this code. 
-
-package poly1305
-
-import "encoding/binary"
-
-// Poly1305 [RFC 7539] is a relatively simple algorithm: the authentication tag
-// for a 64 bytes message is approximately
-//
-//     s + m[0:16] * r⁴ + m[16:32] * r³ + m[32:48] * r² + m[48:64] * r  mod  2¹³⁰ - 5
-//
-// for some secret r and s. It can be computed sequentially like
-//
-//     for len(msg) > 0:
-//         h += read(msg, 16)
-//         h *= r
-//         h %= 2¹³⁰ - 5
-//     return h + s
-//
-// All the complexity is about doing performant constant-time math on numbers
-// larger than any available numeric type.
-
-func sumGeneric(out *[TagSize]byte, msg []byte, key *[32]byte) {
-	h := newMACGeneric(key)
-	h.Write(msg)
-	h.Sum(out)
-}
-
-func newMACGeneric(key *[32]byte) macGeneric {
-	m := macGeneric{}
-	initialize(key, &m.macState)
-	return m
-}
-
-// macState holds numbers in saturated 64-bit little-endian limbs. That is,
-// the value of [x0, x1, x2] is x[0] + x[1] * 2⁶⁴ + x[2] * 2¹²⁸.
-type macState struct {
-	// h is the main accumulator. It is to be interpreted modulo 2¹³⁰ - 5, but
-	// can grow larger during and after rounds. It must, however, remain below
-	// 2 * (2¹³⁰ - 5).
-	h [3]uint64
-	// r and s are the private key components.
-	r [2]uint64
-	s [2]uint64
-}
-
-type macGeneric struct {
-	macState
-
-	buffer [TagSize]byte
-	offset int
-}
-
-// Write splits the incoming message into TagSize chunks, and passes them to
-// update. It buffers incomplete chunks.
-func (h *macGeneric) Write(p []byte) (int, error) {
-	nn := len(p)
-	if h.offset > 0 {
-		n := copy(h.buffer[h.offset:], p)
-		if h.offset+n < TagSize {
-			h.offset += n
-			return nn, nil
-		}
-		p = p[n:]
-		h.offset = 0
-		updateGeneric(&h.macState, h.buffer[:])
-	}
-	if n := len(p) - (len(p) % TagSize); n > 0 {
-		updateGeneric(&h.macState, p[:n])
-		p = p[n:]
-	}
-	if len(p) > 0 {
-		h.offset += copy(h.buffer[h.offset:], p)
-	}
-	return nn, nil
-}
-
-// Sum flushes the last incomplete chunk from the buffer, if any, and generates
-// the MAC output. It does not modify its state, in order to allow for multiple
-// calls to Sum, even if no Write is allowed after Sum.
-func (h *macGeneric) Sum(out *[TagSize]byte) {
-	state := h.macState
-	if h.offset > 0 {
-		updateGeneric(&state, h.buffer[:h.offset])
-	}
-	finalize(out, &state.h, &state.s)
-}
-
-// [rMask0, rMask1] is the specified Poly1305 clamping mask in little-endian. It
-// clears some bits of the secret coefficient to make it possible to implement
-// multiplication more efficiently.
-const (
-	rMask0 = 0x0FFFFFFC0FFFFFFF
-	rMask1 = 0x0FFFFFFC0FFFFFFC
-)
-
-// initialize loads the 256-bit key into the two 128-bit secret values r and s.
-func initialize(key *[32]byte, m *macState) {
-	m.r[0] = binary.LittleEndian.Uint64(key[0:8]) & rMask0
-	m.r[1] = binary.LittleEndian.Uint64(key[8:16]) & rMask1
-	m.s[0] = binary.LittleEndian.Uint64(key[16:24])
-	m.s[1] = binary.LittleEndian.Uint64(key[24:32])
-}
-
-// uint128 holds a 128-bit number as two 64-bit limbs, for use with the
-// bits.Mul64 and bits.Add64 intrinsics.
-type uint128 struct {
-	lo, hi uint64
-}
-
-func mul64(a, b uint64) uint128 {
-	hi, lo := bitsMul64(a, b)
-	return uint128{lo, hi}
-}
-
-func add128(a, b uint128) uint128 {
-	lo, c := bitsAdd64(a.lo, b.lo, 0)
-	hi, c := bitsAdd64(a.hi, b.hi, c)
-	if c != 0 {
-		panic("poly1305: unexpected overflow")
-	}
-	return uint128{lo, hi}
-}
-
-func shiftRightBy2(a uint128) uint128 {
-	a.lo = a.lo>>2 | (a.hi&3)<<62
-	a.hi = a.hi >> 2
-	return a
-}
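The pseudocode in the header comment above translates directly into math/big, which makes a useful (deliberately non-constant-time) cross-check for the limb arithmetic in updateGeneric below. A sketch, with the clamping mask and the trailing s addition taken from initialize and finalize:

    package main

    import (
        "crypto/rand"
        "fmt"
        "math/big"

        "golang.org/x/crypto/poly1305"
    )

    // le interprets b as a little-endian unsigned integer.
    func le(b []byte) *big.Int {
        rev := make([]byte, len(b))
        for i := range b {
            rev[len(b)-1-i] = b[i]
        }
        return new(big.Int).SetBytes(rev)
    }

    // polySum computes h = (h + m) * r mod 2¹³⁰ - 5 over 16-byte blocks,
    // then tag = (h + s) mod 2¹²⁸, as in the pseudocode above.
    func polySum(msg []byte, key *[32]byte) [16]byte {
        p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 130), big.NewInt(5))
        clamp, _ := new(big.Int).SetString("0ffffffc0ffffffc0ffffffc0fffffff", 16)
        r := new(big.Int).And(le(key[:16]), clamp) // rMask0/rMask1 as one integer
        s := le(key[16:32])

        h := new(big.Int)
        for len(msg) > 0 {
            n := 16
            if len(msg) < n {
                n = len(msg)
            }
            m := le(msg[:n])
            m.SetBit(m, 8*n, 1) // the 1 bit just above the message size
            h.Add(h, m).Mul(h, r).Mod(h, p)
            msg = msg[n:]
        }
        h.Add(h, s).Mod(h, new(big.Int).Lsh(big.NewInt(1), 128)) // wide add, overflow discarded

        var tag [16]byte
        hb := h.Bytes() // big endian
        for i, b := range hb {
            tag[len(hb)-1-i] = b // store little endian
        }
        return tag
    }

    func main() {
        var key [32]byte
        if _, err := rand.Read(key[:]); err != nil {
            panic(err)
        }
        msg := []byte("testing the reference implementation")

        want := polySum(msg, &key)
        var got [16]byte
        poly1305.Sum(&got, msg, &key)
        fmt.Println(want == got) // true
    }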
-// updateGeneric absorbs msg into the state.h accumulator. For each chunk m of
-// 128 bits of message, it computes
-//
-//     h₊ = (h + m) * r  mod  2¹³⁰ - 5
-//
-// If the msg length is not a multiple of TagSize, it assumes the last
-// incomplete chunk is the final one.
-func updateGeneric(state *macState, msg []byte) {
-	h0, h1, h2 := state.h[0], state.h[1], state.h[2]
-	r0, r1 := state.r[0], state.r[1]
-
-	for len(msg) > 0 {
-		var c uint64
-
-		// For the first step, h + m, we use a chain of bits.Add64 intrinsics.
-		// The resulting value of h might exceed 2¹³⁰ - 5, but will be partially
-		// reduced at the end of the multiplication below.
-		//
-		// The spec requires us to set a bit just above the message size, not to
-		// hide leading zeroes. For full chunks, that's 1 << 128, so we can just
-		// add 1 to the most significant (2¹²⁸) limb, h2.
-		if len(msg) >= TagSize {
-			h0, c = bitsAdd64(h0, binary.LittleEndian.Uint64(msg[0:8]), 0)
-			h1, c = bitsAdd64(h1, binary.LittleEndian.Uint64(msg[8:16]), c)
-			h2 += c + 1
-
-			msg = msg[TagSize:]
-		} else {
-			var buf [TagSize]byte
-			copy(buf[:], msg)
-			buf[len(msg)] = 1
-
-			h0, c = bitsAdd64(h0, binary.LittleEndian.Uint64(buf[0:8]), 0)
-			h1, c = bitsAdd64(h1, binary.LittleEndian.Uint64(buf[8:16]), c)
-			h2 += c
-
-			msg = nil
-		}
-
-		// Multiplication of big number limbs is similar to elementary school
-		// columnar multiplication. Instead of digits, there are 64-bit limbs.
-		//
-		// We are multiplying a 3 limbs number, h, by a 2 limbs number, r.
-		//
-		//                        h2    h1    h0  x
-		//                              r1    r0  =
-		//                       ----------------
-		//                      h2r0  h1r0  h0r0     <-- individual 128-bit products
-		//            +   h2r1  h1r1  h0r1
-		//                 ------------------------
-		//                 m3    m2    m1    m0      <-- result in 128-bit overlapping limbs
-		//                 ------------------------
-		//         m3.hi m2.hi m1.hi m0.hi           <-- carry propagation
-		//     +         m3.lo m2.lo m1.lo m0.lo
-		//        -------------------------------
-		//        t4    t3    t2    t1    t0         <-- final result in 64-bit limbs
-		//
-		// The main difference from pen-and-paper multiplication is that we do
-		// carry propagation in a separate step, as if we wrote two digit sums
-		// at first (the 128-bit limbs), and then carried the tens all at once.
-
-		h0r0 := mul64(h0, r0)
-		h1r0 := mul64(h1, r0)
-		h2r0 := mul64(h2, r0)
-		h0r1 := mul64(h0, r1)
-		h1r1 := mul64(h1, r1)
-		h2r1 := mul64(h2, r1)
-
-		// Since h2 is known to be at most 7 (5 + 1 + 1), and r0 and r1 have their
-		// top 4 bits cleared by rMask{0,1}, we know that their product is not going
-		// to overflow 64 bits, so we can ignore the high part of the products.
-		//
-		// This also means that the product doesn't have a fifth limb (t4).
-		if h2r0.hi != 0 {
-			panic("poly1305: unexpected overflow")
-		}
-		if h2r1.hi != 0 {
-			panic("poly1305: unexpected overflow")
-		}
-
-		m0 := h0r0
-		m1 := add128(h1r0, h0r1) // These two additions don't overflow thanks again
-		m2 := add128(h2r0, h1r1) // to the 4 masked bits at the top of r0 and r1.
-		m3 := h2r1
-
-		t0 := m0.lo
-		t1, c := bitsAdd64(m1.lo, m0.hi, 0)
-		t2, c := bitsAdd64(m2.lo, m1.hi, c)
-		t3, _ := bitsAdd64(m3.lo, m2.hi, c)
-
-		// Now we have the result as 4 64-bit limbs, and we need to reduce it
-		// modulo 2¹³⁰ - 5. The special shape of this Crandall prime lets us do
-		// a cheap partial reduction according to the reduction identity
-		//
-		//     c * 2¹³⁰ + n  =  c * 5 + n  mod  2¹³⁰ - 5
-		//
-		// because 2¹³⁰ = 5 mod 2¹³⁰ - 5. Partial reduction since the result is
-		// likely to be larger than 2¹³⁰ - 5, but still small enough to fit the
-		// assumptions we make about h in the rest of the code.
-		//
-		// See also https://speakerdeck.com/gtank/engineering-prime-numbers?slide=23
-
-		// We split the final result at the 2¹³⁰ mark into h and cc, the carry.
-		// Note that the carry bits are effectively shifted left by 2, in other
-		// words, cc = c * 4 for the c in the reduction identity.
-		h0, h1, h2 = t0, t1, t2&maskLow2Bits
-		cc := uint128{t2 & maskNotLow2Bits, t3}
-
-		// To add c * 5 to h, we first add cc = c * 4, and then add (cc >> 2) = c.
-
-		h0, c = bitsAdd64(h0, cc.lo, 0)
-		h1, c = bitsAdd64(h1, cc.hi, c)
-		h2 += c
-
-		cc = shiftRightBy2(cc)
-
-		h0, c = bitsAdd64(h0, cc.lo, 0)
-		h1, c = bitsAdd64(h1, cc.hi, c)
-		h2 += c
-
-		// h2 is at most 3 + 1 + 1 = 5, making the whole of h at most
-		//
-		//     5 * 2¹²⁸ + (2¹²⁸ - 1) = 6 * 2¹²⁸ - 1
-	}
-
-	state.h[0], state.h[1], state.h[2] = h0, h1, h2
-}
-
-const (
-	maskLow2Bits    uint64 = 0x0000000000000003
-	maskNotLow2Bits uint64 = ^maskLow2Bits
-)
-
-// select64 returns x if v == 1 and y if v == 0, in constant time.
-func select64(v, x, y uint64) uint64 { return ^(v-1)&x | (v-1)&y }
-
-// [p0, p1, p2] is 2¹³⁰ - 5 in little endian order.
-const (
-	p0 = 0xFFFFFFFFFFFFFFFB
-	p1 = 0xFFFFFFFFFFFFFFFF
-	p2 = 0x0000000000000003
-)
-
-// finalize completes the modular reduction of h and computes
-//
-//     out = h + s  mod  2¹²⁸
-//
-func finalize(out *[TagSize]byte, h *[3]uint64, s *[2]uint64) {
-	h0, h1, h2 := h[0], h[1], h[2]
-
-	// After the partial reduction in updateGeneric, h might be more than
-	// 2¹³⁰ - 5, but will be less than 2 * (2¹³⁰ - 5). To complete the reduction
-	// in constant time, we compute t = h - (2¹³⁰ - 5), and select h as the
-	// result if the subtraction underflows, and t otherwise.
-
-	hMinusP0, b := bitsSub64(h0, p0, 0)
-	hMinusP1, b := bitsSub64(h1, p1, b)
-	_, b = bitsSub64(h2, p2, b)
-
-	// h = h if h < p else h - p
-	h0 = select64(b, h0, hMinusP0)
-	h1 = select64(b, h1, hMinusP1)
-
-	// Finally, we compute the last Poly1305 step
-	//
-	//     tag = h + s  mod  2¹²⁸
-	//
-	// by just doing a wide addition with the 128 low bits of h and discarding
-	// the overflow.
-	h0, c := bitsAdd64(h0, s[0], 0)
-	h1, _ = bitsAdd64(h1, s[1], c)
-
-	binary.LittleEndian.PutUint64(out[0:8], h0)
-	binary.LittleEndian.PutUint64(out[8:16], h1)
-}
diff --git a/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.go b/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.go
deleted file mode 100644
index 4a069941..00000000
--- a/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.go
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build gc && !purego
-// +build gc,!purego
-
-package poly1305
-
-//go:noescape
-func update(state *macState, msg []byte)
-
-// mac is a wrapper for macGeneric that redirects calls that would have gone to
-// updateGeneric to update.
-//
-// Its Write and Sum methods are otherwise identical to the macGeneric ones, but
-// using function pointers would carry a major performance cost.
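The Write methods in these platform wrappers all share the same bookkeeping: top up the internal buffer first, feed whole TagSize blocks straight through, and buffer the tail. A standalone model of that split (hypothetical helper that mirrors the code rather than being taken from it):

    package main

    import "fmt"

    const TagSize = 16

    // split reports how Write partitions n incoming bytes when `buffered`
    // bytes already sit in the buffer: bytes used to complete the buffer,
    // bytes hashed directly as whole blocks, and bytes left for next time.
    func split(buffered, n int) (topUp, direct, tail int) {
        if buffered > 0 {
            topUp = TagSize - buffered
            if topUp > n {
                return n, 0, 0 // still no full block; everything is buffered
            }
            n -= topUp
        }
        direct = n - n%TagSize
        return topUp, direct, n - direct
    }

    func main() {
        fmt.Println(split(0, 40)) // 0 32 8
        fmt.Println(split(5, 40)) // 11 16 13
        fmt.Println(split(5, 7))  // 7 0 0
    }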
-type mac struct{ macGeneric } - -func (h *mac) Write(p []byte) (int, error) { - nn := len(p) - if h.offset > 0 { - n := copy(h.buffer[h.offset:], p) - if h.offset+n < TagSize { - h.offset += n - return nn, nil - } - p = p[n:] - h.offset = 0 - update(&h.macState, h.buffer[:]) - } - if n := len(p) - (len(p) % TagSize); n > 0 { - update(&h.macState, p[:n]) - p = p[n:] - } - if len(p) > 0 { - h.offset += copy(h.buffer[h.offset:], p) - } - return nn, nil -} - -func (h *mac) Sum(out *[16]byte) { - state := h.macState - if h.offset > 0 { - update(&state, h.buffer[:h.offset]) - } - finalize(out, &state.h, &state.s) -} diff --git a/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.s b/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.s deleted file mode 100644 index 58422aad..00000000 --- a/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.s +++ /dev/null @@ -1,182 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build gc && !purego -// +build gc,!purego - -#include "textflag.h" - -// This was ported from the amd64 implementation. - -#define POLY1305_ADD(msg, h0, h1, h2, t0, t1, t2) \ - MOVD (msg), t0; \ - MOVD 8(msg), t1; \ - MOVD $1, t2; \ - ADDC t0, h0, h0; \ - ADDE t1, h1, h1; \ - ADDE t2, h2; \ - ADD $16, msg - -#define POLY1305_MUL(h0, h1, h2, r0, r1, t0, t1, t2, t3, t4, t5) \ - MULLD r0, h0, t0; \ - MULLD r0, h1, t4; \ - MULHDU r0, h0, t1; \ - MULHDU r0, h1, t5; \ - ADDC t4, t1, t1; \ - MULLD r0, h2, t2; \ - ADDZE t5; \ - MULHDU r1, h0, t4; \ - MULLD r1, h0, h0; \ - ADD t5, t2, t2; \ - ADDC h0, t1, t1; \ - MULLD h2, r1, t3; \ - ADDZE t4, h0; \ - MULHDU r1, h1, t5; \ - MULLD r1, h1, t4; \ - ADDC t4, t2, t2; \ - ADDE t5, t3, t3; \ - ADDC h0, t2, t2; \ - MOVD $-4, t4; \ - MOVD t0, h0; \ - MOVD t1, h1; \ - ADDZE t3; \ - ANDCC $3, t2, h2; \ - AND t2, t4, t0; \ - ADDC t0, h0, h0; \ - ADDE t3, h1, h1; \ - SLD $62, t3, t4; \ - SRD $2, t2; \ - ADDZE h2; \ - OR t4, t2, t2; \ - SRD $2, t3; \ - ADDC t2, h0, h0; \ - ADDE t3, h1, h1; \ - ADDZE h2 - -DATA ·poly1305Mask<>+0x00(SB)/8, $0x0FFFFFFC0FFFFFFF -DATA ·poly1305Mask<>+0x08(SB)/8, $0x0FFFFFFC0FFFFFFC -GLOBL ·poly1305Mask<>(SB), RODATA, $16 - -// func update(state *[7]uint64, msg []byte) -TEXT ·update(SB), $0-32 - MOVD state+0(FP), R3 - MOVD msg_base+8(FP), R4 - MOVD msg_len+16(FP), R5 - - MOVD 0(R3), R8 // h0 - MOVD 8(R3), R9 // h1 - MOVD 16(R3), R10 // h2 - MOVD 24(R3), R11 // r0 - MOVD 32(R3), R12 // r1 - - CMP R5, $16 - BLT bytes_between_0_and_15 - -loop: - POLY1305_ADD(R4, R8, R9, R10, R20, R21, R22) - -multiply: - POLY1305_MUL(R8, R9, R10, R11, R12, R16, R17, R18, R14, R20, R21) - ADD $-16, R5 - CMP R5, $16 - BGE loop - -bytes_between_0_and_15: - CMP R5, $0 - BEQ done - MOVD $0, R16 // h0 - MOVD $0, R17 // h1 - -flush_buffer: - CMP R5, $8 - BLE just1 - - MOVD $8, R21 - SUB R21, R5, R21 - - // Greater than 8 -- load the rightmost remaining bytes in msg - // and put into R17 (h1) - MOVD (R4)(R21), R17 - MOVD $16, R22 - - // Find the offset to those bytes - SUB R5, R22, R22 - SLD $3, R22 - - // Shift to get only the bytes in msg - SRD R22, R17, R17 - - // Put 1 at high end - MOVD $1, R23 - SLD $3, R21 - SLD R21, R23, R23 - OR R23, R17, R17 - - // Remainder is 8 - MOVD $8, R5 - -just1: - CMP R5, $8 - BLT less8 - - // Exactly 8 - MOVD (R4), R16 - - CMP R17, $0 - - // Check if we've already set R17; if not - // set 1 to indicate end of msg. 
- BNE carry - MOVD $1, R17 - BR carry - -less8: - MOVD $0, R16 // h0 - MOVD $0, R22 // shift count - CMP R5, $4 - BLT less4 - MOVWZ (R4), R16 - ADD $4, R4 - ADD $-4, R5 - MOVD $32, R22 - -less4: - CMP R5, $2 - BLT less2 - MOVHZ (R4), R21 - SLD R22, R21, R21 - OR R16, R21, R16 - ADD $16, R22 - ADD $-2, R5 - ADD $2, R4 - -less2: - CMP R5, $0 - BEQ insert1 - MOVBZ (R4), R21 - SLD R22, R21, R21 - OR R16, R21, R16 - ADD $8, R22 - -insert1: - // Insert 1 at end of msg - MOVD $1, R21 - SLD R22, R21, R21 - OR R16, R21, R16 - -carry: - // Add new values to h0, h1, h2 - ADDC R16, R8 - ADDE R17, R9 - ADDZE R10, R10 - MOVD $16, R5 - ADD R5, R4 - BR multiply - -done: - // Save h0, h1, h2 in state - MOVD R8, 0(R3) - MOVD R9, 8(R3) - MOVD R10, 16(R3) - RET diff --git a/vendor/golang.org/x/crypto/poly1305/sum_s390x.go b/vendor/golang.org/x/crypto/poly1305/sum_s390x.go deleted file mode 100644 index 62cc9f84..00000000 --- a/vendor/golang.org/x/crypto/poly1305/sum_s390x.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build gc && !purego -// +build gc,!purego - -package poly1305 - -import ( - "golang.org/x/sys/cpu" -) - -// updateVX is an assembly implementation of Poly1305 that uses vector -// instructions. It must only be called if the vector facility (vx) is -// available. -//go:noescape -func updateVX(state *macState, msg []byte) - -// mac is a replacement for macGeneric that uses a larger buffer and redirects -// calls that would have gone to updateGeneric to updateVX if the vector -// facility is installed. -// -// A larger buffer is required for good performance because the vector -// implementation has a higher fixed cost per call than the generic -// implementation. -type mac struct { - macState - - buffer [16 * TagSize]byte // size must be a multiple of block size (16) - offset int -} - -func (h *mac) Write(p []byte) (int, error) { - nn := len(p) - if h.offset > 0 { - n := copy(h.buffer[h.offset:], p) - if h.offset+n < len(h.buffer) { - h.offset += n - return nn, nil - } - p = p[n:] - h.offset = 0 - if cpu.S390X.HasVX { - updateVX(&h.macState, h.buffer[:]) - } else { - updateGeneric(&h.macState, h.buffer[:]) - } - } - - tail := len(p) % len(h.buffer) // number of bytes to copy into buffer - body := len(p) - tail // number of bytes to process now - if body > 0 { - if cpu.S390X.HasVX { - updateVX(&h.macState, p[:body]) - } else { - updateGeneric(&h.macState, p[:body]) - } - } - h.offset = copy(h.buffer[:], p[body:]) // copy tail bytes - can be 0 - return nn, nil -} - -func (h *mac) Sum(out *[TagSize]byte) { - state := h.macState - remainder := h.buffer[:h.offset] - - // Use the generic implementation if we have 2 or fewer blocks left - // to sum. The vector implementation has a higher startup time. - if cpu.S390X.HasVX && len(remainder) > 2*TagSize { - updateVX(&state, remainder) - } else if len(remainder) > 0 { - updateGeneric(&state, remainder) - } - finalize(out, &state.h, &state.s) -} diff --git a/vendor/golang.org/x/crypto/poly1305/sum_s390x.s b/vendor/golang.org/x/crypto/poly1305/sum_s390x.s deleted file mode 100644 index 69c64f84..00000000 --- a/vendor/golang.org/x/crypto/poly1305/sum_s390x.s +++ /dev/null @@ -1,504 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
-
-//go:build gc && !purego
-// +build gc,!purego
-
-#include "textflag.h"
-
-// This implementation of Poly1305 uses the vector facility (vx)
-// to process up to 2 blocks (32 bytes) per iteration using an
-// algorithm based on the one described in:
-//
-//   NEON crypto, Daniel J. Bernstein & Peter Schwabe
-//   https://cryptojedi.org/papers/neoncrypto-20120320.pdf
-//
-// This algorithm uses 5 26-bit limbs to represent a 130-bit
-// value. These limbs are, for the most part, zero extended and
-// placed into 64-bit vector register elements. Each vector
-// register is 128-bits wide and so holds 2 of these elements.
-// Using 26-bit limbs allows us plenty of headroom to accommodate
-// accumulations before and after multiplication without
-// overflowing either 32-bits (before multiplication) or 64-bits
-// (after multiplication).
-//
-// In order to parallelise the operations required to calculate
-// the sum we use two separate accumulators and then sum those
-// in an extra final step. For compatibility with the generic
-// implementation we perform this summation at the end of every
-// updateVX call.
-//
-// To use two accumulators we must multiply the message blocks
-// by r² rather than r. Only the final message block should be
-// multiplied by r.
-//
-// Example:
-//
-// We want to calculate the sum (h) for a 64 byte message (m):
-//
-//   h = m[0:16]r⁴ + m[16:32]r³ + m[32:48]r² + m[48:64]r
-//
-// To do this we split the calculation into the even indices
-// and odd indices of the message. These form our SIMD 'lanes':
-//
-//   h = m[ 0:16]r⁴ + m[32:48]r² +   <- lane 0
-//       m[16:32]r³ + m[48:64]r      <- lane 1
-//
-// To calculate this iteratively we refactor so that both lanes
-// are written in terms of r² and r:
-//
-//   h = (m[ 0:16]r² + m[32:48])r² + <- lane 0
-//       (m[16:32]r² + m[48:64])r    <- lane 1
-//                 ^             ^
-//                 |             coefficients for second iteration
-//                 coefficients for first iteration
-//
-// So in this case we would have two iterations. In the first
-// both lanes are multiplied by r². In the second only the
-// first lane is multiplied by r² and the second lane is
-// instead multiplied by r. This gives us the odd and even
-// powers of r that we need from the original equation.
-//
-// Notation:
-//
-//   h - accumulator
-//   r - key
-//   m - message
-//
-//   [a, b]       - SIMD register holding two 64-bit values
-//   [a, b, c, d] - SIMD register holding four 32-bit values
-//   xᵢ[n]        - limb n of variable x with bit width i
-//
-// Limbs are expressed in little endian order, so for 26-bit
-// limbs x₂₆[4] will be the most significant limb and x₂₆[0]
-// will be the least significant limb.
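The even/odd lane split above is easy to sanity-check in plain arithmetic: folding the even-indexed blocks with r² twice, and the odd-indexed blocks with r² and then r, reproduces the serial Horner evaluation. A math/big sketch with arbitrary illustrative values:

    package main

    import (
        "fmt"
        "math/big"
    )

    func main() {
        p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 130), big.NewInt(5))
        r := big.NewInt(123456789)
        m := []*big.Int{big.NewInt(11), big.NewInt(22), big.NewInt(33), big.NewInt(44)}

        // Serial: h = m[0]r⁴ + m[1]r³ + m[2]r² + m[3]r mod 2¹³⁰ - 5.
        serial := new(big.Int)
        for _, mi := range m {
            serial.Add(serial, mi).Mul(serial, r).Mod(serial, p)
        }

        r2 := new(big.Int).Mod(new(big.Int).Mul(r, r), p)

        // Lane 0 folds the even blocks by r² twice; lane 1 folds the odd
        // blocks by r², with a final multiply by r.
        lane0 := new(big.Int).Mul(new(big.Int).Add(new(big.Int).Mul(m[0], r2), m[2]), r2)
        lane1 := new(big.Int).Mul(new(big.Int).Add(new(big.Int).Mul(m[1], r2), m[3]), r)
        parallel := new(big.Int).Mod(new(big.Int).Add(lane0, lane1), p)

        fmt.Println(serial.Cmp(parallel) == 0) // true: the lanes recombine exactly
    }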
- -// masking constants -#define MOD24 V0 // [0x0000000000ffffff, 0x0000000000ffffff] - mask low 24-bits -#define MOD26 V1 // [0x0000000003ffffff, 0x0000000003ffffff] - mask low 26-bits - -// expansion constants (see EXPAND macro) -#define EX0 V2 -#define EX1 V3 -#define EX2 V4 - -// key (r², r or 1 depending on context) -#define R_0 V5 -#define R_1 V6 -#define R_2 V7 -#define R_3 V8 -#define R_4 V9 - -// precalculated coefficients (5r², 5r or 0 depending on context) -#define R5_1 V10 -#define R5_2 V11 -#define R5_3 V12 -#define R5_4 V13 - -// message block (m) -#define M_0 V14 -#define M_1 V15 -#define M_2 V16 -#define M_3 V17 -#define M_4 V18 - -// accumulator (h) -#define H_0 V19 -#define H_1 V20 -#define H_2 V21 -#define H_3 V22 -#define H_4 V23 - -// temporary registers (for short-lived values) -#define T_0 V24 -#define T_1 V25 -#define T_2 V26 -#define T_3 V27 -#define T_4 V28 - -GLOBL ·constants<>(SB), RODATA, $0x30 -// EX0 -DATA ·constants<>+0x00(SB)/8, $0x0006050403020100 -DATA ·constants<>+0x08(SB)/8, $0x1016151413121110 -// EX1 -DATA ·constants<>+0x10(SB)/8, $0x060c0b0a09080706 -DATA ·constants<>+0x18(SB)/8, $0x161c1b1a19181716 -// EX2 -DATA ·constants<>+0x20(SB)/8, $0x0d0d0d0d0d0f0e0d -DATA ·constants<>+0x28(SB)/8, $0x1d1d1d1d1d1f1e1d - -// MULTIPLY multiplies each lane of f and g, partially reduced -// modulo 2¹³Ⱐ- 5. The result, h, consists of partial products -// in each lane that need to be reduced further to produce the -// final result. -// -// hâ‚₃₀ = (fâ‚₃₀gâ‚₃₀) % 2¹³Ⱐ+ (5fâ‚₃₀gâ‚₃₀) / 2¹³Ⱐ-// -// Note that the multiplication by 5 of the high bits is -// achieved by precalculating the multiplication of four of the -// g coefficients by 5. These are g51-g54. -#define MULTIPLY(f0, f1, f2, f3, f4, g0, g1, g2, g3, g4, g51, g52, g53, g54, h0, h1, h2, h3, h4) \ - VMLOF f0, g0, h0 \ - VMLOF f0, g3, h3 \ - VMLOF f0, g1, h1 \ - VMLOF f0, g4, h4 \ - VMLOF f0, g2, h2 \ - VMLOF f1, g54, T_0 \ - VMLOF f1, g2, T_3 \ - VMLOF f1, g0, T_1 \ - VMLOF f1, g3, T_4 \ - VMLOF f1, g1, T_2 \ - VMALOF f2, g53, h0, h0 \ - VMALOF f2, g1, h3, h3 \ - VMALOF f2, g54, h1, h1 \ - VMALOF f2, g2, h4, h4 \ - VMALOF f2, g0, h2, h2 \ - VMALOF f3, g52, T_0, T_0 \ - VMALOF f3, g0, T_3, T_3 \ - VMALOF f3, g53, T_1, T_1 \ - VMALOF f3, g1, T_4, T_4 \ - VMALOF f3, g54, T_2, T_2 \ - VMALOF f4, g51, h0, h0 \ - VMALOF f4, g54, h3, h3 \ - VMALOF f4, g52, h1, h1 \ - VMALOF f4, g0, h4, h4 \ - VMALOF f4, g53, h2, h2 \ - VAG T_0, h0, h0 \ - VAG T_3, h3, h3 \ - VAG T_1, h1, h1 \ - VAG T_4, h4, h4 \ - VAG T_2, h2, h2 - -// REDUCE performs the following carry operations in four -// stages, as specified in Bernstein & Schwabe: -// -// 1: h₂₆[0]->h₂₆[1] h₂₆[3]->h₂₆[4] -// 2: h₂₆[1]->h₂₆[2] h₂₆[4]->h₂₆[0] -// 3: h₂₆[0]->h₂₆[1] h₂₆[2]->h₂₆[3] -// 4: h₂₆[3]->h₂₆[4] -// -// The result is that all of the limbs are limited to 26-bits -// except for h₂₆[1] and h₂₆[4] which are limited to 27-bits. -// -// Note that although each limb is aligned at 26-bit intervals -// they may contain values that exceed 2²ⶠ- 1, hence the need -// to carry the excess bits in each limb. 
-#define REDUCE(h0, h1, h2, h3, h4) \ - VESRLG $26, h0, T_0 \ - VESRLG $26, h3, T_1 \ - VN MOD26, h0, h0 \ - VN MOD26, h3, h3 \ - VAG T_0, h1, h1 \ - VAG T_1, h4, h4 \ - VESRLG $26, h1, T_2 \ - VESRLG $26, h4, T_3 \ - VN MOD26, h1, h1 \ - VN MOD26, h4, h4 \ - VESLG $2, T_3, T_4 \ - VAG T_3, T_4, T_4 \ - VAG T_2, h2, h2 \ - VAG T_4, h0, h0 \ - VESRLG $26, h2, T_0 \ - VESRLG $26, h0, T_1 \ - VN MOD26, h2, h2 \ - VN MOD26, h0, h0 \ - VAG T_0, h3, h3 \ - VAG T_1, h1, h1 \ - VESRLG $26, h3, T_2 \ - VN MOD26, h3, h3 \ - VAG T_2, h4, h4 - -// EXPAND splits the 128-bit little-endian values in0 and in1 -// into 26-bit big-endian limbs and places the results into -// the first and second lane of d₂₆[0:4] respectively. -// -// The EX0, EX1 and EX2 constants are arrays of byte indices -// for permutation. The permutation both reverses the bytes -// in the input and ensures the bytes are copied into the -// destination limb ready to be shifted into their final -// position. -#define EXPAND(in0, in1, d0, d1, d2, d3, d4) \ - VPERM in0, in1, EX0, d0 \ - VPERM in0, in1, EX1, d2 \ - VPERM in0, in1, EX2, d4 \ - VESRLG $26, d0, d1 \ - VESRLG $30, d2, d3 \ - VESRLG $4, d2, d2 \ - VN MOD26, d0, d0 \ // [in0₂₆[0], in1₂₆[0]] - VN MOD26, d3, d3 \ // [in0₂₆[3], in1₂₆[3]] - VN MOD26, d1, d1 \ // [in0₂₆[1], in1₂₆[1]] - VN MOD24, d4, d4 \ // [in0₂₆[4], in1₂₆[4]] - VN MOD26, d2, d2 // [in0₂₆[2], in1₂₆[2]] - -// func updateVX(state *macState, msg []byte) -TEXT ·updateVX(SB), NOSPLIT, $0 - MOVD state+0(FP), R1 - LMG msg+8(FP), R2, R3 // R2=msg_base, R3=msg_len - - // load EX0, EX1 and EX2 - MOVD $·constants<>(SB), R5 - VLM (R5), EX0, EX2 - - // generate masks - VGMG $(64-24), $63, MOD24 // [0x00ffffff, 0x00ffffff] - VGMG $(64-26), $63, MOD26 // [0x03ffffff, 0x03ffffff] - - // load h (accumulator) and r (key) from state - VZERO T_1 // [0, 0] - VL 0(R1), T_0 // [h₆₄[0], h₆₄[1]] - VLEG $0, 16(R1), T_1 // [h₆₄[2], 0] - VL 24(R1), T_2 // [r₆₄[0], r₆₄[1]] - VPDI $0, T_0, T_2, T_3 // [h₆₄[0], r₆₄[0]] - VPDI $5, T_0, T_2, T_4 // [h₆₄[1], r₆₄[1]] - - // unpack h and r into 26-bit limbs - // note: h₆₄[2] may have the low 3 bits set, so h₂₆[4] is a 27-bit value - VN MOD26, T_3, H_0 // [h₂₆[0], r₂₆[0]] - VZERO H_1 // [0, 0] - VZERO H_3 // [0, 0] - VGMG $(64-12-14), $(63-12), T_0 // [0x03fff000, 0x03fff000] - 26-bit mask with low 12 bits masked out - VESLG $24, T_1, T_1 // [h₆₄[2]<<24, 0] - VERIMG $-26&63, T_3, MOD26, H_1 // [h₂₆[1], r₂₆[1]] - VESRLG $+52&63, T_3, H_2 // [h₂₆[2], r₂₆[2]] - low 12 bits only - VERIMG $-14&63, T_4, MOD26, H_3 // [h₂₆[1], r₂₆[1]] - VESRLG $40, T_4, H_4 // [h₂₆[4], r₂₆[4]] - low 24 bits only - VERIMG $+12&63, T_4, T_0, H_2 // [h₂₆[2], r₂₆[2]] - complete - VO T_1, H_4, H_4 // [h₂₆[4], r₂₆[4]] - complete - - // replicate r across all 4 vector elements - VREPF $3, H_0, R_0 // [r₂₆[0], r₂₆[0], r₂₆[0], r₂₆[0]] - VREPF $3, H_1, R_1 // [r₂₆[1], r₂₆[1], r₂₆[1], r₂₆[1]] - VREPF $3, H_2, R_2 // [r₂₆[2], r₂₆[2], r₂₆[2], r₂₆[2]] - VREPF $3, H_3, R_3 // [r₂₆[3], r₂₆[3], r₂₆[3], r₂₆[3]] - VREPF $3, H_4, R_4 // [r₂₆[4], r₂₆[4], r₂₆[4], r₂₆[4]] - - // zero out lane 1 of h - VLEIG $1, $0, H_0 // [h₂₆[0], 0] - VLEIG $1, $0, H_1 // [h₂₆[1], 0] - VLEIG $1, $0, H_2 // [h₂₆[2], 0] - VLEIG $1, $0, H_3 // [h₂₆[3], 0] - VLEIG $1, $0, H_4 // [h₂₆[4], 0] - - // calculate 5r (ignore least significant limb) - VREPIF $5, T_0 - VMLF T_0, R_1, R5_1 // [5r₂₆[1], 5r₂₆[1], 5r₂₆[1], 5r₂₆[1]] - VMLF T_0, R_2, R5_2 // [5r₂₆[2], 5r₂₆[2], 5r₂₆[2], 5r₂₆[2]] - VMLF T_0, R_3, R5_3 // [5r₂₆[3], 5r₂₆[3], 5r₂₆[3], 5r₂₆[3]] - VMLF T_0, R_4, R5_4 // 
[5r₂₆[4], 5r₂₆[4], 5r₂₆[4], 5r₂₆[4]] - - // skip r² calculation if we are only calculating one block - CMPBLE R3, $16, skip - - // calculate r² - MULTIPLY(R_0, R_1, R_2, R_3, R_4, R_0, R_1, R_2, R_3, R_4, R5_1, R5_2, R5_3, R5_4, M_0, M_1, M_2, M_3, M_4) - REDUCE(M_0, M_1, M_2, M_3, M_4) - VGBM $0x0f0f, T_0 - VERIMG $0, M_0, T_0, R_0 // [r₂₆[0], r²₂₆[0], r₂₆[0], r²₂₆[0]] - VERIMG $0, M_1, T_0, R_1 // [r₂₆[1], r²₂₆[1], r₂₆[1], r²₂₆[1]] - VERIMG $0, M_2, T_0, R_2 // [r₂₆[2], r²₂₆[2], r₂₆[2], r²₂₆[2]] - VERIMG $0, M_3, T_0, R_3 // [r₂₆[3], r²₂₆[3], r₂₆[3], r²₂₆[3]] - VERIMG $0, M_4, T_0, R_4 // [r₂₆[4], r²₂₆[4], r₂₆[4], r²₂₆[4]] - - // calculate 5r² (ignore least significant limb) - VREPIF $5, T_0 - VMLF T_0, R_1, R5_1 // [5r₂₆[1], 5r²₂₆[1], 5r₂₆[1], 5r²₂₆[1]] - VMLF T_0, R_2, R5_2 // [5r₂₆[2], 5r²₂₆[2], 5r₂₆[2], 5r²₂₆[2]] - VMLF T_0, R_3, R5_3 // [5r₂₆[3], 5r²₂₆[3], 5r₂₆[3], 5r²₂₆[3]] - VMLF T_0, R_4, R5_4 // [5r₂₆[4], 5r²₂₆[4], 5r₂₆[4], 5r²₂₆[4]] - -loop: - CMPBLE R3, $32, b2 // 2 or fewer blocks remaining, need to change key coefficients - - // load next 2 blocks from message - VLM (R2), T_0, T_1 - - // update message slice - SUB $32, R3 - MOVD $32(R2), R2 - - // unpack message blocks into 26-bit big-endian limbs - EXPAND(T_0, T_1, M_0, M_1, M_2, M_3, M_4) - - // add 2¹²⸠to each message block value - VLEIB $4, $1, M_4 - VLEIB $12, $1, M_4 - -multiply: - // accumulate the incoming message - VAG H_0, M_0, M_0 - VAG H_3, M_3, M_3 - VAG H_1, M_1, M_1 - VAG H_4, M_4, M_4 - VAG H_2, M_2, M_2 - - // multiply the accumulator by the key coefficient - MULTIPLY(M_0, M_1, M_2, M_3, M_4, R_0, R_1, R_2, R_3, R_4, R5_1, R5_2, R5_3, R5_4, H_0, H_1, H_2, H_3, H_4) - - // carry and partially reduce the partial products - REDUCE(H_0, H_1, H_2, H_3, H_4) - - CMPBNE R3, $0, loop - -finish: - // sum lane 0 and lane 1 and put the result in lane 1 - VZERO T_0 - VSUMQG H_0, T_0, H_0 - VSUMQG H_3, T_0, H_3 - VSUMQG H_1, T_0, H_1 - VSUMQG H_4, T_0, H_4 - VSUMQG H_2, T_0, H_2 - - // reduce again after summation - // TODO(mundaym): there might be a more efficient way to do this - // now that we only have 1 active lane. For example, we could - // simultaneously pack the values as we reduce them. - REDUCE(H_0, H_1, H_2, H_3, H_4) - - // carry h[1] through to h[4] so that only h[4] can exceed 2²ⶠ- 1 - // TODO(mundaym): in testing this final carry was unnecessary. - // Needs a proof before it can be removed though. - VESRLG $26, H_1, T_1 - VN MOD26, H_1, H_1 - VAQ T_1, H_2, H_2 - VESRLG $26, H_2, T_2 - VN MOD26, H_2, H_2 - VAQ T_2, H_3, H_3 - VESRLG $26, H_3, T_3 - VN MOD26, H_3, H_3 - VAQ T_3, H_4, H_4 - - // h is now < 2(2¹³Ⱐ- 5) - // Pack each lane in h₂₆[0:4] into hâ‚₂₈[0:1]. - VESLG $26, H_1, H_1 - VESLG $26, H_3, H_3 - VO H_0, H_1, H_0 - VO H_2, H_3, H_2 - VESLG $4, H_2, H_2 - VLEIB $7, $48, H_1 - VSLB H_1, H_2, H_2 - VO H_0, H_2, H_0 - VLEIB $7, $104, H_1 - VSLB H_1, H_4, H_3 - VO H_3, H_0, H_0 - VLEIB $7, $24, H_1 - VSRLB H_1, H_4, H_1 - - // update state - VSTEG $1, H_0, 0(R1) - VSTEG $0, H_0, 8(R1) - VSTEG $1, H_1, 16(R1) - RET - -b2: // 2 or fewer blocks remaining - CMPBLE R3, $16, b1 - - // Load the 2 remaining blocks (17-32 bytes remaining). - MOVD $-17(R3), R0 // index of final byte to load modulo 16 - VL (R2), T_0 // load full 16 byte block - VLL R0, 16(R2), T_1 // load final (possibly partial) block and pad with zeros to 16 bytes - - // The Poly1305 algorithm requires that a 1 bit be appended to - // each message block. 
If the final block is less than 16 bytes - // long then it is easiest to insert the 1 before the message - // block is split into 26-bit limbs. If, on the other hand, the - // final message block is 16 bytes long then we append the 1 bit - // after expansion as normal. - MOVBZ $1, R0 - MOVD $-16(R3), R3 // index of byte in last block to insert 1 at (could be 16) - CMPBEQ R3, $16, 2(PC) // skip the insertion if the final block is 16 bytes long - VLVGB R3, R0, T_1 // insert 1 into the byte at index R3 - - // Split both blocks into 26-bit limbs in the appropriate lanes. - EXPAND(T_0, T_1, M_0, M_1, M_2, M_3, M_4) - - // Append a 1 byte to the end of the second to last block. - VLEIB $4, $1, M_4 - - // Append a 1 byte to the end of the last block only if it is a - // full 16 byte block. - CMPBNE R3, $16, 2(PC) - VLEIB $12, $1, M_4 - - // Finally, set up the coefficients for the final multiplication. - // We have previously saved r and 5r in the 32-bit even indexes - // of the R_[0-4] and R5_[1-4] coefficient registers. - // - // We want lane 0 to be multiplied by r² so that can be kept the - // same. We want lane 1 to be multiplied by r so we need to move - // the saved r value into the 32-bit odd index in lane 1 by - // rotating the 64-bit lane by 32. - VGBM $0x00ff, T_0 // [0, 0xffffffffffffffff] - mask lane 1 only - VERIMG $32, R_0, T_0, R_0 // [_, r²₂₆[0], _, r₂₆[0]] - VERIMG $32, R_1, T_0, R_1 // [_, r²₂₆[1], _, r₂₆[1]] - VERIMG $32, R_2, T_0, R_2 // [_, r²₂₆[2], _, r₂₆[2]] - VERIMG $32, R_3, T_0, R_3 // [_, r²₂₆[3], _, r₂₆[3]] - VERIMG $32, R_4, T_0, R_4 // [_, r²₂₆[4], _, r₂₆[4]] - VERIMG $32, R5_1, T_0, R5_1 // [_, 5r²₂₆[1], _, 5r₂₆[1]] - VERIMG $32, R5_2, T_0, R5_2 // [_, 5r²₂₆[2], _, 5r₂₆[2]] - VERIMG $32, R5_3, T_0, R5_3 // [_, 5r²₂₆[3], _, 5r₂₆[3]] - VERIMG $32, R5_4, T_0, R5_4 // [_, 5r²₂₆[4], _, 5r₂₆[4]] - - MOVD $0, R3 - BR multiply - -skip: - CMPBEQ R3, $0, finish - -b1: // 1 block remaining - - // Load the final block (1-16 bytes). This will be placed into - // lane 0. - MOVD $-1(R3), R0 - VLL R0, (R2), T_0 // pad to 16 bytes with zeros - - // The Poly1305 algorithm requires that a 1 bit be appended to - // each message block. If the final block is less than 16 bytes - // long then it is easiest to insert the 1 before the message - // block is split into 26-bit limbs. If, on the other hand, the - // final message block is 16 bytes long then we append the 1 bit - // after expansion as normal. - MOVBZ $1, R0 - CMPBEQ R3, $16, 2(PC) - VLVGB R3, R0, T_0 - - // Set the message block in lane 1 to the value 0 so that it - // can be accumulated without affecting the final result. - VZERO T_1 - - // Split the final message block into 26-bit limbs in lane 0. - // Lane 1 will be contain 0. - EXPAND(T_0, T_1, M_0, M_1, M_2, M_3, M_4) - - // Append a 1 byte to the end of the last block only if it is a - // full 16 byte block. - CMPBNE R3, $16, 2(PC) - VLEIB $4, $1, M_4 - - // We have previously saved r and 5r in the 32-bit even indexes - // of the R_[0-4] and R5_[1-4] coefficient registers. - // - // We want lane 0 to be multiplied by r so we need to move the - // saved r value into the 32-bit odd index in lane 0. We want - // lane 1 to be set to the value 1. This makes multiplication - // a no-op. We do this by setting lane 1 in every register to 0 - // and then just setting the 32-bit index 3 in R_0 to 1. 
- VZERO T_0 - MOVD $0, R0 - MOVD $0x10111213, R12 - VLVGP R12, R0, T_1 // [_, 0x10111213, _, 0x00000000] - VPERM T_0, R_0, T_1, R_0 // [_, r₂₆[0], _, 0] - VPERM T_0, R_1, T_1, R_1 // [_, r₂₆[1], _, 0] - VPERM T_0, R_2, T_1, R_2 // [_, r₂₆[2], _, 0] - VPERM T_0, R_3, T_1, R_3 // [_, r₂₆[3], _, 0] - VPERM T_0, R_4, T_1, R_4 // [_, r₂₆[4], _, 0] - VPERM T_0, R5_1, T_1, R5_1 // [_, 5r₂₆[1], _, 0] - VPERM T_0, R5_2, T_1, R5_2 // [_, 5r₂₆[2], _, 0] - VPERM T_0, R5_3, T_1, R5_3 // [_, 5r₂₆[3], _, 0] - VPERM T_0, R5_4, T_1, R5_4 // [_, 5r₂₆[4], _, 0] - - // Set the value of lane 1 to be 1. - VLEIF $3, $1, R_0 // [_, r₂₆[0], _, 1] - - MOVD $0, R3 - BR multiply diff --git a/vendor/golang.org/x/crypto/scrypt/scrypt.go b/vendor/golang.org/x/crypto/scrypt/scrypt.go deleted file mode 100644 index bbe4494c..00000000 --- a/vendor/golang.org/x/crypto/scrypt/scrypt.go +++ /dev/null @@ -1,212 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package scrypt implements the scrypt key derivation function as defined in -// Colin Percival's paper "Stronger Key Derivation via Sequential Memory-Hard -// Functions" (https://www.tarsnap.com/scrypt/scrypt.pdf). -package scrypt // import "golang.org/x/crypto/scrypt" - -import ( - "crypto/sha256" - "encoding/binary" - "errors" - "math/bits" - - "golang.org/x/crypto/pbkdf2" -) - -const maxInt = int(^uint(0) >> 1) - -// blockCopy copies n numbers from src into dst. -func blockCopy(dst, src []uint32, n int) { - copy(dst, src[:n]) -} - -// blockXOR XORs numbers from dst with n numbers from src. -func blockXOR(dst, src []uint32, n int) { - for i, v := range src[:n] { - dst[i] ^= v - } -} - -// salsaXOR applies Salsa20/8 to the XOR of 16 numbers from tmp and in, -// and puts the result into both tmp and out. 
-func salsaXOR(tmp *[16]uint32, in, out []uint32) { - w0 := tmp[0] ^ in[0] - w1 := tmp[1] ^ in[1] - w2 := tmp[2] ^ in[2] - w3 := tmp[3] ^ in[3] - w4 := tmp[4] ^ in[4] - w5 := tmp[5] ^ in[5] - w6 := tmp[6] ^ in[6] - w7 := tmp[7] ^ in[7] - w8 := tmp[8] ^ in[8] - w9 := tmp[9] ^ in[9] - w10 := tmp[10] ^ in[10] - w11 := tmp[11] ^ in[11] - w12 := tmp[12] ^ in[12] - w13 := tmp[13] ^ in[13] - w14 := tmp[14] ^ in[14] - w15 := tmp[15] ^ in[15] - - x0, x1, x2, x3, x4, x5, x6, x7, x8 := w0, w1, w2, w3, w4, w5, w6, w7, w8 - x9, x10, x11, x12, x13, x14, x15 := w9, w10, w11, w12, w13, w14, w15 - - for i := 0; i < 8; i += 2 { - x4 ^= bits.RotateLeft32(x0+x12, 7) - x8 ^= bits.RotateLeft32(x4+x0, 9) - x12 ^= bits.RotateLeft32(x8+x4, 13) - x0 ^= bits.RotateLeft32(x12+x8, 18) - - x9 ^= bits.RotateLeft32(x5+x1, 7) - x13 ^= bits.RotateLeft32(x9+x5, 9) - x1 ^= bits.RotateLeft32(x13+x9, 13) - x5 ^= bits.RotateLeft32(x1+x13, 18) - - x14 ^= bits.RotateLeft32(x10+x6, 7) - x2 ^= bits.RotateLeft32(x14+x10, 9) - x6 ^= bits.RotateLeft32(x2+x14, 13) - x10 ^= bits.RotateLeft32(x6+x2, 18) - - x3 ^= bits.RotateLeft32(x15+x11, 7) - x7 ^= bits.RotateLeft32(x3+x15, 9) - x11 ^= bits.RotateLeft32(x7+x3, 13) - x15 ^= bits.RotateLeft32(x11+x7, 18) - - x1 ^= bits.RotateLeft32(x0+x3, 7) - x2 ^= bits.RotateLeft32(x1+x0, 9) - x3 ^= bits.RotateLeft32(x2+x1, 13) - x0 ^= bits.RotateLeft32(x3+x2, 18) - - x6 ^= bits.RotateLeft32(x5+x4, 7) - x7 ^= bits.RotateLeft32(x6+x5, 9) - x4 ^= bits.RotateLeft32(x7+x6, 13) - x5 ^= bits.RotateLeft32(x4+x7, 18) - - x11 ^= bits.RotateLeft32(x10+x9, 7) - x8 ^= bits.RotateLeft32(x11+x10, 9) - x9 ^= bits.RotateLeft32(x8+x11, 13) - x10 ^= bits.RotateLeft32(x9+x8, 18) - - x12 ^= bits.RotateLeft32(x15+x14, 7) - x13 ^= bits.RotateLeft32(x12+x15, 9) - x14 ^= bits.RotateLeft32(x13+x12, 13) - x15 ^= bits.RotateLeft32(x14+x13, 18) - } - x0 += w0 - x1 += w1 - x2 += w2 - x3 += w3 - x4 += w4 - x5 += w5 - x6 += w6 - x7 += w7 - x8 += w8 - x9 += w9 - x10 += w10 - x11 += w11 - x12 += w12 - x13 += w13 - x14 += w14 - x15 += w15 - - out[0], tmp[0] = x0, x0 - out[1], tmp[1] = x1, x1 - out[2], tmp[2] = x2, x2 - out[3], tmp[3] = x3, x3 - out[4], tmp[4] = x4, x4 - out[5], tmp[5] = x5, x5 - out[6], tmp[6] = x6, x6 - out[7], tmp[7] = x7, x7 - out[8], tmp[8] = x8, x8 - out[9], tmp[9] = x9, x9 - out[10], tmp[10] = x10, x10 - out[11], tmp[11] = x11, x11 - out[12], tmp[12] = x12, x12 - out[13], tmp[13] = x13, x13 - out[14], tmp[14] = x14, x14 - out[15], tmp[15] = x15, x15 -} - -func blockMix(tmp *[16]uint32, in, out []uint32, r int) { - blockCopy(tmp[:], in[(2*r-1)*16:], 16) - for i := 0; i < 2*r; i += 2 { - salsaXOR(tmp, in[i*16:], out[i*8:]) - salsaXOR(tmp, in[i*16+16:], out[i*8+r*16:]) - } -} - -func integer(b []uint32, r int) uint64 { - j := (2*r - 1) * 16 - return uint64(b[j]) | uint64(b[j+1])<<32 -} - -func smix(b []byte, r, N int, v, xy []uint32) { - var tmp [16]uint32 - R := 32 * r - x := xy - y := xy[R:] - - j := 0 - for i := 0; i < R; i++ { - x[i] = binary.LittleEndian.Uint32(b[j:]) - j += 4 - } - for i := 0; i < N; i += 2 { - blockCopy(v[i*R:], x, R) - blockMix(&tmp, x, y, r) - - blockCopy(v[(i+1)*R:], y, R) - blockMix(&tmp, y, x, r) - } - for i := 0; i < N; i += 2 { - j := int(integer(x, r) & uint64(N-1)) - blockXOR(x, v[j*R:], R) - blockMix(&tmp, x, y, r) - - j = int(integer(y, r) & uint64(N-1)) - blockXOR(y, v[j*R:], R) - blockMix(&tmp, y, x, r) - } - j = 0 - for _, v := range x[:R] { - binary.LittleEndian.PutUint32(b[j:], v) - j += 4 - } -} - -// Key derives a key from the password, salt, and cost parameters, returning 
-// a byte slice of length keyLen that can be used as cryptographic key.
-//
-// N is a CPU/memory cost parameter, which must be a power of two greater than 1.
-// r and p must satisfy r * p < 2³⁰. If the parameters do not satisfy the
-// limits, the function returns a nil byte slice and an error.
-//
-// For example, you can get a derived key for e.g. AES-256 (which needs a
-// 32-byte key) by doing:
-//
-//	dk, err := scrypt.Key([]byte("some password"), salt, 32768, 8, 1, 32)
-//
-// The recommended parameters for interactive logins as of 2017 are N=32768, r=8
-// and p=1. The parameters N, r, and p should be increased as memory latency and
-// CPU parallelism increases; consider setting N to the highest power of 2 you
-// can derive within 100 milliseconds. Remember to get a good random salt.
-func Key(password, salt []byte, N, r, p, keyLen int) ([]byte, error) {
-	if N <= 1 || N&(N-1) != 0 {
-		return nil, errors.New("scrypt: N must be > 1 and a power of 2")
-	}
-	if uint64(r)*uint64(p) >= 1<<30 || r > maxInt/128/p || r > maxInt/256 || N > maxInt/128/r {
-		return nil, errors.New("scrypt: parameters are too large")
-	}
-
-	xy := make([]uint32, 64*r)
-	v := make([]uint32, 32*N*r)
-	b := pbkdf2.Key(password, salt, 1, p*128*r, sha256.New)
-
-	for i := 0; i < p; i++ {
-		smix(b[i*128*r:], r, N, v, xy)
-	}
-
-	return pbkdf2.Key(password, b, 1, keyLen, sha256.New), nil
-}
diff --git a/vendor/golang.org/x/crypto/ssh/agent/client.go b/vendor/golang.org/x/crypto/ssh/agent/client.go
deleted file mode 100644
index b909471c..00000000
--- a/vendor/golang.org/x/crypto/ssh/agent/client.go
+++ /dev/null
@@ -1,813 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package agent implements the ssh-agent protocol, and provides both
-// a client and a server. The client can talk to a standard ssh-agent
-// that uses UNIX sockets, and one could implement an alternative
-// ssh-agent process using the sample server.
-//
-// References:
-//  [PROTOCOL.agent]: https://tools.ietf.org/html/draft-miller-ssh-agent-00
-package agent // import "golang.org/x/crypto/ssh/agent"
-
-import (
-	"bytes"
-	"crypto/dsa"
-	"crypto/ecdsa"
-	"crypto/elliptic"
-	"crypto/rsa"
-	"encoding/base64"
-	"encoding/binary"
-	"errors"
-	"fmt"
-	"io"
-	"math/big"
-	"sync"
-
-	"crypto"
-	"golang.org/x/crypto/ed25519"
-	"golang.org/x/crypto/ssh"
-)
-
-// SignatureFlags represent additional flags that can be passed to the signature
-// requests as defined in [PROTOCOL.agent] section 4.5.1.
-type SignatureFlags uint32
-
-// SignatureFlag values as defined in [PROTOCOL.agent] section 5.3.
-const (
-	SignatureFlagReserved SignatureFlags = 1 << iota
-	SignatureFlagRsaSha256
-	SignatureFlagRsaSha512
-)
-
-// Agent represents the capabilities of an ssh-agent.
-type Agent interface {
-	// List returns the identities known to the agent.
-	List() ([]*Key, error)
-
-	// Sign has the agent sign the data using a protocol 2 key as defined
-	// in [PROTOCOL.agent] section 2.6.2.
-	Sign(key ssh.PublicKey, data []byte) (*ssh.Signature, error)
-
-	// Add adds a private key to the agent.
-	Add(key AddedKey) error
-
-	// Remove removes all identities with the given public key.
-	Remove(key ssh.PublicKey) error
-
-	// RemoveAll removes all identities.
-	RemoveAll() error
-
-	// Lock locks the agent. Sign and Remove will fail, and List will return an empty list.
-	Lock(passphrase []byte) error
-
-	// Unlock undoes the effect of Lock
-	Unlock(passphrase []byte) error
-
-	// Signers returns signers for all the known keys.
-	Signers() ([]ssh.Signer, error)
-}
-
-type ExtendedAgent interface {
-	Agent
-
-	// SignWithFlags signs like Sign, but allows for additional flags to be sent/received
-	SignWithFlags(key ssh.PublicKey, data []byte, flags SignatureFlags) (*ssh.Signature, error)
-
-	// Extension processes a custom extension request. Standard-compliant agents are not
-	// required to support any extensions, but this method allows agents to implement
-	// vendor-specific methods or add experimental features. See [PROTOCOL.agent] section 4.7.
-	// If agent extensions are unsupported entirely this method MUST return an
-	// ErrExtensionUnsupported error. Similarly, if just the specific extensionType in
-	// the request is unsupported by the agent then ErrExtensionUnsupported MUST be
-	// returned.
-	//
-	// In the case of success, since [PROTOCOL.agent] section 4.7 specifies that the contents
-	// of the response are unspecified (including the type of the message), the complete
-	// response will be returned as a []byte slice, including the "type" byte of the message.
-	Extension(extensionType string, contents []byte) ([]byte, error)
-}
-
-// ConstraintExtension describes an optional constraint defined by users.
-type ConstraintExtension struct {
-	// ExtensionName consists of a UTF-8 string suffixed by the
-	// implementation domain following the naming scheme defined
-	// in Section 4.2 of [RFC4251], e.g. "foo@example.com".
-	ExtensionName string
-	// ExtensionDetails contains the actual content of the extended
-	// constraint.
-	ExtensionDetails []byte
-}
-
-// AddedKey describes an SSH key to be added to an Agent.
-type AddedKey struct {
-	// PrivateKey must be a *rsa.PrivateKey, *dsa.PrivateKey,
-	// ed25519.PrivateKey or *ecdsa.PrivateKey, which will be inserted into the
-	// agent.
-	PrivateKey interface{}
-	// Certificate, if not nil, is communicated to the agent and will be
-	// stored with the key.
-	Certificate *ssh.Certificate
-	// Comment is an optional, free-form string.
-	Comment string
-	// LifetimeSecs, if not zero, is the number of seconds that the
-	// agent will store the key for.
-	LifetimeSecs uint32
-	// ConfirmBeforeUse, if true, requests that the agent confirm with the
-	// user before each use of this key.
-	ConfirmBeforeUse bool
-	// ConstraintExtensions are the experimental or private-use constraints
-	// defined by users.
-	ConstraintExtensions []ConstraintExtension
-}
-
-// See [PROTOCOL.agent], section 3.
-const (
-	agentRequestV1Identities   = 1
-	agentRemoveAllV1Identities = 9
-
-	// 3.2 Requests from client to agent for protocol 2 key operations
-	agentAddIdentity         = 17
-	agentRemoveIdentity      = 18
-	agentRemoveAllIdentities = 19
-	agentAddIDConstrained    = 25
-
-	// 3.3 Key-type independent requests from client to agent
-	agentAddSmartcardKey            = 20
-	agentRemoveSmartcardKey         = 21
-	agentLock                       = 22
-	agentUnlock                     = 23
-	agentAddSmartcardKeyConstrained = 26
-
-	// 3.7 Key constraint identifiers
-	agentConstrainLifetime  = 1
-	agentConstrainConfirm   = 2
-	agentConstrainExtension = 3
-)
-
-// maxAgentResponseBytes is the maximum agent reply size that is accepted. This
-// is a sanity check, not a limit in the spec.
-const maxAgentResponseBytes = 16 << 20
-
-// Agent messages:
-// These structures mirror the wire format of the corresponding ssh agent
-// messages found in [PROTOCOL.agent].
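
To make the Agent interface and AddedKey fields above concrete, here is a minimal sketch using the package's in-memory keyring (NewKeyring, from keyring.go further down in this diff); the generated key and comment are illustrative:

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"

	"golang.org/x/crypto/ssh/agent"
)

func main() {
	keyring := agent.NewKeyring()

	// Throwaway RSA key; any type listed in AddedKey.PrivateKey works.
	priv, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}

	// LifetimeSecs asks the agent to forget the key after an hour.
	err = keyring.Add(agent.AddedKey{
		PrivateKey:   priv,
		Comment:      "example key",
		LifetimeSecs: 3600,
	})
	if err != nil {
		panic(err)
	}

	keys, err := keyring.List()
	if err != nil {
		panic(err)
	}
	for _, k := range keys {
		fmt.Println(k.Format, k.Comment)
	}
}
```
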
- -// 3.4 Generic replies from agent to client -const agentFailure = 5 - -type failureAgentMsg struct{} - -const agentSuccess = 6 - -type successAgentMsg struct{} - -// See [PROTOCOL.agent], section 2.5.2. -const agentRequestIdentities = 11 - -type requestIdentitiesAgentMsg struct{} - -// See [PROTOCOL.agent], section 2.5.2. -const agentIdentitiesAnswer = 12 - -type identitiesAnswerAgentMsg struct { - NumKeys uint32 `sshtype:"12"` - Keys []byte `ssh:"rest"` -} - -// See [PROTOCOL.agent], section 2.6.2. -const agentSignRequest = 13 - -type signRequestAgentMsg struct { - KeyBlob []byte `sshtype:"13"` - Data []byte - Flags uint32 -} - -// See [PROTOCOL.agent], section 2.6.2. - -// 3.6 Replies from agent to client for protocol 2 key operations -const agentSignResponse = 14 - -type signResponseAgentMsg struct { - SigBlob []byte `sshtype:"14"` -} - -type publicKey struct { - Format string - Rest []byte `ssh:"rest"` -} - -// 3.7 Key constraint identifiers -type constrainLifetimeAgentMsg struct { - LifetimeSecs uint32 `sshtype:"1"` -} - -type constrainExtensionAgentMsg struct { - ExtensionName string `sshtype:"3"` - ExtensionDetails []byte - - // Rest is a field used for parsing, not part of message - Rest []byte `ssh:"rest"` -} - -// See [PROTOCOL.agent], section 4.7 -const agentExtension = 27 -const agentExtensionFailure = 28 - -// ErrExtensionUnsupported indicates that an extension defined in -// [PROTOCOL.agent] section 4.7 is unsupported by the agent. Specifically this -// error indicates that the agent returned a standard SSH_AGENT_FAILURE message -// as the result of a SSH_AGENTC_EXTENSION request. Note that the protocol -// specification (and therefore this error) does not distinguish between a -// specific extension being unsupported and extensions being unsupported entirely. -var ErrExtensionUnsupported = errors.New("agent: extension unsupported") - -type extensionAgentMsg struct { - ExtensionType string `sshtype:"27"` - Contents []byte -} - -// Key represents a protocol 2 public key as defined in -// [PROTOCOL.agent], section 2.5.2. -type Key struct { - Format string - Blob []byte - Comment string -} - -func clientErr(err error) error { - return fmt.Errorf("agent: client error: %v", err) -} - -// String returns the storage form of an agent key with the format, base64 -// encoded serialized key, and the comment if it is not empty. -func (k *Key) String() string { - s := string(k.Format) + " " + base64.StdEncoding.EncodeToString(k.Blob) - - if k.Comment != "" { - s += " " + k.Comment - } - - return s -} - -// Type returns the public key type. -func (k *Key) Type() string { - return k.Format -} - -// Marshal returns key blob to satisfy the ssh.PublicKey interface. -func (k *Key) Marshal() []byte { - return k.Blob -} - -// Verify satisfies the ssh.PublicKey interface. 
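
The String method above yields an authorized_keys-style line. A small sketch, assuming a throwaway Ed25519 key just to obtain a real wire blob:

```go
package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/ssh"
	"golang.org/x/crypto/ssh/agent"
)

func main() {
	pub, _, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}
	sshPub, err := ssh.NewPublicKey(pub)
	if err != nil {
		panic(err)
	}

	// agent.Key carries the same wire blob an agent returns from List.
	k := &agent.Key{
		Format:  sshPub.Type(),
		Blob:    sshPub.Marshal(),
		Comment: "demo@example",
	}

	// Prints "ssh-ed25519 AAAA... demo@example": an authorized_keys line.
	fmt.Println(k.String())
}
```
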
-func (k *Key) Verify(data []byte, sig *ssh.Signature) error { - pubKey, err := ssh.ParsePublicKey(k.Blob) - if err != nil { - return fmt.Errorf("agent: bad public key: %v", err) - } - return pubKey.Verify(data, sig) -} - -type wireKey struct { - Format string - Rest []byte `ssh:"rest"` -} - -func parseKey(in []byte) (out *Key, rest []byte, err error) { - var record struct { - Blob []byte - Comment string - Rest []byte `ssh:"rest"` - } - - if err := ssh.Unmarshal(in, &record); err != nil { - return nil, nil, err - } - - var wk wireKey - if err := ssh.Unmarshal(record.Blob, &wk); err != nil { - return nil, nil, err - } - - return &Key{ - Format: wk.Format, - Blob: record.Blob, - Comment: record.Comment, - }, record.Rest, nil -} - -// client is a client for an ssh-agent process. -type client struct { - // conn is typically a *net.UnixConn - conn io.ReadWriter - // mu is used to prevent concurrent access to the agent - mu sync.Mutex -} - -// NewClient returns an Agent that talks to an ssh-agent process over -// the given connection. -func NewClient(rw io.ReadWriter) ExtendedAgent { - return &client{conn: rw} -} - -// call sends an RPC to the agent. On success, the reply is -// unmarshaled into reply and replyType is set to the first byte of -// the reply, which contains the type of the message. -func (c *client) call(req []byte) (reply interface{}, err error) { - buf, err := c.callRaw(req) - if err != nil { - return nil, err - } - reply, err = unmarshal(buf) - if err != nil { - return nil, clientErr(err) - } - return reply, nil -} - -// callRaw sends an RPC to the agent. On success, the raw -// bytes of the response are returned; no unmarshalling is -// performed on the response. -func (c *client) callRaw(req []byte) (reply []byte, err error) { - c.mu.Lock() - defer c.mu.Unlock() - - msg := make([]byte, 4+len(req)) - binary.BigEndian.PutUint32(msg, uint32(len(req))) - copy(msg[4:], req) - if _, err = c.conn.Write(msg); err != nil { - return nil, clientErr(err) - } - - var respSizeBuf [4]byte - if _, err = io.ReadFull(c.conn, respSizeBuf[:]); err != nil { - return nil, clientErr(err) - } - respSize := binary.BigEndian.Uint32(respSizeBuf[:]) - if respSize > maxAgentResponseBytes { - return nil, clientErr(errors.New("response too large")) - } - - buf := make([]byte, respSize) - if _, err = io.ReadFull(c.conn, buf); err != nil { - return nil, clientErr(err) - } - return buf, nil -} - -func (c *client) simpleCall(req []byte) error { - resp, err := c.call(req) - if err != nil { - return err - } - if _, ok := resp.(*successAgentMsg); ok { - return nil - } - return errors.New("agent: failure") -} - -func (c *client) RemoveAll() error { - return c.simpleCall([]byte{agentRemoveAllIdentities}) -} - -func (c *client) Remove(key ssh.PublicKey) error { - req := ssh.Marshal(&agentRemoveIdentityMsg{ - KeyBlob: key.Marshal(), - }) - return c.simpleCall(req) -} - -func (c *client) Lock(passphrase []byte) error { - req := ssh.Marshal(&agentLockMsg{ - Passphrase: passphrase, - }) - return c.simpleCall(req) -} - -func (c *client) Unlock(passphrase []byte) error { - req := ssh.Marshal(&agentUnlockMsg{ - Passphrase: passphrase, - }) - return c.simpleCall(req) -} - -// List returns the identities known to the agent. -func (c *client) List() ([]*Key, error) { - // see [PROTOCOL.agent] section 2.5.2. 
- req := []byte{agentRequestIdentities} - - msg, err := c.call(req) - if err != nil { - return nil, err - } - - switch msg := msg.(type) { - case *identitiesAnswerAgentMsg: - if msg.NumKeys > maxAgentResponseBytes/8 { - return nil, errors.New("agent: too many keys in agent reply") - } - keys := make([]*Key, msg.NumKeys) - data := msg.Keys - for i := uint32(0); i < msg.NumKeys; i++ { - var key *Key - var err error - if key, data, err = parseKey(data); err != nil { - return nil, err - } - keys[i] = key - } - return keys, nil - case *failureAgentMsg: - return nil, errors.New("agent: failed to list keys") - } - panic("unreachable") -} - -// Sign has the agent sign the data using a protocol 2 key as defined -// in [PROTOCOL.agent] section 2.6.2. -func (c *client) Sign(key ssh.PublicKey, data []byte) (*ssh.Signature, error) { - return c.SignWithFlags(key, data, 0) -} - -func (c *client) SignWithFlags(key ssh.PublicKey, data []byte, flags SignatureFlags) (*ssh.Signature, error) { - req := ssh.Marshal(signRequestAgentMsg{ - KeyBlob: key.Marshal(), - Data: data, - Flags: uint32(flags), - }) - - msg, err := c.call(req) - if err != nil { - return nil, err - } - - switch msg := msg.(type) { - case *signResponseAgentMsg: - var sig ssh.Signature - if err := ssh.Unmarshal(msg.SigBlob, &sig); err != nil { - return nil, err - } - - return &sig, nil - case *failureAgentMsg: - return nil, errors.New("agent: failed to sign challenge") - } - panic("unreachable") -} - -// unmarshal parses an agent message in packet, returning the parsed -// form and the message type of packet. -func unmarshal(packet []byte) (interface{}, error) { - if len(packet) < 1 { - return nil, errors.New("agent: empty packet") - } - var msg interface{} - switch packet[0] { - case agentFailure: - return new(failureAgentMsg), nil - case agentSuccess: - return new(successAgentMsg), nil - case agentIdentitiesAnswer: - msg = new(identitiesAnswerAgentMsg) - case agentSignResponse: - msg = new(signResponseAgentMsg) - case agentV1IdentitiesAnswer: - msg = new(agentV1IdentityMsg) - default: - return nil, fmt.Errorf("agent: unknown type tag %d", packet[0]) - } - if err := ssh.Unmarshal(packet, msg); err != nil { - return nil, err - } - return msg, nil -} - -type rsaKeyMsg struct { - Type string `sshtype:"17|25"` - N *big.Int - E *big.Int - D *big.Int - Iqmp *big.Int // IQMP = Inverse Q Mod P - P *big.Int - Q *big.Int - Comments string - Constraints []byte `ssh:"rest"` -} - -type dsaKeyMsg struct { - Type string `sshtype:"17|25"` - P *big.Int - Q *big.Int - G *big.Int - Y *big.Int - X *big.Int - Comments string - Constraints []byte `ssh:"rest"` -} - -type ecdsaKeyMsg struct { - Type string `sshtype:"17|25"` - Curve string - KeyBytes []byte - D *big.Int - Comments string - Constraints []byte `ssh:"rest"` -} - -type ed25519KeyMsg struct { - Type string `sshtype:"17|25"` - Pub []byte - Priv []byte - Comments string - Constraints []byte `ssh:"rest"` -} - -// Insert adds a private key to the agent. 
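
Typical client usage of List and Sign against a running ssh-agent; the SSH_AUTH_SOCK socket is the standard agent transport, everything else here is an illustrative sketch:

```go
package main

import (
	"fmt"
	"net"
	"os"

	"golang.org/x/crypto/ssh/agent"
)

func main() {
	// ssh-agent serves a UNIX socket whose path is in SSH_AUTH_SOCK.
	conn, err := net.Dial("unix", os.Getenv("SSH_AUTH_SOCK"))
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	client := agent.NewClient(conn)

	keys, err := client.List()
	if err != nil {
		panic(err)
	}
	if len(keys) == 0 {
		fmt.Println("agent holds no keys")
		return
	}

	// Have the agent sign some data with the first identity it holds.
	sig, err := client.Sign(keys[0], []byte("data to sign"))
	if err != nil {
		panic(err)
	}
	fmt.Println("signature format:", sig.Format)
}
```
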
-func (c *client) insertKey(s interface{}, comment string, constraints []byte) error { - var req []byte - switch k := s.(type) { - case *rsa.PrivateKey: - if len(k.Primes) != 2 { - return fmt.Errorf("agent: unsupported RSA key with %d primes", len(k.Primes)) - } - k.Precompute() - req = ssh.Marshal(rsaKeyMsg{ - Type: ssh.KeyAlgoRSA, - N: k.N, - E: big.NewInt(int64(k.E)), - D: k.D, - Iqmp: k.Precomputed.Qinv, - P: k.Primes[0], - Q: k.Primes[1], - Comments: comment, - Constraints: constraints, - }) - case *dsa.PrivateKey: - req = ssh.Marshal(dsaKeyMsg{ - Type: ssh.KeyAlgoDSA, - P: k.P, - Q: k.Q, - G: k.G, - Y: k.Y, - X: k.X, - Comments: comment, - Constraints: constraints, - }) - case *ecdsa.PrivateKey: - nistID := fmt.Sprintf("nistp%d", k.Params().BitSize) - req = ssh.Marshal(ecdsaKeyMsg{ - Type: "ecdsa-sha2-" + nistID, - Curve: nistID, - KeyBytes: elliptic.Marshal(k.Curve, k.X, k.Y), - D: k.D, - Comments: comment, - Constraints: constraints, - }) - case ed25519.PrivateKey: - req = ssh.Marshal(ed25519KeyMsg{ - Type: ssh.KeyAlgoED25519, - Pub: []byte(k)[32:], - Priv: []byte(k), - Comments: comment, - Constraints: constraints, - }) - // This function originally supported only *ed25519.PrivateKey, however the - // general idiom is to pass ed25519.PrivateKey by value, not by pointer. - // We still support the pointer variant for backwards compatibility. - case *ed25519.PrivateKey: - req = ssh.Marshal(ed25519KeyMsg{ - Type: ssh.KeyAlgoED25519, - Pub: []byte(*k)[32:], - Priv: []byte(*k), - Comments: comment, - Constraints: constraints, - }) - default: - return fmt.Errorf("agent: unsupported key type %T", s) - } - - // if constraints are present then the message type needs to be changed. - if len(constraints) != 0 { - req[0] = agentAddIDConstrained - } - - resp, err := c.call(req) - if err != nil { - return err - } - if _, ok := resp.(*successAgentMsg); ok { - return nil - } - return errors.New("agent: failure") -} - -type rsaCertMsg struct { - Type string `sshtype:"17|25"` - CertBytes []byte - D *big.Int - Iqmp *big.Int // IQMP = Inverse Q Mod P - P *big.Int - Q *big.Int - Comments string - Constraints []byte `ssh:"rest"` -} - -type dsaCertMsg struct { - Type string `sshtype:"17|25"` - CertBytes []byte - X *big.Int - Comments string - Constraints []byte `ssh:"rest"` -} - -type ecdsaCertMsg struct { - Type string `sshtype:"17|25"` - CertBytes []byte - D *big.Int - Comments string - Constraints []byte `ssh:"rest"` -} - -type ed25519CertMsg struct { - Type string `sshtype:"17|25"` - CertBytes []byte - Pub []byte - Priv []byte - Comments string - Constraints []byte `ssh:"rest"` -} - -// Add adds a private key to the agent. If a certificate is given, -// that certificate is added instead as public key. -func (c *client) Add(key AddedKey) error { - var constraints []byte - - if secs := key.LifetimeSecs; secs != 0 { - constraints = append(constraints, ssh.Marshal(constrainLifetimeAgentMsg{secs})...) 
- } - - if key.ConfirmBeforeUse { - constraints = append(constraints, agentConstrainConfirm) - } - - cert := key.Certificate - if cert == nil { - return c.insertKey(key.PrivateKey, key.Comment, constraints) - } - return c.insertCert(key.PrivateKey, cert, key.Comment, constraints) -} - -func (c *client) insertCert(s interface{}, cert *ssh.Certificate, comment string, constraints []byte) error { - var req []byte - switch k := s.(type) { - case *rsa.PrivateKey: - if len(k.Primes) != 2 { - return fmt.Errorf("agent: unsupported RSA key with %d primes", len(k.Primes)) - } - k.Precompute() - req = ssh.Marshal(rsaCertMsg{ - Type: cert.Type(), - CertBytes: cert.Marshal(), - D: k.D, - Iqmp: k.Precomputed.Qinv, - P: k.Primes[0], - Q: k.Primes[1], - Comments: comment, - Constraints: constraints, - }) - case *dsa.PrivateKey: - req = ssh.Marshal(dsaCertMsg{ - Type: cert.Type(), - CertBytes: cert.Marshal(), - X: k.X, - Comments: comment, - Constraints: constraints, - }) - case *ecdsa.PrivateKey: - req = ssh.Marshal(ecdsaCertMsg{ - Type: cert.Type(), - CertBytes: cert.Marshal(), - D: k.D, - Comments: comment, - Constraints: constraints, - }) - case ed25519.PrivateKey: - req = ssh.Marshal(ed25519CertMsg{ - Type: cert.Type(), - CertBytes: cert.Marshal(), - Pub: []byte(k)[32:], - Priv: []byte(k), - Comments: comment, - Constraints: constraints, - }) - // This function originally supported only *ed25519.PrivateKey, however the - // general idiom is to pass ed25519.PrivateKey by value, not by pointer. - // We still support the pointer variant for backwards compatibility. - case *ed25519.PrivateKey: - req = ssh.Marshal(ed25519CertMsg{ - Type: cert.Type(), - CertBytes: cert.Marshal(), - Pub: []byte(*k)[32:], - Priv: []byte(*k), - Comments: comment, - Constraints: constraints, - }) - default: - return fmt.Errorf("agent: unsupported key type %T", s) - } - - // if constraints are present then the message type needs to be changed. - if len(constraints) != 0 { - req[0] = agentAddIDConstrained - } - - signer, err := ssh.NewSignerFromKey(s) - if err != nil { - return err - } - if bytes.Compare(cert.Key.Marshal(), signer.PublicKey().Marshal()) != 0 { - return errors.New("agent: signer and cert have different public key") - } - - resp, err := c.call(req) - if err != nil { - return err - } - if _, ok := resp.(*successAgentMsg); ok { - return nil - } - return errors.New("agent: failure") -} - -// Signers provides a callback for client authentication. -func (c *client) Signers() ([]ssh.Signer, error) { - keys, err := c.List() - if err != nil { - return nil, err - } - - var result []ssh.Signer - for _, k := range keys { - result = append(result, &agentKeyringSigner{c, k}) - } - return result, nil -} - -type agentKeyringSigner struct { - agent *client - pub ssh.PublicKey -} - -func (s *agentKeyringSigner) PublicKey() ssh.PublicKey { - return s.pub -} - -func (s *agentKeyringSigner) Sign(rand io.Reader, data []byte) (*ssh.Signature, error) { - // The agent has its own entropy source, so the rand argument is ignored. - return s.agent.Sign(s.pub, data) -} - -func (s *agentKeyringSigner) SignWithOpts(rand io.Reader, data []byte, opts crypto.SignerOpts) (*ssh.Signature, error) { - var flags SignatureFlags - if opts != nil { - switch opts.HashFunc() { - case crypto.SHA256: - flags = SignatureFlagRsaSha256 - case crypto.SHA512: - flags = SignatureFlagRsaSha512 - } - } - return s.agent.SignWithFlags(s.pub, data, flags) -} - -// Calls an extension method. 
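
The SignWithOpts mapping above is what upgrades legacy ssh-rsa signatures. Calling the extended interface directly looks roughly like this (a sketch in a hypothetical helper package; the flags are only meaningful for RSA identities):

```go
package sketch

import (
	"fmt"

	"golang.org/x/crypto/ssh"
	"golang.org/x/crypto/ssh/agent"
)

// signSHA256 requests an rsa-sha2-256 signature instead of legacy ssh-rsa by
// passing SignatureFlagRsaSha256, mirroring the SignWithOpts mapping above.
func signSHA256(a agent.ExtendedAgent, key ssh.PublicKey, data []byte) (*ssh.Signature, error) {
	sig, err := a.SignWithFlags(key, data, agent.SignatureFlagRsaSha256)
	if err != nil {
		return nil, err
	}
	fmt.Println("signature format:", sig.Format) // "rsa-sha2-256" for RSA keys
	return sig, nil
}
```
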
It is up to the agent implementation as to whether or not -// any particular extension is supported and may always return an error. Because the -// type of the response is up to the implementation, this returns the bytes of the -// response and does not attempt any type of unmarshalling. -func (c *client) Extension(extensionType string, contents []byte) ([]byte, error) { - req := ssh.Marshal(extensionAgentMsg{ - ExtensionType: extensionType, - Contents: contents, - }) - buf, err := c.callRaw(req) - if err != nil { - return nil, err - } - if len(buf) == 0 { - return nil, errors.New("agent: failure; empty response") - } - // [PROTOCOL.agent] section 4.7 indicates that an SSH_AGENT_FAILURE message - // represents an agent that does not support the extension - if buf[0] == agentFailure { - return nil, ErrExtensionUnsupported - } - if buf[0] == agentExtensionFailure { - return nil, errors.New("agent: generic extension failure") - } - - return buf, nil -} diff --git a/vendor/golang.org/x/crypto/ssh/agent/forward.go b/vendor/golang.org/x/crypto/ssh/agent/forward.go deleted file mode 100644 index fd24ba90..00000000 --- a/vendor/golang.org/x/crypto/ssh/agent/forward.go +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package agent - -import ( - "errors" - "io" - "net" - "sync" - - "golang.org/x/crypto/ssh" -) - -// RequestAgentForwarding sets up agent forwarding for the session. -// ForwardToAgent or ForwardToRemote should be called to route -// the authentication requests. -func RequestAgentForwarding(session *ssh.Session) error { - ok, err := session.SendRequest("auth-agent-req@openssh.com", true, nil) - if err != nil { - return err - } - if !ok { - return errors.New("forwarding request denied") - } - return nil -} - -// ForwardToAgent routes authentication requests to the given keyring. -func ForwardToAgent(client *ssh.Client, keyring Agent) error { - channels := client.HandleChannelOpen(channelType) - if channels == nil { - return errors.New("agent: already have handler for " + channelType) - } - - go func() { - for ch := range channels { - channel, reqs, err := ch.Accept() - if err != nil { - continue - } - go ssh.DiscardRequests(reqs) - go func() { - ServeAgent(keyring, channel) - channel.Close() - }() - } - }() - return nil -} - -const channelType = "auth-agent@openssh.com" - -// ForwardToRemote routes authentication requests to the ssh-agent -// process serving on the given unix socket. 
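
Combining RequestAgentForwarding with ForwardToAgent: a hedged sketch that forwards a local keyring over an established *ssh.Client (connection setup elided; the helper package and remote command are illustrative):

```go
package sketch

import (
	"golang.org/x/crypto/ssh"
	"golang.org/x/crypto/ssh/agent"
)

// forwardKeyring exposes a local Agent to the remote side of an existing
// SSH connection, so commands run there can authenticate against it.
func forwardKeyring(client *ssh.Client, keyring agent.Agent) error {
	// Handle incoming auth-agent@openssh.com channels with our keyring.
	if err := agent.ForwardToAgent(client, keyring); err != nil {
		return err
	}

	session, err := client.NewSession()
	if err != nil {
		return err
	}
	defer session.Close()

	// Ask the server to open the forwarding channel for this session.
	if err := agent.RequestAgentForwarding(session); err != nil {
		return err
	}
	// Illustrative remote command that would consult the forwarded agent.
	return session.Run("ssh -T git@example.com")
}
```
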
-func ForwardToRemote(client *ssh.Client, addr string) error { - channels := client.HandleChannelOpen(channelType) - if channels == nil { - return errors.New("agent: already have handler for " + channelType) - } - conn, err := net.Dial("unix", addr) - if err != nil { - return err - } - conn.Close() - - go func() { - for ch := range channels { - channel, reqs, err := ch.Accept() - if err != nil { - continue - } - go ssh.DiscardRequests(reqs) - go forwardUnixSocket(channel, addr) - } - }() - return nil -} - -func forwardUnixSocket(channel ssh.Channel, addr string) { - conn, err := net.Dial("unix", addr) - if err != nil { - return - } - - var wg sync.WaitGroup - wg.Add(2) - go func() { - io.Copy(conn, channel) - conn.(*net.UnixConn).CloseWrite() - wg.Done() - }() - go func() { - io.Copy(channel, conn) - channel.CloseWrite() - wg.Done() - }() - - wg.Wait() - conn.Close() - channel.Close() -} diff --git a/vendor/golang.org/x/crypto/ssh/agent/keyring.go b/vendor/golang.org/x/crypto/ssh/agent/keyring.go deleted file mode 100644 index c9d97943..00000000 --- a/vendor/golang.org/x/crypto/ssh/agent/keyring.go +++ /dev/null @@ -1,241 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package agent - -import ( - "bytes" - "crypto/rand" - "crypto/subtle" - "errors" - "fmt" - "sync" - "time" - - "golang.org/x/crypto/ssh" -) - -type privKey struct { - signer ssh.Signer - comment string - expire *time.Time -} - -type keyring struct { - mu sync.Mutex - keys []privKey - - locked bool - passphrase []byte -} - -var errLocked = errors.New("agent: locked") - -// NewKeyring returns an Agent that holds keys in memory. It is safe -// for concurrent use by multiple goroutines. -func NewKeyring() Agent { - return &keyring{} -} - -// RemoveAll removes all identities. -func (r *keyring) RemoveAll() error { - r.mu.Lock() - defer r.mu.Unlock() - if r.locked { - return errLocked - } - - r.keys = nil - return nil -} - -// removeLocked does the actual key removal. The caller must already be holding the -// keyring mutex. -func (r *keyring) removeLocked(want []byte) error { - found := false - for i := 0; i < len(r.keys); { - if bytes.Equal(r.keys[i].signer.PublicKey().Marshal(), want) { - found = true - r.keys[i] = r.keys[len(r.keys)-1] - r.keys = r.keys[:len(r.keys)-1] - continue - } else { - i++ - } - } - - if !found { - return errors.New("agent: key not found") - } - return nil -} - -// Remove removes all identities with the given public key. -func (r *keyring) Remove(key ssh.PublicKey) error { - r.mu.Lock() - defer r.mu.Unlock() - if r.locked { - return errLocked - } - - return r.removeLocked(key.Marshal()) -} - -// Lock locks the agent. Sign and Remove will fail, and List will return an empty list. -func (r *keyring) Lock(passphrase []byte) error { - r.mu.Lock() - defer r.mu.Unlock() - if r.locked { - return errLocked - } - - r.locked = true - r.passphrase = passphrase - return nil -} - -// Unlock undoes the effect of Lock -func (r *keyring) Unlock(passphrase []byte) error { - r.mu.Lock() - defer r.mu.Unlock() - if !r.locked { - return errors.New("agent: not locked") - } - if 1 != subtle.ConstantTimeCompare(passphrase, r.passphrase) { - return fmt.Errorf("agent: incorrect passphrase") - } - - r.locked = false - r.passphrase = nil - return nil -} - -// expireKeysLocked removes expired keys from the keyring. 
If a key was added
-// with a lifetimesecs constraint and seconds >= lifetimesecs seconds have
-// elapsed, it is removed. The caller *must* be holding the keyring mutex.
-func (r *keyring) expireKeysLocked() {
-	for _, k := range r.keys {
-		if k.expire != nil && time.Now().After(*k.expire) {
-			r.removeLocked(k.signer.PublicKey().Marshal())
-		}
-	}
-}
-
-// List returns the identities known to the agent.
-func (r *keyring) List() ([]*Key, error) {
-	r.mu.Lock()
-	defer r.mu.Unlock()
-	if r.locked {
-		// section 2.7: locked agents return empty.
-		return nil, nil
-	}
-
-	r.expireKeysLocked()
-	var ids []*Key
-	for _, k := range r.keys {
-		pub := k.signer.PublicKey()
-		ids = append(ids, &Key{
-			Format:  pub.Type(),
-			Blob:    pub.Marshal(),
-			Comment: k.comment})
-	}
-	return ids, nil
-}
-
-// Insert adds a private key to the keyring. If a certificate
-// is given, that certificate is added as the public key. Note that
-// any constraints given are ignored.
-func (r *keyring) Add(key AddedKey) error {
-	r.mu.Lock()
-	defer r.mu.Unlock()
-	if r.locked {
-		return errLocked
-	}
-	signer, err := ssh.NewSignerFromKey(key.PrivateKey)
-
-	if err != nil {
-		return err
-	}
-
-	if cert := key.Certificate; cert != nil {
-		signer, err = ssh.NewCertSigner(cert, signer)
-		if err != nil {
-			return err
-		}
-	}
-
-	p := privKey{
-		signer:  signer,
-		comment: key.Comment,
-	}
-
-	if key.LifetimeSecs > 0 {
-		t := time.Now().Add(time.Duration(key.LifetimeSecs) * time.Second)
-		p.expire = &t
-	}
-
-	r.keys = append(r.keys, p)
-
-	return nil
-}
-
-// Sign returns a signature for the data.
-func (r *keyring) Sign(key ssh.PublicKey, data []byte) (*ssh.Signature, error) {
-	return r.SignWithFlags(key, data, 0)
-}
-
-func (r *keyring) SignWithFlags(key ssh.PublicKey, data []byte, flags SignatureFlags) (*ssh.Signature, error) {
-	r.mu.Lock()
-	defer r.mu.Unlock()
-	if r.locked {
-		return nil, errLocked
-	}
-
-	r.expireKeysLocked()
-	wanted := key.Marshal()
-	for _, k := range r.keys {
-		if bytes.Equal(k.signer.PublicKey().Marshal(), wanted) {
-			if flags == 0 {
-				return k.signer.Sign(rand.Reader, data)
-			} else {
-				if algorithmSigner, ok := k.signer.(ssh.AlgorithmSigner); !ok {
-					return nil, fmt.Errorf("agent: signature does not support non-default signature algorithm: %T", k.signer)
-				} else {
-					var algorithm string
-					switch flags {
-					case SignatureFlagRsaSha256:
-						algorithm = ssh.SigAlgoRSASHA2256
-					case SignatureFlagRsaSha512:
-						algorithm = ssh.SigAlgoRSASHA2512
-					default:
-						return nil, fmt.Errorf("agent: unsupported signature flags: %d", flags)
-					}
-					return algorithmSigner.SignWithAlgorithm(rand.Reader, data, algorithm)
-				}
-			}
-		}
-	}
-	return nil, errors.New("not found")
-}
-
-// Signers returns signers for all the known keys.
-func (r *keyring) Signers() ([]ssh.Signer, error) {
-	r.mu.Lock()
-	defer r.mu.Unlock()
-	if r.locked {
-		return nil, errLocked
-	}
-
-	r.expireKeysLocked()
-	s := make([]ssh.Signer, 0, len(r.keys))
-	for _, k := range r.keys {
-		s = append(s, k.signer)
-	}
-	return s, nil
-}
-
-// The keyring does not support any extensions
-func (r *keyring) Extension(extensionType string, contents []byte) ([]byte, error) {
-	return nil, ErrExtensionUnsupported
-}
diff --git a/vendor/golang.org/x/crypto/ssh/agent/server.go b/vendor/golang.org/x/crypto/ssh/agent/server.go
deleted file mode 100644
index 6e7a1e02..00000000
--- a/vendor/golang.org/x/crypto/ssh/agent/server.go
+++ /dev/null
@@ -1,570 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package agent - -import ( - "crypto/dsa" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rsa" - "encoding/binary" - "errors" - "fmt" - "io" - "log" - "math/big" - - "golang.org/x/crypto/ed25519" - "golang.org/x/crypto/ssh" -) - -// Server wraps an Agent and uses it to implement the agent side of -// the SSH-agent, wire protocol. -type server struct { - agent Agent -} - -func (s *server) processRequestBytes(reqData []byte) []byte { - rep, err := s.processRequest(reqData) - if err != nil { - if err != errLocked { - // TODO(hanwen): provide better logging interface? - log.Printf("agent %d: %v", reqData[0], err) - } - return []byte{agentFailure} - } - - if err == nil && rep == nil { - return []byte{agentSuccess} - } - - return ssh.Marshal(rep) -} - -func marshalKey(k *Key) []byte { - var record struct { - Blob []byte - Comment string - } - record.Blob = k.Marshal() - record.Comment = k.Comment - - return ssh.Marshal(&record) -} - -// See [PROTOCOL.agent], section 2.5.1. -const agentV1IdentitiesAnswer = 2 - -type agentV1IdentityMsg struct { - Numkeys uint32 `sshtype:"2"` -} - -type agentRemoveIdentityMsg struct { - KeyBlob []byte `sshtype:"18"` -} - -type agentLockMsg struct { - Passphrase []byte `sshtype:"22"` -} - -type agentUnlockMsg struct { - Passphrase []byte `sshtype:"23"` -} - -func (s *server) processRequest(data []byte) (interface{}, error) { - switch data[0] { - case agentRequestV1Identities: - return &agentV1IdentityMsg{0}, nil - - case agentRemoveAllV1Identities: - return nil, nil - - case agentRemoveIdentity: - var req agentRemoveIdentityMsg - if err := ssh.Unmarshal(data, &req); err != nil { - return nil, err - } - - var wk wireKey - if err := ssh.Unmarshal(req.KeyBlob, &wk); err != nil { - return nil, err - } - - return nil, s.agent.Remove(&Key{Format: wk.Format, Blob: req.KeyBlob}) - - case agentRemoveAllIdentities: - return nil, s.agent.RemoveAll() - - case agentLock: - var req agentLockMsg - if err := ssh.Unmarshal(data, &req); err != nil { - return nil, err - } - - return nil, s.agent.Lock(req.Passphrase) - - case agentUnlock: - var req agentUnlockMsg - if err := ssh.Unmarshal(data, &req); err != nil { - return nil, err - } - return nil, s.agent.Unlock(req.Passphrase) - - case agentSignRequest: - var req signRequestAgentMsg - if err := ssh.Unmarshal(data, &req); err != nil { - return nil, err - } - - var wk wireKey - if err := ssh.Unmarshal(req.KeyBlob, &wk); err != nil { - return nil, err - } - - k := &Key{ - Format: wk.Format, - Blob: req.KeyBlob, - } - - var sig *ssh.Signature - var err error - if extendedAgent, ok := s.agent.(ExtendedAgent); ok { - sig, err = extendedAgent.SignWithFlags(k, req.Data, SignatureFlags(req.Flags)) - } else { - sig, err = s.agent.Sign(k, req.Data) - } - - if err != nil { - return nil, err - } - return &signResponseAgentMsg{SigBlob: ssh.Marshal(sig)}, nil - - case agentRequestIdentities: - keys, err := s.agent.List() - if err != nil { - return nil, err - } - - rep := identitiesAnswerAgentMsg{ - NumKeys: uint32(len(keys)), - } - for _, k := range keys { - rep.Keys = append(rep.Keys, marshalKey(k)...) - } - return rep, nil - - case agentAddIDConstrained, agentAddIdentity: - return nil, s.insertIdentity(data) - - case agentExtension: - // Return a stub object where the whole contents of the response gets marshaled. 
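
The request/response loop that feeds processRequest is driven by ServeAgent, defined at the end of this file. A sketch of serving a keyring on a UNIX socket (the socket path is illustrative):

```go
package main

import (
	"log"
	"net"

	"golang.org/x/crypto/ssh/agent"
)

func main() {
	keyring := agent.NewKeyring()

	// Illustrative socket path; point SSH_AUTH_SOCK at it to use a stock
	// ssh client against this agent.
	l, err := net.Listen("unix", "/tmp/demo-agent.sock")
	if err != nil {
		log.Fatal(err)
	}
	defer l.Close()

	for {
		conn, err := l.Accept()
		if err != nil {
			log.Fatal(err)
		}
		go func(c net.Conn) {
			defer c.Close()
			// ServeAgent reads length-framed requests until an I/O error.
			if err := agent.ServeAgent(keyring, c); err != nil {
				log.Println("agent session ended:", err)
			}
		}(conn)
	}
}
```
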
- var responseStub struct { - Rest []byte `ssh:"rest"` - } - - if extendedAgent, ok := s.agent.(ExtendedAgent); !ok { - // If this agent doesn't implement extensions, [PROTOCOL.agent] section 4.7 - // requires that we return a standard SSH_AGENT_FAILURE message. - responseStub.Rest = []byte{agentFailure} - } else { - var req extensionAgentMsg - if err := ssh.Unmarshal(data, &req); err != nil { - return nil, err - } - res, err := extendedAgent.Extension(req.ExtensionType, req.Contents) - if err != nil { - // If agent extensions are unsupported, return a standard SSH_AGENT_FAILURE - // message as required by [PROTOCOL.agent] section 4.7. - if err == ErrExtensionUnsupported { - responseStub.Rest = []byte{agentFailure} - } else { - // As the result of any other error processing an extension request, - // [PROTOCOL.agent] section 4.7 requires that we return a - // SSH_AGENT_EXTENSION_FAILURE code. - responseStub.Rest = []byte{agentExtensionFailure} - } - } else { - if len(res) == 0 { - return nil, nil - } - responseStub.Rest = res - } - } - - return responseStub, nil - } - - return nil, fmt.Errorf("unknown opcode %d", data[0]) -} - -func parseConstraints(constraints []byte) (lifetimeSecs uint32, confirmBeforeUse bool, extensions []ConstraintExtension, err error) { - for len(constraints) != 0 { - switch constraints[0] { - case agentConstrainLifetime: - lifetimeSecs = binary.BigEndian.Uint32(constraints[1:5]) - constraints = constraints[5:] - case agentConstrainConfirm: - confirmBeforeUse = true - constraints = constraints[1:] - case agentConstrainExtension: - var msg constrainExtensionAgentMsg - if err = ssh.Unmarshal(constraints, &msg); err != nil { - return 0, false, nil, err - } - extensions = append(extensions, ConstraintExtension{ - ExtensionName: msg.ExtensionName, - ExtensionDetails: msg.ExtensionDetails, - }) - constraints = msg.Rest - default: - return 0, false, nil, fmt.Errorf("unknown constraint type: %d", constraints[0]) - } - } - return -} - -func setConstraints(key *AddedKey, constraintBytes []byte) error { - lifetimeSecs, confirmBeforeUse, constraintExtensions, err := parseConstraints(constraintBytes) - if err != nil { - return err - } - - key.LifetimeSecs = lifetimeSecs - key.ConfirmBeforeUse = confirmBeforeUse - key.ConstraintExtensions = constraintExtensions - return nil -} - -func parseRSAKey(req []byte) (*AddedKey, error) { - var k rsaKeyMsg - if err := ssh.Unmarshal(req, &k); err != nil { - return nil, err - } - if k.E.BitLen() > 30 { - return nil, errors.New("agent: RSA public exponent too large") - } - priv := &rsa.PrivateKey{ - PublicKey: rsa.PublicKey{ - E: int(k.E.Int64()), - N: k.N, - }, - D: k.D, - Primes: []*big.Int{k.P, k.Q}, - } - priv.Precompute() - - addedKey := &AddedKey{PrivateKey: priv, Comment: k.Comments} - if err := setConstraints(addedKey, k.Constraints); err != nil { - return nil, err - } - return addedKey, nil -} - -func parseEd25519Key(req []byte) (*AddedKey, error) { - var k ed25519KeyMsg - if err := ssh.Unmarshal(req, &k); err != nil { - return nil, err - } - priv := ed25519.PrivateKey(k.Priv) - - addedKey := &AddedKey{PrivateKey: &priv, Comment: k.Comments} - if err := setConstraints(addedKey, k.Constraints); err != nil { - return nil, err - } - return addedKey, nil -} - -func parseDSAKey(req []byte) (*AddedKey, error) { - var k dsaKeyMsg - if err := ssh.Unmarshal(req, &k); err != nil { - return nil, err - } - priv := &dsa.PrivateKey{ - PublicKey: dsa.PublicKey{ - Parameters: dsa.Parameters{ - P: k.P, - Q: k.Q, - G: k.G, - }, - Y: k.Y, - }, - X: k.X, 
- } - - addedKey := &AddedKey{PrivateKey: priv, Comment: k.Comments} - if err := setConstraints(addedKey, k.Constraints); err != nil { - return nil, err - } - return addedKey, nil -} - -func unmarshalECDSA(curveName string, keyBytes []byte, privScalar *big.Int) (priv *ecdsa.PrivateKey, err error) { - priv = &ecdsa.PrivateKey{ - D: privScalar, - } - - switch curveName { - case "nistp256": - priv.Curve = elliptic.P256() - case "nistp384": - priv.Curve = elliptic.P384() - case "nistp521": - priv.Curve = elliptic.P521() - default: - return nil, fmt.Errorf("agent: unknown curve %q", curveName) - } - - priv.X, priv.Y = elliptic.Unmarshal(priv.Curve, keyBytes) - if priv.X == nil || priv.Y == nil { - return nil, errors.New("agent: point not on curve") - } - - return priv, nil -} - -func parseEd25519Cert(req []byte) (*AddedKey, error) { - var k ed25519CertMsg - if err := ssh.Unmarshal(req, &k); err != nil { - return nil, err - } - pubKey, err := ssh.ParsePublicKey(k.CertBytes) - if err != nil { - return nil, err - } - priv := ed25519.PrivateKey(k.Priv) - cert, ok := pubKey.(*ssh.Certificate) - if !ok { - return nil, errors.New("agent: bad ED25519 certificate") - } - - addedKey := &AddedKey{PrivateKey: &priv, Certificate: cert, Comment: k.Comments} - if err := setConstraints(addedKey, k.Constraints); err != nil { - return nil, err - } - return addedKey, nil -} - -func parseECDSAKey(req []byte) (*AddedKey, error) { - var k ecdsaKeyMsg - if err := ssh.Unmarshal(req, &k); err != nil { - return nil, err - } - - priv, err := unmarshalECDSA(k.Curve, k.KeyBytes, k.D) - if err != nil { - return nil, err - } - - addedKey := &AddedKey{PrivateKey: priv, Comment: k.Comments} - if err := setConstraints(addedKey, k.Constraints); err != nil { - return nil, err - } - return addedKey, nil -} - -func parseRSACert(req []byte) (*AddedKey, error) { - var k rsaCertMsg - if err := ssh.Unmarshal(req, &k); err != nil { - return nil, err - } - - pubKey, err := ssh.ParsePublicKey(k.CertBytes) - if err != nil { - return nil, err - } - - cert, ok := pubKey.(*ssh.Certificate) - if !ok { - return nil, errors.New("agent: bad RSA certificate") - } - - // An RSA publickey as marshaled by rsaPublicKey.Marshal() in keys.go - var rsaPub struct { - Name string - E *big.Int - N *big.Int - } - if err := ssh.Unmarshal(cert.Key.Marshal(), &rsaPub); err != nil { - return nil, fmt.Errorf("agent: Unmarshal failed to parse public key: %v", err) - } - - if rsaPub.E.BitLen() > 30 { - return nil, errors.New("agent: RSA public exponent too large") - } - - priv := rsa.PrivateKey{ - PublicKey: rsa.PublicKey{ - E: int(rsaPub.E.Int64()), - N: rsaPub.N, - }, - D: k.D, - Primes: []*big.Int{k.Q, k.P}, - } - priv.Precompute() - - addedKey := &AddedKey{PrivateKey: &priv, Certificate: cert, Comment: k.Comments} - if err := setConstraints(addedKey, k.Constraints); err != nil { - return nil, err - } - return addedKey, nil -} - -func parseDSACert(req []byte) (*AddedKey, error) { - var k dsaCertMsg - if err := ssh.Unmarshal(req, &k); err != nil { - return nil, err - } - pubKey, err := ssh.ParsePublicKey(k.CertBytes) - if err != nil { - return nil, err - } - cert, ok := pubKey.(*ssh.Certificate) - if !ok { - return nil, errors.New("agent: bad DSA certificate") - } - - // A DSA publickey as marshaled by dsaPublicKey.Marshal() in keys.go - var w struct { - Name string - P, Q, G, Y *big.Int - } - if err := ssh.Unmarshal(cert.Key.Marshal(), &w); err != nil { - return nil, fmt.Errorf("agent: Unmarshal failed to parse public key: %v", err) - } - - priv := 
&dsa.PrivateKey{ - PublicKey: dsa.PublicKey{ - Parameters: dsa.Parameters{ - P: w.P, - Q: w.Q, - G: w.G, - }, - Y: w.Y, - }, - X: k.X, - } - - addedKey := &AddedKey{PrivateKey: priv, Certificate: cert, Comment: k.Comments} - if err := setConstraints(addedKey, k.Constraints); err != nil { - return nil, err - } - return addedKey, nil -} - -func parseECDSACert(req []byte) (*AddedKey, error) { - var k ecdsaCertMsg - if err := ssh.Unmarshal(req, &k); err != nil { - return nil, err - } - - pubKey, err := ssh.ParsePublicKey(k.CertBytes) - if err != nil { - return nil, err - } - cert, ok := pubKey.(*ssh.Certificate) - if !ok { - return nil, errors.New("agent: bad ECDSA certificate") - } - - // An ECDSA publickey as marshaled by ecdsaPublicKey.Marshal() in keys.go - var ecdsaPub struct { - Name string - ID string - Key []byte - } - if err := ssh.Unmarshal(cert.Key.Marshal(), &ecdsaPub); err != nil { - return nil, err - } - - priv, err := unmarshalECDSA(ecdsaPub.ID, ecdsaPub.Key, k.D) - if err != nil { - return nil, err - } - - addedKey := &AddedKey{PrivateKey: priv, Certificate: cert, Comment: k.Comments} - if err := setConstraints(addedKey, k.Constraints); err != nil { - return nil, err - } - return addedKey, nil -} - -func (s *server) insertIdentity(req []byte) error { - var record struct { - Type string `sshtype:"17|25"` - Rest []byte `ssh:"rest"` - } - - if err := ssh.Unmarshal(req, &record); err != nil { - return err - } - - var addedKey *AddedKey - var err error - - switch record.Type { - case ssh.KeyAlgoRSA: - addedKey, err = parseRSAKey(req) - case ssh.KeyAlgoDSA: - addedKey, err = parseDSAKey(req) - case ssh.KeyAlgoECDSA256, ssh.KeyAlgoECDSA384, ssh.KeyAlgoECDSA521: - addedKey, err = parseECDSAKey(req) - case ssh.KeyAlgoED25519: - addedKey, err = parseEd25519Key(req) - case ssh.CertAlgoRSAv01: - addedKey, err = parseRSACert(req) - case ssh.CertAlgoDSAv01: - addedKey, err = parseDSACert(req) - case ssh.CertAlgoECDSA256v01, ssh.CertAlgoECDSA384v01, ssh.CertAlgoECDSA521v01: - addedKey, err = parseECDSACert(req) - case ssh.CertAlgoED25519v01: - addedKey, err = parseEd25519Cert(req) - default: - return fmt.Errorf("agent: not implemented: %q", record.Type) - } - - if err != nil { - return err - } - return s.agent.Add(*addedKey) -} - -// ServeAgent serves the agent protocol on the given connection. It -// returns when an I/O error occurs. -func ServeAgent(agent Agent, c io.ReadWriter) error { - s := &server{agent} - - var length [4]byte - for { - if _, err := io.ReadFull(c, length[:]); err != nil { - return err - } - l := binary.BigEndian.Uint32(length[:]) - if l == 0 { - return fmt.Errorf("agent: request size is 0") - } - if l > maxAgentResponseBytes { - // We also cap requests. - return fmt.Errorf("agent: request too large: %d", l) - } - - req := make([]byte, l) - if _, err := io.ReadFull(c, req); err != nil { - return err - } - - repData := s.processRequestBytes(req) - if len(repData) > maxAgentResponseBytes { - return fmt.Errorf("agent: reply too large: %d bytes", len(repData)) - } - - binary.BigEndian.PutUint32(length[:], uint32(len(repData))) - if _, err := c.Write(length[:]); err != nil { - return err - } - if _, err := c.Write(repData); err != nil { - return err - } - } -} diff --git a/vendor/golang.org/x/crypto/ssh/buffer.go b/vendor/golang.org/x/crypto/ssh/buffer.go deleted file mode 100644 index 1ab07d07..00000000 --- a/vendor/golang.org/x/crypto/ssh/buffer.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "io" - "sync" -) - -// buffer provides a linked list buffer for data exchange -// between producer and consumer. Theoretically the buffer is -// of unlimited capacity as it does no allocation of its own. -type buffer struct { - // protects concurrent access to head, tail and closed - *sync.Cond - - head *element // the buffer that will be read first - tail *element // the buffer that will be read last - - closed bool -} - -// An element represents a single link in a linked list. -type element struct { - buf []byte - next *element -} - -// newBuffer returns an empty buffer that is not closed. -func newBuffer() *buffer { - e := new(element) - b := &buffer{ - Cond: newCond(), - head: e, - tail: e, - } - return b -} - -// write makes buf available for Read to receive. -// buf must not be modified after the call to write. -func (b *buffer) write(buf []byte) { - b.Cond.L.Lock() - e := &element{buf: buf} - b.tail.next = e - b.tail = e - b.Cond.Signal() - b.Cond.L.Unlock() -} - -// eof closes the buffer. Reads from the buffer once all -// the data has been consumed will receive io.EOF. -func (b *buffer) eof() { - b.Cond.L.Lock() - b.closed = true - b.Cond.Signal() - b.Cond.L.Unlock() -} - -// Read reads data from the internal buffer in buf. Reads will block -// if no data is available, or until the buffer is closed. -func (b *buffer) Read(buf []byte) (n int, err error) { - b.Cond.L.Lock() - defer b.Cond.L.Unlock() - - for len(buf) > 0 { - // if there is data in b.head, copy it - if len(b.head.buf) > 0 { - r := copy(buf, b.head.buf) - buf, b.head.buf = buf[r:], b.head.buf[r:] - n += r - continue - } - // if there is a next buffer, make it the head - if len(b.head.buf) == 0 && b.head != b.tail { - b.head = b.head.next - continue - } - - // if at least one byte has been copied, return - if n > 0 { - break - } - - // if nothing was read, and there is nothing outstanding - // check to see if the buffer is closed. - if b.closed { - err = io.EOF - break - } - // out of buffers, wait for producer - b.Cond.Wait() - } - return -} diff --git a/vendor/golang.org/x/crypto/ssh/certs.go b/vendor/golang.org/x/crypto/ssh/certs.go deleted file mode 100644 index 916c840b..00000000 --- a/vendor/golang.org/x/crypto/ssh/certs.go +++ /dev/null @@ -1,546 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "bytes" - "errors" - "fmt" - "io" - "net" - "sort" - "time" -) - -// These constants from [PROTOCOL.certkeys] represent the algorithm names -// for certificate types supported by this package. -const ( - CertAlgoRSAv01 = "ssh-rsa-cert-v01@openssh.com" - CertAlgoDSAv01 = "ssh-dss-cert-v01@openssh.com" - CertAlgoECDSA256v01 = "ecdsa-sha2-nistp256-cert-v01@openssh.com" - CertAlgoECDSA384v01 = "ecdsa-sha2-nistp384-cert-v01@openssh.com" - CertAlgoECDSA521v01 = "ecdsa-sha2-nistp521-cert-v01@openssh.com" - CertAlgoSKECDSA256v01 = "sk-ecdsa-sha2-nistp256-cert-v01@openssh.com" - CertAlgoED25519v01 = "ssh-ed25519-cert-v01@openssh.com" - CertAlgoSKED25519v01 = "sk-ssh-ed25519-cert-v01@openssh.com" -) - -// Certificate types distinguish between host and user -// certificates. The values can be set in the CertType field of -// Certificate. -const ( - UserCert = 1 - HostCert = 2 -) - -// Signature represents a cryptographic signature. 
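
The buffer type above is unexported, so its semantics are best shown as a scratch function inside package ssh (a sketch, not part of this diff): writes never block, Read crosses element boundaries, and a drained, closed buffer returns io.EOF.

```go
// Hypothetical scratch file inside x/crypto/ssh; buffer is unexported.
package ssh

import "fmt"

func demoBuffer() {
	b := newBuffer()
	b.write([]byte("hello ")) // write never blocks; it links the slice in
	b.write([]byte("world"))
	b.eof() // no more writes; readers drain the rest, then get io.EOF

	p := make([]byte, 5)
	n, err := b.Read(p) // Read crosses element boundaries: first "hello"
	for err == nil {
		fmt.Printf("read %q\n", p[:n]) // then " worl", "d", finally io.EOF
		n, err = b.Read(p)
	}
}
```
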
-type Signature struct {
-	Format string
-	Blob   []byte
-	Rest   []byte `ssh:"rest"`
-}
-
-// CertTimeInfinity can be used for OpenSSHCertV01.ValidBefore to indicate that
-// a certificate does not expire.
-const CertTimeInfinity = 1<<64 - 1
-
-// A Certificate represents an OpenSSH certificate as defined in
-// [PROTOCOL.certkeys]?rev=1.8. The Certificate type implements the
-// PublicKey interface, so it can be unmarshaled using
-// ParsePublicKey.
-type Certificate struct {
-	Nonce           []byte
-	Key             PublicKey
-	Serial          uint64
-	CertType        uint32
-	KeyId           string
-	ValidPrincipals []string
-	ValidAfter      uint64
-	ValidBefore     uint64
-	Permissions
-	Reserved     []byte
-	SignatureKey PublicKey
-	Signature    *Signature
-}
-
-// genericCertData holds the key-independent part of the certificate data.
-// Overall, certificates contain a nonce, public key fields and
-// key-independent fields.
-type genericCertData struct {
-	Serial          uint64
-	CertType        uint32
-	KeyId           string
-	ValidPrincipals []byte
-	ValidAfter      uint64
-	ValidBefore     uint64
-	CriticalOptions []byte
-	Extensions      []byte
-	Reserved        []byte
-	SignatureKey    []byte
-	Signature       []byte
-}
-
-func marshalStringList(namelist []string) []byte {
-	var to []byte
-	for _, name := range namelist {
-		s := struct{ N string }{name}
-		to = append(to, Marshal(&s)...)
-	}
-	return to
-}
-
-type optionsTuple struct {
-	Key   string
-	Value []byte
-}
-
-type optionsTupleValue struct {
-	Value string
-}
-
-// serialize a map of critical options or extensions
-// issue #10569 - per [PROTOCOL.certkeys] and SSH implementation,
-// we need two length prefixes for a non-empty string value
-func marshalTuples(tups map[string]string) []byte {
-	keys := make([]string, 0, len(tups))
-	for key := range tups {
-		keys = append(keys, key)
-	}
-	sort.Strings(keys)
-
-	var ret []byte
-	for _, key := range keys {
-		s := optionsTuple{Key: key}
-		if value := tups[key]; len(value) > 0 {
-			s.Value = Marshal(&optionsTupleValue{value})
-		}
-		ret = append(ret, Marshal(&s)...)
-	}
-	return ret
-}
-
-// issue #10569 - per [PROTOCOL.certkeys] and SSH implementation,
-// we need two length prefixes for a non-empty option value
-func parseTuples(in []byte) (map[string]string, error) {
-	tups := map[string]string{}
-	var lastKey string
-	var haveLastKey bool
-
-	for len(in) > 0 {
-		var key, val, extra []byte
-		var ok bool
-
-		if key, in, ok = parseString(in); !ok {
-			return nil, errShortRead
-		}
-		keyStr := string(key)
-		// according to [PROTOCOL.certkeys], the names must be in
-		// lexical order.
- if haveLastKey && keyStr <= lastKey { - return nil, fmt.Errorf("ssh: certificate options are not in lexical order") - } - lastKey, haveLastKey = keyStr, true - // the next field is a data field, which if non-empty has a string embedded - if val, in, ok = parseString(in); !ok { - return nil, errShortRead - } - if len(val) > 0 { - val, extra, ok = parseString(val) - if !ok { - return nil, errShortRead - } - if len(extra) > 0 { - return nil, fmt.Errorf("ssh: unexpected trailing data after certificate option value") - } - tups[keyStr] = string(val) - } else { - tups[keyStr] = "" - } - } - return tups, nil -} - -func parseCert(in []byte, privAlgo string) (*Certificate, error) { - nonce, rest, ok := parseString(in) - if !ok { - return nil, errShortRead - } - - key, rest, err := parsePubKey(rest, privAlgo) - if err != nil { - return nil, err - } - - var g genericCertData - if err := Unmarshal(rest, &g); err != nil { - return nil, err - } - - c := &Certificate{ - Nonce: nonce, - Key: key, - Serial: g.Serial, - CertType: g.CertType, - KeyId: g.KeyId, - ValidAfter: g.ValidAfter, - ValidBefore: g.ValidBefore, - } - - for principals := g.ValidPrincipals; len(principals) > 0; { - principal, rest, ok := parseString(principals) - if !ok { - return nil, errShortRead - } - c.ValidPrincipals = append(c.ValidPrincipals, string(principal)) - principals = rest - } - - c.CriticalOptions, err = parseTuples(g.CriticalOptions) - if err != nil { - return nil, err - } - c.Extensions, err = parseTuples(g.Extensions) - if err != nil { - return nil, err - } - c.Reserved = g.Reserved - k, err := ParsePublicKey(g.SignatureKey) - if err != nil { - return nil, err - } - - c.SignatureKey = k - c.Signature, rest, ok = parseSignatureBody(g.Signature) - if !ok || len(rest) > 0 { - return nil, errors.New("ssh: signature parse error") - } - - return c, nil -} - -type openSSHCertSigner struct { - pub *Certificate - signer Signer -} - -type algorithmOpenSSHCertSigner struct { - *openSSHCertSigner - algorithmSigner AlgorithmSigner -} - -// NewCertSigner returns a Signer that signs with the given Certificate, whose -// private key is held by signer. It returns an error if the public key in cert -// doesn't match the key used by signer. -func NewCertSigner(cert *Certificate, signer Signer) (Signer, error) { - if bytes.Compare(cert.Key.Marshal(), signer.PublicKey().Marshal()) != 0 { - return nil, errors.New("ssh: signer and cert have different public key") - } - - if algorithmSigner, ok := signer.(AlgorithmSigner); ok { - return &algorithmOpenSSHCertSigner{ - &openSSHCertSigner{cert, signer}, algorithmSigner}, nil - } else { - return &openSSHCertSigner{cert, signer}, nil - } -} - -func (s *openSSHCertSigner) Sign(rand io.Reader, data []byte) (*Signature, error) { - return s.signer.Sign(rand, data) -} - -func (s *openSSHCertSigner) PublicKey() PublicKey { - return s.pub -} - -func (s *algorithmOpenSSHCertSigner) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) { - return s.algorithmSigner.SignWithAlgorithm(rand, data, algorithm) -} - -const sourceAddressCriticalOption = "source-address" - -// CertChecker does the work of verifying a certificate. Its methods -// can be plugged into ClientConfig.HostKeyCallback and -// ServerConfig.PublicKeyCallback. For the CertChecker to work, -// minimally, the IsAuthority callback should be set. -type CertChecker struct { - // SupportedCriticalOptions lists the CriticalOptions that the - // server application layer understands. 
These are only used - // for user certificates. - SupportedCriticalOptions []string - - // IsUserAuthority should return true if the key is recognized as an - // authority for the given user certificate. This allows for - // certificates to be signed by other certificates. This must be set - // if this CertChecker will be checking user certificates. - IsUserAuthority func(auth PublicKey) bool - - // IsHostAuthority should report whether the key is recognized as - // an authority for this host. This allows for certificates to be - // signed by other keys, and for those other keys to only be valid - // signers for particular hostnames. This must be set if this - // CertChecker will be checking host certificates. - IsHostAuthority func(auth PublicKey, address string) bool - - // Clock is used for verifying time stamps. If nil, time.Now - // is used. - Clock func() time.Time - - // UserKeyFallback is called when CertChecker.Authenticate encounters a - // public key that is not a certificate. It must implement validation - // of user keys or else, if nil, all such keys are rejected. - UserKeyFallback func(conn ConnMetadata, key PublicKey) (*Permissions, error) - - // HostKeyFallback is called when CertChecker.CheckHostKey encounters a - // public key that is not a certificate. It must implement host key - // validation or else, if nil, all such keys are rejected. - HostKeyFallback HostKeyCallback - - // IsRevoked is called for each certificate so that revocation checking - // can be implemented. It should return true if the given certificate - // is revoked and false otherwise. If nil, no certificates are - // considered to have been revoked. - IsRevoked func(cert *Certificate) bool -} - -// CheckHostKey checks a host key certificate. This method can be -// plugged into ClientConfig.HostKeyCallback. -func (c *CertChecker) CheckHostKey(addr string, remote net.Addr, key PublicKey) error { - cert, ok := key.(*Certificate) - if !ok { - if c.HostKeyFallback != nil { - return c.HostKeyFallback(addr, remote, key) - } - return errors.New("ssh: non-certificate host key") - } - if cert.CertType != HostCert { - return fmt.Errorf("ssh: certificate presented as a host key has type %d", cert.CertType) - } - if !c.IsHostAuthority(cert.SignatureKey, addr) { - return fmt.Errorf("ssh: no authorities for hostname: %v", addr) - } - - hostname, _, err := net.SplitHostPort(addr) - if err != nil { - return err - } - - // Pass hostname only as principal for host certificates (consistent with OpenSSH) - return c.CheckCert(hostname, cert) -} - -// Authenticate checks a user certificate. Authenticate can be used as -// a value for ServerConfig.PublicKeyCallback. -func (c *CertChecker) Authenticate(conn ConnMetadata, pubKey PublicKey) (*Permissions, error) { - cert, ok := pubKey.(*Certificate) - if !ok { - if c.UserKeyFallback != nil { - return c.UserKeyFallback(conn, pubKey) - } - return nil, errors.New("ssh: normal key pairs not accepted") - } - - if cert.CertType != UserCert { - return nil, fmt.Errorf("ssh: cert has type %d", cert.CertType) - } - if !c.IsUserAuthority(cert.SignatureKey) { - return nil, fmt.Errorf("ssh: certificate signed by unrecognized authority") - } - - if err := c.CheckCert(conn.User(), cert); err != nil { - return nil, err - } - - return &cert.Permissions, nil -} - -// CheckCert checks CriticalOptions, ValidPrincipals, revocation, timestamp and -// the signature of the certificate. 
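
As the field comments above indicate, CertChecker plugs straight into ClientConfig.HostKeyCallback. A sketch in a hypothetical helper package, assuming caPub is the CA public key loaded elsewhere and the user/auth values are placeholders:

```go
package sketch

import (
	"bytes"

	"golang.org/x/crypto/ssh"
)

// newClientConfig accepts host certificates signed by caPub; the user and
// auth method are illustrative placeholders.
func newClientConfig(caPub ssh.PublicKey, signer ssh.Signer) *ssh.ClientConfig {
	checker := &ssh.CertChecker{
		IsHostAuthority: func(auth ssh.PublicKey, address string) bool {
			// Trust exactly our CA key, for any address.
			return bytes.Equal(auth.Marshal(), caPub.Marshal())
		},
	}
	return &ssh.ClientConfig{
		User:            "demo",
		Auth:            []ssh.AuthMethod{ssh.PublicKeys(signer)},
		HostKeyCallback: checker.CheckHostKey,
	}
}
```
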
-func (c *CertChecker) CheckCert(principal string, cert *Certificate) error { - if c.IsRevoked != nil && c.IsRevoked(cert) { - return fmt.Errorf("ssh: certificate serial %d revoked", cert.Serial) - } - - for opt := range cert.CriticalOptions { - // sourceAddressCriticalOption will be enforced by - // serverAuthenticate - if opt == sourceAddressCriticalOption { - continue - } - - found := false - for _, supp := range c.SupportedCriticalOptions { - if supp == opt { - found = true - break - } - } - if !found { - return fmt.Errorf("ssh: unsupported critical option %q in certificate", opt) - } - } - - if len(cert.ValidPrincipals) > 0 { - // By default, certs are valid for all users/hosts. - found := false - for _, p := range cert.ValidPrincipals { - if p == principal { - found = true - break - } - } - if !found { - return fmt.Errorf("ssh: principal %q not in the set of valid principals for given certificate: %q", principal, cert.ValidPrincipals) - } - } - - clock := c.Clock - if clock == nil { - clock = time.Now - } - - unixNow := clock().Unix() - if after := int64(cert.ValidAfter); after < 0 || unixNow < int64(cert.ValidAfter) { - return fmt.Errorf("ssh: cert is not yet valid") - } - if before := int64(cert.ValidBefore); cert.ValidBefore != uint64(CertTimeInfinity) && (unixNow >= before || before < 0) { - return fmt.Errorf("ssh: cert has expired") - } - if err := cert.SignatureKey.Verify(cert.bytesForSigning(), cert.Signature); err != nil { - return fmt.Errorf("ssh: certificate signature does not verify") - } - - return nil -} - -// SignCert signs the certificate with an authority, setting the Nonce, -// SignatureKey, and Signature fields. -func (c *Certificate) SignCert(rand io.Reader, authority Signer) error { - c.Nonce = make([]byte, 32) - if _, err := io.ReadFull(rand, c.Nonce); err != nil { - return err - } - c.SignatureKey = authority.PublicKey() - - sig, err := authority.Sign(rand, c.bytesForSigning()) - if err != nil { - return err - } - c.Signature = sig - return nil -} - -var certAlgoNames = map[string]string{ - KeyAlgoRSA: CertAlgoRSAv01, - KeyAlgoDSA: CertAlgoDSAv01, - KeyAlgoECDSA256: CertAlgoECDSA256v01, - KeyAlgoECDSA384: CertAlgoECDSA384v01, - KeyAlgoECDSA521: CertAlgoECDSA521v01, - KeyAlgoSKECDSA256: CertAlgoSKECDSA256v01, - KeyAlgoED25519: CertAlgoED25519v01, - KeyAlgoSKED25519: CertAlgoSKED25519v01, -} - -// certToPrivAlgo returns the underlying algorithm for a certificate algorithm. -// Panics if a non-certificate algorithm is passed. -func certToPrivAlgo(algo string) string { - for privAlgo, pubAlgo := range certAlgoNames { - if pubAlgo == algo { - return privAlgo - } - } - panic("unknown cert algorithm") -} - -func (cert *Certificate) bytesForSigning() []byte { - c2 := *cert - c2.Signature = nil - out := c2.Marshal() - // Drop trailing signature length. - return out[:len(out)-4] -} - -// Marshal serializes c into OpenSSH's wire format. It is part of the -// PublicKey interface. 
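
Minting a certificate that CheckCert will accept is done with SignCert, defined just above. A sketch; the helper package, principal, key ID and validity window are illustrative:

```go
package sketch

import (
	"crypto/rand"
	"time"

	"golang.org/x/crypto/ssh"
)

// mintUserCert issues a 24-hour user certificate for userPub, signed by
// caSigner (the authority's ssh.Signer, obtained elsewhere).
func mintUserCert(userPub ssh.PublicKey, caSigner ssh.Signer) (*ssh.Certificate, error) {
	now := time.Now()
	cert := &ssh.Certificate{
		Key:             userPub,
		Serial:          1,
		CertType:        ssh.UserCert,
		KeyId:           "demo-user",
		ValidPrincipals: []string{"demo"},
		ValidAfter:      uint64(now.Unix()),
		ValidBefore:     uint64(now.Add(24 * time.Hour).Unix()),
	}
	// SignCert fills in Nonce, SignatureKey and Signature.
	if err := cert.SignCert(rand.Reader, caSigner); err != nil {
		return nil, err
	}
	return cert, nil
}
```
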
-func (c *Certificate) Marshal() []byte { - generic := genericCertData{ - Serial: c.Serial, - CertType: c.CertType, - KeyId: c.KeyId, - ValidPrincipals: marshalStringList(c.ValidPrincipals), - ValidAfter: uint64(c.ValidAfter), - ValidBefore: uint64(c.ValidBefore), - CriticalOptions: marshalTuples(c.CriticalOptions), - Extensions: marshalTuples(c.Extensions), - Reserved: c.Reserved, - SignatureKey: c.SignatureKey.Marshal(), - } - if c.Signature != nil { - generic.Signature = Marshal(c.Signature) - } - genericBytes := Marshal(&generic) - keyBytes := c.Key.Marshal() - _, keyBytes, _ = parseString(keyBytes) - prefix := Marshal(&struct { - Name string - Nonce []byte - Key []byte `ssh:"rest"` - }{c.Type(), c.Nonce, keyBytes}) - - result := make([]byte, 0, len(prefix)+len(genericBytes)) - result = append(result, prefix...) - result = append(result, genericBytes...) - return result -} - -// Type returns the key name. It is part of the PublicKey interface. -func (c *Certificate) Type() string { - algo, ok := certAlgoNames[c.Key.Type()] - if !ok { - panic("unknown cert key type " + c.Key.Type()) - } - return algo -} - -// Verify verifies a signature against the certificate's public -// key. It is part of the PublicKey interface. -func (c *Certificate) Verify(data []byte, sig *Signature) error { - return c.Key.Verify(data, sig) -} - -func parseSignatureBody(in []byte) (out *Signature, rest []byte, ok bool) { - format, in, ok := parseString(in) - if !ok { - return - } - - out = &Signature{ - Format: string(format), - } - - if out.Blob, in, ok = parseString(in); !ok { - return - } - - switch out.Format { - case KeyAlgoSKECDSA256, CertAlgoSKECDSA256v01, KeyAlgoSKED25519, CertAlgoSKED25519v01: - out.Rest = in - return out, nil, ok - } - - return out, in, ok -} - -func parseSignature(in []byte) (out *Signature, rest []byte, ok bool) { - sigBytes, rest, ok := parseString(in) - if !ok { - return - } - - out, trailing, ok := parseSignatureBody(sigBytes) - if !ok || len(trailing) > 0 { - return nil, nil, false - } - return -} diff --git a/vendor/golang.org/x/crypto/ssh/channel.go b/vendor/golang.org/x/crypto/ssh/channel.go deleted file mode 100644 index c0834c00..00000000 --- a/vendor/golang.org/x/crypto/ssh/channel.go +++ /dev/null @@ -1,633 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "encoding/binary" - "errors" - "fmt" - "io" - "log" - "sync" -) - -const ( - minPacketLength = 9 - // channelMaxPacket contains the maximum number of bytes that will be - // sent in a single packet. As per RFC 4253, section 6.1, 32k is also - // the minimum. - channelMaxPacket = 1 << 15 - // We follow OpenSSH here. - channelWindowSize = 64 * channelMaxPacket -) - -// NewChannel represents an incoming request to a channel. It must either be -// accepted for use by calling Accept, or rejected by calling Reject. -type NewChannel interface { - // Accept accepts the channel creation request. It returns the Channel - // and a Go channel containing SSH requests. The Go channel must be - // serviced otherwise the Channel will hang. - Accept() (Channel, <-chan *Request, error) - - // Reject rejects the channel creation request. After calling - // this, no other methods on the Channel may be called. - Reject(reason RejectionReason, message string) error - - // ChannelType returns the type of the channel, as supplied by the - // client. 
- ChannelType() string - - // ExtraData returns the arbitrary payload for this channel, as supplied - // by the client. This data is specific to the channel type. - ExtraData() []byte -} - -// A Channel is an ordered, reliable, flow-controlled, duplex stream -// that is multiplexed over an SSH connection. -type Channel interface { - // Read reads up to len(data) bytes from the channel. - Read(data []byte) (int, error) - - // Write writes len(data) bytes to the channel. - Write(data []byte) (int, error) - - // Close signals end of channel use. No data may be sent after this - // call. - Close() error - - // CloseWrite signals the end of sending in-band - // data. Requests may still be sent, and the other side may - // still send data - CloseWrite() error - - // SendRequest sends a channel request. If wantReply is true, - // it will wait for a reply and return the result as a - // boolean, otherwise the return value will be false. Channel - // requests are out-of-band messages so they may be sent even - // if the data stream is closed or blocked by flow control. - // If the channel is closed before a reply is returned, io.EOF - // is returned. - SendRequest(name string, wantReply bool, payload []byte) (bool, error) - - // Stderr returns an io.ReadWriter that writes to this channel - // with the extended data type set to stderr. Stderr may - // safely be read and written from a different goroutine than - // Read and Write respectively. - Stderr() io.ReadWriter -} - -// Request is a request sent outside of the normal stream of -// data. Requests can either be specific to an SSH channel, or they -// can be global. -type Request struct { - Type string - WantReply bool - Payload []byte - - ch *channel - mux *mux -} - -// Reply sends a response to a request. It must be called for all requests -// where WantReply is true and is a no-op otherwise. The payload argument is -// ignored for replies to channel-specific requests. -func (r *Request) Reply(ok bool, payload []byte) error { - if !r.WantReply { - return nil - } - - if r.ch == nil { - return r.mux.ackRequest(ok, payload) - } - - return r.ch.ackRequest(ok) -} - -// RejectionReason is an enumeration used when rejecting channel creation -// requests. See RFC 4254, section 5.1. -type RejectionReason uint32 - -const ( - Prohibited RejectionReason = iota + 1 - ConnectionFailed - UnknownChannelType - ResourceShortage -) - -// String converts the rejection reason to human readable form. -func (r RejectionReason) String() string { - switch r { - case Prohibited: - return "administratively prohibited" - case ConnectionFailed: - return "connect failed" - case UnknownChannelType: - return "unknown channel type" - case ResourceShortage: - return "resource shortage" - } - return fmt.Sprintf("unknown reason %d", int(r)) -} - -func min(a uint32, b int) uint32 { - if a < uint32(b) { - return a - } - return uint32(b) -} - -type channelDirection uint8 - -const ( - channelInbound channelDirection = iota - channelOutbound -) - -// channel is an implementation of the Channel interface that works -// with the mux class. -type channel struct { - // R/O after creation - chanType string - extraData []byte - localId, remoteId uint32 - - // maxIncomingPayload and maxRemotePayload are the maximum - // payload sizes of normal and extended data packets for - // receiving and sending, respectively. The wire packet will - // be 9 or 13 bytes larger (excluding encryption overhead). 
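// A server-side sketch of servicing NewChannel requests as the interface above
// requires: accept "session" channels, reject everything else, and keep the
// per-channel request stream drained so the Channel does not hang. chans would
// typically come from ssh.NewServerConn.
package main

import "golang.org/x/crypto/ssh"

func serveChannels(chans <-chan ssh.NewChannel) {
	for newCh := range chans {
		if newCh.ChannelType() != "session" {
			// Reject must be called when we will not Accept.
			newCh.Reject(ssh.UnknownChannelType, "only sessions are supported")
			continue
		}
		ch, reqs, err := newCh.Accept()
		if err != nil {
			continue
		}
		// A Request sink; a real server would reply per request type.
		go ssh.DiscardRequests(reqs)
		go func(ch ssh.Channel) {
			defer ch.Close()
			ch.Write([]byte("hello\n"))
		}(ch)
	}
}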
- maxIncomingPayload uint32 - maxRemotePayload uint32 - - mux *mux - - // decided is set to true if an accept or reject message has been sent - // (for outbound channels) or received (for inbound channels). - decided bool - - // direction contains either channelOutbound, for channels created - // locally, or channelInbound, for channels created by the peer. - direction channelDirection - - // Pending internal channel messages. - msg chan interface{} - - // Since requests have no ID, there can be only one request - // with WantReply=true outstanding. This lock is held by a - // goroutine that has such an outgoing request pending. - sentRequestMu sync.Mutex - - incomingRequests chan *Request - - sentEOF bool - - // thread-safe data - remoteWin window - pending *buffer - extPending *buffer - - // windowMu protects myWindow, the flow-control window. - windowMu sync.Mutex - myWindow uint32 - - // writeMu serializes calls to mux.conn.writePacket() and - // protects sentClose and packetPool. This mutex must be - // different from windowMu, as writePacket can block if there - // is a key exchange pending. - writeMu sync.Mutex - sentClose bool - - // packetPool has a buffer for each extended channel ID to - // save allocations during writes. - packetPool map[uint32][]byte -} - -// writePacket sends a packet. If the packet is a channel close, it updates -// sentClose. This method takes the lock c.writeMu. -func (ch *channel) writePacket(packet []byte) error { - ch.writeMu.Lock() - if ch.sentClose { - ch.writeMu.Unlock() - return io.EOF - } - ch.sentClose = (packet[0] == msgChannelClose) - err := ch.mux.conn.writePacket(packet) - ch.writeMu.Unlock() - return err -} - -func (ch *channel) sendMessage(msg interface{}) error { - if debugMux { - log.Printf("send(%d): %#v", ch.mux.chanList.offset, msg) - } - - p := Marshal(msg) - binary.BigEndian.PutUint32(p[1:], ch.remoteId) - return ch.writePacket(p) -} - -// WriteExtended writes data to a specific extended stream. These streams are -// used, for example, for stderr. -func (ch *channel) WriteExtended(data []byte, extendedCode uint32) (n int, err error) { - if ch.sentEOF { - return 0, io.EOF - } - // 1 byte message type, 4 bytes remoteId, 4 bytes data length - opCode := byte(msgChannelData) - headerLength := uint32(9) - if extendedCode > 0 { - headerLength += 4 - opCode = msgChannelExtendedData - } - - ch.writeMu.Lock() - packet := ch.packetPool[extendedCode] - // We don't remove the buffer from packetPool, so - // WriteExtended calls from different goroutines will be - // flagged as errors by the race detector. 
- ch.writeMu.Unlock() - - for len(data) > 0 { - space := min(ch.maxRemotePayload, len(data)) - if space, err = ch.remoteWin.reserve(space); err != nil { - return n, err - } - if want := headerLength + space; uint32(cap(packet)) < want { - packet = make([]byte, want) - } else { - packet = packet[:want] - } - - todo := data[:space] - - packet[0] = opCode - binary.BigEndian.PutUint32(packet[1:], ch.remoteId) - if extendedCode > 0 { - binary.BigEndian.PutUint32(packet[5:], uint32(extendedCode)) - } - binary.BigEndian.PutUint32(packet[headerLength-4:], uint32(len(todo))) - copy(packet[headerLength:], todo) - if err = ch.writePacket(packet); err != nil { - return n, err - } - - n += len(todo) - data = data[len(todo):] - } - - ch.writeMu.Lock() - ch.packetPool[extendedCode] = packet - ch.writeMu.Unlock() - - return n, err -} - -func (ch *channel) handleData(packet []byte) error { - headerLen := 9 - isExtendedData := packet[0] == msgChannelExtendedData - if isExtendedData { - headerLen = 13 - } - if len(packet) < headerLen { - // malformed data packet - return parseError(packet[0]) - } - - var extended uint32 - if isExtendedData { - extended = binary.BigEndian.Uint32(packet[5:]) - } - - length := binary.BigEndian.Uint32(packet[headerLen-4 : headerLen]) - if length == 0 { - return nil - } - if length > ch.maxIncomingPayload { - // TODO(hanwen): should send Disconnect? - return errors.New("ssh: incoming packet exceeds maximum payload size") - } - - data := packet[headerLen:] - if length != uint32(len(data)) { - return errors.New("ssh: wrong packet length") - } - - ch.windowMu.Lock() - if ch.myWindow < length { - ch.windowMu.Unlock() - // TODO(hanwen): should send Disconnect with reason? - return errors.New("ssh: remote side wrote too much") - } - ch.myWindow -= length - ch.windowMu.Unlock() - - if extended == 1 { - ch.extPending.write(data) - } else if extended > 0 { - // discard other extended data. - } else { - ch.pending.write(data) - } - return nil -} - -func (c *channel) adjustWindow(n uint32) error { - c.windowMu.Lock() - // Since myWindow is managed on our side, and can never exceed - // the initial window setting, we don't worry about overflow. - c.myWindow += uint32(n) - c.windowMu.Unlock() - return c.sendMessage(windowAdjustMsg{ - AdditionalBytes: uint32(n), - }) -} - -func (c *channel) ReadExtended(data []byte, extended uint32) (n int, err error) { - switch extended { - case 1: - n, err = c.extPending.Read(data) - case 0: - n, err = c.pending.Read(data) - default: - return 0, fmt.Errorf("ssh: extended code %d unimplemented", extended) - } - - if n > 0 { - err = c.adjustWindow(uint32(n)) - // sendWindowAdjust can return io.EOF if the remote - // peer has closed the connection, however we want to - // defer forwarding io.EOF to the caller of Read until - // the buffer has been drained. - if n > 0 && err == io.EOF { - err = nil - } - } - - return n, err -} - -func (c *channel) close() { - c.pending.eof() - c.extPending.eof() - close(c.msg) - close(c.incomingRequests) - c.writeMu.Lock() - // This is not necessary for a normal channel teardown, but if - // there was another error, it is. - c.sentClose = true - c.writeMu.Unlock() - // Unblock writers. - c.remoteWin.close() -} - -// responseMessageReceived is called when a success or failure message is -// received on a channel to check that such a message is reasonable for the -// given channel. 
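// The open-confirm/open-failure handling implemented below is what a caller
// exercises through Conn.OpenChannel. A client-side sketch opening a
// direct-tcpip channel; the payload struct is local to this sketch and follows
// the RFC 4254 section 7.2 layout.
package main

import "golang.org/x/crypto/ssh"

type directTCPIPMsg struct {
	DestAddr string
	DestPort uint32
	OrigAddr string
	OrigPort uint32
}

func openDirectTCPIP(conn ssh.Conn) (ssh.Channel, error) {
	payload := ssh.Marshal(&directTCPIPMsg{"127.0.0.1", 8080, "127.0.0.1", 0})
	ch, reqs, err := conn.OpenChannel("direct-tcpip", payload)
	if err != nil {
		// The peer answered with a channel-open failure.
		return nil, err
	}
	go ssh.DiscardRequests(reqs)
	return ch, nil
}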
-func (ch *channel) responseMessageReceived() error { - if ch.direction == channelInbound { - return errors.New("ssh: channel response message received on inbound channel") - } - if ch.decided { - return errors.New("ssh: duplicate response received for channel") - } - ch.decided = true - return nil -} - -func (ch *channel) handlePacket(packet []byte) error { - switch packet[0] { - case msgChannelData, msgChannelExtendedData: - return ch.handleData(packet) - case msgChannelClose: - ch.sendMessage(channelCloseMsg{PeersID: ch.remoteId}) - ch.mux.chanList.remove(ch.localId) - ch.close() - return nil - case msgChannelEOF: - // RFC 4254 is mute on how EOF affects dataExt messages but - // it is logical to signal EOF at the same time. - ch.extPending.eof() - ch.pending.eof() - return nil - } - - decoded, err := decode(packet) - if err != nil { - return err - } - - switch msg := decoded.(type) { - case *channelOpenFailureMsg: - if err := ch.responseMessageReceived(); err != nil { - return err - } - ch.mux.chanList.remove(msg.PeersID) - ch.msg <- msg - case *channelOpenConfirmMsg: - if err := ch.responseMessageReceived(); err != nil { - return err - } - if msg.MaxPacketSize < minPacketLength || msg.MaxPacketSize > 1<<31 { - return fmt.Errorf("ssh: invalid MaxPacketSize %d from peer", msg.MaxPacketSize) - } - ch.remoteId = msg.MyID - ch.maxRemotePayload = msg.MaxPacketSize - ch.remoteWin.add(msg.MyWindow) - ch.msg <- msg - case *windowAdjustMsg: - if !ch.remoteWin.add(msg.AdditionalBytes) { - return fmt.Errorf("ssh: invalid window update for %d bytes", msg.AdditionalBytes) - } - case *channelRequestMsg: - req := Request{ - Type: msg.Request, - WantReply: msg.WantReply, - Payload: msg.RequestSpecificData, - ch: ch, - } - - ch.incomingRequests <- &req - default: - ch.msg <- msg - } - return nil -} - -func (m *mux) newChannel(chanType string, direction channelDirection, extraData []byte) *channel { - ch := &channel{ - remoteWin: window{Cond: newCond()}, - myWindow: channelWindowSize, - pending: newBuffer(), - extPending: newBuffer(), - direction: direction, - incomingRequests: make(chan *Request, chanSize), - msg: make(chan interface{}, chanSize), - chanType: chanType, - extraData: extraData, - mux: m, - packetPool: make(map[uint32][]byte), - } - ch.localId = m.chanList.add(ch) - return ch -} - -var errUndecided = errors.New("ssh: must Accept or Reject channel") -var errDecidedAlready = errors.New("ssh: can call Accept or Reject only once") - -type extChannel struct { - code uint32 - ch *channel -} - -func (e *extChannel) Write(data []byte) (n int, err error) { - return e.ch.WriteExtended(data, e.code) -} - -func (e *extChannel) Read(data []byte) (n int, err error) { - return e.ch.ReadExtended(data, e.code) -} - -func (ch *channel) Accept() (Channel, <-chan *Request, error) { - if ch.decided { - return nil, nil, errDecidedAlready - } - ch.maxIncomingPayload = channelMaxPacket - confirm := channelOpenConfirmMsg{ - PeersID: ch.remoteId, - MyID: ch.localId, - MyWindow: ch.myWindow, - MaxPacketSize: ch.maxIncomingPayload, - } - ch.decided = true - if err := ch.sendMessage(confirm); err != nil { - return nil, nil, err - } - - return ch, ch.incomingRequests, nil -} - -func (ch *channel) Reject(reason RejectionReason, message string) error { - if ch.decided { - return errDecidedAlready - } - reject := channelOpenFailureMsg{ - PeersID: ch.remoteId, - Reason: reason, - Message: message, - Language: "en", - } - ch.decided = true - return ch.sendMessage(reject) -} - -func (ch *channel) Read(data []byte) (int, 
error) { - if !ch.decided { - return 0, errUndecided - } - return ch.ReadExtended(data, 0) -} - -func (ch *channel) Write(data []byte) (int, error) { - if !ch.decided { - return 0, errUndecided - } - return ch.WriteExtended(data, 0) -} - -func (ch *channel) CloseWrite() error { - if !ch.decided { - return errUndecided - } - ch.sentEOF = true - return ch.sendMessage(channelEOFMsg{ - PeersID: ch.remoteId}) -} - -func (ch *channel) Close() error { - if !ch.decided { - return errUndecided - } - - return ch.sendMessage(channelCloseMsg{ - PeersID: ch.remoteId}) -} - -// Extended returns an io.ReadWriter that sends and receives data on the given, -// SSH extended stream. Such streams are used, for example, for stderr. -func (ch *channel) Extended(code uint32) io.ReadWriter { - if !ch.decided { - return nil - } - return &extChannel{code, ch} -} - -func (ch *channel) Stderr() io.ReadWriter { - return ch.Extended(1) -} - -func (ch *channel) SendRequest(name string, wantReply bool, payload []byte) (bool, error) { - if !ch.decided { - return false, errUndecided - } - - if wantReply { - ch.sentRequestMu.Lock() - defer ch.sentRequestMu.Unlock() - } - - msg := channelRequestMsg{ - PeersID: ch.remoteId, - Request: name, - WantReply: wantReply, - RequestSpecificData: payload, - } - - if err := ch.sendMessage(msg); err != nil { - return false, err - } - - if wantReply { - m, ok := (<-ch.msg) - if !ok { - return false, io.EOF - } - switch m.(type) { - case *channelRequestFailureMsg: - return false, nil - case *channelRequestSuccessMsg: - return true, nil - default: - return false, fmt.Errorf("ssh: unexpected response to channel request: %#v", m) - } - } - - return false, nil -} - -// ackRequest either sends an ack or nack to the channel request. -func (ch *channel) ackRequest(ok bool) error { - if !ch.decided { - return errUndecided - } - - var msg interface{} - if !ok { - msg = channelRequestFailureMsg{ - PeersID: ch.remoteId, - } - } else { - msg = channelRequestSuccessMsg{ - PeersID: ch.remoteId, - } - } - return ch.sendMessage(msg) -} - -func (ch *channel) ChannelType() string { - return ch.chanType -} - -func (ch *channel) ExtraData() []byte { - return ch.extraData -} diff --git a/vendor/golang.org/x/crypto/ssh/cipher.go b/vendor/golang.org/x/crypto/ssh/cipher.go deleted file mode 100644 index 8bd6b3da..00000000 --- a/vendor/golang.org/x/crypto/ssh/cipher.go +++ /dev/null @@ -1,781 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "crypto/aes" - "crypto/cipher" - "crypto/des" - "crypto/rc4" - "crypto/subtle" - "encoding/binary" - "errors" - "fmt" - "hash" - "io" - "io/ioutil" - - "golang.org/x/crypto/chacha20" - "golang.org/x/crypto/poly1305" -) - -const ( - packetSizeMultiple = 16 // TODO(huin) this should be determined by the cipher. - - // RFC 4253 section 6.1 defines a minimum packet size of 32768 that implementations - // MUST be able to process (plus a few more kilobytes for padding and mac). The RFC - // indicates implementations SHOULD be able to handle larger packet sizes, but then - // waffles on about reasonable limits. - // - // OpenSSH caps their maxPacket at 256kB so we choose to do - // the same. maxPacket is also used to ensure that uint32 - // length fields do not overflow, so it should remain well - // below 4G. - maxPacket = 256 * 1024 -) - -// noneCipher implements cipher.Stream and provides no encryption. 
It is used -// by the transport before the first key-exchange. -type noneCipher struct{} - -func (c noneCipher) XORKeyStream(dst, src []byte) { - copy(dst, src) -} - -func newAESCTR(key, iv []byte) (cipher.Stream, error) { - c, err := aes.NewCipher(key) - if err != nil { - return nil, err - } - return cipher.NewCTR(c, iv), nil -} - -func newRC4(key, iv []byte) (cipher.Stream, error) { - return rc4.NewCipher(key) -} - -type cipherMode struct { - keySize int - ivSize int - create func(key, iv []byte, macKey []byte, algs directionAlgorithms) (packetCipher, error) -} - -func streamCipherMode(skip int, createFunc func(key, iv []byte) (cipher.Stream, error)) func(key, iv []byte, macKey []byte, algs directionAlgorithms) (packetCipher, error) { - return func(key, iv, macKey []byte, algs directionAlgorithms) (packetCipher, error) { - stream, err := createFunc(key, iv) - if err != nil { - return nil, err - } - - var streamDump []byte - if skip > 0 { - streamDump = make([]byte, 512) - } - - for remainingToDump := skip; remainingToDump > 0; { - dumpThisTime := remainingToDump - if dumpThisTime > len(streamDump) { - dumpThisTime = len(streamDump) - } - stream.XORKeyStream(streamDump[:dumpThisTime], streamDump[:dumpThisTime]) - remainingToDump -= dumpThisTime - } - - mac := macModes[algs.MAC].new(macKey) - return &streamPacketCipher{ - mac: mac, - etm: macModes[algs.MAC].etm, - macResult: make([]byte, mac.Size()), - cipher: stream, - }, nil - } -} - -// cipherModes documents properties of supported ciphers. Ciphers not included -// are not supported and will not be negotiated, even if explicitly requested in -// ClientConfig.Crypto.Ciphers. -var cipherModes = map[string]*cipherMode{ - // Ciphers from RFC4344, which introduced many CTR-based ciphers. Algorithms - // are defined in the order specified in the RFC. - "aes128-ctr": {16, aes.BlockSize, streamCipherMode(0, newAESCTR)}, - "aes192-ctr": {24, aes.BlockSize, streamCipherMode(0, newAESCTR)}, - "aes256-ctr": {32, aes.BlockSize, streamCipherMode(0, newAESCTR)}, - - // Ciphers from RFC4345, which introduces security-improved arcfour ciphers. - // They are defined in the order specified in the RFC. - "arcfour128": {16, 0, streamCipherMode(1536, newRC4)}, - "arcfour256": {32, 0, streamCipherMode(1536, newRC4)}, - - // Cipher defined in RFC 4253, which describes SSH Transport Layer Protocol. - // Note that this cipher is not safe, as stated in RFC 4253: "Arcfour (and - // RC4) has problems with weak keys, and should be used with caution." - // RFC4345 introduces improved versions of Arcfour. - "arcfour": {16, 0, streamCipherMode(0, newRC4)}, - - // AEAD ciphers - gcmCipherID: {16, 12, newGCMCipher}, - chacha20Poly1305ID: {64, 0, newChaCha20Cipher}, - - // CBC mode is insecure and so is not included in the default config. - // (See https://www.ieee-security.org/TC/SP2013/papers/4977a526.pdf). If absolutely - // needed, it's possible to specify a custom Config to enable it. - // You should expect that an active attacker can recover plaintext if - // you do. - aes128cbcID: {16, aes.BlockSize, newAESCBCCipher}, - - // 3des-cbc is insecure and is not included in the default - // config. - tripledescbcID: {24, des.BlockSize, newTripleDESCBCCipher}, -} - -// prefixLen is the length of the packet prefix that contains the packet length -// and number of padding bytes. -const prefixLen = 5 - -// streamPacketCipher is a packetCipher using a stream cipher. 
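// Cipher selection is driven by Config.Ciphers; only names present in the
// cipherModes map above can be negotiated. A sketch pinning the AEAD modes and
// one CTR fallback, and showing that the insecure CBC mode stays opt-in:
package main

import "golang.org/x/crypto/ssh"

func pinnedCiphers() ssh.Config {
	cfg := ssh.Config{
		Ciphers: []string{
			"chacha20-poly1305@openssh.com",
			"aes128-gcm@openssh.com",
			"aes256-ctr",
		},
	}
	// Opting into CBC is explicit and discouraged; see the caveat above.
	// cfg.Ciphers = append(cfg.Ciphers, "aes128-cbc")
	return cfg
}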
-type streamPacketCipher struct { - mac hash.Hash - cipher cipher.Stream - etm bool - - // The following members are to avoid per-packet allocations. - prefix [prefixLen]byte - seqNumBytes [4]byte - padding [2 * packetSizeMultiple]byte - packetData []byte - macResult []byte -} - -// readCipherPacket reads and decrypt a single packet from the reader argument. -func (s *streamPacketCipher) readCipherPacket(seqNum uint32, r io.Reader) ([]byte, error) { - if _, err := io.ReadFull(r, s.prefix[:]); err != nil { - return nil, err - } - - var encryptedPaddingLength [1]byte - if s.mac != nil && s.etm { - copy(encryptedPaddingLength[:], s.prefix[4:5]) - s.cipher.XORKeyStream(s.prefix[4:5], s.prefix[4:5]) - } else { - s.cipher.XORKeyStream(s.prefix[:], s.prefix[:]) - } - - length := binary.BigEndian.Uint32(s.prefix[0:4]) - paddingLength := uint32(s.prefix[4]) - - var macSize uint32 - if s.mac != nil { - s.mac.Reset() - binary.BigEndian.PutUint32(s.seqNumBytes[:], seqNum) - s.mac.Write(s.seqNumBytes[:]) - if s.etm { - s.mac.Write(s.prefix[:4]) - s.mac.Write(encryptedPaddingLength[:]) - } else { - s.mac.Write(s.prefix[:]) - } - macSize = uint32(s.mac.Size()) - } - - if length <= paddingLength+1 { - return nil, errors.New("ssh: invalid packet length, packet too small") - } - - if length > maxPacket { - return nil, errors.New("ssh: invalid packet length, packet too large") - } - - // the maxPacket check above ensures that length-1+macSize - // does not overflow. - if uint32(cap(s.packetData)) < length-1+macSize { - s.packetData = make([]byte, length-1+macSize) - } else { - s.packetData = s.packetData[:length-1+macSize] - } - - if _, err := io.ReadFull(r, s.packetData); err != nil { - return nil, err - } - mac := s.packetData[length-1:] - data := s.packetData[:length-1] - - if s.mac != nil && s.etm { - s.mac.Write(data) - } - - s.cipher.XORKeyStream(data, data) - - if s.mac != nil { - if !s.etm { - s.mac.Write(data) - } - s.macResult = s.mac.Sum(s.macResult[:0]) - if subtle.ConstantTimeCompare(s.macResult, mac) != 1 { - return nil, errors.New("ssh: MAC failure") - } - } - - return s.packetData[:length-paddingLength-1], nil -} - -// writeCipherPacket encrypts and sends a packet of data to the writer argument -func (s *streamPacketCipher) writeCipherPacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error { - if len(packet) > maxPacket { - return errors.New("ssh: packet too large") - } - - aadlen := 0 - if s.mac != nil && s.etm { - // packet length is not encrypted for EtM modes - aadlen = 4 - } - - paddingLength := packetSizeMultiple - (prefixLen+len(packet)-aadlen)%packetSizeMultiple - if paddingLength < 4 { - paddingLength += packetSizeMultiple - } - - length := len(packet) + 1 + paddingLength - binary.BigEndian.PutUint32(s.prefix[:], uint32(length)) - s.prefix[4] = byte(paddingLength) - padding := s.padding[:paddingLength] - if _, err := io.ReadFull(rand, padding); err != nil { - return err - } - - if s.mac != nil { - s.mac.Reset() - binary.BigEndian.PutUint32(s.seqNumBytes[:], seqNum) - s.mac.Write(s.seqNumBytes[:]) - - if s.etm { - // For EtM algorithms, the packet length must stay unencrypted, - // but the following data (padding length) must be encrypted - s.cipher.XORKeyStream(s.prefix[4:5], s.prefix[4:5]) - } - - s.mac.Write(s.prefix[:]) - - if !s.etm { - // For non-EtM algorithms, the algorithm is applied on unencrypted data - s.mac.Write(packet) - s.mac.Write(padding) - } - } - - if !(s.mac != nil && s.etm) { - // For EtM algorithms, the padding length has already been encrypted - 
// and the packet length must remain unencrypted - s.cipher.XORKeyStream(s.prefix[:], s.prefix[:]) - } - - s.cipher.XORKeyStream(packet, packet) - s.cipher.XORKeyStream(padding, padding) - - if s.mac != nil && s.etm { - // For EtM algorithms, packet and padding must be encrypted - s.mac.Write(packet) - s.mac.Write(padding) - } - - if _, err := w.Write(s.prefix[:]); err != nil { - return err - } - if _, err := w.Write(packet); err != nil { - return err - } - if _, err := w.Write(padding); err != nil { - return err - } - - if s.mac != nil { - s.macResult = s.mac.Sum(s.macResult[:0]) - if _, err := w.Write(s.macResult); err != nil { - return err - } - } - - return nil -} - -type gcmCipher struct { - aead cipher.AEAD - prefix [4]byte - iv []byte - buf []byte -} - -func newGCMCipher(key, iv, unusedMacKey []byte, unusedAlgs directionAlgorithms) (packetCipher, error) { - c, err := aes.NewCipher(key) - if err != nil { - return nil, err - } - - aead, err := cipher.NewGCM(c) - if err != nil { - return nil, err - } - - return &gcmCipher{ - aead: aead, - iv: iv, - }, nil -} - -const gcmTagSize = 16 - -func (c *gcmCipher) writeCipherPacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error { - // Pad out to multiple of 16 bytes. This is different from the - // stream cipher because that encrypts the length too. - padding := byte(packetSizeMultiple - (1+len(packet))%packetSizeMultiple) - if padding < 4 { - padding += packetSizeMultiple - } - - length := uint32(len(packet) + int(padding) + 1) - binary.BigEndian.PutUint32(c.prefix[:], length) - if _, err := w.Write(c.prefix[:]); err != nil { - return err - } - - if cap(c.buf) < int(length) { - c.buf = make([]byte, length) - } else { - c.buf = c.buf[:length] - } - - c.buf[0] = padding - copy(c.buf[1:], packet) - if _, err := io.ReadFull(rand, c.buf[1+len(packet):]); err != nil { - return err - } - c.buf = c.aead.Seal(c.buf[:0], c.iv, c.buf, c.prefix[:]) - if _, err := w.Write(c.buf); err != nil { - return err - } - c.incIV() - - return nil -} - -func (c *gcmCipher) incIV() { - for i := 4 + 7; i >= 4; i-- { - c.iv[i]++ - if c.iv[i] != 0 { - break - } - } -} - -func (c *gcmCipher) readCipherPacket(seqNum uint32, r io.Reader) ([]byte, error) { - if _, err := io.ReadFull(r, c.prefix[:]); err != nil { - return nil, err - } - length := binary.BigEndian.Uint32(c.prefix[:]) - if length > maxPacket { - return nil, errors.New("ssh: max packet length exceeded") - } - - if cap(c.buf) < int(length+gcmTagSize) { - c.buf = make([]byte, length+gcmTagSize) - } else { - c.buf = c.buf[:length+gcmTagSize] - } - - if _, err := io.ReadFull(r, c.buf); err != nil { - return nil, err - } - - plain, err := c.aead.Open(c.buf[:0], c.iv, c.buf, c.prefix[:]) - if err != nil { - return nil, err - } - c.incIV() - - padding := plain[0] - if padding < 4 { - // padding is a byte, so it automatically satisfies - // the maximum size, which is 255. - return nil, fmt.Errorf("ssh: illegal padding %d", padding) - } - - if int(padding+1) >= len(plain) { - return nil, fmt.Errorf("ssh: padding %d too large", padding) - } - plain = plain[1 : length-uint32(padding)] - return plain, nil -} - -// cbcCipher implements aes128-cbc cipher defined in RFC 4253 section 6.1 -type cbcCipher struct { - mac hash.Hash - macSize uint32 - decrypter cipher.BlockMode - encrypter cipher.BlockMode - - // The following members are to avoid per-packet allocations. 
- seqNumBytes [4]byte - packetData []byte - macResult []byte - - // Amount of data we should still read to hide which - // verification error triggered. - oracleCamouflage uint32 -} - -func newCBCCipher(c cipher.Block, key, iv, macKey []byte, algs directionAlgorithms) (packetCipher, error) { - cbc := &cbcCipher{ - mac: macModes[algs.MAC].new(macKey), - decrypter: cipher.NewCBCDecrypter(c, iv), - encrypter: cipher.NewCBCEncrypter(c, iv), - packetData: make([]byte, 1024), - } - if cbc.mac != nil { - cbc.macSize = uint32(cbc.mac.Size()) - } - - return cbc, nil -} - -func newAESCBCCipher(key, iv, macKey []byte, algs directionAlgorithms) (packetCipher, error) { - c, err := aes.NewCipher(key) - if err != nil { - return nil, err - } - - cbc, err := newCBCCipher(c, key, iv, macKey, algs) - if err != nil { - return nil, err - } - - return cbc, nil -} - -func newTripleDESCBCCipher(key, iv, macKey []byte, algs directionAlgorithms) (packetCipher, error) { - c, err := des.NewTripleDESCipher(key) - if err != nil { - return nil, err - } - - cbc, err := newCBCCipher(c, key, iv, macKey, algs) - if err != nil { - return nil, err - } - - return cbc, nil -} - -func maxUInt32(a, b int) uint32 { - if a > b { - return uint32(a) - } - return uint32(b) -} - -const ( - cbcMinPacketSizeMultiple = 8 - cbcMinPacketSize = 16 - cbcMinPaddingSize = 4 -) - -// cbcError represents a verification error that may leak information. -type cbcError string - -func (e cbcError) Error() string { return string(e) } - -func (c *cbcCipher) readCipherPacket(seqNum uint32, r io.Reader) ([]byte, error) { - p, err := c.readCipherPacketLeaky(seqNum, r) - if err != nil { - if _, ok := err.(cbcError); ok { - // Verification error: read a fixed amount of - // data, to make distinguishing between - // failing MAC and failing length check more - // difficult. - io.CopyN(ioutil.Discard, r, int64(c.oracleCamouflage)) - } - } - return p, err -} - -func (c *cbcCipher) readCipherPacketLeaky(seqNum uint32, r io.Reader) ([]byte, error) { - blockSize := c.decrypter.BlockSize() - - // Read the header, which will include some of the subsequent data in the - // case of block ciphers - this is copied back to the payload later. - // How many bytes of payload/padding will be read with this first read. - firstBlockLength := uint32((prefixLen + blockSize - 1) / blockSize * blockSize) - firstBlock := c.packetData[:firstBlockLength] - if _, err := io.ReadFull(r, firstBlock); err != nil { - return nil, err - } - - c.oracleCamouflage = maxPacket + 4 + c.macSize - firstBlockLength - - c.decrypter.CryptBlocks(firstBlock, firstBlock) - length := binary.BigEndian.Uint32(firstBlock[:4]) - if length > maxPacket { - return nil, cbcError("ssh: packet too large") - } - if length+4 < maxUInt32(cbcMinPacketSize, blockSize) { - // The minimum size of a packet is 16 (or the cipher block size, whichever - // is larger) bytes. - return nil, cbcError("ssh: packet too small") - } - // The length of the packet (including the length field but not the MAC) must - // be a multiple of the block size or 8, whichever is larger. 
- if (length+4)%maxUInt32(cbcMinPacketSizeMultiple, blockSize) != 0 { - return nil, cbcError("ssh: invalid packet length multiple") - } - - paddingLength := uint32(firstBlock[4]) - if paddingLength < cbcMinPaddingSize || length <= paddingLength+1 { - return nil, cbcError("ssh: invalid packet length") - } - - // Positions within the c.packetData buffer: - macStart := 4 + length - paddingStart := macStart - paddingLength - - // Entire packet size, starting before length, ending at end of mac. - entirePacketSize := macStart + c.macSize - - // Ensure c.packetData is large enough for the entire packet data. - if uint32(cap(c.packetData)) < entirePacketSize { - // Still need to upsize and copy, but this should be rare at runtime, only - // on upsizing the packetData buffer. - c.packetData = make([]byte, entirePacketSize) - copy(c.packetData, firstBlock) - } else { - c.packetData = c.packetData[:entirePacketSize] - } - - n, err := io.ReadFull(r, c.packetData[firstBlockLength:]) - if err != nil { - return nil, err - } - c.oracleCamouflage -= uint32(n) - - remainingCrypted := c.packetData[firstBlockLength:macStart] - c.decrypter.CryptBlocks(remainingCrypted, remainingCrypted) - - mac := c.packetData[macStart:] - if c.mac != nil { - c.mac.Reset() - binary.BigEndian.PutUint32(c.seqNumBytes[:], seqNum) - c.mac.Write(c.seqNumBytes[:]) - c.mac.Write(c.packetData[:macStart]) - c.macResult = c.mac.Sum(c.macResult[:0]) - if subtle.ConstantTimeCompare(c.macResult, mac) != 1 { - return nil, cbcError("ssh: MAC failure") - } - } - - return c.packetData[prefixLen:paddingStart], nil -} - -func (c *cbcCipher) writeCipherPacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error { - effectiveBlockSize := maxUInt32(cbcMinPacketSizeMultiple, c.encrypter.BlockSize()) - - // Length of encrypted portion of the packet (header, payload, padding). - // Enforce minimum padding and packet size. - encLength := maxUInt32(prefixLen+len(packet)+cbcMinPaddingSize, cbcMinPaddingSize) - // Enforce block size. - encLength = (encLength + effectiveBlockSize - 1) / effectiveBlockSize * effectiveBlockSize - - length := encLength - 4 - paddingLength := int(length) - (1 + len(packet)) - - // Overall buffer contains: header, payload, padding, mac. - // Space for the MAC is reserved in the capacity but not the slice length. - bufferSize := encLength + c.macSize - if uint32(cap(c.packetData)) < bufferSize { - c.packetData = make([]byte, encLength, bufferSize) - } else { - c.packetData = c.packetData[:encLength] - } - - p := c.packetData - - // Packet header. - binary.BigEndian.PutUint32(p, length) - p = p[4:] - p[0] = byte(paddingLength) - - // Payload. - p = p[1:] - copy(p, packet) - - // Padding. - p = p[len(packet):] - if _, err := io.ReadFull(rand, p); err != nil { - return err - } - - if c.mac != nil { - c.mac.Reset() - binary.BigEndian.PutUint32(c.seqNumBytes[:], seqNum) - c.mac.Write(c.seqNumBytes[:]) - c.mac.Write(c.packetData) - // The MAC is now appended into the capacity reserved for it earlier. 
- c.packetData = c.mac.Sum(c.packetData) - } - - c.encrypter.CryptBlocks(c.packetData[:encLength], c.packetData[:encLength]) - - if _, err := w.Write(c.packetData); err != nil { - return err - } - - return nil -} - -const chacha20Poly1305ID = "chacha20-poly1305@openssh.com" - -// chacha20Poly1305Cipher implements the chacha20-poly1305@openssh.com -// AEAD, which is described here: -// -// https://tools.ietf.org/html/draft-josefsson-ssh-chacha20-poly1305-openssh-00 -// -// the methods here also implement padding, which RFC4253 Section 6 -// also requires of stream ciphers. -type chacha20Poly1305Cipher struct { - lengthKey [32]byte - contentKey [32]byte - buf []byte -} - -func newChaCha20Cipher(key, unusedIV, unusedMACKey []byte, unusedAlgs directionAlgorithms) (packetCipher, error) { - if len(key) != 64 { - panic(len(key)) - } - - c := &chacha20Poly1305Cipher{ - buf: make([]byte, 256), - } - - copy(c.contentKey[:], key[:32]) - copy(c.lengthKey[:], key[32:]) - return c, nil -} - -func (c *chacha20Poly1305Cipher) readCipherPacket(seqNum uint32, r io.Reader) ([]byte, error) { - nonce := make([]byte, 12) - binary.BigEndian.PutUint32(nonce[8:], seqNum) - s, err := chacha20.NewUnauthenticatedCipher(c.contentKey[:], nonce) - if err != nil { - return nil, err - } - var polyKey, discardBuf [32]byte - s.XORKeyStream(polyKey[:], polyKey[:]) - s.XORKeyStream(discardBuf[:], discardBuf[:]) // skip the next 32 bytes - - encryptedLength := c.buf[:4] - if _, err := io.ReadFull(r, encryptedLength); err != nil { - return nil, err - } - - var lenBytes [4]byte - ls, err := chacha20.NewUnauthenticatedCipher(c.lengthKey[:], nonce) - if err != nil { - return nil, err - } - ls.XORKeyStream(lenBytes[:], encryptedLength) - - length := binary.BigEndian.Uint32(lenBytes[:]) - if length > maxPacket { - return nil, errors.New("ssh: invalid packet length, packet too large") - } - - contentEnd := 4 + length - packetEnd := contentEnd + poly1305.TagSize - if uint32(cap(c.buf)) < packetEnd { - c.buf = make([]byte, packetEnd) - copy(c.buf[:], encryptedLength) - } else { - c.buf = c.buf[:packetEnd] - } - - if _, err := io.ReadFull(r, c.buf[4:packetEnd]); err != nil { - return nil, err - } - - var mac [poly1305.TagSize]byte - copy(mac[:], c.buf[contentEnd:packetEnd]) - if !poly1305.Verify(&mac, c.buf[:contentEnd], &polyKey) { - return nil, errors.New("ssh: MAC failure") - } - - plain := c.buf[4:contentEnd] - s.XORKeyStream(plain, plain) - - padding := plain[0] - if padding < 4 { - // padding is a byte, so it automatically satisfies - // the maximum size, which is 255. - return nil, fmt.Errorf("ssh: illegal padding %d", padding) - } - - if int(padding)+1 >= len(plain) { - return nil, fmt.Errorf("ssh: padding %d too large", padding) - } - - plain = plain[1 : len(plain)-int(padding)] - - return plain, nil -} - -func (c *chacha20Poly1305Cipher) writeCipherPacket(seqNum uint32, w io.Writer, rand io.Reader, payload []byte) error { - nonce := make([]byte, 12) - binary.BigEndian.PutUint32(nonce[8:], seqNum) - s, err := chacha20.NewUnauthenticatedCipher(c.contentKey[:], nonce) - if err != nil { - return err - } - var polyKey, discardBuf [32]byte - s.XORKeyStream(polyKey[:], polyKey[:]) - s.XORKeyStream(discardBuf[:], discardBuf[:]) // skip the next 32 bytes - - // There is no blocksize, so fall back to multiple of 8 byte - // padding, as described in RFC 4253, Sec 6. 
- const packetSizeMultiple = 8 - - padding := packetSizeMultiple - (1+len(payload))%packetSizeMultiple - if padding < 4 { - padding += packetSizeMultiple - } - - // size (4 bytes), padding (1), payload, padding, tag. - totalLength := 4 + 1 + len(payload) + padding + poly1305.TagSize - if cap(c.buf) < totalLength { - c.buf = make([]byte, totalLength) - } else { - c.buf = c.buf[:totalLength] - } - - binary.BigEndian.PutUint32(c.buf, uint32(1+len(payload)+padding)) - ls, err := chacha20.NewUnauthenticatedCipher(c.lengthKey[:], nonce) - if err != nil { - return err - } - ls.XORKeyStream(c.buf, c.buf[:4]) - c.buf[4] = byte(padding) - copy(c.buf[5:], payload) - packetEnd := 5 + len(payload) + padding - if _, err := io.ReadFull(rand, c.buf[5+len(payload):packetEnd]); err != nil { - return err - } - - s.XORKeyStream(c.buf[4:], c.buf[4:packetEnd]) - - var mac [poly1305.TagSize]byte - poly1305.Sum(&mac, c.buf[:packetEnd], &polyKey) - - copy(c.buf[packetEnd:], mac[:]) - - if _, err := w.Write(c.buf); err != nil { - return err - } - return nil -} diff --git a/vendor/golang.org/x/crypto/ssh/client.go b/vendor/golang.org/x/crypto/ssh/client.go deleted file mode 100644 index 99f68bd3..00000000 --- a/vendor/golang.org/x/crypto/ssh/client.go +++ /dev/null @@ -1,278 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "bytes" - "errors" - "fmt" - "net" - "os" - "sync" - "time" -) - -// Client implements a traditional SSH client that supports shells, -// subprocesses, TCP port/streamlocal forwarding and tunneled dialing. -type Client struct { - Conn - - handleForwardsOnce sync.Once // guards calling (*Client).handleForwards - - forwards forwardList // forwarded tcpip connections from the remote side - mu sync.Mutex - channelHandlers map[string]chan NewChannel -} - -// HandleChannelOpen returns a channel on which NewChannel requests -// for the given type are sent. If the type already is being handled, -// nil is returned. The channel is closed when the connection is closed. -func (c *Client) HandleChannelOpen(channelType string) <-chan NewChannel { - c.mu.Lock() - defer c.mu.Unlock() - if c.channelHandlers == nil { - // The SSH channel has been closed. - c := make(chan NewChannel) - close(c) - return c - } - - ch := c.channelHandlers[channelType] - if ch != nil { - return nil - } - - ch = make(chan NewChannel, chanSize) - c.channelHandlers[channelType] = ch - return ch -} - -// NewClient creates a Client on top of the given connection. -func NewClient(c Conn, chans <-chan NewChannel, reqs <-chan *Request) *Client { - conn := &Client{ - Conn: c, - channelHandlers: make(map[string]chan NewChannel, 1), - } - - go conn.handleGlobalRequests(reqs) - go conn.handleChannelOpens(chans) - go func() { - conn.Wait() - conn.forwards.closeAll() - }() - return conn -} - -// NewClientConn establishes an authenticated SSH connection using c -// as the underlying transport. The Request and NewChannel channels -// must be serviced or the connection will hang. 
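// A sketch of the lower-level path the comment above describes: hand an
// existing net.Conn to NewClientConn, then wrap it with NewClient so the
// channel and request streams are serviced.
package main

import (
	"net"

	"golang.org/x/crypto/ssh"
)

func clientFromConn(c net.Conn, addr string, config *ssh.ClientConfig) (*ssh.Client, error) {
	conn, chans, reqs, err := ssh.NewClientConn(c, addr, config)
	if err != nil {
		return nil, err
	}
	// NewClient takes over chans and reqs, satisfying the servicing requirement.
	return ssh.NewClient(conn, chans, reqs), nil
}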
-func NewClientConn(c net.Conn, addr string, config *ClientConfig) (Conn, <-chan NewChannel, <-chan *Request, error) { - fullConf := *config - fullConf.SetDefaults() - if fullConf.HostKeyCallback == nil { - c.Close() - return nil, nil, nil, errors.New("ssh: must specify HostKeyCallback") - } - - conn := &connection{ - sshConn: sshConn{conn: c, user: fullConf.User}, - } - - if err := conn.clientHandshake(addr, &fullConf); err != nil { - c.Close() - return nil, nil, nil, fmt.Errorf("ssh: handshake failed: %v", err) - } - conn.mux = newMux(conn.transport) - return conn, conn.mux.incomingChannels, conn.mux.incomingRequests, nil -} - -// clientHandshake performs the client side key exchange. See RFC 4253 Section -// 7. -func (c *connection) clientHandshake(dialAddress string, config *ClientConfig) error { - if config.ClientVersion != "" { - c.clientVersion = []byte(config.ClientVersion) - } else { - c.clientVersion = []byte(packageVersion) - } - var err error - c.serverVersion, err = exchangeVersions(c.sshConn.conn, c.clientVersion) - if err != nil { - return err - } - - c.transport = newClientTransport( - newTransport(c.sshConn.conn, config.Rand, true /* is client */), - c.clientVersion, c.serverVersion, config, dialAddress, c.sshConn.RemoteAddr()) - if err := c.transport.waitSession(); err != nil { - return err - } - - c.sessionID = c.transport.getSessionID() - return c.clientAuthenticate(config) -} - -// verifyHostKeySignature verifies the host key obtained in the key -// exchange. -func verifyHostKeySignature(hostKey PublicKey, result *kexResult) error { - sig, rest, ok := parseSignatureBody(result.Signature) - if len(rest) > 0 || !ok { - return errors.New("ssh: signature parse error") - } - - return hostKey.Verify(result.H, sig) -} - -// NewSession opens a new Session for this client. (A session is a remote -// execution of a program.) -func (c *Client) NewSession() (*Session, error) { - ch, in, err := c.OpenChannel("session", nil) - if err != nil { - return nil, err - } - return newSession(ch, in) -} - -func (c *Client) handleGlobalRequests(incoming <-chan *Request) { - for r := range incoming { - // This handles keepalive messages and matches - // the behaviour of OpenSSH. - r.Reply(false, nil) - } -} - -// handleChannelOpens channel open messages from the remote side. -func (c *Client) handleChannelOpens(in <-chan NewChannel) { - for ch := range in { - c.mu.Lock() - handler := c.channelHandlers[ch.ChannelType()] - c.mu.Unlock() - - if handler != nil { - handler <- ch - } else { - ch.Reject(UnknownChannelType, fmt.Sprintf("unknown channel type: %v", ch.ChannelType())) - } - } - - c.mu.Lock() - for _, ch := range c.channelHandlers { - close(ch) - } - c.channelHandlers = nil - c.mu.Unlock() -} - -// Dial starts a client connection to the given SSH server. It is a -// convenience function that connects to the given network address, -// initiates the SSH handshake, and then sets up a Client. For access -// to incoming channels and requests, use net.Dial with NewClientConn -// instead. -func Dial(network, addr string, config *ClientConfig) (*Client, error) { - conn, err := net.DialTimeout(network, addr, config.Timeout) - if err != nil { - return nil, err - } - c, chans, reqs, err := NewClientConn(conn, addr, config) - if err != nil { - return nil, err - } - return NewClient(c, chans, reqs), nil -} - -// HostKeyCallback is the function type used for verifying server -// keys. A HostKeyCallback must return nil if the host key is OK, or -// an error to reject it. 
It receives the hostname as passed to Dial -// or NewClientConn. The remote address is the RemoteAddr of the -// net.Conn underlying the SSH connection. -type HostKeyCallback func(hostname string, remote net.Addr, key PublicKey) error - -// BannerCallback is the function type used for treat the banner sent by -// the server. A BannerCallback receives the message sent by the remote server. -type BannerCallback func(message string) error - -// A ClientConfig structure is used to configure a Client. It must not be -// modified after having been passed to an SSH function. -type ClientConfig struct { - // Config contains configuration that is shared between clients and - // servers. - Config - - // User contains the username to authenticate as. - User string - - // Auth contains possible authentication methods to use with the - // server. Only the first instance of a particular RFC 4252 method will - // be used during authentication. - Auth []AuthMethod - - // HostKeyCallback is called during the cryptographic - // handshake to validate the server's host key. The client - // configuration must supply this callback for the connection - // to succeed. The functions InsecureIgnoreHostKey or - // FixedHostKey can be used for simplistic host key checks. - HostKeyCallback HostKeyCallback - - // BannerCallback is called during the SSH dance to display a custom - // server's message. The client configuration can supply this callback to - // handle it as wished. The function BannerDisplayStderr can be used for - // simplistic display on Stderr. - BannerCallback BannerCallback - - // ClientVersion contains the version identification string that will - // be used for the connection. If empty, a reasonable default is used. - ClientVersion string - - // HostKeyAlgorithms lists the key types that the client will - // accept from the server as host key, in order of - // preference. If empty, a reasonable default is used. Any - // string returned from PublicKey.Type method may be used, or - // any of the CertAlgoXxxx and KeyAlgoXxxx constants. - HostKeyAlgorithms []string - - // Timeout is the maximum amount of time for the TCP connection to establish. - // - // A Timeout of zero means no timeout. - Timeout time.Duration -} - -// InsecureIgnoreHostKey returns a function that can be used for -// ClientConfig.HostKeyCallback to accept any host key. It should -// not be used for production code. -func InsecureIgnoreHostKey() HostKeyCallback { - return func(hostname string, remote net.Addr, key PublicKey) error { - return nil - } -} - -type fixedHostKey struct { - key PublicKey -} - -func (f *fixedHostKey) check(hostname string, remote net.Addr, key PublicKey) error { - if f.key == nil { - return fmt.Errorf("ssh: required host key was nil") - } - if !bytes.Equal(key.Marshal(), f.key.Marshal()) { - return fmt.Errorf("ssh: host key mismatch") - } - return nil -} - -// FixedHostKey returns a function for use in -// ClientConfig.HostKeyCallback to accept only a specific host key. -func FixedHostKey(key PublicKey) HostKeyCallback { - hk := &fixedHostKey{key} - return hk.check -} - -// BannerDisplayStderr returns a function that can be used for -// ClientConfig.BannerCallback to display banners on os.Stderr. 
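// Pulling the pieces above together: a sketch of a complete ClientConfig using
// FixedHostKey and password auth, then a one-shot remote command. hostKey and
// the credentials are placeholders; a real client would load the key from a
// known_hosts entry and prompt for the password.
package main

import (
	"fmt"
	"log"
	"time"

	"golang.org/x/crypto/ssh"
)

func runUptime(hostKey ssh.PublicKey) {
	config := &ssh.ClientConfig{
		User:            "alice",
		Auth:            []ssh.AuthMethod{ssh.Password("s3cret")}, // demo value
		HostKeyCallback: ssh.FixedHostKey(hostKey),
		Timeout:         10 * time.Second,
	}
	client, err := ssh.Dial("tcp", "example.com:22", config)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	session, err := client.NewSession()
	if err != nil {
		log.Fatal(err)
	}
	defer session.Close()

	out, err := session.CombinedOutput("uptime")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s", out)
}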
-func BannerDisplayStderr() BannerCallback { - return func(banner string) error { - _, err := os.Stderr.WriteString(banner) - - return err - } -} diff --git a/vendor/golang.org/x/crypto/ssh/client_auth.go b/vendor/golang.org/x/crypto/ssh/client_auth.go deleted file mode 100644 index c611aeb6..00000000 --- a/vendor/golang.org/x/crypto/ssh/client_auth.go +++ /dev/null @@ -1,641 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "bytes" - "errors" - "fmt" - "io" -) - -type authResult int - -const ( - authFailure authResult = iota - authPartialSuccess - authSuccess -) - -// clientAuthenticate authenticates with the remote server. See RFC 4252. -func (c *connection) clientAuthenticate(config *ClientConfig) error { - // initiate user auth session - if err := c.transport.writePacket(Marshal(&serviceRequestMsg{serviceUserAuth})); err != nil { - return err - } - packet, err := c.transport.readPacket() - if err != nil { - return err - } - var serviceAccept serviceAcceptMsg - if err := Unmarshal(packet, &serviceAccept); err != nil { - return err - } - - // during the authentication phase the client first attempts the "none" method - // then any untried methods suggested by the server. - var tried []string - var lastMethods []string - - sessionID := c.transport.getSessionID() - for auth := AuthMethod(new(noneAuth)); auth != nil; { - ok, methods, err := auth.auth(sessionID, config.User, c.transport, config.Rand) - if err != nil { - return err - } - if ok == authSuccess { - // success - return nil - } else if ok == authFailure { - if m := auth.method(); !contains(tried, m) { - tried = append(tried, m) - } - } - if methods == nil { - methods = lastMethods - } - lastMethods = methods - - auth = nil - - findNext: - for _, a := range config.Auth { - candidateMethod := a.method() - if contains(tried, candidateMethod) { - continue - } - for _, meth := range methods { - if meth == candidateMethod { - auth = a - break findNext - } - } - } - } - return fmt.Errorf("ssh: unable to authenticate, attempted methods %v, no supported methods remain", tried) -} - -func contains(list []string, e string) bool { - for _, s := range list { - if s == e { - return true - } - } - return false -} - -// An AuthMethod represents an instance of an RFC 4252 authentication method. -type AuthMethod interface { - // auth authenticates user over transport t. - // Returns true if authentication is successful. - // If authentication is not successful, a []string of alternative - // method names is returned. If the slice is nil, it will be ignored - // and the previous set of possible methods will be reused. - auth(session []byte, user string, p packetConn, rand io.Reader) (authResult, []string, error) - - // method returns the RFC 4252 method name. - method() string -} - -// "none" authentication, RFC 4252 section 5.2. -type noneAuth int - -func (n *noneAuth) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) { - if err := c.writePacket(Marshal(&userAuthRequestMsg{ - User: user, - Service: serviceSSH, - Method: "none", - })); err != nil { - return authFailure, nil, err - } - - return handleAuthResponse(c) -} - -func (n *noneAuth) method() string { - return "none" -} - -// passwordCallback is an AuthMethod that fetches the password through -// a function call, e.g. by prompting the user. 
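// passwordCallback's public constructors are Password and PasswordCallback,
// defined just below. A sketch of the prompting variant, reading one line from
// stdin; a real client would disable echo, e.g. with golang.org/x/term.
package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"

	"golang.org/x/crypto/ssh"
)

func promptingAuth() ssh.AuthMethod {
	return ssh.PasswordCallback(func() (string, error) {
		fmt.Fprint(os.Stderr, "password: ")
		line, err := bufio.NewReader(os.Stdin).ReadString('\n')
		if err != nil {
			return "", err
		}
		return strings.TrimRight(line, "\r\n"), nil
	})
}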
-type passwordCallback func() (password string, err error) - -func (cb passwordCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) { - type passwordAuthMsg struct { - User string `sshtype:"50"` - Service string - Method string - Reply bool - Password string - } - - pw, err := cb() - // REVIEW NOTE: is there a need to support skipping a password attempt? - // The program may only find out that the user doesn't have a password - // when prompting. - if err != nil { - return authFailure, nil, err - } - - if err := c.writePacket(Marshal(&passwordAuthMsg{ - User: user, - Service: serviceSSH, - Method: cb.method(), - Reply: false, - Password: pw, - })); err != nil { - return authFailure, nil, err - } - - return handleAuthResponse(c) -} - -func (cb passwordCallback) method() string { - return "password" -} - -// Password returns an AuthMethod using the given password. -func Password(secret string) AuthMethod { - return passwordCallback(func() (string, error) { return secret, nil }) -} - -// PasswordCallback returns an AuthMethod that uses a callback for -// fetching a password. -func PasswordCallback(prompt func() (secret string, err error)) AuthMethod { - return passwordCallback(prompt) -} - -type publickeyAuthMsg struct { - User string `sshtype:"50"` - Service string - Method string - // HasSig indicates to the receiver packet that the auth request is signed and - // should be used for authentication of the request. - HasSig bool - Algoname string - PubKey []byte - // Sig is tagged with "rest" so Marshal will exclude it during - // validateKey - Sig []byte `ssh:"rest"` -} - -// publicKeyCallback is an AuthMethod that uses a set of key -// pairs for authentication. -type publicKeyCallback func() ([]Signer, error) - -func (cb publicKeyCallback) method() string { - return "publickey" -} - -func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) { - // Authentication is performed by sending an enquiry to test if a key is - // acceptable to the remote. If the key is acceptable, the client will - // attempt to authenticate with the valid key. If not the client will repeat - // the process with the remaining keys. - - signers, err := cb() - if err != nil { - return authFailure, nil, err - } - var methods []string - for _, signer := range signers { - ok, err := validateKey(signer.PublicKey(), user, c) - if err != nil { - return authFailure, nil, err - } - if !ok { - continue - } - - pub := signer.PublicKey() - pubKey := pub.Marshal() - sign, err := signer.Sign(rand, buildDataSignedForAuth(session, userAuthRequestMsg{ - User: user, - Service: serviceSSH, - Method: cb.method(), - }, []byte(pub.Type()), pubKey)) - if err != nil { - return authFailure, nil, err - } - - // manually wrap the serialized signature in a string - s := Marshal(sign) - sig := make([]byte, stringLength(len(s))) - marshalString(sig, s) - msg := publickeyAuthMsg{ - User: user, - Service: serviceSSH, - Method: cb.method(), - HasSig: true, - Algoname: pub.Type(), - PubKey: pubKey, - Sig: sig, - } - p := Marshal(&msg) - if err := c.writePacket(p); err != nil { - return authFailure, nil, err - } - var success authResult - success, methods, err = handleAuthResponse(c) - if err != nil { - return authFailure, nil, err - } - - // If authentication succeeds or the list of available methods does not - // contain the "publickey" method, do not attempt to authenticate with any - // other keys. 
According to RFC 4252 Section 7, the latter can occur when - // additional authentication methods are required. - if success == authSuccess || !containsMethod(methods, cb.method()) { - return success, methods, err - } - } - - return authFailure, methods, nil -} - -func containsMethod(methods []string, method string) bool { - for _, m := range methods { - if m == method { - return true - } - } - - return false -} - -// validateKey validates the key provided is acceptable to the server. -func validateKey(key PublicKey, user string, c packetConn) (bool, error) { - pubKey := key.Marshal() - msg := publickeyAuthMsg{ - User: user, - Service: serviceSSH, - Method: "publickey", - HasSig: false, - Algoname: key.Type(), - PubKey: pubKey, - } - if err := c.writePacket(Marshal(&msg)); err != nil { - return false, err - } - - return confirmKeyAck(key, c) -} - -func confirmKeyAck(key PublicKey, c packetConn) (bool, error) { - pubKey := key.Marshal() - algoname := key.Type() - - for { - packet, err := c.readPacket() - if err != nil { - return false, err - } - switch packet[0] { - case msgUserAuthBanner: - if err := handleBannerResponse(c, packet); err != nil { - return false, err - } - case msgUserAuthPubKeyOk: - var msg userAuthPubKeyOkMsg - if err := Unmarshal(packet, &msg); err != nil { - return false, err - } - if msg.Algo != algoname || !bytes.Equal(msg.PubKey, pubKey) { - return false, nil - } - return true, nil - case msgUserAuthFailure: - return false, nil - default: - return false, unexpectedMessageError(msgUserAuthSuccess, packet[0]) - } - } -} - -// PublicKeys returns an AuthMethod that uses the given key -// pairs. -func PublicKeys(signers ...Signer) AuthMethod { - return publicKeyCallback(func() ([]Signer, error) { return signers, nil }) -} - -// PublicKeysCallback returns an AuthMethod that runs the given -// function to obtain a list of key pairs. -func PublicKeysCallback(getSigners func() (signers []Signer, err error)) AuthMethod { - return publicKeyCallback(getSigners) -} - -// handleAuthResponse returns whether the preceding authentication request succeeded -// along with a list of remaining authentication methods to try next and -// an error if an unexpected response was received. -func handleAuthResponse(c packetConn) (authResult, []string, error) { - for { - packet, err := c.readPacket() - if err != nil { - return authFailure, nil, err - } - - switch packet[0] { - case msgUserAuthBanner: - if err := handleBannerResponse(c, packet); err != nil { - return authFailure, nil, err - } - case msgUserAuthFailure: - var msg userAuthFailureMsg - if err := Unmarshal(packet, &msg); err != nil { - return authFailure, nil, err - } - if msg.PartialSuccess { - return authPartialSuccess, msg.Methods, nil - } - return authFailure, msg.Methods, nil - case msgUserAuthSuccess: - return authSuccess, nil, nil - default: - return authFailure, nil, unexpectedMessageError(msgUserAuthSuccess, packet[0]) - } - } -} - -func handleBannerResponse(c packetConn, packet []byte) error { - var msg userAuthBannerMsg - if err := Unmarshal(packet, &msg); err != nil { - return err - } - - transport, ok := c.(*handshakeTransport) - if !ok { - return nil - } - - if transport.bannerCallback != nil { - return transport.bannerCallback(msg.Message) - } - - return nil -} - -// KeyboardInteractiveChallenge should print questions, optionally -// disabling echoing (e.g. for passwords), and return all the answers. -// Challenge may be called multiple times in a single session. 
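// A sketch of the usual way publicKeyCallback is constructed: parse a private
// key file into a Signer and wrap it with PublicKeys. keyPath is illustrative.
package main

import (
	"os"

	"golang.org/x/crypto/ssh"
)

func publicKeyAuth(keyPath string) (ssh.AuthMethod, error) {
	pemBytes, err := os.ReadFile(keyPath)
	if err != nil {
		return nil, err
	}
	signer, err := ssh.ParsePrivateKey(pemBytes)
	if err != nil {
		return nil, err
	}
	return ssh.PublicKeys(signer), nil
}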
After
-// successful authentication, the server may send a challenge with no
-// questions, for which the user and instruction messages should be
-// printed. RFC 4256 section 3.3 details how the UI should behave for
-// both CLI and GUI environments.
-type KeyboardInteractiveChallenge func(user, instruction string, questions []string, echos []bool) (answers []string, err error)
-
-// KeyboardInteractive returns an AuthMethod using a prompt/response
-// sequence controlled by the server.
-func KeyboardInteractive(challenge KeyboardInteractiveChallenge) AuthMethod {
-	return challenge
-}
-
-func (cb KeyboardInteractiveChallenge) method() string {
-	return "keyboard-interactive"
-}
-
-func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) {
-	type initiateMsg struct {
-		User       string `sshtype:"50"`
-		Service    string
-		Method     string
-		Language   string
-		Submethods string
-	}
-
-	if err := c.writePacket(Marshal(&initiateMsg{
-		User:    user,
-		Service: serviceSSH,
-		Method:  "keyboard-interactive",
-	})); err != nil {
-		return authFailure, nil, err
-	}
-
-	for {
-		packet, err := c.readPacket()
-		if err != nil {
-			return authFailure, nil, err
-		}
-
-		// like handleAuthResponse, but with fewer options.
-		switch packet[0] {
-		case msgUserAuthBanner:
-			if err := handleBannerResponse(c, packet); err != nil {
-				return authFailure, nil, err
-			}
-			continue
-		case msgUserAuthInfoRequest:
-			// OK
-		case msgUserAuthFailure:
-			var msg userAuthFailureMsg
-			if err := Unmarshal(packet, &msg); err != nil {
-				return authFailure, nil, err
-			}
-			if msg.PartialSuccess {
-				return authPartialSuccess, msg.Methods, nil
-			}
-			return authFailure, msg.Methods, nil
-		case msgUserAuthSuccess:
-			return authSuccess, nil, nil
-		default:
-			return authFailure, nil, unexpectedMessageError(msgUserAuthInfoRequest, packet[0])
-		}
-
-		var msg userAuthInfoRequestMsg
-		if err := Unmarshal(packet, &msg); err != nil {
-			return authFailure, nil, err
-		}
-
-		// Manually unpack the prompt/echo pairs.
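// The buffer unpacked below follows RFC 4256 section 3.2: NumPrompts
// entries, each a uint32-length-prefixed prompt string followed by a
// single boolean echo byte:
//
//	uint32 len | prompt bytes | echo (0 or 1) | ... repeated ...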
-		rest := msg.Prompts
-		var prompts []string
-		var echos []bool
-		for i := 0; i < int(msg.NumPrompts); i++ {
-			prompt, r, ok := parseString(rest)
-			if !ok || len(r) == 0 {
-				return authFailure, nil, errors.New("ssh: prompt format error")
-			}
-			prompts = append(prompts, string(prompt))
-			echos = append(echos, r[0] != 0)
-			rest = r[1:]
-		}
-
-		if len(rest) != 0 {
-			return authFailure, nil, errors.New("ssh: extra data following keyboard-interactive pairs")
-		}
-
-		answers, err := cb(msg.User, msg.Instruction, prompts, echos)
-		if err != nil {
-			return authFailure, nil, err
-		}
-
-		if len(answers) != len(prompts) {
-			return authFailure, nil, fmt.Errorf("ssh: incorrect number of answers from keyboard-interactive callback %d (expected %d)", len(answers), len(prompts))
-		}
-		responseLength := 1 + 4
-		for _, a := range answers {
-			responseLength += stringLength(len(a))
-		}
-		serialized := make([]byte, responseLength)
-		p := serialized
-		p[0] = msgUserAuthInfoResponse
-		p = p[1:]
-		p = marshalUint32(p, uint32(len(answers)))
-		for _, a := range answers {
-			p = marshalString(p, []byte(a))
-		}
-
-		if err := c.writePacket(serialized); err != nil {
-			return authFailure, nil, err
-		}
-	}
-}
-
-type retryableAuthMethod struct {
-	authMethod AuthMethod
-	maxTries   int
-}
-
-func (r *retryableAuthMethod) auth(session []byte, user string, c packetConn, rand io.Reader) (ok authResult, methods []string, err error) {
-	for i := 0; r.maxTries <= 0 || i < r.maxTries; i++ {
-		ok, methods, err = r.authMethod.auth(session, user, c, rand)
-		if ok != authFailure || err != nil { // success, partial success, or error all terminate the loop
-			return ok, methods, err
-		}
-	}
-	return ok, methods, err
-}
-
-func (r *retryableAuthMethod) method() string {
-	return r.authMethod.method()
-}
-
-// RetryableAuthMethod is a decorator for other auth methods enabling them to
-// be retried up to maxTries before considering that AuthMethod itself failed.
-// If maxTries is <= 0, it will retry indefinitely.
-//
-// This is useful for interactive clients using challenge/response type
-// authentication (e.g. Keyboard-Interactive, Password, etc.) where the user
-// could mistype their response, resulting in the server issuing a
-// SSH_MSG_USERAUTH_FAILURE (RFC 4252 section 8 [password] and RFC 4256
-// section 3.4 [keyboard-interactive]). Without this decorator, the
-// non-retryable AuthMethod would be removed from future consideration and
-// never tried again, so the user would never be able to retry their entry.
-func RetryableAuthMethod(auth AuthMethod, maxTries int) AuthMethod {
-	return &retryableAuthMethod{authMethod: auth, maxTries: maxTries}
-}
-
-// GSSAPIWithMICAuthMethod is an AuthMethod with "gssapi-with-mic" authentication.
-// See RFC 4462 section 3.
-// gssAPIClient is an implementation of the GSSAPIClient interface; see the
-// definition of the interface for details.
-// target is the server host you want to log in to.
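A minimal wiring sketch (not from the original file; gssClient stands in for
any concrete GSSAPIClient implementation, and the user and host names are
illustrative):

	// Sketch: enabling gssapi-with-mic in a client configuration.
	config := &ClientConfig{
		User: "alice",
		Auth: []AuthMethod{
			GSSAPIWithMICAuthMethod(gssClient, "ssh.example.com"),
		},
		HostKeyCallback: InsecureIgnoreHostKey(),
	}
	_ = config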
-func GSSAPIWithMICAuthMethod(gssAPIClient GSSAPIClient, target string) AuthMethod {
-	if gssAPIClient == nil {
-		panic("ssh: gssAPIClient must not be nil when using gssapi-with-mic")
-	}
-	return &gssAPIWithMICCallback{gssAPIClient: gssAPIClient, target: target}
-}
-
-type gssAPIWithMICCallback struct {
-	gssAPIClient GSSAPIClient
-	target       string
-}
-
-func (g *gssAPIWithMICCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) {
-	m := &userAuthRequestMsg{
-		User:    user,
-		Service: serviceSSH,
-		Method:  g.method(),
-	}
-	// The GSS-API authentication method is initiated when the client sends an SSH_MSG_USERAUTH_REQUEST.
-	// See RFC 4462 section 3.2.
-	m.Payload = appendU32(m.Payload, 1)
-	m.Payload = appendString(m.Payload, string(krb5OID))
-	if err := c.writePacket(Marshal(m)); err != nil {
-		return authFailure, nil, err
-	}
-	// The server responds to the SSH_MSG_USERAUTH_REQUEST with either an
-	// SSH_MSG_USERAUTH_FAILURE if none of the mechanisms are supported or
-	// with an SSH_MSG_USERAUTH_GSSAPI_RESPONSE.
-	// See RFC 4462 section 3.3.
-	// OpenSSH supports only the Kerberos V5 mechanism for GSS-API
-	// authentication, so the selected mechanism is not validated here.
-	packet, err := c.readPacket()
-	if err != nil {
-		return authFailure, nil, err
-	}
-	userAuthGSSAPIResp := &userAuthGSSAPIResponse{}
-	if err := Unmarshal(packet, userAuthGSSAPIResp); err != nil {
-		return authFailure, nil, err
-	}
-	// Start the token-exchange loop.
-	// See RFC 4462 section 3.4.
-	var token []byte
-	defer g.gssAPIClient.DeleteSecContext()
-	for {
-		// Initiates the establishment of a security context between the application and a remote peer.
-		nextToken, needContinue, err := g.gssAPIClient.InitSecContext("host@"+g.target, token, false)
-		if err != nil {
-			return authFailure, nil, err
-		}
-		if len(nextToken) > 0 {
-			if err := c.writePacket(Marshal(&userAuthGSSAPIToken{
-				Token: nextToken,
-			})); err != nil {
-				return authFailure, nil, err
-			}
-		}
-		if !needContinue {
-			break
-		}
-		packet, err = c.readPacket()
-		if err != nil {
-			return authFailure, nil, err
-		}
-		switch packet[0] {
-		case msgUserAuthFailure:
-			var msg userAuthFailureMsg
-			if err := Unmarshal(packet, &msg); err != nil {
-				return authFailure, nil, err
-			}
-			if msg.PartialSuccess {
-				return authPartialSuccess, msg.Methods, nil
-			}
-			return authFailure, msg.Methods, nil
-		case msgUserAuthGSSAPIError:
-			userAuthGSSAPIErrorResp := &userAuthGSSAPIError{}
-			if err := Unmarshal(packet, userAuthGSSAPIErrorResp); err != nil {
-				return authFailure, nil, err
-			}
-			return authFailure, nil, fmt.Errorf("GSS-API Error:\n"+
-				"Major Status: %d\n"+
-				"Minor Status: %d\n"+
-				"Error Message: %s\n", userAuthGSSAPIErrorResp.MajorStatus, userAuthGSSAPIErrorResp.MinorStatus,
-				userAuthGSSAPIErrorResp.Message)
-		case msgUserAuthGSSAPIToken:
-			userAuthGSSAPITokenReq := &userAuthGSSAPIToken{}
-			if err := Unmarshal(packet, userAuthGSSAPITokenReq); err != nil {
-				return authFailure, nil, err
-			}
-			token = userAuthGSSAPITokenReq.Token
-		}
-	}
-	// Binding Encryption Keys.
-	// See RFC 4462 section 3.5.
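// Per RFC 4462 section 3.5, the MIC below is computed over the session
// identifier, the SSH_MSG_USERAUTH_REQUEST byte, the user name, the
// service ("ssh-connection"), and the method name ("gssapi-with-mic");
// buildMIC assembles exactly that buffer before GetMIC signs it.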
- micField := buildMIC(string(session), user, "ssh-connection", "gssapi-with-mic") - micToken, err := g.gssAPIClient.GetMIC(micField) - if err != nil { - return authFailure, nil, err - } - if err := c.writePacket(Marshal(&userAuthGSSAPIMIC{ - MIC: micToken, - })); err != nil { - return authFailure, nil, err - } - return handleAuthResponse(c) -} - -func (g *gssAPIWithMICCallback) method() string { - return "gssapi-with-mic" -} diff --git a/vendor/golang.org/x/crypto/ssh/common.go b/vendor/golang.org/x/crypto/ssh/common.go deleted file mode 100644 index 290382d0..00000000 --- a/vendor/golang.org/x/crypto/ssh/common.go +++ /dev/null @@ -1,404 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "crypto" - "crypto/rand" - "fmt" - "io" - "math" - "sync" - - _ "crypto/sha1" - _ "crypto/sha256" - _ "crypto/sha512" -) - -// These are string constants in the SSH protocol. -const ( - compressionNone = "none" - serviceUserAuth = "ssh-userauth" - serviceSSH = "ssh-connection" -) - -// supportedCiphers lists ciphers we support but might not recommend. -var supportedCiphers = []string{ - "aes128-ctr", "aes192-ctr", "aes256-ctr", - "aes128-gcm@openssh.com", - chacha20Poly1305ID, - "arcfour256", "arcfour128", "arcfour", - aes128cbcID, - tripledescbcID, -} - -// preferredCiphers specifies the default preference for ciphers. -var preferredCiphers = []string{ - "aes128-gcm@openssh.com", - chacha20Poly1305ID, - "aes128-ctr", "aes192-ctr", "aes256-ctr", -} - -// supportedKexAlgos specifies the supported key-exchange algorithms in -// preference order. -var supportedKexAlgos = []string{ - kexAlgoCurve25519SHA256, - // P384 and P521 are not constant-time yet, but since we don't - // reuse ephemeral keys, using them for ECDH should be OK. - kexAlgoECDH256, kexAlgoECDH384, kexAlgoECDH521, - kexAlgoDH14SHA1, kexAlgoDH1SHA1, -} - -// serverForbiddenKexAlgos contains key exchange algorithms, that are forbidden -// for the server half. -var serverForbiddenKexAlgos = map[string]struct{}{ - kexAlgoDHGEXSHA1: {}, // server half implementation is only minimal to satisfy the automated tests - kexAlgoDHGEXSHA256: {}, // server half implementation is only minimal to satisfy the automated tests -} - -// preferredKexAlgos specifies the default preference for key-exchange algorithms -// in preference order. -var preferredKexAlgos = []string{ - kexAlgoCurve25519SHA256, - kexAlgoECDH256, kexAlgoECDH384, kexAlgoECDH521, - kexAlgoDH14SHA1, -} - -// supportedHostKeyAlgos specifies the supported host-key algorithms (i.e. methods -// of authenticating servers) in preference order. -var supportedHostKeyAlgos = []string{ - CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, - CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoED25519v01, - - KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, - KeyAlgoRSA, KeyAlgoDSA, - - KeyAlgoED25519, -} - -// supportedMACs specifies a default set of MAC algorithms in preference order. -// This is based on RFC 4253, section 6.4, but with hmac-md5 variants removed -// because they have reached the end of their useful life. -var supportedMACs = []string{ - "hmac-sha2-256-etm@openssh.com", "hmac-sha2-256", "hmac-sha1", "hmac-sha1-96", -} - -var supportedCompressions = []string{compressionNone} - -// hashFuncs keeps the mapping of supported algorithms to their respective -// hashes needed for signature verification. 
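// Note that KeyAlgoED25519 and CertAlgoED25519v01 have no entry in the
// map below: Ed25519 hashes the message internally as part of signing,
// so no separate digest needs to be selected for it.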
-var hashFuncs = map[string]crypto.Hash{
-	KeyAlgoRSA:          crypto.SHA1,
-	KeyAlgoDSA:          crypto.SHA1,
-	KeyAlgoECDSA256:     crypto.SHA256,
-	KeyAlgoECDSA384:     crypto.SHA384,
-	KeyAlgoECDSA521:     crypto.SHA512,
-	CertAlgoRSAv01:      crypto.SHA1,
-	CertAlgoDSAv01:      crypto.SHA1,
-	CertAlgoECDSA256v01: crypto.SHA256,
-	CertAlgoECDSA384v01: crypto.SHA384,
-	CertAlgoECDSA521v01: crypto.SHA512,
-}
-
-// unexpectedMessageError results when the SSH message that we received didn't
-// match what we wanted.
-func unexpectedMessageError(expected, got uint8) error {
-	return fmt.Errorf("ssh: unexpected message type %d (expected %d)", got, expected)
-}
-
-// parseError results from a malformed SSH message.
-func parseError(tag uint8) error {
-	return fmt.Errorf("ssh: parse error in message type %d", tag)
-}
-
-func findCommon(what string, client []string, server []string) (common string, err error) {
-	for _, c := range client {
-		for _, s := range server {
-			if c == s {
-				return c, nil
-			}
-		}
-	}
-	return "", fmt.Errorf("ssh: no common algorithm for %s; client offered: %v, server offered: %v", what, client, server)
-}
-
-// directionAlgorithms records algorithm choices in one direction (either read or write).
-type directionAlgorithms struct {
-	Cipher      string
-	MAC         string
-	Compression string
-}
-
-// rekeyBytes returns the rekeying interval in bytes.
-func (a *directionAlgorithms) rekeyBytes() int64 {
-	// According to RFC 4344, block ciphers should rekey after
-	// 2^(BLOCKSIZE/4) blocks. For all AES flavors BLOCKSIZE is
-	// 128.
-	switch a.Cipher {
-	case "aes128-ctr", "aes192-ctr", "aes256-ctr", gcmCipherID, aes128cbcID:
-		return 16 * (1 << 32)
-	}
-
-	// For others, stick with the RFC 4253 recommendation to rekey after 1 GB of data.
-	return 1 << 30
-}
-
-type algorithms struct {
-	kex     string
-	hostKey string
-	w       directionAlgorithms
-	r       directionAlgorithms
-}
-
-func findAgreedAlgorithms(isClient bool, clientKexInit, serverKexInit *kexInitMsg) (algs *algorithms, err error) {
-	result := &algorithms{}
-
-	result.kex, err = findCommon("key exchange", clientKexInit.KexAlgos, serverKexInit.KexAlgos)
-	if err != nil {
-		return
-	}
-
-	result.hostKey, err = findCommon("host key", clientKexInit.ServerHostKeyAlgos, serverKexInit.ServerHostKeyAlgos)
-	if err != nil {
-		return
-	}
-
-	stoc, ctos := &result.w, &result.r
-	if isClient {
-		ctos, stoc = stoc, ctos
-	}
-
-	ctos.Cipher, err = findCommon("client to server cipher", clientKexInit.CiphersClientServer, serverKexInit.CiphersClientServer)
-	if err != nil {
-		return
-	}
-
-	stoc.Cipher, err = findCommon("server to client cipher", clientKexInit.CiphersServerClient, serverKexInit.CiphersServerClient)
-	if err != nil {
-		return
-	}
-
-	ctos.MAC, err = findCommon("client to server MAC", clientKexInit.MACsClientServer, serverKexInit.MACsClientServer)
-	if err != nil {
-		return
-	}
-
-	stoc.MAC, err = findCommon("server to client MAC", clientKexInit.MACsServerClient, serverKexInit.MACsServerClient)
-	if err != nil {
-		return
-	}
-
-	ctos.Compression, err = findCommon("client to server compression", clientKexInit.CompressionClientServer, serverKexInit.CompressionClientServer)
-	if err != nil {
-		return
-	}
-
-	stoc.Compression, err = findCommon("server to client compression", clientKexInit.CompressionServerClient, serverKexInit.CompressionServerClient)
-	if err != nil {
-		return
-	}
-
-	return result, nil
-}
-
-// If RekeyThreshold is too small, we can't make any progress sending
-// stuff.
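A worked example of the two ceilings computed by rekeyBytes above (a sketch,
not part of the original file):

	// For the 128-bit-block AES modes, RFC 4344 allows 2^(128/4) = 2^32
	// blocks of 16 bytes each before rekeying: 16 * 2^32 = 2^36 bytes (64 GiB).
	const aesRekeyBytes = 16 * (1 << 32)
	// Every other cipher falls back to the RFC 4253 guidance of about 1 GB.
	const defaultRekeyBytes = 1 << 30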
-const minRekeyThreshold uint64 = 256 - -// Config contains configuration data common to both ServerConfig and -// ClientConfig. -type Config struct { - // Rand provides the source of entropy for cryptographic - // primitives. If Rand is nil, the cryptographic random reader - // in package crypto/rand will be used. - Rand io.Reader - - // The maximum number of bytes sent or received after which a - // new key is negotiated. It must be at least 256. If - // unspecified, a size suitable for the chosen cipher is used. - RekeyThreshold uint64 - - // The allowed key exchanges algorithms. If unspecified then a - // default set of algorithms is used. - KeyExchanges []string - - // The allowed cipher algorithms. If unspecified then a sensible - // default is used. - Ciphers []string - - // The allowed MAC algorithms. If unspecified then a sensible default - // is used. - MACs []string -} - -// SetDefaults sets sensible values for unset fields in config. This is -// exported for testing: Configs passed to SSH functions are copied and have -// default values set automatically. -func (c *Config) SetDefaults() { - if c.Rand == nil { - c.Rand = rand.Reader - } - if c.Ciphers == nil { - c.Ciphers = preferredCiphers - } - var ciphers []string - for _, c := range c.Ciphers { - if cipherModes[c] != nil { - // reject the cipher if we have no cipherModes definition - ciphers = append(ciphers, c) - } - } - c.Ciphers = ciphers - - if c.KeyExchanges == nil { - c.KeyExchanges = preferredKexAlgos - } - - if c.MACs == nil { - c.MACs = supportedMACs - } - - if c.RekeyThreshold == 0 { - // cipher specific default - } else if c.RekeyThreshold < minRekeyThreshold { - c.RekeyThreshold = minRekeyThreshold - } else if c.RekeyThreshold >= math.MaxInt64 { - // Avoid weirdness if somebody uses -1 as a threshold. - c.RekeyThreshold = math.MaxInt64 - } -} - -// buildDataSignedForAuth returns the data that is signed in order to prove -// possession of a private key. See RFC 4252, section 7. -func buildDataSignedForAuth(sessionID []byte, req userAuthRequestMsg, algo, pubKey []byte) []byte { - data := struct { - Session []byte - Type byte - User string - Service string - Method string - Sign bool - Algo []byte - PubKey []byte - }{ - sessionID, - msgUserAuthRequest, - req.User, - req.Service, - req.Method, - true, - algo, - pubKey, - } - return Marshal(data) -} - -func appendU16(buf []byte, n uint16) []byte { - return append(buf, byte(n>>8), byte(n)) -} - -func appendU32(buf []byte, n uint32) []byte { - return append(buf, byte(n>>24), byte(n>>16), byte(n>>8), byte(n)) -} - -func appendU64(buf []byte, n uint64) []byte { - return append(buf, - byte(n>>56), byte(n>>48), byte(n>>40), byte(n>>32), - byte(n>>24), byte(n>>16), byte(n>>8), byte(n)) -} - -func appendInt(buf []byte, n int) []byte { - return appendU32(buf, uint32(n)) -} - -func appendString(buf []byte, s string) []byte { - buf = appendU32(buf, uint32(len(s))) - buf = append(buf, s...) - return buf -} - -func appendBool(buf []byte, b bool) []byte { - if b { - return append(buf, 1) - } - return append(buf, 0) -} - -// newCond is a helper to hide the fact that there is no usable zero -// value for sync.Cond. -func newCond() *sync.Cond { return sync.NewCond(new(sync.Mutex)) } - -// window represents the buffer available to clients -// wishing to write to a channel. -type window struct { - *sync.Cond - win uint32 // RFC 4254 5.2 says the window size can grow to 2^32-1 - writeWaiters int - closed bool -} - -// add adds win to the amount of window available -// for consumers. 
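// RFC 4254 section 5.2 caps the window at 2^32-1 bytes, so add below
// treats uint32 wraparound (w.win+win < win) as a protocol error and
// reports failure rather than silently truncating the window.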
-func (w *window) add(win uint32) bool { - // a zero sized window adjust is a noop. - if win == 0 { - return true - } - w.L.Lock() - if w.win+win < win { - w.L.Unlock() - return false - } - w.win += win - // It is unusual that multiple goroutines would be attempting to reserve - // window space, but not guaranteed. Use broadcast to notify all waiters - // that additional window is available. - w.Broadcast() - w.L.Unlock() - return true -} - -// close sets the window to closed, so all reservations fail -// immediately. -func (w *window) close() { - w.L.Lock() - w.closed = true - w.Broadcast() - w.L.Unlock() -} - -// reserve reserves win from the available window capacity. -// If no capacity remains, reserve will block. reserve may -// return less than requested. -func (w *window) reserve(win uint32) (uint32, error) { - var err error - w.L.Lock() - w.writeWaiters++ - w.Broadcast() - for w.win == 0 && !w.closed { - w.Wait() - } - w.writeWaiters-- - if w.win < win { - win = w.win - } - w.win -= win - if w.closed { - err = io.EOF - } - w.L.Unlock() - return win, err -} - -// waitWriterBlocked waits until some goroutine is blocked for further -// writes. It is used in tests only. -func (w *window) waitWriterBlocked() { - w.Cond.L.Lock() - for w.writeWaiters == 0 { - w.Cond.Wait() - } - w.Cond.L.Unlock() -} diff --git a/vendor/golang.org/x/crypto/ssh/connection.go b/vendor/golang.org/x/crypto/ssh/connection.go deleted file mode 100644 index fd6b0681..00000000 --- a/vendor/golang.org/x/crypto/ssh/connection.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "fmt" - "net" -) - -// OpenChannelError is returned if the other side rejects an -// OpenChannel request. -type OpenChannelError struct { - Reason RejectionReason - Message string -} - -func (e *OpenChannelError) Error() string { - return fmt.Sprintf("ssh: rejected: %s (%s)", e.Reason, e.Message) -} - -// ConnMetadata holds metadata for the connection. -type ConnMetadata interface { - // User returns the user ID for this connection. - User() string - - // SessionID returns the session hash, also denoted by H. - SessionID() []byte - - // ClientVersion returns the client's version string as hashed - // into the session ID. - ClientVersion() []byte - - // ServerVersion returns the server's version string as hashed - // into the session ID. - ServerVersion() []byte - - // RemoteAddr returns the remote address for this connection. - RemoteAddr() net.Addr - - // LocalAddr returns the local address for this connection. - LocalAddr() net.Addr -} - -// Conn represents an SSH connection for both server and client roles. -// Conn is the basis for implementing an application layer, such -// as ClientConn, which implements the traditional shell access for -// clients. -type Conn interface { - ConnMetadata - - // SendRequest sends a global request, and returns the - // reply. If wantReply is true, it returns the response status - // and payload. See also RFC4254, section 4. - SendRequest(name string, wantReply bool, payload []byte) (bool, []byte, error) - - // OpenChannel tries to open an channel. If the request is - // rejected, it returns *OpenChannelError. On success it returns - // the SSH Channel and a Go channel for incoming, out-of-band - // requests. The Go channel must be serviced, or the - // connection will hang. 
- OpenChannel(name string, data []byte) (Channel, <-chan *Request, error) - - // Close closes the underlying network connection - Close() error - - // Wait blocks until the connection has shut down, and returns the - // error causing the shutdown. - Wait() error - - // TODO(hanwen): consider exposing: - // RequestKeyChange - // Disconnect -} - -// DiscardRequests consumes and rejects all requests from the -// passed-in channel. -func DiscardRequests(in <-chan *Request) { - for req := range in { - if req.WantReply { - req.Reply(false, nil) - } - } -} - -// A connection represents an incoming connection. -type connection struct { - transport *handshakeTransport - sshConn - - // The connection protocol. - *mux -} - -func (c *connection) Close() error { - return c.sshConn.conn.Close() -} - -// sshconn provides net.Conn metadata, but disallows direct reads and -// writes. -type sshConn struct { - conn net.Conn - - user string - sessionID []byte - clientVersion []byte - serverVersion []byte -} - -func dup(src []byte) []byte { - dst := make([]byte, len(src)) - copy(dst, src) - return dst -} - -func (c *sshConn) User() string { - return c.user -} - -func (c *sshConn) RemoteAddr() net.Addr { - return c.conn.RemoteAddr() -} - -func (c *sshConn) Close() error { - return c.conn.Close() -} - -func (c *sshConn) LocalAddr() net.Addr { - return c.conn.LocalAddr() -} - -func (c *sshConn) SessionID() []byte { - return dup(c.sessionID) -} - -func (c *sshConn) ClientVersion() []byte { - return dup(c.clientVersion) -} - -func (c *sshConn) ServerVersion() []byte { - return dup(c.serverVersion) -} diff --git a/vendor/golang.org/x/crypto/ssh/doc.go b/vendor/golang.org/x/crypto/ssh/doc.go deleted file mode 100644 index 67b7322c..00000000 --- a/vendor/golang.org/x/crypto/ssh/doc.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package ssh implements an SSH client and server. - -SSH is a transport security protocol, an authentication protocol and a -family of application protocols. The most typical application level -protocol is a remote shell and this is specifically implemented. However, -the multiplexed nature of SSH is exposed to users that wish to support -others. - -References: - [PROTOCOL.certkeys]: http://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.certkeys?rev=HEAD - [SSH-PARAMETERS]: http://www.iana.org/assignments/ssh-parameters/ssh-parameters.xml#ssh-parameters-1 - -This package does not fall under the stability promise of the Go language itself, -so its API may be changed when pressing needs arise. -*/ -package ssh // import "golang.org/x/crypto/ssh" diff --git a/vendor/golang.org/x/crypto/ssh/handshake.go b/vendor/golang.org/x/crypto/ssh/handshake.go deleted file mode 100644 index 2b10b05a..00000000 --- a/vendor/golang.org/x/crypto/ssh/handshake.go +++ /dev/null @@ -1,647 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "crypto/rand" - "errors" - "fmt" - "io" - "log" - "net" - "sync" -) - -// debugHandshake, if set, prints messages sent and received. Key -// exchange messages are printed as if DH were used, so the debug -// messages are wrong when using ECDH. -const debugHandshake = false - -// chanSize sets the amount of buffering SSH connections. 
This is -// primarily for testing: setting chanSize=0 uncovers deadlocks more -// quickly. -const chanSize = 16 - -// keyingTransport is a packet based transport that supports key -// changes. It need not be thread-safe. It should pass through -// msgNewKeys in both directions. -type keyingTransport interface { - packetConn - - // prepareKeyChange sets up a key change. The key change for a - // direction will be effected if a msgNewKeys message is sent - // or received. - prepareKeyChange(*algorithms, *kexResult) error -} - -// handshakeTransport implements rekeying on top of a keyingTransport -// and offers a thread-safe writePacket() interface. -type handshakeTransport struct { - conn keyingTransport - config *Config - - serverVersion []byte - clientVersion []byte - - // hostKeys is non-empty if we are the server. In that case, - // it contains all host keys that can be used to sign the - // connection. - hostKeys []Signer - - // hostKeyAlgorithms is non-empty if we are the client. In that case, - // we accept these key types from the server as host key. - hostKeyAlgorithms []string - - // On read error, incoming is closed, and readError is set. - incoming chan []byte - readError error - - mu sync.Mutex - writeError error - sentInitPacket []byte - sentInitMsg *kexInitMsg - pendingPackets [][]byte // Used when a key exchange is in progress. - - // If the read loop wants to schedule a kex, it pings this - // channel, and the write loop will send out a kex - // message. - requestKex chan struct{} - - // If the other side requests or confirms a kex, its kexInit - // packet is sent here for the write loop to find it. - startKex chan *pendingKex - - // data for host key checking - hostKeyCallback HostKeyCallback - dialAddress string - remoteAddr net.Addr - - // bannerCallback is non-empty if we are the client and it has been set in - // ClientConfig. In that case it is called during the user authentication - // dance to handle a custom server's message. - bannerCallback BannerCallback - - // Algorithms agreed in the last key exchange. - algorithms *algorithms - - readPacketsLeft uint32 - readBytesLeft int64 - - writePacketsLeft uint32 - writeBytesLeft int64 - - // The session ID or nil if first kex did not complete yet. - sessionID []byte -} - -type pendingKex struct { - otherInit []byte - done chan error -} - -func newHandshakeTransport(conn keyingTransport, config *Config, clientVersion, serverVersion []byte) *handshakeTransport { - t := &handshakeTransport{ - conn: conn, - serverVersion: serverVersion, - clientVersion: clientVersion, - incoming: make(chan []byte, chanSize), - requestKex: make(chan struct{}, 1), - startKex: make(chan *pendingKex, 1), - - config: config, - } - t.resetReadThresholds() - t.resetWriteThresholds() - - // We always start with a mandatory key exchange. 
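// requestKex is buffered with capacity 1 (see the make call above), so
// this initial send cannot block; requestKeyExchange later drops
// duplicate requests while one is already pending.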
- t.requestKex <- struct{}{} - return t -} - -func newClientTransport(conn keyingTransport, clientVersion, serverVersion []byte, config *ClientConfig, dialAddr string, addr net.Addr) *handshakeTransport { - t := newHandshakeTransport(conn, &config.Config, clientVersion, serverVersion) - t.dialAddress = dialAddr - t.remoteAddr = addr - t.hostKeyCallback = config.HostKeyCallback - t.bannerCallback = config.BannerCallback - if config.HostKeyAlgorithms != nil { - t.hostKeyAlgorithms = config.HostKeyAlgorithms - } else { - t.hostKeyAlgorithms = supportedHostKeyAlgos - } - go t.readLoop() - go t.kexLoop() - return t -} - -func newServerTransport(conn keyingTransport, clientVersion, serverVersion []byte, config *ServerConfig) *handshakeTransport { - t := newHandshakeTransport(conn, &config.Config, clientVersion, serverVersion) - t.hostKeys = config.hostKeys - go t.readLoop() - go t.kexLoop() - return t -} - -func (t *handshakeTransport) getSessionID() []byte { - return t.sessionID -} - -// waitSession waits for the session to be established. This should be -// the first thing to call after instantiating handshakeTransport. -func (t *handshakeTransport) waitSession() error { - p, err := t.readPacket() - if err != nil { - return err - } - if p[0] != msgNewKeys { - return fmt.Errorf("ssh: first packet should be msgNewKeys") - } - - return nil -} - -func (t *handshakeTransport) id() string { - if len(t.hostKeys) > 0 { - return "server" - } - return "client" -} - -func (t *handshakeTransport) printPacket(p []byte, write bool) { - action := "got" - if write { - action = "sent" - } - - if p[0] == msgChannelData || p[0] == msgChannelExtendedData { - log.Printf("%s %s data (packet %d bytes)", t.id(), action, len(p)) - } else { - msg, err := decode(p) - log.Printf("%s %s %T %v (%v)", t.id(), action, msg, msg, err) - } -} - -func (t *handshakeTransport) readPacket() ([]byte, error) { - p, ok := <-t.incoming - if !ok { - return nil, t.readError - } - return p, nil -} - -func (t *handshakeTransport) readLoop() { - first := true - for { - p, err := t.readOnePacket(first) - first = false - if err != nil { - t.readError = err - close(t.incoming) - break - } - if p[0] == msgIgnore || p[0] == msgDebug { - continue - } - t.incoming <- p - } - - // Stop writers too. - t.recordWriteError(t.readError) - - // Unblock the writer should it wait for this. - close(t.startKex) - - // Don't close t.requestKex; it's also written to from writePacket. -} - -func (t *handshakeTransport) pushPacket(p []byte) error { - if debugHandshake { - t.printPacket(p, true) - } - return t.conn.writePacket(p) -} - -func (t *handshakeTransport) getWriteError() error { - t.mu.Lock() - defer t.mu.Unlock() - return t.writeError -} - -func (t *handshakeTransport) recordWriteError(err error) { - t.mu.Lock() - defer t.mu.Unlock() - if t.writeError == nil && err != nil { - t.writeError = err - } -} - -func (t *handshakeTransport) requestKeyExchange() { - select { - case t.requestKex <- struct{}{}: - default: - // something already requested a kex, so do nothing. 
- } -} - -func (t *handshakeTransport) resetWriteThresholds() { - t.writePacketsLeft = packetRekeyThreshold - if t.config.RekeyThreshold > 0 { - t.writeBytesLeft = int64(t.config.RekeyThreshold) - } else if t.algorithms != nil { - t.writeBytesLeft = t.algorithms.w.rekeyBytes() - } else { - t.writeBytesLeft = 1 << 30 - } -} - -func (t *handshakeTransport) kexLoop() { - -write: - for t.getWriteError() == nil { - var request *pendingKex - var sent bool - - for request == nil || !sent { - var ok bool - select { - case request, ok = <-t.startKex: - if !ok { - break write - } - case <-t.requestKex: - break - } - - if !sent { - if err := t.sendKexInit(); err != nil { - t.recordWriteError(err) - break - } - sent = true - } - } - - if err := t.getWriteError(); err != nil { - if request != nil { - request.done <- err - } - break - } - - // We're not servicing t.requestKex, but that is OK: - // we never block on sending to t.requestKex. - - // We're not servicing t.startKex, but the remote end - // has just sent us a kexInitMsg, so it can't send - // another key change request, until we close the done - // channel on the pendingKex request. - - err := t.enterKeyExchange(request.otherInit) - - t.mu.Lock() - t.writeError = err - t.sentInitPacket = nil - t.sentInitMsg = nil - - t.resetWriteThresholds() - - // we have completed the key exchange. Since the - // reader is still blocked, it is safe to clear out - // the requestKex channel. This avoids the situation - // where: 1) we consumed our own request for the - // initial kex, and 2) the kex from the remote side - // caused another send on the requestKex channel, - clear: - for { - select { - case <-t.requestKex: - // - default: - break clear - } - } - - request.done <- t.writeError - - // kex finished. Push packets that we received while - // the kex was in progress. Don't look at t.startKex - // and don't increment writtenSinceKex: if we trigger - // another kex while we are still busy with the last - // one, things will become very confusing. - for _, p := range t.pendingPackets { - t.writeError = t.pushPacket(p) - if t.writeError != nil { - break - } - } - t.pendingPackets = t.pendingPackets[:0] - t.mu.Unlock() - } - - // drain startKex channel. We don't service t.requestKex - // because nobody does blocking sends there. - go func() { - for init := range t.startKex { - init.done <- t.writeError - } - }() - - // Unblock reader. - t.conn.Close() -} - -// The protocol uses uint32 for packet counters, so we can't let them -// reach 1<<32. We will actually read and write more packets than -// this, though: the other side may send more packets, and after we -// hit this limit on writing we will send a few more packets for the -// key exchange itself. 
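// 1<<31 is half the uint32 sequence-number space, which leaves ample
// headroom below 2^32 for the extra packets exchanged while the rekey
// itself is in flight.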
-const packetRekeyThreshold = (1 << 31) - -func (t *handshakeTransport) resetReadThresholds() { - t.readPacketsLeft = packetRekeyThreshold - if t.config.RekeyThreshold > 0 { - t.readBytesLeft = int64(t.config.RekeyThreshold) - } else if t.algorithms != nil { - t.readBytesLeft = t.algorithms.r.rekeyBytes() - } else { - t.readBytesLeft = 1 << 30 - } -} - -func (t *handshakeTransport) readOnePacket(first bool) ([]byte, error) { - p, err := t.conn.readPacket() - if err != nil { - return nil, err - } - - if t.readPacketsLeft > 0 { - t.readPacketsLeft-- - } else { - t.requestKeyExchange() - } - - if t.readBytesLeft > 0 { - t.readBytesLeft -= int64(len(p)) - } else { - t.requestKeyExchange() - } - - if debugHandshake { - t.printPacket(p, false) - } - - if first && p[0] != msgKexInit { - return nil, fmt.Errorf("ssh: first packet should be msgKexInit") - } - - if p[0] != msgKexInit { - return p, nil - } - - firstKex := t.sessionID == nil - - kex := pendingKex{ - done: make(chan error, 1), - otherInit: p, - } - t.startKex <- &kex - err = <-kex.done - - if debugHandshake { - log.Printf("%s exited key exchange (first %v), err %v", t.id(), firstKex, err) - } - - if err != nil { - return nil, err - } - - t.resetReadThresholds() - - // By default, a key exchange is hidden from higher layers by - // translating it into msgIgnore. - successPacket := []byte{msgIgnore} - if firstKex { - // sendKexInit() for the first kex waits for - // msgNewKeys so the authentication process is - // guaranteed to happen over an encrypted transport. - successPacket = []byte{msgNewKeys} - } - - return successPacket, nil -} - -// sendKexInit sends a key change message. -func (t *handshakeTransport) sendKexInit() error { - t.mu.Lock() - defer t.mu.Unlock() - if t.sentInitMsg != nil { - // kexInits may be sent either in response to the other side, - // or because our side wants to initiate a key change, so we - // may have already sent a kexInit. In that case, don't send a - // second kexInit. - return nil - } - - msg := &kexInitMsg{ - KexAlgos: t.config.KeyExchanges, - CiphersClientServer: t.config.Ciphers, - CiphersServerClient: t.config.Ciphers, - MACsClientServer: t.config.MACs, - MACsServerClient: t.config.MACs, - CompressionClientServer: supportedCompressions, - CompressionServerClient: supportedCompressions, - } - io.ReadFull(rand.Reader, msg.Cookie[:]) - - if len(t.hostKeys) > 0 { - for _, k := range t.hostKeys { - msg.ServerHostKeyAlgos = append( - msg.ServerHostKeyAlgos, k.PublicKey().Type()) - } - } else { - msg.ServerHostKeyAlgos = t.hostKeyAlgorithms - } - packet := Marshal(msg) - - // writePacket destroys the contents, so save a copy. - packetCopy := make([]byte, len(packet)) - copy(packetCopy, packet) - - if err := t.pushPacket(packetCopy); err != nil { - return err - } - - t.sentInitMsg = msg - t.sentInitPacket = packet - - return nil -} - -func (t *handshakeTransport) writePacket(p []byte) error { - switch p[0] { - case msgKexInit: - return errors.New("ssh: only handshakeTransport can send kexInit") - case msgNewKeys: - return errors.New("ssh: only handshakeTransport can send newKeys") - } - - t.mu.Lock() - defer t.mu.Unlock() - if t.writeError != nil { - return t.writeError - } - - if t.sentInitMsg != nil { - // Copy the packet so the writer can reuse the buffer. 
- cp := make([]byte, len(p)) - copy(cp, p) - t.pendingPackets = append(t.pendingPackets, cp) - return nil - } - - if t.writeBytesLeft > 0 { - t.writeBytesLeft -= int64(len(p)) - } else { - t.requestKeyExchange() - } - - if t.writePacketsLeft > 0 { - t.writePacketsLeft-- - } else { - t.requestKeyExchange() - } - - if err := t.pushPacket(p); err != nil { - t.writeError = err - } - - return nil -} - -func (t *handshakeTransport) Close() error { - return t.conn.Close() -} - -func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error { - if debugHandshake { - log.Printf("%s entered key exchange", t.id()) - } - - otherInit := &kexInitMsg{} - if err := Unmarshal(otherInitPacket, otherInit); err != nil { - return err - } - - magics := handshakeMagics{ - clientVersion: t.clientVersion, - serverVersion: t.serverVersion, - clientKexInit: otherInitPacket, - serverKexInit: t.sentInitPacket, - } - - clientInit := otherInit - serverInit := t.sentInitMsg - isClient := len(t.hostKeys) == 0 - if isClient { - clientInit, serverInit = serverInit, clientInit - - magics.clientKexInit = t.sentInitPacket - magics.serverKexInit = otherInitPacket - } - - var err error - t.algorithms, err = findAgreedAlgorithms(isClient, clientInit, serverInit) - if err != nil { - return err - } - - // We don't send FirstKexFollows, but we handle receiving it. - // - // RFC 4253 section 7 defines the kex and the agreement method for - // first_kex_packet_follows. It states that the guessed packet - // should be ignored if the "kex algorithm and/or the host - // key algorithm is guessed wrong (server and client have - // different preferred algorithm), or if any of the other - // algorithms cannot be agreed upon". The other algorithms have - // already been checked above so the kex algorithm and host key - // algorithm are checked here. - if otherInit.FirstKexFollows && (clientInit.KexAlgos[0] != serverInit.KexAlgos[0] || clientInit.ServerHostKeyAlgos[0] != serverInit.ServerHostKeyAlgos[0]) { - // other side sent a kex message for the wrong algorithm, - // which we have to ignore. 
- if _, err := t.conn.readPacket(); err != nil { - return err - } - } - - kex, ok := kexAlgoMap[t.algorithms.kex] - if !ok { - return fmt.Errorf("ssh: unexpected key exchange algorithm %v", t.algorithms.kex) - } - - var result *kexResult - if len(t.hostKeys) > 0 { - result, err = t.server(kex, t.algorithms, &magics) - } else { - result, err = t.client(kex, t.algorithms, &magics) - } - - if err != nil { - return err - } - - if t.sessionID == nil { - t.sessionID = result.H - } - result.SessionID = t.sessionID - - if err := t.conn.prepareKeyChange(t.algorithms, result); err != nil { - return err - } - if err = t.conn.writePacket([]byte{msgNewKeys}); err != nil { - return err - } - if packet, err := t.conn.readPacket(); err != nil { - return err - } else if packet[0] != msgNewKeys { - return unexpectedMessageError(msgNewKeys, packet[0]) - } - - return nil -} - -func (t *handshakeTransport) server(kex kexAlgorithm, algs *algorithms, magics *handshakeMagics) (*kexResult, error) { - var hostKey Signer - for _, k := range t.hostKeys { - if algs.hostKey == k.PublicKey().Type() { - hostKey = k - } - } - - r, err := kex.Server(t.conn, t.config.Rand, magics, hostKey) - return r, err -} - -func (t *handshakeTransport) client(kex kexAlgorithm, algs *algorithms, magics *handshakeMagics) (*kexResult, error) { - result, err := kex.Client(t.conn, t.config.Rand, magics) - if err != nil { - return nil, err - } - - hostKey, err := ParsePublicKey(result.HostKey) - if err != nil { - return nil, err - } - - if err := verifyHostKeySignature(hostKey, result); err != nil { - return nil, err - } - - err = t.hostKeyCallback(t.dialAddress, t.remoteAddr, hostKey) - if err != nil { - return nil, err - } - - return result, nil -} diff --git a/vendor/golang.org/x/crypto/ssh/internal/bcrypt_pbkdf/bcrypt_pbkdf.go b/vendor/golang.org/x/crypto/ssh/internal/bcrypt_pbkdf/bcrypt_pbkdf.go deleted file mode 100644 index af81d266..00000000 --- a/vendor/golang.org/x/crypto/ssh/internal/bcrypt_pbkdf/bcrypt_pbkdf.go +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package bcrypt_pbkdf implements bcrypt_pbkdf(3) from OpenBSD. -// -// See https://flak.tedunangst.com/post/bcrypt-pbkdf and -// https://cvsweb.openbsd.org/cgi-bin/cvsweb/src/lib/libutil/bcrypt_pbkdf.c. -package bcrypt_pbkdf - -import ( - "crypto/sha512" - "errors" - "golang.org/x/crypto/blowfish" -) - -const blockSize = 32 - -// Key derives a key from the password, salt and rounds count, returning a -// []byte of length keyLen that can be used as cryptographic key. 
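Illustrative use from outside the package (a sketch; the salt would come
from the encrypted key file, and the rounds and lengths mirror what OpenSSH
commonly uses for passphrase-protected private keys, not values mandated by
this package):

	// Derive 48 bytes: a 32-byte AES-256 key plus a 16-byte IV.
	dk, err := bcrypt_pbkdf.Key([]byte("passphrase"), salt, 16, 32+16)
	if err != nil {
		// handle the error
	}
	key, iv := dk[:32], dk[32:]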
-func Key(password, salt []byte, rounds, keyLen int) ([]byte, error) { - if rounds < 1 { - return nil, errors.New("bcrypt_pbkdf: number of rounds is too small") - } - if len(password) == 0 { - return nil, errors.New("bcrypt_pbkdf: empty password") - } - if len(salt) == 0 || len(salt) > 1<<20 { - return nil, errors.New("bcrypt_pbkdf: bad salt length") - } - if keyLen > 1024 { - return nil, errors.New("bcrypt_pbkdf: keyLen is too large") - } - - numBlocks := (keyLen + blockSize - 1) / blockSize - key := make([]byte, numBlocks*blockSize) - - h := sha512.New() - h.Write(password) - shapass := h.Sum(nil) - - shasalt := make([]byte, 0, sha512.Size) - cnt, tmp := make([]byte, 4), make([]byte, blockSize) - for block := 1; block <= numBlocks; block++ { - h.Reset() - h.Write(salt) - cnt[0] = byte(block >> 24) - cnt[1] = byte(block >> 16) - cnt[2] = byte(block >> 8) - cnt[3] = byte(block) - h.Write(cnt) - bcryptHash(tmp, shapass, h.Sum(shasalt)) - - out := make([]byte, blockSize) - copy(out, tmp) - for i := 2; i <= rounds; i++ { - h.Reset() - h.Write(tmp) - bcryptHash(tmp, shapass, h.Sum(shasalt)) - for j := 0; j < len(out); j++ { - out[j] ^= tmp[j] - } - } - - for i, v := range out { - key[i*numBlocks+(block-1)] = v - } - } - return key[:keyLen], nil -} - -var magic = []byte("OxychromaticBlowfishSwatDynamite") - -func bcryptHash(out, shapass, shasalt []byte) { - c, err := blowfish.NewSaltedCipher(shapass, shasalt) - if err != nil { - panic(err) - } - for i := 0; i < 64; i++ { - blowfish.ExpandKey(shasalt, c) - blowfish.ExpandKey(shapass, c) - } - copy(out, magic) - for i := 0; i < 32; i += 8 { - for j := 0; j < 64; j++ { - c.Encrypt(out[i:i+8], out[i:i+8]) - } - } - // Swap bytes due to different endianness. - for i := 0; i < 32; i += 4 { - out[i+3], out[i+2], out[i+1], out[i] = out[i], out[i+1], out[i+2], out[i+3] - } -} diff --git a/vendor/golang.org/x/crypto/ssh/kex.go b/vendor/golang.org/x/crypto/ssh/kex.go deleted file mode 100644 index 766e9293..00000000 --- a/vendor/golang.org/x/crypto/ssh/kex.go +++ /dev/null @@ -1,782 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "crypto" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/subtle" - "encoding/binary" - "errors" - "fmt" - "io" - "math/big" - - "golang.org/x/crypto/curve25519" -) - -const ( - kexAlgoDH1SHA1 = "diffie-hellman-group1-sha1" - kexAlgoDH14SHA1 = "diffie-hellman-group14-sha1" - kexAlgoECDH256 = "ecdh-sha2-nistp256" - kexAlgoECDH384 = "ecdh-sha2-nistp384" - kexAlgoECDH521 = "ecdh-sha2-nistp521" - kexAlgoCurve25519SHA256 = "curve25519-sha256@libssh.org" - - // For the following kex only the client half contains a production - // ready implementation. The server half only consists of a minimal - // implementation to satisfy the automated tests. - kexAlgoDHGEXSHA1 = "diffie-hellman-group-exchange-sha1" - kexAlgoDHGEXSHA256 = "diffie-hellman-group-exchange-sha256" -) - -// kexResult captures the outcome of a key exchange. -type kexResult struct { - // Session hash. See also RFC 4253, section 8. - H []byte - - // Shared secret. See also RFC 4253, section 8. - K []byte - - // Host key as hashed into H. - HostKey []byte - - // Signature of H. - Signature []byte - - // A cryptographic hash function that matches the security - // level of the key exchange algorithm. It is used for - // calculating H, and for deriving keys from H and K. 
- Hash crypto.Hash - - // The session ID, which is the first H computed. This is used - // to derive key material inside the transport. - SessionID []byte -} - -// handshakeMagics contains data that is always included in the -// session hash. -type handshakeMagics struct { - clientVersion, serverVersion []byte - clientKexInit, serverKexInit []byte -} - -func (m *handshakeMagics) write(w io.Writer) { - writeString(w, m.clientVersion) - writeString(w, m.serverVersion) - writeString(w, m.clientKexInit) - writeString(w, m.serverKexInit) -} - -// kexAlgorithm abstracts different key exchange algorithms. -type kexAlgorithm interface { - // Server runs server-side key agreement, signing the result - // with a hostkey. - Server(p packetConn, rand io.Reader, magics *handshakeMagics, s Signer) (*kexResult, error) - - // Client runs the client-side key agreement. Caller is - // responsible for verifying the host key signature. - Client(p packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) -} - -// dhGroup is a multiplicative group suitable for implementing Diffie-Hellman key agreement. -type dhGroup struct { - g, p, pMinus1 *big.Int -} - -func (group *dhGroup) diffieHellman(theirPublic, myPrivate *big.Int) (*big.Int, error) { - if theirPublic.Cmp(bigOne) <= 0 || theirPublic.Cmp(group.pMinus1) >= 0 { - return nil, errors.New("ssh: DH parameter out of bounds") - } - return new(big.Int).Exp(theirPublic, myPrivate, group.p), nil -} - -func (group *dhGroup) Client(c packetConn, randSource io.Reader, magics *handshakeMagics) (*kexResult, error) { - hashFunc := crypto.SHA1 - - var x *big.Int - for { - var err error - if x, err = rand.Int(randSource, group.pMinus1); err != nil { - return nil, err - } - if x.Sign() > 0 { - break - } - } - - X := new(big.Int).Exp(group.g, x, group.p) - kexDHInit := kexDHInitMsg{ - X: X, - } - if err := c.writePacket(Marshal(&kexDHInit)); err != nil { - return nil, err - } - - packet, err := c.readPacket() - if err != nil { - return nil, err - } - - var kexDHReply kexDHReplyMsg - if err = Unmarshal(packet, &kexDHReply); err != nil { - return nil, err - } - - ki, err := group.diffieHellman(kexDHReply.Y, x) - if err != nil { - return nil, err - } - - h := hashFunc.New() - magics.write(h) - writeString(h, kexDHReply.HostKey) - writeInt(h, X) - writeInt(h, kexDHReply.Y) - K := make([]byte, intLength(ki)) - marshalInt(K, ki) - h.Write(K) - - return &kexResult{ - H: h.Sum(nil), - K: K, - HostKey: kexDHReply.HostKey, - Signature: kexDHReply.Signature, - Hash: crypto.SHA1, - }, nil -} - -func (group *dhGroup) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) { - hashFunc := crypto.SHA1 - packet, err := c.readPacket() - if err != nil { - return - } - var kexDHInit kexDHInitMsg - if err = Unmarshal(packet, &kexDHInit); err != nil { - return - } - - var y *big.Int - for { - if y, err = rand.Int(randSource, group.pMinus1); err != nil { - return - } - if y.Sign() > 0 { - break - } - } - - Y := new(big.Int).Exp(group.g, y, group.p) - ki, err := group.diffieHellman(kexDHInit.X, y) - if err != nil { - return nil, err - } - - hostKeyBytes := priv.PublicKey().Marshal() - - h := hashFunc.New() - magics.write(h) - writeString(h, hostKeyBytes) - writeInt(h, kexDHInit.X) - writeInt(h, Y) - - K := make([]byte, intLength(ki)) - marshalInt(K, ki) - h.Write(K) - - H := h.Sum(nil) - - // H is already a hash, but the hostkey signing will apply its - // own key-specific hash algorithm. 
- sig, err := signAndMarshal(priv, randSource, H) - if err != nil { - return nil, err - } - - kexDHReply := kexDHReplyMsg{ - HostKey: hostKeyBytes, - Y: Y, - Signature: sig, - } - packet = Marshal(&kexDHReply) - - err = c.writePacket(packet) - return &kexResult{ - H: H, - K: K, - HostKey: hostKeyBytes, - Signature: sig, - Hash: crypto.SHA1, - }, err -} - -// ecdh performs Elliptic Curve Diffie-Hellman key exchange as -// described in RFC 5656, section 4. -type ecdh struct { - curve elliptic.Curve -} - -func (kex *ecdh) Client(c packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) { - ephKey, err := ecdsa.GenerateKey(kex.curve, rand) - if err != nil { - return nil, err - } - - kexInit := kexECDHInitMsg{ - ClientPubKey: elliptic.Marshal(kex.curve, ephKey.PublicKey.X, ephKey.PublicKey.Y), - } - - serialized := Marshal(&kexInit) - if err := c.writePacket(serialized); err != nil { - return nil, err - } - - packet, err := c.readPacket() - if err != nil { - return nil, err - } - - var reply kexECDHReplyMsg - if err = Unmarshal(packet, &reply); err != nil { - return nil, err - } - - x, y, err := unmarshalECKey(kex.curve, reply.EphemeralPubKey) - if err != nil { - return nil, err - } - - // generate shared secret - secret, _ := kex.curve.ScalarMult(x, y, ephKey.D.Bytes()) - - h := ecHash(kex.curve).New() - magics.write(h) - writeString(h, reply.HostKey) - writeString(h, kexInit.ClientPubKey) - writeString(h, reply.EphemeralPubKey) - K := make([]byte, intLength(secret)) - marshalInt(K, secret) - h.Write(K) - - return &kexResult{ - H: h.Sum(nil), - K: K, - HostKey: reply.HostKey, - Signature: reply.Signature, - Hash: ecHash(kex.curve), - }, nil -} - -// unmarshalECKey parses and checks an EC key. -func unmarshalECKey(curve elliptic.Curve, pubkey []byte) (x, y *big.Int, err error) { - x, y = elliptic.Unmarshal(curve, pubkey) - if x == nil { - return nil, nil, errors.New("ssh: elliptic.Unmarshal failure") - } - if !validateECPublicKey(curve, x, y) { - return nil, nil, errors.New("ssh: public key not on curve") - } - return x, y, nil -} - -// validateECPublicKey checks that the point is a valid public key for -// the given curve. See [SEC1], 3.2.2 -func validateECPublicKey(curve elliptic.Curve, x, y *big.Int) bool { - if x.Sign() == 0 && y.Sign() == 0 { - return false - } - - if x.Cmp(curve.Params().P) >= 0 { - return false - } - - if y.Cmp(curve.Params().P) >= 0 { - return false - } - - if !curve.IsOnCurve(x, y) { - return false - } - - // We don't check if N * PubKey == 0, since - // - // - the NIST curves have cofactor = 1, so this is implicit. - // (We don't foresee an implementation that supports non NIST - // curves) - // - // - for ephemeral keys, we don't need to worry about small - // subgroup attacks. - return true -} - -func (kex *ecdh) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) { - packet, err := c.readPacket() - if err != nil { - return nil, err - } - - var kexECDHInit kexECDHInitMsg - if err = Unmarshal(packet, &kexECDHInit); err != nil { - return nil, err - } - - clientX, clientY, err := unmarshalECKey(kex.curve, kexECDHInit.ClientPubKey) - if err != nil { - return nil, err - } - - // We could cache this key across multiple users/multiple - // connection attempts, but the benefit is small. OpenSSH - // generates a new key for each incoming connection. 
- ephKey, err := ecdsa.GenerateKey(kex.curve, rand) - if err != nil { - return nil, err - } - - hostKeyBytes := priv.PublicKey().Marshal() - - serializedEphKey := elliptic.Marshal(kex.curve, ephKey.PublicKey.X, ephKey.PublicKey.Y) - - // generate shared secret - secret, _ := kex.curve.ScalarMult(clientX, clientY, ephKey.D.Bytes()) - - h := ecHash(kex.curve).New() - magics.write(h) - writeString(h, hostKeyBytes) - writeString(h, kexECDHInit.ClientPubKey) - writeString(h, serializedEphKey) - - K := make([]byte, intLength(secret)) - marshalInt(K, secret) - h.Write(K) - - H := h.Sum(nil) - - // H is already a hash, but the hostkey signing will apply its - // own key-specific hash algorithm. - sig, err := signAndMarshal(priv, rand, H) - if err != nil { - return nil, err - } - - reply := kexECDHReplyMsg{ - EphemeralPubKey: serializedEphKey, - HostKey: hostKeyBytes, - Signature: sig, - } - - serialized := Marshal(&reply) - if err := c.writePacket(serialized); err != nil { - return nil, err - } - - return &kexResult{ - H: H, - K: K, - HostKey: reply.HostKey, - Signature: sig, - Hash: ecHash(kex.curve), - }, nil -} - -var kexAlgoMap = map[string]kexAlgorithm{} - -func init() { - // This is the group called diffie-hellman-group1-sha1 in RFC - // 4253 and Oakley Group 2 in RFC 2409. - p, _ := new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF", 16) - kexAlgoMap[kexAlgoDH1SHA1] = &dhGroup{ - g: new(big.Int).SetInt64(2), - p: p, - pMinus1: new(big.Int).Sub(p, bigOne), - } - - // This is the group called diffie-hellman-group14-sha1 in RFC - // 4253 and Oakley Group 14 in RFC 3526. - p, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF", 16) - - kexAlgoMap[kexAlgoDH14SHA1] = &dhGroup{ - g: new(big.Int).SetInt64(2), - p: p, - pMinus1: new(big.Int).Sub(p, bigOne), - } - - kexAlgoMap[kexAlgoECDH521] = &ecdh{elliptic.P521()} - kexAlgoMap[kexAlgoECDH384] = &ecdh{elliptic.P384()} - kexAlgoMap[kexAlgoECDH256] = &ecdh{elliptic.P256()} - kexAlgoMap[kexAlgoCurve25519SHA256] = &curve25519sha256{} - kexAlgoMap[kexAlgoDHGEXSHA1] = &dhGEXSHA{hashFunc: crypto.SHA1} - kexAlgoMap[kexAlgoDHGEXSHA256] = &dhGEXSHA{hashFunc: crypto.SHA256} -} - -// curve25519sha256 implements the curve25519-sha256@libssh.org key -// agreement protocol, as described in -// https://git.libssh.org/projects/libssh.git/tree/doc/curve25519-sha256@libssh.org.txt -type curve25519sha256 struct{} - -type curve25519KeyPair struct { - priv [32]byte - pub [32]byte -} - -func (kp *curve25519KeyPair) generate(rand io.Reader) error { - if _, err := io.ReadFull(rand, kp.priv[:]); err != nil { - return err - } - curve25519.ScalarBaseMult(&kp.pub, &kp.priv) - return nil -} - -// curve25519Zeros is just an array of 32 zero bytes so that we have something -// convenient to compare against in order to reject curve25519 points with the -// wrong order. 
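// An all-zero shared secret is exactly what results when a peer sends a
// small-order point, so Client and Server below compare against this
// array in constant time and abort the exchange on a match.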
-var curve25519Zeros [32]byte - -func (kex *curve25519sha256) Client(c packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) { - var kp curve25519KeyPair - if err := kp.generate(rand); err != nil { - return nil, err - } - if err := c.writePacket(Marshal(&kexECDHInitMsg{kp.pub[:]})); err != nil { - return nil, err - } - - packet, err := c.readPacket() - if err != nil { - return nil, err - } - - var reply kexECDHReplyMsg - if err = Unmarshal(packet, &reply); err != nil { - return nil, err - } - if len(reply.EphemeralPubKey) != 32 { - return nil, errors.New("ssh: peer's curve25519 public value has wrong length") - } - - var servPub, secret [32]byte - copy(servPub[:], reply.EphemeralPubKey) - curve25519.ScalarMult(&secret, &kp.priv, &servPub) - if subtle.ConstantTimeCompare(secret[:], curve25519Zeros[:]) == 1 { - return nil, errors.New("ssh: peer's curve25519 public value has wrong order") - } - - h := crypto.SHA256.New() - magics.write(h) - writeString(h, reply.HostKey) - writeString(h, kp.pub[:]) - writeString(h, reply.EphemeralPubKey) - - ki := new(big.Int).SetBytes(secret[:]) - K := make([]byte, intLength(ki)) - marshalInt(K, ki) - h.Write(K) - - return &kexResult{ - H: h.Sum(nil), - K: K, - HostKey: reply.HostKey, - Signature: reply.Signature, - Hash: crypto.SHA256, - }, nil -} - -func (kex *curve25519sha256) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) { - packet, err := c.readPacket() - if err != nil { - return - } - var kexInit kexECDHInitMsg - if err = Unmarshal(packet, &kexInit); err != nil { - return - } - - if len(kexInit.ClientPubKey) != 32 { - return nil, errors.New("ssh: peer's curve25519 public value has wrong length") - } - - var kp curve25519KeyPair - if err := kp.generate(rand); err != nil { - return nil, err - } - - var clientPub, secret [32]byte - copy(clientPub[:], kexInit.ClientPubKey) - curve25519.ScalarMult(&secret, &kp.priv, &clientPub) - if subtle.ConstantTimeCompare(secret[:], curve25519Zeros[:]) == 1 { - return nil, errors.New("ssh: peer's curve25519 public value has wrong order") - } - - hostKeyBytes := priv.PublicKey().Marshal() - - h := crypto.SHA256.New() - magics.write(h) - writeString(h, hostKeyBytes) - writeString(h, kexInit.ClientPubKey) - writeString(h, kp.pub[:]) - - ki := new(big.Int).SetBytes(secret[:]) - K := make([]byte, intLength(ki)) - marshalInt(K, ki) - h.Write(K) - - H := h.Sum(nil) - - sig, err := signAndMarshal(priv, rand, H) - if err != nil { - return nil, err - } - - reply := kexECDHReplyMsg{ - EphemeralPubKey: kp.pub[:], - HostKey: hostKeyBytes, - Signature: sig, - } - if err := c.writePacket(Marshal(&reply)); err != nil { - return nil, err - } - return &kexResult{ - H: H, - K: K, - HostKey: hostKeyBytes, - Signature: sig, - Hash: crypto.SHA256, - }, nil -} - -// dhGEXSHA implements the diffie-hellman-group-exchange-sha1 and -// diffie-hellman-group-exchange-sha256 key agreement protocols, -// as described in RFC 4419 -type dhGEXSHA struct { - g, p *big.Int - hashFunc crypto.Hash -} - -const ( - dhGroupExchangeMinimumBits = 2048 - dhGroupExchangePreferredBits = 2048 - dhGroupExchangeMaximumBits = 8192 -) - -func (gex *dhGEXSHA) diffieHellman(theirPublic, myPrivate *big.Int) (*big.Int, error) { - if theirPublic.Sign() <= 0 || theirPublic.Cmp(gex.p) >= 0 { - return nil, fmt.Errorf("ssh: DH parameter out of bounds") - } - return new(big.Int).Exp(theirPublic, myPrivate, gex.p), nil -} - -func (gex dhGEXSHA) Client(c packetConn, randSource io.Reader, magics 
*handshakeMagics) (*kexResult, error) { - // Send GexRequest - kexDHGexRequest := kexDHGexRequestMsg{ - MinBits: dhGroupExchangeMinimumBits, - PreferedBits: dhGroupExchangePreferredBits, - MaxBits: dhGroupExchangeMaximumBits, - } - if err := c.writePacket(Marshal(&kexDHGexRequest)); err != nil { - return nil, err - } - - // Receive GexGroup - packet, err := c.readPacket() - if err != nil { - return nil, err - } - - var kexDHGexGroup kexDHGexGroupMsg - if err = Unmarshal(packet, &kexDHGexGroup); err != nil { - return nil, err - } - - // reject if p's bit length < dhGroupExchangeMinimumBits or > dhGroupExchangeMaximumBits - if kexDHGexGroup.P.BitLen() < dhGroupExchangeMinimumBits || kexDHGexGroup.P.BitLen() > dhGroupExchangeMaximumBits { - return nil, fmt.Errorf("ssh: server-generated gex p is out of range (%d bits)", kexDHGexGroup.P.BitLen()) - } - - gex.p = kexDHGexGroup.P - gex.g = kexDHGexGroup.G - - // Check if g is safe by verifing that g > 1 and g < p - 1 - one := big.NewInt(1) - var pMinusOne = &big.Int{} - pMinusOne.Sub(gex.p, one) - if gex.g.Cmp(one) != 1 && gex.g.Cmp(pMinusOne) != -1 { - return nil, fmt.Errorf("ssh: server provided gex g is not safe") - } - - // Send GexInit - var pHalf = &big.Int{} - pHalf.Rsh(gex.p, 1) - x, err := rand.Int(randSource, pHalf) - if err != nil { - return nil, err - } - X := new(big.Int).Exp(gex.g, x, gex.p) - kexDHGexInit := kexDHGexInitMsg{ - X: X, - } - if err := c.writePacket(Marshal(&kexDHGexInit)); err != nil { - return nil, err - } - - // Receive GexReply - packet, err = c.readPacket() - if err != nil { - return nil, err - } - - var kexDHGexReply kexDHGexReplyMsg - if err = Unmarshal(packet, &kexDHGexReply); err != nil { - return nil, err - } - - kInt, err := gex.diffieHellman(kexDHGexReply.Y, x) - if err != nil { - return nil, err - } - - // Check if k is safe by verifing that k > 1 and k < p - 1 - if kInt.Cmp(one) != 1 && kInt.Cmp(pMinusOne) != -1 { - return nil, fmt.Errorf("ssh: derived k is not safe") - } - - h := gex.hashFunc.New() - magics.write(h) - writeString(h, kexDHGexReply.HostKey) - binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMinimumBits)) - binary.Write(h, binary.BigEndian, uint32(dhGroupExchangePreferredBits)) - binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMaximumBits)) - writeInt(h, gex.p) - writeInt(h, gex.g) - writeInt(h, X) - writeInt(h, kexDHGexReply.Y) - K := make([]byte, intLength(kInt)) - marshalInt(K, kInt) - h.Write(K) - - return &kexResult{ - H: h.Sum(nil), - K: K, - HostKey: kexDHGexReply.HostKey, - Signature: kexDHGexReply.Signature, - Hash: gex.hashFunc, - }, nil -} - -// Server half implementation of the Diffie Hellman Key Exchange with SHA1 and SHA256. -// -// This is a minimal implementation to satisfy the automated tests. -func (gex dhGEXSHA) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) { - // Receive GexRequest - packet, err := c.readPacket() - if err != nil { - return - } - var kexDHGexRequest kexDHGexRequestMsg - if err = Unmarshal(packet, &kexDHGexRequest); err != nil { - return - } - - // smoosh the user's preferred size into our own limits - if kexDHGexRequest.PreferedBits > dhGroupExchangeMaximumBits { - kexDHGexRequest.PreferedBits = dhGroupExchangeMaximumBits - } - if kexDHGexRequest.PreferedBits < dhGroupExchangeMinimumBits { - kexDHGexRequest.PreferedBits = dhGroupExchangeMinimumBits - } - // fix min/max if they're inconsistent. 
technically, we could just pout - // and hang up, but there's no harm in giving them the benefit of the - // doubt and just picking a bitsize for them. - if kexDHGexRequest.MinBits > kexDHGexRequest.PreferedBits { - kexDHGexRequest.MinBits = kexDHGexRequest.PreferedBits - } - if kexDHGexRequest.MaxBits < kexDHGexRequest.PreferedBits { - kexDHGexRequest.MaxBits = kexDHGexRequest.PreferedBits - } - - // Send GexGroup - // This is the group called diffie-hellman-group14-sha1 in RFC - // 4253 and Oakley Group 14 in RFC 3526. - p, _ := new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF", 16) - gex.p = p - gex.g = big.NewInt(2) - - kexDHGexGroup := kexDHGexGroupMsg{ - P: gex.p, - G: gex.g, - } - if err := c.writePacket(Marshal(&kexDHGexGroup)); err != nil { - return nil, err - } - - // Receive GexInit - packet, err = c.readPacket() - if err != nil { - return - } - var kexDHGexInit kexDHGexInitMsg - if err = Unmarshal(packet, &kexDHGexInit); err != nil { - return - } - - var pHalf = &big.Int{} - pHalf.Rsh(gex.p, 1) - - y, err := rand.Int(randSource, pHalf) - if err != nil { - return - } - - Y := new(big.Int).Exp(gex.g, y, gex.p) - kInt, err := gex.diffieHellman(kexDHGexInit.X, y) - if err != nil { - return nil, err - } - - hostKeyBytes := priv.PublicKey().Marshal() - - h := gex.hashFunc.New() - magics.write(h) - writeString(h, hostKeyBytes) - binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMinimumBits)) - binary.Write(h, binary.BigEndian, uint32(dhGroupExchangePreferredBits)) - binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMaximumBits)) - writeInt(h, gex.p) - writeInt(h, gex.g) - writeInt(h, kexDHGexInit.X) - writeInt(h, Y) - - K := make([]byte, intLength(kInt)) - marshalInt(K, kInt) - h.Write(K) - - H := h.Sum(nil) - - // H is already a hash, but the hostkey signing will apply its - // own key-specific hash algorithm. - sig, err := signAndMarshal(priv, randSource, H) - if err != nil { - return nil, err - } - - kexDHGexReply := kexDHGexReplyMsg{ - HostKey: hostKeyBytes, - Y: Y, - Signature: sig, - } - packet = Marshal(&kexDHGexReply) - - err = c.writePacket(packet) - - return &kexResult{ - H: H, - K: K, - HostKey: hostKeyBytes, - Signature: sig, - Hash: gex.hashFunc, - }, err -} diff --git a/vendor/golang.org/x/crypto/ssh/keys.go b/vendor/golang.org/x/crypto/ssh/keys.go deleted file mode 100644 index 31f26349..00000000 --- a/vendor/golang.org/x/crypto/ssh/keys.go +++ /dev/null @@ -1,1474 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
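For users of this package, the kexAlgoMap registrations above surface as the names accepted by ssh.Config.KeyExchanges. A minimal client sketch; the host, credentials, and the insecure host-key callback are placeholders, and the server must offer the listed algorithms:

package main

import (
	"log"

	"golang.org/x/crypto/ssh"
)

func main() {
	cfg := &ssh.ClientConfig{
		User: "demo",
		Auth: []ssh.AuthMethod{ssh.Password("secret")},
		Config: ssh.Config{
			// Names must match the kexAlgoMap keys registered above.
			KeyExchanges: []string{
				"curve25519-sha256@libssh.org",
				"diffie-hellman-group-exchange-sha256",
			},
		},
		// Placeholder only; real code should verify host keys.
		HostKeyCallback: ssh.InsecureIgnoreHostKey(),
	}
	client, err := ssh.Dial("tcp", "example.com:22", cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
}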
- -package ssh - -import ( - "bytes" - "crypto" - "crypto/aes" - "crypto/cipher" - "crypto/dsa" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/md5" - "crypto/rsa" - "crypto/sha256" - "crypto/x509" - "encoding/asn1" - "encoding/base64" - "encoding/hex" - "encoding/pem" - "errors" - "fmt" - "io" - "math/big" - "strings" - - "golang.org/x/crypto/ed25519" - "golang.org/x/crypto/ssh/internal/bcrypt_pbkdf" -) - -// These constants represent the algorithm names for key types supported by this -// package. -const ( - KeyAlgoRSA = "ssh-rsa" - KeyAlgoDSA = "ssh-dss" - KeyAlgoECDSA256 = "ecdsa-sha2-nistp256" - KeyAlgoSKECDSA256 = "sk-ecdsa-sha2-nistp256@openssh.com" - KeyAlgoECDSA384 = "ecdsa-sha2-nistp384" - KeyAlgoECDSA521 = "ecdsa-sha2-nistp521" - KeyAlgoED25519 = "ssh-ed25519" - KeyAlgoSKED25519 = "sk-ssh-ed25519@openssh.com" -) - -// These constants represent non-default signature algorithms that are supported -// as algorithm parameters to AlgorithmSigner.SignWithAlgorithm methods. See -// [PROTOCOL.agent] section 4.5.1 and -// https://tools.ietf.org/html/draft-ietf-curdle-rsa-sha2-10 -const ( - SigAlgoRSA = "ssh-rsa" - SigAlgoRSASHA2256 = "rsa-sha2-256" - SigAlgoRSASHA2512 = "rsa-sha2-512" -) - -// parsePubKey parses a public key of the given algorithm. -// Use ParsePublicKey for keys with prepended algorithm. -func parsePubKey(in []byte, algo string) (pubKey PublicKey, rest []byte, err error) { - switch algo { - case KeyAlgoRSA: - return parseRSA(in) - case KeyAlgoDSA: - return parseDSA(in) - case KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521: - return parseECDSA(in) - case KeyAlgoSKECDSA256: - return parseSKECDSA(in) - case KeyAlgoED25519: - return parseED25519(in) - case KeyAlgoSKED25519: - return parseSKEd25519(in) - case CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoSKECDSA256v01, CertAlgoED25519v01, CertAlgoSKED25519v01: - cert, err := parseCert(in, certToPrivAlgo(algo)) - if err != nil { - return nil, nil, err - } - return cert, nil, nil - } - return nil, nil, fmt.Errorf("ssh: unknown key algorithm: %v", algo) -} - -// parseAuthorizedKey parses a public key in OpenSSH authorized_keys format -// (see sshd(8) manual page) once the options and key type fields have been -// removed. -func parseAuthorizedKey(in []byte) (out PublicKey, comment string, err error) { - in = bytes.TrimSpace(in) - - i := bytes.IndexAny(in, " \t") - if i == -1 { - i = len(in) - } - base64Key := in[:i] - - key := make([]byte, base64.StdEncoding.DecodedLen(len(base64Key))) - n, err := base64.StdEncoding.Decode(key, base64Key) - if err != nil { - return nil, "", err - } - key = key[:n] - out, err = ParsePublicKey(key) - if err != nil { - return nil, "", err - } - comment = string(bytes.TrimSpace(in[i:])) - return out, comment, nil -} - -// ParseKnownHosts parses an entry in the format of the known_hosts file. -// -// The known_hosts format is documented in the sshd(8) manual page. This -// function will parse a single entry from in. On successful return, marker -// will contain the optional marker value (i.e. "cert-authority" or "revoked") -// or else be empty, hosts will contain the hosts that this entry matches, -// pubKey will contain the public key and comment will contain any trailing -// comment at the end of the line. See the sshd(8) manual page for the various -// forms that a host string can take. -// -// The unparsed remainder of the input will be returned in rest. This function -// can be called repeatedly to parse multiple entries. 
-//
-// If no entries were found in the input then err will be io.EOF. Otherwise a
-// non-nil err value indicates a parse error.
-func ParseKnownHosts(in []byte) (marker string, hosts []string, pubKey PublicKey, comment string, rest []byte, err error) {
-	for len(in) > 0 {
-		end := bytes.IndexByte(in, '\n')
-		if end != -1 {
-			rest = in[end+1:]
-			in = in[:end]
-		} else {
-			rest = nil
-		}
-
-		end = bytes.IndexByte(in, '\r')
-		if end != -1 {
-			in = in[:end]
-		}
-
-		in = bytes.TrimSpace(in)
-		if len(in) == 0 || in[0] == '#' {
-			in = rest
-			continue
-		}
-
-		i := bytes.IndexAny(in, " \t")
-		if i == -1 {
-			in = rest
-			continue
-		}
-
-		// Strip out the beginning of the known_host key.
-		// This is either an optional marker or a (set of) hostname(s).
-		keyFields := bytes.Fields(in)
-		if len(keyFields) < 3 || len(keyFields) > 5 {
-			return "", nil, nil, "", nil, errors.New("ssh: invalid entry in known_hosts data")
-		}
-
-		// keyFields[0] is either "@cert-authority", "@revoked" or a comma separated
-		// list of hosts
-		marker := ""
-		if keyFields[0][0] == '@' {
-			marker = string(keyFields[0][1:])
-			keyFields = keyFields[1:]
-		}
-
-		hosts := string(keyFields[0])
-		// keyFields[1] contains the key type (e.g. “ssh-rsa”).
-		// However, that information is duplicated inside the
-		// base64-encoded key and so is ignored here.
-
-		key := bytes.Join(keyFields[2:], []byte(" "))
-		if pubKey, comment, err = parseAuthorizedKey(key); err != nil {
-			return "", nil, nil, "", nil, err
-		}
-
-		return marker, strings.Split(hosts, ","), pubKey, comment, rest, nil
-	}
-
-	return "", nil, nil, "", nil, io.EOF
-}
-
-// ParseAuthorizedKeys parses a public key from an authorized_keys
-// file used in OpenSSH according to the sshd(8) manual page.
-func ParseAuthorizedKey(in []byte) (out PublicKey, comment string, options []string, rest []byte, err error) {
-	for len(in) > 0 {
-		end := bytes.IndexByte(in, '\n')
-		if end != -1 {
-			rest = in[end+1:]
-			in = in[:end]
-		} else {
-			rest = nil
-		}
-
-		end = bytes.IndexByte(in, '\r')
-		if end != -1 {
-			in = in[:end]
-		}
-
-		in = bytes.TrimSpace(in)
-		if len(in) == 0 || in[0] == '#' {
-			in = rest
-			continue
-		}
-
-		i := bytes.IndexAny(in, " \t")
-		if i == -1 {
-			in = rest
-			continue
-		}
-
-		if out, comment, err = parseAuthorizedKey(in[i:]); err == nil {
-			return out, comment, options, rest, nil
-		}
-
-		// No key type recognised. Maybe there's an options field at
-		// the beginning.
-		var b byte
-		inQuote := false
-		var candidateOptions []string
-		optionStart := 0
-		for i, b = range in {
-			isEnd := !inQuote && (b == ' ' || b == '\t')
-			if (b == ',' && !inQuote) || isEnd {
-				if i-optionStart > 0 {
-					candidateOptions = append(candidateOptions, string(in[optionStart:i]))
-				}
-				optionStart = i + 1
-			}
-			if isEnd {
-				break
-			}
-			if b == '"' && (i == 0 || (i > 0 && in[i-1] != '\\')) {
-				inQuote = !inQuote
-			}
-		}
-		for i < len(in) && (in[i] == ' ' || in[i] == '\t') {
-			i++
-		}
-		if i == len(in) {
-			// Invalid line: unmatched quote
-			in = rest
-			continue
-		}
-
-		in = in[i:]
-		i = bytes.IndexAny(in, " \t")
-		if i == -1 {
-			in = rest
-			continue
-		}
-
-		if out, comment, err = parseAuthorizedKey(in[i:]); err == nil {
-			options = candidateOptions
-			return out, comment, options, rest, nil
-		}
-
-		in = rest
-		continue
-	}
-
-	return nil, "", nil, nil, errors.New("ssh: no key found")
-}
-
-// ParsePublicKey parses an SSH public key formatted for use in
-// the SSH wire protocol according to RFC 4253, section 6.6.
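As the doc comment above says, ParseKnownHosts consumes one entry per call and signals exhaustion with io.EOF; a minimal sketch of the driving loop (the file path is a placeholder):

package main

import (
	"fmt"
	"io"
	"os"

	"golang.org/x/crypto/ssh"
)

func main() {
	rest, err := os.ReadFile("/etc/ssh/ssh_known_hosts") // placeholder path
	if err != nil {
		panic(err)
	}
	for {
		marker, hosts, pubKey, comment, remaining, err := ssh.ParseKnownHosts(rest)
		if err == io.EOF {
			break // no more entries
		}
		if err != nil {
			panic(err)
		}
		rest = remaining
		fmt.Println(marker, hosts, pubKey.Type(), comment)
	}
}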
-func ParsePublicKey(in []byte) (out PublicKey, err error) { - algo, in, ok := parseString(in) - if !ok { - return nil, errShortRead - } - var rest []byte - out, rest, err = parsePubKey(in, string(algo)) - if len(rest) > 0 { - return nil, errors.New("ssh: trailing junk in public key") - } - - return out, err -} - -// MarshalAuthorizedKey serializes key for inclusion in an OpenSSH -// authorized_keys file. The return value ends with newline. -func MarshalAuthorizedKey(key PublicKey) []byte { - b := &bytes.Buffer{} - b.WriteString(key.Type()) - b.WriteByte(' ') - e := base64.NewEncoder(base64.StdEncoding, b) - e.Write(key.Marshal()) - e.Close() - b.WriteByte('\n') - return b.Bytes() -} - -// PublicKey is an abstraction of different types of public keys. -type PublicKey interface { - // Type returns the key's type, e.g. "ssh-rsa". - Type() string - - // Marshal returns the serialized key data in SSH wire format, - // with the name prefix. To unmarshal the returned data, use - // the ParsePublicKey function. - Marshal() []byte - - // Verify that sig is a signature on the given data using this - // key. This function will hash the data appropriately first. - Verify(data []byte, sig *Signature) error -} - -// CryptoPublicKey, if implemented by a PublicKey, -// returns the underlying crypto.PublicKey form of the key. -type CryptoPublicKey interface { - CryptoPublicKey() crypto.PublicKey -} - -// A Signer can create signatures that verify against a public key. -type Signer interface { - // PublicKey returns an associated PublicKey instance. - PublicKey() PublicKey - - // Sign returns raw signature for the given data. This method - // will apply the hash specified for the keytype to the data. - Sign(rand io.Reader, data []byte) (*Signature, error) -} - -// A AlgorithmSigner is a Signer that also supports specifying a specific -// algorithm to use for signing. -type AlgorithmSigner interface { - Signer - - // SignWithAlgorithm is like Signer.Sign, but allows specification of a - // non-default signing algorithm. See the SigAlgo* constants in this - // package for signature algorithms supported by this package. Callers may - // pass an empty string for the algorithm in which case the AlgorithmSigner - // will use its default algorithm. - SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) -} - -type rsaPublicKey rsa.PublicKey - -func (r *rsaPublicKey) Type() string { - return "ssh-rsa" -} - -// parseRSA parses an RSA key according to RFC 4253, section 6.6. -func parseRSA(in []byte) (out PublicKey, rest []byte, err error) { - var w struct { - E *big.Int - N *big.Int - Rest []byte `ssh:"rest"` - } - if err := Unmarshal(in, &w); err != nil { - return nil, nil, err - } - - if w.E.BitLen() > 24 { - return nil, nil, errors.New("ssh: exponent too large") - } - e := w.E.Int64() - if e < 3 || e&1 == 0 { - return nil, nil, errors.New("ssh: incorrect exponent") - } - - var key rsa.PublicKey - key.E = int(e) - key.N = w.N - return (*rsaPublicKey)(&key), w.Rest, nil -} - -func (r *rsaPublicKey) Marshal() []byte { - e := new(big.Int).SetInt64(int64(r.E)) - // RSA publickey struct layout should match the struct used by - // parseRSACert in the x/crypto/ssh/agent package. 
- wirekey := struct { - Name string - E *big.Int - N *big.Int - }{ - KeyAlgoRSA, - e, - r.N, - } - return Marshal(&wirekey) -} - -func (r *rsaPublicKey) Verify(data []byte, sig *Signature) error { - var hash crypto.Hash - switch sig.Format { - case SigAlgoRSA: - hash = crypto.SHA1 - case SigAlgoRSASHA2256: - hash = crypto.SHA256 - case SigAlgoRSASHA2512: - hash = crypto.SHA512 - default: - return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, r.Type()) - } - h := hash.New() - h.Write(data) - digest := h.Sum(nil) - return rsa.VerifyPKCS1v15((*rsa.PublicKey)(r), hash, digest, sig.Blob) -} - -func (r *rsaPublicKey) CryptoPublicKey() crypto.PublicKey { - return (*rsa.PublicKey)(r) -} - -type dsaPublicKey dsa.PublicKey - -func (k *dsaPublicKey) Type() string { - return "ssh-dss" -} - -func checkDSAParams(param *dsa.Parameters) error { - // SSH specifies FIPS 186-2, which only provided a single size - // (1024 bits) DSA key. FIPS 186-3 allows for larger key - // sizes, which would confuse SSH. - if l := param.P.BitLen(); l != 1024 { - return fmt.Errorf("ssh: unsupported DSA key size %d", l) - } - - return nil -} - -// parseDSA parses an DSA key according to RFC 4253, section 6.6. -func parseDSA(in []byte) (out PublicKey, rest []byte, err error) { - var w struct { - P, Q, G, Y *big.Int - Rest []byte `ssh:"rest"` - } - if err := Unmarshal(in, &w); err != nil { - return nil, nil, err - } - - param := dsa.Parameters{ - P: w.P, - Q: w.Q, - G: w.G, - } - if err := checkDSAParams(¶m); err != nil { - return nil, nil, err - } - - key := &dsaPublicKey{ - Parameters: param, - Y: w.Y, - } - return key, w.Rest, nil -} - -func (k *dsaPublicKey) Marshal() []byte { - // DSA publickey struct layout should match the struct used by - // parseDSACert in the x/crypto/ssh/agent package. - w := struct { - Name string - P, Q, G, Y *big.Int - }{ - k.Type(), - k.P, - k.Q, - k.G, - k.Y, - } - - return Marshal(&w) -} - -func (k *dsaPublicKey) Verify(data []byte, sig *Signature) error { - if sig.Format != k.Type() { - return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) - } - h := crypto.SHA1.New() - h.Write(data) - digest := h.Sum(nil) - - // Per RFC 4253, section 6.6, - // The value for 'dss_signature_blob' is encoded as a string containing - // r, followed by s (which are 160-bit integers, without lengths or - // padding, unsigned, and in network byte order). - // For DSS purposes, sig.Blob should be exactly 40 bytes in length. 
- if len(sig.Blob) != 40 { - return errors.New("ssh: DSA signature parse error") - } - r := new(big.Int).SetBytes(sig.Blob[:20]) - s := new(big.Int).SetBytes(sig.Blob[20:]) - if dsa.Verify((*dsa.PublicKey)(k), digest, r, s) { - return nil - } - return errors.New("ssh: signature did not verify") -} - -func (k *dsaPublicKey) CryptoPublicKey() crypto.PublicKey { - return (*dsa.PublicKey)(k) -} - -type dsaPrivateKey struct { - *dsa.PrivateKey -} - -func (k *dsaPrivateKey) PublicKey() PublicKey { - return (*dsaPublicKey)(&k.PrivateKey.PublicKey) -} - -func (k *dsaPrivateKey) Sign(rand io.Reader, data []byte) (*Signature, error) { - return k.SignWithAlgorithm(rand, data, "") -} - -func (k *dsaPrivateKey) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) { - if algorithm != "" && algorithm != k.PublicKey().Type() { - return nil, fmt.Errorf("ssh: unsupported signature algorithm %s", algorithm) - } - - h := crypto.SHA1.New() - h.Write(data) - digest := h.Sum(nil) - r, s, err := dsa.Sign(rand, k.PrivateKey, digest) - if err != nil { - return nil, err - } - - sig := make([]byte, 40) - rb := r.Bytes() - sb := s.Bytes() - - copy(sig[20-len(rb):20], rb) - copy(sig[40-len(sb):], sb) - - return &Signature{ - Format: k.PublicKey().Type(), - Blob: sig, - }, nil -} - -type ecdsaPublicKey ecdsa.PublicKey - -func (k *ecdsaPublicKey) Type() string { - return "ecdsa-sha2-" + k.nistID() -} - -func (k *ecdsaPublicKey) nistID() string { - switch k.Params().BitSize { - case 256: - return "nistp256" - case 384: - return "nistp384" - case 521: - return "nistp521" - } - panic("ssh: unsupported ecdsa key size") -} - -type ed25519PublicKey ed25519.PublicKey - -func (k ed25519PublicKey) Type() string { - return KeyAlgoED25519 -} - -func parseED25519(in []byte) (out PublicKey, rest []byte, err error) { - var w struct { - KeyBytes []byte - Rest []byte `ssh:"rest"` - } - - if err := Unmarshal(in, &w); err != nil { - return nil, nil, err - } - - if l := len(w.KeyBytes); l != ed25519.PublicKeySize { - return nil, nil, fmt.Errorf("invalid size %d for Ed25519 public key", l) - } - - return ed25519PublicKey(w.KeyBytes), w.Rest, nil -} - -func (k ed25519PublicKey) Marshal() []byte { - w := struct { - Name string - KeyBytes []byte - }{ - KeyAlgoED25519, - []byte(k), - } - return Marshal(&w) -} - -func (k ed25519PublicKey) Verify(b []byte, sig *Signature) error { - if sig.Format != k.Type() { - return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) - } - if l := len(k); l != ed25519.PublicKeySize { - return fmt.Errorf("ssh: invalid size %d for Ed25519 public key", l) - } - - if ok := ed25519.Verify(ed25519.PublicKey(k), b, sig.Blob); !ok { - return errors.New("ssh: signature did not verify") - } - - return nil -} - -func (k ed25519PublicKey) CryptoPublicKey() crypto.PublicKey { - return ed25519.PublicKey(k) -} - -func supportedEllipticCurve(curve elliptic.Curve) bool { - return curve == elliptic.P256() || curve == elliptic.P384() || curve == elliptic.P521() -} - -// ecHash returns the hash to match the given elliptic curve, see RFC -// 5656, section 6.2.1 -func ecHash(curve elliptic.Curve) crypto.Hash { - bitSize := curve.Params().BitSize - switch { - case bitSize <= 256: - return crypto.SHA256 - case bitSize <= 384: - return crypto.SHA384 - } - return crypto.SHA512 -} - -// parseECDSA parses an ECDSA key according to RFC 5656, section 3.1. 
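A short sketch of the Sign/Verify pairing these key types implement, using a freshly generated Ed25519 key (Verify applies the key type's own hashing rules, signing the message directly in the Ed25519 case):

package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/ssh"
)

func main() {
	_, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}
	signer, err := ssh.NewSignerFromKey(priv) // priv implements crypto.Signer
	if err != nil {
		panic(err)
	}
	data := []byte("payload")
	sig, err := signer.Sign(rand.Reader, data)
	if err != nil {
		panic(err)
	}
	// Round-trip: the public key side rejects anything that does not verify,
	// as in ed25519PublicKey.Verify above.
	if err := signer.PublicKey().Verify(data, sig); err != nil {
		panic(err)
	}
	fmt.Println(signer.PublicKey().Type(), "signature verified")
}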
-func parseECDSA(in []byte) (out PublicKey, rest []byte, err error) { - var w struct { - Curve string - KeyBytes []byte - Rest []byte `ssh:"rest"` - } - - if err := Unmarshal(in, &w); err != nil { - return nil, nil, err - } - - key := new(ecdsa.PublicKey) - - switch w.Curve { - case "nistp256": - key.Curve = elliptic.P256() - case "nistp384": - key.Curve = elliptic.P384() - case "nistp521": - key.Curve = elliptic.P521() - default: - return nil, nil, errors.New("ssh: unsupported curve") - } - - key.X, key.Y = elliptic.Unmarshal(key.Curve, w.KeyBytes) - if key.X == nil || key.Y == nil { - return nil, nil, errors.New("ssh: invalid curve point") - } - return (*ecdsaPublicKey)(key), w.Rest, nil -} - -func (k *ecdsaPublicKey) Marshal() []byte { - // See RFC 5656, section 3.1. - keyBytes := elliptic.Marshal(k.Curve, k.X, k.Y) - // ECDSA publickey struct layout should match the struct used by - // parseECDSACert in the x/crypto/ssh/agent package. - w := struct { - Name string - ID string - Key []byte - }{ - k.Type(), - k.nistID(), - keyBytes, - } - - return Marshal(&w) -} - -func (k *ecdsaPublicKey) Verify(data []byte, sig *Signature) error { - if sig.Format != k.Type() { - return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) - } - - h := ecHash(k.Curve).New() - h.Write(data) - digest := h.Sum(nil) - - // Per RFC 5656, section 3.1.2, - // The ecdsa_signature_blob value has the following specific encoding: - // mpint r - // mpint s - var ecSig struct { - R *big.Int - S *big.Int - } - - if err := Unmarshal(sig.Blob, &ecSig); err != nil { - return err - } - - if ecdsa.Verify((*ecdsa.PublicKey)(k), digest, ecSig.R, ecSig.S) { - return nil - } - return errors.New("ssh: signature did not verify") -} - -func (k *ecdsaPublicKey) CryptoPublicKey() crypto.PublicKey { - return (*ecdsa.PublicKey)(k) -} - -// skFields holds the additional fields present in U2F/FIDO2 signatures. -// See openssh/PROTOCOL.u2f 'SSH U2F Signatures' for details. -type skFields struct { - // Flags contains U2F/FIDO2 flags such as 'user present' - Flags byte - // Counter is a monotonic signature counter which can be - // used to detect concurrent use of a private key, should - // it be extracted from hardware. - Counter uint32 -} - -type skECDSAPublicKey struct { - // application is a URL-like string, typically "ssh:" for SSH. - // see openssh/PROTOCOL.u2f for details. - application string - ecdsa.PublicKey -} - -func (k *skECDSAPublicKey) Type() string { - return KeyAlgoSKECDSA256 -} - -func (k *skECDSAPublicKey) nistID() string { - return "nistp256" -} - -func parseSKECDSA(in []byte) (out PublicKey, rest []byte, err error) { - var w struct { - Curve string - KeyBytes []byte - Application string - Rest []byte `ssh:"rest"` - } - - if err := Unmarshal(in, &w); err != nil { - return nil, nil, err - } - - key := new(skECDSAPublicKey) - key.application = w.Application - - if w.Curve != "nistp256" { - return nil, nil, errors.New("ssh: unsupported curve") - } - key.Curve = elliptic.P256() - - key.X, key.Y = elliptic.Unmarshal(key.Curve, w.KeyBytes) - if key.X == nil || key.Y == nil { - return nil, nil, errors.New("ssh: invalid curve point") - } - - return key, w.Rest, nil -} - -func (k *skECDSAPublicKey) Marshal() []byte { - // See RFC 5656, section 3.1. 
- keyBytes := elliptic.Marshal(k.Curve, k.X, k.Y) - w := struct { - Name string - ID string - Key []byte - Application string - }{ - k.Type(), - k.nistID(), - keyBytes, - k.application, - } - - return Marshal(&w) -} - -func (k *skECDSAPublicKey) Verify(data []byte, sig *Signature) error { - if sig.Format != k.Type() { - return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) - } - - h := ecHash(k.Curve).New() - h.Write([]byte(k.application)) - appDigest := h.Sum(nil) - - h.Reset() - h.Write(data) - dataDigest := h.Sum(nil) - - var ecSig struct { - R *big.Int - S *big.Int - } - if err := Unmarshal(sig.Blob, &ecSig); err != nil { - return err - } - - var skf skFields - if err := Unmarshal(sig.Rest, &skf); err != nil { - return err - } - - blob := struct { - ApplicationDigest []byte `ssh:"rest"` - Flags byte - Counter uint32 - MessageDigest []byte `ssh:"rest"` - }{ - appDigest, - skf.Flags, - skf.Counter, - dataDigest, - } - - original := Marshal(blob) - - h.Reset() - h.Write(original) - digest := h.Sum(nil) - - if ecdsa.Verify((*ecdsa.PublicKey)(&k.PublicKey), digest, ecSig.R, ecSig.S) { - return nil - } - return errors.New("ssh: signature did not verify") -} - -type skEd25519PublicKey struct { - // application is a URL-like string, typically "ssh:" for SSH. - // see openssh/PROTOCOL.u2f for details. - application string - ed25519.PublicKey -} - -func (k *skEd25519PublicKey) Type() string { - return KeyAlgoSKED25519 -} - -func parseSKEd25519(in []byte) (out PublicKey, rest []byte, err error) { - var w struct { - KeyBytes []byte - Application string - Rest []byte `ssh:"rest"` - } - - if err := Unmarshal(in, &w); err != nil { - return nil, nil, err - } - - if l := len(w.KeyBytes); l != ed25519.PublicKeySize { - return nil, nil, fmt.Errorf("invalid size %d for Ed25519 public key", l) - } - - key := new(skEd25519PublicKey) - key.application = w.Application - key.PublicKey = ed25519.PublicKey(w.KeyBytes) - - return key, w.Rest, nil -} - -func (k *skEd25519PublicKey) Marshal() []byte { - w := struct { - Name string - KeyBytes []byte - Application string - }{ - KeyAlgoSKED25519, - []byte(k.PublicKey), - k.application, - } - return Marshal(&w) -} - -func (k *skEd25519PublicKey) Verify(data []byte, sig *Signature) error { - if sig.Format != k.Type() { - return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) - } - if l := len(k.PublicKey); l != ed25519.PublicKeySize { - return fmt.Errorf("invalid size %d for Ed25519 public key", l) - } - - h := sha256.New() - h.Write([]byte(k.application)) - appDigest := h.Sum(nil) - - h.Reset() - h.Write(data) - dataDigest := h.Sum(nil) - - var edSig struct { - Signature []byte `ssh:"rest"` - } - - if err := Unmarshal(sig.Blob, &edSig); err != nil { - return err - } - - var skf skFields - if err := Unmarshal(sig.Rest, &skf); err != nil { - return err - } - - blob := struct { - ApplicationDigest []byte `ssh:"rest"` - Flags byte - Counter uint32 - MessageDigest []byte `ssh:"rest"` - }{ - appDigest, - skf.Flags, - skf.Counter, - dataDigest, - } - - original := Marshal(blob) - - if ok := ed25519.Verify(k.PublicKey, original, edSig.Signature); !ok { - return errors.New("ssh: signature did not verify") - } - - return nil -} - -// NewSignerFromKey takes an *rsa.PrivateKey, *dsa.PrivateKey, -// *ecdsa.PrivateKey or any other crypto.Signer and returns a -// corresponding Signer instance. ECDSA keys must use P-256, P-384 or -// P-521. DSA keys must use parameter size L1024N160. 
-func NewSignerFromKey(key interface{}) (Signer, error) { - switch key := key.(type) { - case crypto.Signer: - return NewSignerFromSigner(key) - case *dsa.PrivateKey: - return newDSAPrivateKey(key) - default: - return nil, fmt.Errorf("ssh: unsupported key type %T", key) - } -} - -func newDSAPrivateKey(key *dsa.PrivateKey) (Signer, error) { - if err := checkDSAParams(&key.PublicKey.Parameters); err != nil { - return nil, err - } - - return &dsaPrivateKey{key}, nil -} - -type wrappedSigner struct { - signer crypto.Signer - pubKey PublicKey -} - -// NewSignerFromSigner takes any crypto.Signer implementation and -// returns a corresponding Signer interface. This can be used, for -// example, with keys kept in hardware modules. -func NewSignerFromSigner(signer crypto.Signer) (Signer, error) { - pubKey, err := NewPublicKey(signer.Public()) - if err != nil { - return nil, err - } - - return &wrappedSigner{signer, pubKey}, nil -} - -func (s *wrappedSigner) PublicKey() PublicKey { - return s.pubKey -} - -func (s *wrappedSigner) Sign(rand io.Reader, data []byte) (*Signature, error) { - return s.SignWithAlgorithm(rand, data, "") -} - -func (s *wrappedSigner) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) { - var hashFunc crypto.Hash - - if _, ok := s.pubKey.(*rsaPublicKey); ok { - // RSA keys support a few hash functions determined by the requested signature algorithm - switch algorithm { - case "", SigAlgoRSA: - algorithm = SigAlgoRSA - hashFunc = crypto.SHA1 - case SigAlgoRSASHA2256: - hashFunc = crypto.SHA256 - case SigAlgoRSASHA2512: - hashFunc = crypto.SHA512 - default: - return nil, fmt.Errorf("ssh: unsupported signature algorithm %s", algorithm) - } - } else { - // The only supported algorithm for all other key types is the same as the type of the key - if algorithm == "" { - algorithm = s.pubKey.Type() - } else if algorithm != s.pubKey.Type() { - return nil, fmt.Errorf("ssh: unsupported signature algorithm %s", algorithm) - } - - switch key := s.pubKey.(type) { - case *dsaPublicKey: - hashFunc = crypto.SHA1 - case *ecdsaPublicKey: - hashFunc = ecHash(key.Curve) - case ed25519PublicKey: - default: - return nil, fmt.Errorf("ssh: unsupported key type %T", key) - } - } - - var digest []byte - if hashFunc != 0 { - h := hashFunc.New() - h.Write(data) - digest = h.Sum(nil) - } else { - digest = data - } - - signature, err := s.signer.Sign(rand, digest, hashFunc) - if err != nil { - return nil, err - } - - // crypto.Signer.Sign is expected to return an ASN.1-encoded signature - // for ECDSA and DSA, but that's not the encoding expected by SSH, so - // re-encode. - switch s.pubKey.(type) { - case *ecdsaPublicKey, *dsaPublicKey: - type asn1Signature struct { - R, S *big.Int - } - asn1Sig := new(asn1Signature) - _, err := asn1.Unmarshal(signature, asn1Sig) - if err != nil { - return nil, err - } - - switch s.pubKey.(type) { - case *ecdsaPublicKey: - signature = Marshal(asn1Sig) - - case *dsaPublicKey: - signature = make([]byte, 40) - r := asn1Sig.R.Bytes() - s := asn1Sig.S.Bytes() - copy(signature[20-len(r):20], r) - copy(signature[40-len(s):40], s) - } - } - - return &Signature{ - Format: algorithm, - Blob: signature, - }, nil -} - -// NewPublicKey takes an *rsa.PublicKey, *dsa.PublicKey, *ecdsa.PublicKey, -// or ed25519.PublicKey returns a corresponding PublicKey instance. -// ECDSA keys must use P-256, P-384 or P-521. 
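The RSA branch above is reached through the AlgorithmSigner interface; a minimal sketch requesting rsa-sha2-256 in place of the legacy ssh-rsa/SHA-1 default:

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"

	"golang.org/x/crypto/ssh"
)

func main() {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	signer, err := ssh.NewSignerFromKey(key)
	if err != nil {
		panic(err)
	}
	// wrappedSigner implements AlgorithmSigner, so this assertion holds for
	// any crypto.Signer-backed key.
	as := signer.(ssh.AlgorithmSigner)
	sig, err := as.SignWithAlgorithm(rand.Reader, []byte("payload"), ssh.SigAlgoRSASHA2256)
	if err != nil {
		panic(err)
	}
	fmt.Println(sig.Format) // rsa-sha2-256
}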
-func NewPublicKey(key interface{}) (PublicKey, error) { - switch key := key.(type) { - case *rsa.PublicKey: - return (*rsaPublicKey)(key), nil - case *ecdsa.PublicKey: - if !supportedEllipticCurve(key.Curve) { - return nil, errors.New("ssh: only P-256, P-384 and P-521 EC keys are supported") - } - return (*ecdsaPublicKey)(key), nil - case *dsa.PublicKey: - return (*dsaPublicKey)(key), nil - case ed25519.PublicKey: - if l := len(key); l != ed25519.PublicKeySize { - return nil, fmt.Errorf("ssh: invalid size %d for Ed25519 public key", l) - } - return ed25519PublicKey(key), nil - default: - return nil, fmt.Errorf("ssh: unsupported key type %T", key) - } -} - -// ParsePrivateKey returns a Signer from a PEM encoded private key. It supports -// the same keys as ParseRawPrivateKey. If the private key is encrypted, it -// will return a PassphraseMissingError. -func ParsePrivateKey(pemBytes []byte) (Signer, error) { - key, err := ParseRawPrivateKey(pemBytes) - if err != nil { - return nil, err - } - - return NewSignerFromKey(key) -} - -// ParsePrivateKeyWithPassphrase returns a Signer from a PEM encoded private -// key and passphrase. It supports the same keys as -// ParseRawPrivateKeyWithPassphrase. -func ParsePrivateKeyWithPassphrase(pemBytes, passphrase []byte) (Signer, error) { - key, err := ParseRawPrivateKeyWithPassphrase(pemBytes, passphrase) - if err != nil { - return nil, err - } - - return NewSignerFromKey(key) -} - -// encryptedBlock tells whether a private key is -// encrypted by examining its Proc-Type header -// for a mention of ENCRYPTED -// according to RFC 1421 Section 4.6.1.1. -func encryptedBlock(block *pem.Block) bool { - return strings.Contains(block.Headers["Proc-Type"], "ENCRYPTED") -} - -// A PassphraseMissingError indicates that parsing this private key requires a -// passphrase. Use ParsePrivateKeyWithPassphrase. -type PassphraseMissingError struct { - // PublicKey will be set if the private key format includes an unencrypted - // public key along with the encrypted private key. - PublicKey PublicKey -} - -func (*PassphraseMissingError) Error() string { - return "ssh: this private key is passphrase protected" -} - -// ParseRawPrivateKey returns a private key from a PEM encoded private key. It -// supports RSA (PKCS#1), PKCS#8, DSA (OpenSSL), and ECDSA private keys. If the -// private key is encrypted, it will return a PassphraseMissingError. -func ParseRawPrivateKey(pemBytes []byte) (interface{}, error) { - block, _ := pem.Decode(pemBytes) - if block == nil { - return nil, errors.New("ssh: no key found") - } - - if encryptedBlock(block) { - return nil, &PassphraseMissingError{} - } - - switch block.Type { - case "RSA PRIVATE KEY": - return x509.ParsePKCS1PrivateKey(block.Bytes) - // RFC5208 - https://tools.ietf.org/html/rfc5208 - case "PRIVATE KEY": - return x509.ParsePKCS8PrivateKey(block.Bytes) - case "EC PRIVATE KEY": - return x509.ParseECPrivateKey(block.Bytes) - case "DSA PRIVATE KEY": - return ParseDSAPrivateKey(block.Bytes) - case "OPENSSH PRIVATE KEY": - return parseOpenSSHPrivateKey(block.Bytes, unencryptedOpenSSHKey) - default: - return nil, fmt.Errorf("ssh: unsupported key type %q", block.Type) - } -} - -// ParseRawPrivateKeyWithPassphrase returns a private key decrypted with -// passphrase from a PEM encoded private key. If the passphrase is wrong, it -// will return x509.IncorrectPasswordError. 
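A sketch of the retry flow that PassphraseMissingError enables; the key path and passphrase are placeholders:

package main

import (
	"errors"
	"fmt"
	"os"

	"golang.org/x/crypto/ssh"
)

func main() {
	pemBytes, err := os.ReadFile("id_ed25519") // placeholder path
	if err != nil {
		panic(err)
	}
	signer, err := ssh.ParsePrivateKey(pemBytes)
	var missing *ssh.PassphraseMissingError
	if errors.As(err, &missing) {
		// The key is encrypted; retry with a passphrase.
		signer, err = ssh.ParsePrivateKeyWithPassphrase(pemBytes, []byte("hunter2"))
	}
	if err != nil {
		panic(err)
	}
	fmt.Println(signer.PublicKey().Type())
}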
-func ParseRawPrivateKeyWithPassphrase(pemBytes, passphrase []byte) (interface{}, error) { - block, _ := pem.Decode(pemBytes) - if block == nil { - return nil, errors.New("ssh: no key found") - } - - if block.Type == "OPENSSH PRIVATE KEY" { - return parseOpenSSHPrivateKey(block.Bytes, passphraseProtectedOpenSSHKey(passphrase)) - } - - if !encryptedBlock(block) || !x509.IsEncryptedPEMBlock(block) { - return nil, errors.New("ssh: not an encrypted key") - } - - buf, err := x509.DecryptPEMBlock(block, passphrase) - if err != nil { - if err == x509.IncorrectPasswordError { - return nil, err - } - return nil, fmt.Errorf("ssh: cannot decode encrypted private keys: %v", err) - } - - switch block.Type { - case "RSA PRIVATE KEY": - return x509.ParsePKCS1PrivateKey(buf) - case "EC PRIVATE KEY": - return x509.ParseECPrivateKey(buf) - case "DSA PRIVATE KEY": - return ParseDSAPrivateKey(buf) - default: - return nil, fmt.Errorf("ssh: unsupported key type %q", block.Type) - } -} - -// ParseDSAPrivateKey returns a DSA private key from its ASN.1 DER encoding, as -// specified by the OpenSSL DSA man page. -func ParseDSAPrivateKey(der []byte) (*dsa.PrivateKey, error) { - var k struct { - Version int - P *big.Int - Q *big.Int - G *big.Int - Pub *big.Int - Priv *big.Int - } - rest, err := asn1.Unmarshal(der, &k) - if err != nil { - return nil, errors.New("ssh: failed to parse DSA key: " + err.Error()) - } - if len(rest) > 0 { - return nil, errors.New("ssh: garbage after DSA key") - } - - return &dsa.PrivateKey{ - PublicKey: dsa.PublicKey{ - Parameters: dsa.Parameters{ - P: k.P, - Q: k.Q, - G: k.G, - }, - Y: k.Pub, - }, - X: k.Priv, - }, nil -} - -func unencryptedOpenSSHKey(cipherName, kdfName, kdfOpts string, privKeyBlock []byte) ([]byte, error) { - if kdfName != "none" || cipherName != "none" { - return nil, &PassphraseMissingError{} - } - if kdfOpts != "" { - return nil, errors.New("ssh: invalid openssh private key") - } - return privKeyBlock, nil -} - -func passphraseProtectedOpenSSHKey(passphrase []byte) openSSHDecryptFunc { - return func(cipherName, kdfName, kdfOpts string, privKeyBlock []byte) ([]byte, error) { - if kdfName == "none" || cipherName == "none" { - return nil, errors.New("ssh: key is not password protected") - } - if kdfName != "bcrypt" { - return nil, fmt.Errorf("ssh: unknown KDF %q, only supports %q", kdfName, "bcrypt") - } - - var opts struct { - Salt string - Rounds uint32 - } - if err := Unmarshal([]byte(kdfOpts), &opts); err != nil { - return nil, err - } - - k, err := bcrypt_pbkdf.Key(passphrase, []byte(opts.Salt), int(opts.Rounds), 32+16) - if err != nil { - return nil, err - } - key, iv := k[:32], k[32:] - - c, err := aes.NewCipher(key) - if err != nil { - return nil, err - } - switch cipherName { - case "aes256-ctr": - ctr := cipher.NewCTR(c, iv) - ctr.XORKeyStream(privKeyBlock, privKeyBlock) - case "aes256-cbc": - if len(privKeyBlock)%c.BlockSize() != 0 { - return nil, fmt.Errorf("ssh: invalid encrypted private key length, not a multiple of the block size") - } - cbc := cipher.NewCBCDecrypter(c, iv) - cbc.CryptBlocks(privKeyBlock, privKeyBlock) - default: - return nil, fmt.Errorf("ssh: unknown cipher %q, only supports %q or %q", cipherName, "aes256-ctr", "aes256-cbc") - } - - return privKeyBlock, nil - } -} - -type openSSHDecryptFunc func(CipherName, KdfName, KdfOpts string, PrivKeyBlock []byte) ([]byte, error) - -// parseOpenSSHPrivateKey parses an OpenSSH private key, using the decrypt -// function to unwrap the encrypted portion. 
unencryptedOpenSSHKey can be used -// as the decrypt function to parse an unencrypted private key. See -// https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.key. -func parseOpenSSHPrivateKey(key []byte, decrypt openSSHDecryptFunc) (crypto.PrivateKey, error) { - const magic = "openssh-key-v1\x00" - if len(key) < len(magic) || string(key[:len(magic)]) != magic { - return nil, errors.New("ssh: invalid openssh private key format") - } - remaining := key[len(magic):] - - var w struct { - CipherName string - KdfName string - KdfOpts string - NumKeys uint32 - PubKey []byte - PrivKeyBlock []byte - } - - if err := Unmarshal(remaining, &w); err != nil { - return nil, err - } - if w.NumKeys != 1 { - // We only support single key files, and so does OpenSSH. - // https://github.com/openssh/openssh-portable/blob/4103a3ec7/sshkey.c#L4171 - return nil, errors.New("ssh: multi-key files are not supported") - } - - privKeyBlock, err := decrypt(w.CipherName, w.KdfName, w.KdfOpts, w.PrivKeyBlock) - if err != nil { - if err, ok := err.(*PassphraseMissingError); ok { - pub, errPub := ParsePublicKey(w.PubKey) - if errPub != nil { - return nil, fmt.Errorf("ssh: failed to parse embedded public key: %v", errPub) - } - err.PublicKey = pub - } - return nil, err - } - - pk1 := struct { - Check1 uint32 - Check2 uint32 - Keytype string - Rest []byte `ssh:"rest"` - }{} - - if err := Unmarshal(privKeyBlock, &pk1); err != nil || pk1.Check1 != pk1.Check2 { - if w.CipherName != "none" { - return nil, x509.IncorrectPasswordError - } - return nil, errors.New("ssh: malformed OpenSSH key") - } - - switch pk1.Keytype { - case KeyAlgoRSA: - // https://github.com/openssh/openssh-portable/blob/master/sshkey.c#L2760-L2773 - key := struct { - N *big.Int - E *big.Int - D *big.Int - Iqmp *big.Int - P *big.Int - Q *big.Int - Comment string - Pad []byte `ssh:"rest"` - }{} - - if err := Unmarshal(pk1.Rest, &key); err != nil { - return nil, err - } - - if err := checkOpenSSHKeyPadding(key.Pad); err != nil { - return nil, err - } - - pk := &rsa.PrivateKey{ - PublicKey: rsa.PublicKey{ - N: key.N, - E: int(key.E.Int64()), - }, - D: key.D, - Primes: []*big.Int{key.P, key.Q}, - } - - if err := pk.Validate(); err != nil { - return nil, err - } - - pk.Precompute() - - return pk, nil - case KeyAlgoED25519: - key := struct { - Pub []byte - Priv []byte - Comment string - Pad []byte `ssh:"rest"` - }{} - - if err := Unmarshal(pk1.Rest, &key); err != nil { - return nil, err - } - - if len(key.Priv) != ed25519.PrivateKeySize { - return nil, errors.New("ssh: private key unexpected length") - } - - if err := checkOpenSSHKeyPadding(key.Pad); err != nil { - return nil, err - } - - pk := ed25519.PrivateKey(make([]byte, ed25519.PrivateKeySize)) - copy(pk, key.Priv) - return &pk, nil - case KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521: - key := struct { - Curve string - Pub []byte - D *big.Int - Comment string - Pad []byte `ssh:"rest"` - }{} - - if err := Unmarshal(pk1.Rest, &key); err != nil { - return nil, err - } - - if err := checkOpenSSHKeyPadding(key.Pad); err != nil { - return nil, err - } - - var curve elliptic.Curve - switch key.Curve { - case "nistp256": - curve = elliptic.P256() - case "nistp384": - curve = elliptic.P384() - case "nistp521": - curve = elliptic.P521() - default: - return nil, errors.New("ssh: unhandled elliptic curve: " + key.Curve) - } - - X, Y := elliptic.Unmarshal(curve, key.Pub) - if X == nil || Y == nil { - return nil, errors.New("ssh: failed to unmarshal public key") - } - - if key.D.Cmp(curve.Params().N) >= 0 
{ - return nil, errors.New("ssh: scalar is out of range") - } - - x, y := curve.ScalarBaseMult(key.D.Bytes()) - if x.Cmp(X) != 0 || y.Cmp(Y) != 0 { - return nil, errors.New("ssh: public key does not match private key") - } - - return &ecdsa.PrivateKey{ - PublicKey: ecdsa.PublicKey{ - Curve: curve, - X: X, - Y: Y, - }, - D: key.D, - }, nil - default: - return nil, errors.New("ssh: unhandled key type") - } -} - -func checkOpenSSHKeyPadding(pad []byte) error { - for i, b := range pad { - if int(b) != i+1 { - return errors.New("ssh: padding not as expected") - } - } - return nil -} - -// FingerprintLegacyMD5 returns the user presentation of the key's -// fingerprint as described by RFC 4716 section 4. -func FingerprintLegacyMD5(pubKey PublicKey) string { - md5sum := md5.Sum(pubKey.Marshal()) - hexarray := make([]string, len(md5sum)) - for i, c := range md5sum { - hexarray[i] = hex.EncodeToString([]byte{c}) - } - return strings.Join(hexarray, ":") -} - -// FingerprintSHA256 returns the user presentation of the key's -// fingerprint as unpadded base64 encoded sha256 hash. -// This format was introduced from OpenSSH 6.8. -// https://www.openssh.com/txt/release-6.8 -// https://tools.ietf.org/html/rfc4648#section-3.2 (unpadded base64 encoding) -func FingerprintSHA256(pubKey PublicKey) string { - sha256sum := sha256.Sum256(pubKey.Marshal()) - hash := base64.RawStdEncoding.EncodeToString(sha256sum[:]) - return "SHA256:" + hash -} diff --git a/vendor/golang.org/x/crypto/ssh/mac.go b/vendor/golang.org/x/crypto/ssh/mac.go deleted file mode 100644 index c07a0628..00000000 --- a/vendor/golang.org/x/crypto/ssh/mac.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -// Message authentication support - -import ( - "crypto/hmac" - "crypto/sha1" - "crypto/sha256" - "hash" -) - -type macMode struct { - keySize int - etm bool - new func(key []byte) hash.Hash -} - -// truncatingMAC wraps around a hash.Hash and truncates the output digest to -// a given size. -type truncatingMAC struct { - length int - hmac hash.Hash -} - -func (t truncatingMAC) Write(data []byte) (int, error) { - return t.hmac.Write(data) -} - -func (t truncatingMAC) Sum(in []byte) []byte { - out := t.hmac.Sum(in) - return out[:len(in)+t.length] -} - -func (t truncatingMAC) Reset() { - t.hmac.Reset() -} - -func (t truncatingMAC) Size() int { - return t.length -} - -func (t truncatingMAC) BlockSize() int { return t.hmac.BlockSize() } - -var macModes = map[string]*macMode{ - "hmac-sha2-256-etm@openssh.com": {32, true, func(key []byte) hash.Hash { - return hmac.New(sha256.New, key) - }}, - "hmac-sha2-256": {32, false, func(key []byte) hash.Hash { - return hmac.New(sha256.New, key) - }}, - "hmac-sha1": {20, false, func(key []byte) hash.Hash { - return hmac.New(sha1.New, key) - }}, - "hmac-sha1-96": {20, false, func(key []byte) hash.Hash { - return truncatingMAC{12, hmac.New(sha1.New, key)} - }}, -} diff --git a/vendor/golang.org/x/crypto/ssh/messages.go b/vendor/golang.org/x/crypto/ssh/messages.go deleted file mode 100644 index ac41a416..00000000 --- a/vendor/golang.org/x/crypto/ssh/messages.go +++ /dev/null @@ -1,866 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
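The two fingerprint helpers near the end of the deleted keys.go differ only in hash and presentation; a minimal sketch over a generated key:

package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/ssh"
)

func main() {
	pub, _, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}
	sshPub, err := ssh.NewPublicKey(pub)
	if err != nil {
		panic(err)
	}
	fmt.Println(ssh.FingerprintSHA256(sshPub))    // "SHA256:" + unpadded base64
	fmt.Println(ssh.FingerprintLegacyMD5(sshPub)) // colon-separated MD5 hex
	fmt.Print(string(ssh.MarshalAuthorizedKey(sshPub)))
}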
- -package ssh - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - "math/big" - "reflect" - "strconv" - "strings" -) - -// These are SSH message type numbers. They are scattered around several -// documents but many were taken from [SSH-PARAMETERS]. -const ( - msgIgnore = 2 - msgUnimplemented = 3 - msgDebug = 4 - msgNewKeys = 21 -) - -// SSH messages: -// -// These structures mirror the wire format of the corresponding SSH messages. -// They are marshaled using reflection with the marshal and unmarshal functions -// in this file. The only wrinkle is that a final member of type []byte with a -// ssh tag of "rest" receives the remainder of a packet when unmarshaling. - -// See RFC 4253, section 11.1. -const msgDisconnect = 1 - -// disconnectMsg is the message that signals a disconnect. It is also -// the error type returned from mux.Wait() -type disconnectMsg struct { - Reason uint32 `sshtype:"1"` - Message string - Language string -} - -func (d *disconnectMsg) Error() string { - return fmt.Sprintf("ssh: disconnect, reason %d: %s", d.Reason, d.Message) -} - -// See RFC 4253, section 7.1. -const msgKexInit = 20 - -type kexInitMsg struct { - Cookie [16]byte `sshtype:"20"` - KexAlgos []string - ServerHostKeyAlgos []string - CiphersClientServer []string - CiphersServerClient []string - MACsClientServer []string - MACsServerClient []string - CompressionClientServer []string - CompressionServerClient []string - LanguagesClientServer []string - LanguagesServerClient []string - FirstKexFollows bool - Reserved uint32 -} - -// See RFC 4253, section 8. - -// Diffie-Helman -const msgKexDHInit = 30 - -type kexDHInitMsg struct { - X *big.Int `sshtype:"30"` -} - -const msgKexECDHInit = 30 - -type kexECDHInitMsg struct { - ClientPubKey []byte `sshtype:"30"` -} - -const msgKexECDHReply = 31 - -type kexECDHReplyMsg struct { - HostKey []byte `sshtype:"31"` - EphemeralPubKey []byte - Signature []byte -} - -const msgKexDHReply = 31 - -type kexDHReplyMsg struct { - HostKey []byte `sshtype:"31"` - Y *big.Int - Signature []byte -} - -// See RFC 4419, section 5. -const msgKexDHGexGroup = 31 - -type kexDHGexGroupMsg struct { - P *big.Int `sshtype:"31"` - G *big.Int -} - -const msgKexDHGexInit = 32 - -type kexDHGexInitMsg struct { - X *big.Int `sshtype:"32"` -} - -const msgKexDHGexReply = 33 - -type kexDHGexReplyMsg struct { - HostKey []byte `sshtype:"33"` - Y *big.Int - Signature []byte -} - -const msgKexDHGexRequest = 34 - -type kexDHGexRequestMsg struct { - MinBits uint32 `sshtype:"34"` - PreferedBits uint32 - MaxBits uint32 -} - -// See RFC 4253, section 10. -const msgServiceRequest = 5 - -type serviceRequestMsg struct { - Service string `sshtype:"5"` -} - -// See RFC 4253, section 10. -const msgServiceAccept = 6 - -type serviceAcceptMsg struct { - Service string `sshtype:"6"` -} - -// See RFC 4252, section 5. -const msgUserAuthRequest = 50 - -type userAuthRequestMsg struct { - User string `sshtype:"50"` - Service string - Method string - Payload []byte `ssh:"rest"` -} - -// Used for debug printouts of packets. 
-type userAuthSuccessMsg struct { -} - -// See RFC 4252, section 5.1 -const msgUserAuthFailure = 51 - -type userAuthFailureMsg struct { - Methods []string `sshtype:"51"` - PartialSuccess bool -} - -// See RFC 4252, section 5.1 -const msgUserAuthSuccess = 52 - -// See RFC 4252, section 5.4 -const msgUserAuthBanner = 53 - -type userAuthBannerMsg struct { - Message string `sshtype:"53"` - // unused, but required to allow message parsing - Language string -} - -// See RFC 4256, section 3.2 -const msgUserAuthInfoRequest = 60 -const msgUserAuthInfoResponse = 61 - -type userAuthInfoRequestMsg struct { - User string `sshtype:"60"` - Instruction string - DeprecatedLanguage string - NumPrompts uint32 - Prompts []byte `ssh:"rest"` -} - -// See RFC 4254, section 5.1. -const msgChannelOpen = 90 - -type channelOpenMsg struct { - ChanType string `sshtype:"90"` - PeersID uint32 - PeersWindow uint32 - MaxPacketSize uint32 - TypeSpecificData []byte `ssh:"rest"` -} - -const msgChannelExtendedData = 95 -const msgChannelData = 94 - -// Used for debug print outs of packets. -type channelDataMsg struct { - PeersID uint32 `sshtype:"94"` - Length uint32 - Rest []byte `ssh:"rest"` -} - -// See RFC 4254, section 5.1. -const msgChannelOpenConfirm = 91 - -type channelOpenConfirmMsg struct { - PeersID uint32 `sshtype:"91"` - MyID uint32 - MyWindow uint32 - MaxPacketSize uint32 - TypeSpecificData []byte `ssh:"rest"` -} - -// See RFC 4254, section 5.1. -const msgChannelOpenFailure = 92 - -type channelOpenFailureMsg struct { - PeersID uint32 `sshtype:"92"` - Reason RejectionReason - Message string - Language string -} - -const msgChannelRequest = 98 - -type channelRequestMsg struct { - PeersID uint32 `sshtype:"98"` - Request string - WantReply bool - RequestSpecificData []byte `ssh:"rest"` -} - -// See RFC 4254, section 5.4. -const msgChannelSuccess = 99 - -type channelRequestSuccessMsg struct { - PeersID uint32 `sshtype:"99"` -} - -// See RFC 4254, section 5.4. 
-const msgChannelFailure = 100 - -type channelRequestFailureMsg struct { - PeersID uint32 `sshtype:"100"` -} - -// See RFC 4254, section 5.3 -const msgChannelClose = 97 - -type channelCloseMsg struct { - PeersID uint32 `sshtype:"97"` -} - -// See RFC 4254, section 5.3 -const msgChannelEOF = 96 - -type channelEOFMsg struct { - PeersID uint32 `sshtype:"96"` -} - -// See RFC 4254, section 4 -const msgGlobalRequest = 80 - -type globalRequestMsg struct { - Type string `sshtype:"80"` - WantReply bool - Data []byte `ssh:"rest"` -} - -// See RFC 4254, section 4 -const msgRequestSuccess = 81 - -type globalRequestSuccessMsg struct { - Data []byte `ssh:"rest" sshtype:"81"` -} - -// See RFC 4254, section 4 -const msgRequestFailure = 82 - -type globalRequestFailureMsg struct { - Data []byte `ssh:"rest" sshtype:"82"` -} - -// See RFC 4254, section 5.2 -const msgChannelWindowAdjust = 93 - -type windowAdjustMsg struct { - PeersID uint32 `sshtype:"93"` - AdditionalBytes uint32 -} - -// See RFC 4252, section 7 -const msgUserAuthPubKeyOk = 60 - -type userAuthPubKeyOkMsg struct { - Algo string `sshtype:"60"` - PubKey []byte -} - -// See RFC 4462, section 3 -const msgUserAuthGSSAPIResponse = 60 - -type userAuthGSSAPIResponse struct { - SupportMech []byte `sshtype:"60"` -} - -const msgUserAuthGSSAPIToken = 61 - -type userAuthGSSAPIToken struct { - Token []byte `sshtype:"61"` -} - -const msgUserAuthGSSAPIMIC = 66 - -type userAuthGSSAPIMIC struct { - MIC []byte `sshtype:"66"` -} - -// See RFC 4462, section 3.9 -const msgUserAuthGSSAPIErrTok = 64 - -type userAuthGSSAPIErrTok struct { - ErrorToken []byte `sshtype:"64"` -} - -// See RFC 4462, section 3.8 -const msgUserAuthGSSAPIError = 65 - -type userAuthGSSAPIError struct { - MajorStatus uint32 `sshtype:"65"` - MinorStatus uint32 - Message string - LanguageTag string -} - -// typeTags returns the possible type bytes for the given reflect.Type, which -// should be a struct. The possible values are separated by a '|' character. -func typeTags(structType reflect.Type) (tags []byte) { - tagStr := structType.Field(0).Tag.Get("sshtype") - - for _, tag := range strings.Split(tagStr, "|") { - i, err := strconv.Atoi(tag) - if err == nil { - tags = append(tags, byte(i)) - } - } - - return tags -} - -func fieldError(t reflect.Type, field int, problem string) error { - if problem != "" { - problem = ": " + problem - } - return fmt.Errorf("ssh: unmarshal error for field %s of type %s%s", t.Field(field).Name, t.Name(), problem) -} - -var errShortRead = errors.New("ssh: short read") - -// Unmarshal parses data in SSH wire format into a structure. The out -// argument should be a pointer to struct. If the first member of the -// struct has the "sshtype" tag set to a '|'-separated set of numbers -// in decimal, the packet must start with one of those numbers. In -// case of error, Unmarshal returns a ParseError or -// UnexpectedMessageError. 
-func Unmarshal(data []byte, out interface{}) error { - v := reflect.ValueOf(out).Elem() - structType := v.Type() - expectedTypes := typeTags(structType) - - var expectedType byte - if len(expectedTypes) > 0 { - expectedType = expectedTypes[0] - } - - if len(data) == 0 { - return parseError(expectedType) - } - - if len(expectedTypes) > 0 { - goodType := false - for _, e := range expectedTypes { - if e > 0 && data[0] == e { - goodType = true - break - } - } - if !goodType { - return fmt.Errorf("ssh: unexpected message type %d (expected one of %v)", data[0], expectedTypes) - } - data = data[1:] - } - - var ok bool - for i := 0; i < v.NumField(); i++ { - field := v.Field(i) - t := field.Type() - switch t.Kind() { - case reflect.Bool: - if len(data) < 1 { - return errShortRead - } - field.SetBool(data[0] != 0) - data = data[1:] - case reflect.Array: - if t.Elem().Kind() != reflect.Uint8 { - return fieldError(structType, i, "array of unsupported type") - } - if len(data) < t.Len() { - return errShortRead - } - for j, n := 0, t.Len(); j < n; j++ { - field.Index(j).Set(reflect.ValueOf(data[j])) - } - data = data[t.Len():] - case reflect.Uint64: - var u64 uint64 - if u64, data, ok = parseUint64(data); !ok { - return errShortRead - } - field.SetUint(u64) - case reflect.Uint32: - var u32 uint32 - if u32, data, ok = parseUint32(data); !ok { - return errShortRead - } - field.SetUint(uint64(u32)) - case reflect.Uint8: - if len(data) < 1 { - return errShortRead - } - field.SetUint(uint64(data[0])) - data = data[1:] - case reflect.String: - var s []byte - if s, data, ok = parseString(data); !ok { - return fieldError(structType, i, "") - } - field.SetString(string(s)) - case reflect.Slice: - switch t.Elem().Kind() { - case reflect.Uint8: - if structType.Field(i).Tag.Get("ssh") == "rest" { - field.Set(reflect.ValueOf(data)) - data = nil - } else { - var s []byte - if s, data, ok = parseString(data); !ok { - return errShortRead - } - field.Set(reflect.ValueOf(s)) - } - case reflect.String: - var nl []string - if nl, data, ok = parseNameList(data); !ok { - return errShortRead - } - field.Set(reflect.ValueOf(nl)) - default: - return fieldError(structType, i, "slice of unsupported type") - } - case reflect.Ptr: - if t == bigIntType { - var n *big.Int - if n, data, ok = parseInt(data); !ok { - return errShortRead - } - field.Set(reflect.ValueOf(n)) - } else { - return fieldError(structType, i, "pointer to unsupported type") - } - default: - return fieldError(structType, i, fmt.Sprintf("unsupported type: %v", t)) - } - } - - if len(data) != 0 { - return parseError(expectedType) - } - - return nil -} - -// Marshal serializes the message in msg to SSH wire format. The msg -// argument should be a struct or pointer to struct. If the first -// member has the "sshtype" tag set to a number in decimal, that -// number is prepended to the result. If the last of member has the -// "ssh" tag set to "rest", its contents are appended to the output. 
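A round-trip sketch of the tag conventions documented above, using the exported ssh.Marshal and ssh.Unmarshal on a caller-defined struct: the sshtype tag supplies the message byte, *big.Int marshals as an SSH mpint, and a trailing field tagged ssh:"rest" captures the remainder of the packet:

package main

import (
	"fmt"
	"math/big"

	"golang.org/x/crypto/ssh"
)

// demoMsg exercises the documented conventions; the layout loosely mirrors
// SSH_MSG_DISCONNECT and is illustrative only.
type demoMsg struct {
	Reason uint32 `sshtype:"1"`
	Text   string
	K      *big.Int
	Rest   []byte `ssh:"rest"`
}

func main() {
	in := demoMsg{Reason: 11, Text: "bye", K: big.NewInt(42), Rest: []byte{0xde, 0xad}}
	pkt := ssh.Marshal(&in)
	fmt.Printf("type byte %d, %d bytes total\n", pkt[0], len(pkt))

	var out demoMsg
	if err := ssh.Unmarshal(pkt, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.Reason, out.Text, out.K, out.Rest)
}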
-func Marshal(msg interface{}) []byte { - out := make([]byte, 0, 64) - return marshalStruct(out, msg) -} - -func marshalStruct(out []byte, msg interface{}) []byte { - v := reflect.Indirect(reflect.ValueOf(msg)) - msgTypes := typeTags(v.Type()) - if len(msgTypes) > 0 { - out = append(out, msgTypes[0]) - } - - for i, n := 0, v.NumField(); i < n; i++ { - field := v.Field(i) - switch t := field.Type(); t.Kind() { - case reflect.Bool: - var v uint8 - if field.Bool() { - v = 1 - } - out = append(out, v) - case reflect.Array: - if t.Elem().Kind() != reflect.Uint8 { - panic(fmt.Sprintf("array of non-uint8 in field %d: %T", i, field.Interface())) - } - for j, l := 0, t.Len(); j < l; j++ { - out = append(out, uint8(field.Index(j).Uint())) - } - case reflect.Uint32: - out = appendU32(out, uint32(field.Uint())) - case reflect.Uint64: - out = appendU64(out, uint64(field.Uint())) - case reflect.Uint8: - out = append(out, uint8(field.Uint())) - case reflect.String: - s := field.String() - out = appendInt(out, len(s)) - out = append(out, s...) - case reflect.Slice: - switch t.Elem().Kind() { - case reflect.Uint8: - if v.Type().Field(i).Tag.Get("ssh") != "rest" { - out = appendInt(out, field.Len()) - } - out = append(out, field.Bytes()...) - case reflect.String: - offset := len(out) - out = appendU32(out, 0) - if n := field.Len(); n > 0 { - for j := 0; j < n; j++ { - f := field.Index(j) - if j != 0 { - out = append(out, ',') - } - out = append(out, f.String()...) - } - // overwrite length value - binary.BigEndian.PutUint32(out[offset:], uint32(len(out)-offset-4)) - } - default: - panic(fmt.Sprintf("slice of unknown type in field %d: %T", i, field.Interface())) - } - case reflect.Ptr: - if t == bigIntType { - var n *big.Int - nValue := reflect.ValueOf(&n) - nValue.Elem().Set(field) - needed := intLength(n) - oldLength := len(out) - - if cap(out)-len(out) < needed { - newOut := make([]byte, len(out), 2*(len(out)+needed)) - copy(newOut, out) - out = newOut - } - out = out[:oldLength+needed] - marshalInt(out[oldLength:], n) - } else { - panic(fmt.Sprintf("pointer to unknown type in field %d: %T", i, field.Interface())) - } - } - } - - return out -} - -var bigOne = big.NewInt(1) - -func parseString(in []byte) (out, rest []byte, ok bool) { - if len(in) < 4 { - return - } - length := binary.BigEndian.Uint32(in) - in = in[4:] - if uint32(len(in)) < length { - return - } - out = in[:length] - rest = in[length:] - ok = true - return -} - -var ( - comma = []byte{','} - emptyNameList = []string{} -) - -func parseNameList(in []byte) (out []string, rest []byte, ok bool) { - contents, rest, ok := parseString(in) - if !ok { - return - } - if len(contents) == 0 { - out = emptyNameList - return - } - parts := bytes.Split(contents, comma) - out = make([]string, len(parts)) - for i, part := range parts { - out[i] = string(part) - } - return -} - -func parseInt(in []byte) (out *big.Int, rest []byte, ok bool) { - contents, rest, ok := parseString(in) - if !ok { - return - } - out = new(big.Int) - - if len(contents) > 0 && contents[0]&0x80 == 0x80 { - // This is a negative number - notBytes := make([]byte, len(contents)) - for i := range notBytes { - notBytes[i] = ^contents[i] - } - out.SetBytes(notBytes) - out.Add(out, bigOne) - out.Neg(out) - } else { - // Positive number - out.SetBytes(contents) - } - ok = true - return -} - -func parseUint32(in []byte) (uint32, []byte, bool) { - if len(in) < 4 { - return 0, nil, false - } - return binary.BigEndian.Uint32(in), in[4:], true -} - -func parseUint64(in []byte) (uint64, []byte, 
bool) { - if len(in) < 8 { - return 0, nil, false - } - return binary.BigEndian.Uint64(in), in[8:], true -} - -func intLength(n *big.Int) int { - length := 4 /* length bytes */ - if n.Sign() < 0 { - nMinus1 := new(big.Int).Neg(n) - nMinus1.Sub(nMinus1, bigOne) - bitLen := nMinus1.BitLen() - if bitLen%8 == 0 { - // The number will need 0xff padding - length++ - } - length += (bitLen + 7) / 8 - } else if n.Sign() == 0 { - // A zero is the zero length string - } else { - bitLen := n.BitLen() - if bitLen%8 == 0 { - // The number will need 0x00 padding - length++ - } - length += (bitLen + 7) / 8 - } - - return length -} - -func marshalUint32(to []byte, n uint32) []byte { - binary.BigEndian.PutUint32(to, n) - return to[4:] -} - -func marshalUint64(to []byte, n uint64) []byte { - binary.BigEndian.PutUint64(to, n) - return to[8:] -} - -func marshalInt(to []byte, n *big.Int) []byte { - lengthBytes := to - to = to[4:] - length := 0 - - if n.Sign() < 0 { - // A negative number has to be converted to two's-complement - // form. So we'll subtract 1 and invert. If the - // most-significant-bit isn't set then we'll need to pad the - // beginning with 0xff in order to keep the number negative. - nMinus1 := new(big.Int).Neg(n) - nMinus1.Sub(nMinus1, bigOne) - bytes := nMinus1.Bytes() - for i := range bytes { - bytes[i] ^= 0xff - } - if len(bytes) == 0 || bytes[0]&0x80 == 0 { - to[0] = 0xff - to = to[1:] - length++ - } - nBytes := copy(to, bytes) - to = to[nBytes:] - length += nBytes - } else if n.Sign() == 0 { - // A zero is the zero length string - } else { - bytes := n.Bytes() - if len(bytes) > 0 && bytes[0]&0x80 != 0 { - // We'll have to pad this with a 0x00 in order to - // stop it looking like a negative number. - to[0] = 0 - to = to[1:] - length++ - } - nBytes := copy(to, bytes) - to = to[nBytes:] - length += nBytes - } - - lengthBytes[0] = byte(length >> 24) - lengthBytes[1] = byte(length >> 16) - lengthBytes[2] = byte(length >> 8) - lengthBytes[3] = byte(length) - return to -} - -func writeInt(w io.Writer, n *big.Int) { - length := intLength(n) - buf := make([]byte, length) - marshalInt(buf, n) - w.Write(buf) -} - -func writeString(w io.Writer, s []byte) { - var lengthBytes [4]byte - lengthBytes[0] = byte(len(s) >> 24) - lengthBytes[1] = byte(len(s) >> 16) - lengthBytes[2] = byte(len(s) >> 8) - lengthBytes[3] = byte(len(s)) - w.Write(lengthBytes[:]) - w.Write(s) -} - -func stringLength(n int) int { - return 4 + n -} - -func marshalString(to []byte, s []byte) []byte { - to[0] = byte(len(s) >> 24) - to[1] = byte(len(s) >> 16) - to[2] = byte(len(s) >> 8) - to[3] = byte(len(s)) - to = to[4:] - copy(to, s) - return to[len(s):] -} - -var bigIntType = reflect.TypeOf((*big.Int)(nil)) - -// Decode a packet into its corresponding message. 
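-//
-// For example, a channel close packet yields a *channelCloseMsg:
-//
-//	msg, err := decode([]byte{msgChannelClose, 0, 0, 0, 1})
-//	// on success, msg.(*channelCloseMsg).PeersID == 1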
-func decode(packet []byte) (interface{}, error) { - var msg interface{} - switch packet[0] { - case msgDisconnect: - msg = new(disconnectMsg) - case msgServiceRequest: - msg = new(serviceRequestMsg) - case msgServiceAccept: - msg = new(serviceAcceptMsg) - case msgKexInit: - msg = new(kexInitMsg) - case msgKexDHInit: - msg = new(kexDHInitMsg) - case msgKexDHReply: - msg = new(kexDHReplyMsg) - case msgUserAuthRequest: - msg = new(userAuthRequestMsg) - case msgUserAuthSuccess: - return new(userAuthSuccessMsg), nil - case msgUserAuthFailure: - msg = new(userAuthFailureMsg) - case msgUserAuthPubKeyOk: - msg = new(userAuthPubKeyOkMsg) - case msgGlobalRequest: - msg = new(globalRequestMsg) - case msgRequestSuccess: - msg = new(globalRequestSuccessMsg) - case msgRequestFailure: - msg = new(globalRequestFailureMsg) - case msgChannelOpen: - msg = new(channelOpenMsg) - case msgChannelData: - msg = new(channelDataMsg) - case msgChannelOpenConfirm: - msg = new(channelOpenConfirmMsg) - case msgChannelOpenFailure: - msg = new(channelOpenFailureMsg) - case msgChannelWindowAdjust: - msg = new(windowAdjustMsg) - case msgChannelEOF: - msg = new(channelEOFMsg) - case msgChannelClose: - msg = new(channelCloseMsg) - case msgChannelRequest: - msg = new(channelRequestMsg) - case msgChannelSuccess: - msg = new(channelRequestSuccessMsg) - case msgChannelFailure: - msg = new(channelRequestFailureMsg) - case msgUserAuthGSSAPIToken: - msg = new(userAuthGSSAPIToken) - case msgUserAuthGSSAPIMIC: - msg = new(userAuthGSSAPIMIC) - case msgUserAuthGSSAPIErrTok: - msg = new(userAuthGSSAPIErrTok) - case msgUserAuthGSSAPIError: - msg = new(userAuthGSSAPIError) - default: - return nil, unexpectedMessageError(0, packet[0]) - } - if err := Unmarshal(packet, msg); err != nil { - return nil, err - } - return msg, nil -} - -var packetTypeNames = map[byte]string{ - msgDisconnect: "disconnectMsg", - msgServiceRequest: "serviceRequestMsg", - msgServiceAccept: "serviceAcceptMsg", - msgKexInit: "kexInitMsg", - msgKexDHInit: "kexDHInitMsg", - msgKexDHReply: "kexDHReplyMsg", - msgUserAuthRequest: "userAuthRequestMsg", - msgUserAuthSuccess: "userAuthSuccessMsg", - msgUserAuthFailure: "userAuthFailureMsg", - msgUserAuthPubKeyOk: "userAuthPubKeyOkMsg", - msgGlobalRequest: "globalRequestMsg", - msgRequestSuccess: "globalRequestSuccessMsg", - msgRequestFailure: "globalRequestFailureMsg", - msgChannelOpen: "channelOpenMsg", - msgChannelData: "channelDataMsg", - msgChannelOpenConfirm: "channelOpenConfirmMsg", - msgChannelOpenFailure: "channelOpenFailureMsg", - msgChannelWindowAdjust: "windowAdjustMsg", - msgChannelEOF: "channelEOFMsg", - msgChannelClose: "channelCloseMsg", - msgChannelRequest: "channelRequestMsg", - msgChannelSuccess: "channelRequestSuccessMsg", - msgChannelFailure: "channelRequestFailureMsg", -} diff --git a/vendor/golang.org/x/crypto/ssh/mux.go b/vendor/golang.org/x/crypto/ssh/mux.go deleted file mode 100644 index 9654c018..00000000 --- a/vendor/golang.org/x/crypto/ssh/mux.go +++ /dev/null @@ -1,351 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "encoding/binary" - "fmt" - "io" - "log" - "sync" - "sync/atomic" -) - -// debugMux, if set, causes messages in the connection protocol to be -// logged. -const debugMux = false - -// chanList is a thread safe channel list. 
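-// IDs are assigned by add, which reuses the first free slot, so a
-// hypothetical sequence looks like:
-//
-//	var l chanList
-//	id := l.add(ch)  // 0 (plus l.offset when debugging)
-//	l.remove(id)
-//	id = l.add(ch2)  // 0 again, since the freed slot is reused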
-type chanList struct { - // protects concurrent access to chans - sync.Mutex - - // chans are indexed by the local id of the channel, which the - // other side should send in the PeersId field. - chans []*channel - - // This is a debugging aid: it offsets all IDs by this - // amount. This helps distinguish otherwise identical - // server/client muxes - offset uint32 -} - -// Assigns a channel ID to the given channel. -func (c *chanList) add(ch *channel) uint32 { - c.Lock() - defer c.Unlock() - for i := range c.chans { - if c.chans[i] == nil { - c.chans[i] = ch - return uint32(i) + c.offset - } - } - c.chans = append(c.chans, ch) - return uint32(len(c.chans)-1) + c.offset -} - -// getChan returns the channel for the given ID. -func (c *chanList) getChan(id uint32) *channel { - id -= c.offset - - c.Lock() - defer c.Unlock() - if id < uint32(len(c.chans)) { - return c.chans[id] - } - return nil -} - -func (c *chanList) remove(id uint32) { - id -= c.offset - c.Lock() - if id < uint32(len(c.chans)) { - c.chans[id] = nil - } - c.Unlock() -} - -// dropAll forgets all channels it knows, returning them in a slice. -func (c *chanList) dropAll() []*channel { - c.Lock() - defer c.Unlock() - var r []*channel - - for _, ch := range c.chans { - if ch == nil { - continue - } - r = append(r, ch) - } - c.chans = nil - return r -} - -// mux represents the state for the SSH connection protocol, which -// multiplexes many channels onto a single packet transport. -type mux struct { - conn packetConn - chanList chanList - - incomingChannels chan NewChannel - - globalSentMu sync.Mutex - globalResponses chan interface{} - incomingRequests chan *Request - - errCond *sync.Cond - err error -} - -// When debugging, each new chanList instantiation has a different -// offset. -var globalOff uint32 - -func (m *mux) Wait() error { - m.errCond.L.Lock() - defer m.errCond.L.Unlock() - for m.err == nil { - m.errCond.Wait() - } - return m.err -} - -// newMux returns a mux that runs over the given connection. -func newMux(p packetConn) *mux { - m := &mux{ - conn: p, - incomingChannels: make(chan NewChannel, chanSize), - globalResponses: make(chan interface{}, 1), - incomingRequests: make(chan *Request, chanSize), - errCond: newCond(), - } - if debugMux { - m.chanList.offset = atomic.AddUint32(&globalOff, 1) - } - - go m.loop() - return m -} - -func (m *mux) sendMessage(msg interface{}) error { - p := Marshal(msg) - if debugMux { - log.Printf("send global(%d): %#v", m.chanList.offset, msg) - } - return m.conn.writePacket(p) -} - -func (m *mux) SendRequest(name string, wantReply bool, payload []byte) (bool, []byte, error) { - if wantReply { - m.globalSentMu.Lock() - defer m.globalSentMu.Unlock() - } - - if err := m.sendMessage(globalRequestMsg{ - Type: name, - WantReply: wantReply, - Data: payload, - }); err != nil { - return false, nil, err - } - - if !wantReply { - return false, nil, nil - } - - msg, ok := <-m.globalResponses - if !ok { - return false, nil, io.EOF - } - switch msg := msg.(type) { - case *globalRequestFailureMsg: - return false, msg.Data, nil - case *globalRequestSuccessMsg: - return true, msg.Data, nil - default: - return false, nil, fmt.Errorf("ssh: unexpected response to request: %#v", msg) - } -} - -// ackRequest must be called after processing a global request that -// has WantReply set. 
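-// A sketch of a server draining incomingRequests; Request.Reply routes
-// to ackRequest for global requests:
-//
-//	for req := range m.incomingRequests {
-//		if req.WantReply {
-//			req.Reply(false, nil)
-//		}
-//	}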
-func (m *mux) ackRequest(ok bool, data []byte) error { - if ok { - return m.sendMessage(globalRequestSuccessMsg{Data: data}) - } - return m.sendMessage(globalRequestFailureMsg{Data: data}) -} - -func (m *mux) Close() error { - return m.conn.Close() -} - -// loop runs the connection machine. It will process packets until an -// error is encountered. To synchronize on loop exit, use mux.Wait. -func (m *mux) loop() { - var err error - for err == nil { - err = m.onePacket() - } - - for _, ch := range m.chanList.dropAll() { - ch.close() - } - - close(m.incomingChannels) - close(m.incomingRequests) - close(m.globalResponses) - - m.conn.Close() - - m.errCond.L.Lock() - m.err = err - m.errCond.Broadcast() - m.errCond.L.Unlock() - - if debugMux { - log.Println("loop exit", err) - } -} - -// onePacket reads and processes one packet. -func (m *mux) onePacket() error { - packet, err := m.conn.readPacket() - if err != nil { - return err - } - - if debugMux { - if packet[0] == msgChannelData || packet[0] == msgChannelExtendedData { - log.Printf("decoding(%d): data packet - %d bytes", m.chanList.offset, len(packet)) - } else { - p, _ := decode(packet) - log.Printf("decoding(%d): %d %#v - %d bytes", m.chanList.offset, packet[0], p, len(packet)) - } - } - - switch packet[0] { - case msgChannelOpen: - return m.handleChannelOpen(packet) - case msgGlobalRequest, msgRequestSuccess, msgRequestFailure: - return m.handleGlobalPacket(packet) - } - - // assume a channel packet. - if len(packet) < 5 { - return parseError(packet[0]) - } - id := binary.BigEndian.Uint32(packet[1:]) - ch := m.chanList.getChan(id) - if ch == nil { - return m.handleUnknownChannelPacket(id, packet) - } - - return ch.handlePacket(packet) -} - -func (m *mux) handleGlobalPacket(packet []byte) error { - msg, err := decode(packet) - if err != nil { - return err - } - - switch msg := msg.(type) { - case *globalRequestMsg: - m.incomingRequests <- &Request{ - Type: msg.Type, - WantReply: msg.WantReply, - Payload: msg.Data, - mux: m, - } - case *globalRequestSuccessMsg, *globalRequestFailureMsg: - m.globalResponses <- msg - default: - panic(fmt.Sprintf("not a global message %#v", msg)) - } - - return nil -} - -// handleChannelOpen schedules a channel to be Accept()ed. 
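-// The channel is delivered on incomingChannels, where a consumer accepts
-// or rejects it, e.g.:
-//
-//	for newCh := range m.incomingChannels {
-//		ch, reqs, err := newCh.Accept() // or newCh.Reject(reason, message)
-//		...
-//	}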
-func (m *mux) handleChannelOpen(packet []byte) error { - var msg channelOpenMsg - if err := Unmarshal(packet, &msg); err != nil { - return err - } - - if msg.MaxPacketSize < minPacketLength || msg.MaxPacketSize > 1<<31 { - failMsg := channelOpenFailureMsg{ - PeersID: msg.PeersID, - Reason: ConnectionFailed, - Message: "invalid request", - Language: "en_US.UTF-8", - } - return m.sendMessage(failMsg) - } - - c := m.newChannel(msg.ChanType, channelInbound, msg.TypeSpecificData) - c.remoteId = msg.PeersID - c.maxRemotePayload = msg.MaxPacketSize - c.remoteWin.add(msg.PeersWindow) - m.incomingChannels <- c - return nil -} - -func (m *mux) OpenChannel(chanType string, extra []byte) (Channel, <-chan *Request, error) { - ch, err := m.openChannel(chanType, extra) - if err != nil { - return nil, nil, err - } - - return ch, ch.incomingRequests, nil -} - -func (m *mux) openChannel(chanType string, extra []byte) (*channel, error) { - ch := m.newChannel(chanType, channelOutbound, extra) - - ch.maxIncomingPayload = channelMaxPacket - - open := channelOpenMsg{ - ChanType: chanType, - PeersWindow: ch.myWindow, - MaxPacketSize: ch.maxIncomingPayload, - TypeSpecificData: extra, - PeersID: ch.localId, - } - if err := m.sendMessage(open); err != nil { - return nil, err - } - - switch msg := (<-ch.msg).(type) { - case *channelOpenConfirmMsg: - return ch, nil - case *channelOpenFailureMsg: - return nil, &OpenChannelError{msg.Reason, msg.Message} - default: - return nil, fmt.Errorf("ssh: unexpected packet in response to channel open: %T", msg) - } -} - -func (m *mux) handleUnknownChannelPacket(id uint32, packet []byte) error { - msg, err := decode(packet) - if err != nil { - return err - } - - switch msg := msg.(type) { - // RFC 4254 section 5.4 says unrecognized channel requests should - // receive a failure response. - case *channelRequestMsg: - if msg.WantReply { - return m.sendMessage(channelRequestFailureMsg{ - PeersID: msg.PeersID, - }) - } - return nil - default: - return fmt.Errorf("ssh: invalid channel %d", id) - } -} diff --git a/vendor/golang.org/x/crypto/ssh/server.go b/vendor/golang.org/x/crypto/ssh/server.go deleted file mode 100644 index b6911e83..00000000 --- a/vendor/golang.org/x/crypto/ssh/server.go +++ /dev/null @@ -1,720 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "bytes" - "errors" - "fmt" - "io" - "net" - "strings" -) - -// The Permissions type holds fine-grained permissions that are -// specific to a user or a specific authentication method for a user. -// The Permissions value for a successful authentication attempt is -// available in ServerConn, so it can be used to pass information from -// the user-authentication phase to the application layer. -type Permissions struct { - // CriticalOptions indicate restrictions to the default - // permissions, and are typically used in conjunction with - // user certificates. The standard for SSH certificates - // defines "force-command" (only allow the given command to - // execute) and "source-address" (only allow connections from - // the given address). The SSH package currently only enforces - // the "source-address" critical option. It is up to server - // implementations to enforce other critical options, such as - // "force-command", by checking them after the SSH handshake - // is successful. 
In general, SSH servers should reject
-	// connections that specify critical options that are unknown
-	// or not supported.
-	CriticalOptions map[string]string
-
-	// Extensions are extra functionality that the server may
-	// offer on authenticated connections. Lack of support for an
-	// extension does not preclude authenticating a user. Common
-	// extensions are "permit-agent-forwarding",
-	// "permit-X11-forwarding". The Go SSH library currently does
-	// not act on any extension, and it is up to server
-	// implementations to honor them. Extensions can be used to
-	// pass data from the authentication callbacks to the server
-	// application layer.
-	Extensions map[string]string
-}
-
-type GSSAPIWithMICConfig struct {
-	// AllowLogin must be set; it is called when gssapi-with-mic
-	// authentication is selected (RFC 4462 section 3). The srcName is from the
-	// results of the GSS-API authentication. The format is username@DOMAIN.
-	// GSSAPI guarantees to the server who the user is, but not whether they can
-	// log in, or with what permissions. This callback is called after the user
-	// identity is established with GSSAPI to decide whether the user may log in,
-	// and with which permissions. If the user is allowed to log in, it should
-	// return a nil error.
-	AllowLogin func(conn ConnMetadata, srcName string) (*Permissions, error)
-
-	// Server must be set. It's the implementation
-	// of the GSSAPIServer interface. See GSSAPIServer interface for details.
-	Server GSSAPIServer
-}
-
-// ServerConfig holds server specific configuration data.
-type ServerConfig struct {
-	// Config contains configuration shared between client and server.
-	Config
-
-	hostKeys []Signer
-
-	// NoClientAuth is true if clients are allowed to connect without
-	// authenticating.
-	NoClientAuth bool
-
-	// MaxAuthTries specifies the maximum number of authentication attempts
-	// permitted per connection. If set to a negative number, the number of
-	// attempts is unlimited. If set to zero, the number of attempts is limited
-	// to 6.
-	MaxAuthTries int
-
-	// PasswordCallback, if non-nil, is called when a user
-	// attempts to authenticate using a password.
-	PasswordCallback func(conn ConnMetadata, password []byte) (*Permissions, error)
-
-	// PublicKeyCallback, if non-nil, is called when a client
-	// offers a public key for authentication. It must return a nil error
-	// if the given public key can be used to authenticate the
-	// given user. For example, see CertChecker.Authenticate. A
-	// call to this function does not guarantee that the key
-	// offered is in fact used to authenticate. To record any data
-	// depending on the public key, store it inside a
-	// Permissions.Extensions entry.
-	PublicKeyCallback func(conn ConnMetadata, key PublicKey) (*Permissions, error)
-
-	// KeyboardInteractiveCallback, if non-nil, is called when
-	// keyboard-interactive authentication is selected (RFC
-	// 4256). The client object's Challenge function should be
-	// used to query the user. The callback may offer multiple
-	// Challenge rounds. To avoid information leaks, the client
-	// should be presented a challenge even if the user is
-	// unknown.
-	KeyboardInteractiveCallback func(conn ConnMetadata, client KeyboardInteractiveChallenge) (*Permissions, error)
-
-	// AuthLogCallback, if non-nil, is called to log all authentication
-	// attempts.
-	AuthLogCallback func(conn ConnMetadata, method string, err error)
-
-	// ServerVersion is the version identification string to announce in
-	// the public handshake.
- // If empty, a reasonable default is used. - // Note that RFC 4253 section 4.2 requires that this string start with - // "SSH-2.0-". - ServerVersion string - - // BannerCallback, if present, is called and the return string is sent to - // the client after key exchange completed but before authentication. - BannerCallback func(conn ConnMetadata) string - - // GSSAPIWithMICConfig includes gssapi server and callback, which if both non-nil, is used - // when gssapi-with-mic authentication is selected (RFC 4462 section 3). - GSSAPIWithMICConfig *GSSAPIWithMICConfig -} - -// AddHostKey adds a private key as a host key. If an existing host -// key exists with the same algorithm, it is overwritten. Each server -// config must have at least one host key. -func (s *ServerConfig) AddHostKey(key Signer) { - for i, k := range s.hostKeys { - if k.PublicKey().Type() == key.PublicKey().Type() { - s.hostKeys[i] = key - return - } - } - - s.hostKeys = append(s.hostKeys, key) -} - -// cachedPubKey contains the results of querying whether a public key is -// acceptable for a user. -type cachedPubKey struct { - user string - pubKeyData []byte - result error - perms *Permissions -} - -const maxCachedPubKeys = 16 - -// pubKeyCache caches tests for public keys. Since SSH clients -// will query whether a public key is acceptable before attempting to -// authenticate with it, we end up with duplicate queries for public -// key validity. The cache only applies to a single ServerConn. -type pubKeyCache struct { - keys []cachedPubKey -} - -// get returns the result for a given user/algo/key tuple. -func (c *pubKeyCache) get(user string, pubKeyData []byte) (cachedPubKey, bool) { - for _, k := range c.keys { - if k.user == user && bytes.Equal(k.pubKeyData, pubKeyData) { - return k, true - } - } - return cachedPubKey{}, false -} - -// add adds the given tuple to the cache. -func (c *pubKeyCache) add(candidate cachedPubKey) { - if len(c.keys) < maxCachedPubKeys { - c.keys = append(c.keys, candidate) - } -} - -// ServerConn is an authenticated SSH connection, as seen from the -// server -type ServerConn struct { - Conn - - // If the succeeding authentication callback returned a - // non-nil Permissions pointer, it is stored here. - Permissions *Permissions -} - -// NewServerConn starts a new SSH server with c as the underlying -// transport. It starts with a handshake and, if the handshake is -// unsuccessful, it closes the connection and returns an error. The -// Request and NewChannel channels must be serviced, or the connection -// will hang. -// -// The returned error may be of type *ServerAuthError for -// authentication errors. -func NewServerConn(c net.Conn, config *ServerConfig) (*ServerConn, <-chan NewChannel, <-chan *Request, error) { - fullConf := *config - fullConf.SetDefaults() - if fullConf.MaxAuthTries == 0 { - fullConf.MaxAuthTries = 6 - } - // Check if the config contains any unsupported key exchanges - for _, kex := range fullConf.KeyExchanges { - if _, ok := serverForbiddenKexAlgos[kex]; ok { - return nil, nil, nil, fmt.Errorf("ssh: unsupported key exchange %s for server", kex) - } - } - - s := &connection{ - sshConn: sshConn{conn: c}, - } - perms, err := s.serverHandshake(&fullConf) - if err != nil { - c.Close() - return nil, nil, nil, err - } - return &ServerConn{s, perms}, s.mux.incomingChannels, s.mux.incomingRequests, nil -} - -// signAndMarshal signs the data with the appropriate algorithm, -// and serializes the result in SSH wire format. 
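-// For a hypothetical Ed25519 signer the result is the two length-prefixed
-// strings of RFC 4253 section 6.6:
-//
-//	sigBytes, err := signAndMarshal(signer, rand, data)
-//	// sigBytes: string "ssh-ed25519", then string holding the 64-byte blob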
-func signAndMarshal(k Signer, rand io.Reader, data []byte) ([]byte, error) {
-	sig, err := k.Sign(rand, data)
-	if err != nil {
-		return nil, err
-	}
-
-	return Marshal(sig), nil
-}
-
-// handshake performs key exchange and user authentication.
-func (s *connection) serverHandshake(config *ServerConfig) (*Permissions, error) {
-	if len(config.hostKeys) == 0 {
-		return nil, errors.New("ssh: server has no host keys")
-	}
-
-	if !config.NoClientAuth && config.PasswordCallback == nil && config.PublicKeyCallback == nil &&
-		config.KeyboardInteractiveCallback == nil && (config.GSSAPIWithMICConfig == nil ||
-		config.GSSAPIWithMICConfig.AllowLogin == nil || config.GSSAPIWithMICConfig.Server == nil) {
-		return nil, errors.New("ssh: no authentication methods configured but NoClientAuth is also false")
-	}
-
-	if config.ServerVersion != "" {
-		s.serverVersion = []byte(config.ServerVersion)
-	} else {
-		s.serverVersion = []byte(packageVersion)
-	}
-	var err error
-	s.clientVersion, err = exchangeVersions(s.sshConn.conn, s.serverVersion)
-	if err != nil {
-		return nil, err
-	}
-
-	tr := newTransport(s.sshConn.conn, config.Rand, false /* not client */)
-	s.transport = newServerTransport(tr, s.clientVersion, s.serverVersion, config)
-
-	if err := s.transport.waitSession(); err != nil {
-		return nil, err
-	}
-
-	// We just did the key change, so the session ID is established.
-	s.sessionID = s.transport.getSessionID()
-
-	var packet []byte
-	if packet, err = s.transport.readPacket(); err != nil {
-		return nil, err
-	}
-
-	var serviceRequest serviceRequestMsg
-	if err = Unmarshal(packet, &serviceRequest); err != nil {
-		return nil, err
-	}
-	if serviceRequest.Service != serviceUserAuth {
-		return nil, errors.New("ssh: requested service '" + serviceRequest.Service + "' before authenticating")
-	}
-	serviceAccept := serviceAcceptMsg{
-		Service: serviceUserAuth,
-	}
-	if err := s.transport.writePacket(Marshal(&serviceAccept)); err != nil {
-		return nil, err
-	}
-
-	perms, err := s.serverAuthenticate(config)
-	if err != nil {
-		return nil, err
-	}
-	s.mux = newMux(s.transport)
-	return perms, err
-}
-
-func isAcceptableAlgo(algo string) bool {
-	switch algo {
-	case KeyAlgoRSA, KeyAlgoDSA, KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, KeyAlgoSKECDSA256, KeyAlgoED25519, KeyAlgoSKED25519,
-		CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoSKECDSA256v01, CertAlgoED25519v01, CertAlgoSKED25519v01:
-		return true
-	}
-	return false
-}
-
-func checkSourceAddress(addr net.Addr, sourceAddrs string) error {
-	if addr == nil {
-		return errors.New("ssh: no address known for client, but source-address match required")
-	}
-
-	tcpAddr, ok := addr.(*net.TCPAddr)
-	if !ok {
-		return fmt.Errorf("ssh: remote address %v is not a TCP address when checking source-address match", addr)
-	}
-
-	for _, sourceAddr := range strings.Split(sourceAddrs, ",") {
-		if allowedIP := net.ParseIP(sourceAddr); allowedIP != nil {
-			if allowedIP.Equal(tcpAddr.IP) {
-				return nil
-			}
-		} else {
-			_, ipNet, err := net.ParseCIDR(sourceAddr)
-			if err != nil {
-				return fmt.Errorf("ssh: error parsing source-address restriction %q: %v", sourceAddr, err)
-			}
-
-			if ipNet.Contains(tcpAddr.IP) {
-				return nil
-			}
-		}
-	}
-
-	return fmt.Errorf("ssh: remote address %v is not allowed because of source-address restriction", addr)
-}
-
-func gssExchangeToken(gssapiConfig *GSSAPIWithMICConfig, firstToken []byte, s *connection,
-	sessionID []byte, userAuthReq userAuthRequestMsg) (authErr error, perms
*Permissions, err error) { - gssAPIServer := gssapiConfig.Server - defer gssAPIServer.DeleteSecContext() - var srcName string - for { - var ( - outToken []byte - needContinue bool - ) - outToken, srcName, needContinue, err = gssAPIServer.AcceptSecContext(firstToken) - if err != nil { - return err, nil, nil - } - if len(outToken) != 0 { - if err := s.transport.writePacket(Marshal(&userAuthGSSAPIToken{ - Token: outToken, - })); err != nil { - return nil, nil, err - } - } - if !needContinue { - break - } - packet, err := s.transport.readPacket() - if err != nil { - return nil, nil, err - } - userAuthGSSAPITokenReq := &userAuthGSSAPIToken{} - if err := Unmarshal(packet, userAuthGSSAPITokenReq); err != nil { - return nil, nil, err - } - } - packet, err := s.transport.readPacket() - if err != nil { - return nil, nil, err - } - userAuthGSSAPIMICReq := &userAuthGSSAPIMIC{} - if err := Unmarshal(packet, userAuthGSSAPIMICReq); err != nil { - return nil, nil, err - } - mic := buildMIC(string(sessionID), userAuthReq.User, userAuthReq.Service, userAuthReq.Method) - if err := gssAPIServer.VerifyMIC(mic, userAuthGSSAPIMICReq.MIC); err != nil { - return err, nil, nil - } - perms, authErr = gssapiConfig.AllowLogin(s, srcName) - return authErr, perms, nil -} - -// ServerAuthError represents server authentication errors and is -// sometimes returned by NewServerConn. It appends any authentication -// errors that may occur, and is returned if all of the authentication -// methods provided by the user failed to authenticate. -type ServerAuthError struct { - // Errors contains authentication errors returned by the authentication - // callback methods. The first entry is typically ErrNoAuth. - Errors []error -} - -func (l ServerAuthError) Error() string { - var errs []string - for _, err := range l.Errors { - errs = append(errs, err.Error()) - } - return "[" + strings.Join(errs, ", ") + "]" -} - -// ErrNoAuth is the error value returned if no -// authentication method has been passed yet. This happens as a normal -// part of the authentication loop, since the client first tries -// 'none' authentication to discover available methods. -// It is returned in ServerAuthError.Errors from NewServerConn. 
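-//
-// A hypothetical caller (with net.Conn nc) can filter it out when
-// inspecting authentication failures:
-//
-//	_, _, _, err := NewServerConn(nc, config)
-//	if authErr, ok := err.(*ServerAuthError); ok {
-//		for _, e := range authErr.Errors {
-//			if e == ErrNoAuth {
-//				continue // the client's initial 'none' probe
-//			}
-//			// log or otherwise inspect e
-//		}
-//	}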
-var ErrNoAuth = errors.New("ssh: no auth passed yet") - -func (s *connection) serverAuthenticate(config *ServerConfig) (*Permissions, error) { - sessionID := s.transport.getSessionID() - var cache pubKeyCache - var perms *Permissions - - authFailures := 0 - var authErrs []error - var displayedBanner bool - -userAuthLoop: - for { - if authFailures >= config.MaxAuthTries && config.MaxAuthTries > 0 { - discMsg := &disconnectMsg{ - Reason: 2, - Message: "too many authentication failures", - } - - if err := s.transport.writePacket(Marshal(discMsg)); err != nil { - return nil, err - } - - return nil, discMsg - } - - var userAuthReq userAuthRequestMsg - if packet, err := s.transport.readPacket(); err != nil { - if err == io.EOF { - return nil, &ServerAuthError{Errors: authErrs} - } - return nil, err - } else if err = Unmarshal(packet, &userAuthReq); err != nil { - return nil, err - } - - if userAuthReq.Service != serviceSSH { - return nil, errors.New("ssh: client attempted to negotiate for unknown service: " + userAuthReq.Service) - } - - s.user = userAuthReq.User - - if !displayedBanner && config.BannerCallback != nil { - displayedBanner = true - msg := config.BannerCallback(s) - if msg != "" { - bannerMsg := &userAuthBannerMsg{ - Message: msg, - } - if err := s.transport.writePacket(Marshal(bannerMsg)); err != nil { - return nil, err - } - } - } - - perms = nil - authErr := ErrNoAuth - - switch userAuthReq.Method { - case "none": - if config.NoClientAuth { - authErr = nil - } - - // allow initial attempt of 'none' without penalty - if authFailures == 0 { - authFailures-- - } - case "password": - if config.PasswordCallback == nil { - authErr = errors.New("ssh: password auth not configured") - break - } - payload := userAuthReq.Payload - if len(payload) < 1 || payload[0] != 0 { - return nil, parseError(msgUserAuthRequest) - } - payload = payload[1:] - password, payload, ok := parseString(payload) - if !ok || len(payload) > 0 { - return nil, parseError(msgUserAuthRequest) - } - - perms, authErr = config.PasswordCallback(s, password) - case "keyboard-interactive": - if config.KeyboardInteractiveCallback == nil { - authErr = errors.New("ssh: keyboard-interactive auth not configured") - break - } - - prompter := &sshClientKeyboardInteractive{s} - perms, authErr = config.KeyboardInteractiveCallback(s, prompter.Challenge) - case "publickey": - if config.PublicKeyCallback == nil { - authErr = errors.New("ssh: publickey auth not configured") - break - } - payload := userAuthReq.Payload - if len(payload) < 1 { - return nil, parseError(msgUserAuthRequest) - } - isQuery := payload[0] == 0 - payload = payload[1:] - algoBytes, payload, ok := parseString(payload) - if !ok { - return nil, parseError(msgUserAuthRequest) - } - algo := string(algoBytes) - if !isAcceptableAlgo(algo) { - authErr = fmt.Errorf("ssh: algorithm %q not accepted", algo) - break - } - - pubKeyData, payload, ok := parseString(payload) - if !ok { - return nil, parseError(msgUserAuthRequest) - } - - pubKey, err := ParsePublicKey(pubKeyData) - if err != nil { - return nil, err - } - - candidate, ok := cache.get(s.user, pubKeyData) - if !ok { - candidate.user = s.user - candidate.pubKeyData = pubKeyData - candidate.perms, candidate.result = config.PublicKeyCallback(s, pubKey) - if candidate.result == nil && candidate.perms != nil && candidate.perms.CriticalOptions != nil && candidate.perms.CriticalOptions[sourceAddressCriticalOption] != "" { - candidate.result = checkSourceAddress( - s.RemoteAddr(), - 
candidate.perms.CriticalOptions[sourceAddressCriticalOption]) - } - cache.add(candidate) - } - - if isQuery { - // The client can query if the given public key - // would be okay. - - if len(payload) > 0 { - return nil, parseError(msgUserAuthRequest) - } - - if candidate.result == nil { - okMsg := userAuthPubKeyOkMsg{ - Algo: algo, - PubKey: pubKeyData, - } - if err = s.transport.writePacket(Marshal(&okMsg)); err != nil { - return nil, err - } - continue userAuthLoop - } - authErr = candidate.result - } else { - sig, payload, ok := parseSignature(payload) - if !ok || len(payload) > 0 { - return nil, parseError(msgUserAuthRequest) - } - // Ensure the public key algo and signature algo - // are supported. Compare the private key - // algorithm name that corresponds to algo with - // sig.Format. This is usually the same, but - // for certs, the names differ. - if !isAcceptableAlgo(sig.Format) { - authErr = fmt.Errorf("ssh: algorithm %q not accepted", sig.Format) - break - } - signedData := buildDataSignedForAuth(sessionID, userAuthReq, algoBytes, pubKeyData) - - if err := pubKey.Verify(signedData, sig); err != nil { - return nil, err - } - - authErr = candidate.result - perms = candidate.perms - } - case "gssapi-with-mic": - if config.GSSAPIWithMICConfig == nil { - authErr = errors.New("ssh: gssapi-with-mic auth not configured") - break - } - gssapiConfig := config.GSSAPIWithMICConfig - userAuthRequestGSSAPI, err := parseGSSAPIPayload(userAuthReq.Payload) - if err != nil { - return nil, parseError(msgUserAuthRequest) - } - // OpenSSH supports Kerberos V5 mechanism only for GSS-API authentication. - if userAuthRequestGSSAPI.N == 0 { - authErr = fmt.Errorf("ssh: Mechanism negotiation is not supported") - break - } - var i uint32 - present := false - for i = 0; i < userAuthRequestGSSAPI.N; i++ { - if userAuthRequestGSSAPI.OIDS[i].Equal(krb5Mesh) { - present = true - break - } - } - if !present { - authErr = fmt.Errorf("ssh: GSSAPI authentication must use the Kerberos V5 mechanism") - break - } - // Initial server response, see RFC 4462 section 3.3. - if err := s.transport.writePacket(Marshal(&userAuthGSSAPIResponse{ - SupportMech: krb5OID, - })); err != nil { - return nil, err - } - // Exchange token, see RFC 4462 section 3.4. 
- packet, err := s.transport.readPacket() - if err != nil { - return nil, err - } - userAuthGSSAPITokenReq := &userAuthGSSAPIToken{} - if err := Unmarshal(packet, userAuthGSSAPITokenReq); err != nil { - return nil, err - } - authErr, perms, err = gssExchangeToken(gssapiConfig, userAuthGSSAPITokenReq.Token, s, sessionID, - userAuthReq) - if err != nil { - return nil, err - } - default: - authErr = fmt.Errorf("ssh: unknown method %q", userAuthReq.Method) - } - - authErrs = append(authErrs, authErr) - - if config.AuthLogCallback != nil { - config.AuthLogCallback(s, userAuthReq.Method, authErr) - } - - if authErr == nil { - break userAuthLoop - } - - authFailures++ - - var failureMsg userAuthFailureMsg - if config.PasswordCallback != nil { - failureMsg.Methods = append(failureMsg.Methods, "password") - } - if config.PublicKeyCallback != nil { - failureMsg.Methods = append(failureMsg.Methods, "publickey") - } - if config.KeyboardInteractiveCallback != nil { - failureMsg.Methods = append(failureMsg.Methods, "keyboard-interactive") - } - if config.GSSAPIWithMICConfig != nil && config.GSSAPIWithMICConfig.Server != nil && - config.GSSAPIWithMICConfig.AllowLogin != nil { - failureMsg.Methods = append(failureMsg.Methods, "gssapi-with-mic") - } - - if len(failureMsg.Methods) == 0 { - return nil, errors.New("ssh: no authentication methods configured but NoClientAuth is also false") - } - - if err := s.transport.writePacket(Marshal(&failureMsg)); err != nil { - return nil, err - } - } - - if err := s.transport.writePacket([]byte{msgUserAuthSuccess}); err != nil { - return nil, err - } - return perms, nil -} - -// sshClientKeyboardInteractive implements a ClientKeyboardInteractive by -// asking the client on the other side of a ServerConn. -type sshClientKeyboardInteractive struct { - *connection -} - -func (c *sshClientKeyboardInteractive) Challenge(user, instruction string, questions []string, echos []bool) (answers []string, err error) { - if len(questions) != len(echos) { - return nil, errors.New("ssh: echos and questions must have equal length") - } - - var prompts []byte - for i := range questions { - prompts = appendString(prompts, questions[i]) - prompts = appendBool(prompts, echos[i]) - } - - if err := c.transport.writePacket(Marshal(&userAuthInfoRequestMsg{ - Instruction: instruction, - NumPrompts: uint32(len(questions)), - Prompts: prompts, - })); err != nil { - return nil, err - } - - packet, err := c.transport.readPacket() - if err != nil { - return nil, err - } - if packet[0] != msgUserAuthInfoResponse { - return nil, unexpectedMessageError(msgUserAuthInfoResponse, packet[0]) - } - packet = packet[1:] - - n, packet, ok := parseUint32(packet) - if !ok || int(n) != len(questions) { - return nil, parseError(msgUserAuthInfoResponse) - } - - for i := uint32(0); i < n; i++ { - ans, rest, ok := parseString(packet) - if !ok { - return nil, parseError(msgUserAuthInfoResponse) - } - - answers = append(answers, string(ans)) - packet = rest - } - if len(packet) != 0 { - return nil, errors.New("ssh: junk at end of message") - } - - return answers, nil -} diff --git a/vendor/golang.org/x/crypto/ssh/session.go b/vendor/golang.org/x/crypto/ssh/session.go deleted file mode 100644 index d3321f6b..00000000 --- a/vendor/golang.org/x/crypto/ssh/session.go +++ /dev/null @@ -1,647 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package ssh - -// Session implements an interactive session described in -// "RFC 4254, section 6". - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - "io/ioutil" - "sync" -) - -type Signal string - -// POSIX signals as listed in RFC 4254 Section 6.10. -const ( - SIGABRT Signal = "ABRT" - SIGALRM Signal = "ALRM" - SIGFPE Signal = "FPE" - SIGHUP Signal = "HUP" - SIGILL Signal = "ILL" - SIGINT Signal = "INT" - SIGKILL Signal = "KILL" - SIGPIPE Signal = "PIPE" - SIGQUIT Signal = "QUIT" - SIGSEGV Signal = "SEGV" - SIGTERM Signal = "TERM" - SIGUSR1 Signal = "USR1" - SIGUSR2 Signal = "USR2" -) - -var signals = map[Signal]int{ - SIGABRT: 6, - SIGALRM: 14, - SIGFPE: 8, - SIGHUP: 1, - SIGILL: 4, - SIGINT: 2, - SIGKILL: 9, - SIGPIPE: 13, - SIGQUIT: 3, - SIGSEGV: 11, - SIGTERM: 15, -} - -type TerminalModes map[uint8]uint32 - -// POSIX terminal mode flags as listed in RFC 4254 Section 8. -const ( - tty_OP_END = 0 - VINTR = 1 - VQUIT = 2 - VERASE = 3 - VKILL = 4 - VEOF = 5 - VEOL = 6 - VEOL2 = 7 - VSTART = 8 - VSTOP = 9 - VSUSP = 10 - VDSUSP = 11 - VREPRINT = 12 - VWERASE = 13 - VLNEXT = 14 - VFLUSH = 15 - VSWTCH = 16 - VSTATUS = 17 - VDISCARD = 18 - IGNPAR = 30 - PARMRK = 31 - INPCK = 32 - ISTRIP = 33 - INLCR = 34 - IGNCR = 35 - ICRNL = 36 - IUCLC = 37 - IXON = 38 - IXANY = 39 - IXOFF = 40 - IMAXBEL = 41 - ISIG = 50 - ICANON = 51 - XCASE = 52 - ECHO = 53 - ECHOE = 54 - ECHOK = 55 - ECHONL = 56 - NOFLSH = 57 - TOSTOP = 58 - IEXTEN = 59 - ECHOCTL = 60 - ECHOKE = 61 - PENDIN = 62 - OPOST = 70 - OLCUC = 71 - ONLCR = 72 - OCRNL = 73 - ONOCR = 74 - ONLRET = 75 - CS7 = 90 - CS8 = 91 - PARENB = 92 - PARODD = 93 - TTY_OP_ISPEED = 128 - TTY_OP_OSPEED = 129 -) - -// A Session represents a connection to a remote command or shell. -type Session struct { - // Stdin specifies the remote process's standard input. - // If Stdin is nil, the remote process reads from an empty - // bytes.Buffer. - Stdin io.Reader - - // Stdout and Stderr specify the remote process's standard - // output and error. - // - // If either is nil, Run connects the corresponding file - // descriptor to an instance of ioutil.Discard. There is a - // fixed amount of buffering that is shared for the two streams. - // If either blocks it may eventually cause the remote - // command to block. - Stdout io.Writer - Stderr io.Writer - - ch Channel // the channel backing this session - started bool // true once Start, Run or Shell is invoked. - copyFuncs []func() error - errors chan error // one send per copyFunc - - // true if pipe method is active - stdinpipe, stdoutpipe, stderrpipe bool - - // stdinPipeWriter is non-nil if StdinPipe has not been called - // and Stdin was specified by the user; it is the write end of - // a pipe connecting Session.Stdin to the stdin channel. - stdinPipeWriter io.WriteCloser - - exitStatus chan error -} - -// SendRequest sends an out-of-band channel request on the SSH channel -// underlying the session. -func (s *Session) SendRequest(name string, wantReply bool, payload []byte) (bool, error) { - return s.ch.SendRequest(name, wantReply, payload) -} - -func (s *Session) Close() error { - return s.ch.Close() -} - -// RFC 4254 Section 6.4. -type setenvRequest struct { - Name string - Value string -} - -// Setenv sets an environment variable that will be applied to any -// command executed by Shell or Run. 
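-//
-// For example:
-//
-//	if err := session.Setenv("LANG", "en_US.UTF-8"); err != nil {
-//		// the server refused the request (sshd only honors names
-//		// matched by its AcceptEnv configuration)
-//	}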
-func (s *Session) Setenv(name, value string) error { - msg := setenvRequest{ - Name: name, - Value: value, - } - ok, err := s.ch.SendRequest("env", true, Marshal(&msg)) - if err == nil && !ok { - err = errors.New("ssh: setenv failed") - } - return err -} - -// RFC 4254 Section 6.2. -type ptyRequestMsg struct { - Term string - Columns uint32 - Rows uint32 - Width uint32 - Height uint32 - Modelist string -} - -// RequestPty requests the association of a pty with the session on the remote host. -func (s *Session) RequestPty(term string, h, w int, termmodes TerminalModes) error { - var tm []byte - for k, v := range termmodes { - kv := struct { - Key byte - Val uint32 - }{k, v} - - tm = append(tm, Marshal(&kv)...) - } - tm = append(tm, tty_OP_END) - req := ptyRequestMsg{ - Term: term, - Columns: uint32(w), - Rows: uint32(h), - Width: uint32(w * 8), - Height: uint32(h * 8), - Modelist: string(tm), - } - ok, err := s.ch.SendRequest("pty-req", true, Marshal(&req)) - if err == nil && !ok { - err = errors.New("ssh: pty-req failed") - } - return err -} - -// RFC 4254 Section 6.5. -type subsystemRequestMsg struct { - Subsystem string -} - -// RequestSubsystem requests the association of a subsystem with the session on the remote host. -// A subsystem is a predefined command that runs in the background when the ssh session is initiated -func (s *Session) RequestSubsystem(subsystem string) error { - msg := subsystemRequestMsg{ - Subsystem: subsystem, - } - ok, err := s.ch.SendRequest("subsystem", true, Marshal(&msg)) - if err == nil && !ok { - err = errors.New("ssh: subsystem request failed") - } - return err -} - -// RFC 4254 Section 6.7. -type ptyWindowChangeMsg struct { - Columns uint32 - Rows uint32 - Width uint32 - Height uint32 -} - -// WindowChange informs the remote host about a terminal window dimension change to h rows and w columns. -func (s *Session) WindowChange(h, w int) error { - req := ptyWindowChangeMsg{ - Columns: uint32(w), - Rows: uint32(h), - Width: uint32(w * 8), - Height: uint32(h * 8), - } - _, err := s.ch.SendRequest("window-change", false, Marshal(&req)) - return err -} - -// RFC 4254 Section 6.9. -type signalMsg struct { - Signal string -} - -// Signal sends the given signal to the remote process. -// sig is one of the SIG* constants. -func (s *Session) Signal(sig Signal) error { - msg := signalMsg{ - Signal: string(sig), - } - - _, err := s.ch.SendRequest("signal", false, Marshal(&msg)) - return err -} - -// RFC 4254 Section 6.5. -type execMsg struct { - Command string -} - -// Start runs cmd on the remote host. Typically, the remote -// server passes cmd to the shell for interpretation. -// A Session only accepts one call to Run, Start or Shell. -func (s *Session) Start(cmd string) error { - if s.started { - return errors.New("ssh: session already started") - } - req := execMsg{ - Command: cmd, - } - - ok, err := s.ch.SendRequest("exec", true, Marshal(&req)) - if err == nil && !ok { - err = fmt.Errorf("ssh: command %v failed", cmd) - } - if err != nil { - return err - } - return s.start() -} - -// Run runs cmd on the remote host. Typically, the remote -// server passes cmd to the shell for interpretation. -// A Session only accepts one call to Run, Start, Shell, Output, -// or CombinedOutput. -// -// The returned error is nil if the command runs, has no problems -// copying stdin, stdout, and stderr, and exits with a zero exit -// status. -// -// If the remote server does not send an exit status, an error of type -// *ExitMissingError is returned. 
If the command completes -// unsuccessfully or is interrupted by a signal, the error is of type -// *ExitError. Other error types may be returned for I/O problems. -func (s *Session) Run(cmd string) error { - err := s.Start(cmd) - if err != nil { - return err - } - return s.Wait() -} - -// Output runs cmd on the remote host and returns its standard output. -func (s *Session) Output(cmd string) ([]byte, error) { - if s.Stdout != nil { - return nil, errors.New("ssh: Stdout already set") - } - var b bytes.Buffer - s.Stdout = &b - err := s.Run(cmd) - return b.Bytes(), err -} - -type singleWriter struct { - b bytes.Buffer - mu sync.Mutex -} - -func (w *singleWriter) Write(p []byte) (int, error) { - w.mu.Lock() - defer w.mu.Unlock() - return w.b.Write(p) -} - -// CombinedOutput runs cmd on the remote host and returns its combined -// standard output and standard error. -func (s *Session) CombinedOutput(cmd string) ([]byte, error) { - if s.Stdout != nil { - return nil, errors.New("ssh: Stdout already set") - } - if s.Stderr != nil { - return nil, errors.New("ssh: Stderr already set") - } - var b singleWriter - s.Stdout = &b - s.Stderr = &b - err := s.Run(cmd) - return b.b.Bytes(), err -} - -// Shell starts a login shell on the remote host. A Session only -// accepts one call to Run, Start, Shell, Output, or CombinedOutput. -func (s *Session) Shell() error { - if s.started { - return errors.New("ssh: session already started") - } - - ok, err := s.ch.SendRequest("shell", true, nil) - if err == nil && !ok { - return errors.New("ssh: could not start shell") - } - if err != nil { - return err - } - return s.start() -} - -func (s *Session) start() error { - s.started = true - - type F func(*Session) - for _, setupFd := range []F{(*Session).stdin, (*Session).stdout, (*Session).stderr} { - setupFd(s) - } - - s.errors = make(chan error, len(s.copyFuncs)) - for _, fn := range s.copyFuncs { - go func(fn func() error) { - s.errors <- fn() - }(fn) - } - return nil -} - -// Wait waits for the remote command to exit. -// -// The returned error is nil if the command runs, has no problems -// copying stdin, stdout, and stderr, and exits with a zero exit -// status. -// -// If the remote server does not send an exit status, an error of type -// *ExitMissingError is returned. If the command completes -// unsuccessfully or is interrupted by a signal, the error is of type -// *ExitError. Other error types may be returned for I/O problems. -func (s *Session) Wait() error { - if !s.started { - return errors.New("ssh: session not started") - } - waitErr := <-s.exitStatus - - if s.stdinPipeWriter != nil { - s.stdinPipeWriter.Close() - } - var copyError error - for range s.copyFuncs { - if err := <-s.errors; err != nil && copyError == nil { - copyError = err - } - } - if waitErr != nil { - return waitErr - } - return copyError -} - -func (s *Session) wait(reqs <-chan *Request) error { - wm := Waitmsg{status: -1} - // Wait for msg channel to be closed before returning. - for msg := range reqs { - switch msg.Type { - case "exit-status": - wm.status = int(binary.BigEndian.Uint32(msg.Payload)) - case "exit-signal": - var sigval struct { - Signal string - CoreDumped bool - Error string - Lang string - } - if err := Unmarshal(msg.Payload, &sigval); err != nil { - return err - } - - // Must sanitize strings? - wm.signal = sigval.Signal - wm.msg = sigval.Error - wm.lang = sigval.Lang - default: - // This handles keepalives and matches - // OpenSSH's behaviour. 
- if msg.WantReply { - msg.Reply(false, nil) - } - } - } - if wm.status == 0 { - return nil - } - if wm.status == -1 { - // exit-status was never sent from server - if wm.signal == "" { - // signal was not sent either. RFC 4254 - // section 6.10 recommends against this - // behavior, but it is allowed, so we let - // clients handle it. - return &ExitMissingError{} - } - wm.status = 128 - if _, ok := signals[Signal(wm.signal)]; ok { - wm.status += signals[Signal(wm.signal)] - } - } - - return &ExitError{wm} -} - -// ExitMissingError is returned if a session is torn down cleanly, but -// the server sends no confirmation of the exit status. -type ExitMissingError struct{} - -func (e *ExitMissingError) Error() string { - return "wait: remote command exited without exit status or exit signal" -} - -func (s *Session) stdin() { - if s.stdinpipe { - return - } - var stdin io.Reader - if s.Stdin == nil { - stdin = new(bytes.Buffer) - } else { - r, w := io.Pipe() - go func() { - _, err := io.Copy(w, s.Stdin) - w.CloseWithError(err) - }() - stdin, s.stdinPipeWriter = r, w - } - s.copyFuncs = append(s.copyFuncs, func() error { - _, err := io.Copy(s.ch, stdin) - if err1 := s.ch.CloseWrite(); err == nil && err1 != io.EOF { - err = err1 - } - return err - }) -} - -func (s *Session) stdout() { - if s.stdoutpipe { - return - } - if s.Stdout == nil { - s.Stdout = ioutil.Discard - } - s.copyFuncs = append(s.copyFuncs, func() error { - _, err := io.Copy(s.Stdout, s.ch) - return err - }) -} - -func (s *Session) stderr() { - if s.stderrpipe { - return - } - if s.Stderr == nil { - s.Stderr = ioutil.Discard - } - s.copyFuncs = append(s.copyFuncs, func() error { - _, err := io.Copy(s.Stderr, s.ch.Stderr()) - return err - }) -} - -// sessionStdin reroutes Close to CloseWrite. -type sessionStdin struct { - io.Writer - ch Channel -} - -func (s *sessionStdin) Close() error { - return s.ch.CloseWrite() -} - -// StdinPipe returns a pipe that will be connected to the -// remote command's standard input when the command starts. -func (s *Session) StdinPipe() (io.WriteCloser, error) { - if s.Stdin != nil { - return nil, errors.New("ssh: Stdin already set") - } - if s.started { - return nil, errors.New("ssh: StdinPipe after process started") - } - s.stdinpipe = true - return &sessionStdin{s.ch, s.ch}, nil -} - -// StdoutPipe returns a pipe that will be connected to the -// remote command's standard output when the command starts. -// There is a fixed amount of buffering that is shared between -// stdout and stderr streams. If the StdoutPipe reader is -// not serviced fast enough it may eventually cause the -// remote command to block. -func (s *Session) StdoutPipe() (io.Reader, error) { - if s.Stdout != nil { - return nil, errors.New("ssh: Stdout already set") - } - if s.started { - return nil, errors.New("ssh: StdoutPipe after process started") - } - s.stdoutpipe = true - return s.ch, nil -} - -// StderrPipe returns a pipe that will be connected to the -// remote command's standard error when the command starts. -// There is a fixed amount of buffering that is shared between -// stdout and stderr streams. If the StderrPipe reader is -// not serviced fast enough it may eventually cause the -// remote command to block. 
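-//
-// A typical use streams diagnostics while the command runs, e.g.:
-//
-//	stderr, err := session.StderrPipe()
-//	if err == nil {
-//		go io.Copy(os.Stderr, stderr)
-//	}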
-func (s *Session) StderrPipe() (io.Reader, error) {
-	if s.Stderr != nil {
-		return nil, errors.New("ssh: Stderr already set")
-	}
-	if s.started {
-		return nil, errors.New("ssh: StderrPipe after process started")
-	}
-	s.stderrpipe = true
-	return s.ch.Stderr(), nil
-}
-
-// newSession returns a new interactive session on the remote host.
-func newSession(ch Channel, reqs <-chan *Request) (*Session, error) {
-	s := &Session{
-		ch: ch,
-	}
-	s.exitStatus = make(chan error, 1)
-	go func() {
-		s.exitStatus <- s.wait(reqs)
-	}()
-
-	return s, nil
-}
-
-// An ExitError reports unsuccessful completion of a remote command.
-type ExitError struct {
-	Waitmsg
-}
-
-func (e *ExitError) Error() string {
-	return e.Waitmsg.String()
-}
-
-// Waitmsg stores the information about an exited remote command
-// as reported by Wait.
-type Waitmsg struct {
-	status int
-	signal string
-	msg    string
-	lang   string
-}
-
-// ExitStatus returns the exit status of the remote command.
-func (w Waitmsg) ExitStatus() int {
-	return w.status
-}
-
-// Signal returns the exit signal of the remote command if
-// it was terminated violently.
-func (w Waitmsg) Signal() string {
-	return w.signal
-}
-
-// Msg returns the exit message given by the remote command.
-func (w Waitmsg) Msg() string {
-	return w.msg
-}
-
-// Lang returns the language tag. See RFC 3066.
-func (w Waitmsg) Lang() string {
-	return w.lang
-}
-
-func (w Waitmsg) String() string {
-	str := fmt.Sprintf("Process exited with status %v", w.status)
-	if w.signal != "" {
-		str += fmt.Sprintf(" from signal %v", w.signal)
-	}
-	if w.msg != "" {
-		str += fmt.Sprintf(". Reason was: %v", w.msg)
-	}
-	return str
-}
diff --git a/vendor/golang.org/x/crypto/ssh/ssh_gss.go b/vendor/golang.org/x/crypto/ssh/ssh_gss.go
deleted file mode 100644
index 24bd7c8e..00000000
--- a/vendor/golang.org/x/crypto/ssh/ssh_gss.go
+++ /dev/null
@@ -1,139 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ssh
-
-import (
-	"encoding/asn1"
-	"errors"
-)
-
-var krb5OID []byte
-
-func init() {
-	krb5OID, _ = asn1.Marshal(krb5Mesh)
-}
-
-// GSSAPIClient provides the API to plug in GSSAPI authentication for client logins.
-type GSSAPIClient interface {
-	// InitSecContext initiates the establishment of a security context for GSS-API between the
-	// ssh client and ssh server. Initially the token parameter should be specified as nil.
-	// The routine may return an outputToken which should be transferred to
-	// the ssh server, where the ssh server will present it to
-	// AcceptSecContext. If no token need be sent, InitSecContext will indicate this by setting
-	// needContinue to false. To complete the context
-	// establishment, one or more reply tokens may be required from the ssh
-	// server; if so, InitSecContext will return a needContinue which is true.
-	// In this case, InitSecContext should be called again when the
-	// reply token is received from the ssh server, passing the reply
-	// token to InitSecContext via the token parameters.
-	// See RFC 2743 section 2.2.1 and RFC 4462 section 3.4.
-	InitSecContext(target string, token []byte, isGSSDelegCreds bool) (outputToken []byte, needContinue bool, err error)
-	// GetMIC generates a cryptographic MIC for the SSH2 message, and places
-	// the MIC in a token for transfer to the ssh server.
- // The contents of the MIC field are obtained by calling GSS_GetMIC() - // over the following, using the GSS-API context that was just - // established: - // string session identifier - // byte SSH_MSG_USERAUTH_REQUEST - // string user name - // string service - // string "gssapi-with-mic" - // See RFC 2743 section 2.3.1 and RFC 4462 3.5. - GetMIC(micFiled []byte) ([]byte, error) - // Whenever possible, it should be possible for - // DeleteSecContext() calls to be successfully processed even - // if other calls cannot succeed, thereby enabling context-related - // resources to be released. - // In addition to deleting established security contexts, - // gss_delete_sec_context must also be able to delete "half-built" - // security contexts resulting from an incomplete sequence of - // InitSecContext()/AcceptSecContext() calls. - // See RFC 2743 section 2.2.3. - DeleteSecContext() error -} - -// GSSAPIServer provides the API to plug in GSSAPI authentication for server logins. -type GSSAPIServer interface { - // AcceptSecContext allows a remotely initiated security context between the application - // and a remote peer to be established by the ssh client. The routine may return a - // outputToken which should be transferred to the ssh client, - // where the ssh client will present it to InitSecContext. - // If no token need be sent, AcceptSecContext will indicate this - // by setting the needContinue to false. To - // complete the context establishment, one or more reply tokens may be - // required from the ssh client. if so, AcceptSecContext - // will return a needContinue which is true, in which case it - // should be called again when the reply token is received from the ssh - // client, passing the token to AcceptSecContext via the - // token parameters. - // The srcName return value is the authenticated username. - // See RFC 2743 section 2.2.2 and RFC 4462 section 3.4. - AcceptSecContext(token []byte) (outputToken []byte, srcName string, needContinue bool, err error) - // VerifyMIC verifies that a cryptographic MIC, contained in the token parameter, - // fits the supplied message is received from the ssh client. - // See RFC 2743 section 2.3.2. - VerifyMIC(micField []byte, micToken []byte) error - // Whenever possible, it should be possible for - // DeleteSecContext() calls to be successfully processed even - // if other calls cannot succeed, thereby enabling context-related - // resources to be released. - // In addition to deleting established security contexts, - // gss_delete_sec_context must also be able to delete "half-built" - // security contexts resulting from an incomplete sequence of - // InitSecContext()/AcceptSecContext() calls. - // See RFC 2743 section 2.2.3. - DeleteSecContext() error -} - -var ( - // OpenSSH supports Kerberos V5 mechanism only for GSS-API authentication, - // so we also support the krb5 mechanism only. - // See RFC 1964 section 1. - krb5Mesh = asn1.ObjectIdentifier{1, 2, 840, 113554, 1, 2, 2} -) - -// The GSS-API authentication method is initiated when the client sends an SSH_MSG_USERAUTH_REQUEST -// See RFC 4462 section 3.2. 
-type userAuthRequestGSSAPI struct { - N uint32 - OIDS []asn1.ObjectIdentifier -} - -func parseGSSAPIPayload(payload []byte) (*userAuthRequestGSSAPI, error) { - n, rest, ok := parseUint32(payload) - if !ok { - return nil, errors.New("parse uint32 failed") - } - s := &userAuthRequestGSSAPI{ - N: n, - OIDS: make([]asn1.ObjectIdentifier, n), - } - for i := 0; i < int(n); i++ { - var ( - desiredMech []byte - err error - ) - desiredMech, rest, ok = parseString(rest) - if !ok { - return nil, errors.New("parse string failed") - } - if rest, err = asn1.Unmarshal(desiredMech, &s.OIDS[i]); err != nil { - return nil, err - } - - } - return s, nil -} - -// See RFC 4462 section 3.6. -func buildMIC(sessionID string, username string, service string, authMethod string) []byte { - out := make([]byte, 0, 0) - out = appendString(out, sessionID) - out = append(out, msgUserAuthRequest) - out = appendString(out, username) - out = appendString(out, service) - out = appendString(out, authMethod) - return out -} diff --git a/vendor/golang.org/x/crypto/ssh/streamlocal.go b/vendor/golang.org/x/crypto/ssh/streamlocal.go deleted file mode 100644 index b171b330..00000000 --- a/vendor/golang.org/x/crypto/ssh/streamlocal.go +++ /dev/null @@ -1,116 +0,0 @@ -package ssh - -import ( - "errors" - "io" - "net" -) - -// streamLocalChannelOpenDirectMsg is a struct used for SSH_MSG_CHANNEL_OPEN message -// with "direct-streamlocal@openssh.com" string. -// -// See openssh-portable/PROTOCOL, section 2.4. connection: Unix domain socket forwarding -// https://github.com/openssh/openssh-portable/blob/master/PROTOCOL#L235 -type streamLocalChannelOpenDirectMsg struct { - socketPath string - reserved0 string - reserved1 uint32 -} - -// forwardedStreamLocalPayload is a struct used for SSH_MSG_CHANNEL_OPEN message -// with "forwarded-streamlocal@openssh.com" string. -type forwardedStreamLocalPayload struct { - SocketPath string - Reserved0 string -} - -// streamLocalChannelForwardMsg is a struct used for SSH2_MSG_GLOBAL_REQUEST message -// with "streamlocal-forward@openssh.com"/"cancel-streamlocal-forward@openssh.com" string. -type streamLocalChannelForwardMsg struct { - socketPath string -} - -// ListenUnix is similar to ListenTCP but uses a Unix domain socket. -func (c *Client) ListenUnix(socketPath string) (net.Listener, error) { - c.handleForwardsOnce.Do(c.handleForwards) - m := streamLocalChannelForwardMsg{ - socketPath, - } - // send message - ok, _, err := c.SendRequest("streamlocal-forward@openssh.com", true, Marshal(&m)) - if err != nil { - return nil, err - } - if !ok { - return nil, errors.New("ssh: streamlocal-forward@openssh.com request denied by peer") - } - ch := c.forwards.add(&net.UnixAddr{Name: socketPath, Net: "unix"}) - - return &unixListener{socketPath, c, ch}, nil -} - -func (c *Client) dialStreamLocal(socketPath string) (Channel, error) { - msg := streamLocalChannelOpenDirectMsg{ - socketPath: socketPath, - } - ch, in, err := c.OpenChannel("direct-streamlocal@openssh.com", Marshal(&msg)) - if err != nil { - return nil, err - } - go DiscardRequests(in) - return ch, err -} - -type unixListener struct { - socketPath string - - conn *Client - in <-chan forward -} - -// Accept waits for and returns the next connection to the listener. 
-func (l *unixListener) Accept() (net.Conn, error) { - s, ok := <-l.in - if !ok { - return nil, io.EOF - } - ch, incoming, err := s.newCh.Accept() - if err != nil { - return nil, err - } - go DiscardRequests(incoming) - - return &chanConn{ - Channel: ch, - laddr: &net.UnixAddr{ - Name: l.socketPath, - Net: "unix", - }, - raddr: &net.UnixAddr{ - Name: "@", - Net: "unix", - }, - }, nil -} - -// Close closes the listener. -func (l *unixListener) Close() error { - // this also closes the listener. - l.conn.forwards.remove(&net.UnixAddr{Name: l.socketPath, Net: "unix"}) - m := streamLocalChannelForwardMsg{ - l.socketPath, - } - ok, _, err := l.conn.SendRequest("cancel-streamlocal-forward@openssh.com", true, Marshal(&m)) - if err == nil && !ok { - err = errors.New("ssh: cancel-streamlocal-forward@openssh.com failed") - } - return err -} - -// Addr returns the listener's network address. -func (l *unixListener) Addr() net.Addr { - return &net.UnixAddr{ - Name: l.socketPath, - Net: "unix", - } -} diff --git a/vendor/golang.org/x/crypto/ssh/tcpip.go b/vendor/golang.org/x/crypto/ssh/tcpip.go deleted file mode 100644 index 80d35f5e..00000000 --- a/vendor/golang.org/x/crypto/ssh/tcpip.go +++ /dev/null @@ -1,474 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "errors" - "fmt" - "io" - "math/rand" - "net" - "strconv" - "strings" - "sync" - "time" -) - -// Listen requests the remote peer open a listening socket on -// addr. Incoming connections will be available by calling Accept on -// the returned net.Listener. The listener must be serviced, or the -// SSH connection may hang. -// N must be "tcp", "tcp4", "tcp6", or "unix". -func (c *Client) Listen(n, addr string) (net.Listener, error) { - switch n { - case "tcp", "tcp4", "tcp6": - laddr, err := net.ResolveTCPAddr(n, addr) - if err != nil { - return nil, err - } - return c.ListenTCP(laddr) - case "unix": - return c.ListenUnix(addr) - default: - return nil, fmt.Errorf("ssh: unsupported protocol: %s", n) - } -} - -// Automatic port allocation is broken with OpenSSH before 6.0. See -// also https://bugzilla.mindrot.org/show_bug.cgi?id=2017. In -// particular, OpenSSH 5.9 sends a channelOpenMsg with port number 0, -// rather than the actual port number. This means you can never open -// two different listeners with auto allocated ports. We work around -// this by trying explicit ports until we succeed. - -const openSSHPrefix = "OpenSSH_" - -var portRandomizer = rand.New(rand.NewSource(time.Now().UnixNano())) - -// isBrokenOpenSSHVersion returns true if the given version string -// specifies a version of OpenSSH that is known to have a bug in port -// forwarding. -func isBrokenOpenSSHVersion(versionStr string) bool { - i := strings.Index(versionStr, openSSHPrefix) - if i < 0 { - return false - } - i += len(openSSHPrefix) - j := i - for ; j < len(versionStr); j++ { - if versionStr[j] < '0' || versionStr[j] > '9' { - break - } - } - version, _ := strconv.Atoi(versionStr[i:j]) - return version < 6 -} - -// autoPortListenWorkaround simulates automatic port allocation by -// trying random ports repeatedly. 
-func (c *Client) autoPortListenWorkaround(laddr *net.TCPAddr) (net.Listener, error) { - var sshListener net.Listener - var err error - const tries = 10 - for i := 0; i < tries; i++ { - addr := *laddr - addr.Port = 1024 + portRandomizer.Intn(60000) - sshListener, err = c.ListenTCP(&addr) - if err == nil { - laddr.Port = addr.Port - return sshListener, err - } - } - return nil, fmt.Errorf("ssh: listen on random port failed after %d tries: %v", tries, err) -} - -// RFC 4254 7.1 -type channelForwardMsg struct { - addr string - rport uint32 -} - -// handleForwards starts goroutines handling forwarded connections. -// It's called on first use by (*Client).ListenTCP to not launch -// goroutines until needed. -func (c *Client) handleForwards() { - go c.forwards.handleChannels(c.HandleChannelOpen("forwarded-tcpip")) - go c.forwards.handleChannels(c.HandleChannelOpen("forwarded-streamlocal@openssh.com")) -} - -// ListenTCP requests the remote peer open a listening socket -// on laddr. Incoming connections will be available by calling -// Accept on the returned net.Listener. -func (c *Client) ListenTCP(laddr *net.TCPAddr) (net.Listener, error) { - c.handleForwardsOnce.Do(c.handleForwards) - if laddr.Port == 0 && isBrokenOpenSSHVersion(string(c.ServerVersion())) { - return c.autoPortListenWorkaround(laddr) - } - - m := channelForwardMsg{ - laddr.IP.String(), - uint32(laddr.Port), - } - // send message - ok, resp, err := c.SendRequest("tcpip-forward", true, Marshal(&m)) - if err != nil { - return nil, err - } - if !ok { - return nil, errors.New("ssh: tcpip-forward request denied by peer") - } - - // If the original port was 0, then the remote side will - // supply a real port number in the response. - if laddr.Port == 0 { - var p struct { - Port uint32 - } - if err := Unmarshal(resp, &p); err != nil { - return nil, err - } - laddr.Port = int(p.Port) - } - - // Register this forward, using the port number we obtained. - ch := c.forwards.add(laddr) - - return &tcpListener{laddr, c, ch}, nil -} - -// forwardList stores a mapping between remote -// forward requests and the tcpListeners. -type forwardList struct { - sync.Mutex - entries []forwardEntry -} - -// forwardEntry represents an established mapping of a laddr on a -// remote ssh server to a channel connected to a tcpListener. -type forwardEntry struct { - laddr net.Addr - c chan forward -} - -// forward represents an incoming forwarded tcpip connection. The -// arguments to add/remove/lookup should be address as specified in -// the original forward-request. -type forward struct { - newCh NewChannel // the ssh client channel underlying this forward - raddr net.Addr // the raddr of the incoming connection -} - -func (l *forwardList) add(addr net.Addr) chan forward { - l.Lock() - defer l.Unlock() - f := forwardEntry{ - laddr: addr, - c: make(chan forward, 1), - } - l.entries = append(l.entries, f) - return f.c -} - -// See RFC 4254, section 7.2 -type forwardedTCPPayload struct { - Addr string - Port uint32 - OriginAddr string - OriginPort uint32 -} - -// parseTCPAddr parses the originating address from the remote into a *net.TCPAddr. 
-func parseTCPAddr(addr string, port uint32) (*net.TCPAddr, error) { - if port == 0 || port > 65535 { - return nil, fmt.Errorf("ssh: port number out of range: %d", port) - } - ip := net.ParseIP(string(addr)) - if ip == nil { - return nil, fmt.Errorf("ssh: cannot parse IP address %q", addr) - } - return &net.TCPAddr{IP: ip, Port: int(port)}, nil -} - -func (l *forwardList) handleChannels(in <-chan NewChannel) { - for ch := range in { - var ( - laddr net.Addr - raddr net.Addr - err error - ) - switch channelType := ch.ChannelType(); channelType { - case "forwarded-tcpip": - var payload forwardedTCPPayload - if err = Unmarshal(ch.ExtraData(), &payload); err != nil { - ch.Reject(ConnectionFailed, "could not parse forwarded-tcpip payload: "+err.Error()) - continue - } - - // RFC 4254 section 7.2 specifies that incoming - // addresses should list the address, in string - // format. It is implied that this should be an IP - // address, as it would be impossible to connect to it - // otherwise. - laddr, err = parseTCPAddr(payload.Addr, payload.Port) - if err != nil { - ch.Reject(ConnectionFailed, err.Error()) - continue - } - raddr, err = parseTCPAddr(payload.OriginAddr, payload.OriginPort) - if err != nil { - ch.Reject(ConnectionFailed, err.Error()) - continue - } - - case "forwarded-streamlocal@openssh.com": - var payload forwardedStreamLocalPayload - if err = Unmarshal(ch.ExtraData(), &payload); err != nil { - ch.Reject(ConnectionFailed, "could not parse forwarded-streamlocal@openssh.com payload: "+err.Error()) - continue - } - laddr = &net.UnixAddr{ - Name: payload.SocketPath, - Net: "unix", - } - raddr = &net.UnixAddr{ - Name: "@", - Net: "unix", - } - default: - panic(fmt.Errorf("ssh: unknown channel type %s", channelType)) - } - if ok := l.forward(laddr, raddr, ch); !ok { - // Section 7.2, implementations MUST reject spurious incoming - // connections. - ch.Reject(Prohibited, "no forward for address") - continue - } - - } -} - -// remove removes the forward entry, and the channel feeding its -// listener. -func (l *forwardList) remove(addr net.Addr) { - l.Lock() - defer l.Unlock() - for i, f := range l.entries { - if addr.Network() == f.laddr.Network() && addr.String() == f.laddr.String() { - l.entries = append(l.entries[:i], l.entries[i+1:]...) - close(f.c) - return - } - } -} - -// closeAll closes and clears all forwards. -func (l *forwardList) closeAll() { - l.Lock() - defer l.Unlock() - for _, f := range l.entries { - close(f.c) - } - l.entries = nil -} - -func (l *forwardList) forward(laddr, raddr net.Addr, ch NewChannel) bool { - l.Lock() - defer l.Unlock() - for _, f := range l.entries { - if laddr.Network() == f.laddr.Network() && laddr.String() == f.laddr.String() { - f.c <- forward{newCh: ch, raddr: raddr} - return true - } - } - return false -} - -type tcpListener struct { - laddr *net.TCPAddr - - conn *Client - in <-chan forward -} - -// Accept waits for and returns the next connection to the listener. -func (l *tcpListener) Accept() (net.Conn, error) { - s, ok := <-l.in - if !ok { - return nil, io.EOF - } - ch, incoming, err := s.newCh.Accept() - if err != nil { - return nil, err - } - go DiscardRequests(incoming) - - return &chanConn{ - Channel: ch, - laddr: l.laddr, - raddr: s.raddr, - }, nil -} - -// Close closes the listener. -func (l *tcpListener) Close() error { - m := channelForwardMsg{ - l.laddr.IP.String(), - uint32(l.laddr.Port), - } - - // this also closes the listener. 
- l.conn.forwards.remove(l.laddr) - ok, _, err := l.conn.SendRequest("cancel-tcpip-forward", true, Marshal(&m)) - if err == nil && !ok { - err = errors.New("ssh: cancel-tcpip-forward failed") - } - return err -} - -// Addr returns the listener's network address. -func (l *tcpListener) Addr() net.Addr { - return l.laddr -} - -// Dial initiates a connection to the addr from the remote host. -// The resulting connection has a zero LocalAddr() and RemoteAddr(). -func (c *Client) Dial(n, addr string) (net.Conn, error) { - var ch Channel - switch n { - case "tcp", "tcp4", "tcp6": - // Parse the address into host and numeric port. - host, portString, err := net.SplitHostPort(addr) - if err != nil { - return nil, err - } - port, err := strconv.ParseUint(portString, 10, 16) - if err != nil { - return nil, err - } - ch, err = c.dial(net.IPv4zero.String(), 0, host, int(port)) - if err != nil { - return nil, err - } - // Use a zero address for local and remote address. - zeroAddr := &net.TCPAddr{ - IP: net.IPv4zero, - Port: 0, - } - return &chanConn{ - Channel: ch, - laddr: zeroAddr, - raddr: zeroAddr, - }, nil - case "unix": - var err error - ch, err = c.dialStreamLocal(addr) - if err != nil { - return nil, err - } - return &chanConn{ - Channel: ch, - laddr: &net.UnixAddr{ - Name: "@", - Net: "unix", - }, - raddr: &net.UnixAddr{ - Name: addr, - Net: "unix", - }, - }, nil - default: - return nil, fmt.Errorf("ssh: unsupported protocol: %s", n) - } -} - -// DialTCP connects to the remote address raddr on the network net, -// which must be "tcp", "tcp4", or "tcp6". If laddr is not nil, it is used -// as the local address for the connection. -func (c *Client) DialTCP(n string, laddr, raddr *net.TCPAddr) (net.Conn, error) { - if laddr == nil { - laddr = &net.TCPAddr{ - IP: net.IPv4zero, - Port: 0, - } - } - ch, err := c.dial(laddr.IP.String(), laddr.Port, raddr.IP.String(), raddr.Port) - if err != nil { - return nil, err - } - return &chanConn{ - Channel: ch, - laddr: laddr, - raddr: raddr, - }, nil -} - -// RFC 4254 7.2 -type channelOpenDirectMsg struct { - raddr string - rport uint32 - laddr string - lport uint32 -} - -func (c *Client) dial(laddr string, lport int, raddr string, rport int) (Channel, error) { - msg := channelOpenDirectMsg{ - raddr: raddr, - rport: uint32(rport), - laddr: laddr, - lport: uint32(lport), - } - ch, in, err := c.OpenChannel("direct-tcpip", Marshal(&msg)) - if err != nil { - return nil, err - } - go DiscardRequests(in) - return ch, err -} - -type tcpChan struct { - Channel // the backing channel -} - -// chanConn fulfills the net.Conn interface without -// the tcpChan having to hold laddr or raddr directly. -type chanConn struct { - Channel - laddr, raddr net.Addr -} - -// LocalAddr returns the local network address. -func (t *chanConn) LocalAddr() net.Addr { - return t.laddr -} - -// RemoteAddr returns the remote network address. -func (t *chanConn) RemoteAddr() net.Addr { - return t.raddr -} - -// SetDeadline sets the read and write deadlines associated -// with the connection. -func (t *chanConn) SetDeadline(deadline time.Time) error { - if err := t.SetReadDeadline(deadline); err != nil { - return err - } - return t.SetWriteDeadline(deadline) -} - -// SetReadDeadline sets the read deadline. -// A zero value for t means Read will not time out. -// After the deadline, the error from Read will implement net.Error -// with Timeout() == true. 
-func (t *chanConn) SetReadDeadline(deadline time.Time) error { - // for compatibility with previous version, - // the error message contains "tcpChan" - return errors.New("ssh: tcpChan: deadline not supported") -} - -// SetWriteDeadline exists to satisfy the net.Conn interface -// but is not implemented by this type. It always returns an error. -func (t *chanConn) SetWriteDeadline(deadline time.Time) error { - return errors.New("ssh: tcpChan: deadline not supported") -} diff --git a/vendor/golang.org/x/crypto/ssh/transport.go b/vendor/golang.org/x/crypto/ssh/transport.go deleted file mode 100644 index 49ddc2e7..00000000 --- a/vendor/golang.org/x/crypto/ssh/transport.go +++ /dev/null @@ -1,353 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "bufio" - "bytes" - "errors" - "io" - "log" -) - -// debugTransport if set, will print packet types as they go over the -// wire. No message decoding is done, to minimize the impact on timing. -const debugTransport = false - -const ( - gcmCipherID = "aes128-gcm@openssh.com" - aes128cbcID = "aes128-cbc" - tripledescbcID = "3des-cbc" -) - -// packetConn represents a transport that implements packet based -// operations. -type packetConn interface { - // Encrypt and send a packet of data to the remote peer. - writePacket(packet []byte) error - - // Read a packet from the connection. The read is blocking, - // i.e. if error is nil, then the returned byte slice is - // always non-empty. - readPacket() ([]byte, error) - - // Close closes the write-side of the connection. - Close() error -} - -// transport is the keyingTransport that implements the SSH packet -// protocol. -type transport struct { - reader connectionState - writer connectionState - - bufReader *bufio.Reader - bufWriter *bufio.Writer - rand io.Reader - isClient bool - io.Closer -} - -// packetCipher represents a combination of SSH encryption/MAC -// protocol. A single instance should be used for one direction only. -type packetCipher interface { - // writeCipherPacket encrypts the packet and writes it to w. The - // contents of the packet are generally scrambled. - writeCipherPacket(seqnum uint32, w io.Writer, rand io.Reader, packet []byte) error - - // readCipherPacket reads and decrypts a packet of data. The - // returned packet may be overwritten by future calls of - // readPacket. - readCipherPacket(seqnum uint32, r io.Reader) ([]byte, error) -} - -// connectionState represents one side (read or write) of the -// connection. This is necessary because each direction has its own -// keys, and can even have its own algorithms -type connectionState struct { - packetCipher - seqNum uint32 - dir direction - pendingKeyChange chan packetCipher -} - -// prepareKeyChange sets up key material for a keychange. The key changes in -// both directions are triggered by reading and writing a msgNewKey packet -// respectively. 
-func (t *transport) prepareKeyChange(algs *algorithms, kexResult *kexResult) error { - ciph, err := newPacketCipher(t.reader.dir, algs.r, kexResult) - if err != nil { - return err - } - t.reader.pendingKeyChange <- ciph - - ciph, err = newPacketCipher(t.writer.dir, algs.w, kexResult) - if err != nil { - return err - } - t.writer.pendingKeyChange <- ciph - - return nil -} - -func (t *transport) printPacket(p []byte, write bool) { - if len(p) == 0 { - return - } - who := "server" - if t.isClient { - who = "client" - } - what := "read" - if write { - what = "write" - } - - log.Println(what, who, p[0]) -} - -// Read and decrypt next packet. -func (t *transport) readPacket() (p []byte, err error) { - for { - p, err = t.reader.readPacket(t.bufReader) - if err != nil { - break - } - if len(p) == 0 || (p[0] != msgIgnore && p[0] != msgDebug) { - break - } - } - if debugTransport { - t.printPacket(p, false) - } - - return p, err -} - -func (s *connectionState) readPacket(r *bufio.Reader) ([]byte, error) { - packet, err := s.packetCipher.readCipherPacket(s.seqNum, r) - s.seqNum++ - if err == nil && len(packet) == 0 { - err = errors.New("ssh: zero length packet") - } - - if len(packet) > 0 { - switch packet[0] { - case msgNewKeys: - select { - case cipher := <-s.pendingKeyChange: - s.packetCipher = cipher - default: - return nil, errors.New("ssh: got bogus newkeys message") - } - - case msgDisconnect: - // Transform a disconnect message into an - // error. Since this is lowest level at which - // we interpret message types, doing it here - // ensures that we don't have to handle it - // elsewhere. - var msg disconnectMsg - if err := Unmarshal(packet, &msg); err != nil { - return nil, err - } - return nil, &msg - } - } - - // The packet may point to an internal buffer, so copy the - // packet out here. 
- fresh := make([]byte, len(packet)) - copy(fresh, packet) - - return fresh, err -} - -func (t *transport) writePacket(packet []byte) error { - if debugTransport { - t.printPacket(packet, true) - } - return t.writer.writePacket(t.bufWriter, t.rand, packet) -} - -func (s *connectionState) writePacket(w *bufio.Writer, rand io.Reader, packet []byte) error { - changeKeys := len(packet) > 0 && packet[0] == msgNewKeys - - err := s.packetCipher.writeCipherPacket(s.seqNum, w, rand, packet) - if err != nil { - return err - } - if err = w.Flush(); err != nil { - return err - } - s.seqNum++ - if changeKeys { - select { - case cipher := <-s.pendingKeyChange: - s.packetCipher = cipher - default: - panic("ssh: no key material for msgNewKeys") - } - } - return err -} - -func newTransport(rwc io.ReadWriteCloser, rand io.Reader, isClient bool) *transport { - t := &transport{ - bufReader: bufio.NewReader(rwc), - bufWriter: bufio.NewWriter(rwc), - rand: rand, - reader: connectionState{ - packetCipher: &streamPacketCipher{cipher: noneCipher{}}, - pendingKeyChange: make(chan packetCipher, 1), - }, - writer: connectionState{ - packetCipher: &streamPacketCipher{cipher: noneCipher{}}, - pendingKeyChange: make(chan packetCipher, 1), - }, - Closer: rwc, - } - t.isClient = isClient - - if isClient { - t.reader.dir = serverKeys - t.writer.dir = clientKeys - } else { - t.reader.dir = clientKeys - t.writer.dir = serverKeys - } - - return t -} - -type direction struct { - ivTag []byte - keyTag []byte - macKeyTag []byte -} - -var ( - serverKeys = direction{[]byte{'B'}, []byte{'D'}, []byte{'F'}} - clientKeys = direction{[]byte{'A'}, []byte{'C'}, []byte{'E'}} -) - -// setupKeys sets the cipher and MAC keys from kex.K, kex.H and sessionId, as -// described in RFC 4253, section 6.4. direction should either be serverKeys -// (to setup server->client keys) or clientKeys (for client->server keys). -func newPacketCipher(d direction, algs directionAlgorithms, kex *kexResult) (packetCipher, error) { - cipherMode := cipherModes[algs.Cipher] - macMode := macModes[algs.MAC] - - iv := make([]byte, cipherMode.ivSize) - key := make([]byte, cipherMode.keySize) - macKey := make([]byte, macMode.keySize) - - generateKeyMaterial(iv, d.ivTag, kex) - generateKeyMaterial(key, d.keyTag, kex) - generateKeyMaterial(macKey, d.macKeyTag, kex) - - return cipherModes[algs.Cipher].create(key, iv, macKey, algs) -} - -// generateKeyMaterial fills out with key material generated from tag, K, H -// and sessionId, as specified in RFC 4253, section 7.2. -func generateKeyMaterial(out, tag []byte, r *kexResult) { - var digestsSoFar []byte - - h := r.Hash.New() - for len(out) > 0 { - h.Reset() - h.Write(r.K) - h.Write(r.H) - - if len(digestsSoFar) == 0 { - h.Write(tag) - h.Write(r.SessionID) - } else { - h.Write(digestsSoFar) - } - - digest := h.Sum(nil) - n := copy(out, digest) - out = out[n:] - if len(out) > 0 { - digestsSoFar = append(digestsSoFar, digest...) - } - } -} - -const packageVersion = "SSH-2.0-Go" - -// Sends and receives a version line. The versionLine string should -// be US ASCII, start with "SSH-2.0-", and should not include a -// newline. exchangeVersions returns the other side's version line. -func exchangeVersions(rw io.ReadWriter, versionLine []byte) (them []byte, err error) { - // Contrary to the RFC, we do not ignore lines that don't - // start with "SSH-2.0-" to make the library usable with - // nonconforming servers. - for _, c := range versionLine { - // The spec disallows non US-ASCII chars, and - // specifically forbids null chars. 
- if c < 32 { - return nil, errors.New("ssh: junk character in version line") - } - } - if _, err = rw.Write(append(versionLine, '\r', '\n')); err != nil { - return - } - - them, err = readVersion(rw) - return them, err -} - -// maxVersionStringBytes is the maximum number of bytes that we'll -// accept as a version string. RFC 4253 section 4.2 limits this at 255 -// chars -const maxVersionStringBytes = 255 - -// Read version string as specified by RFC 4253, section 4.2. -func readVersion(r io.Reader) ([]byte, error) { - versionString := make([]byte, 0, 64) - var ok bool - var buf [1]byte - - for length := 0; length < maxVersionStringBytes; length++ { - _, err := io.ReadFull(r, buf[:]) - if err != nil { - return nil, err - } - // The RFC says that the version should be terminated with \r\n - // but several SSH servers actually only send a \n. - if buf[0] == '\n' { - if !bytes.HasPrefix(versionString, []byte("SSH-")) { - // RFC 4253 says we need to ignore all version string lines - // except the one containing the SSH version (provided that - // all the lines do not exceed 255 bytes in total). - versionString = versionString[:0] - continue - } - ok = true - break - } - - // non ASCII chars are disallowed, but we are lenient, - // since Go doesn't use null-terminated strings. - - // The RFC allows a comment after a space, however, - // all of it (version and comments) goes into the - // session hash. - versionString = append(versionString, buf[0]) - } - - if !ok { - return nil, errors.New("ssh: overflow reading version string") - } - - // There might be a '\r' on the end which we should remove. - if len(versionString) > 0 && versionString[len(versionString)-1] == '\r' { - versionString = versionString[:len(versionString)-1] - } - return versionString, nil -} diff --git a/vendor/golang.org/x/mod/LICENSE b/vendor/golang.org/x/mod/LICENSE deleted file mode 100644 index 6a66aea5..00000000 --- a/vendor/golang.org/x/mod/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
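For context on the x/crypto/ssh session plumbing deleted above, here is a minimal sketch of running one remote command and telling the two failure modes apart (ExitError vs. ExitMissingError). The address, user, and password are placeholder assumptions, and InsecureIgnoreHostKey is used only to keep the sketch short:

package main

import (
	"errors"
	"fmt"
	"log"

	"golang.org/x/crypto/ssh"
)

func main() {
	cfg := &ssh.ClientConfig{
		User:            "demo",                                 // placeholder
		Auth:            []ssh.AuthMethod{ssh.Password("demo")}, // placeholder
		HostKeyCallback: ssh.InsecureIgnoreHostKey(),            // sketch only; verify host keys in real use
	}
	client, err := ssh.Dial("tcp", "example.com:22", cfg) // hypothetical host
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	sess, err := client.NewSession()
	if err != nil {
		log.Fatal(err)
	}
	defer sess.Close()

	out, err := sess.Output("uname -a")
	var exitErr *ssh.ExitError
	var missingErr *ssh.ExitMissingError
	switch {
	case errors.As(err, &exitErr):
		// Non-zero status, or 128+signal as synthesized in wait() above.
		fmt.Println("remote command failed with status", exitErr.ExitStatus())
	case errors.As(err, &missingErr):
		fmt.Println("session closed with no exit status or signal")
	case err != nil:
		log.Fatal(err)
	default:
		fmt.Printf("%s", out)
	}
}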
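The tcpip.go code above backs net.Listener and net.Conn with SSH channels. A sketch of a remote-to-local relay under the same assumptions (both addresses are made up); note that the returned connections do not support deadlines, as the SetReadDeadline/SetWriteDeadline stubs above make explicit:

package main

import (
	"io"
	"net"

	"golang.org/x/crypto/ssh"
)

// proxy copies bytes both ways and closes both ends when one side finishes.
func proxy(a, b net.Conn) {
	go func() { _, _ = io.Copy(a, b); a.Close() }()
	_, _ = io.Copy(b, a)
	b.Close()
}

// forwardRemote asks the server to listen on 127.0.0.1:8080 (the
// "tcpip-forward" request above) and relays each accepted connection
// to 10.0.0.5:80 through a "direct-tcpip" channel (client.Dial).
func forwardRemote(client *ssh.Client) error {
	ln, err := client.Listen("tcp", "127.0.0.1:8080")
	if err != nil {
		return err
	}
	defer ln.Close()
	for {
		conn, err := ln.Accept()
		if err != nil {
			return err
		}
		target, err := client.Dial("tcp", "10.0.0.5:80")
		if err != nil {
			conn.Close()
			return err
		}
		go proxy(conn, target)
	}
}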
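The buildMIC helper deleted above assembles the RFC 4462 section 3.6 payload from length-prefixed strings. A small stand-alone sketch of the same layout; the session identifier, user name, and service values here are invented:

package main

import "fmt"

// appendString mimics the length-prefixed (uint32, big-endian) string
// encoding used by the deleted appendString helper.
func appendString(b []byte, s string) []byte {
	n := uint32(len(s))
	b = append(b, byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
	return append(b, s...)
}

func main() {
	// SSH_MSG_USERAUTH_REQUEST is message number 50 (RFC 4252 section 6).
	out := appendString(nil, "session-id") // invented session identifier
	out = append(out, 50)
	out = appendString(out, "alice")
	out = appendString(out, "ssh-connection")
	out = appendString(out, "gssapi-with-mic")
	fmt.Printf("% x\n", out)
}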
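generateKeyMaterial in the transport.go code above implements the key derivation of RFC 4253 section 7.2: each block of key material hashes K || H followed by either the direction tag plus session ID (first block) or all previous digests. A self-contained sketch of the same loop, with sha256 standing in for the negotiated KEX hash and made-up K and H values:

package main

import (
	"crypto/sha256"
	"fmt"
)

func keyMaterial(k, h, sessionID []byte, tag byte, n int) []byte {
	var out, digests []byte
	for len(out) < n {
		hash := sha256.New()
		hash.Write(k)
		hash.Write(h)
		if len(digests) == 0 {
			hash.Write([]byte{tag})
			hash.Write(sessionID)
		} else {
			hash.Write(digests) // extend with all digests so far
		}
		d := hash.Sum(nil)
		digests = append(digests, d...)
		out = append(out, d...)
	}
	return out[:n]
}

func main() {
	k, h := []byte("shared-secret"), []byte("exchange-hash") // placeholders
	// 'A' is the client-to-server IV tag, per the direction tags above.
	fmt.Printf("%x\n", keyMaterial(k, h, h, 'A', 32))
}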
diff --git a/vendor/golang.org/x/mod/PATENTS b/vendor/golang.org/x/mod/PATENTS deleted file mode 100644 index 73309904..00000000 --- a/vendor/golang.org/x/mod/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. - -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/mod/module/module.go b/vendor/golang.org/x/mod/module/module.go deleted file mode 100644 index 0e030148..00000000 --- a/vendor/golang.org/x/mod/module/module.go +++ /dev/null @@ -1,822 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package module defines the module.Version type along with support code. -// -// The module.Version type is a simple Path, Version pair: -// -// type Version struct { -// Path string -// Version string -// } -// -// There are no restrictions imposed directly by use of this structure, -// but additional checking functions, most notably Check, verify that -// a particular path, version pair is valid. -// -// Escaped Paths -// -// Module paths appear as substrings of file system paths -// (in the download cache) and of web server URLs in the proxy protocol. -// In general we cannot rely on file systems to be case-sensitive, -// nor can we rely on web servers, since they read from file systems. -// That is, we cannot rely on the file system to keep rsc.io/QUOTE -// and rsc.io/quote separate. Windows and macOS don't. -// Instead, we must never require two different casings of a file path. -// Because we want the download cache to match the proxy protocol, -// and because we want the proxy protocol to be possible to serve -// from a tree of static files (which might be stored on a case-insensitive -// file system), the proxy protocol must never require two different casings -// of a URL path either. -// -// One possibility would be to make the escaped form be the lowercase -// hexadecimal encoding of the actual path bytes. This would avoid ever -// needing different casings of a file path, but it would be fairly illegible -// to most programmers when those paths appeared in the file system -// (including in file paths in compiler errors and stack traces) -// in web server logs, and so on. 
Instead, we want a safe escaped form that -// leaves most paths unaltered. -// -// The safe escaped form is to replace every uppercase letter -// with an exclamation mark followed by the letter's lowercase equivalent. -// -// For example, -// -// github.com/Azure/azure-sdk-for-go -> github.com/!azure/azure-sdk-for-go. -// github.com/GoogleCloudPlatform/cloudsql-proxy -> github.com/!google!cloud!platform/cloudsql-proxy -// github.com/Sirupsen/logrus -> github.com/!sirupsen/logrus. -// -// Import paths that avoid upper-case letters are left unchanged. -// Note that because import paths are ASCII-only and avoid various -// problematic punctuation (like : < and >), the escaped form is also ASCII-only -// and avoids the same problematic punctuation. -// -// Import paths have never allowed exclamation marks, so there is no -// need to define how to escape a literal !. -// -// Unicode Restrictions -// -// Today, paths are disallowed from using Unicode. -// -// Although paths are currently disallowed from using Unicode, -// we would like at some point to allow Unicode letters as well, to assume that -// file systems and URLs are Unicode-safe (storing UTF-8), and apply -// the !-for-uppercase convention for escaping them in the file system. -// But there are at least two subtle considerations. -// -// First, note that not all case-fold equivalent distinct runes -// form an upper/lower pair. -// For example, U+004B ('K'), U+006B ('k'), and U+212A ('K' for Kelvin) -// are three distinct runes that case-fold to each other. -// When we do add Unicode letters, we must not assume that upper/lower -// are the only case-equivalent pairs. -// Perhaps the Kelvin symbol would be disallowed entirely, for example. -// Or perhaps it would escape as "!!k", or perhaps as "(212A)". -// -// Second, it would be nice to allow Unicode marks as well as letters, -// but marks include combining marks, and then we must deal not -// only with case folding but also normalization: both U+00E9 ('é') -// and U+0065 U+0301 ('e' followed by combining acute accent) -// look the same on the page and are treated by some file systems -// as the same path. If we do allow Unicode marks in paths, there -// must be some kind of normalization to allow only one canonical -// encoding of any character used in an import path. -package module - -// IMPORTANT NOTE -// -// This file essentially defines the set of valid import paths for the go command. -// There are many subtle considerations, including Unicode ambiguity, -// security, network, and file system representations. -// -// This file also defines the set of valid module path and version combinations, -// another topic with many subtle considerations. -// -// Changes to the semantics in this file require approval from rsc. - -import ( - "fmt" - "path" - "sort" - "strings" - "unicode" - "unicode/utf8" - - "golang.org/x/mod/semver" - errors "golang.org/x/xerrors" -) - -// A Version (for clients, a module.Version) is defined by a module path and version pair. -// These are stored in their plain (unescaped) form. -type Version struct { - // Path is a module path, like "golang.org/x/text" or "rsc.io/quote/v2". - Path string - - // Version is usually a semantic version in canonical form. - // There are three exceptions to this general rule. - // First, the top-level target of a build has no specific version - // and uses Version = "". - // Second, during MVS calculations the version "none" is used - // to represent the decision to take no version of a given module. 
- // Third, filesystem paths found in "replace" directives are - // represented by a path with an empty version. - Version string `json:",omitempty"` -} - -// String returns a representation of the Version suitable for logging -// (Path@Version, or just Path if Version is empty). -func (m Version) String() string { - if m.Version == "" { - return m.Path - } - return m.Path + "@" + m.Version -} - -// A ModuleError indicates an error specific to a module. -type ModuleError struct { - Path string - Version string - Err error -} - -// VersionError returns a ModuleError derived from a Version and error, -// or err itself if it is already such an error. -func VersionError(v Version, err error) error { - var mErr *ModuleError - if errors.As(err, &mErr) && mErr.Path == v.Path && mErr.Version == v.Version { - return err - } - return &ModuleError{ - Path: v.Path, - Version: v.Version, - Err: err, - } -} - -func (e *ModuleError) Error() string { - if v, ok := e.Err.(*InvalidVersionError); ok { - return fmt.Sprintf("%s@%s: invalid %s: %v", e.Path, v.Version, v.noun(), v.Err) - } - if e.Version != "" { - return fmt.Sprintf("%s@%s: %v", e.Path, e.Version, e.Err) - } - return fmt.Sprintf("module %s: %v", e.Path, e.Err) -} - -func (e *ModuleError) Unwrap() error { return e.Err } - -// An InvalidVersionError indicates an error specific to a version, with the -// module path unknown or specified externally. -// -// A ModuleError may wrap an InvalidVersionError, but an InvalidVersionError -// must not wrap a ModuleError. -type InvalidVersionError struct { - Version string - Pseudo bool - Err error -} - -// noun returns either "version" or "pseudo-version", depending on whether -// e.Version is a pseudo-version. -func (e *InvalidVersionError) noun() string { - if e.Pseudo { - return "pseudo-version" - } - return "version" -} - -func (e *InvalidVersionError) Error() string { - return fmt.Sprintf("%s %q invalid: %s", e.noun(), e.Version, e.Err) -} - -func (e *InvalidVersionError) Unwrap() error { return e.Err } - -// Check checks that a given module path, version pair is valid. -// In addition to the path being a valid module path -// and the version being a valid semantic version, -// the two must correspond. -// For example, the path "yaml/v2" only corresponds to -// semantic versions beginning with "v2.". -func Check(path, version string) error { - if err := CheckPath(path); err != nil { - return err - } - if !semver.IsValid(version) { - return &ModuleError{ - Path: path, - Err: &InvalidVersionError{Version: version, Err: errors.New("not a semantic version")}, - } - } - _, pathMajor, _ := SplitPathVersion(path) - if err := CheckPathMajor(version, pathMajor); err != nil { - return &ModuleError{Path: path, Err: err} - } - return nil -} - -// firstPathOK reports whether r can appear in the first element of a module path. -// The first element of the path must be an LDH domain name, at least for now. -// To avoid case ambiguity, the domain name must be entirely lower case. -func firstPathOK(r rune) bool { - return r == '-' || r == '.' || - '0' <= r && r <= '9' || - 'a' <= r && r <= 'z' -} - -// modPathOK reports whether r can appear in a module path element. -// Paths can be ASCII letters, ASCII digits, and limited ASCII punctuation: - . _ and ~. -// -// This matches what "go get" has historically recognized in import paths, -// and avoids confusing sequences like '%20' or '+' that would change meaning -// if used in a URL. 
-//
-// TODO(rsc): We would like to allow Unicode letters, but that requires additional
-// care in the safe encoding (see "escaped paths" above).
-func modPathOK(r rune) bool {
-	if r < utf8.RuneSelf {
-		return r == '-' || r == '.' || r == '_' || r == '~' ||
-			'0' <= r && r <= '9' ||
-			'A' <= r && r <= 'Z' ||
-			'a' <= r && r <= 'z'
-	}
-	return false
-}
-
-// importPathOK reports whether r can appear in a package import path element.
-//
-// Import paths are intermediate between module paths and file paths: we
-// disallow characters that would be confusing or ambiguous as arguments to
-// 'go get' (such as '@' and ' '), but allow certain characters that are
-// otherwise-unambiguous on the command line and historically used for some
-// binary names (such as '++' as a suffix for compiler binaries and wrappers).
-func importPathOK(r rune) bool {
-	return modPathOK(r) || r == '+'
-}
-
-// fileNameOK reports whether r can appear in a file name.
-// For now we allow all Unicode letters but otherwise limit to pathOK plus a few more punctuation characters.
-// If we expand the set of allowed characters here, we have to
-// work harder at detecting potential case-folding and normalization collisions.
-// See note about "escaped paths" above.
-func fileNameOK(r rune) bool {
-	if r < utf8.RuneSelf {
-		// Entire set of ASCII punctuation, from which we remove characters:
-		//     ! " # $ % & ' ( ) * + , - . / : ; < = > ? @ [ \ ] ^ _ ` { | } ~
-		// We disallow some shell special characters: " ' * < > ? ` |
-		// (Note that some of those are disallowed by the Windows file system as well.)
-		// We also disallow path separators / : and \ (fileNameOK is only called on path element characters).
-		// We allow spaces (U+0020) in file names.
-		const allowed = "!#$%&()+,-.=@[]^_{}~ "
-		if '0' <= r && r <= '9' || 'A' <= r && r <= 'Z' || 'a' <= r && r <= 'z' {
-			return true
-		}
-		for i := 0; i < len(allowed); i++ {
-			if rune(allowed[i]) == r {
-				return true
-			}
-		}
-		return false
-	}
-	// It may be OK to add more ASCII punctuation here, but only carefully.
-	// For example Windows disallows < > \, and macOS disallows :, so we must not allow those.
-	return unicode.IsLetter(r)
-}
-
-// CheckPath checks that a module path is valid.
-// A valid module path is a valid import path, as checked by CheckImportPath,
-// with three additional constraints.
-// First, the leading path element (up to the first slash, if any),
-// by convention a domain name, must contain only lower-case ASCII letters,
-// ASCII digits, dots (U+002E), and dashes (U+002D);
-// it must contain at least one dot and cannot start with a dash.
-// Second, a final path element of the form /vN, where N looks numeric
-// (ASCII digits and dots), must not begin with a leading zero, must not be /v1,
-// and must not contain any dots. For paths beginning with "gopkg.in/",
-// this second requirement is replaced by a requirement that the path
-// follow the gopkg.in server's conventions.
-// Third, no path element may begin with a dot.
-func CheckPath(path string) error {
-	if err := checkPath(path, modulePath); err != nil {
-		return fmt.Errorf("malformed module path %q: %v", path, err)
-	}
-	i := strings.Index(path, "/")
-	if i < 0 {
-		i = len(path)
-	}
-	if i == 0 {
-		return fmt.Errorf("malformed module path %q: leading slash", path)
-	}
-	if !strings.Contains(path[:i], ".") {
-		return fmt.Errorf("malformed module path %q: missing dot in first path element", path)
-	}
-	if path[0] == '-' {
-		return fmt.Errorf("malformed module path %q: leading dash in first path element", path)
-	}
-	for _, r := range path[:i] {
-		if !firstPathOK(r) {
-			return fmt.Errorf("malformed module path %q: invalid char %q in first path element", path, r)
-		}
-	}
-	if _, _, ok := SplitPathVersion(path); !ok {
-		return fmt.Errorf("malformed module path %q: invalid version", path)
-	}
-	return nil
-}
-
-// CheckImportPath checks that an import path is valid.
-//
-// A valid import path consists of one or more valid path elements
-// separated by slashes (U+002F). (It must not begin with nor end in a slash.)
-//
-// A valid path element is a non-empty string made up of
-// ASCII letters, ASCII digits, and limited ASCII punctuation: - . _ and ~.
-// It must not end with a dot (U+002E), nor contain two dots in a row.
-//
-// The element prefix up to the first dot must not be a reserved file name
-// on Windows, regardless of case (CON, com1, NuL, and so on). The element
-// must not have a suffix of a tilde followed by one or more ASCII digits
-// (to exclude path elements that look like Windows short-names).
-//
-// CheckImportPath may be less restrictive in the future, but see the
-// top-level package documentation for additional information about
-// subtleties of Unicode.
-func CheckImportPath(path string) error {
-	if err := checkPath(path, importPath); err != nil {
-		return fmt.Errorf("malformed import path %q: %v", path, err)
-	}
-	return nil
-}
-
-// pathKind indicates what kind of path we're checking. Module paths,
-// import paths, and file paths have different restrictions.
-type pathKind int
-
-const (
-	modulePath pathKind = iota
-	importPath
-	filePath
-)
-
-// checkPath checks that a general path is valid.
-// It returns an error describing why but not mentioning path.
-// Because these checks apply to both module paths and import paths,
-// the caller is expected to add the "malformed ___ path %q: " prefix.
-// kind indicates what kind of path is being checked (module, import, or file).
-func checkPath(path string, kind pathKind) error {
-	if !utf8.ValidString(path) {
-		return fmt.Errorf("invalid UTF-8")
-	}
-	if path == "" {
-		return fmt.Errorf("empty string")
-	}
-	if path[0] == '-' {
-		return fmt.Errorf("leading dash")
-	}
-	if strings.Contains(path, "//") {
-		return fmt.Errorf("double slash")
-	}
-	if path[len(path)-1] == '/' {
-		return fmt.Errorf("trailing slash")
-	}
-	elemStart := 0
-	for i, r := range path {
-		if r == '/' {
-			if err := checkElem(path[elemStart:i], kind); err != nil {
-				return err
-			}
-			elemStart = i + 1
-		}
-	}
-	if err := checkElem(path[elemStart:], kind); err != nil {
-		return err
-	}
-	return nil
-}
-
-// checkElem checks whether an individual path element is valid.
-func checkElem(elem string, kind pathKind) error {
-	if elem == "" {
-		return fmt.Errorf("empty path element")
-	}
-	if strings.Count(elem, ".") == len(elem) {
-		return fmt.Errorf("invalid path element %q", elem)
-	}
-	if elem[0] == '.' && kind == modulePath {
-		return fmt.Errorf("leading dot in path element")
-	}
-	if elem[len(elem)-1] == '.' {
-		return fmt.Errorf("trailing dot in path element")
-	}
-	for _, r := range elem {
-		ok := false
-		switch kind {
-		case modulePath:
-			ok = modPathOK(r)
-		case importPath:
-			ok = importPathOK(r)
-		case filePath:
-			ok = fileNameOK(r)
-		default:
-			panic(fmt.Sprintf("internal error: invalid kind %v", kind))
-		}
-		if !ok {
-			return fmt.Errorf("invalid char %q", r)
-		}
-	}
-
-	// Windows disallows a bunch of path elements, sadly.
-	// See https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file
-	short := elem
-	if i := strings.Index(short, "."); i >= 0 {
-		short = short[:i]
-	}
-	for _, bad := range badWindowsNames {
-		if strings.EqualFold(bad, short) {
-			return fmt.Errorf("%q disallowed as path element component on Windows", short)
-		}
-	}
-
-	if kind == filePath {
-		// don't check for Windows short-names in file names. They're
-		// only an issue for import paths.
-		return nil
-	}
-
-	// Reject path components that look like Windows short-names.
-	// Those usually end in a tilde followed by one or more ASCII digits.
-	if tilde := strings.LastIndexByte(short, '~'); tilde >= 0 && tilde < len(short)-1 {
-		suffix := short[tilde+1:]
-		suffixIsDigits := true
-		for _, r := range suffix {
-			if r < '0' || r > '9' {
-				suffixIsDigits = false
-				break
-			}
-		}
-		if suffixIsDigits {
-			return fmt.Errorf("trailing tilde and digits in path element")
-		}
-	}
-
-	return nil
-}
-
-// CheckFilePath checks that a slash-separated file path is valid.
-// The definition of a valid file path is the same as the definition
-// of a valid import path except that the set of allowed characters is larger:
-// all Unicode letters, ASCII digits, the ASCII space character (U+0020),
-// and the ASCII punctuation characters
-// “!#$%&()+,-.=@[]^_{}~”.
-// (The excluded punctuation characters, " * < > ? ` ' | / \ and :,
-// have special meanings in certain shells or operating systems.)
-//
-// CheckFilePath may be less restrictive in the future, but see the
-// top-level package documentation for additional information about
-// subtleties of Unicode.
-func CheckFilePath(path string) error {
-	if err := checkPath(path, filePath); err != nil {
-		return fmt.Errorf("malformed file path %q: %v", path, err)
-	}
-	return nil
-}
-
-// badWindowsNames are the reserved file path elements on Windows.
-// See https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file
-var badWindowsNames = []string{
-	"CON",
-	"PRN",
-	"AUX",
-	"NUL",
-	"COM1",
-	"COM2",
-	"COM3",
-	"COM4",
-	"COM5",
-	"COM6",
-	"COM7",
-	"COM8",
-	"COM9",
-	"LPT1",
-	"LPT2",
-	"LPT3",
-	"LPT4",
-	"LPT5",
-	"LPT6",
-	"LPT7",
-	"LPT8",
-	"LPT9",
-}
-
-// SplitPathVersion returns prefix and major version such that prefix+pathMajor == path
-// and version is either empty or "/vN" for N >= 2.
-// As a special case, gopkg.in paths are recognized directly;
-// they require ".vN" instead of "/vN", and for all N, not just N >= 2.
-// SplitPathVersion returns with ok = false when presented with
-// a path whose last path element does not satisfy the constraints
-// applied by CheckPath, such as "example.com/pkg/v1" or "example.com/pkg/v1.2".
-func SplitPathVersion(path string) (prefix, pathMajor string, ok bool) {
-	if strings.HasPrefix(path, "gopkg.in/") {
-		return splitGopkgIn(path)
-	}
-
-	i := len(path)
-	dot := false
-	for i > 0 && ('0' <= path[i-1] && path[i-1] <= '9' || path[i-1] == '.') {
-		if path[i-1] == '.'
{ - dot = true - } - i-- - } - if i <= 1 || i == len(path) || path[i-1] != 'v' || path[i-2] != '/' { - return path, "", true - } - prefix, pathMajor = path[:i-2], path[i-2:] - if dot || len(pathMajor) <= 2 || pathMajor[2] == '0' || pathMajor == "/v1" { - return path, "", false - } - return prefix, pathMajor, true -} - -// splitGopkgIn is like SplitPathVersion but only for gopkg.in paths. -func splitGopkgIn(path string) (prefix, pathMajor string, ok bool) { - if !strings.HasPrefix(path, "gopkg.in/") { - return path, "", false - } - i := len(path) - if strings.HasSuffix(path, "-unstable") { - i -= len("-unstable") - } - for i > 0 && ('0' <= path[i-1] && path[i-1] <= '9') { - i-- - } - if i <= 1 || path[i-1] != 'v' || path[i-2] != '.' { - // All gopkg.in paths must end in vN for some N. - return path, "", false - } - prefix, pathMajor = path[:i-2], path[i-2:] - if len(pathMajor) <= 2 || pathMajor[2] == '0' && pathMajor != ".v0" { - return path, "", false - } - return prefix, pathMajor, true -} - -// MatchPathMajor reports whether the semantic version v -// matches the path major version pathMajor. -// -// MatchPathMajor returns true if and only if CheckPathMajor returns nil. -func MatchPathMajor(v, pathMajor string) bool { - return CheckPathMajor(v, pathMajor) == nil -} - -// CheckPathMajor returns a non-nil error if the semantic version v -// does not match the path major version pathMajor. -func CheckPathMajor(v, pathMajor string) error { - // TODO(jayconrod): return errors or panic for invalid inputs. This function - // (and others) was covered by integration tests for cmd/go, and surrounding - // code protected against invalid inputs like non-canonical versions. - if strings.HasPrefix(pathMajor, ".v") && strings.HasSuffix(pathMajor, "-unstable") { - pathMajor = strings.TrimSuffix(pathMajor, "-unstable") - } - if strings.HasPrefix(v, "v0.0.0-") && pathMajor == ".v1" { - // Allow old bug in pseudo-versions that generated v0.0.0- pseudoversion for gopkg .v1. - // For example, gopkg.in/yaml.v2@v2.2.1's go.mod requires gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405. - return nil - } - m := semver.Major(v) - if pathMajor == "" { - if m == "v0" || m == "v1" || semver.Build(v) == "+incompatible" { - return nil - } - pathMajor = "v0 or v1" - } else if pathMajor[0] == '/' || pathMajor[0] == '.' { - if m == pathMajor[1:] { - return nil - } - pathMajor = pathMajor[1:] - } - return &InvalidVersionError{ - Version: v, - Err: fmt.Errorf("should be %s, not %s", pathMajor, semver.Major(v)), - } -} - -// PathMajorPrefix returns the major-version tag prefix implied by pathMajor. -// An empty PathMajorPrefix allows either v0 or v1. -// -// Note that MatchPathMajor may accept some versions that do not actually begin -// with this prefix: namely, it accepts a 'v0.0.0-' prefix for a '.v1' -// pathMajor, even though that pathMajor implies 'v1' tagging. -func PathMajorPrefix(pathMajor string) string { - if pathMajor == "" { - return "" - } - if pathMajor[0] != '/' && pathMajor[0] != '.' { - panic("pathMajor suffix " + pathMajor + " passed to PathMajorPrefix lacks separator") - } - if strings.HasPrefix(pathMajor, ".v") && strings.HasSuffix(pathMajor, "-unstable") { - pathMajor = strings.TrimSuffix(pathMajor, "-unstable") - } - m := pathMajor[1:] - if m != semver.Major(m) { - panic("pathMajor suffix " + pathMajor + "passed to PathMajorPrefix is not a valid major version") - } - return m -} - -// CanonicalVersion returns the canonical form of the version string v. 
-// It is the same as semver.Canonical(v) except that it preserves the special build suffix "+incompatible". -func CanonicalVersion(v string) string { - cv := semver.Canonical(v) - if semver.Build(v) == "+incompatible" { - cv += "+incompatible" - } - return cv -} - -// Sort sorts the list by Path, breaking ties by comparing Version fields. -// The Version fields are interpreted as semantic versions (using semver.Compare) -// optionally followed by a tie-breaking suffix introduced by a slash character, -// like in "v0.0.1/go.mod". -func Sort(list []Version) { - sort.Slice(list, func(i, j int) bool { - mi := list[i] - mj := list[j] - if mi.Path != mj.Path { - return mi.Path < mj.Path - } - // To help go.sum formatting, allow version/file. - // Compare semver prefix by semver rules, - // file by string order. - vi := mi.Version - vj := mj.Version - var fi, fj string - if k := strings.Index(vi, "/"); k >= 0 { - vi, fi = vi[:k], vi[k:] - } - if k := strings.Index(vj, "/"); k >= 0 { - vj, fj = vj[:k], vj[k:] - } - if vi != vj { - return semver.Compare(vi, vj) < 0 - } - return fi < fj - }) -} - -// EscapePath returns the escaped form of the given module path. -// It fails if the module path is invalid. -func EscapePath(path string) (escaped string, err error) { - if err := CheckPath(path); err != nil { - return "", err - } - - return escapeString(path) -} - -// EscapeVersion returns the escaped form of the given module version. -// Versions are allowed to be in non-semver form but must be valid file names -// and not contain exclamation marks. -func EscapeVersion(v string) (escaped string, err error) { - if err := checkElem(v, filePath); err != nil || strings.Contains(v, "!") { - return "", &InvalidVersionError{ - Version: v, - Err: fmt.Errorf("disallowed version string"), - } - } - return escapeString(v) -} - -func escapeString(s string) (escaped string, err error) { - haveUpper := false - for _, r := range s { - if r == '!' || r >= utf8.RuneSelf { - // This should be disallowed by CheckPath, but diagnose anyway. - // The correctness of the escaping loop below depends on it. - return "", fmt.Errorf("internal error: inconsistency in EscapePath") - } - if 'A' <= r && r <= 'Z' { - haveUpper = true - } - } - - if !haveUpper { - return s, nil - } - - var buf []byte - for _, r := range s { - if 'A' <= r && r <= 'Z' { - buf = append(buf, '!', byte(r+'a'-'A')) - } else { - buf = append(buf, byte(r)) - } - } - return string(buf), nil -} - -// UnescapePath returns the module path for the given escaped path. -// It fails if the escaped path is invalid or describes an invalid path. -func UnescapePath(escaped string) (path string, err error) { - path, ok := unescapeString(escaped) - if !ok { - return "", fmt.Errorf("invalid escaped module path %q", escaped) - } - if err := CheckPath(path); err != nil { - return "", fmt.Errorf("invalid escaped module path %q: %v", escaped, err) - } - return path, nil -} - -// UnescapeVersion returns the version string for the given escaped version. -// It fails if the escaped form is invalid or describes an invalid version. -// Versions are allowed to be in non-semver form but must be valid file names -// and not contain exclamation marks. 
-func UnescapeVersion(escaped string) (v string, err error) { - v, ok := unescapeString(escaped) - if !ok { - return "", fmt.Errorf("invalid escaped version %q", escaped) - } - if err := checkElem(v, filePath); err != nil { - return "", fmt.Errorf("invalid escaped version %q: %v", v, err) - } - return v, nil -} - -func unescapeString(escaped string) (string, bool) { - var buf []byte - - bang := false - for _, r := range escaped { - if r >= utf8.RuneSelf { - return "", false - } - if bang { - bang = false - if r < 'a' || 'z' < r { - return "", false - } - buf = append(buf, byte(r+'A'-'a')) - continue - } - if r == '!' { - bang = true - continue - } - if 'A' <= r && r <= 'Z' { - return "", false - } - buf = append(buf, byte(r)) - } - if bang { - return "", false - } - return string(buf), true -} - -// MatchPrefixPatterns reports whether any path prefix of target matches one of -// the glob patterns (as defined by path.Match) in the comma-separated globs -// list. This implements the algorithm used when matching a module path to the -// GOPRIVATE environment variable, as described by 'go help module-private'. -// -// It ignores any empty or malformed patterns in the list. -func MatchPrefixPatterns(globs, target string) bool { - for globs != "" { - // Extract next non-empty glob in comma-separated list. - var glob string - if i := strings.Index(globs, ","); i >= 0 { - glob, globs = globs[:i], globs[i+1:] - } else { - glob, globs = globs, "" - } - if glob == "" { - continue - } - - // A glob with N+1 path elements (N slashes) needs to be matched - // against the first N+1 path elements of target, - // which end just before the N+1'th slash. - n := strings.Count(glob, "/") - prefix := target - // Walk target, counting slashes, truncating at the N+1'th slash. - for i := 0; i < len(target); i++ { - if target[i] == '/' { - if n == 0 { - prefix = target[:i] - break - } - n-- - } - } - if n > 0 { - // Not enough prefix elements. - continue - } - matched, _ := path.Match(glob, prefix) - if matched { - return true - } - } - return false -} diff --git a/vendor/golang.org/x/mod/semver/semver.go b/vendor/golang.org/x/mod/semver/semver.go deleted file mode 100644 index 4338f351..00000000 --- a/vendor/golang.org/x/mod/semver/semver.go +++ /dev/null @@ -1,391 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package semver implements comparison of semantic version strings. -// In this package, semantic version strings must begin with a leading "v", -// as in "v1.0.0". -// -// The general form of a semantic version string accepted by this package is -// -// vMAJOR[.MINOR[.PATCH[-PRERELEASE][+BUILD]]] -// -// where square brackets indicate optional parts of the syntax; -// MAJOR, MINOR, and PATCH are decimal integers without extra leading zeros; -// PRERELEASE and BUILD are each a series of non-empty dot-separated identifiers -// using only alphanumeric characters and hyphens; and -// all-numeric PRERELEASE identifiers must not have leading zeros. -// -// This package follows Semantic Versioning 2.0.0 (see semver.org) -// with two exceptions. First, it requires the "v" prefix. Second, it recognizes -// vMAJOR and vMAJOR.MINOR (with no prerelease or build suffixes) -// as shorthands for vMAJOR.0.0 and vMAJOR.MINOR.0. -package semver - -// parsed returns the parsed form of a semantic version string. 
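// A short sketch of the exported semver surface documented above, showing the
// mandatory "v" prefix, the vMAJOR.MINOR shorthand, and the precedence rules
// for prereleases and build metadata:

package main

import (
	"fmt"

	"golang.org/x/mod/semver"
)

func main() {
	fmt.Println(semver.IsValid("1.2.3"))                    // false: the "v" prefix is required
	fmt.Println(semver.Canonical("v1.2"))                   // "v1.2.0": missing parts are filled in
	fmt.Println(semver.Major("v2.1.0"))                     // "v2"
	fmt.Println(semver.Compare("v1.0.0-alpha", "v1.0.0"))   // -1: a prerelease sorts before the release
	fmt.Println(semver.Compare("v1.0.0+build.1", "v1.0.0")) // 0: build metadata is ignored
}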
-type parsed struct { - major string - minor string - patch string - short string - prerelease string - build string - err string -} - -// IsValid reports whether v is a valid semantic version string. -func IsValid(v string) bool { - _, ok := parse(v) - return ok -} - -// Canonical returns the canonical formatting of the semantic version v. -// It fills in any missing .MINOR or .PATCH and discards build metadata. -// Two semantic versions compare equal only if their canonical formattings -// are identical strings. -// The canonical invalid semantic version is the empty string. -func Canonical(v string) string { - p, ok := parse(v) - if !ok { - return "" - } - if p.build != "" { - return v[:len(v)-len(p.build)] - } - if p.short != "" { - return v + p.short - } - return v -} - -// Major returns the major version prefix of the semantic version v. -// For example, Major("v2.1.0") == "v2". -// If v is an invalid semantic version string, Major returns the empty string. -func Major(v string) string { - pv, ok := parse(v) - if !ok { - return "" - } - return v[:1+len(pv.major)] -} - -// MajorMinor returns the major.minor version prefix of the semantic version v. -// For example, MajorMinor("v2.1.0") == "v2.1". -// If v is an invalid semantic version string, MajorMinor returns the empty string. -func MajorMinor(v string) string { - pv, ok := parse(v) - if !ok { - return "" - } - i := 1 + len(pv.major) - if j := i + 1 + len(pv.minor); j <= len(v) && v[i] == '.' && v[i+1:j] == pv.minor { - return v[:j] - } - return v[:i] + "." + pv.minor -} - -// Prerelease returns the prerelease suffix of the semantic version v. -// For example, Prerelease("v2.1.0-pre+meta") == "-pre". -// If v is an invalid semantic version string, Prerelease returns the empty string. -func Prerelease(v string) string { - pv, ok := parse(v) - if !ok { - return "" - } - return pv.prerelease -} - -// Build returns the build suffix of the semantic version v. -// For example, Build("v2.1.0+meta") == "+meta". -// If v is an invalid semantic version string, Build returns the empty string. -func Build(v string) string { - pv, ok := parse(v) - if !ok { - return "" - } - return pv.build -} - -// Compare returns an integer comparing two versions according to -// semantic version precedence. -// The result will be 0 if v == w, -1 if v < w, or +1 if v > w. -// -// An invalid semantic version string is considered less than a valid one. -// All invalid semantic version strings compare equal to each other. -func Compare(v, w string) int { - pv, ok1 := parse(v) - pw, ok2 := parse(w) - if !ok1 && !ok2 { - return 0 - } - if !ok1 { - return -1 - } - if !ok2 { - return +1 - } - if c := compareInt(pv.major, pw.major); c != 0 { - return c - } - if c := compareInt(pv.minor, pw.minor); c != 0 { - return c - } - if c := compareInt(pv.patch, pw.patch); c != 0 { - return c - } - return comparePrerelease(pv.prerelease, pw.prerelease) -} - -// Max canonicalizes its arguments and then returns the version string -// that compares greater. -// -// Deprecated: use Compare instead. In most cases, returning a canonicalized -// version is not expected or desired. 
-func Max(v, w string) string { - v = Canonical(v) - w = Canonical(w) - if Compare(v, w) > 0 { - return v - } - return w -} - -func parse(v string) (p parsed, ok bool) { - if v == "" || v[0] != 'v' { - p.err = "missing v prefix" - return - } - p.major, v, ok = parseInt(v[1:]) - if !ok { - p.err = "bad major version" - return - } - if v == "" { - p.minor = "0" - p.patch = "0" - p.short = ".0.0" - return - } - if v[0] != '.' { - p.err = "bad minor prefix" - ok = false - return - } - p.minor, v, ok = parseInt(v[1:]) - if !ok { - p.err = "bad minor version" - return - } - if v == "" { - p.patch = "0" - p.short = ".0" - return - } - if v[0] != '.' { - p.err = "bad patch prefix" - ok = false - return - } - p.patch, v, ok = parseInt(v[1:]) - if !ok { - p.err = "bad patch version" - return - } - if len(v) > 0 && v[0] == '-' { - p.prerelease, v, ok = parsePrerelease(v) - if !ok { - p.err = "bad prerelease" - return - } - } - if len(v) > 0 && v[0] == '+' { - p.build, v, ok = parseBuild(v) - if !ok { - p.err = "bad build" - return - } - } - if v != "" { - p.err = "junk on end" - ok = false - return - } - ok = true - return -} - -func parseInt(v string) (t, rest string, ok bool) { - if v == "" { - return - } - if v[0] < '0' || '9' < v[0] { - return - } - i := 1 - for i < len(v) && '0' <= v[i] && v[i] <= '9' { - i++ - } - if v[0] == '0' && i != 1 { - return - } - return v[:i], v[i:], true -} - -func parsePrerelease(v string) (t, rest string, ok bool) { - // "A pre-release version MAY be denoted by appending a hyphen and - // a series of dot separated identifiers immediately following the patch version. - // Identifiers MUST comprise only ASCII alphanumerics and hyphen [0-9A-Za-z-]. - // Identifiers MUST NOT be empty. Numeric identifiers MUST NOT include leading zeroes." - if v == "" || v[0] != '-' { - return - } - i := 1 - start := 1 - for i < len(v) && v[i] != '+' { - if !isIdentChar(v[i]) && v[i] != '.' { - return - } - if v[i] == '.' { - if start == i || isBadNum(v[start:i]) { - return - } - start = i + 1 - } - i++ - } - if start == i || isBadNum(v[start:i]) { - return - } - return v[:i], v[i:], true -} - -func parseBuild(v string) (t, rest string, ok bool) { - if v == "" || v[0] != '+' { - return - } - i := 1 - start := 1 - for i < len(v) { - if !isIdentChar(v[i]) && v[i] != '.' { - return - } - if v[i] == '.' { - if start == i { - return - } - start = i + 1 - } - i++ - } - if start == i { - return - } - return v[:i], v[i:], true -} - -func isIdentChar(c byte) bool { - return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' || c == '-' -} - -func isBadNum(v string) bool { - i := 0 - for i < len(v) && '0' <= v[i] && v[i] <= '9' { - i++ - } - return i == len(v) && i > 1 && v[0] == '0' -} - -func isNum(v string) bool { - i := 0 - for i < len(v) && '0' <= v[i] && v[i] <= '9' { - i++ - } - return i == len(v) -} - -func compareInt(x, y string) int { - if x == y { - return 0 - } - if len(x) < len(y) { - return -1 - } - if len(x) > len(y) { - return +1 - } - if x < y { - return -1 - } else { - return +1 - } -} - -func comparePrerelease(x, y string) int { - // "When major, minor, and patch are equal, a pre-release version has - // lower precedence than a normal version. - // Example: 1.0.0-alpha < 1.0.0. 
- // Precedence for two pre-release versions with the same major, minor, - // and patch version MUST be determined by comparing each dot separated - // identifier from left to right until a difference is found as follows: - // identifiers consisting of only digits are compared numerically and - // identifiers with letters or hyphens are compared lexically in ASCII - // sort order. Numeric identifiers always have lower precedence than - // non-numeric identifiers. A larger set of pre-release fields has a - // higher precedence than a smaller set, if all of the preceding - // identifiers are equal. - // Example: 1.0.0-alpha < 1.0.0-alpha.1 < 1.0.0-alpha.beta < - // 1.0.0-beta < 1.0.0-beta.2 < 1.0.0-beta.11 < 1.0.0-rc.1 < 1.0.0." - if x == y { - return 0 - } - if x == "" { - return +1 - } - if y == "" { - return -1 - } - for x != "" && y != "" { - x = x[1:] // skip - or . - y = y[1:] // skip - or . - var dx, dy string - dx, x = nextIdent(x) - dy, y = nextIdent(y) - if dx != dy { - ix := isNum(dx) - iy := isNum(dy) - if ix != iy { - if ix { - return -1 - } else { - return +1 - } - } - if ix { - if len(dx) < len(dy) { - return -1 - } - if len(dx) > len(dy) { - return +1 - } - } - if dx < dy { - return -1 - } else { - return +1 - } - } - } - if x == "" { - return -1 - } else { - return +1 - } -} - -func nextIdent(x string) (dx, rest string) { - i := 0 - for i < len(x) && x[i] != '.' { - i++ - } - return x[:i], x[i:] -} diff --git a/vendor/golang.org/x/net/AUTHORS b/vendor/golang.org/x/net/AUTHORS deleted file mode 100644 index 15167cd7..00000000 --- a/vendor/golang.org/x/net/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/net/CONTRIBUTORS b/vendor/golang.org/x/net/CONTRIBUTORS deleted file mode 100644 index 1c4577e9..00000000 --- a/vendor/golang.org/x/net/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/net/LICENSE b/vendor/golang.org/x/net/LICENSE deleted file mode 100644 index 6a66aea5..00000000 --- a/vendor/golang.org/x/net/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/net/PATENTS b/vendor/golang.org/x/net/PATENTS deleted file mode 100644 index 73309904..00000000 --- a/vendor/golang.org/x/net/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. - -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/net/bpf/asm.go b/vendor/golang.org/x/net/bpf/asm.go deleted file mode 100644 index 15e21b18..00000000 --- a/vendor/golang.org/x/net/bpf/asm.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package bpf - -import "fmt" - -// Assemble converts insts into raw instructions suitable for loading -// into a BPF virtual machine. -// -// Currently, no optimization is attempted, the assembled program flow -// is exactly as provided. -func Assemble(insts []Instruction) ([]RawInstruction, error) { - ret := make([]RawInstruction, len(insts)) - var err error - for i, inst := range insts { - ret[i], err = inst.Assemble() - if err != nil { - return nil, fmt.Errorf("assembling instruction %d: %s", i+1, err) - } - } - return ret, nil -} - -// Disassemble attempts to parse raw back into -// Instructions. Unrecognized RawInstructions are assumed to be an -// extension not implemented by this package, and are passed through -// unchanged to the output. The allDecoded value reports whether insts -// contains no RawInstructions. 
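// A minimal round-trip sketch for Assemble and Disassemble above, using a
// four-instruction EtherType filter (the ARP value 0x0806 is just an example
// payload): every assembled RawInstruction decodes back to a high-level
// Instruction, so allDecoded reports true.

package main

import (
	"fmt"

	"golang.org/x/net/bpf"
)

func main() {
	prog := []bpf.Instruction{
		bpf.LoadAbsolute{Off: 12, Size: 2},                           // A = EtherType
		bpf.JumpIf{Cond: bpf.JumpNotEqual, Val: 0x0806, SkipTrue: 1}, // skip capture unless ARP
		bpf.RetConstant{Val: 4096},                                   // capture up to 4k
		bpf.RetConstant{Val: 0},                                      // ignore
	}

	raw, err := bpf.Assemble(prog)
	if err != nil {
		panic(err)
	}

	insts, allDecoded := bpf.Disassemble(raw)
	fmt.Println(len(insts), allDecoded) // 4 true
}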
-func Disassemble(raw []RawInstruction) (insts []Instruction, allDecoded bool) { - insts = make([]Instruction, len(raw)) - allDecoded = true - for i, r := range raw { - insts[i] = r.Disassemble() - if _, ok := insts[i].(RawInstruction); ok { - allDecoded = false - } - } - return insts, allDecoded -} diff --git a/vendor/golang.org/x/net/bpf/constants.go b/vendor/golang.org/x/net/bpf/constants.go deleted file mode 100644 index 12f3ee83..00000000 --- a/vendor/golang.org/x/net/bpf/constants.go +++ /dev/null @@ -1,222 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package bpf - -// A Register is a register of the BPF virtual machine. -type Register uint16 - -const ( - // RegA is the accumulator register. RegA is always the - // destination register of ALU operations. - RegA Register = iota - // RegX is the indirection register, used by LoadIndirect - // operations. - RegX -) - -// An ALUOp is an arithmetic or logic operation. -type ALUOp uint16 - -// ALU binary operation types. -const ( - ALUOpAdd ALUOp = iota << 4 - ALUOpSub - ALUOpMul - ALUOpDiv - ALUOpOr - ALUOpAnd - ALUOpShiftLeft - ALUOpShiftRight - aluOpNeg // Not exported because it's the only unary ALU operation, and gets its own instruction type. - ALUOpMod - ALUOpXor -) - -// A JumpTest is a comparison operator used in conditional jumps. -type JumpTest uint16 - -// Supported operators for conditional jumps. -// K can be RegX for JumpIfX -const ( - // K == A - JumpEqual JumpTest = iota - // K != A - JumpNotEqual - // K > A - JumpGreaterThan - // K < A - JumpLessThan - // K >= A - JumpGreaterOrEqual - // K <= A - JumpLessOrEqual - // K & A != 0 - JumpBitsSet - // K & A == 0 - JumpBitsNotSet -) - -// An Extension is a function call provided by the kernel that -// performs advanced operations that are expensive or impossible -// within the BPF virtual machine. -// -// Extensions are only implemented by the Linux kernel. -// -// TODO: should we prune this list? Some of these extensions seem -// either broken or near-impossible to use correctly, whereas other -// (len, random, ifindex) are quite useful. -type Extension int - -// Extension functions available in the Linux kernel. -const ( - // extOffset is the negative maximum number of instructions used - // to load instructions by overloading the K argument. - extOffset = -0x1000 - // ExtLen returns the length of the packet. - ExtLen Extension = 1 - // ExtProto returns the packet's L3 protocol type. - ExtProto Extension = 0 - // ExtType returns the packet's type (skb->pkt_type in the kernel) - // - // TODO: better documentation. How nice an API do we want to - // provide for these esoteric extensions? - ExtType Extension = 4 - // ExtPayloadOffset returns the offset of the packet payload, or - // the first protocol header that the kernel does not know how to - // parse. - ExtPayloadOffset Extension = 52 - // ExtInterfaceIndex returns the index of the interface on which - // the packet was received. - ExtInterfaceIndex Extension = 8 - // ExtNetlinkAttr returns the netlink attribute of type X at - // offset A. - ExtNetlinkAttr Extension = 12 - // ExtNetlinkAttrNested returns the nested netlink attribute of - // type X at offset A. - ExtNetlinkAttrNested Extension = 16 - // ExtMark returns the packet's mark value. - ExtMark Extension = 20 - // ExtQueue returns the packet's assigned hardware queue. 
- ExtQueue Extension = 24 - // ExtLinkLayerType returns the packet's hardware address type - // (e.g. Ethernet, Infiniband). - ExtLinkLayerType Extension = 28 - // ExtRXHash returns the packets receive hash. - // - // TODO: figure out what this rxhash actually is. - ExtRXHash Extension = 32 - // ExtCPUID returns the ID of the CPU processing the current - // packet. - ExtCPUID Extension = 36 - // ExtVLANTag returns the packet's VLAN tag. - ExtVLANTag Extension = 44 - // ExtVLANTagPresent returns non-zero if the packet has a VLAN - // tag. - // - // TODO: I think this might be a lie: it reads bit 0x1000 of the - // VLAN header, which changed meaning in recent revisions of the - // spec - this extension may now return meaningless information. - ExtVLANTagPresent Extension = 48 - // ExtVLANProto returns 0x8100 if the frame has a VLAN header, - // 0x88a8 if the frame has a "Q-in-Q" double VLAN header, or some - // other value if no VLAN information is present. - ExtVLANProto Extension = 60 - // ExtRand returns a uniformly random uint32. - ExtRand Extension = 56 -) - -// The following gives names to various bit patterns used in opcode construction. - -const ( - opMaskCls uint16 = 0x7 - // opClsLoad masks - opMaskLoadDest = 0x01 - opMaskLoadWidth = 0x18 - opMaskLoadMode = 0xe0 - // opClsALU & opClsJump - opMaskOperand = 0x08 - opMaskOperator = 0xf0 -) - -const ( - // +---------------+-----------------+---+---+---+ - // | AddrMode (3b) | LoadWidth (2b) | 0 | 0 | 0 | - // +---------------+-----------------+---+---+---+ - opClsLoadA uint16 = iota - // +---------------+-----------------+---+---+---+ - // | AddrMode (3b) | LoadWidth (2b) | 0 | 0 | 1 | - // +---------------+-----------------+---+---+---+ - opClsLoadX - // +---+---+---+---+---+---+---+---+ - // | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | - // +---+---+---+---+---+---+---+---+ - opClsStoreA - // +---+---+---+---+---+---+---+---+ - // | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | - // +---+---+---+---+---+---+---+---+ - opClsStoreX - // +---------------+-----------------+---+---+---+ - // | Operator (4b) | OperandSrc (1b) | 1 | 0 | 0 | - // +---------------+-----------------+---+---+---+ - opClsALU - // +-----------------------------+---+---+---+---+ - // | TestOperator (4b) | 0 | 1 | 0 | 1 | - // +-----------------------------+---+---+---+---+ - opClsJump - // +---+-------------------------+---+---+---+---+ - // | 0 | 0 | 0 | RetSrc (1b) | 0 | 1 | 1 | 0 | - // +---+-------------------------+---+---+---+---+ - opClsReturn - // +---+-------------------------+---+---+---+---+ - // | 0 | 0 | 0 | TXAorTAX (1b) | 0 | 1 | 1 | 1 | - // +---+-------------------------+---+---+---+---+ - opClsMisc -) - -const ( - opAddrModeImmediate uint16 = iota << 5 - opAddrModeAbsolute - opAddrModeIndirect - opAddrModeScratch - opAddrModePacketLen // actually an extension, not an addressing mode. - opAddrModeMemShift -) - -const ( - opLoadWidth4 uint16 = iota << 3 - opLoadWidth2 - opLoadWidth1 -) - -// Operand for ALU and Jump instructions -type opOperand uint16 - -// Supported operand sources. -const ( - opOperandConstant opOperand = iota << 3 - opOperandX -) - -// An jumpOp is a conditional jump condition. -type jumpOp uint16 - -// Supported jump conditions. 
-const ( - opJumpAlways jumpOp = iota << 4 - opJumpEqual - opJumpGT - opJumpGE - opJumpSet -) - -const ( - opRetSrcConstant uint16 = iota << 4 - opRetSrcA -) - -const ( - opMiscTAX = 0x00 - opMiscTXA = 0x80 -) diff --git a/vendor/golang.org/x/net/bpf/doc.go b/vendor/golang.org/x/net/bpf/doc.go deleted file mode 100644 index ae62feb5..00000000 --- a/vendor/golang.org/x/net/bpf/doc.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* - -Package bpf implements marshaling and unmarshaling of programs for the -Berkeley Packet Filter virtual machine, and provides a Go implementation -of the virtual machine. - -BPF's main use is to specify a packet filter for network taps, so that -the kernel doesn't have to expensively copy every packet it sees to -userspace. However, it's been repurposed to other areas where running -user code in-kernel is needed. For example, Linux's seccomp uses BPF -to apply security policies to system calls. For simplicity, this -documentation refers only to packets, but other uses of BPF have their -own data payloads. - -BPF programs run in a restricted virtual machine. It has almost no -access to kernel functions, and while conditional branches are -allowed, they can only jump forwards, to guarantee that there are no -infinite loops. - -The virtual machine - -The BPF VM is an accumulator machine. Its main register, called -register A, is an implicit source and destination in all arithmetic -and logic operations. The machine also has 16 scratch registers for -temporary storage, and an indirection register (register X) for -indirect memory access. All registers are 32 bits wide. - -Each run of a BPF program is given one packet, which is placed in the -VM's read-only "main memory". LoadAbsolute and LoadIndirect -instructions can fetch up to 32 bits at a time into register A for -examination. - -The goal of a BPF program is to produce and return a verdict (uint32), -which tells the kernel what to do with the packet. In the context of -packet filtering, the returned value is the number of bytes of the -packet to forward to userspace, or 0 to ignore the packet. Other -contexts like seccomp define their own return values. - -In order to simplify programs, attempts to read past the end of the -packet terminate the program execution with a verdict of 0 (ignore -packet). This means that the vast majority of BPF programs don't need -to do any explicit bounds checking. - -In addition to the bytes of the packet, some BPF programs have access -to extensions, which are essentially calls to kernel utility -functions. Currently, the only extensions supported by this package -are the Linux packet filter extensions. - -Examples - -This packet filter selects all ARP packets. - - bpf.Assemble([]bpf.Instruction{ - // Load "EtherType" field from the ethernet header. - bpf.LoadAbsolute{Off: 12, Size: 2}, - // Skip over the next instruction if EtherType is not ARP. - bpf.JumpIf{Cond: bpf.JumpNotEqual, Val: 0x0806, SkipTrue: 1}, - // Verdict is "send up to 4k of the packet to userspace." - bpf.RetConstant{Val: 4096}, - // Verdict is "ignore packet." - bpf.RetConstant{Val: 0}, - }) - -This packet filter captures a random 1% sample of traffic. - - bpf.Assemble([]bpf.Instruction{ - // Get a 32-bit random number from the Linux kernel. - bpf.LoadExtension{Num: bpf.ExtRand}, - // 1% dice roll? 
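(A caution when adapting the constant on the next line: Go has no
exponentiation operator, so 2^32/100 parses as 2 XOR (32/100), which is
just 2 — the filter as written captures roughly 2 in 2^32 packets, not 1%.
A true one-percent threshold would be written 1<<32/100, i.e. 42949672.)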
- bpf.JumpIf{Cond: bpf.JumpLessThan, Val: 2^32/100, SkipFalse: 1}, - // Capture. - bpf.RetConstant{Val: 4096}, - // Ignore. - bpf.RetConstant{Val: 0}, - }) - -*/ -package bpf // import "golang.org/x/net/bpf" diff --git a/vendor/golang.org/x/net/bpf/instructions.go b/vendor/golang.org/x/net/bpf/instructions.go deleted file mode 100644 index 3cffcaa0..00000000 --- a/vendor/golang.org/x/net/bpf/instructions.go +++ /dev/null @@ -1,726 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package bpf - -import "fmt" - -// An Instruction is one instruction executed by the BPF virtual -// machine. -type Instruction interface { - // Assemble assembles the Instruction into a RawInstruction. - Assemble() (RawInstruction, error) -} - -// A RawInstruction is a raw BPF virtual machine instruction. -type RawInstruction struct { - // Operation to execute. - Op uint16 - // For conditional jump instructions, the number of instructions - // to skip if the condition is true/false. - Jt uint8 - Jf uint8 - // Constant parameter. The meaning depends on the Op. - K uint32 -} - -// Assemble implements the Instruction Assemble method. -func (ri RawInstruction) Assemble() (RawInstruction, error) { return ri, nil } - -// Disassemble parses ri into an Instruction and returns it. If ri is -// not recognized by this package, ri itself is returned. -func (ri RawInstruction) Disassemble() Instruction { - switch ri.Op & opMaskCls { - case opClsLoadA, opClsLoadX: - reg := Register(ri.Op & opMaskLoadDest) - sz := 0 - switch ri.Op & opMaskLoadWidth { - case opLoadWidth4: - sz = 4 - case opLoadWidth2: - sz = 2 - case opLoadWidth1: - sz = 1 - default: - return ri - } - switch ri.Op & opMaskLoadMode { - case opAddrModeImmediate: - if sz != 4 { - return ri - } - return LoadConstant{Dst: reg, Val: ri.K} - case opAddrModeScratch: - if sz != 4 || ri.K > 15 { - return ri - } - return LoadScratch{Dst: reg, N: int(ri.K)} - case opAddrModeAbsolute: - if ri.K > extOffset+0xffffffff { - return LoadExtension{Num: Extension(-extOffset + ri.K)} - } - return LoadAbsolute{Size: sz, Off: ri.K} - case opAddrModeIndirect: - return LoadIndirect{Size: sz, Off: ri.K} - case opAddrModePacketLen: - if sz != 4 { - return ri - } - return LoadExtension{Num: ExtLen} - case opAddrModeMemShift: - return LoadMemShift{Off: ri.K} - default: - return ri - } - - case opClsStoreA: - if ri.Op != opClsStoreA || ri.K > 15 { - return ri - } - return StoreScratch{Src: RegA, N: int(ri.K)} - - case opClsStoreX: - if ri.Op != opClsStoreX || ri.K > 15 { - return ri - } - return StoreScratch{Src: RegX, N: int(ri.K)} - - case opClsALU: - switch op := ALUOp(ri.Op & opMaskOperator); op { - case ALUOpAdd, ALUOpSub, ALUOpMul, ALUOpDiv, ALUOpOr, ALUOpAnd, ALUOpShiftLeft, ALUOpShiftRight, ALUOpMod, ALUOpXor: - switch operand := opOperand(ri.Op & opMaskOperand); operand { - case opOperandX: - return ALUOpX{Op: op} - case opOperandConstant: - return ALUOpConstant{Op: op, Val: ri.K} - default: - return ri - } - case aluOpNeg: - return NegateA{} - default: - return ri - } - - case opClsJump: - switch op := jumpOp(ri.Op & opMaskOperator); op { - case opJumpAlways: - return Jump{Skip: ri.K} - case opJumpEqual, opJumpGT, opJumpGE, opJumpSet: - cond, skipTrue, skipFalse := jumpOpToTest(op, ri.Jt, ri.Jf) - switch operand := opOperand(ri.Op & opMaskOperand); operand { - case opOperandX: - return JumpIfX{Cond: cond, SkipTrue: skipTrue, SkipFalse: skipFalse} - case 
opOperandConstant: - return JumpIf{Cond: cond, Val: ri.K, SkipTrue: skipTrue, SkipFalse: skipFalse} - default: - return ri - } - default: - return ri - } - - case opClsReturn: - switch ri.Op { - case opClsReturn | opRetSrcA: - return RetA{} - case opClsReturn | opRetSrcConstant: - return RetConstant{Val: ri.K} - default: - return ri - } - - case opClsMisc: - switch ri.Op { - case opClsMisc | opMiscTAX: - return TAX{} - case opClsMisc | opMiscTXA: - return TXA{} - default: - return ri - } - - default: - panic("unreachable") // switch is exhaustive on the bit pattern - } -} - -func jumpOpToTest(op jumpOp, skipTrue uint8, skipFalse uint8) (JumpTest, uint8, uint8) { - var test JumpTest - - // Decode "fake" jump conditions that don't appear in machine code - // Ensures the Assemble -> Disassemble stage recreates the same instructions - // See https://github.com/golang/go/issues/18470 - if skipTrue == 0 { - switch op { - case opJumpEqual: - test = JumpNotEqual - case opJumpGT: - test = JumpLessOrEqual - case opJumpGE: - test = JumpLessThan - case opJumpSet: - test = JumpBitsNotSet - } - - return test, skipFalse, 0 - } - - switch op { - case opJumpEqual: - test = JumpEqual - case opJumpGT: - test = JumpGreaterThan - case opJumpGE: - test = JumpGreaterOrEqual - case opJumpSet: - test = JumpBitsSet - } - - return test, skipTrue, skipFalse -} - -// LoadConstant loads Val into register Dst. -type LoadConstant struct { - Dst Register - Val uint32 -} - -// Assemble implements the Instruction Assemble method. -func (a LoadConstant) Assemble() (RawInstruction, error) { - return assembleLoad(a.Dst, 4, opAddrModeImmediate, a.Val) -} - -// String returns the instruction in assembler notation. -func (a LoadConstant) String() string { - switch a.Dst { - case RegA: - return fmt.Sprintf("ld #%d", a.Val) - case RegX: - return fmt.Sprintf("ldx #%d", a.Val) - default: - return fmt.Sprintf("unknown instruction: %#v", a) - } -} - -// LoadScratch loads scratch[N] into register Dst. -type LoadScratch struct { - Dst Register - N int // 0-15 -} - -// Assemble implements the Instruction Assemble method. -func (a LoadScratch) Assemble() (RawInstruction, error) { - if a.N < 0 || a.N > 15 { - return RawInstruction{}, fmt.Errorf("invalid scratch slot %d", a.N) - } - return assembleLoad(a.Dst, 4, opAddrModeScratch, uint32(a.N)) -} - -// String returns the instruction in assembler notation. -func (a LoadScratch) String() string { - switch a.Dst { - case RegA: - return fmt.Sprintf("ld M[%d]", a.N) - case RegX: - return fmt.Sprintf("ldx M[%d]", a.N) - default: - return fmt.Sprintf("unknown instruction: %#v", a) - } -} - -// LoadAbsolute loads packet[Off:Off+Size] as an integer value into -// register A. -type LoadAbsolute struct { - Off uint32 - Size int // 1, 2 or 4 -} - -// Assemble implements the Instruction Assemble method. -func (a LoadAbsolute) Assemble() (RawInstruction, error) { - return assembleLoad(RegA, a.Size, opAddrModeAbsolute, a.Off) -} - -// String returns the instruction in assembler notation. -func (a LoadAbsolute) String() string { - switch a.Size { - case 1: // byte - return fmt.Sprintf("ldb [%d]", a.Off) - case 2: // half word - return fmt.Sprintf("ldh [%d]", a.Off) - case 4: // word - if a.Off > extOffset+0xffffffff { - return LoadExtension{Num: Extension(a.Off + 0x1000)}.String() - } - return fmt.Sprintf("ld [%d]", a.Off) - default: - return fmt.Sprintf("unknown instruction: %#v", a) - } -} - -// LoadIndirect loads packet[X+Off:X+Off+Size] as an integer value -// into register A. 
-type LoadIndirect struct { - Off uint32 - Size int // 1, 2 or 4 -} - -// Assemble implements the Instruction Assemble method. -func (a LoadIndirect) Assemble() (RawInstruction, error) { - return assembleLoad(RegA, a.Size, opAddrModeIndirect, a.Off) -} - -// String returns the instruction in assembler notation. -func (a LoadIndirect) String() string { - switch a.Size { - case 1: // byte - return fmt.Sprintf("ldb [x + %d]", a.Off) - case 2: // half word - return fmt.Sprintf("ldh [x + %d]", a.Off) - case 4: // word - return fmt.Sprintf("ld [x + %d]", a.Off) - default: - return fmt.Sprintf("unknown instruction: %#v", a) - } -} - -// LoadMemShift multiplies the first 4 bits of the byte at packet[Off] -// by 4 and stores the result in register X. -// -// This instruction is mainly useful to load into X the length of an -// IPv4 packet header in a single instruction, rather than have to do -// the arithmetic on the header's first byte by hand. -type LoadMemShift struct { - Off uint32 -} - -// Assemble implements the Instruction Assemble method. -func (a LoadMemShift) Assemble() (RawInstruction, error) { - return assembleLoad(RegX, 1, opAddrModeMemShift, a.Off) -} - -// String returns the instruction in assembler notation. -func (a LoadMemShift) String() string { - return fmt.Sprintf("ldx 4*([%d]&0xf)", a.Off) -} - -// LoadExtension invokes a linux-specific extension and stores the -// result in register A. -type LoadExtension struct { - Num Extension -} - -// Assemble implements the Instruction Assemble method. -func (a LoadExtension) Assemble() (RawInstruction, error) { - if a.Num == ExtLen { - return assembleLoad(RegA, 4, opAddrModePacketLen, 0) - } - return assembleLoad(RegA, 4, opAddrModeAbsolute, uint32(extOffset+a.Num)) -} - -// String returns the instruction in assembler notation. -func (a LoadExtension) String() string { - switch a.Num { - case ExtLen: - return "ld #len" - case ExtProto: - return "ld #proto" - case ExtType: - return "ld #type" - case ExtPayloadOffset: - return "ld #poff" - case ExtInterfaceIndex: - return "ld #ifidx" - case ExtNetlinkAttr: - return "ld #nla" - case ExtNetlinkAttrNested: - return "ld #nlan" - case ExtMark: - return "ld #mark" - case ExtQueue: - return "ld #queue" - case ExtLinkLayerType: - return "ld #hatype" - case ExtRXHash: - return "ld #rxhash" - case ExtCPUID: - return "ld #cpu" - case ExtVLANTag: - return "ld #vlan_tci" - case ExtVLANTagPresent: - return "ld #vlan_avail" - case ExtVLANProto: - return "ld #vlan_tpid" - case ExtRand: - return "ld #rand" - default: - return fmt.Sprintf("unknown instruction: %#v", a) - } -} - -// StoreScratch stores register Src into scratch[N]. -type StoreScratch struct { - Src Register - N int // 0-15 -} - -// Assemble implements the Instruction Assemble method. -func (a StoreScratch) Assemble() (RawInstruction, error) { - if a.N < 0 || a.N > 15 { - return RawInstruction{}, fmt.Errorf("invalid scratch slot %d", a.N) - } - var op uint16 - switch a.Src { - case RegA: - op = opClsStoreA - case RegX: - op = opClsStoreX - default: - return RawInstruction{}, fmt.Errorf("invalid source register %v", a.Src) - } - - return RawInstruction{ - Op: op, - K: uint32(a.N), - }, nil -} - -// String returns the instruction in assembler notation. -func (a StoreScratch) String() string { - switch a.Src { - case RegA: - return fmt.Sprintf("st M[%d]", a.N) - case RegX: - return fmt.Sprintf("stx M[%d]", a.N) - default: - return fmt.Sprintf("unknown instruction: %#v", a) - } -} - -// ALUOpConstant executes A = A Val. 
-type ALUOpConstant struct { - Op ALUOp - Val uint32 -} - -// Assemble implements the Instruction Assemble method. -func (a ALUOpConstant) Assemble() (RawInstruction, error) { - return RawInstruction{ - Op: opClsALU | uint16(opOperandConstant) | uint16(a.Op), - K: a.Val, - }, nil -} - -// String returns the instruction in assembler notation. -func (a ALUOpConstant) String() string { - switch a.Op { - case ALUOpAdd: - return fmt.Sprintf("add #%d", a.Val) - case ALUOpSub: - return fmt.Sprintf("sub #%d", a.Val) - case ALUOpMul: - return fmt.Sprintf("mul #%d", a.Val) - case ALUOpDiv: - return fmt.Sprintf("div #%d", a.Val) - case ALUOpMod: - return fmt.Sprintf("mod #%d", a.Val) - case ALUOpAnd: - return fmt.Sprintf("and #%d", a.Val) - case ALUOpOr: - return fmt.Sprintf("or #%d", a.Val) - case ALUOpXor: - return fmt.Sprintf("xor #%d", a.Val) - case ALUOpShiftLeft: - return fmt.Sprintf("lsh #%d", a.Val) - case ALUOpShiftRight: - return fmt.Sprintf("rsh #%d", a.Val) - default: - return fmt.Sprintf("unknown instruction: %#v", a) - } -} - -// ALUOpX executes A = A X -type ALUOpX struct { - Op ALUOp -} - -// Assemble implements the Instruction Assemble method. -func (a ALUOpX) Assemble() (RawInstruction, error) { - return RawInstruction{ - Op: opClsALU | uint16(opOperandX) | uint16(a.Op), - }, nil -} - -// String returns the instruction in assembler notation. -func (a ALUOpX) String() string { - switch a.Op { - case ALUOpAdd: - return "add x" - case ALUOpSub: - return "sub x" - case ALUOpMul: - return "mul x" - case ALUOpDiv: - return "div x" - case ALUOpMod: - return "mod x" - case ALUOpAnd: - return "and x" - case ALUOpOr: - return "or x" - case ALUOpXor: - return "xor x" - case ALUOpShiftLeft: - return "lsh x" - case ALUOpShiftRight: - return "rsh x" - default: - return fmt.Sprintf("unknown instruction: %#v", a) - } -} - -// NegateA executes A = -A. -type NegateA struct{} - -// Assemble implements the Instruction Assemble method. -func (a NegateA) Assemble() (RawInstruction, error) { - return RawInstruction{ - Op: opClsALU | uint16(aluOpNeg), - }, nil -} - -// String returns the instruction in assembler notation. -func (a NegateA) String() string { - return fmt.Sprintf("neg") -} - -// Jump skips the following Skip instructions in the program. -type Jump struct { - Skip uint32 -} - -// Assemble implements the Instruction Assemble method. -func (a Jump) Assemble() (RawInstruction, error) { - return RawInstruction{ - Op: opClsJump | uint16(opJumpAlways), - K: a.Skip, - }, nil -} - -// String returns the instruction in assembler notation. -func (a Jump) String() string { - return fmt.Sprintf("ja %d", a.Skip) -} - -// JumpIf skips the following Skip instructions in the program if A -// Val is true. -type JumpIf struct { - Cond JumpTest - Val uint32 - SkipTrue uint8 - SkipFalse uint8 -} - -// Assemble implements the Instruction Assemble method. -func (a JumpIf) Assemble() (RawInstruction, error) { - return jumpToRaw(a.Cond, opOperandConstant, a.Val, a.SkipTrue, a.SkipFalse) -} - -// String returns the instruction in assembler notation. -func (a JumpIf) String() string { - return jumpToString(a.Cond, fmt.Sprintf("#%d", a.Val), a.SkipTrue, a.SkipFalse) -} - -// JumpIfX skips the following Skip instructions in the program if A -// X is true. -type JumpIfX struct { - Cond JumpTest - SkipTrue uint8 - SkipFalse uint8 -} - -// Assemble implements the Instruction Assemble method. 
-func (a JumpIfX) Assemble() (RawInstruction, error) { - return jumpToRaw(a.Cond, opOperandX, 0, a.SkipTrue, a.SkipFalse) -} - -// String returns the instruction in assembler notation. -func (a JumpIfX) String() string { - return jumpToString(a.Cond, "x", a.SkipTrue, a.SkipFalse) -} - -// jumpToRaw assembles a jump instruction into a RawInstruction -func jumpToRaw(test JumpTest, operand opOperand, k uint32, skipTrue, skipFalse uint8) (RawInstruction, error) { - var ( - cond jumpOp - flip bool - ) - switch test { - case JumpEqual: - cond = opJumpEqual - case JumpNotEqual: - cond, flip = opJumpEqual, true - case JumpGreaterThan: - cond = opJumpGT - case JumpLessThan: - cond, flip = opJumpGE, true - case JumpGreaterOrEqual: - cond = opJumpGE - case JumpLessOrEqual: - cond, flip = opJumpGT, true - case JumpBitsSet: - cond = opJumpSet - case JumpBitsNotSet: - cond, flip = opJumpSet, true - default: - return RawInstruction{}, fmt.Errorf("unknown JumpTest %v", test) - } - jt, jf := skipTrue, skipFalse - if flip { - jt, jf = jf, jt - } - return RawInstruction{ - Op: opClsJump | uint16(cond) | uint16(operand), - Jt: jt, - Jf: jf, - K: k, - }, nil -} - -// jumpToString converts a jump instruction to assembler notation -func jumpToString(cond JumpTest, operand string, skipTrue, skipFalse uint8) string { - switch cond { - // K == A - case JumpEqual: - return conditionalJump(operand, skipTrue, skipFalse, "jeq", "jneq") - // K != A - case JumpNotEqual: - return fmt.Sprintf("jneq %s,%d", operand, skipTrue) - // K > A - case JumpGreaterThan: - return conditionalJump(operand, skipTrue, skipFalse, "jgt", "jle") - // K < A - case JumpLessThan: - return fmt.Sprintf("jlt %s,%d", operand, skipTrue) - // K >= A - case JumpGreaterOrEqual: - return conditionalJump(operand, skipTrue, skipFalse, "jge", "jlt") - // K <= A - case JumpLessOrEqual: - return fmt.Sprintf("jle %s,%d", operand, skipTrue) - // K & A != 0 - case JumpBitsSet: - if skipFalse > 0 { - return fmt.Sprintf("jset %s,%d,%d", operand, skipTrue, skipFalse) - } - return fmt.Sprintf("jset %s,%d", operand, skipTrue) - // K & A == 0, there is no assembler instruction for JumpBitNotSet, use JumpBitSet and invert skips - case JumpBitsNotSet: - return jumpToString(JumpBitsSet, operand, skipFalse, skipTrue) - default: - return fmt.Sprintf("unknown JumpTest %#v", cond) - } -} - -func conditionalJump(operand string, skipTrue, skipFalse uint8, positiveJump, negativeJump string) string { - if skipTrue > 0 { - if skipFalse > 0 { - return fmt.Sprintf("%s %s,%d,%d", positiveJump, operand, skipTrue, skipFalse) - } - return fmt.Sprintf("%s %s,%d", positiveJump, operand, skipTrue) - } - return fmt.Sprintf("%s %s,%d", negativeJump, operand, skipFalse) -} - -// RetA exits the BPF program, returning the value of register A. -type RetA struct{} - -// Assemble implements the Instruction Assemble method. -func (a RetA) Assemble() (RawInstruction, error) { - return RawInstruction{ - Op: opClsReturn | opRetSrcA, - }, nil -} - -// String returns the instruction in assembler notation. -func (a RetA) String() string { - return fmt.Sprintf("ret a") -} - -// RetConstant exits the BPF program, returning a constant value. -type RetConstant struct { - Val uint32 -} - -// Assemble implements the Instruction Assemble method. -func (a RetConstant) Assemble() (RawInstruction, error) { - return RawInstruction{ - Op: opClsReturn | opRetSrcConstant, - K: a.Val, - }, nil -} - -// String returns the instruction in assembler notation. 
-func (a RetConstant) String() string { - return fmt.Sprintf("ret #%d", a.Val) -} - -// TXA copies the value of register X to register A. -type TXA struct{} - -// Assemble implements the Instruction Assemble method. -func (a TXA) Assemble() (RawInstruction, error) { - return RawInstruction{ - Op: opClsMisc | opMiscTXA, - }, nil -} - -// String returns the instruction in assembler notation. -func (a TXA) String() string { - return fmt.Sprintf("txa") -} - -// TAX copies the value of register A to register X. -type TAX struct{} - -// Assemble implements the Instruction Assemble method. -func (a TAX) Assemble() (RawInstruction, error) { - return RawInstruction{ - Op: opClsMisc | opMiscTAX, - }, nil -} - -// String returns the instruction in assembler notation. -func (a TAX) String() string { - return fmt.Sprintf("tax") -} - -func assembleLoad(dst Register, loadSize int, mode uint16, k uint32) (RawInstruction, error) { - var ( - cls uint16 - sz uint16 - ) - switch dst { - case RegA: - cls = opClsLoadA - case RegX: - cls = opClsLoadX - default: - return RawInstruction{}, fmt.Errorf("invalid target register %v", dst) - } - switch loadSize { - case 1: - sz = opLoadWidth1 - case 2: - sz = opLoadWidth2 - case 4: - sz = opLoadWidth4 - default: - return RawInstruction{}, fmt.Errorf("invalid load byte length %d", sz) - } - return RawInstruction{ - Op: cls | sz | mode, - K: k, - }, nil -} diff --git a/vendor/golang.org/x/net/bpf/setter.go b/vendor/golang.org/x/net/bpf/setter.go deleted file mode 100644 index 43e35f0a..00000000 --- a/vendor/golang.org/x/net/bpf/setter.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package bpf - -// A Setter is a type which can attach a compiled BPF filter to itself. -type Setter interface { - SetBPF(filter []RawInstruction) error -} diff --git a/vendor/golang.org/x/net/bpf/vm.go b/vendor/golang.org/x/net/bpf/vm.go deleted file mode 100644 index 73f57f1f..00000000 --- a/vendor/golang.org/x/net/bpf/vm.go +++ /dev/null @@ -1,150 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package bpf - -import ( - "errors" - "fmt" -) - -// A VM is an emulated BPF virtual machine. -type VM struct { - filter []Instruction -} - -// NewVM returns a new VM using the input BPF program. 
-func NewVM(filter []Instruction) (*VM, error) { - if len(filter) == 0 { - return nil, errors.New("one or more Instructions must be specified") - } - - for i, ins := range filter { - check := len(filter) - (i + 1) - switch ins := ins.(type) { - // Check for out-of-bounds jumps in instructions - case Jump: - if check <= int(ins.Skip) { - return nil, fmt.Errorf("cannot jump %d instructions; jumping past program bounds", ins.Skip) - } - case JumpIf: - if check <= int(ins.SkipTrue) { - return nil, fmt.Errorf("cannot jump %d instructions in true case; jumping past program bounds", ins.SkipTrue) - } - if check <= int(ins.SkipFalse) { - return nil, fmt.Errorf("cannot jump %d instructions in false case; jumping past program bounds", ins.SkipFalse) - } - case JumpIfX: - if check <= int(ins.SkipTrue) { - return nil, fmt.Errorf("cannot jump %d instructions in true case; jumping past program bounds", ins.SkipTrue) - } - if check <= int(ins.SkipFalse) { - return nil, fmt.Errorf("cannot jump %d instructions in false case; jumping past program bounds", ins.SkipFalse) - } - // Check for division or modulus by zero - case ALUOpConstant: - if ins.Val != 0 { - break - } - - switch ins.Op { - case ALUOpDiv, ALUOpMod: - return nil, errors.New("cannot divide by zero using ALUOpConstant") - } - // Check for unknown extensions - case LoadExtension: - switch ins.Num { - case ExtLen: - default: - return nil, fmt.Errorf("extension %d not implemented", ins.Num) - } - } - } - - // Make sure last instruction is a return instruction - switch filter[len(filter)-1].(type) { - case RetA, RetConstant: - default: - return nil, errors.New("BPF program must end with RetA or RetConstant") - } - - // Though our VM works using disassembled instructions, we - // attempt to assemble the input filter anyway to ensure it is compatible - // with an operating system VM. - _, err := Assemble(filter) - - return &VM{ - filter: filter, - }, err -} - -// Run runs the VM's BPF program against the input bytes. -// Run returns the number of bytes accepted by the BPF program, and any errors -// which occurred while processing the program. 
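// A minimal sketch of driving the emulated VM defined here: NewVM validates
// the program (bounds-checked jumps, a final return instruction), and Run
// reports how many bytes of the input the filter accepts. The packet below is
// a made-up 14-byte Ethernet header, just enough to carry an ARP EtherType at
// offset 12.

package main

import (
	"fmt"

	"golang.org/x/net/bpf"
)

func main() {
	vm, err := bpf.NewVM([]bpf.Instruction{
		bpf.LoadAbsolute{Off: 12, Size: 2},                         // A = EtherType
		bpf.JumpIf{Cond: bpf.JumpEqual, Val: 0x0806, SkipFalse: 1}, // ARP? fall through; else skip
		bpf.RetConstant{Val: 4096},                                 // accept up to 4k
		bpf.RetConstant{Val: 0},                                    // reject
	})
	if err != nil {
		panic(err)
	}

	pkt := make([]byte, 14)       // hypothetical Ethernet header
	pkt[12], pkt[13] = 0x08, 0x06 // EtherType = ARP
	n, err := vm.Run(pkt)
	fmt.Println(n, err) // 4096 <nil>
}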
-func (v *VM) Run(in []byte) (int, error) { - var ( - // Registers of the virtual machine - regA uint32 - regX uint32 - regScratch [16]uint32 - - // OK is true if the program should continue processing the next - // instruction, or false if not, causing the loop to break - ok = true - ) - - // TODO(mdlayher): implement: - // - NegateA: - // - would require a change from uint32 registers to int32 - // registers - - // TODO(mdlayher): add interop tests that check signedness of ALU - // operations against kernel implementation, and make sure Go - // implementation matches behavior - - for i := 0; i < len(v.filter) && ok; i++ { - ins := v.filter[i] - - switch ins := ins.(type) { - case ALUOpConstant: - regA = aluOpConstant(ins, regA) - case ALUOpX: - regA, ok = aluOpX(ins, regA, regX) - case Jump: - i += int(ins.Skip) - case JumpIf: - jump := jumpIf(ins, regA) - i += jump - case JumpIfX: - jump := jumpIfX(ins, regA, regX) - i += jump - case LoadAbsolute: - regA, ok = loadAbsolute(ins, in) - case LoadConstant: - regA, regX = loadConstant(ins, regA, regX) - case LoadExtension: - regA = loadExtension(ins, in) - case LoadIndirect: - regA, ok = loadIndirect(ins, in, regX) - case LoadMemShift: - regX, ok = loadMemShift(ins, in) - case LoadScratch: - regA, regX = loadScratch(ins, regScratch, regA, regX) - case RetA: - return int(regA), nil - case RetConstant: - return int(ins.Val), nil - case StoreScratch: - regScratch = storeScratch(ins, regScratch, regA, regX) - case TAX: - regX = regA - case TXA: - regA = regX - default: - return 0, fmt.Errorf("unknown Instruction at index %d: %T", i, ins) - } - } - - return 0, nil -} diff --git a/vendor/golang.org/x/net/bpf/vm_instructions.go b/vendor/golang.org/x/net/bpf/vm_instructions.go deleted file mode 100644 index cf8947c3..00000000 --- a/vendor/golang.org/x/net/bpf/vm_instructions.go +++ /dev/null @@ -1,182 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package bpf - -import ( - "encoding/binary" - "fmt" -) - -func aluOpConstant(ins ALUOpConstant, regA uint32) uint32 { - return aluOpCommon(ins.Op, regA, ins.Val) -} - -func aluOpX(ins ALUOpX, regA uint32, regX uint32) (uint32, bool) { - // Guard against division or modulus by zero by terminating - // the program, as the OS BPF VM does - if regX == 0 { - switch ins.Op { - case ALUOpDiv, ALUOpMod: - return 0, false - } - } - - return aluOpCommon(ins.Op, regA, regX), true -} - -func aluOpCommon(op ALUOp, regA uint32, value uint32) uint32 { - switch op { - case ALUOpAdd: - return regA + value - case ALUOpSub: - return regA - value - case ALUOpMul: - return regA * value - case ALUOpDiv: - // Division by zero not permitted by NewVM and aluOpX checks - return regA / value - case ALUOpOr: - return regA | value - case ALUOpAnd: - return regA & value - case ALUOpShiftLeft: - return regA << value - case ALUOpShiftRight: - return regA >> value - case ALUOpMod: - // Modulus by zero not permitted by NewVM and aluOpX checks - return regA % value - case ALUOpXor: - return regA ^ value - default: - return regA - } -} - -func jumpIf(ins JumpIf, regA uint32) int { - return jumpIfCommon(ins.Cond, ins.SkipTrue, ins.SkipFalse, regA, ins.Val) -} - -func jumpIfX(ins JumpIfX, regA uint32, regX uint32) int { - return jumpIfCommon(ins.Cond, ins.SkipTrue, ins.SkipFalse, regA, regX) -} - -func jumpIfCommon(cond JumpTest, skipTrue, skipFalse uint8, regA uint32, value uint32) int { - var ok bool - - switch cond { - case JumpEqual: - ok = regA == value - case JumpNotEqual: - ok = regA != value - case JumpGreaterThan: - ok = regA > value - case JumpLessThan: - ok = regA < value - case JumpGreaterOrEqual: - ok = regA >= value - case JumpLessOrEqual: - ok = regA <= value - case JumpBitsSet: - ok = (regA & value) != 0 - case JumpBitsNotSet: - ok = (regA & value) == 0 - } - - if ok { - return int(skipTrue) - } - - return int(skipFalse) -} - -func loadAbsolute(ins LoadAbsolute, in []byte) (uint32, bool) { - offset := int(ins.Off) - size := int(ins.Size) - - return loadCommon(in, offset, size) -} - -func loadConstant(ins LoadConstant, regA uint32, regX uint32) (uint32, uint32) { - switch ins.Dst { - case RegA: - regA = ins.Val - case RegX: - regX = ins.Val - } - - return regA, regX -} - -func loadExtension(ins LoadExtension, in []byte) uint32 { - switch ins.Num { - case ExtLen: - return uint32(len(in)) - default: - panic(fmt.Sprintf("unimplemented extension: %d", ins.Num)) - } -} - -func loadIndirect(ins LoadIndirect, in []byte, regX uint32) (uint32, bool) { - offset := int(ins.Off) + int(regX) - size := int(ins.Size) - - return loadCommon(in, offset, size) -} - -func loadMemShift(ins LoadMemShift, in []byte) (uint32, bool) { - offset := int(ins.Off) - - // Size of LoadMemShift is always 1 byte - if !inBounds(len(in), offset, 1) { - return 0, false - } - - // Mask off high 4 bits and multiply low 4 bits by 4 - return uint32(in[offset]&0x0f) * 4, true -} - -func inBounds(inLen int, offset int, size int) bool { - return offset+size <= inLen -} - -func loadCommon(in []byte, offset int, size int) (uint32, bool) { - if !inBounds(len(in), offset, size) { - return 0, false - } - - switch size { - case 1: - return uint32(in[offset]), true - case 2: - return uint32(binary.BigEndian.Uint16(in[offset : offset+size])), true - case 4: - return uint32(binary.BigEndian.Uint32(in[offset : offset+size])), true - default: - panic(fmt.Sprintf("invalid load size: %d", size)) - } -} - -func loadScratch(ins LoadScratch, regScratch [16]uint32, 
regA uint32, regX uint32) (uint32, uint32) { - switch ins.Dst { - case RegA: - regA = regScratch[ins.N] - case RegX: - regX = regScratch[ins.N] - } - - return regA, regX -} - -func storeScratch(ins StoreScratch, regScratch [16]uint32, regA uint32, regX uint32) [16]uint32 { - switch ins.Src { - case RegA: - regScratch[ins.N] = regA - case RegX: - regScratch[ins.N] = regX - } - - return regScratch -} diff --git a/vendor/golang.org/x/net/html/atom/atom.go b/vendor/golang.org/x/net/html/atom/atom.go deleted file mode 100644 index cd0a8ac1..00000000 --- a/vendor/golang.org/x/net/html/atom/atom.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package atom provides integer codes (also known as atoms) for a fixed set of -// frequently occurring HTML strings: tag names and attribute keys such as "p" -// and "id". -// -// Sharing an atom's name between all elements with the same tag can result in -// fewer string allocations when tokenizing and parsing HTML. Integer -// comparisons are also generally faster than string comparisons. -// -// The value of an atom's particular code is not guaranteed to stay the same -// between versions of this package. Neither is any ordering guaranteed: -// whether atom.H1 < atom.H2 may also change. The codes are not guaranteed to -// be dense. The only guarantees are that e.g. looking up "div" will yield -// atom.Div, calling atom.Div.String will return "div", and atom.Div != 0. -package atom // import "golang.org/x/net/html/atom" - -// Atom is an integer code for a string. The zero value maps to "". -type Atom uint32 - -// String returns the atom's name. -func (a Atom) String() string { - start := uint32(a >> 8) - n := uint32(a & 0xff) - if start+n > uint32(len(atomText)) { - return "" - } - return atomText[start : start+n] -} - -func (a Atom) string() string { - return atomText[a>>8 : a>>8+a&0xff] -} - -// fnv computes the FNV hash with an arbitrary starting value h. -func fnv(h uint32, s []byte) uint32 { - for i := range s { - h ^= uint32(s[i]) - h *= 16777619 - } - return h -} - -func match(s string, t []byte) bool { - for i, c := range t { - if s[i] != c { - return false - } - } - return true -} - -// Lookup returns the atom whose name is s. It returns zero if there is no -// such atom. The lookup is case sensitive. -func Lookup(s []byte) Atom { - if len(s) == 0 || len(s) > maxAtomLen { - return 0 - } - h := fnv(hash0, s) - if a := table[h&uint32(len(table)-1)]; int(a&0xff) == len(s) && match(a.string(), s) { - return a - } - if a := table[(h>>16)&uint32(len(table)-1)]; int(a&0xff) == len(s) && match(a.string(), s) { - return a - } - return 0 -} - -// String returns a string whose contents are equal to s. In that sense, it is -// equivalent to string(s) but may be more efficient. -func String(s []byte) string { - if a := Lookup(s); a != 0 { - return a.String() - } - return string(s) -} diff --git a/vendor/golang.org/x/net/html/atom/table.go b/vendor/golang.org/x/net/html/atom/table.go deleted file mode 100644 index 2a938864..00000000 --- a/vendor/golang.org/x/net/html/atom/table.go +++ /dev/null @@ -1,783 +0,0 @@ -// Code generated by go generate gen.go; DO NOT EDIT. 
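// A short sketch of the atom lookups documented above; Lookup is
// case-sensitive and yields the zero Atom for strings outside the fixed
// table:

package main

import (
	"fmt"

	"golang.org/x/net/html/atom"
)

func main() {
	fmt.Println(atom.Lookup([]byte("div")) == atom.Div) // true
	fmt.Println(atom.Div.String())                      // "div"
	fmt.Println(atom.Lookup([]byte("DIV")) == 0)        // true: lookups are case-sensitive
	fmt.Println(atom.String([]byte("div")))             // "div", and may avoid allocating
}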
- -//go:generate go run gen.go - -package atom - -const ( - A Atom = 0x1 - Abbr Atom = 0x4 - Accept Atom = 0x1a06 - AcceptCharset Atom = 0x1a0e - Accesskey Atom = 0x2c09 - Acronym Atom = 0xaa07 - Action Atom = 0x27206 - Address Atom = 0x6f307 - Align Atom = 0xb105 - Allowfullscreen Atom = 0x2080f - Allowpaymentrequest Atom = 0xc113 - Allowusermedia Atom = 0xdd0e - Alt Atom = 0xf303 - Annotation Atom = 0x1c90a - AnnotationXml Atom = 0x1c90e - Applet Atom = 0x31906 - Area Atom = 0x35604 - Article Atom = 0x3fc07 - As Atom = 0x3c02 - Aside Atom = 0x10705 - Async Atom = 0xff05 - Audio Atom = 0x11505 - Autocomplete Atom = 0x2780c - Autofocus Atom = 0x12109 - Autoplay Atom = 0x13c08 - B Atom = 0x101 - Base Atom = 0x3b04 - Basefont Atom = 0x3b08 - Bdi Atom = 0xba03 - Bdo Atom = 0x14b03 - Bgsound Atom = 0x15e07 - Big Atom = 0x17003 - Blink Atom = 0x17305 - Blockquote Atom = 0x1870a - Body Atom = 0x2804 - Br Atom = 0x202 - Button Atom = 0x19106 - Canvas Atom = 0x10306 - Caption Atom = 0x23107 - Center Atom = 0x22006 - Challenge Atom = 0x29b09 - Charset Atom = 0x2107 - Checked Atom = 0x47907 - Cite Atom = 0x19c04 - Class Atom = 0x56405 - Code Atom = 0x5c504 - Col Atom = 0x1ab03 - Colgroup Atom = 0x1ab08 - Color Atom = 0x1bf05 - Cols Atom = 0x1c404 - Colspan Atom = 0x1c407 - Command Atom = 0x1d707 - Content Atom = 0x58b07 - Contenteditable Atom = 0x58b0f - Contextmenu Atom = 0x3800b - Controls Atom = 0x1de08 - Coords Atom = 0x1ea06 - Crossorigin Atom = 0x1fb0b - Data Atom = 0x4a504 - Datalist Atom = 0x4a508 - Datetime Atom = 0x2b808 - Dd Atom = 0x2d702 - Default Atom = 0x10a07 - Defer Atom = 0x5c705 - Del Atom = 0x45203 - Desc Atom = 0x56104 - Details Atom = 0x7207 - Dfn Atom = 0x8703 - Dialog Atom = 0xbb06 - Dir Atom = 0x9303 - Dirname Atom = 0x9307 - Disabled Atom = 0x16408 - Div Atom = 0x16b03 - Dl Atom = 0x5e602 - Download Atom = 0x46308 - Draggable Atom = 0x17a09 - Dropzone Atom = 0x40508 - Dt Atom = 0x64b02 - Em Atom = 0x6e02 - Embed Atom = 0x6e05 - Enctype Atom = 0x28d07 - Face Atom = 0x21e04 - Fieldset Atom = 0x22608 - Figcaption Atom = 0x22e0a - Figure Atom = 0x24806 - Font Atom = 0x3f04 - Footer Atom = 0xf606 - For Atom = 0x25403 - ForeignObject Atom = 0x2540d - Foreignobject Atom = 0x2610d - Form Atom = 0x26e04 - Formaction Atom = 0x26e0a - Formenctype Atom = 0x2890b - Formmethod Atom = 0x2a40a - Formnovalidate Atom = 0x2ae0e - Formtarget Atom = 0x2c00a - Frame Atom = 0x8b05 - Frameset Atom = 0x8b08 - H1 Atom = 0x15c02 - H2 Atom = 0x2de02 - H3 Atom = 0x30d02 - H4 Atom = 0x34502 - H5 Atom = 0x34f02 - H6 Atom = 0x64d02 - Head Atom = 0x33104 - Header Atom = 0x33106 - Headers Atom = 0x33107 - Height Atom = 0x5206 - Hgroup Atom = 0x2ca06 - Hidden Atom = 0x2d506 - High Atom = 0x2db04 - Hr Atom = 0x15702 - Href Atom = 0x2e004 - Hreflang Atom = 0x2e008 - Html Atom = 0x5604 - HttpEquiv Atom = 0x2e80a - I Atom = 0x601 - Icon Atom = 0x58a04 - Id Atom = 0x10902 - Iframe Atom = 0x2fc06 - Image Atom = 0x30205 - Img Atom = 0x30703 - Input Atom = 0x44b05 - Inputmode Atom = 0x44b09 - Ins Atom = 0x20403 - Integrity Atom = 0x23f09 - Is Atom = 0x16502 - Isindex Atom = 0x30f07 - Ismap Atom = 0x31605 - Itemid Atom = 0x38b06 - Itemprop Atom = 0x19d08 - Itemref Atom = 0x3cd07 - Itemscope Atom = 0x67109 - Itemtype Atom = 0x31f08 - Kbd Atom = 0xb903 - Keygen Atom = 0x3206 - Keytype Atom = 0xd607 - Kind Atom = 0x17704 - Label Atom = 0x5905 - Lang Atom = 0x2e404 - Legend Atom = 0x18106 - Li Atom = 0xb202 - Link Atom = 0x17404 - List Atom = 0x4a904 - Listing Atom = 0x4a907 - Loop Atom = 0x5d04 - Low Atom = 0xc303 - 
Main Atom = 0x1004 - Malignmark Atom = 0xb00a - Manifest Atom = 0x6d708 - Map Atom = 0x31803 - Mark Atom = 0xb604 - Marquee Atom = 0x32707 - Math Atom = 0x32e04 - Max Atom = 0x33d03 - Maxlength Atom = 0x33d09 - Media Atom = 0xe605 - Mediagroup Atom = 0xe60a - Menu Atom = 0x38704 - Menuitem Atom = 0x38708 - Meta Atom = 0x4b804 - Meter Atom = 0x9805 - Method Atom = 0x2a806 - Mglyph Atom = 0x30806 - Mi Atom = 0x34702 - Min Atom = 0x34703 - Minlength Atom = 0x34709 - Mn Atom = 0x2b102 - Mo Atom = 0xa402 - Ms Atom = 0x67402 - Mtext Atom = 0x35105 - Multiple Atom = 0x35f08 - Muted Atom = 0x36705 - Name Atom = 0x9604 - Nav Atom = 0x1303 - Nobr Atom = 0x3704 - Noembed Atom = 0x6c07 - Noframes Atom = 0x8908 - Nomodule Atom = 0xa208 - Nonce Atom = 0x1a605 - Noscript Atom = 0x21608 - Novalidate Atom = 0x2b20a - Object Atom = 0x26806 - Ol Atom = 0x13702 - Onabort Atom = 0x19507 - Onafterprint Atom = 0x2360c - Onautocomplete Atom = 0x2760e - Onautocompleteerror Atom = 0x27613 - Onauxclick Atom = 0x61f0a - Onbeforeprint Atom = 0x69e0d - Onbeforeunload Atom = 0x6e70e - Onblur Atom = 0x56d06 - Oncancel Atom = 0x11908 - Oncanplay Atom = 0x14d09 - Oncanplaythrough Atom = 0x14d10 - Onchange Atom = 0x41b08 - Onclick Atom = 0x2f507 - Onclose Atom = 0x36c07 - Oncontextmenu Atom = 0x37e0d - Oncopy Atom = 0x39106 - Oncuechange Atom = 0x3970b - Oncut Atom = 0x3a205 - Ondblclick Atom = 0x3a70a - Ondrag Atom = 0x3b106 - Ondragend Atom = 0x3b109 - Ondragenter Atom = 0x3ba0b - Ondragexit Atom = 0x3c50a - Ondragleave Atom = 0x3df0b - Ondragover Atom = 0x3ea0a - Ondragstart Atom = 0x3f40b - Ondrop Atom = 0x40306 - Ondurationchange Atom = 0x41310 - Onemptied Atom = 0x40a09 - Onended Atom = 0x42307 - Onerror Atom = 0x42a07 - Onfocus Atom = 0x43107 - Onhashchange Atom = 0x43d0c - Oninput Atom = 0x44907 - Oninvalid Atom = 0x45509 - Onkeydown Atom = 0x45e09 - Onkeypress Atom = 0x46b0a - Onkeyup Atom = 0x48007 - Onlanguagechange Atom = 0x48d10 - Onload Atom = 0x49d06 - Onloadeddata Atom = 0x49d0c - Onloadedmetadata Atom = 0x4b010 - Onloadend Atom = 0x4c609 - Onloadstart Atom = 0x4cf0b - Onmessage Atom = 0x4da09 - Onmessageerror Atom = 0x4da0e - Onmousedown Atom = 0x4e80b - Onmouseenter Atom = 0x4f30c - Onmouseleave Atom = 0x4ff0c - Onmousemove Atom = 0x50b0b - Onmouseout Atom = 0x5160a - Onmouseover Atom = 0x5230b - Onmouseup Atom = 0x52e09 - Onmousewheel Atom = 0x53c0c - Onoffline Atom = 0x54809 - Ononline Atom = 0x55108 - Onpagehide Atom = 0x5590a - Onpageshow Atom = 0x5730a - Onpaste Atom = 0x57f07 - Onpause Atom = 0x59a07 - Onplay Atom = 0x5a406 - Onplaying Atom = 0x5a409 - Onpopstate Atom = 0x5ad0a - Onprogress Atom = 0x5b70a - Onratechange Atom = 0x5cc0c - Onrejectionhandled Atom = 0x5d812 - Onreset Atom = 0x5ea07 - Onresize Atom = 0x5f108 - Onscroll Atom = 0x60008 - Onsecuritypolicyviolation Atom = 0x60819 - Onseeked Atom = 0x62908 - Onseeking Atom = 0x63109 - Onselect Atom = 0x63a08 - Onshow Atom = 0x64406 - Onsort Atom = 0x64f06 - Onstalled Atom = 0x65909 - Onstorage Atom = 0x66209 - Onsubmit Atom = 0x66b08 - Onsuspend Atom = 0x67b09 - Ontimeupdate Atom = 0x400c - Ontoggle Atom = 0x68408 - Onunhandledrejection Atom = 0x68c14 - Onunload Atom = 0x6ab08 - Onvolumechange Atom = 0x6b30e - Onwaiting Atom = 0x6c109 - Onwheel Atom = 0x6ca07 - Open Atom = 0x1a304 - Optgroup Atom = 0x5f08 - Optimum Atom = 0x6d107 - Option Atom = 0x6e306 - Output Atom = 0x51d06 - P Atom = 0xc01 - Param Atom = 0xc05 - Pattern Atom = 0x6607 - Picture Atom = 0x7b07 - Ping Atom = 0xef04 - Placeholder Atom = 0x1310b - Plaintext Atom = 0x1b209 - 
Playsinline Atom = 0x1400b - Poster Atom = 0x2cf06 - Pre Atom = 0x47003 - Preload Atom = 0x48607 - Progress Atom = 0x5b908 - Prompt Atom = 0x53606 - Public Atom = 0x58606 - Q Atom = 0xcf01 - Radiogroup Atom = 0x30a - Rb Atom = 0x3a02 - Readonly Atom = 0x35708 - Referrerpolicy Atom = 0x3d10e - Rel Atom = 0x48703 - Required Atom = 0x24c08 - Reversed Atom = 0x8008 - Rows Atom = 0x9c04 - Rowspan Atom = 0x9c07 - Rp Atom = 0x23c02 - Rt Atom = 0x19a02 - Rtc Atom = 0x19a03 - Ruby Atom = 0xfb04 - S Atom = 0x2501 - Samp Atom = 0x7804 - Sandbox Atom = 0x12907 - Scope Atom = 0x67505 - Scoped Atom = 0x67506 - Script Atom = 0x21806 - Seamless Atom = 0x37108 - Section Atom = 0x56807 - Select Atom = 0x63c06 - Selected Atom = 0x63c08 - Shape Atom = 0x1e505 - Size Atom = 0x5f504 - Sizes Atom = 0x5f505 - Slot Atom = 0x1ef04 - Small Atom = 0x20605 - Sortable Atom = 0x65108 - Sorted Atom = 0x33706 - Source Atom = 0x37806 - Spacer Atom = 0x43706 - Span Atom = 0x9f04 - Spellcheck Atom = 0x4740a - Src Atom = 0x5c003 - Srcdoc Atom = 0x5c006 - Srclang Atom = 0x5f907 - Srcset Atom = 0x6f906 - Start Atom = 0x3fa05 - Step Atom = 0x58304 - Strike Atom = 0xd206 - Strong Atom = 0x6dd06 - Style Atom = 0x6ff05 - Sub Atom = 0x66d03 - Summary Atom = 0x70407 - Sup Atom = 0x70b03 - Svg Atom = 0x70e03 - System Atom = 0x71106 - Tabindex Atom = 0x4be08 - Table Atom = 0x59505 - Target Atom = 0x2c406 - Tbody Atom = 0x2705 - Td Atom = 0x9202 - Template Atom = 0x71408 - Textarea Atom = 0x35208 - Tfoot Atom = 0xf505 - Th Atom = 0x15602 - Thead Atom = 0x33005 - Time Atom = 0x4204 - Title Atom = 0x11005 - Tr Atom = 0xcc02 - Track Atom = 0x1ba05 - Translate Atom = 0x1f209 - Tt Atom = 0x6802 - Type Atom = 0xd904 - Typemustmatch Atom = 0x2900d - U Atom = 0xb01 - Ul Atom = 0xa702 - Updateviacache Atom = 0x460e - Usemap Atom = 0x59e06 - Value Atom = 0x1505 - Var Atom = 0x16d03 - Video Atom = 0x2f105 - Wbr Atom = 0x57c03 - Width Atom = 0x64905 - Workertype Atom = 0x71c0a - Wrap Atom = 0x72604 - Xmp Atom = 0x12f03 -) - -const hash0 = 0x81cdf10e - -const maxAtomLen = 25 - -var table = [1 << 9]Atom{ - 0x1: 0xe60a, // mediagroup - 0x2: 0x2e404, // lang - 0x4: 0x2c09, // accesskey - 0x5: 0x8b08, // frameset - 0x7: 0x63a08, // onselect - 0x8: 0x71106, // system - 0xa: 0x64905, // width - 0xc: 0x2890b, // formenctype - 0xd: 0x13702, // ol - 0xe: 0x3970b, // oncuechange - 0x10: 0x14b03, // bdo - 0x11: 0x11505, // audio - 0x12: 0x17a09, // draggable - 0x14: 0x2f105, // video - 0x15: 0x2b102, // mn - 0x16: 0x38704, // menu - 0x17: 0x2cf06, // poster - 0x19: 0xf606, // footer - 0x1a: 0x2a806, // method - 0x1b: 0x2b808, // datetime - 0x1c: 0x19507, // onabort - 0x1d: 0x460e, // updateviacache - 0x1e: 0xff05, // async - 0x1f: 0x49d06, // onload - 0x21: 0x11908, // oncancel - 0x22: 0x62908, // onseeked - 0x23: 0x30205, // image - 0x24: 0x5d812, // onrejectionhandled - 0x26: 0x17404, // link - 0x27: 0x51d06, // output - 0x28: 0x33104, // head - 0x29: 0x4ff0c, // onmouseleave - 0x2a: 0x57f07, // onpaste - 0x2b: 0x5a409, // onplaying - 0x2c: 0x1c407, // colspan - 0x2f: 0x1bf05, // color - 0x30: 0x5f504, // size - 0x31: 0x2e80a, // http-equiv - 0x33: 0x601, // i - 0x34: 0x5590a, // onpagehide - 0x35: 0x68c14, // onunhandledrejection - 0x37: 0x42a07, // onerror - 0x3a: 0x3b08, // basefont - 0x3f: 0x1303, // nav - 0x40: 0x17704, // kind - 0x41: 0x35708, // readonly - 0x42: 0x30806, // mglyph - 0x44: 0xb202, // li - 0x46: 0x2d506, // hidden - 0x47: 0x70e03, // svg - 0x48: 0x58304, // step - 0x49: 0x23f09, // integrity - 0x4a: 0x58606, // public - 0x4c: 0x1ab03, 
// col - 0x4d: 0x1870a, // blockquote - 0x4e: 0x34f02, // h5 - 0x50: 0x5b908, // progress - 0x51: 0x5f505, // sizes - 0x52: 0x34502, // h4 - 0x56: 0x33005, // thead - 0x57: 0xd607, // keytype - 0x58: 0x5b70a, // onprogress - 0x59: 0x44b09, // inputmode - 0x5a: 0x3b109, // ondragend - 0x5d: 0x3a205, // oncut - 0x5e: 0x43706, // spacer - 0x5f: 0x1ab08, // colgroup - 0x62: 0x16502, // is - 0x65: 0x3c02, // as - 0x66: 0x54809, // onoffline - 0x67: 0x33706, // sorted - 0x69: 0x48d10, // onlanguagechange - 0x6c: 0x43d0c, // onhashchange - 0x6d: 0x9604, // name - 0x6e: 0xf505, // tfoot - 0x6f: 0x56104, // desc - 0x70: 0x33d03, // max - 0x72: 0x1ea06, // coords - 0x73: 0x30d02, // h3 - 0x74: 0x6e70e, // onbeforeunload - 0x75: 0x9c04, // rows - 0x76: 0x63c06, // select - 0x77: 0x9805, // meter - 0x78: 0x38b06, // itemid - 0x79: 0x53c0c, // onmousewheel - 0x7a: 0x5c006, // srcdoc - 0x7d: 0x1ba05, // track - 0x7f: 0x31f08, // itemtype - 0x82: 0xa402, // mo - 0x83: 0x41b08, // onchange - 0x84: 0x33107, // headers - 0x85: 0x5cc0c, // onratechange - 0x86: 0x60819, // onsecuritypolicyviolation - 0x88: 0x4a508, // datalist - 0x89: 0x4e80b, // onmousedown - 0x8a: 0x1ef04, // slot - 0x8b: 0x4b010, // onloadedmetadata - 0x8c: 0x1a06, // accept - 0x8d: 0x26806, // object - 0x91: 0x6b30e, // onvolumechange - 0x92: 0x2107, // charset - 0x93: 0x27613, // onautocompleteerror - 0x94: 0xc113, // allowpaymentrequest - 0x95: 0x2804, // body - 0x96: 0x10a07, // default - 0x97: 0x63c08, // selected - 0x98: 0x21e04, // face - 0x99: 0x1e505, // shape - 0x9b: 0x68408, // ontoggle - 0x9e: 0x64b02, // dt - 0x9f: 0xb604, // mark - 0xa1: 0xb01, // u - 0xa4: 0x6ab08, // onunload - 0xa5: 0x5d04, // loop - 0xa6: 0x16408, // disabled - 0xaa: 0x42307, // onended - 0xab: 0xb00a, // malignmark - 0xad: 0x67b09, // onsuspend - 0xae: 0x35105, // mtext - 0xaf: 0x64f06, // onsort - 0xb0: 0x19d08, // itemprop - 0xb3: 0x67109, // itemscope - 0xb4: 0x17305, // blink - 0xb6: 0x3b106, // ondrag - 0xb7: 0xa702, // ul - 0xb8: 0x26e04, // form - 0xb9: 0x12907, // sandbox - 0xba: 0x8b05, // frame - 0xbb: 0x1505, // value - 0xbc: 0x66209, // onstorage - 0xbf: 0xaa07, // acronym - 0xc0: 0x19a02, // rt - 0xc2: 0x202, // br - 0xc3: 0x22608, // fieldset - 0xc4: 0x2900d, // typemustmatch - 0xc5: 0xa208, // nomodule - 0xc6: 0x6c07, // noembed - 0xc7: 0x69e0d, // onbeforeprint - 0xc8: 0x19106, // button - 0xc9: 0x2f507, // onclick - 0xca: 0x70407, // summary - 0xcd: 0xfb04, // ruby - 0xce: 0x56405, // class - 0xcf: 0x3f40b, // ondragstart - 0xd0: 0x23107, // caption - 0xd4: 0xdd0e, // allowusermedia - 0xd5: 0x4cf0b, // onloadstart - 0xd9: 0x16b03, // div - 0xda: 0x4a904, // list - 0xdb: 0x32e04, // math - 0xdc: 0x44b05, // input - 0xdf: 0x3ea0a, // ondragover - 0xe0: 0x2de02, // h2 - 0xe2: 0x1b209, // plaintext - 0xe4: 0x4f30c, // onmouseenter - 0xe7: 0x47907, // checked - 0xe8: 0x47003, // pre - 0xea: 0x35f08, // multiple - 0xeb: 0xba03, // bdi - 0xec: 0x33d09, // maxlength - 0xed: 0xcf01, // q - 0xee: 0x61f0a, // onauxclick - 0xf0: 0x57c03, // wbr - 0xf2: 0x3b04, // base - 0xf3: 0x6e306, // option - 0xf5: 0x41310, // ondurationchange - 0xf7: 0x8908, // noframes - 0xf9: 0x40508, // dropzone - 0xfb: 0x67505, // scope - 0xfc: 0x8008, // reversed - 0xfd: 0x3ba0b, // ondragenter - 0xfe: 0x3fa05, // start - 0xff: 0x12f03, // xmp - 0x100: 0x5f907, // srclang - 0x101: 0x30703, // img - 0x104: 0x101, // b - 0x105: 0x25403, // for - 0x106: 0x10705, // aside - 0x107: 0x44907, // oninput - 0x108: 0x35604, // area - 0x109: 0x2a40a, // formmethod - 0x10a: 0x72604, 
// wrap - 0x10c: 0x23c02, // rp - 0x10d: 0x46b0a, // onkeypress - 0x10e: 0x6802, // tt - 0x110: 0x34702, // mi - 0x111: 0x36705, // muted - 0x112: 0xf303, // alt - 0x113: 0x5c504, // code - 0x114: 0x6e02, // em - 0x115: 0x3c50a, // ondragexit - 0x117: 0x9f04, // span - 0x119: 0x6d708, // manifest - 0x11a: 0x38708, // menuitem - 0x11b: 0x58b07, // content - 0x11d: 0x6c109, // onwaiting - 0x11f: 0x4c609, // onloadend - 0x121: 0x37e0d, // oncontextmenu - 0x123: 0x56d06, // onblur - 0x124: 0x3fc07, // article - 0x125: 0x9303, // dir - 0x126: 0xef04, // ping - 0x127: 0x24c08, // required - 0x128: 0x45509, // oninvalid - 0x129: 0xb105, // align - 0x12b: 0x58a04, // icon - 0x12c: 0x64d02, // h6 - 0x12d: 0x1c404, // cols - 0x12e: 0x22e0a, // figcaption - 0x12f: 0x45e09, // onkeydown - 0x130: 0x66b08, // onsubmit - 0x131: 0x14d09, // oncanplay - 0x132: 0x70b03, // sup - 0x133: 0xc01, // p - 0x135: 0x40a09, // onemptied - 0x136: 0x39106, // oncopy - 0x137: 0x19c04, // cite - 0x138: 0x3a70a, // ondblclick - 0x13a: 0x50b0b, // onmousemove - 0x13c: 0x66d03, // sub - 0x13d: 0x48703, // rel - 0x13e: 0x5f08, // optgroup - 0x142: 0x9c07, // rowspan - 0x143: 0x37806, // source - 0x144: 0x21608, // noscript - 0x145: 0x1a304, // open - 0x146: 0x20403, // ins - 0x147: 0x2540d, // foreignObject - 0x148: 0x5ad0a, // onpopstate - 0x14a: 0x28d07, // enctype - 0x14b: 0x2760e, // onautocomplete - 0x14c: 0x35208, // textarea - 0x14e: 0x2780c, // autocomplete - 0x14f: 0x15702, // hr - 0x150: 0x1de08, // controls - 0x151: 0x10902, // id - 0x153: 0x2360c, // onafterprint - 0x155: 0x2610d, // foreignobject - 0x156: 0x32707, // marquee - 0x157: 0x59a07, // onpause - 0x158: 0x5e602, // dl - 0x159: 0x5206, // height - 0x15a: 0x34703, // min - 0x15b: 0x9307, // dirname - 0x15c: 0x1f209, // translate - 0x15d: 0x5604, // html - 0x15e: 0x34709, // minlength - 0x15f: 0x48607, // preload - 0x160: 0x71408, // template - 0x161: 0x3df0b, // ondragleave - 0x162: 0x3a02, // rb - 0x164: 0x5c003, // src - 0x165: 0x6dd06, // strong - 0x167: 0x7804, // samp - 0x168: 0x6f307, // address - 0x169: 0x55108, // ononline - 0x16b: 0x1310b, // placeholder - 0x16c: 0x2c406, // target - 0x16d: 0x20605, // small - 0x16e: 0x6ca07, // onwheel - 0x16f: 0x1c90a, // annotation - 0x170: 0x4740a, // spellcheck - 0x171: 0x7207, // details - 0x172: 0x10306, // canvas - 0x173: 0x12109, // autofocus - 0x174: 0xc05, // param - 0x176: 0x46308, // download - 0x177: 0x45203, // del - 0x178: 0x36c07, // onclose - 0x179: 0xb903, // kbd - 0x17a: 0x31906, // applet - 0x17b: 0x2e004, // href - 0x17c: 0x5f108, // onresize - 0x17e: 0x49d0c, // onloadeddata - 0x180: 0xcc02, // tr - 0x181: 0x2c00a, // formtarget - 0x182: 0x11005, // title - 0x183: 0x6ff05, // style - 0x184: 0xd206, // strike - 0x185: 0x59e06, // usemap - 0x186: 0x2fc06, // iframe - 0x187: 0x1004, // main - 0x189: 0x7b07, // picture - 0x18c: 0x31605, // ismap - 0x18e: 0x4a504, // data - 0x18f: 0x5905, // label - 0x191: 0x3d10e, // referrerpolicy - 0x192: 0x15602, // th - 0x194: 0x53606, // prompt - 0x195: 0x56807, // section - 0x197: 0x6d107, // optimum - 0x198: 0x2db04, // high - 0x199: 0x15c02, // h1 - 0x19a: 0x65909, // onstalled - 0x19b: 0x16d03, // var - 0x19c: 0x4204, // time - 0x19e: 0x67402, // ms - 0x19f: 0x33106, // header - 0x1a0: 0x4da09, // onmessage - 0x1a1: 0x1a605, // nonce - 0x1a2: 0x26e0a, // formaction - 0x1a3: 0x22006, // center - 0x1a4: 0x3704, // nobr - 0x1a5: 0x59505, // table - 0x1a6: 0x4a907, // listing - 0x1a7: 0x18106, // legend - 0x1a9: 0x29b09, // challenge - 0x1aa: 0x24806, // 
figure - 0x1ab: 0xe605, // media - 0x1ae: 0xd904, // type - 0x1af: 0x3f04, // font - 0x1b0: 0x4da0e, // onmessageerror - 0x1b1: 0x37108, // seamless - 0x1b2: 0x8703, // dfn - 0x1b3: 0x5c705, // defer - 0x1b4: 0xc303, // low - 0x1b5: 0x19a03, // rtc - 0x1b6: 0x5230b, // onmouseover - 0x1b7: 0x2b20a, // novalidate - 0x1b8: 0x71c0a, // workertype - 0x1ba: 0x3cd07, // itemref - 0x1bd: 0x1, // a - 0x1be: 0x31803, // map - 0x1bf: 0x400c, // ontimeupdate - 0x1c0: 0x15e07, // bgsound - 0x1c1: 0x3206, // keygen - 0x1c2: 0x2705, // tbody - 0x1c5: 0x64406, // onshow - 0x1c7: 0x2501, // s - 0x1c8: 0x6607, // pattern - 0x1cc: 0x14d10, // oncanplaythrough - 0x1ce: 0x2d702, // dd - 0x1cf: 0x6f906, // srcset - 0x1d0: 0x17003, // big - 0x1d2: 0x65108, // sortable - 0x1d3: 0x48007, // onkeyup - 0x1d5: 0x5a406, // onplay - 0x1d7: 0x4b804, // meta - 0x1d8: 0x40306, // ondrop - 0x1da: 0x60008, // onscroll - 0x1db: 0x1fb0b, // crossorigin - 0x1dc: 0x5730a, // onpageshow - 0x1dd: 0x4, // abbr - 0x1de: 0x9202, // td - 0x1df: 0x58b0f, // contenteditable - 0x1e0: 0x27206, // action - 0x1e1: 0x1400b, // playsinline - 0x1e2: 0x43107, // onfocus - 0x1e3: 0x2e008, // hreflang - 0x1e5: 0x5160a, // onmouseout - 0x1e6: 0x5ea07, // onreset - 0x1e7: 0x13c08, // autoplay - 0x1e8: 0x63109, // onseeking - 0x1ea: 0x67506, // scoped - 0x1ec: 0x30a, // radiogroup - 0x1ee: 0x3800b, // contextmenu - 0x1ef: 0x52e09, // onmouseup - 0x1f1: 0x2ca06, // hgroup - 0x1f2: 0x2080f, // allowfullscreen - 0x1f3: 0x4be08, // tabindex - 0x1f6: 0x30f07, // isindex - 0x1f7: 0x1a0e, // accept-charset - 0x1f8: 0x2ae0e, // formnovalidate - 0x1fb: 0x1c90e, // annotation-xml - 0x1fc: 0x6e05, // embed - 0x1fd: 0x21806, // script - 0x1fe: 0xbb06, // dialog - 0x1ff: 0x1d707, // command -} - -const atomText = "abbradiogrouparamainavalueaccept-charsetbodyaccesskeygenobrb" + - "asefontimeupdateviacacheightmlabelooptgroupatternoembedetail" + - "sampictureversedfnoframesetdirnameterowspanomoduleacronymali" + - "gnmarkbdialogallowpaymentrequestrikeytypeallowusermediagroup" + - "ingaltfooterubyasyncanvasidefaultitleaudioncancelautofocusan" + - "dboxmplaceholderautoplaysinlinebdoncanplaythrough1bgsoundisa" + - "bledivarbigblinkindraggablegendblockquotebuttonabortcitempro" + - "penoncecolgrouplaintextrackcolorcolspannotation-xmlcommandco" + - "ntrolshapecoordslotranslatecrossoriginsmallowfullscreenoscri" + - "ptfacenterfieldsetfigcaptionafterprintegrityfigurequiredfore" + - "ignObjectforeignobjectformactionautocompleteerrorformenctype" + - "mustmatchallengeformmethodformnovalidatetimeformtargethgroup" + - "osterhiddenhigh2hreflanghttp-equivideonclickiframeimageimgly" + - "ph3isindexismappletitemtypemarqueematheadersortedmaxlength4m" + - "inlength5mtextareadonlymultiplemutedoncloseamlessourceoncont" + - "extmenuitemidoncopyoncuechangeoncutondblclickondragendondrag" + - "enterondragexitemreferrerpolicyondragleaveondragoverondragst" + - "articleondropzonemptiedondurationchangeonendedonerroronfocus" + - "paceronhashchangeoninputmodeloninvalidonkeydownloadonkeypres" + - "spellcheckedonkeyupreloadonlanguagechangeonloadeddatalisting" + - "onloadedmetadatabindexonloadendonloadstartonmessageerroronmo" + - "usedownonmouseenteronmouseleaveonmousemoveonmouseoutputonmou" + - "seoveronmouseupromptonmousewheelonofflineononlineonpagehides" + - "classectionbluronpageshowbronpastepublicontenteditableonpaus" + - "emaponplayingonpopstateonprogressrcdocodeferonratechangeonre" + - "jectionhandledonresetonresizesrclangonscrollonsecuritypolicy" + - 
"violationauxclickonseekedonseekingonselectedonshowidth6onsor" + - "tableonstalledonstorageonsubmitemscopedonsuspendontoggleonun" + - "handledrejectionbeforeprintonunloadonvolumechangeonwaitingon" + - "wheeloptimumanifestrongoptionbeforeunloaddressrcsetstylesumm" + - "arysupsvgsystemplateworkertypewrap" diff --git a/vendor/golang.org/x/net/html/const.go b/vendor/golang.org/x/net/html/const.go deleted file mode 100644 index ff7acf2d..00000000 --- a/vendor/golang.org/x/net/html/const.go +++ /dev/null @@ -1,111 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package html - -// Section 12.2.4.2 of the HTML5 specification says "The following elements -// have varying levels of special parsing rules". -// https://html.spec.whatwg.org/multipage/syntax.html#the-stack-of-open-elements -var isSpecialElementMap = map[string]bool{ - "address": true, - "applet": true, - "area": true, - "article": true, - "aside": true, - "base": true, - "basefont": true, - "bgsound": true, - "blockquote": true, - "body": true, - "br": true, - "button": true, - "caption": true, - "center": true, - "col": true, - "colgroup": true, - "dd": true, - "details": true, - "dir": true, - "div": true, - "dl": true, - "dt": true, - "embed": true, - "fieldset": true, - "figcaption": true, - "figure": true, - "footer": true, - "form": true, - "frame": true, - "frameset": true, - "h1": true, - "h2": true, - "h3": true, - "h4": true, - "h5": true, - "h6": true, - "head": true, - "header": true, - "hgroup": true, - "hr": true, - "html": true, - "iframe": true, - "img": true, - "input": true, - "keygen": true, // "keygen" has been removed from the spec, but are kept here for backwards compatibility. - "li": true, - "link": true, - "listing": true, - "main": true, - "marquee": true, - "menu": true, - "meta": true, - "nav": true, - "noembed": true, - "noframes": true, - "noscript": true, - "object": true, - "ol": true, - "p": true, - "param": true, - "plaintext": true, - "pre": true, - "script": true, - "section": true, - "select": true, - "source": true, - "style": true, - "summary": true, - "table": true, - "tbody": true, - "td": true, - "template": true, - "textarea": true, - "tfoot": true, - "th": true, - "thead": true, - "title": true, - "tr": true, - "track": true, - "ul": true, - "wbr": true, - "xmp": true, -} - -func isSpecialElement(element *Node) bool { - switch element.Namespace { - case "", "html": - return isSpecialElementMap[element.Data] - case "math": - switch element.Data { - case "mi", "mo", "mn", "ms", "mtext", "annotation-xml": - return true - } - case "svg": - switch element.Data { - case "foreignObject", "desc", "title": - return true - } - } - return false -} diff --git a/vendor/golang.org/x/net/html/doc.go b/vendor/golang.org/x/net/html/doc.go deleted file mode 100644 index 822ed42a..00000000 --- a/vendor/golang.org/x/net/html/doc.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package html implements an HTML5-compliant tokenizer and parser. - -Tokenization is done by creating a Tokenizer for an io.Reader r. It is the -caller's responsibility to ensure that r provides UTF-8 encoded HTML. 
- - z := html.NewTokenizer(r) - -Given a Tokenizer z, the HTML is tokenized by repeatedly calling z.Next(), -which parses the next token and returns its type, or an error: - - for { - tt := z.Next() - if tt == html.ErrorToken { - // ... - return ... - } - // Process the current token. - } - -There are two APIs for retrieving the current token. The high-level API is to -call Token; the low-level API is to call Text or TagName / TagAttr. Both APIs -allow optionally calling Raw after Next but before Token, Text, TagName, or -TagAttr. In EBNF notation, the valid call sequence per token is: - - Next {Raw} [ Token | Text | TagName {TagAttr} ] - -Token returns an independent data structure that completely describes a token. -Entities (such as "<") are unescaped, tag names and attribute keys are -lower-cased, and attributes are collected into a []Attribute. For example: - - for { - if z.Next() == html.ErrorToken { - // Returning io.EOF indicates success. - return z.Err() - } - emitToken(z.Token()) - } - -The low-level API performs fewer allocations and copies, but the contents of -the []byte values returned by Text, TagName and TagAttr may change on the next -call to Next. For example, to extract an HTML page's anchor text: - - depth := 0 - for { - tt := z.Next() - switch tt { - case html.ErrorToken: - return z.Err() - case html.TextToken: - if depth > 0 { - // emitBytes should copy the []byte it receives, - // if it doesn't process it immediately. - emitBytes(z.Text()) - } - case html.StartTagToken, html.EndTagToken: - tn, _ := z.TagName() - if len(tn) == 1 && tn[0] == 'a' { - if tt == html.StartTagToken { - depth++ - } else { - depth-- - } - } - } - } - -Parsing is done by calling Parse with an io.Reader, which returns the root of -the parse tree (the document element) as a *Node. It is the caller's -responsibility to ensure that the Reader provides UTF-8 encoded HTML. For -example, to process each anchor node in depth-first order: - - doc, err := html.Parse(r) - if err != nil { - // ... - } - var f func(*html.Node) - f = func(n *html.Node) { - if n.Type == html.ElementNode && n.Data == "a" { - // Do something with n... - } - for c := n.FirstChild; c != nil; c = c.NextSibling { - f(c) - } - } - f(doc) - -The relevant specifications include: -https://html.spec.whatwg.org/multipage/syntax.html and -https://html.spec.whatwg.org/multipage/syntax.html#tokenization -*/ -package html // import "golang.org/x/net/html" - -// The tokenization algorithm implemented by this package is not a line-by-line -// transliteration of the relatively verbose state-machine in the WHATWG -// specification. A more direct approach is used instead, where the program -// counter implies the state, such as whether it is tokenizing a tag or a text -// node. Specification compliance is verified by checking expected and actual -// outputs over a test suite rather than aiming for algorithmic fidelity. - -// TODO(nigeltao): Does a DOM API belong in this package or a separate one? -// TODO(nigeltao): How does parsing interact with a JavaScript engine? diff --git a/vendor/golang.org/x/net/html/doctype.go b/vendor/golang.org/x/net/html/doctype.go deleted file mode 100644 index c484e5a9..00000000 --- a/vendor/golang.org/x/net/html/doctype.go +++ /dev/null @@ -1,156 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
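
> Reviewer note: the `doc.go` removed above documents both the low-level tokenizer and the parser. Below is a self-contained, runnable version of its anchor-text example — a sketch only: it assumes `golang.org/x/net/html` is fetched as a normal module dependency now that the vendored copy is gone, and the helper name `anchorText` is ours, not the package's:

```go
package main

import (
	"errors"
	"fmt"
	"io"
	"strings"

	"golang.org/x/net/html"
)

// anchorText gathers the text inside <a> elements using the
// depth-counting pattern from the example in the deleted doc.go.
func anchorText(r io.Reader) ([]string, error) {
	z := html.NewTokenizer(r)
	var out []string
	depth := 0
	for {
		tt := z.Next()
		switch tt {
		case html.ErrorToken:
			if errors.Is(z.Err(), io.EOF) {
				return out, nil // io.EOF means the whole input was tokenized
			}
			return nil, z.Err()
		case html.TextToken:
			if depth > 0 {
				// Text returns a slice that may be overwritten by the
				// next call to Next, so copy it before keeping it.
				out = append(out, string(z.Text()))
			}
		case html.StartTagToken, html.EndTagToken:
			if tn, _ := z.TagName(); len(tn) == 1 && tn[0] == 'a' {
				if tt == html.StartTagToken {
					depth++
				} else {
					depth--
				}
			}
		}
	}
}

func main() {
	texts, err := anchorText(strings.NewReader(
		`<p><a href="/x">first</a> and <a href="/y">second</a></p>`))
	if err != nil {
		panic(err)
	}
	fmt.Println(texts) // [first second]
}
```

The low-level tokenizer is chosen here for the same reason the deleted docs give: it avoids building `Token` values and keeps allocations to the explicit copies.
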
- -package html - -import ( - "strings" -) - -// parseDoctype parses the data from a DoctypeToken into a name, -// public identifier, and system identifier. It returns a Node whose Type -// is DoctypeNode, whose Data is the name, and which has attributes -// named "system" and "public" for the two identifiers if they were present. -// quirks is whether the document should be parsed in "quirks mode". -func parseDoctype(s string) (n *Node, quirks bool) { - n = &Node{Type: DoctypeNode} - - // Find the name. - space := strings.IndexAny(s, whitespace) - if space == -1 { - space = len(s) - } - n.Data = s[:space] - // The comparison to "html" is case-sensitive. - if n.Data != "html" { - quirks = true - } - n.Data = strings.ToLower(n.Data) - s = strings.TrimLeft(s[space:], whitespace) - - if len(s) < 6 { - // It can't start with "PUBLIC" or "SYSTEM". - // Ignore the rest of the string. - return n, quirks || s != "" - } - - key := strings.ToLower(s[:6]) - s = s[6:] - for key == "public" || key == "system" { - s = strings.TrimLeft(s, whitespace) - if s == "" { - break - } - quote := s[0] - if quote != '"' && quote != '\'' { - break - } - s = s[1:] - q := strings.IndexRune(s, rune(quote)) - var id string - if q == -1 { - id = s - s = "" - } else { - id = s[:q] - s = s[q+1:] - } - n.Attr = append(n.Attr, Attribute{Key: key, Val: id}) - if key == "public" { - key = "system" - } else { - key = "" - } - } - - if key != "" || s != "" { - quirks = true - } else if len(n.Attr) > 0 { - if n.Attr[0].Key == "public" { - public := strings.ToLower(n.Attr[0].Val) - switch public { - case "-//w3o//dtd w3 html strict 3.0//en//", "-/w3d/dtd html 4.0 transitional/en", "html": - quirks = true - default: - for _, q := range quirkyIDs { - if strings.HasPrefix(public, q) { - quirks = true - break - } - } - } - // The following two public IDs only cause quirks mode if there is no system ID. - if len(n.Attr) == 1 && (strings.HasPrefix(public, "-//w3c//dtd html 4.01 frameset//") || - strings.HasPrefix(public, "-//w3c//dtd html 4.01 transitional//")) { - quirks = true - } - } - if lastAttr := n.Attr[len(n.Attr)-1]; lastAttr.Key == "system" && - strings.ToLower(lastAttr.Val) == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd" { - quirks = true - } - } - - return n, quirks -} - -// quirkyIDs is a list of public doctype identifiers that cause a document -// to be interpreted in quirks mode. The identifiers should be in lower case. 
-var quirkyIDs = []string{ - "+//silmaril//dtd html pro v0r11 19970101//", - "-//advasoft ltd//dtd html 3.0 aswedit + extensions//", - "-//as//dtd html 3.0 aswedit + extensions//", - "-//ietf//dtd html 2.0 level 1//", - "-//ietf//dtd html 2.0 level 2//", - "-//ietf//dtd html 2.0 strict level 1//", - "-//ietf//dtd html 2.0 strict level 2//", - "-//ietf//dtd html 2.0 strict//", - "-//ietf//dtd html 2.0//", - "-//ietf//dtd html 2.1e//", - "-//ietf//dtd html 3.0//", - "-//ietf//dtd html 3.2 final//", - "-//ietf//dtd html 3.2//", - "-//ietf//dtd html 3//", - "-//ietf//dtd html level 0//", - "-//ietf//dtd html level 1//", - "-//ietf//dtd html level 2//", - "-//ietf//dtd html level 3//", - "-//ietf//dtd html strict level 0//", - "-//ietf//dtd html strict level 1//", - "-//ietf//dtd html strict level 2//", - "-//ietf//dtd html strict level 3//", - "-//ietf//dtd html strict//", - "-//ietf//dtd html//", - "-//metrius//dtd metrius presentational//", - "-//microsoft//dtd internet explorer 2.0 html strict//", - "-//microsoft//dtd internet explorer 2.0 html//", - "-//microsoft//dtd internet explorer 2.0 tables//", - "-//microsoft//dtd internet explorer 3.0 html strict//", - "-//microsoft//dtd internet explorer 3.0 html//", - "-//microsoft//dtd internet explorer 3.0 tables//", - "-//netscape comm. corp.//dtd html//", - "-//netscape comm. corp.//dtd strict html//", - "-//o'reilly and associates//dtd html 2.0//", - "-//o'reilly and associates//dtd html extended 1.0//", - "-//o'reilly and associates//dtd html extended relaxed 1.0//", - "-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//", - "-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//", - "-//spyglass//dtd html 2.0 extended//", - "-//sq//dtd html 2.0 hotmetal + extensions//", - "-//sun microsystems corp.//dtd hotjava html//", - "-//sun microsystems corp.//dtd hotjava strict html//", - "-//w3c//dtd html 3 1995-03-24//", - "-//w3c//dtd html 3.2 draft//", - "-//w3c//dtd html 3.2 final//", - "-//w3c//dtd html 3.2//", - "-//w3c//dtd html 3.2s draft//", - "-//w3c//dtd html 4.0 frameset//", - "-//w3c//dtd html 4.0 transitional//", - "-//w3c//dtd html experimental 19960712//", - "-//w3c//dtd html experimental 970421//", - "-//w3c//dtd w3 html//", - "-//w3o//dtd w3 html 3.0//", - "-//webtechs//dtd mozilla html 2.0//", - "-//webtechs//dtd mozilla html//", -} diff --git a/vendor/golang.org/x/net/html/entity.go b/vendor/golang.org/x/net/html/entity.go deleted file mode 100644 index b628880a..00000000 --- a/vendor/golang.org/x/net/html/entity.go +++ /dev/null @@ -1,2253 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package html - -// All entities that do not end with ';' are 6 or fewer bytes long. -const longestEntityWithoutSemicolon = 6 - -// entity is a map from HTML entity names to their values. The semicolon matters: -// https://html.spec.whatwg.org/multipage/syntax.html#named-character-references -// lists both "amp" and "amp;" as two separate entries. 
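
> Reviewer note: `parseDoctype` in the deleted `doctype.go` lower-cases the doctype's public identifier and treats every `quirkyIDs` entry as a *prefix* match when deciding on quirks mode. A toy re-sketch of just that matching step — `quirkyPrefixes` is a trimmed stand-in, not the full table above:

```go
package main

import (
	"fmt"
	"strings"
)

// quirkyPrefixes is a trimmed stand-in for the deleted quirkyIDs
// table, which carries several dozen lower-cased public identifiers.
var quirkyPrefixes = []string{
	"-//ietf//dtd html//",
	"-//w3c//dtd html 3.2//",
	"-//netscape comm. corp.//dtd html//",
}

// quirksByPublicID mirrors the matching step in the deleted
// parseDoctype: the public identifier is lower-cased first, and each
// table entry matches as a prefix of it.
func quirksByPublicID(public string) bool {
	public = strings.ToLower(public)
	for _, q := range quirkyPrefixes {
		if strings.HasPrefix(public, q) {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(quirksByPublicID("-//IETF//DTD HTML//EN"))     // true
	fmt.Println(quirksByPublicID("-//W3C//DTD HTML 4.01//EN")) // false
}
```
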
-// -// Note that the HTML5 list is larger than the HTML4 list at -// http://www.w3.org/TR/html4/sgml/entities.html -var entity = map[string]rune{ - "AElig;": '\U000000C6', - "AMP;": '\U00000026', - "Aacute;": '\U000000C1', - "Abreve;": '\U00000102', - "Acirc;": '\U000000C2', - "Acy;": '\U00000410', - "Afr;": '\U0001D504', - "Agrave;": '\U000000C0', - "Alpha;": '\U00000391', - "Amacr;": '\U00000100', - "And;": '\U00002A53', - "Aogon;": '\U00000104', - "Aopf;": '\U0001D538', - "ApplyFunction;": '\U00002061', - "Aring;": '\U000000C5', - "Ascr;": '\U0001D49C', - "Assign;": '\U00002254', - "Atilde;": '\U000000C3', - "Auml;": '\U000000C4', - "Backslash;": '\U00002216', - "Barv;": '\U00002AE7', - "Barwed;": '\U00002306', - "Bcy;": '\U00000411', - "Because;": '\U00002235', - "Bernoullis;": '\U0000212C', - "Beta;": '\U00000392', - "Bfr;": '\U0001D505', - "Bopf;": '\U0001D539', - "Breve;": '\U000002D8', - "Bscr;": '\U0000212C', - "Bumpeq;": '\U0000224E', - "CHcy;": '\U00000427', - "COPY;": '\U000000A9', - "Cacute;": '\U00000106', - "Cap;": '\U000022D2', - "CapitalDifferentialD;": '\U00002145', - "Cayleys;": '\U0000212D', - "Ccaron;": '\U0000010C', - "Ccedil;": '\U000000C7', - "Ccirc;": '\U00000108', - "Cconint;": '\U00002230', - "Cdot;": '\U0000010A', - "Cedilla;": '\U000000B8', - "CenterDot;": '\U000000B7', - "Cfr;": '\U0000212D', - "Chi;": '\U000003A7', - "CircleDot;": '\U00002299', - "CircleMinus;": '\U00002296', - "CirclePlus;": '\U00002295', - "CircleTimes;": '\U00002297', - "ClockwiseContourIntegral;": '\U00002232', - "CloseCurlyDoubleQuote;": '\U0000201D', - "CloseCurlyQuote;": '\U00002019', - "Colon;": '\U00002237', - "Colone;": '\U00002A74', - "Congruent;": '\U00002261', - "Conint;": '\U0000222F', - "ContourIntegral;": '\U0000222E', - "Copf;": '\U00002102', - "Coproduct;": '\U00002210', - "CounterClockwiseContourIntegral;": '\U00002233', - "Cross;": '\U00002A2F', - "Cscr;": '\U0001D49E', - "Cup;": '\U000022D3', - "CupCap;": '\U0000224D', - "DD;": '\U00002145', - "DDotrahd;": '\U00002911', - "DJcy;": '\U00000402', - "DScy;": '\U00000405', - "DZcy;": '\U0000040F', - "Dagger;": '\U00002021', - "Darr;": '\U000021A1', - "Dashv;": '\U00002AE4', - "Dcaron;": '\U0000010E', - "Dcy;": '\U00000414', - "Del;": '\U00002207', - "Delta;": '\U00000394', - "Dfr;": '\U0001D507', - "DiacriticalAcute;": '\U000000B4', - "DiacriticalDot;": '\U000002D9', - "DiacriticalDoubleAcute;": '\U000002DD', - "DiacriticalGrave;": '\U00000060', - "DiacriticalTilde;": '\U000002DC', - "Diamond;": '\U000022C4', - "DifferentialD;": '\U00002146', - "Dopf;": '\U0001D53B', - "Dot;": '\U000000A8', - "DotDot;": '\U000020DC', - "DotEqual;": '\U00002250', - "DoubleContourIntegral;": '\U0000222F', - "DoubleDot;": '\U000000A8', - "DoubleDownArrow;": '\U000021D3', - "DoubleLeftArrow;": '\U000021D0', - "DoubleLeftRightArrow;": '\U000021D4', - "DoubleLeftTee;": '\U00002AE4', - "DoubleLongLeftArrow;": '\U000027F8', - "DoubleLongLeftRightArrow;": '\U000027FA', - "DoubleLongRightArrow;": '\U000027F9', - "DoubleRightArrow;": '\U000021D2', - "DoubleRightTee;": '\U000022A8', - "DoubleUpArrow;": '\U000021D1', - "DoubleUpDownArrow;": '\U000021D5', - "DoubleVerticalBar;": '\U00002225', - "DownArrow;": '\U00002193', - "DownArrowBar;": '\U00002913', - "DownArrowUpArrow;": '\U000021F5', - "DownBreve;": '\U00000311', - "DownLeftRightVector;": '\U00002950', - "DownLeftTeeVector;": '\U0000295E', - "DownLeftVector;": '\U000021BD', - "DownLeftVectorBar;": '\U00002956', - "DownRightTeeVector;": '\U0000295F', - "DownRightVector;": '\U000021C1', - 
"DownRightVectorBar;": '\U00002957', - "DownTee;": '\U000022A4', - "DownTeeArrow;": '\U000021A7', - "Downarrow;": '\U000021D3', - "Dscr;": '\U0001D49F', - "Dstrok;": '\U00000110', - "ENG;": '\U0000014A', - "ETH;": '\U000000D0', - "Eacute;": '\U000000C9', - "Ecaron;": '\U0000011A', - "Ecirc;": '\U000000CA', - "Ecy;": '\U0000042D', - "Edot;": '\U00000116', - "Efr;": '\U0001D508', - "Egrave;": '\U000000C8', - "Element;": '\U00002208', - "Emacr;": '\U00000112', - "EmptySmallSquare;": '\U000025FB', - "EmptyVerySmallSquare;": '\U000025AB', - "Eogon;": '\U00000118', - "Eopf;": '\U0001D53C', - "Epsilon;": '\U00000395', - "Equal;": '\U00002A75', - "EqualTilde;": '\U00002242', - "Equilibrium;": '\U000021CC', - "Escr;": '\U00002130', - "Esim;": '\U00002A73', - "Eta;": '\U00000397', - "Euml;": '\U000000CB', - "Exists;": '\U00002203', - "ExponentialE;": '\U00002147', - "Fcy;": '\U00000424', - "Ffr;": '\U0001D509', - "FilledSmallSquare;": '\U000025FC', - "FilledVerySmallSquare;": '\U000025AA', - "Fopf;": '\U0001D53D', - "ForAll;": '\U00002200', - "Fouriertrf;": '\U00002131', - "Fscr;": '\U00002131', - "GJcy;": '\U00000403', - "GT;": '\U0000003E', - "Gamma;": '\U00000393', - "Gammad;": '\U000003DC', - "Gbreve;": '\U0000011E', - "Gcedil;": '\U00000122', - "Gcirc;": '\U0000011C', - "Gcy;": '\U00000413', - "Gdot;": '\U00000120', - "Gfr;": '\U0001D50A', - "Gg;": '\U000022D9', - "Gopf;": '\U0001D53E', - "GreaterEqual;": '\U00002265', - "GreaterEqualLess;": '\U000022DB', - "GreaterFullEqual;": '\U00002267', - "GreaterGreater;": '\U00002AA2', - "GreaterLess;": '\U00002277', - "GreaterSlantEqual;": '\U00002A7E', - "GreaterTilde;": '\U00002273', - "Gscr;": '\U0001D4A2', - "Gt;": '\U0000226B', - "HARDcy;": '\U0000042A', - "Hacek;": '\U000002C7', - "Hat;": '\U0000005E', - "Hcirc;": '\U00000124', - "Hfr;": '\U0000210C', - "HilbertSpace;": '\U0000210B', - "Hopf;": '\U0000210D', - "HorizontalLine;": '\U00002500', - "Hscr;": '\U0000210B', - "Hstrok;": '\U00000126', - "HumpDownHump;": '\U0000224E', - "HumpEqual;": '\U0000224F', - "IEcy;": '\U00000415', - "IJlig;": '\U00000132', - "IOcy;": '\U00000401', - "Iacute;": '\U000000CD', - "Icirc;": '\U000000CE', - "Icy;": '\U00000418', - "Idot;": '\U00000130', - "Ifr;": '\U00002111', - "Igrave;": '\U000000CC', - "Im;": '\U00002111', - "Imacr;": '\U0000012A', - "ImaginaryI;": '\U00002148', - "Implies;": '\U000021D2', - "Int;": '\U0000222C', - "Integral;": '\U0000222B', - "Intersection;": '\U000022C2', - "InvisibleComma;": '\U00002063', - "InvisibleTimes;": '\U00002062', - "Iogon;": '\U0000012E', - "Iopf;": '\U0001D540', - "Iota;": '\U00000399', - "Iscr;": '\U00002110', - "Itilde;": '\U00000128', - "Iukcy;": '\U00000406', - "Iuml;": '\U000000CF', - "Jcirc;": '\U00000134', - "Jcy;": '\U00000419', - "Jfr;": '\U0001D50D', - "Jopf;": '\U0001D541', - "Jscr;": '\U0001D4A5', - "Jsercy;": '\U00000408', - "Jukcy;": '\U00000404', - "KHcy;": '\U00000425', - "KJcy;": '\U0000040C', - "Kappa;": '\U0000039A', - "Kcedil;": '\U00000136', - "Kcy;": '\U0000041A', - "Kfr;": '\U0001D50E', - "Kopf;": '\U0001D542', - "Kscr;": '\U0001D4A6', - "LJcy;": '\U00000409', - "LT;": '\U0000003C', - "Lacute;": '\U00000139', - "Lambda;": '\U0000039B', - "Lang;": '\U000027EA', - "Laplacetrf;": '\U00002112', - "Larr;": '\U0000219E', - "Lcaron;": '\U0000013D', - "Lcedil;": '\U0000013B', - "Lcy;": '\U0000041B', - "LeftAngleBracket;": '\U000027E8', - "LeftArrow;": '\U00002190', - "LeftArrowBar;": '\U000021E4', - "LeftArrowRightArrow;": '\U000021C6', - "LeftCeiling;": '\U00002308', - "LeftDoubleBracket;": '\U000027E6', 
- "LeftDownTeeVector;": '\U00002961', - "LeftDownVector;": '\U000021C3', - "LeftDownVectorBar;": '\U00002959', - "LeftFloor;": '\U0000230A', - "LeftRightArrow;": '\U00002194', - "LeftRightVector;": '\U0000294E', - "LeftTee;": '\U000022A3', - "LeftTeeArrow;": '\U000021A4', - "LeftTeeVector;": '\U0000295A', - "LeftTriangle;": '\U000022B2', - "LeftTriangleBar;": '\U000029CF', - "LeftTriangleEqual;": '\U000022B4', - "LeftUpDownVector;": '\U00002951', - "LeftUpTeeVector;": '\U00002960', - "LeftUpVector;": '\U000021BF', - "LeftUpVectorBar;": '\U00002958', - "LeftVector;": '\U000021BC', - "LeftVectorBar;": '\U00002952', - "Leftarrow;": '\U000021D0', - "Leftrightarrow;": '\U000021D4', - "LessEqualGreater;": '\U000022DA', - "LessFullEqual;": '\U00002266', - "LessGreater;": '\U00002276', - "LessLess;": '\U00002AA1', - "LessSlantEqual;": '\U00002A7D', - "LessTilde;": '\U00002272', - "Lfr;": '\U0001D50F', - "Ll;": '\U000022D8', - "Lleftarrow;": '\U000021DA', - "Lmidot;": '\U0000013F', - "LongLeftArrow;": '\U000027F5', - "LongLeftRightArrow;": '\U000027F7', - "LongRightArrow;": '\U000027F6', - "Longleftarrow;": '\U000027F8', - "Longleftrightarrow;": '\U000027FA', - "Longrightarrow;": '\U000027F9', - "Lopf;": '\U0001D543', - "LowerLeftArrow;": '\U00002199', - "LowerRightArrow;": '\U00002198', - "Lscr;": '\U00002112', - "Lsh;": '\U000021B0', - "Lstrok;": '\U00000141', - "Lt;": '\U0000226A', - "Map;": '\U00002905', - "Mcy;": '\U0000041C', - "MediumSpace;": '\U0000205F', - "Mellintrf;": '\U00002133', - "Mfr;": '\U0001D510', - "MinusPlus;": '\U00002213', - "Mopf;": '\U0001D544', - "Mscr;": '\U00002133', - "Mu;": '\U0000039C', - "NJcy;": '\U0000040A', - "Nacute;": '\U00000143', - "Ncaron;": '\U00000147', - "Ncedil;": '\U00000145', - "Ncy;": '\U0000041D', - "NegativeMediumSpace;": '\U0000200B', - "NegativeThickSpace;": '\U0000200B', - "NegativeThinSpace;": '\U0000200B', - "NegativeVeryThinSpace;": '\U0000200B', - "NestedGreaterGreater;": '\U0000226B', - "NestedLessLess;": '\U0000226A', - "NewLine;": '\U0000000A', - "Nfr;": '\U0001D511', - "NoBreak;": '\U00002060', - "NonBreakingSpace;": '\U000000A0', - "Nopf;": '\U00002115', - "Not;": '\U00002AEC', - "NotCongruent;": '\U00002262', - "NotCupCap;": '\U0000226D', - "NotDoubleVerticalBar;": '\U00002226', - "NotElement;": '\U00002209', - "NotEqual;": '\U00002260', - "NotExists;": '\U00002204', - "NotGreater;": '\U0000226F', - "NotGreaterEqual;": '\U00002271', - "NotGreaterLess;": '\U00002279', - "NotGreaterTilde;": '\U00002275', - "NotLeftTriangle;": '\U000022EA', - "NotLeftTriangleEqual;": '\U000022EC', - "NotLess;": '\U0000226E', - "NotLessEqual;": '\U00002270', - "NotLessGreater;": '\U00002278', - "NotLessTilde;": '\U00002274', - "NotPrecedes;": '\U00002280', - "NotPrecedesSlantEqual;": '\U000022E0', - "NotReverseElement;": '\U0000220C', - "NotRightTriangle;": '\U000022EB', - "NotRightTriangleEqual;": '\U000022ED', - "NotSquareSubsetEqual;": '\U000022E2', - "NotSquareSupersetEqual;": '\U000022E3', - "NotSubsetEqual;": '\U00002288', - "NotSucceeds;": '\U00002281', - "NotSucceedsSlantEqual;": '\U000022E1', - "NotSupersetEqual;": '\U00002289', - "NotTilde;": '\U00002241', - "NotTildeEqual;": '\U00002244', - "NotTildeFullEqual;": '\U00002247', - "NotTildeTilde;": '\U00002249', - "NotVerticalBar;": '\U00002224', - "Nscr;": '\U0001D4A9', - "Ntilde;": '\U000000D1', - "Nu;": '\U0000039D', - "OElig;": '\U00000152', - "Oacute;": '\U000000D3', - "Ocirc;": '\U000000D4', - "Ocy;": '\U0000041E', - "Odblac;": '\U00000150', - "Ofr;": '\U0001D512', - "Ograve;": '\U000000D2', - 
"Omacr;": '\U0000014C', - "Omega;": '\U000003A9', - "Omicron;": '\U0000039F', - "Oopf;": '\U0001D546', - "OpenCurlyDoubleQuote;": '\U0000201C', - "OpenCurlyQuote;": '\U00002018', - "Or;": '\U00002A54', - "Oscr;": '\U0001D4AA', - "Oslash;": '\U000000D8', - "Otilde;": '\U000000D5', - "Otimes;": '\U00002A37', - "Ouml;": '\U000000D6', - "OverBar;": '\U0000203E', - "OverBrace;": '\U000023DE', - "OverBracket;": '\U000023B4', - "OverParenthesis;": '\U000023DC', - "PartialD;": '\U00002202', - "Pcy;": '\U0000041F', - "Pfr;": '\U0001D513', - "Phi;": '\U000003A6', - "Pi;": '\U000003A0', - "PlusMinus;": '\U000000B1', - "Poincareplane;": '\U0000210C', - "Popf;": '\U00002119', - "Pr;": '\U00002ABB', - "Precedes;": '\U0000227A', - "PrecedesEqual;": '\U00002AAF', - "PrecedesSlantEqual;": '\U0000227C', - "PrecedesTilde;": '\U0000227E', - "Prime;": '\U00002033', - "Product;": '\U0000220F', - "Proportion;": '\U00002237', - "Proportional;": '\U0000221D', - "Pscr;": '\U0001D4AB', - "Psi;": '\U000003A8', - "QUOT;": '\U00000022', - "Qfr;": '\U0001D514', - "Qopf;": '\U0000211A', - "Qscr;": '\U0001D4AC', - "RBarr;": '\U00002910', - "REG;": '\U000000AE', - "Racute;": '\U00000154', - "Rang;": '\U000027EB', - "Rarr;": '\U000021A0', - "Rarrtl;": '\U00002916', - "Rcaron;": '\U00000158', - "Rcedil;": '\U00000156', - "Rcy;": '\U00000420', - "Re;": '\U0000211C', - "ReverseElement;": '\U0000220B', - "ReverseEquilibrium;": '\U000021CB', - "ReverseUpEquilibrium;": '\U0000296F', - "Rfr;": '\U0000211C', - "Rho;": '\U000003A1', - "RightAngleBracket;": '\U000027E9', - "RightArrow;": '\U00002192', - "RightArrowBar;": '\U000021E5', - "RightArrowLeftArrow;": '\U000021C4', - "RightCeiling;": '\U00002309', - "RightDoubleBracket;": '\U000027E7', - "RightDownTeeVector;": '\U0000295D', - "RightDownVector;": '\U000021C2', - "RightDownVectorBar;": '\U00002955', - "RightFloor;": '\U0000230B', - "RightTee;": '\U000022A2', - "RightTeeArrow;": '\U000021A6', - "RightTeeVector;": '\U0000295B', - "RightTriangle;": '\U000022B3', - "RightTriangleBar;": '\U000029D0', - "RightTriangleEqual;": '\U000022B5', - "RightUpDownVector;": '\U0000294F', - "RightUpTeeVector;": '\U0000295C', - "RightUpVector;": '\U000021BE', - "RightUpVectorBar;": '\U00002954', - "RightVector;": '\U000021C0', - "RightVectorBar;": '\U00002953', - "Rightarrow;": '\U000021D2', - "Ropf;": '\U0000211D', - "RoundImplies;": '\U00002970', - "Rrightarrow;": '\U000021DB', - "Rscr;": '\U0000211B', - "Rsh;": '\U000021B1', - "RuleDelayed;": '\U000029F4', - "SHCHcy;": '\U00000429', - "SHcy;": '\U00000428', - "SOFTcy;": '\U0000042C', - "Sacute;": '\U0000015A', - "Sc;": '\U00002ABC', - "Scaron;": '\U00000160', - "Scedil;": '\U0000015E', - "Scirc;": '\U0000015C', - "Scy;": '\U00000421', - "Sfr;": '\U0001D516', - "ShortDownArrow;": '\U00002193', - "ShortLeftArrow;": '\U00002190', - "ShortRightArrow;": '\U00002192', - "ShortUpArrow;": '\U00002191', - "Sigma;": '\U000003A3', - "SmallCircle;": '\U00002218', - "Sopf;": '\U0001D54A', - "Sqrt;": '\U0000221A', - "Square;": '\U000025A1', - "SquareIntersection;": '\U00002293', - "SquareSubset;": '\U0000228F', - "SquareSubsetEqual;": '\U00002291', - "SquareSuperset;": '\U00002290', - "SquareSupersetEqual;": '\U00002292', - "SquareUnion;": '\U00002294', - "Sscr;": '\U0001D4AE', - "Star;": '\U000022C6', - "Sub;": '\U000022D0', - "Subset;": '\U000022D0', - "SubsetEqual;": '\U00002286', - "Succeeds;": '\U0000227B', - "SucceedsEqual;": '\U00002AB0', - "SucceedsSlantEqual;": '\U0000227D', - "SucceedsTilde;": '\U0000227F', - "SuchThat;": '\U0000220B', - "Sum;": 
'\U00002211', - "Sup;": '\U000022D1', - "Superset;": '\U00002283', - "SupersetEqual;": '\U00002287', - "Supset;": '\U000022D1', - "THORN;": '\U000000DE', - "TRADE;": '\U00002122', - "TSHcy;": '\U0000040B', - "TScy;": '\U00000426', - "Tab;": '\U00000009', - "Tau;": '\U000003A4', - "Tcaron;": '\U00000164', - "Tcedil;": '\U00000162', - "Tcy;": '\U00000422', - "Tfr;": '\U0001D517', - "Therefore;": '\U00002234', - "Theta;": '\U00000398', - "ThinSpace;": '\U00002009', - "Tilde;": '\U0000223C', - "TildeEqual;": '\U00002243', - "TildeFullEqual;": '\U00002245', - "TildeTilde;": '\U00002248', - "Topf;": '\U0001D54B', - "TripleDot;": '\U000020DB', - "Tscr;": '\U0001D4AF', - "Tstrok;": '\U00000166', - "Uacute;": '\U000000DA', - "Uarr;": '\U0000219F', - "Uarrocir;": '\U00002949', - "Ubrcy;": '\U0000040E', - "Ubreve;": '\U0000016C', - "Ucirc;": '\U000000DB', - "Ucy;": '\U00000423', - "Udblac;": '\U00000170', - "Ufr;": '\U0001D518', - "Ugrave;": '\U000000D9', - "Umacr;": '\U0000016A', - "UnderBar;": '\U0000005F', - "UnderBrace;": '\U000023DF', - "UnderBracket;": '\U000023B5', - "UnderParenthesis;": '\U000023DD', - "Union;": '\U000022C3', - "UnionPlus;": '\U0000228E', - "Uogon;": '\U00000172', - "Uopf;": '\U0001D54C', - "UpArrow;": '\U00002191', - "UpArrowBar;": '\U00002912', - "UpArrowDownArrow;": '\U000021C5', - "UpDownArrow;": '\U00002195', - "UpEquilibrium;": '\U0000296E', - "UpTee;": '\U000022A5', - "UpTeeArrow;": '\U000021A5', - "Uparrow;": '\U000021D1', - "Updownarrow;": '\U000021D5', - "UpperLeftArrow;": '\U00002196', - "UpperRightArrow;": '\U00002197', - "Upsi;": '\U000003D2', - "Upsilon;": '\U000003A5', - "Uring;": '\U0000016E', - "Uscr;": '\U0001D4B0', - "Utilde;": '\U00000168', - "Uuml;": '\U000000DC', - "VDash;": '\U000022AB', - "Vbar;": '\U00002AEB', - "Vcy;": '\U00000412', - "Vdash;": '\U000022A9', - "Vdashl;": '\U00002AE6', - "Vee;": '\U000022C1', - "Verbar;": '\U00002016', - "Vert;": '\U00002016', - "VerticalBar;": '\U00002223', - "VerticalLine;": '\U0000007C', - "VerticalSeparator;": '\U00002758', - "VerticalTilde;": '\U00002240', - "VeryThinSpace;": '\U0000200A', - "Vfr;": '\U0001D519', - "Vopf;": '\U0001D54D', - "Vscr;": '\U0001D4B1', - "Vvdash;": '\U000022AA', - "Wcirc;": '\U00000174', - "Wedge;": '\U000022C0', - "Wfr;": '\U0001D51A', - "Wopf;": '\U0001D54E', - "Wscr;": '\U0001D4B2', - "Xfr;": '\U0001D51B', - "Xi;": '\U0000039E', - "Xopf;": '\U0001D54F', - "Xscr;": '\U0001D4B3', - "YAcy;": '\U0000042F', - "YIcy;": '\U00000407', - "YUcy;": '\U0000042E', - "Yacute;": '\U000000DD', - "Ycirc;": '\U00000176', - "Ycy;": '\U0000042B', - "Yfr;": '\U0001D51C', - "Yopf;": '\U0001D550', - "Yscr;": '\U0001D4B4', - "Yuml;": '\U00000178', - "ZHcy;": '\U00000416', - "Zacute;": '\U00000179', - "Zcaron;": '\U0000017D', - "Zcy;": '\U00000417', - "Zdot;": '\U0000017B', - "ZeroWidthSpace;": '\U0000200B', - "Zeta;": '\U00000396', - "Zfr;": '\U00002128', - "Zopf;": '\U00002124', - "Zscr;": '\U0001D4B5', - "aacute;": '\U000000E1', - "abreve;": '\U00000103', - "ac;": '\U0000223E', - "acd;": '\U0000223F', - "acirc;": '\U000000E2', - "acute;": '\U000000B4', - "acy;": '\U00000430', - "aelig;": '\U000000E6', - "af;": '\U00002061', - "afr;": '\U0001D51E', - "agrave;": '\U000000E0', - "alefsym;": '\U00002135', - "aleph;": '\U00002135', - "alpha;": '\U000003B1', - "amacr;": '\U00000101', - "amalg;": '\U00002A3F', - "amp;": '\U00000026', - "and;": '\U00002227', - "andand;": '\U00002A55', - "andd;": '\U00002A5C', - "andslope;": '\U00002A58', - "andv;": '\U00002A5A', - "ang;": '\U00002220', - "ange;": '\U000029A4', - 
"angle;": '\U00002220', - "angmsd;": '\U00002221', - "angmsdaa;": '\U000029A8', - "angmsdab;": '\U000029A9', - "angmsdac;": '\U000029AA', - "angmsdad;": '\U000029AB', - "angmsdae;": '\U000029AC', - "angmsdaf;": '\U000029AD', - "angmsdag;": '\U000029AE', - "angmsdah;": '\U000029AF', - "angrt;": '\U0000221F', - "angrtvb;": '\U000022BE', - "angrtvbd;": '\U0000299D', - "angsph;": '\U00002222', - "angst;": '\U000000C5', - "angzarr;": '\U0000237C', - "aogon;": '\U00000105', - "aopf;": '\U0001D552', - "ap;": '\U00002248', - "apE;": '\U00002A70', - "apacir;": '\U00002A6F', - "ape;": '\U0000224A', - "apid;": '\U0000224B', - "apos;": '\U00000027', - "approx;": '\U00002248', - "approxeq;": '\U0000224A', - "aring;": '\U000000E5', - "ascr;": '\U0001D4B6', - "ast;": '\U0000002A', - "asymp;": '\U00002248', - "asympeq;": '\U0000224D', - "atilde;": '\U000000E3', - "auml;": '\U000000E4', - "awconint;": '\U00002233', - "awint;": '\U00002A11', - "bNot;": '\U00002AED', - "backcong;": '\U0000224C', - "backepsilon;": '\U000003F6', - "backprime;": '\U00002035', - "backsim;": '\U0000223D', - "backsimeq;": '\U000022CD', - "barvee;": '\U000022BD', - "barwed;": '\U00002305', - "barwedge;": '\U00002305', - "bbrk;": '\U000023B5', - "bbrktbrk;": '\U000023B6', - "bcong;": '\U0000224C', - "bcy;": '\U00000431', - "bdquo;": '\U0000201E', - "becaus;": '\U00002235', - "because;": '\U00002235', - "bemptyv;": '\U000029B0', - "bepsi;": '\U000003F6', - "bernou;": '\U0000212C', - "beta;": '\U000003B2', - "beth;": '\U00002136', - "between;": '\U0000226C', - "bfr;": '\U0001D51F', - "bigcap;": '\U000022C2', - "bigcirc;": '\U000025EF', - "bigcup;": '\U000022C3', - "bigodot;": '\U00002A00', - "bigoplus;": '\U00002A01', - "bigotimes;": '\U00002A02', - "bigsqcup;": '\U00002A06', - "bigstar;": '\U00002605', - "bigtriangledown;": '\U000025BD', - "bigtriangleup;": '\U000025B3', - "biguplus;": '\U00002A04', - "bigvee;": '\U000022C1', - "bigwedge;": '\U000022C0', - "bkarow;": '\U0000290D', - "blacklozenge;": '\U000029EB', - "blacksquare;": '\U000025AA', - "blacktriangle;": '\U000025B4', - "blacktriangledown;": '\U000025BE', - "blacktriangleleft;": '\U000025C2', - "blacktriangleright;": '\U000025B8', - "blank;": '\U00002423', - "blk12;": '\U00002592', - "blk14;": '\U00002591', - "blk34;": '\U00002593', - "block;": '\U00002588', - "bnot;": '\U00002310', - "bopf;": '\U0001D553', - "bot;": '\U000022A5', - "bottom;": '\U000022A5', - "bowtie;": '\U000022C8', - "boxDL;": '\U00002557', - "boxDR;": '\U00002554', - "boxDl;": '\U00002556', - "boxDr;": '\U00002553', - "boxH;": '\U00002550', - "boxHD;": '\U00002566', - "boxHU;": '\U00002569', - "boxHd;": '\U00002564', - "boxHu;": '\U00002567', - "boxUL;": '\U0000255D', - "boxUR;": '\U0000255A', - "boxUl;": '\U0000255C', - "boxUr;": '\U00002559', - "boxV;": '\U00002551', - "boxVH;": '\U0000256C', - "boxVL;": '\U00002563', - "boxVR;": '\U00002560', - "boxVh;": '\U0000256B', - "boxVl;": '\U00002562', - "boxVr;": '\U0000255F', - "boxbox;": '\U000029C9', - "boxdL;": '\U00002555', - "boxdR;": '\U00002552', - "boxdl;": '\U00002510', - "boxdr;": '\U0000250C', - "boxh;": '\U00002500', - "boxhD;": '\U00002565', - "boxhU;": '\U00002568', - "boxhd;": '\U0000252C', - "boxhu;": '\U00002534', - "boxminus;": '\U0000229F', - "boxplus;": '\U0000229E', - "boxtimes;": '\U000022A0', - "boxuL;": '\U0000255B', - "boxuR;": '\U00002558', - "boxul;": '\U00002518', - "boxur;": '\U00002514', - "boxv;": '\U00002502', - "boxvH;": '\U0000256A', - "boxvL;": '\U00002561', - "boxvR;": '\U0000255E', - "boxvh;": '\U0000253C', - "boxvl;": 
'\U00002524', - "boxvr;": '\U0000251C', - "bprime;": '\U00002035', - "breve;": '\U000002D8', - "brvbar;": '\U000000A6', - "bscr;": '\U0001D4B7', - "bsemi;": '\U0000204F', - "bsim;": '\U0000223D', - "bsime;": '\U000022CD', - "bsol;": '\U0000005C', - "bsolb;": '\U000029C5', - "bsolhsub;": '\U000027C8', - "bull;": '\U00002022', - "bullet;": '\U00002022', - "bump;": '\U0000224E', - "bumpE;": '\U00002AAE', - "bumpe;": '\U0000224F', - "bumpeq;": '\U0000224F', - "cacute;": '\U00000107', - "cap;": '\U00002229', - "capand;": '\U00002A44', - "capbrcup;": '\U00002A49', - "capcap;": '\U00002A4B', - "capcup;": '\U00002A47', - "capdot;": '\U00002A40', - "caret;": '\U00002041', - "caron;": '\U000002C7', - "ccaps;": '\U00002A4D', - "ccaron;": '\U0000010D', - "ccedil;": '\U000000E7', - "ccirc;": '\U00000109', - "ccups;": '\U00002A4C', - "ccupssm;": '\U00002A50', - "cdot;": '\U0000010B', - "cedil;": '\U000000B8', - "cemptyv;": '\U000029B2', - "cent;": '\U000000A2', - "centerdot;": '\U000000B7', - "cfr;": '\U0001D520', - "chcy;": '\U00000447', - "check;": '\U00002713', - "checkmark;": '\U00002713', - "chi;": '\U000003C7', - "cir;": '\U000025CB', - "cirE;": '\U000029C3', - "circ;": '\U000002C6', - "circeq;": '\U00002257', - "circlearrowleft;": '\U000021BA', - "circlearrowright;": '\U000021BB', - "circledR;": '\U000000AE', - "circledS;": '\U000024C8', - "circledast;": '\U0000229B', - "circledcirc;": '\U0000229A', - "circleddash;": '\U0000229D', - "cire;": '\U00002257', - "cirfnint;": '\U00002A10', - "cirmid;": '\U00002AEF', - "cirscir;": '\U000029C2', - "clubs;": '\U00002663', - "clubsuit;": '\U00002663', - "colon;": '\U0000003A', - "colone;": '\U00002254', - "coloneq;": '\U00002254', - "comma;": '\U0000002C', - "commat;": '\U00000040', - "comp;": '\U00002201', - "compfn;": '\U00002218', - "complement;": '\U00002201', - "complexes;": '\U00002102', - "cong;": '\U00002245', - "congdot;": '\U00002A6D', - "conint;": '\U0000222E', - "copf;": '\U0001D554', - "coprod;": '\U00002210', - "copy;": '\U000000A9', - "copysr;": '\U00002117', - "crarr;": '\U000021B5', - "cross;": '\U00002717', - "cscr;": '\U0001D4B8', - "csub;": '\U00002ACF', - "csube;": '\U00002AD1', - "csup;": '\U00002AD0', - "csupe;": '\U00002AD2', - "ctdot;": '\U000022EF', - "cudarrl;": '\U00002938', - "cudarrr;": '\U00002935', - "cuepr;": '\U000022DE', - "cuesc;": '\U000022DF', - "cularr;": '\U000021B6', - "cularrp;": '\U0000293D', - "cup;": '\U0000222A', - "cupbrcap;": '\U00002A48', - "cupcap;": '\U00002A46', - "cupcup;": '\U00002A4A', - "cupdot;": '\U0000228D', - "cupor;": '\U00002A45', - "curarr;": '\U000021B7', - "curarrm;": '\U0000293C', - "curlyeqprec;": '\U000022DE', - "curlyeqsucc;": '\U000022DF', - "curlyvee;": '\U000022CE', - "curlywedge;": '\U000022CF', - "curren;": '\U000000A4', - "curvearrowleft;": '\U000021B6', - "curvearrowright;": '\U000021B7', - "cuvee;": '\U000022CE', - "cuwed;": '\U000022CF', - "cwconint;": '\U00002232', - "cwint;": '\U00002231', - "cylcty;": '\U0000232D', - "dArr;": '\U000021D3', - "dHar;": '\U00002965', - "dagger;": '\U00002020', - "daleth;": '\U00002138', - "darr;": '\U00002193', - "dash;": '\U00002010', - "dashv;": '\U000022A3', - "dbkarow;": '\U0000290F', - "dblac;": '\U000002DD', - "dcaron;": '\U0000010F', - "dcy;": '\U00000434', - "dd;": '\U00002146', - "ddagger;": '\U00002021', - "ddarr;": '\U000021CA', - "ddotseq;": '\U00002A77', - "deg;": '\U000000B0', - "delta;": '\U000003B4', - "demptyv;": '\U000029B1', - "dfisht;": '\U0000297F', - "dfr;": '\U0001D521', - "dharl;": '\U000021C3', - "dharr;": '\U000021C2', - 
"diam;": '\U000022C4', - "diamond;": '\U000022C4', - "diamondsuit;": '\U00002666', - "diams;": '\U00002666', - "die;": '\U000000A8', - "digamma;": '\U000003DD', - "disin;": '\U000022F2', - "div;": '\U000000F7', - "divide;": '\U000000F7', - "divideontimes;": '\U000022C7', - "divonx;": '\U000022C7', - "djcy;": '\U00000452', - "dlcorn;": '\U0000231E', - "dlcrop;": '\U0000230D', - "dollar;": '\U00000024', - "dopf;": '\U0001D555', - "dot;": '\U000002D9', - "doteq;": '\U00002250', - "doteqdot;": '\U00002251', - "dotminus;": '\U00002238', - "dotplus;": '\U00002214', - "dotsquare;": '\U000022A1', - "doublebarwedge;": '\U00002306', - "downarrow;": '\U00002193', - "downdownarrows;": '\U000021CA', - "downharpoonleft;": '\U000021C3', - "downharpoonright;": '\U000021C2', - "drbkarow;": '\U00002910', - "drcorn;": '\U0000231F', - "drcrop;": '\U0000230C', - "dscr;": '\U0001D4B9', - "dscy;": '\U00000455', - "dsol;": '\U000029F6', - "dstrok;": '\U00000111', - "dtdot;": '\U000022F1', - "dtri;": '\U000025BF', - "dtrif;": '\U000025BE', - "duarr;": '\U000021F5', - "duhar;": '\U0000296F', - "dwangle;": '\U000029A6', - "dzcy;": '\U0000045F', - "dzigrarr;": '\U000027FF', - "eDDot;": '\U00002A77', - "eDot;": '\U00002251', - "eacute;": '\U000000E9', - "easter;": '\U00002A6E', - "ecaron;": '\U0000011B', - "ecir;": '\U00002256', - "ecirc;": '\U000000EA', - "ecolon;": '\U00002255', - "ecy;": '\U0000044D', - "edot;": '\U00000117', - "ee;": '\U00002147', - "efDot;": '\U00002252', - "efr;": '\U0001D522', - "eg;": '\U00002A9A', - "egrave;": '\U000000E8', - "egs;": '\U00002A96', - "egsdot;": '\U00002A98', - "el;": '\U00002A99', - "elinters;": '\U000023E7', - "ell;": '\U00002113', - "els;": '\U00002A95', - "elsdot;": '\U00002A97', - "emacr;": '\U00000113', - "empty;": '\U00002205', - "emptyset;": '\U00002205', - "emptyv;": '\U00002205', - "emsp;": '\U00002003', - "emsp13;": '\U00002004', - "emsp14;": '\U00002005', - "eng;": '\U0000014B', - "ensp;": '\U00002002', - "eogon;": '\U00000119', - "eopf;": '\U0001D556', - "epar;": '\U000022D5', - "eparsl;": '\U000029E3', - "eplus;": '\U00002A71', - "epsi;": '\U000003B5', - "epsilon;": '\U000003B5', - "epsiv;": '\U000003F5', - "eqcirc;": '\U00002256', - "eqcolon;": '\U00002255', - "eqsim;": '\U00002242', - "eqslantgtr;": '\U00002A96', - "eqslantless;": '\U00002A95', - "equals;": '\U0000003D', - "equest;": '\U0000225F', - "equiv;": '\U00002261', - "equivDD;": '\U00002A78', - "eqvparsl;": '\U000029E5', - "erDot;": '\U00002253', - "erarr;": '\U00002971', - "escr;": '\U0000212F', - "esdot;": '\U00002250', - "esim;": '\U00002242', - "eta;": '\U000003B7', - "eth;": '\U000000F0', - "euml;": '\U000000EB', - "euro;": '\U000020AC', - "excl;": '\U00000021', - "exist;": '\U00002203', - "expectation;": '\U00002130', - "exponentiale;": '\U00002147', - "fallingdotseq;": '\U00002252', - "fcy;": '\U00000444', - "female;": '\U00002640', - "ffilig;": '\U0000FB03', - "fflig;": '\U0000FB00', - "ffllig;": '\U0000FB04', - "ffr;": '\U0001D523', - "filig;": '\U0000FB01', - "flat;": '\U0000266D', - "fllig;": '\U0000FB02', - "fltns;": '\U000025B1', - "fnof;": '\U00000192', - "fopf;": '\U0001D557', - "forall;": '\U00002200', - "fork;": '\U000022D4', - "forkv;": '\U00002AD9', - "fpartint;": '\U00002A0D', - "frac12;": '\U000000BD', - "frac13;": '\U00002153', - "frac14;": '\U000000BC', - "frac15;": '\U00002155', - "frac16;": '\U00002159', - "frac18;": '\U0000215B', - "frac23;": '\U00002154', - "frac25;": '\U00002156', - "frac34;": '\U000000BE', - "frac35;": '\U00002157', - "frac38;": '\U0000215C', - "frac45;": 
'\U00002158', - "frac56;": '\U0000215A', - "frac58;": '\U0000215D', - "frac78;": '\U0000215E', - "frasl;": '\U00002044', - "frown;": '\U00002322', - "fscr;": '\U0001D4BB', - "gE;": '\U00002267', - "gEl;": '\U00002A8C', - "gacute;": '\U000001F5', - "gamma;": '\U000003B3', - "gammad;": '\U000003DD', - "gap;": '\U00002A86', - "gbreve;": '\U0000011F', - "gcirc;": '\U0000011D', - "gcy;": '\U00000433', - "gdot;": '\U00000121', - "ge;": '\U00002265', - "gel;": '\U000022DB', - "geq;": '\U00002265', - "geqq;": '\U00002267', - "geqslant;": '\U00002A7E', - "ges;": '\U00002A7E', - "gescc;": '\U00002AA9', - "gesdot;": '\U00002A80', - "gesdoto;": '\U00002A82', - "gesdotol;": '\U00002A84', - "gesles;": '\U00002A94', - "gfr;": '\U0001D524', - "gg;": '\U0000226B', - "ggg;": '\U000022D9', - "gimel;": '\U00002137', - "gjcy;": '\U00000453', - "gl;": '\U00002277', - "glE;": '\U00002A92', - "gla;": '\U00002AA5', - "glj;": '\U00002AA4', - "gnE;": '\U00002269', - "gnap;": '\U00002A8A', - "gnapprox;": '\U00002A8A', - "gne;": '\U00002A88', - "gneq;": '\U00002A88', - "gneqq;": '\U00002269', - "gnsim;": '\U000022E7', - "gopf;": '\U0001D558', - "grave;": '\U00000060', - "gscr;": '\U0000210A', - "gsim;": '\U00002273', - "gsime;": '\U00002A8E', - "gsiml;": '\U00002A90', - "gt;": '\U0000003E', - "gtcc;": '\U00002AA7', - "gtcir;": '\U00002A7A', - "gtdot;": '\U000022D7', - "gtlPar;": '\U00002995', - "gtquest;": '\U00002A7C', - "gtrapprox;": '\U00002A86', - "gtrarr;": '\U00002978', - "gtrdot;": '\U000022D7', - "gtreqless;": '\U000022DB', - "gtreqqless;": '\U00002A8C', - "gtrless;": '\U00002277', - "gtrsim;": '\U00002273', - "hArr;": '\U000021D4', - "hairsp;": '\U0000200A', - "half;": '\U000000BD', - "hamilt;": '\U0000210B', - "hardcy;": '\U0000044A', - "harr;": '\U00002194', - "harrcir;": '\U00002948', - "harrw;": '\U000021AD', - "hbar;": '\U0000210F', - "hcirc;": '\U00000125', - "hearts;": '\U00002665', - "heartsuit;": '\U00002665', - "hellip;": '\U00002026', - "hercon;": '\U000022B9', - "hfr;": '\U0001D525', - "hksearow;": '\U00002925', - "hkswarow;": '\U00002926', - "hoarr;": '\U000021FF', - "homtht;": '\U0000223B', - "hookleftarrow;": '\U000021A9', - "hookrightarrow;": '\U000021AA', - "hopf;": '\U0001D559', - "horbar;": '\U00002015', - "hscr;": '\U0001D4BD', - "hslash;": '\U0000210F', - "hstrok;": '\U00000127', - "hybull;": '\U00002043', - "hyphen;": '\U00002010', - "iacute;": '\U000000ED', - "ic;": '\U00002063', - "icirc;": '\U000000EE', - "icy;": '\U00000438', - "iecy;": '\U00000435', - "iexcl;": '\U000000A1', - "iff;": '\U000021D4', - "ifr;": '\U0001D526', - "igrave;": '\U000000EC', - "ii;": '\U00002148', - "iiiint;": '\U00002A0C', - "iiint;": '\U0000222D', - "iinfin;": '\U000029DC', - "iiota;": '\U00002129', - "ijlig;": '\U00000133', - "imacr;": '\U0000012B', - "image;": '\U00002111', - "imagline;": '\U00002110', - "imagpart;": '\U00002111', - "imath;": '\U00000131', - "imof;": '\U000022B7', - "imped;": '\U000001B5', - "in;": '\U00002208', - "incare;": '\U00002105', - "infin;": '\U0000221E', - "infintie;": '\U000029DD', - "inodot;": '\U00000131', - "int;": '\U0000222B', - "intcal;": '\U000022BA', - "integers;": '\U00002124', - "intercal;": '\U000022BA', - "intlarhk;": '\U00002A17', - "intprod;": '\U00002A3C', - "iocy;": '\U00000451', - "iogon;": '\U0000012F', - "iopf;": '\U0001D55A', - "iota;": '\U000003B9', - "iprod;": '\U00002A3C', - "iquest;": '\U000000BF', - "iscr;": '\U0001D4BE', - "isin;": '\U00002208', - "isinE;": '\U000022F9', - "isindot;": '\U000022F5', - "isins;": '\U000022F4', - "isinsv;": '\U000022F3', - 
"isinv;": '\U00002208', - "it;": '\U00002062', - "itilde;": '\U00000129', - "iukcy;": '\U00000456', - "iuml;": '\U000000EF', - "jcirc;": '\U00000135', - "jcy;": '\U00000439', - "jfr;": '\U0001D527', - "jmath;": '\U00000237', - "jopf;": '\U0001D55B', - "jscr;": '\U0001D4BF', - "jsercy;": '\U00000458', - "jukcy;": '\U00000454', - "kappa;": '\U000003BA', - "kappav;": '\U000003F0', - "kcedil;": '\U00000137', - "kcy;": '\U0000043A', - "kfr;": '\U0001D528', - "kgreen;": '\U00000138', - "khcy;": '\U00000445', - "kjcy;": '\U0000045C', - "kopf;": '\U0001D55C', - "kscr;": '\U0001D4C0', - "lAarr;": '\U000021DA', - "lArr;": '\U000021D0', - "lAtail;": '\U0000291B', - "lBarr;": '\U0000290E', - "lE;": '\U00002266', - "lEg;": '\U00002A8B', - "lHar;": '\U00002962', - "lacute;": '\U0000013A', - "laemptyv;": '\U000029B4', - "lagran;": '\U00002112', - "lambda;": '\U000003BB', - "lang;": '\U000027E8', - "langd;": '\U00002991', - "langle;": '\U000027E8', - "lap;": '\U00002A85', - "laquo;": '\U000000AB', - "larr;": '\U00002190', - "larrb;": '\U000021E4', - "larrbfs;": '\U0000291F', - "larrfs;": '\U0000291D', - "larrhk;": '\U000021A9', - "larrlp;": '\U000021AB', - "larrpl;": '\U00002939', - "larrsim;": '\U00002973', - "larrtl;": '\U000021A2', - "lat;": '\U00002AAB', - "latail;": '\U00002919', - "late;": '\U00002AAD', - "lbarr;": '\U0000290C', - "lbbrk;": '\U00002772', - "lbrace;": '\U0000007B', - "lbrack;": '\U0000005B', - "lbrke;": '\U0000298B', - "lbrksld;": '\U0000298F', - "lbrkslu;": '\U0000298D', - "lcaron;": '\U0000013E', - "lcedil;": '\U0000013C', - "lceil;": '\U00002308', - "lcub;": '\U0000007B', - "lcy;": '\U0000043B', - "ldca;": '\U00002936', - "ldquo;": '\U0000201C', - "ldquor;": '\U0000201E', - "ldrdhar;": '\U00002967', - "ldrushar;": '\U0000294B', - "ldsh;": '\U000021B2', - "le;": '\U00002264', - "leftarrow;": '\U00002190', - "leftarrowtail;": '\U000021A2', - "leftharpoondown;": '\U000021BD', - "leftharpoonup;": '\U000021BC', - "leftleftarrows;": '\U000021C7', - "leftrightarrow;": '\U00002194', - "leftrightarrows;": '\U000021C6', - "leftrightharpoons;": '\U000021CB', - "leftrightsquigarrow;": '\U000021AD', - "leftthreetimes;": '\U000022CB', - "leg;": '\U000022DA', - "leq;": '\U00002264', - "leqq;": '\U00002266', - "leqslant;": '\U00002A7D', - "les;": '\U00002A7D', - "lescc;": '\U00002AA8', - "lesdot;": '\U00002A7F', - "lesdoto;": '\U00002A81', - "lesdotor;": '\U00002A83', - "lesges;": '\U00002A93', - "lessapprox;": '\U00002A85', - "lessdot;": '\U000022D6', - "lesseqgtr;": '\U000022DA', - "lesseqqgtr;": '\U00002A8B', - "lessgtr;": '\U00002276', - "lesssim;": '\U00002272', - "lfisht;": '\U0000297C', - "lfloor;": '\U0000230A', - "lfr;": '\U0001D529', - "lg;": '\U00002276', - "lgE;": '\U00002A91', - "lhard;": '\U000021BD', - "lharu;": '\U000021BC', - "lharul;": '\U0000296A', - "lhblk;": '\U00002584', - "ljcy;": '\U00000459', - "ll;": '\U0000226A', - "llarr;": '\U000021C7', - "llcorner;": '\U0000231E', - "llhard;": '\U0000296B', - "lltri;": '\U000025FA', - "lmidot;": '\U00000140', - "lmoust;": '\U000023B0', - "lmoustache;": '\U000023B0', - "lnE;": '\U00002268', - "lnap;": '\U00002A89', - "lnapprox;": '\U00002A89', - "lne;": '\U00002A87', - "lneq;": '\U00002A87', - "lneqq;": '\U00002268', - "lnsim;": '\U000022E6', - "loang;": '\U000027EC', - "loarr;": '\U000021FD', - "lobrk;": '\U000027E6', - "longleftarrow;": '\U000027F5', - "longleftrightarrow;": '\U000027F7', - "longmapsto;": '\U000027FC', - "longrightarrow;": '\U000027F6', - "looparrowleft;": '\U000021AB', - "looparrowright;": '\U000021AC', - "lopar;": 
'\U00002985', - "lopf;": '\U0001D55D', - "loplus;": '\U00002A2D', - "lotimes;": '\U00002A34', - "lowast;": '\U00002217', - "lowbar;": '\U0000005F', - "loz;": '\U000025CA', - "lozenge;": '\U000025CA', - "lozf;": '\U000029EB', - "lpar;": '\U00000028', - "lparlt;": '\U00002993', - "lrarr;": '\U000021C6', - "lrcorner;": '\U0000231F', - "lrhar;": '\U000021CB', - "lrhard;": '\U0000296D', - "lrm;": '\U0000200E', - "lrtri;": '\U000022BF', - "lsaquo;": '\U00002039', - "lscr;": '\U0001D4C1', - "lsh;": '\U000021B0', - "lsim;": '\U00002272', - "lsime;": '\U00002A8D', - "lsimg;": '\U00002A8F', - "lsqb;": '\U0000005B', - "lsquo;": '\U00002018', - "lsquor;": '\U0000201A', - "lstrok;": '\U00000142', - "lt;": '\U0000003C', - "ltcc;": '\U00002AA6', - "ltcir;": '\U00002A79', - "ltdot;": '\U000022D6', - "lthree;": '\U000022CB', - "ltimes;": '\U000022C9', - "ltlarr;": '\U00002976', - "ltquest;": '\U00002A7B', - "ltrPar;": '\U00002996', - "ltri;": '\U000025C3', - "ltrie;": '\U000022B4', - "ltrif;": '\U000025C2', - "lurdshar;": '\U0000294A', - "luruhar;": '\U00002966', - "mDDot;": '\U0000223A', - "macr;": '\U000000AF', - "male;": '\U00002642', - "malt;": '\U00002720', - "maltese;": '\U00002720', - "map;": '\U000021A6', - "mapsto;": '\U000021A6', - "mapstodown;": '\U000021A7', - "mapstoleft;": '\U000021A4', - "mapstoup;": '\U000021A5', - "marker;": '\U000025AE', - "mcomma;": '\U00002A29', - "mcy;": '\U0000043C', - "mdash;": '\U00002014', - "measuredangle;": '\U00002221', - "mfr;": '\U0001D52A', - "mho;": '\U00002127', - "micro;": '\U000000B5', - "mid;": '\U00002223', - "midast;": '\U0000002A', - "midcir;": '\U00002AF0', - "middot;": '\U000000B7', - "minus;": '\U00002212', - "minusb;": '\U0000229F', - "minusd;": '\U00002238', - "minusdu;": '\U00002A2A', - "mlcp;": '\U00002ADB', - "mldr;": '\U00002026', - "mnplus;": '\U00002213', - "models;": '\U000022A7', - "mopf;": '\U0001D55E', - "mp;": '\U00002213', - "mscr;": '\U0001D4C2', - "mstpos;": '\U0000223E', - "mu;": '\U000003BC', - "multimap;": '\U000022B8', - "mumap;": '\U000022B8', - "nLeftarrow;": '\U000021CD', - "nLeftrightarrow;": '\U000021CE', - "nRightarrow;": '\U000021CF', - "nVDash;": '\U000022AF', - "nVdash;": '\U000022AE', - "nabla;": '\U00002207', - "nacute;": '\U00000144', - "nap;": '\U00002249', - "napos;": '\U00000149', - "napprox;": '\U00002249', - "natur;": '\U0000266E', - "natural;": '\U0000266E', - "naturals;": '\U00002115', - "nbsp;": '\U000000A0', - "ncap;": '\U00002A43', - "ncaron;": '\U00000148', - "ncedil;": '\U00000146', - "ncong;": '\U00002247', - "ncup;": '\U00002A42', - "ncy;": '\U0000043D', - "ndash;": '\U00002013', - "ne;": '\U00002260', - "neArr;": '\U000021D7', - "nearhk;": '\U00002924', - "nearr;": '\U00002197', - "nearrow;": '\U00002197', - "nequiv;": '\U00002262', - "nesear;": '\U00002928', - "nexist;": '\U00002204', - "nexists;": '\U00002204', - "nfr;": '\U0001D52B', - "nge;": '\U00002271', - "ngeq;": '\U00002271', - "ngsim;": '\U00002275', - "ngt;": '\U0000226F', - "ngtr;": '\U0000226F', - "nhArr;": '\U000021CE', - "nharr;": '\U000021AE', - "nhpar;": '\U00002AF2', - "ni;": '\U0000220B', - "nis;": '\U000022FC', - "nisd;": '\U000022FA', - "niv;": '\U0000220B', - "njcy;": '\U0000045A', - "nlArr;": '\U000021CD', - "nlarr;": '\U0000219A', - "nldr;": '\U00002025', - "nle;": '\U00002270', - "nleftarrow;": '\U0000219A', - "nleftrightarrow;": '\U000021AE', - "nleq;": '\U00002270', - "nless;": '\U0000226E', - "nlsim;": '\U00002274', - "nlt;": '\U0000226E', - "nltri;": '\U000022EA', - "nltrie;": '\U000022EC', - "nmid;": '\U00002224', - 
"nopf;": '\U0001D55F', - "not;": '\U000000AC', - "notin;": '\U00002209', - "notinva;": '\U00002209', - "notinvb;": '\U000022F7', - "notinvc;": '\U000022F6', - "notni;": '\U0000220C', - "notniva;": '\U0000220C', - "notnivb;": '\U000022FE', - "notnivc;": '\U000022FD', - "npar;": '\U00002226', - "nparallel;": '\U00002226', - "npolint;": '\U00002A14', - "npr;": '\U00002280', - "nprcue;": '\U000022E0', - "nprec;": '\U00002280', - "nrArr;": '\U000021CF', - "nrarr;": '\U0000219B', - "nrightarrow;": '\U0000219B', - "nrtri;": '\U000022EB', - "nrtrie;": '\U000022ED', - "nsc;": '\U00002281', - "nsccue;": '\U000022E1', - "nscr;": '\U0001D4C3', - "nshortmid;": '\U00002224', - "nshortparallel;": '\U00002226', - "nsim;": '\U00002241', - "nsime;": '\U00002244', - "nsimeq;": '\U00002244', - "nsmid;": '\U00002224', - "nspar;": '\U00002226', - "nsqsube;": '\U000022E2', - "nsqsupe;": '\U000022E3', - "nsub;": '\U00002284', - "nsube;": '\U00002288', - "nsubseteq;": '\U00002288', - "nsucc;": '\U00002281', - "nsup;": '\U00002285', - "nsupe;": '\U00002289', - "nsupseteq;": '\U00002289', - "ntgl;": '\U00002279', - "ntilde;": '\U000000F1', - "ntlg;": '\U00002278', - "ntriangleleft;": '\U000022EA', - "ntrianglelefteq;": '\U000022EC', - "ntriangleright;": '\U000022EB', - "ntrianglerighteq;": '\U000022ED', - "nu;": '\U000003BD', - "num;": '\U00000023', - "numero;": '\U00002116', - "numsp;": '\U00002007', - "nvDash;": '\U000022AD', - "nvHarr;": '\U00002904', - "nvdash;": '\U000022AC', - "nvinfin;": '\U000029DE', - "nvlArr;": '\U00002902', - "nvrArr;": '\U00002903', - "nwArr;": '\U000021D6', - "nwarhk;": '\U00002923', - "nwarr;": '\U00002196', - "nwarrow;": '\U00002196', - "nwnear;": '\U00002927', - "oS;": '\U000024C8', - "oacute;": '\U000000F3', - "oast;": '\U0000229B', - "ocir;": '\U0000229A', - "ocirc;": '\U000000F4', - "ocy;": '\U0000043E', - "odash;": '\U0000229D', - "odblac;": '\U00000151', - "odiv;": '\U00002A38', - "odot;": '\U00002299', - "odsold;": '\U000029BC', - "oelig;": '\U00000153', - "ofcir;": '\U000029BF', - "ofr;": '\U0001D52C', - "ogon;": '\U000002DB', - "ograve;": '\U000000F2', - "ogt;": '\U000029C1', - "ohbar;": '\U000029B5', - "ohm;": '\U000003A9', - "oint;": '\U0000222E', - "olarr;": '\U000021BA', - "olcir;": '\U000029BE', - "olcross;": '\U000029BB', - "oline;": '\U0000203E', - "olt;": '\U000029C0', - "omacr;": '\U0000014D', - "omega;": '\U000003C9', - "omicron;": '\U000003BF', - "omid;": '\U000029B6', - "ominus;": '\U00002296', - "oopf;": '\U0001D560', - "opar;": '\U000029B7', - "operp;": '\U000029B9', - "oplus;": '\U00002295', - "or;": '\U00002228', - "orarr;": '\U000021BB', - "ord;": '\U00002A5D', - "order;": '\U00002134', - "orderof;": '\U00002134', - "ordf;": '\U000000AA', - "ordm;": '\U000000BA', - "origof;": '\U000022B6', - "oror;": '\U00002A56', - "orslope;": '\U00002A57', - "orv;": '\U00002A5B', - "oscr;": '\U00002134', - "oslash;": '\U000000F8', - "osol;": '\U00002298', - "otilde;": '\U000000F5', - "otimes;": '\U00002297', - "otimesas;": '\U00002A36', - "ouml;": '\U000000F6', - "ovbar;": '\U0000233D', - "par;": '\U00002225', - "para;": '\U000000B6', - "parallel;": '\U00002225', - "parsim;": '\U00002AF3', - "parsl;": '\U00002AFD', - "part;": '\U00002202', - "pcy;": '\U0000043F', - "percnt;": '\U00000025', - "period;": '\U0000002E', - "permil;": '\U00002030', - "perp;": '\U000022A5', - "pertenk;": '\U00002031', - "pfr;": '\U0001D52D', - "phi;": '\U000003C6', - "phiv;": '\U000003D5', - "phmmat;": '\U00002133', - "phone;": '\U0000260E', - "pi;": '\U000003C0', - "pitchfork;": '\U000022D4', - 
"piv;": '\U000003D6', - "planck;": '\U0000210F', - "planckh;": '\U0000210E', - "plankv;": '\U0000210F', - "plus;": '\U0000002B', - "plusacir;": '\U00002A23', - "plusb;": '\U0000229E', - "pluscir;": '\U00002A22', - "plusdo;": '\U00002214', - "plusdu;": '\U00002A25', - "pluse;": '\U00002A72', - "plusmn;": '\U000000B1', - "plussim;": '\U00002A26', - "plustwo;": '\U00002A27', - "pm;": '\U000000B1', - "pointint;": '\U00002A15', - "popf;": '\U0001D561', - "pound;": '\U000000A3', - "pr;": '\U0000227A', - "prE;": '\U00002AB3', - "prap;": '\U00002AB7', - "prcue;": '\U0000227C', - "pre;": '\U00002AAF', - "prec;": '\U0000227A', - "precapprox;": '\U00002AB7', - "preccurlyeq;": '\U0000227C', - "preceq;": '\U00002AAF', - "precnapprox;": '\U00002AB9', - "precneqq;": '\U00002AB5', - "precnsim;": '\U000022E8', - "precsim;": '\U0000227E', - "prime;": '\U00002032', - "primes;": '\U00002119', - "prnE;": '\U00002AB5', - "prnap;": '\U00002AB9', - "prnsim;": '\U000022E8', - "prod;": '\U0000220F', - "profalar;": '\U0000232E', - "profline;": '\U00002312', - "profsurf;": '\U00002313', - "prop;": '\U0000221D', - "propto;": '\U0000221D', - "prsim;": '\U0000227E', - "prurel;": '\U000022B0', - "pscr;": '\U0001D4C5', - "psi;": '\U000003C8', - "puncsp;": '\U00002008', - "qfr;": '\U0001D52E', - "qint;": '\U00002A0C', - "qopf;": '\U0001D562', - "qprime;": '\U00002057', - "qscr;": '\U0001D4C6', - "quaternions;": '\U0000210D', - "quatint;": '\U00002A16', - "quest;": '\U0000003F', - "questeq;": '\U0000225F', - "quot;": '\U00000022', - "rAarr;": '\U000021DB', - "rArr;": '\U000021D2', - "rAtail;": '\U0000291C', - "rBarr;": '\U0000290F', - "rHar;": '\U00002964', - "racute;": '\U00000155', - "radic;": '\U0000221A', - "raemptyv;": '\U000029B3', - "rang;": '\U000027E9', - "rangd;": '\U00002992', - "range;": '\U000029A5', - "rangle;": '\U000027E9', - "raquo;": '\U000000BB', - "rarr;": '\U00002192', - "rarrap;": '\U00002975', - "rarrb;": '\U000021E5', - "rarrbfs;": '\U00002920', - "rarrc;": '\U00002933', - "rarrfs;": '\U0000291E', - "rarrhk;": '\U000021AA', - "rarrlp;": '\U000021AC', - "rarrpl;": '\U00002945', - "rarrsim;": '\U00002974', - "rarrtl;": '\U000021A3', - "rarrw;": '\U0000219D', - "ratail;": '\U0000291A', - "ratio;": '\U00002236', - "rationals;": '\U0000211A', - "rbarr;": '\U0000290D', - "rbbrk;": '\U00002773', - "rbrace;": '\U0000007D', - "rbrack;": '\U0000005D', - "rbrke;": '\U0000298C', - "rbrksld;": '\U0000298E', - "rbrkslu;": '\U00002990', - "rcaron;": '\U00000159', - "rcedil;": '\U00000157', - "rceil;": '\U00002309', - "rcub;": '\U0000007D', - "rcy;": '\U00000440', - "rdca;": '\U00002937', - "rdldhar;": '\U00002969', - "rdquo;": '\U0000201D', - "rdquor;": '\U0000201D', - "rdsh;": '\U000021B3', - "real;": '\U0000211C', - "realine;": '\U0000211B', - "realpart;": '\U0000211C', - "reals;": '\U0000211D', - "rect;": '\U000025AD', - "reg;": '\U000000AE', - "rfisht;": '\U0000297D', - "rfloor;": '\U0000230B', - "rfr;": '\U0001D52F', - "rhard;": '\U000021C1', - "rharu;": '\U000021C0', - "rharul;": '\U0000296C', - "rho;": '\U000003C1', - "rhov;": '\U000003F1', - "rightarrow;": '\U00002192', - "rightarrowtail;": '\U000021A3', - "rightharpoondown;": '\U000021C1', - "rightharpoonup;": '\U000021C0', - "rightleftarrows;": '\U000021C4', - "rightleftharpoons;": '\U000021CC', - "rightrightarrows;": '\U000021C9', - "rightsquigarrow;": '\U0000219D', - "rightthreetimes;": '\U000022CC', - "ring;": '\U000002DA', - "risingdotseq;": '\U00002253', - "rlarr;": '\U000021C4', - "rlhar;": '\U000021CC', - "rlm;": '\U0000200F', - "rmoust;": 
'\U000023B1', - "rmoustache;": '\U000023B1', - "rnmid;": '\U00002AEE', - "roang;": '\U000027ED', - "roarr;": '\U000021FE', - "robrk;": '\U000027E7', - "ropar;": '\U00002986', - "ropf;": '\U0001D563', - "roplus;": '\U00002A2E', - "rotimes;": '\U00002A35', - "rpar;": '\U00000029', - "rpargt;": '\U00002994', - "rppolint;": '\U00002A12', - "rrarr;": '\U000021C9', - "rsaquo;": '\U0000203A', - "rscr;": '\U0001D4C7', - "rsh;": '\U000021B1', - "rsqb;": '\U0000005D', - "rsquo;": '\U00002019', - "rsquor;": '\U00002019', - "rthree;": '\U000022CC', - "rtimes;": '\U000022CA', - "rtri;": '\U000025B9', - "rtrie;": '\U000022B5', - "rtrif;": '\U000025B8', - "rtriltri;": '\U000029CE', - "ruluhar;": '\U00002968', - "rx;": '\U0000211E', - "sacute;": '\U0000015B', - "sbquo;": '\U0000201A', - "sc;": '\U0000227B', - "scE;": '\U00002AB4', - "scap;": '\U00002AB8', - "scaron;": '\U00000161', - "sccue;": '\U0000227D', - "sce;": '\U00002AB0', - "scedil;": '\U0000015F', - "scirc;": '\U0000015D', - "scnE;": '\U00002AB6', - "scnap;": '\U00002ABA', - "scnsim;": '\U000022E9', - "scpolint;": '\U00002A13', - "scsim;": '\U0000227F', - "scy;": '\U00000441', - "sdot;": '\U000022C5', - "sdotb;": '\U000022A1', - "sdote;": '\U00002A66', - "seArr;": '\U000021D8', - "searhk;": '\U00002925', - "searr;": '\U00002198', - "searrow;": '\U00002198', - "sect;": '\U000000A7', - "semi;": '\U0000003B', - "seswar;": '\U00002929', - "setminus;": '\U00002216', - "setmn;": '\U00002216', - "sext;": '\U00002736', - "sfr;": '\U0001D530', - "sfrown;": '\U00002322', - "sharp;": '\U0000266F', - "shchcy;": '\U00000449', - "shcy;": '\U00000448', - "shortmid;": '\U00002223', - "shortparallel;": '\U00002225', - "shy;": '\U000000AD', - "sigma;": '\U000003C3', - "sigmaf;": '\U000003C2', - "sigmav;": '\U000003C2', - "sim;": '\U0000223C', - "simdot;": '\U00002A6A', - "sime;": '\U00002243', - "simeq;": '\U00002243', - "simg;": '\U00002A9E', - "simgE;": '\U00002AA0', - "siml;": '\U00002A9D', - "simlE;": '\U00002A9F', - "simne;": '\U00002246', - "simplus;": '\U00002A24', - "simrarr;": '\U00002972', - "slarr;": '\U00002190', - "smallsetminus;": '\U00002216', - "smashp;": '\U00002A33', - "smeparsl;": '\U000029E4', - "smid;": '\U00002223', - "smile;": '\U00002323', - "smt;": '\U00002AAA', - "smte;": '\U00002AAC', - "softcy;": '\U0000044C', - "sol;": '\U0000002F', - "solb;": '\U000029C4', - "solbar;": '\U0000233F', - "sopf;": '\U0001D564', - "spades;": '\U00002660', - "spadesuit;": '\U00002660', - "spar;": '\U00002225', - "sqcap;": '\U00002293', - "sqcup;": '\U00002294', - "sqsub;": '\U0000228F', - "sqsube;": '\U00002291', - "sqsubset;": '\U0000228F', - "sqsubseteq;": '\U00002291', - "sqsup;": '\U00002290', - "sqsupe;": '\U00002292', - "sqsupset;": '\U00002290', - "sqsupseteq;": '\U00002292', - "squ;": '\U000025A1', - "square;": '\U000025A1', - "squarf;": '\U000025AA', - "squf;": '\U000025AA', - "srarr;": '\U00002192', - "sscr;": '\U0001D4C8', - "ssetmn;": '\U00002216', - "ssmile;": '\U00002323', - "sstarf;": '\U000022C6', - "star;": '\U00002606', - "starf;": '\U00002605', - "straightepsilon;": '\U000003F5', - "straightphi;": '\U000003D5', - "strns;": '\U000000AF', - "sub;": '\U00002282', - "subE;": '\U00002AC5', - "subdot;": '\U00002ABD', - "sube;": '\U00002286', - "subedot;": '\U00002AC3', - "submult;": '\U00002AC1', - "subnE;": '\U00002ACB', - "subne;": '\U0000228A', - "subplus;": '\U00002ABF', - "subrarr;": '\U00002979', - "subset;": '\U00002282', - "subseteq;": '\U00002286', - "subseteqq;": '\U00002AC5', - "subsetneq;": '\U0000228A', - "subsetneqq;": 
'\U00002ACB', - "subsim;": '\U00002AC7', - "subsub;": '\U00002AD5', - "subsup;": '\U00002AD3', - "succ;": '\U0000227B', - "succapprox;": '\U00002AB8', - "succcurlyeq;": '\U0000227D', - "succeq;": '\U00002AB0', - "succnapprox;": '\U00002ABA', - "succneqq;": '\U00002AB6', - "succnsim;": '\U000022E9', - "succsim;": '\U0000227F', - "sum;": '\U00002211', - "sung;": '\U0000266A', - "sup;": '\U00002283', - "sup1;": '\U000000B9', - "sup2;": '\U000000B2', - "sup3;": '\U000000B3', - "supE;": '\U00002AC6', - "supdot;": '\U00002ABE', - "supdsub;": '\U00002AD8', - "supe;": '\U00002287', - "supedot;": '\U00002AC4', - "suphsol;": '\U000027C9', - "suphsub;": '\U00002AD7', - "suplarr;": '\U0000297B', - "supmult;": '\U00002AC2', - "supnE;": '\U00002ACC', - "supne;": '\U0000228B', - "supplus;": '\U00002AC0', - "supset;": '\U00002283', - "supseteq;": '\U00002287', - "supseteqq;": '\U00002AC6', - "supsetneq;": '\U0000228B', - "supsetneqq;": '\U00002ACC', - "supsim;": '\U00002AC8', - "supsub;": '\U00002AD4', - "supsup;": '\U00002AD6', - "swArr;": '\U000021D9', - "swarhk;": '\U00002926', - "swarr;": '\U00002199', - "swarrow;": '\U00002199', - "swnwar;": '\U0000292A', - "szlig;": '\U000000DF', - "target;": '\U00002316', - "tau;": '\U000003C4', - "tbrk;": '\U000023B4', - "tcaron;": '\U00000165', - "tcedil;": '\U00000163', - "tcy;": '\U00000442', - "tdot;": '\U000020DB', - "telrec;": '\U00002315', - "tfr;": '\U0001D531', - "there4;": '\U00002234', - "therefore;": '\U00002234', - "theta;": '\U000003B8', - "thetasym;": '\U000003D1', - "thetav;": '\U000003D1', - "thickapprox;": '\U00002248', - "thicksim;": '\U0000223C', - "thinsp;": '\U00002009', - "thkap;": '\U00002248', - "thksim;": '\U0000223C', - "thorn;": '\U000000FE', - "tilde;": '\U000002DC', - "times;": '\U000000D7', - "timesb;": '\U000022A0', - "timesbar;": '\U00002A31', - "timesd;": '\U00002A30', - "tint;": '\U0000222D', - "toea;": '\U00002928', - "top;": '\U000022A4', - "topbot;": '\U00002336', - "topcir;": '\U00002AF1', - "topf;": '\U0001D565', - "topfork;": '\U00002ADA', - "tosa;": '\U00002929', - "tprime;": '\U00002034', - "trade;": '\U00002122', - "triangle;": '\U000025B5', - "triangledown;": '\U000025BF', - "triangleleft;": '\U000025C3', - "trianglelefteq;": '\U000022B4', - "triangleq;": '\U0000225C', - "triangleright;": '\U000025B9', - "trianglerighteq;": '\U000022B5', - "tridot;": '\U000025EC', - "trie;": '\U0000225C', - "triminus;": '\U00002A3A', - "triplus;": '\U00002A39', - "trisb;": '\U000029CD', - "tritime;": '\U00002A3B', - "trpezium;": '\U000023E2', - "tscr;": '\U0001D4C9', - "tscy;": '\U00000446', - "tshcy;": '\U0000045B', - "tstrok;": '\U00000167', - "twixt;": '\U0000226C', - "twoheadleftarrow;": '\U0000219E', - "twoheadrightarrow;": '\U000021A0', - "uArr;": '\U000021D1', - "uHar;": '\U00002963', - "uacute;": '\U000000FA', - "uarr;": '\U00002191', - "ubrcy;": '\U0000045E', - "ubreve;": '\U0000016D', - "ucirc;": '\U000000FB', - "ucy;": '\U00000443', - "udarr;": '\U000021C5', - "udblac;": '\U00000171', - "udhar;": '\U0000296E', - "ufisht;": '\U0000297E', - "ufr;": '\U0001D532', - "ugrave;": '\U000000F9', - "uharl;": '\U000021BF', - "uharr;": '\U000021BE', - "uhblk;": '\U00002580', - "ulcorn;": '\U0000231C', - "ulcorner;": '\U0000231C', - "ulcrop;": '\U0000230F', - "ultri;": '\U000025F8', - "umacr;": '\U0000016B', - "uml;": '\U000000A8', - "uogon;": '\U00000173', - "uopf;": '\U0001D566', - "uparrow;": '\U00002191', - "updownarrow;": '\U00002195', - "upharpoonleft;": '\U000021BF', - "upharpoonright;": '\U000021BE', - "uplus;": '\U0000228E', - 
"upsi;": '\U000003C5', - "upsih;": '\U000003D2', - "upsilon;": '\U000003C5', - "upuparrows;": '\U000021C8', - "urcorn;": '\U0000231D', - "urcorner;": '\U0000231D', - "urcrop;": '\U0000230E', - "uring;": '\U0000016F', - "urtri;": '\U000025F9', - "uscr;": '\U0001D4CA', - "utdot;": '\U000022F0', - "utilde;": '\U00000169', - "utri;": '\U000025B5', - "utrif;": '\U000025B4', - "uuarr;": '\U000021C8', - "uuml;": '\U000000FC', - "uwangle;": '\U000029A7', - "vArr;": '\U000021D5', - "vBar;": '\U00002AE8', - "vBarv;": '\U00002AE9', - "vDash;": '\U000022A8', - "vangrt;": '\U0000299C', - "varepsilon;": '\U000003F5', - "varkappa;": '\U000003F0', - "varnothing;": '\U00002205', - "varphi;": '\U000003D5', - "varpi;": '\U000003D6', - "varpropto;": '\U0000221D', - "varr;": '\U00002195', - "varrho;": '\U000003F1', - "varsigma;": '\U000003C2', - "vartheta;": '\U000003D1', - "vartriangleleft;": '\U000022B2', - "vartriangleright;": '\U000022B3', - "vcy;": '\U00000432', - "vdash;": '\U000022A2', - "vee;": '\U00002228', - "veebar;": '\U000022BB', - "veeeq;": '\U0000225A', - "vellip;": '\U000022EE', - "verbar;": '\U0000007C', - "vert;": '\U0000007C', - "vfr;": '\U0001D533', - "vltri;": '\U000022B2', - "vopf;": '\U0001D567', - "vprop;": '\U0000221D', - "vrtri;": '\U000022B3', - "vscr;": '\U0001D4CB', - "vzigzag;": '\U0000299A', - "wcirc;": '\U00000175', - "wedbar;": '\U00002A5F', - "wedge;": '\U00002227', - "wedgeq;": '\U00002259', - "weierp;": '\U00002118', - "wfr;": '\U0001D534', - "wopf;": '\U0001D568', - "wp;": '\U00002118', - "wr;": '\U00002240', - "wreath;": '\U00002240', - "wscr;": '\U0001D4CC', - "xcap;": '\U000022C2', - "xcirc;": '\U000025EF', - "xcup;": '\U000022C3', - "xdtri;": '\U000025BD', - "xfr;": '\U0001D535', - "xhArr;": '\U000027FA', - "xharr;": '\U000027F7', - "xi;": '\U000003BE', - "xlArr;": '\U000027F8', - "xlarr;": '\U000027F5', - "xmap;": '\U000027FC', - "xnis;": '\U000022FB', - "xodot;": '\U00002A00', - "xopf;": '\U0001D569', - "xoplus;": '\U00002A01', - "xotime;": '\U00002A02', - "xrArr;": '\U000027F9', - "xrarr;": '\U000027F6', - "xscr;": '\U0001D4CD', - "xsqcup;": '\U00002A06', - "xuplus;": '\U00002A04', - "xutri;": '\U000025B3', - "xvee;": '\U000022C1', - "xwedge;": '\U000022C0', - "yacute;": '\U000000FD', - "yacy;": '\U0000044F', - "ycirc;": '\U00000177', - "ycy;": '\U0000044B', - "yen;": '\U000000A5', - "yfr;": '\U0001D536', - "yicy;": '\U00000457', - "yopf;": '\U0001D56A', - "yscr;": '\U0001D4CE', - "yucy;": '\U0000044E', - "yuml;": '\U000000FF', - "zacute;": '\U0000017A', - "zcaron;": '\U0000017E', - "zcy;": '\U00000437', - "zdot;": '\U0000017C', - "zeetrf;": '\U00002128', - "zeta;": '\U000003B6', - "zfr;": '\U0001D537', - "zhcy;": '\U00000436', - "zigrarr;": '\U000021DD', - "zopf;": '\U0001D56B', - "zscr;": '\U0001D4CF', - "zwj;": '\U0000200D', - "zwnj;": '\U0000200C', - "AElig": '\U000000C6', - "AMP": '\U00000026', - "Aacute": '\U000000C1', - "Acirc": '\U000000C2', - "Agrave": '\U000000C0', - "Aring": '\U000000C5', - "Atilde": '\U000000C3', - "Auml": '\U000000C4', - "COPY": '\U000000A9', - "Ccedil": '\U000000C7', - "ETH": '\U000000D0', - "Eacute": '\U000000C9', - "Ecirc": '\U000000CA', - "Egrave": '\U000000C8', - "Euml": '\U000000CB', - "GT": '\U0000003E', - "Iacute": '\U000000CD', - "Icirc": '\U000000CE', - "Igrave": '\U000000CC', - "Iuml": '\U000000CF', - "LT": '\U0000003C', - "Ntilde": '\U000000D1', - "Oacute": '\U000000D3', - "Ocirc": '\U000000D4', - "Ograve": '\U000000D2', - "Oslash": '\U000000D8', - "Otilde": '\U000000D5', - "Ouml": '\U000000D6', - "QUOT": '\U00000022', - 
"REG": '\U000000AE', - "THORN": '\U000000DE', - "Uacute": '\U000000DA', - "Ucirc": '\U000000DB', - "Ugrave": '\U000000D9', - "Uuml": '\U000000DC', - "Yacute": '\U000000DD', - "aacute": '\U000000E1', - "acirc": '\U000000E2', - "acute": '\U000000B4', - "aelig": '\U000000E6', - "agrave": '\U000000E0', - "amp": '\U00000026', - "aring": '\U000000E5', - "atilde": '\U000000E3', - "auml": '\U000000E4', - "brvbar": '\U000000A6', - "ccedil": '\U000000E7', - "cedil": '\U000000B8', - "cent": '\U000000A2', - "copy": '\U000000A9', - "curren": '\U000000A4', - "deg": '\U000000B0', - "divide": '\U000000F7', - "eacute": '\U000000E9', - "ecirc": '\U000000EA', - "egrave": '\U000000E8', - "eth": '\U000000F0', - "euml": '\U000000EB', - "frac12": '\U000000BD', - "frac14": '\U000000BC', - "frac34": '\U000000BE', - "gt": '\U0000003E', - "iacute": '\U000000ED', - "icirc": '\U000000EE', - "iexcl": '\U000000A1', - "igrave": '\U000000EC', - "iquest": '\U000000BF', - "iuml": '\U000000EF', - "laquo": '\U000000AB', - "lt": '\U0000003C', - "macr": '\U000000AF', - "micro": '\U000000B5', - "middot": '\U000000B7', - "nbsp": '\U000000A0', - "not": '\U000000AC', - "ntilde": '\U000000F1', - "oacute": '\U000000F3', - "ocirc": '\U000000F4', - "ograve": '\U000000F2', - "ordf": '\U000000AA', - "ordm": '\U000000BA', - "oslash": '\U000000F8', - "otilde": '\U000000F5', - "ouml": '\U000000F6', - "para": '\U000000B6', - "plusmn": '\U000000B1', - "pound": '\U000000A3', - "quot": '\U00000022', - "raquo": '\U000000BB', - "reg": '\U000000AE', - "sect": '\U000000A7', - "shy": '\U000000AD', - "sup1": '\U000000B9', - "sup2": '\U000000B2', - "sup3": '\U000000B3', - "szlig": '\U000000DF', - "thorn": '\U000000FE', - "times": '\U000000D7', - "uacute": '\U000000FA', - "ucirc": '\U000000FB', - "ugrave": '\U000000F9', - "uml": '\U000000A8', - "uuml": '\U000000FC', - "yacute": '\U000000FD', - "yen": '\U000000A5', - "yuml": '\U000000FF', -} - -// HTML entities that are two unicode codepoints. -var entity2 = map[string][2]rune{ - // TODO(nigeltao): Handle replacements that are wider than their names. 
- // "nLt;": {'\u226A', '\u20D2'}, - // "nGt;": {'\u226B', '\u20D2'}, - "NotEqualTilde;": {'\u2242', '\u0338'}, - "NotGreaterFullEqual;": {'\u2267', '\u0338'}, - "NotGreaterGreater;": {'\u226B', '\u0338'}, - "NotGreaterSlantEqual;": {'\u2A7E', '\u0338'}, - "NotHumpDownHump;": {'\u224E', '\u0338'}, - "NotHumpEqual;": {'\u224F', '\u0338'}, - "NotLeftTriangleBar;": {'\u29CF', '\u0338'}, - "NotLessLess;": {'\u226A', '\u0338'}, - "NotLessSlantEqual;": {'\u2A7D', '\u0338'}, - "NotNestedGreaterGreater;": {'\u2AA2', '\u0338'}, - "NotNestedLessLess;": {'\u2AA1', '\u0338'}, - "NotPrecedesEqual;": {'\u2AAF', '\u0338'}, - "NotRightTriangleBar;": {'\u29D0', '\u0338'}, - "NotSquareSubset;": {'\u228F', '\u0338'}, - "NotSquareSuperset;": {'\u2290', '\u0338'}, - "NotSubset;": {'\u2282', '\u20D2'}, - "NotSucceedsEqual;": {'\u2AB0', '\u0338'}, - "NotSucceedsTilde;": {'\u227F', '\u0338'}, - "NotSuperset;": {'\u2283', '\u20D2'}, - "ThickSpace;": {'\u205F', '\u200A'}, - "acE;": {'\u223E', '\u0333'}, - "bne;": {'\u003D', '\u20E5'}, - "bnequiv;": {'\u2261', '\u20E5'}, - "caps;": {'\u2229', '\uFE00'}, - "cups;": {'\u222A', '\uFE00'}, - "fjlig;": {'\u0066', '\u006A'}, - "gesl;": {'\u22DB', '\uFE00'}, - "gvertneqq;": {'\u2269', '\uFE00'}, - "gvnE;": {'\u2269', '\uFE00'}, - "lates;": {'\u2AAD', '\uFE00'}, - "lesg;": {'\u22DA', '\uFE00'}, - "lvertneqq;": {'\u2268', '\uFE00'}, - "lvnE;": {'\u2268', '\uFE00'}, - "nGg;": {'\u22D9', '\u0338'}, - "nGtv;": {'\u226B', '\u0338'}, - "nLl;": {'\u22D8', '\u0338'}, - "nLtv;": {'\u226A', '\u0338'}, - "nang;": {'\u2220', '\u20D2'}, - "napE;": {'\u2A70', '\u0338'}, - "napid;": {'\u224B', '\u0338'}, - "nbump;": {'\u224E', '\u0338'}, - "nbumpe;": {'\u224F', '\u0338'}, - "ncongdot;": {'\u2A6D', '\u0338'}, - "nedot;": {'\u2250', '\u0338'}, - "nesim;": {'\u2242', '\u0338'}, - "ngE;": {'\u2267', '\u0338'}, - "ngeqq;": {'\u2267', '\u0338'}, - "ngeqslant;": {'\u2A7E', '\u0338'}, - "nges;": {'\u2A7E', '\u0338'}, - "nlE;": {'\u2266', '\u0338'}, - "nleqq;": {'\u2266', '\u0338'}, - "nleqslant;": {'\u2A7D', '\u0338'}, - "nles;": {'\u2A7D', '\u0338'}, - "notinE;": {'\u22F9', '\u0338'}, - "notindot;": {'\u22F5', '\u0338'}, - "nparsl;": {'\u2AFD', '\u20E5'}, - "npart;": {'\u2202', '\u0338'}, - "npre;": {'\u2AAF', '\u0338'}, - "npreceq;": {'\u2AAF', '\u0338'}, - "nrarrc;": {'\u2933', '\u0338'}, - "nrarrw;": {'\u219D', '\u0338'}, - "nsce;": {'\u2AB0', '\u0338'}, - "nsubE;": {'\u2AC5', '\u0338'}, - "nsubset;": {'\u2282', '\u20D2'}, - "nsubseteqq;": {'\u2AC5', '\u0338'}, - "nsucceq;": {'\u2AB0', '\u0338'}, - "nsupE;": {'\u2AC6', '\u0338'}, - "nsupset;": {'\u2283', '\u20D2'}, - "nsupseteqq;": {'\u2AC6', '\u0338'}, - "nvap;": {'\u224D', '\u20D2'}, - "nvge;": {'\u2265', '\u20D2'}, - "nvgt;": {'\u003E', '\u20D2'}, - "nvle;": {'\u2264', '\u20D2'}, - "nvlt;": {'\u003C', '\u20D2'}, - "nvltrie;": {'\u22B4', '\u20D2'}, - "nvrtrie;": {'\u22B5', '\u20D2'}, - "nvsim;": {'\u223C', '\u20D2'}, - "race;": {'\u223D', '\u0331'}, - "smtes;": {'\u2AAC', '\uFE00'}, - "sqcaps;": {'\u2293', '\uFE00'}, - "sqcups;": {'\u2294', '\uFE00'}, - "varsubsetneq;": {'\u228A', '\uFE00'}, - "varsubsetneqq;": {'\u2ACB', '\uFE00'}, - "varsupsetneq;": {'\u228B', '\uFE00'}, - "varsupsetneqq;": {'\u2ACC', '\uFE00'}, - "vnsub;": {'\u2282', '\u20D2'}, - "vnsup;": {'\u2283', '\u20D2'}, - "vsubnE;": {'\u2ACB', '\uFE00'}, - "vsubne;": {'\u228A', '\uFE00'}, - "vsupnE;": {'\u2ACC', '\uFE00'}, - "vsupne;": {'\u228B', '\uFE00'}, -} diff --git a/vendor/golang.org/x/net/html/escape.go b/vendor/golang.org/x/net/html/escape.go deleted file mode 100644 
index d8561396..00000000 --- a/vendor/golang.org/x/net/html/escape.go +++ /dev/null @@ -1,258 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package html - -import ( - "bytes" - "strings" - "unicode/utf8" -) - -// These replacements permit compatibility with old numeric entities that -// assumed Windows-1252 encoding. -// https://html.spec.whatwg.org/multipage/syntax.html#consume-a-character-reference -var replacementTable = [...]rune{ - '\u20AC', // First entry is what 0x80 should be replaced with. - '\u0081', - '\u201A', - '\u0192', - '\u201E', - '\u2026', - '\u2020', - '\u2021', - '\u02C6', - '\u2030', - '\u0160', - '\u2039', - '\u0152', - '\u008D', - '\u017D', - '\u008F', - '\u0090', - '\u2018', - '\u2019', - '\u201C', - '\u201D', - '\u2022', - '\u2013', - '\u2014', - '\u02DC', - '\u2122', - '\u0161', - '\u203A', - '\u0153', - '\u009D', - '\u017E', - '\u0178', // Last entry is 0x9F. - // 0x00->'\uFFFD' is handled programmatically. - // 0x0D->'\u000D' is a no-op. -} - -// unescapeEntity reads an entity like "&lt;" from b[src:] and writes the -// corresponding "<" to b[dst:], returning the incremented dst and src cursors. -// Precondition: b[src] == '&' && dst <= src. -// attribute should be true if parsing an attribute value. -func unescapeEntity(b []byte, dst, src int, attribute bool) (dst1, src1 int) { - // https://html.spec.whatwg.org/multipage/syntax.html#consume-a-character-reference - - // i starts at 1 because we already know that s[0] == '&'. - i, s := 1, b[src:] - - if len(s) <= 1 { - b[dst] = b[src] - return dst + 1, src + 1 - } - - if s[i] == '#' { - if len(s) <= 3 { // We need to have at least "&#.". - b[dst] = b[src] - return dst + 1, src + 1 - } - i++ - c := s[i] - hex := false - if c == 'x' || c == 'X' { - hex = true - i++ - } - - x := '\x00' - for i < len(s) { - c = s[i] - i++ - if hex { - if '0' <= c && c <= '9' { - x = 16*x + rune(c) - '0' - continue - } else if 'a' <= c && c <= 'f' { - x = 16*x + rune(c) - 'a' + 10 - continue - } else if 'A' <= c && c <= 'F' { - x = 16*x + rune(c) - 'A' + 10 - continue - } - } else if '0' <= c && c <= '9' { - x = 10*x + rune(c) - '0' - continue - } - if c != ';' { - i-- - } - break - } - - if i <= 3 { // No characters matched. - b[dst] = b[src] - return dst + 1, src + 1 - } - - if 0x80 <= x && x <= 0x9F { - // Replace characters from Windows-1252 with UTF-8 equivalents. - x = replacementTable[x-0x80] - } else if x == 0 || (0xD800 <= x && x <= 0xDFFF) || x > 0x10FFFF { - // Replace invalid characters with the replacement character. - x = '\uFFFD' - } - - return dst + utf8.EncodeRune(b[dst:], x), src + i - } - - // Consume the maximum number of characters possible, with the - // consumed characters matching one of the named references. - - for i < len(s) { - c := s[i] - i++ - // Lower-cased characters are more common in entities, so we check for them first. - if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' { - continue - } - if c != ';' { - i-- - } - break - } - - entityName := string(s[1:i]) - if entityName == "" { - // No-op. - } else if attribute && entityName[len(entityName)-1] != ';' && len(s) > i && s[i] == '=' { - // No-op.
- } else if x := entity[entityName]; x != 0 { - return dst + utf8.EncodeRune(b[dst:], x), src + i - } else if x := entity2[entityName]; x[0] != 0 { - dst1 := dst + utf8.EncodeRune(b[dst:], x[0]) - return dst1 + utf8.EncodeRune(b[dst1:], x[1]), src + i - } else if !attribute { - maxLen := len(entityName) - 1 - if maxLen > longestEntityWithoutSemicolon { - maxLen = longestEntityWithoutSemicolon - } - for j := maxLen; j > 1; j-- { - if x := entity[entityName[:j]]; x != 0 { - return dst + utf8.EncodeRune(b[dst:], x), src + j + 1 - } - } - } - - dst1, src1 = dst+i, src+i - copy(b[dst:dst1], b[src:src1]) - return dst1, src1 -} - -// unescape unescapes b's entities in-place, so that "a&lt;b" becomes "a<b". - // attribute should be true if parsing an attribute value. -func unescape(b []byte, attribute bool) []byte { - for i, c := range b { - if c == '&' { - dst, src := unescapeEntity(b, i, i, attribute) - for src < len(b) { - c := b[src] - if c == '&' { - dst, src = unescapeEntity(b, dst, src, attribute) - } else { - b[dst] = c - dst, src = dst+1, src+1 - } - } - return b[0:dst] - } - } - return b -} - -// lower lower-cases the A-Z bytes in b in-place, so that "aBc" becomes "abc". -func lower(b []byte) []byte { - for i, c := range b { - if 'A' <= c && c <= 'Z' { - b[i] = c + 'a' - 'A' - } - } - return b -} - -const escapedChars = "&'<>\"\r" - -func escape(w writer, s string) error { - i := strings.IndexAny(s, escapedChars) - for i != -1 { - if _, err := w.WriteString(s[:i]); err != nil { - return err - } - var esc string - switch s[i] { - case '&': - esc = "&amp;" - case '\'': - // "&#39;" is shorter than "&apos;" and apos was not in HTML until HTML5. - esc = "&#39;" - case '<': - esc = "&lt;" - case '>': - esc = "&gt;" - case '"': - // "&#34;" is shorter than "&quot;". - esc = "&#34;" - case '\r': - esc = "&#13;" - default: - panic("unrecognized escape character") - } - s = s[i+1:] - if _, err := w.WriteString(esc); err != nil { - return err - } - i = strings.IndexAny(s, escapedChars) - } - _, err := w.WriteString(s) - return err -} - -// EscapeString escapes special characters like "<" to become "&lt;". It -// escapes only five such characters: <, >, &, ' and ". -// UnescapeString(EscapeString(s)) == s always holds, but the converse isn't -// always true. -func EscapeString(s string) string { - if strings.IndexAny(s, escapedChars) == -1 { - return s - } - var buf bytes.Buffer - escape(&buf, s) - return buf.String() -} - -// UnescapeString unescapes entities like "&lt;" to become "<". It unescapes a -// larger range of entities than EscapeString escapes. For example, "&aacute;" -// unescapes to "á", as does "&#225;" and "&#xE1;". -// UnescapeString(EscapeString(s)) == s always holds, but the converse isn't -// always true. -func UnescapeString(s string) string { - for _, c := range s { - if c == '&' { - return string(unescape([]byte(s), false)) - } - } - return s -} diff --git a/vendor/golang.org/x/net/html/foreign.go b/vendor/golang.org/x/net/html/foreign.go deleted file mode 100644 index 9da9e9dc..00000000 --- a/vendor/golang.org/x/net/html/foreign.go +++ /dev/null @@ -1,222 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package html - -import ( - "strings" -) - -func adjustAttributeNames(aa []Attribute, nameMap map[string]string) { - for i := range aa { - if newName, ok := nameMap[aa[i].Key]; ok { - aa[i].Key = newName - } - } -} - -func adjustForeignAttributes(aa []Attribute) { - for i, a := range aa { - if a.Key == "" || a.Key[0] != 'x' { - continue - } - switch a.Key { - case "xlink:actuate", "xlink:arcrole", "xlink:href", "xlink:role", "xlink:show", - "xlink:title", "xlink:type", "xml:base", "xml:lang", "xml:space", "xmlns:xlink": - j := strings.Index(a.Key, ":") - aa[i].Namespace = a.Key[:j] - aa[i].Key = a.Key[j+1:] - } - } -} - -func htmlIntegrationPoint(n *Node) bool { - if n.Type != ElementNode { - return false - } - switch n.Namespace { - case "math": - if n.Data == "annotation-xml" { - for _, a := range n.Attr { - if a.Key == "encoding" { - val := strings.ToLower(a.Val) - if val == "text/html" || val == "application/xhtml+xml" { - return true - } - } - } - } - case "svg": - switch n.Data { - case "desc", "foreignObject", "title": - return true - } - } - return false -} - -func mathMLTextIntegrationPoint(n *Node) bool { - if n.Namespace != "math" { - return false - } - switch n.Data { - case "mi", "mo", "mn", "ms", "mtext": - return true - } - return false -} - -// Section 12.2.6.5.
-var breakout = map[string]bool{ - "b": true, - "big": true, - "blockquote": true, - "body": true, - "br": true, - "center": true, - "code": true, - "dd": true, - "div": true, - "dl": true, - "dt": true, - "em": true, - "embed": true, - "h1": true, - "h2": true, - "h3": true, - "h4": true, - "h5": true, - "h6": true, - "head": true, - "hr": true, - "i": true, - "img": true, - "li": true, - "listing": true, - "menu": true, - "meta": true, - "nobr": true, - "ol": true, - "p": true, - "pre": true, - "ruby": true, - "s": true, - "small": true, - "span": true, - "strong": true, - "strike": true, - "sub": true, - "sup": true, - "table": true, - "tt": true, - "u": true, - "ul": true, - "var": true, -} - -// Section 12.2.6.5. -var svgTagNameAdjustments = map[string]string{ - "altglyph": "altGlyph", - "altglyphdef": "altGlyphDef", - "altglyphitem": "altGlyphItem", - "animatecolor": "animateColor", - "animatemotion": "animateMotion", - "animatetransform": "animateTransform", - "clippath": "clipPath", - "feblend": "feBlend", - "fecolormatrix": "feColorMatrix", - "fecomponenttransfer": "feComponentTransfer", - "fecomposite": "feComposite", - "feconvolvematrix": "feConvolveMatrix", - "fediffuselighting": "feDiffuseLighting", - "fedisplacementmap": "feDisplacementMap", - "fedistantlight": "feDistantLight", - "feflood": "feFlood", - "fefunca": "feFuncA", - "fefuncb": "feFuncB", - "fefuncg": "feFuncG", - "fefuncr": "feFuncR", - "fegaussianblur": "feGaussianBlur", - "feimage": "feImage", - "femerge": "feMerge", - "femergenode": "feMergeNode", - "femorphology": "feMorphology", - "feoffset": "feOffset", - "fepointlight": "fePointLight", - "fespecularlighting": "feSpecularLighting", - "fespotlight": "feSpotLight", - "fetile": "feTile", - "feturbulence": "feTurbulence", - "foreignobject": "foreignObject", - "glyphref": "glyphRef", - "lineargradient": "linearGradient", - "radialgradient": "radialGradient", - "textpath": "textPath", -} - -// Section 12.2.6.1 -var mathMLAttributeAdjustments = map[string]string{ - "definitionurl": "definitionURL", -} - -var svgAttributeAdjustments = map[string]string{ - "attributename": "attributeName", - "attributetype": "attributeType", - "basefrequency": "baseFrequency", - "baseprofile": "baseProfile", - "calcmode": "calcMode", - "clippathunits": "clipPathUnits", - "diffuseconstant": "diffuseConstant", - "edgemode": "edgeMode", - "filterunits": "filterUnits", - "glyphref": "glyphRef", - "gradienttransform": "gradientTransform", - "gradientunits": "gradientUnits", - "kernelmatrix": "kernelMatrix", - "kernelunitlength": "kernelUnitLength", - "keypoints": "keyPoints", - "keysplines": "keySplines", - "keytimes": "keyTimes", - "lengthadjust": "lengthAdjust", - "limitingconeangle": "limitingConeAngle", - "markerheight": "markerHeight", - "markerunits": "markerUnits", - "markerwidth": "markerWidth", - "maskcontentunits": "maskContentUnits", - "maskunits": "maskUnits", - "numoctaves": "numOctaves", - "pathlength": "pathLength", - "patterncontentunits": "patternContentUnits", - "patterntransform": "patternTransform", - "patternunits": "patternUnits", - "pointsatx": "pointsAtX", - "pointsaty": "pointsAtY", - "pointsatz": "pointsAtZ", - "preservealpha": "preserveAlpha", - "preserveaspectratio": "preserveAspectRatio", - "primitiveunits": "primitiveUnits", - "refx": "refX", - "refy": "refY", - "repeatcount": "repeatCount", - "repeatdur": "repeatDur", - "requiredextensions": "requiredExtensions", - "requiredfeatures": "requiredFeatures", - "specularconstant": "specularConstant", - 
"specularexponent": "specularExponent", - "spreadmethod": "spreadMethod", - "startoffset": "startOffset", - "stddeviation": "stdDeviation", - "stitchtiles": "stitchTiles", - "surfacescale": "surfaceScale", - "systemlanguage": "systemLanguage", - "tablevalues": "tableValues", - "targetx": "targetX", - "targety": "targetY", - "textlength": "textLength", - "viewbox": "viewBox", - "viewtarget": "viewTarget", - "xchannelselector": "xChannelSelector", - "ychannelselector": "yChannelSelector", - "zoomandpan": "zoomAndPan", -} diff --git a/vendor/golang.org/x/net/html/node.go b/vendor/golang.org/x/net/html/node.go deleted file mode 100644 index 1350eef2..00000000 --- a/vendor/golang.org/x/net/html/node.go +++ /dev/null @@ -1,225 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package html - -import ( - "golang.org/x/net/html/atom" -) - -// A NodeType is the type of a Node. -type NodeType uint32 - -const ( - ErrorNode NodeType = iota - TextNode - DocumentNode - ElementNode - CommentNode - DoctypeNode - // RawNode nodes are not returned by the parser, but can be part of the - // Node tree passed to func Render to insert raw HTML (without escaping). - // If so, this package makes no guarantee that the rendered HTML is secure - // (from e.g. Cross Site Scripting attacks) or well-formed. - RawNode - scopeMarkerNode -) - -// Section 12.2.4.3 says "The markers are inserted when entering applet, -// object, marquee, template, td, th, and caption elements, and are used -// to prevent formatting from "leaking" into applet, object, marquee, -// template, td, th, and caption elements". -var scopeMarker = Node{Type: scopeMarkerNode} - -// A Node consists of a NodeType and some Data (tag name for element nodes, -// content for text) and are part of a tree of Nodes. Element nodes may also -// have a Namespace and contain a slice of Attributes. Data is unescaped, so -// that it looks like "a 0 { - return (*s)[i-1] - } - return nil -} - -// index returns the index of the top-most occurrence of n in the stack, or -1 -// if n is not present. -func (s *nodeStack) index(n *Node) int { - for i := len(*s) - 1; i >= 0; i-- { - if (*s)[i] == n { - return i - } - } - return -1 -} - -// contains returns whether a is within s. -func (s *nodeStack) contains(a atom.Atom) bool { - for _, n := range *s { - if n.DataAtom == a && n.Namespace == "" { - return true - } - } - return false -} - -// insert inserts a node at the given index. -func (s *nodeStack) insert(i int, n *Node) { - (*s) = append(*s, nil) - copy((*s)[i+1:], (*s)[i:]) - (*s)[i] = n -} - -// remove removes a node from the stack. It is a no-op if n is not present. -func (s *nodeStack) remove(n *Node) { - i := s.index(n) - if i == -1 { - return - } - copy((*s)[i:], (*s)[i+1:]) - j := len(*s) - 1 - (*s)[j] = nil - *s = (*s)[:j] -} - -type insertionModeStack []insertionMode - -func (s *insertionModeStack) pop() (im insertionMode) { - i := len(*s) - im = (*s)[i-1] - *s = (*s)[:i-1] - return im -} - -func (s *insertionModeStack) top() insertionMode { - if i := len(*s); i > 0 { - return (*s)[i-1] - } - return nil -} diff --git a/vendor/golang.org/x/net/html/parse.go b/vendor/golang.org/x/net/html/parse.go deleted file mode 100644 index 038941d7..00000000 --- a/vendor/golang.org/x/net/html/parse.go +++ /dev/null @@ -1,2460 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package html - -import ( - "errors" - "fmt" - "io" - "strings" - - a "golang.org/x/net/html/atom" -) - -// A parser implements the HTML5 parsing algorithm: -// https://html.spec.whatwg.org/multipage/syntax.html#tree-construction -type parser struct { - // tokenizer provides the tokens for the parser. - tokenizer *Tokenizer - // tok is the most recently read token. - tok Token - // Self-closing tags like <br/>
    are treated as start tags, except that - // hasSelfClosingToken is set while they are being processed. - hasSelfClosingToken bool - // doc is the document root element. - doc *Node - // The stack of open elements (section 12.2.4.2) and active formatting - // elements (section 12.2.4.3). - oe, afe nodeStack - // Element pointers (section 12.2.4.4). - head, form *Node - // Other parsing state flags (section 12.2.4.5). - scripting, framesetOK bool - // The stack of template insertion modes - templateStack insertionModeStack - // im is the current insertion mode. - im insertionMode - // originalIM is the insertion mode to go back to after completing a text - // or inTableText insertion mode. - originalIM insertionMode - // fosterParenting is whether new elements should be inserted according to - // the foster parenting rules (section 12.2.6.1). - fosterParenting bool - // quirks is whether the parser is operating in "quirks mode." - quirks bool - // fragment is whether the parser is parsing an HTML fragment. - fragment bool - // context is the context element when parsing an HTML fragment - // (section 12.4). - context *Node -} - -func (p *parser) top() *Node { - if n := p.oe.top(); n != nil { - return n - } - return p.doc -} - -// Stop tags for use in popUntil. These come from section 12.2.4.2. -var ( - defaultScopeStopTags = map[string][]a.Atom{ - "": {a.Applet, a.Caption, a.Html, a.Table, a.Td, a.Th, a.Marquee, a.Object, a.Template}, - "math": {a.AnnotationXml, a.Mi, a.Mn, a.Mo, a.Ms, a.Mtext}, - "svg": {a.Desc, a.ForeignObject, a.Title}, - } -) - -type scope int - -const ( - defaultScope scope = iota - listItemScope - buttonScope - tableScope - tableRowScope - tableBodyScope - selectScope -) - -// popUntil pops the stack of open elements at the highest element whose tag -// is in matchTags, provided there is no higher element in the scope's stop -// tags (as defined in section 12.2.4.2). It returns whether or not there was -// such an element. If there was not, popUntil leaves the stack unchanged. -// -// For example, the set of stop tags for table scope is: "html", "table". If -// the stack was: -// ["html", "body", "font", "table", "b", "i", "u"] -// then popUntil(tableScope, "font") would return false, but -// popUntil(tableScope, "i") would return true and the stack would become: -// ["html", "body", "font", "table", "b"] -// -// If an element's tag is in both the stop tags and matchTags, then the stack -// will be popped and the function returns true (provided, of course, there was -// no higher element in the stack that was also in the stop tags). For example, -// popUntil(tableScope, "table") returns true and leaves: -// ["html", "body", "font"] -func (p *parser) popUntil(s scope, matchTags ...a.Atom) bool { - if i := p.indexOfElementInScope(s, matchTags...); i != -1 { - p.oe = p.oe[:i] - return true - } - return false -} - -// indexOfElementInScope returns the index in p.oe of the highest element whose -// tag is in matchTags that is in scope. If no matching element is in scope, it -// returns -1. -func (p *parser) indexOfElementInScope(s scope, matchTags ...a.Atom) int { - for i := len(p.oe) - 1; i >= 0; i-- { - tagAtom := p.oe[i].DataAtom - if p.oe[i].Namespace == "" { - for _, t := range matchTags { - if t == tagAtom { - return i - } - } - switch s { - case defaultScope: - // No-op. 
- case listItemScope: - if tagAtom == a.Ol || tagAtom == a.Ul { - return -1 - } - case buttonScope: - if tagAtom == a.Button { - return -1 - } - case tableScope: - if tagAtom == a.Html || tagAtom == a.Table || tagAtom == a.Template { - return -1 - } - case selectScope: - if tagAtom != a.Optgroup && tagAtom != a.Option { - return -1 - } - default: - panic("unreachable") - } - } - switch s { - case defaultScope, listItemScope, buttonScope: - for _, t := range defaultScopeStopTags[p.oe[i].Namespace] { - if t == tagAtom { - return -1 - } - } - } - } - return -1 -} - -// elementInScope is like popUntil, except that it doesn't modify the stack of -// open elements. -func (p *parser) elementInScope(s scope, matchTags ...a.Atom) bool { - return p.indexOfElementInScope(s, matchTags...) != -1 -} - -// clearStackToContext pops elements off the stack of open elements until a -// scope-defined element is found. -func (p *parser) clearStackToContext(s scope) { - for i := len(p.oe) - 1; i >= 0; i-- { - tagAtom := p.oe[i].DataAtom - switch s { - case tableScope: - if tagAtom == a.Html || tagAtom == a.Table || tagAtom == a.Template { - p.oe = p.oe[:i+1] - return - } - case tableRowScope: - if tagAtom == a.Html || tagAtom == a.Tr || tagAtom == a.Template { - p.oe = p.oe[:i+1] - return - } - case tableBodyScope: - if tagAtom == a.Html || tagAtom == a.Tbody || tagAtom == a.Tfoot || tagAtom == a.Thead || tagAtom == a.Template { - p.oe = p.oe[:i+1] - return - } - default: - panic("unreachable") - } - } -} - -// parseGenericRawTextElements implements the generic raw text element parsing -// algorithm defined in 12.2.6.2. -// https://html.spec.whatwg.org/multipage/parsing.html#parsing-elements-that-contain-only-text -// TODO: Since both RAWTEXT and RCDATA states are treated as tokenizer's part -// officially, need to make tokenizer consider both states. -func (p *parser) parseGenericRawTextElement() { - p.addElement() - p.originalIM = p.im - p.im = textIM -} - -// generateImpliedEndTags pops nodes off the stack of open elements as long as -// the top node has a tag name of dd, dt, li, optgroup, option, p, rb, rp, rt or rtc. -// If exceptions are specified, nodes with that name will not be popped off. -func (p *parser) generateImpliedEndTags(exceptions ...string) { - var i int -loop: - for i = len(p.oe) - 1; i >= 0; i-- { - n := p.oe[i] - if n.Type != ElementNode { - break - } - switch n.DataAtom { - case a.Dd, a.Dt, a.Li, a.Optgroup, a.Option, a.P, a.Rb, a.Rp, a.Rt, a.Rtc: - for _, except := range exceptions { - if n.Data == except { - break loop - } - } - continue - } - break - } - - p.oe = p.oe[:i+1] -} - -// addChild adds a child node n to the top element, and pushes n onto the stack -// of open elements if it is an element node. -func (p *parser) addChild(n *Node) { - if p.shouldFosterParent() { - p.fosterParent(n) - } else { - p.top().AppendChild(n) - } - - if n.Type == ElementNode { - p.oe = append(p.oe, n) - } -} - -// shouldFosterParent returns whether the next node to be added should be -// foster parented. -func (p *parser) shouldFosterParent() bool { - if p.fosterParenting { - switch p.top().DataAtom { - case a.Table, a.Tbody, a.Tfoot, a.Thead, a.Tr: - return true - } - } - return false -} - -// fosterParent adds a child node according to the foster parenting rules. -// Section 12.2.6.1, "foster parenting". 
-func (p *parser) fosterParent(n *Node) {
-	var table, parent, prev, template *Node
-	var i int
-	for i = len(p.oe) - 1; i >= 0; i-- {
-		if p.oe[i].DataAtom == a.Table {
-			table = p.oe[i]
-			break
-		}
-	}
-
-	var j int
-	for j = len(p.oe) - 1; j >= 0; j-- {
-		if p.oe[j].DataAtom == a.Template {
-			template = p.oe[j]
-			break
-		}
-	}
-
-	if template != nil && (table == nil || j > i) {
-		template.AppendChild(n)
-		return
-	}
-
-	if table == nil {
-		// The foster parent is the html element.
-		parent = p.oe[0]
-	} else {
-		parent = table.Parent
-	}
-	if parent == nil {
-		parent = p.oe[i-1]
-	}
-
-	if table != nil {
-		prev = table.PrevSibling
-	} else {
-		prev = parent.LastChild
-	}
-	if prev != nil && prev.Type == TextNode && n.Type == TextNode {
-		prev.Data += n.Data
-		return
-	}
-
-	parent.InsertBefore(n, table)
-}
-
-// addText adds text to the preceding node if it is a text node, or else it
-// calls addChild with a new text node.
-func (p *parser) addText(text string) {
-	if text == "" {
-		return
-	}
-
-	if p.shouldFosterParent() {
-		p.fosterParent(&Node{
-			Type: TextNode,
-			Data: text,
-		})
-		return
-	}
-
-	t := p.top()
-	if n := t.LastChild; n != nil && n.Type == TextNode {
-		n.Data += text
-		return
-	}
-	p.addChild(&Node{
-		Type: TextNode,
-		Data: text,
-	})
-}
-
-// addElement adds a child element based on the current token.
-func (p *parser) addElement() {
-	p.addChild(&Node{
-		Type:     ElementNode,
-		DataAtom: p.tok.DataAtom,
-		Data:     p.tok.Data,
-		Attr:     p.tok.Attr,
-	})
-}
-
-// Section 12.2.4.3.
-func (p *parser) addFormattingElement() {
-	tagAtom, attr := p.tok.DataAtom, p.tok.Attr
-	p.addElement()
-
-	// Implement the Noah's Ark clause, but with three per family instead of two.
-	identicalElements := 0
-findIdenticalElements:
-	for i := len(p.afe) - 1; i >= 0; i-- {
-		n := p.afe[i]
-		if n.Type == scopeMarkerNode {
-			break
-		}
-		if n.Type != ElementNode {
-			continue
-		}
-		if n.Namespace != "" {
-			continue
-		}
-		if n.DataAtom != tagAtom {
-			continue
-		}
-		if len(n.Attr) != len(attr) {
-			continue
-		}
-	compareAttributes:
-		for _, t0 := range n.Attr {
-			for _, t1 := range attr {
-				if t0.Key == t1.Key && t0.Namespace == t1.Namespace && t0.Val == t1.Val {
-					// Found a match for this attribute, continue with the next attribute.
-					continue compareAttributes
-				}
-			}
-			// If we get here, there is no attribute that matches a.
-			// Therefore the element is not identical to the new one.
-			continue findIdenticalElements
-		}
-
-		identicalElements++
-		if identicalElements >= 3 {
-			p.afe.remove(n)
-		}
-	}
-
-	p.afe = append(p.afe, p.top())
-}
-
-// Section 12.2.4.3.
-func (p *parser) clearActiveFormattingElements() {
-	for {
-		if n := p.afe.pop(); len(p.afe) == 0 || n.Type == scopeMarkerNode {
-			return
-		}
-	}
-}
-
-// Section 12.2.4.3.
-func (p *parser) reconstructActiveFormattingElements() {
-	n := p.afe.top()
-	if n == nil {
-		return
-	}
-	if n.Type == scopeMarkerNode || p.oe.index(n) != -1 {
-		return
-	}
-	i := len(p.afe) - 1
-	for n.Type != scopeMarkerNode && p.oe.index(n) == -1 {
-		if i == 0 {
-			i = -1
-			break
-		}
-		i--
-		n = p.afe[i]
-	}
-	for {
-		i++
-		clone := p.afe[i].clone()
-		p.addChild(clone)
-		p.afe[i] = clone
-		if i == len(p.afe)-1 {
-			break
-		}
-	}
-}
-
-// Section 12.2.5.
-func (p *parser) acknowledgeSelfClosingTag() {
-	p.hasSelfClosingToken = false
-}
-
-// An insertion mode (section 12.2.4.1) is the state transition function from
-// a particular state in the HTML5 parser's state machine. It updates the
-// parser's fields depending on parser.tok (where ErrorToken means EOF).
-// It returns whether the token was consumed.
-type insertionMode func(*parser) bool
-
-// setOriginalIM sets the insertion mode to return to after completing a text or
-// inTableText insertion mode.
-// Section 12.2.4.1, "using the rules for".
-func (p *parser) setOriginalIM() {
-	if p.originalIM != nil {
-		panic("html: bad parser state: originalIM was set twice")
-	}
-	p.originalIM = p.im
-}
-
-// Section 12.2.4.1, "reset the insertion mode".
-func (p *parser) resetInsertionMode() {
-	for i := len(p.oe) - 1; i >= 0; i-- {
-		n := p.oe[i]
-		last := i == 0
-		if last && p.context != nil {
-			n = p.context
-		}
-
-		switch n.DataAtom {
-		case a.Select:
-			if !last {
-				for ancestor, first := n, p.oe[0]; ancestor != first; {
-					ancestor = p.oe[p.oe.index(ancestor)-1]
-					switch ancestor.DataAtom {
-					case a.Template:
-						p.im = inSelectIM
-						return
-					case a.Table:
-						p.im = inSelectInTableIM
-						return
-					}
-				}
-			}
-			p.im = inSelectIM
-		case a.Td, a.Th:
-			// TODO: remove this divergence from the HTML5 spec.
-			//
-			// See https://bugs.chromium.org/p/chromium/issues/detail?id=829668
-			p.im = inCellIM
-		case a.Tr:
-			p.im = inRowIM
-		case a.Tbody, a.Thead, a.Tfoot:
-			p.im = inTableBodyIM
-		case a.Caption:
-			p.im = inCaptionIM
-		case a.Colgroup:
-			p.im = inColumnGroupIM
-		case a.Table:
-			p.im = inTableIM
-		case a.Template:
-			// TODO: remove this divergence from the HTML5 spec.
-			if n.Namespace != "" {
-				continue
-			}
-			p.im = p.templateStack.top()
-		case a.Head:
-			// TODO: remove this divergence from the HTML5 spec.
-			//
-			// See https://bugs.chromium.org/p/chromium/issues/detail?id=829668
-			p.im = inHeadIM
-		case a.Body:
-			p.im = inBodyIM
-		case a.Frameset:
-			p.im = inFramesetIM
-		case a.Html:
-			if p.head == nil {
-				p.im = beforeHeadIM
-			} else {
-				p.im = afterHeadIM
-			}
-		default:
-			if last {
-				p.im = inBodyIM
-				return
-			}
-			continue
-		}
-		return
-	}
-}
-
-const whitespace = " \t\r\n\f"
-
-// Section 12.2.6.4.1.
-func initialIM(p *parser) bool {
-	switch p.tok.Type {
-	case TextToken:
-		p.tok.Data = strings.TrimLeft(p.tok.Data, whitespace)
-		if len(p.tok.Data) == 0 {
-			// It was all whitespace, so ignore it.
-			return true
-		}
-	case CommentToken:
-		p.doc.AppendChild(&Node{
-			Type: CommentNode,
-			Data: p.tok.Data,
-		})
-		return true
-	case DoctypeToken:
-		n, quirks := parseDoctype(p.tok.Data)
-		p.doc.AppendChild(n)
-		p.quirks = quirks
-		p.im = beforeHTMLIM
-		return true
-	}
-	p.quirks = true
-	p.im = beforeHTMLIM
-	return false
-}
-
-// Section 12.2.6.4.2.
-func beforeHTMLIM(p *parser) bool {
-	switch p.tok.Type {
-	case DoctypeToken:
-		// Ignore the token.
-		return true
-	case TextToken:
-		p.tok.Data = strings.TrimLeft(p.tok.Data, whitespace)
-		if len(p.tok.Data) == 0 {
-			// It was all whitespace, so ignore it.
-			return true
-		}
-	case StartTagToken:
-		if p.tok.DataAtom == a.Html {
-			p.addElement()
-			p.im = beforeHeadIM
-			return true
-		}
-	case EndTagToken:
-		switch p.tok.DataAtom {
-		case a.Head, a.Body, a.Html, a.Br:
-			p.parseImpliedToken(StartTagToken, a.Html, a.Html.String())
-			return false
-		default:
-			// Ignore the token.
-			return true
-		}
-	case CommentToken:
-		p.doc.AppendChild(&Node{
-			Type: CommentNode,
-			Data: p.tok.Data,
-		})
-		return true
-	}
-	p.parseImpliedToken(StartTagToken, a.Html, a.Html.String())
-	return false
-}
-
-// Section 12.2.6.4.3.
-func beforeHeadIM(p *parser) bool {
-	switch p.tok.Type {
-	case TextToken:
-		p.tok.Data = strings.TrimLeft(p.tok.Data, whitespace)
-		if len(p.tok.Data) == 0 {
-			// It was all whitespace, so ignore it.
-			return true
-		}
-	case StartTagToken:
-		switch p.tok.DataAtom {
-		case a.Head:
-			p.addElement()
-			p.head = p.top()
-			p.im = inHeadIM
-			return true
-		case a.Html:
-			return inBodyIM(p)
-		}
-	case EndTagToken:
-		switch p.tok.DataAtom {
-		case a.Head, a.Body, a.Html, a.Br:
-			p.parseImpliedToken(StartTagToken, a.Head, a.Head.String())
-			return false
-		default:
-			// Ignore the token.
-			return true
-		}
-	case CommentToken:
-		p.addChild(&Node{
-			Type: CommentNode,
-			Data: p.tok.Data,
-		})
-		return true
-	case DoctypeToken:
-		// Ignore the token.
-		return true
-	}
-
-	p.parseImpliedToken(StartTagToken, a.Head, a.Head.String())
-	return false
-}
-
-// Section 12.2.6.4.4.
-func inHeadIM(p *parser) bool {
-	switch p.tok.Type {
-	case TextToken:
-		s := strings.TrimLeft(p.tok.Data, whitespace)
-		if len(s) < len(p.tok.Data) {
-			// Add the initial whitespace to the current node.
-			p.addText(p.tok.Data[:len(p.tok.Data)-len(s)])
-			if s == "" {
-				return true
-			}
-			p.tok.Data = s
-		}
-	case StartTagToken:
-		switch p.tok.DataAtom {
-		case a.Html:
-			return inBodyIM(p)
-		case a.Base, a.Basefont, a.Bgsound, a.Link, a.Meta:
-			p.addElement()
-			p.oe.pop()
-			p.acknowledgeSelfClosingTag()
-			return true
-		case a.Noscript:
-			if p.scripting {
-				p.parseGenericRawTextElement()
-				return true
-			}
-			p.addElement()
-			p.im = inHeadNoscriptIM
-			// Don't let the tokenizer go into raw text mode when scripting is disabled.
-			p.tokenizer.NextIsNotRawText()
-			return true
-		case a.Script, a.Title:
-			p.addElement()
-			p.setOriginalIM()
-			p.im = textIM
-			return true
-		case a.Noframes, a.Style:
-			p.parseGenericRawTextElement()
-			return true
-		case a.Head:
-			// Ignore the token.
-			return true
-		case a.Template:
-			// TODO: remove this divergence from the HTML5 spec.
-			//
-			// We don't handle all of the corner cases when mixing foreign
-			// content (i.e. <math> or <svg>) with <template> tag.
-		case a.Template:
-			return inHeadIM(p)
-		default:
-			// Ignore the token.
-			return true
-		}
-	case CommentToken:
-		p.addChild(&Node{
-			Type: CommentNode,
-			Data: p.tok.Data,
-		})
-		return true
-	case DoctypeToken:
-		// Ignore the token.
-		return true
-	}
-
-	p.parseImpliedToken(StartTagToken, a.Body, a.Body.String())
-	p.framesetOK = true
-	return false
-}
-
-// copyAttributes copies attributes of src not found on dst to dst.
-func copyAttributes(dst *Node, src Token) {
-	if len(src.Attr) == 0 {
-		return
-	}
-	attr := map[string]string{}
-	for _, t := range dst.Attr {
-		attr[t.Key] = t.Val
-	}
-	for _, t := range src.Attr {
-		if _, ok := attr[t.Key]; !ok {
-			dst.Attr = append(dst.Attr, t)
-			attr[t.Key] = t.Val
-		}
-	}
-}
-
-// Section 12.2.6.4.7.
-func inBodyIM(p *parser) bool {
-	switch p.tok.Type {
-	case TextToken:
-		d := p.tok.Data
-		switch n := p.oe.top(); n.DataAtom {
-		case a.Pre, a.Listing:
-			if n.FirstChild == nil {
-				// Ignore a newline at the start of a <pre> block.
    -				if d != "" && d[0] == '\r' {
    -					d = d[1:]
    -				}
    -				if d != "" && d[0] == '\n' {
    -					d = d[1:]
    -				}
    -			}
    -		}
    -		d = strings.Replace(d, "\x00", "", -1)
    -		if d == "" {
    -			return true
    -		}
    -		p.reconstructActiveFormattingElements()
    -		p.addText(d)
    -		if p.framesetOK && strings.TrimLeft(d, whitespace) != "" {
    -			// There were non-whitespace characters inserted.
    -			p.framesetOK = false
    -		}
    -	case StartTagToken:
    -		switch p.tok.DataAtom {
    -		case a.Html:
    -			if p.oe.contains(a.Template) {
    -				return true
    -			}
    -			copyAttributes(p.oe[0], p.tok)
    -		case a.Base, a.Basefont, a.Bgsound, a.Link, a.Meta, a.Noframes, a.Script, a.Style, a.Template, a.Title:
    -			return inHeadIM(p)
    -		case a.Body:
    -			if p.oe.contains(a.Template) {
    -				return true
    -			}
    -			if len(p.oe) >= 2 {
    -				body := p.oe[1]
    -				if body.Type == ElementNode && body.DataAtom == a.Body {
    -					p.framesetOK = false
    -					copyAttributes(body, p.tok)
    -				}
    -			}
    -		case a.Frameset:
    -			if !p.framesetOK || len(p.oe) < 2 || p.oe[1].DataAtom != a.Body {
    -				// Ignore the token.
    -				return true
    -			}
    -			body := p.oe[1]
    -			if body.Parent != nil {
    -				body.Parent.RemoveChild(body)
    -			}
    -			p.oe = p.oe[:1]
    -			p.addElement()
    -			p.im = inFramesetIM
    -			return true
    -		case a.Address, a.Article, a.Aside, a.Blockquote, a.Center, a.Details, a.Dialog, a.Dir, a.Div, a.Dl, a.Fieldset, a.Figcaption, a.Figure, a.Footer, a.Header, a.Hgroup, a.Main, a.Menu, a.Nav, a.Ol, a.P, a.Section, a.Summary, a.Ul:
    -			p.popUntil(buttonScope, a.P)
    -			p.addElement()
    -		case a.H1, a.H2, a.H3, a.H4, a.H5, a.H6:
    -			p.popUntil(buttonScope, a.P)
    -			switch n := p.top(); n.DataAtom {
    -			case a.H1, a.H2, a.H3, a.H4, a.H5, a.H6:
    -				p.oe.pop()
    -			}
    -			p.addElement()
    -		case a.Pre, a.Listing:
    -			p.popUntil(buttonScope, a.P)
    -			p.addElement()
    -			// The newline, if any, will be dealt with by the TextToken case.
    -			p.framesetOK = false
    -		case a.Form:
    -			if p.form != nil && !p.oe.contains(a.Template) {
    -				// Ignore the token
    -				return true
    -			}
    -			p.popUntil(buttonScope, a.P)
    -			p.addElement()
    -			if !p.oe.contains(a.Template) {
    -				p.form = p.top()
    -			}
    -		case a.Li:
    -			p.framesetOK = false
    -			for i := len(p.oe) - 1; i >= 0; i-- {
    -				node := p.oe[i]
    -				switch node.DataAtom {
    -				case a.Li:
    -					p.oe = p.oe[:i]
    -				case a.Address, a.Div, a.P:
    -					continue
    -				default:
    -					if !isSpecialElement(node) {
    -						continue
    -					}
    -				}
    -				break
    -			}
    -			p.popUntil(buttonScope, a.P)
    -			p.addElement()
    -		case a.Dd, a.Dt:
    -			p.framesetOK = false
    -			for i := len(p.oe) - 1; i >= 0; i-- {
    -				node := p.oe[i]
    -				switch node.DataAtom {
    -				case a.Dd, a.Dt:
    -					p.oe = p.oe[:i]
    -				case a.Address, a.Div, a.P:
    -					continue
    -				default:
    -					if !isSpecialElement(node) {
    -						continue
    -					}
    -				}
    -				break
    -			}
    -			p.popUntil(buttonScope, a.P)
    -			p.addElement()
    -		case a.Plaintext:
    -			p.popUntil(buttonScope, a.P)
    -			p.addElement()
    -		case a.Button:
    -			p.popUntil(defaultScope, a.Button)
    -			p.reconstructActiveFormattingElements()
    -			p.addElement()
    -			p.framesetOK = false
    -		case a.A:
    -			for i := len(p.afe) - 1; i >= 0 && p.afe[i].Type != scopeMarkerNode; i-- {
    -				if n := p.afe[i]; n.Type == ElementNode && n.DataAtom == a.A {
    -					p.inBodyEndTagFormatting(a.A, "a")
    -					p.oe.remove(n)
    -					p.afe.remove(n)
    -					break
    -				}
    -			}
    -			p.reconstructActiveFormattingElements()
    -			p.addFormattingElement()
    -		case a.B, a.Big, a.Code, a.Em, a.Font, a.I, a.S, a.Small, a.Strike, a.Strong, a.Tt, a.U:
    -			p.reconstructActiveFormattingElements()
    -			p.addFormattingElement()
    -		case a.Nobr:
    -			p.reconstructActiveFormattingElements()
    -			if p.elementInScope(defaultScope, a.Nobr) {
    -				p.inBodyEndTagFormatting(a.Nobr, "nobr")
    -				p.reconstructActiveFormattingElements()
    -			}
    -			p.addFormattingElement()
    -		case a.Applet, a.Marquee, a.Object:
    -			p.reconstructActiveFormattingElements()
    -			p.addElement()
    -			p.afe = append(p.afe, &scopeMarker)
    -			p.framesetOK = false
    -		case a.Table:
    -			if !p.quirks {
    -				p.popUntil(buttonScope, a.P)
    -			}
    -			p.addElement()
    -			p.framesetOK = false
    -			p.im = inTableIM
    -			return true
    -		case a.Area, a.Br, a.Embed, a.Img, a.Input, a.Keygen, a.Wbr:
    -			p.reconstructActiveFormattingElements()
    -			p.addElement()
    -			p.oe.pop()
    -			p.acknowledgeSelfClosingTag()
    -			if p.tok.DataAtom == a.Input {
    -				for _, t := range p.tok.Attr {
    -					if t.Key == "type" {
    -						if strings.ToLower(t.Val) == "hidden" {
    -							// Skip setting framesetOK = false
    -							return true
    -						}
    -					}
    -				}
    -			}
    -			p.framesetOK = false
    -		case a.Param, a.Source, a.Track:
    -			p.addElement()
    -			p.oe.pop()
    -			p.acknowledgeSelfClosingTag()
    -		case a.Hr:
    -			p.popUntil(buttonScope, a.P)
    -			p.addElement()
    -			p.oe.pop()
    -			p.acknowledgeSelfClosingTag()
    -			p.framesetOK = false
    -		case a.Image:
    -			p.tok.DataAtom = a.Img
    -			p.tok.Data = a.Img.String()
    -			return false
    -		case a.Textarea:
    -			p.addElement()
    -			p.setOriginalIM()
    -			p.framesetOK = false
    -			p.im = textIM
    -		case a.Xmp:
    -			p.popUntil(buttonScope, a.P)
    -			p.reconstructActiveFormattingElements()
    -			p.framesetOK = false
    -			p.parseGenericRawTextElement()
    -		case a.Iframe:
    -			p.framesetOK = false
    -			p.parseGenericRawTextElement()
    -		case a.Noembed:
    -			p.parseGenericRawTextElement()
    -		case a.Noscript:
    -			if p.scripting {
    -				p.parseGenericRawTextElement()
    -				return true
    -			}
    -			p.reconstructActiveFormattingElements()
    -			p.addElement()
    -			// Don't let the tokenizer go into raw text mode when scripting is disabled.
    -			p.tokenizer.NextIsNotRawText()
    -		case a.Select:
    -			p.reconstructActiveFormattingElements()
    -			p.addElement()
    -			p.framesetOK = false
    -			p.im = inSelectIM
    -			return true
    -		case a.Optgroup, a.Option:
    -			if p.top().DataAtom == a.Option {
    -				p.oe.pop()
    -			}
    -			p.reconstructActiveFormattingElements()
    -			p.addElement()
    -		case a.Rb, a.Rtc:
    -			if p.elementInScope(defaultScope, a.Ruby) {
    -				p.generateImpliedEndTags()
    -			}
    -			p.addElement()
    -		case a.Rp, a.Rt:
    -			if p.elementInScope(defaultScope, a.Ruby) {
    -				p.generateImpliedEndTags("rtc")
    -			}
    -			p.addElement()
    -		case a.Math, a.Svg:
    -			p.reconstructActiveFormattingElements()
    -			if p.tok.DataAtom == a.Math {
    -				adjustAttributeNames(p.tok.Attr, mathMLAttributeAdjustments)
    -			} else {
    -				adjustAttributeNames(p.tok.Attr, svgAttributeAdjustments)
    -			}
    -			adjustForeignAttributes(p.tok.Attr)
    -			p.addElement()
    -			p.top().Namespace = p.tok.Data
    -			if p.hasSelfClosingToken {
    -				p.oe.pop()
    -				p.acknowledgeSelfClosingTag()
    -			}
    -			return true
    -		case a.Caption, a.Col, a.Colgroup, a.Frame, a.Head, a.Tbody, a.Td, a.Tfoot, a.Th, a.Thead, a.Tr:
    -			// Ignore the token.
    -		default:
    -			p.reconstructActiveFormattingElements()
    -			p.addElement()
    -		}
    -	case EndTagToken:
    -		switch p.tok.DataAtom {
    -		case a.Body:
    -			if p.elementInScope(defaultScope, a.Body) {
    -				p.im = afterBodyIM
    -			}
    -		case a.Html:
    -			if p.elementInScope(defaultScope, a.Body) {
    -				p.parseImpliedToken(EndTagToken, a.Body, a.Body.String())
    -				return false
    -			}
    -			return true
    -		case a.Address, a.Article, a.Aside, a.Blockquote, a.Button, a.Center, a.Details, a.Dialog, a.Dir, a.Div, a.Dl, a.Fieldset, a.Figcaption, a.Figure, a.Footer, a.Header, a.Hgroup, a.Listing, a.Main, a.Menu, a.Nav, a.Ol, a.Pre, a.Section, a.Summary, a.Ul:
    -			p.popUntil(defaultScope, p.tok.DataAtom)
    -		case a.Form:
    -			if p.oe.contains(a.Template) {
    -				i := p.indexOfElementInScope(defaultScope, a.Form)
    -				if i == -1 {
    -					// Ignore the token.
    -					return true
    -				}
    -				p.generateImpliedEndTags()
    -				if p.oe[i].DataAtom != a.Form {
    -					// Ignore the token.
    -					return true
    -				}
    -				p.popUntil(defaultScope, a.Form)
    -			} else {
    -				node := p.form
    -				p.form = nil
    -				i := p.indexOfElementInScope(defaultScope, a.Form)
    -				if node == nil || i == -1 || p.oe[i] != node {
    -					// Ignore the token.
    -					return true
    -				}
    -				p.generateImpliedEndTags()
    -				p.oe.remove(node)
    -			}
    -		case a.P:
    -			if !p.elementInScope(buttonScope, a.P) {
    -				p.parseImpliedToken(StartTagToken, a.P, a.P.String())
    -			}
    -			p.popUntil(buttonScope, a.P)
    -		case a.Li:
    -			p.popUntil(listItemScope, a.Li)
    -		case a.Dd, a.Dt:
    -			p.popUntil(defaultScope, p.tok.DataAtom)
    -		case a.H1, a.H2, a.H3, a.H4, a.H5, a.H6:
    -			p.popUntil(defaultScope, a.H1, a.H2, a.H3, a.H4, a.H5, a.H6)
    -		case a.A, a.B, a.Big, a.Code, a.Em, a.Font, a.I, a.Nobr, a.S, a.Small, a.Strike, a.Strong, a.Tt, a.U:
    -			p.inBodyEndTagFormatting(p.tok.DataAtom, p.tok.Data)
    -		case a.Applet, a.Marquee, a.Object:
    -			if p.popUntil(defaultScope, p.tok.DataAtom) {
    -				p.clearActiveFormattingElements()
    -			}
    -		case a.Br:
    -			p.tok.Type = StartTagToken
    -			return false
    -		case a.Template:
    -			return inHeadIM(p)
    -		default:
    -			p.inBodyEndTagOther(p.tok.DataAtom, p.tok.Data)
    -		}
    -	case CommentToken:
    -		p.addChild(&Node{
    -			Type: CommentNode,
    -			Data: p.tok.Data,
    -		})
    -	case ErrorToken:
    -		// TODO: remove this divergence from the HTML5 spec.
    -		if len(p.templateStack) > 0 {
    -			p.im = inTemplateIM
    -			return false
    -		}
    -		for _, e := range p.oe {
    -			switch e.DataAtom {
    -			case a.Dd, a.Dt, a.Li, a.Optgroup, a.Option, a.P, a.Rb, a.Rp, a.Rt, a.Rtc, a.Tbody, a.Td, a.Tfoot, a.Th,
    -				a.Thead, a.Tr, a.Body, a.Html:
    -			default:
    -				return true
    -			}
    -		}
    -	}
    -
    -	return true
    -}
    -
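For readers skimming this removal: the foster-parenting machinery deleted above (shouldFosterParent, fosterParent, and the table cases in inBodyIM) is observable through the upstream package's public API. The sketch below is illustrative only and is not part of this diff; it assumes the upstream golang.org/x/net/html package, and the expected output reflects my reading of the HTML5 tree-construction rules rather than anything asserted by this change.

```go
package main

import (
	"log"
	"os"
	"strings"

	"golang.org/x/net/html"
)

func main() {
	// <b>hi</b> is not legal table content, so the parser
	// foster-parents it: the node is re-inserted immediately
	// before the <table> instead of being dropped.
	doc, err := html.Parse(strings.NewReader("<table><b>hi</b></table>"))
	if err != nil {
		log.Fatal(err)
	}
	if err := html.Render(os.Stdout, doc); err != nil {
		log.Fatal(err)
	}
	// Expected output:
	// <html><head></head><body><b>hi</b><table></table></body></html>
}
```

Misplaced table content is hoisted out rather than discarded, which is why the deleted fosterParent re-inserts nodes before the table element.
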
    -func (p *parser) inBodyEndTagFormatting(tagAtom a.Atom, tagName string) {
    -	// This is the "adoption agency" algorithm, described at
    -	// https://html.spec.whatwg.org/multipage/syntax.html#adoptionAgency
    -
    -	// TODO: this is a fairly literal line-by-line translation of that algorithm.
    -	// Once the code successfully parses the comprehensive test suite, we should
    -	// refactor this code to be more idiomatic.
    -
    -	// Steps 1-2
    -	if current := p.oe.top(); current.Data == tagName && p.afe.index(current) == -1 {
    -		p.oe.pop()
    -		return
    -	}
    -
    -	// Steps 3-5. The outer loop.
    -	for i := 0; i < 8; i++ {
    -		// Step 6. Find the formatting element.
    -		var formattingElement *Node
    -		for j := len(p.afe) - 1; j >= 0; j-- {
    -			if p.afe[j].Type == scopeMarkerNode {
    -				break
    -			}
    -			if p.afe[j].DataAtom == tagAtom {
    -				formattingElement = p.afe[j]
    -				break
    -			}
    -		}
    -		if formattingElement == nil {
    -			p.inBodyEndTagOther(tagAtom, tagName)
    -			return
    -		}
    -
    -		// Step 7. Ignore the tag if formatting element is not in the stack of open elements.
    -		feIndex := p.oe.index(formattingElement)
    -		if feIndex == -1 {
    -			p.afe.remove(formattingElement)
    -			return
    -		}
    -		// Step 8. Ignore the tag if formatting element is not in the scope.
    -		if !p.elementInScope(defaultScope, tagAtom) {
    -			// Ignore the tag.
    -			return
    -		}
    -
    -		// Step 9. This step is omitted because it's just a parse error but no need to return.
    -
    -		// Steps 10-11. Find the furthest block.
    -		var furthestBlock *Node
    -		for _, e := range p.oe[feIndex:] {
    -			if isSpecialElement(e) {
    -				furthestBlock = e
    -				break
    -			}
    -		}
    -		if furthestBlock == nil {
    -			e := p.oe.pop()
    -			for e != formattingElement {
    -				e = p.oe.pop()
    -			}
    -			p.afe.remove(e)
    -			return
    -		}
    -
    -		// Steps 12-13. Find the common ancestor and bookmark node.
    -		commonAncestor := p.oe[feIndex-1]
    -		bookmark := p.afe.index(formattingElement)
    -
    -		// Step 14. The inner loop. Find the lastNode to reparent.
    -		lastNode := furthestBlock
    -		node := furthestBlock
    -		x := p.oe.index(node)
    -		// Step 14.1.
    -		j := 0
    -		for {
    -			// Step 14.2.
    -			j++
    -			// Step. 14.3.
    -			x--
    -			node = p.oe[x]
    -			// Step 14.4. Go to the next step if node is formatting element.
    -			if node == formattingElement {
    -				break
    -			}
    -			// Step 14.5. Remove node from the list of active formatting elements if
    -			// inner loop counter is greater than three and node is in the list of
    -			// active formatting elements.
    -			if ni := p.afe.index(node); j > 3 && ni > -1 {
    -				p.afe.remove(node)
    -				// If any element of the list of active formatting elements is removed,
    -				// we need to take care whether bookmark should be decremented or not.
    -				// This is because the value of bookmark may exceed the size of the
    -				// list by removing elements from the list.
    -				if ni <= bookmark {
    -					bookmark--
    -				}
    -				continue
    -			}
    -			// Step 14.6. Continue the next inner loop if node is not in the list of
    -			// active formatting elements.
    -			if p.afe.index(node) == -1 {
    -				p.oe.remove(node)
    -				continue
    -			}
    -			// Step 14.7.
    -			clone := node.clone()
    -			p.afe[p.afe.index(node)] = clone
    -			p.oe[p.oe.index(node)] = clone
    -			node = clone
    -			// Step 14.8.
    -			if lastNode == furthestBlock {
    -				bookmark = p.afe.index(node) + 1
    -			}
    -			// Step 14.9.
    -			if lastNode.Parent != nil {
    -				lastNode.Parent.RemoveChild(lastNode)
    -			}
    -			node.AppendChild(lastNode)
    -			// Step 14.10.
    -			lastNode = node
    -		}
    -
    -		// Step 15. Reparent lastNode to the common ancestor,
    -		// or for misnested table nodes, to the foster parent.
    -		if lastNode.Parent != nil {
    -			lastNode.Parent.RemoveChild(lastNode)
    -		}
    -		switch commonAncestor.DataAtom {
    -		case a.Table, a.Tbody, a.Tfoot, a.Thead, a.Tr:
    -			p.fosterParent(lastNode)
    -		default:
    -			commonAncestor.AppendChild(lastNode)
    -		}
    -
    -		// Steps 16-18. Reparent nodes from the furthest block's children
    -		// to a clone of the formatting element.
    -		clone := formattingElement.clone()
    -		reparentChildren(clone, furthestBlock)
    -		furthestBlock.AppendChild(clone)
    -
    -		// Step 19. Fix up the list of active formatting elements.
    -		if oldLoc := p.afe.index(formattingElement); oldLoc != -1 && oldLoc < bookmark {
    -			// Move the bookmark with the rest of the list.
    -			bookmark--
    -		}
    -		p.afe.remove(formattingElement)
    -		p.afe.insert(bookmark, clone)
    -
    -		// Step 20. Fix up the stack of open elements.
    -		p.oe.remove(formattingElement)
    -		p.oe.insert(p.oe.index(furthestBlock)+1, clone)
    -	}
    -}
    -
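A concrete illustration of the adoption agency algorithm removed above, again using the upstream golang.org/x/net/html API rather than code from this diff; the expected output is the canonical result for this well-known misnesting example under the WHATWG rules.

```go
package main

import (
	"log"
	"os"
	"strings"

	"golang.org/x/net/html"
)

func main() {
	// </b> arrives while <i> is still open. The adoption agency
	// algorithm clones the <i> formatting element so that both
	// nestings can be represented in the tree.
	doc, err := html.Parse(strings.NewReader("<p>1<b>2<i>3</b>4</i>5</p>"))
	if err != nil {
		log.Fatal(err)
	}
	if err := html.Render(os.Stdout, doc); err != nil {
		log.Fatal(err)
	}
	// Expected output:
	// <html><head></head><body><p>1<b>2<i>3</i></b><i>4</i>5</p></body></html>
}
```
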
    -// inBodyEndTagOther performs the "any other end tag" algorithm for inBodyIM.
    -// "Any other end tag" handling from 12.2.6.5 The rules for parsing tokens in foreign content
    -// https://html.spec.whatwg.org/multipage/syntax.html#parsing-main-inforeign
    -func (p *parser) inBodyEndTagOther(tagAtom a.Atom, tagName string) {
    -	for i := len(p.oe) - 1; i >= 0; i-- {
    -		// Two element nodes have the same tag if they have the same Data (a
    -		// string-typed field). As an optimization, for common HTML tags, each
    -		// Data string is assigned a unique, non-zero DataAtom (a uint32-typed
    -		// field), since integer comparison is faster than string comparison.
    -		// Uncommon (custom) tags get a zero DataAtom.
    -		//
    -		// The if condition here is equivalent to (p.oe[i].Data == tagName).
    -		if (p.oe[i].DataAtom == tagAtom) &&
    -			((tagAtom != 0) || (p.oe[i].Data == tagName)) {
    -			p.oe = p.oe[:i]
    -			break
    -		}
    -		if isSpecialElement(p.oe[i]) {
    -			break
    -		}
    -	}
    -}
    -
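The Data/DataAtom equivalence described in the comment above can be demonstrated with the upstream x/net/html/atom package. A minimal sketch, not part of this diff, using only atom.Lookup and Atom.String:

```go
package main

import (
	"fmt"

	"golang.org/x/net/html/atom"
)

func main() {
	// Common HTML tags are interned as non-zero integer atoms, so
	// tag comparisons are integer comparisons.
	fmt.Println(atom.Lookup([]byte("div")) == atom.Div) // true
	// Uncommon (custom) tags have no atom and compare equal to zero,
	// which is why inBodyEndTagOther falls back to comparing Data.
	fmt.Println(atom.Lookup([]byte("my-widget")) == 0) // true
	// An atom round-trips to its tag name.
	fmt.Println(atom.Div.String()) // "div"
}
```
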
    -// Section 12.2.6.4.8.
    -func textIM(p *parser) bool {
    -	switch p.tok.Type {
    -	case ErrorToken:
    -		p.oe.pop()
    -	case TextToken:
    -		d := p.tok.Data
    -		if n := p.oe.top(); n.DataAtom == a.Textarea && n.FirstChild == nil {
-			// Ignore a newline at the start of a <textarea>